# SearchAI Prompt Configuration
#
# To modify any prompts related to SearchAI functionalities, navigate to
# <SearchBlox-Installation-folder>/webapps/ROOT/WEB-INF/searchai-config.yml

# -----------------------------------------------------------------------------------
# Active LLM Provider and prompt Configuration
#
# Specify which provider configuration to use from the 'llm-providers' map below.
# Modify the active-llm-provider key to switch between your pre-configured providers.
# -----------------------------------------------------------------------------------
# Must be one of the keys defined under 'llm-providers' below:
#   ollama, openai-official, openai-compatible, llamacpp, onnx, onnx-vision
active-llm-provider: "ollama"

# -----------------------------------------------------------------------------------
# Task-Specific Provider Configuration
#
# Override the default provider for specific tasks. Each task can use a different
# provider from the 'llm-providers' map below. If not specified, falls back to
# the 'active-llm-provider' setting.
# -----------------------------------------------------------------------------------
# Each value must name an entry in the 'llm-providers' map below.
task-providers:
  chat: "ollama"
  document-enrichment: "ollama"
  smart-faq: "ollama"
  searchai-assist-text: "ollama"
  searchai-assist-image: "ollama"
  recommendations: "ollama"
  knowledge-graph: "ollama"
  document-query-decomposition: "ollama"
  product-query-decomposition: "ollama"
  product-kg-extraction: "ollama"
  analytics: "ollama"
  testing: "ollama"
  admin: "ollama"
  analysis: "ollama"
  extraction: "ollama"
  log-analysis: "ollama"
  # Agent-specific task providers (decoupled from non-agent features)
  agent-chat: "ollama"
  agent-analytics: "ollama"
  agent-testing: "ollama"
  agent-analysis: "ollama"
  agent-admin: "ollama"
  agent-extraction: "ollama"
# -----------------------------------------------------------------------------------
# LLM Provider Definitions
#
# Each entry below can be referenced by 'active-llm-provider' and 'task-providers'.
# Per-provider 'models' maps a task name to the model that provider should use.
# -----------------------------------------------------------------------------------
llm-providers:
  # --- Ollama (for local models) ---
  ollama:
    platform: "ollama"
    # 'host.docker.internal' resolves to the host machine from inside a container.
    url: "http://host.docker.internal:11434"
    models:
      chat: "qwen2.5:3b"
      document-enrichment: "qwen2.5:3b"
      smart-faq: "qwen2.5:3b"
      searchai-assist-text: "qwen2.5:3b"
      searchai-assist-image: "llama3.2-vision"
      recommendations: "qwen2.5:3b"
      knowledge-graph: "qwen2.5:3b"
      document-query-decomposition: "qwen2.5:3b"
      product-query-decomposition: "qwen2.5:3b"
      product-kg-extraction: "qwen2.5:3b"
      analytics: "qwen2.5:3b"
      testing: "qwen2.5:3b"
      admin: "qwen2.5:3b"
      analysis: "qwen2.5:3b"
      extraction: "qwen2.5:3b"
      log-analysis: "qwen2.5:3b"
      # Agent-specific models (decoupled from non-agent features)
      agent-chat: "qwen2.5:7b"
      agent-analytics: "qwen2.5:7b"
      agent-testing: "qwen2.5:7b"
      agent-analysis: "llama3.2-vision"
      agent-admin: "qwen2.5:7b"
      agent-extraction: "llama3.2-vision"


  # --- Official OpenAI API ---
  openai-official:
    platform: "openai"
    url: "https://api.openai.com/v1"
    # IMPORTANT: Replace with your actual secret API key from OpenAI
    api-key: "YOUR_OPENAI_API_KEY_HERE"
    # NOTE(review): only a subset of the tasks listed in 'task-providers' is
    # mapped here (the same is true for the providers below); the behavior for
    # unmapped tasks (e.g. agent-*) is not visible in this file — confirm the
    # fallback before routing those tasks to this provider.
    models:
      chat: "gpt-4o"
      document-enrichment: "gpt-4o"
      smart-faq: "gpt-4o"
      searchai-assist-text: "gpt-4o"
      searchai-assist-image: "gpt-4o"
      recommendations: "gpt-4o"
      knowledge-graph: "gpt-4o"


  # --- OpenAI-Compatible Services (e.g., local vLLM) ---
  openai-compatible:
    platform: "openai"
    url: "http://localhost:8000/v1"
    # Leave empty when the local service requires no authentication.
    api-key: ""
    models:
      chat: "Qwen/Qwen2.5-7B-Instruct"
      document-enrichment: "Qwen/Qwen2.5-7B-Instruct"
      smart-faq: "Qwen/Qwen2.5-7B-Instruct"
      searchai-assist-text: "Qwen/Qwen2.5-7B-Instruct"
      searchai-assist-image: "unsloth/Llama-3.2-11B-Vision"
      recommendations: "Qwen/Qwen2.5-7B-Instruct"


  # --- Llama.cpp server ---
  llamacpp:
    platform: "llamacpp"
    url: "http://localhost:8080"
    models:
      chat: "qwen2.5"
      document-enrichment: "qwen2.5"
      smart-faq: "qwen2.5"
      searchai-assist-text: "qwen2.5"
      searchai-assist-image: "llama3.2-vision"
      recommendations: "qwen2.5"


  # --- Built-in ONNX LLM (Qwen2.5-7B, no external service required) ---
  onnx:
    platform: "onnx"
    # Model type: "int8" (7GB, faster) or "fp16" (28GB, higher precision)
    model-type: "int8"
    # No image task here — pair with 'onnx-vision' below for image handling.
    models:
      chat: "qwen2.5-7b"
      document-enrichment: "qwen2.5-7b"
      smart-faq: "qwen2.5-7b"
      searchai-assist-text: "qwen2.5-7b"
      recommendations: "qwen2.5-7b"

  # --- Built-in ONNX Vision LLM (Qwen2-VL-2B, no external service required) ---
  # Model files: https://huggingface.co/pdufour/Qwen2-VL-2B-Instruct-ONNX-Q4-F16
  # Place files in: models/qwen2-vl-2b-onnx/
  onnx-vision:
    platform: "onnx-vision"
    models:
      searchai-assist-image: "qwen2-vl-2b"


# -----------------------------------------------------------------------------------
# Embedding Provider Configuration
#
# Configure embedding models for vector search. The active-embedding-provider
# specifies which provider to use from the 'embedding-providers' map below.
# -----------------------------------------------------------------------------------
# Must be one of the keys defined under 'embedding-providers' below.
active-embedding-provider: "onnx"

embedding-providers:
  # NOTE(review): 'dimension' must agree with the vector index configuration;
  # switching between providers with different dimensions presumably requires
  # reindexing — confirm against the consuming code.

  # --- Built-in ONNX Embedding (BGE-base, no external service required) ---
  onnx:
    platform: "onnx"
    # Model path relative to models/embedding_llm/
    model-path: "bge_onnx"
    # Pooling mode: MEAN, CLS, or MAX
    pooling-mode: "MEAN"
    # Embedding dimension (768 for BGE-base)
    dimension: 768

  # --- Ollama Embeddings ---
  ollama:
    platform: "ollama"
    # 'host.docker.internal' resolves to the host machine from inside a container.
    url: "http://host.docker.internal:11434"
    model: "nomic-embed-text"
    dimension: 768

  # --- OpenAI Embeddings ---
  openai-official:
    platform: "openai"
    url: "https://api.openai.com/v1"
    api-key: "YOUR_OPENAI_API_KEY_HERE"
    model: "text-embedding-3-small"
    # Dimensions: text-embedding-3-small (1536), text-embedding-3-large (3072)
    dimension: 1536

  # --- OpenAI-Compatible Embeddings (e.g., local TEI server) ---
  openai-compatible:
    platform: "openai"
    url: "http://localhost:8080/v1"
    # Leave empty when the local service requires no authentication.
    api-key: ""
    model: "BAAI/bge-base-en-v1.5"
    dimension: 768

  # --- Cohere Embeddings ---
  cohere:
    platform: "cohere"
    api-key: "YOUR_COHERE_API_KEY_HERE"
    model: "embed-english-v3.0"
    # input_type: search_document, search_query, classification, clustering
    input-type: "search_document"
    dimension: 1024

  # --- Voyage AI Embeddings ---
  voyage:
    platform: "voyage"
    api-key: "YOUR_VOYAGE_API_KEY_HERE"
    model: "voyage-2"
    dimension: 1024

  # --- Jina Embeddings ---
  jina:
    platform: "jina"
    api-key: "YOUR_JINA_API_KEY_HERE"
    model: "jina-embeddings-v2-base-en"
    dimension: 768

  # --- Azure OpenAI Embeddings ---
  azure-openai:
    platform: "azure"
    # Azure OpenAI resource endpoint
    url: "https://YOUR_RESOURCE.openai.azure.com"
    api-key: "YOUR_AZURE_API_KEY_HERE"
    # Deployment name for embedding model
    deployment: "text-embedding-ada-002"
    dimension: 1536

  # --- AWS Bedrock Embeddings ---
  aws-bedrock:
    platform: "bedrock"
    # AWS credentials
    access-key-id: "YOUR_AWS_ACCESS_KEY_ID"
    secret-access-key: "YOUR_AWS_SECRET_ACCESS_KEY"
    # AWS region
    region: "us-east-1"
    model: "amazon.titan-embed-text-v1"
    dimension: 1536

  # --- Google Generative AI Embeddings (API Key) ---
  google-genai:
    platform: "google"
    api-key: "YOUR_GOOGLE_API_KEY_HERE"
    model: "text-embedding-004"
    dimension: 768

  # --- Google Vertex AI Embeddings (Service Account) ---
  google-vertex:
    platform: "google-vertex"
    # Access token from gcloud or service account
    access-token: "YOUR_GCLOUD_ACCESS_TOKEN"
    project-id: "YOUR_GCP_PROJECT_ID"
    location: "us-central1"
    model: "textembedding-gecko@003"
    dimension: 768


# -----------------------------------------------------------------------------------
# Reranker Provider Configuration
#
# Configure reranker models for improving search result relevance.
# The active-reranker-provider specifies which provider to use.
# -----------------------------------------------------------------------------------
# Must be one of the keys defined under 'reranker-providers' below.
active-reranker-provider: "onnx"

reranker-providers:
  # --- Built-in ONNX Reranker (BGE-reranker, no external service required) ---
  onnx:
    platform: "onnx"
    # Model path relative to models/ranker/
    model-path: "ranker"
    # Maximum sequence length
    max-length: 512

  # --- Ollama Reranker (for local reranking models) ---
  ollama:
    platform: "ollama"
    # 'host.docker.internal' resolves to the host machine from inside a container.
    url: "http://host.docker.internal:11434"
    model: "bge-reranker-base"
    max-length: 512
    top-n: 10

  # --- Cohere Reranker ---
  cohere:
    platform: "cohere"
    api-key: "YOUR_COHERE_API_KEY_HERE"
    model: "rerank-english-v3.0"
    # Top N results to return after reranking
    top-n: 10

  # --- Jina Reranker ---
  jina:
    platform: "jina"
    api-key: "YOUR_JINA_API_KEY_HERE"
    model: "jina-reranker-v1-base-en"
    top-n: 10

  # --- Voyage Reranker ---
  voyage:
    platform: "voyage"
    api-key: "YOUR_VOYAGE_API_KEY_HERE"
    model: "rerank-1"
    top-n: 10

  # --- OpenAI-Compatible Reranker (e.g., local TEI reranker) ---
  openai-compatible:
    # NOTE(review): this uses platform "openai-compatible" while the embedding
    # and LLM equivalents use platform "openai" — confirm which identifier the
    # consuming code expects for rerankers.
    platform: "openai-compatible"
    url: "http://localhost:8081/rerank"
    api-key: ""
    model: "BAAI/bge-reranker-base"
    top-n: 10

  # --- AWS Bedrock Reranker (Cohere Rerank on Bedrock) ---
  aws-bedrock:
    platform: "bedrock"
    # AWS credentials
    access-key-id: "YOUR_AWS_ACCESS_KEY_ID"
    secret-access-key: "YOUR_AWS_SECRET_ACCESS_KEY"
    # AWS region
    region: "us-east-1"
    model: "cohere.rerank-v3-5:0"
    top-n: 10

  # --- Google Vertex AI Reranker (Discovery Engine Ranking API) ---
  google-vertex:
    platform: "google-vertex"
    # Access token from gcloud or service account
    access-token: "YOUR_GCLOUD_ACCESS_TOKEN"
    project-id: "YOUR_GCP_PROJECT_ID"
    location: "global"
    model: "semantic-ranker-512@latest"
    top-n: 10


# NOTE(review): both keys below have empty values, which YAML parses as null.
# 'num-thread' sits at column 0, making it an independent top-level key; it may
# have been intended as a child of 'searchai-agents-server' — confirm against
# the consuming code before changing the structure.
searchai-agents-server:
num-thread:

cache-settings:
  # Toggle SearchAI response caching.
  use-cache: true
  # Threshold a cached result must meet to be served; the scale is not shown in
  # this file (presumably 0-100) — confirm against the consuming code.
  fact-score-threshold: 30


# -----------------------------------------------------------------------------------
# Knowledge Graph Configuration
# -----------------------------------------------------------------------------------
knowledge-graph:
  enabled: true
  extraction:
    # Minimum confidence (0.0-1.0) for keeping an extracted triple — matches the
    # per-triple confidence requested by the 'knowledge-graph-extraction' prompt.
    min-confidence: 0.7
    max-triples-per-chunk: 10
    # When true, extraction presumably runs asynchronously to indexing — confirm.
    async-processing: true
  indices:
    # Index names used to store graph entities, triples, and semantic context.
    entities: sb_entities
    triples: sb_triples
    context: sb_context


# -----------------------------------------------------------------------------------
# Prompt Strings
#
# Free-form prompt text for SearchAI features. Values use YAML block scalars (|),
# so line breaks are preserved exactly as written.
# -----------------------------------------------------------------------------------
prompts:
  # Rewrites a follow-up question into a self-contained one using chat history.
  standalone-question: |
    Given the conversation history and a follow-up question, rephrase the follow-up question to be a standalone question that includes all necessary context.

  # Used when the retrieved context is structured (JSON) database data.
  structured-data: |
    You are a helpful AI assistant. The following data is from a structured database in JSON format.
    Please interpret this data and answer the question in a natural, conversational way.
    If you cannot answer based on the provided data, say 'I don't have enough information to answer that question.'
    Important: Do not refer to or mention the source or format of the information (like 'structured data', 'JSON', or 'provided information').
    Reply in 3 sentences or less.

  # Used when the retrieved context is unstructured text passages.
  unstructured-data: |
    You are a helpful AI assistant. Use the following passages to answer the user's question.
    If you cannot answer based on the available information, simply say 'I don't have enough information to answer that question.'
    Important: Do not refer to or mention the source of information (like 'passages', 'text', or 'provided information').
    Reply in 3 sentences or less.

  # Used when the retrieved context mixes structured (JSON) data and text passages.
  # Fix: added the missing closing apostrophe after "...answer that question." so
  # the fallback sentence matches structured-data and unstructured-data above.
  mixed-data: |
    You are a helpful AI assistant. You have access to both structured database data (in JSON format)
    and unstructured text passages. Please use all available information to answer the question naturally.
    If you cannot answer based on the available information, simply say 'I don't have enough information to answer that question.'
    Important: Do not refer to or mention the source or format of the information (like 'structured data', 'passages', or 'provided information').
    Reply in 3 sentences or less.


  # Instruction fragments composed into the document-enrichment prompt.
  document-enrichment-title: "- title: A concise title"
  document-enrichment-description: "- description: A brief description"
  # The braces below are literal prompt text (safe inside the | block scalar).
  document-enrichment-topics: |
    - topics: Extract 10 topics and return them and their weights. Do not include this example in your output, but use it as a reference for the format:
    Example format (DO NOT COPY THIS DATA):
    {
      "topics": {
        "AI Agents": 0.8,
        "Business Processes": 0.75,
        "Enterprise Search": 0.9,
        ...
      }
    }


  # Generates five Q&A-style FAQs from indexed content.
  smart-faq: |
    From the provided content, generate 5 FAQs in a structured Q&A format. 
    Each FAQ should be relevant to the content and provide valuable information. 
    Format each FAQ as: Q: [Question] A: [Answer]
    

  # --- SearchAI Assist actions (operate on user-provided content and/or URLs) ---
  searchai-assist-summarize: |
    Please summarize the provided content and/or the URL. Focus on the key points, main ideas, and important details. 
    Provide a concise overview that captures the essence of the content.

  searchai-assist-analyze: |
    Analyze the provided content and/or the URL. Provide a detailed analysis that highlights patterns, trends, or any notable insights. 
    Identify any anomalies, inconsistencies, or areas of interest within the content, and offer recommendations if applicable.

  searchai-assist-compare: |
    Compare the provided contents and/or the URLs, summarizing the key similarities and differences between them. 
    Organize the results in a table format, ensuring all contents and/or the URLs are included for comparison. 
    The table should highlight key points such as content, structure, themes, and any other relevant differences or similarities.

  searchai-assist-explain: |
    Please explain the content of the provided documents in clear and simple terms.
    Focus on breaking down complex ideas, technical language, or specialized concepts so they are easy to understand for a general audience.
    If helpful, use examples, analogies, or step-by-step reasoning to clarify the meaning.
    Highlight any key terms or sections that may require special explanation.

  searchai-assist-highlights: |
    Please highlight the key elements in the provided documents.
    Identify and mark important sentences, keywords, named entities (such as people, organizations, or dates), and notable phrases.
    Focus on content that is essential for quick understanding or further analysis.


  # Converts structured product records into search-friendly natural language.
  product-discovery: |
    Convert the following structured product data into a natural language description that would help answer various types of search queries.
    Include relationships between different attributes and their values, covering all relevant attributes.
    Incorporate variations of phrases for numeric values (like 'more than X', 'less than Y', 'approximately Z').
    Keep the response concise.


  # Title + description generation for indexed images (vision-model task).
  image-description: |
    Generate a short, one-line title and a concise description for the following image.
    The description should objectively describe the main subject and overall scene.
    Respond in the following strict format (without bold or markdown or special symbols):
    Format your response as:
    Title: <title text>
    Description: <description text>


  # Triple/context extraction prompt for the knowledge-graph pipeline.
  # {chunk_text} is a placeholder — presumably substituted by the application at
  # runtime (confirm); the JSON braces are literal prompt text, safe inside |.
  knowledge-graph-extraction: |
    Analyze the following text and extract:

    1. ENTITIES AND RELATIONSHIPS: Extract all factual relationships as triples.
       Each triple: subject, subject_type, predicate, object, object_type, confidence (0.0-1.0)

       Entity types: PERSON, ORGANIZATION, TECHNOLOGY, PRODUCT, LOCATION, CONCEPT, EVENT, DATE, METRIC, OTHER

    2. SEMANTIC CONTEXT:
       - topics: List of 2-5 main topics/themes
       - sentiment: POSITIVE, NEGATIVE, or NEUTRAL
       - intent: INFORMATIONAL, INSTRUCTIONAL, PERSUASIVE, CONVERSATIONAL, or TRANSACTIONAL
       - language: Detected language code (e.g., "en")

    TEXT:
    {chunk_text}

    Respond ONLY with valid JSON:
    {
      "triples": [{"subject": "", "subject_type": "", "predicate": "", "object": "", "object_type": "", "confidence": 0.9}],
      "topics": ["topic1", "topic2"],
      "sentiment": "NEUTRAL",
      "intent": "INFORMATIONAL",
      "language": "en"
    }
  # --- Visual search prompts (image-to-product-search, vision-model tasks) ---
  visual-search-similar: |
    Describe this product in detail for product search.
    Include: color, material, style, pattern, shape, size category, brand if visible, and any distinctive features.
    Be specific and use common product search terms.
    Respond with only the description, no preamble.
  visual-search-exact: |
    Identify this product as precisely as possible.
    Include: brand name if visible, model name/number if visible, exact color, any visible text or logos, distinguishing features, and material.
    If you can identify the exact product, state it.
    Respond with only the identification, no preamble.
  visual-search-complementary: |
    Identify this product and suggest what types of products would complement it.
    First describe the product (type, color, style, material).
    Then suggest 3-5 types of complementary items with style/color guidance.
    Format: PRODUCT: [description]
    COMPLEMENTS: [comma-separated complement types with style hints]
  visual-search-deconstruct: |
    List every distinct purchasable item visible in this image.
    Number each item. For each item, describe it with enough detail to search for it (type, color, material, style, distinguishing features).
    Format each item as: [number]. [item type]: [detailed description]
    Example:
    1. Shirt: White button-down cotton shirt with spread collar
    2. Skirt: Navy blue pleated midi skirt with high waist
    Only list items that are clearly visible and purchasable.
  visual-search-suggest: |
    Describe this product in detail and suggest what alternatives a shopper might want.
    Include: type, color, material, style, brand if visible.
    Then suggest alternative styles, materials, or variations that a shopper comparing options might consider.
    Respond with only the description and suggestions, no preamble.
  # Fallback prompt when no specific visual-search mode is requested.
  visual-search-default: |
    Describe this product image in detail for product search.


# -----------------------------------------------------------------------------------
# Prompt Templates Configuration
#
# Customizable prompt templates for LLM tasks. Each task type can have:
#   - base-prompt: The main prompt template with {{variable}} placeholders
#   - model-variants: Model-specific instructions appended to the prompt
#   - examples: Example outputs for JSON tasks (helps local/ONNX models)
#   - requires-json: Whether the response must be valid JSON
#
# These override the hardcoded defaults in FlexiblePromptManager.
# Collection-specific overrides can be set via the API at:
#   PUT /ui/v1/prompts/templates/{task}/collection/{colId}
# -----------------------------------------------------------------------------------
prompt-templates:

  # --- General Recommendations (Documents/Websites) ---
  # Used by: POST /rest/v2/api/recommendations
  # Generates a "next page title" from viewed content + user context
  # Available variables: {{original_query}}, {{viewed_content}}, {{user_context}}
  recommendations:
    base-prompt: |
      You are a recommendation engine. Based on the user's search query, browsing history, and preferences, predict what they want to see next.

      USER'S SEARCH QUERY: {{original_query}}

      VIEWED PAGES (in order):
      {{viewed_content}}

      {{user_context}}

      Based on the search query, viewed pages, and user preferences above, what is the user most likely interested in seeing next?
      Generate ONLY a concise title for the next page they would want to visit. Do not explain your reasoning.
    # Output is a plain-text title, not JSON.
    requires-json: false

  # --- E-commerce Recommendations (Product Discovery) ---
  # Used by: POST /rest/v2/api/recommendations with platform=ecommerce
  # Auto-selected for PRODUCT_DISCOVERY collections
  # Available variables: {{original_query}}, {{viewed_content}}, {{user_context}}
  recommendations-ecommerce:
    base-prompt: |
      You are a product recommendation engine for an e-commerce store. Based on the customer's search query, browsing history, purchase patterns, and preferences, predict which product they want to see next.

      CUSTOMER'S SEARCH QUERY: {{original_query}}

      PRODUCTS VIEWED (in order):
      {{viewed_content}}

      CUSTOMER PROFILE:
      {{user_context}}

      Generate a concise product title for what this customer would want to see next.
      Prioritize their favorite brands and categories when making your recommendation.
      Generate ONLY the product title — no explanations, no reasoning, no extra text.
    # Output is a plain-text product title, not JSON.
    requires-json: false
# Check the 'prompts' and 'prompt-templates' sections above and modify them based
# on the SearchAI functionality you want to customize.

# NOTE: A second, byte-identical copy of the top-level 'prompts' and
# 'prompt-templates' sections previously appeared here. Duplicate top-level
# mapping keys are invalid per the YAML 1.2 spec, and most parsers silently
# keep only the LAST occurrence — which would have overridden the sections
# defined earlier in this file. The duplicate copies have been removed; edit
# the single 'prompts' and 'prompt-templates' sections above.