Skip to content

Structured query

Vector store

Structured Query

get_structured_query(query) async

Description: Get the query, replace %20 with space and invoke the chain to get the answers based on the prompt.

Source code in structured_query/llm_service_structured_query.py
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
@app.get("/structuredquery/{query}", response_class=JSONResponse)
@retry(stop=stop_after_attempt(3), retry=retry_if_exception_type(ConnectTimeout))
async def get_structured_query(query: str):
    """
    Replace %20 (URL encoding) in the query with spaces, invoke the chain to
    obtain a structured query, and translate it into a Chroma filter condition.

    Args:
        query: Raw query string taken from the URL path (may contain %20 escapes).

    Returns:
        Tuple of (structured query response, Chroma filter condition).

    Raises:
        HTTPException: 500 if the chain invocation or the translation fails.
    """
    try:
        query = query.replace("%20", " ")
        response = chain.invoke({"query": query})
        print(response)
        # visit_structured_query returns (query, filter_kwargs); keep the filter.
        filter_condition = ChromaTranslator().visit_structured_query(
            structured_query=response
        )[1]
    except Exception as e:
        # Raise (not just print) so the client actually receives the 500 instead
        # of a silent (None, None) response.
        raise HTTPException(status_code=500, detail=f"An error occurred: {e}") from e

    return response, filter_condition

Deprecated

  • This section contains the API reference for code that does not use LangChain's structured query processing. It is no longer used, but is kept here for future reference.

get_llm_query(query) async

Description: Get the query, replace %20 (url spacing) with space and invoke the chain to get the answers based on the prompt

Source code in llm_service/llm_service.py
35
36
37
38
39
40
41
42
43
44
45
46
47
48
@app.get("/llmquery/{query}", response_class=JSONResponse)
@retry(stop=stop_after_attempt(3), retry=retry_if_exception_type(ConnectTimeout))
async def get_llm_query(query: str):
    """
    Replace %20 (URL encoding) in the query with spaces and invoke the chain
    to get the answers based on the prompt.

    Args:
        query: Raw query string taken from the URL path (may contain %20 escapes).

    Returns:
        JSONResponse containing the parsed answers keyed by prompt name.
    """
    query = query.replace("%20", " ")
    print(f"Query: {query}")
    try:
        # Prefer the dockerized Ollama endpoint when it is reachable.
        response = chain_docker.invoke({"query": query})
    except Exception:
        # Narrowed from a bare `except:` (which would also swallow
        # KeyboardInterrupt/SystemExit); fall back to the local chain.
        response = chain.invoke({"query": query})
    answers = parse_answers_initial(response, patterns, prompt_dict)
    return JSONResponse(content=answers)

create_chain(prompt, model='llama3', temperature=0, base_url='http://localhost:11434')

Description: Create a langchain chain with the given prompt and model and the temperature. The lower the temperature, the less "creative" the model will be.

Source code in llm_service/llm_service_utils.py
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
def create_chain(
    prompt,
    model: str = "llama3",
    temperature: float = 0,
    base_url: str = "http://localhost:11434",
):
    """
    Create a LangChain chain with the given prompt, model and temperature.
    The lower the temperature, the less "creative" the model will be.

    Args:
        prompt: Prompt template string passed to ChatPromptTemplate.from_template.
        model: Ollama model name.
        temperature: Sampling temperature (0 = deterministic). Annotated as
            float — temperatures such as 0.7 are valid; 0 remains the default.
        base_url: Base URL of the Ollama server.

    Returns:
        A runnable chain: prompt template | chat model | string output parser.
    """
    llm = ChatOllama(model=model, temperature=temperature, base_url=base_url)
    # Bind the template to a new name instead of shadowing the `prompt` param.
    prompt_template = ChatPromptTemplate.from_template(prompt)

    return prompt_template | llm | StrOutputParser()

parse_answers_initial(response, patterns, prompt_dict)

Description: Parse the answers from the initial response - if the response contains a ? and a new line then join the next line with it (sometimes the LLM adds a new line after the ? instead of just printing it on the same line)

Source code in llm_service/llm_service_utils.py
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
def parse_answers_initial(response: str, patterns: list, prompt_dict: dict) -> dict:
    """
    Parse the answers from the initial LLM response.

    If the response contains a "?" followed by a newline, the next line is
    joined with it (sometimes the LLM adds a newline after the "?" instead of
    printing the answer on the same line).

    Args:
        response: Raw LLM output text.
        patterns: Regex patterns an answer must match (anchored via re.match).
        prompt_dict: Prompts keyed by name; answers are mapped to these keys
            in order.

    Returns:
        Dict mapping each prompt_dict key to its matched answer.

    Raises:
        IndexError: If fewer answers are found than there are prompt keys.
    """

    answers = []
    response = response.replace("?\n", "?")

    # Convert the response to lowercase and split it into lines.
    lines = response.lower().split("\n")

    for line in lines:
        if "?" in line:
            # Take everything after the FIRST question mark. Using maxsplit=1
            # keeps answers that themselves contain a "?" intact (a plain
            # split("?")[1] would truncate at the second question mark).
            potential_answer = line.split("?", 1)[1].strip()
        else:
            potential_answer = line.strip()

        # Keep the line if it matches any of the expected answer patterns.
        for pattern in patterns:
            if re.match(pattern, potential_answer):
                answers.append(potential_answer)
                break  # Stop checking other patterns once one matches.

    # Map answers onto the prompt_dict keys in order. Indexing (rather than
    # zip) deliberately preserves the original IndexError when the model
    # produced fewer answers than expected.
    answers_dict = {}
    for i, key in enumerate(prompt_dict.keys()):
        answers_dict[key] = answers[i]

    return answers_dict