# agent2/app.py
# Displaying final output format
# from IPython.display import display, Markdown, Latex
# LangChain Dependencies
from langchain.prompts import PromptTemplate
from langchain_core.output_parsers import JsonOutputParser, StrOutputParser
from langchain_community.chat_models import ChatOllama
from langgraph.graph import END, StateGraph
# For State Graph
from typing_extensions import TypedDict
import json
# Defining LLM
local_llm = 'llama3.2'
llama3 = ChatOllama(model=local_llm, temperature=0)
llama3_json = ChatOllama(model=local_llm, format='json', temperature=0)
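# Both models assume a local Ollama server with the 'llama3.2' model pulled
# (e.g. `ollama pull llama3.2`). A minimal smoke test, assuming the server is up:
# print(llama3.invoke("Reply with one word: ready?").content)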
# Generation Prompt
generate_prompt = PromptTemplate(
    template="""
<|begin_of_text|>
<|start_header_id|>system<|end_header_id|>
You are an AI assistant for research question tasks that synthesizes web search results.
Strictly use the following pieces of web search context to answer the question. If you don't know the answer, just say that you don't know.
Keep the answer concise, but provide all of the details you can in the form of a research report.
Only make direct references to material provided in the context.
<|eot_id|>
<|start_header_id|>user<|end_header_id|>
Question: {question}
Send Order Context: {context}
Answer:
<|eot_id|>
<|start_header_id|>assistant<|end_header_id|>""",
    input_variables=["question", "context"],
)
# Chain
generate_chain = generate_prompt | llama3 | StrOutputParser()
# Test Run
# question = "who is Yan Lecun?"
# context = ""
# generation = generate_chain.invoke({"context": context, "question": question})
# print(generation)
# Router
router_prompt = PromptTemplate(
    template="""
<|begin_of_text|>
<|start_header_id|>system<|end_header_id|>
You are an expert at routing a user question to either the generation stage or the send order stage.
Use 'send_order' for questions where the user wants to buy CEC from you.
Otherwise, skip straight to the generation phase to respond.
You do not need to be stringent with the keywords in the question related to these topics.
Give a binary choice 'send_order' or 'generate' based on the question.
Return the JSON with a single key 'choice' and no preamble or explanation.
Question to route: {question}
<|eot_id|>
<|start_header_id|>assistant<|end_header_id|>
""",
    input_variables=["question"],
)
# Chain
question_router = router_prompt | llama3_json | JsonOutputParser()
# Test Run
# question = "What's up?"
# print(question_router.invoke({"question": question}))
# Query Transformation
query_prompt = PromptTemplate(
    template="""
<|begin_of_text|>
<|start_header_id|>system<|end_header_id|>
You are an expert at selling CEC.
Return the JSON with a single key 'count' whose value is the amount the user wants to buy.
Question to transform: {question}
<|eot_id|>
<|start_header_id|>assistant<|end_header_id|>
""",
    input_variables=["question"],
)
# Chain
query_chain = query_prompt | llama3_json | JsonOutputParser()
# Test Run
# question = "What's happened recently with Gaza?"
# print(query_chain.invoke({"question": question}))
# Graph State
class GraphState(TypedDict):
    """
    Represents the state of our graph.

    Attributes:
        question: user question
        generation: LLM generation
        send_order: amount of CEC extracted for the order
        context: send_order result
    """
    question: str
    generation: str
    send_order: str
    context: str
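# Illustrative only: a fully populated state after the send-order path might
# look like this (values are hypothetical):
# {
#     "question": "I want to buy 100 CEC",
#     "send_order": 100,
#     "context": 'orderinfo:{"amount": 100, "price": 0.1, ...}',
#     "generation": '{"amount": 100, "price": 0.1, ...}',
# }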
# Node - Generate
def generate(state):
    """
    Generate answer

    Args:
        state (dict): The current graph state

    Returns:
        state (dict): New key added to state, generation, that contains LLM generation
    """
    print("Step: Generating Final Response")
    question = state["question"]
    context = state.get("context", None)
    print(context)
    # TODO: tailor the answer to the specific content of the context
    if context and "orderinfo" in context:
        # The context already carries a prepared order payload; return it directly.
        return {"generation": context.replace("orderinfo:", "")}
    else:
        generation = generate_chain.invoke({"context": context, "question": question})
        return {"generation": generation}
# Node - Query Transformation
def transform_query(state):
    """
    Transform the user question into order info

    Args:
        state (dict): The current graph state

    Returns:
        state (dict): New key added to state, send_order, with the requested amount
    """
    print("Step: Optimizing Query for Send Order")
    question = state['question']
    gen_query = query_chain.invoke({"question": question})
    search_query = gen_query["count"]
    print("send_order", search_query)
    return {"send_order": search_query}
# Node - Send Order
def send_order(state):
    """
    Build order info based on the requested amount

    Args:
        state (dict): The current graph state

    Returns:
        state (dict): New key added to state, context, containing the order info
    """
    print("Step: Before Send Order")
    amount = state['send_order']
    print(amount)
    print(f'Step: build order info for: "{amount}" CEC')
    order_info = {"amount": amount, "price": 0.1, "name": "CEC", "url": "https://www.example.com"}
    search_result = f"orderinfo:{json.dumps(order_info)}"
    return {"context": search_result}
# Conditional Edge, Routing
def route_question(state):
    """
    Route question to send order or generation.

    Args:
        state (dict): The current graph state

    Returns:
        str: Next node to call
    """
    print("Step: Routing Query")
    question = state['question']
    output = question_router.invoke({"question": question})
    if output['choice'] == "send_order":
        print("Step: Routing Query to Send Order")
        return "sendorder"
    else:
        # Default to generation for any other (or unexpected) choice.
        print("Step: Routing Query to Generation")
        return "generate"
# Build the nodes
workflow = StateGraph(GraphState)
workflow.add_node("sendorder", send_order)
workflow.add_node("transform_query", transform_query)
workflow.add_node("generate", generate)
# Build the edges
workflow.set_conditional_entry_point(
    route_question,
    {
        "sendorder": "transform_query",
        "generate": "generate",
    },
)
workflow.add_edge("transform_query", "sendorder")
workflow.add_edge("sendorder", "generate")
workflow.add_edge("generate", END)
# Compile the workflow
local_agent = workflow.compile()
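# Optional: inspect the compiled topology. LangGraph can render the graph as
# Mermaid text (assuming a reasonably recent langgraph version):
# print(local_agent.get_graph().draw_mermaid())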
def run_agent(query):
    output = local_agent.invoke({"question": query})
    print("=======")
    print(output["generation"])
    # display(Markdown(output["generation"]))
run_agent("I want to buy 100 CEC")
# run_agent("What the weather of New York today?")