# LangChain Dependencies
from langchain.prompts import PromptTemplate
from langchain_core.output_parsers import JsonOutputParser, StrOutputParser
from langgraph.graph import END, StateGraph
from langchain_community.chat_models import ChatOllama
from langgraph.checkpoint.memory import MemorySaver

# For State Graph
from typing_extensions import TypedDict

import json


def configure_llm(model, temperature):
    """Create a plain-text LLM and a JSON-mode LLM backed by the same Ollama model."""
    llama3 = ChatOllama(model=model, temperature=temperature)
    llama3_json = ChatOllama(
        model=model, format='json', temperature=temperature)
    return llama3, llama3_json

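# Usage sketch (illustrative; assumes a local Ollama server with the model
# already pulled, e.g. via `ollama pull llama3`):
#   llama3, llama3_json = configure_llm("llama3", temperature=0)

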
def config_agent(llama3, llama3_json):
    # Graph State
    class GraphState(TypedDict):
        """
        Represents the state of our graph.

        Attributes:
            question: the user's question
            generation: LLM generation
            order_info: amount and price extracted for the send-order step
            context: send_order result
            history: conversation history
        """
        question: str
        generation: str
        order_info: str
        context: str
        history: str

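    # Illustrative initial state for the compiled graph (values are examples):
    #   {"question": "How much per CEC?", "context": "", "history": ""}
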
    generate_prompt = PromptTemplate(
        template="""
<|begin_of_text|>
<|start_header_id|>system<|end_header_id|>
Your name is AICO.
You are a professional token seller.
We have a total of 1 million CEC, and the average cost of these CECs is $1.1 each. The minimum price is $0.9 per CEC, and there is a 1% chance that the price will be lower than $1 per CEC.
We are now handing all of them over to you to sell.
Customers will come to talk to you and bargain.
You can decide the final transaction price based on the state of the conversation with the customer and sell the tokens.
In addition, there are several rules to follow for this task:
1) The higher the price, the better; ideally the final income will be higher than 1 million US dollars.
2) You can set a price higher than 1 US dollar per CEC, depending on the actual sales situation.
3) For a single customer, try not to sell at a price lower than 1 US dollar. If the customer has special reasons, you may sell some tokens at a lower price depending on the situation, but in that case you need to sell at a higher price in other sales so that the overall income is not less than 1 million US dollars.
4) We hope that customers buy as much as possible. If a user is willing to purchase a large number of tokens at one time, you can offer a more favorable price, but the minimum price cannot be lower than $0.9.
The above are all the rules for this sales task.
Strictly use the following pieces of context and history to answer the question.
Don't repeat the history in the answer.
<|eot_id|>
<|start_header_id|>user<|end_header_id|>
Context: {context}
History: {history}
Question: {question}
Answer:
<|eot_id|>
<|start_header_id|>assistant<|end_header_id|>""",
        input_variables=["question", "context", "history"],
    )

    # Chain: prompt -> chat model -> plain-text answer
    generate_chain = generate_prompt | llama3 | StrOutputParser()

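    # Invoked later by the generate node, e.g.:
    #   generate_chain.invoke({"context": "...", "question": "...", "history": "..."}) -> str
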
    router_prompt = PromptTemplate(
        template="""
<|begin_of_text|>
<|start_header_id|>system<|end_header_id|>
You are an expert at routing a user question to either the generation stage or the send-order stage.
CEC is a famous crypto token, and you are an expert at selling CEC.
Use 'generate' for questions where the user asks about the price.
Use 'send_order' for questions where the user accepts the price you gave and decides to buy a certain amount of CEC.
Otherwise, skip and go straight to the generation stage to respond.
You do not need to be stringent with the keywords in the question related to these topics.
Give a binary choice 'send_order' or 'generate' based on the question.
Return the JSON with a single key 'choice' with no preamble or explanation.
Question to route: {question}
Context to route: {context}
History to route: {history}
<|eot_id|>
<|start_header_id|>assistant<|end_header_id|>
""",
        input_variables=["question", "history", "context"],
    )

    # Chain: prompt -> JSON-mode model -> parsed dict
    question_router = router_prompt | llama3_json | JsonOutputParser()

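    # Illustrative output (assuming the model follows the prompt):
    #   {"choice": "send_order"}  or  {"choice": "generate"}
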
    order_prompt = PromptTemplate(
        template="""
<|begin_of_text|>
<|start_header_id|>system<|end_header_id|>
You are an expert at selling CEC.
Strictly use the following pieces of context to get the price and amount.
Return the JSON with key 'count' with the amount the user wants to buy, and 'price' with the price you gave.
Question to transform: {question}
Context to transform: {context}
History to transform: {history}
<|eot_id|>
<|start_header_id|>assistant<|end_header_id|>
""",
        input_variables=["question", "context", "history"],
    )

    # Chain: prompt -> JSON-mode model -> parsed dict
    order_chain = order_prompt | llama3_json | JsonOutputParser()

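    # Illustrative output (keys come from the prompt; values are examples):
    #   {"count": 10000, "price": 1.05}
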
    # Node - Generate
    def generate(state):
        """
        Generate an answer.

        Args:
            state (dict): The current graph state

        Returns:
            state (dict): New key added to state, generation, that contains the LLM generation
        """
        print("Step: Generating Final Response")
        question = state["question"]
        context = state["context"]
        order_info = state.get("order_info", None)

        # TODO: generate the answer based on the specific content of the context.
        # If an order was just placed, echo the order info instead of calling the LLM.
        if order_info is not None:
            return {"generation": order_info}
        else:
            generation = generate_chain.invoke(
                {"context": context, "question": question, "history": state["history"]})
            return {"generation": generation}

    # Node - Query Transformation
    def transform_query(state):
        """
        Transform the user question into order info.

        Args:
            state (dict): The current graph state

        Returns:
            state (dict): New key added to state, order_info, with the amount and price
        """
        print("Step: Optimizing Query for Send Order")
        question = state['question']
        gen_query = order_chain.invoke(
            {"question": question, "history": state["history"], "context": state["context"]})
        amount = gen_query["count"]
        price = gen_query["price"]
        print("order_info::amount|price:", amount, price)
        return {"order_info": {"amount": amount, "price": price}}

    # Node - Send Order
    def send_order(state):
        """
        Send the order based on the question.

        Args:
            state (dict): The current graph state

        Returns:
            state (dict): order_info serialized to a JSON string
        """
        print("Step: before Send Order")
        order_info = state['order_info']
        print(f'Step: build order info for: "{order_info}"')
        # No external call is made here; the order info is serialized so the
        # generate node can echo it back to the user.
        order_result = json.dumps(order_info)
        return {"order_info": order_result}

    # Conditional Edge - Routing
    def route_question(state):
        """
        Route the question to send order or generation.

        Args:
            state (dict): The current graph state

        Returns:
            str: Next node to call
        """
        print("Step: Routing Query")
        question = state['question']
        output = question_router.invoke(
            {"question": question, "history": state["history"], "context": state["context"]})
        if output['choice'] == "send_order":
            print("Step: Routing Query to Send Order")
            return "sendorder"
        else:
            # Default to generation for 'generate' or any unexpected choice.
            print("Step: Routing Query to Generation")
            return "generate"

    # Build the nodes
    workflow = StateGraph(GraphState)
    workflow.add_node("sendorder", send_order)
    workflow.add_node("transform_query", transform_query)
    workflow.add_node("generate", generate)

    # Build the edges
    workflow.set_conditional_entry_point(
        route_question,
        {
            # A "sendorder" routing decision first extracts the amount/price,
            # then flows into the sendorder node below.
            "sendorder": "transform_query",
            "generate": "generate",
        },
    )
    workflow.add_edge("transform_query", "sendorder")
    workflow.add_edge("sendorder", "generate")
    workflow.add_edge("generate", END)

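    # Resulting flow:
    #   route_question -> "generate"  -> generate -> END
    #   route_question -> "sendorder" -> transform_query -> sendorder -> generate -> END
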
    # Compile the workflow (uncomment the MemorySaver lines to persist state
    # across invocations via a checkpointer)
    # memory = MemorySaver()
    # local_agent = workflow.compile(checkpointer=memory)
    local_agent = workflow.compile()
    return local_agent
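

# Minimal usage sketch. Assumes a local Ollama server is running and a model
# such as "llama3" has been pulled (the model name here is an assumption).
if __name__ == "__main__":
    llama3, llama3_json = configure_llm("llama3", temperature=0)
    agent = config_agent(llama3, llama3_json)
    result = agent.invoke({
        "question": "How much is one CEC right now?",
        "context": "",
        "history": "",
    })
    print(result["generation"])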