code init
commit c21de6bdda
3  .gitignore  vendored  Normal file
@@ -0,0 +1,3 @@
*.log
/logs
__pycache__/
15  .vscode/launch.json  vendored  Normal file
@@ -0,0 +1,15 @@
{
    // Use IntelliSense to learn about possible attributes.
    // Hover to view descriptions of existing attributes.
    // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
    "version": "0.2.0",
    "configurations": [
        {
            "name": "Python Debugger: Current File",
            "type": "debugpy",
            "request": "launch",
            "program": "${file}",
            "console": "integratedTerminal"
        }
    ]
}
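Note (not part of the commit): the configuration above only debugs whichever file is currently open, via `"program": "${file}"`. Since the README starts the UI with `streamlit run streamlit_app.py`, debugging that entry point would need a second, hypothetical configuration entry that launches the `streamlit` module instead, e.g. `"module": "streamlit"` with `"args": ["run", "streamlit_app.py"]` in place of `"program"`.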
13  README.md  Normal file
@@ -0,0 +1,13 @@
```bash
conda env create -f env/agent2.yaml
# conda create -n agent2 python

conda activate agent2

pip install -r env/requirements.txt

python app.py

streamlit run streamlit_app.py
```
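Assumption worth stating explicitly (the README does not mention it): both `app.py` and `streamlit_app.py` build `ChatOllama(model='llama3.2', ...)`, so a local Ollama server with the `llama3.2` model already pulled (for example `ollama pull llama3.2`) is required before `python app.py` or `streamlit run streamlit_app.py` will produce answers.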
257  app.py  Normal file
@@ -0,0 +1,257 @@
# Displaying final output format
# from IPython.display import display, Markdown, Latex
# LangChain Dependencies
from langchain.prompts import PromptTemplate
from langchain_core.output_parsers import JsonOutputParser, StrOutputParser
from langchain_community.chat_models import ChatOllama
from langchain_community.tools import DuckDuckGoSearchRun
from langchain_community.utilities import DuckDuckGoSearchAPIWrapper
from langgraph.graph import END, StateGraph
# For State Graph
from typing_extensions import TypedDict
import os

# Defining LLM
local_llm = 'llama3.2'
llama3 = ChatOllama(model=local_llm, temperature=0)
llama3_json = ChatOllama(model=local_llm, format='json', temperature=0)

# Web Search Tool
wrapper = DuckDuckGoSearchAPIWrapper(max_results=25)
web_search_tool = DuckDuckGoSearchRun(api_wrapper=wrapper)

# Generation Prompt
generate_prompt = PromptTemplate(
    template="""

    <|begin_of_text|>

    <|start_header_id|>system<|end_header_id|>

    You are an AI assistant for Research Question Tasks, that synthesizes web search results.
    Strictly use the following pieces of web search context to answer the question. If you don't know the answer, just say that you don't know.
    Keep the answer concise, but provide all of the details you can in the form of a research report.
    Only make direct references to material if provided in the context.

    <|eot_id|>

    <|start_header_id|>user<|end_header_id|>

    Question: {question}
    Web Search Context: {context}
    Answer:

    <|eot_id|>

    <|start_header_id|>assistant<|end_header_id|>""",
    input_variables=["question", "context"],
)

# Chain
generate_chain = generate_prompt | llama3 | StrOutputParser()

# Test Run
# question = "who is Yan Lecun?"
# context = ""
# generation = generate_chain.invoke({"context": context, "question": question})
# print(generation)

# Router
router_prompt = PromptTemplate(
    template="""

    <|begin_of_text|>

    <|start_header_id|>system<|end_header_id|>

    You are an expert at routing a user question to either the generation stage or web search.
    Use the web search for questions that require more context for a better answer, or recent events.
    Otherwise, you can skip and go straight to the generation phase to respond.
    You do not need to be stringent with the keywords in the question related to these topics.
    Give a binary choice 'web_search' or 'generate' based on the question.
    Return the JSON with a single key 'choice' with no preamble or explanation.

    Question to route: {question}

    <|eot_id|>

    <|start_header_id|>assistant<|end_header_id|>

    """,
    input_variables=["question"],
)

# Chain
question_router = router_prompt | llama3_json | JsonOutputParser()

# Test Run
# question = "What's up?"
# print(question_router.invoke({"question": question}))

# Query Transformation
query_prompt = PromptTemplate(
    template="""

    <|begin_of_text|>

    <|start_header_id|>system<|end_header_id|>

    You are an expert at crafting web search queries for research questions.
    More often than not, a user will ask a basic question that they wish to learn more about, however it might not be in the best format.
    Reword their query to be the most effective web search string possible.
    Return the JSON with a single key 'query' with no preamble or explanation.

    Question to transform: {question}

    <|eot_id|>

    <|start_header_id|>assistant<|end_header_id|>

    """,
    input_variables=["question"],
)

# Chain
query_chain = query_prompt | llama3_json | JsonOutputParser()

# Test Run
# question = "What's happened recently with Gaza?"
# print(query_chain.invoke({"question": question}))


# Graph State
class GraphState(TypedDict):
    """
    Represents the state of our graph.

    Attributes:
        question: question
        generation: LLM generation
        search_query: revised question for web search
        context: web_search result
    """
    question: str
    generation: str
    search_query: str
    context: str


# Node - Generate
def generate(state):
    """
    Generate answer

    Args:
        state (dict): The current graph state

    Returns:
        state (dict): New key added to state, generation, that contains LLM generation
    """
    print("Step: Generating Final Response")
    question = state["question"]
    context = state["context"]
    print(context)
    # TODO: generate the answer from the specific content of the context
    # Answer Generation
    generation = generate_chain.invoke({"context": context, "question": question})
    return {"generation": generation}


# Node - Query Transformation
def transform_query(state):
    """
    Transform user question to web search

    Args:
        state (dict): The current graph state

    Returns:
        state (dict): Appended search query
    """
    print("Step: Optimizing Query for Web Search")
    question = state['question']
    gen_query = query_chain.invoke({"question": question})
    search_query = gen_query["query"]
    return {"search_query": search_query}


# Node - Web Search
def web_search(state):
    """
    Web search based on the question

    Args:
        state (dict): The current graph state

    Returns:
        state (dict): Appended web results to context
    """
    # search_query = state['search_query']
    # print(f'Step: Searching the Web for: "{search_query}"')

    # # Web search tool call
    # search_result = web_search_tool.invoke(search_query)
    print("Step: Web Search")
    search_result = "Web Search Results"
    return {"context": search_result}


# Conditional Edge, Routing
def route_question(state):
    """
    Route question to web search or generation.

    Args:
        state (dict): The current graph state

    Returns:
        str: Next node to call
    """
    print("Step: Routing Query")
    question = state['question']
    output = question_router.invoke({"question": question})
    if output['choice'] == "web_search":
        print("Step: Routing Query to Web Search")
        return "websearch"
    elif output['choice'] == 'generate':
        print("Step: Routing Query to Generation")
        return "generate"


# Build the nodes
workflow = StateGraph(GraphState)
workflow.add_node("websearch", web_search)
workflow.add_node("transform_query", transform_query)
workflow.add_node("generate", generate)

# Build the edges
workflow.set_conditional_entry_point(
    route_question,
    {
        "websearch": "transform_query",
        "generate": "generate",
    },
)
workflow.add_edge("transform_query", "websearch")
workflow.add_edge("websearch", "generate")
workflow.add_edge("generate", END)

# Compile the workflow
local_agent = workflow.compile()


def run_agent(query):
    output = local_agent.invoke({"question": query})
    print("=======")
    print(output["generation"])
    # display(Markdown(output["generation"]))


run_agent("What is Latest news About Open AI?")
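In app.py the web_search node ships as a stub: the DuckDuckGo call is left commented out and a placeholder string is returned, so generate only ever sees "Web Search Results". A minimal sketch of the node with the search restored, using only names already defined in app.py and mirroring the working version in streamlit_app.py below:

```python
def web_search(state):
    """Run the DuckDuckGo search for the transformed query and store the results as context."""
    search_query = state['search_query']
    print(f'Step: Searching the Web for: "{search_query}"')

    # Same wiring streamlit_app.py uses; web_search_tool is the module-level
    # DuckDuckGoSearchRun instance defined near the top of app.py.
    search_result = web_search_tool.invoke(search_query)
    return {"context": search_result}
```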
8  env/agent2.yaml  vendored  Normal file
@@ -0,0 +1,8 @@
name: agent2
channels:
  - defaults
  - conda-forge
dependencies:
  - python=3.10
  - pip:
      - streamlit==1.40.2
6  env/requirements.txt  vendored  Normal file
@@ -0,0 +1,6 @@
langchain==0.2.12
langgraph==0.2.2
langchain-ollama==0.1.1
langsmith==0.1.98
langchain_community==0.2.11
duckduckgo-search==6.2.13
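The requirements pin langchain-ollama, but both scripts import ChatOllama from langchain_community.chat_models (the older path, which still works with langchain_community==0.2.11). A hedged sketch of the equivalent model setup through the dedicated package already listed above, should the community import ever be dropped; it assumes the same local llama3.2 model served by Ollama:

```python
# Alternative model setup via the langchain-ollama package pinned in requirements.txt.
# Assumption: behavior matches the ChatOllama class imported from langchain_community in app.py.
from langchain_ollama import ChatOllama

llama3 = ChatOllama(model="llama3.2", temperature=0)
llama3_json = ChatOllama(model="llama3.2", format="json", temperature=0)
```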
244  streamlit_app.py  Normal file
@@ -0,0 +1,244 @@
# Displaying final output format
from IPython.display import display, Markdown, Latex
# LangChain Dependencies
from langchain.prompts import PromptTemplate
from langchain_core.output_parsers import JsonOutputParser, StrOutputParser
from langchain_community.chat_models import ChatOllama
from langchain_community.tools import DuckDuckGoSearchRun
from langchain_community.utilities import DuckDuckGoSearchAPIWrapper
from langgraph.graph import END, StateGraph
# For State Graph
from typing_extensions import TypedDict
import streamlit as st
import os

# Defining LLM
def configure_llm():
    st.sidebar.header("Configure LLM")

    # Model Selection
    model_options = ["llama3.2"]
    selected_model = st.sidebar.selectbox("Choose the LLM Model", options=model_options, index=0)

    # Temperature Setting
    temperature = st.sidebar.slider("Set the Temperature", min_value=0.0, max_value=1.0, value=0.5, step=0.1)

    # Create LLM Instances based on user selection
    llama_model = ChatOllama(model=selected_model, temperature=temperature)
    llama_model_json = ChatOllama(model=selected_model, format='json', temperature=temperature)

    return llama_model, llama_model_json

# Streamlit Application Interface
st.title("Personal Research Assistant powered By Llama3.2")
llama3, llama3_json = configure_llm()
wrapper = DuckDuckGoSearchAPIWrapper(max_results=25)
web_search_tool = DuckDuckGoSearchRun(api_wrapper=wrapper)
generate_prompt = PromptTemplate(
    template="""

    <|begin_of_text|>

    <|start_header_id|>system<|end_header_id|>

    You are an AI assistant for Research Question Tasks, that synthesizes web search results.
    Strictly use the following pieces of web search context to answer the question. If you don't know the answer, just say that you don't know.
    Keep the answer concise, but provide all of the details you can in the form of a research report.
    Only make direct references to material if provided in the context.

    <|eot_id|>

    <|start_header_id|>user<|end_header_id|>

    Question: {question}
    Web Search Context: {context}
    Answer:

    <|eot_id|>

    <|start_header_id|>assistant<|end_header_id|>""",
    input_variables=["question", "context"],
)

# Chain
generate_chain = generate_prompt | llama3 | StrOutputParser()
router_prompt = PromptTemplate(
    template="""

    <|begin_of_text|>

    <|start_header_id|>system<|end_header_id|>

    You are an expert at routing a user question to either the generation stage or web search.
    Use the web search for questions that require more context for a better answer, or recent events.
    Otherwise, you can skip and go straight to the generation phase to respond.
    You do not need to be stringent with the keywords in the question related to these topics.
    Give a binary choice 'web_search' or 'generate' based on the question.
    Return the JSON with a single key 'choice' with no preamble or explanation.

    Question to route: {question}

    <|eot_id|>

    <|start_header_id|>assistant<|end_header_id|>

    """,
    input_variables=["question"],
)

# Chain
question_router = router_prompt | llama3_json | JsonOutputParser()

query_prompt = PromptTemplate(
    template="""

    <|begin_of_text|>

    <|start_header_id|>system<|end_header_id|>

    You are an expert at crafting web search queries for research questions.
    More often than not, a user will ask a basic question that they wish to learn more about, however it might not be in the best format.
    Reword their query to be the most effective web search string possible.
    Return the JSON with a single key 'query' with no preamble or explanation.

    Question to transform: {question}

    <|eot_id|>

    <|start_header_id|>assistant<|end_header_id|>

    """,
    input_variables=["question"],
)

# Chain
query_chain = query_prompt | llama3_json | JsonOutputParser()

# Graph State
class GraphState(TypedDict):
    """
    Represents the state of our graph.

    Attributes:
        question: question
        generation: LLM generation
        search_query: revised question for web search
        context: web_search result
    """
    question: str
    generation: str
    search_query: str
    context: str


# Node - Generate
def generate(state):
    """
    Generate answer

    Args:
        state (dict): The current graph state

    Returns:
        state (dict): New key added to state, generation, that contains LLM generation
    """
    print("Step: Generating Final Response")
    question = state["question"]
    context = state["context"]

    # Answer Generation
    generation = generate_chain.invoke({"context": context, "question": question})
    return {"generation": generation}


# Node - Query Transformation
def transform_query(state):
    """
    Transform user question to web search

    Args:
        state (dict): The current graph state

    Returns:
        state (dict): Appended search query
    """
    print("Step: Optimizing Query for Web Search")
    question = state['question']
    gen_query = query_chain.invoke({"question": question})
    search_query = gen_query["query"]
    return {"search_query": search_query}


# Node - Web Search
def web_search(state):
    """
    Web search based on the question

    Args:
        state (dict): The current graph state

    Returns:
        state (dict): Appended web results to context
    """
    search_query = state['search_query']
    print(f'Step: Searching the Web for: "{search_query}"')

    # Web search tool call
    search_result = web_search_tool.invoke(search_query)
    return {"context": search_result}


# Conditional Edge, Routing
def route_question(state):
    """
    Route question to web search or generation.

    Args:
        state (dict): The current graph state

    Returns:
        str: Next node to call
    """
    print("Step: Routing Query")
    question = state['question']
    output = question_router.invoke({"question": question})
    if output['choice'] == "web_search":
        print("Step: Routing Query to Web Search")
        return "websearch"
    elif output['choice'] == 'generate':
        print("Step: Routing Query to Generation")
        return "generate"

# Build the nodes
workflow = StateGraph(GraphState)
workflow.add_node("websearch", web_search)
workflow.add_node("transform_query", transform_query)
workflow.add_node("generate", generate)

# Build the edges
workflow.set_conditional_entry_point(
    route_question,
    {
        "websearch": "transform_query",
        "generate": "generate",
    },
)
workflow.add_edge("transform_query", "websearch")
workflow.add_edge("websearch", "generate")
workflow.add_edge("generate", END)

# Compile the workflow
local_agent = workflow.compile()

def run_agent(query):
    output = local_agent.invoke({"question": query})
    print("=======")

    return output["generation"]

user_query = st.text_input("Enter your research question:", "")

if st.button("Run Query"):
    if user_query:
        st.write(run_agent(user_query))
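A small, optional debugging aid (an assumption, not part of this commit): LangGraph's compiled graphs can be streamed instead of invoked, which makes the transform_query -> websearch -> generate sequence visible step by step when testing the agent locally:

```python
# Watch the graph execute node by node; each update is keyed by the node that
# just ran and contains only the keys that node wrote to the state.
for update in local_agent.stream(
    {"question": "What is the latest news about OpenAI?"},
    stream_mode="updates",
):
    for node_name, state_update in update.items():
        print(node_name, "->", list(state_update.keys()))
```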