from ethanicbot.langchain.retriever import load_vectorstore
from langgraph.graph import StateGraph, END
from langchain.prompts import PromptTemplate
from langchain_community.llms import OpenAI
# Module-level singletons shared by the graph nodes below.
retriever = load_vectorstore()  # vector store queried via similarity_search in retrieval_node
llm = OpenAI(temperature=0)  # temperature=0 for deterministic completions
# Prompt template filled with the retrieved {context} and the user's {question}.
prompt = PromptTemplate.from_template("""
You are Ethanic Bot, a personal portfolio assistant. Use the following context:
{context}
User: {question}
Bot:
""")
def retrieval_node(state):
    """Fetch documents similar to state["input"] and stash their text in state["context"]."""
    user_query = state["input"]
    matching_docs = retriever.similarity_search(user_query)
    # Concatenate the retrieved page contents, one document per line.
    state["context"] = "\n".join(doc.page_content for doc in matching_docs)
    return state
def generate_node(state):
    """Generate the bot's reply and store it in state["response"].

    Reads state["input"] (the user question) and state["context"] (text
    produced by retrieval_node), formats the module-level prompt, and
    records the LLM completion under state["response"].
    """
    formatted = prompt.format(question=state["input"], context=state["context"])
    # .invoke() is the supported Runnable API; calling the LLM directly
    # (llm(formatted)) is deprecated in LangChain 0.1+ and emits warnings.
    state["response"] = llm.invoke(formatted)
    return state
def get_agent():
    """Build and compile the two-step retrieve -> generate LangGraph agent.

    Returns a compiled graph; invoking it with {"input": <question>} runs
    retrieval_node then generate_node and yields state carrying "context"
    and "response".
    """
    # StateGraph's schema must be a *type* (TypedDict, pydantic model, or
    # dataclass) whose annotations declare the state keys. Passing a plain
    # dict instance like {"input": str, ...} is rejected at construction.
    class AgentState(TypedDict):
        input: str      # the user's question
        context: str    # newline-joined retrieved document text
        response: str   # the LLM's reply

    workflow = StateGraph(state_schema=AgentState)

    # Register the two processing nodes.
    workflow.add_node("retrieve", retrieval_node)
    workflow.add_node("generate", generate_node)

    # Linear flow: retrieve -> generate -> END.
    workflow.set_entry_point("retrieve")
    workflow.add_edge("retrieve", "generate")
    workflow.add_edge("generate", END)  # END sentinel terminates the graph

    return workflow.compile()