Updating files #65

Open · wants to merge 2 commits into base: main
1 change: 1 addition & 0 deletions .gitignore
@@ -2,3 +2,4 @@
__pycache__/
.DS_Store
secrets.toml
.env
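Review note: ignoring .env keeps the new scripts' credentials (MONGO_URI, ANTHROPIC_KEY, OPENAI_API_KEY) out of version control. A minimal sketch of the pattern the new files rely on, with placeholder values only:

# Contents of a local .env file (placeholders - never commit real secrets):
#   MONGO_URI=mongodb+srv://user:[email protected]/
#   ANTHROPIC_KEY=sk-ant-...
#   OPENAI_API_KEY=sk-...

import os
from dotenv import load_dotenv

load_dotenv()  # reads .env from the working directory into os.environ
mongo_uri = os.getenv("MONGO_URI")  # returns None if the variable is missing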
8 changes: 4 additions & 4 deletions Chatbot.py
@@ -3,12 +3,12 @@

with st.sidebar:
    openai_api_key = st.text_input("OpenAI API Key", key="chatbot_api_key", type="password")
    openai_model = st.text_input("OpenAI Model", value="gpt-3.5-turbo", key="chatbot_model_key")
    "[Get an OpenAI API key](https://platform.openai.com/account/api-keys)"
    "[View the source code](https://github.com/streamlit/llm-examples/blob/main/Chatbot.py)"
    "[![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/streamlit/llm-examples?quickstart=1)"
    "[Select an OpenAI Model](https://platform.openai.com/docs/models)"

st.title("💬 Chatbot")
st.caption("🚀 A Streamlit chatbot powered by OpenAI")
st.caption("🚀 Para quem quer ter acesso ao OpenAI. Tirem o escorpião do Bolso!")
if "messages" not in st.session_state:
st.session_state["messages"] = [{"role": "assistant", "content": "How can I help you?"}]

@@ -23,7 +23,7 @@
    client = OpenAI(api_key=openai_api_key)
    st.session_state.messages.append({"role": "user", "content": prompt})
    st.chat_message("user").write(prompt)
    response = client.chat.completions.create(model="gpt-3.5-turbo", messages=st.session_state.messages)
    response = client.chat.completions.create(model=openai_model, messages=st.session_state.messages)
    msg = response.choices[0].message.content
    st.session_state.messages.append({"role": "assistant", "content": msg})
    st.chat_message("assistant").write(msg)
File renamed without changes.
File renamed without changes.
263 changes: 263 additions & 0 deletions notebooks/6_CloudIA_Chat.py
@@ -0,0 +1,263 @@
# https://langchain-ai.github.io/langgraph/tutorials/rag/langgraph_agentic_rag/

import os
import pprint
from typing import Annotated, Literal, Sequence

from typing_extensions import TypedDict

import anthropic
import streamlit as st
from dotenv import load_dotenv
from IPython.display import Image, display
from openai import OpenAI
from pymongo import MongoClient

from langchain import hub
from langchain.tools.retriever import create_retriever_tool
from langchain_core.messages import BaseMessage, HumanMessage
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_mongodb import MongoDBAtlasVectorSearch
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langgraph.graph import END, StateGraph, START
from langgraph.graph.message import add_messages
from langgraph.prebuilt import ToolNode, tools_condition
from pydantic import BaseModel, Field

load_dotenv()
anthropic_api_key = os.getenv("ANTHROPIC_KEY")

client = MongoClient(os.getenv("MONGO_URI"))
dbName = os.getenv("MONGO_DB")
collectionName = os.getenv("MONGO_COLLECTION")
collection = client[dbName][collectionName]
INDEX = os.getenv("MONGO_INDEX")

embeddings = OpenAIEmbeddings(openai_api_key=os.getenv("OPENAI_API_KEY"))
vectorStore = MongoDBAtlasVectorSearch(
    collection=collection,
    embedding=embeddings,
    index_name=INDEX,
)

retriever = vectorStore.as_retriever(search_kwargs={"k": 3})

retriever_tool = create_retriever_tool(
    retriever,
    "retrieve_mongodb",
    "Search and return information stored on MongoDB.",
)

tools = [retriever_tool]
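The retriever only returns results if the collection carries an Atlas Vector Search index whose name matches MONGO_INDEX. A sketch of the expected index definition; the field path and dimension count are assumptions (MongoDBAtlasVectorSearch stores vectors under "embedding" by default, and OpenAI's default text-embedding-ada-002 model produces 1536-dimensional vectors):

# Hypothetical Atlas Vector Search index definition for the collection above.
# Create it in the Atlas UI (or via the driver) under the name held in MONGO_INDEX.
vector_index_definition = {
    "fields": [
        {
            "type": "vector",
            "path": "embedding",    # default field used by MongoDBAtlasVectorSearch
            "numDimensions": 1536,  # matches text-embedding-ada-002 output
            "similarity": "cosine",
        }
    ]
}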

class AgentState(TypedDict):
    # The add_messages function defines how an update should be processed.
    # Default is to replace; add_messages says "append".
    messages: Annotated[Sequence[BaseMessage], add_messages]
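The add_messages annotation is what lets each node below return only its new messages; LangGraph merges them into the running list instead of overwriting it. A minimal sketch of the reducer's behavior in isolation:

from langchain_core.messages import AIMessage, HumanMessage
from langgraph.graph.message import add_messages

# The reducer receives the existing value and a node's update and merges them.
merged = add_messages(
    [HumanMessage(content="Quais sao os produtos de IP estatico?")],  # current state
    [AIMessage(content="Searching the catalog...")],                  # node output
)
assert len(merged) == 2  # appended, not replaced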


def grade_documents(state) -> Literal["generate", "rewrite"]:
    """
    Determines whether the retrieved documents are relevant to the question.

    Args:
        state (messages): The current state

    Returns:
        str: A decision for whether the documents are relevant or not
    """

    print("---CHECK RELEVANCE---")

    # Data model
    class grade(BaseModel):
        """Binary score for relevance check."""

        binary_score: str = Field(description="Relevance score 'yes' or 'no'")

    # LLM
    model = ChatOpenAI(temperature=0, model="gpt-4-0125-preview", streaming=True)

    # LLM with tool and validation
    llm_with_tool = model.with_structured_output(grade)

    # Prompt
    prompt = PromptTemplate(
        template="""You are a grader assessing relevance of a retrieved document to a user question. \n
        Here is the retrieved document: \n\n {context} \n\n
        Here is the user question: {question} \n
        If the document contains keyword(s) or semantic meaning related to the user question, grade it as relevant. \n
        Give a binary score 'yes' or 'no' to indicate whether the document is relevant to the question.""",
        input_variables=["context", "question"],
    )

    # Chain
    chain = prompt | llm_with_tool

    messages = state["messages"]
    last_message = messages[-1]

    question = messages[0].content
    docs = last_message.content

    scored_result = chain.invoke({"question": question, "context": docs})

    score = scored_result.binary_score

    if score == "yes":
        print("---DECISION: DOCS RELEVANT---")
        return "generate"

    else:
        print("---DECISION: DOCS NOT RELEVANT---")
        print(score)
        return "rewrite"

def agent(state):
    """
    Invokes the agent model to generate a response based on the current state. Given
    the question, it will decide to retrieve using the retriever tool, or simply end.

    Args:
        state (messages): The current state

    Returns:
        dict: The updated state with the agent response appended to messages
    """
    print("---CALL AGENT---")
    messages = state["messages"]
    model = ChatOpenAI(temperature=0, streaming=True, model="gpt-4-turbo")
    model = model.bind_tools(tools)
    response = model.invoke(messages)
    # We return a list, because this will get added to the existing list
    return {"messages": [response]}


def rewrite(state):
    """
    Transform the query to produce a better question.

    Args:
        state (messages): The current state

    Returns:
        dict: The updated state with the re-phrased question
    """

    print("---TRANSFORM QUERY---")
    messages = state["messages"]
    question = messages[0].content

    msg = [
        HumanMessage(
            content=f""" \n
    Look at the input and try to reason about the underlying semantic intent / meaning. \n
    Here is the initial question:
    \n ------- \n
    {question}
    \n ------- \n
    Formulate an improved question: """,
        )
    ]

    # Re-writer model
    model = ChatOpenAI(temperature=0, model="gpt-4-0125-preview", streaming=True)
    response = model.invoke(msg)
    return {"messages": [response]}


def generate(state):
    """
    Generate the answer from the retrieved documents.

    Args:
        state (messages): The current state

    Returns:
        dict: The updated state with the generated answer
    """
    print("---GENERATE---")
    messages = state["messages"]
    question = messages[0].content
    last_message = messages[-1]

    docs = last_message.content

    # Prompt
    prompt = hub.pull("rlm/rag-prompt")

    # LLM
    llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0, streaming=True)

    # Post-processing (unused here: docs already arrive flattened as a string)
    def format_docs(docs):
        return "\n\n".join(doc.page_content for doc in docs)

    # Chain
    rag_chain = prompt | llm | StrOutputParser()

    # Run
    response = rag_chain.invoke({"context": docs, "question": question})
    return {"messages": [response]}


# Define a new graph
workflow = StateGraph(AgentState)

# Define the nodes we will cycle between
workflow.add_node("agent", agent)  # agent
retrieve = ToolNode([retriever_tool])
workflow.add_node("retrieve", retrieve)  # retrieval
workflow.add_node("rewrite", rewrite)  # Re-writing the question
workflow.add_node(
    "generate", generate
)  # Generating a response after we know the documents are relevant
# Call agent node to decide to retrieve or not
workflow.add_edge(START, "agent")

# Decide whether to retrieve
workflow.add_conditional_edges(
    "agent",
    # Assess agent decision
    tools_condition,
    {
        # Translate the condition outputs to nodes in our graph
        "tools": "retrieve",
        END: END,
    },
)

# Edges taken after the `retrieve` node is called.
workflow.add_conditional_edges(
    "retrieve",
    # Grade the retrieved documents to pick the next node
    grade_documents,
)
workflow.add_edge("generate", END)
workflow.add_edge("rewrite", "agent")

# Compile
graph = workflow.compile()

try:
    display(Image(graph.get_graph(xray=True).draw_mermaid_png()))
except Exception:
    # This requires some extra dependencies and is optional
    pass


inputs = {
    "messages": [
        ("user", "Quais sao os produtos de IP estatico?"),
    ]
}
for output in graph.stream(inputs):
    for key, value in output.items():
        pprint.pprint(f"Output from node '{key}':")
        pprint.pprint("---")
        pprint.pprint(value, indent=2, width=80, depth=None)
    pprint.pprint("\n---\n")
36 changes: 36 additions & 0 deletions notebooks/teste.ipynb
@@ -0,0 +1,36 @@
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "vscode": {
     "languageId": "plaintext"
    }
   },
   "outputs": [],
   "source": [
    "from pymongo import MongoClient\n",
    "MONGO_URI = \"mongodb+srv://cloudia:[email protected]/\"\n",
    "MONGO_DB = \"cloudiaDB\"\n",
    "MONGO_COLLECTION = \"hcso_collection\"\n",
    "\n",
    "# Create a MongoClient instance\n",
    "client = MongoClient(MONGO_URI)\n",
    "\n",
    "# Access the database\n",
    "db = client[MONGO_DB]\n",
    "\n",
    "# Access the collection\n",
    "collection = db[MONGO_COLLECTION]\n"
   ]
  }
 ],
 "metadata": {
  "language_info": {
   "name": "python"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
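A quick connectivity check would make a natural next cell; a sketch using PyMongo calls against the objects defined above:

# ping raises an exception if the cluster is unreachable or auth fails.
client.admin.command("ping")
print(collection.estimated_document_count(), "documents in", MONGO_COLLECTION)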
55 changes: 55 additions & 0 deletions pages/1_CloudIA_Q&A.py
@@ -0,0 +1,55 @@
import streamlit as st
import anthropic
import os
from dotenv import load_dotenv
from langchain_anthropic import ChatAnthropic
from langchain_core.prompts import ChatPromptTemplate


load_dotenv()

os.environ["ANTHROPIC_API_KEY"] = os.getenv("ANTHROPIC_KEY")

llm = ChatAnthropic(
    model="claude-3-5-sonnet-20240620",
    temperature=0,
    max_tokens=1024,
    timeout=None,
    max_retries=2,
    # other params...
)

st.title("📝 CloudIA - Q&A")
uploaded_file = st.file_uploader("Faça o upload de arquivo TXT ou MD e faça perguntas diretamente pelo Chat.", type=("txt", "md"))
question = st.text_input(
    "Ask something about the article",
    placeholder="Can you give me a short summary?",
    disabled=not uploaded_file,
)


if uploaded_file and question:
    article = uploaded_file.read().decode()

    prompt = ChatPromptTemplate.from_messages(
        [
            (
                "system",
                "You are a helpful assistant that understands the following article: {article}.",
            ),
            ("human", "{question}"),
        ]
    )

    chain = prompt | llm

    response = chain.invoke(
        {
            "article": article,
            "question": question,
        }
    )

    st.write("### Answer")
    st.write(response.content)
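One caveat worth noting: the entire upload is interpolated into the system message, so a large file can exceed the model's context window before max_tokens (an output limit) ever applies. A hypothetical guard that would slot in after the read() call; the character budget is arbitrary, for illustration only:

MAX_ARTICLE_CHARS = 100_000  # illustrative budget, not an Anthropic limit

article = uploaded_file.read().decode()
if len(article) > MAX_ARTICLE_CHARS:
    st.warning("File truncated to fit the model's context window.")
    article = article[:MAX_ARTICLE_CHARS]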