author     Navan Chauhan <navanchauhan@gmail.com>    2023-10-14 14:47:49 -0600
committer  Navan Chauhan <navanchauhan@gmail.com>    2023-10-14 14:47:49 -0600
commit     42e54a2cb29943b793bf9a47dd7e0121e1c0c87d (patch)
tree       d5570585dc0bfccdab3ec3f9aaa79a9e4290470e /lang_prompt_demo.py
parent     704b6407b4e51800376e73fe934a762e94b30d9d (diff)
Diffstat (limited to 'lang_prompt_demo.py')
-rw-r--r--  lang_prompt_demo.py  60
1 file changed, 51 insertions(+), 9 deletions(-)
diff --git a/lang_prompt_demo.py b/lang_prompt_demo.py
index 3d8f1cd..cde0a59 100644
--- a/lang_prompt_demo.py
+++ b/lang_prompt_demo.py
@@ -5,6 +5,7 @@ from dotenv import load_dotenv
 from tools.contacts import get_all_contacts
 from tools.vocode import call_phone_number
+from tools.summarize import summarize
 from tools.get_user_inputs import get_desired_inputs
 from tools.email_tool import email_tasks
 from langchain.memory import ConversationBufferMemory
@@ -15,30 +16,71 @@ from stdout_filterer import RedactPhoneNumbers
 load_dotenv()
 
 from langchain.chat_models import ChatOpenAI
-from langchain.chat_models import BedrockChat
+# from langchain.chat_models import BedrockChat
 from langchain.agents import initialize_agent
 from langchain.agents import AgentType
+from langchain.tools import WikipediaQueryRun
+
+import argparse
+
+memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
+tools=load_tools(["human", "wikipedia"]) + [get_all_contacts, call_phone_number, email_tasks, summarize]
+
+tools_desc = ""
+for tool in tools:
+    tools_desc += tool.name + " : " + tool.description + "\n"
+
+def rephrase_prompt(objective: str) -> str:
+    # llm = ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo") # type: ignore
+    # pred = llm.predict(f"Based on these tools {tools_desc} with the {objective} should be done in the following manner (outputting a single sentence), allowing for failure: ")
+    # print(pred)
+    # return pred
+    return f"{objective}"
+
+with open("info.txt") as f:
+    my_info = f.read()
+    memory.chat_memory.add_user_message("User information to us " + my_info + " end of user information.")
+
 if __name__ == "__main__":
+    parser = argparse.ArgumentParser(description="Command line argument parser example")
+
+    parser.add_argument("--objective", type=str, help="Objective for the program")
+    parser.add_argument("--verbose", type=bool, help="Verbosity of the program", default=False)
+
+    # Parse the arguments
+    args = parser.parse_args()
+
+    # Get the value of --objective
+    objective_value = args.objective
+
+    # Get the value of --verbose
+    verbose_value = args.verbose
+
     # Redirect stdout to our custom class
     sys.stdout = typing.cast(typing.TextIO, RedactPhoneNumbers(sys.stdout))
 
+    if objective_value is None:
+        objective_value = input("What is your objective? ")
+
     OBJECTIVE = (
-        input("Objective: ")
-        + "make sure you use the proper tool before calling final action to meet objective, feel free to say you need more information or cannot do something."
+        objective_value or "Find a random person in my contacts and tell them a joke"
     )
-    #llm = ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo") # type: ignore
-    llm = BedrockChat(model_id="anthropic.claude-instant-v1", model_kwargs={"temperature":0}) # type: ignore
-    memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
+    llm = ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo") # type: ignore
+    #llm = BedrockChat(model_id="anthropic.claude-instant-v1", model_kwargs={"temperature":0}) # type: ignore
+    #memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
     # Logging of LLMChains
     verbose = True
     agent = initialize_agent(
-        tools=[get_all_contacts, call_phone_number, email_tasks] + load_tools(["serpapi", "human"]),
+        tools=tools,
         llm=llm,
         agent=AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION,
-        verbose=verbose,
+        verbose=verbose_value,
         memory=memory,
     )
-    agent.run(OBJECTIVE)
+    out = agent.run(OBJECTIVE)
+    print(out)
+
+
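
With this change the script can be driven non-interactively. A usage sketch (the objective string below is only an example, and the script still expects its usual credentials, e.g. an OpenAI API key loaded from the .env file):

    python lang_prompt_demo.py --objective "Email my contacts a short summary of my week"

If --objective is omitted, the script falls back to prompting "What is your objective? ", and an empty answer uses the built-in default objective.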