-rw-r--r--  lang_prompt_demo.py  |  12
-rw-r--r--  main.py              |  78
-rw-r--r--  poetry.lock          | 382
-rw-r--r--  pyproject.toml       |   6
-rw-r--r--  speller_agent.py     |  42
-rw-r--r--  stdout_filterer.py   |   6
-rw-r--r--  test.py              |  32
-rw-r--r--  tools/contacts.py    |   5
-rw-r--r--  tools/summarize.py   |  32
-rw-r--r--  tools/vocode.py      |  15
10 files changed, 579 insertions, 31 deletions
diff --git a/lang_prompt_demo.py b/lang_prompt_demo.py
index 31b5077..3d8f1cd 100644
--- a/lang_prompt_demo.py
+++ b/lang_prompt_demo.py
@@ -6,6 +6,7 @@ from dotenv import load_dotenv
from tools.contacts import get_all_contacts
from tools.vocode import call_phone_number
from tools.get_user_inputs import get_desired_inputs
+from tools.email_tool import email_tasks
from langchain.memory import ConversationBufferMemory
from langchain.agents import load_tools
@@ -14,6 +15,7 @@ from stdout_filterer import RedactPhoneNumbers
load_dotenv()
from langchain.chat_models import ChatOpenAI
+from langchain.chat_models import BedrockChat
from langchain.agents import initialize_agent
from langchain.agents import AgentType
@@ -26,17 +28,13 @@ if __name__ == "__main__":
+ "make sure you use the proper tool before calling final action to meet objective, feel free to say you need more information or cannot do something."
or "Find a random person in my contacts and tell them a joke"
)
- llm = ChatOpenAI(temperature=0, model_name="gpt-4") # type: ignore
+ #llm = ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo") # type: ignore
+ llm = BedrockChat(model_id="anthropic.claude-instant-v1", model_kwargs={"temperature":0}) # type: ignore
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
# Logging of LLMChains
verbose = True
agent = initialize_agent(
- tools=[
- get_all_contacts,
- call_phone_number,
- get_desired_inputs,
- ]
- + load_tools(["human"]),
+ tools=[get_all_contacts, call_phone_number, email_tasks] + load_tools(["serpapi", "human"]),
llm=llm,
agent=AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION,
verbose=verbose,
diff --git a/main.py b/main.py
index c49cdd6..54b6c9c 100644
--- a/main.py
+++ b/main.py
@@ -1,3 +1,11 @@
+import asyncio
+
+import os
+from typing import AsyncGenerator, AsyncIterable, Awaitable, Optional, Tuple
+
+from vocode.streaming.models.agent import AgentConfig, AgentType
+from vocode.streaming.agent.base_agent import BaseAgent, RespondAgent
+
import logging
import os
from fastapi import FastAPI
@@ -14,6 +22,17 @@ from vocode.streaming.telephony.server.base import (
TelephonyServer,
)
+from vocode.streaming.telephony.server.base import TwilioCallConfig
+
+import uvicorn
+from dotenv import load_dotenv
+from fastapi import FastAPI
+from fastapi.responses import StreamingResponse
+from langchain.callbacks import AsyncIteratorCallbackHandler
+from langchain.chat_models import ChatOpenAI
+from langchain.schema import HumanMessage
+from pydantic import BaseModel
+
from speller_agent import SpellerAgentFactory
import sys
@@ -23,7 +42,7 @@ from dotenv import load_dotenv
load_dotenv()
-app = FastAPI(docs_url=None)
+app = FastAPI()
logging.basicConfig()
logger = logging.getLogger(__name__)
@@ -46,17 +65,23 @@ if not BASE_URL:
if not BASE_URL:
raise ValueError("BASE_URL must be set in environment if not using pyngrok")
+from speller_agent import SpellerAgentConfig
+
+print(AgentType)
+
telephony_server = TelephonyServer(
base_url=BASE_URL,
config_manager=config_manager,
inbound_call_configs=[
TwilioInboundCallConfig(
url="/inbound_call",
- agent_config=ChatGPTAgentConfig(
- initial_message=BaseMessage(text="What up."),
- prompt_preamble="Act as a customer talking to 'Cosmos', a pizza establisment ordering a large pepperoni pizza for pickup. If asked for a name, your name is 'Hunter McRobie', and your credit card number is 4743 2401 5792 0539 CVV: 123 and expiratoin is 10/25. If asked for numbers, say them one by one",#"Have a polite conversation about life while talking like a pirate.",
- generate_responses=True,
- ),
+ # agent_config=ChatGPTAgentConfig(
+ # initial_message=BaseMessage(text="What up."),
+ # prompt_preamble="Act as a customer talking to 'Cosmos', a pizza establisment ordering a large pepperoni pizza for pickup. If asked for a name, your name is 'Hunter McRobie', and your credit card number is 4743 2401 5792 0539 CVV: 123 and expiratoin is 10/25. If asked for numbers, say them one by one",#"Have a polite conversation about life while talking like a pirate.",
+ # generate_responses=True,
+ # model_name="gpt-3.5-turbo"
+ # ),
+ agent_config=SpellerAgentConfig(generate_responses=False, initial_message=BaseMessage(text="What up.")),
twilio_config=TwilioConfig(
account_sid=os.environ["TWILIO_ACCOUNT_SID"],
auth_token=os.environ["TWILIO_AUTH_TOKEN"],
@@ -71,4 +96,45 @@ telephony_server = TelephonyServer(
logger=logger,
)
+async def send_message(message: str) -> AsyncIterable[str]:
+ callback = AsyncIteratorCallbackHandler()
+ model = ChatOpenAI(
+ streaming=True,
+ verbose=True,
+ callbacks=[callback],
+ )
+
+ async def wrap_done(fn: Awaitable, event: asyncio.Event):
+        """Wrap an awaitable with an event to signal when it's done or an exception is raised."""
+ try:
+ await fn
+ except Exception as e:
+ # TODO: handle exception
+ print(f"Caught exception: {e}")
+ finally:
+ # Signal the aiter to stop.
+ event.set()
+
+ # Begin a task that runs in the background.
+ task = asyncio.create_task(wrap_done(
+ model.agenerate(messages=[[HumanMessage(content=message)]]),
+ callback.done),
+ )
+
+ async for token in callback.aiter():
+ # Use server-sent-events to stream the response
+ yield f"data: {token}\n\n"
+
+ await task
+
+
+class StreamRequest(BaseModel):
+ """Request body for streaming."""
+ message: str
+
+
+@app.post("/stream")
+def stream(body: StreamRequest):
+ return StreamingResponse(send_message(body.message), media_type="text/event-stream")
+
app.include_router(telephony_server.get_router())
\ No newline at end of file
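
Note: the new /stream route above streams model tokens back as server-sent events ("data: <token>" lines). A minimal client sketch to exercise it, assuming the FastAPI app is served locally on port 3000 (host and port are placeholders, not values from this diff):

# Illustrative client for the /stream endpoint added in main.py above.
# Assumes the app is reachable at http://localhost:3000 (placeholder).
import requests

with requests.post(
    "http://localhost:3000/stream",
    json={"message": "Tell me a joke"},
    stream=True,
) as resp:
    for line in resp.iter_lines(decode_unicode=True):
        # Each SSE event arrives as a "data: <token>" line; blank lines separate events.
        if line and line.startswith("data: "):
            print(line[len("data: "):], end="", flush=True)
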
diff --git a/poetry.lock b/poetry.lock
index d728172..e96f2d0 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -260,6 +260,24 @@ files = [
]
[[package]]
+name = "beautifulsoup4"
+version = "4.12.2"
+description = "Screen-scraping library"
+optional = false
+python-versions = ">=3.6.0"
+files = [
+ {file = "beautifulsoup4-4.12.2-py3-none-any.whl", hash = "sha256:bd2520ca0d9d7d12694a53d44ac482d181b4ec1888909b035a3dbf40d0f57d4a"},
+ {file = "beautifulsoup4-4.12.2.tar.gz", hash = "sha256:492bbc69dca35d12daac71c4db1bfff0c876c00ef4a2ffacce226d4638eb72da"},
+]
+
+[package.dependencies]
+soupsieve = ">1.2"
+
+[package.extras]
+html5lib = ["html5lib"]
+lxml = ["lxml"]
+
+[[package]]
name = "blinker"
version = "1.6.3"
description = "Fast, simple object-to-object and broadcast signaling"
@@ -271,6 +289,55 @@ files = [
]
[[package]]
+name = "boto3"
+version = "1.28.63"
+description = "The AWS SDK for Python"
+optional = false
+python-versions = ">= 3.7"
+files = [
+ {file = "boto3-1.28.63-py3-none-any.whl", hash = "sha256:65d052ec13197460586ee385aa2d6bba0e7378d2d2c7f3e93c044c43ae1ca782"},
+ {file = "boto3-1.28.63.tar.gz", hash = "sha256:94218aba2feb5b404b665b8d76c172dc654f79b4c5fa0e9e92459c098da87bf4"},
+]
+
+[package.dependencies]
+botocore = ">=1.31.63,<1.32.0"
+jmespath = ">=0.7.1,<2.0.0"
+s3transfer = ">=0.7.0,<0.8.0"
+
+[package.extras]
+crt = ["botocore[crt] (>=1.21.0,<2.0a0)"]
+
+[[package]]
+name = "botocore"
+version = "1.31.63"
+description = "Low-level, data-driven core of boto 3."
+optional = false
+python-versions = ">= 3.7"
+files = [
+ {file = "botocore-1.31.63-py3-none-any.whl", hash = "sha256:cb9db5db5af865b1fc2e1405b967db5d78dd0f4d84e5dc1974e082733c1034b7"},
+ {file = "botocore-1.31.63.tar.gz", hash = "sha256:6e582c811ea74f25bdb490ac372b2645de4a60286b42ddd8c69f3b6df82b6b12"},
+]
+
+[package.dependencies]
+jmespath = ">=0.7.1,<2.0.0"
+python-dateutil = ">=2.1,<3.0.0"
+urllib3 = {version = ">=1.25.4,<2.1", markers = "python_version >= \"3.10\""}
+
+[package.extras]
+crt = ["awscrt (==0.16.26)"]
+
+[[package]]
+name = "cachetools"
+version = "5.3.1"
+description = "Extensible memoizing collections and decorators"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "cachetools-5.3.1-py3-none-any.whl", hash = "sha256:95ef631eeaea14ba2e36f06437f36463aac3a096799e876ee55e5cdccb102590"},
+ {file = "cachetools-5.3.1.tar.gz", hash = "sha256:dce83f2d9b4e1f732a8cd44af8e8fab2dbe46201467fc98b3ef8f269092bf62b"},
+]
+
+[[package]]
name = "certifi"
version = "2023.7.22"
description = "Python package for providing Mozilla's CA Bundle."
@@ -768,6 +835,132 @@ ssh = ["paramiko"]
tqdm = ["tqdm"]
[[package]]
+name = "google-api-core"
+version = "2.12.0"
+description = "Google API client core library"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "google-api-core-2.12.0.tar.gz", hash = "sha256:c22e01b1e3c4dcd90998494879612c38d0a3411d1f7b679eb89e2abe3ce1f553"},
+ {file = "google_api_core-2.12.0-py3-none-any.whl", hash = "sha256:ec6054f7d64ad13b41e43d96f735acbd763b0f3b695dabaa2d579673f6a6e160"},
+]
+
+[package.dependencies]
+google-auth = ">=2.14.1,<3.0.dev0"
+googleapis-common-protos = ">=1.56.2,<2.0.dev0"
+protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0.dev0"
+requests = ">=2.18.0,<3.0.0.dev0"
+
+[package.extras]
+grpc = ["grpcio (>=1.33.2,<2.0dev)", "grpcio (>=1.49.1,<2.0dev)", "grpcio-status (>=1.33.2,<2.0.dev0)", "grpcio-status (>=1.49.1,<2.0.dev0)"]
+grpcgcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"]
+grpcio-gcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"]
+
+[[package]]
+name = "google-api-python-client"
+version = "2.103.0"
+description = "Google API Client Library for Python"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "google-api-python-client-2.103.0.tar.gz", hash = "sha256:5b48dc23913b9a1b447991add03f27c335831559b5a870c522316eae671caf44"},
+ {file = "google_api_python_client-2.103.0-py2.py3-none-any.whl", hash = "sha256:5d6cf80cc34598a85b73e7e689e6eb1ba34f342095aeab9ec408f94521382a7c"},
+]
+
+[package.dependencies]
+google-api-core = ">=1.31.5,<2.0.dev0 || >2.3.0,<3.0.0.dev0"
+google-auth = ">=1.19.0,<3.0.0.dev0"
+google-auth-httplib2 = ">=0.1.0"
+httplib2 = ">=0.15.0,<1.dev0"
+uritemplate = ">=3.0.1,<5"
+
+[[package]]
+name = "google-auth"
+version = "2.23.3"
+description = "Google Authentication Library"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "google-auth-2.23.3.tar.gz", hash = "sha256:6864247895eea5d13b9c57c9e03abb49cb94ce2dc7c58e91cba3248c7477c9e3"},
+ {file = "google_auth-2.23.3-py2.py3-none-any.whl", hash = "sha256:a8f4608e65c244ead9e0538f181a96c6e11199ec114d41f1d7b1bffa96937bda"},
+]
+
+[package.dependencies]
+cachetools = ">=2.0.0,<6.0"
+pyasn1-modules = ">=0.2.1"
+rsa = ">=3.1.4,<5"
+
+[package.extras]
+aiohttp = ["aiohttp (>=3.6.2,<4.0.0.dev0)", "requests (>=2.20.0,<3.0.0.dev0)"]
+enterprise-cert = ["cryptography (==36.0.2)", "pyopenssl (==22.0.0)"]
+pyopenssl = ["cryptography (>=38.0.3)", "pyopenssl (>=20.0.0)"]
+reauth = ["pyu2f (>=0.1.5)"]
+requests = ["requests (>=2.20.0,<3.0.0.dev0)"]
+
+[[package]]
+name = "google-auth-httplib2"
+version = "0.1.1"
+description = "Google Authentication Library: httplib2 transport"
+optional = false
+python-versions = "*"
+files = [
+ {file = "google-auth-httplib2-0.1.1.tar.gz", hash = "sha256:c64bc555fdc6dd788ea62ecf7bccffcf497bf77244887a3f3d7a5a02f8e3fc29"},
+ {file = "google_auth_httplib2-0.1.1-py2.py3-none-any.whl", hash = "sha256:42c50900b8e4dcdf8222364d1f0efe32b8421fb6ed72f2613f12f75cc933478c"},
+]
+
+[package.dependencies]
+google-auth = "*"
+httplib2 = ">=0.19.0"
+
+[[package]]
+name = "google-auth-oauthlib"
+version = "1.1.0"
+description = "Google Authentication Library"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "google-auth-oauthlib-1.1.0.tar.gz", hash = "sha256:83ea8c3b0881e453790baff4448e8a6112ac8778d1de9da0b68010b843937afb"},
+ {file = "google_auth_oauthlib-1.1.0-py2.py3-none-any.whl", hash = "sha256:089c6e587d36f4803ac7e0720c045c6a8b1fd1790088b8424975b90d0ee61c12"},
+]
+
+[package.dependencies]
+google-auth = ">=2.15.0"
+requests-oauthlib = ">=0.7.0"
+
+[package.extras]
+tool = ["click (>=6.0.0)"]
+
+[[package]]
+name = "google-search-results"
+version = "2.4.2"
+description = "Scrape and search localized results from Google, Bing, Baidu, Yahoo, Yandex, Ebay, Homedepot, youtube at scale using SerpApi.com"
+optional = false
+python-versions = ">=3.5"
+files = [
+ {file = "google_search_results-2.4.2.tar.gz", hash = "sha256:603a30ecae2af8e600b22635757a6df275dad4b934f975e67878ccd640b78245"},
+]
+
+[package.dependencies]
+requests = "*"
+
+[[package]]
+name = "googleapis-common-protos"
+version = "1.61.0"
+description = "Common protobufs used in Google APIs"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "googleapis-common-protos-1.61.0.tar.gz", hash = "sha256:8a64866a97f6304a7179873a465d6eee97b7a24ec6cfd78e0f575e96b821240b"},
+ {file = "googleapis_common_protos-1.61.0-py2.py3-none-any.whl", hash = "sha256:22f1915393bb3245343f6efe87f6fe868532efc12aa26b391b15132e1279f1c0"},
+]
+
+[package.dependencies]
+protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0.dev0"
+
+[package.extras]
+grpc = ["grpcio (>=1.44.0,<2.0.0.dev0)"]
+
+[[package]]
name = "greenlet"
version = "3.0.0"
description = "Lightweight in-process concurrent programming"
@@ -875,6 +1068,20 @@ http2 = ["h2 (>=3,<5)"]
socks = ["socksio (==1.*)"]
[[package]]
+name = "httplib2"
+version = "0.22.0"
+description = "A comprehensive HTTP client library."
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+files = [
+ {file = "httplib2-0.22.0-py3-none-any.whl", hash = "sha256:14ae0a53c1ba8f3d37e9e27cf37eabb0fb9980f435ba405d546948b009dd64dc"},
+ {file = "httplib2-0.22.0.tar.gz", hash = "sha256:d7a10bc5ef5ab08322488bde8c726eeee5c8618723fdb399597ec58f3d82df81"},
+]
+
+[package.dependencies]
+pyparsing = {version = ">=2.4.2,<3.0.0 || >3.0.0,<3.0.1 || >3.0.1,<3.0.2 || >3.0.2,<3.0.3 || >3.0.3,<4", markers = "python_version > \"3.0\""}
+
+[[package]]
name = "httpx"
version = "0.25.0"
description = "The next generation HTTP client."
@@ -1060,6 +1267,17 @@ MarkupSafe = ">=2.0"
i18n = ["Babel (>=2.7)"]
[[package]]
+name = "jmespath"
+version = "1.0.1"
+description = "JSON Matching Expressions"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "jmespath-1.0.1-py3-none-any.whl", hash = "sha256:02e2e4cc71b5bcab88332eebf907519190dd9e6e82107fa7f83b1003a6252980"},
+ {file = "jmespath-1.0.1.tar.gz", hash = "sha256:90261b206d6defd58fdd5e85f478bf633a2901798906be2ad389150c5c60edbe"},
+]
+
+[[package]]
name = "joblib"
version = "1.3.2"
description = "Lightweight pipelining with Python functions"
@@ -1484,6 +1702,22 @@ files = [
]
[[package]]
+name = "oauthlib"
+version = "3.2.2"
+description = "A generic, spec-compliant, thorough implementation of the OAuth request-signing logic"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "oauthlib-3.2.2-py3-none-any.whl", hash = "sha256:8139f29aac13e25d502680e9e19963e83f16838d48a0d71c287fe40e7067fbca"},
+ {file = "oauthlib-3.2.2.tar.gz", hash = "sha256:9859c40929662bec5d64f34d01c99e093149682a3f38915dc0655d5a633dd918"},
+]
+
+[package.extras]
+rsa = ["cryptography (>=3.0.0)"]
+signals = ["blinker (>=1.4.0)"]
+signedtoken = ["cryptography (>=3.0.0)", "pyjwt (>=2.0.0,<3)"]
+
+[[package]]
name = "openai"
version = "0.27.10"
description = "Python client library for the OpenAI API"
@@ -1627,6 +1861,28 @@ files = [
wcwidth = "*"
[[package]]
+name = "protobuf"
+version = "4.24.4"
+description = ""
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "protobuf-4.24.4-cp310-abi3-win32.whl", hash = "sha256:ec9912d5cb6714a5710e28e592ee1093d68c5ebfeda61983b3f40331da0b1ebb"},
+ {file = "protobuf-4.24.4-cp310-abi3-win_amd64.whl", hash = "sha256:1badab72aa8a3a2b812eacfede5020472e16c6b2212d737cefd685884c191085"},
+ {file = "protobuf-4.24.4-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:8e61a27f362369c2f33248a0ff6896c20dcd47b5d48239cb9720134bef6082e4"},
+ {file = "protobuf-4.24.4-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:bffa46ad9612e6779d0e51ae586fde768339b791a50610d85eb162daeb23661e"},
+ {file = "protobuf-4.24.4-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:b493cb590960ff863743b9ff1452c413c2ee12b782f48beca77c8da3e2ffe9d9"},
+ {file = "protobuf-4.24.4-cp37-cp37m-win32.whl", hash = "sha256:dbbed8a56e56cee8d9d522ce844a1379a72a70f453bde6243e3c86c30c2a3d46"},
+ {file = "protobuf-4.24.4-cp37-cp37m-win_amd64.whl", hash = "sha256:6b7d2e1c753715dcfe9d284a25a52d67818dd43c4932574307daf836f0071e37"},
+ {file = "protobuf-4.24.4-cp38-cp38-win32.whl", hash = "sha256:02212557a76cd99574775a81fefeba8738d0f668d6abd0c6b1d3adcc75503dbe"},
+ {file = "protobuf-4.24.4-cp38-cp38-win_amd64.whl", hash = "sha256:2fa3886dfaae6b4c5ed2730d3bf47c7a38a72b3a1f0acb4d4caf68e6874b947b"},
+ {file = "protobuf-4.24.4-cp39-cp39-win32.whl", hash = "sha256:b77272f3e28bb416e2071186cb39efd4abbf696d682cbb5dc731308ad37fa6dd"},
+ {file = "protobuf-4.24.4-cp39-cp39-win_amd64.whl", hash = "sha256:9fee5e8aa20ef1b84123bb9232b3f4a5114d9897ed89b4b8142d81924e05d79b"},
+ {file = "protobuf-4.24.4-py3-none-any.whl", hash = "sha256:80797ce7424f8c8d2f2547e2d42bfbb6c08230ce5832d6c099a37335c9c90a92"},
+ {file = "protobuf-4.24.4.tar.gz", hash = "sha256:5a70731910cd9104762161719c3d883c960151eea077134458503723b60e3667"},
+]
+
+[[package]]
name = "ptyprocess"
version = "0.7.0"
description = "Run a subprocess in a pseudo terminal"
@@ -1652,6 +1908,31 @@ files = [
tests = ["pytest"]
[[package]]
+name = "pyasn1"
+version = "0.5.0"
+description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)"
+optional = false
+python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7"
+files = [
+ {file = "pyasn1-0.5.0-py2.py3-none-any.whl", hash = "sha256:87a2121042a1ac9358cabcaf1d07680ff97ee6404333bacca15f76aa8ad01a57"},
+ {file = "pyasn1-0.5.0.tar.gz", hash = "sha256:97b7290ca68e62a832558ec3976f15cbf911bf5d7c7039d8b861c2a0ece69fde"},
+]
+
+[[package]]
+name = "pyasn1-modules"
+version = "0.3.0"
+description = "A collection of ASN.1-based protocols modules"
+optional = false
+python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7"
+files = [
+ {file = "pyasn1_modules-0.3.0-py2.py3-none-any.whl", hash = "sha256:d3ccd6ed470d9ffbc716be08bd90efbd44d0734bc9303818f7336070984a162d"},
+ {file = "pyasn1_modules-0.3.0.tar.gz", hash = "sha256:5bd01446b736eb9d31512a30d46c1ac3395d676c6f3cafa4c03eb54b9925631c"},
+]
+
+[package.dependencies]
+pyasn1 = ">=0.4.6,<0.6.0"
+
+[[package]]
name = "pycparser"
version = "2.21"
description = "C parser in Python"
@@ -1773,6 +2054,34 @@ files = [
PyYAML = "*"
[[package]]
+name = "pyparsing"
+version = "3.1.1"
+description = "pyparsing module - Classes and methods to define and execute parsing grammars"
+optional = false
+python-versions = ">=3.6.8"
+files = [
+ {file = "pyparsing-3.1.1-py3-none-any.whl", hash = "sha256:32c7c0b711493c72ff18a981d24f28aaf9c1fb7ed5e9667c9e84e3db623bdbfb"},
+ {file = "pyparsing-3.1.1.tar.gz", hash = "sha256:ede28a1a32462f5a9705e07aea48001a08f7cf81a021585011deba701581a0db"},
+]
+
+[package.extras]
+diagrams = ["jinja2", "railroad-diagrams"]
+
+[[package]]
+name = "python-dateutil"
+version = "2.8.2"
+description = "Extensions to the standard Python datetime module"
+optional = false
+python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
+files = [
+ {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"},
+ {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"},
+]
+
+[package.dependencies]
+six = ">=1.5"
+
+[[package]]
name = "python-dotenv"
version = "1.0.0"
description = "Read key-value pairs from a .env file and set them as environment variables"
@@ -2007,6 +2316,55 @@ socks = ["PySocks (>=1.5.6,!=1.5.7)"]
use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"]
[[package]]
+name = "requests-oauthlib"
+version = "1.3.1"
+description = "OAuthlib authentication support for Requests."
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+files = [
+ {file = "requests-oauthlib-1.3.1.tar.gz", hash = "sha256:75beac4a47881eeb94d5ea5d6ad31ef88856affe2332b9aafb52c6452ccf0d7a"},
+ {file = "requests_oauthlib-1.3.1-py2.py3-none-any.whl", hash = "sha256:2577c501a2fb8d05a304c09d090d6e47c306fef15809d102b327cf8364bddab5"},
+]
+
+[package.dependencies]
+oauthlib = ">=3.0.0"
+requests = ">=2.0.0"
+
+[package.extras]
+rsa = ["oauthlib[signedtoken] (>=3.0.0)"]
+
+[[package]]
+name = "rsa"
+version = "4.9"
+description = "Pure-Python RSA implementation"
+optional = false
+python-versions = ">=3.6,<4"
+files = [
+ {file = "rsa-4.9-py3-none-any.whl", hash = "sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7"},
+ {file = "rsa-4.9.tar.gz", hash = "sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21"},
+]
+
+[package.dependencies]
+pyasn1 = ">=0.1.3"
+
+[[package]]
+name = "s3transfer"
+version = "0.7.0"
+description = "An Amazon S3 Transfer Manager"
+optional = false
+python-versions = ">= 3.7"
+files = [
+ {file = "s3transfer-0.7.0-py3-none-any.whl", hash = "sha256:10d6923c6359175f264811ef4bf6161a3156ce8e350e705396a7557d6293c33a"},
+ {file = "s3transfer-0.7.0.tar.gz", hash = "sha256:fd3889a66f5fe17299fe75b82eae6cf722554edca744ca5d5fe308b104883d2e"},
+]
+
+[package.dependencies]
+botocore = ">=1.12.36,<2.0a.0"
+
+[package.extras]
+crt = ["botocore[crt] (>=1.20.29,<2.0a.0)"]
+
+[[package]]
name = "scipy"
version = "1.11.3"
description = "Fundamental algorithms for scientific computing in Python"
@@ -2108,6 +2466,17 @@ CFFI = ">=1.0"
numpy = ["NumPy"]
[[package]]
+name = "soupsieve"
+version = "2.5"
+description = "A modern CSS selector implementation for Beautiful Soup."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "soupsieve-2.5-py3-none-any.whl", hash = "sha256:eaa337ff55a1579b6549dc679565eac1e3d000563bcb1c8ab0d0fefbc0c2cdc7"},
+ {file = "soupsieve-2.5.tar.gz", hash = "sha256:5663d5a7b3bfaeee0bc4372e7fc48f9cff4940b3eec54a6451cc5299f1097690"},
+]
+
+[[package]]
name = "speechrecognition"
version = "3.10.0"
description = "Library for performing speech recognition, with support for several engines and APIs, online and offline."
@@ -2454,6 +2823,17 @@ mypy-extensions = ">=0.3.0"
typing-extensions = ">=3.7.4"
[[package]]
+name = "uritemplate"
+version = "4.1.1"
+description = "Implementation of RFC 6570 URI Templates"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "uritemplate-4.1.1-py2.py3-none-any.whl", hash = "sha256:830c08b8d99bdd312ea4ead05994a38e8936266f84b9a7878232db50b044e02e"},
+ {file = "uritemplate-4.1.1.tar.gz", hash = "sha256:4346edfc5c3b79f694bccd6d6099a322bbeb628dbf2cd86eea55a456ce5124f0"},
+]
+
+[[package]]
name = "urllib3"
version = "2.0.6"
description = "HTTP library with thread-safe connection pooling, file post, and more."
@@ -2871,4 +3251,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p
[metadata]
lock-version = "2.0"
python-versions = "^3.11,<3.12"
-content-hash = "61dba4f1ee780b26b6e4a8b39f5b04b198665a3635a0e01b9d1482b6828f12e3"
+content-hash = "0f5b568a4317c9cda4af20640c3bbdac76b4756ded9604bf57d75f44c36c679a"
diff --git a/pyproject.toml b/pyproject.toml
index 1c83745..1e66585 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -17,6 +17,12 @@ redis = "^5.0.1"
vonage = "^3.10.0"
vocode = "^0.1.111"
elevenlabs = "0.2.20"
+google-api-python-client = "^2.103.0"
+google-auth-oauthlib = "^1.1.0"
+google-auth-httplib2 = "^0.1.1"
+beautifulsoup4 = "^4.12.2"
+google-search-results = "^2.4.2"
+boto3 = "^1.28.63"
[build-system]
diff --git a/speller_agent.py b/speller_agent.py
index ff25915..ce7c197 100644
--- a/speller_agent.py
+++ b/speller_agent.py
@@ -6,6 +6,43 @@ from vocode.streaming.models.agent import AgentConfig, AgentType, ChatGPTAgentCo
from vocode.streaming.agent.base_agent import BaseAgent, RespondAgent
from vocode.streaming.agent.factory import AgentFactory
+import os
+import sys
+import typing
+from dotenv import load_dotenv
+
+from tools.contacts import get_all_contacts
+from tools.vocode import call_phone_number
+from tools.email_tool import email_tasks
+from tools.summarize import summarize
+from langchain.memory import ConversationBufferMemory
+from langchain.utilities import SerpAPIWrapper
+
+from langchain.agents import load_tools
+
+from stdout_filterer import RedactPhoneNumbers
+
+load_dotenv()
+
+from langchain.chat_models import ChatOpenAI
+from langchain.chat_models import BedrockChat
+from langchain.agents import initialize_agent
+from langchain.agents import AgentType as LangAgentType
+
+
+llm = ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo") # type: ignore
+#llm = BedrockChat(model_id="anthropic.claude-instant-v1", model_kwargs={"temperature":0}) # type: ignore
+memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
+ # Logging of LLMChains
+verbose = True
+agent = initialize_agent(
+ tools=[get_all_contacts, call_phone_number, email_tasks, summarize] + load_tools(["serpapi", "human"]),
+ llm=llm,
+ agent=LangAgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION,
+ verbose=verbose,
+ memory=memory,
+)
+
class SpellerAgentConfig(AgentConfig, type="agent_speller"):
pass
@@ -22,14 +59,15 @@ class SpellerAgent(RespondAgent[SpellerAgentConfig]):
is_interrupt: bool = False,
) -> Tuple[Optional[str], bool]:
print("SpellerAgent: ", human_input)
- return "".join(c + " " for c in human_input), False
+ res = agent.run(human_input)
+ return res, False
class SpellerAgentFactory(AgentFactory):
def create_agent(
self, agent_config: AgentConfig, logger: Optional[logging.Logger] = None
) -> BaseAgent:
- print("Setting up agent")
+ print("Setting up agent", agent_config, agent_config.type)
if agent_config.type == AgentType.CHAT_GPT:
return ChatGPTAgent(
agent_config=typing.cast(ChatGPTAgentConfig, agent_config)
diff --git a/stdout_filterer.py b/stdout_filterer.py
index c4cf845..cb04b2d 100644
--- a/stdout_filterer.py
+++ b/stdout_filterer.py
@@ -7,9 +7,9 @@ class RedactPhoneNumbers:
def write(self, text):
# Regular expression to match phone numbers
- phone_regex = r"(\+\d{1,2}\s?)?1?\-?\.?\s?\(?\d{3}\)?[\s.-]?\d{3}[\s.-]?\d{4}"
- redacted_text = re.sub(phone_regex, "****", text)
- self.stream.write(redacted_text)
+ #phone_regex = r"(\+\d{1,2}\s?)?1?\-?\.?\s?\(?\d{3}\)?[\s.-]?\d{3}[\s.-]?\d{4}"
+ #redacted_text = re.sub(phone_regex, "****", text)
+ self.stream.write(text)
def flush(self):
self.stream.flush()
diff --git a/test.py b/test.py
new file mode 100644
index 0000000..568e449
--- /dev/null
+++ b/test.py
@@ -0,0 +1,32 @@
+import os
+from dotenv import load_dotenv
+
+load_dotenv()
+
+from vocode.streaming.telephony.conversation.outbound_call import OutboundCall
+from vocode.streaming.telephony.config_manager.redis_config_manager import (
+ RedisConfigManager,
+)
+
+from speller_agent import SpellerAgentConfig
+
+BASE_URL = os.environ["BASE_URL"]
+
+
+async def main():
+ config_manager = RedisConfigManager()
+
+ outbound_call = OutboundCall(
+ base_url=BASE_URL,
+ to_phone="+17208828227",
+ from_phone="+18445610144",
+ config_manager=config_manager,
+ agent_config=SpellerAgentConfig(generate_responses=False),
+ )
+
+ input("Press enter to start call...")
+ await outbound_call.start()
+
+if __name__ == "__main__":
+ import asyncio
+    asyncio.run(main())
\ No newline at end of file
diff --git a/tools/contacts.py b/tools/contacts.py
index 4f4a3be..afdc8a4 100644
--- a/tools/contacts.py
+++ b/tools/contacts.py
@@ -9,11 +9,12 @@ import os
CONTACTS = [
{
"name": "Greg",
- "phone" : os.getenv("TEST_PHONE_NUMBER")
+ "phone" : os.getenv("TEST_PHONE_NUMBER"),
+ "email": "grsi2038@colorado.edu"
}
]
@tool("get_all_contacts")
def get_all_contacts(placeholder: str) -> List[dict]:
- """Returns all contacts in the user's phone book."""
+ """Returns all contacts in the user's phone book which includes email and phone numbers."""
    return CONTACTS
\ No newline at end of file
diff --git a/tools/summarize.py b/tools/summarize.py
new file mode 100644
index 0000000..d90c49d
--- /dev/null
+++ b/tools/summarize.py
@@ -0,0 +1,32 @@
+import logging
+import asyncio
+import os
+from langchain.agents import tool
+from dotenv import load_dotenv
+
+from langchain.agents.agent_toolkits import GmailToolkit
+
+from langchain.llms import OpenAI
+from langchain.agents import initialize_agent, AgentType
+
+load_dotenv()
+toolkit = GmailToolkit()
+
+tools = toolkit.get_tools()
+
+@tool("summarize")
+def summarize(input: str) -> bool:
+ """
+ Summarize the response to the input prompt.
+ """
+ prompt = input
+
+ llm = OpenAI(temperature=0)
+ agent = initialize_agent(
+ prompt=prompt,
+ llm=llm,
+ agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,
+ )
+
+ return agent.run(prompt)
+
diff --git a/tools/vocode.py b/tools/vocode.py
index d50c8a3..975cb4c 100644
--- a/tools/vocode.py
+++ b/tools/vocode.py
@@ -29,8 +29,8 @@ asyncio.set_event_loop(LOOP)
@tool("call phone number")
def call_phone_number(input: str) -> str:
- """Use when you need to make a phone call. Calls a phone number as a bot and returns a transcript of the conversation.
-
+ """calls a phone number as a bot and returns a transcript of the conversation.
+ make sure you call `get all contacts` first to get a list of phone numbers to call.
the input to this tool is a pipe separated list of a phone number, a prompt, and the first thing the bot should say.
The prompt should instruct the bot with what to do on the call and be in the 3rd person,
like 'the assistant is performing this task' instead of 'perform this task'.
@@ -39,7 +39,8 @@ def call_phone_number(input: str) -> str:
for example, `+15555555555|the assistant is explaining the meaning of life|i'm going to tell you the meaning of life` will call +15555555555, say 'i'm going to tell you the meaning of life', and instruct the assistant to tell the human what the meaning of life is.
"""
- phone_number, prompt, initial_message = input.split("|", 2)
+ phone_number, prompt, initial_message = input.split("|",2)
+ print(phone_number, prompt, initial_message)
call = OutboundCall(
base_url=os.environ["BASE_URL"],
to_phone=phone_number,
@@ -60,10 +61,4 @@ def call_phone_number(input: str) -> str:
logger=logging.Logger("OutboundCall"),
)
LOOP.run_until_complete(call.start())
- while True:
- maybe_transcript = get_transcript(call.conversation_id)
- if maybe_transcript:
- delete_transcript(call.conversation_id)
- return maybe_transcript
- else:
- time.sleep(1)
+ return "Call Started"
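
Note: the call_phone_number tool takes the pipe-separated input documented in its docstring and, after this change, returns "Call Started" immediately rather than polling for a transcript. A minimal, hypothetical invocation sketch (the number, prompt, and greeting are placeholders; BASE_URL and Twilio credentials are assumed to be set in the environment):

# Hypothetical direct use of the tool, for illustration only.
from tools.vocode import call_phone_number

result = call_phone_number.run(
    "+15555555555|the assistant is confirming a dinner reservation|hi, this is a quick call to confirm your reservation"
)
print(result)  # prints "Call Started" rather than a conversation transcript
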