Skip to content

Commit ac7b57e

Browse files
committed
fix: LangChain breaking changes and deprecations
1 parent efb9d91 commit ac7b57e

File tree

8 files changed: +31 additions, −32 deletions

Makefile

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -19,8 +19,8 @@ PINECONE_INDEX_NAME=rag\n\
1919
PINECONE_VECTORSTORE_TEXT_KEY=lc_id\n\
2020
PINECONE_METRIC=dotproduct\n\
2121
PINECONE_DIMENSIONS=1536\n\
22-
OPENAI_CHAT_MODEL_NAME=gpt-3.5-turbo\n\
23-
OPENAI_PROMPT_MODEL_NAME=gpt-3.5-turbo-instruct\n\
22+
OPENAI_CHAT_MODEL_NAME=gpt-4\n\
23+
OPENAI_PROMPT_MODEL_NAME=gpt-4\n\
2424
OPENAI_CHAT_TEMPERATURE=0.0\n\
2525
OPENAI_CHAT_MAX_RETRIES=3\n\
2626
DEBUG_MODE=True\n" >> .env)

README.md

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -134,9 +134,9 @@ Set these as environment variables on the command line, or in a .env file that s
134134
OPENAI_API_ORGANIZATION=PLEASE-ADD-ME
135135
OPENAI_API_KEY=PLEASE-ADD-ME
136136
OPENAI_CHAT_MAX_RETRIES=3
137-
OPENAI_CHAT_MODEL_NAME=gpt-3.5-turbo
137+
OPENAI_CHAT_MODEL_NAME=gpt-4
138138
OPENAI_CHAT_TEMPERATURE=0.0
139-
OPENAI_PROMPT_MODEL_NAME=gpt-3.5-turbo-instruct
139+
OPENAI_PROMPT_MODEL_NAME=gpt-4
140140

141141
# Pinecone API
142142
PINECONE_API_KEY=PLEASE-ADD-ME

models/conf.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -76,7 +76,7 @@ class SettingsDefaults:
7676

7777
PINECONE_API_KEY: SecretStr = SecretStr(None)
7878
PINECONE_ENVIRONMENT = "gcp-starter"
79-
PINECONE_INDEX_NAME = "rag"
79+
PINECONE_INDEX_NAME = "openai-embeddings"
8080
PINECONE_VECTORSTORE_TEXT_KEY = "lc_id"
8181
PINECONE_METRIC = "dotproduct"
8282
PINECONE_DIMENSIONS = 1536
@@ -86,8 +86,8 @@ class SettingsDefaults:
8686
OPENAI_ENDPOINT_IMAGE_N = 4
8787
OPENAI_ENDPOINT_IMAGE_SIZE = "1024x768"
8888
OPENAI_CHAT_CACHE = True
89-
OPENAI_CHAT_MODEL_NAME = "gpt-3.5-turbo"
90-
OPENAI_PROMPT_MODEL_NAME = "gpt-3.5-turbo-instruct"
89+
OPENAI_CHAT_MODEL_NAME = "gpt-4"
90+
OPENAI_PROMPT_MODEL_NAME = "gpt-4"
9191
OPENAI_CHAT_TEMPERATURE = 0.0
9292
OPENAI_CHAT_MAX_RETRIES = 3
9393

models/hybrid_search_retreiver.py

Lines changed: 4 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,7 @@
3636

3737
# from langchain_community.chat_models import ChatOpenAI
3838
# prompting and chat
39-
from langchain_openai import ChatOpenAI, OpenAI
39+
from langchain_openai import ChatOpenAI
4040
from pinecone_text.sparse import BM25Encoder # pylint: disable=import-error
4141

4242
# this project
@@ -114,17 +114,13 @@ def cached_chat_request(
114114
retval = self.chat.invoke(messages)
115115
return retval
116116

117+
# pylint: disable=unused-argument
117118
def prompt_with_template(
118119
self, prompt: PromptTemplate, concept: str, model: str = settings.openai_prompt_model_name
119120
) -> str:
120121
"""Prompt with template."""
121-
llm = OpenAI(
122-
model=model,
123-
api_key=settings.openai_api_key.get_secret_value(), # pylint: disable=no-member
124-
organization=settings.openai_api_organization,
125-
)
126-
retval = llm(prompt.format(concept=concept))
127-
return retval
122+
retval = self.chat.invoke(prompt.format(concept=concept))
123+
return retval.content if retval else "no response"
128124

129125
def load(self, filepath: str):
130126
"""Pdf loader."""

models/pinecone.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -106,6 +106,8 @@ def openai_embeddings(self) -> OpenAIEmbeddings:
106106
def pinecone(self):
107107
"""Pinecone lazy read-only property."""
108108
if self._pinecone is None:
109+
print("Initializing Pinecone...")
110+
print(f"API Key: {settings.pinecone_api_key.get_secret_value()}")
109111
self._pinecone = Pinecone(api_key=settings.pinecone_api_key.get_secret_value())
110112
return self._pinecone
111113

@@ -153,7 +155,7 @@ def create(self):
153155
print("Creating index. This may take a few minutes...")
154156
serverless_spec = ServerlessSpec(
155157
cloud="aws",
156-
region="us-west-2",
158+
region="us-east-1",
157159
)
158160
try:
159161
self.pinecone.create_index(

models/tests/mock_data/.env.test_01

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@
99
OPENAI_ENDPOINT_IMAGE_N = 1
1010
OPENAI_ENDPOINT_IMAGE_SIZE = "TEST_1024x768"
1111
OPENAI_CHAT_CACHE = False
12-
OPENAI_CHAT_MODEL_NAME = "TEST_gpt-3.5-turbo"
13-
OPENAI_PROMPT_MODEL_NAME = "TEST_gpt-3.5-turbo-instruct"
12+
OPENAI_CHAT_MODEL_NAME = "TEST_gpt-4"
13+
OPENAI_PROMPT_MODEL_NAME = "TEST_gpt-4"
1414
OPENAI_CHAT_TEMPERATURE = 1.0
1515
OPENAI_CHAT_MAX_RETRIES = 5

models/tests/test_configuration.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -97,7 +97,7 @@ def test_env_overrides(self):
9797
assert mock_settings.openai_endpoint_image_n == 1
9898
assert mock_settings.openai_endpoint_image_size == "TEST_1024x768"
9999
assert mock_settings.openai_chat_cache is False
100-
assert mock_settings.openai_chat_model_name == "TEST_gpt-3.5-turbo"
100+
assert mock_settings.openai_chat_model_name == "TEST_gpt-4"
101101
assert mock_settings.openai_prompt_model_name == "TEST_text-davinci-003"
102102
assert mock_settings.openai_chat_temperature == 1.0
103103
assert mock_settings.openai_chat_max_retries == 5
@@ -139,7 +139,7 @@ def test_configure_with_class_constructor(self):
139139
openai_endpoint_image_n=1,
140140
openai_endpoint_image_size="TEST_1024x768",
141141
openai_chat_cache=False,
142-
openai_chat_model_name="TEST_gpt-3.5-turbo",
142+
openai_chat_model_name="TEST_gpt-4",
143143
openai_prompt_model_name="TEST_text-davinci-003",
144144
openai_chat_temperature=1.0,
145145
openai_chat_max_retries=5,
@@ -156,7 +156,7 @@ def test_configure_with_class_constructor(self):
156156
assert mock_settings.openai_endpoint_image_n == 1
157157
assert mock_settings.openai_endpoint_image_size == "TEST_1024x768"
158158
assert mock_settings.openai_chat_cache is False
159-
assert mock_settings.openai_chat_model_name == "TEST_gpt-3.5-turbo"
159+
assert mock_settings.openai_chat_model_name == "TEST_gpt-4"
160160
assert mock_settings.openai_prompt_model_name == "TEST_text-davinci-003"
161161
assert mock_settings.openai_chat_temperature == 1.0
162162
assert mock_settings.openai_chat_max_retries == 5
@@ -184,7 +184,7 @@ def test_readonly_settings(self):
184184
with pytest.raises(PydanticValidationError):
185185
mock_settings.openai_chat_cache = False
186186
with pytest.raises(PydanticValidationError):
187-
mock_settings.openai_chat_model_name = "TEST_gpt-3.5-turbo"
187+
mock_settings.openai_chat_model_name = "TEST_gpt-4"
188188
with pytest.raises(PydanticValidationError):
189189
mock_settings.openai_prompt_model_name = "TEST_text-davinci-003"
190190
with pytest.raises(PydanticValidationError):

models/yt.py

Lines changed: 11 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -15,23 +15,24 @@
1515

1616
# 5.) sequential chains
1717
# 4.) chains
18-
from langchain.chains import LLMChain, SimpleSequentialChain
19-
20-
# 1.) wrappers
21-
from langchain.llms.openai import OpenAI
18+
from langchain.chains.llm import LLMChain
19+
from langchain.chains.sequential import SimpleSequentialChain
2220

2321
# 3.) prompt templates
2422
from langchain.prompts import PromptTemplate
25-
from langchain.python import PythonREPL
2623

2724
# 2.) models and messages
2825
from langchain.schema import HumanMessage, SystemMessage # AIMessage (not used)
2926

3027
# 6.) embeddings
3128
from langchain.text_splitter import RecursiveCharacterTextSplitter
3229

30+
# 1.) wrappers
31+
from langchain_community.llms.openai import OpenAI
32+
3333
# 8.) LangChain agents
3434
from langchain_experimental.agents.agent_toolkits.python.base import create_python_agent
35+
from langchain_experimental.utilities.python import PythonREPL
3536

3637
# from langchain_community.chat_models import ChatOpenAI
3738
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
@@ -77,14 +78,14 @@ class LangChainDev:
7778
def test_01_basic(self):
7879
"""Test a basic request"""
7980

80-
llm = OpenAI(model_name="gpt-3.5-turbo-instruct")
81+
llm = OpenAI(model_name="gpt-4")
8182
retval = llm("explain large language models in one sentence")
8283
print(retval)
8384

8485
# 2.) models and messages. minute 6:08
8586
def test_02_chat_model(self):
8687
"""Test a chat model"""
87-
chat = ChatOpenAI(model_name="gpt-3.5-turbo-instruct", temperature=0.3)
88+
chat = ChatOpenAI(model_name="gpt-4", temperature=0.3)
8889
messages = [
8990
SystemMessage(content="You are an expert data scientist"),
9091
HumanMessage(content="Write a Python script that trains a neural network on simulated data"),
@@ -104,7 +105,7 @@ def get_prompt(self):
104105

105106
def test_03_prompt_templates(self):
106107
"""Test prompt templates"""
107-
llm = OpenAI(model_name="gpt-3.5-turbo-instruct")
108+
llm = OpenAI(model_name="gpt-4")
108109
prompt = self.get_prompt()
109110
retval = llm(prompt.format(concept="regularization"))
110111
print(retval)
@@ -117,7 +118,7 @@ def get_chain(self, llm, prompt):
117118

118119
def test_04_chain(self):
119120
"""Test a chain"""
120-
llm = OpenAI(model_name="gpt-3.5-turbo-instruct")
121+
llm = OpenAI(model_name="gpt-4")
121122
prompt = self.get_prompt()
122123
chain = self.get_chain(llm=llm, prompt=prompt)
123124
print(chain.run("autoencoder"))
@@ -139,7 +140,7 @@ def get_prompt_two(self):
139140

140141
def get_explanation(self):
141142
"""Get an explanation"""
142-
llm = OpenAI(model_name="gpt-3.5-turbo-instruct")
143+
llm = OpenAI(model_name="gpt-4")
143144
prompt = self.get_prompt()
144145
chain_one = self.get_chain(llm=llm, prompt=prompt)
145146

Comments (0) — no commit comments.