diff --git a/.github/actions/merge-branch/action.yml b/.github/actions/merge-branch/action.yml index 896656a..4eca73e 100644 --- a/.github/actions/merge-branch/action.yml +++ b/.github/actions/merge-branch/action.yml @@ -22,7 +22,7 @@ inputs: type: string python-version: - description: "The version of Python to use, such as 3.11.0" + description: "The version of Python to use, such as 3.12" required: true type: string diff --git a/.github/actions/tests/python/action.yml b/.github/actions/tests/python/action.yml index 8243439..06d7ce0 100644 --- a/.github/actions/tests/python/action.yml +++ b/.github/actions/tests/python/action.yml @@ -8,7 +8,7 @@ branding: color: "orange" inputs: python-version: - description: "The version of Python to use, such as 3.11.0" + description: "The version of Python to use, such as 3.12" required: true type: string openai-api-organization: diff --git a/.github/workflows/precommitVersionBumps.yml b/.github/workflows/precommitVersionBumps.yml index 05dc20d..3b9a012 100644 --- a/.github/workflows/precommitVersionBumps.yml +++ b/.github/workflows/precommitVersionBumps.yml @@ -51,7 +51,7 @@ jobs: - name: Set up Python uses: actions/setup-python@v5 with: - python-version: "3.11" + python-version: "3.12" - name: locate site-packages path shell: bash diff --git a/.github/workflows/pullRequestController.yml b/.github/workflows/pullRequestController.yml index e2cf597..89398b5 100644 --- a/.github/workflows/pullRequestController.yml +++ b/.github/workflows/pullRequestController.yml @@ -50,7 +50,7 @@ on: - "./models/**" env: - python-version: "3.11" + python-version: "3.12" jobs: check_for_pending_release: diff --git a/.github/workflows/semanticVersionBump.yml b/.github/workflows/semanticVersionBump.yml index 415007a..6ba8fe3 100644 --- a/.github/workflows/semanticVersionBump.yml +++ b/.github/workflows/semanticVersionBump.yml @@ -39,10 +39,10 @@ jobs: restore-keys: | ${{ runner.os }}-node - - name: Set up Python 3.11 + - name: Set up Python 3.12 
uses: actions/setup-python@v5 with: - python-version: "3.11" + python-version: "3.12" - name: Setup Node.js environment uses: actions/setup-node@v4 diff --git a/.github/workflows/testsPython.yml b/.github/workflows/testsPython.yml index 97146b0..8a74292 100644 --- a/.github/workflows/testsPython.yml +++ b/.github/workflows/testsPython.yml @@ -15,7 +15,7 @@ on: - next-major env: - python-version: "3.11" + python-version: "3.12" jobs: python-unit-tests: diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index ed5cec7..01cbe22 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,6 +1,6 @@ default_language_version: # default language version for each language - python: python3.11 + python: python3.12 repos: - repo: https://github.com/codespell-project/codespell rev: v2.3.0 diff --git a/Makefile b/Makefile index 2f7149c..9103059 100644 --- a/Makefile +++ b/Makefile @@ -19,8 +19,8 @@ PINECONE_INDEX_NAME=rag\n\ PINECONE_VECTORSTORE_TEXT_KEY=lc_id\n\ PINECONE_METRIC=dotproduct\n\ PINECONE_DIMENSIONS=1536\n\ -OPENAI_CHAT_MODEL_NAME=gpt-3.5-turbo\n\ -OPENAI_PROMPT_MODEL_NAME=gpt-3.5-turbo-instruct\n\ +OPENAI_CHAT_MODEL_NAME=gpt-4\n\ +OPENAI_PROMPT_MODEL_NAME=gpt-4\n\ OPENAI_CHAT_TEMPERATURE=0.0\n\ OPENAI_CHAT_MAX_RETRIES=3\n\ DEBUG_MODE=True\n" >> .env) diff --git a/README.md b/README.md index 2f893a6..8bf2cae 100644 --- a/README.md +++ b/README.md @@ -122,7 +122,7 @@ These are just a few examples of the analytics and accounting courses offered at - [OpenAI platform API key](https://platform.openai.com/). _If you're new to OpenAI API then see [How to Get an OpenAI API Key](./doc/OPENAI_API_GETTING_STARTED_GUIDE.md)_ - [Pinecone](https://www.pinecone.io/) API key. -- [Python 3.11](https://www.python.org/downloads/): for creating virtual environment used for building AWS Lambda Layer, and locally by pre-commit linters and code formatters. 
+- [Python 3.12](https://www.python.org/downloads/): for creating virtual environment used for building AWS Lambda Layer, and locally by pre-commit linters and code formatters. - [NodeJS](https://nodejs.org/en/download): used with NPM for local ReactJS developer environment, and for configuring/testing Semantic Release. ## Configuration defaults @@ -134,9 +134,9 @@ Set these as environment variables on the command line, or in a .env file that s OPENAI_API_ORGANIZATION=PLEASE-ADD-ME OPENAI_API_KEY=PLEASE-ADD-ME OPENAI_CHAT_MAX_RETRIES=3 -OPENAI_CHAT_MODEL_NAME=gpt-3.5-turbo +OPENAI_CHAT_MODEL_NAME=gpt-4 OPENAI_CHAT_TEMPERATURE=0.0 -OPENAI_PROMPT_MODEL_NAME=gpt-3.5-turbo-instruct +OPENAI_PROMPT_MODEL_NAME=gpt-4 # Pinecone API PINECONE_API_KEY=PLEASE-ADD-ME diff --git a/models/__version__.py b/models/__version__.py index 4652028..856cb68 100644 --- a/models/__version__.py +++ b/models/__version__.py @@ -1,2 +1,2 @@ # Managed via automated CI/CD in .github/workflows/semanticVersionBump.yml. 
-__version__ = "1.3.4" +__version__ = "1.3.5" diff --git a/models/conf.py b/models/conf.py index dd804e6..7ef9dc6 100644 --- a/models/conf.py +++ b/models/conf.py @@ -76,7 +76,7 @@ class SettingsDefaults: PINECONE_API_KEY: SecretStr = SecretStr(None) PINECONE_ENVIRONMENT = "gcp-starter" - PINECONE_INDEX_NAME = "rag" + PINECONE_INDEX_NAME = "openai-embeddings" PINECONE_VECTORSTORE_TEXT_KEY = "lc_id" PINECONE_METRIC = "dotproduct" PINECONE_DIMENSIONS = 1536 @@ -86,8 +86,8 @@ class SettingsDefaults: OPENAI_ENDPOINT_IMAGE_N = 4 OPENAI_ENDPOINT_IMAGE_SIZE = "1024x768" OPENAI_CHAT_CACHE = True - OPENAI_CHAT_MODEL_NAME = "gpt-3.5-turbo" - OPENAI_PROMPT_MODEL_NAME = "gpt-3.5-turbo-instruct" + OPENAI_CHAT_MODEL_NAME = "gpt-4" + OPENAI_PROMPT_MODEL_NAME = "gpt-4" OPENAI_CHAT_TEMPERATURE = 0.0 OPENAI_CHAT_MAX_RETRIES = 3 diff --git a/models/hybrid_search_retreiver.py b/models/hybrid_search_retreiver.py index c352fdd..9a665d4 100644 --- a/models/hybrid_search_retreiver.py +++ b/models/hybrid_search_retreiver.py @@ -36,7 +36,7 @@ # from langchain_community.chat_models import ChatOpenAI # prompting and chat -from langchain_openai import ChatOpenAI, OpenAI +from langchain_openai import ChatOpenAI from pinecone_text.sparse import BM25Encoder # pylint: disable=import-error # this project @@ -114,17 +114,13 @@ def cached_chat_request( retval = self.chat.invoke(messages) return retval + # pylint: disable=unused-argument def prompt_with_template( self, prompt: PromptTemplate, concept: str, model: str = settings.openai_prompt_model_name ) -> str: """Prompt with template.""" - llm = OpenAI( - model=model, - api_key=settings.openai_api_key.get_secret_value(), # pylint: disable=no-member - organization=settings.openai_api_organization, - ) - retval = llm(prompt.format(concept=concept)) - return retval + retval = self.chat.invoke(prompt.format(concept=concept)) + return retval.content if retval else "no response" def load(self, filepath: str): """Pdf loader.""" diff --git 
a/models/pinecone.py b/models/pinecone.py index 8e9b376..acdcd8c 100644 --- a/models/pinecone.py +++ b/models/pinecone.py @@ -106,7 +106,10 @@ def openai_embeddings(self) -> OpenAIEmbeddings: def pinecone(self): """Pinecone lazy read-only property.""" if self._pinecone is None: - self._pinecone = Pinecone(api_key=settings.pinecone_api_key.get_secret_value()) + print("Initializing Pinecone...") + api_key = settings.pinecone_api_key.get_secret_value() + print("Pinecone API key loaded") + self._pinecone = Pinecone(api_key=api_key) return self._pinecone @property @@ -153,7 +156,7 @@ def create(self): print("Creating index. This may take a few minutes...") serverless_spec = ServerlessSpec( cloud="aws", - region="us-west-2", + region="us-east-1", ) try: self.pinecone.create_index( diff --git a/models/tests/mock_data/.env.test_01 b/models/tests/mock_data/.env.test_01 index f32e45d..4d0dfbd 100644 --- a/models/tests/mock_data/.env.test_01 +++ b/models/tests/mock_data/.env.test_01 @@ -9,7 +9,7 @@ OPENAI_ENDPOINT_IMAGE_N = 1 OPENAI_ENDPOINT_IMAGE_SIZE = "TEST_1024x768" OPENAI_CHAT_CACHE = False - OPENAI_CHAT_MODEL_NAME = "TEST_gpt-3.5-turbo" - OPENAI_PROMPT_MODEL_NAME = "TEST_gpt-3.5-turbo-instruct" + OPENAI_CHAT_MODEL_NAME = "TEST_gpt-4" + OPENAI_PROMPT_MODEL_NAME = "TEST_gpt-4" OPENAI_CHAT_TEMPERATURE = 1.0 OPENAI_CHAT_MAX_RETRIES = 5 diff --git a/models/tests/test_configuration.py b/models/tests/test_configuration.py index 51953d7..06b2839 100644 --- a/models/tests/test_configuration.py +++ b/models/tests/test_configuration.py @@ -97,7 +97,7 @@ def test_env_overrides(self): assert mock_settings.openai_endpoint_image_n == 1 assert mock_settings.openai_endpoint_image_size == "TEST_1024x768" assert mock_settings.openai_chat_cache is False - assert mock_settings.openai_chat_model_name == "TEST_gpt-3.5-turbo" + assert mock_settings.openai_chat_model_name == "TEST_gpt-4" assert mock_settings.openai_prompt_model_name == "TEST_text-davinci-003" assert 
mock_settings.openai_chat_temperature == 1.0 assert mock_settings.openai_chat_max_retries == 5 @@ -139,7 +139,7 @@ def test_configure_with_class_constructor(self): openai_endpoint_image_n=1, openai_endpoint_image_size="TEST_1024x768", openai_chat_cache=False, - openai_chat_model_name="TEST_gpt-3.5-turbo", + openai_chat_model_name="TEST_gpt-4", openai_prompt_model_name="TEST_text-davinci-003", openai_chat_temperature=1.0, openai_chat_max_retries=5, @@ -156,7 +156,7 @@ def test_configure_with_class_constructor(self): assert mock_settings.openai_endpoint_image_n == 1 assert mock_settings.openai_endpoint_image_size == "TEST_1024x768" assert mock_settings.openai_chat_cache is False - assert mock_settings.openai_chat_model_name == "TEST_gpt-3.5-turbo" + assert mock_settings.openai_chat_model_name == "TEST_gpt-4" assert mock_settings.openai_prompt_model_name == "TEST_text-davinci-003" assert mock_settings.openai_chat_temperature == 1.0 assert mock_settings.openai_chat_max_retries == 5 @@ -184,7 +184,7 @@ def test_readonly_settings(self): with pytest.raises(PydanticValidationError): mock_settings.openai_chat_cache = False with pytest.raises(PydanticValidationError): - mock_settings.openai_chat_model_name = "TEST_gpt-3.5-turbo" + mock_settings.openai_chat_model_name = "TEST_gpt-4" with pytest.raises(PydanticValidationError): mock_settings.openai_prompt_model_name = "TEST_text-davinci-003" with pytest.raises(PydanticValidationError): diff --git a/models/yt.py b/models/yt.py index c28e8c6..fb0e58a 100644 --- a/models/yt.py +++ b/models/yt.py @@ -15,14 +15,11 @@ # 5.) sequential chains # 4.) chains -from langchain.chains import LLMChain, SimpleSequentialChain - -# 1.) wrappers -from langchain.llms.openai import OpenAI +from langchain.chains.llm import LLMChain +from langchain.chains.sequential import SimpleSequentialChain # 3.) prompt templates from langchain.prompts import PromptTemplate -from langchain.python import PythonREPL # 2.) 
models and messages from langchain.schema import HumanMessage, SystemMessage # AIMessage (not used) @@ -30,8 +27,12 @@ # 6.) embeddings from langchain.text_splitter import RecursiveCharacterTextSplitter +# 1.) wrappers +from langchain_community.llms.openai import OpenAI + # 8.) LangChain agents from langchain_experimental.agents.agent_toolkits.python.base import create_python_agent +from langchain_experimental.utilities.python import PythonREPL # from langchain_community.chat_models import ChatOpenAI from langchain_openai import ChatOpenAI, OpenAIEmbeddings @@ -77,14 +78,14 @@ class LangChainDev: def test_01_basic(self): """Test a basic request""" - llm = OpenAI(model_name="gpt-3.5-turbo-instruct") + llm = OpenAI(model_name="gpt-3.5-turbo-instruct") retval = llm("explain large language models in one sentence") print(retval) # 2.) models and messages. minute 6:08 def test_02_chat_model(self): """Test a chat model""" - chat = ChatOpenAI(model_name="gpt-3.5-turbo-instruct", temperature=0.3) + chat = ChatOpenAI(model_name="gpt-4", temperature=0.3) messages = [ SystemMessage(content="You are an expert data scientist"), HumanMessage(content="Write a Python script that trains a neural network on simulated data"), @@ -104,7 +105,7 @@ def get_prompt(self): def test_03_prompt_templates(self): """Test prompt templates""" - llm = OpenAI(model_name="gpt-3.5-turbo-instruct") + llm = OpenAI(model_name="gpt-3.5-turbo-instruct") prompt = self.get_prompt() retval = llm(prompt.format(concept="regularization")) print(retval) @@ -117,7 +118,7 @@ def get_chain(self, llm, prompt): def test_04_chain(self): """Test a chain""" - llm = OpenAI(model_name="gpt-3.5-turbo-instruct") + llm = OpenAI(model_name="gpt-3.5-turbo-instruct") prompt = self.get_prompt() chain = self.get_chain(llm=llm, prompt=prompt) print(chain.run("autoencoder")) @@ -139,7 +140,7 @@ def get_prompt_two(self): def get_explanation(self): """Get an explanation""" - llm = OpenAI(model_name="gpt-3.5-turbo-instruct") + llm = OpenAI(model_name="gpt-3.5-turbo-instruct") prompt = self.get_prompt() 
chain_one = self.get_chain(llm=llm, prompt=prompt) diff --git a/tox.ini b/tox.ini index fdc75bd..d5ce821 100644 --- a/tox.ini +++ b/tox.ini @@ -14,6 +14,7 @@ python = 3.9: gitlint,py39,flake8 3.10: gitlint,py310,flake8 3.11: gitlint,py311,flake8,mypy,black,pylint + 3.12: gitlint,py312,flake8,mypy,black,pylint [testenv] deps = -rrequirements.txt