Skip to content

Commit 4f29e76

Browse files
authored
Add deprecation warnings for embeddings (#2244)
1 parent ce380da commit 4f29e76

File tree

24 files changed

+234
-85
lines changed

docs/extra/components/choose_evaluator_llm.md

Lines changed: 5 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -15,11 +15,13 @@
1515

1616
```python
1717
from ragas.llms import LangchainLLMWrapper
18-
from ragas.embeddings import LangchainEmbeddingsWrapper
1918
from langchain_openai import ChatOpenAI
20-
from langchain_openai import OpenAIEmbeddings
19+
from ragas.embeddings import OpenAIEmbeddings
20+
import openai
21+
2122
evaluator_llm = LangchainLLMWrapper(ChatOpenAI(model="gpt-4o"))
22-
evaluator_embeddings = LangchainEmbeddingsWrapper(OpenAIEmbeddings())
23+
openai_client = openai.OpenAI()
24+
evaluator_embeddings = OpenAIEmbeddings(client=openai_client)
2325
```
2426

2527

docs/extra/components/choose_generator_llm.md

Lines changed: 5 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -16,11 +16,13 @@
1616

1717
```python
1818
from ragas.llms import LangchainLLMWrapper
19-
from ragas.embeddings import LangchainEmbeddingsWrapper
2019
from langchain_openai import ChatOpenAI
21-
from langchain_openai import OpenAIEmbeddings
20+
from ragas.embeddings import OpenAIEmbeddings
21+
import openai
22+
2223
generator_llm = LangchainLLMWrapper(ChatOpenAI(model="gpt-4o"))
23-
generator_embeddings = LangchainEmbeddingsWrapper(OpenAIEmbeddings())
24+
openai_client = openai.OpenAI()
25+
generator_embeddings = OpenAIEmbeddings(client=openai_client)
2426
```
2527

2628

docs/getstarted/rag_eval.md

Lines changed: 8 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -9,9 +9,12 @@ We will use `langchain_openai` to set the LLM and embedding model for building o
99

1010
```python
1111
from langchain_openai import ChatOpenAI
12-
from langchain_openai import OpenAIEmbeddings
12+
from ragas.embeddings import OpenAIEmbeddings
13+
import openai
14+
1315
llm = ChatOpenAI(model="gpt-4o")
14-
embeddings = OpenAIEmbeddings()
16+
openai_client = openai.OpenAI()
17+
embeddings = OpenAIEmbeddings(client=openai_client)
1518
```
1619

1720
### Build a Simple RAG System
@@ -30,8 +33,10 @@ To build a simple RAG system, we need to define the following components:
3033

3134
class RAG:
3235
def __init__(self, model="gpt-4o"):
36+
import openai
3337
self.llm = ChatOpenAI(model=model)
34-
self.embeddings = OpenAIEmbeddings()
38+
openai_client = openai.OpenAI()
39+
self.embeddings = OpenAIEmbeddings(client=openai_client)
3540
self.doc_embeddings = None
3641
self.docs = None
3742

docs/howtos/applications/compare_embeddings.md

Lines changed: 8 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -31,7 +31,9 @@ For this tutorial notebook, I am using papers from Semantic Scholar that is rela
3131
from llama_index.core import download_loader
3232
from ragas.testset.evolutions import simple, reasoning, multi_context
3333
from ragas.testset.generator import TestsetGenerator
34-
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
34+
from langchain_openai import ChatOpenAI
35+
from ragas.embeddings import OpenAIEmbeddings
36+
import openai
3537

3638
SemanticScholarReader = download_loader("SemanticScholarReader")
3739
loader = SemanticScholarReader()
@@ -41,7 +43,8 @@ documents = loader.load_data(query=query_space, limit=100)
4143
# generator with openai models
4244
generator_llm = ChatOpenAI(model="gpt-4o-mini")
4345
critic_llm = ChatOpenAI(model="gpt-4o")
44-
embeddings = OpenAIEmbeddings()
46+
openai_client = openai.OpenAI()
47+
embeddings = OpenAIEmbeddings(client=openai_client)
4548

4649
generator = TestsetGenerator.from_langchain(
4750
generator_llm,
@@ -83,7 +86,9 @@ Here I am using llama-index to build a basic RAG pipeline with my documents. The
8386

8487
import nest_asyncio
8588
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, ServiceContext
86-
from langchain.embeddings import HuggingFaceEmbeddings, OpenAIEmbeddings
89+
from langchain.embeddings import HuggingFaceEmbeddings
90+
from ragas.embeddings import OpenAIEmbeddings
91+
import openai
8792
import pandas as pd
8893

8994
nest_asyncio.apply()

docs/howtos/applications/singlehop_testset_gen.md

Lines changed: 5 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -37,12 +37,14 @@ docs = loader.load()
3737

3838
```python
3939
from ragas.llms import LangchainLLMWrapper
40-
from ragas.embeddings import LangchainEmbeddingsWrapper
41-
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
40+
from ragas.embeddings import OpenAIEmbeddings
41+
from langchain_openai import ChatOpenAI
42+
import openai
4243

4344

4445
generator_llm = LangchainLLMWrapper(ChatOpenAI(model="gpt-4o-mini"))
45-
generator_embeddings = LangchainEmbeddingsWrapper(OpenAIEmbeddings(model="text-embedding-3-small"))
46+
openai_client = openai.OpenAI()
47+
generator_embeddings = OpenAIEmbeddings(client=openai_client, model="text-embedding-3-small")
4648
```
4749

4850
## Create Knowledge Graph

docs/howtos/customizations/testgenerator/_language_adaptation.md

Lines changed: 5 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -1,4 +1,5 @@
11
## Synthetic test generation from non-english corpus
2+
## Synthetic test generation from non-english corpus
23

34
In this notebook, you'll learn how to adapt synthetic test data generation to non-english corpus settings. For the sake of this tutorial, I am generating queries in Spanish from Spanish wikipedia articles.
45

@@ -48,12 +49,13 @@ len(docs)
4849

4950
```python
5051
from ragas.llms import LangchainLLMWrapper
51-
from ragas.embeddings import LangchainEmbeddingsWrapper
52+
from ragas.embeddings import OpenAIEmbeddings
5253
from langchain_openai import ChatOpenAI
53-
from langchain_openai import OpenAIEmbeddings
54+
import openai
5455

5556
generator_llm = LangchainLLMWrapper(ChatOpenAI(model="gpt-4o-mini"))
56-
generator_embeddings = LangchainEmbeddingsWrapper(OpenAIEmbeddings())
57+
openai_client = openai.OpenAI()
58+
generator_embeddings = OpenAIEmbeddings(client=openai_client)
5759
```
5860

5961
/opt/homebrew/Caskroom/miniforge/base/envs/ragas/lib/python3.9/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html

docs/howtos/customizations/testgenerator/_testgen-custom-single-hop.md

Lines changed: 4 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -45,10 +45,12 @@ You may use any of [your choice](./../../customizations/customize_models.md), he
4545

4646
```python
4747
from ragas.llms.base import llm_factory
48-
from ragas.embeddings.base import embedding_factory
48+
from ragas.embeddings import OpenAIEmbeddings
49+
import openai
4950

5051
llm = llm_factory()
51-
embedding = embedding_factory()
52+
openai_client = openai.OpenAI()
53+
embedding = OpenAIEmbeddings(client=openai_client)
5254
```
5355

5456
### Setup the transforms

docs/howtos/customizations/testgenerator/_testgen-customisation.md

Lines changed: 4 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -48,10 +48,12 @@ You may use any of [your choice](./../../customizations/customize_models.md), he
4848

4949
```python
5050
from ragas.llms.base import llm_factory
51-
from ragas.embeddings.base import embedding_factory
51+
from ragas.embeddings import OpenAIEmbeddings
52+
import openai
5253

5354
llm = llm_factory()
54-
embedding = embedding_factory()
55+
openai_client = openai.OpenAI()
56+
embedding = OpenAIEmbeddings(client=openai_client)
5557
```
5658

5759
### Setup Extractors and Relationship builders

docs/howtos/customizations/testgenerator/language_adaptation.ipynb

Lines changed: 6 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -89,7 +89,7 @@
8989
},
9090
{
9191
"cell_type": "code",
92-
"execution_count": 4,
92+
"execution_count": null,
9393
"metadata": {},
9494
"outputs": [
9595
{
@@ -102,13 +102,15 @@
102102
}
103103
],
104104
"source": [
105-
"from langchain_openai import ChatOpenAI, OpenAIEmbeddings\n",
105+
"import openai\n",
106+
"from langchain_openai import ChatOpenAI\n",
106107
"\n",
107-
"from ragas.embeddings import LangchainEmbeddingsWrapper\n",
108+
"from ragas.embeddings import OpenAIEmbeddings\n",
108109
"from ragas.llms import LangchainLLMWrapper\n",
109110
"\n",
110111
"generator_llm = LangchainLLMWrapper(ChatOpenAI(model=\"gpt-4o-mini\"))\n",
111-
"generator_embeddings = LangchainEmbeddingsWrapper(OpenAIEmbeddings())"
112+
"openai_client = openai.OpenAI()\n",
113+
"generator_embeddings = OpenAIEmbeddings(client=openai_client)"
112114
]
113115
},
114116
{

docs/howtos/customizations/testgenerator/testgen-custom-single-hop.ipynb

Lines changed: 6 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -83,16 +83,19 @@
8383
},
8484
{
8585
"cell_type": "code",
86-
"execution_count": 4,
86+
"execution_count": null,
8787
"id": "52f6d1ae-c9ed-4d82-99d7-d130a36e41e8",
8888
"metadata": {},
8989
"outputs": [],
9090
"source": [
91-
"from ragas.embeddings.base import embedding_factory\n",
91+
"import openai\n",
92+
"\n",
93+
"from ragas.embeddings import OpenAIEmbeddings\n",
9294
"from ragas.llms.base import llm_factory\n",
9395
"\n",
9496
"llm = llm_factory()\n",
95-
"embedding = embedding_factory()"
97+
"openai_client = openai.OpenAI()\n",
98+
"embedding = OpenAIEmbeddings(client=openai_client)"
9699
]
97100
},
98101
{

0 commit comments

Comments (0)