Skip to content

Commit eefb0ca

Browse files
Authored commit message: chore: renamed files and added tqdm (#65)
1 parent 3cf07c4 commit eefb0ca

File tree

5 files changed

+6
-11
lines changed

5 files changed

+6
-11
lines changed

README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@
33
src="./docs/assets/logo.png">
44
</h1>
55
<p align="center">
6-
<i>SOTA metrics for evaluating Retrieval Augmented Generation (RAG)</i>
6+
<i>Evaluation framework for your Retrieval Augmented Generation (RAG) pipelines</i>
77
</p>
88

99
<p align="center">

src/ragas/metrics/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
from ragas.metrics.answer_relevance import AnswerRelevancy, answer_relevancy
22
from ragas.metrics.context_relevance import ContextRelevancy, context_relevancy
3-
from ragas.metrics.factual import Faithfulness, faithfulness
3+
from ragas.metrics.faithfulnes import Faithfulness, faithfulness
44

55
__all__ = [
66
"Faithfulness",

src/ragas/metrics/context_relevance.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@
88
import numpy as np
99
from datasets import Dataset
1010
from sentence_transformers import CrossEncoder
11+
from tqdm import tqdm
1112

1213
from ragas.metrics.base import Metric
1314
from ragas.metrics.llms import openai_completion
@@ -135,7 +136,7 @@ def score(self: t.Self, dataset: Dataset) -> Dataset:
135136
prompts.append(prompt)
136137

137138
responses = []
138-
for batch_idx in range(0, len(prompts), 20):
139+
for batch_idx in tqdm(range(0, len(prompts), 20)):
139140
batch_responses = openai_completion(
140141
prompts[batch_idx : batch_idx + 20], n=self.strictness
141142
)
File renamed without changes.

tests/benchmarks/benchmark_eval.py

Lines changed: 2 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -8,14 +8,8 @@
88

99
DEVICE = "cuda" if is_available() else "cpu"
1010

11-
PATH_TO_DATSET_GIT_REPO = "../../../datasets/fiqa/"
12-
dataset_dir = os.environ.get("DATASET_DIR", PATH_TO_DATSET_GIT_REPO)
13-
if os.path.isdir(dataset_dir):
14-
ds = Dataset.from_csv(os.path.join(dataset_dir, "baseline.csv"))
15-
assert isinstance(ds, Dataset)
16-
else:
17-
# data
18-
ds = load_dataset("explodinggradients/fiqa", "ragas_eval")["baseline"]
11+
# data
12+
ds = load_dataset("explodinggradients/fiqa", "ragas_eval")["baseline"]
1913

2014
if __name__ == "__main__":
2115
result = evaluate(

0 commit comments

Comments (0)