22 changes: 22 additions & 0 deletions Exercises/Exercise_1/app.py
@@ -0,0 +1,22 @@
from fastapi import FastAPI
from pydantic import BaseModel
from transformers import pipeline


app = FastAPI(title="Hello LLM API")

# Load Hugging Face text generation pipeline
generator = pipeline("text-generation", model="distilgpt2")


class Prompt(BaseModel):
    text: str
    max_length: int = 50


@app.post("/hello-llm")
async def hello_llm(prompt: Prompt):
    """
    Generate text from a given prompt using distilgpt2.
    """
    output = generator(prompt.text, max_length=prompt.max_length, num_return_sequences=1)
    return {"prompt": prompt.text, "generated_text": output[0]["generated_text"]}
15 changes: 15 additions & 0 deletions Exercises/Exercise_1/requirements.txt
@@ -0,0 +1,15 @@
langchain_chroma
fastapi
uvicorn
pydantic
python-multipart
transformers
torch
langchain
langchain_community
diffusers
accelerate
safetensors
Pillow
python_dotenv
streamlit
67 changes: 67 additions & 0 deletions Exercises/Exercise_1/streamlit.py
@@ -0,0 +1,67 @@
# streamlit.py: Streamlit frontend for the Hello LLM API (Exercise 1)
import streamlit as st
import requests

st.set_page_config(layout="wide", page_title="Hello LLM", page_icon="🤖")

# Sidebar
with st.sidebar:
    st.title("🤖 Hello LLM")
    st.markdown("*play with text generation*")
    st.markdown("___")

    st.subheader("📊 Session Stats")
    col1, col2 = st.columns(2)
    with col1:
        st.header("Messages")
        if "messages" in st.session_state:
            st.markdown(f"# {len(st.session_state.messages) // 2}")
        else:
            st.markdown("# 0")
    with col2:
        st.header("Total")
        if "messages" in st.session_state:
            st.markdown(f"# {len(st.session_state.messages)}")
        else:
            st.markdown("# 0")
    st.markdown("___")

    st.header("⚙ Controls")
    max_length = st.slider("Max Length", min_value=10, max_value=200, value=50, step=5)
    st.markdown("___")

# Main Chat Section
st.subheader("🗯 Chat with Hello LLM")

if "messages" not in st.session_state:
    st.session_state.messages = []

chat_box = st.container(height=500, border=True)

# Display chat history
with chat_box:
    for msg in st.session_state.messages:
        with st.chat_message(msg["role"]):
            st.markdown(msg["text"])

# Input area
user_input = st.chat_input("Type your prompt here...")
if user_input:
    # Save user message
    st.session_state.messages.append({"role": "user", "text": user_input})

    # Send to FastAPI
    with st.spinner("AI is generating response..."):
        try:
            url = "http://127.0.0.1:8000/hello-llm"  # FastAPI endpoint
            payload = {"text": user_input, "max_length": max_length}
            response = requests.post(url, json=payload)
            response.raise_for_status()
            data = response.json()
            ai_reply = data["generated_text"]
        except requests.exceptions.RequestException as e:
            ai_reply = f"⚠️ Error: {e}"

    # Save assistant reply
    st.session_state.messages.append({"role": "assistant", "text": ai_reply})
    st.rerun()
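A sketch of one way to start the backend and frontend together for local testing; the commands and the subprocess wrapper are assumptions for convenience, not part of this PR.

# run_exercise_1.py: hypothetical convenience script, not included in this PR
# Equivalent to running, in two terminals from Exercises/Exercise_1:
#   uvicorn app:app --port 8000
#   streamlit run streamlit.py
import subprocess

api = subprocess.Popen(["uvicorn", "app:app", "--port", "8000"])
ui = subprocess.Popen(["streamlit", "run", "streamlit.py"])
try:
    ui.wait()          # keep running until the Streamlit process exits
finally:
    api.terminate()    # then shut the API down as well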
42 changes: 42 additions & 0 deletions Exercises/Exercise_2/app.py
@@ -0,0 +1,42 @@
from fastapi import FastAPI
from pydantic import BaseModel
from transformers import pipeline

# Initialize FastAPI app
app = FastAPI(title="Summarizer API")

# Load Hugging Face summarization pipeline
summarizer = pipeline("summarization", model="facebook/bart-large-cnn")

# Request body schema
class SummarizeRequest(BaseModel):
    text: str
    max_length: int = 150
    min_length: int = 30
    temperature: float = 1.0
    top_k: int = 50
    top_p: float = 0.9


@app.post("/summarize")
async def summarize(request: SummarizeRequest):
    """
    Summarize a long text using facebook/bart-large-cnn
    with controllable generation parameters.
    """
    try:
        summary = summarizer(
            request.text,
            max_length=request.max_length,
            min_length=request.min_length,
            do_sample=True,
            temperature=request.temperature,
            top_k=request.top_k,
            top_p=request.top_p,
        )
        return {
            "input_text": request.text,
            "summary_text": summary[0]["summary_text"],
        }
    except Exception as e:
        return {"error": str(e)}
77 changes: 77 additions & 0 deletions Exercises/Exercise_2/frontend.py
@@ -0,0 +1,77 @@
# frontend.py: Streamlit frontend for the Summarizer API (Exercise 2)
import streamlit as st
import requests

st.set_page_config(layout="wide", page_title="Summarizer LLM", page_icon="📝")

# Sidebar
with st.sidebar:
    st.title("📝 Summarizer LLM")
    st.markdown("*Summarize long text with BART Large CNN*")
    st.markdown("___")

    st.subheader("📊 Session Stats")
    col1, col2 = st.columns(2)
    with col1:
        st.header("Summaries")
        if "messages" in st.session_state:
            st.markdown(f"# {len(st.session_state.messages) // 2}")
        else:
            st.markdown("# 0")
    with col2:
        st.header("Total")
        if "messages" in st.session_state:
            st.markdown(f"# {len(st.session_state.messages)}")
        else:
            st.markdown("# 0")
    st.markdown("___")

    st.header("⚙ Controls")
    max_length = st.slider("Max Length", min_value=50, max_value=500, value=150, step=10)
    min_length = st.slider("Min Length", min_value=10, max_value=100, value=30, step=5)
    temperature = st.slider("Temperature", min_value=0.1, max_value=2.0, value=1.0, step=0.1)
    top_k = st.slider("Top-K", min_value=1, max_value=100, value=50, step=1)
    top_p = st.slider("Top-P", min_value=0.1, max_value=1.0, value=0.9, step=0.05)
    st.markdown("___")

# Main Section
st.subheader("🗯 Summarize Your Text")

if "messages" not in st.session_state:
    st.session_state.messages = []

chat_box = st.container(height=500, border=True)

# Display history
with chat_box:
    for msg in st.session_state.messages:
        with st.chat_message(msg["role"]):
            st.markdown(msg["text"])

# Input area
user_input = st.chat_input("Paste your long text here...")
if user_input:
    st.session_state.messages.append({"role": "user", "text": user_input})

    with st.spinner("AI is summarizing your text..."):
        try:
            url = "http://127.0.0.1:8000/summarize"
            payload = {
                "text": user_input,
                "max_length": max_length,
                "min_length": min_length,
                "temperature": temperature,
                "top_k": top_k,
                "top_p": top_p,
            }
            response = requests.post(url, json=payload)
            response.raise_for_status()
            data = response.json()
            ai_reply = data.get("summary_text", "⚠️ No summary returned.")
        except requests.exceptions.RequestException as e:
            ai_reply = f"⚠️ Error: {e}"

    # Save assistant reply
    st.session_state.messages.append({"role": "assistant", "text": ai_reply})
    st.rerun()
27 changes: 27 additions & 0 deletions Exercises/Exercise_3/app.py
@@ -0,0 +1,27 @@
from fastapi import FastAPI
from pydantic import BaseModel
from transformers import pipeline

# Create FastAPI app
app = FastAPI(title="Sentiment Analysis API")


sentiment_model = pipeline("sentiment-analysis", model="distilbert-base-uncased-finetuned-sst-2-english")


# Request body
class SentimentRequest(BaseModel):
    text: str


@app.post("/sentiment")
async def analyze_sentiment(request: SentimentRequest):
    """
    Analyze the sentiment of a given text (positive/negative).
    """
    result = sentiment_model(request.text)[0]
    return {
        "text": request.text,
        "label": result["label"],
        "score": round(result["score"], 4),
    }
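A minimal client sketch for the /sentiment endpoint, again assuming a local server on port 8000 (the address used by the Exercise 3 frontend); the sample sentence and printed output are illustrative.

# sentiment_client.py: hypothetical helper, not included in this PR
import requests

resp = requests.post(
    "http://127.0.0.1:8000/sentiment",
    json={"text": "I really enjoyed this exercise!"},
)
resp.raise_for_status()
result = resp.json()
print(result["label"], result["score"])  # e.g. POSITIVE 0.9998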
62 changes: 62 additions & 0 deletions Exercises/Exercise_3/frontend.py
@@ -0,0 +1,62 @@
import streamlit as st
import requests

st.set_page_config(layout="wide", page_title="Sentiment Analyzer", page_icon="😊")

# Sidebar
with st.sidebar:
    st.title("😊 Sentiment Analyzer")
    st.markdown("*detect positive or negative tone*")
    st.markdown("___")

    st.subheader("📊 Session Stats")
    col1, col2 = st.columns(2)
    with col1:
        st.header("Messages")
        if "messages" in st.session_state:
            st.markdown(f"# {len(st.session_state.messages) // 2}")
        else:
            st.markdown("# 0")
    with col2:
        st.header("Total")
        if "messages" in st.session_state:
            st.markdown(f"# {len(st.session_state.messages)}")
        else:
            st.markdown("# 0")
    st.markdown("___")

# Main Chat Section
st.subheader("🗯 Chat with Sentiment Analyzer")

if "messages" not in st.session_state:
    st.session_state.messages = []

chat_box = st.container(height=500, border=True)

# Display chat history
with chat_box:
    for msg in st.session_state.messages:
        with st.chat_message(msg["role"]):
            st.markdown(msg["text"])

user_input = st.chat_input("Enter a sentence to analyze sentiment...")
if user_input:
    st.session_state.messages.append({"role": "user", "text": user_input})

    with st.spinner("Analyzing sentiment..."):
        try:
            url = "http://127.0.0.1:8000/sentiment"
            payload = {"text": user_input}
            response = requests.post(url, json=payload)
            response.raise_for_status()
            data = response.json()
            ai_reply = f"**Sentiment:** {data['label']} \n**Confidence:** {data['score']}"
        except requests.exceptions.RequestException as e:
            ai_reply = f"⚠️ Error: {e}"

    # Save assistant reply
    st.session_state.messages.append({"role": "assistant", "text": ai_reply})
    st.rerun()