Skip to content

Commit d523345

Browse files
committed
feat(MrqGeneration): add MRQ generation page
1 parent 333fb8b commit d523345

File tree

32 files changed

+2532
-271
lines changed

32 files changed

+2532
-271
lines changed

app/controllers/course/assessment/question/multiple_responses_controller.rb

Lines changed: 92 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -49,6 +49,98 @@ def destroy
4949
end
5050
end
5151

52+
# Generates MRQ questions via the LLM generation service and returns them in
# the camelCase shape the frontend expects.
#
# Params (form data):
#   custom_prompt        [String]  free-text instructions for the LLM (required)
#   number_of_questions  [Integer] 1..3
#   source_question_data [String]  optional JSON-encoded existing question used as context
def generate
  # Parse and normalise form parameters.
  custom_prompt = params[:custom_prompt] || ''
  number_of_questions = (params[:number_of_questions] || 1).to_i

  # source_question_data arrives as a JSON-encoded string; fall back to {} on bad input.
  source_question_data = {}
  if params[:source_question_data].present?
    begin
      source_question_data = JSON.parse(params[:source_question_data])
    rescue JSON::ParserError => e
      Rails.logger.warn "Failed to parse source_question_data: #{e.message}"
      source_question_data = {}
    end
  end

  # Validate parameters before invoking the (expensive) LLM service.
  if custom_prompt.blank?
    render json: { success: false, message: 'Custom prompt is required' }, status: :bad_request
    return
  end

  if number_of_questions < 1 || number_of_questions > 3
    render json: { success: false, message: 'Number of questions must be between 1 and 3' }, status: :bad_request
    return
  end

  generation_service = Course::Assessment::Question::MrqGenerationService.new(
    @assessment,
    {
      custom_prompt: custom_prompt,
      number_of_questions: number_of_questions,
      source_question_data: source_question_data
    }
  )

  generated_questions = generation_service.generate_questions
  questions = generated_questions['questions'] || []

  if questions.empty?
    render json: { success: false, message: 'No questions were generated' }, status: :internal_server_error
    return
  end

  # Shared formatter: maps one LLM question hash to the frontend shape.
  # (Previously this mapping was duplicated verbatim for `data` and `allQuestions`.)
  format_question = lambda do |question|
    {
      title: question['title'],
      description: question['description'],
      options: question['options'].map.with_index do |option, index|
        {
          id: index + 1,
          option: option['option'],
          correct: option['correct'],
          weight: index + 1,
          explanation: option['explanation'] || '',
          ignoreRandomization: false,
          toBeDeleted: false
        }
      end
    }
  end

  formatted_questions = questions.map { |question| format_question.call(question) }

  # The first question is surfaced at the top level; all questions are kept
  # alongside it so the frontend can switch between them.
  response_data = {
    success: true,
    data: formatted_questions.first.merge(
      allQuestions: formatted_questions,
      numberOfQuestions: questions.length
    )
  }

  render json: response_data, status: :ok
rescue StandardError => e
  Rails.logger.error "MRQ Generation Error: #{e.message}"
  Rails.logger.error e.backtrace.join("\n")
  render json: { success: false, message: 'An error occurred while generating questions' },
         status: :internal_server_error
end
143+
52144
private
53145

54146
def respond_to_switch_mcq_mrq_type
Lines changed: 85 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,85 @@
1+
# frozen_string_literal: true
# Service that generates multiple response questions (MRQ) by prompting an LLM
# and parsing its structured JSON output.
class Course::Assessment::Question::MrqGenerationService
  # Prompts and the output schema are loaded once at class-definition time and
  # shared across all instances.
  @output_schema = JSON.parse(
    File.read('app/services/course/assessment/question/prompts/mrq_generation_output_format.json')
  )
  @output_parser = Langchain::OutputParsers::StructuredOutputParser.from_json_schema(
    @output_schema
  )
  @system_prompt = Langchain::Prompt.load_from_path(
    file_path: 'app/services/course/assessment/question/prompts/mrq_generation_system_prompt.json'
  )
  @user_prompt = Langchain::Prompt.load_from_path(
    file_path: 'app/services/course/assessment/question/prompts/mrq_generation_user_prompt.json'
  )
  @llm = LANGCHAIN_OPENAI

  class << self
    attr_reader :system_prompt, :user_prompt, :output_schema, :output_parser
    attr_accessor :llm
  end

  # @param [Course::Assessment] assessment The assessment the questions belong to.
  # @param [Hash] params Generation parameters:
  #   - :custom_prompt [String] free-text instructions for the LLM
  #   - :number_of_questions [Integer] how many questions to generate (defaults to 1)
  #   - :source_question_data [Hash] optional existing question used as context;
  #     may be string-keyed (e.g. the result of JSON.parse) or symbol-keyed.
  def initialize(assessment, params)
    @assessment = assessment
    @params = params
    @custom_prompt = params[:custom_prompt].to_s
    # Apply the default BEFORE coercing: `to_i` never returns nil (so `|| 1`
    # was dead code) and nil.to_i is 0, which broke the intended default of 1.
    @number_of_questions = (params[:number_of_questions] || 1).to_i
    @source_question_data = params[:source_question_data]
  end

  # Calls the LLM service to generate MRQ questions.
  # @return [Hash] The LLM's generation response containing multiple questions.
  def generate_questions
    formatted_system_prompt = self.class.system_prompt.format
    formatted_user_prompt = self.class.user_prompt.format(
      custom_prompt: @custom_prompt,
      number_of_questions: @number_of_questions,
      source_question_title: source_field('title') || '',
      source_question_description: source_field('description') || '',
      source_question_options: format_source_options(source_field('options') || [])
    )

    messages = [
      { role: 'system', content: formatted_system_prompt },
      { role: 'user', content: formatted_user_prompt }
    ]

    response = self.class.llm.chat(
      messages: messages,
      response_format: {
        type: 'json_schema',
        json_schema: {
          name: 'mrq_generation_output',
          strict: true,
          schema: self.class.output_schema
        }
      }
    ).completion

    parse_llm_response(response)
  end

  private

  # Looks up a field in the source question data, accepting both string and
  # symbol keys. The controller passes a JSON.parse'd hash, which is
  # string-keyed — digging with symbols alone always returned nil.
  # @param [String] key The field name to look up
  # @return [Object, nil] The field value, or nil if absent
  def source_field(key)
    return nil unless @source_question_data.respond_to?(:[])

    @source_question_data[key.to_s] || @source_question_data[key.to_sym]
  end

  # Formats source question options for inclusion in the LLM prompt
  # @param [Array] options The source question options
  # @return [String] Formatted string representation of options
  def format_source_options(options)
    return 'None' if options.empty?

    options.map.with_index do |option, index|
      # Accept string- or symbol-keyed option hashes; use key? for the
      # `correct` flag since `false` is a legitimate value.
      text = option['option'] || option[:option]
      correct = option.key?('correct') ? option['correct'] : option[:correct]
      "- Option #{index + 1}: #{text} (Correct: #{correct})"
    end.join("\n")
  end

  # Parses LLM response with retry logic for handling parsing failures
  # @param [String] response The raw LLM response to parse
  # @return [Hash] The parsed response as a structured hash
  def parse_llm_response(response)
    fix_parser = Langchain::OutputParsers::OutputFixingParser.from_llm(
      llm: self.class.llm,
      parser: self.class.output_parser
    )
    fix_parser.parse(response)
  end
end
Lines changed: 50 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,50 @@
1+
{
2+
"_type": "json_schema",
3+
"type": "object",
4+
"properties": {
5+
"questions": {
6+
"type": "array",
7+
"items": {
8+
"type": "object",
9+
"properties": {
10+
"title": {
11+
"type": "string",
12+
"description": "The title of the question"
13+
},
14+
"description": {
15+
"type": "string",
16+
"description": "The question description"
17+
},
18+
"options": {
19+
"type": "array",
20+
"items": {
21+
"type": "object",
22+
"properties": {
23+
"option": {
24+
"type": "string",
25+
"description": "The text of the option"
26+
},
27+
"correct": {
28+
"type": "boolean",
29+
"description": "Whether this option is correct"
30+
},
31+
"explanation": {
32+
"type": "string",
33+
"description": "Explanation for why this option is correct or incorrect"
34+
}
35+
},
36+
"required": ["option", "correct", "explanation"],
37+
"additionalProperties": false
38+
},
39+
"description": "Array of 4-6 options for the question"
40+
}
41+
},
42+
"required": ["title", "description", "options"],
43+
"additionalProperties": false
44+
},
45+
"description": "Array of generated multiple response questions"
46+
}
47+
},
48+
"required": ["questions"],
49+
"additionalProperties": false
50+
}
Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
{
2+
"_type": "prompt",
3+
"input_variables": ["format_instructions"],
4+
"template": "You are an expert educational content creator specializing in multiple response questions (MRQ).\n\nYour task is to generate high-quality multiple response questions based on the provided instructions and context.\n\nKey requirements for MRQ generation:\n1. Create questions that may have one or more correct answers. It’s acceptable for some questions to have only one correct answer, or for options like \"None of the above\" to be correct.\n2. Ensure all options are plausible and well-written\n3. Include 2-6 options per question\n4. Questions should be clear, concise, and educational\n5. Options should be mutually exclusive when possible\n6. Avoid obvious incorrect answers\n7. Use an appropriate difficulty level for the target audience\n\nWhen provided with a source question, you may use it as inspiration or reference, but create original questions.\n\n{format_instructions}"
5+
}
Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,11 @@
1+
{
2+
"_type": "prompt",
3+
"input_variables": [
4+
"custom_prompt",
5+
"number_of_questions",
6+
"source_question_title",
7+
"source_question_description",
8+
"source_question_options"
9+
],
10+
"template": "Please generate {number_of_questions} multiple response question(s) based on the following instructions:\n\nCustom Instructions: {custom_prompt}\n\nSource Question Context (for reference only):\nTitle: {source_question_title}\nDescription: {source_question_description}\nOptions:\n{source_question_options}\n\nGenerate {number_of_questions} high-quality multiple response question(s) that:\n- Have clear, educational content\n- Include 4-6 options per question\n- Have multiple correct answers (at least 2)\n- Are appropriate for educational assessment\n- Follow the custom instructions provided\n\nEach question should be original and well-structured for educational use."
11+
}

client/app/api/course/Assessment/Question/McqMrq.ts

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@ import {
22
McqMrqFormData,
33
McqMrqPostData,
44
} from 'types/course/assessment/question/multiple-responses';
5+
import { MrqGenerateResponse } from 'types/course/assessment/question-generation';
56

67
import { APIResponse, JustRedirect } from 'api/types';
78

@@ -33,4 +34,8 @@ export default class McqMrqAPI extends BaseAPI {
3334
update(id: number, data: McqMrqPostData): APIResponse<JustRedirect> {
3435
return this.client.patch(`${this.#urlPrefix}/${id}`, data);
3536
}
37+
38+
generate(data: FormData): APIResponse<MrqGenerateResponse> {
39+
return this.client.post(`${this.#urlPrefix}/generate`, data);
40+
}
3641
}

client/app/bundles/course/assessment/pages/AssessmentGenerate/GenerateTabs.tsx

Lines changed: 44 additions & 40 deletions
Original file line numberDiff line numberDiff line change
@@ -112,53 +112,57 @@ const GenerateTabs: FC<Props> = (props) => {
112112
className="min-h-17 p-2"
113113
id={metadata.id}
114114
label={
115-
<span>
115+
<span className="flex items-center min-w-0 max-w-full">
116116
{metadata.isGenerating && (
117117
<LoadingIndicator
118118
bare
119-
className={`mr-2${metadata.id === activeConversationId ? '' : ' text-gray-600'}`}
119+
className={`mr-2 flex-shrink-0${metadata.id === activeConversationId ? '' : ' text-gray-600'}`}
120120
size={15}
121121
/>
122122
)}
123-
{metadata.title ?? 'Untitled Question'}
124-
<IconButton
125-
className="-ml-0.25 -mr-0.25 py-0 px-0.5 scale-[0.86] origin-right"
126-
color="inherit"
127-
component="span"
128-
disabled={metadata.isGenerating}
129-
onClick={(e) => {
130-
e.stopPropagation();
131-
duplicateConversation(conversations[metadata.id]);
132-
}}
133-
onMouseDown={(e) => {
134-
e.stopPropagation();
135-
}}
136-
size="small"
137-
>
138-
<ContentCopy />
139-
</IconButton>
140-
<IconButton
141-
className="-ml-0.25 -mr-0.25 py-0 px-0.5 scale-[0.86] origin-right"
142-
color="inherit"
143-
component="span"
144-
disabled={
145-
conversationIds.length <= 1 || metadata.isGenerating
146-
}
147-
onClick={(e) => {
148-
e.stopPropagation();
149-
if (metadata.hasData) {
150-
setConversationToDeleteId(metadata.id);
151-
} else {
152-
deleteConversation(conversations[metadata.id]);
123+
<span className="overflow-hidden text-ellipsis whitespace-nowrap min-w-0 flex-1">
124+
{metadata.title ?? 'Untitled Question'}
125+
</span>
126+
<div className="flex items-center flex-shrink-0 ml-1">
127+
<IconButton
128+
className="-ml-0.25 -mr-0.25 py-0 px-0.5 scale-[0.86] origin-right"
129+
color="inherit"
130+
component="span"
131+
disabled={metadata.isGenerating}
132+
onClick={(e) => {
133+
e.stopPropagation();
134+
duplicateConversation(conversations[metadata.id]);
135+
}}
136+
onMouseDown={(e) => {
137+
e.stopPropagation();
138+
}}
139+
size="small"
140+
>
141+
<ContentCopy />
142+
</IconButton>
143+
<IconButton
144+
className="-ml-0.25 -mr-0.25 py-0 px-0.5 scale-[0.86] origin-right"
145+
color="inherit"
146+
component="span"
147+
disabled={
148+
conversationIds.length <= 1 || metadata.isGenerating
153149
}
154-
}}
155-
onMouseDown={(e) => {
156-
e.stopPropagation();
157-
}}
158-
size="small"
159-
>
160-
<Close />
161-
</IconButton>
150+
onClick={(e) => {
151+
e.stopPropagation();
152+
if (metadata.hasData) {
153+
setConversationToDeleteId(metadata.id);
154+
} else {
155+
deleteConversation(conversations[metadata.id]);
156+
}
157+
}}
158+
onMouseDown={(e) => {
159+
e.stopPropagation();
160+
}}
161+
size="small"
162+
>
163+
<Close />
164+
</IconButton>
165+
</div>
162166
</span>
163167
}
164168
value={metadata.id}

0 commit comments

Comments
 (0)