diff --git a/athena/athena/endpoints.py b/athena/athena/endpoints.py
index 4aff85cd..6d259a2a 100644
--- a/athena/athena/endpoints.py
+++ b/athena/athena/endpoints.py
@@ -235,7 +235,7 @@ def feedback_consumer(func: Union[
     feedback_type = inspect.signature(func).parameters["feedbacks"].annotation.__args__[0]
     module_config_type = inspect.signature(func).parameters["module_config"].annotation if "module_config" in inspect.signature(func).parameters else None
 
-    @app.post("/feed_feedbacks", responses=module_responses)
+    @app.post("/feedbacks", responses=module_responses)
     @authenticated
     @with_meta
     async def wrapper(
diff --git a/modules/text/module_text_llm/embeddings.index b/modules/text/module_text_llm/embeddings.index
new file mode 100644
index 00000000..866f2422
Binary files /dev/null and b/modules/text/module_text_llm/embeddings.index differ
diff --git a/modules/text/module_text_llm/indices.json b/modules/text/module_text_llm/indices.json
new file mode 100644
index 00000000..b7b4f99f
--- /dev/null
+++ b/modules/text/module_text_llm/indices.json
@@ -0,0 +1,47 @@
+{
+    "0": {
+        "exercise_id": 3002,
+        "submission_id": 787782,
+        "feedbacks": [
+            {
+                "id": 6620,
+                "title": "Assessment of the Difference between Coupling/Cohesion",
+                "description": "You correctly explained the difference between coupling and cohesion, well done!",
+                "credits": 1.0,
+                "structured_grading_instruction_id": 6053,
+                "is_graded": null,
+                "meta": {},
+                "exercise_id": 3002,
+                "submission_id": 787782,
+                "index_start": 0,
+                "index_end": 722
+            },
+            {
+                "id": 6621,
+                "title": "Assessment of the Example",
+                "description": "Great example, well done!",
+                "credits": 1.0,
+                "structured_grading_instruction_id": 6058,
+                "is_graded": null,
+                "meta": {},
+                "exercise_id": 3002,
+                "submission_id": 787782,
+                "index_start": 909,
+                "index_end": 1299
+            },
+            {
+                "id": 6622,
+                "title": "Assessment of the Explanation why Coupling/Cohesion are important",
+                "description": "You correctly explained why coupling and cohesion are important, well done!",
+                "credits": 1.0,
+                "structured_grading_instruction_id": 6056,
+                "is_graded": null,
+                "meta": {},
+                "exercise_id": 3002,
+                "submission_id": 787782,
+                "index_start": null,
+                "index_end": null
+            }
+        ]
+    }
+}
\ No newline at end of file
diff --git a/modules/text/module_text_llm/module_text_llm/__main__.py b/modules/text/module_text_llm/module_text_llm/__main__.py
index 135edcb1..d1dad801 100644
--- a/modules/text/module_text_llm/module_text_llm/__main__.py
+++ b/modules/text/module_text_llm/module_text_llm/__main__.py
@@ -32,7 +32,7 @@ def process_incoming_feedback(exercise: Exercise, submission: Submission, feedba
     submission_id = submission.id
     exercise_id = exercise.id
     embedded_submission = embed_text(submission.text)
-    store_embedding_index(exercise_id, submission_id)
+    store_embedding_index(exercise_id, submission_id, feedbacks)
     save_embedding(embedded_submission)
 
 @feedback_provider
diff --git a/modules/text/module_text_llm/module_text_llm/feedback_storage.py b/modules/text/module_text_llm/module_text_llm/feedback_storage.py
new file mode 100644
index 00000000..e69de29b
diff --git a/modules/text/module_text_llm/module_text_llm/icl_rag_approach/generate_suggestions.py b/modules/text/module_text_llm/module_text_llm/icl_rag_approach/generate_suggestions.py
index 7ade1f23..376fbe22 100644
--- a/modules/text/module_text_llm/module_text_llm/icl_rag_approach/generate_suggestions.py
+++ b/modules/text/module_text_llm/module_text_llm/icl_rag_approach/generate_suggestions.py
@@ -11,7 +11,7 @@ from llm_core.utils.predict_and_parse import predict_and_parse
 
 from module_text_llm.helpers.utils import add_sentence_numbers, get_index_range_from_line_range, format_grading_instructions
 from module_text_llm.icl_rag_approach.prompt_generate_suggestions import AssessmentModel
-from module_text_llm.index_storage import retrieve_embedding_index
+from module_text_llm.index_storage import retrieve_embedding_index, retrieve_feedbacks
 from module_text_llm.storage_embeddings import query_embedding
 from module_text_llm.generate_embeddings import embed_text
 from athena.text import get_stored_feedback
@@ -47,10 +47,12 @@ async def generate_suggestions(exercise: Exercise, submission: Submission, confi
     for index in list_of_indices[0]:
         if index != -1:
             exercise_id, submission_id = retrieve_embedding_index(list_of_indices)
-            stored_feedback = list(get_stored_feedback(exercise_id, submission_id))
+            stored_feedback = retrieve_feedbacks(index)  # -> List[Feedback]
+            # stored_feedback = list(get_stored_feedback(exercise_id, submission_id))
             logger.info("Stored feedback:")
-            for feedback_item in stored_feedback:
-                logger.info("- %s", feedback_item)
+            if stored_feedback is not None:
+                for feedback_item in stored_feedback:
+                    logger.info("- %s", feedback_item)
             logger.info("Stored submission:")
             rag_context.append({"submission": submission.text, "feedback": stored_feedback})
 
diff --git a/modules/text/module_text_llm/module_text_llm/index_storage.py b/modules/text/module_text_llm/module_text_llm/index_storage.py
index c7cf0651..f72e55c1 100644
--- a/modules/text/module_text_llm/module_text_llm/index_storage.py
+++ b/modules/text/module_text_llm/module_text_llm/index_storage.py
@@ -1,6 +1,5 @@
 import json
 import os
-
 INDEX_FILE = "indices.json"
 
 def load_indices():
@@ -10,15 +9,15 @@
             return json.load(f)
     else:
         return {}
-
-def store_embedding_index(exercise_id, submission_id):
+
+def store_embedding_index(exercise_id, submission_id, feedbacks):
     """ Store a new submission and exercise ID with an auto-incrementing index. """
     indices = load_indices()
     next_index = len(indices)
-
     indices[next_index] = {
         "exercise_id": exercise_id,
-        "submission_id": submission_id
+        "submission_id": submission_id,
+        "feedbacks": [feedback.dict() for feedback in feedbacks]
     }
 
     with open(INDEX_FILE, 'w', encoding="utf-8") as f:
@@ -34,3 +33,12 @@
         return indices[index]["exercise_id"], indices[index]["submission_id"]
 
     return None, None
+
+def retrieve_feedbacks(index):
+    index = str(index)
+    indices = load_indices()
+
+    if index in indices:
+        return indices[index]["feedbacks"]
+
+    return None
\ No newline at end of file
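
Note on the storage side of this change (illustrative only, not part of the patch): store_embedding_index now persists the feedbacks next to the exercise and submission IDs in indices.json, keyed by the auto-incremented position, and retrieve_feedbacks maps that position back to the stored feedback dicts. The sketch below shows a minimal round trip under the assumption of a fresh indices.json in the working directory; SimpleFeedback is a hypothetical stand-in for Athena's Feedback model (any pydantic model exposing .dict() works), and the literal values are copied from the indices.json example in the diff.

# Illustrative sketch, not part of the patch: round trip through indices.json.
# SimpleFeedback is a hypothetical stand-in for the real Feedback pydantic model.
from typing import Optional
from pydantic import BaseModel

from module_text_llm.index_storage import store_embedding_index, retrieve_feedbacks


class SimpleFeedback(BaseModel):
    id: int
    title: str
    description: str
    credits: float
    exercise_id: int
    submission_id: int
    index_start: Optional[int] = None
    index_end: Optional[int] = None


feedbacks = [
    SimpleFeedback(
        id=6620,
        title="Assessment of the Difference between Coupling/Cohesion",
        description="You correctly explained the difference between coupling and cohesion, well done!",
        credits=1.0,
        exercise_id=3002,
        submission_id=787782,
        index_start=0,
        index_end=722,
    ),
]

# Appends an entry under the next integer key and serialises each feedback via .dict().
store_embedding_index(3002, 787782, feedbacks)

# A position from the vector search maps back to the stored feedback dicts
# (None is returned for positions that have no entry in indices.json).
stored = retrieve_feedbacks(0)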
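
On the retrieval side, generate_suggestions.py now resolves each nearest-neighbour position from the embeddings index through retrieve_feedbacks instead of fetching feedback via get_stored_feedback. The condensed sketch below mirrors that loop without the logging; the call shape of query_embedding is an assumption (the hunk above starts after it), and submission_text stands in for submission.text.

# Condensed sketch of the retrieval path in generate_suggestions.py (assumptions noted inline).
from module_text_llm.generate_embeddings import embed_text
from module_text_llm.storage_embeddings import query_embedding
from module_text_llm.index_storage import retrieve_feedbacks


def build_rag_context(submission_text: str) -> list:
    rag_context = []
    query = embed_text(submission_text)
    # Assumed: query_embedding returns rows of neighbour positions, with -1 marking empty slots.
    list_of_indices = query_embedding(query)
    for index in list_of_indices[0]:
        if index != -1:
            # Feedback dicts stored in indices.json for this position, or None if unknown.
            stored_feedback = retrieve_feedbacks(index)
            rag_context.append({"submission": submission_text, "feedback": stored_feedback})
    return rag_context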