Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 0 additions & 9 deletions openedx/envs/common.py
Original file line number Diff line number Diff line change
Expand Up @@ -1347,15 +1347,6 @@ def add_optional_apps(optional_apps, installed_apps):
# .. toggle_tickets: 'https://github.com/open-craft/edx-platform/pull/429'
DISABLE_UNENROLLMENT = False

# .. toggle_name: ENABLE_GRADING_METHOD_IN_PROBLEMS
# .. toggle_implementation: DjangoSetting
# .. toggle_default: False
# .. toggle_description: Enables the grading method feature in capa problems.
# .. toggle_use_cases: open_edx
# .. toggle_creation_date: 2024-03-22
# .. toggle_tickets: https://github.com/openedx/edx-platform/pull/33911
ENABLE_GRADING_METHOD_IN_PROBLEMS = False

# .. toggle_name: BADGES_ENABLED
# .. toggle_implementation: DjangoSetting
# .. toggle_default: False
Expand Down
18 changes: 2 additions & 16 deletions xmodule/capa/capa_problem.py
Original file line number Diff line number Diff line change
Expand Up @@ -244,15 +244,6 @@ def __init__( # pylint: disable=too-many-positional-arguments,too-many-argument
if extract_tree:
self.extracted_tree = self._extract_html(self.tree)

@property
def is_grading_method_enabled(self) -> bool:
"""
Returns whether the grading method feature is enabled. If the
feature is not enabled, the grading method field will not be shown in
Studio settings and the default grading method will be used.
"""
return settings.FEATURES.get("ENABLE_GRADING_METHOD_IN_PROBLEMS", False)

def make_xml_compatible(self, tree):
"""
Adjust tree xml in-place for compatibility before creating
Expand Down Expand Up @@ -503,7 +494,7 @@ def get_grade_from_current_answers(self, student_answers, correct_map: Optional[
and student_answers_history). The correct map will always be updated, depending on
the student answers. The student answers will always remain the same over time.
"""
oldcmap = correct_map if self.is_grading_method_enabled else self.correct_map
oldcmap = correct_map if correct_map is not None else self.correct_map
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This seems like a logic change from what was there previously. I would have expected this line to be updated to just be, oldcmap = correct_map, why still have this conditional?

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We had to fall back to self.correct_map because correct_map can be None on the first grading pass, and hint logic assumes oldcmap is always a valid/non-null CorrectMap.

    def get_hints(self, student_answers, new_cmap, old_cmap):  # pylint: disable=too-many-locals
        """
        Generate adaptive hints for this problem based on student answers, the old CorrectMap,
        and the new CorrectMap produced by get_score.
    
        Does not return anything.
    
        Modifies new_cmap, by adding hints to answer_id entries as appropriate.
        """
    
        hintfn = None
        hint_function_provided = False
        hintgroup = self.xml.find("hintgroup")
        if hintgroup is not None:
            hintfn = hintgroup.get("hintfn")
            if hintfn is not None:
                hint_function_provided = True
    
        if hint_function_provided:
            # if a hint function has been supplied, it will take precedence
            # Hint is determined by a function defined in the <script> context; evaluate
            # that function to obtain list of hint, hintmode for each answer_id.
    
            # The function should take arguments (answer_ids, student_answers, new_cmap, old_cmap)
            # and it should modify new_cmap as appropriate.
    
            # We may extend this in the future to add another argument which provides a
            # callback procedure to a social hint generation system.
    
            global CORRECTMAP_PY  # pylint: disable=global-statement
            if CORRECTMAP_PY is None:
                # We need the CorrectMap code for hint functions. No, this is not great.
                CORRECTMAP_PY = inspect.getsource(correctmap)
    
            code = (
                CORRECTMAP_PY
                + "\n"
                + self.context["script_code"]
                + "\n"
                + textwrap.dedent(
                    """
                    new_cmap = CorrectMap()
                    new_cmap.set_dict(new_cmap_dict)
                    old_cmap = CorrectMap()
                    old_cmap.set_dict(old_cmap_dict)
                    {hintfn}(answer_ids, student_answers, new_cmap, old_cmap)
                    new_cmap_dict.update(new_cmap.get_dict())
                    old_cmap_dict.update(old_cmap.get_dict())
                    """
                ).format(hintfn=hintfn)
            )
            globals_dict = {
                "answer_ids": self.answer_ids,
                "student_answers": student_answers,
                "new_cmap_dict": new_cmap.get_dict(),
>               "old_cmap_dict": old_cmap.get_dict(),
            }
E           AttributeError: 'NoneType' object has no attribute 'get_dict'

xmodule/capa/responsetypes.py:475: AttributeError


# start new with empty CorrectMap
newcmap = CorrectMap()
Expand All @@ -524,12 +515,7 @@ def get_grade_from_current_answers(self, student_answers, correct_map: Optional[
# submission that would not exist in the persisted "student_answers".
# If grading method is enabled, we need to pass each student answers and the
# correct map in the history fields.
if (
"filesubmission" in responder.allowed_inputfields and student_answers is not None
) or self.is_grading_method_enabled:
results = responder.evaluate_answers(student_answers, oldcmap)
else:
results = responder.evaluate_answers(self.student_answers, oldcmap)
results = responder.evaluate_answers(student_answers, oldcmap)
newcmap.update(results)

return newcmap
Expand Down
8 changes: 0 additions & 8 deletions xmodule/capa/tests/test_capa_problem.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,8 +8,6 @@

import ddt
import pytest
from django.conf import settings
from django.test import override_settings
from lxml import etree
from markupsafe import Markup

Expand All @@ -19,9 +17,6 @@
from xmodule.capa.tests.helpers import new_loncapa_problem
from xmodule.capa.tests.test_util import UseUnsafeCodejail

FEATURES_WITH_GRADING_METHOD_IN_PROBLEMS = settings.FEATURES.copy()
FEATURES_WITH_GRADING_METHOD_IN_PROBLEMS["ENABLE_GRADING_METHOD_IN_PROBLEMS"] = True


@ddt.ddt
@UseUnsafeCodejail()
Expand Down Expand Up @@ -752,7 +747,6 @@ def test_get_question_answer(self):
# function can eventually be serialized to json without issues.
assert isinstance(problem.get_question_answers()["1_solution_1"], str)

@override_settings(FEATURES=FEATURES_WITH_GRADING_METHOD_IN_PROBLEMS)
def test_get_grade_from_current_answers(self):
"""
Verify that `responder.evaluate_answers` is called with `student_answers`
Expand Down Expand Up @@ -786,7 +780,6 @@ def test_get_grade_from_current_answers(self):
self.assertDictEqual(result.get_dict(), correct_map.get_dict())
responder_mock.evaluate_answers.assert_called_once_with(student_answers, correct_map)

@override_settings(FEATURES=FEATURES_WITH_GRADING_METHOD_IN_PROBLEMS)
def test_get_grade_from_current_answers_without_student_answers(self):
"""
Verify that `responder.evaluate_answers` is called with appropriate arguments.
Expand Down Expand Up @@ -820,7 +813,6 @@ def test_get_grade_from_current_answers_without_student_answers(self):
self.assertDictEqual(result.get_dict(), correct_map.get_dict())
responder_mock.evaluate_answers.assert_called_once_with(None, correct_map)

@override_settings(FEATURES=FEATURES_WITH_GRADING_METHOD_IN_PROBLEMS)
def test_get_grade_from_current_answers_with_filesubmission(self):
"""
Verify that an exception is raised when `responder.evaluate_answers` is called
Expand Down
41 changes: 5 additions & 36 deletions xmodule/capa_block.py
Original file line number Diff line number Diff line change
Expand Up @@ -500,8 +500,7 @@ def display_name_with_default(self):

def grading_method_display_name(self) -> str | None:
"""
If the `ENABLE_GRADING_METHOD_IN_PROBLEMS` feature flag is enabled,
return the grading method, else return None.
Return the grading method.
"""
_ = self.runtime.service(self, "i18n").gettext
display_name = {
Expand All @@ -510,18 +509,7 @@ def grading_method_display_name(self) -> str | None:
GRADING_METHOD.HIGHEST_SCORE: _("Highest Score"),
GRADING_METHOD.AVERAGE_SCORE: _("Average Score"),
}
if self.is_grading_method_enabled:
return display_name[self.grading_method]
return None

@property
def is_grading_method_enabled(self) -> bool:
"""
Returns whether the grading method feature is enabled. If the
feature is not enabled, the grading method field will not be shown in
Studio settings and the default grading method will be used.
"""
return settings.FEATURES.get("ENABLE_GRADING_METHOD_IN_PROBLEMS", False)
return display_name[self.grading_method]

@property
def debug(self):
Expand Down Expand Up @@ -571,8 +559,6 @@ def non_editable_metadata_fields(self):
ProblemBlock.matlab_api_key,
]
)
if not self.is_grading_method_enabled:
non_editable_fields.append(ProblemBlock.grading_method)
return non_editable_fields

@property
Expand Down Expand Up @@ -1838,8 +1824,7 @@ def submit_problem( # pylint: disable=too-many-statements,too-many-branches,too

current_score = self.score_from_lcp(self.lcp)
self.score_history.append(current_score)
if self.is_grading_method_enabled:
current_score = self.get_score_with_grading_method(current_score)
current_score = self.get_score_with_grading_method(current_score)
self.set_score(current_score)
self.set_last_submission_time()

Expand Down Expand Up @@ -2313,18 +2298,6 @@ def get_score(self):
"""
return self.score

def update_correctness(self):
"""
Updates correct map of the LCP.
Operates by creating a new correctness map based on the current
state of the LCP, and updating the old correctness map of the LCP.
"""
# Make sure that the attempt number is always at least 1 for grading purposes,
# even if the number of attempts have been reset and this problem is regraded.
self.lcp.context["attempt"] = max(self.attempts, 1)
new_correct_map = self.lcp.get_grade_from_current_answers(None)
self.lcp.correct_map.update(new_correct_map)

def update_correctness_list(self):
"""
Updates the `correct_map_history` and the `correct_map` of the LCP.
Expand All @@ -2347,13 +2320,9 @@ def calculate_score(self):
"""
Returns the score calculated from the current problem state.

If the grading method is enabled, the score is calculated based on the grading method.
The score is calculated based on the grading method.
"""
if self.is_grading_method_enabled:
return self.get_rescore_with_grading_method()
self.update_correctness()
new_score = self.lcp.calculate_score()
return Score(raw_earned=new_score["score"], raw_possible=new_score["total"])
return self.get_rescore_with_grading_method()

def calculate_score_list(self):
"""
Expand Down
Loading
Loading