diff --git a/client_apps/canvas_quizzes/apps/statistics/test/unit/views/questions/fill_in_multiple_blanks_test.js b/client_apps/canvas_quizzes/apps/statistics/test/unit/views/questions/fill_in_multiple_blanks_test.js
index deef01b8f2c..83617d8b4e8 100644
--- a/client_apps/canvas_quizzes/apps/statistics/test/unit/views/questions/fill_in_multiple_blanks_test.js
+++ b/client_apps/canvas_quizzes/apps/statistics/test/unit/views/questions/fill_in_multiple_blanks_test.js
@@ -59,7 +59,6 @@ define(function(require) {
answerSets: answerSetFixture,
});
- debugger;
expect(find('.answer-set-tabs .active').innerText).toMatch('color');
var answerTextMatches = findAll("th.answer-textfield");
expect(answerTextMatches[0].innerText).toEqual('red');
diff --git a/gems/canvas_quiz_statistics/lib/canvas_quiz_statistics/analyzers/essay.rb b/gems/canvas_quiz_statistics/lib/canvas_quiz_statistics/analyzers/essay.rb
index ee59af4d9bd..1ff55041dca 100644
--- a/gems/canvas_quiz_statistics/lib/canvas_quiz_statistics/analyzers/essay.rb
+++ b/gems/canvas_quiz_statistics/lib/canvas_quiz_statistics/analyzers/essay.rb
@@ -74,5 +74,87 @@ module CanvasQuizStatistics::Analyzers
{ score: score, count: point_distribution[score] }
end.sort_by { |v| v[:score] || -1 }
end
+
+
+ # Statistics for responses, grouped into relative-performance buckets
+ #
+ # @return [Hash]
+ #
+ # Output synopsis:
+ #
+ # ```json
+ # {
+ # "answers": [
+ # {
+ # // Number of students who picked this answer.
+ # "responses": 3,
+ #
+ # // The names of the students who scored this value.
+ # "user_names": ["John", "Jim", "Jenny"],
+ #
+ # // The score of the first response tallied into this bucket
+ # "score": 0.5,
+ #
+ # // The id (or type) of the answer bucket
+ # // The top and bottom buckets represent the respective extreme 27%
+ # // ends of the student performance.
+ # // The middle represents the middle 46% in performance across the item.
+ # "id": "bottom", # one of %w|bottom top middle ungraded|
+ #
+ # // Whether any response in this bucket earned full credit on the item
+ # "full_credit": true,
+ # }
+ # ]
+ # }
+ # ```
+ metric :answers do |responses|
+ answers = Hash.new do |h,k|
+ h[k] = {
+ user_names: [],
+ responses: 0
+ }
+ end
+
+ buckets = [
+ [:top, 0.73],
+ [:middle, 0.27],
+ [:bottom, 0]
+ ]
+
+ graded_responses = []
+ ungraded_responses = []
+ responses.each {|r| r[:correct] == 'defined' ? graded_responses << r : ungraded_responses << r}
+ ranked_responses_by_score = graded_responses.sort_by {|h| h[:points]}
+
+ previous_floor = ranked_responses_by_score.length
+ buckets.each do |name, cutoff|
+ floor = (cutoff * ranked_responses_by_score.length).round
+ floor_score = ranked_responses_by_score[floor].try{|h| h[:points]}
+
+ # include all tied users in this bucket
+ floor -= 1 while (floor > 0) && (ranked_responses_by_score[floor - 1][:points] == floor_score)
+
+ # Set bucket for selected buckets
+ ranked_responses_by_score[floor...previous_floor].map {|r| r[:performance_bucket] = name.to_s}
+ previous_floor = floor
+ end
+
+ ungraded_responses.each {|r| r[:performance_bucket] = "ungraded"}
+
+ sorted_graded_responses = graded_responses.sort_by {|h| h[:performance_bucket]}.reverse
+
+ (sorted_graded_responses + ungraded_responses).each do |response|
+
+ hash = answers[response[:performance_bucket]]
+ hash[:id] ||= response[:performance_bucket]
+ hash[:score] ||= response[:points]
+ # Mark the bucket as full credit if any response in it reaches points_possible
+ hash[:full_credit] ||= response[:points].to_f >= @question_data[:points_possible].to_f
+
+ hash[:user_names] << response[:user_name]
+ hash[:responses] += 1
+ end
+ answers.values
+ end
end
end
diff --git a/gems/canvas_quiz_statistics/spec/canvas_quiz_statistics/analyzers/essay_spec.rb b/gems/canvas_quiz_statistics/spec/canvas_quiz_statistics/analyzers/essay_spec.rb
index e1136316a50..3ec3b3e7ae1 100644
--- a/gems/canvas_quiz_statistics/spec/canvas_quiz_statistics/analyzers/essay_spec.rb
+++ b/gems/canvas_quiz_statistics/spec/canvas_quiz_statistics/analyzers/essay_spec.rb
@@ -68,6 +68,50 @@ describe CanvasQuizStatistics::Analyzers::Essay do
end
end
+ describe ':answers' do
+ let :question_data do
+ { points_possible: 10 }
+ end
+
+ it 'should group items into answer type buckets with appropriate data' do
+ output = subject.run([
+ { points: 0, correct: 'undefined', user_name: 'Joe0'},
+ { points: 0, correct: 'undefined', user_name: 'Joe0'},
+ { points: 0, correct: 'undefined', user_name: 'Joe0'},
+ { points: 1, correct: 'defined', user_name: 'Joe1'},
+ { points: 2, correct: 'defined', user_name: 'Joe2'},
+ { points: 3, correct: 'defined', user_name: 'Joe3'},
+ { points: 4, correct: 'defined', user_name: 'Joe4'},
+ { points: 6, correct: 'defined', user_name: 'Joe6'},
+ { points: 7, correct: 'defined', user_name: 'Joe7'},
+ { points: 8, correct: 'defined', user_name: 'Joe8'},
+ { points: 9, correct: 'defined', user_name: 'Joe9'},
+ { points: 10, correct: 'defined', user_name: 'Joe10'},
+ ])
+ answers = output[:answers]
+
+ bottom = answers[2]
+ expect(bottom[:responses]).to eq 2
+ expect(bottom[:user_names]).to include('Joe1')
+ expect(bottom[:full_credit]).to be_false
+
+ middle = answers[1]
+ expect(middle[:responses]).to eq 5
+ expect(middle[:user_names]).to include('Joe6')
+ expect(middle[:full_credit]).to be_false
+
+ top = answers[0]
+ expect(top[:responses]).to eq 2
+ expect(top[:user_names]).to include('Joe10')
+ expect(top[:full_credit]).to be_true
+
+ undefined = answers[3]
+ expect(undefined[:responses]).to eq 3
+ expect(undefined[:user_names].uniq).to eq ['Joe0']
+ expect(undefined[:full_credit]).to be_false
+ end
+ end
+
describe ':point_distribution' do
it 'should map each score to the number of receivers' do
output = subject.run([