Ember Quiz Stats [+Backend] - Numerical

Adds numerical question support to CanvasQuizStatistics and the renderer
in the Ember quiz statistics (subclass of ShortAnswer question).

Closes CNVS-13087, CNVS-13086

TEST PLAN
---- ----

  - create a quiz with a numerical question:
    - mix and match between the answer types: exact, exact with margin,
      and range answers
  - have a number of students take the quiz; we need to score like this to
    cover the possible cases:
    - a correct answer
    - a missing answer
    - an incorrect/out-of-range answer
  - hitting the stats api should return the documented metrics in the
    output of "question_statistics":
    /api/v1/courses/:course_id/quizzes/:quiz_id/statistics [GET]
  - visiting ember quizzes should now render the numerical stats which
    is similar to short answer (fill in the blank) and multiple answers:
    - make sure the "Other" and "No Answer" answers and bars show up
  - verify the API docs are updated

Change-Id: I0c9ec0dbc7c729410241177b6ed43b0cd4dad143
Reviewed-on: https://gerrit.instructure.com/35431
Tested-by: Jenkins <jenkins@instructure.com>
QA-Review: Caleb Guanzon <cguanzon@instructure.com>
Reviewed-by: Derek DeVries <ddevries@instructure.com>
Product-Review: Derek DeVries <ddevries@instructure.com>
This commit is contained in:
Ahmad Amireh 2014-05-27 10:20:56 +03:00 committed by Derek DeVries
parent e25a0b71d0
commit 741a6e0036
25 changed files with 692 additions and 202 deletions

View File

@ -56,7 +56,7 @@ define [
switch @get('questionType')
when 'multiple_choice_question', 'true_false_question'
'multiple_choice'
when 'short_answer_question', 'multiple_answers_question'
when 'short_answer_question', 'multiple_answers_question', 'numerical_question'
'short_answer'
when 'fill_in_multiple_blanks_question', 'multiple_dropdowns_question', 'matching_question'
'fill_in_multiple_blanks'
@ -75,4 +75,4 @@ define [
pointBiserials.findBy('correct', true).point_biserial
else
return 0
).property('pointBiserials')
).property('pointBiserials')

View File

@ -20,8 +20,10 @@ define [
yAxisLabel: ''
xAxisLabels: false
linearScale: true
w: 220
h: 120
width: (->
@$().width()
).property()
height: 120
tooltipOptions:
position:
@ -36,8 +38,8 @@ define [
highest = Math.max.apply(Math, data.mapBy('y'))
margin = { top: 0, right: 0, bottom: 0, left: 0 }
width = @get('w') - margin.left - margin.right
height = @get('h') - margin.top - margin.bottom
width = @get('width') - margin.left - margin.right
height = @get('height') - margin.top - margin.bottom
barWidth = @get('barWidth')
barMargin = @get('barMargin')
xOffset = @get('xOffset')

View File

@ -1,2 +1,4 @@
define [ '../questions_view' ], (Base) ->
Base
Base.extend({
classNames: [ 'question-statistics', 'stretched-answer-distribution' ]
})

View File

@ -171,6 +171,10 @@ $muteTextColor: #949494;
}
}
&.stretched-answer-distribution .answer-distribution-section {
width: 580px;
}
.discrimination-index-section {
width: 270px;
position: relative;

View File

@ -257,3 +257,73 @@ File Upload question statistics look just like the statistics for [Essays](#essa
#### Formula
Formula question statistics look just like the statistics for [Essays](#essay-question-stats).
#### Numerical
```javascript
{
// Number of students who have provided any kind of answer.
"responses": 2,
// Number of students who have provided a correct answer.
"correct": 1,
// Number of students who have provided a correct answer and received full
// credit or higher.
"full_credit": 2,
// Number of students who have provided an answer which was not correct.
"incorrect": 1,
"answers": [
{
// Unique ID of this answer.
"id": "9711",
// This metric contains a formatted version of the correct answer
// ready for display.
"text": "15.00",
// Number of students who provided this answer.
"responses": 3,
// Whether this answer is a correct one.
"correct": true,
// Lower and upper boundaries of the answer range. This is consistent
// regardless of the answer type (e.g., exact vs range).
//
// In the case of exact answers, the range will be the exact value
// plus or minus the defined margin.
"value": [ 13.5, 16.5 ],
// Margin of error tolerance. This is always zero for range answers.
"margin": 1.5
},
// "Other" answers:
//
// This is an auto-generated answer that will be present if any student
// provides a number for an answer that is incorrect (doesn't map to
// any of the pre-defined answers.)
{
"id": "other",
"text": "Other",
"responses": 0,
"correct": false
},
// "Missing" answers:
//
// This is an auto-generated answer to account for all students who
// left this question unanswered.
{
"id": "none",
"text": "No Answer",
"responses": 0,
"correct": false
}
]
}
```

View File

@ -42,10 +42,12 @@ module CanvasQuizStatistics::Analyzers
end
require 'canvas_quiz_statistics/analyzers/base'
require 'canvas_quiz_statistics/analyzers/concerns/has_answers'
require 'canvas_quiz_statistics/analyzers/essay'
require 'canvas_quiz_statistics/analyzers/fill_in_multiple_blanks'
require 'canvas_quiz_statistics/analyzers/multiple_dropdowns'
require 'canvas_quiz_statistics/analyzers/file_upload'
require 'canvas_quiz_statistics/analyzers/calculated'
require 'canvas_quiz_statistics/analyzers/matching'
require 'canvas_quiz_statistics/analyzers/numerical'
end

View File

@ -58,5 +58,12 @@ module CanvasQuizStatistics::Analyzers
def build_context(responses)
{}
end
# Test whether the response contains an answer to the question.
#
# Default behavior is to text whether the "text" field is populated.
def answer_present?(response)
response[:text].present?
end
end
end

View File

@ -16,7 +16,7 @@
# with this program. If not, see <http://www.gnu.org/licenses/>.
#
module CanvasQuizStatistics::Analyzers
class Calculated < Essay
class Calculated < Base
inherit_metrics :essay_question
metric :graded do |responses|

View File

@ -0,0 +1,125 @@
module CanvasQuizStatistics::Analyzers::Concerns
  # Helpers for calculating numbers of responses each answer has received.
  #
  # Only works for question types that have pre-defined answers, like
  # MultipleChoice, FIMB, etc.
  module HasAnswers
    Constants = CanvasQuizStatistics::Analyzers::Base::Constants

    protected

    # Build the stock stats entry for each of the question's pre-defined
    # answers.
    #
    # Override if you need more sophisticated answer metrics. Alternatively,
    # you can pass a block that will be run on each built answer so you can
    # customize it.
    #
    # Stock routine provides the following:
    #
    #     {
    #       "id": "1234",    // ID is always stringified
    #       "text": "Answer text.",
    #       "correct": true  // based on weight
    #     }
    #
    # @param [Array<Hash>] source
    #   The answer records to parse. Defaults to the question's own answers.
    #
    # @yield [answer, stats]
    #   Each source answer along with its built stats entry, so callers can
    #   decorate the entry in-place.
    #
    # @return [Array<Hash>] One stats entry per source answer.
    def parse_answers(source=@question_data[:answers])
      return [] if source.blank?

      source.map do |answer|
        stats = build_answer(answer[:id], answer[:text], answer[:weight] == 100)
        yield answer, stats if block_given?
        stats
      end
    end

    # Loop over the responses and calculate how many responses each answer
    # has received.
    #
    # @param [Array<Hash>] answers
    #   The answer set which contains the answers the responses may map to.
    #   See #parse_answers for generating such a set.
    #
    # @param [Any...] args
    #   Extra parameters you may need to pass on to your resolvers in
    #   #locate_answer and #answer_present_but_unknown? to do the work.
    #
    # @warn
    #   Has side-effects on the passed answer set:
    #
    #   - the :responses key of each answer entry may be incremented
    #   - the set itself may be mutated by adding new answers to it (the
    #     aggregate "Other" and "Missing" answers)
    #
    # @return [NilClass]
    def calculate_responses(responses, answers, *args)
      responses.each do |response|
        answer = locate_answer(response, answers, *args)
        answer ||= begin
          if answer_present_but_unknown?(response, *args)
            generate_unknown_answer(answers)
          else
            generate_missing_answer(answers)
          end
        end

        answer[:responses] += 1
      end
    end

    # Search for a pre-defined answer in the given answer set based on the
    # student's response.
    #
    # Example implementation that attempts to locate the answer by id:
    #
    #     answers.detect { |a| a[:id] == "#{response[:answer_id]}" }
    #
    # @return [Hash|NilClass]
    def locate_answer(response, answers, *args)
      raise NotImplementedError
    end

    # If this question type supports "free-form" input where students can
    # provide a response that does not map to any answer the teacher
    # pre-defines, then these responses can be aggregated into an "unknown"
    # answer instead of considering them missing.
    #
    # @note
    #   This will only be considered if the question was unable to locate a
    #   pre-defined answer in #locate_answer.
    #
    # @return [Boolean]
    def answer_present_but_unknown?(response, *args)
      answer_present?(response)
    end

    private

    # Normalized stats entry for a single answer. IDs are always strings.
    def build_answer(id, text, correct=false)
      {
        id: "#{id}",
        text: text.to_s,
        correct: correct,
        responses: 0
      }
    end

    # Find-or-create the aggregate "Other" answer in the given set.
    def generate_unknown_answer(set)
      __generate_incorrect_answer(Constants::UnknownAnswerKey,
        Constants::UnknownAnswerText,
        set)
    end

    # Find-or-create the aggregate "No Answer" answer in the given set.
    def generate_missing_answer(set)
      __generate_incorrect_answer(Constants::MissingAnswerKey,
        Constants::MissingAnswerText,
        set)
    end

    # Locate an aggregate incorrect answer by id, appending a fresh entry
    # to the set the first time it is needed.
    def __generate_incorrect_answer(id, text, answer_set)
      answer = answer_set.detect { |a| a[:id] == id }

      if answer.nil?
        answer = build_answer(id, text)
        answer_set << answer
      end

      answer
    end
  end
end

View File

@ -74,14 +74,5 @@ module CanvasQuizStatistics::Analyzers
{ score: score, count: point_distribution[score] }
end.sort_by { |v| v[:score] || -1 }
end
private
# Test whether the response contains an answer to the question.
#
# Default behavior is to text whether the "text" field is populated.
def answer_present?(response)
response[:text].present?
end
end
end

View File

@ -36,20 +36,24 @@ module CanvasQuizStatistics::Analyzers
# }
# ```
class FillInMultipleBlanks < Base
include Base::Constants
include Concerns::HasAnswers
# Number of students who have filled at least one blank.
#
# @return [Integer]
metric :responses do |responses|
responses.select { |r| answer_present?(r) }.length
metric :responses => [ :blanks ] do |responses, blanks|
responses.select do |response|
blanks.any? do |blank|
answer_present_for_blank?(response, blank)
end
end.length
end
# Number of students who have filled every blank.
#
# @return [Integer]
metric :answered do |responses|
responses.select { |r| answer_present?(r, true) }.length
responses.select { |r| answer_present?(r) }.length
end
# Number of students who filled all blanks correctly.
@ -70,7 +74,7 @@ module CanvasQuizStatistics::Analyzers
#
# @return [Integer]
metric :incorrect => [ :grades ] do |responses, grades|
grades.select { |r| FalseLike.include?(r) }.length
grades.select { |r| Base::Constants::FalseLike.include?(r) }.length
end
# Statistics for the answer sets (blanks).
@ -122,13 +126,33 @@ module CanvasQuizStatistics::Analyzers
# ]
# }
metric :answer_sets => [ :blanks ] do |responses, blanks|
build_answer_sets(blanks).tap do |sets|
calculate_answer_responses(responses, blanks, sets)
answer_sets = blanks.map do |blank|
answers_for_blank = @question_data[:answers].select do |answer|
answer[:blank_id] == blank
end
{
id: CanvasQuizStatistics::Util.digest(blank),
text: blank,
answers: parse_answers(answers_for_blank)
}
end
blanks.each do |blank|
answer_sets.detect { |set| set[:text] == blank }.tap do |answer_set|
calculate_responses(responses, answer_set[:answers], blank)
end
end
answer_sets
end
private
def question_blanks
@question_data[:answers].map { |a| a[:blank_id] }.uniq
end
def build_context(responses)
{}.tap do |ctx|
ctx[:grades] = responses.map { |r| r.fetch(:correct, nil) }.map(&:to_s)
@ -136,105 +160,32 @@ module CanvasQuizStatistics::Analyzers
end
end
def answer_present?(response, answered_all_blanks=false)
!question_blanks.send(answered_all_blanks ? 'any?' : 'all?') do |blank|
answer_id = response[answer_key(blank, true)]
answer_text = response[answer_key(blank, false)]
answer_id.blank? && answer_text.blank?
end
def answer_present?(response)
question_blanks.all? { |blank| answer_present_for_blank?(response, blank) }
end
def question_blanks
@question_data[:answers].map { |a| a[:blank_id] }.uniq
def answer_present_for_blank?(response, blank)
response[key_for_answer_id(blank)].present? ||
response[key_for_answer_text(blank)].present?
end
def build_answer_sets(blanks)
blanks.map do |blank|
answers_for_blank = @question_data[:answers].select do |answer|
answer[:blank_id] == blank
end
answers = answers_for_blank.map do |answer|
build_answer(answer[:id], answer[:text], answer[:weight] == 100)
end
{
id: CanvasQuizStatistics::Util.digest(blank),
text: blank,
answers: answers
}
end
def locate_answer(response, answers, blank)
answer_id = response[key_for_answer_id(blank)].to_s
answers.detect { |answer| answer[:id] == answer_id }
end
def build_answer(id, text, correct=false)
{
id: "#{id}",
text: text.to_s,
correct: correct,
responses: 0
}
def answer_present_but_unknown?(response, blank)
response[key_for_answer_text(blank)].present?
end
def calculate_answer_responses(responses, blanks, answer_sets)
blanks.each do |blank|
answer_set = answer_sets.detect { |set| set[:text] == blank }
responses.each do |response|
analyze_response_for_blank(response, blank, answer_set)
end
end
# The key to use to lookup the _resolved_ answer ID.
def key_for_answer_id(blank)
:"answer_id_for_#{blank}"
end
def analyze_response_for_blank(response, blank, answer_set)
answer_id = response.fetch(answer_key(blank), nil).to_s
answer_text = response.fetch(answer_key(blank, false), nil)
answer = if answer_id.present?
answer_set[:answers].detect { |a| "#{a[:id]}" == answer_id }
elsif answer_text.present?
generate_incorrect_answer({
id: UnknownAnswerKey,
text: UnknownAnswerText,
in: answer_set
})
else
generate_incorrect_answer({
id: MissingAnswerKey,
text: MissingAnswerText,
in: answer_set
})
end
answer[:responses] += 1
end
# The key to use to lookup the response in the input submission_data fragment
#
# The key will be "answer_id_for_blank" or "answer_for_blank" based on
# @resolved, use the former if you're looking for a resolved answer (a
# correct one that maps to an ID), and the second to query the text they
# wrote.
def answer_key(blank, resolved=true)
[
'answer',
resolved ? 'id' : nil,
'for',
blank
].compact.join('_').to_sym
end
def generate_incorrect_answer(props)
id, text, answer_set = *[ props[:id], props[:text], props[:in] ]
answer = answer_set[:answers].detect { |a| a[:id] == id }
unless answer.present?
answer = build_answer(id, text)
answer_set[:answers] << answer
end
answer
# The key to use to lookup the text the student wrote.
def key_for_answer_text(blank)
:"answer_for_#{blank}"
end
end
end

View File

@ -35,7 +35,7 @@ module CanvasQuizStatistics::Analyzers
# }
# ```
class Matching < Base
include Base::Constants
include Concerns::HasAnswers
inherit :correct, :partially_correct, :incorrect, {
from: :fill_in_multiple_blanks
@ -110,27 +110,16 @@ module CanvasQuizStatistics::Analyzers
# ]
# }
metric :answer_sets => [ :answers, :matches ] do |responses, answers, matches|
answer_sets = answers.map do |answer|
{
id: answer[:id].to_s,
text: answer[:text].to_s,
responses: 0,
answers: matches.map do |match|
build_answer(match[:match_id],
match[:text],
answer[:match_id].to_s == match[:match_id].to_s)
end
}
answer_sets = parse_answers do |answer, stats|
stats[:answers] = matches.map do |match|
build_answer(match[:match_id],
match[:text],
answer[:match_id].to_s == match[:match_id].to_s)
end
end
answer_sets.each do |set|
responses.each do |response|
match_id = response[answer_key(set[:id])].to_s
match = set[:answers].detect { |a| a[:id] == match_id }
match ||= generate_missing_answer(set)
match[:responses] += 1
end
calculate_responses(responses, set[:answers], set[:id])
end
end
@ -161,24 +150,13 @@ module CanvasQuizStatistics::Analyzers
answer_ids.include?(id.to_s) && match_ids.include?(match_id)
end
def build_answer(id, text, correct=false)
{
id: "#{id}",
text: text.to_s,
correct: correct,
responses: 0
}
def locate_answer(response, answers, set_id)
match_id = response[answer_key(set_id)].to_s
answers.detect { |a| a[:id] == match_id }
end
def generate_missing_answer(answer_set)
answer = answer_set[:answers].detect { |a| a[:id] == MissingAnswerKey }
unless answer.present?
answer = build_answer(MissingAnswerKey, MissingAnswerText)
answer_set[:answers] << answer
end
answer
def answer_present_but_unknown?(*args)
false
end
end
end

View File

@ -0,0 +1,161 @@
#
# Copyright (C) 2014 Instructure, Inc.
#
# This file is part of Canvas.
#
# Canvas is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation, version 3 of the License.
#
# Canvas is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
#
module CanvasQuizStatistics::Analyzers
  # Generates statistics for a set of student responses to a numerical question.
  #
  # Response is expected to look something like this:
  #
  # ```javascript
  # {
  #   "correct": true,
  #   "points": 1,
  #   "question_id": 10,
  #   "answer_id": 8224,
  #   "text": "-7.0000"
  # }
  # ```
  #
  class Numerical < Base
    include Concerns::HasAnswers

    # Reuse shared metric implementations: :responses and :full_credit from
    # the Essay analyzer, :correct and :incorrect from FillInMultipleBlanks.
    # The latter two read the :grades context built in #build_context below.
    inherit :responses, :full_credit, from: :essay
    inherit :correct, :incorrect, from: :fill_in_multiple_blanks

    # Statistics for the pre-defined answers.
    #
    # @return [Array<Hash>]
    #
    # Each entry could represent an "exact" answer, or a "range" answer.
    # Exact answers can have margins.
    #
    # Output synopsis:
    #
    # ```json
    # {
    #   "answers": [
    #     {
    #       // Unique ID of this answer.
    #       "id": "9711",
    #
    #       // This metric contains a formatted version of the correct answer
    #       // ready for display.
    #       "text": "15.00",
    #
    #       // Number of students who provided this answer.
    #       "responses": 3,
    #
    #       // Whether this answer is a correct one.
    #       "correct": true,
    #
    #       // Lower and upper boundaries of the answer range. This is consistent
    #       // regardless of the answer type (e.g., exact vs range).
    #       //
    #       // In the case of exact answers, the range will be the exact value
    #       // plus or minus the defined margin.
    #       "value": [ 13.5, 16.5 ],
    #
    #       // Margin of error tolerance. This is always zero for range answers.
    #       "margin": 1.5
    #     },
    #
    #     // "Other" answers:
    #     //
    #     // This is an auto-generated answer that will be present if any student
    #     // provides a number for an answer that is incorrect (doesn't map to
    #     // any of the pre-defined answers.)
    #     {
    #       "id": "other",
    #       "text": "Other",
    #       "responses": 0,
    #       "correct": false
    #     },
    #
    #     // "Missing" answers:
    #     //
    #     // This is an auto-generated answer to account for all students who
    #     // left this question unanswered.
    #     {
    #       "id": "none",
    #       "text": "No Answer",
    #       "responses": 0,
    #       "correct": false
    #     }
    #   ]
    # }
    # ```
    metric :answers do |responses|
      answers = parse_answers do |answer, answer_stats|
        is_range = range_answer?(answer)
        bounds = generate_answer_boundaries(answer, is_range)
        text = generate_text_for_answer(answer, is_range)

        answer_stats.merge!({
          text: text,
          value: bounds,
          responses: 0,
          margin: answer[:margin].to_f,
          is_range: is_range
        })
      end

      answers.tap { calculate_responses(responses, answers) }
    end

    private

    # Expose the stringified :correct flags as :grades for the metrics
    # inherited from the FIMB analyzer.
    def build_context(responses)
      {
        grades: responses.map { |r| r.fetch(:correct, nil) }.map(&:to_s)
      }
    end

    def range_answer?(answer)
      answer[:numerical_answer_type] == 'range_answer'
    end

    # Exact answers will look like this: "15.00"
    # Range answers will look like this: "[3.00..54.12]"
    def generate_text_for_answer(answer, is_range)
      format = ->(value) { sprintf('%.2f', value) }

      if is_range
        range = [ answer[:start], answer[:end] ].map(&format).join('..')
        "[#{range}]"
      else
        value = format.call(answer[:exact])
        "#{value}"
      end
    end

    # Generates an array that represents the correct answer range.
    #
    # The range will be simulated for exact answers using the margin (if any).
    def generate_answer_boundaries(answer, is_range)
      if is_range
        # there's no margin in range answers
        [ answer[:start].to_f, answer[:end].to_f ]
      else
        margin = answer[:margin].to_f
        [ answer[:exact] - margin, answer[:exact] + margin ]
      end
    end

    # Match a response to a pre-defined answer by the graded answer_id.
    def locate_answer(response, answers)
      answers.detect { |answer| answer[:id] == "#{response[:answer_id]}" }
    end
  end
end

View File

@ -10,6 +10,8 @@ describe CanvasQuizStatistics::Analyzers::Essay do
}.to_not raise_error
end
it_behaves_like 'essay [:responses]'
describe 'output [#run]' do
describe '[:responses]' do
it 'should count students who have written anything' do

View File

@ -1,7 +1,7 @@
require 'spec_helper'
describe CanvasQuizStatistics::Analyzers::FillInMultipleBlanks do
FIMB = CanvasQuizStatistics::Analyzers::FillInMultipleBlanks
Constants = CanvasQuizStatistics::Analyzers::Base::Constants
let(:question_data) { QuestionHelpers.fixture('fill_in_multiple_blanks_question') }
subject { described_class.new(question_data) }
@ -49,7 +49,7 @@ describe CanvasQuizStatistics::Analyzers::FillInMultipleBlanks do
])
answer_set = stats[:answer_sets].detect { |as| as[:text] == 'color1' }
answer = answer_set[:answers].detect { |a| a[:id] == FIMB::UnknownAnswerKey }
answer = answer_set[:answers].detect { |a| a[:id] == Constants::UnknownAnswerKey }
answer.should be_present
answer[:responses].should == 1
end
@ -63,7 +63,7 @@ describe CanvasQuizStatistics::Analyzers::FillInMultipleBlanks do
])
answer_set = stats[:answer_sets].detect { |as| as[:text] == 'color1' }
answer = answer_set[:answers].detect { |a| a[:id] == FIMB::MissingAnswerKey }
answer = answer_set[:answers].detect { |a| a[:id] == Constants::MissingAnswerKey }
answer.should be_present
answer[:responses].should == 1
end
@ -75,18 +75,19 @@ describe CanvasQuizStatistics::Analyzers::FillInMultipleBlanks do
answer_id_for_color1: "9711"
}
])
puts stats.to_json
stats[:answer_sets].detect { |as| as[:text] == 'color1' }.tap do |answer_set|
unknown_answer = answer_set[:answers].detect { |a| a[:id] == FIMB::UnknownAnswerKey }
missing_answer = answer_set[:answers].detect { |a| a[:id] == FIMB::MissingAnswerKey }
unknown_answer = answer_set[:answers].detect { |a| a[:id] == Constants::UnknownAnswerKey }
missing_answer = answer_set[:answers].detect { |a| a[:id] == Constants::MissingAnswerKey }
unknown_answer.should_not be_present
missing_answer.should_not be_present
end
stats[:answer_sets].detect { |as| as[:text] == 'color2' }.tap do |answer_set|
unknown_answer = answer_set[:answers].detect { |a| a[:id] == FIMB::UnknownAnswerKey }
missing_answer = answer_set[:answers].detect { |a| a[:id] == FIMB::MissingAnswerKey }
unknown_answer = answer_set[:answers].detect { |a| a[:id] == Constants::UnknownAnswerKey }
missing_answer = answer_set[:answers].detect { |a| a[:id] == Constants::MissingAnswerKey }
unknown_answer.should_not be_present
missing_answer.should be_present
@ -96,38 +97,9 @@ describe CanvasQuizStatistics::Analyzers::FillInMultipleBlanks do
end
end
describe '[:correct]' do
it 'should count all fully correct responses' do
stats = subject.run([
{ correct: "true" },
{ correct: true }
])
stats[:correct].should == 2
end
end
describe '[:partial]' do
it 'should count all partially correct responses' do
stats = subject.run([
{ correct: "true" },
{ correct: "partial" }
])
stats[:partially_correct].should == 1
end
end
describe '[:incorrect]' do
it 'should count all incorrect responses' do
stats = subject.run([
{ correct: nil },
{ correct: false }
])
stats[:incorrect].should == 2
end
end
it_behaves_like '[:correct]'
it_behaves_like '[:partially_correct]'
it_behaves_like '[:incorrect]'
describe '[:responses]' do
it 'should count all students who have filled any blank' do

View File

@ -103,4 +103,8 @@ describe CanvasQuizStatistics::Analyzers::Matching do
lhs[:responses].should == 1
end
end
it_behaves_like '[:correct]'
it_behaves_like '[:partially_correct]'
it_behaves_like '[:incorrect]'
end

View File

@ -1,7 +1,7 @@
require 'spec_helper'
describe CanvasQuizStatistics::Analyzers::MultipleDropdowns do
MD = CanvasQuizStatistics::Analyzers::MultipleDropdowns
Constants = CanvasQuizStatistics::Analyzers::Base::Constants
let(:question_data) { QuestionHelpers.fixture('multiple_dropdowns_question') }
subject { described_class.new(question_data) }
@ -44,7 +44,7 @@ describe CanvasQuizStatistics::Analyzers::MultipleDropdowns do
])
answer_set = stats[:answer_sets].detect { |as| as[:text] == 'organ' }
answer = answer_set[:answers].detect { |a| a[:id] == MD::MissingAnswerKey }
answer = answer_set[:answers].detect { |a| a[:id] == Constants::MissingAnswerKey }
answer.should be_present
answer[:responses].should == 1
end
@ -57,7 +57,7 @@ describe CanvasQuizStatistics::Analyzers::MultipleDropdowns do
])
answer_set = stats[:answer_sets].detect { |as| as[:text] == 'organ' }
answer = answer_set[:answers].detect { |a| a[:id] == MD::MissingAnswerKey }
answer = answer_set[:answers].detect { |a| a[:id] == Constants::MissingAnswerKey }
answer.should be_present
answer[:responses].should == 1
end
@ -68,16 +68,16 @@ describe CanvasQuizStatistics::Analyzers::MultipleDropdowns do
])
stats[:answer_sets].detect { |as| as[:text] == 'organ' }.tap do |answer_set|
unknown_answer = answer_set[:answers].detect { |a| a[:id] == MD::UnknownAnswerKey }
missing_answer = answer_set[:answers].detect { |a| a[:id] == MD::MissingAnswerKey }
unknown_answer = answer_set[:answers].detect { |a| a[:id] == Constants::UnknownAnswerKey }
missing_answer = answer_set[:answers].detect { |a| a[:id] == Constants::MissingAnswerKey }
unknown_answer.should_not be_present
missing_answer.should_not be_present
end
stats[:answer_sets].detect { |as| as[:text] == 'color' }.tap do |answer_set|
unknown_answer = answer_set[:answers].detect { |a| a[:id] == MD::UnknownAnswerKey }
missing_answer = answer_set[:answers].detect { |a| a[:id] == MD::MissingAnswerKey }
unknown_answer = answer_set[:answers].detect { |a| a[:id] == Constants::UnknownAnswerKey }
missing_answer = answer_set[:answers].detect { |a| a[:id] == Constants::MissingAnswerKey }
unknown_answer.should_not be_present
missing_answer.should be_present

View File

@ -0,0 +1,76 @@
require 'spec_helper'
describe CanvasQuizStatistics::Analyzers::Numerical do
  # The fixture defines four answers: exact 12 (id 4343), range 3..6
  # (id 6959), exact 4 (id 8617), and exact 1.5 with a margin of 1 (id 6704).
  let(:question_data) { QuestionHelpers.fixture('numerical_question') }
  subject { described_class.new(question_data) }

  it 'should not blow up when no responses are provided' do
    expect {
      subject.run([]).should be_present
    }.to_not raise_error
  end

  # Metrics shared with the Essay and FIMB analyzers are covered by the
  # shared example groups below.
  it_behaves_like 'essay [:responses]'
  it_behaves_like 'essay [:full_credit]'
  it_behaves_like '[:correct]'
  it_behaves_like '[:incorrect]'

  describe '[:answers]' do
    it 'generates the "none" answer when a student skips the question' do
      stats = subject.run([ { text: '' } ])
      stats[:answers].last.tap do |no_answer|
        no_answer[:id].should == 'none'
        no_answer[:responses].should == 1
      end
    end

    it 'generates the "other" answer for incorrect answers' do
      stats = subject.run([{ text: '12345' }])
      stats[:answers].last.tap do |other_answer|
        other_answer[:id].should == 'other'
        other_answer[:responses].should == 1
      end
    end
  end

  describe '[:answers][]' do
    describe '[:id]' do
      it 'should stringify the answer id' do
        subject.run([])[:answers].detect { |a| a[:id] == '4343' }.should be_present
      end
    end

    describe '[:is_range]' do
      it 'should be true for range answers' do
        stats = subject.run([])
        stats[:answers].each do |answer|
          # we have one range answer with id 6959
          answer[:is_range].should == (answer[:id] == '6959')
        end
      end
    end

    describe '[:text]' do
      it 'should read 12.00 for an exact answer with no margin' do
        subject.run([])[:answers][0][:text].should == '12.00'
      end

      it 'should read [3.00..6.00] for a range answer' do
        subject.run([])[:answers][1][:text].should == '[3.00..6.00]'
      end

      it 'should read 1.50 for an exact answer with margin' do
        subject.run([])[:answers][3][:text].should == '1.50'
      end
    end

    describe '[:responses]' do
      it 'should count the number of students who got it right' do
        stats = subject.run([{answer_id: 4343}])
        answer = stats[:answers].detect { |answer| answer[:id] == '4343' }
        answer[:responses].should == 1
      end
    end
  end
end

View File

@ -0,0 +1,15 @@
# Shared RSpec examples for the :correct metric.
#
# Expects the including spec to define `subject` as an analyzer whose #run
# output exposes a :correct count of fully-correct responses.
shared_examples '[:correct]' do
  it 'should count all fully correct responses' do
    stats = subject.run([
      { correct: "true" },
      { correct: true },
      { correct: false },
      { correct: nil },
      { correct: 'partial' },
      { correct: 'undefined' },
      { correct: 'defined' }
    ])

    # only "true" and true should register as fully correct
    stats[:correct].should == 2
  end
end

View File

@ -0,0 +1,34 @@
# Shared RSpec examples for the essay-style :full_credit metric.
#
# points_possible is pinned to a known value (3) so the examples can reason
# about "full credit or higher".
shared_examples 'essay [:full_credit]' do
  let :question_data do
    { points_possible: 3 }
  end

  it 'should count all students who received full credit' do
    output = subject.run([
      { points: 3 }, { points: 2 }, { points: 3 }
    ])

    output[:full_credit].should == 2
  end

  it 'should count students who received more than full credit' do
    output = subject.run([
      { points: 3 }, { points: 2 }, { points: 5 }
    ])

    output[:full_credit].should == 2
  end

  it 'should be 0 otherwise' do
    output = subject.run([
      { points: 1 }
    ])

    output[:full_credit].should == 0
  end

  it 'should count those who exceed the maximum points possible' do
    output = subject.run([{ points: 5 }])
    output[:full_credit].should == 1
  end
end

View File

@ -0,0 +1,11 @@
# Shared RSpec examples for the essay-style :responses metric: a response
# counts only if its "text" field is non-blank.
shared_examples 'essay [:responses]' do
  it 'should count students who have written anything' do
    subject.run([{ text: 'foo' }])[:responses].should == 1
  end

  it 'should not count students who have written a blank response' do
    subject.run([{ }])[:responses].should == 0
    subject.run([{ text: nil }])[:responses].should == 0
    subject.run([{ text: '' }])[:responses].should == 0
  end
end

View File

@ -0,0 +1,16 @@
# Shared RSpec examples for the :incorrect metric.
#
# Expects the including spec to define `subject` as an analyzer whose #run
# output exposes an :incorrect count.
shared_examples '[:incorrect]' do
  it 'should count all incorrect responses' do
    stats = subject.run([
      { correct: "true" },
      { correct: true },
      { correct: 'false' },
      { correct: false },
      { correct: nil },
      { correct: 'partial' },
      { correct: 'undefined' },
      { correct: 'defined' }
    ])

    # only 'false', false, and nil should register as incorrect
    stats[:incorrect].should == 3
  end
end

View File

@ -0,0 +1,10 @@
# Shared RSpec examples for the :partially_correct metric.
#
# Expects the including spec to define `subject` as an analyzer whose #run
# output exposes a :partially_correct count.
shared_examples '[:partially_correct]' do
  it 'should count all partially correct responses' do
    stats = subject.run([
      { correct: "true" },
      { correct: "partial" }
    ])

    stats[:partially_correct].should == 1
  end
end

View File

@ -0,0 +1,55 @@
{
"id": 21,
"position": 11,
"quiz_group_id": null,
"quiz_id": 8,
"question_name": "Question",
"question_type": "numerical_question",
"question_text": "<p>[Numerical:Exact] What's 9 plus 3, 2.5 minus 1, or 4 minus zero?</p>",
"points_possible": 4,
"correct_comments": "",
"incorrect_comments": "",
"neutral_comments": "",
"answers": [
{
"id": 4343,
"text": "",
"comments": "",
"weight": 100,
"numerical_answer_type": "exact_answer",
"exact": 12,
"margin": 0
},
{
"id": 6959,
"text": "",
"comments": "",
"weight": 100,
"numerical_answer_type": "range_answer",
"start": 3,
"end": 6
},
{
"id": 8617,
"text": "",
"comments": "",
"weight": 100,
"numerical_answer_type": "exact_answer",
"exact": 4,
"margin": 0
},
{
"id": 6704,
"text": "",
"comments": "",
"weight": 100,
"numerical_answer_type": "exact_answer",
"exact": 1.5,
"margin": 1
}
],
"variables": null,
"formulas": null,
"matches": null,
"matching_answer_incorrect_matches": null
}

View File

@ -17,13 +17,15 @@ RSpec.configure do |config|
config.color = true
config.order = 'random'
support_files = File.join(
File.dirname(__FILE__),
'canvas_quiz_statistics',
'support',
'**',
'*.rb'
)
File.join(File.dirname(__FILE__), 'canvas_quiz_statistics').tap do |cwd|
# spec support in support/
Dir.glob(File.join([
cwd, 'support', '**', '*.rb'
])).each { |file| require file }
Dir.glob(support_files).each { |file| require file }
# specs for shared metrics in analyzers/shared_metrics
Dir.glob(File.join([
cwd, 'analyzers', 'shared_metrics', '**', '*.rb'
])).each { |file| require file }
end
end