Add statistics for manually graded question types

This adds the ability to see the breakdown of grades for essay and
file-upload question types. Graded answers are split into the top 27%,
the middle 46%, and the bottom 27% of scores, plus a bucket for any
ungraded answers, and the buckets are displayed in table format.
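
For reference, each essay/file-upload question's statistics now expose
an answers array with one entry per bucket, roughly shaped like this
(the values below are illustrative; the analyzer's doc comment in the
diff gives the full synopsis):

  { id: "top",      responses: 2, score: 10, full_credit: true,  user_names: ["Joe9", "Joe10"] }
  { id: "middle",   responses: 5, score: 8,  full_credit: false, user_names: ["Joe3", "Joe4", "Joe6", "Joe7", "Joe8"] }
  { id: "bottom",   responses: 2, score: 1,  full_credit: false, user_names: ["Joe1", "Joe2"] }
  { id: "ungraded", responses: 3, score: 0,  full_credit: false, user_names: ["Joe0", "Joe0", "Joe0"] }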

Refs CNVS-25737
Closes CNVS-26756

Test Plan:
  - Check that essay+file_upload question type tables look good
  - Buckets show as "correct" if they contain a student score greater
    than or equal to the question's points possible
  - Buckets can contain more than 27% of the students when scores at a
    bucket boundary are identical (e.g. all 100% scores land in the top
    bucket, even if every score is 100%); see the sketch after this
    test plan
  - New answer tables are accessible like other tables
  - Buckets are explicitly ordered as top, middle, bottom, ungraded
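
A minimal standalone Ruby sketch of the tie-inclusive bucketing (the
bucket_scores helper and the sample scores are hypothetical; the real
metric in the analyzer below also tracks user names, response counts,
full credit, and an ungraded bucket):

  def bucket_scores(scores)
    ranked = scores.sort                      # ascending by points
    buckets = { "top" => [], "middle" => [], "bottom" => [] }
    cutoffs = [["top", 0.73], ["middle", 0.27], ["bottom", 0.0]]
    previous_floor = ranked.length
    cutoffs.each do |name, cutoff|
      floor = (cutoff * ranked.length).round
      floor_score = ranked[floor]
      # pull tied scores into this bucket so identical scores never straddle a boundary
      floor -= 1 while floor > 0 && ranked[floor - 1] == floor_score
      buckets[name] = ranked[floor...previous_floor]
      previous_floor = floor
    end
    buckets
  end

  bucket_scores([1, 2, 3, 4, 6, 7, 8, 9, 10])
  # => { "top" => [9, 10], "middle" => [3, 4, 6, 7, 8], "bottom" => [1, 2] }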

Change-Id: I62798938b9176de97df2e498a2f9b3b02a81086c
Reviewed-on: https://gerrit.instructure.com/70907
Tested-by: Jenkins
Reviewed-by: John Corrigan <jcorrigan@instructure.com>
Reviewed-by: Davis McClellan <dmcclellan@instructure.com>
QA-Review: Michael Hargiss <mhargiss@instructure.com>
Product-Review: Jason Sparks <jsparks@instructure.com>
Ryan Taylor 2016-01-22 21:25:52 -07:00
parent 94eb0d90c3
commit ccddb9d036
7 changed files with 169 additions and 39 deletions

View File

@@ -40,16 +40,16 @@ define(function(require) {
maxWidth: 150,
// animeDuration: 500
useAnswerBuckets: false
};
},
buildChartParams: function(answers) {
buildParams: function(answers) {
return answers.map(function(answer) {
return {
id: ''+answer.id,
count: answer.responses,
correct: answer.correct,
correct: answer.correct || answer.full_credit,
special: SPECIAL_DATUM_IDS.indexOf(answer.id) > -1,
answer: answer
};
@@ -57,17 +57,18 @@
},
render: function() {
var data = this.buildChartParams(this.props.answers);
var data = this.buildParams(this.props.answers);
var highest = d3.max(_.map(data, 'count'));
var xScale = d3.scale.linear()
.domain([ highest, 0 ])
.range([ this.props.maxWidth, 0 ]);
var visibilityThreshold = Math.max(this.props.visibilityThreshold, xScale(highest) / 100.0);
var graphParams = {
var globalParams = {
xScale: xScale,
visibilityThreshold: visibilityThreshold,
maxWidth: this.props.maxWidth,
barHeight: this.props.barHeight
barHeight: this.props.barHeight,
useAnswerBuckets: this.props.useAnswerBuckets
};
return (
@@ -77,17 +78,18 @@
</caption>
{this.renderTableHeader()}
<tbody>
{this.renderTableRows(data, graphParams)}
{this.renderTableRows(data, globalParams)}
</tbody>
</table>
);
},
renderTableHeader: function() {
var firstColumnLabel = this.props.useAnswerBuckets ? I18n.t("Answer Description") : I18n.t("Answer Text");
return (
<thead className="screenreader-only">
<tr>
<th scope="col">{I18n.t("Answer Text")}</th>
<th scope="col">{firstColumnLabel}</th>
<th scope="col">{I18n.t("Number of Respondents")}</th>
<th scope="col">{I18n.t("Percent of respondents selecting this answer")}</th>
<th scope="col" aria-hidden>{I18n.t("Answer Distribution")}</th>
@@ -96,10 +98,10 @@
);
},
renderTableRows: function(data, graphParams) {
renderTableRows: function(data, globalParams) {
return data.map(function(datum) {
return (
<AnswerRow key={datum.id} datum={datum} graphSettings={graphParams} />
<AnswerRow key={datum.id} datum={datum} globalSettings={globalParams} />
);
});
}

View File

@@ -9,7 +9,7 @@ define(function(require) {
var AnswerRow = React.createClass({
propTypes: {
datum: React.PropTypes.object.isRequired,
graphSettings: React.PropTypes.object.isRequired
globalSettings: React.PropTypes.object.isRequired
},
getInitialState: function() {
@@ -46,15 +46,36 @@ define(function(require) {
this.setState({neverLoaded: false});
},
getScoreValueDescription: function(datum) {
var string;
switch (datum.id) {
case "top":
string = I18n.t("Answers which scored in the top 27%");
break;
case "middle":
string = I18n.t("Answers which scored in the middle 46%");
break;
case "bottom":
string = I18n.t("Answers which scored in the bottom 27%");
break;
case "ungraded":
string = I18n.t("Ungraded answers");
break;
default:
string = I18n.t("Unknown answers");
}
return string;
},
getBarStyles: function() {
var width = this.props.graphSettings.xScale(this.props.datum.count) + this.props.graphSettings.visibilityThreshold + "px";
var width = this.props.globalSettings.xScale(this.props.datum.count) + this.props.globalSettings.visibilityThreshold + "px";
// Hacky way to get initial state width animations
if (this.state.neverLoaded) {
width = "0px";
}
return {
width: width,
height: this.props.graphSettings.barHeight - 2 + "px"
height: this.props.globalSettings.barHeight - 2 + "px"
};
},
@@ -65,10 +86,11 @@ define(function(require) {
render: function() {
var datum = this.props.datum;
var answerText = this.props.globalSettings.useAnswerBuckets ? this.getScoreValueDescription(datum) : datum.answer.text;
return (
<tr className={datum.correct ? 'correct' : undefined}>
<th scope="row" className="answer-textfield">
{datum.answer.text}
{answerText}
</th>
<td className="respondent-link">
{this.dialogBuilder(datum.answer)}
@@ -76,7 +98,7 @@
<td className="answer-ratio">
{datum.answer.ratio} <sup>%</sup>
</td>
<td className="answer-distribution-cell" aria-hidden style={{width: this.props.graphSettings.maxWidth}}>
<td className="answer-distribution-cell" aria-hidden style={{width: this.props.globalSettings.maxWidth}}>
{this.renderBarPlot()}
</td>
</tr>

View File

@@ -5,18 +5,11 @@ define(function(require) {
// var CorrectAnswerDonut = require('jsx!../charts/correct_answer_donut');
var QuestionHeader = require('jsx!./header');
var I18n = require('i18n!quiz_statistics');
var AnswerTable = require('jsx!./answer_table');
var Essay = React.createClass({
render: function() {
var props = this.props;
// var correctResponseRatio;
//
// if (props.participantCount <= 0) {
// correctResponseRatio = 0;
// }
// else {
// correctResponseRatio = props.fullCredit / props.participantCount;
// }
return(
<Question>
@@ -38,12 +31,10 @@
</div>
<div className="grid-row">
<div className="col-sm-8 question-bottom-left">
{ /* TODO: render an essay specific answer table here */ }
<AnswerTable answers={this.props.answers} useAnswerBuckets={true} />
{this.renderLinkButton()}
</div>
<div className="col-sm-4 question-bottom-right">
{/* <CorrectAnswerDonut correctResponseRatio={correctResponseRatio} /> */ }
</div>
<div className="col-sm-4 question-bottom-right"></div>
</div>
</Question>
);

View File

@@ -31,17 +31,7 @@
aria-hidden
dangerouslySetInnerHTML={{ __html: this.props.questionText }} />
</div>
<div className="col-sm-4 question-top-right">
<DiscriminationIndex
discriminationIndex={this.props.discriminationIndex}
topStudentCount={this.props.topStudentCount}
middleStudentCount={this.props.middleStudentCount}
bottomStudentCount={this.props.bottomStudentCount}
correctTopStudentCount={this.props.correctTopStudentCount}
correctMiddleStudentCount={this.props.correctMiddleStudentCount}
correctBottomStudentCount={this.props.correctBottomStudentCount}
/>
</div>
<div className="col-sm-4 question-top-right"></div>
</div>
<div className="grid-row">
<div className="col-sm-8 question-bottom-left">

View File

@@ -59,7 +59,6 @@ define(function(require) {
answerSets: answerSetFixture,
});
debugger;
expect(find('.answer-set-tabs .active').innerText).toMatch('color');
var answerTextMatches = findAll("th.answer-textfield");
expect(answerTextMatches[0].innerText).toEqual('red');

View File

@@ -74,5 +74,87 @@ module CanvasQuizStatistics::Analyzers
{ score: score, count: point_distribution[score] }
end.sort_by { |v| v[:score] || -1 }
end
# Statistics for answers which scored specific values
#
# @return [Hash]
#
# Output synopsis:
#
# ```json
# {
# "answers": [
# {
# // Number of students who picked this answer.
# "responses": 3,
#
# // The names of the students who scored this value.
# "user_names": ["John", "Jim", "Jenny"],
#
# // The score shared by these students
# "score": 0.5,
#
# // The id (or type) of the answer bucket
# // The top and bottom buckets represent the respective extreme 27%
# // ends of the student performance.
# // The middle represents the middle 46% in performance across the item.
# "id": "bottom", # one of %w|bottom top middle ungraded|
#
# // If the score represents full credit on the item
# "full_credit": true,
# }
# ]
# }
# ```
metric :answers do |responses|
answers = Hash.new do |h,k|
h[k] = {
user_names: [],
responses: 0
}
end
buckets = [
[:top, 0.73],
[:middle, 0.27],
[:bottom, 0]
]
graded_responses = []
ungraded_responses = []
responses.each {|r| r[:correct] == 'defined' ? graded_responses << r : ungraded_responses << r}
ranked_responses_by_score = graded_responses.sort_by {|h| h[:points]}
previous_floor = ranked_responses_by_score.length
buckets.each do |name, cutoff|
floor = (cutoff * ranked_responses_by_score.length).round
floor_score = ranked_responses_by_score[floor].try{|h| h[:points]}
# include all tied users in this bucket
floor -= 1 while (floor > 0) && (ranked_responses_by_score[floor - 1][:points] == floor_score)
# Set bucket for selected buckets
ranked_responses_by_score[floor...previous_floor].map {|r| r[:performance_bucket] = name.to_s}
previous_floor = floor
end
ungraded_responses.each {|r| r[:performance_bucket] = "ungraded"}
sorted_graded_responses = graded_responses.sort_by {|h| h[:performance_bucket]}.reverse
(sorted_graded_responses + ungraded_responses).each do |response|
hash = answers[response[:performance_bucket]]
hash[:id] ||= response[:performance_bucket]
hash[:score] ||= response[:points]
# This will indicate correct if any point value reaches 100%
hash[:full_credit] ||= response[:points].to_f >= @question_data[:points_possible].to_f
hash[:user_names] << response[:user_name]
hash[:responses] += 1
end
answers.values
end
end
end

View File

@@ -68,6 +68,50 @@ describe CanvasQuizStatistics::Analyzers::Essay do
end
end
describe ':answers' do
let :question_data do
{ points_possible: 10 }
end
it 'should group items into answer type buckets with appropriate data' do
output = subject.run([
{ points: 0, correct: 'undefined', user_name: 'Joe0'},
{ points: 0, correct: 'undefined', user_name: 'Joe0'},
{ points: 0, correct: 'undefined', user_name: 'Joe0'},
{ points: 1, correct: 'defined', user_name: 'Joe1'},
{ points: 2, correct: 'defined', user_name: 'Joe2'},
{ points: 3, correct: 'defined', user_name: 'Joe3'},
{ points: 4, correct: 'defined', user_name: 'Joe4'},
{ points: 6, correct: 'defined', user_name: 'Joe6'},
{ points: 7, correct: 'defined', user_name: 'Joe7'},
{ points: 8, correct: 'defined', user_name: 'Joe8'},
{ points: 9, correct: 'defined', user_name: 'Joe9'},
{ points: 10, correct: 'defined', user_name: 'Joe10'},
])
answers = output[:answers]
bottom = answers[2]
expect(bottom[:responses]).to eq 2
expect(bottom[:user_names]).to include('Joe1')
expect(bottom[:full_credit]).to be_false
middle = answers[1]
expect(middle[:responses]).to eq 5
expect(middle[:user_names]).to include('Joe6')
expect(middle[:full_credit]).to be_false
top = answers[0]
expect(top[:responses]).to eq 2
expect(top[:user_names]).to include('Joe10')
expect(top[:full_credit]).to be_true
undefined = answers[3]
expect(undefined[:responses]).to eq 3
expect(undefined[:user_names].uniq).to eq ['Joe0']
expect(undefined[:full_credit]).to be_false
end
end
describe ':point_distribution' do
it 'should map each score to the number of receivers' do
output = subject.run([