Skip to content

Instantly share code, notes, and snippets.

@psychemedia
Created April 28, 2020 16:24
Show Gist options
  • Save psychemedia/e73debfa00c1c1a3664afc30ae3db3c8 to your computer and use it in GitHub Desktop.
First pass at trying to parse Moodle CodeRunner quiz questions
Display the source blob
Display the rendered blob
Raw
{
"cells": [
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"xml=\"\"\"\n",
"<quiz>\n",
"<!-- question: 850758 -->\n",
" <question type=\"coderunner\">\n",
" <name>\n",
" <text>How many items in a list?</text>\n",
" </name>\n",
" <questiontext format=\"html\">\n",
" <text><![CDATA[<p>Write a function <code>how_many(list_in)</code> which returns the number of items in <code>list_in</code>.</p>\n",
"<p><br></p>]]></text>\n",
" </questiontext>\n",
" <generalfeedback format=\"html\">\n",
" <text><![CDATA[<p><b>len() </b>returns the length (the number of items) of an object. The argument may be a sequence (such as a string, bytes, tuple, list, or range) or a collection (such as a dictionary, set, or frozen set).<br></p>]]></text>\n",
" </generalfeedback>\n",
" <defaultgrade>1</defaultgrade>\n",
" <penalty>0</penalty>\n",
" <hidden>0</hidden>\n",
" <idnumber></idnumber>\n",
" <coderunnertype>python3</coderunnertype>\n",
" <prototypetype>0</prototypetype>\n",
" <allornothing>1</allornothing>\n",
" <penaltyregime>10, 20, ...</penaltyregime>\n",
" <precheck>0</precheck>\n",
" <showsource>0</showsource>\n",
" <answerboxlines>6</answerboxlines>\n",
" <answerboxcolumns>100</answerboxcolumns>\n",
" <answerpreload>def how_many(list_in):\n",
" '''returns the number of elements in list_in'''\n",
" </answerpreload>\n",
" <globalextra></globalextra>\n",
" <useace></useace>\n",
" <resultcolumns></resultcolumns>\n",
" <template></template>\n",
" <iscombinatortemplate></iscombinatortemplate>\n",
" <allowmultiplestdins></allowmultiplestdins>\n",
" <answer>def how_many(list_in):\n",
" '''returns the number of elements in list_in'''\n",
" return len(list_in)</answer>\n",
" <validateonsave>1</validateonsave>\n",
" <testsplitterre></testsplitterre>\n",
" <language></language>\n",
" <acelang></acelang>\n",
" <sandbox></sandbox>\n",
" <grader></grader>\n",
" <cputimelimitsecs></cputimelimitsecs>\n",
" <memlimitmb></memlimitmb>\n",
" <sandboxparams></sandboxparams>\n",
" <templateparams></templateparams>\n",
" <hoisttemplateparams>1</hoisttemplateparams>\n",
" <twigall>0</twigall>\n",
" <uiplugin></uiplugin>\n",
" <attachments>0</attachments>\n",
" <attachmentsrequired>0</attachmentsrequired>\n",
" <maxfilesize>10240</maxfilesize>\n",
" <filenamesregex></filenamesregex>\n",
" <filenamesexplain></filenamesexplain>\n",
" <displayfeedback>1</displayfeedback>\n",
" <testcases>\n",
" <testcase testtype=\"0\" useasexample=\"1\" hiderestiffail=\"0\" mark=\"1.0000000\" >\n",
" <testcode>\n",
" <text>print ( how_many(['fig', 'kiwi', 13.7]))</text>\n",
" </testcode>\n",
" <stdin>\n",
" <text></text>\n",
" </stdin>\n",
" <expected>\n",
" <text>3</text>\n",
" </expected>\n",
" <extra>\n",
" <text></text>\n",
" </extra>\n",
" <display>\n",
" <text>SHOW</text>\n",
" </display>\n",
" </testcase>\n",
" <testcase testtype=\"0\" useasexample=\"0\" hiderestiffail=\"0\" mark=\"1.0000000\" >\n",
" <testcode>\n",
" <text>print(how_many([]))</text>\n",
" </testcode>\n",
" <stdin>\n",
" <text></text>\n",
" </stdin>\n",
" <expected>\n",
" <text>0 </text>\n",
" </expected>\n",
" <extra>\n",
" <text></text>\n",
" </extra>\n",
" <display>\n",
" <text>SHOW</text>\n",
" </display>\n",
" </testcase>\n",
" <testcase testtype=\"0\" useasexample=\"0\" hiderestiffail=\"0\" mark=\"1.0000000\" >\n",
" <testcode>\n",
" <text>print(how_many([' ']))</text>\n",
" </testcode>\n",
" <stdin>\n",
" <text></text>\n",
" </stdin>\n",
" <expected>\n",
" <text>1</text>\n",
" </expected>\n",
" <extra>\n",
" <text></text>\n",
" </extra>\n",
" <display>\n",
" <text>SHOW</text>\n",
" </display>\n",
" </testcase>\n",
" <testcase testtype=\"0\" useasexample=\"1\" hiderestiffail=\"0\" mark=\"1.0000000\" >\n",
" <testcode>\n",
" <text>print(how_many([['only', 'count'], ['the' 'top'], ['level', 'lists']]))</text>\n",
" </testcode>\n",
" <stdin>\n",
" <text></text>\n",
" </stdin>\n",
" <expected>\n",
" <text>3</text>\n",
" </expected>\n",
" <extra>\n",
" <text></text>\n",
" </extra>\n",
" <display>\n",
" <text>SHOW</text>\n",
" </display>\n",
" </testcase>\n",
" <testcase testtype=\"0\" useasexample=\"0\" hiderestiffail=\"0\" mark=\"1.0000000\" >\n",
" <testcode>\n",
" <text>print(how_many([1, [2, 3, [4, 5], 6], 7]))</text>\n",
" </testcode>\n",
" <stdin>\n",
" <text></text>\n",
" </stdin>\n",
" <expected>\n",
" <text>3</text>\n",
" </expected>\n",
" <extra>\n",
" <text></text>\n",
" </extra>\n",
" <display>\n",
" <text>SHOW</text>\n",
" </display>\n",
" </testcase>\n",
" </testcases>\n",
" </question>\n",
"\n",
"</quiz>\n",
"\"\"\""
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"<Element 'quiz' at 0x108729fb0>"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import xml.etree.ElementTree as ET\n",
"\n",
"\n",
"root = ET.fromstring(xml)\n",
"root"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'quiz'"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"root.tag"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"question {'type': 'coderunner'}\n",
"name {}\n",
"questiontext {'format': 'html'}\n",
"generalfeedback {'format': 'html'}\n",
"defaultgrade {}\n",
"penalty {}\n",
"hidden {}\n",
"idnumber {}\n",
"coderunnertype {}\n",
"prototypetype {}\n",
"allornothing {}\n",
"penaltyregime {}\n",
"precheck {}\n",
"showsource {}\n",
"answerboxlines {}\n",
"answerboxcolumns {}\n",
"answerpreload {}\n",
"globalextra {}\n",
"useace {}\n",
"resultcolumns {}\n",
"template {}\n",
"iscombinatortemplate {}\n",
"allowmultiplestdins {}\n",
"answer {}\n",
"validateonsave {}\n",
"testsplitterre {}\n",
"language {}\n",
"acelang {}\n",
"sandbox {}\n",
"grader {}\n",
"cputimelimitsecs {}\n",
"memlimitmb {}\n",
"sandboxparams {}\n",
"templateparams {}\n",
"hoisttemplateparams {}\n",
"twigall {}\n",
"uiplugin {}\n",
"attachments {}\n",
"attachmentsrequired {}\n",
"maxfilesize {}\n",
"filenamesregex {}\n",
"filenamesexplain {}\n",
"displayfeedback {}\n",
"testcases {}\n"
]
}
],
"source": [
"for child in root:\n",
" print(child.tag, child.attrib)\n",
" for gchild in child:\n",
" print(gchild.tag, gchild.attrib)"
]
},
{
"cell_type": "code",
"execution_count": 31,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<p>Write a function <code>how_many(list_in)</code> which returns the number of items in <code>list_in</code>.</p>\n",
"<p><br></p>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"def how_many(list_in):\n",
" '''returns the number of elements in list_in'''\n",
" return len(list_in)\n",
"\n",
"print ( how_many(['fig', 'kiwi', 13.7])) 3\n",
"print(how_many([])) 0 \n",
"print(how_many([' '])) 1\n",
"print(how_many([['only', 'count'], ['the' 'top'], ['level', 'lists']])) 3\n",
"print(how_many([1, [2, 3, [4, 5], 6], 7])) 3\n"
]
}
],
"source": [
"from IPython.display import HTML\n",
"for question in root.findall('question'):\n",
" display(HTML(question.find('questiontext/text').text))\n",
" answer = question.find('answer').text\n",
" print(answer)\n",
" print()\n",
" tests = question.find('testcases')\n",
" for test in tests.findall('testcase'):\n",
" print(test.find('testcode/text').text, test.find('expected/text').text)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"There are a couple of ways we can evaluate the code represented by the string, specifically using `eval()` or `exec()`.\n",
"\n",
"- `exec()` doesn't return anything;\n",
"- `eval()` returns the final value;"
]
},
{
"cell_type": "code",
"execution_count": 32,
"metadata": {},
"outputs": [],
"source": [
"exec(answer)"
]
},
{
"cell_type": "code",
"execution_count": 33,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"3"
]
},
"execution_count": 33,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"how_many(['fig', 'kiwi', 13.7])"
]
},
{
"cell_type": "code",
"execution_count": 122,
"metadata": {},
"outputs": [],
"source": [
"%%capture my_answer --no-stderr\n",
"answers = []\n",
"for question in root.findall('question'):\n",
" tests = question.find('testcases')\n",
" for test in tests.findall('testcase'):\n",
" exec(test.find('testcode/text').text)\n",
" answers.append(test.find('expected/text').text)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The way the question is written and handled is really shonky... I suspect it gives all sorts of opportunities for false positives and false negatives."
]
},
{
"cell_type": "code",
"execution_count": 124,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"('3\\n0\\n1\\n3\\n3\\n', '3\\n0 \\n1\\n3\\n3\\n')"
]
},
"execution_count": 124,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"my_answer.stdout, '\\n'.join(answers)+'\\n'"
]
},
{
"cell_type": "code",
"execution_count": 57,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"('3\\n0\\n1\\n3\\n3\\n', '3\\n0 \\n1\\n3\\n3\\n')"
]
},
"execution_count": 57,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"my_answer.stdout, '\\n'.join(answers)+'\\n'"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"If we try to do some simple assertion tests on evaluated code, things all go wrong... we need to do type checking and whitespace cleaning (but might whitespace be significant?), etc."
]
},
{
"cell_type": "code",
"execution_count": 121,
"metadata": {},
"outputs": [],
"source": [
"import re\n",
" \n",
"for question in root.findall('question'):\n",
" tests = question.find('testcases')\n",
" for test in tests.findall('testcase'):\n",
" _test = test.find('testcode/text').text\n",
" print(_test, '..')\n",
" matches = re.findall(r\"print\\s*\\(\\s*(.*)\\s*\\)\", _test)\n",
" if matches:\n",
" print(matches[0])\n",
" answer = eval(matches[0])\n",
" \n",
" #Hack the assertion so tests pass - this leads to a loss of information\n",
" # and suggests there's something wrong with how answers are represented?\n",
" assert str(answer) == test.find('expected/text').text.strip()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Addenda - Possibly Useful Fragments"
]
},
{
"cell_type": "code",
"execution_count": 112,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'hello\\n'"
]
},
"execution_count": 112,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"#The following replicates the %%capture \n",
"\n",
"#https://stackoverflow.com/a/3906309/454773\n",
"import sys\n",
"from io import StringIO\n",
"import contextlib\n",
"\n",
"\n",
"@contextlib.contextmanager\n",
"def stdoutIO(stdout=None):\n",
" old = sys.stdout\n",
" if stdout is None:\n",
" stdout = StringIO()\n",
" sys.stdout = stdout\n",
" yield stdout\n",
" sys.stdout = old\n",
" \n",
"with stdoutIO() as s:\n",
" exec('print(\"hello\")')\n",
" \n",
"s.getvalue()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.6"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment