Skip to content

Instantly share code, notes, and snippets.

@neuromusic
Last active January 22, 2016 06:04
Show Gist options
  • Save neuromusic/b59d6fe6f8a2e76d2e04 to your computer and use it in GitHub Desktop.
## The subject object. Importantly, the subject object manages DATA & the
## history of the subject's experiences
# BUG FIX: original read ``Subect`` -- a typo for ``Subject`` (the builder
# below takes a ``subject=`` argument). Confirm the real class name in the
# package before running.
subj = Subject(
    name='B9999',
    store='json',  # storage backend for trial history; also e.g. csv, postgres
    path='/home/bird/opdat/B9999',  # where this subject's data lives on disk
)
## Panel objects are defined in ~/.pyoperant/panel_config.py
## Panels have methods for manipulating panels. The "availability" of a panel
## (that is, light schedules and trial schedules) are governed by the panel
## object
# NOTE(review): ``get_panel`` is a project helper not shown here; both
# ``schedule`` and ``light_schedule`` are set to 'sun' -- presumably both
# follow local sunrise/sunset; confirm whether passing both is intentional.
panel = get_panel(id=7,schedule='sun',light_schedule='sun')
## OK, let's construct a block.
##
## blocks are built from a list of dictionaries which contain the possible trial
## conditions.
condition_list = [
    {'stimulus': 'a.wav', 'class': 'left'},
    {'stimulus': 'b.wav', 'class': 'right'},
]
condition_list *= 5
# BUG FIX: the original used ``condition_list += {...}``, which extends the
# list with the dict's *keys* (the strings 'stimulus' and 'class'), not with
# the dict itself. Append the probe condition as a single element instead.
condition_list.append({'stimulus': 'c.wav', 'class': 'probe'})  # probe
# Final list: 10 copies each of (5x left + 5x right + 1x probe) = 110 trials.
condition_list *= 10
# trial logic (that is, which trial comes next) is managed by the TrialQueue.
# All trial queues require access to the trial history (via the subject
# instance)
# NOTE(review): ``RandomQueue`` is a project class not defined in this sketch;
# ``correction_trials=True`` presumably enables the correction-trial behavior
# used in the loop below -- confirm against the queue implementation.
queue = RandomQueue(
condition_list=condition_list,
correction_trials=True,
)
# Consequence templates: each maps a consequence name to its parameter
# (duration in seconds) or False when that consequence is disabled.
CORRECT = dict(feed=2.0, flash=1.0, timeout=False)   # feed 2 s + flash 1 s
NOFEED = dict(feed=False, flash=1.0, timeout=False)  # flash only, no feed
WRONG = dict(feed=False, flash=False, timeout=10.0)  # 10 s timeout, no feed
# each consequence can get its own reinforcement schedule. we are only going to
# implement a schedule for feeds
# NOTE(review): ``VariableRatio`` is a project class (not shown). ``ratio=3``
# presumably means a VR3 schedule (reinforce on average every 3rd correct
# response) -- confirm against the class definition.
reinforcement = VariableRatio(ratio=3)
# Trial objects maintain necessary information about how to run a trial.
# for each consequence (feed, flash, timeout, etc) it knows what to do on a panel
trial_builder = TwoAltChoiceBuilder(
    # BUG FIX: original passed ``subject=subject`` but the instance created
    # above is named ``subj`` -- ``subject`` is undefined at this point.
    subject=subj,
    panel=panel,
    queue=queue,  # optional
    reinforcement=reinforcement,
    # NOTE(review): ``consequate`` and ``correction_consequate`` are never
    # defined in this sketch -- presumably mappings built from the
    # CORRECT/NOFEED/WRONG templates above; define them before running.
    consequate=consequate,
    correction=correction_consequate,
)
# Run the session: draw trial conditions from the queue, run each trial,
# persist the result, then optionally re-run correction trials until the
# subject answers correctly.
# NOTE(review): indentation was lost in the original paste; the structure
# below is reconstructed from the if/elif/while syntax -- confirm nesting
# (in particular whether the correction loop is inside the for loop).
for trial_parameters in queue:
    # Choose the consequence attached to each response port by trial class.
    if trial_parameters['class'] == 'probe':
        # Probe trials: reward or punish at random, identical on both sides.
        on_left = random.choice([CORRECT, WRONG])
        on_right = on_left
    elif trial_parameters['class'] == 'left':
        # Correct side is rewarded only when the reinforcement schedule
        # says so; otherwise a correct response goes unrewarded (NOFEED).
        on_left = CORRECT if reinforcement.consequate(correct=True) else NOFEED
        on_right = WRONG
    elif trial_parameters['class'] == 'right':
        on_left = WRONG
        on_right = CORRECT if reinforcement.consequate(correct=True) else NOFEED
    trial = Trial(
        panel=panel,
        stimulus=trial_parameters['stimulus'],
        on_left=on_left,
        on_right=on_right,
    )
    trial.run()
    subj.save(trial)
    # Only normal, non-probe trials advance the reinforcement schedule.
    # NOTE(review): 'type' is never set in condition_list above -- presumably
    # the queue adds it; confirm.
    if trial_parameters['type'] == 'normal' and trial_parameters['class'] != 'probe':
        reinforcement.update(trial.correct)
    # NOTE(review): ``intertrial_min`` is not defined in this sketch.
    time.sleep(intertrial_min)
    # Correction trials: repeat the same stimulus, unrewarded, until the
    # subject responds correctly.
    # NOTE(review): ``correction_trials`` was passed to the queue as a kwarg
    # but is not a variable here -- define it (or read it off the queue).
    if correction_trials:
        while not trial.correct:
            if trial_parameters['type'] == 'correction':
                if trial_parameters['class'] == 'left':
                    on_left = NOFEED
                    on_right = WRONG
                elif trial_parameters['class'] == 'right':
                    on_left = WRONG
                    on_right = NOFEED
            trial = Trial(
                panel=panel,
                stimulus=trial_parameters['stimulus'],
                on_left=on_left,
                on_right=on_right,
            )
            trial.run()
            subj.save(trial)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment