Created
May 17, 2010 14:24
-
-
Save j1n3l0/403811 to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#!/usr/bin/env perl | |
# | |
# Author:: Nelo Onyiah (mailto:io1@sanger.ac.uk) | |
# | |
# This is an example script demonstrating how you can | |
# interact with the I-DCC Targeting Repository via its | |
# web services interface. | |
# | |
# In this Perl example we use the REST::Client module | |
# to handle the HTTP requests, and use JSON as our data | |
# encapsulation method. | |
# | |
# If you plan to use Perl to upload your data, you can | |
# simply copy this script and just modify the function | |
# 'get_alleles_to_load' to read in real data from your | |
# systems. | |
# | |
use Modern::Perl; | |
use Data::Dumper; | |
use JSON; | |
use REST::Client; | |
use URI; | |
#
# Define our connection parameters
#
my $user = 'user';
my $pass = 'pass';
my $base = 'www.i-dcc.org/labs/targ_rep';

# Embed the HTTP basic-auth credentials directly in the repository
# URL that REST::Client will use as its host.
my $repository_url = URI->new("http://$user:$pass\@$base")->as_iri;
my $client         = REST::Client->new( { host => $repository_url } );

#
# Set up our REST::Client - all of our payloads are JSON encoded.
#
$client->addHeader( content_type => 'application/json' );
#
# Methods
#
# Generic helper method for handling the web calls to the
# repository. Gives us a single place to handle service errors.
#
# Named options:
#   method  - HTTP verb (case-insensitive); defaults to 'GET'
#   url     - URL (relative to the client's host) to call
#   payload - request body, sent only for POST/PUT requests
#
# Returns the decoded JSON response content.
sub request {
    my %options = @_;

    # Parenthesize so the default is applied BEFORE uc(): the bare
    # form "uc $options{method} || 'GET'" binds as
    # "(uc $options{method}) || 'GET'" and warns when no method is
    # given.  Uppercasing matches the REST::Client method names.
    my $method = uc( $options{method} || 'GET' );
    my @args   = ( URI->new( $options{url} )->as_iri );

    # Only POST and PUT requests carry a body.
    if ( $method =~ m/POST|PUT/ and defined $options{payload} ) {
        push @args, $options{payload};
    }

    $client->$method(@args);

    # 200 (OK) and 201 (Created) are the only codes we expect on
    # success; dump anything else so the caller can see what failed.
    unless ( $client->responseCode == 200 or $client->responseCode == 201 ) {
        say "Error communicating with repository:\n",
            Dumper {
            responseCode    => $client->responseCode,
            responseContent => $client->responseContent,
            };
    }

    return from_json( $client->responseContent );
}
# In your scripts, this would be where you access your
# database(s) and build up structures of your alleles and
# products.
#
# Returns an array reference of allele hashes; the second example
# also carries targeting vectors, ES cells and genbank files.
sub get_alleles_to_load {
    my %pipelines = @_;

    # Fields shared by every example allele below.
    my %common = (
        'pipeline_id'        => 1,
        'cassette'           => "L1L2_gt2",
        'backbone'           => "L3L4_pZero_kan",
        'assembly'           => "NCBIM37",
        'strand'             => "+",
        'design_type'        => "Knock Out",
        'design_subtype'     => "Frameshift",
        'homology_arm_start' => 10,
        'homology_arm_end'   => 10000,
        'cassette_start'     => 50,
        'cassette_end'       => 500,
        'loxp_start'         => 1000,
        'loxp_end'           => 1500,
    );

    return [
        {
            %common,
            'mgi_accession_id'  => "MGI:44556",
            'project_design_id' => 10000,
            'chromosome'        => "11",
        },
        {
            %common,
            'mgi_accession_id'  => "MGI:456789",
            'project_design_id' => 2,
            'chromosome'        => "1",
            'targeting_vectors' => [
                {
                    'name'                => 'PRPGD001',
                    'intermediate_vector' => 'PGS001',
                    'ikmc_project_id'     => 1,
                    'es_cells'            =>
                        [ map { { 'name' => $_ } } qw(EPD001 EPD002 EPD003) ],
                },
                {
                    'name'                => 'PRPGD002',
                    'intermediate_vector' => 'PGS001',
                    'ikmc_project_id'     => 1,
                    'es_cells'            =>
                        [ map { { 'name' => $_ } } qw(EPD004 EPD005 EPD006) ],
                },
            ],
            'genbank_file' => {
                'escell_clone'     => "A GENBANK FILE IN PLAIN TEXT",
                'targeting_vector' => "A GENBANK FILE IN PLAIN TEXT",
            },
        },
    ];
}
# Helper function to interact with the web services and find
# an allele.
#
# Returns the single matching allele hash, dies if more than one
# matches, and returns nothing when no match is found.
sub find_allele {
    my %allele = @_;
    no warnings 'uninitialized';

    # Build the search URL via URI->query_form so that query values
    # (e.g. "MGI:44556" or allele symbols with special characters)
    # are properly percent-escaped.  Missing fields are sent as
    # empty strings, matching the old interpolate-undef behaviour.
    my $search_uri = URI->new('alleles.json');
    $search_uri->query_form(
        allele_symbol_superscript => $allele{allele_symbol_superscript} // '',
        ikmc_project_id           => $allele{ikmc_project_id}           // '',
        mgi_accession_id          => $allele{mgi_accession_id}          // '',
    );
    my $allele_data = request( url => $search_uri->as_string );

    # Check that we have a unique allele - the repository does
    # handle this for us, but you can't be too cautious!
    if ( @{$allele_data} > 1 ) {
        die "Error: found more than one allele for "
            . "$allele{mgi_accession_id} | $allele{allele_symbol_superscript}";
    }
    elsif ( @{$allele_data} == 1 ) {
        return $allele_data->[0];
    }
    else {
        return;
    }
}
# Helper function to interact with the web services and create
# an allele.
#
# Returns the decoded response from the repository.
sub create_allele {
    my %allele = @_;

    # POST the allele as JSON; the service expects the data wrapped
    # in a "molecular_structure" envelope.
    return request(
        url     => 'alleles.json',
        method  => 'post',
        payload => to_json( { molecular_structure => \%allele } ),
    );
}
# Helper function to interact with the web services and find
# a product.
#
# Returns the single matching ES cell hash, dies if more than one
# matches, and returns nothing when no match is found.
sub find_es_cell {
    my %es_cell = @_;
    no warnings 'uninitialized';

    # Build the search URL via URI->query_form so the name is
    # percent-escaped (consistent with find_allele); a missing name
    # is sent as an empty string.
    my $search_uri = URI->new('es_cells.json');
    $search_uri->query_form( name => $es_cell{name} // '' );
    my $es_cell_data = request( url => $search_uri->as_string );

    # Check that we have a unique allele - the repository does
    # handle this for us, but you can't be too cautious!
    if ( @{$es_cell_data} > 1 ) {
        die "Error: found more than one es_cell for $es_cell{name}";
    }
    elsif ( @{$es_cell_data} == 1 ) {
        return $es_cell_data->[0];
    }
    else {
        return;
    }
}
# Helper function to interact with the web services and create
# a product.
#
# Returns the decoded response from the repository.
sub create_es_cell {
    my %es_cell = @_;

    # POST the ES cell as JSON; the service expects the data wrapped
    # in an "es_cell" envelope.
    return request(
        url     => 'es_cells.json',
        method  => 'post',
        payload => to_json( { es_cell => \%es_cell } ),
    );
}
#
# Main body of script
#
# First communicate with the repository and get a list
# of all of the pipelines represented and their details.
#
# (We're storing the pipeline details in a hash, keyed
# by the pipeline name for use in the allele building).
my $pipeline_data = request( url => 'pipelines.json' );
my %pipelines     = map { $_->{name} => $_ } @{$pipeline_data};

# Now we define the alleles and products that we want
# to load into the repository.
#
# (In your code you will need to retrieve data from
# your production systems here).
#
# NOTE that we also define the pipeline_id here when
# constructing our object.
my $alleles_ref = get_alleles_to_load(%pipelines);

#
# Now we shall loop through each of our alleles and
# create entries in the targeting repository for both
# the alleles and their products.
#
for my $allele ( @{$alleles_ref} ) {
    # Only create the allele when it is not already in the database.
    my $existing = find_allele( %{$allele} );
    create_allele( %{$allele} ) if !$existing;
}

exit 0;
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment