General documentation: https://www.gnu.org/software/screen/
Download from: https://ftp.gnu.org/gnu/screen/
Compiling instruction: https://www.gnu.org/software/screen/manual/html_node/Compiling-Screen.html
#!/usr/bin/env python
"""Find the private charts in the raw MongoDB backup of chart records.

Intended output (per the original comments): the 0-indexed positions,
within the raw backup JSON, of charts marked private.  Only the setup
survives here -- the detection/printing logic was truncated in the
original file.  Trailing ` | |` extraction artifacts were removed.
"""
import json

# Re-ordered charts file and the raw MongoDB backup it was derived from.
reordered = '/data/predict1/charts1.json'
raw = '/data/predict1/predict-mongodb-backup/charts_20240612.json'

# TODO(review): the logic that loads `raw`, identifies private charts,
# and prints their 0-indexed indices was cut off in the original source.
General documentation: https://www.gnu.org/software/screen/
Download from: https://ftp.gnu.org/gnu/screen/
Compiling instruction: https://www.gnu.org/software/screen/manual/html_node/Compiling-Screen.html
# NOTE(review): fragment is truncated -- the body of `if pd.isna(e):` is
# missing, and the trailing ` | |` / ` |` tokens look like extraction
# artifacts (the lines are not valid Python as-is).  Left byte-identical.
# Purpose (inferred from filenames, TODO confirm): compare alias columns in
# the CTQ (pennebakerctq01) definitions CSV against the CNB NDAR codebook.
import pandas as pd | |
df=pd.read_csv('pennebakerctq01_definitions.csv') | |
dfgiven=pd.read_excel('cnb_ndar_codebook_v5.xlsx') | |
# unique alias values; may contain NaN for rows with no alias
_aliases=df['Aliases'].unique() | |
# accumulator for cleaned alias values (population logic truncated)
__aliases=[] | |
for e in _aliases: | |
if pd.isna(e): |
#!/bin/bash
# RSA-encrypt and decrypt a small message using an existing SSH keypair
# (git_rsa / git_rsa.pub).  Trailing ` | |` extraction artifacts removed.
#
# 1. Export the OpenSSH public key as a PKCS8 PEM file so OpenSSL can use it.
ssh-keygen -e -m PKCS8 -f git_rsa.pub > git_rsa.pem
# 2. Encrypt message.txt with the PEM public key.
#    NOTE(review): `openssl rsautl` is deprecated since OpenSSL 3.0;
#    newer systems should use `openssl pkeyutl` instead.
openssl rsautl -encrypt -pubin -inkey git_rsa.pem -in message.txt -out message.enc
# 3. Decrypt with the matching private key.
openssl rsautl -decrypt -inkey git_rsa -in message.enc
# NOTE(review): fragment is truncated -- the loop body after `rc=...` is
# missing, and the trailing ` | |` / ` |` tokens look like extraction
# artifacts (the lines are not valid Python as-is).  Left byte-identical.
import pandas as pd | |
# cd /data/predict1/to_nda/nda-submissions | |
# Pronet blood/saliva rack manifest -- column schema not visible here.
df=pd.read_csv('blood_saliva_rack_Pronet.csv') | |
# indices of rows of interest (selection criterion truncated in original)
index=[] | |
for i,row in df.iterrows(): | |
rc=row['Rack Code'] |
#!/usr/bin/env python
"""Re-shape a CHR-IC export CSV toward the REDCap system data format.

Usage: script.py <input.csv>

Reads the CSV named on the command line with every column as a string,
renames `chric_record_id` -> `participant_id`, and stamps the format
version column with 1.  Further processing was truncated in the
original file; trailing ` | |` extraction artifacts were removed.
"""
import sys

import pandas as pd

# dtype=str keeps IDs and codes verbatim (no numeric coercion).
df = pd.read_csv(sys.argv[1], dtype=str)

df1 = df.copy()
df1.rename(columns={'chric_record_id': 'participant_id'}, inplace=True)
df1["redcap_system_data_format_version"] = 1
#!/usr/bin/env python
"""Count Pronet subjects missing the inclusionexclusion_criteria_review form.

Each PROTECTED/<site>/raw/<subject> directory corresponds to one subject.
The counting logic that answers the printed question was truncated in the
original file; trailing ` | |` extraction artifacts were removed.
"""
from glob import glob
from os.path import isfile, basename

import pandas as pd

subjects = glob('/data/predict1/data_from_nda/Pronet/PHOENIX/PROTECTED/*/raw/*')
# keep only the subject directory names
subjects = [basename(s) for s in subjects]

print('for how many subjects inclusionexclusion_criteria_review does not exist?')
# TODO(review): original logic answering this question was cut off.
# execute it from within /data/predict1/data_from_nda/Prescient/PHOENIX/GENERAL | |
# NOTE(review): fragment is truncated -- the loop body is missing, and the
# trailing ` | |` / ` |` tokens look like extraction artifacts (the lines
# are not valid Python as-is).  Left byte-identical.
from os.path import dirname, join as pjoin | |
import pandas as pd | |
from glob import glob | |
# combined QC record CSVs for open interviews, relative to the cwd above
files=glob('*/processed/*/interviews/open/*_combinedQCRecords.csv') | |
# j: running counter -- what it counts was truncated in the original
j=0 | |
for file in files: |
"""Clean the Pronet REDCap data dictionary pulled via the Yale REDCap API.

Stated intent (from the original comments): erase calc fields and
branching logic, replacing `calc` field types with `text`.  That logic
was truncated in the original file; trailing ` | |` extraction artifacts
were removed.
"""
import pandas as pd

# Data dictionary exported through the Yale REDCap API.
filename = 'pronet_dict_20230321.csv'
df = pd.read_csv(filename, dtype=str)

df1 = df.copy()
# TODO(review): erase calcs/logics and replace `calc` by `text` -- the
# implementing code was cut off in the original source.
#!/usr/bin/env python | |
import pandas as pd | |
from glob import glob | |
def get_count(): | |
for file in files: | |
df=pd.read_csv(file) |