This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
from icevision.models.inference import *
import json
import numpy as np

# Attach a filepath component to each prediction so the source image can be
# reloaded later, then pair every prediction with its image file.
# Plain for-loop rather than a list comprehension: these calls are pure
# side effects, and zip() avoids fragile range(len(...)) indexing.
for pred, img_file in zip(preds, img_files):
    pred.add_component(FilepathRecordComponent())
    pred.set_filepath(img_file)

# Re-open each image from the filepath recorded above and post-process
# its bounding-box predictions.
for p in preds:
    # NOTE(review): the original snippet was cut off mid-call; confirm
    # whether process_bbox_predictions expects additional arguments
    # (e.g. inference transforms) in this project.
    process_bbox_predictions(p, PIL.Image.open(Path(p.pred.filepath)))
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
def twosample_tstat_welch(experimental, control):
    """
    Run Welch's unequal-variances t-test and return the t-statistic.

    Accepts two pandas Series or numpy arrays. The statistic is

        t = (xbar_1 - xbar_2) / sqrt(var_1 / n_1 + var_2 / n_2)

    i.e. the variances are NOT pooled, which is what makes this Welch's
    test rather than Student's.

    Parameters
    ----------
    experimental, control : pandas.Series or numpy.ndarray
        The two samples to compare. NOTE: pandas ``.var()`` defaults to
        ddof=1 (sample variance) while numpy ``.var()`` defaults to
        ddof=0, so the two input types give slightly different results
        for identical data.

    Returns
    -------
    float
        Welch's t-statistic (positive when the experimental mean exceeds
        the control mean).
    """
    xbar_1 = experimental.mean()
    xbar_2 = control.mean()
    var_1 = experimental.var()
    var_2 = control.var()
    n_1 = len(experimental)
    n_2 = len(control)
    # ** 0.5 avoids importing math just for sqrt.
    return (xbar_1 - xbar_2) / (var_1 / n_1 + var_2 / n_2) ** 0.5
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import re | |
def extract_scene_headings(film_script_txtfile_path="scripts/no_country.txt", mode=1):
    """
    Pull scene headings (sluglines) out of a plain-text film script.

    Parameters
    ----------
    film_script_txtfile_path : str
        Path to the script text file.
    mode : int
        Which heading format to match:
        1 -> "INT. PLACE, TIME" (location ends at a comma)
        2 -> "INT. PLACE - TIME" (location ends at " -")

    Returns
    -------
    list[str]
        The location portion of every matching heading, in file order.

    Raises
    ------
    ValueError
        If ``mode`` is not 1 or 2.
    """
    # A couple options on regex patterns, depending on script format.
    # Might need tweaks per script. Both lookbehinds are the same length
    # ("INT. " / "EXT. "), which Python's re module requires.
    film_scene_heading_regexp_1 = "(?<=INT. |EXT. ).*(?=,)"
    film_scene_heading_regexp_2 = "(?<=INT. |EXT. ).*(?= -)"
    if mode == 1:
        regexp = film_scene_heading_regexp_1
    elif mode == 2:
        regexp = film_scene_heading_regexp_2
    else:
        raise ValueError(f"mode must be 1 or 2, got {mode!r}")
    with open(film_script_txtfile_path) as f:
        script_text = f.read()
    # '.' does not cross newlines, so each match stays within one line.
    return re.findall(regexp, script_text)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import pandas as pd
from sqlalchemy import create_engine

# Your connection string format may vary by SQL flavor;
# NOTE(review): username/password/host/database must be defined earlier
# (e.g. loaded from environment variables) -- confirm against the caller.
con = f'postgresql://{username}:{password}@{host}:5432/{database}'
eng = create_engine(con)

# Open csv file as stream and write to SQL, appending as you go:
# chunksize keeps memory flat for arbitrarily large files;
# if_exists='append' accumulates rows across chunks instead of replacing;
# index=False skips writing the DataFrame's positional index as a column.
for chunk in pd.read_csv('filename.csv', chunksize=1000):
    chunk.to_sql(name='giant_table', con=eng, if_exists='append', index=False)