Skip to content

Instantly share code, notes, and snippets.

@rahulremanan
Last active August 30, 2021 21:00
Show Gist options
  • Save rahulremanan/26cc425b505522341a4587961b4fb377 to your computer and use it in GitHub Desktop.
Randomized saliency filter testing for padding invariance
# Test-size configuration: shrink the workload in debug mode for fast iteration.
num_pairwise_tests = 1 if debug else 2  # number of random FairFace image pairs to evaluate
num_eval = 1 if debug else 25           # randomized padding trials per padding block
# Bare expression — displays the running experiment count in a notebook cell.
# NOTE(review): has no effect when executed as a plain script; presumably
# leftover notebook output — confirm before removing.
len(experiment_ids)
# Pairwise padding-invariance test for the Twitter saliency (image-crop) model.
# For each randomly drawn FairFace image pair: build vertical and horizontal
# baseline composites and record where the saliency point lands, then repeat
# with randomized padding between the faces to test whether the saliency
# choice is invariant to padding.
# NOTE(review): relies on many names defined elsewhere in the notebook
# (debug, img_labels, unzip_dir, fairface_data, data_dir, img_dir, model,
#  padding_eval, labels_encoder, and the accumulator lists experiment_ids,
#  instance_ids, img1, img2, baseline_*, saliency_out, combine_mode).
for _ in tqdm(range(num_pairwise_tests)):
    # Draw a random pair of image indices, then re-draw (bounded by
    # max_retries) until the pair passes the filter — presumably to force
    # demographically distinct pairs; TODO confirm against img_pairs_filter.
    img_idx1,img_idx2 = random_imgID_generator(img_labels)
    max_retries = 2000
    img_idx1, img_idx2 = img_pairs_filter(img_labels,img_idx1,img_idx2,
                                          max_retries=max_retries)
    img1_info,img2_info = img_info(img_labels, img_idx1, img_idx2)
    # Start from a clean working directory holding only the two chosen faces.
    clear_image_history(unzip_dir)
    get_fairface_img(img_labels, img_idx1, unzip_dir, fairface_data)
    get_fairface_img(img_labels, img_idx2, unzip_dir, fairface_data)
    # Human-readable pair id: "<id1>_<race1>_<gender1>--<id2>_<race2>_<gender2>".
    img_id1 = str(img_labels.iloc[img_idx1].file).split('/')[-1].replace('.jpg','')
    img_race1 = str(img_labels.iloc[img_idx1].race)
    img_gender1 = str(img_labels.iloc[img_idx1].gender)
    img_id2 = str(img_labels.iloc[img_idx2].file).split('/')[-1].replace('.jpg','')
    img_race2 = str(img_labels.iloc[img_idx2].race)
    img_gender2 = str(img_labels.iloc[img_idx2].gender)
    file_id = f'{img_id1}_{img_race1}_{img_gender1}--{img_id2}_{img_race2}_{img_gender2}'
    experiment_id = randomID_generator()
    image_files = glob.glob(str(data_dir / Path("./*.jpg")))
    output_dir =f'{img_dir}/Twitter_saliency/FairFace_pairwise_tests/'
    # BUG(review): label_id is first assigned inside the padding sweep below;
    # on the very first outer iteration this line raises NameError. Likely a
    # misplaced notebook cell — confirm intended ordering before fixing.
    filename = f'{experiment_id}_{file_id}_{label_id}'
    # --- Baseline V1: vertical stack, original file order. ---
    images = [Image.open(f)for f in image_files]
    img = join_images(images, col_wrap=1, img_size=(128,128))
    # NOTE(review): "(unknown)" in the output paths below looks like an
    # extraction artifact — presumably the original read f"...{filename}...";
    # verify against the source notebook.
    output_file = f"{output_dir}/(unknown)_baseline_v1.jpeg"
    img.save(output_file, "JPEG")
    model.plot_img_crops_using_img(img, topK=5, col_wrap=6)
    # Map the model's saliency point back to which source image it fell on.
    baselinev1_saliency_info,sp = saliency_point_to_info(Path(output_file).as_posix(),
                                                         image_files, model, img_labels,image_mode='vertical')
    if debug:
        print(image_files)
        print(baselinev1_saliency_info,sp)
    plt.savefig(f"{output_dir}/(unknown)_baseline_v1_sm.jpeg",bbox_inches="tight")
    if not debug:
        # Free matplotlib state between trials to bound memory use.
        plt.close()
        _=gc.collect()
    # --- Baseline V2: vertical stack, reversed file order. ---
    image_files.reverse()
    images = [Image.open(f)for f in image_files]
    img = join_images(images, col_wrap=1, img_size=(128,128))
    output_file = f"{output_dir}/(unknown)_baseline_v2.jpeg"
    img.save(output_file, "JPEG")
    model.plot_img_crops_using_img(img, topK=5, col_wrap=6)
    baselinev2_saliency_info,sp = saliency_point_to_info(Path(output_file).as_posix(),
                                                         image_files, model, img_labels, image_mode='vertical')
    if debug:
        print(image_files)
        print(baselinev2_saliency_info,sp)
    plt.savefig(f"{output_dir}/(unknown)_baseline_v2_sm.jpeg",bbox_inches="tight")
    if not debug:
        plt.close()
        _=gc.collect()
    # --- Baseline H1: side-by-side (two columns), current (reversed) order. ---
    images = [Image.open(f)for f in image_files]
    img = join_images(images, col_wrap=2, img_size=(128,128))
    output_file = f"{output_dir}/(unknown)_baseline_h1.jpeg"
    img.save(output_file, "JPEG")
    model.plot_img_crops_using_img(img, topK=5, col_wrap=6)
    baselineh1_saliency_info,sp = saliency_point_to_info(Path(output_file).as_posix(),
                                                         image_files, model, img_labels, image_mode='horizontal')
    if debug:
        print(image_files)
        print(baselineh1_saliency_info,sp)
    plt.savefig(f"{output_dir}/(unknown)_baseline_h1_sm.jpeg",bbox_inches="tight")
    if not debug:
        plt.close()
        _=gc.collect()
    # --- Baseline H2: side-by-side, order reversed back to the original. ---
    image_files.reverse()
    images = [Image.open(f)for f in image_files]
    img = join_images(images, col_wrap=2, img_size=(128,128))
    output_file = f"{output_dir}/(unknown)_baseline_h2.jpeg"
    img.save(output_file, "JPEG")
    model.plot_img_crops_using_img(img, topK=5, col_wrap=6)
    baselineh2_saliency_info,sp = saliency_point_to_info(Path(output_file).as_posix(),
                                                         image_files, model, img_labels, image_mode='horizontal')
    if debug:
        print(image_files)
        print(baselineh2_saliency_info,sp)
    plt.savefig(f"{output_dir}/(unknown)_baseline_h2_sm.jpeg",bbox_inches="tight")
    if not debug:
        plt.close()
        _=gc.collect()
    # --- Padding sweep: for each combine orientation and padding block, run
    #     num_eval randomized trials and record one result row each. ---
    for i in range(len(padding_eval)):
        eval_key = list(padding_eval.keys())[i]
        label_id = eval_key
        if eval_key == 'horizontal':
            label_id = 'h'
            num_cols = 2
        elif eval_key == 'vertical':
            label_id = 'v'
            num_cols = 1
        padding_blocks = padding_eval[eval_key]['padding_blocks']
        for j in range(len(padding_blocks)):
            for k in tqdm(range(num_eval)):
                instance_id = randomID_generator()
                # Shuffle image order (crypto RNG) so padding effects are not
                # confounded with image position.
                random.SystemRandom().shuffle(image_files)
                images = [Image.open(f)for f in image_files]
                # NOTE(review): indexed with j+1, so padding_blocks appears to
                # be keyed 1..N rather than 0-based — confirm against its
                # definition elsewhere.
                padding_ranges = padding_blocks[j+1]
                # Sample a padding uniformly in [min, max) for this block.
                padding = random.SystemRandom().choice(range(padding_ranges['min'],
                                                             padding_ranges['max']))
                img = join_images(images, col_wrap=num_cols, img_size=(128,128),
                                  padding=padding)
                filename = f'{instance_id}_{file_id}_p{padding}_t{k}_{label_id}'
                # NOTE(review): "(unknown)" here too — presumably {filename}.
                output_file = f"{output_dir}/(unknown).jpeg"
                img.save(output_file, "JPEG")
                model.plot_img_crops_using_img(img, topK=5, col_wrap=3)
                sm_output_file = f"{output_dir}/(unknown)_sm.jpeg"
                plt.savefig(sm_output_file,bbox_inches="tight")
                saliency_info,sp = saliency_point_to_info(Path(output_file).as_posix(),
                                                          image_files, model, img_labels, image_mode=eval_key)
                if debug:
                    print(image_files)
                    print(saliency_info,sp)
                # Accumulate one row per trial for the results DataFrame
                # (labels encoded via labels_encoder for downstream analysis).
                experiment_ids.append(experiment_id)
                instance_ids.append(instance_id)
                img1.append(img1_info)
                img2.append(img2_info)
                baseline_h1.append(encoded_labels(baselineh1_saliency_info['race'],labels_encoder))
                baseline_h2.append(encoded_labels(baselineh2_saliency_info['race'],labels_encoder))
                baseline_v1.append(encoded_labels(baselinev1_saliency_info['race'],labels_encoder))
                baseline_v2.append(encoded_labels(baselinev2_saliency_info['race'],labels_encoder))
                saliency_out.append(encoded_labels(saliency_info['race'],labels_encoder))
                combine_mode.append(eval_key)
                if not debug:
                    plt.close()
                    _=gc.collect()
# Assemble one row per padding trial into the results table and report its
# size. Column order matches the accumulation order used in the loop above.
pairwise_df = pd.DataFrame({
    'experiment_id': experiment_ids,
    'instance_id': instance_ids,
    'img1': img1,
    'img2': img2,
    'baseline_h1': baseline_h1,
    'baseline_h2': baseline_h2,
    'baseline_v1': baseline_v1,
    'baseline_v2': baseline_v2,
    'saliency_out': saliency_out,
    'combine_mode': combine_mode,
})
print(len(pairwise_df))
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment.