This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Global network measures for g1 (igraph).
# weights = NA tells igraph to IGNORE any edge-weight attribute, so every
# measure here is computed on the unweighted graph.
diameter(g1, directed = FALSE, weights = NA)   # longest shortest path
edge_density(g1, loops = FALSE)                # edges / possible edges
reciprocity(g1)                                # share of mutual directed ties
closeness(g1, mode = "all", weights = NA)      # closeness over all edges
# Fixed: the argument is 'weights' (plural); 'weight' only worked through
# R's partial argument matching. Also TRUE/FALSE instead of T/F.
betweenness(g1, directed = TRUE, weights = NA)
edge_betweenness(g1)                           # shortest paths through each edge
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Vertex degree of g1 split by direction: total, incoming, outgoing.
degree(g1, mode = "all")
degree(g1, mode = "in")
degree(g1, mode = "out")
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Network measures ----
degree(g1)  # default: total degree (in + out) for every vertex
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Directed graph from named vertices; the character vector is read as
# consecutive from/to pairs:
# Heena->Tina, Tina->Disha, Disha->Heena, Heena->Disha, Li->Disha.
g1 <- graph(c("Heena", "Tina", "Tina", "Disha", "Disha", "Heena",
              "Heena", "Disha", "Li", "Disha"))
plot(g1,
     vertex.color = "green",
     vertex.size = 40,
     edge.color = "red")
g1  # print the igraph summary (vertex/edge counts and edge list)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
library(igraph)

# Directed 4-cycle: 1 -> 2 -> 3 -> 4 -> 1 (numeric vector read as pairs).
g <- graph(c(1, 2, 2, 3, 3, 4, 4, 1))
plot(g,
     vertex.color = "green",
     vertex.size = 40,
     edge.color = "red")
g[]  # sparse adjacency-matrix view of the graph
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Build a T5 model from the "t5-small" checkpoint with the hyper-parameters
# in model_args, then fine-tune on the paraphrase pairs, evaluating on the
# dev split after training.
# NOTE(review): recent simpletransformers releases use
# T5Model(model_type, model_name, args=...) — confirm the installed version
# accepts this single-name form.
model = T5Model("t5-small", args=model_args)
model.train_model(paraphrase_train_df, eval_data=paraphrase_dev_df)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Hyper-parameters for simpletransformers T5 fine-tuning and generation.
model_args = {
    "reprocess_input_data": True,   # re-tokenize instead of reusing cached features
    "overwrite_output_dir": True,   # allow reruns to overwrite outputs/
    "max_seq_length": 128,          # truncate/pad inputs to 128 tokens
    "train_batch_size": 16,
    "num_train_epochs": 25,
    # Decoding: top-k sampling rather than beam search (beams disabled).
    "num_beams": None,
    "do_sample": True,
    "max_length": 20,               # max generated paraphrase length
    "top_k": 50,
}
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Upload the QQP-style TSV in Colab, then build the T5 training frame.
files.upload()
pd.set_option('display.max_colwidth', None)  # show full sentences when printing

df = pd.read_csv('train.tsv', sep='\t')
df.head(5)
df.describe()

# Keep only true paraphrase pairs (label == 1). The explicit .copy() makes
# this an independent frame so adding the "prefix" column below does not
# raise pandas' SettingWithCopyWarning.
paraphrase_train_df = df[df['label'] == 1].copy()
paraphrase_train_df.head(5)
paraphrase_train_df.describe()

# simpletransformers' T5Model expects prefix / input_text / target_text columns.
paraphrase_train_df["prefix"] = "generate paraphrase"
paraphrase_train_df = paraphrase_train_df.rename(
    columns={"sentence1": "input_text", "sentence2": "target_text"})
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import os
import logging
from pprint import pprint

import pandas as pd
from google.colab import files
from simpletransformers.t5 import T5Model

# Keep HuggingFace/transformers quiet; only errors are reported.
logging.basicConfig(level=logging.ERROR)
transformers_logger = logging.getLogger("transformers")
transformers_logger.setLevel(logging.ERROR)

# Portable replacement for the IPython magic `%env WANDB_DISABLED=True`:
# disable Weights & Biases experiment tracking.
os.environ["WANDB_DISABLED"] = "True"
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Load a pretrained fairseq WMT19 German->English model from torch.hub
# (moses tokenizer, fastBPE subwords) — the return leg of a back-translation
# round trip.
german_to_english = torch.hub.load('pytorch/fairseq', 'transformer.wmt19.de-en.single_model', tokenizer='moses', bpe='fastbpe') | |
data = ["back translation is one of the best data augmentation techniques"] | |
# Back-translation augmentation: translate each sentence source->target and
# back target->source with beam size n, collecting round-tripped variants
# keyed by the original sentence.
# NOTE(review): the function body is truncated in this view (no visible
# return) and the scrape destroyed its indentation — confirm against the
# original file before editing further.
def augment_data(data, x_to_y, y_to_x, n): | |
augmented_data = dict() | |
for d in data: | |
augmented_data[d] = list() | |
# n-best forward translations of the original sentence d
y_result = x_to_y.generate(x_to_y.encode(d), beam=n) | |
for y in y_result: | |
# decode each forward hypothesis and translate it back to the source language
x_result = y_to_x.generate(y_to_x.encode(x_to_y.decode(y['tokens'])), beam=n) | |