# configs
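# Four spaCy configs for a Hungarian ("hu") transformer pipeline are
# concatenated below:
#   1. an assembly config that sources the trained components from the
#      tagger, parser and NER models into one pipeline
#   2. a NER training config (beam_ner on xlm-roberta-base)
#   3. a dependency parser training config (experimental arc predicter/labeler)
#   4. a tagger/morphologizer/lemmatizer training config
#
# ---- assembly config ----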
[paths]
tagger_model = null
parser_model = null
ner_model = null
lemmatizer_lookups = null
[nlp]
lang = "hu"
;pipeline = ["transformer", "senter", "tagger", "morphologizer", "lookup_lemmatizer", "trainable_lemmatizer", "lemma_smoother", "experimental_arc_predicter", "experimental_arc_labeler", "ner"]
pipeline = ["transformer", "senter", "tagger", "morphologizer", "trainable_lemmatizer", "experimental_arc_predicter", "experimental_arc_labeler", "ner"]
tokenizer = {"@tokenizers":"spacy.Tokenizer.v1"}
[components]
[components.transformer]
source = ${paths.tagger_model}
component = "transformer"
[components.senter]
source = ${paths.parser_model}
component = "senter"
[components.tagger]
source = ${paths.tagger_model}
component = "tagger"
[components.morphologizer]
source = ${paths.tagger_model}
component = "morphologizer"
;[components.lookup_lemmatizer]
;factory = "hu.lookup_lemmatizer"
;source = ${paths.lemmatizer_lookups}
[components.trainable_lemmatizer]
source = ${paths.tagger_model}
component = "trainable_lemmatizer"
;[components.lemma_smoother]
;factory = "hu.lemma_smoother"
[components.experimental_arc_predicter]
source = ${paths.parser_model}
component = "experimental_arc_predicter"
;replace_listeners = ["model.tok2vec"]
[components.experimental_arc_labeler]
source = ${paths.parser_model}
component = "experimental_arc_labeler"
;replace_listeners = ["model.tok2vec"]
[components.ner]
source = ${paths.ner_model}
component = "ner"
;replace_listeners = ["model.tok2vec"]
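# A possible way to build the combined pipeline from the assembly config
# above (an assumption, not part of the gist; the file name and model paths
# are placeholders):
#   python -m spacy assemble assemble.cfg ./hu_pipeline \
#       --paths.tagger_model ./tagger/model-best \
#       --paths.parser_model ./parser/model-best \
#       --paths.ner_model ./ner/model-best
#
# ---- NER training config ----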
[paths]
train = null
dev = null
vectors = null
init_tok2vec = null
tagger_model = null
[system]
gpu_allocator = "pytorch"
seed = 0
[nlp]
lang = "hu"
pipeline = ["transformer","ner"]
batch_size = 128
disabled = []
before_creation = null
after_creation = null
after_pipeline_creation = null
tokenizer = {"@tokenizers":"spacy.Tokenizer.v1"}
[components]
[components.ner]
factory = "beam_ner"
beam_update_prob = 1.0
incorrect_spans_key = null
moves = null
scorer = {"@scorers":"spacy.ner_scorer.v1"}
update_with_oracle_cut_size = 100
[components.ner.model]
@architectures = "spacy.TransitionBasedParser.v2"
state_type = "ner"
extra_state_tokens = true
hidden_width = 64
maxout_pieces = 3
use_upper = false
nO = null
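# The tok2vec listener below reuses the last layer of the shared transformer;
# width = 768 matches the hidden size of xlm-roberta-base.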
[components.ner.model.tok2vec]
@architectures = "curated-transformers.LastTransformerLayerListener.v1"
width = 768
pooling = {"@layers":"reduce_mean.v1"}
upstream = "*"
[components.transformer]
source = ${paths.tagger_model}
[components.transformer.model]
@architectures = "curated-transformers.XlmrTransformer.v1"
vocab_size = 250002
piece_encoder = {"@architectures": "curated-transformers.XlmrSentencepieceEncoder.v1"}
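# WithStridedSpans runs the transformer over 128-piece windows with a stride
# of 96, i.e. consecutive windows overlap by 32 pieces.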
[components.transformer.model.with_spans]
@architectures = "curated-transformers.WithStridedSpans.v1"
window = 128
stride = 96
[corpora]
[corpora.dev]
@readers = "spacy.Corpus.v1"
path = ${paths.dev}
max_length = 0
gold_preproc = true
limit = 0
augmenter = null
[corpora.train]
@readers = "spacy.Corpus.v1"
path = ${paths.train}
max_length = 2000
gold_preproc = true
limit = 0
augmenter = null
[training]
accumulate_gradient = 3
dev_corpus = "corpora.dev"
train_corpus = "corpora.train"
seed = ${system.seed}
gpu_allocator = ${system.gpu_allocator}
dropout = 0.5
patience = 10000
max_epochs = 25
max_steps = 0
eval_frequency = 200
frozen_components = []
annotating_components = []
before_to_disk = null
[training.batcher]
@batchers = "spacy.batch_by_padded.v1"
#TODO: should be false
discard_oversize = true
#TODO: can we increase this to fully utilize A100 GPUs?
size = 2000
buffer = 256
get_length = null
[training.logger]
@loggers = "spacy.WandbLogger.v4"
project_name = "test"
run_name = "ner"
[training.optimizer]
@optimizers = "Adam.v1"
beta1 = 0.9
beta2 = 0.999
L2_is_weight_decay = true
L2 = 0.1
grad_clip = 1.0
use_averages = false
eps = 0.00000001
[training.optimizer.learn_rate]
@schedules = "warmup_linear.v1"
warmup_steps = 250
total_steps = 20000
initial_rate = 0.00005
[training.score_weights]
ents_f = 1.0
ents_p = 0.0
ents_r = 0.0
ents_per_type = null
[pretraining]
[initialize]
vectors = ${paths.vectors}
init_tok2vec = ${paths.init_tok2vec}
vocab_data = null
lookups = null
before_init = null
after_init = null
[initialize.components]
[initialize.components.transformer]
[initialize.components.transformer.encoder_loader]
@model_loaders = "curated-transformers.HFTransformerEncoderLoader.v1"
name = "xlm-roberta-base"
[initialize.components.transformer.piecer_loader]
@model_loaders = "curated-transformers.HFPieceEncoderLoader.v1"
name = "xlm-roberta-base"
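# A possible training invocation for the NER config (an assumption; file name
# and paths are placeholders):
#   python -m spacy train ner.cfg --output ./ner \
#       --paths.train ./train.spacy --paths.dev ./dev.spacy \
#       --paths.tagger_model ./tagger/model-best --gpu-id 0
#
# ---- dependency parser training config ----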
[paths]
train = null
dev = null
tagger_model = null
[system]
gpu_allocator = "pytorch"
seed = 0
[nlp]
lang = "hu"
pipeline = ["transformer","senter","experimental_arc_predicter","experimental_arc_labeler"]
batch_size = 256
disabled = []
before_creation = null
after_creation = null
after_pipeline_creation = null
tokenizer = {"@tokenizers":"spacy.Tokenizer.v1"}
[components]
[components.senter]
source = ${paths.tagger_model}
[components.senter.model]
@architectures = "spacy.Tagger.v1"
nO = null
[components.senter.model.tok2vec]
@architectures = "curated-transformers.LastTransformerLayerListener.v1"
width = 768
pooling = {"@layers":"reduce_mean.v1"}
upstream = "*"
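# The experimental_arc_* components implement the biaffine-style dependency
# parser from spacy-experimental: the predicter scores head attachments with
# a pairwise bilinear layer, the labeler assigns dependency labels with a
# bilinear layer.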
[components.experimental_arc_labeler]
factory = "experimental_arc_labeler"
[components.experimental_arc_labeler.model]
@architectures = "spacy-experimental.Bilinear.v1"
hidden_width = 128
mixed_precision = false
[components.experimental_arc_labeler.model.tok2vec]
@architectures = "curated-transformers.LastTransformerLayerListener.v1"
width = 768
pooling = {"@layers":"reduce_mean.v1"}
upstream = "*"
[components.experimental_arc_predicter]
factory = "experimental_arc_predicter"
[components.experimental_arc_predicter.model]
@architectures = "spacy-experimental.PairwiseBilinear.v1"
hidden_width = 256
nO = 1
mixed_precision = false
[components.experimental_arc_predicter.model.tok2vec]
@architectures = "curated-transformers.LastTransformerLayerListener.v1"
width = 768
pooling = {"@layers":"reduce_mean.v1"}
upstream = "*"
[components.transformer]
source = ${paths.tagger_model}
[components.transformer.model]
@architectures = "curated-transformers.XlmrTransformer.v1"
vocab_size = 250002
piece_encoder = {"@architectures": "curated-transformers.XlmrSentencepieceEncoder.v1"}
[components.transformer.model.with_spans]
@architectures = "curated-transformers.WithStridedSpans.v1"
window = 128
stride = 96
[corpora]
[corpora.dev]
@readers = "spacy.Corpus.v1"
path = ${paths.dev}
max_length = 0
gold_preproc = false
limit = 0
augmenter = null
[corpora.train]
@readers = "spacy.Corpus.v1"
path = ${paths.train}
max_length = 2000
gold_preproc = false
limit = 0
augmenter = null
[training]
accumulate_gradient = 3
dev_corpus = "corpora.dev"
train_corpus = "corpora.train"
seed = ${system.seed}
gpu_allocator = ${system.gpu_allocator}
dropout = 0.1
patience = 10000
max_epochs = 7500
max_steps = 0
eval_frequency = 200
frozen_components = []
before_to_disk = null
annotating_components = ["senter"]
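# "senter" is listed as an annotating component so its predicted sentence
# boundaries are set on the training docs for the arc predicter/labeler.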
[training.batcher]
@batchers = "spacy.batch_by_padded.v1"
discard_oversize = true
get_length = null
size = 2000
buffer = 256
[training.logger]
@loggers = "spacy.WandbLogger.v4"
project_name = "test"
run_name = "parser"
[training.optimizer]
@optimizers = "Adam.v1"
beta1 = 0.9
beta2 = 0.999
L2_is_weight_decay = true
L2 = 0.01
grad_clip = 1.0
use_averages = false
eps = 0.00000001
[training.optimizer.learn_rate]
@schedules = "warmup_linear.v1"
warmup_steps = 250
total_steps = 20000
initial_rate = 0.00005
[training.score_weights]
dep_uas = 0.15
dep_las = 0.15
dep_las_per_type = null
sents_p = null
sents_r = null
sents_f = 0.2
pos_acc = null
morph_acc = null
morph_per_feat = null
tag_acc = null
lemma_acc = null
bound_dep_uas = 0.1
bound_dep_las = 0.1
[pretraining]
[initialize]
vectors = null
[initialize.components]
[initialize.components.transformer]
[initialize.components.transformer.encoder_loader]
@model_loaders = "curated-transformers.HFTransformerEncoderLoader.v1"
name = "xlm-roberta-base"
[initialize.components.transformer.piecer_loader]
@model_loaders = "curated-transformers.HFPieceEncoderLoader.v1"
name = "xlm-roberta-base"
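# A possible training invocation for the parser config (an assumption; file
# name and paths are placeholders):
#   python -m spacy train parser.cfg --output ./parser \
#       --paths.train ./train.spacy --paths.dev ./dev.spacy \
#       --paths.tagger_model ./tagger/model-best --gpu-id 0
#
# ---- tagger / morphologizer / lemmatizer training config ----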
[paths]
train = null
dev = null
vectors = null
init_tok2vec = null
[system]
gpu_allocator = "pytorch"
seed = 0
[nlp]
lang = "hu"
pipeline = ["transformer","senter","tagger","morphologizer","trainable_lemmatizer"]
batch_size = 256
disabled = []
before_creation = null
after_creation = null
after_pipeline_creation = null
tokenizer = {"@tokenizers":"spacy.Tokenizer.v1"}
[components]
[components.senter]
factory = "senter"
overwrite = false
scorer = {"@scorers":"spacy.senter_scorer.v1"}
[components.senter.model]
@architectures = "spacy.Tagger.v1"
nO = null
[components.senter.model.tok2vec]
@architectures = "curated-transformers.LastTransformerLayerListener.v1"
width = 768
pooling = {"@layers":"reduce_mean.v1"}
upstream = "*"
[components.morphologizer]
factory = "morphologizer"
extend = false
overwrite = true
scorer = {"@scorers":"spacy.morphologizer_scorer.v1"}
[components.morphologizer.model]
@architectures = "spacy.Tagger.v1"
nO = null
[components.morphologizer.model.tok2vec]
@architectures = "curated-transformers.LastTransformerLayerListener.v1"
width = 768
pooling = {"@layers":"reduce_mean.v1"}
upstream = "*"
[components.tagger]
factory = "tagger"
neg_prefix = "!"
overwrite = false
scorer = {"@scorers":"spacy.tagger_scorer.v1"}
[components.tagger.model]
@architectures = "spacy.Tagger.v1"
nO = null
[components.tagger.model.tok2vec]
@architectures = "curated-transformers.LastTransformerLayerListener.v1"
width = 768
pooling = {"@layers":"reduce_mean.v1"}
upstream = "*"
[components.trainable_lemmatizer]
factory = "trainable_lemmatizer"
backoff = "orth"
min_tree_freq = 1
overwrite = false
scorer = {"@scorers":"spacy.lemmatizer_scorer.v1"}
top_k = 3
[components.trainable_lemmatizer.model]
@architectures = "spacy.Tagger.v2"
nO = null
[components.trainable_lemmatizer.model.tok2vec]
@architectures = "curated-transformers.LastTransformerLayerListener.v1"
width = 768
pooling = {"@layers":"reduce_mean.v1"}
upstream = "*"
[components.transformer]
factory = "curated_transformer"
[components.transformer.model]
@architectures = "curated-transformers.XlmrTransformer.v1"
vocab_size = 250002
piece_encoder = {"@architectures": "curated-transformers.XlmrSentencepieceEncoder.v1"}
[components.transformer.model.with_spans]
@architectures = "curated-transformers.WithStridedSpans.v1"
window = 128
stride = 96
[corpora]
[corpora.dev]
@readers = "spacy.Corpus.v1"
path = ${paths.dev}
max_length = 0
gold_preproc = false
limit = 0
augmenter = null
[corpora.train]
@readers = "spacy.Corpus.v1"
path = ${paths.train}
max_length = 2000
gold_preproc = false
limit = 0
augmenter = null
[training]
accumulate_gradient = 3
dev_corpus = "corpora.dev"
train_corpus = "corpora.train"
seed = ${system.seed}
gpu_allocator = ${system.gpu_allocator}
dropout = 0.3
# Circa 5 epochs
patience = 5000
max_epochs = 25
max_steps = 0
eval_frequency = 200
frozen_components = []
annotating_components = []
before_to_disk = null
[training.batcher]
@batchers = "spacy.batch_by_padded.v1"
discard_oversize = true
size = 2000
buffer = 256
get_length = null
[training.logger]
@loggers = "spacy.WandbLogger.v4"
project_name = "test"
run_name = "tagger"
[training.optimizer]
@optimizers = "Adam.v1"
beta1 = 0.9
beta2 = 0.999
L2_is_weight_decay = true
L2 = 0.01
grad_clip = 1.0
use_averages = false
eps = 0.00000001
[training.optimizer.learn_rate]
@schedules = "warmup_linear.v1"
warmup_steps = 250
total_steps = 20000
initial_rate = 0.00005
[training.score_weights]
sents_f = 0.1
sents_p = null
sents_r = null
tag_acc = 0.2
pos_acc = 0.2
morph_acc = 0.3
morph_per_feat = null
lemma_acc = 0.2
[pretraining]
[initialize]
vectors = ${paths.vectors}
init_tok2vec = ${paths.init_tok2vec}
vocab_data = null
lookups = null
before_init = null
after_init = null
[initialize.components]
[initialize.components.transformer]
[initialize.components.transformer.encoder_loader]
@model_loaders = "curated-transformers.HFTransformerEncoderLoader.v1"
name = "xlm-roberta-base"
[initialize.components.transformer.piecer_loader]
@model_loaders = "curated-transformers.HFPieceEncoderLoader.v1"
name = "xlm-roberta-base"
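# A possible training invocation for the tagger config (an assumption; file
# name and paths are placeholders):
#   python -m spacy train tagger.cfg --output ./tagger \
#       --paths.train ./train.spacy --paths.dev ./dev.spacy --gpu-id 0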