@andreaskoepf
Last active August 20, 2023 23:55
Megatron-LM training arguments (argparse Namespace dump):
Namespace(num_layers=80, encoder_num_layers=80, decoder_num_layers=None, hidden_size=8192, ffn_hidden_size=28672,
num_attention_heads=64, num_attention_heads_kv=8, kv_channels=128, max_position_embeddings=4096,
make_vocab_size_divisible_by=1, layernorm_epsilon=1e-05, apply_residual_connection_post_layernorm=False, use_bias=False,
use_rms_norm=True, use_post_ln=False, onnx_safe=None, glu_activation='swiglu',
position_embedding_type=<PositionEmbeddingType.rotary: 1>, rope_scaling_factor=1.0, parallel_attn=False,
parallel_layernorm=False,
tie_embed_logits=False, attention_dropout=0.1, hidden_dropout=0.1, lima_dropout=False, weight_decay=0.01,
start_weight_decay=0.01, end_weight_decay=0.01, weight_decay_incr_style='constant', clip_grad=1.0,
adam_beta1=0.9, adam_beta2=0.999, adam_eps=1e-08, sgd_momentum=0.9, micro_batch_size=1, global_batch_size=1,
rampup_batch_size=None, recompute_granularity=None, distribute_saved_activations=False, recompute_method=None,
recompute_num_layers=1, train_iters=None, train_samples=None, log_interval=100, exit_interval=None,
exit_duration_in_mins=None, exit_signal_handler=False, tensorboard_dir=None, masked_softmax_fusion=False,
bias_gelu_fusion=False, bias_dropout_fusion=False, use_flash_attn=False, optimizer='adam', dataloader_type='single',
async_tensor_model_parallel_allreduce=True, no_persist_layer_norm=False, sequence_parallel=False,
gradient_accumulation_fusion=True, seed=1234, data_parallel_random_init=False, init_method_std=0.02,
init_method_xavier_uniform=False, lr=None, lr_decay_style='linear', lr_decay_iters=None, lr_decay_samples=None,
lr_warmup_fraction=None, lr_warmup_iters=0, lr_warmup_samples=0, min_lr=0.0, override_opt_param_scheduler=False,
use_checkpoint_opt_param_scheduler=False, save='/pure-mlo-scratch/akoepf/checkpoints/tmp-unsharded-oasst_sft10',
save_interval=1, no_save_optim=True, no_save_rng=True, load=None, no_load_optim=True, no_load_rng=True,
finetune=False, perform_initialization=False, use_checkpoint_args=False, fp16=False, bf16=True, loss_scale=None,
initial_loss_scale=4294967296, min_loss_scale=1.0, loss_scale_window=1000, hysteresis=2,
fp32_residual_connection=False, apply_query_key_layer_scaling=True, attention_softmax_in_fp32=False,
accumulate_allreduce_grads_in_fp32=True, fp16_lm_cross_entropy=False, tensor_model_parallel_size=1,
pipeline_model_parallel_size=1, pipeline_model_parallel_split_rank=None, num_layers_per_virtual_pipeline_stage=None,
distributed_backend='nccl', DDP_impl='local', use_contiguous_buffers_in_local_ddp=True,
scatter_gather_tensors_in_pipeline=True, use_ring_exchange_p2p=False, local_rank=None, use_cpu_initialization=True,
empty_unused_memory_level=0, standalone_embedding_stage=False, use_distributed_optimizer=False, eval_iters=100,
eval_interval=1000, data_path=None, split='969, 30, 1', train_data_path=None, valid_data_path=None,
test_data_path=None, vocab_file=None, merge_file=None, vocab_extra_ids=0, vocab_extra_ids_list=None,
seq_length=4096, variable_seq_lengths=False, encoder_seq_length=4096, decoder_seq_length=None,
retriever_seq_length=256, sample_rate=1.0, mask_prob=0.15, short_seq_prob=0.1, mmap_warmup=False,
num_workers=2, tokenizer_type='SentencePieceTokenizer', tokenizer_model=None, new_tokens=True,
data_impl='infer', reset_position_ids=False, reset_attention_mask=False, eod_mask_loss=False,
adlr_autoresume=False, adlr_autoresume_interval=1000, ict_head_size=None, biencoder_projection_dim=0,
biencoder_shared_query_context_model=False, ict_load=None, bert_load=None, titles_data_path=None,
query_in_block_prob=0.1, use_one_sent_docs=False, evidence_data_path=None, retriever_report_topk_accuracies=[],
retriever_score_scaling=False, block_data_path=None, embedding_path=None, indexer_batch_size=128,
indexer_log_interval=1000, num_classes=1000, img_h=224, img_w=224, num_channels=3, patch_dim=16,
classes_fraction=1.0, data_per_class_fraction=1.0, data_sharding=True, head_lr_mult=1.0,
iter_per_epoch=1250, dino_local_img_size=96, dino_local_crops_number=10, dino_head_hidden_size=2048,
dino_bottleneck_size=256, dino_freeze_last_layer=1, dino_norm_last_layer=False, dino_warmup_teacher_temp=0.04,
dino_teacher_temp=0.07, dino_warmup_teacher_temp_epochs=30, log_params_norm=False, log_num_zeros_in_grad=False,
timing_log_level=0, barrier_with_L1_time=True, timing_log_option='minmax', tensorboard_log_interval=1,
tensorboard_queue_size=1000, log_timers_to_tensorboard=False, log_batch_size_to_tensorboard=False,
log_validation_ppl_to_tensorboard=False, log_memory_to_tensorboard=False, log_world_size_to_tensorboard=False,
wandb_logger=False, wandb_project=None, wandb_entity='meditron', wandb_id=None, wandb_resume=False, wandb_api_key=None,
inference_batch_times_seqlen_threshold=512, max_tokens_to_oom=12000, fp8_e4m3=False, fp8_hybrid=False, fp8_wgrad=True,
fp8_margin=0, fp8_interval=1, transformer_impl='local', fp8_amax_history_len=1, fp8_amax_compute_algo='most_recent',
rank=0, world_size=1, transformer_pipeline_model_parallel_size=1, data_parallel_size=1,
virtual_pipeline_model_parallel_size=None, params_dtype=torch.bfloat16, consumed_train_samples=22144,
consumed_valid_samples=3840, model_name='llama2', model_type=<ModelType.encoder_or_decoder: 1>,
padded_vocab_size=32007)
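
For orientation, these hyperparameters pin down the Llama 2 70B shape. Below is a quick back-of-the-envelope parameter count (a sketch, not Megatron code; it assumes the standard Llama layer layout of GQA attention, SwiGLU MLP, and RMSNorm, consistent with use_rms_norm=True, glu_activation='swiglu', and num_attention_heads_kv=8 above):

# Rough parameter count from the dumped arguments.
num_layers = 80
hidden = 8192
ffn_hidden = 28672
num_kv_heads = 8
kv_channels = 128
vocab = 32007                                 # padded_vocab_size

kv_dim = num_kv_heads * kv_channels           # 1024: grouped K/V projection width
attn = 2 * hidden * hidden + 2 * hidden * kv_dim   # Q and O full-width; K and V grouped
mlp = 3 * hidden * ffn_hidden                 # gate, up, down projections (SwiGLU)
norms = 2 * hidden                            # two RMSNorm weight vectors per layer
per_layer = attn + mlp + norms

embeddings = 2 * vocab * hidden               # untied in/out embeddings (tie_embed_logits=False)
total = num_layers * per_layer + embeddings + hidden   # + final RMSNorm

print(f"{total / 1e9:.2f}B parameters")       # ~68.98B, i.e. the Llama 2 70B configuration

This lands at roughly 68.98B parameters; tie_embed_logits=False is why the input embedding and the output head are counted separately.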
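num_attention_heads=64 with num_attention_heads_kv=8 is grouped-query attention: each of the 8 K/V heads is shared by 64/8 = 8 query heads. A minimal PyTorch sketch of the shape bookkeeping (illustrative only, not Megatron's kernel; a toy sequence length stands in for the run's seq_length=4096):

import torch

batch, seq = 1, 16            # toy length; the run uses seq_length=4096
n_q, n_kv, d = 64, 8, 128     # num_attention_heads, num_attention_heads_kv, kv_channels

q = torch.randn(batch, n_q, seq, d)
k = torch.randn(batch, n_kv, seq, d)
v = torch.randn(batch, n_kv, seq, d)

# Repeat each K/V head across its group of query heads, then do ordinary attention.
group = n_q // n_kv
k = k.repeat_interleave(group, dim=1)         # (batch, 64, seq, d)
v = v.repeat_interleave(group, dim=1)

scores = (q @ k.transpose(-2, -1)) / d**0.5
out = scores.softmax(dim=-1) @ v              # (batch, 64, seq, 128)

The payoff is a much smaller KV cache at inference time (8 K/V heads instead of 64) at essentially unchanged model quality.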