{ "_matformer_config_dict": { "attention_type": [], "bias": false, "block_size_for_attention": 128, "bos_token_id": 1, "compile_flexattn": false, "custom_layers": {}, "decoder": null, "default_layer": { "attn_impl": "flash", "ffn_activation": "swiglu", "hooks": {}, "normalization": "rmsnorm", "normalization_position": "post", "positional_encoding": "alibi", "sliding_window_size": null }, "encoder": null, "entropy": null, "eos_token_id": 2, "ffn_factor": 3.0, "has_entropy_model": null, "has_text_autoencoder": null, "hidden_size": 768, "is_causal": true, "mask_token_id": null, "masked_substitution_rate": null, "max_position_embeddings": 1024, "model_class": null, "name": "BabyLM", "num_attention_heads": 12, "num_hidden_layers": 12, "pad_token_id": 0, "rms_norm_eps": 1e-06, "rope_theta": 10000.0, "sliding_type": null, "tie_word_embeddings": false, "training_objective": "autoregressive", "vocab_size": 32777 }, "architectures": [ "MatformerCausalLM" ], "attention_type": [], "bias": false, "block_size_for_attention": 128, "bos_token_id": 1, "compile_flexattn": false, "custom_layers": {}, "decoder": null, "default_layer": { "attn_impl": "flash", "ffn_activation": "swiglu", "hooks": {}, "normalization": "rmsnorm", "normalization_position": "post", "positional_encoding": "alibi", "sliding_window_size": null }, "encoder": null, "entropy": null, "eos_token_id": 2, "ffn_factor": 3.0, "has_entropy_model": null, "has_text_autoencoder": null, "hidden_size": 768, "is_causal": true, "mask_token_id": null, "masked_substitution_rate": null, "max_position_embeddings": 1024, "model_class": null, "model_type": "matformer", "name": "BabyLM", "num_attention_heads": 12, "num_hidden_layers": 12, "pad_token_id": 0, "rms_norm_eps": 1e-06, "rope_theta": 10000.0, "sliding_type": null, "torch_dtype": "bfloat16", "training_objective": "autoregressive", "transformers_version": "4.53.0.dev0", "use_cache": true, "vocab_size": 32777 }