from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging

logger = logging.get_logger(__name__)


class LOLAConfig(PretrainedConfig):
    """
    Configuration class for LOLA. This is a modified copy of the GPT-2
    configuration (https://huggingface.co/openai-community/gpt2) with
    Mixture-of-Experts (MoE) support.
    """

    model_type = "lola_v1"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
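    # attribute_map lets callers use the generic Transformers names: e.g.
    # reading config.hidden_size returns n_embd, and setting
    # config.num_hidden_layers writes n_layer (the aliasing is resolved by
    # PretrainedConfig's attribute lookup).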

    def __init__(
        self,
        vocab_size=100096,
        n_positions=2048,
        n_embd=2048,
        n_layer=24,
        n_head=16,
        n_inner=8192,
        activation_function="gelu_new",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=100095,
        eos_token_id=100095,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        num_experts=16,  # MoE: number of experts per MoE layer
        topk=1,  # MoE: number of experts each token is routed to (top-k gating)
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.num_experts = num_experts
        self.topk = topk
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
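

# A minimal usage sketch, not part of the original module: it instantiates the
# config, checks that the attribute_map aliases resolve, and round-trips the
# config through save_pretrained/from_pretrained. The directory name
# "lola-config" is an arbitrary example.
if __name__ == "__main__":
    config = LOLAConfig(num_experts=16, topk=1)

    # Generic Transformers names are aliased onto the GPT-2 style fields.
    assert config.hidden_size == config.n_embd == 2048
    assert config.num_hidden_layers == config.n_layer == 24

    # Serialize to config.json and load it back; the MoE fields persist.
    config.save_pretrained("lola-config")
    reloaded = LOLAConfig.from_pretrained("lola-config")
    assert reloaded.num_experts == 16 and reloaded.topk == 1

    # Optionally, register the class so AutoConfig can resolve the custom
    # model_type "lola_v1":
    #     from transformers import AutoConfig
    #     AutoConfig.register("lola_v1", LOLAConfig)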