# coding=utf-8
# Copyright 2023 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
# Copyright 2023 Cerebras Systems.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""JAIS configuration"""

from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging

logger = logging.get_logger(__name__)


class JAISConfig(PretrainedConfig):
    """
    This is the configuration class to store the configuration of a
    [`JAISModel`]. It is used to instantiate a JAIS model according to the
    specified arguments, defining the model architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used
    to control the model outputs. Read the documentation from
    [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 50257):
            Vocabulary size of the JAIS model. Defines the number of different
            tokens that can be represented by the `inputs_ids` passed when
            calling [`JAISModel`].
        n_positions (`int`, *optional*, defaults to 1024):
            The maximum sequence length that this model might ever be used
            with. Typically set this to something large just in case
            (e.g., 512 or 1024 or 2048).
        n_embd (`int`, *optional*, defaults to 768):
            Dimensionality of the embeddings and hidden states.
        n_layer (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        n_head (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the
            Transformer encoder.
        n_inner (`int`, *optional*, defaults to `None`):
            Dimensionality of the inner feed-forward layers. `None` will set
            it to 4 times `n_embd`.
        activation_function (`str`, *optional*, defaults to `"gelu_new"`):
            Activation function, to be selected in the list
            `["relu", "silu", "gelu", "tanh", "gelu_new", "swiglu"]`.
        resid_pdrop (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in
            the embeddings, encoder, and pooler.
        embd_pdrop (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the embeddings.
        attn_pdrop (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention.
        layer_norm_epsilon (`float`, *optional*, defaults to 1e-5):
            The epsilon to use in the layer normalization layers.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for
            initializing all weight matrices.
        scale_attn_weights (`bool`, *optional*, defaults to `True`):
            Scale attention weights by dividing by `sqrt(hidden_size)`.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values
            attentions (not used by all models).
        scale_attn_by_inverse_layer_idx (`bool`, *optional*, defaults to `False`):
            Whether to additionally scale attention weights by
            `1 / (layer_idx + 1)`.
        reorder_and_upcast_attn (`bool`, *optional*, defaults to `False`):
            Whether to scale keys (K) prior to computing attention
            (dot-product) and upcast attention dot-product/softmax to float()
            when training with mixed precision.
        position_embedding_type (`str`, *optional*, defaults to `"learned"`):
            Positional embedding can be either `"alibi"` or `"learned"`.
        mup_width_scale (`float`, *optional*, defaults to 1.0):
            muP parameter to scale learning rate and initializers. Calculated
            as (`d_model,0 / d_model`), where `d_model` is the model's width
            and `d_model,0` is the proxy model's width (see the worked example
            below the argument list).
        mup_embeddings_scale (`float`, *optional*, defaults to 1.0):
            muP parameter to scale token and position embeddings.
        mup_output_alpha (`float`, *optional*, defaults to 1.0):
            muP parameter to scale output logits
            (`output_logits_scale = mup_output_alpha * mup_width_scale`).
        mup_scale_qk_dot_by_d (`bool`, *optional*, defaults to `False`):
            Scale attention weights by dividing by `hidden_size` instead of
            `sqrt(hidden_size)`. Requires `scale_attn_weights` to be set to
            `True` as well.
        alibi_scaling (`Dict`, *optional*):
            Dictionary containing the scaling configuration for ALiBi
            embeddings. Currently only supports the linear scaling strategy.
            Can specify either the scaling `factor` (must be a float greater
            than 1) for fixed scaling, or `train_seq_len` for dynamic scaling
            on input samples with sequence length > `train_seq_len`. The
            expected formats are `{"type": strategy name, "factor": scaling factor}`
            or `{"type": strategy name, "train_seq_len": training sequence length}`,
            for instance `{"type": "linear", "factor": 2.0}`.
        architectures (`List`, *optional*, defaults to `["JAISLMHeadModel"]`):
            Architecture names for JAIS.
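
    For example (hypothetical values, not taken from a released JAIS
    checkpoint): a target model of width `n_embd = 2048` tuned with a proxy
    model of width 256 would use `mup_width_scale = 256 / 2048 = 0.125`, and
    its output logits would then be multiplied by
    `output_logits_scale = mup_output_alpha * 0.125`.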

    Example:

    ```python
    >>> from transformers import JAISConfig, JAISModel

    >>> # Initializing a JAIS configuration
    >>> configuration = JAISConfig()

    >>> # Initializing a model (with random weights) from the configuration
    >>> model = JAISModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "jais"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
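    # The map above lets generic Hugging Face names such as `config.hidden_size`
    # or `config.num_hidden_layers` resolve to the GPT-2-style attributes
    # (`n_embd`, `n_layer`, ...) used throughout this configuration.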

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        position_embedding_type="learned",
        mup_width_scale=1.0,
        mup_embeddings_scale=1.0,
        mup_output_alpha=1.0,
        mup_scale_qk_dot_by_d=False,
        alibi_scaling=None,
        architectures=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.position_embedding_type = position_embedding_type
        self.mup_width_scale = mup_width_scale
        self.mup_embeddings_scale = mup_embeddings_scale
        self.mup_output_alpha = mup_output_alpha
        self.mup_scale_qk_dot_by_d = mup_scale_qk_dot_by_d

        self.alibi_scaling = alibi_scaling
        self._alibi_scaling_validation()

        if architectures is None:
            architectures = ["JAISLMHeadModel"]

        super().__init__(
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            architectures=architectures,
            **kwargs,
        )

    def _alibi_scaling_validation(self):
        """
        Validate the `alibi_scaling` configuration.
        """
        if self.alibi_scaling is None:
            return

        # Must be a dict with exactly two fields: `type` plus either `factor`
        # or `train_seq_len`.
        if not isinstance(self.alibi_scaling, dict) or len(self.alibi_scaling) != 2:
            raise ValueError(
                "`alibi_scaling` must be a dictionary with two fields, "
                "`type` and `factor` or `type` and `train_seq_len`, "
                f"got {self.alibi_scaling}"
            )
        alibi_scaling_type = self.alibi_scaling.get("type", None)
        alibi_scaling_factor = self.alibi_scaling.get("factor", None)
        alibi_dynamic_scaling = self.alibi_scaling.get("train_seq_len", None)
        if alibi_scaling_type is None or alibi_scaling_type != "linear":
            raise ValueError(
                "`alibi_scaling`'s type field must be 'linear', "
                f"got {alibi_scaling_type}"
            )
        if alibi_scaling_factor is not None and (
            not isinstance(alibi_scaling_factor, float) or alibi_scaling_factor <= 1.0
        ):
            raise ValueError(
                "`alibi_scaling`'s factor field must be a float > 1.0, "
                f"got {alibi_scaling_factor}"
            )
        if alibi_dynamic_scaling is not None and (
            not isinstance(alibi_dynamic_scaling, int) or alibi_dynamic_scaling <= 1
        ):
            raise ValueError(
                "`alibi_scaling`'s `train_seq_len` field must be an "
                f"integer > 1, got {alibi_dynamic_scaling}"
            )