# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

from __future__ import annotations

import torch.nn as nn

from fairseq2.models.opt.config import OPTConfig
from fairseq2.models.transformer import (
    CausalAttentionBias,
    FeedForwardNetwork,
    LocalAttentionStateFactory,
    MultiheadAttention,
    StandardFeedForwardNetwork,
    StandardMultiheadAttention,
    TransformerEmbeddingFrontend,
    TransformerFrontend,
    TransformerNormOrder,
    create_default_sdpa,
)
from fairseq2.models.transformer_lm import (
    StandardTransformerLMDecoder,
    StandardTransformerLMDecoderLayer,
    TransformerLM,
    TransformerLMDecoder,
    TransformerLMDecoderLayer,
)
from fairseq2.nn import (
    Embedding,
    LayerNorm,
    LearnedPositionEncoder,
    Linear,
    PositionEncoder,
    Projection,
    StandardEmbedding,
    StandardLayerNorm,
)


def create_opt_model(config: OPTConfig) -> TransformerLM:
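    """Create an OPT language model as described by ``config``."""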
    return OPTFactory(config).create_model()


class OPTFactory:
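    """Builds OPT language models (Zhang et al., 2022) as described by an
    :class:`OPTConfig`."""
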
    def __init__(self, config: OPTConfig) -> None:
        self._config = config

    def create_model(self) -> TransformerLM:
        config = self._config

        decoder_frontend = self.create_decoder_frontend()

        decoder = self.create_decoder()

        final_proj = self.create_final_projection()

        return TransformerLM(
            config.model_dim,
            decoder_frontend,
            decoder,
            final_proj,
            config.pad_idx,
            config.max_seq_len,
        )

    def create_decoder_frontend(self) -> TransformerFrontend:
        config = self._config

        embed = self.create_embedding()

        pos_encoder = self.create_position_encoder()

        return TransformerEmbeddingFrontend(
            config.model_dim,
            embed,
            pos_encoder=pos_encoder,
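            # OPT does not scale token embeddings by sqrt(model_dim).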
            no_scale=True,
            # dropout_p=config.dropout_p, # TODO: check if there is dropout here
        )

    def create_embedding(self) -> Embedding:
        config = self._config

        return StandardEmbedding(config.vocab_size, config.model_dim, config.pad_idx)

    def create_decoder(self) -> TransformerLMDecoder:
        config = self._config

        layers = []

        for _ in range(config.num_layers):
            layer = self.create_decoder_layer()

            layers.append(layer)

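        # A final layer norm is applied to the output of the last decoder layer.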
        layer_norm = self.create_layer_norm()

        return StandardTransformerLMDecoder(layers, layer_norm)

    def create_position_encoder(self) -> PositionEncoder:
        config = self._config

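        # OPT uses learned position embeddings; `_legacy_pad_idx=1` reproduces
        # the fairseq/metaseq behavior of offsetting positions past the padding
        # index, matching the original OPT checkpoints.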
        return LearnedPositionEncoder(
            config.model_dim, config.max_seq_len, _legacy_pad_idx=1
        )

    def create_decoder_layer(self) -> TransformerLMDecoderLayer:
        config = self._config

        self_attn = self.create_self_attention()

        self_attn_layer_norm = self.create_layer_norm()

        ffn = self.create_ffn()

        ffn_layer_norm = self.create_layer_norm()

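        # Layer norm is applied before each sub-layer (pre-LN ordering).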
        return StandardTransformerLMDecoderLayer(
            self_attn,
            self_attn_layer_norm,
            ffn,
            ffn_layer_norm,
            norm_order=TransformerNormOrder.PRE,
            dropout_p=config.dropout_p,
        )

    def create_self_attention(self) -> MultiheadAttention:
        config = self._config

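        # Causal attention, optionally restricted to a local window of
        # `attn_window_len` positions; the state factory keeps only that
        # window in the incremental decoding cache.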
        attn_bias = CausalAttentionBias(attn_window_len=config.attn_window_len)

        sdpa = create_default_sdpa(attn_bias)

        state_factory = LocalAttentionStateFactory(config.attn_window_len)

        return StandardMultiheadAttention(
            config.model_dim,
            config.num_attn_heads,
            sdpa,
            num_key_value_heads=config.num_key_value_heads,
            bias=True,
            state_factory=state_factory,
        )

    def create_ffn(self) -> FeedForwardNetwork:
        config = self._config

        return StandardFeedForwardNetwork(
            config.model_dim, config.ffn_inner_dim, bias=True
        )

    def create_layer_norm(self) -> LayerNorm:
        config = self._config

        return StandardLayerNorm(config.model_dim, bias=True)

    def create_final_projection(self) -> Projection:
        config = self._config

        return Linear(
            config.model_dim,
            config.vocab_size,
            bias=False,
            init_fn=_init_final_projection,
        )


def _init_final_projection(proj: Linear) -> None:
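    # Initialize the output projection with std 1/sqrt(model_dim); zero the
    # bias if one is present.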
    nn.init.normal_(proj.weight, std=proj.input_dim**-0.5)

    if proj.bias is not None:
        nn.init.zeros_(proj.bias)