Implement Qwen-1.5 via megatron-core (#158)
Co-authored-by: 同润 <[email protected]>
1 parent bd50dbf, commit 2087710
Showing 45 changed files with 4,816 additions and 495 deletions.
@@ -0,0 +1,169 @@ (new file: Qwen1.5 megatron-core evaluation script)
# Copyright (c) 2023 Alibaba PAI and Nvidia Megatron-LM Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch
from typing import Union
from megatron.core.enums import ModelType
import megatron.model
from megatron import get_args
from megatron import print_rank_0
from megatron.core import parallel_state, tensor_parallel
from megatron.core.pipeline_parallel.p2p_communication import recv_forward
from megatron.core.pipeline_parallel.p2p_communication import send_forward
from megatron.initialize import initialize_megatron
from megatron.utils import unwrap_model
from megatron.utils import get_ltor_masks_and_position_ids
from megatron.arguments import core_transformer_config_from_args
from megatron.checkpointing import load_checkpoint

from megatron_patch.training import get_model
from megatron_patch.data import build_evaluation_dataset
from megatron_patch.finetune_utils import build_data_loader
from megatron_patch.model.qwen1_5.layer_specs import get_gpt_layer_with_transformer_engine_spec
from megatron_patch.model.qwen1_5.model import GPTModel
from megatron_patch.tokenizer import get_tokenizer, build_tokenizer
from megatron_patch.arguments import get_patch_args
from megatron_patch.data.utils import get_batch_on_this_tp_rank_original

import torch._dynamo
torch._dynamo.config.suppress_errors = True


def get_model_provider():
    def model_provider(
        pre_process=True, post_process=True
    ) -> Union[GPTModel, megatron.model.GPTModel]:
        args = get_args()
        build_tokenizer(args)
        config = core_transformer_config_from_args(get_args())

        transformer_layer_spec = get_gpt_layer_with_transformer_engine_spec(args.num_experts, args.moe_grouped_gemm)
        model = GPTModel(
            config=config,
            transformer_layer_spec=transformer_layer_spec,
            vocab_size=args.padded_vocab_size,
            max_sequence_length=args.max_position_embeddings,
            pre_process=pre_process,
            post_process=post_process,
            fp16_lm_cross_entropy=args.fp16_lm_cross_entropy,
            parallel_output=True,
            share_embeddings_and_output_weights=not args.untie_embeddings_and_output_weights,
            position_embedding_type=args.position_embedding_type,
            rotary_percent=args.rotary_percent,
            rotary_base=args.rotary_base,
            seq_len_interpolation_factor=args.rotary_seq_len_interpolation_factor
        )

        return model

    return model_provider


def forward_step(batch, model):
    """Forward step."""

    batch = get_batch_on_this_tp_rank_original(batch)
    tokens = batch['tokens']
    labels = batch['labels']
    position_ids = batch["position_ids"]
    attention_mask = batch["attention_mask"]
    loss_mask = batch['loss_mask']

    # Tell the model what our actual batch size will be.
    args = get_args()
    args.micro_batch_size = len(labels)
    config = core_transformer_config_from_args(args)
    tensor_shape = (args.seq_length, args.micro_batch_size, args.hidden_size)
    input_tensor = recv_forward(tensor_shape, config)

    # Forward pass through the model.
    unwrapped_model = unwrap_model(model)
    unwrapped_model.set_input_tensor(input_tensor)
    output = model(tokens, position_ids, attention_mask)
    send_forward(output, config)

    # Only the last pipeline stage produces logits (last dim == vocab size);
    # intermediate stages return hidden states of width args.hidden_size.
    if output.shape[-1] != args.hidden_size:
        loss_mask = loss_mask.view(-1).float()
        # For loss, return the unreduced loss.
        losses = tensor_parallel.vocab_parallel_cross_entropy(
            output.contiguous().float(), labels.contiguous())
        loss = torch.sum(
            losses.view(-1) * loss_mask.contiguous().view(-1).float()) / loss_mask.sum()
        print_rank_0(loss)
        return loss

    return None


def evaluate(data_loader, model):
    """Evaluation."""
    args = get_args()

    # Turn on evaluation mode which disables dropout.
    model.eval()

    total_output = 0.0
    with torch.no_grad():
        # For all the batches in the dataset.
        for iteration, batch in enumerate(data_loader):
            if iteration % args.log_interval == 0:
                print_rank_0('> working on iteration: {}'.format(iteration))
            # Forward evaluation.
            output = forward_step(batch, model)

            # Reduce across processes.
            if parallel_state.is_pipeline_last_stage():
                torch.distributed.all_reduce(
                    output, group=parallel_state.get_data_parallel_group())

                total_output += output

    return total_output


def main():
    """Main program."""
    args = get_args()
    if args.num_layers_per_virtual_pipeline_stage is not None:
        print('Interleaved pipeline schedule '
              'is not yet supported for text generation.')
        exit()

    # Data stuff.
    dataset = build_evaluation_dataset(args.dataset)
    dataloader = build_data_loader(dataset,
                                   args.micro_batch_size,
                                   args.num_workers,
                                   drop_last=False)

    # Set up model and load checkpoint.
    model = get_model(get_model_provider(),
                      model_type=ModelType.encoder_or_decoder,
                      wrap_with_ddp=False)

    if args.load is not None:
        load_checkpoint(model, None, None)

    assert len(model) == 1, 'Above condition should have caught this'
    model = model[0]

    # Run evaluation.
    evaluate(dataloader, model)
    print_rank_0('done :-)')


if __name__ == '__main__':
    initialize_megatron(extra_args_provider=get_patch_args)
    main()
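Note on the loss above: forward_step computes a masked mean, weighting per-token cross-entropy losses by loss_mask (1.0 for real tokens, 0.0 for padding) and dividing by the number of unmasked tokens. Below is a minimal, single-process sketch of that computation; it uses torch.nn.functional.cross_entropy as a stand-in for tensor_parallel.vocab_parallel_cross_entropy (which produces the same per-token losses but with the vocabulary dimension sharded across tensor-parallel ranks), and the shapes are made up for illustration.

# Standalone sketch (not part of the commit): masked-mean loss as in forward_step.
import torch
import torch.nn.functional as F

seq_len, micro_batch, vocab = 8, 2, 32
logits = torch.randn(seq_len, micro_batch, vocab)         # [s, b, v], Megatron layout
labels = torch.randint(0, vocab, (seq_len, micro_batch))  # [s, b]
loss_mask = torch.ones(seq_len, micro_batch)              # 0.0 marks padding positions
loss_mask[-2:, :] = 0.0                                   # pretend the last two positions are padding

# Per-token losses, then an average over unmasked tokens only.
losses = F.cross_entropy(logits.reshape(-1, vocab), labels.reshape(-1), reduction='none')
loss = torch.sum(losses * loss_mask.reshape(-1)) / loss_mask.sum()
print(loss)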
@@ -0,0 +1,126 @@ (new file: Qwen1.5 megatron-core pretraining script)
# Copyright (c) 2023 Alibaba PAI and Nvidia Megatron-LM Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from functools import partial
import torch
import os
from typing import Union
import megatron.model
from megatron.core.enums import ModelType
from megatron.arguments import core_transformer_config_from_args
from megatron import get_args
from megatron.core import tensor_parallel
from megatron.utils import average_losses_across_data_parallel_group
from megatron.training import pretrain

from megatron_patch.model.qwen1_5.layer_specs import get_gpt_layer_with_transformer_engine_spec
from megatron_patch.data.utils import get_batch_on_this_tp_rank_original
from megatron_patch.data import \
    build_pretrain_dataset_from_original, build_pretrain_dataset_from_idxmap
from megatron_patch.model.qwen1_5.model import GPTModel
from megatron_patch.tokenizer import get_tokenizer, build_tokenizer
from megatron_patch.arguments import get_patch_args

import torch._dynamo
torch._dynamo.config.suppress_errors = True


def model_provider(
    pre_process=True, post_process=True
) -> Union[GPTModel, megatron.model.GPTModel]:

    args = get_args()
    build_tokenizer(args)
    config = core_transformer_config_from_args(get_args())

    transformer_layer_spec = get_gpt_layer_with_transformer_engine_spec(args.num_experts, args.moe_grouped_gemm)
    model = GPTModel(
        config=config,
        transformer_layer_spec=transformer_layer_spec,
        vocab_size=args.padded_vocab_size,
        max_sequence_length=args.max_position_embeddings,
        pre_process=pre_process,
        post_process=post_process,
        fp16_lm_cross_entropy=args.fp16_lm_cross_entropy,
        parallel_output=True,
        share_embeddings_and_output_weights=not args.untie_embeddings_and_output_weights,
        position_embedding_type=args.position_embedding_type,
        rotary_percent=args.rotary_percent,
        rotary_base=args.rotary_base,
        seq_len_interpolation_factor=args.rotary_seq_len_interpolation_factor
    )

    return model


def forward_step(data_iterator, model):
    args = get_args()
    batch = get_batch_on_this_tp_rank_original(data_iterator)
    tokens = batch['tokens']
    labels = batch['labels']
    position_ids = batch["position_ids"]
    attention_mask = batch["attention_mask"]
    loss_mask = batch['loss_mask']
    logits = model(input_ids=tokens,
                   position_ids=position_ids,
                   attention_mask=attention_mask)

    if args.enable_parallel_output:
        # Vocab-sharded logits: use the tensor-parallel cross entropy and a
        # masked mean over real (non-padding) tokens.
        def loss_func(loss_mask, logits):
            losses = tensor_parallel.vocab_parallel_cross_entropy(
                logits.contiguous().float(), labels.contiguous())
            loss_mask = loss_mask.view(-1).float()
            loss = torch.sum(losses.view(-1) * loss_mask) / loss_mask.sum()
            averaged_loss = average_losses_across_data_parallel_group([loss])
            return loss, {'lm loss': averaged_loss[0]}
    else:
        # Gathered logits: plain cross entropy, with label -100 ignored.
        def loss_func(loss_mask, logits):
            ce = torch.nn.CrossEntropyLoss(ignore_index=-100)
            loss = ce(torch.squeeze(logits).contiguous().float(), torch.squeeze(labels))
            averaged_loss = average_losses_across_data_parallel_group([loss])
            return loss, {'lm loss': averaged_loss[0]}

    return logits, partial(loss_func, loss_mask)


def train_valid_test_datasets_provider(train_val_test_num_samples):
    """Build train, valid, and test datasets."""
    args = get_args()

    if os.path.isfile(args.train_data_path[0]):
        train_ds, valid_ds, test_ds = \
            build_pretrain_dataset_from_original(args.dataset)
    else:
        train_ds, valid_ds, test_ds = \
            build_pretrain_dataset_from_idxmap(
                data_prefix=args.train_data_path,
                max_padding_length=args.max_padding_length,
                dataset_type=args.dataset,
                splits_string=args.split,
                train_valid_test_num_samples=train_val_test_num_samples,
                seed=args.seed,
                skip_warmup=(not args.mmap_warmup)
            )

    return train_ds, valid_ds, test_ds


if __name__ == "__main__":
    train_valid_test_datasets_provider.is_distributed = True
    pretrain(
        train_valid_test_datasets_provider,
        model_provider,
        ModelType.encoder_or_decoder,
        forward_step,
        extra_args_provider=get_patch_args,
    )
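For orientation, megatron.training.pretrain drives the full training loop: it calls train_valid_test_datasets_provider to build datasets, model_provider to build the (possibly pipeline-split) model, and forward_step once per micro-batch, expecting back the model output together with a callable that turns that output into (loss, stats). The functools.partial above binds loss_mask so the schedule only has to supply the logits later. The toy, single-process sketch below illustrates that contract; the tiny stand-in model and batch are invented for the example and are not part of the Megatron API.

# Standalone sketch of the (output, loss_func) contract used by forward_step above.
from functools import partial
import torch

def toy_forward_step(batch, model):
    logits = model(batch['tokens'])                      # [b, s, vocab]

    def loss_func(loss_mask, logits):
        losses = torch.nn.functional.cross_entropy(
            logits.reshape(-1, logits.size(-1)),
            batch['labels'].reshape(-1), reduction='none')
        loss = (losses * loss_mask.reshape(-1)).sum() / loss_mask.sum()
        return loss, {'lm loss': loss.detach()}

    # loss_mask is bound now; the training loop calls loss_fn(logits) later.
    return logits, partial(loss_func, batch['loss_mask'])

# Minimal usage with a stand-in model and batch.
vocab = 100
model = torch.nn.Sequential(torch.nn.Embedding(vocab, 16), torch.nn.Linear(16, vocab))
batch = {
    'tokens': torch.randint(0, vocab, (2, 8)),
    'labels': torch.randint(0, vocab, (2, 8)),
    'loss_mask': torch.ones(2, 8),
}
logits, loss_fn = toy_forward_step(batch, model)
loss, stats = loss_fn(logits)
print(stats)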