@@ -153,7 +153,7 @@ def train(model, train_dataloader,eval_dataloader, tokenizer, optimizer, lr_sche
         print(f"Max CUDA memory reserved was {memtrace.max_reserved} GB")
         print(f"Peak active CUDA memory was {memtrace.peak_active_gb} GB")
         print(f"Cuda Malloc retires : {memtrace.cuda_malloc_retires}")
-        print(f"CPU Total Peak Memory consumed during the train (max): {memtrace.cpu_peaked + memtrace.cpu_begin} GB")
+        print(f"CPU Total Peak Memory consumed during the train (max): {memtrace.cpu_peaked + memtrace.cpu_begin} GB")

         # Update the learning rate as needed
         lr_scheduler.step()
@@ -445,4 +445,4 @@ def save_train_params(train_config, fsdp_config, rank):
     with open(file_name, 'w') as f:
         f.write(config_yaml)
     if rank==0:
-        print(f"training params are saved in {file_name}")
+        print(f"training params are saved in {file_name}")
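
Note: the memtrace object referenced in the first hunk is a memory-tracking context manager wrapped around the training loop. The code below is only an illustrative sketch of how such a tracker could populate the fields printed above (max_reserved, peak_active_gb, cuda_malloc_retires, cpu_begin, cpu_peaked); the class name, helper, and exact bookkeeping are assumptions, not the repository's actual implementation.

    # Hypothetical sketch of a memory-tracking context manager; only the exposed
    # field names are taken from the diff above, everything else is illustrative.
    import gc

    import psutil
    import torch


    def bytes_to_gb(num_bytes):
        # Convert a raw byte count to gigabytes for readable logging.
        return round(num_bytes / 2**30, 2)


    class MemoryTraceSketch:
        def __enter__(self):
            gc.collect()
            torch.cuda.empty_cache()
            torch.cuda.reset_peak_memory_stats()
            # Baseline CPU RSS (in GB) at the start of the traced region.
            self.cpu_begin = bytes_to_gb(psutil.Process().memory_info().rss)
            return self

        def __exit__(self, exc_type, exc_value, traceback):
            gc.collect()
            stats = torch.cuda.memory_stats()
            # Peak memory reserved by the CUDA caching allocator, in GB.
            self.max_reserved = bytes_to_gb(torch.cuda.max_memory_reserved())
            # Peak actively allocated CUDA memory, in GB.
            self.peak_active_gb = bytes_to_gb(stats.get("active_bytes.all.peak", 0))
            # Number of times the allocator had to retry after freeing cached blocks.
            self.cuda_malloc_retires = stats.get("num_alloc_retries", 0)
            # Growth of CPU RSS (in GB) over the traced region.
            self.cpu_peaked = bytes_to_gb(psutil.Process().memory_info().rss) - self.cpu_begin

Under that assumption, the prints in the hunk would be used as `with MemoryTraceSketch() as memtrace: ...` followed by the print statements once the block exits.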