
revert gradient loss detachment

abhilash1910 1 year ago
commit ace8b55b1b
1 changed file with 0 additions and 1 deletion

src/llama_recipes/utils/train_utils.py  +0 −1

@@ -83,7 +83,6 @@ def train(model, train_dataloader,eval_dataloader, tokenizer, optimizer, lr_sche
                 loss = model(**batch).loss
                 loss = loss / gradient_accumulation_steps
                 total_loss += loss.detach().float()
-                loss = torch.autograd.Variable(loss, required_grad = True)
                 if train_config.use_fp16:
                     # if fp16 is enabled, use gradient scaler to handle gradient update
                     scaler.scale(loss).backward()
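
For context (not part of the commit): re-creating the loss as a fresh leaf tensor, which is roughly what the removed re-wrapping line was doing (note its misspelled `required_grad` keyword), cuts the loss off from the autograd graph, so `backward()` no longer produces gradients for the model parameters. The minimal sketch below uses a hypothetical toy model and data, not code from llama_recipes, to illustrate the effect and the behavior the revert restores.

import torch
import torch.nn as nn

# Toy setup (hypothetical, for illustration only).
model = nn.Linear(4, 1)
batch = torch.randn(8, 4)
target = torch.randn(8, 1)

loss = nn.functional.mse_loss(model(batch), target)

# Rebuilding the loss as a new leaf tensor severs it from the graph
# that produced it, so no gradients can flow back into the model.
detached_loss = torch.tensor(loss.item(), requires_grad=True)
detached_loss.backward()
print(model.weight.grad)  # None: no gradient reached the model parameters

# Calling backward() on the original loss, as the reverted code does,
# populates the parameter gradients as expected.
loss.backward()
print(model.weight.grad)  # a filled-in gradient tensor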