import os
import time

import tensorflow as tf
import tensorflow_datasets as tfds
import timm
import torch
from accelerate import Accelerator
from timm.optim import create_optimizer_v2
from torchinfo import summary  # assumed source of `summary`; adjust if a different summary helper was intended
from torchmetrics import Accuracy

# `args` and `wandb_args` are assumed to be defined elsewhere (e.g. via argparse / a config dict)
accelerator = Accelerator(log_with='wandb')
device = accelerator.device

accelerator.init_trackers("SUMO", config=args, init_kwargs=wandb_args)

# Helpful funcs
def save_model(accelerator, model_to_save, model_save_path):
    state = accelerator.get_state_dict(model_to_save)  # unwraps the model before extracting the state dict
    accelerator.save(state, model_save_path)
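
# A minimal counterpart sketch (not part of the original script): rebuild the timm model
# and load a state dict saved by save_model above. The function name and defaults here
# are illustrative assumptions.
def load_model(model_name, model_save_path, num_classes=51, in_chans=6):
    model = timm.create_model(model_name, pretrained=False, num_classes=num_classes, in_chans=in_chans)
    state = torch.load(model_save_path, map_location='cpu')
    model.load_state_dict(state)
    return model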

#==============================
# MODEL SETUP
#==============================
input_context = tf.distribute.InputContext(
    input_pipeline_id=1,  # Worker id
    num_input_pipelines=16,  # Total number of workers
)
read_config = tfds.ReadConfig(
    input_context=input_context,
)

dataset = tfds.load(name='dataset_', data_dir='s3://...', as_supervised=True, read_config=read_config)
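
# Hedged sketch (not used above): in a multi-process `accelerate launch` run the
# InputContext would typically be derived from the Accelerator instead of being
# hard-coded, so that each process reads its own shard of the dataset.
def make_read_config(acc):
    ctx = tf.distribute.InputContext(
        input_pipeline_id=acc.process_index,    # this process's shard index
        num_input_pipelines=acc.num_processes,  # total number of processes
    )
    return tfds.ReadConfig(input_context=ctx)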

def main():
    model = timm.create_model(args.model_name, pretrained=args.pretrained, num_classes=51, in_chans=6).to(device)
    # Log a model summary (channels-first input shape, matching the BHWC->BCHW permute below)
    model_summary = summary(model, input_size=(args.batch_size, 6, *args.input_shape))
    accelerator.print(model_summary)

    optimizer = create_optimizer_v2(lr=args.lr, opt=args.optimizer, weight_decay=args.weight_decay, model_or_params=model)
    # Cosine LR schedule over the run; stepped once per epoch at the end of each training epoch
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, args.epochs - 1)

    loss_function = torch.nn.CrossEntropyLoss() # loss function

    train_ds = dataset['train'].unbatch().batch(args.batch_size).prefetch(16)
    val_ds = dataset['test'].unbatch().batch(args.batch_size)

    # Convert the TF datasets to NumPy iterators, consumed like dataloaders below
    train_loader = tfds.as_numpy(train_ds)
    val_loader = tfds.as_numpy(val_ds)
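
    # Hedged convenience sketch (not wired into the loops below): both loops convert each
    # numpy batch to channels-first float tensors inline; a generator like this could
    # factor that conversion out.
    def to_torch_batches(numpy_iter):
        for np_inputs, np_targets in numpy_iter:
            t_inputs = torch.from_numpy(np_inputs).to(device, non_blocking=True)
            t_targets = torch.from_numpy(np_targets).to(device, non_blocking=True)
            yield t_inputs.permute(0, 3, 1, 2).float(), t_targets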

    # Accelerate: wraps the model, optimizer and scheduler; the tfds numpy iterators are not
    # torch DataLoaders, so prepare() passes them through unchanged (sharding is handled by
    # the tfds InputContext above)
    model, optimizer, train_loader, val_loader, scheduler = accelerator.prepare(model, optimizer, train_loader, val_loader, scheduler)

    train_metric_accuracy = Accuracy().to(device)
    val_metric_accuracy = Accuracy().to(device)
    val_top_k = Accuracy(top_k=5).to(device)
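    # Note (library-version assumption): with torchmetrics >= 0.11 the task must be given
    # explicitly, e.g. Accuracy(task='multiclass', num_classes=51) and
    # Accuracy(task='multiclass', num_classes=51, top_k=5); the bare constructors above
    # match the older API.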
    # Training loop
    for epoch in range(args.epochs):
        accelerator.print(f'Starting epoch {epoch}')
        # Train phase
        model.train()
        for idx, batch in enumerate(train_loader):
            # Time each step
            start = time.time()
            optimizer.zero_grad()
            # Unpack the numpy batch and move it to the device
            inputs, targets = batch[0], batch[1]
            inputs, targets = torch.from_numpy(inputs).to(device, non_blocking=True), torch.from_numpy(targets).to(device, non_blocking=True)
            # Convert BHWC to BCHW for PyTorch (permute returns a view, it is not in-place)
            inputs = inputs.permute(0, 3, 1, 2).float()
            logits = model(inputs)

            loss = loss_function(logits, targets)
            accelerator.backward(loss)

            optimizer.step()

            if idx % args.log_frequency == 0:
                accuracy = train_metric_accuracy(logits, torch.argmax(targets, dim=1))
                accelerator.print('Epoch: {}, Step: {}, Loss: {} , Acc: {} | Time taken: {}'.format(epoch, idx, loss.item(), accuracy, time.time() - start))
                accelerator.log({'acc': accuracy, 'Epoch': epoch, 'train_loss': loss.item(), 'time_per_n_step': time.time() - start}, step=idx)
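                # Note: `step=idx` restarts from 0 each epoch; wandb expects a monotonically
                # increasing step, so a global counter (e.g. epoch * steps_per_epoch + idx,
                # where steps_per_epoch is illustrative and not defined here) is usually
                # preferable for per-step logging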

        # Step the cosine schedule once per epoch (T_max above is measured in epochs)
        scheduler.step()

        total_train_accuracy = train_metric_accuracy.compute()
        accelerator.print(f"\n{'='*50}\nTraining acc for epoch {epoch}: {total_train_accuracy}\n{'='*50}")
        accelerator.log({'epoch_end_train_acc': total_train_accuracy, 'Epoch': epoch})

        # Validation loop
        if epoch % args.val_frequency == 0:
            model.eval()
            for idx, batch in enumerate(val_loader):
                with torch.no_grad():
                    # Gradient computation is disabled for validation
                    inputs, targets = torch.from_numpy(batch[0]).to(device), torch.from_numpy(batch[1]).to(device)
                    # Convert BHWC to BCHW for PyTorch
                    inputs = inputs.permute(0, 3, 1, 2).float()

                    logits = model(inputs)

                    loss = loss_function(logits, targets)

                    val_metric_accuracy.update(logits, torch.argmax(targets, dim=1))
                    val_top_k.update(logits, torch.argmax(targets, dim=1))

                    if idx % args.log_frequency == 0:
                        running_val_acc = val_metric_accuracy.compute()
                        running_top_k = val_top_k.compute()
                        accelerator.log({'val_acc': running_val_acc, f'Val_Top-{val_top_k.top_k}': running_top_k, 'Epoch': epoch, 'val_loss': loss.item()}, step=idx)
                        accelerator.print(f'val_acc: {running_val_acc} | val_top-{val_top_k.top_k}: {running_top_k} | val_loss: {loss.item()}')

            # Calculate validation metrics for the epoch (only on epochs where validation ran)
            total_val_accuracy = val_metric_accuracy.compute()
            accelerator.print(f"\n{'-'*50}\nValidation acc for epoch {epoch}: {total_val_accuracy}\n{'-'*50}")
            accelerator.log({'final_val_acc': total_val_accuracy, f'final_top_{val_top_k.top_k}_acc': val_top_k.compute(), 'Epoch': epoch})

        # Save a checkpoint locally every other epoch
        if epoch % 2 == 0:
            os.makedirs('./checkpoints', exist_ok=True)
            chkp_path = f'./checkpoints/{args.model_name}_{epoch}.pth'
            save_model(accelerator, model, chkp_path)

        # Reset metrics for the next epoch
        train_metric_accuracy.reset()
        val_metric_accuracy.reset()
        val_top_k.reset()

if __name__ == '__main__':
    # Executing everything
    main()
    accelerator.end_training()
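
# Typical invocation (sketch): this script is intended to be launched with the Accelerate
# CLI so each process gets its own rank; the script name and flag names below are
# illustrative and depend on the argparse setup that defines `args`:
#   accelerate launch train.py --model_name resnet50 --batch_size 64 --epochs 30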