Main script

Some parts have been removed to keep only the relevant code snippets.
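
The imports below are a best-guess reconstruction of what the snippets rely on (the original import block was among the removed parts); the args object (presumably from argparse) and the summary helper also come from the removed code.

import time

import tensorflow as tf
import tensorflow_datasets as tfds
import timm
import torch
import torch.distributed as dist
import wandb
from timm.optim import create_optimizer_v2
from torch.cuda.amp import GradScaler, autocast
from torch.nn.parallel import DistributedDataParallel as DDP
from torchmetrics import Accuracy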

class tfds_ds(torch.utils.data.IterableDataset):
    def __init__(self, subset, nodes, rank):
        self.subset = subset
        self.total_nodes = nodes
        self.rank = rank

        assert self.subset is not None

        # shard across workers, re-batch to the training batch size and prefetch
        self.ds = (
            dataset.as_dataset(split=self.subset, as_supervised=True)
            .shard(self.total_nodes, self.rank)
            .unbatch()
            .batch(args.batch_size)
            .prefetch(32)
        )

        self.dataset = tfds.as_numpy(self.ds)

    def to_ten(self, tensor):
        return torch.from_numpy(tensor)

    def __len__(self):
        # NOTE: hard-coded dataset size; the same value is used for both splits
        # and is not divided by the number of shards
        return 45491349 // args.batch_size

    def __iter__(self):
        for image, label in self.dataset:
            yield self.to_ten(image), self.to_ten(label)

class wandb_logger():
    def __init__(self, args):
        self.wandb_args = {'entity': 'neel', 'name': args.model_name, 'config': args, 'magic': True, 'group':args.group_name, 'project': 'SUMO'}
        self.rank = None

    def setup(self, rank):
        if rank == 0:
            print(f'Initializing W&B on rank {rank}')
            self.rank = rank
            wandb.init(**self.wandb_args)

    def log(self, obj, idx=None):
        # self.rank is only set on rank 0, where wandb.init was called
        if self.rank is not None and idx is not None:
            # NOTE: wandb expects the step to be monotonically increasing, so re-using
            # a per-epoch batch index as the step can drop or mis-order later epochs
            return wandb.log(obj, step=idx)
        elif self.rank is not None:
            return wandb.log(obj)
        else:
            return None

    def save(self, path):
        if self.rank is not None:
            return wandb.save(path)
        else:
            return None

def save_model(model_to_save, model_save_path, logger):
    '''
    Save a DDP checkpoint to the given path and upload it to W&B.
    Only rank 0 (the rank the logger was set up on) writes the file,
    so the ranks do not race on the same path.
    '''
    if logger.rank is not None:
        # NOTE: saving the DDP wrapper keeps the 'module.' prefix in the state dict keys
        torch.save(model_to_save.state_dict(), model_save_path)
        # log the checkpoint file to wandb
        logger.save(model_save_path)
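
# Because the DDP wrapper is saved directly above, the checkpoint keys carry a
# 'module.' prefix. A hypothetical loading sketch (the path is illustrative):
#
#   state_dict = torch.load('checkpoints/model_0.pth', map_location='cpu')
#   state_dict = {k.replace('module.', '', 1): v for k, v in state_dict.items()}
#   plain_model = timm.create_model(args.model_name, num_classes=51, in_chans=6)
#   plain_model.load_state_dict(state_dict)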

#==============================
# DATA PIPELINE SETUP
#==============================
input_context = tf.distribute.InputContext(
    input_pipeline_id=0,  # Worker id
    num_input_pipelines=16,  # Total number of workers
)
# NOTE: read_config is never passed to as_dataset; sharding is instead done
# explicitly with .shard(total_nodes, rank) inside tfds_ds
read_config = tfds.ReadConfig(
    input_context=input_context,
)

dataset = tfds.builder_from_directory("s3://...")

def main():
    # SHS = 4
    # initializing W&B
    logger = wandb_logger(args)
    # Init on rank 0
    logger.setup(rank)

    # create model and move it to GPU with id rank
    device_id = rank % torch.cuda.device_count()

    model = timm.create_model(args.model_name, pretrained=args.pretrained, num_classes=51, in_chans=6).to(device_id)
    model = DDP(model, device_ids=[device_id])
    # log model summary
    model_summary = summary(model, input_shape=(args.batch_size, *args.input_shape, 6))
    print(model_summary)

    optimizer = create_optimizer_v2(lr=args.lr, opt=args.optimizer, weight_decay=args.weight_decay, model_or_params=model)
    # anneal the LR over the training run; the scheduler is stepped once per epoch below
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.epochs - 1)

    # Creating the datasets
    #nodes = os.environ['COUNT_NODE'] #set by SLURM script

    tfds_train = tfds_ds('train', nodes, rank)
    tfds_val = tfds_ds('test', nodes, rank)

    train_loader, val_loader = tfds_train, tfds_val

    torch.backends.cudnn.benchmark = True

    # Boring stuff
    loss_function = torch.nn.CrossEntropyLoss().to(device_id) # loss function

    train_metric_accuracy = Accuracy(compute_on_cpu=True).to(device_id)
    val_metric_accuracy = Accuracy(compute_on_cpu=True).to(device_id)
    val_top_k = Accuracy(compute_on_cpu=True, top_k=5).to(device_id)

    # AMP
    scaler = GradScaler()

    # Training loop
    for epoch in range(args.epochs):
        print('Starting loop')

        # Training loop
        model.train()
        for idx, batch in enumerate(train_loader):
            #Timing each step
            start = time.time()
            optimizer.zero_grad()

            # obtaining the data
            inputs, targets = batch[0], batch[1]
            inputs, targets = inputs.to(device_id, non_blocking=True), targets.to(device_id, non_blocking=True)
            # converting BHWC TO BCHW for PyTorch
            inputs = inputs.permute(0, 3, 1, 2).float()

            with autocast(dtype=torch.float16):
                logits = model(inputs)
                loss = loss_function(logits, targets)

            # scale the loss, backprop, then step the optimizer and update the scaler
            scaler.scale(loss).backward()
            scaler.step(optimizer)
            scaler.update()

            if idx % args.log_frequency == 0:
                # NOTE: the running train accuracy is only updated on logged batches
                accuracy = train_metric_accuracy(logits, torch.argmax(targets, dim=1))
                print('Epoch: {}, Step: {}, Loss: {} , Acc: {} | Time taken: {}'.format(epoch, idx, loss.item(), accuracy, time.time() - start))
                logger.log({'acc': accuracy, 'Epoch': epoch, 'train_loss': loss.item(), 'time_per_n_step': time.time() - start}, idx=idx)

        # step the LR scheduler once per epoch (T_max above is in epochs)
        scheduler.step()

        total_train_accuracy = train_metric_accuracy.compute()
        print(f"\n{'='*50}\nTraining acc for epoch {epoch}: {total_train_accuracy}\n{'='*50}")
        logger.log({'epoch_end_train_acc': total_train_accuracy, 'Epoch': epoch})

        # Validation loop
        if epoch % args.val_frequency == 0:
            model.eval()
            with torch.no_grad():  # disabling gradient computation
                for idx, batch in enumerate(val_loader):
                    inputs, targets = batch[0].to(device_id), batch[1].to(device_id)
                    # converting BHWC to BCHW for PyTorch
                    inputs = inputs.permute(0, 3, 1, 2).float()

                    logits = model(inputs)
                    loss = loss_function(logits, targets)

                    # calling the metrics functionally both updates them and returns the batch value
                    batch_val_acc = val_metric_accuracy(logits, torch.argmax(targets, dim=1))
                    batch_top_k = val_top_k(logits, torch.argmax(targets, dim=1))

                    if idx % args.log_frequency == 0:
                        logger.log({'val_acc': batch_val_acc, f'Val_Top-{val_top_k.top_k}': batch_top_k, 'Epoch': epoch, 'val_loss': loss.item()}, idx=idx)
                        print(f'val_acc: {batch_val_acc} | val_top-{val_top_k.top_k}: {batch_top_k} | val_loss: {loss.item()}')

            # Calculate epoch-level validation metrics (only when validation actually ran)
            total_val_accuracy = val_metric_accuracy.compute()
            print(f"\n{'-'*50}\nValidation acc for epoch {epoch}: {total_val_accuracy}\n{'-'*50}")
            logger.log({'final_val_acc': total_val_accuracy, f'final_top_{val_top_k.top_k}_acc': val_top_k.compute(), 'Epoch': epoch})

        # Save checkpoint to Wandb
        if epoch % 2 == 0:
            chkp_path = f'./checkpoints/{args.model_name}_{epoch}.pth'
            save_model(model, chkp_path, logger)

        # Reset metrics for the next epoch
        train_metric_accuracy.reset()
        val_metric_accuracy.reset()
        val_top_k.reset()

if __name__ == '__main__':
    dist.init_process_group("nccl")
    rank = dist.get_rank()
    nodes = dist.get_world_size()  # total number of processes (world size), not physical machines

    print(f"Start running SUMO w/ DDP @ {rank} | World size: {nodes}")

    main()
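
A script like this is typically launched with torchrun (for example: torchrun --nproc_per_node=<gpus_per_node> main.py), which sets the RANK and WORLD_SIZE environment variables that dist.init_process_group("nccl") reads; the actual launcher (a SLURM script, per the COUNT_NODE comment above) was among the removed parts.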