#!/usr/bin/env python3
# SPDX-License-Identifier: Apache-2.0
#
# @file main.py
# @date 08 Dec 2020
# @brief This is VGG16 Example using PyTorch
# @see https://github.com/nnstreamer/nntrainer
# @author Parichay Kapoor
# @bug No known bugs except for NYI items
#
# This is based on official pytorch examples
#
# NOTE(review): this file was recovered from a git patch (mbox) artifact;
# the original commit metadata is preserved below for provenance:
#   From e54b4f0903152d14c6261370fd43ca22be150f05 Mon Sep 17 00:00:00 2001
#   From: Parichay Kapoor
#   Date: Tue, 8 Dec 2020 13:34:11 +0900
#   Subject: [PATCH] [vgg] Added pytorch model for vgg16
#   (benchmark counterpart for the tf and nntrainer VGG applications)

'''Train CIFAR100 with PyTorch.'''
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn

import torchvision
import torchvision.transforms as transforms

import os
import argparse
import sys

# Run on GPU when available, otherwise fall back to CPU.
device = 'cuda' if torch.cuda.is_available() else 'cpu'

# ---------------------------------------------------------------- Data
print('==> Preparing data..')

# Training pipeline: light augmentation (pad-and-crop + horizontal flip),
# then normalization with the standard CIFAR channel statistics.
transform_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465),
                         (0.2023, 0.1994, 0.2010)),
])

# Evaluation pipeline: normalization only, no augmentation.
transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465),
                         (0.2023, 0.1994, 0.2010)),
])

trainset = torchvision.datasets.CIFAR100(
    root='./data', train=True, download=True, transform=transform_train)
trainloader = torch.utils.data.DataLoader(
    trainset, batch_size=128, shuffle=True, num_workers=4)

testset = torchvision.datasets.CIFAR100(
    root='./data', train=False, download=True, transform=transform_test)
testloader = torch.utils.data.DataLoader(
    testset, batch_size=128, shuffle=False, num_workers=4)

# ---------------------------------------------------------------- Model

# VGG16 feature-extractor layout: integers are conv output channels,
# 'M' marks a 2x2 max-pool that halves the spatial resolution.
cfg = {'VGG16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M',
                 512, 512, 512, 'M', 512, 512, 512, 'M']}


class VGG(nn.Module):
    '''VGG-style convolutional classifier for 32x32 CIFAR100 images.

    Args:
        vgg_name (str): key into the module-level ``cfg`` table
            (only 'VGG16' is defined here).
    '''

    def __init__(self, vgg_name):
        super(VGG, self).__init__()
        self.features = self._make_layers(cfg[vgg_name])
        # After five 2x2 pools a 32x32 input is reduced to 1x1x512,
        # so the classifier head starts from 512 features.
        self.classifier = nn.Sequential(nn.Linear(512, 256),
                                        nn.BatchNorm1d(256),
                                        nn.ReLU(inplace=True),
                                        nn.Linear(256, 100))

    def forward(self, x):
        feats = self.features(x)
        flat = feats.view(feats.size(0), -1)  # flatten all but batch dim
        return self.classifier(flat)

    def _make_layers(self, cfg):
        '''Build the convolutional stack described by ``cfg``.'''
        layers = []
        in_channels = 3  # RGB input
        for spec in cfg:
            if spec == 'M':
                layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            else:
                layers.append(nn.Conv2d(in_channels, spec,
                                        kernel_size=3, padding=1))
                layers.append(nn.ReLU(inplace=True))
                in_channels = spec
        # Identity-sized pool kept for parity with the reference model.
        layers.append(nn.AvgPool2d(kernel_size=1, stride=1))
        return nn.Sequential(*layers)


print('Building model..')
net = VGG('VGG16')
net = net.to(device)
if device == 'cuda':
    # Spread batches over all visible GPUs and let cudnn pick fast kernels.
    net = torch.nn.DataParallel(net)
    cudnn.benchmark = True

criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=1e-3, weight_decay=5e-4)


# ------------------------------------------------------------- Training
def train(epoch):
    '''Run one training epoch and print the running loss/accuracy.

    Args:
        epoch (int): epoch index, used only for the progress banner.
    '''
    print('\nEpoch: %d' % epoch)
    net.train()
    train_loss = 0
    correct = 0
    total = 0
    for batch_idx, (inputs, targets) in enumerate(trainloader):
        inputs, targets = inputs.to(device), targets.to(device)
        optimizer.zero_grad()
        outputs = net(inputs)
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()

        train_loss += loss.item()
        _, predicted = outputs.max(1)
        total += targets.size(0)
        correct += predicted.eq(targets).sum().item()

    print('Loss: %.3f | Acc: %.3f%% (%d/%d)'
          % (train_loss/(batch_idx+1), 100.*correct/total, correct, total))
def test(epoch):
    '''Evaluate the model on the CIFAR100 test set and save a checkpoint.

    Args:
        epoch (int): epoch index, stored in the checkpoint dict.
    '''
    net.eval()
    test_loss = 0
    correct = 0
    total = 0
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(testloader):
            inputs, targets = inputs.to(device), targets.to(device)
            outputs = net(inputs)
            loss = criterion(outputs, targets)

            test_loss += loss.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()

    # BUG FIX: the checkpoint below referenced `acc` without it ever being
    # defined anywhere in the file, so test() raised NameError as soon as
    # it reached the save step. Compute it once (same value as printed).
    acc = 100.*correct/total
    print('Loss: %.3f | Acc: %.3f%% (%d/%d)'
          % (test_loss/(batch_idx+1), acc, correct, total))

    # Save checkpoint.
    state = {'net': net.state_dict(),
             'acc': acc,
             'epoch': epoch,
             }
    if not os.path.isdir('checkpoint'):
        os.mkdir('checkpoint')
    torch.save(state, './checkpoint/ckpt.pth')


if __name__ == '__main__':
    for epoch in range(1):
        train(epoch)
        # test(epoch)

# (git patch trailer: -- 2.7.4)