Commit b69cab49 authored by Belyaeva Oksana's avatar Belyaeva Oksana
Browse files

Some fixes

parent 4c8a2c12
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="VcsDirectoryMappings">
<mapping directory="$PROJECT_DIR$" vcs="Git" />
</component>
</project>
\ No newline at end of file
......@@ -3,7 +3,7 @@ from typing import List
import torchvision.transforms as transforms
import torchvision
from utils import imshow
#from utils import imshow
import numpy as np
import torch
import pandas as pd
......
import argparse
import torch
from dataset_executor import DataLoaderImageOrient, show_images, DatasetExecutorCIFAR
from train import NetExecutor
from model import Net, ClassificationModelTorch
......@@ -6,19 +9,22 @@ from model import Net, ClassificationModelTorch
# Command-line configuration for the orientation-classification script.
# NOTE: the diff residue registered "-t/--train" twice (old and new revision),
# which raises argparse.ArgumentError at runtime; resolved to the new revision.
parser = argparse.ArgumentParser()
# NOTE: argparse's type=bool is a pitfall — bool("False") is True, so any
# non-empty value on the CLI yields True. Kept for interface compatibility.
parser.add_argument("-t", "--train", type=bool, help="run for train model", default=False)
parser.add_argument("-s", "--checkpoint_save", help="Path to checkpoint for save or load", default='./orient_class_resnet18_bigger_bs1.pth')
parser.add_argument("-l", "--checkpoint_load", help="Path to checkpoint to load", default='./orient_class_resnet18_bigger_bs1.pth')
parser.add_argument("-f", "--from_checkpoint", type=bool, help="resume training from an existing checkpoint", default=True)
args = parser.parse_args()

batch_size = 2   # samples per mini-batch for both train and test loaders
ON_GPU = False   # set True to allow CUDA when available (see __main__ below)
def train_step(data_executor: DataLoaderImageOrient, net_executor: NetExecutor):
    """Train the orientation classifier on the labelled training split.

    Reads the module-level ``args``, ``batch_size`` and ``device`` globals
    configured in ``__main__``.
    """
    # Part 1 - load the training data.
    trainloader = data_executor.load_dataset(
        csv_path='/home/ox/work/datasets/classification_orient_dataset_big/train/labels.csv',
        image_path='/home/ox/work/datasets/classification_orient_dataset_big',
        batch_size=batch_size
    )

    # Peek at one batch before training.
    # NOTE(review): the lines producing `sample` were elided by the diff hunk
    # boundary; reconstructed from the parallel code in accuracy_step — confirm.
    dataiter = iter(trainloader)
    sample = dataiter.__next__()
    images, labels = sample['image'], sample['class']
    #show_images(images)
    # print labels
    print(' '.join('%5s' % data_executor.classes[labels[j]] for j in range(batch_size)))

    # Part 2 - train the model, optionally resuming from a checkpoint.
    if args.from_checkpoint:
        net_executor.load_weights(args.checkpoint_load)
    net_executor.train_model(trainloader, args.checkpoint_save, device)
    net_executor.save_weights(args.checkpoint_save)
def accuracy_step(data_executor: DataLoaderImageOrient, net_executor: NetExecutor):
    """Evaluate the saved checkpoint on the test split, printing per-class accuracy.

    Reads the module-level ``args``, ``batch_size`` and ``device`` globals
    configured in ``__main__``.
    """
    testloader = data_executor.load_dataset(
        csv_path='/home/ox/work/datasets/classification_orient_dataset_big/test/labels.csv',
        image_path='/home/ox/work/datasets/classification_orient_dataset_big/',
        batch_size=batch_size
    )

    # Peek at one batch for a ground-truth sanity check.
    dataiter = iter(testloader)
    sample = dataiter.__next__()
    images, labels = sample['image'], sample['class']
    #show_images(images)
    #print('GroundTruth: ', ' '.join('%5s' % data_executor.classes[labels[j]] for j in range(1)))
    print('GroundTruth: {}'.format(labels))

    net_executor.load_weights(args.checkpoint_load)
    net_executor.calc_accuracy_by_classes(testloader, data_executor.classes, device, batch_size=batch_size)
if __name__ == "__main__":
    data_executor = DataLoaderImageOrient()

    # Select the compute device: CUDA only when available AND explicitly enabled.
    print(torch.cuda.is_available())
    if torch.cuda.is_available() and ON_GPU:
        device = torch.device("cuda:0")
    else:
        device = torch.device("cpu")

    #datas = DatasetExecutorCIFAR()
    #loader = datas.load_train_dataset()
    #dataiter = iter(loader)
    #sample = dataiter.__next__()

    net = ClassificationModelTorch()
    net.to(device)
    net_executor = NetExecutor(net)

    if args.train:
        train_step(data_executor, net_executor)
    # NOTE(review): the branch following this `if` (presumably the
    # accuracy/evaluation path) is truncated in the diff view — not
    # reconstructed here; confirm against the full file.
......
......@@ -2,9 +2,6 @@ import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
# MY simple CNN network :)
class Net(nn.Module):
......
from time import time
import torch.optim as optim
import torch.nn as nn
import torch
......@@ -13,7 +15,7 @@ class NetExecutor(object):
# --- NetExecutor internals (diff fragment: old and new revisions interleaved,
# and the inner loop over `trainloader` is elided by hunk boundaries; the code
# is left byte-identical because a safe rewrite is not possible from this view).
# Cross-entropy loss + SGD over the wrapped network's parameters.
self.criterion = nn.CrossEntropyLoss()
self.optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
# OLD revision of the signature (removed by this commit):
def train_model(self, trainloader, checkpoint_path: str, epoch_cnt=2, save_step=500):
# NEW revision: adds a `device` argument and raises the default epoch count to 5.
def train_model(self, trainloader, checkpoint_path: str, device, epoch_cnt=5, save_step=500):
for epoch in range(epoch_cnt): # loop over the dataset multiple times
running_loss = 0.0
......@@ -25,10 +27,10 @@ class NetExecutor(object):
self.optimizer.zero_grad()
# forward + backward + optimize
# OLD revision (removed): forward pass on the default device, with debug print.
outputs = self.net(inputs.float())
print(outputs)
# NEW revision: inputs and labels are moved to `device`; debug print disabled.
outputs = self.net(inputs.float().to(device))
#print(outputs)
loss = self.criterion(outputs, labels)
loss = self.criterion(outputs, labels.to(device))
loss.backward()
self.optimizer.step()
running_loss += loss.item()
......@@ -57,28 +59,38 @@ class NetExecutor(object):
print('Accuracy of the network on the 10000 test images: %d %%' % (
100 * correct / total))
def calc_accuracy_by_classes(self, testloader, classes, device, batch_size=1):
    """Report per-class accuracy of ``self.net`` over ``testloader``.

    Args:
        testloader: iterable of batches; each batch is a dict with
            'image', 'class' and 'image_name' entries.
        classes: sequence of class names indexed by integer label.
        device: torch.device on which inference runs.
        batch_size: nominal samples per batch (default 1); the final batch
            may be smaller.

    Prints per-class accuracy, any misclassified image names, and the
    average per-sample prediction time.
    """
    class_correct = [0.0 for _ in range(len(classes))]
    class_total = [0.0 for _ in range(len(classes))]
    time_predict = 0.0
    cnt_predict = 0
    with torch.no_grad():
        for data in testloader:
            images, labels = data['image'], data['class']
            time_begin = time()
            outputs = self.net(images.float().to(device))
            time_predict += time() - time_begin
            cnt_predict += len(images)
            _, predicted = torch.max(outputs, 1)
            c = (predicted == labels.to(device)).squeeze()
            # Iterate over the actual batch length: the final batch may be
            # smaller than batch_size (the original raised IndexError there).
            # A single-sample batch squeezes `c` to 0-dim, so use .item().
            for i in range(len(labels)):
                label = labels[i]
                bool_predict = c.item() if len(labels) == 1 else c[i].item()
                class_correct[label] += bool_predict
                class_total[label] += 1
                if not bool_predict:
                    print('{} predict as {}'.format(data['image_name'][i], classes[predicted[i]]))
    for i in range(len(classes)):
        if class_total[i]:
            print('Accuracy of %5s : %2d %%' % (
                classes[i], 100 * class_correct[i] / class_total[i]))
        else:
            # Guard: a class absent from the test set previously divided by zero.
            print('Accuracy of %5s : n/a (no samples)' % classes[i])
    if cnt_predict:
        # Guard: an empty loader previously divided by zero here.
        print('=== AVG Time predict {}'.format(time_predict / cnt_predict))
def save_weights(self, path_checkpoint):
    """Serialize the wrapped network's learned parameters to *path_checkpoint*."""
    weights = self.net.state_dict()
    torch.save(weights, path_checkpoint)
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment