Morning: AIoT Data Analysis Application System — Framework Design and Implementation
Today we used Django to set up the application framework. Only part of it is finished; we will continue it in the next class. A screenshot of the work in progress is shown below:
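For reference while the class build is still unfinished, here is a minimal sketch of how such a Django project is typically scaffolded. The project name aiot_site and app name analysis are hypothetical placeholders, not the names used in class.

# Minimal sketch, assuming the project was created with:
#   django-admin startproject aiot_site
#   python manage.py startapp analysis
# ("aiot_site" and "analysis" are placeholder names.)

# analysis/views.py
from django.http import HttpResponse

def index(request):
    # Placeholder landing page for the AIoT data analysis app
    return HttpResponse("AIoT data analysis app is running.")

# aiot_site/urls.py
from django.urls import path
from analysis import views

urlpatterns = [
    path('', views.index, name='index'),
]

Running python manage.py runserver then serves this placeholder page at the site root.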
Afternoon: A First Look at PyTorch and Deep Learning
Build a CNN and use it to classify the MNIST handwritten digits.
######################### step1: load data (generate) ############
import torch
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
import numpy as np
from torchvision import datasets, transforms

# Assign the CUDA GPU located at '0' to a variable
# device = torch.device('cuda:0')
print(torch.cuda.device_count())
print(torch.cuda.get_device_name(0))
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)

t1 = transforms.Resize((28, 28))
t2 = transforms.ToTensor()
t3 = transforms.Normalize((0.5,), (0.5,))
transform = transforms.Compose([t1, t2, t3])

train_data = datasets.MNIST(root='./data', train=True, download=True, transform=transform)
validate_data = datasets.MNIST(root='./data', train=False, download=True, transform=transform)
print(len(train_data), type(train_data))
print(len(validate_data), type(validate_data))

train_loader = torch.utils.data.DataLoader(train_data, batch_size=100, shuffle=True)
validation_loader = torch.utils.data.DataLoader(validate_data, batch_size=100, shuffle=False)
print(len(train_loader), type(train_loader))
print(len(validation_loader), type(validation_loader))
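As a quick sanity check (my own addition, not part of the class code), one can peek at a single batch to confirm the tensor shapes and the value range produced by Normalize((0.5,), (0.5,)):

# Sanity check: shapes and value range of one batch
images, labels = next(iter(train_loader))
print(images.shape)    # expected: torch.Size([100, 1, 28, 28])
print(labels.shape)    # expected: torch.Size([100])
print(images.min().item(), images.max().item())  # roughly -1.0 to 1.0 after normalization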
########################## step2: show data ############
def im_convert(tensor):
    # Convert a normalized CHW tensor back to an HWC image in [0, 1] for matplotlib
    image = tensor.clone().detach().numpy()
    image = image.transpose(1, 2, 0)
    image = image * np.array((0.5, 0.5, 0.5)) + np.array((0.5, 0.5, 0.5))
    image = image.clip(0, 1)
    return image

dataiter = iter(train_loader)
images, labels = next(dataiter)  # dataiter.next() was removed in newer PyTorch versions
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
    ax = fig.add_subplot(2, 10, idx+1, xticks=[], yticks=[])
    plt.imshow(im_convert(images[idx]))
    ax.set_title([labels[idx].item()])
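If the im_convert helper feels opaque, an alternative preview of the same batch is torchvision's make_grid. This is a sketch I added, not part of the class code; it reuses the images tensor from the loop above.

from torchvision.utils import make_grid

grid = make_grid(images[:20], nrow=10, normalize=True)  # rescale to [0, 1] for display
plt.figure(figsize=(25, 4))
plt.imshow(grid.permute(1, 2, 0))  # CHW -> HWC for matplotlib
plt.axis('off')
plt.show()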
########################### step3: build model ############
class LeNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 20, 5, 1)
        self.conv2 = nn.Conv2d(20, 50, 5, 1)
        self.fc1 = nn.Linear(4*4*50, 500)
        self.dropout1 = nn.Dropout(0.5)
        self.fc2 = nn.Linear(500, 10)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.max_pool2d(x, 2, 2)
        x = F.relu(self.conv2(x))
        x = F.max_pool2d(x, 2, 2)
        x = x.view(-1, 4*4*50)
        x = F.relu(self.fc1(x))
        x = self.dropout1(x)
        x = self.fc2(x)
        return x

model = LeNet().to(device)
print(model)

# class myDNN(nn.Module):
#     def __init__(self, numIn, numH1, numH2, numOut):
#         super(myDNN, self).__init__()
#         self.layer1 = torch.nn.Linear(numIn, numH1)
#         self.layer2 = torch.nn.Linear(numH1, numH2)
#         self.layer3 = torch.nn.Linear(numH2, numOut)
#     def forward(self, x):
#         x = F.relu(self.layer1(x))
#         x = F.relu(self.layer2(x))
#         yProb = self.layer3(x)
#         return yProb
# model = myDNN(784, 256, 64, 10)

# backward path
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)  # advance 2: lr=0.0001
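The 4*4*50 input size of fc1 comes from the spatial size shrinking 28 → 24 (conv1, 5x5 kernel) → 12 (max pool) → 8 (conv2) → 4 (max pool), with 50 channels left. A quick sketch I added to verify this with a dummy batch, assuming the LeNet model defined above:

# Verify the flattened feature size that feeds fc1
with torch.no_grad():
    dummy = torch.zeros(1, 1, 28, 28).to(device)
    x = F.max_pool2d(F.relu(model.conv1(dummy)), 2, 2)  # -> [1, 20, 12, 12]
    x = F.max_pool2d(F.relu(model.conv2(x)), 2, 2)      # -> [1, 50, 4, 4]
    print(x.shape)  # torch.Size([1, 50, 4, 4]) -> 4*4*50 = 800 features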
############################ step4: training model ############
epochs = 15
running_loss_history = []
running_corrects_history = []
val_running_loss_history = []
val_running_corrects_history = []

for e in range(epochs):
    running_loss = 0.0
    running_corrects = 0.0
    val_running_loss = 0.0
    val_running_corrects = 0.0

    for inputs, labels in train_loader:
        inputs = inputs.to(device)
        labels = labels.to(device)
        outputs = model(inputs)
        loss = criterion(outputs, labels)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        _, preds = torch.max(outputs, 1)
        running_loss += loss.item()
        running_corrects += torch.sum(preds == labels.data)
    else:
        # validation pass: no gradients needed
        with torch.no_grad():
            for val_inputs, val_labels in validation_loader:
                val_inputs = val_inputs.to(device)
                val_labels = val_labels.to(device)
                val_outputs = model(val_inputs)
                val_loss = criterion(val_outputs, val_labels)

                _, val_preds = torch.max(val_outputs, 1)
                val_running_loss += val_loss.item()
                val_running_corrects += torch.sum(val_preds == val_labels.data)

        # average loss per batch; divide corrects by the dataset size so accuracy falls in [0, 1]
        epoch_loss = running_loss / len(train_loader)
        epoch_acc = running_corrects.float() / len(train_loader.dataset)
        running_loss_history.append(epoch_loss)
        running_corrects_history.append(epoch_acc)

        val_epoch_loss = val_running_loss / len(validation_loader)
        val_epoch_acc = val_running_corrects.float() / len(validation_loader.dataset)
        val_running_loss_history.append(val_epoch_loss)
        val_running_corrects_history.append(val_epoch_acc)

        print('epoch :', (e+1))
        print('training loss: {:.4f}, acc {:.4f} '.format(epoch_loss, epoch_acc.item()))
        print('validation loss: {:.4f}, validation acc {:.4f} '.format(val_epoch_loss, val_epoch_acc.item()))
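The loss and accuracy histories are collected above but not plotted. A short sketch I added to visualize the training curves, assuming the history lists from step 4 are in memory:

# Plot the recorded training curves
plt.figure()
plt.plot(running_loss_history, label='training loss')
plt.plot(val_running_loss_history, label='validation loss')
plt.legend()

plt.figure()
plt.plot([acc.item() for acc in running_corrects_history], label='training acc')
plt.plot([acc.item() for acc in val_running_corrects_history], label='validation acc')
plt.legend()
plt.show()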