上午: Python机器学习套件与资料分析
挑几个不错的片段分享
# 储存每个epoch的weightsfrom tensorflow.keras.callbacks import ModelCheckpointMNIST_Model.set_weights(MNIST_weights)MNIST_Model.compile(optimizer = 'rmsprop', loss = 'categorical_crossentropy', metrics = ['acc'])MCP_path = './temp/'+'weights_{epoch:03d}.h5'MNIST_MCP = ModelCheckpoint(filepath = MCP_path)MNIST_Model.fit(X_train, y_train, epochs = 5, batch_size = 128, verbose = 0, callbacks = [MNIST_MCP])
from tensorflow.keras.callbacks import TensorBoardMTB = TensorBoard(log_dir='./logs0804', histogram_freq=1, write_graph=True)MNIST_Model.fit(X_train, y_train, epochs = 16, batch_size = 128, verbose = 0, callbacks = [MTB], validation_data = (X_test, y_test) )# 显示tensorboard%load_ext tensorboard%tensorboard --logdir='./log0804'
下午: Pytorch 与深度学习初探
练习使用pytorch
######################### step1: load data (generate) ############
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets


def scatter_plot():
    """Scatter-plot the two classes of the global blob data X / Y."""
    plt.scatter(X[Y==0,0], X[Y==0,1])  # class-0 points: scatter(X1, X2) where Y == 0
    plt.scatter(X[Y==1,0], X[Y==1,1])  # class-1 points


def plot_fit(option=1):
    """Draw the data plus a decision line.

    option == 0: use a fixed hand-picked line (before any training);
    otherwise read (w1, w2, b) from the global `model`'s parameters.
    The line is where w1*x1 + w2*x2 + b == 0, i.e. x2 = (w1*x1 + b)/(-w2).
    """
    if option==0:
        [w1,w2,b]=[-7,5,0]
    else:
        [w,b]=model.parameters()  # w has shape (1, 2), b has shape (1,)
        w1,w2=w.view(2)
        w1=w1.item()
        w2=w2.item()
        b=b[0].detach().item()
    x1=np.array([-1.5,1.5])
    x2=(w1*x1+b)/(-1*w2)
    scatter_plot()
    plt.plot(x1,x2,'r')
    plt.show()


# NOTE: removed a large commented-out earlier 1-D version of plot_fit (dead code).

# Generate two well-separated 2-D blobs: 100 points, cluster std 0.2.
n_samples=100
centers=[[-0.5,-0.5],[0.5,0.5]]
X,Y=datasets.make_blobs(n_samples=n_samples,random_state=1,centers=centers,cluster_std=0.2)
print("X=",X.shape,"Y=",Y.shape)
########################### step2: preprocessing X,Y ############tensorX=torch.Tensor(X)tensorY=torch.Tensor(Y.reshape(len(X),1))print(tensorX.shape,tensorY.shape)print(type(X))plot_fit(0)
########################### step3: build model ############
# FIX: the class definition was entirely commented out while `model = myModel()`
# was still executed, which raises NameError on a fresh run — restored it.
class myModel(nn.Module):
    """Logistic-regression classifier: Linear(2 -> 1) followed by a sigmoid."""

    def __init__(self):
        super().__init__()
        self.linear=nn.Linear(2,1)

    def forward(self,x):
        # Probability of class 1.
        pred=torch.sigmoid(self.linear(x))
        return pred

    def predict(self,x):
        # Hard 0/1 label for a single sample (threshold at 0.5).
        pred=self.forward(x)
        if pred >=0.5:
            return 1
        else:
            return 0


model=myModel()
# torch.nn.init.xavier_uniform_(model.weight) # advance 1: init weights
[w,b]=model.parameters()
print(w,b)
plot_fit()

# Binary cross-entropy on sigmoid outputs; plain SGD.
criterion= nn.BCELoss()
optimizer=torch.optim.SGD(model.parameters(),lr=0.0001)  # advance 2: lr=0.0001
############################ step4: training model ############
torch.manual_seed(1)

epochs=1000
losses=[]
for e in range(epochs):
    preY=model.forward(tensorX)
    loss=criterion(preY,tensorY)
    # FIX: store the scalar value, not the graph-attached tensor; appending
    # `loss` itself keeps the whole autograd graph alive for every epoch and
    # makes plt.plot() fail on tensors that require grad.
    losses.append(loss.item())
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

# Loss curve over training, then the fitted decision line.
plt.plot(range(epochs),losses)
plt.show()
plot_fit(1)
plt.show()
# step5: evaluate model (draw)