Implementing dropout from scratch


Question

This code attempts to utilize a custom implementation of dropout :

%reset -f

import torch
import torch.nn as nn
# import torchvision
# import torchvision.transforms as transforms
import torch
import torch.nn as nn
import torch.utils.data as data_utils
import numpy as np
import matplotlib.pyplot as plt
import torch.nn.functional as F

num_epochs = 1000

number_samples = 10

from sklearn.datasets import make_moons
from matplotlib import pyplot
from pandas import DataFrame
# generate 2d classification dataset
X, y = make_moons(n_samples=number_samples, noise=0.1)
# scatter plot, dots colored by class value

x_data = [a for a in enumerate(X)]
x_data_train = x_data[:int(len(x_data) * .5)]
x_data_train = [i[1] for i in x_data_train]
x_data_train

y_data = [y[i[0]] for i in x_data]
y_data_train = y_data[:int(len(y_data) * .5)]
y_data_train

x_test = [a[1] for a in x_data[::-1][:int(len(x_data) * .5)]]
y_test = [a for a in y_data[::-1][:int(len(y_data) * .5)]]

x = torch.tensor(x_data_train).float() # <2>
print(x)

y = torch.tensor(y_data_train).long()
print(y)

x_test = torch.tensor(x_test).float()
print(x_test)

y_test = torch.tensor(y_test).long()
print(y_test)

class Dropout(nn.Module):
    def __init__(self, p=0.5, inplace=False):
#         print(p)
        super(Dropout, self).__init__()
        if p < 0 or p > 1:
            raise ValueError("dropout probability has to be between 0 and 1, "
                             "but got {}".format(p))
        self.p = p
        self.inplace = inplace

    def forward(self, input):
        print(list(input.shape))
        return np.random.binomial([np.ones((len(input),np.array(list(input.shape))))],1-dropout_percent)[0] * (1.0/(1-self.p))

    def __repr__(self):
        inplace_str = ', inplace' if self.inplace else ''
        return self.__class__.__name__ + '(' \
            + 'p=' + str(self.p) \
            + inplace_str + ')'

class MyLinear(nn.Linear):
    def __init__(self, in_feats, out_feats, drop_p, bias=True):
        super(MyLinear, self).__init__(in_feats, out_feats, bias=bias)
        self.custom_dropout = Dropout(p=drop_p)

    def forward(self, input):
        dropout_value = self.custom_dropout(self.weight)
        return F.linear(input, dropout_value, self.bias)



my_train = data_utils.TensorDataset(x, y)
train_loader = data_utils.DataLoader(my_train, batch_size=2, shuffle=True)

my_test = data_utils.TensorDataset(x_test, y_test)
test_loader = data_utils.DataLoader(my_train, batch_size=2, shuffle=True)

# Device configuration
device = 'cpu'
print(device)

# Hyper-parameters 
input_size = 2
hidden_size = 100
num_classes = 2

learning_rate = 0.0001

pred = []

# Fully connected neural network with one hidden layer
class NeuralNet(nn.Module):
    def __init__(self, input_size, hidden_size, num_classes, p):
        super(NeuralNet, self).__init__()
#         self.drop_layer = nn.Dropout(p=p)
#         self.drop_layer = MyLinear()
#         self.fc1 = MyLinear(input_size, hidden_size, p)
        self.fc1 = MyLinear(input_size, hidden_size , p) 
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size, num_classes)  

    def forward(self, x):
#         out = self.drop_layer(x)
        out = self.fc1(x)
        out = self.relu(out)
        out = self.fc2(out)
        return out

model = NeuralNet(input_size, hidden_size, num_classes, p=0.9).to(device)

# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)  

# Train the model
total_step = len(train_loader)
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):  
        # Move tensors to the configured device
        images = images.reshape(-1, 2).to(device)
        labels = labels.to(device)

        # Forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)

        # Backward and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    if (epoch) % 100 == 0:
        print ('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(epoch+1, num_epochs, i+1, total_step, loss.item()))

Custom dropout is implemented as:

class Dropout(nn.Module):
    def __init__(self, p=0.5, inplace=False):
#         print(p)
        super(Dropout, self).__init__()
        if p < 0 or p > 1:
            raise ValueError("dropout probability has to be between 0 and 1, "
                             "but got {}".format(p))
        self.p = p
        self.inplace = inplace

    def forward(self, input):
        print(list(input.shape))
        return np.random.binomial([np.ones((len(input),np.array(list(input.shape))))],1-dropout_percent)[0] * (1.0/(1-self.p))

    def __repr__(self):
        inplace_str = ', inplace' if self.inplace else ''
        return self.__class__.__name__ + '(' \
            + 'p=' + str(self.p) \
            + inplace_str + ')'

class MyLinear(nn.Linear):
    def __init__(self, in_feats, out_feats, drop_p, bias=True):
        super(MyLinear, self).__init__(in_feats, out_feats, bias=bias)
        self.custom_dropout = Dropout(p=drop_p)

    def forward(self, input):
        dropout_value = self.custom_dropout(self.weight)
        return F.linear(input, dropout_value, self.bias)

It seems I've implemented the dropout function incorrectly?

np.random.binomial([np.ones((len(input),np.array(list(input.shape))))],1-dropout_percent)[0] * (1.0/(1-self.p))

How should this be modified in order to correctly utilize dropout?

These posts were useful in getting to this point:

Hinton's Dropout in 3 Lines of Python: https://iamtrask.github.io/2015/07/28/dropout/

Making a Custom Dropout Function: https://discuss.pytorch.org/t/making-a-custom-dropout-function/14053/2

Answer

It seems I've implemented the dropout function incorrectly?

np.random.binomial([np.ones((len(input),np.array(list(input.shape))))],1-dropout_percent)[0] * (1.0/(1-self.p))

In fact, the above implementation is known as Inverted Dropout. Inverted Dropout is how Dropout is implemented in practice in the various deep learning frameworks.

What is inverted dropout?

Before jumping into inverted dropout, it is helpful to see how dropout works for a single neuron.

Since during the training phase a neuron is kept on with probability q (= 1 - p), during the testing phase we have to emulate the behavior of the ensemble of networks used at training time. To this end, the authors suggest scaling the activations by a factor of q during the test phase, so that the expected output produced in the training phase matches the single output required in the test phase (Section 10, Multiplicative Gaussian Noise of the dropout paper).
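
In other words, during training each activation is multiplied by an independent Bernoulli(q) mask, while at test time every activation is simply multiplied by q. A minimal numpy sketch of this "direct" dropout (the names q, a and mask are illustrative, not from the original post):

import numpy as np

q = 0.8                        # keep probability, q = 1 - p
a = np.array([1.0, 2.0, 3.0])  # activations of one layer

# Training phase: drop each unit with probability p = 1 - q, no rescaling.
mask = np.random.binomial(1, q, size=a.shape)
train_out = a * mask

# Test phase: keep every unit, but scale by q so that the expected
# training output matches the test output: E[train_out] == test_out.
test_out = a * q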

Inverted dropout is a bit different. This approach consists of scaling the activations during the training phase, while leaving the test phase untouched. The scale factor is the inverse of the keep probability, 1/(1-p) = 1/q.
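
Again as a minimal numpy sketch (same illustrative names as above):

import numpy as np

q = 0.8
a = np.array([1.0, 2.0, 3.0])

# Training phase: drop units and rescale the survivors by 1/q,
# so that E[train_out] == a already holds at training time.
mask = np.random.binomial(1, q, size=a.shape)
train_out = a * mask / q

# Test phase: identity, nothing to change.
test_out = a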

Inverted dropout lets you define the model once and just change a parameter (the keep/drop probability) to run training and testing on the same model. Direct dropout, instead, forces you to modify the network during the test phase, because if you do not multiply the output by q, the neuron will produce values that are higher than the ones expected by the successive neurons (so the following neurons can saturate or explode): that is why inverted dropout is the more common implementation.
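
This is also how PyTorch's built-in nn.Dropout behaves: a single module, switched between the two phases with train()/eval(), with the 1/(1-p) rescaling applied at training time:

import torch
import torch.nn as nn

drop = nn.Dropout(p=0.5)
x = torch.ones(1, 6)

drop.train()
print(drop(x))  # surviving entries are rescaled to 2.0, dropped ones are 0.0

drop.eval()
print(drop(x))  # identity: all entries stay 1.0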

References:

What is inverted dropout?

Dropout: scaling the activation versus inverting the dropout

Analysis of Dropout

How to implement inverted dropout in PyTorch?

import torch
import torch.nn as nn

class MyDropout(nn.Module):
    def __init__(self, p: float = 0.5):
        super(MyDropout, self).__init__()
        if p < 0 or p > 1:
            raise ValueError("dropout probability has to be between 0 and 1, "
                             "but got {}".format(p))
        self.p = p

    def forward(self, X):
        if self.training:
            # Sample a 0/1 keep mask with keep probability 1 - p and rescale
            # the surviving activations by 1/(1 - p) (inverted dropout).
            binomial = torch.distributions.binomial.Binomial(probs=1 - self.p)
            return X * binomial.sample(X.size()) * (1.0 / (1 - self.p))
        # At evaluation time dropout is the identity.
        return X
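
As a sketch of how this could be wired into the question's network, MyLinear can use this module instead of the numpy-based Dropout, so the mask stays a torch tensor and autograd keeps working (MyDropout as defined above is assumed to be in scope):

import torch.nn as nn
import torch.nn.functional as F

class MyLinear(nn.Linear):
    def __init__(self, in_feats, out_feats, drop_p, bias=True):
        super(MyLinear, self).__init__(in_feats, out_feats, bias=bias)
        self.custom_dropout = MyDropout(p=drop_p)

    def forward(self, input):
        # Mask (and rescale) the weights rather than the activations,
        # mirroring the structure of the original code.
        dropped_weight = self.custom_dropout(self.weight)
        return F.linear(input, dropped_weight, self.bias)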

How to implement it in NumPy?

import numpy as np

pKeep = 0.8
weights = np.ones([1, 5])
binary_value = np.random.rand(weights.shape[0], weights.shape[1]) < pKeep
res = np.multiply(weights, binary_value)
res /= pKeep  # this line is called inverted dropout technique
print(res)

How to implement it in TensorFlow?

import tensorflow as tf
tf.enable_eager_execution()

weights = tf.ones(shape=[1, 5])
keep_prob = 0.8
random_tensor = keep_prob
random_tensor += tf.random_uniform(weights.shape)
# 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob)
binary_tensor = tf.floor(random_tensor)
ret = tf.div(weights, keep_prob) * binary_tensor
print(ret)
