
I am trying to implement a neural network using Python and numpy. The problem is that when I try to train the network, the error settles at around 0.5 and it does not learn any further. I have tried learning rates of 0.001 and 1. I think I am doing something wrong during backpropagation, but I cannot figure out what it is.

P.S. I was having a lot of overflow problems, so I started using the np.clip() method.
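
The idea behind the clipping is just to keep np.exp from overflowing: for float64, np.exp overflows for inputs somewhere above ~709, so I cut the values off at ±500 first. A minimal example of what I mean:

import numpy as np

x = np.array([1000.0, -1000.0, 3.0])
np.exp(x)                      # overflow warning: [inf, 0., 20.09]
np.exp(np.clip(x, -500, 500))  # stays finite after clipping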

Here is my backpropagation code:

# z2 is softmax output
def calculateBackpropagation(self, z1, z2, y):
    delta3 = z2
    delta3[range(self.numOfSamples), y] -= 1
    dW2 = (np.transpose(z1)).dot(delta3)
    db2 = np.sum(delta3, axis=0, keepdims=True)
    delta2 = delta3.dot(np.transpose(self.W2)) * ActivationFunction.DRELU(z1)
    dW1 = np.dot(np.transpose(self.train_data), delta2)
    db1 = np.sum(delta2, axis=0)

    self.W1 += -self.alpha * dW1
    self.b1 += -self.alpha * db1
    self.W2 += -self.alpha * dW2
    self.b2 += -self.alpha * db2

# RELU can be approximated with soft max function
# so the derivative of this function is g(x) = log(1+exp(x))
# Source: https://imiloainf.wordpress.com/2013/11/06/rectifier-nonlinearities/
@staticmethod
def DRELU(x):
    x = np.clip( x, -500, 500 )
    return np.log(1 + np.exp(x))

def softmax(self, x):
    """Compute softmax values for each sets of scores in x."""
    x = np.clip( x, -500, 500 )
    e = np.exp(x)
    return e / np.sum(e, axis=1, keepdims=True)

def train(self):
    X = self.train_data
    Y = self.train_labels
    (row, col) = np.shape(self.train_data)
    for i in xrange(self.ephocs):
        [p1, z1, p2, z2] = self.feedForward(X)
        probs = z2
        self.backPropagate(X, Y, z1, probs)

        self.learning_rate = self.learning_rate * (self.learning_rate / (self.learning_rate + (self.learning_rate * self.rate_decay)))

def feedForward(self, X):

    p1 = X.dot(self.W1) + self.b1
    z1 = self.neuron(p1)
    p2 = z1.dot(self.W2) + self.b2
    # z2 = self.neuron(p2)
    z2 = self.softmax(p2)
    return [p1, z1, p2, z2]

def predict(self, X):
    [p1, z1, p2, z2] = self.feedForward(X)
    return np.argmax(z2, axis=1)

# Calculates the cross-entropy loss
# P.S. In some cases true distribution is unknown so cross-entropy cannot be directly calculated.
# hence, I will use the cross entropy estimation formula on wikipedia
# https://en.wikipedia.org/wiki/Cross_entropy
def calculateLoss(self, x):
    [p1, z1, p2, z2] = self.feedForward(x)
    softmax_probs = self.softmax(p2)
    # Calculates the estimated loss based on wiki
    return np.sum(-np.log(softmax_probs[range(self.numOfSamples), self.train_labels]))
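
# (For reference: the estimate on that Wikipedia page is a per-sample average,
# roughly H(T, q) = -(1/N) * sum_i log q(x_i). The value returned above is the
# same sum without the 1/N factor; the averaged form would be something like
# np.mean(-np.log(softmax_probs[range(self.numOfSamples), self.train_labels])).)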

def neuron(self, p):
    return ActivationFunction.RELU(p)

def CreateRandomW(self, row, col):
    return np.random.uniform(low=-1.0, high=1.0, size=(row, col))

def normalizeData(self, rawpoints, high=255.0, low=0.0):
    return (rawpoints/128.0) - 1

@staticmethod
def RELU(x):
    # x = np.clip( x, -1, 1 )
    x = np.clip( x, -500, 500 )
    return np.maximum(0.001, x)

# RELU can be approximated with soft max function
# so the derivative of this function is g(x) = log(1+exp(x))
# Source: https://imiloainf.wordpress.com/2013/11/06/rectifier-nonlinearities/
@staticmethod
def DRELU(x):
    x = np.clip( x, -500, 500 )
    return np.log(1 + np.exp(x))