これは 0/1 活性化関数の代わりにシグモイド関数を使用しているため、これが勾配降下を計算する正しい方法だと思います。そうですか?
static double calculateOutput( int theta, double weights[], double[][] feature_matrix, int file_index, int globo_dict_size )
{
//double sum = x * weights[0] + y * weights[1] + z * weights[2] + weights[3];
double sum = 0.0;
for (int i = 0; i < globo_dict_size; i++)
{
sum += ( weights[i] * feature_matrix[file_index][i] );
}
//bias
sum += weights[ globo_dict_size ];
return sigmoid(sum);
}
private static double sigmoid(double x)
{
return 1 / (1 + Math.exp(-x));
}
この次のコードでは、Θ 値を更新しようとしています (パーセプトロンの重みに相当しますよね?) 。関連する質問LEARNING_RATE * localError * feature_matrix__train[p][i] * output_gradient[i]
で、その目的のためにこの式が与えられました。パーセプトロンからの重みの更新をコメントアウトしました。
この新しい更新ルールは正しいアプローチですか?
output_gradient とはどういう意味ですか? calculateOutput
それは私の方法で計算した合計と同等ですか?
//LEARNING WEIGHTS
double localError, globalError;
int p, iteration, output;
iteration = 0;
do
{
iteration++;
globalError = 0;
//loop through all instances (complete one epoch)
for (p = 0; p < number_of_files__train; p++)
{
// calculate predicted class
output = calculateOutput( theta, weights, feature_matrix__train, p, globo_dict_size );
// difference between predicted and actual class values
localError = outputs__train[p] - output;
//update weights and bias
for (int i = 0; i < globo_dict_size; i++)
{
//weights[i] += ( LEARNING_RATE * localError * feature_matrix__train[p][i] );
weights[i] += LEARNING_RATE * localError * feature_matrix__train[p][i] * output_gradient[i]
}
weights[ globo_dict_size ] += ( LEARNING_RATE * localError );
//summation of squared error (error value for all instances)
globalError += (localError*localError);
}
/* Root Mean Squared Error */
if (iteration < 10)
System.out.println("Iteration 0" + iteration + " : RMSE = " + Math.sqrt( globalError/number_of_files__train ) );
else
System.out.println("Iteration " + iteration + " : RMSE = " + Math.sqrt( globalError/number_of_files__train ) );
//System.out.println( Arrays.toString( weights ) );
}
while(globalError != 0 && iteration<=MAX_ITER);
更新 更新しましたが、次のようになります。
double loss, cost, hypothesis, gradient;
int p, iteration;
iteration = 0;
do
{
iteration++;
cost = 0.0;
loss = 0.0;
//loop through all instances (complete one epoch)
for (p = 0; p < number_of_files__train; p++)
{
// 1. Calculate the hypothesis h = X * theta
hypothesis = calculateHypothesis( theta, feature_matrix__train, p, globo_dict_size );
// 2. Calculate the loss = h - y and maybe the squared cost (loss^2)/2m
loss = hypothesis - outputs__train[p];
// 3. Calculate the gradient = X' * loss / m
gradient = calculateGradent( theta, feature_matrix__train, p, globo_dict_size, loss );
// 4. Update the parameters theta = theta - alpha * gradient
for (int i = 0; i < globo_dict_size; i++)
{
theta[i] = theta[i] - (LEARNING_RATE * gradient);
}
}
//summation of squared error (error value for all instances)
cost += (loss*loss);
/* Root Mean Squared Error */
if (iteration < 10)
System.out.println("Iteration 0" + iteration + " : RMSE = " + Math.sqrt( cost/number_of_files__train ) );
else
System.out.println("Iteration " + iteration + " : RMSE = " + Math.sqrt( cost/number_of_files__train ) );
//System.out.println( Arrays.toString( weights ) );
}
while(cost != 0 && iteration<=MAX_ITER);
}
static double calculateHypothesis( double theta[], double[][] feature_matrix, int file_index, int globo_dict_size )
{
double hypothesis = 0.0;
for (int i = 0; i < globo_dict_size; i++)
{
hypothesis += ( theta[i] * feature_matrix[file_index][i] );
}
//bias
hypothesis += theta[ globo_dict_size ];
return hypothesis;
}
static double calculateGradent( double theta[], double[][] feature_matrix, int file_index, int globo_dict_size, double loss )
{
double gradient = 0.0;
for (int i = 0; i < globo_dict_size; i++)
{
gradient += ( feature_matrix[file_index][i] * loss);
}
return gradient;
}
public static double hingeLoss()
{
// l(y, f(x)) = max(0, 1 − y · f(x))
return HINGE;
}