
In my case, the input is a List<List<Float>> (a list of word-representation vectors), and the output is a single Double per sequence.

So I build the following structure (first index: example number, second: sentence item (word) number, third: word-vector element number): http://pastebin.com/KGdjwnki

Output: http://pastebin.com/fY8zrxEL
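For reference, as far as I understand, DL4J recurrent layers expect features shaped [miniBatchSize, inputSize, timeSeriesLength], which is why I transpose the word vectors during conversion. A minimal sketch of the layout I am assuming (all sizes below are made-up placeholders):

import org.nd4j.linalg.factory.Nd4j

fun main(args: Array<String>) {
    val exampleCount = 2      // mini-batch size (example number)
    val wordVectorLength = 5  // elements per word vector
    val sentenceLength = 3    // words per sentence (time steps)
    // RNN feature layout: [miniBatchSize, inputSize, timeSeriesLength]
    val features = Nd4j.zeros(exampleCount, wordVectorLength, sentenceLength)
    println(features.shape().joinToString())  // 2, 5, 3
}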

However, when I pass the following input ( http://pastebin.com/wvFFC4Hw ) to model.output, I get a vector, [0.25, 0.24, 0.25, 0.25], instead of a single value.

What could be wrong? The code (Kotlin) is attached below; classCount is 1.

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork
import org.deeplearning4j.nn.conf.NeuralNetConfiguration.Builder
import org.deeplearning4j.nn.api.OptimizationAlgorithm
import org.deeplearning4j.nn.conf.Updater
import org.deeplearning4j.nn.weights.WeightInit
import org.deeplearning4j.nn.conf.layers.GravesLSTM
import org.deeplearning4j.nn.conf.layers.RnnOutputLayer
import org.deeplearning4j.nn.conf.BackpropType
import org.nd4j.linalg.api.ndarray.INDArray
import org.nd4j.linalg.factory.Nd4j
import org.nd4j.linalg.lossfunctions.LossFunctions
import java.util.*

class ClassifierNetwork(wordVectorSize: Int, classCount: Int) {
    data class Dimension(val x: Array<Int>, val y: Array<Int>)
    val model: MultiLayerNetwork
    val optimization = OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT
    val iterations = 1
    val learningRate = 0.1
    val rmsDecay = 0.95
    val seed = 12345
    val l2 = 0.001
    val weightInit = WeightInit.XAVIER
    val updater = Updater.RMSPROP
    val backpropType = BackpropType.TruncatedBPTT
    val tbpttLength = 50
    val epochs = 50
    var dimensions = Dimension(arrayOf(0), arrayOf(0))

    init {
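        // shared hyperparameters: SGD with the RMSProp updater, L2 regularisation, Xavier init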
        val baseConfiguration = Builder().optimizationAlgo(optimization)
                .iterations(iterations).learningRate(learningRate).rmsDecay(rmsDecay).seed(seed).regularization(true).l2(l2)
                .weightInit(weightInit).updater(updater)
                .list()
        baseConfiguration.layer(0, GravesLSTM.Builder().nIn(wordVectorSize).nOut(64).activation("tanh").build())
        baseConfiguration.layer(1, GravesLSTM.Builder().nIn(64).nOut(32).activation("tanh").build())
        baseConfiguration.layer(2, GravesLSTM.Builder().nIn(32).nOut(16).activation("tanh").build())
        baseConfiguration.layer(3, RnnOutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MCXENT)
                .activation("softmax").weightInit(WeightInit.XAVIER).nIn(16).nOut(classCount).build())
        val cfg = baseConfiguration.build()!!
        cfg.backpropType = backpropType
        cfg.tbpttBackLength = tbpttLength
        cfg.tbpttFwdLength = tbpttLength
        cfg.isPretrain = false
        cfg.isBackprop = true
        model = MultiLayerNetwork(cfg)
        model.init()  // initialise the network parameters before use
    }

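    // derives shapes from the raw lists: x -> [examples, wordVectorLength, sentenceLength], y -> [examples, classCount]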
    private fun dataDimensions(x: List<List<Array<Double>>>, y: List<Array<Double>>): Dimension {
        assert(x.size == y.size)
        val exampleCount = x.size
        assert(x.size > 0)
        val sentenceLength = x[0].size
        assert(sentenceLength > 0)
        val wordVectorLength = x[0][0].size
        assert(wordVectorLength > 0)
        val classCount = y[0].size
        assert(classCount > 0)
        return Dimension(
                arrayOf(exampleCount, wordVectorLength, sentenceLength),
                arrayOf(exampleCount, classCount)
        )
    }

    data class Fits(val x: INDArray, val y: INDArray)
    private fun fitConversion(x: List<List<Array<Double>>>, y: List<Array<Double>>): Fits {
        val dim = dataDimensions(x, y)
        val xItems = ArrayList<INDArray>()
        for (i in 0 until dim.x[0]) {
            val itemList = ArrayList<DoubleArray>()
            for (j in 0 until dim.x[1]) {
                val rowList = ArrayList<Double>()
                for (k in 0 until dim.x[2]) {
                    // transpose each example from [word][element] to [element][word]
                    rowList.add(x[i][k][j])
                }
                itemList.add(rowList.toDoubleArray())
            }
            xItems.add(Nd4j.create(itemList.toTypedArray()))
        }
        // stack the per-example matrices into [examples, wordVectorLength, sentenceLength]
        val xFits = Nd4j.create(xItems, dim.x.toIntArray(), 'c')
        val yItems = ArrayList<DoubleArray>()
        for (i in 0 until y.size) {
            yItems.add(y[i].toDoubleArray())
        }
        val yFits = Nd4j.create(yItems.toTypedArray())
        return Fits(xFits, yFits)
    }

    private fun error(epoch: Int, x: List<List<Array<Double>>>, y: List<Array<Double>>) {
        var totalDiff = 0.0
        // accumulate squared per-example errors and report their Euclidean norm
        for (i in x.indices) {
            val source = x[i]
            val result = y[i]
            val realResult = predict(source)
            var diff = 0.0
            for (j in result.indices) {
                val elementDiff = result[j] - realResult[j]
                diff += Math.pow(elementDiff, 2.0)
            }
            diff = Math.sqrt(diff)
            totalDiff += Math.pow(diff, 2.0)
        }
        totalDiff = Math.sqrt(totalDiff)
        println("Epoch $epoch, diff $totalDiff")
    }

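    // full-batch training: one fit() call per epoch, printing the error after each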
    fun train(x: List<List<Array<Double>>>, y: List<Array<Double>>) {
        dimensions = dataDimensions(x, y)
        val(xFit, yFit) = fitConversion(x, y)
        for (i in 0 until epochs) {
            model.input = xFit
            model.labels = yFit
            model.fit()
            error(i+1, x, y)
        }
    }

    fun predict(x: List<Array<Double>>): Array<Double> {
        val xList = ArrayList<DoubleArray>()
        for (i in 0 until dimensions.x[1]) {
            val row = ArrayList<Double>()
            for (j in 0 until dimensions.x[2]) {
                // transpose the example from [word][element] to [element][word]
                row.add(x[j][i])
            }
            xList.add(row.toDoubleArray())
        }
        val xItem = Nd4j.create(xList.toTypedArray())
        val y = model.output(xItem)
        // copy the network output back into a plain array
        val result = ArrayList<Double>()
        for (i in 0 until y.length()) {
            result.add(y.getDouble(i))
        }
        return result.toTypedArray()
    }
}
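
For completeness, this is roughly how I drive the class (a sketch: loadX/loadY are hypothetical stand-ins for my actual data loading, and wordVectorSize = 100 is a placeholder):

val network = ClassifierNetwork(wordVectorSize = 100, classCount = 1)
val trainX: List<List<Array<Double>>> = loadX()  // [example][word][vector element]
val trainY: List<Array<Double>> = loadY()        // one Double per sequence, i.e. [example][1]
network.train(trainX, trainY)
// predict on a single sentence (a list of word vectors)
val prediction = network.predict(trainX[0])
println(prediction.joinToString())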

UPDATE: The following example looks like a "close" enough task, so I will check it and post a solution later: /org/deeplearning4j/examples/recurrent/word2vecsentiment/Word2VecSentimentRNN.java


3 Answers