本文整理了Java中org.deeplearning4j.nn.api.Layer.activate()
方法的一些代码示例,展示了Layer.activate()
的具体用法。这些代码示例主要来源于Github
/Stackoverflow
/Maven
等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Layer.activate()
方法的具体详情如下:
包路径:org.deeplearning4j.nn.api.Layer
类名称:Layer
方法名:activate
[英]Trigger an activation with the last specified input
[中]使用最后指定的输入触发激活
代码示例来源:origin: org.deeplearning4j/deeplearning4j-nn
// Forwards a no-argument activation to the wrapped layer, returning its
// result unchanged. The wrapped layer activates on whatever input it last
// received (per the Layer.activate() contract quoted above).
@Override
public INDArray activate() {
return insideLayer.activate();
}
代码示例来源:origin: org.deeplearning4j/deeplearning4j-nn
// Forwards an activation on the supplied input to the wrapped layer and
// returns its result unchanged — a pure pass-through delegate.
@Override
public INDArray activate(INDArray input) {
return insideLayer.activate(input);
}
代码示例来源:origin: org.deeplearning4j/deeplearning4j-nn
/**
 * Computes the activation of the layer at the given index, using whatever
 * input that layer last received.
 *
 * @param layer index of the layer to activate
 * @return the activation produced by that layer
 */
public INDArray activate(int layer) {
    Layer target = getLayer(layer);
    return target.activate();
}
代码示例来源:origin: org.deeplearning4j/deeplearning4j-nn
// Logs the requested training mode, then activates the wrapped layer in
// TEST mode regardless of what was asked for.
// NOTE(review): the deliberate TrainingMode.TEST override suggests this is a
// frozen-layer wrapper that must never act as if training — confirm against
// the enclosing class, which is not visible here.
@Override
public INDArray activate(INDArray input, TrainingMode training) {
logTestMode(training);
return insideLayer.activate(input, TrainingMode.TEST);
}
代码示例来源:origin: org.deeplearning4j/deeplearning4j-nn
// Logs the requested training flag, then activates the wrapped layer with
// training=false regardless of the argument.
// NOTE(review): hard-coding false mirrors the TrainingMode.TEST override in
// the sibling method — presumably a frozen-layer wrapper; confirm against
// the enclosing class.
@Override
public INDArray activate(boolean training) {
logTestMode(training);
return insideLayer.activate(false);
}
代码示例来源:origin: org.deeplearning4j/deeplearning4j-nn
// Logs the requested training flag, then activates the wrapped layer on the
// given input with training=false regardless of the argument.
// NOTE(review): same deliberate inference-only override as the other
// activate(...) overloads in this wrapper — confirm against the enclosing class.
@Override
public INDArray activate(INDArray input, boolean training) {
logTestMode(training);
return insideLayer.activate(input, false);
}
代码示例来源:origin: org.deeplearning4j/deeplearning4j-nn
/**
 * Feeds the supplied input through the layer at the given index and
 * returns the resulting activation.
 *
 * @param layer index of the layer to run
 * @param input input fed to that layer
 * @return the layer's activation for the supplied input
 */
public INDArray activate(int layer, INDArray input) {
    Layer target = getLayer(layer);
    return target.activate(input);
}
代码示例来源:origin: org.deeplearning4j/deeplearning4j-nn
/**
 * Runs the forward pass by activating the underlying layer.
 *
 * @param training whether to activate in training mode
 * @return the layer's activation
 * @throws IllegalStateException if not all inputs have been set
 */
@Override
public INDArray doForward(boolean training) {
    // Precondition: every input must be set before a forward pass can run.
    if (canDoForward()) {
        return layer.activate(training);
    }
    throw new IllegalStateException("Cannot do forward pass: all inputs not set");
}
代码示例来源:origin: org.deeplearning4j/deeplearning4j-nn
// Logs the requested training mode, then activates the wrapped layer in
// TEST mode regardless of the argument — consistent with the other
// inference-only overloads of this wrapper.
// NOTE(review): confirm the frozen/inference-only intent against the
// enclosing class, which is not visible here.
@Override
public INDArray activate(TrainingMode training) {
logTestMode(training);
return insideLayer.activate(TrainingMode.TEST);
}
代码示例来源:origin: org.deeplearning4j/deeplearning4j-nn
/**
 * Activates the final layer in the network's layer array (i.e. the last
 * hidden layer, not the output/logistic-regression step), based on the
 * last input that layer received.
 *
 * @return the activation of the last layer in {@code getLayers()}
 */
public INDArray activate() {
    Layer[] all = getLayers();
    Layer last = all[all.length - 1];
    return last.activate();
}
代码示例来源:origin: org.deeplearning4j/deeplearning4j-nn
currInput = temp.get(temp.size() - 1);
} else {
currInput = layers[i].activate(currInput, training);
代码示例来源:origin: org.deeplearning4j/deeplearning4j-nn
/**
 * Runs the given input through layer {@code curr}, first applying that
 * layer's configured input pre-processor when one exists.
 *
 * @param curr index of the layer to activate
 * @param input activation coming from the previous layer
 * @param training whether to activate in training mode
 * @return the activation of layer {@code curr}
 */
public INDArray activationFromPrevLayer(int curr, INDArray input, boolean training) {
    // Reshape/transform the incoming activation if this layer declares a pre-processor.
    if (getLayerWiseConfigurations().getInputPreProcess(curr) != null) {
        input = getLayerWiseConfigurations().getInputPreProcess(curr)
                .preProcess(input, getInputMiniBatchSize());
    }
    return layers[curr].activate(input, training);
}
代码示例来源:origin: org.deeplearning4j/deeplearning4j-ui_2.10
for (Layer layer : l.getLayers()) {
if (!(layer instanceof FrozenLayer) && layer.type() == Layer.Type.CONVOLUTIONAL) {
INDArray output = layer.activate();
int sampleDim = output.shape()[0] == 1 ? 0 : rnd.nextInt(output.shape()[0] - 1) + 1;
if (cnt == 0) {
for (Layer layer : l.getLayers()) {
if (!(layer instanceof FrozenLayer) && layer.type() == Layer.Type.CONVOLUTIONAL) {
INDArray output = layer.activate();
int sampleDim = output.shape()[0] == 1 ? 0 : rnd.nextInt(output.shape()[0] - 1) + 1;
if (cnt == 0) {
代码示例来源:origin: org.deeplearning4j/deeplearning4j-ui_2.11
for (Layer layer : l.getLayers()) {
if (!(layer instanceof FrozenLayer) && layer.type() == Layer.Type.CONVOLUTIONAL) {
INDArray output = layer.activate();
int sampleDim = output.shape()[0] == 1 ? 0 : rnd.nextInt(output.shape()[0] - 1) + 1;
if (cnt == 0) {
for (Layer layer : l.getLayers()) {
if (!(layer instanceof FrozenLayer) && layer.type() == Layer.Type.CONVOLUTIONAL) {
INDArray output = layer.activate();
int sampleDim = output.shape()[0] == 1 ? 0 : rnd.nextInt(output.shape()[0] - 1) + 1;
if (cnt == 0) {
代码示例来源:origin: org.deeplearning4j/deeplearning4j-nn
/**
 * Computes the pre-activation (linear) output of the final layer for input
 * {@code x}: every layer except the last is fully activated in sequence
 * (applying any configured input pre-processor first), then the last
 * layer's {@code preOutput} is returned instead of its activation.
 *
 * @param x the network input
 * @return the final layer's pre-activation output
 */
@Override
public INDArray preOutput(INDArray x) {
    int lastIdx = layers.length - 1;
    INDArray current = x;
    // Fully activate all layers before the last one.
    for (int i = 0; i < lastIdx; i++) {
        if (getLayerWiseConfigurations().getInputPreProcess(i) != null) {
            current = getLayerWiseConfigurations().getInputPreProcess(i)
                    .preProcess(current, getInputMiniBatchSize());
        }
        current = layers[i].activate(current);
    }
    // Pre-process for the final layer, then stop before its nonlinearity.
    if (getLayerWiseConfigurations().getInputPreProcess(lastIdx) != null) {
        current = getLayerWiseConfigurations().getInputPreProcess(lastIdx)
                .preProcess(current, getInputMiniBatchSize());
    }
    return layers[lastIdx].preOutput(current);
}
代码示例来源:origin: org.deeplearning4j/deeplearning4j-nn
input = ((MultiLayerNetwork) layers[i]).rnnTimeStep(input);
} else {
input = layers[i].activate(input, false);
内容来源于网络,如有侵权,请联系作者删除!