Usage of the org.deeplearning4j.nn.api.Layer.conf() method, with code examples


This article collects a number of Java code examples for the org.deeplearning4j.nn.api.Layer.conf() method, showing how Layer.conf() is used in practice. The examples are drawn from selected projects on platforms such as GitHub, Stack Overflow, and Maven, and should serve as useful references. Details of the Layer.conf() method follow:
Package: org.deeplearning4j.nn.api
Class: Layer
Method: conf

About Layer.conf

No official description is available. As the examples below show, conf() returns the NeuralNetConfiguration associated with a layer, from which the layer's configuration (name, hyperparameters, per-parameter settings) can be read.
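
As a quick orientation before the collected examples, here is a minimal hedged sketch of typical usage; the MultiLayerNetwork variable net is hypothetical, and the accessor chain mirrors the examples below:

// Inspect the configuration of each layer in a (hypothetical) MultiLayerNetwork "net"
for (org.deeplearning4j.nn.api.Layer l : net.getLayers()) {
  NeuralNetConfiguration layerConf = l.conf();        // per-layer configuration
  String name = layerConf.getLayer().getLayerName();  // layer name stored in the config
  System.out.println(name);
}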

Code Examples

Code example source: org.deeplearning4j/deeplearning4j-nn

@Override
public NeuralNetConfiguration conf() {
  return insideLayer.conf(); // delegate to the wrapped layer
}

Code example source: org.deeplearning4j/deeplearning4j-nn

@Override
public void applyLearningRateScoreDecay() {
  for (Layer layer : layers) {
    if (!layer.conf().getLearningRateByParam().isEmpty()) {
      for (Map.Entry<String, Double> lrPair : layer.conf().getLearningRateByParam().entrySet()) {
        layer.conf().setLearningRateByParam(lrPair.getKey(),
                lrPair.getValue() * (layer.conf().getLrPolicyDecayRate() + Nd4j.EPS_THRESHOLD));
      }
    }
  }
}

Code example source: org.deeplearning4j/deeplearning4j-nn

@Override
protected boolean isMiniBatch() {
  return network.conf().isMiniBatch();
}

Code example source: CampagneLaboratory/variationanalysis

private void decreaseLearningRate(ComputationGraph computationGraph) {
  for (Layer layer : computationGraph.getLayers()) {
    if (!layer.conf().getLearningRateByParam().isEmpty()) {
      for (Map.Entry<String, Double> lrPair : layer.conf().getLearningRateByParam().entrySet()) {
        final double rate = lrPair.getValue() * (0.5 + Nd4j.EPS_THRESHOLD);
        layer.conf().setLearningRateByParam(lrPair.getKey(), rate);
      }
    }
  }
}

Code example source: org.deeplearning4j/deeplearning4j-nn

public LayerUpdater(Layer layer, INDArray updaterState) {
  super(layer, updaterState);
  if (layer instanceof MultiLayerNetwork) {
    throw new UnsupportedOperationException("Cannot use LayerUpdater for a MultiLayerNetwork");
  }
  layersByName = new HashMap<>();
  layersByName.put(layer.conf().getLayer().getLayerName(), layer);
}

Code example source: org.deeplearning4j/deeplearning4j-nn

public Map<String, INDArray> paramTable(boolean backpropParamsOnly) {
  //Get all parameters from all layers
  Map<String, INDArray> allParams = new LinkedHashMap<>();
  for (Layer layer : layers) {
    Map<String, INDArray> paramMap = layer.paramTable(backpropParamsOnly);
    for (Map.Entry<String, INDArray> entry : paramMap.entrySet()) {
      String newKey = layer.conf().getLayer().getLayerName() + "_" + entry.getKey();
      allParams.put(newKey, entry.getValue());
    }
  }
  return allParams;
}

Code example source: org.deeplearning4j/deeplearning4j-nn

public ComputationGraphUpdater(ComputationGraph graph, INDArray updaterState) {
  super(graph, updaterState);
  layersByName = new HashMap<>();
  Layer[] layers = getOrderedLayers();
  for (Layer l : layers) {
    layersByName.put(l.conf().getLayer().getLayerName(), l);
  }
}

Code example source: org.deeplearning4j/deeplearning4j-nn

public boolean skipDueToPretrainConfig() {
  if (!isPretrainUpdaterBlock())
    return false;
  ParamState vs = layersAndVariablesInBlock.get(0);
  return !vs.getLayer().conf().isPretrain(); //Skip if not pretrain
}

Code example source: org.deeplearning4j/deeplearning4j-nn

/**
 * Get a map of states for ALL RNN layers, as used in {@link #rnnTimeStep(INDArray...)}.
 * Layers that are not RNN layers will not have an entry in the returned map
 *
 * @return Map of states, keyed by layer name; non-RNN layers have no entry in the map
 * @see #rnnSetPreviousStates(Map)
 */
public Map<String, Map<String, INDArray>> rnnGetPreviousStates() {
  Map<String, Map<String, INDArray>> states = new HashMap<>();
  for (Layer l : layers) {
    if (l instanceof RecurrentLayer) {
      states.put(l.conf().getLayer().getLayerName(), ((RecurrentLayer) l).rnnGetPreviousState());
    }
  }
  return states;
}
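
As a usage note, the returned map pairs with rnnSetPreviousStates(Map) (see the @see tag above) to snapshot and restore recurrent state around rnnTimeStep(...) calls. A hedged sketch, assuming a hypothetical ComputationGraph graph and input array input:

// Snapshot all RNN layer states, run one speculative time step, then roll back
Map<String, Map<String, INDArray>> saved = graph.rnnGetPreviousStates();
INDArray[] speculativeOutput = graph.rnnTimeStep(input);
graph.rnnSetPreviousStates(saved); // restore the pre-step recurrent state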

Code example source: org.deeplearning4j/deeplearning4j-nn

/**
 * Set the state of the RNN layer, for use in {@link #rnnTimeStep(INDArray...)}
 *
 * @param layer The number/index of the layer.
 * @param state The state to set the specified layer to
 */
public void rnnSetPreviousState(int layer, Map<String, INDArray> state) {
  rnnSetPreviousState(layers[layer].conf().getLayer().getLayerName(), state);
}

Code example source: org.deeplearning4j/deeplearning4j-nn

public boolean isPretrainUpdaterBlock() {
  //All in block should be the same layer, and all be pretrain params
  ParamState vs = layersAndVariablesInBlock.get(0);
  return vs.getLayer().conf().getLayer().isPretrainParam(vs.getParamName());
}

Code example source: org.deeplearning4j/deeplearning4j-nn

/**
 * Get the state of the RNN layer, as used in {@link #rnnTimeStep(INDArray...)}.
 *
 * @param layer Number/index of the layer.
 * @return Hidden state, or null if layer is not an RNN layer
 */
public Map<String, INDArray> rnnGetPreviousState(int layer) {
  return rnnGetPreviousState(layers[layer].conf().getLayer().getLayerName());
}

Code example source: org.deeplearning4j/deeplearning4j-nn

public void setLayerAsFrozen() {
  if (this.layer instanceof FrozenLayer)
    return;
  this.layer = new FrozenLayer(this.layer);
  this.layer.conf().getLayer().setLayerName(vertexName);
}

Code example source: org.deeplearning4j/deeplearning4j-nn

protected String layerId() {
  String name = insideLayer.conf().getLayer().getLayerName();
  return "(layer name: " + (name == null ? "\"\"" : name) + ", layer index: " + insideLayer.getIndex() + ")";
}

Code example source: org.deeplearning4j/deeplearning4j-nn

public void init() {
  if (gradientUpdater == null) {
    ParamState varState = layersAndVariablesInBlock.get(0);
    String varName = varState.getParamName();
    gradientUpdater = varState.getLayer().conf().getLayer().getIUpdaterByParam(varName).instantiate(updaterView,
            updaterViewRequiresInitialization); //UpdaterUtils.getGradientUpdater(varState.getLayer(), varState.getParamName());
  }
}

Code example source: org.deeplearning4j/deeplearning4j-modelimport

/**
 * Copy Keras layer weights to DL4J Layer.
 *
 * @param layer the DL4J layer to copy the stored Keras weights into
 * @throws InvalidKerasConfigurationException if the stored weights do not match the layer's parameters
 */
public void copyWeightsToLayer(org.deeplearning4j.nn.api.Layer layer) throws InvalidKerasConfigurationException {
  if (this.getNumParams() > 0) {
    String dl4jLayerName = layer.conf().getLayer().getLayerName();
    String kerasLayerName = this.getLayerName();
    String msg = "Error when attempting to copy weights from Keras layer " + kerasLayerName + " to DL4J layer "
            + dl4jLayerName;
    if (this.weights == null)
      throw new InvalidKerasConfigurationException(msg + "(weights is null)");
    Set<String> paramsInLayer = new HashSet<String>(layer.paramTable().keySet());
    Set<String> paramsInKerasLayer = new HashSet<String>(this.weights.keySet());
    /* Check for parameters in layer for which we don't have weights. */
    paramsInLayer.removeAll(paramsInKerasLayer);
    for (String paramName : paramsInLayer)
      throw new InvalidKerasConfigurationException(
              msg + "(no stored weights for parameter " + paramName + ")");
    /* Check for parameters NOT in layer for which we DO have weights. */
    paramsInKerasLayer.removeAll(layer.paramTable().keySet());
    for (String paramName : paramsInKerasLayer)
      throw new InvalidKerasConfigurationException(msg + "(found no parameter named " + paramName + ")");
    /* Copy weights. */
    for (String paramName : layer.paramTable().keySet())
      layer.setParam(paramName, this.weights.get(paramName));
  }
}

Code example source: org.deeplearning4j/deeplearning4j-nn

/**
 * Fit the model
 *
 * @param examples the examples to classify (one example in each row)
 * @param labels   the labels for each example (the number of labels must match the number of examples)
 */
@Override
public void fit(INDArray examples, int[] labels) {
  org.deeplearning4j.nn.conf.layers.OutputLayer layerConf =
          (org.deeplearning4j.nn.conf.layers.OutputLayer) getOutputLayer().conf().getLayer();
  fit(examples, FeatureUtil.toOutcomeMatrix(labels, layerConf.getNOut()));
}
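
For illustration, a hedged sketch of calling this overload; the network net, the feature matrix, and the class indices are hypothetical. The int labels are expanded to a one-hot outcome matrix whose width is the output layer's nOut:

// Hypothetical 3-class problem: labels supplied as class indices
INDArray examples = Nd4j.rand(3, 10);  // 3 examples, 10 features each
int[] labels = {0, 2, 1};              // expanded via FeatureUtil.toOutcomeMatrix internally
net.fit(examples, labels);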

Code example source: Waikato/wekaDeeplearning4j

private List<BaseLayer> getConfiguredLayers(NeuralNetConfiguration conf) throws Exception {
  Dl4jMlpClassifier clf = setupClf(conf);
  return Arrays.stream(clf.getModel().getLayers())
      .map(l -> (BaseLayer) l.conf().getLayer())
      .collect(Collectors.toList());
}

Code example source: org.deeplearning4j/deeplearning4j-nn

/**
 * Apply drop connect to the given variable
 * @param layer the layer with the variables
 * @param variable the variable to apply
 * @return the post applied drop connect
 */
public static INDArray applyDropConnect(Layer layer, String variable) {
  INDArray result = layer.getParam(variable).dup();
  if (Nd4j.getRandom().getStatePointer() != null) {
    Nd4j.getExecutioner().exec(new DropOut(result, result, layer.conf().getLayer().getDropOut()));
  } else {
    Nd4j.getExecutioner().exec(new LegacyDropOut(result, result, layer.conf().getLayer().getDropOut()));
  }
  return result;
}
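
A hedged usage sketch: the variable argument is typically the weight parameter key; assuming DefaultParamInitializer.WEIGHT_KEY (the standard "W" key in org.deeplearning4j.nn.params) and a hypothetical layer:

// Draw a drop-connected copy of the layer's weight parameter
INDArray droppedWeights = applyDropConnect(layer, DefaultParamInitializer.WEIGHT_KEY);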

Code example source: org.deeplearning4j/deeplearning4j-nn

/**
 * Apply L1 and L2 regularization, if necessary. Note that L1/L2 may differ for different layers in the same block
 *
 * @param layer        The layer to apply L1/L2 to
 * @param paramName    Parameter name in the given layer
 * @param gradientView Gradient view array for the layer + param
 * @param paramsView   Parameter view array for the layer + param
 */
public void postApply(Layer layer, String paramName, INDArray gradientView, INDArray paramsView) {
  NeuralNetConfiguration conf = layer.conf();
  //TODO: do this for multiple contiguous params/layers (fewer, larger ops)
  double l2 = conf.getL2ByParam(paramName);
  if (conf.isUseRegularization() && l2 > 0) {
    //This can be an axpy op, saving an allocation...
    //gradientView += params * l2           i.e., dC/dw = dC0/dw + lambda/n * w where C0 is pre-l2 cost function
    //Equivalent to gradientView.addi(paramsView.mul(conf.getL2ByParam(paramName)));
    int length = gradientView.length();
    Nd4j.getBlasWrapper().level1().axpy(length, l2, paramsView, gradientView);
  }
  if (conf.isUseRegularization() && conf.getL1ByParam(paramName) > 0) {
    gradientView.addi(Transforms.sign(paramsView, true).muli(conf.getL1ByParam(paramName)));
  }
}
