This article collects code examples of the Java method gov.sandia.cognition.math.matrix.Vector.norm2Squared(), showing how Vector.norm2Squared() is used in practice. The examples were extracted from selected open-source projects found on platforms such as GitHub, Stack Overflow, and Maven, so they should serve as useful references. Details of the Vector.norm2Squared() method:
Package: gov.sandia.cognition.math.matrix
Class: Vector
Method: norm2Squared
Description: none provided
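To get oriented before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below; the class name is made up) that builds a dense vector with the Cognitive Foundry's VectorFactory and evaluates norm2Squared(), the squared Euclidean (2-)norm:

import gov.sandia.cognition.math.matrix.Vector;
import gov.sandia.cognition.math.matrix.VectorFactory;

public class Norm2SquaredDemo
{
    public static void main(final String[] args)
    {
        // A dense vector (3, 4); its squared 2-norm is 3^2 + 4^2 = 25.0.
        final Vector v = VectorFactory.getDefault().copyValues(3.0, 4.0);
        System.out.println(v.norm2Squared()); // 25.0

        // Equivalent to the self dot product, but cheaper than
        // computing norm2() and squaring it again.
        System.out.println(v.dotProduct(v)); // 25.0
    }
}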
Code example source: gov.sandia.foundry/gov-sandia-cognition-learning-core
protected double computeScaleFactor(
    Vector gradientCurrent,
    Vector gradientPrevious )
{
    // Ratio of squared gradient norms (the Fletcher-Reeves beta).
    double gradientTgradient = gradientCurrent.norm2Squared();
    double denom = gradientPrevious.norm2Squared();
    double beta = gradientTgradient / denom;
    return beta;
}
Code example source: algorithmfoundry/Foundry
protected double computeScaleFactor(
    Vector gradientCurrent,
    Vector gradientPrevious )
{
    double gradientTgradient = gradientCurrent.norm2Squared();
    double denom = gradientPrevious.norm2Squared();
    double beta = gradientTgradient / denom;
    return beta;
}
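Both computeScaleFactor examples above compute the Fletcher-Reeves conjugate gradient scale factor, beta = ||g_k||^2 / ||g_{k-1}||^2, using norm2Squared() for both the numerator and the denominator. A self-contained sketch of the same arithmetic (the gradient values and the class name are made up):

import gov.sandia.cognition.math.matrix.Vector;
import gov.sandia.cognition.math.matrix.VectorFactory;

public class FletcherReevesBetaDemo
{
    public static void main(final String[] args)
    {
        // Hypothetical gradients from two successive iterations.
        final Vector gCurrent = VectorFactory.getDefault().copyValues(1.0, 2.0);
        final Vector gPrevious = VectorFactory.getDefault().copyValues(2.0, 0.0);

        // beta = ||g_k||^2 / ||g_{k-1}||^2 = (1 + 4) / 4 = 1.25
        final double beta = gCurrent.norm2Squared() / gPrevious.norm2Squared();
        System.out.println(beta); // 1.25
    }
}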
Code example source: algorithmfoundry/Foundry
@Override
public double euclideanDistanceSquared(
    final Vector other )
{
    // Squared Euclidean distance: ||this - other||^2.
    return this.minus( other ).norm2Squared();
}
Code example source: gov.sandia.foundry/gov-sandia-cognition-common-core
@Override
public double euclideanDistanceSquared(
    final Vector other )
{
    return this.minus( other ).norm2Squared();
}
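euclideanDistanceSquared is the natural choice when only relative distances matter (nearest-neighbor queries, cluster assignment), because it skips the square root that the plain Euclidean distance would require. A small usage sketch (values and class name are made up):

import gov.sandia.cognition.math.matrix.Vector;
import gov.sandia.cognition.math.matrix.VectorFactory;

public class DistanceSquaredDemo
{
    public static void main(final String[] args)
    {
        final Vector a = VectorFactory.getDefault().copyValues(1.0, 2.0);
        final Vector b = VectorFactory.getDefault().copyValues(4.0, 6.0);

        // ||a - b||^2 = (-3)^2 + (-4)^2 = 25.0
        System.out.println(a.minus(b).norm2Squared());     // 25.0
        System.out.println(a.euclideanDistanceSquared(b)); // 25.0
    }
}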
Code example source: algorithmfoundry/Foundry
@Override
protected double computeUpdate(
    final LinearBinaryCategorizer target,
    final Vector input,
    final boolean actualCategory,
    final double predicted)
{
    // Get the actual category.
    final double actual = actualCategory ? +1.0 : -1.0;
    // Compute the margin.
    final double margin = actual * predicted;
    final double norm = input.norm2Squared();
    return computeUpdate(margin, norm);
}
Code example source: gov.sandia.foundry/gov-sandia-cognition-learning-core
@Override
protected double computeUpdate(
    final LinearBinaryCategorizer target,
    final Vector input,
    final boolean actualCategory,
    final double predicted)
{
    // Get the actual category.
    final double actual = actualCategory ? +1.0 : -1.0;
    // Compute the margin.
    final double margin = actual * predicted;
    final double norm = input.norm2Squared();
    return computeUpdate(margin, norm);
}
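These overloads reduce a training example to two scalars before delegating to a scalar computeUpdate(margin, norm): the margin actual * predicted, which is positive exactly when the prediction has the correct sign, and the squared 2-norm of the input. A sketch of the same bookkeeping with made-up values and a made-up class name:

import gov.sandia.cognition.math.matrix.Vector;
import gov.sandia.cognition.math.matrix.VectorFactory;

public class MarginDemo
{
    public static void main(final String[] args)
    {
        final Vector input = VectorFactory.getDefault().copyValues(3.0, 4.0);
        final boolean actualCategory = true; // hypothetical positive example
        final double predicted = -0.5;       // hypothetical raw score f(x)

        final double actual = actualCategory ? +1.0 : -1.0;
        final double margin = actual * predicted; // -0.5: misclassified
        final double norm = input.norm2Squared(); // 25.0
        System.out.println(margin + " " + norm);
    }
}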
Code example source: algorithmfoundry/Foundry
@Override
public double computeUpdate(
    final LinearBinaryCategorizer target,
    final Vector input,
    final boolean actualCategory,
    final double predicted)
{
    final double actual = actualCategory ? +1.0 : -1.0;
    final double margin = actual * predicted;
    final double hingeLoss = 1.0 - margin;
    if (Math.abs(margin) > 1.0)
    {
        // Passive when there is no loss.
        return 0.0;
    }
    else
    {
        // Update methods use ||x||^2.
        final double inputNorm2Squared = input.norm2Squared();
        // Compute the update value (tau).
        return this.computeUpdate(
            actual, predicted, hingeLoss, inputNorm2Squared);
    }
}
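Note that this example differs from the variants that follow: it computes the hinge loss 1 - margin but stays passive whenever |margin| > 1, so it also skips examples that are far on the wrong side of the boundary. That pattern is characteristic of a ramp-loss style update, whereas the variants below update on any positive loss.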
Code example source: gov.sandia.foundry/gov-sandia-cognition-learning-core
@Override
public double computeUpdate(
    final LinearBinaryCategorizer target,
    final Vector input,
    final boolean actualCategory,
    final double predicted)
{
    final double actual = actualCategory ? +1.0 : -1.0;
    final double loss = 1.0 - actual * predicted;
    if (loss <= 0.0)
    {
        // Passive when there is no loss.
        return 0.0;
    }
    else
    {
        // Update methods use ||x||^2.
        final double inputNorm2Squared = input.norm2Squared();
        // Compute the update value (tau).
        return this.computeUpdate(
            actual, predicted, loss, inputNorm2Squared);
    }
}
Code example source: algorithmfoundry/Foundry
@Override
public double computeUpdate(
    final LinearBinaryCategorizer target,
    final Vector input,
    final boolean actualCategory,
    final double predicted)
{
    final double actual = actualCategory ? +1.0 : -1.0;
    final double loss = 1.0 - actual * predicted;
    if (loss <= 0.0)
    {
        // Passive when there is no loss.
        return 0.0;
    }
    else
    {
        // Update methods use ||x||^2.
        final double inputNorm2Squared = input.norm2Squared();
        // Compute the update value (tau).
        return this.computeUpdate(
            actual, predicted, loss, inputNorm2Squared);
    }
}
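For context: in the classic passive-aggressive (PA) scheme of Crammer et al., the update magnitude is tau = loss / ||x||^2, which is why the squared norm, rather than the norm itself, is handed to the overloaded computeUpdate. Below is a minimal sketch of that base rule; the actual Foundry overloads may implement the capped PA-I or regularized PA-II variants, so treat this as illustrative only:

public final class PassiveAggressiveSketch
{
    private PassiveAggressiveSketch()
    {
    }

    // Basic PA step size: tau = loss / ||x||^2.
    // PA-I would additionally cap this at an aggressiveness parameter C;
    // PA-II would use loss / (||x||^2 + 1 / (2 * C)).
    public static double computeTau(final double loss, final double inputNorm2Squared)
    {
        return loss / inputNorm2Squared;
    }
}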
Code example source: gov.sandia.foundry/gov-sandia-cognition-learning-core
protected double computeScaleFactor(
    Vector gradientCurrent,
    Vector gradientPrevious )
{
    // Polak-Ribiere beta: (g_k - g_{k-1}) . g_k / ||g_{k-1}||^2.
    Vector deltaGradient = gradientCurrent.minus( gradientPrevious );
    double deltaTgradient = deltaGradient.dotProduct( gradientCurrent );
    double denom = gradientPrevious.norm2Squared();
    double beta = deltaTgradient / denom;
    return beta;
}
Code example source: algorithmfoundry/Foundry
protected double computeScaleFactor(
    Vector gradientCurrent,
    Vector gradientPrevious )
{
    Vector deltaGradient = gradientCurrent.minus( gradientPrevious );
    double deltaTgradient = deltaGradient.dotProduct( gradientCurrent );
    double denom = gradientPrevious.norm2Squared();
    double beta = deltaTgradient / denom;
    return beta;
}
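Unlike the Fletcher-Reeves examples earlier, these compute the Polak-Ribiere conjugate gradient factor, beta = (g_k - g_{k-1}) . g_k / ||g_{k-1}||^2; here norm2Squared() appears only in the denominator. The same arithmetic as a standalone sketch (values and class name are made up):

import gov.sandia.cognition.math.matrix.Vector;
import gov.sandia.cognition.math.matrix.VectorFactory;

public class PolakRibiereBetaDemo
{
    public static void main(final String[] args)
    {
        // Hypothetical gradients from two successive iterations.
        final Vector gCurrent = VectorFactory.getDefault().copyValues(1.0, 2.0);
        final Vector gPrevious = VectorFactory.getDefault().copyValues(2.0, 0.0);

        // beta = ((g_k - g_{k-1}) . g_k) / ||g_{k-1}||^2
        //      = ((-1, 2) . (1, 2)) / 4 = 3 / 4 = 0.75
        final double beta = gCurrent.minus(gPrevious).dotProduct(gCurrent)
            / gPrevious.norm2Squared();
        System.out.println(beta); // 0.75
    }
}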
Code example source: algorithmfoundry/Foundry
/**
 * Gets the regularization penalty term for the current result. It
 * computes the squared 2-norm of the parameters of the factorization
 * machine, each multiplied with their appropriate regularization weight.
 *
 * @return
 *     The regularization penalty term for the objective.
 */
public double getRegularizationPenalty()
{
    final double bias = this.result.getBias();
    double penalty = this.biasRegularization * bias * bias;
    if (this.result.hasWeights())
    {
        penalty += this.weightRegularization
            * this.result.getWeights().norm2Squared();
    }
    if (this.result.hasFactors())
    {
        penalty += this.factorRegularization
            * this.result.getFactors().normFrobeniusSquared();
    }
    return penalty;
}
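Spelled out, the penalty above is lambda_bias * bias^2 + lambda_w * ||w||^2 + lambda_V * ||V||_F^2: a squared 2-norm for the weight vector and a squared Frobenius norm for the factor matrix. A worked toy computation with made-up regularization weights and parameter values:

import gov.sandia.cognition.math.matrix.Matrix;
import gov.sandia.cognition.math.matrix.MatrixFactory;
import gov.sandia.cognition.math.matrix.Vector;
import gov.sandia.cognition.math.matrix.VectorFactory;

public class RegularizationPenaltyDemo
{
    public static void main(final String[] args)
    {
        final double biasRegularization = 0.1;     // hypothetical lambda_bias
        final double weightRegularization = 0.01;  // hypothetical lambda_w
        final double factorRegularization = 0.001; // hypothetical lambda_V

        final double bias = 2.0;
        final Vector weights = VectorFactory.getDefault().copyValues(3.0, 4.0);
        final Matrix factors = MatrixFactory.getDefault().createMatrix(2, 2);
        factors.setElement(0, 0, 1.0); // ||factors||_F^2 = 1 + 4 = 5
        factors.setElement(1, 1, 2.0);

        // 0.1 * 4 + 0.01 * 25 + 0.001 * 5 = 0.4 + 0.25 + 0.005 = 0.655
        final double penalty = biasRegularization * bias * bias
            + weightRegularization * weights.norm2Squared()
            + factorRegularization * factors.normFrobeniusSquared();
        System.out.println(penalty);
    }
}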
Code example source: gov.sandia.foundry/gov-sandia-cognition-learning-core
/**
 * Gets the regularization penalty term in the error for the objective.
 *
 * @return
 *     The regularization penalty term.
 */
public double getRegularizationPenalty()
{
    if (this.result == null)
    {
        return 0.0;
    }
    double penalty = this.biasRegularization * this.result.getBias();
    if (this.result.hasWeights())
    {
        penalty += this.weightRegularization * this.result.getWeights().norm2Squared();
    }
    if (this.result.hasFactors())
    {
        penalty += this.factorRegularization * this.result.getFactors().normFrobeniusSquared();
    }
    return penalty;
}
Code example source: algorithmfoundry/Foundry
/**
 * Gets the regularization penalty term in the error for the objective.
 *
 * @return
 *     The regularization penalty term.
 */
public double getRegularizationPenalty()
{
    if (this.result == null)
    {
        return 0.0;
    }
    double penalty = this.biasRegularization * this.result.getBias();
    if (this.result.hasWeights())
    {
        penalty += this.weightRegularization * this.result.getWeights().norm2Squared();
    }
    if (this.result.hasFactors())
    {
        penalty += this.factorRegularization * this.result.getFactors().normFrobeniusSquared();
    }
    return penalty;
}