266 references to VectorUtils
Microsoft.ML.Core.Tests (33)
UnitTests\TestVBuffer.cs (33)
57: VectorUtils.ScaleBy(ref actualDst, c);
77: VectorUtils.ScaleBy(ref dst, c);
78: VectorUtils.ScaleBy(a, ref actualDst, c);
103: Assert.True(CompareNumbersWithTolerance(sum, VectorUtils.Sum(in a), digitsOfPrecision: tol));
104: Assert.True(CompareNumbersWithTolerance(l1, VectorUtils.L1Norm(in a), digitsOfPrecision: tol));
105: Assert.True(CompareNumbersWithTolerance(l2Squared, VectorUtils.NormSquared(in a), digitsOfPrecision: tol));
106: Assert.True(CompareNumbersWithTolerance(l2, VectorUtils.Norm(in a), digitsOfPrecision: tol));
107: Assert.True(CompareNumbersWithTolerance(infNorm, VectorUtils.MaxNorm(in a), digitsOfPrecision: tol));
122: VectorUtils.ScaleBy(ref a, d);
151: VectorUtils.MulElementWise(in a, ref a2DenseVbuff);
174: VectorUtils.MulElementWise(in a2DenseVbuff, ref a);
192: VectorUtils.MulElementWise(in a, ref a2);
193: VectorUtils.SparsifyNormalize(ref a2, 2, 2, normalize: false);
212: VectorUtils.MulElementWise(in a, ref a2);
213: VectorUtils.SparsifyNormalize(ref a2, 2, 2, normalize: false);
240: VectorUtils.MulElementWise(in a, ref a2);
241: VectorUtils.SparsifyNormalize(ref a2, 2, 3, normalize: norm);
266: VectorUtils.MulElementWise(in a, ref b);
267: VectorUtils.SparsifyNormalize(ref b, 2, 2, normalize: norm);
287: VectorUtils.SparsifyNormalize(ref a, 3, 3, normalize);
311: VectorUtils.SparsifyNormalize(ref a, top, bottom, false);
540: /// Naive version of <see cref="VectorUtils.AddMultWithOffset(in VBuffer{float}, float, ref VBuffer{float}, int)"/>,
589: VectorUtils.AddMult(in a, c, ref actualDst);
595: VectorUtils.Add(in a, ref actualDst);
618: VectorUtils.AddMult(in a, c, ref b, ref actualDst);
643: VectorUtils.AddMultWithOffset(in a, c, ref actualDst, offset);
670: VectorUtils.ScaleInto(in a, c, ref actualDst);
692: VectorUtils.AddMultInto(in a, c, in b, ref actualDst);
836: Assert.True(CompareNumbersWithTolerance(l1Dist, VectorUtils.L1Distance(in a, in b), digitsOfPrecision: tol));
837: Assert.True(CompareNumbersWithTolerance(l2Dist2, VectorUtils.L2DistSquared(in a, in b), digitsOfPrecision: tol));
838: Assert.True(CompareNumbersWithTolerance(l2Dist, VectorUtils.Distance(in a, in b), digitsOfPrecision: tol));
839: Assert.True(CompareNumbersWithTolerance(dot, VectorUtils.DotProduct(in a, in b), digitsOfPrecision: tol));
1077: /// we see in <see cref="VectorUtils"/> and <see cref="VBufferUtils"/>, e.g., various
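The assertions at lines 103-107 and 836-839 compare VectorUtils aggregates against naively computed reference values. A minimal sketch of those reference quantities over a plain dense float[] (the tests themselves operate on VBuffer<float>, which may be sparse; helper names here are illustrative, not the test code):

```csharp
using System;
using System.Linq;

static class NaiveVectorStats
{
    // Reference implementations of the quantities checked above:
    // sum, L1 norm, squared L2 norm, L2 norm, and infinity (max) norm.
    public static float Sum(float[] v) => v.Sum();
    public static float L1Norm(float[] v) => v.Sum(x => Math.Abs(x));
    public static float NormSquared(float[] v) => v.Sum(x => x * x);
    public static float Norm(float[] v) => (float)Math.Sqrt(NormSquared(v));
    public static float MaxNorm(float[] v) => v.Length == 0 ? 0 : v.Max(x => Math.Abs(x));
}
```
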
Microsoft.ML.Data (9)
Evaluators\ClusteringEvaluator.cs (4)
307: Single denom = VectorUtils.Distance(in centroidI, in centroidJ);
364: VectorUtils.ScaleBy(ref _clusterCentroids[i], (Single)(1.0 / _numInstancesOfClstr[i]));
372: var distance = VectorUtils.Distance(in _clusterCentroids[assigned], in features);
463: VectorUtils.Add(in _features, ref _clusterCentroids[_indicesArr[0]]);
Evaluators\MultiOutputRegressionEvaluator.cs (3)
516: dst = VectorUtils.L1Distance(in label, in score);
526: dst = VectorUtils.L2DistSquared(in label, in score);
536: dst = MathUtils.Sqrt(VectorUtils.L2DistSquared(in label, in score));
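These three per-row metrics are the L1 distance, the squared L2 distance, and its square root (the L2 distance) between the label vector and the score vector. A plain-array sketch of the same formulas, assuming equal-length vectors (illustrative helpers, not the evaluator code):

```csharp
using System;

static class MultiRegressionMetrics
{
    // L1: sum_i |label[i] - score[i]|
    public static float L1Distance(float[] label, float[] score)
    {
        float sum = 0;
        for (int i = 0; i < label.Length; i++)
            sum += Math.Abs(label[i] - score[i]);
        return sum;
    }

    // Squared L2: sum_i (label[i] - score[i])^2; the third metric is its square root.
    public static float L2DistSquared(float[] label, float[] score)
    {
        float sum = 0;
        for (int i = 0; i < label.Length; i++)
        {
            float d = label[i] - score[i];
            sum += d * d;
        }
        return sum;
    }
}
```
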
Scorers\ClusteringScorer.cs (1)
111: int index = VectorUtils.ArgMin(in score);
Scorers\MulticlassClassificationScorer.cs (1)
579: int index = VectorUtils.ArgMax(in score);
Microsoft.ML.Ensemble (8)
OutputCombiners\BaseMultiAverager.cs (3)
49: VectorUtils.Add(in src[i], ref dst);
58: VectorUtils.AddMult(in src[i], w, ref dst);
62: VectorUtils.ScaleBy(ref dst, 1 / weightTotal);
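The combiner pattern here is a (weighted) mean of per-model score vectors: each output is accumulated into dst, scaled by its weight, and the result is rescaled by 1 / weightTotal. A plain-array sketch of that pattern (illustrative names, not the ensemble code):

```csharp
// Weighted average of several score vectors: dst = (sum_i w_i * src_i) / (sum_i w_i).
static float[] WeightedAverage(float[][] src, float[] weights)
{
    int dim = src[0].Length;
    var dst = new float[dim];
    float weightTotal = 0;
    for (int i = 0; i < src.Length; i++)
    {
        weightTotal += weights[i];
        for (int j = 0; j < dim; j++)
            dst[j] += weights[i] * src[i][j];   // mirrors AddMult(in src[i], w, ref dst)
    }
    for (int j = 0; j < dim; j++)
        dst[j] /= weightTotal;                  // mirrors ScaleBy(ref dst, 1 / weightTotal)
    return dst;
}
```
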
OutputCombiners\BaseMultiCombiner.cs (2)
89: var sum = VectorUtils.L1Norm(in values[i]);
93: VectorUtils.ScaleBy(ref values[i], 1 / sum);
OutputCombiners\MultiVoting.cs (1)
92: int index = VectorUtils.ArgMax(in src[i]);
Selector\DiversityMeasure\MultiDisagreementDiversityMeasure.cs (2)
22: return (VectorUtils.ArgMax(in valueX) != VectorUtils.ArgMax(in valueY)) ? 1 : 0;
Microsoft.ML.FastTree (2)
FastTree.cs (1)
2943: Numeric.VectorUtils.SparsifyNormalize(ref dst, top, bottom, normalize);
GamModelParameters.cs (1)
433: Numeric.VectorUtils.SparsifyNormalize(ref contributions, top, bottom, normalize);
Microsoft.ML.KMeansClustering (29)
KMeansModelParameters.cs (3)
169: float instanceL2 = VectorUtils.NormSquared(in src);
173: -2 * VectorUtils.DotProduct(in _centroids[i], in src) + _centroidL2s[i] + instanceL2);
270: _centroidL2s[i] = VectorUtils.NormSquared(_centroids[i]);
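The scorer never forms x - c explicitly; it uses the expansion ||x - c||^2 = ||x||^2 - 2 x·c + ||c||^2, caching ||c||^2 per centroid (_centroidL2s) and computing ||x||^2 once per instance, which avoids materializing the difference for sparse feature vectors. The same pattern recurs throughout KMeansPlusPlusTrainer.cs below. A plain-array sketch of the identity (illustrative, not the model code):

```csharp
// Squared Euclidean distance via the expansion used above:
// ||x - c||^2 = ||x||^2 - 2 * x.c + ||c||^2, with ||c||^2 precomputed per centroid.
static float SquaredDistance(float[] x, float[] centroid, float centroidL2, float instanceL2)
{
    float dot = 0;
    for (int i = 0; i < x.Length; i++)
        dot += x[i] * centroid[i];
    return instanceL2 - 2 * dot + centroidL2;
}
```
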
KMeansPlusPlusTrainer.cs (26)
394: l2 = VectorUtils.NormSquared(cursor.Features);
399: var distance = -2 * VectorUtils.DotProduct(in cursor.Features, in centroids[j])
441: centroidL2s[i] = cachedCandidateL2 ?? VectorUtils.NormSquared(candidate);
642: MathUtils.Sqrt(newClusterL2 - 2 * VectorUtils.DotProduct(in newClusterFeatures, in oldClusterFeatures) + oldClusterL2);
699: float pointNorm = VectorUtils.NormSquared(in point);
720: Contracts.Assert(-2 * VectorUtils.DotProduct(in point, in clusters[j]) + clustersL2s[j] > bestWeight);
724: float weight = -2 * VectorUtils.DotProduct(in point, in clusters[j]) + clustersL2s[j];
860: clustersL2s[clusterCount] = VectorUtils.NormSquared(clusters[clusterCount]);
896: clustersL2s[clusterCount] = VectorUtils.NormSquared(clusters[clusterCount]);
1116: VectorUtils.Add(in features, ref CachedSum[cluster]);
1122: VectorUtils.Add(in features, ref CachedSum[cluster]);
1124: VectorUtils.AddMult(in features, -1, ref CachedSum[previousCluster]);
1135: VectorUtils.Add(in features, ref Centroids[cluster]);
1164: VectorUtils.Add(in workChunkArr[i].CachedSum[j], ref reducedState.CachedSum[j]);
1165: VectorUtils.Add(in workChunkArr[i].Centroids[j], ref reducedState.Centroids[j]);
1211: VectorUtils.Add(in CachedSum[i], ref Centroids[i]);
1214: VectorUtils.ScaleBy(ref Centroids[i], (float)(1.0 / ClusterSizes[i]));
1218: float clusterDelta = MathUtils.Sqrt(VectorUtils.L2DistSquared(in Centroids[i], in centroids[i]));
1225: centroidL2s[i] = VectorUtils.NormSquared(Centroids[i]);
1305: float instanceNormSquared = VectorUtils.NormSquared(in features);
1337: float bestDistance = MathUtils.Sqrt(VectorUtils.L2DistSquared(in features, in centroids[_bestCluster[n]]));
1343: float distance = MathUtils.Sqrt(VectorUtils.L2DistSquared(in features, in centroids[j]));
1363: centroidL2s[i] = VectorUtils.NormSquared(in centroids[i]);
1425: VectorUtils.Add(in cursor.Features, ref cachedSumCopy[state.GetBestCluster(id)]);
1804: float distance = -2 * VectorUtils.DotProduct(in features, in centroids[j]) + centroidL2s[j];
1827: float l2 = VectorUtils.NormSquared(in features);
Microsoft.ML.PCA (33)
PcaTrainer.cs (19)
245: VectorUtils.ScaleBy(v, 1 / VectorUtils.Norm(y[i]));
249: VectorUtils.AddMult(v, y[j], -VectorUtils.DotProduct(v, y[j]));
261: b2[i * oversampledRank + j] = b2[j * oversampledRank + i] = VectorUtils.DotProduct(b[i], b[j]);
318: VectorUtils.AddMult(in cursor.Features, cursor.Weight, ref mean);
321: VectorUtils.AddMult(
324: cursor.Weight * VectorUtils.DotProduct(omega[i], in cursor.Features));
337: VectorUtils.ScaleBy(y[i], invn);
341: VectorUtils.ScaleBy(ref mean, invn);
343: VectorUtils.AddMult(in mean, y[i], -VectorUtils.DotProduct(omega[i], in mean));
463: _meanProjected[i] = VectorUtils.DotProduct(in _eigenVectors[i], in mean);
470: _norm2Mean = VectorUtils.NormSquared(mean);
497: _norm2Mean = VectorUtils.NormSquared(_mean);
512: _meanProjected[i] = VectorUtils.DotProduct(in _eigenVectors[i], in _mean);
637: float norm2X = VectorUtils.NormSquared(in src) -
638: 2 * VectorUtils.DotProduct(in mean, in src) + _norm2Mean;
647: float component = VectorUtils.DotProduct(in _eigenVectors[i], in src) - _meanProjected[i];
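Lines 245 and 249 (and 357/361 in PcaTransformer.cs below, where the comments spell it out) are a modified Gram-Schmidt step: normalize y[i], then subtract its projection from every later basis vector. A plain-array sketch of that step (illustrative, not the trainer code):

```csharp
using System;

// Modified Gram-Schmidt, mirroring the ScaleBy / AddMult / DotProduct calls above:
// normalize y[i], then remove its component from each subsequent vector.
static void Orthonormalize(float[][] y)
{
    for (int i = 0; i < y.Length; i++)
    {
        float normSq = 0;
        for (int k = 0; k < y[i].Length; k++)
            normSq += y[i][k] * y[i][k];
        float invNorm = 1 / (float)Math.Sqrt(normSq);
        for (int k = 0; k < y[i].Length; k++)
            y[i][k] *= invNorm;                      // ScaleBy(v, 1 / Norm(y[i]))

        for (int j = i + 1; j < y.Length; j++)
        {
            float dot = 0;
            for (int k = 0; k < y[i].Length; k++)
                dot += y[i][k] * y[j][k];            // DotProduct(v, y[j])
            for (int k = 0; k < y[i].Length; k++)
                y[j][k] -= dot * y[i][k];            // AddMult(v, y[j], -dot)
        }
    }
}
```
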
PcaTransformer.cs (14)
177: MeanProjected[i] = VectorUtils.DotProduct(Eigenvectors[i], mean);
357: VectorUtils.ScaleBy(v, 1 / VectorUtils.Norm(y[iinfo][i])); // normalize
361: VectorUtils.AddMult(v, y[iinfo][j], -VectorUtils.DotProduct(v, y[iinfo][j])); // subtract the projection of y[j] on v
376: b2[i * oversampledRank[iinfo] + j] = b2[j * oversampledRank[iinfo] + i] = VectorUtils.DotProduct(b[iinfo][i], b[iinfo][j]);
444: VectorUtils.AddMult(in features, mean[iinfo], weight);
447: VectorUtils.AddMult(in features, y[iinfo][i], weight * VectorUtils.DotProductWithOffset(omega[iinfo][i], 0, in features));
463: VectorUtils.ScaleBy(y[iinfo][i], invn);
467: VectorUtils.ScaleBy(mean[iinfo], invn);
469: VectorUtils.AddMult(mean[iinfo], y[iinfo][i], -VectorUtils.DotProduct(omega[iinfo][i], mean[iinfo]));
594: editor.Values[i] = VectorUtils.DotProductWithOffset(transformInfo.Eigenvectors[i], 0, in src) -
Microsoft.ML.StandardTrainers (146)
LdSvm\LdSvmModelParameters.cs (5)
254: score += Math.Tanh(_sigma * (VectorUtils.DotProduct(in _thetaPrime[current], in src) + _biasThetaPrime[current])) *
255: (VectorUtils.DotProduct(in _w[current], in src) + _biasW[current]);
256: childIndicator = VectorUtils.DotProduct(in _theta[current], in src) + _biasTheta[current];
259: score += Math.Tanh(_sigma * (VectorUtils.DotProduct(in _thetaPrime[current], in src) + _biasThetaPrime[current])) *
260: (VectorUtils.DotProduct(in _w[current], in src) + _biasW[current]);
LdSvm\LdSvmTrainer.cs (12)
212: tanhThetaTx[i] = (float)Math.Tanh(gamma * (VectorUtils.DotProduct(in feat, in theta[i]) + biasTheta[i]));
216: float tempGrad = pathWt[i] * localWt[i] * (VectorUtils.DotProduct(in feat, in w[i]) + biasW[i]);
241: tempSum += Math.Abs(VectorUtils.DotProduct(in s, in theta[thetaIdx]) + biasTheta[thetaIdx]);
299: VectorUtils.ScaleBy(ref tempW[i], coef);
301: VectorUtils.ScaleBy(ref tempTheta[i], coef);
303: VectorUtils.ScaleBy(ref tempThetaPrime[i], coef);
324: var tanhDist = (float)Math.Tanh(gamma * (VectorUtils.DotProduct(in features, in theta[i]) + biasTheta[i]));
331: localWt[l] = (float)Math.Tanh(_options.Sigma * (VectorUtils.DotProduct(in features, in thetaPrime[l]) + biasThetaPrime[l]));
337: wDotX[l] = VectorUtils.DotProduct(in features, in w[l]) + biasW[l];
355: VectorUtils.AddMult(in features, tempGradW, ref tempW[l]);
363: VectorUtils.AddMult(in features, tempGradThetaPrime, ref tempThetaPrime[l]);
371: VectorUtils.AddMult(in features, tempGradTheta, ref tempTheta[m]);
Optimizer\DifferentiableFunction.cs (22)
98: VectorUtils.ScaleBy(ref _tempGrads[chunkIndex], 0);
108: VectorUtils.Add(in tempGrad, ref _tempGrads[chunkIndex]);
131: VectorUtils.ScaleBy(ref gradient, 0);
138: VectorUtils.Add(in _tempGrads[c], ref gradient);
190: float normX = VectorUtils.Norm(x);
216: float norm = VectorUtils.Norm(dir);
217: VectorUtils.ScaleBy(ref dir, 1 / norm);
219: VectorUtils.AddMultInto(in x, Eps, in dir, ref newX);
222: VectorUtils.AddMultInto(in x, -Eps, in dir, ref newX);
225: float dirDeriv = VectorUtils.DotProduct(in grad, in dir);
256: float normX = VectorUtils.Norm(x);
266: VectorUtils.AddMultInto(in x, Eps, in dir, ref newX);
269: VectorUtils.AddMultInto(in x, -Eps, in dir, ref newX);
272: float dirDeriv = VectorUtils.DotProduct(in grad, in dir);
295: float normX = VectorUtils.Norm(x);
305: VectorUtils.AddMultInto(in x, Eps, in dir, ref newX);
308: VectorUtils.AddMultInto(in x, -Eps, in dir, ref newX);
311: float dirDeriv = VectorUtils.DotProduct(in grad, in dir);
333: float normDir = VectorUtils.Norm(dir);
336: float dirDeriv = VectorUtils.DotProduct(in newGrad, in dir);
340: VectorUtils.AddMultInto(in x, scaledEps, in dir, ref newX);
343: VectorUtils.AddMultInto(in x, -scaledEps, in dir, ref newX);
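The repeated AddMultInto(in x, +/-Eps, in dir, ref newX) followed by DotProduct(in grad, in dir) is a central-difference check of an analytic gradient along a direction: the numeric estimate (f(x + eps d) - f(x - eps d)) / (2 eps) is compared against grad . d. A self-contained sketch of that check with plain arrays (illustrative names, not the ML.NET gradient tester):

```csharp
using System;

// Central-difference check of an analytic gradient along a direction dir.
static void CheckGradient(Func<double[], double> f, Func<double[], double[]> grad,
    double[] x, double[] dir, double eps = 1e-5)
{
    int n = x.Length;
    var plus = new double[n];
    var minus = new double[n];
    for (int i = 0; i < n; i++)
    {
        plus[i] = x[i] + eps * dir[i];   // mirrors AddMultInto(in x, Eps, in dir, ref newX)
        minus[i] = x[i] - eps * dir[i];  // mirrors AddMultInto(in x, -Eps, in dir, ref newX)
    }
    double numeric = (f(plus) - f(minus)) / (2 * eps);

    double analytic = 0;                 // mirrors DotProduct(in grad, in dir)
    var g = grad(x);
    for (int i = 0; i < n; i++)
        analytic += g[i] * dir[i];

    Console.WriteLine($"numeric={numeric:G6} analytic={analytic:G6}");
}
```
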
Optimizer\L1Optimizer.cs (9)
166: VectorUtils.AddMultInto(in _x, alpha, in _dir, ref _newX);
199: float dirDeriv = -VectorUtils.DotProduct(in _dir, in _steepestDescDir);
209: float alpha = (Iter == 1 ? (1 / VectorUtils.Norm(_dir)) : 1);
211: float unnormCos = VectorUtils.DotProduct(in _steepestDescDir, in _newX) - VectorUtils.DotProduct(in _steepestDescDir, in _x);
222: unnormCos = VectorUtils.DotProduct(in _steepestDescDir, in _newX) - VectorUtils.DotProduct(in _steepestDescDir, in _x);
242: unnormCos = VectorUtils.DotProduct(in _steepestDescDir, in _newX) - VectorUtils.DotProduct(in _steepestDescDir, in _x);
Optimizer\LineSearch.cs (8)
464: float d1 = VectorUtils.DotProduct(in x, in _c1);
465: float d2 = VectorUtils.DotProduct(in x, in _c2);
466: float d3 = VectorUtils.DotProduct(in x, in _c3);
468: VectorUtils.AddMult(in _c1, d1, ref grad);
469: VectorUtils.AddMult(in _c2, d2, ref grad);
517: float norm = VectorUtils.Norm(grad);
528: Console.WriteLine(VectorUtils.Norm(grad));
537: Console.WriteLine(VectorUtils.Norm(grad));
Optimizer\OptimizationMonitor.cs (2)
323: float gradientNormSquared = VectorUtils.NormSquared(gradient);
380: float norm = VectorUtils.Norm(grad);
Optimizer\Optimizer.cs (16)
261: alphas[i] = -VectorUtils.DotProduct(in _sList[i], in _dir) / _roList[i];
262: VectorUtils.AddMult(in _yList[i], alphas[i], ref _dir);
272: float yDotY = VectorUtils.DotProduct(in _yList[lastGoodRo], in _yList[lastGoodRo]);
273: VectorUtils.ScaleBy(ref _dir, _roList[lastGoodRo] / yDotY);
279: float beta = VectorUtils.DotProduct(in _yList[i], in _dir) / _roList[i];
280: VectorUtils.AddMult(in _sList[i], -alphas[i] - beta, ref _dir);
318: VectorUtils.ScaleInto(in _grad, -1, ref _dir);
357: VectorUtils.AddMultInto(in _newX, -1, in _x, ref nextS);
358: VectorUtils.AddMultInto(in _newGrad, -1, in _grad, ref nextY);
359: float ro = VectorUtils.DotProduct(in nextS, in nextY);
383: float dirDeriv = VectorUtils.DotProduct(in _dir, in _grad);
395: float alpha = (Iter == 1 ? (1 / VectorUtils.Norm(_dir)) : 1);
404: VectorUtils.AddMultInto(in _x, alpha, in _dir, ref _newX);
425: dirDeriv = VectorUtils.DotProduct(in _dir, in _newGrad);
485: VectorUtils.AddMultInto(in _x, alpha, in _dir, ref _newX);
499: dirDeriv = VectorUtils.DotProduct(in _dir, in _newGrad);
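Lines 261-280 are the L-BFGS two-loop recursion over the stored step differences (_sList), gradient differences (_yList), and their inner products (_roList). A plain-array sketch of the textbook form of that recursion (Nocedal and Wright); note the ML.NET code seeds _dir with -grad via ScaleInto and stores ro = s . y directly, so some signs differ from this sketch, which is illustrative rather than the library implementation:

```csharp
using System;
using System.Collections.Generic;

static class TwoLoop
{
    static double Dot(double[] a, double[] b)
    {
        double d = 0;
        for (int i = 0; i < a.Length; i++)
            d += a[i] * b[i];
        return d;
    }

    // Returns an approximate Newton descent direction from the stored (s, y) pairs.
    public static double[] Direction(double[] grad, List<double[]> sList, List<double[]> yList)
    {
        int n = grad.Length, m = sList.Count;
        var q = (double[])grad.Clone();
        var alpha = new double[m];

        for (int i = m - 1; i >= 0; i--)             // first loop: newest to oldest
        {
            alpha[i] = Dot(sList[i], q) / Dot(sList[i], yList[i]);
            for (int j = 0; j < n; j++)
                q[j] -= alpha[i] * yList[i][j];
        }

        if (m > 0)                                   // initial Hessian scaling (s.y / y.y)
        {
            double gamma = Dot(sList[m - 1], yList[m - 1]) / Dot(yList[m - 1], yList[m - 1]);
            for (int j = 0; j < n; j++)
                q[j] *= gamma;
        }

        for (int i = 0; i < m; i++)                  // second loop: oldest to newest
        {
            double beta = Dot(yList[i], q) / Dot(sList[i], yList[i]);
            for (int j = 0; j < n; j++)
                q[j] += (alpha[i] - beta) * sList[i][j];
        }

        for (int j = 0; j < n; j++)                  // negate to get a descent direction
            q[j] = -q[j];
        return q;
    }
}
```
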
Optimizer\SgdOptimizer.cs (16)
183: VectorUtils.ScaleBy(ref step, _momentum);
205: VectorUtils.AddMult(in grad, scale, ref step);
211: VectorUtils.ScaleBy(prev, ref avg, (float)n / (n + 1));
212: VectorUtils.AddMult(in step, -stepSize, ref x);
213: VectorUtils.AddMult(in x, (float)1 / (n + 1), ref avg);
224: VectorUtils.AddMult(in step, -stepSize, ref prev, ref x);
314: public float Deriv => VectorUtils.DotProduct(in _dir, in _grad);
324: VectorUtils.ScaleInto(in _grad, -1, ref _dir);
331: VectorUtils.AddMultInto(in _point, step, in _dir, ref _newPoint);
333: deriv = VectorUtils.DotProduct(in _dir, in _newGrad);
341: float newByNew = VectorUtils.NormSquared(_newGrad);
342: float newByOld = VectorUtils.DotProduct(in _newGrad, in _grad);
343: float oldByOld = VectorUtils.NormSquared(_grad);
346: VectorUtils.ScaleBy(ref _dir, beta);
347: VectorUtils.AddMult(in _newGrad, -1, ref _dir);
350: VectorUtils.ScaleInto(in _newGrad, -1, ref _dir);
Standard\LinearModelParameters.cs (6)
269: return Bias + VectorUtils.DotProduct(in weights, in src);
272: return Bias + VectorUtils.DotProduct(in _weightsDense, in src);
282: VectorUtils.MulElementWise(in weights, ref contributions);
283: VectorUtils.SparsifyNormalize(ref contributions, top, bottom, normalize);
345: VectorUtils.Add(in subweights, ref weights);
348: VectorUtils.ScaleBy(ref weights, (float)1 / models.Count);
Standard\LogisticRegression\LbfgsPredictorBase.cs (4)
357: VectorUtils.AddMult(in x, -1, ref oldWeights);
358: float normDiff = VectorUtils.Norm(oldWeights);
788: VectorUtils.ScaleBy(ref gradient, scaleFactor);
828: VectorUtils.Add(in _localGradients[i - 1], ref gradient);
Standard\LogisticRegression\LogisticRegression.cs (4)
195: float score = bias + VectorUtils.DotProductWithOffset(in x, 1, in feat);
210: VectorUtils.AddMultWithOffset(in feat, mult, ref grad, 1); // Note that 0th L-BFGS weight is for bias.
241: var regLoss = VectorUtils.NormSquared(currentWeightsValues.Slice(1)) * L2Weight;
349: var score = bias + VectorUtils.DotProductWithOffset(in CurrentWeights, 1, in cursor.Features);
Standard\LogisticRegression\MulticlassLogisticRegression.cs (4)
254: scores[c] = bias + VectorUtils.DotProductWithOffset(in x, start, in feat);
269: VectorUtils.AddMultWithOffset(in feat, mult, ref grad, start);
327: var regLoss = VectorUtils.NormSquared(CurrentWeights.GetValues().Slice(BiasCount)) * L2Weight;
791: editor.Values[i] = Biases[i] + VectorUtils.DotProduct(in weights[i], in src);
Standard\Online\AveragedLinear.cs (9)
185: return (TotalBias + VectorUtils.DotProduct(in feat, in TotalWeights)) / (float)NumWeightUpdates;
199: VectorUtils.AddMult(in Weights, NumNoUpdates * WeightsScale, ref TotalWeights);
211: VectorUtils.ScaleInto(in TotalWeights, 1 / (float)NumWeightUpdates, ref Weights);
236: VectorUtils.AddMult(in Weights, NumNoUpdates * WeightsScale, ref TotalWeights);
251: VectorUtils.AddMult(in feat, biasUpdate / WeightsScale, ref Weights);
270: VectorUtils.ScaleInto(in TotalWeights, 1 / (float)NumWeightUpdates, ref Weights);
284: VectorUtils.AddMult(in Weights, WeightsScale, ref TotalWeights);
289: VectorUtils.AddMult(in Weights, Gain * WeightsScale, ref TotalWeights);
300: VectorUtils.ScaleBy(ref TotalWeights, scale);
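The averaging pattern here: after each update the scale-compensated current weights are folded into TotalWeights, and the averaged predictor divides the accumulated weights (and bias) by the number of weight updates. A plain-array sketch of that bookkeeping (names are illustrative, not the trainer's state class):

```csharp
// Weight averaging as used above: TotalWeights accumulates the (scaled) current
// weights after each update; the averaged model is TotalWeights / NumWeightUpdates.
sealed class AveragedLinearSketch
{
    public float[] Weights;
    public float[] TotalWeights;
    public float WeightsScale = 1f;
    public long NumWeightUpdates;

    public AveragedLinearSketch(int dim)
    {
        Weights = new float[dim];
        TotalWeights = new float[dim];
    }

    public void AccumulateAverage()
    {
        // mirrors AddMult(in Weights, WeightsScale, ref TotalWeights)
        for (int i = 0; i < Weights.Length; i++)
            TotalWeights[i] += WeightsScale * Weights[i];
        NumWeightUpdates++;
    }

    public float AveragedScore(float[] features, float totalBias)
    {
        // mirrors (TotalBias + DotProduct(feat, TotalWeights)) / NumWeightUpdates
        float dot = 0;
        for (int i = 0; i < features.Length; i++)
            dot += features[i] * TotalWeights[i];
        return (totalBias + dot) / NumWeightUpdates;
    }
}
```
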
Standard\Online\AveragedPerceptron.cs (1)
144: VectorUtils.ScaleBy(ref weights, 1 / (float)NumWeightUpdates);
Standard\Online\LinearSvm.cs (5)
195: VectorUtils.ScaleInto(in feat, currentBiasUpdate, ref _weightsUpdate);
199: VectorUtils.AddMult(in feat, currentBiasUpdate, ref _weightsUpdate);
234: VectorUtils.AddMult(in weightsUpdate, rate * weightsUpdateScale / (_numBatchExamples * WeightsScale), ref Weights);
243: float normalizer = 1 / (MathUtils.Sqrt(_lambda) * VectorUtils.Norm(Weights) * Math.Abs(WeightsScale));
258: => Bias + VectorUtils.DotProduct(in feat, in Weights) * WeightsScale;
Standard\Online\OnlineGradientDescent.cs (1)
121: VectorUtils.ScaleBy(ref weights, 1 / (float)NumWeightUpdates);
Standard\Online\OnlineLinear.cs (3)
183: VectorUtils.ScaleBy(ref Weights, WeightsScale);
237: => Bias + VectorUtils.DotProduct(in feat, in Weights) * WeightsScale;
298: float maxNorm = Math.Max(VectorUtils.MaxNorm(in state.Weights), Math.Abs(state.Bias));
Standard\PoissonRegression\PoissonRegression.cs (2)
171: float dot = VectorUtils.DotProductWithOffset(in x, 1, in feat) + bias;
176: VectorUtils.AddMultWithOffset(in feat, mult, ref grad, 1);
Standard\SdcaBinary.cs (12)
132: return VectorUtils.DotProduct(in weights, in features) + bias;
137: return VectorUtils.DotProduct(in weights, in features) * (float)scaling + bias;
319: return VectorUtils.DotProduct(in weights, in features) + bias;
583: var normSquared = VectorUtils.NormSquared(features);
824: var featuresNormSquared = VectorUtils.NormSquared(features);
861: VectorUtils.AddMult(in features, weightsEditor.Values, primalUpdate);
996: Double l1Regularizer = l1Threshold * l2Const * (VectorUtils.L1Norm(in weights[0]) + Math.Abs(biasReg[0]));
997: var l2Regularizer = l2Const * (VectorUtils.NormSquared(weights[0]) + biasReg[0] * biasReg[0]) * 0.5;
2094: var newLoss = lossSum.Sum / count + l2Weight * VectorUtils.NormSquared(weights) * 0.5;
2136: VectorUtils.AddMult(in features, weightsEditor.Values, (float)(step / weightScaling));
2155: VectorUtils.ScaleBy(ref weights, (float)weightScaling); // normalize the weights
2198: VectorUtils.ScaleBy(ref weights, (float)weightScaling); // restore the true weights
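Lines 132/137 are the linear score w . x (optionally rescaled) plus bias, and lines 996-997 combine an L1 term scaled by l1Threshold * l2Const with a 0.5 * l2Const squared-L2 term, both including the bias. A plain-array sketch of those two formulas (illustrative helpers, not the SDCA trainer):

```csharp
using System;

static class SdcaSketch
{
    // Linear score: w . x (times an optional scaling) plus bias.
    public static float Score(float[] w, float[] x, float bias, float scaling = 1f)
    {
        float dot = 0;
        for (int i = 0; i < w.Length; i++)
            dot += w[i] * x[i];
        return dot * scaling + bias;
    }

    // Regularizer terms as combined above (weights plus the bias contribution).
    public static double Regularizer(float[] w, float biasReg, float l2Const, float l1Threshold)
    {
        double l1 = 0, l2 = 0;
        for (int i = 0; i < w.Length; i++)
        {
            l1 += Math.Abs(w[i]);
            l2 += (double)w[i] * w[i];
        }
        double l1Regularizer = l1Threshold * l2Const * (l1 + Math.Abs(biasReg));
        double l2Regularizer = l2Const * (l2 + (double)biasReg * biasReg) * 0.5;
        return l1Regularizer + l2Regularizer;
    }
}
```
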
Standard\SdcaMulticlass.cs (5)
218: normSquared = VectorUtils.NormSquared(in features);
279: VectorUtils.AddMult(in features, weightsEditor.Values, -primalUpdate);
314: VectorUtils.AddMult(in features, weightsEditor.Values, labelPrimalUpdate);
421: weightsL1Norm += VectorUtils.L1Norm(in weights[iClass]) + Math.Abs(biasReg[iClass]);
422: weightsL2NormSquared += VectorUtils.NormSquared(weights[iClass]) + biasReg[iClass] * biasReg[iClass];
Microsoft.ML.Sweeper (4)
Algorithms\NelderMead.cs (4)
328: var dist = VectorUtils.Distance(x, y);
386: VectorUtils.AddMult(_simplexVertices.ElementAt(i).Value, centroid, scale);
393: VectorUtils.AddMult(centroid, newPoint, 1 + delta);
394: VectorUtils.AddMult(worst, newPoint, -delta);
Microsoft.ML.Transforms (2)
FourierDistributionSampler.cs (2)
114: return VectorUtils.L2DistSquared(in first, in second);
235: return VectorUtils.L1Distance(in first, in second);