44 references to BlockingThreadPool
Microsoft.ML.FastTree (44)
Dataset\Dataset.cs (2)
267: Parallel.For(0, NumFlocks, new ParallelOptions { MaxDegreeOfParallelism = BlockingThreadPool.NumThreads },
317: Parallel.For(0, NumFlocks, new ParallelOptions { MaxDegreeOfParallelism = BlockingThreadPool.NumThreads },
FastTreeRanking.cs (1)
611: int numThreads = BlockingThreadPool.NumThreads;
GamClassification.cs (2)
121: int innerLoopSize = 1 + targets.Length / BlockingThreadPool.NumThreads;
134: Parallel.Invoke(new ParallelOptions { MaxDegreeOfParallelism = BlockingThreadPool.NumThreads }, actions);
GamTrainer.cs (4)
398: DefineDocumentThreadBlocks(dataset.NumDocs, BlockingThreadPool.NumThreads, out int[] threadBlocks);
416: }, BlockingThreadPool.NumThreads);
494: DefineDocumentThreadBlocks(TrainSet.NumDocs, BlockingThreadPool.NumThreads, out int[] trainThreadBlocks);
521: }, BlockingThreadPool.NumThreads);
Training\Applications\ObjectiveFunction.cs (2)
65: var queue = new ConcurrentQueue<int>(Enumerable.Range(0, BlockingThreadPool.NumThreads));
79: Parallel.Invoke(new ParallelOptions() { MaxDegreeOfParallelism = BlockingThreadPool.NumThreads }, actions);
Training\DcgCalculator.cs (7)
57: int numThreads = BlockingThreadPool.NumThreads;
167: new ParallelOptions() { MaxDegreeOfParallelism = BlockingThreadPool.NumThreads },
268: new ParallelOptions() { MaxDegreeOfParallelism = BlockingThreadPool.NumThreads },
368: int chunkSize = 1 + dataset.NumQueries / BlockingThreadPool.NumThreads; // Minimizes the number of repeat computations in sparse array to have each thread take as big a chunk as possible
383: Parallel.Invoke(new ParallelOptions() { MaxDegreeOfParallelism = BlockingThreadPool.NumThreads }, actions);
507: int chunkSize = 1 + dataset.NumQueries / BlockingThreadPool.NumThreads; // Minimizes the number of repeat computations in sparse array to have each thread take as big a chunk as possible
521: Parallel.Invoke(new ParallelOptions() { MaxDegreeOfParallelism = BlockingThreadPool.NumThreads }, actions);
Training\DocumentPartitioning.cs (3)
62: int innerLoopSize = 1 + dataset.NumDocs / BlockingThreadPool.NumThreads; // +1 is to make sure we don't have a few left over at the end
97: Parallel.Invoke(new ParallelOptions { MaxDegreeOfParallelism = BlockingThreadPool.NumThreads }, actions);
129: Parallel.Invoke(new ParallelOptions { MaxDegreeOfParallelism = BlockingThreadPool.NumThreads }, actions);
Training\ScoreTracker.cs (4)
105
Parallel.For(0, tree.NumLeaves, new ParallelOptions { MaxDegreeOfParallelism =
BlockingThreadPool
.NumThreads }, (leaf) =>
172
int innerLoopSize = 1 + Dataset.NumDocs /
BlockingThreadPool
.NumThreads; // +1 is to make sure we don't have a few left over at the end
194
Parallel.Invoke(new ParallelOptions { MaxDegreeOfParallelism =
BlockingThreadPool
.NumThreads }, actions);
203
Parallel.For(0, tree.NumLeaves, new ParallelOptions { MaxDegreeOfParallelism =
BlockingThreadPool
.NumThreads },
Training\Test.cs (6)
538: int chunkSize = 1 + Dataset.NumDocs / BlockingThreadPool.NumThreads; // Minimizes the number of repeat computations in sparse array to have each thread take as big a chunk as possible
565: Parallel.Invoke(new ParallelOptions() { MaxDegreeOfParallelism = BlockingThreadPool.NumThreads }, actions);
616: int chunkSize = 1 + binaryLabels.Length / BlockingThreadPool.NumThreads; // Minimizes the number of repeat computations in sparse array to have each thread take as big a chunk as possible
640: Parallel.Invoke(new ParallelOptions() { MaxDegreeOfParallelism = BlockingThreadPool.NumThreads }, actions);
656: int chunkSize = 1 + Dataset.NumDocs / BlockingThreadPool.NumThreads; // Minimizes the number of repeat computations in sparse array to have each thread take as big a chunk as possible
698: Parallel.Invoke(new ParallelOptions() { MaxDegreeOfParallelism = BlockingThreadPool.NumThreads }, actions);
Training\WinLossCalculator.cs (4)
29: int numThreads = BlockingThreadPool.NumThreads;
50: int chunkSize = 1 + dataset.NumQueries / BlockingThreadPool.NumThreads; // Minimizes the number of repeat computations in sparse array to have each thread take as big a chunk as possible
55: var queue = new ConcurrentQueue<int>(Enumerable.Range(0, BlockingThreadPool.NumThreads));
63: Parallel.Invoke(new ParallelOptions { MaxDegreeOfParallelism = BlockingThreadPool.NumThreads }, actions);
TreeEnsemble\InternalRegressionTree.cs (6)
1379: int innerLoopSize = 1 + dataset.NumDocs / BlockingThreadPool.NumThreads; // +1 is to make sure we don't have a few left over at the end
1395: Parallel.Invoke(new ParallelOptions { MaxDegreeOfParallelism = BlockingThreadPool.NumThreads }, actions);
1404: int innerLoopSize = 1 + dataset.NumDocs / BlockingThreadPool.NumThreads; // +1 is to make sure we don't have a few left over at the end
1420: Parallel.Invoke(new ParallelOptions { MaxDegreeOfParallelism = BlockingThreadPool.NumThreads }, actions);
1427: int innerLoopSize = 1 + docIndices.Length / BlockingThreadPool.NumThreads; // +1 is to make sure we don't have a few left over at the end
1443: Parallel.Invoke(new ParallelOptions { MaxDegreeOfParallelism = BlockingThreadPool.NumThreads }, actions);
TreeEnsemble\InternalTreeEnsemble.cs (2)
293: int innerLoopSize = 1 + dataset.NumDocs / BlockingThreadPool.NumThreads; // minimize number of times we have to skip forward in the sparse arrays
308: Parallel.Invoke(new ParallelOptions { MaxDegreeOfParallelism = BlockingThreadPool.NumThreads }, actions);
Utils\ThreadTaskManager.cs (1)
29: BlockingThreadPool.Initialize(numThreads);