<?xml version="1.0"?>
<doc>
<assembly>
<name>Microsoft.ML.PCA</name>
</assembly>
<members>
<member name="M:Microsoft.ML.PcaCatalog.ProjectToPrincipalComponents(Microsoft.ML.TransformsCatalog.ProjectionTransforms,System.String,System.String,System.String,System.Int32,System.Int32,System.Boolean,System.Nullable{System.Int32})">
<summary>Initializes a new instance of <see cref="T:Microsoft.ML.Transforms.PrincipalComponentAnalysisEstimator"/>.</summary>
<param name="catalog">The transform's catalog.</param>
<param name="outputColumnName">Name of the column resulting from the transformation of <paramref name="inputColumnName"/>.</param>
<param name="inputColumnName">Name of column to transform. If set to <see langword="null"/>, the value of the <paramref name="outputColumnName"/> will be used as source.</param>
<param name="exampleWeightColumnName">The name of the example weight column (optional).</param>
<param name="rank">The number of principal components.</param>
<param name="overSampling">Oversampling parameter for randomized PrincipalComponentAnalysis training.</param>
<param name="center">If enabled, data is centered to be zero mean.</param>
<param name="seed">The seed for random number generation.</param>
</member>
<member name="M:Microsoft.ML.PcaCatalog.ProjectToPrincipalComponents(Microsoft.ML.TransformsCatalog.ProjectionTransforms,Microsoft.ML.Transforms.PrincipalComponentAnalysisEstimator.ColumnOptions[])">
<summary>Initializes a new instance of <see cref="T:Microsoft.ML.Transforms.PrincipalComponentAnalysisEstimator"/>.</summary>
<param name="catalog">The transform's catalog.</param>
<param name="columns">Input columns to apply PrincipalComponentAnalysis on.</param>
</member>
<member name="M:Microsoft.ML.PcaCatalog.RandomizedPca(Microsoft.ML.AnomalyDetectionCatalog.AnomalyDetectionTrainers,System.String,System.String,System.Int32,System.Int32,System.Boolean,System.Nullable{System.Int32})">
<summary>
Trains an approximate PCA using Randomized SVD algorithm.
</summary>
<param name="catalog">The anomaly detection catalog trainer object.</param>
<param name="featureColumnName">The name of the feature column.</param>
<param name="exampleWeightColumnName">The name of the example weight column (optional).</param>
<param name="rank">The number of components in the PCA.</param>
<param name="oversampling">Oversampling parameter for randomized PCA training.</param>
<param name="center">If enabled, data is centered to be zero mean.</param>
<param name="seed">The seed for random number generation.</param>
</member>
<member name="M:Microsoft.ML.PcaCatalog.RandomizedPca(Microsoft.ML.AnomalyDetectionCatalog.AnomalyDetectionTrainers,Microsoft.ML.Trainers.RandomizedPcaTrainer.Options)">
<summary>
Trains an approximate PCA using Randomized SVD algorithm.
</summary>
<param name="catalog">The anomaly detection catalog trainer object.</param>
<param name="options">Advanced options to the algorithm.</param>
</member>
<member name="T:Microsoft.ML.Trainers.RandomizedPcaTrainer">
<summary>
This trainer trains an approximate PCA using Randomized SVD algorithm
Reference: https://web.stanford.edu/group/mmds/slides2010/Martinsson.pdf
</summary>
<remarks>
This PCA can be made into Kernel PCA by using Random Fourier Features transform
</remarks>
</member>
<member name="M:Microsoft.ML.Trainers.RandomizedPcaTrainer.#ctor(Microsoft.ML.IHostEnvironment,System.String,System.String,System.Int32,System.Int32,System.Boolean,System.Nullable{System.Int32})">
<summary>
Initializes a new instance of <see cref="T:Microsoft.ML.Trainers.RandomizedPcaTrainer"/>.
</summary>
<param name="env">The local instance of the <see cref="T:Microsoft.ML.IHostEnvironment"/>.</param>
<param name="features">The name of the feature column.</param>
<param name="weights">The name of the weight column.</param>
<param name="rank">The number of components in the PCA.</param>
<param name="oversampling">Oversampling parameter for randomized PCA training.</param>
<param name="center">If enabled, data is centered to be zero mean.</param>
<param name="seed">The seed for random number generation.</param>
</member>
<member name="M:Microsoft.ML.Trainers.RandomizedPcaTrainer.PostProcess(System.Single[][],System.Single[],System.Single[],System.Int32,System.Int32)">
<summary>
Modifies <paramref name="y"/> in place so it becomes <paramref name="y"/> * eigenvectors / eigenvalues.
</summary>
</member>
<member name="T:Microsoft.ML.Trainers.PcaModelParameters">
<summary>
PCA is a dimensionality-reduction transform which computes the projection of the feature vector onto a low-rank subspace.
</summary><remarks>
<a href="https://en.wikipedia.org/wiki/Principal_component_analysis">Principal Component Analysis (PCA)</a> is a dimensionality-reduction algorithm which computes the projection of the feature vector onto a low-rank subspace.
Its training is done using the technique described in the paper: <a href="https://arxiv.org/pdf/1310.6304v2.pdf">Combining Structured and Unstructured Randomness in Large Scale PCA</a>,
and the paper <a href="https://arxiv.org/pdf/0909.4061v2.pdf">Finding Structure with Randomness: Probabilistic Algorithms for Constructing Approximate Matrix Decompositions</a>
<para>For more information, see also:</para>
<list type="bullet">
<item><description>
<a href="https://web.stanford.edu/group/mmds/slides2010/Martinsson.pdf">Randomized Methods for Computing the Singular Value Decomposition (SVD) of very large matrices</a>
</description></item>
<item><description>
<a href="https://arxiv.org/abs/0809.2274">A randomized algorithm for principal component analysis</a>
</description></item>
<item><description>
<a href="http://users.cms.caltech.edu/~jtropp/papers/HMT11-Finding-Structure-SIREV.pdf">Finding Structure with Randomness: Probabilistic Algorithms for Constructing Approximate Matrix Decompositions</a>
</description></item>
</list>
</remarks>
</member>
<member name="M:Microsoft.ML.Trainers.PcaModelParameters.#ctor(Microsoft.ML.IHostEnvironment,System.Int32,System.Single[][],Microsoft.ML.Data.VBuffer{System.Single}@)">
<summary>
Instantiate new model parameters from trained model.
</summary>
<param name="env">The host environment.</param>
<param name="rank">The rank of the PCA approximation of the covariance matrix. This is the number of eigenvectors in the model.</param>
<param name="eigenVectors">Array of eigenvectors.</param>
<param name="mean">The mean vector of the training data.</param>
</member>
<member name="M:Microsoft.ML.Trainers.PcaModelParameters.GetEigenVectors(Microsoft.ML.Data.VBuffer{System.Single}[]@,System.Int32@)">
<summary>
Copies the top eigenvectors of the covariance matrix of the training data
into a set of buffers.
</summary>
<param name="vectors">A possibly reusable set of vectors, which will
be expanded as necessary to accommodate the data.</param>
<param name="rank">Set to the rank, which is also the logical length
of <paramref name="vectors"/>.</param>
</member>
<member name="M:Microsoft.ML.Trainers.PcaModelParameters.GetMean(Microsoft.ML.Data.VBuffer{System.Single}@)">
<summary>
Copies the mean vector of the training data.
</summary>
</member>
<member name="T:Microsoft.ML.Transforms.PrincipalComponentAnalysisTransformer">
<summary>
PCA is a dimensionality-reduction transform which computes the projection of the feature vector onto a low-rank subspace.
</summary><remarks>
<a href="https://en.wikipedia.org/wiki/Principal_component_analysis">Principal Component Analysis (PCA)</a> is a dimensionality-reduction algorithm which computes the projection of the feature vector onto a low-rank subspace.
Its training is done using the technique described in the paper: <a href="https://arxiv.org/pdf/1310.6304v2.pdf">Combining Structured and Unstructured Randomness in Large Scale PCA</a>,
and the paper <a href="https://arxiv.org/pdf/0909.4061v2.pdf">Finding Structure with Randomness: Probabilistic Algorithms for Constructing Approximate Matrix Decompositions</a>
<para>For more information, see also:</para>
<list type="bullet">
<item><description>
<a href="https://web.stanford.edu/group/mmds/slides2010/Martinsson.pdf">Randomized Methods for Computing the Singular Value Decomposition (SVD) of very large matrices</a>
</description></item>
<item><description>
<a href="https://arxiv.org/abs/0809.2274">A randomized algorithm for principal component analysis</a>
</description></item>
<item><description>
<a href="http://users.cms.caltech.edu/~jtropp/papers/HMT11-Finding-Structure-SIREV.pdf">Finding Structure with Randomness: Probabilistic Algorithms for Constructing Approximate Matrix Decompositions</a>
</description></item>
</list>
</remarks>
</member>
<member name="T:Microsoft.ML.Transforms.PrincipalComponentAnalysisEstimator">
<summary>
PCA is a dimensionality-reduction transform which computes the projection of the feature vector onto a low-rank subspace.
</summary><remarks>
<a href="https://en.wikipedia.org/wiki/Principal_component_analysis">Principal Component Analysis (PCA)</a> is a dimensionality-reduction algorithm which computes the projection of the feature vector onto a low-rank subspace.
Its training is done using the technique described in the paper: <a href="https://arxiv.org/pdf/1310.6304v2.pdf">Combining Structured and Unstructured Randomness in Large Scale PCA</a>,
and the paper <a href="https://arxiv.org/pdf/0909.4061v2.pdf">Finding Structure with Randomness: Probabilistic Algorithms for Constructing Approximate Matrix Decompositions</a>
<para>For more information, see also:</para>
<list type="bullet">
<item><description>
<a href="https://web.stanford.edu/group/mmds/slides2010/Martinsson.pdf">Randomized Methods for Computing the Singular Value Decomposition (SVD) of very large matrices</a>
</description></item>
<item><description>
<a href="https://arxiv.org/abs/0809.2274">A randomized algorithm for principal component analysis</a>
</description></item>
<item><description>
<a href="http://users.cms.caltech.edu/~jtropp/papers/HMT11-Finding-Structure-SIREV.pdf">Finding Structure with Randomness: Probabilistic Algorithms for Constructing Approximate Matrix Decompositions</a>
</description></item>
</list>
</remarks>
</member>
<member name="T:Microsoft.ML.Transforms.PrincipalComponentAnalysisEstimator.ColumnOptions">
<summary>
Describes how the transformer handles one column pair.
</summary>
</member>
<member name="F:Microsoft.ML.Transforms.PrincipalComponentAnalysisEstimator.ColumnOptions.Name">
<summary>
Name of the column resulting from the transformation of <see cref="F:Microsoft.ML.Transforms.PrincipalComponentAnalysisEstimator.ColumnOptions.InputColumnName"/>.
</summary>
</member>
<member name="F:Microsoft.ML.Transforms.PrincipalComponentAnalysisEstimator.ColumnOptions.InputColumnName">
<summary>
Name of column to transform.
</summary>
</member>
<member name="F:Microsoft.ML.Transforms.PrincipalComponentAnalysisEstimator.ColumnOptions.WeightColumn">
<summary>
The name of the weight column.
</summary>
</member>
<member name="F:Microsoft.ML.Transforms.PrincipalComponentAnalysisEstimator.ColumnOptions.Rank">
<summary>
The number of components in the PCA.
</summary>
</member>
<member name="F:Microsoft.ML.Transforms.PrincipalComponentAnalysisEstimator.ColumnOptions.Oversampling">
<summary>
Oversampling parameter for randomized PCA training.
</summary>
</member>
<member name="F:Microsoft.ML.Transforms.PrincipalComponentAnalysisEstimator.ColumnOptions.Center">
<summary>
If enabled, data is centered to be zero mean.
</summary>
</member>
<member name="F:Microsoft.ML.Transforms.PrincipalComponentAnalysisEstimator.ColumnOptions.Seed">
<summary>
The seed for random number generation.
</summary>
</member>
<member name="M:Microsoft.ML.Transforms.PrincipalComponentAnalysisEstimator.ColumnOptions.#ctor(System.String,System.String,System.String,System.Int32,System.Int32,System.Boolean,System.Nullable{System.Int32})">
<summary>
Describes how the transformer handles one column pair.
</summary>
<param name="name">Name of the column resulting from the transformation of <paramref name="inputColumnName"/>.</param>
<param name="inputColumnName">Name of column to transform.
If set to <see langword="null"/>, the value of the <paramref name="name"/> will be used as source.</param>
<param name="weightColumn">The name of the weight column.</param>
<param name="rank">The number of components in the PCA.</param>
<param name="overSampling">Oversampling parameter for randomized PCA training.</param>
<param name="center">If enabled, data is centered to be zero mean.</param>
<param name="seed">The random seed. If unspecified random state will be instead derived from the <see cref="T:Microsoft.ML.MLContext"/>.</param>
</member>
<member name="M:Microsoft.ML.Transforms.PrincipalComponentAnalysisEstimator.#ctor(Microsoft.ML.IHostEnvironment,System.String,System.String,System.String,System.Int32,System.Int32,System.Boolean,System.Nullable{System.Int32})">
<summary>
PCA is a dimensionality-reduction transform which computes the projection of the feature vector onto a low-rank subspace.
</summary><remarks>
<a href="https://en.wikipedia.org/wiki/Principal_component_analysis">Principal Component Analysis (PCA)</a> is a dimensionality-reduction algorithm which computes the projection of the feature vector onto a low-rank subspace.
Its training is done using the technique described in the paper: <a href="https://arxiv.org/pdf/1310.6304v2.pdf">Combining Structured and Unstructured Randomness in Large Scale PCA</a>,
and the paper <a href="https://arxiv.org/pdf/0909.4061v2.pdf">Finding Structure with Randomness: Probabilistic Algorithms for Constructing Approximate Matrix Decompositions</a>
<para>For more information, see also:</para>
<list type="bullet">
<item><description>
<a href="https://web.stanford.edu/group/mmds/slides2010/Martinsson.pdf">Randomized Methods for Computing the Singular Value Decomposition (SVD) of very large matrices</a>
</description></item>
<item><description>
<a href="https://arxiv.org/abs/0809.2274">A randomized algorithm for principal component analysis</a>
</description></item>
<item><description>
<a href="http://users.cms.caltech.edu/~jtropp/papers/HMT11-Finding-Structure-SIREV.pdf">Finding Structure with Randomness: Probabilistic Algorithms for Constructing Approximate Matrix Decompositions</a>
</description></item>
</list>
</remarks>
<param name="env">The environment to use.</param>
<param name="outputColumnName">Name of the column resulting from the transformation of <paramref name="inputColumnName" />.</param>
<param name="inputColumnName">Name of the column to transform.
If set to <see langword="null" />, the value of the <paramref name="outputColumnName" /> will be used as source.</param>
<param name="weightColumn">The name of the weight column.</param>
<param name="rank">The number of components in the PCA.</param>
<param name="overSampling">Oversampling parameter for randomized PCA training.</param>
<param name="center">If enabled, data is centered to be zero mean.</param>
<param name="seed">The seed for random number generation.</param>
</member>
<member name="M:Microsoft.ML.Transforms.PrincipalComponentAnalysisEstimator.#ctor(Microsoft.ML.IHostEnvironment,Microsoft.ML.Transforms.PrincipalComponentAnalysisEstimator.ColumnOptions[])">
<summary>
PCA is a dimensionality-reduction transform which computes the projection of the feature vector onto a low-rank subspace.
</summary><remarks>
<a href="https://en.wikipedia.org/wiki/Principal_component_analysis">Principal Component Analysis (PCA)</a> is a dimensionality-reduction algorithm which computes the projection of the feature vector onto a low-rank subspace.
Its training is done using the technique described in the paper: <a href="https://arxiv.org/pdf/1310.6304v2.pdf">Combining Structured and Unstructured Randomness in Large Scale PCA</a>,
and the paper <a href="https://arxiv.org/pdf/0909.4061v2.pdf">Finding Structure with Randomness: Probabilistic Algorithms for Constructing Approximate Matrix Decompositions</a>
<para>For more information, see also:</para>
<list type="bullet">
<item><description>
<a href="https://web.stanford.edu/group/mmds/slides2010/Martinsson.pdf">Randomized Methods for Computing the Singular Value Decomposition (SVD) of very large matrices</a>
</description></item>
<item><description>
<a href="https://arxiv.org/abs/0809.2274">A randomized algorithm for principal component analysis</a>
</description></item>
<item><description>
<a href="http://users.cms.caltech.edu/~jtropp/papers/HMT11-Finding-Structure-SIREV.pdf">Finding Structure with Randomness: Probabilistic Algorithms for Constructing Approximate Matrix Decompositions</a>
</description></item>
</list>
</remarks>
<param name="env">The environment to use.</param>
<param name="columns">The dataset columns to use, and their specific settings.</param>
</member>
<member name="M:Microsoft.ML.Transforms.PrincipalComponentAnalysisEstimator.Fit(Microsoft.Data.DataView.IDataView)">
<summary>
Trains and returns a <see cref="T:Microsoft.ML.Transforms.PrincipalComponentAnalysisTransformer"/>.
</summary>
</member>
<member name="M:Microsoft.ML.Transforms.PrincipalComponentAnalysisEstimator.GetOutputSchema(Microsoft.ML.SchemaShape)">
<summary>
Returns the <see cref="T:Microsoft.ML.SchemaShape"/> of the schema which will be produced by the transformer.
Used for schema propagation and verification in a pipeline.
</summary>
</member>
</members>
</doc>