@@ -1,22 +1,24 @@
-__precompile__()
 module LIBSVM
+
+
 import LIBLINEAR
+
 using SparseArrays
 using libsvm_jll
 
 export svmtrain, svmpredict, fit!, predict, transform,
-       SVC, NuSVC, OneClassSVM, NuSVR, EpsilonSVR, LinearSVC,
-       Linearsolver, Kernel
+    SVC, NuSVC, OneClassSVM, NuSVR, EpsilonSVR, LinearSVC,
+    Linearsolver, Kernel
 
 include("LibSVMtypes.jl")
 include("constants.jl")
 
 verbosity = false
 
-struct SupportVectors{T, U}
+struct SupportVectors{T,U}
     l::Int32
     nSV::Vector{Int32}
-    y::Vector{T}
+    y::AbstractVector{T}
     X::AbstractMatrix{U}
     indices::Vector{Int32}
    SVnodes::Vector{SVMNode}
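The `y::Vector{T}` to `y::AbstractVector{T}` change above relaxes the field type so that any vector-like label container qualifies, not just a plain `Vector`. A minimal sketch of the difference (the variable names are illustrative, not from this commit):

```julia
y  = rand([-1.0, 1.0], 100)     # a plain Vector{Float64} of labels
yv = @view y[1:50]              # a SubArray: an AbstractVector, not a Vector

yv isa AbstractVector{Float64}  # true  — matches the new field type
yv isa Vector{Float64}          # false — would not match the old y::Vector{T}
```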
@@ -36,8 +38,7 @@ function SupportVectors(smc::SVMModel, y, X)
 
     yi = smc.param.svm_type == 2 ? Float64[] : y[sv_indices]
 
-    SupportVectors(smc.l, nSV, yi, X[:,sv_indices],
-                   sv_indices, nodes)
+    SupportVectors(smc.l, nSV, yi, X[:,sv_indices], sv_indices, nodes)
 end
 
 struct SVM{T}
@@ -68,7 +69,7 @@ struct SVM{T}
     probability::Bool
 end
 
-function SVM(smc::SVMModel, y::T, X, weights, labels, svmtype, kernel) where T
+function SVM(smc::SVMModel, y, X, weights, labels, svmtype, kernel)
     svs = SupportVectors(smc, y, X)
     coefs = zeros(smc.l, smc.nr_class-1)
     for k in 1:(smc.nr_class-1)
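For context on the hunk above: annotating a single argument as `y::T` with an otherwise-unused `where T` places no real constraint in Julia, since `T` simply binds to `typeof(y)`. Dropping it simplifies the signature without changing dispatch. A small illustration with hypothetical function names:

```julia
# Both methods accept a `y` of any type; the `where T` version merely
# introduces a type variable that nothing else uses.
f(y::T, X) where {T} = (y, X)
g(y, X)              = (y, X)

f(1.0, [1 2]) == g(1.0, [1 2])  # true — identical behavior
```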
@@ -266,39 +267,48 @@ function set_num_threads(nt::Integer)
 end
 
 """
-```julia
-svmtrain{T, U<:Real}(X::AbstractMatrix{U}, y::AbstractVector{T}=[];
-    svmtype::Type=SVC, kernel::Kernel.KERNEL=Kernel.RadialBasis, degree::Integer=3,
-    gamma::Float64=1.0/size(X, 1), coef0::Float64=0.0,
-    cost::Float64=1.0, nu::Float64=0.5, epsilon::Float64=0.1,
-    tolerance::Float64=0.001, shrinking::Bool=true,
-    probability::Bool=false, weights::Union{Dict{T, Float64}, Cvoid}=nothing,
-    cachesize::Float64=200.0, verbose::Bool=false)
-```
+    svmtrain(
+        X::AbstractMatrix{U}, y::AbstractVector{T} = [];
+        svmtype::Type = SVC,
+        kernel::Kernel.KERNEL = Kernel.RadialBasis,
+        degree::Integer = 3,
+        gamma::Float64 = 1.0/size(X, 1),
+        coef0::Float64 = 0.0,
+        cost::Float64 = 1.0,
+        nu::Float64 = 0.5,
+        epsilon::Float64 = 0.1,
+        tolerance::Float64 = 0.001,
+        shrinking::Bool = true,
+        probability::Bool = false,
+        weights::Union{Dict{T,Float64},Cvoid} = nothing,
+        cachesize::Float64 = 200.0,
+        verbose::Bool = false
+    ) where {T,U<:Real}
+
 Train a Support Vector Machine using LIBSVM with response vector `y`
-and training data `X`. The shape of `X` needs to be (nfeatures, nsamples).
+and training data `X`. The shape of `X` needs to be `(nfeatures, nsamples)`.
 For one-class SVM use only `X`.
 
 # Arguments
 
-* `svmtype::Type=LIBSVM.SVC`: Type of SVM to train: `SVC` (for C-SVM), `NuSVC`,
+* `svmtype::Type = LIBSVM.SVC`: Type of SVM to train: `SVC` (for C-SVM), `NuSVC`,
   `OneClassSVM`, `EpsilonSVR` or `NuSVR`. Defaults to `OneClassSVM` if
   `y` is not used.
-* `kernel::Kernel.KERNEL=Kernel.RadialBasis`: Model kernel: `Linear`, `Polynomial`,
+* `kernel::Kernel.KERNEL = Kernel.RadialBasis`: Model kernel: `Linear`, `Polynomial`,
   `RadialBasis`, `Sigmoid` or `Precomputed`.
-* `degree::Integer=3`: kernel degree, used for the polynomial kernel
-* `gamma::Float64=1.0/size(X, 1)`: γ for kernels
-* `coef0::Float64=0.0`: parameter for the sigmoid and polynomial kernels
-* `cost::Float64=1.0`: cost parameter C of C-SVC, epsilon-SVR, and nu-SVR
-* `nu::Float64=0.5`: parameter nu of nu-SVC, one-class SVM, and nu-SVR
-* `epsilon::Float64=0.1`: epsilon in the loss function of epsilon-SVR
-* `tolerance::Float64=0.001`: tolerance of the termination criterion
-* `shrinking::Bool=true`: whether to use the shrinking heuristics
-* `probability::Bool=false`: whether to train an SVC or SVR model for probability estimates
+* `degree::Integer = 3`: kernel degree, used for the polynomial kernel
+* `gamma::Float64 = 1.0/size(X, 1)`: γ for kernels
+* `coef0::Float64 = 0.0`: parameter for the sigmoid and polynomial kernels
+* `cost::Float64 = 1.0`: cost parameter C of C-SVC, epsilon-SVR, and nu-SVR
+* `nu::Float64 = 0.5`: parameter nu of nu-SVC, one-class SVM, and nu-SVR
+* `epsilon::Float64 = 0.1`: epsilon in the loss function of epsilon-SVR
+* `tolerance::Float64 = 0.001`: tolerance of the termination criterion
+* `shrinking::Bool = true`: whether to use the shrinking heuristics
+* `probability::Bool = false`: whether to train an SVC or SVR model for probability estimates
 * `weights::Union{Dict{T, Float64}, Cvoid}=nothing`: dictionary of class weights
-* `cachesize::Float64=100.0`: cache memory size in MB
-* `verbose::Bool=false`: print training output from LIBSVM if true
-* `nt::Integer=0`: number of OpenMP cores to use; if 0, it is set to OMP_NUM_THREADS; if negative, it is set to the maximum number of threads
+* `cachesize::Float64 = 200.0`: cache memory size in MB
+* `verbose::Bool = false`: print training output from LIBSVM if true
+* `nt::Integer = 0`: number of OpenMP cores to use; if 0, it is set to OMP_NUM_THREADS; if negative, it is set to the maximum number of threads
 
 Consult the LIBSVM documentation for advice on the choice of correct
 parameters and model tuning.
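To make the reformatted docstring concrete, here is a usage sketch built only from the signature and arguments documented above; the random data and variable names are illustrative, and `svmpredict` (exported alongside `svmtrain`) is assumed to take the trained model and a feature matrix, returning predicted labels together with decision values:

```julia
using LIBSVM

# X is (nfeatures, nsamples); y holds one label per column of X.
X = rand(4, 120)            # 4 features, 120 samples (illustrative data)
y = rand(["a", "b"], 120)   # class labels

model = svmtrain(X, y;
                 svmtype = SVC,
                 kernel = Kernel.RadialBasis,
                 gamma = 1.0 / size(X, 1),
                 cost = 1.0)

# Predict on the training data; ŷ are labels, the second value holds
# the per-sample decision values.
ŷ, decision_values = svmpredict(model, X)
```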
@@ -419,4 +429,5 @@
 include("ScikitLearnTypes.jl")
 include("ScikitLearnAPI.jl")
 
+
 end