diff -ruN RNAmicro1.1.4/Makefile RNAmicro1.1.4a/Makefile
--- RNAmicro1.1.4/Makefile	2014-08-07 09:00:31.000000000 +0200
+++ RNAmicro1.1.4a/Makefile	2015-09-19 13:32:15.872994369 +0200
@@ -1,7 +1,7 @@
 EXE = RNAmicro
 CC = gcc
 LL = g++
-VERSION = 1.0
+VERSION = 1.1.4
 DISTNAME= RNAmicro
 WARN = -Wall -pedantic
 CCFLAGS = -O1 -DUNUSED='' -g $(WARN)
@@ -15,11 +15,13 @@
 	strand.c zscore.c descriptors.c
 OBJS = $(foreach file, $(SRCS:.c=.o), $(file))
 all: $(EXE)
-$(EXE): $(OBJS)
+$(EXE): $(OBJS) svm.o
 	$(LL) $(CCFLAGS) $(LIBS) -o $(EXE) svm.o $(OBJS)
 $(OBJS): %.o: %.c
 	$(CC) -c $(CCFLAGS) $< -o $@
+svm.o: svm.cpp svm.h
+	$(CC) -c $(CCFLAGS) svm.cpp -o svm.o
 veryclean:
 	rm -f $(OBJS) $(EXE)
 clean:
-	rm -f $(OBJS) core
+	rm -f $(OBJS) svm.o core
diff -ruN RNAmicro1.1.4/fold.c RNAmicro1.1.4a/fold.c
--- RNAmicro1.1.4/fold.c	2014-08-07 09:00:31.000000000 +0200
+++ RNAmicro1.1.4a/fold.c	2015-09-19 13:28:25.954382460 +0200
@@ -783,7 +783,7 @@
 
 /*---------------------------------------------------------------------------*/
 
-inline int HairpinE(int size, int type, int si1, int sj1, const char *string) {
+int HairpinE(int size, int type, int si1, int sj1, const char *string) {
   int energy;
   energy = (size <= 30) ? P->hairpin[size] :
     P->hairpin[30]+(int)(P->lxc*log((size)/30.));
@@ -811,7 +811,7 @@
 
 /*---------------------------------------------------------------------------*/
 
-inline PRIVATE int oldLoopEnergy(int i, int j, int p, int q, int type, int type_2) {
+PRIVATE int oldLoopEnergy(int i, int j, int p, int q, int type, int type_2) {
   /* compute energy of degree 2 loop (stack bulge or interior) */
   int n1, n2, m, energy;
   n1 = p-i-1;
@@ -853,7 +853,7 @@
 
 /*--------------------------------------------------------------------------*/
 
-inline int LoopEnergy(int n1, int n2, int type, int type_2,
+int LoopEnergy(int n1, int n2, int type, int type_2,
 		      int si1, int sj1, int sp1, int sq1) {
   /* compute energy of degree 2 loop (stack bulge or interior) */
   int nl, ns, energy;
diff -ruN RNAmicro1.1.4/svm.cpp RNAmicro1.1.4a/svm.cpp
--- RNAmicro1.1.4/svm.cpp	2014-08-07 09:03:56.000000000 +0200
+++ RNAmicro1.1.4a/svm.cpp	2015-09-19 13:26:10.468533764 +0200
@@ -5,8 +5,6 @@
 #include <float.h>
 #include <string.h>
 #include <stdarg.h>
-#include <limits.h>
-#include <locale.h>
 #include "svm.h"
 int libsvm_version = LIBSVM_VERSION;
 typedef float Qfloat;
@@ -74,7 +72,7 @@
 	// return some position p where [p,len) need to be filled
 	// (p >= len if nothing needs to be filled)
 	int get_data(const int index, Qfloat **data, int len);
-	void swap_index(int i, int j);	
+	void swap_index(int i, int j);
 private:
 	int l;
 	long int size;
@@ -194,7 +192,7 @@
 class QMatrix {
 public:
 	virtual Qfloat *get_Q(int column, int len) const = 0;
-	virtual double *get_QD() const = 0;
+	virtual Qfloat *get_QD() const = 0;
 	virtual void swap_index(int i, int j) const = 0;
 	virtual ~QMatrix() {}
 };
@@ -207,7 +205,7 @@
 	static double k_function(const svm_node *x, const svm_node *y,
				 const svm_parameter& param);
 	virtual Qfloat *get_Q(int column, int len) const = 0;
-	virtual double *get_QD() const = 0;
+	virtual Qfloat *get_QD() const = 0;
 	virtual void swap_index(int i, int j) const	// no so const...
 	{
 		swap(x[i],x[j]);
@@ -414,7 +412,7 @@
 	char *alpha_status;	// LOWER_BOUND, UPPER_BOUND, FREE
 	double *alpha;
 	const QMatrix *Q;
-	const double *QD;
+	const Qfloat *QD;
 	double eps;
 	double Cp,Cn;
 	double *p;
@@ -444,7 +442,7 @@
 	virtual double calculate_rho();
 	virtual void do_shrinking();
 private:
-	bool be_shrunk(int i, double Gmax1, double Gmax2);	
+	bool be_shrunk(int i, double Gmax1, double Gmax2);
 };
 
 void Solver::swap_index(int i, int j)
@@ -476,7 +474,7 @@
 			nr_free++;
 
 	if(2*nr_free < active_size)
-		info("\nWARNING: using -h 0 may be faster\n");
+		info("\nWarning: using -h 0 may be faster\n");
 
 	if (nr_free*l > 2*active_size*(l-active_size))
 	{
@@ -558,10 +556,9 @@
 	// optimization step
 
 	int iter = 0;
-	int max_iter = max(10000000, l>INT_MAX/100 ? INT_MAX : 100*l);
 	int counter = min(l,1000)+1;
-	
-	while(iter < max_iter)
+
+	while(1)
 	{
 		// show progress and do shrinking
@@ -601,7 +598,7 @@
 
 		if(y[i]!=y[j])
 		{
-			double quad_coef = QD[i]+QD[j]+2*Q_i[j];
+			double quad_coef = Q_i[i]+Q_j[j]+2*Q_i[j];
 			if (quad_coef <= 0)
 				quad_coef = TAU;
 			double delta = (-G[i]-G[j])/quad_coef;
@@ -644,7 +641,7 @@
 		}
 		else
 		{
-			double quad_coef = QD[i]+QD[j]-2*Q_i[j];
+			double quad_coef = Q_i[i]+Q_j[j]-2*Q_i[j];
 			if (quad_coef <= 0)
 				quad_coef = TAU;
 			double delta = (G[i]-G[j])/quad_coef;
@@ -728,18 +725,6 @@
 		}
 	}
 
-	if(iter >= max_iter)
-	{
-		if(active_size < l)
-		{
-			// reconstruct the whole gradient to calculate objective value
-			reconstruct_gradient();
-			active_size = l;
-			info("*");
-		}
-		fprintf(stderr,"\nWARNING: reaching max number of iterations\n");
-	}
-
 	// calculate rho
 
 	si->rho = calculate_rho();
@@ -833,8 +818,8 @@
 				Gmax2 = G[j];
 			if (grad_diff > 0)
 			{
-				double obj_diff; 
-				double quad_coef = QD[i]+QD[j]-2.0*y[i]*Q_i[j];
+				double obj_diff;
+				double quad_coef=Q_i[i]+QD[j]-2.0*y[i]*Q_i[j];
 				if (quad_coef > 0)
 					obj_diff = -(grad_diff*grad_diff)/quad_coef;
 				else
@@ -857,8 +842,8 @@
 				Gmax2 = -G[j];
 			if (grad_diff > 0)
 			{
-				double obj_diff; 
-				double quad_coef = QD[i]+QD[j]+2.0*y[i]*Q_i[j];
+				double obj_diff;
+				double quad_coef=Q_i[i]+QD[j]+2.0*y[i]*Q_i[j];
 				if (quad_coef > 0)
 					obj_diff = -(grad_diff*grad_diff)/quad_coef;
 				else
@@ -1006,7 +991,7 @@
 //
 // additional constraint: e^T \alpha = constant
 //
-class Solver_NU: public Solver
+class Solver_NU : public Solver
 {
 public:
 	Solver_NU() {}
@@ -1085,8 +1070,8 @@
 				Gmaxp2 = G[j];
 			if (grad_diff > 0)
 			{
-				double obj_diff; 
-				double quad_coef = QD[ip]+QD[j]-2*Q_ip[j];
+				double obj_diff;
+				double quad_coef = Q_ip[ip]+QD[j]-2*Q_ip[j];
 				if (quad_coef > 0)
 					obj_diff = -(grad_diff*grad_diff)/quad_coef;
 				else
@@ -1109,8 +1094,8 @@
 				Gmaxn2 = -G[j];
 			if (grad_diff > 0)
 			{
-				double obj_diff; 
-				double quad_coef = QD[in]+QD[j]-2*Q_in[j];
+				double obj_diff;
+				double quad_coef = Q_in[in]+QD[j]-2*Q_in[j];
 				if (quad_coef > 0)
 					obj_diff = -(grad_diff*grad_diff)/quad_coef;
 				else
@@ -1271,9 +1256,9 @@
 	{
 		clone(y,y_,prob.l);
 		cache = new Cache(prob.l,(long int)(param.cache_size*(1<<20)));
-		QD = new double[prob.l];
+		QD = new Qfloat[prob.l];
 		for(int i=0;i<prob.l;i++)
-			QD[i] = (this->*kernel_function)(i,i);
+			QD[i]= (Qfloat)(this->*kernel_function)(i,i);
 	}
 
 	Qfloat *get_Q(int i, int len) const
@@ -1288,7 +1273,7 @@
 		return data;
 	}
 
-	double *get_QD() const
+	Qfloat *get_QD() const
 	{
 		return QD;
 	}
@@ -1310,7 +1295,7 @@
 private:
 	schar *y;
 	Cache *cache;
-	double *QD;
+	Qfloat *QD;
 };
 
 class ONE_CLASS_Q: public Kernel
@@ -1320,9 +1305,9 @@
 	:Kernel(prob.l, prob.x, param)
 	{
 		cache = new Cache(prob.l,(long int)(param.cache_size*(1<<20)));
-		QD = new double[prob.l];
+		QD = new Qfloat[prob.l];
 		for(int i=0;i<prob.l;i++)
-			QD[i] = (this->*kernel_function)(i,i);
+			QD[i]= (Qfloat)(this->*kernel_function)(i,i);
 	}
 
 	Qfloat *get_Q(int i, int len) const
@@ -1337,7 +1322,7 @@
 		return data;
 	}
 
-	double *get_QD() const
+	Qfloat *get_QD() const
 	{
 		return QD;
 	}
@@ -1356,7 +1341,7 @@
 	}
 private:
 	Cache *cache;
-	double *QD;
+	Qfloat *QD;
 };
 
 class SVR_Q: public Kernel
@@ -1367,7 +1352,7 @@
 	{
 		l = prob.l;
 		cache = new Cache(l,(long int)(param.cache_size*(1<<20)));
-		QD = new double[2*l];
+		QD = new Qfloat[2*l];
 		sign = new schar[2*l];
 		index = new int[2*l];
 		for(int k=0;k<l;k++)
@@ -1376,8 +1361,8 @@
 			sign[k+l] = -1;
 			index[k] = k;
 			index[k+l] = k;
-			QD[k] = (this->*kernel_function)(k,k);
-			QD[k+l] = QD[k];
+			QD[k]= (Qfloat)(this->*kernel_function)(k,k);
+			QD[k+l]=QD[k];
 		}
 		buffer[0] = new Qfloat[2*l];
 		buffer[1] = new Qfloat[2*l];
@@ -1410,7 +1395,7 @@
 		return buf;
 	}
 
-	double *get_QD() const
+	Qfloat *get_QD() const
 	{
 		return QD;
 	}
@@ -1431,7 +1416,7 @@
 	int *index;
 	mutable int next_buffer;
 	Qfloat *buffer[2];
-	double *QD;
+	Qfloat *QD;
 };
 
 //
@@ -1451,7 +1436,7 @@
 	{
 		alpha[i] = 0;
 		minus_ones[i] = -1;
-		if(prob->y[i] > 0) y[i] = +1; else y[i] = -1;
+		if(prob->y[i] > 0) y[i] = +1; else y[i]=-1;
 	}
 
 	Solver s;
@@ -1641,7 +1626,7 @@
 struct decision_function
 {
 	double *alpha;
-	double rho;	
+	double rho;
 };
 
 static decision_function svm_train_one(
@@ -1701,6 +1686,30 @@
 	return f;
 }
 
+//
+// svm_model
+//
+struct svm_model
+{
+	struct svm_parameter param;	/* parameter */
+	int nr_class;		/* number of classes, = 2 in regression/one class svm */
+	int l;			/* total #SV */
+	struct svm_node **SV;		/* SVs (SV[l]) */
+	double **sv_coef;	/* coefficients for SVs in decision functions (sv_coef[k-1][l]) */
+	double *rho;		/* constants in decision functions (rho[k*(k-1)/2]) */
+	double *probA;		/* pariwise probability information */
+	double *probB;
+
+	/* for classification only */
+
+	int *label;		/* label of each class (label[k]) */
+	int *nSV;		/* number of SVs for each class (nSV[k]) */
+				/* nSV[0] + nSV[1] + ... + nSV[k-1] = l */
+	/* XXX */
+	int free_sv;		/* 1 if svm_model is created by svm_load_model*/
+				/* 0 if svm_model is created by svm_train */
+};
+
 // Platt's binary SVM Probablistic Output: an improvement from Lin et al.
 static void sigmoid_train(
 	int l, const double *dec_values, const double *labels, 
@@ -1722,7 +1731,7 @@
 	double *t=Malloc(double,l);
 	double fApB,p,q,h11,h22,h21,g1,g2,det,dA,dB,gd,stepsize;
 	double newA,newB,newf,d1,d2;
-	int iter; 
+	int iter;
 
 	// Initial Point and Initial Fun Value
 	A=0.0; B=log((prior0+1.0)/(prior1+1.0));
@@ -1818,7 +1827,6 @@
 static double sigmoid_predict(double decision_value, double A, double B)
 {
 	double fApB = decision_value*A+B;
-	// 1-p used later; avoid catastrophic cancellation
 	if (fApB >= 0)
 		return exp(-fApB)/(1.0+exp(-fApB));
 	else
@@ -1961,11 +1969,11 @@
 		struct svm_model *submodel = svm_train(&subprob,&subparam);
 		for(j=begin;j<end;j++)
 		{
-			svm_predict_values(submodel,prob->x[perm[j]],&(dec_values[perm[j]]));
+			svm_predict_values(submodel,prob->x[perm[j]],&(dec_values[perm[j]])); 
 			// ensure +1 -1 order; reason not using CV subroutine
 			dec_values[perm[j]] *= submodel->label[0];
 		}
-		svm_free_and_destroy_model(&submodel);
+		svm_destroy_model(submodel);
 		svm_destroy_param(&subparam);
 	}
 	free(subprob.x);
@@ -2018,7 +2026,7 @@
 	int nr_class = 0;
 	int *label = Malloc(int,max_nr_class);
 	int *count = Malloc(int,max_nr_class);
-	int *data_label = Malloc(int,l);	
+	int *data_label = Malloc(int,l);
 	int i;
 
 	for(i=0;i<l;i++)
@@ -2048,24 +2056,6 @@
 		}
 	}
 
-	//
-	// Labels are ordered by their first occurrence in the training set. 
-	// However, for two-class sets with -1/+1 labels and -1 appears first, 
-	// we swap labels to ensure that internally the binary SVM has positive samples.
-	//
-	if (nr_class == 2 && label[0] == -1 && label[1] == 1)
-	{
-		swap(label[0],label[1]);
-		swap(count[0],count[1]);
-		for(i=0;i<l;i++)
-		{
-			if(data_label[i] == 0)
-				data_label[i] = 1;
-			else
-				data_label[i] = 0;
-		}
-	}
-
 	int *start = Malloc(int,nr_class);
 	start[0] = 0;
 	for(i=1;i<nr_class;i++)
@@ -2125,14 +2115,12 @@
 		model->l = nSV;
 		model->SV = Malloc(svm_node *,nSV);
 		model->sv_coef[0] = Malloc(double,nSV);
-		model->sv_indices = Malloc(int,nSV);
 		int j = 0;
 		for(i=0;i<prob->l;i++)
 			if(fabs(f.alpha[i]) > 0)
 			{
 				model->SV[j] = prob->x[i];
 				model->sv_coef[0][j] = f.alpha[i];
-				model->sv_indices[j] = i+1;
 				++j;
 			}
 
@@ -2149,10 +2137,7 @@
 		int *perm = Malloc(int,l);
 
 		// group training data of the same class
-		svm_group_classes(prob,&nr_class,&label,&start,&count,perm);
-		if(nr_class == 1) 
-			info("WARNING: training data in only one class. See README for details.\n");
-		
+		svm_group_classes(prob,&nr_class,&label,&start,&count,perm);
 		svm_node **x = Malloc(svm_node *,l);
 		int i;
 		for(i=0;i<l;i++)
@@ -2170,7 +2155,7 @@
 			if(param->weight_label[i] == label[j])
 				break;
 			if(j == nr_class)
-				fprintf(stderr,"WARNING: class label %d specified in weight is not found\n", param->weight_label[i]);
+				fprintf(stderr,"warning: class label %d specified in weight is not found\n", param->weight_label[i]);
 			else
 				weighted_C[j] *= param->weight[i];
 		}
@@ -2274,14 +2259,9 @@
 
 		model->l = total_sv;
 		model->SV = Malloc(svm_node *,total_sv);
-		model->sv_indices = Malloc(int,total_sv);
 		p = 0;
 		for(i=0;i<l;i++)
-			if(nonzero[i])
-			{
-				model->SV[p] = x[i];
-				model->sv_indices[p++] = perm[i] + 1;
-			}
+			if(nonzero[i]) model->SV[p++] = x[i];
 
 		int *nz_start = Malloc(int,nr_class);
 		nz_start[0] = 0;
@@ -2339,16 +2319,11 @@
 void svm_cross_validation(const svm_problem *prob, const svm_parameter *param, int nr_fold, double *target)
 {
 	int i;
-	int *fold_start;
+	int *fold_start = Malloc(int,nr_fold+1);
 	int l = prob->l;
 	int *perm = Malloc(int,l);
 	int nr_class;
-	if (nr_fold > l)
-	{
-		nr_fold = l;
-		fprintf(stderr,"WARNING: # folds > # data. Will use # folds = # data instead (i.e., leave-one-out cross validation)\n");
-	}
-	fold_start = Malloc(int,nr_fold+1);
+
 	// stratified cv may not give leave-one-out rate
 	// Each class to l folds -> some folds may have zero elements
 	if((param->svm_type == C_SVC ||
@@ -2394,9 +2369,9 @@
 		fold_start[0]=0;
 		for (i=1;i<=nr_fold;i++)
 			fold_start[i] = fold_start[i-1]+fold_count[i-1];
-		free(start);	
+		free(start);
 		free(label);
-		free(count);	
+		free(count);
 		free(index);
 		free(fold_count);
 	}
@@ -2443,17 +2418,17 @@
 			double *prob_estimates=Malloc(double,svm_get_nr_class(submodel));
 			for(j=begin;j<end;j++)
 				target[perm[j]] = svm_predict_probability(submodel,prob->x[perm[j]],prob_estimates);
-			free(prob_estimates);		
+			free(prob_estimates);
 		}
 		else
 			for(j=begin;j<end;j++)
 				target[perm[j]] = svm_predict(submodel,prob->x[perm[j]]);
-		svm_free_and_destroy_model(&submodel);
+		svm_destroy_model(submodel);
 		free(subprob.x);
 		free(subprob.y);
 	}
 	free(fold_start);
-	free(perm);	
+	free(perm);
 }
 
 
@@ -2474,18 +2449,6 @@
 		label[i] = model->label[i];
 }
 
-void svm_get_sv_indices(const svm_model *model, int* indices)
-{
-	if (model->sv_indices != NULL)
-		for(int i=0;i<model->l;i++)
-			indices[i] = model->sv_indices[i];
-}
-
-int svm_get_nr_sv(const svm_model *model)
-{
-	return model->l;
-}
-
 double svm_get_svr_probability(const svm_model *model)
 {
 	if ((model->param.svm_type == EPSILON_SVR || model->param.svm_type == NU_SVR) &&
@@ -2500,14 +2463,13 @@
 
 double svm_predict_values(const svm_model *model, const svm_node *x, double* dec_values)
 {
-	int i;
 	if(model->param.svm_type == ONE_CLASS ||
 	   model->param.svm_type == EPSILON_SVR ||
 	   model->param.svm_type == NU_SVR)
 	{
 		double *sv_coef = model->sv_coef[0];
 		double sum = 0;
-		for(i=0;i<model->l;i++)
+		for(int i=0;i<model->l;i++)
 			sum += sv_coef[i] * Kernel::k_function(x,model->SV[i],model->param);
 		sum -= model->rho[0];
 		*dec_values = sum;
@@ -2519,6 +2481,7 @@
 	}
 	else
 	{
+		int i;
 		int nr_class = model->nr_class;
 		int l = model->l;
 		
@@ -2621,7 +2584,7 @@
 		for(i=0;i<nr_class;i++)
 			free(pairwise_prob[i]);
 		free(dec_values);
-		free(pairwise_prob);	     
+		free(pairwise_prob);
 		return model->label[prob_max_idx];
 	}
 	else 
@@ -2643,9 +2606,6 @@
 	FILE *fp = fopen(model_file_name,"w");
 	if(fp==NULL) return -1;
 
-	char *old_locale = strdup(setlocale(LC_ALL, NULL));
-	setlocale(LC_ALL, "C");
-
 	const svm_parameter& param = model->param;
 
 	fprintf(fp,"svm_type %s\n", svm_type_table[param.svm_type]);
@@ -2724,10 +2684,6 @@
 		}
 		fprintf(fp, "\n");
 	}
-
-	setlocale(LC_ALL, old_locale);
-	free(old_locale);
-
 	if (ferror(fp) != 0 || fclose(fp) != 0) return -1;
 	else return 0;
 }
@@ -2753,25 +2709,29 @@
 	return line;
 }
 
-//
-// FSCANF helps to handle fscanf failures.
-// Its do-while block avoids the ambiguity when
-// if (...)
-//    FSCANF();
-// is used
-//
-#define FSCANF(_stream, _format, _var) do{ if (fscanf(_stream, _format, _var) != 1) return false; }while(0)
-bool read_model_header(FILE *fp, svm_model* model)
+svm_model *svm_load_model(const char *model_file_name)
 {
+	FILE *fp = fopen(model_file_name,"rb");
+	if(fp==NULL) return NULL;
+	
+	// read parameters
+
+	svm_model *model = Malloc(svm_model,1);
 	svm_parameter& param = model->param;
+	model->rho = NULL;
+	model->probA = NULL;
+	model->probB = NULL;
+	model->label = NULL;
+	model->nSV = NULL;
+
 	char cmd[81];
 	while(1)
 	{
-		FSCANF(fp,"%80s",cmd);
+		fscanf(fp,"%80s",cmd);
 
 		if(strcmp(cmd,"svm_type")==0)
 		{
-			FSCANF(fp,"%80s",cmd);
+			fscanf(fp,"%80s",cmd);
 			int i;
 			for(i=0;svm_type_table[i];i++)
 			{
@@ -2784,12 +2744,16 @@
 			if(svm_type_table[i] == NULL)
 			{
 				fprintf(stderr,"unknown svm type.\n");
-				return false;
+				free(model->rho);
+				free(model->label);
+				free(model->nSV);
+				free(model);
+				return NULL;
 			}
 		}
 		else if(strcmp(cmd,"kernel_type")==0)
 		{		
-			FSCANF(fp,"%80s",cmd);
+			fscanf(fp,"%80s",cmd);
 			int i;
 			for(i=0;kernel_type_table[i];i++)
 			{
@@ -2801,106 +2765,79 @@
 			}
 			if(kernel_type_table[i] == NULL)
 			{
-				fprintf(stderr,"unknown kernel function.\n");
-				return false;
+				fprintf(stderr,"unknown kernel function.\n");	
+				free(model->rho);
+				free(model->label);
+				free(model->nSV);
+				free(model);
+				return NULL;
 			}
 		}
 		else if(strcmp(cmd,"degree")==0)
-			FSCANF(fp,"%d",&param.degree);
+			fscanf(fp,"%d",&param.degree);
 		else if(strcmp(cmd,"gamma")==0)
-			FSCANF(fp,"%lf",&param.gamma);
+			fscanf(fp,"%lf",&param.gamma);
 		else if(strcmp(cmd,"coef0")==0)
-			FSCANF(fp,"%lf",&param.coef0);
+			fscanf(fp,"%lf",&param.coef0);
 		else if(strcmp(cmd,"nr_class")==0)
-			FSCANF(fp,"%d",&model->nr_class);
+			fscanf(fp,"%d",&model->nr_class);
 		else if(strcmp(cmd,"total_sv")==0)
-			FSCANF(fp,"%d",&model->l);
+			fscanf(fp,"%d",&model->l);
 		else if(strcmp(cmd,"rho")==0)
 		{
 			int n = model->nr_class * (model->nr_class-1)/2;
 			model->rho = Malloc(double,n);
 			for(int i=0;i<n;i++)
-				FSCANF(fp,"%lf",&model->rho[i]);
+				fscanf(fp,"%lf",&model->rho[i]);
 		}
 		else if(strcmp(cmd,"label")==0)
 		{
 			int n = model->nr_class;
 			model->label = Malloc(int,n);
 			for(int i=0;i<n;i++)
-				FSCANF(fp,"%d",&model->label[i]);
+				fscanf(fp,"%d",&model->label[i]);
 		}
 		else if(strcmp(cmd,"probA")==0)
 		{
 			int n = model->nr_class * (model->nr_class-1)/2;
 			model->probA = Malloc(double,n);
 			for(int i=0;i<n;i++)
-				FSCANF(fp,"%lf",&model->probA[i]);
+				fscanf(fp,"%lf",&model->probA[i]);
 		}
 		else if(strcmp(cmd,"probB")==0)
 		{
 			int n = model->nr_class * (model->nr_class-1)/2;
 			model->probB = Malloc(double,n);
 			for(int i=0;i<n;i++)
-				FSCANF(fp,"%lf",&model->probB[i]);
+				fscanf(fp,"%lf",&model->probB[i]);
 		}
 		else if(strcmp(cmd,"nr_sv")==0)
 		{
 			int n = model->nr_class;
 			model->nSV = Malloc(int,n);
 			for(int i=0;i<n;i++)
-				FSCANF(fp,"%d",&model->nSV[i]);
+				fscanf(fp,"%d",&model->nSV[i]);
 		}
 		else if(strcmp(cmd,"SV")==0)
 		{
 			while(1)
 			{
 				int c = getc(fp);
-				if(c==EOF || c=='\n') break;	
+				if(c==EOF || c=='\n') break;
 			}
 			break;
 		}
 		else
 		{
 			fprintf(stderr,"unknown text in model file: [%s]\n",cmd);
-			return false;
+			free(model->rho);
+			free(model->label);
+			free(model->nSV);
+			free(model);
+			return NULL;
 		}
 	}
 
-	return true;
-
-}
-
-svm_model *svm_load_model(const char *model_file_name)
-{
-	FILE *fp = fopen(model_file_name,"rb");
-	if(fp==NULL) return NULL;
-
-	char *old_locale = strdup(setlocale(LC_ALL, NULL));
-	setlocale(LC_ALL, "C");
-
-	// read parameters
-
-	svm_model *model = Malloc(svm_model,1);
-	model->rho = NULL;
-	model->probA = NULL;
-	model->probB = NULL;
-	model->sv_indices = NULL;
-	model->label = NULL;
-	model->nSV = NULL;
-
-	// read header
-	if (!read_model_header(fp, model))
-	{
-		fprintf(stderr, "ERROR: fscanf failed to read model\n");
-		setlocale(LC_ALL, old_locale);
-		free(old_locale);
-		free(model->rho);
-		free(model->label);
-		free(model->nSV);
-		free(model);
-		return NULL;
-	}
-
 	// read sv_coef and SV
 
 	int elements = 0;
@@ -2965,9 +2902,6 @@
 	}
 	free(line);
 
-	setlocale(LC_ALL, old_locale);
-	free(old_locale);
-
 	if (ferror(fp) != 0 || fclose(fp) != 0)
 		return NULL;
 
@@ -2975,49 +2909,20 @@
 	return model;
 }
 
-void svm_free_model_content(svm_model* model_ptr)
-{
-	if(model_ptr->free_sv && model_ptr->l > 0 && model_ptr->SV != NULL)
-		free((void *)(model_ptr->SV[0]));
-	if(model_ptr->sv_coef)
-	{
-		for(int i=0;i<model_ptr->nr_class-1;i++)
-			free(model_ptr->sv_coef[i]);
-	}
-
-	free(model_ptr->SV);
-	model_ptr->SV = NULL;
-
-	free(model_ptr->sv_coef);
-	model_ptr->sv_coef = NULL;
-
-	free(model_ptr->rho);
-	model_ptr->rho = NULL;
-
-	free(model_ptr->label);
-	model_ptr->label= NULL;
-
-	free(model_ptr->probA);
-	model_ptr->probA = NULL;
-
-	free(model_ptr->probB);
-	model_ptr->probB= NULL;
-
-	free(model_ptr->sv_indices);
-	model_ptr->sv_indices = NULL;
-
-	free(model_ptr->nSV);
-	model_ptr->nSV = NULL;
-}
-
-void svm_free_and_destroy_model(svm_model** model_ptr_ptr)
+void svm_destroy_model(svm_model* model)
 {
-	if(model_ptr_ptr != NULL && *model_ptr_ptr != NULL)
-	{
-		svm_free_model_content(*model_ptr_ptr);
-		free(*model_ptr_ptr);
-		*model_ptr_ptr = NULL;
-	}
+	if(model->free_sv && model->l > 0)
+		free((void *)(model->SV[0]));
+	for(int i=0;i<model->nr_class-1;i++)
+		free(model->sv_coef[i]);
+	free(model->SV);
+	free(model->sv_coef);
+	free(model->rho);
+	free(model->label);
+	free(model->probA);
+	free(model->probB);
+	free(model->nSV);
+	free(model);
 }
 
 void svm_destroy_param(svm_parameter* param)
diff -ruN RNAmicro1.1.4/svm.h RNAmicro1.1.4a/svm.h
--- RNAmicro1.1.4/svm.h	2014-08-07 09:00:31.000000000 +0200
+++ RNAmicro1.1.4a/svm.h	2015-09-19 13:26:06.338558698 +0200
@@ -1,10 +1,14 @@
 #ifndef _LIBSVM_H
 #define _LIBSVM_H
 
+#define LIBSVM_VERSION 291
+
 #ifdef __cplusplus
 extern "C" {
 #endif
 
+extern int libsvm_version;
+
 struct svm_node
 {
 	int index;
@@ -53,7 +57,7 @@
 void svm_get_labels(const struct svm_model *model, int *label);
 double svm_get_svr_probability(const struct svm_model *model);
 
-void svm_predict_values(const struct svm_model *model, const struct svm_node *x, double* dec_values);
+double svm_predict_values(const struct svm_model *model, const struct svm_node *x, double* dec_values);
 double svm_predict(const struct svm_model *model, const struct svm_node *x);
 double svm_predict_probability(const struct svm_model *model, const struct svm_node *x, double* prob_estimates);
 
@@ -63,6 +67,8 @@
 const char *svm_check_parameter(const struct svm_problem *prob, const struct svm_parameter *param);
 int svm_check_probability_model(const struct svm_model *model);
 
+void svm_set_print_string_function(void (*print_func)(const char *));
+
 #ifdef __cplusplus
 }
 #endif