Browse Source

Merge pull request #4 from jajupmochi/v0.2

V0.2
tags/v0.2.0
linlin GitHub 5 years ago
parent
commit
0024d6a568
No known key found for this signature in database GPG Key ID: 4AEE18F83AFDEB23
100 changed files with 48901 additions and 18 deletions
  1. +16
    -0
      .gitignore
  2. +1
    -0
      gklearn/ged/env/__init__.py
  3. +0
    -0
      gklearn/ged/env/common_types.py
  4. +2
    -0
      gklearn/ged/median/__init__.py
  5. +4
    -6
      gklearn/ged/median/median_graph_estimator.py
  6. +2
    -12
      gklearn/ged/median/test_median_graph_estimator.py
  7. +53
    -0
      gklearn/ged/median/utils.py
  8. +1
    -0
      gklearn/ged/util/__init__.py
  9. +0
    -0
      gklearn/ged/util/cpp2python.py
  10. +122
    -0
      gklearn/ged/util/cpp_code.cpp
  11. +0
    -0
      gklearn/ged/util/misc.py
  12. +344
    -0
      gklearn/ged/util/util.py
  13. +97
    -0
      gklearn/gedlib/README.rst
  14. +10
    -0
      gklearn/gedlib/__init__.py
  15. +20
    -0
      gklearn/gedlib/documentation/Makefile
  16. BIN
      gklearn/gedlib/documentation/gedlibpy.pdf
  17. +36
    -0
      gklearn/gedlib/documentation/make.bat
  18. +199
    -0
      gklearn/gedlib/documentation/source/conf.py
  19. +2
    -0
      gklearn/gedlib/documentation/source/doc.rst
  20. +42
    -0
      gklearn/gedlib/documentation/source/editcost.rst
  21. +165
    -0
      gklearn/gedlib/documentation/source/examples.rst
  22. +36
    -0
      gklearn/gedlib/documentation/source/index.rst
  23. +97
    -0
      gklearn/gedlib/documentation/source/readme.rst
  24. +25510
    -0
      gklearn/gedlib/gedlibpy.cpp
  25. BIN
      gklearn/gedlib/gedlibpy.cpython-36m-x86_64-linux-gnu.so
  26. +1548
    -0
      gklearn/gedlib/gedlibpy.pyx
  27. +1
    -0
      gklearn/gedlib/lib/fann/libdoublefann.so
  28. +1
    -0
      gklearn/gedlib/lib/fann/libdoublefann.so.2
  29. BIN
      gklearn/gedlib/lib/fann/libdoublefann.so.2.2.0
  30. +1
    -0
      gklearn/gedlib/lib/fann/libfann.so
  31. +1
    -0
      gklearn/gedlib/lib/fann/libfann.so.2
  32. BIN
      gklearn/gedlib/lib/fann/libfann.so.2.2.0
  33. +1
    -0
      gklearn/gedlib/lib/fann/libfixedfann.so
  34. +1
    -0
      gklearn/gedlib/lib/fann/libfixedfann.so.2
  35. BIN
      gklearn/gedlib/lib/fann/libfixedfann.so.2.2.0
  36. +1
    -0
      gklearn/gedlib/lib/fann/libfloatfann.so
  37. +1
    -0
      gklearn/gedlib/lib/fann/libfloatfann.so.2
  38. BIN
      gklearn/gedlib/lib/fann/libfloatfann.so.2.2.0
  39. +10
    -0
      gklearn/gedlib/lib/fann/pkgconfig/fann.pc
  40. +31
    -0
      gklearn/gedlib/lib/libsvm.3.22/COPYRIGHT
  41. +2166
    -0
      gklearn/gedlib/lib/libsvm.3.22/FAQ.html
  42. +24
    -0
      gklearn/gedlib/lib/libsvm.3.22/Makefile
  43. +33
    -0
      gklearn/gedlib/lib/libsvm.3.22/Makefile.win
  44. +769
    -0
      gklearn/gedlib/lib/libsvm.3.22/README
  45. +270
    -0
      gklearn/gedlib/lib/libsvm.3.22/heart_scale
  46. +26
    -0
      gklearn/gedlib/lib/libsvm.3.22/java/Makefile
  47. BIN
      gklearn/gedlib/lib/libsvm.3.22/java/libsvm.jar
  48. +2860
    -0
      gklearn/gedlib/lib/libsvm.3.22/java/libsvm/svm.java
  49. +2860
    -0
      gklearn/gedlib/lib/libsvm.3.22/java/libsvm/svm.m4
  50. +22
    -0
      gklearn/gedlib/lib/libsvm.3.22/java/libsvm/svm_model.java
  51. +6
    -0
      gklearn/gedlib/lib/libsvm.3.22/java/libsvm/svm_node.java
  52. +47
    -0
      gklearn/gedlib/lib/libsvm.3.22/java/libsvm/svm_parameter.java
  53. +5
    -0
      gklearn/gedlib/lib/libsvm.3.22/java/libsvm/svm_print_interface.java
  54. +7
    -0
      gklearn/gedlib/lib/libsvm.3.22/java/libsvm/svm_problem.java
  55. +194
    -0
      gklearn/gedlib/lib/libsvm.3.22/java/svm_predict.java
  56. +350
    -0
      gklearn/gedlib/lib/libsvm.3.22/java/svm_scale.java
  57. +502
    -0
      gklearn/gedlib/lib/libsvm.3.22/java/svm_toy.java
  58. +318
    -0
      gklearn/gedlib/lib/libsvm.3.22/java/svm_train.java
  59. +1
    -0
      gklearn/gedlib/lib/libsvm.3.22/java/test_applet.html
  60. BIN
      gklearn/gedlib/lib/libsvm.3.22/libsvm.so
  61. +45
    -0
      gklearn/gedlib/lib/libsvm.3.22/matlab/Makefile
  62. +245
    -0
      gklearn/gedlib/lib/libsvm.3.22/matlab/README
  63. +212
    -0
      gklearn/gedlib/lib/libsvm.3.22/matlab/libsvmread.c
  64. +119
    -0
      gklearn/gedlib/lib/libsvm.3.22/matlab/libsvmwrite.c
  65. +22
    -0
      gklearn/gedlib/lib/libsvm.3.22/matlab/make.m
  66. +374
    -0
      gklearn/gedlib/lib/libsvm.3.22/matlab/svm_model_matlab.c
  67. +2
    -0
      gklearn/gedlib/lib/libsvm.3.22/matlab/svm_model_matlab.h
  68. +370
    -0
      gklearn/gedlib/lib/libsvm.3.22/matlab/svmpredict.c
  69. +495
    -0
      gklearn/gedlib/lib/libsvm.3.22/matlab/svmtrain.c
  70. +4
    -0
      gklearn/gedlib/lib/libsvm.3.22/python/Makefile
  71. +367
    -0
      gklearn/gedlib/lib/libsvm.3.22/python/README
  72. +330
    -0
      gklearn/gedlib/lib/libsvm.3.22/python/svm.py
  73. +262
    -0
      gklearn/gedlib/lib/libsvm.3.22/python/svmutil.py
  74. +239
    -0
      gklearn/gedlib/lib/libsvm.3.22/svm-predict.c
  75. +397
    -0
      gklearn/gedlib/lib/libsvm.3.22/svm-scale.c
  76. +22
    -0
      gklearn/gedlib/lib/libsvm.3.22/svm-toy/gtk/Makefile
  77. +447
    -0
      gklearn/gedlib/lib/libsvm.3.22/svm-toy/gtk/callbacks.cpp
  78. +54
    -0
      gklearn/gedlib/lib/libsvm.3.22/svm-toy/gtk/callbacks.h
  79. +164
    -0
      gklearn/gedlib/lib/libsvm.3.22/svm-toy/gtk/interface.c
  80. +14
    -0
      gklearn/gedlib/lib/libsvm.3.22/svm-toy/gtk/interface.h
  81. +23
    -0
      gklearn/gedlib/lib/libsvm.3.22/svm-toy/gtk/main.c
  82. +238
    -0
      gklearn/gedlib/lib/libsvm.3.22/svm-toy/gtk/svm-toy.glade
  83. +18
    -0
      gklearn/gedlib/lib/libsvm.3.22/svm-toy/qt/Makefile
  84. +437
    -0
      gklearn/gedlib/lib/libsvm.3.22/svm-toy/qt/svm-toy.cpp
  85. +482
    -0
      gklearn/gedlib/lib/libsvm.3.22/svm-toy/windows/svm-toy.cpp
  86. +380
    -0
      gklearn/gedlib/lib/libsvm.3.22/svm-train.c
  87. +3181
    -0
      gklearn/gedlib/lib/libsvm.3.22/svm.cpp
  88. +21
    -0
      gklearn/gedlib/lib/libsvm.3.22/svm.def
  89. +104
    -0
      gklearn/gedlib/lib/libsvm.3.22/svm.h
  90. +210
    -0
      gklearn/gedlib/lib/libsvm.3.22/tools/README
  91. +108
    -0
      gklearn/gedlib/lib/libsvm.3.22/tools/checkdata.py
  92. +79
    -0
      gklearn/gedlib/lib/libsvm.3.22/tools/easy.py
  93. +500
    -0
      gklearn/gedlib/lib/libsvm.3.22/tools/grid.py
  94. +120
    -0
      gklearn/gedlib/lib/libsvm.3.22/tools/subset.py
  95. BIN
      gklearn/gedlib/lib/libsvm.3.22/windows/libsvmread.mexw64
  96. BIN
      gklearn/gedlib/lib/libsvm.3.22/windows/libsvmwrite.mexw64
  97. BIN
      gklearn/gedlib/lib/libsvm.3.22/windows/svmpredict.mexw64
  98. BIN
      gklearn/gedlib/lib/libsvm.3.22/windows/svmtrain.mexw64
  99. BIN
      gklearn/gedlib/lib/nomad/libnomad.so
  100. BIN
      gklearn/gedlib/lib/nomad/libsgtelib.so

+ 16
- 0
.gitignore View File

@@ -47,3 +47,19 @@ htmlcov
virtualenv

.vscode/

# gedlibpy
gklearn/gedlib/build/
gklearn/gedlib/build/__pycache__/
gklearn/gedlib/collections/
gklearn/gedlib/Median_Example/
gklearn/gedlib/build/include/gedlib-master/median/collections/
gklearn/gedlib/include/
gklearn/gedlib/libgxlgedlib.so

# misc
notebooks/preimage/
notebooks/unfinished
gklearn/kernels/else/
gklearn/kernels/unfinished/
gklearn/kernels/.tags

+ 1
- 0
gklearn/ged/env/__init__.py View File

@@ -0,0 +1 @@
from gklearn.ged.env.common_types import AlgorithmState

gklearn/preimage/common_types.py → gklearn/ged/env/common_types.py View File


+ 2
- 0
gklearn/ged/median/__init__.py View File

@@ -0,0 +1,2 @@
from gklearn.ged.median.median_graph_estimator import MedianGraphEstimator
from gklearn.ged.median.utils import constant_node_costs, mge_options_to_string

gklearn/preimage/median_graph_estimator.py → gklearn/ged/median/median_graph_estimator.py View File

@@ -6,10 +6,9 @@ Created on Mon Mar 16 18:04:55 2020
@author: ljia
"""
import numpy as np
from gklearn.preimage.common_types import AlgorithmState
from gklearn.preimage import misc
from gklearn.preimage.timer import Timer
from gklearn.utils.utils import graph_isIdentical
from gklearn.ged.env import AlgorithmState
from gklearn.ged.util import misc
from gklearn.utils import Timer
import time
from tqdm import tqdm
import sys
@@ -822,5 +821,4 @@ class MedianGraphEstimator(object):
def compute_my_cost(g, h, node_map):
cost = 0.0
for node in g.nodes:
cost += 0
cost += 0

gklearn/preimage/test_median_graph_estimator.py → gklearn/ged/median/test_median_graph_estimator.py View File

@@ -8,7 +8,7 @@ Created on Mon Mar 16 17:26:40 2020
def test_median_graph_estimator():
from gklearn.utils.graphfiles import loadDataset
from gklearn.preimage.median_graph_estimator import MedianGraphEstimator
from gklearn.ged.median import MedianGraphEstimator, constant_node_costs
from gklearn.gedlib import librariesImport, gedlibpy
from gklearn.preimage.utils import get_same_item_indices
from gklearn.preimage.ged import convertGraph
@@ -31,7 +31,7 @@ def test_median_graph_estimator():
# Load dataset.
# dataset = '../../datasets/COIL-DEL/COIL-DEL_A.txt'
dataset = '../../datasets/Letter-high/Letter-high_A.txt'
dataset = '../../../datasets/Letter-high/Letter-high_A.txt'
Gn, y_all = loadDataset(dataset)
y_idx = get_same_item_indices(y_all)
for i, (y, values) in enumerate(y_idx.items()):
@@ -75,16 +75,6 @@ def test_median_graph_estimator():
gen_median = ged_env.get_nx_graph(gen_median_id)
return set_median, gen_median


def constant_node_costs(edit_cost_name):
if edit_cost_name == 'NON_SYMBOLIC' or edit_cost_name == 'LETTER2' or edit_cost_name == 'LETTER':
return False
# elif edit_cost_name != '':
# # throw ged::Error("Invalid dataset " + dataset + ". Usage: ./median_tests <AIDS|Mutagenicity|Letter-high|Letter-med|Letter-low|monoterpenoides|SYNTHETICnew|Fingerprint|COIL-DEL>");
# return False
# return True


if __name__ == '__main__':

+ 53
- 0
gklearn/ged/median/utils.py View File

@@ -0,0 +1,53 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 1 15:12:31 2020

@author: ljia
"""

def constant_node_costs(edit_cost_name):
    """Return True if the given edit cost function uses constant node costs.

    LETTER, LETTER2 and NON_SYMBOLIC costs depend on continuous node
    attributes (coordinates), so their node costs are not constant; every
    other edit cost (e.g. CHEM_1, CONSTANT) is treated as constant, matching
    GEDLIB's median tests.

    Parameters
    ----------
    edit_cost_name : str
        Name of a GEDLIB edit cost function.

    Returns
    -------
    bool
    """
    if edit_cost_name in ('NON_SYMBOLIC', 'LETTER2', 'LETTER'):
        return False
    # BUG FIX: the original fell off the end here and returned None (falsy),
    # which mis-reported constant-cost edit functions as non-constant; the
    # C++ original (and the commented-out "# return True") returns true.
    return True
def mge_options_to_string(options):
    """Translate a median-graph-estimator options dict into GEDLIB's
    command-line-style option string.

    Each recognized key is rendered as ``--flag value``; ``verbose`` maps to
    ``--stdout`` and ``refine`` is rendered as TRUE/FALSE. Unrecognized keys
    are silently ignored (validation is left to GEDLIB itself).

    Parameters
    ----------
    options : dict
        Option name -> value.

    Returns
    -------
    str
        Option string beginning with a single space, each option followed
        by a trailing space.
    """
    flag_by_key = {
        'init_type': '--init-type',
        'random_inits': '--random-inits',
        'randomness': '--randomness',
        'verbose': '--stdout',
        'time_limit': '--time-limit',
        'max_itrs': '--max-itrs',
        'max_itrs_without_update': '--max-itrs-without-update',
        'seed': '--seed',
        'epsilon': '--epsilon',
        'inits_increase_order': '--inits-increase-order',
        'init_type_increase_order': '--init-type-increase-order',
        'max_itrs_increase_order': '--max-itrs-increase-order',
    }
    out = ' '
    for name, value in options.items():
        if name == 'refine':
            # booleans are spelled TRUE/FALSE on the GEDLIB side.
            out += '--refine ' + ('TRUE' if value else 'FALSE') + ' '
        elif name in flag_by_key:
            out += flag_by_key[name] + ' ' + str(value) + ' '
    return out

+ 1
- 0
gklearn/ged/util/__init__.py View File

@@ -0,0 +1 @@
from gklearn.ged.util.util import compute_geds, ged_options_to_string

gklearn/preimage/cpp2python.py → gklearn/ged/util/cpp2python.py View File


+ 122
- 0
gklearn/ged/util/cpp_code.cpp View File

@@ -0,0 +1,122 @@
else if (option.first == "random-inits") {
try {
num_random_inits_ = std::stoul(option.second);
desired_num_random_inits_ = num_random_inits_;
}
catch (...) {
throw Error(std::string("Invalid argument \"") + option.second + "\" for option random-inits. Usage: options = \"[--random-inits <convertible to int greater 0>]\"");
}
if (num_random_inits_ <= 0) {
throw Error(std::string("Invalid argument \"") + option.second + "\" for option random-inits. Usage: options = \"[--random-inits <convertible to int greater 0>]\"");
}
}
else if (option.first == "randomness") {
if (option.second == "PSEUDO") {
use_real_randomness_ = false;
}
else if (option.second == "REAL") {
use_real_randomness_ = true;
}
else {
throw Error(std::string("Invalid argument \"") + option.second + "\" for option randomness. Usage: options = \"[--randomness REAL|PSEUDO] [...]\"");
}
}
else if (option.first == "stdout") {
if (option.second == "0") {
print_to_stdout_ = 0;
}
else if (option.second == "1") {
print_to_stdout_ = 1;
}
else if (option.second == "2") {
print_to_stdout_ = 2;
}
else {
throw Error(std::string("Invalid argument \"") + option.second + "\" for option stdout. Usage: options = \"[--stdout 0|1|2] [...]\"");
}
}
else if (option.first == "refine") {
if (option.second == "TRUE") {
refine_ = true;
}
else if (option.second == "FALSE") {
refine_ = false;
}
else {
throw Error(std::string("Invalid argument \"") + option.second + "\" for option refine. Usage: options = \"[--refine TRUE|FALSE] [...]\"");
}
}
else if (option.first == "time-limit") {
try {
time_limit_in_sec_ = std::stod(option.second);
}
catch (...) {
throw Error(std::string("Invalid argument \"") + option.second + "\" for option time-limit. Usage: options = \"[--time-limit <convertible to double>] [...]");
}
}
else if (option.first == "max-itrs") {
try {
max_itrs_ = std::stoi(option.second);
}
catch (...) {
throw Error(std::string("Invalid argument \"") + option.second + "\" for option max-itrs. Usage: options = \"[--max-itrs <convertible to int>] [...]");
}
}
else if (option.first == "max-itrs-without-update") {
try {
max_itrs_without_update_ = std::stoi(option.second);
}
catch (...) {
throw Error(std::string("Invalid argument \"") + option.second + "\" for option max-itrs-without-update. Usage: options = \"[--max-itrs-without-update <convertible to int>] [...]");
}
}
else if (option.first == "seed") {
try {
seed_ = std::stoul(option.second);
}
catch (...) {
throw Error(std::string("Invalid argument \"") + option.second + "\" for option seed. Usage: options = \"[--seed <convertible to int greater equal 0>] [...]");
}
}
else if (option.first == "epsilon") {
try {
epsilon_ = std::stod(option.second);
}
catch (...) {
throw Error(std::string("Invalid argument \"") + option.second + "\" for option epsilon. Usage: options = \"[--epsilon <convertible to double greater 0>] [...]");
}
if (epsilon_ <= 0) {
throw Error(std::string("Invalid argument \"") + option.second + "\" for option epsilon. Usage: options = \"[--epsilon <convertible to double greater 0>] [...]");
}
}
else if (option.first == "inits-increase-order") {
try {
num_inits_increase_order_ = std::stoul(option.second);
}
catch (...) {
throw Error(std::string("Invalid argument \"") + option.second + "\" for option inits-increase-order. Usage: options = \"[--inits-increase-order <convertible to int greater 0>]\"");
}
if (num_inits_increase_order_ <= 0) {
throw Error(std::string("Invalid argument \"") + option.second + "\" for option inits-increase-order. Usage: options = \"[--inits-increase-order <convertible to int greater 0>]\"");
}
}
else if (option.first == "init-type-increase-order") {
init_type_increase_order_ = option.second;
if (option.second != "CLUSTERS" and option.second != "K-MEANS++") {
throw ged::Error(std::string("Invalid argument ") + option.second + " for option init-type-increase-order. Usage: options = \"[--init-type-increase-order CLUSTERS|K-MEANS++] [...]\"");
}
}
else if (option.first == "max-itrs-increase-order") {
try {
max_itrs_increase_order_ = std::stoi(option.second);
}
catch (...) {
throw Error(std::string("Invalid argument \"") + option.second + "\" for option max-itrs-increase-order. Usage: options = \"[--max-itrs-increase-order <convertible to int>] [...]");
}
}
else {
std::string valid_options("[--init-type <arg>] [--random-inits <arg>] [--randomness <arg>] [--seed <arg>] [--stdout <arg>] ");
valid_options += "[--time-limit <arg>] [--max-itrs <arg>] [--epsilon <arg>] ";
valid_options += "[--inits-increase-order <arg>] [--init-type-increase-order <arg>] [--max-itrs-increase-order <arg>]";
throw Error(std::string("Invalid option \"") + option.first + "\". Usage: options = \"" + valid_options + "\"");
}

gklearn/preimage/misc.py → gklearn/ged/util/misc.py View File


+ 344
- 0
gklearn/ged/util/util.py View File

@@ -0,0 +1,344 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 31 17:06:22 2020

@author: ljia
"""
import numpy as np
from itertools import combinations
import multiprocessing
from multiprocessing import Pool
from functools import partial
import sys
from tqdm import tqdm
import networkx as nx
from gklearn.gedlib import librariesImport, gedlibpy


def compute_ged(g1, g2, options):
    """Compute the graph edit distance between two networkx graphs.

    Builds a one-off gedlibpy environment, adds the two graphs, runs the
    configured method on the pair, and returns the upper bound together
    with both node maps.

    Parameters
    ----------
    g1, g2 : networkx graphs.
    options : dict
        Must contain 'edit_cost', 'edit_cost_constants' and 'method';
        the remaining keys are converted by ged_options_to_string.

    Returns
    -------
    (dis, pi_forward, pi_backward)
        The GED upper bound, and the forward/backward node maps
        re-expressed with original node labels, where np.inf marks a
        removed (forward) or inserted (backward) node.
    """
    ged_env = gedlibpy.GEDEnv()
    ged_env.set_edit_cost(options['edit_cost'], edit_cost_constant=options['edit_cost_constants'])
    ged_env.add_nx_graph(g1, '')
    ged_env.add_nx_graph(g2, '')
    listID = ged_env.get_all_graph_ids()
    ged_env.init()
    ged_env.set_method(options['method'], ged_options_to_string(options))
    ged_env.init_method()

    g = listID[0]
    h = listID[1]
    ged_env.run_method(g, h)
    pi_forward = ged_env.get_forward_map(g, h)
    pi_backward = ged_env.get_backward_map(g, h)
    upper = ged_env.get_upper_bound(g, h)
    # the method's upper bound is used as the distance estimate.
    dis = upper
    # make the map label correct (label remove map as np.inf)
    nodes1 = [n for n in g1.nodes()]
    nodes2 = [n for n in g2.nodes()]
    nb1 = nx.number_of_nodes(g1)
    nb2 = nx.number_of_nodes(g2)
    pi_forward = [nodes2[pi] if pi < nb2 else np.inf for pi in pi_forward]
    pi_backward = [nodes1[pi] if pi < nb1 else np.inf for pi in pi_backward]

    return dis, pi_forward, pi_backward


def compute_geds(graphs, options={}, parallel=False):
    """Compute pairwise graph edit distances over a list of graphs.

    Parameters
    ----------
    graphs : list
        networkx graphs, all added to one fresh gedlibpy environment.
    options : dict
        Must contain 'edit_cost', 'edit_cost_constants' and 'method';
        other keys are translated by ged_options_to_string.
        NOTE(review): mutable default argument — harmless here because the
        dict is only read, never mutated, but worth confirming.
    parallel : bool
        If True, pairs are distributed over a multiprocessing Pool.

    Returns
    -------
    (ged_vec, ged_mat, n_edit_operations)
        The upper-triangle distances as a flat list, the full symmetric
        distance matrix, and the per-pair edit-operation counts.
    """
    # initialize ged env.
    ged_env = gedlibpy.GEDEnv()
    ged_env.set_edit_cost(options['edit_cost'], edit_cost_constant=options['edit_cost_constants'])
    for g in graphs:
        ged_env.add_nx_graph(g, '')
    listID = ged_env.get_all_graph_ids()
    ged_env.init()
    ged_env.set_method(options['method'], ged_options_to_string(options))
    ged_env.init_method()

    # compute ged.
    ged_mat = np.zeros((len(graphs), len(graphs)))
    if parallel:
        # number of unordered pairs, i.e. the upper-triangle length.
        len_itr = int(len(graphs) * (len(graphs) - 1) / 2)
        ged_vec = [0 for i in range(len_itr)]
        n_edit_operations = [0 for i in range(len_itr)]
        itr = combinations(range(0, len(graphs)), 2)
        n_jobs = multiprocessing.cpu_count()
        # cap the chunk size so small workloads still spread over all workers.
        if len_itr < 100 * n_jobs:
            chunksize = int(len_itr / n_jobs) + 1
        else:
            chunksize = 100
        # share the read-only data with workers via initializer globals;
        # assumes ged_env and graphs survive transfer to the workers — TODO confirm.
        def init_worker(graphs_toshare, ged_env_toshare, listID_toshare):
            global G_graphs, G_ged_env, G_listID
            G_graphs = graphs_toshare
            G_ged_env = ged_env_toshare
            G_listID = listID_toshare
        do_partial = partial(_wrapper_compute_ged_parallel, options)
        pool = Pool(processes=n_jobs, initializer=init_worker, initargs=(graphs, ged_env, listID))
        iterator = tqdm(pool.imap_unordered(do_partial, itr, chunksize),
                        desc='computing GEDs', file=sys.stdout)
        # iterator = pool.imap_unordered(do_partial, itr, chunksize)
        for i, j, dis, n_eo_tmp in iterator:
            # map (i, j) back to its index in the flattened upper triangle.
            idx_itr = int(len(graphs) * i + j - (i + 1) * (i + 2) / 2)
            ged_vec[idx_itr] = dis
            ged_mat[i][j] = dis
            ged_mat[j][i] = dis
            n_edit_operations[idx_itr] = n_eo_tmp
            # print('\n-------------------------------------------')
            # print(i, j, idx_itr, dis)
        pool.close()
        pool.join()
    else:
        ged_vec = []
        n_edit_operations = []
        for i in tqdm(range(len(graphs)), desc='computing GEDs', file=sys.stdout):
        # for i in range(len(graphs)):
            for j in range(i + 1, len(graphs)):
                dis, pi_forward, pi_backward = _compute_ged(ged_env, listID[i], listID[j], graphs[i], graphs[j])
                ged_vec.append(dis)
                ged_mat[i][j] = dis
                ged_mat[j][i] = dis
                n_eo_tmp = get_nb_edit_operations(graphs[i], graphs[j], pi_forward, pi_backward, edit_cost=options['edit_cost'])
                n_edit_operations.append(n_eo_tmp)
    return ged_vec, ged_mat, n_edit_operations


def _wrapper_compute_ged_parallel(options, itr):
    """Unpack one (i, j) pair and compute its GED via the worker globals.

    G_ged_env, G_listID and G_graphs are set by the Pool initializer in
    compute_geds.
    """
    i, j = itr
    dis, n_eo_tmp = _compute_ged_parallel(G_ged_env, G_listID[i], G_listID[j], G_graphs[i], G_graphs[j], options)
    return i, j, dis, n_eo_tmp


def _compute_ged_parallel(env, gid1, gid2, g1, g2, options):
    """Worker-side helper: compute one pair's GED and its edit-operation counts."""
    dis, pi_forward, pi_backward = _compute_ged(env, gid1, gid2, g1, g2)
    # derive the per-type edit-operation counts from the two node maps.
    n_eo_tmp = get_nb_edit_operations(g1, g2, pi_forward, pi_backward, edit_cost=options['edit_cost'])
    return dis, n_eo_tmp


def _compute_ged(env, gid1, gid2, g1, g2):
    """Run the configured GED method on one pair already loaded in `env`.

    gid1/gid2 are the environment graph ids of g1/g2; g1/g2 themselves are
    only needed to translate the index-based node maps back to node labels.

    Returns
    -------
    (dis, pi_forward, pi_backward)
        Same contract as compute_ged: upper bound plus label-based maps,
        with np.inf marking removed/inserted nodes.
    """
    env.run_method(gid1, gid2)
    pi_forward = env.get_forward_map(gid1, gid2)
    pi_backward = env.get_backward_map(gid1, gid2)
    upper = env.get_upper_bound(gid1, gid2)
    # the method's upper bound is used as the distance estimate.
    dis = upper
    # make the map label correct (label remove map as np.inf)
    nodes1 = [n for n in g1.nodes()]
    nodes2 = [n for n in g2.nodes()]
    nb1 = nx.number_of_nodes(g1)
    nb2 = nx.number_of_nodes(g2)
    pi_forward = [nodes2[pi] if pi < nb2 else np.inf for pi in pi_forward]
    pi_backward = [nodes1[pi] if pi < nb1 else np.inf for pi in pi_backward]

    return dis, pi_forward, pi_backward


def get_nb_edit_operations(g1, g2, forward_map, backward_map, edit_cost=None):
    """Dispatch edit-operation counting to the variant matching `edit_cost`.

    LETTER/LETTER2 use the coordinate-based letter counter, NON_SYMBOLIC the
    generic attribute-based counter, and anything else the symbolic counter.
    """
    if edit_cost in ('LETTER', 'LETTER2'):
        return get_nb_edit_operations_letter(g1, g2, forward_map, backward_map)
    if edit_cost == 'NON_SYMBOLIC':
        return get_nb_edit_operations_nonsymbolic(g1, g2, forward_map, backward_map)
    return get_nb_edit_operations_symbolic(g1, g2, forward_map, backward_map)

def get_nb_edit_operations_symbolic(g1, g2, forward_map, backward_map):
    """Compute the number of each edit operation for symbolic edit costs.

    Nodes carry an 'atom' label and edges a 'bond_type' label; a mapped
    element with a differing label counts as a substitution. np.inf in a
    map marks a removed (forward) or inserted (backward) node.

    Returns
    -------
    (n_vi, n_vr, n_vs, n_ei, n_er, n_es)
        Counts of node insertions/removals/substitutions and edge
        insertions/removals/substitutions.
    """
    n_vi = 0
    n_vr = 0
    n_vs = 0
    n_ei = 0
    n_er = 0
    n_es = 0
    nodes1 = [n for n in g1.nodes()]
    for i, map_i in enumerate(forward_map):
        if map_i == np.inf:
            n_vr += 1
        # BUG FIX: the original used the long-removed networkx `Graph.node`
        # attribute view; `Graph.nodes` is the current API and is what the
        # sibling counters in this file already use.
        elif g1.nodes[nodes1[i]]['atom'] != g2.nodes[map_i]['atom']:
            n_vs += 1
    for map_i in backward_map:
        if map_i == np.inf:
            n_vi += 1
    edges1 = [e for e in g1.edges()]
    nb_edges2_cnted = 0
    for n1, n2 in edges1:
        idx1 = nodes1.index(n1)
        idx2 = nodes1.index(n2)
        # one of the nodes is removed, thus the edge is removed.
        if forward_map[idx1] == np.inf or forward_map[idx2] == np.inf:
            n_er += 1
        # corresponding edge is in g2 (check both orientations).
        elif (forward_map[idx1], forward_map[idx2]) in g2.edges():
            nb_edges2_cnted += 1
            # edge labels are different.
            if g2.edges[(forward_map[idx1], forward_map[idx2])]['bond_type'] \
                    != g1.edges[(n1, n2)]['bond_type']:
                n_es += 1
        elif (forward_map[idx2], forward_map[idx1]) in g2.edges():
            nb_edges2_cnted += 1
            # edge labels are different.
            if g2.edges[(forward_map[idx2], forward_map[idx1])]['bond_type'] \
                    != g1.edges[(n1, n2)]['bond_type']:
                n_es += 1
        # corresponding nodes are in g2, however the edge is removed.
        else:
            n_er += 1
    # every g2 edge not matched to a g1 edge must have been inserted.
    n_ei = g2.number_of_edges() - nb_edges2_cnted
    return n_vi, n_vr, n_vs, n_ei, n_er, n_es


def get_nb_edit_operations_letter(g1, g2, forward_map, backward_map):
    """Compute the number of each edit operation for LETTER/LETTER2 costs.

    Nodes carry 'x'/'y' coordinates; substitutions accumulate the Euclidean
    distance between mapped coordinates instead of a discrete label check.
    Edge labels are ignored for these edit costs.

    Returns
    -------
    (n_vi, n_vr, n_vs, sod_vs, n_ei, n_er)
        Node insertion/removal/substitution counts, the summed coordinate
        distances of substitutions, and edge insertion/removal counts.
    """
    n_vr = 0
    n_vs = 0
    sod_vs = 0
    n_er = 0
    node_list = list(g1.nodes())
    for idx, target in enumerate(forward_map):
        if target == np.inf:
            n_vr += 1
            continue
        n_vs += 1
        dx = float(g1.nodes[node_list[idx]]['x']) - float(g2.nodes[target]['x'])
        dy = float(g1.nodes[node_list[idx]]['y']) - float(g2.nodes[target]['y'])
        sod_vs += np.sqrt(np.square(dx) + np.square(dy))
    # every unmapped g2 node was inserted.
    n_vi = sum(1 for target in backward_map if target == np.inf)
    matched_edges = 0
    for u, v in list(g1.edges()):
        fu = forward_map[node_list.index(u)]
        fv = forward_map[node_list.index(v)]
        if fu == np.inf or fv == np.inf:
            # an endpoint was removed, so the edge is removed too.
            n_er += 1
        elif (fu, fv) in g2.edges() or (fv, fu) in g2.edges():
            # the corresponding edge exists in g2; labels are not compared.
            matched_edges += 1
        else:
            # both endpoints mapped, but no matching edge in g2.
            n_er += 1
    n_ei = nx.number_of_edges(g2) - matched_edges
    return n_vi, n_vr, n_vs, sod_vs, n_ei, n_er


def get_nb_edit_operations_nonsymbolic(g1, g2, forward_map, backward_map):
    """Compute the number of each edit operation for NON_SYMBOLIC costs.

    Node and edge attribute names are taken from g1.graph['node_attrs'] and
    g1.graph['edge_attrs']; substitutions accumulate the Euclidean distance
    over those attributes.

    Returns
    -------
    (n_vi, n_vr, sod_vs, n_ei, n_er, sod_es)
        Node insertion/removal counts, summed node substitution distances,
        edge insertion/removal counts, summed edge substitution distances.
        NOTE(review): unlike the LETTER variant, the substitution counts
        n_vs/n_es are computed but not returned — presumably intentional
        for this cost model; confirm against the callers.
    """
    n_vi = 0
    n_vr = 0
    n_vs = 0
    sod_vs = 0
    n_ei = 0
    n_er = 0
    n_es = 0
    sod_es = 0
    nodes1 = [n for n in g1.nodes()]
    for i, map_i in enumerate(forward_map):
        if map_i == np.inf:
            n_vr += 1
        else:
            n_vs += 1
            sum_squares = 0
            for a_name in g1.graph['node_attrs']:
                diff = float(g1.nodes[nodes1[i]][a_name]) - float(g2.nodes[map_i][a_name])
                sum_squares += np.square(diff)
            sod_vs += np.sqrt(sum_squares)
    for map_i in backward_map:
        if map_i == np.inf:
            n_vi += 1
    edges1 = [e for e in g1.edges()]
    for n1, n2 in edges1:
        idx1 = nodes1.index(n1)
        idx2 = nodes1.index(n2)
        n1_g2 = forward_map[idx1]
        n2_g2 = forward_map[idx2]
        # one of the nodes is removed, thus the edge is removed.
        if n1_g2 == np.inf or n2_g2 == np.inf:
            n_er += 1
        # corresponding edge is in g2 (check both orientations).
        elif (n1_g2, n2_g2) in g2.edges():
            n_es += 1
            sum_squares = 0
            for a_name in g1.graph['edge_attrs']:
                # BUG FIX: edge attributes were read from g2.nodes[n1_g2, n2_g2],
                # which is the node view; they live on the edge view.
                diff = float(g1.edges[n1, n2][a_name]) - float(g2.edges[n1_g2, n2_g2][a_name])
                sum_squares += np.square(diff)
            sod_es += np.sqrt(sum_squares)
        elif (n2_g2, n1_g2) in g2.edges():
            n_es += 1
            sum_squares = 0
            for a_name in g1.graph['edge_attrs']:
                # BUG FIX: same g2.nodes -> g2.edges correction as above.
                diff = float(g1.edges[n2, n1][a_name]) - float(g2.edges[n2_g2, n1_g2][a_name])
                sum_squares += np.square(diff)
            sod_es += np.sqrt(sum_squares)
        # corresponding nodes are in g2, however the edge is removed.
        else:
            n_er += 1
    # every g2 edge not matched as a substitution must have been inserted.
    n_ei = g2.number_of_edges() - n_es
    return n_vi, n_vr, sod_vs, n_ei, n_er, sod_es


def ged_options_to_string(options):
    """Translate a GED options dict into GEDLIB's command-line-style string.

    Each recognized key is rendered as ``--flag value``; unrecognized keys
    (e.g. 'edit_cost', 'method', which are consumed elsewhere) are silently
    ignored.

    Parameters
    ----------
    options : dict
        Option name -> value.

    Returns
    -------
    str
        Option string beginning with a single space, each option followed
        by a trailing space.
    """
    flag_by_key = {
        # BUG FIX: 'initialization_method' previously emitted the unrelated,
        # underscore-spelled flag '--initial_solutions', colliding with the
        # real 'initial_solutions' key below; GEDLIB's LS-based methods
        # expect '--initialization-method'.
        'initialization_method': '--initialization-method',
        'initialization_options': '--initialization-options',
        'lower_bound_method': '--lower-bound-method',
        'random_substitution_ratio': '--random-substitution-ratio',
        'initial_solutions': '--initial-solutions',
        'ratio_runs_from_initial_solutions': '--ratio-runs-from-initial-solutions',
        'threads': '--threads',
        'num_randpost_loops': '--num-randpost-loops',
        # BUG FIX: was '--maxrandpost-retrials' (missing hyphen); GEDLIB's
        # option is '--max-randpost-retrials'.
        'max_randpost_retrials': '--max-randpost-retrials',
        'randpost_penalty': '--randpost-penalty',
        'randpost_decay': '--randpost-decay',
        'log': '--log',
        'randomness': '--randomness',
    }
    opt_str = ' '
    for key, val in options.items():
        if key in flag_by_key:
            opt_str += flag_by_key[key] + ' ' + str(val) + ' '
    return opt_str

+ 97
- 0
gklearn/gedlib/README.rst View File

@@ -0,0 +1,97 @@
GEDLIBPY
====================================

Please Read https://dbblumenthal.github.io/gedlib/ before using Python code.
You can also find this module documentation in documentation/build/html folder.

Make sure you have numpy installed (and Cython if you have to recompile the library). You can use pip for this.


Running the script
-------------------

After downloading the entire folder, you can run test.py to ensure the library works.

For your code, you have to make two imports::

import librariesImport
import gedlibpy

You can call each function in the library with this. You can't move any folder or files on the library, please make sure that the architecture remains the same.

This library is compiled for Python3 only. If you want to use it with Python 2, you have to recompile it with setup.py. You have to use this command on your favorite shell::

python setup.py build_ext --inplace

After this step, you can use the same lines as Python3 for import, it will be ok. Check the documentation inside the documentation/build/html folder before using function. You can also copy the tests examples for basic use.


A problem with the library ?
-------------------------------

If the library isn't found, you can recompile the Python library because your Linux is different to mine. Please delete gedlibpy.so, gedlibpy.cpp and build folder. Then use this command on a linux shell ::

python3 setup.py build_ext --inplace

You can make it with Python 2 but make sure you use the same version with your code and the compilation.

If it doesn't work, the version of GedLib or another library may be the problem. If it is, you can re-install GedLib for your computer. You can download it from this git: https://dbblumenthal.github.io/gedlib/

You have to install Gedlib with the Python installer after that.
Just call::

python3 install.py

Make the links like indicate on the documentation. Use the same architecture like this library, but just change the .so and folders with your installation. You can recompile the Python library with setup command, after that.

If you use Mac OS, you have to follow all this part, and install the external libraries with this command::

install_name_tool -change <mylib> <path>/<to>/<mylib> <myexec>

For an example, you have to write these lines::

install_name_tool -change libdoublefann.2.dylib lib/fann/libdoublefann.2.dylib gedlibpy.so
install_name_tool -change libsvm.so lib/libsvm.3.22/libsvm.so gedlibpy.so
install_name_tool -change libnomad.so lib/nomad/libnomad.so gedlibpy.so
install_name_tool -change libsgtelib.so lib/nomad/libsgtelib.so gedlibpy.so

The name of the library gedlibpy can be different if you use Python 3.

If your problem is still here, you can contact me on : natacha.lambert@unicaen.fr

How to use this library
-------------------------

This library allows computing the edit distance between two graphs. Follow these steps to use it:

- Add your graphs (GXL files, NX Structures or your structure, make sure that the internal type is the same)
- Choose your cost function
- Init your environment (after that, the cost function and your graphs can't be modified)
- Choose your method computation
- Run the computation with the IDs of the two graphs. You can have the ID when you add the graph or with some functions
- Retrieve the results with the various accessor functions (NodeMap, edit distance, etc.)

Here is an example of code with GXL graphs::

gedlibpy.load_GXL_graphs('include/gedlib-master/data/datasets/Mutagenicity/data/', 'collections/MUTA_10.xml')
listID = gedlibpy.get_all_graph_ids()
gedlibpy.set_edit_cost("CHEM_1")
gedlibpy.init()
gedlibpy.set_method("IPFP", "")
gedlibpy.init_method()
g = listID[0]
h = listID[1]

gedlibpy.run_method(g,h)

print("Node Map : ", gedlibpy.get_node_map(g,h))
print ("Upper Bound = " + str(gedlibpy.get_upper_bound(g,h)) + ", Lower Bound = " + str(gedlibpy.get_lower_bound(g,h)) + ", Runtime = " + str(gedlibpy.get_runtime(g,h)))


Please read the documentation for more examples and functions.


An advice if you don't code in a shell
---------------------------------------

The Python library doesn't report every C++ error. If your interpreter restarts because of an error in your code, please run it from a Linux shell to see the C++ error messages.

+ 10
- 0
gklearn/gedlib/__init__.py View File

@@ -0,0 +1,10 @@
# -*-coding:utf-8 -*-
"""
gedlib

"""

# info
__version__ = "0.1"
__author__ = "Linlin Jia"
__date__ = "March 2020"

+ 20
- 0
gklearn/gedlib/documentation/Makefile View File

@@ -0,0 +1,20 @@
# Minimal makefile for Sphinx documentation
#

# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
SPHINXPROJ = Cython_GedLib
SOURCEDIR = source
BUILDDIR = build

# Put it first so that "make" without argument is like "make help".
help:
@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

.PHONY: help Makefile

# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

BIN
gklearn/gedlib/documentation/gedlibpy.pdf View File


+ 36
- 0
gklearn/gedlib/documentation/make.bat View File

@@ -0,0 +1,36 @@
@ECHO OFF
pushd %~dp0
REM Command file for Sphinx documentation
if "%SPHINXBUILD%" == "" (
set SPHINXBUILD=sphinx-build
)
set SOURCEDIR=source
set BUILDDIR=build
set SPHINXPROJ=Cython_GedLib
if "%1" == "" goto help
%SPHINXBUILD% >NUL 2>NUL
if errorlevel 9009 (
echo.
echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
echo.installed, then set the SPHINXBUILD environment variable to point
echo.to the full path of the 'sphinx-build' executable. Alternatively you
echo.may add the Sphinx directory to PATH.
echo.
echo.If you don't have Sphinx installed, grab it from
echo.http://sphinx-doc.org/
exit /b 1
)
%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
goto end
:help
%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
:end
popd

+ 199
- 0
gklearn/gedlib/documentation/source/conf.py View File

@@ -0,0 +1,199 @@
# -*- coding: utf-8 -*-
#
# Python_GedLib documentation build configuration file, created by
# sphinx-quickstart on Thu Jun 13 16:10:06 2019.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../../'))
sys.path.append("../../lib/fann")
#,"lib/gedlib", "lib/libsvm.3.22","lib/nomad"


# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.githubpages']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'GeDLiBPy'
copyright = u'2019, Natacha Lambert'
author = u'Natacha Lambert'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.0'
# The full version, including alpha/beta/rc tags.
release = u'1.0'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False


# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
]
}


# -- Options for HTMLHelp output ------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'gedlibpydoc'


# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',

# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',

# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',

# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'gedlibpy.tex', u'gedlibpy Documentation',
u'Natacha Lambert', 'manual'),
]


# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'gedlibpy', u'gedlibpy Documentation',
[author], 1)
]


# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'gedlibpy', u'gedlibpy Documentation',
author, 'gedlibpy', 'One line description of project.',
'Miscellaneous'),
]



# -- Options for Epub output ----------------------------------------------

# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright

# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''

# A unique identification for the text.
#
# epub_uid = ''

# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']



# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}

+ 2
- 0
gklearn/gedlib/documentation/source/doc.rst View File

@@ -0,0 +1,2 @@
.. automodule:: gedlibpy
:members:

+ 42
- 0
gklearn/gedlib/documentation/source/editcost.rst View File

@@ -0,0 +1,42 @@
How to add your own editCost class
=========================================

When you choose your cost function, you can set some parameters to personalize the function. But if the type of your graphs doesn't correspond to any of the available choices, you can create your own edit cost function.

For this, you have to write it in C++.

C++ side
-------------

Your class must inherit from the EditCost class, which is an abstract class. You can find it here : include/gedlib-master/src/edit_costs

You can take inspiration from the existing ones to understand how to use it. You have to override these functions :

- virtual double node_ins_cost_fun(const UserNodeLabel & node_label) const final;
- virtual double node_del_cost_fun(const UserNodeLabel & node_label) const final;
- virtual double node_rel_cost_fun(const UserNodeLabel & node_label_1, const UserNodeLabel & node_label_2) const final;
- virtual double edge_ins_cost_fun(const UserEdgeLabel & edge_label) const final;
- virtual double edge_del_cost_fun(const UserEdgeLabel & edge_label) const final;
- virtual double edge_rel_cost_fun(const UserEdgeLabel & edge_label_1, const UserEdgeLabel & edge_label_2) const final;

You can add some attributes for parameters use or more functions, but these are unavoidable.

When your class is ready, please go to the C++ Bind here : src/GedLibBind.cpp . The function is :

void setPersonalEditCost(std::vector<double> editCostConstants){env.set_edit_costs(Your EditCost Class(editCostConstants));}

You just have to initialize your class. Parameters aren't mandatory; they are empty by default. If your class doesn't have any, you can skip this. After that, you have to recompile the project.

Python side
----------------

For this, use setup.py with this command in a linux shell::

python3 setup.py build_ext --inplace

You can also make it in Python 2.

Now you can use your edit cost function with the Python function set_personal_edit_cost(edit_cost_constant).

If you want more information on the C++ side, you can check the documentation of the original library here : https://github.com/dbblumenthal/gedlib


+ 165
- 0
gklearn/gedlib/documentation/source/examples.rst View File

@@ -0,0 +1,165 @@
Examples
==============

Before using each example, please make sure to put these lines at the beginning of your code :

.. code-block:: python

import librariesImport
import gedlibpy

Use your path to access it, without changing the library architecture. After that, you are ready to use the library.

When you want to make new computation, please use this function :

.. code-block:: python

gedlibpy.restart_env()

All the graphs and results will be deleted, so make sure you don't need them.

Classic case with GXL graphs
------------------------------------
.. code-block:: python

gedlibpy.load_GXL_graphs('include/gedlib-master/data/datasets/Mutagenicity/data/', 'collections/MUTA_10.xml')
listID = gedlibpy.get_all_graph_ids()
gedlibpy.set_edit_cost("CHEM_1")

gedlibpy.init()

gedlibpy.set_method("IPFP", "")
gedlibpy.init_method()

g = listID[0]
h = listID[1]

gedlibpy.run_method(g,h)

print("Node Map : ", gedlibpy.get_node_map(g,h))
print ("Upper Bound = " + str(gedlibpy.get_upper_bound(g,h)) + ", Lower Bound = " + str(gedlibpy.get_lower_bound(g,h)) + ", Runtime = " + str(gedlibpy.get_runtime(g,h)))


You can also use this function :

.. code-block:: python

compute_edit_distance_on_GXl_graphs(path_folder, path_XML, edit_cost, method, options="", init_option = "EAGER_WITHOUT_SHUFFLED_COPIES")
This function computes the edit distance between every pair of graphs, including each graph with itself. You can see the result with some functions and the graph IDs. Please see the documentation of the function for more information.

Classic case with NX graphs
------------------------------------
.. code-block:: python

for graph in dataset :
gedlibpy.add_nx_graph(graph, classe)
listID = gedlibpy.get_all_graph_ids()
gedlibpy.set_edit_cost("CHEM_1")

gedlibpy.init()

gedlibpy.set_method("IPFP", "")
gedlibpy.init_method()

g = listID[0]
h = listID[1]

gedlibpy.run_method(g,h)

print("Node Map : ", gedlibpy.get_node_map(g,h))
print ("Upper Bound = " + str(gedlibpy.get_upper_bound(g,h)) + ", Lower Bound = " + str(gedlibpy.get_lower_bound(g,h)) + ", Runtime = " + str(gedlibpy.get_runtime(g,h)))

You can also use this function :

.. code-block:: python

compute_edit_distance_on_nx_graphs(dataset, classes, edit_cost, method, options, init_option = "EAGER_WITHOUT_SHUFFLED_COPIES")
This function computes the edit distance between every pair of graphs, including each graph with itself. You can see the result in the return value and with some functions and the graph IDs. Please see the documentation of the function for more information.

Or this function :

.. code-block:: python

compute_ged_on_two_graphs(g1,g2, edit_cost, method, options, init_option = "EAGER_WITHOUT_SHUFFLED_COPIES")

This function allows computing the edit distance for just two graphs. Please see the documentation of the function for more information.

Add a graph from scratch
------------------------------------
.. code-block:: python

currentID = gedlibpy.add_graph()
gedlibpy.add_node(currentID, "_1", {"chem" : "C"})
gedlibpy.add_node(currentID, "_2", {"chem" : "O"})
gedlibpy.add_edge(currentID,"_1", "_2", {"valence": "1"} )

Please make sure the types are the same (strings for IDs and a dictionary for labels). If you want a symmetrical graph, you can use this function to ensure the symmetry :

.. code-block:: python

add_symmetrical_edge(graph_id, tail, head, edge_label)

If you have a Nx structure, you can use directly this function :

.. code-block:: python

add_nx_graph(g, classe, ignore_duplicates=True)

Even if you have another structure, you can use this function :

.. code-block:: python
add_random_graph(name, classe, list_of_nodes, list_of_edges, ignore_duplicates=True)

Please read the documentation before using and respect the types.

Median computation
------------------------------------

An example is available in the Median_Example folder. It contains everything necessary to compute a median graph. You can launch xp-letter-gbr.py to compute the median graph of all letters in the dataset, or median.py for the letter Z.

To summarize the use, you can follow this example :

.. code-block:: python
import pygraph #Available with the median example
from median import draw_Letter_graph, compute_median, compute_median_set

gedlibpy.load_GXL_graphs('../include/gedlib-master/data/datasets/Letter/HIGH/', '../include/gedlib-master/data/collections/Letter_Z.xml')
gedlibpy.set_edit_cost("LETTER")
gedlibpy.init()
gedlibpy.set_method("IPFP", "")
gedlibpy.init_method()
listID = gedlibpy.get_all_graph_ids()

dataset,my_y = pygraph.utils.graphfiles.loadDataset("../include/gedlib-master/data/datasets/Letter/HIGH/Letter_Z.cxl")
median, sod, sods_path,set_median = compute_median(gedlibpy,listID,dataset,verbose=True)
draw_Letter_graph(median)

Please use the function in the median.py code to simplify your use. You can adapt this example to your case. Also, some function in the PythonGedLib module can make the work easier. Ask Benoît Gauzere if you want more information.

Hungarian algorithm
------------------------------------


LSAPE
~~~~~~

.. code-block:: python

result = gedlibpy.hungarian_LSAPE(matrixCost)
print("Rho = ", result[0], " Varrho = ", result[1], " u = ", result[2], " v = ", result[3])


LSAP
~~~~~~

.. code-block:: python

result = gedlibpy.hungarian_LSAP(matrixCost)
print("Rho = ", result[0], " Varrho = ", result[1], " u = ", result[2], " v = ", result[3])




+ 36
- 0
gklearn/gedlib/documentation/source/index.rst View File

@@ -0,0 +1,36 @@
.. Python_GedLib documentation master file, created by
sphinx-quickstart on Thu Jun 13 16:10:06 2019.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.


Welcome to GEDLIBPY's documentation!
=========================================

This module allows using a C++ library for edit distance between graphs (GedLib) from Python.

Before using, please read the first section to ensure a good start with the library. Then, you can follow some examples or information about each method.

.. toctree::
:maxdepth: 2
:caption: Contents:

readme
editcost
examples
doc

Authors
~~~~~~~~
* David Blumenthal for C++ module
* Natacha Lambert for Python module

Copyright (C) 2019 by all the authors

Indices and tables
~~~~~~~~~~~~~~~~~~~~~

* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`

+ 97
- 0
gklearn/gedlib/documentation/source/readme.rst View File

@@ -0,0 +1,97 @@
How to install this library
====================================

Please Read https://dbblumenthal.github.io/gedlib/ before using Python code.
You can also find this module documentation in documentation/build/html folder.

Make sure you have numpy installed (and Cython if you have to recompile the library). You can use pip for this.


Running the script
-------------------

After downloading the entire folder, you can run test.py to ensure the library works.

For your code, you have to make two imports::

import librariesImport
import gedlibpy

You can call each function in the library with this. Don't move any folder or file of the library; please make sure that the architecture remains the same.

This library is compiled for Python3 only. If you want to use it with Python 2, you have to recompile it with setup.py. You have to use this command on your favorite shell::

python setup.py build_ext --inplace

After this step, you can use the same lines as Python3 for import, it will be ok. Check the documentation inside the documentation/build/html folder before using function. You can also copy the tests examples for basic use.


A problem with the library ?
-------------------------------

If the library isn't found, you can recompile the Python library, because your Linux setup may differ from mine. Please delete gedlibpy.so, gedlibpy.cpp and the build folder. Then use this command in a Linux shell ::

python3 setup.py build_ext --inplace

You can make it with Python 2 but make sure you use the same version with your code and the compilation.

If it doesn't work, maybe the version of GedLib or another library is the problem. If it is, you can re-install GedLib for your computer. You can download it from this git repository : https://dbblumenthal.github.io/gedlib/

You have to install Gedlib with the Python installer after that.
Just call::

python3 install.py

Make the links as indicated in the documentation. Use the same architecture as this library, but just change the .so files and folders to match your installation. After that, you can recompile the Python library with the setup command.

If you use Mac OS, you have to follow all this part, and install the external libraries with this command::

install_name_tool -change <mylib> <path>/<to>/<mylib> <myexec>

For an example, you have to write these lines::

install_name_tool -change libdoublefann.2.dylib lib/fann/libdoublefann.2.dylib gedlibpy.so
install_name_tool -change libsvm.so lib/libsvm.3.22/libsvm.so gedlibpy.so
install_name_tool -change libnomad.so lib/nomad/libnomad.so gedlibpy.so
install_name_tool -change libsgtelib.so lib/nomad/libsgtelib.so gedlibpy.so

The name of the library gedlibpy can be different if you use Python 3.

If your problem is still here, you can contact me on : natacha.lambert@unicaen.fr

How to use this library
-------------------------

This library allows you to compute the edit distance between two graphs. You have to follow these steps to use it :

- Add your graphs (GXL files, NX Structures or your structure, make sure that the internal type is the same)
- Choose your cost function
- Initialize your environment (after that, the cost function and your graphs can't be modified)
- Choose your computation method
- Run the computation with the IDs of the two graphs. You can have the ID when you add the graph or with some functions
- Find the result with different functions (NodeMap, edit distance, etc.)

Here is an example of code with GXL graphs::

gedlibpy.load_GXL_graphs('include/gedlib-master/data/datasets/Mutagenicity/data/', 'collections/MUTA_10.xml')
listID = gedlibpy.get_all_graph_ids()
gedlibpy.set_edit_cost("CHEM_1")
gedlibpy.init()
gedlibpy.set_method("IPFP", "")
gedlibpy.init_method()
g = listID[0]
h = listID[1]

gedlibpy.run_method(g,h)

print("Node Map : ", gedlibpy.get_node_map(g,h))
print ("Upper Bound = " + str(gedlibpy.get_upper_bound(g,h)) + ", Lower Bound = " + str(gedlibpy.get_lower_bound(g,h)) + ", Runtime = " + str(gedlibpy.get_runtime(g,h)))


Please read the documentation for more examples and functions.


An advice if you don't code in a shell
---------------------------------------

The Python library doesn't surface every C++ error. If your program restarts because of an error in your code, please run it from a Linux shell to see the C++ errors.

+ 25510
- 0
gklearn/gedlib/gedlibpy.cpp
File diff suppressed because it is too large
View File


BIN
gklearn/gedlib/gedlibpy.cpython-36m-x86_64-linux-gnu.so View File


+ 1548
- 0
gklearn/gedlib/gedlibpy.pyx
File diff suppressed because it is too large
View File


+ 1
- 0
gklearn/gedlib/lib/fann/libdoublefann.so View File

@@ -0,0 +1 @@
libdoublefann.so.2

+ 1
- 0
gklearn/gedlib/lib/fann/libdoublefann.so.2 View File

@@ -0,0 +1 @@
libdoublefann.so.2.2.0

BIN
gklearn/gedlib/lib/fann/libdoublefann.so.2.2.0 View File


+ 1
- 0
gklearn/gedlib/lib/fann/libfann.so View File

@@ -0,0 +1 @@
libfann.so.2

+ 1
- 0
gklearn/gedlib/lib/fann/libfann.so.2 View File

@@ -0,0 +1 @@
libfann.so.2.2.0

BIN
gklearn/gedlib/lib/fann/libfann.so.2.2.0 View File


+ 1
- 0
gklearn/gedlib/lib/fann/libfixedfann.so View File

@@ -0,0 +1 @@
libfixedfann.so.2

+ 1
- 0
gklearn/gedlib/lib/fann/libfixedfann.so.2 View File

@@ -0,0 +1 @@
libfixedfann.so.2.2.0

BIN
gklearn/gedlib/lib/fann/libfixedfann.so.2.2.0 View File


+ 1
- 0
gklearn/gedlib/lib/fann/libfloatfann.so View File

@@ -0,0 +1 @@
libfloatfann.so.2

+ 1
- 0
gklearn/gedlib/lib/fann/libfloatfann.so.2 View File

@@ -0,0 +1 @@
libfloatfann.so.2.2.0

BIN
gklearn/gedlib/lib/fann/libfloatfann.so.2.2.0 View File


+ 10
- 0
gklearn/gedlib/lib/fann/pkgconfig/fann.pc View File

@@ -0,0 +1,10 @@
prefix=/export/home/lambertn/Documents/Python_GedLib/include/gedlib-master/ext/fann.2.2.0
exec_prefix=/export/home/lambertn/Documents/Python_GedLib/include/gedlib-master/ext/fann.2.2.0/bin
libdir=/export/home/lambertn/Documents/Python_GedLib/include/gedlib-master/ext/fann.2.2.0/lib
includedir=/export/home/lambertn/Documents/Python_GedLib/include/gedlib-master/ext/fann.2.2.0/include
Name: fann
Description: Fast Artificial Neural Network Library
Version: 2.2.0
Libs: -L${libdir} -lm -lfann
Cflags: -I${includedir}

+ 31
- 0
gklearn/gedlib/lib/libsvm.3.22/COPYRIGHT View File

@@ -0,0 +1,31 @@

Copyright (c) 2000-2014 Chih-Chung Chang and Chih-Jen Lin
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:

1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.

3. Neither name of copyright holders nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.


THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+ 2166
- 0
gklearn/gedlib/lib/libsvm.3.22/FAQ.html
File diff suppressed because it is too large
View File


+ 24
- 0
gklearn/gedlib/lib/libsvm.3.22/Makefile View File

@@ -0,0 +1,24 @@
CXX ?= g++
CFLAGS = -Wall -Wconversion -O3 -fPIC
OS = $(shell uname)

all: svm-train svm-predict svm-scale

lib: svm.o
if [ "$(OS)" = "Darwin" ]; then \
SHARED_LIB_FLAG="-dynamiclib -Wl,-install_name,libsvm.so"; \
else \
SHARED_LIB_FLAG="-shared -Wl,-soname,libsvm.so"; \
fi; \
$(CXX) $${SHARED_LIB_FLAG} svm.o -o libsvm.so

svm-predict: svm-predict.c svm.o
$(CXX) $(CFLAGS) svm-predict.c svm.o -o svm-predict -lm
svm-train: svm-train.c svm.o
$(CXX) $(CFLAGS) svm-train.c svm.o -o svm-train -lm
svm-scale: svm-scale.c
$(CXX) $(CFLAGS) svm-scale.c -o svm-scale
svm.o: svm.cpp svm.h
$(CXX) $(CFLAGS) -c svm.cpp
clean:
rm -f *~ svm.o svm-train svm-predict svm-scale libsvm.so

+ 33
- 0
gklearn/gedlib/lib/libsvm.3.22/Makefile.win View File

@@ -0,0 +1,33 @@
#You must ensure nmake.exe, cl.exe, link.exe are in system path.
#VCVARS64.bat
#Under dosbox prompt
#nmake -f Makefile.win

##########################################
CXX = cl.exe
CFLAGS = /nologo /O2 /EHsc /I. /D _WIN64 /D _CRT_SECURE_NO_DEPRECATE
TARGET = windows

all: $(TARGET)\svm-train.exe $(TARGET)\svm-predict.exe $(TARGET)\svm-scale.exe $(TARGET)\svm-toy.exe lib

$(TARGET)\svm-predict.exe: svm.h svm-predict.c svm.obj
$(CXX) $(CFLAGS) svm-predict.c svm.obj -Fe$(TARGET)\svm-predict.exe

$(TARGET)\svm-train.exe: svm.h svm-train.c svm.obj
$(CXX) $(CFLAGS) svm-train.c svm.obj -Fe$(TARGET)\svm-train.exe

$(TARGET)\svm-scale.exe: svm.h svm-scale.c
$(CXX) $(CFLAGS) svm-scale.c -Fe$(TARGET)\svm-scale.exe

$(TARGET)\svm-toy.exe: svm.h svm.obj svm-toy\windows\svm-toy.cpp
$(CXX) $(CFLAGS) svm-toy\windows\svm-toy.cpp svm.obj user32.lib gdi32.lib comdlg32.lib -Fe$(TARGET)\svm-toy.exe

svm.obj: svm.cpp svm.h
$(CXX) $(CFLAGS) -c svm.cpp

lib: svm.cpp svm.h svm.def
$(CXX) $(CFLAGS) -LD svm.cpp -Fe$(TARGET)\libsvm -link -DEF:svm.def

clean:
-erase /Q *.obj $(TARGET)\*.exe $(TARGET)\*.dll $(TARGET)\*.exp $(TARGET)\*.lib


+ 769
- 0
gklearn/gedlib/lib/libsvm.3.22/README View File

@@ -0,0 +1,769 @@
Libsvm is a simple, easy-to-use, and efficient software for SVM
classification and regression. It solves C-SVM classification, nu-SVM
classification, one-class-SVM, epsilon-SVM regression, and nu-SVM
regression. It also provides an automatic model selection tool for
C-SVM classification. This document explains the use of libsvm.

Libsvm is available at
http://www.csie.ntu.edu.tw/~cjlin/libsvm
Please read the COPYRIGHT file before using libsvm.

Table of Contents
=================

- Quick Start
- Installation and Data Format
- `svm-train' Usage
- `svm-predict' Usage
- `svm-scale' Usage
- Tips on Practical Use
- Examples
- Precomputed Kernels
- Library Usage
- Java Version
- Building Windows Binaries
- Additional Tools: Sub-sampling, Parameter Selection, Format checking, etc.
- MATLAB/OCTAVE Interface
- Python Interface
- Additional Information

Quick Start
===========

If you are new to SVM and if the data is not large, please go to
`tools' directory and use easy.py after installation. It does
everything automatic -- from data scaling to parameter selection.

Usage: easy.py training_file [testing_file]

More information about parameter selection can be found in
`tools/README.'

Installation and Data Format
============================

On Unix systems, type `make' to build the `svm-train' and `svm-predict'
programs. Run them without arguments to show the usages of them.

On other systems, consult `Makefile' to build them (e.g., see
'Building Windows binaries' in this file) or use the pre-built
binaries (Windows binaries are in the directory `windows').

The format of training and testing data file is:

<label> <index1>:<value1> <index2>:<value2> ...
.
.
.

Each line contains an instance and is ended by a '\n' character. For
classification, <label> is an integer indicating the class label
(multi-class is supported). For regression, <label> is the target
value which can be any real number. For one-class SVM, it's not used
so can be any number. The pair <index>:<value> gives a feature
(attribute) value: <index> is an integer starting from 1 and <value>
is a real number. The only exception is the precomputed kernel, where
<index> starts from 0; see the section of precomputed kernels. Indices
must be in ASCENDING order. Labels in the testing file are only used
to calculate accuracy or errors. If they are unknown, just fill the
first column with any numbers.

A sample classification data included in this package is
`heart_scale'. To check if your data is in a correct form, use
`tools/checkdata.py' (details in `tools/README').

Type `svm-train heart_scale', and the program will read the training
data and output the model file `heart_scale.model'. If you have a test
set called heart_scale.t, then type `svm-predict heart_scale.t
heart_scale.model output' to see the prediction accuracy. The `output'
file contains the predicted class labels.

For classification, if training data are in only one class (i.e., all
labels are the same), then `svm-train' issues a warning message:
`Warning: training data in only one class. See README for details,'
which means the training data is very unbalanced. The label in the
training data is directly returned when testing.

There are some other useful programs in this package.

svm-scale:

This is a tool for scaling input data file.

svm-toy:

This is a simple graphical interface which shows how SVM
separate data in a plane. You can click in the window to
draw data points. Use "change" button to choose class
1, 2 or 3 (i.e., up to three classes are supported), "load"
button to load data from a file, "save" button to save data to
a file, "run" button to obtain an SVM model, and "clear"
button to clear the window.

You can enter options in the bottom of the window, the syntax of
options is the same as `svm-train'.

Note that "load" and "save" consider dense data format both in
classification and the regression cases. For classification,
each data point has one label (the color) that must be 1, 2,
or 3 and two attributes (x-axis and y-axis values) in
[0,1). For regression, each data point has one target value
(y-axis) and one attribute (x-axis values) in [0, 1).

Type `make' in respective directories to build them.

You need Qt library to build the Qt version.
(available from http://www.trolltech.com)

You need GTK+ library to build the GTK version.
(available from http://www.gtk.org)
The pre-built Windows binaries are in the `windows'
directory. We use Visual C++ on a 64-bit machine.

`svm-train' Usage
=================

Usage: svm-train [options] training_set_file [model_file]
options:
-s svm_type : set type of SVM (default 0)
0 -- C-SVC (multi-class classification)
1 -- nu-SVC (multi-class classification)
2 -- one-class SVM
3 -- epsilon-SVR (regression)
4 -- nu-SVR (regression)
-t kernel_type : set type of kernel function (default 2)
0 -- linear: u'*v
1 -- polynomial: (gamma*u'*v + coef0)^degree
2 -- radial basis function: exp(-gamma*|u-v|^2)
3 -- sigmoid: tanh(gamma*u'*v + coef0)
4 -- precomputed kernel (kernel values in training_set_file)
-d degree : set degree in kernel function (default 3)
-g gamma : set gamma in kernel function (default 1/num_features)
-r coef0 : set coef0 in kernel function (default 0)
-c cost : set the parameter C of C-SVC, epsilon-SVR, and nu-SVR (default 1)
-n nu : set the parameter nu of nu-SVC, one-class SVM, and nu-SVR (default 0.5)
-p epsilon : set the epsilon in loss function of epsilon-SVR (default 0.1)
-m cachesize : set cache memory size in MB (default 100)
-e epsilon : set tolerance of termination criterion (default 0.001)
-h shrinking : whether to use the shrinking heuristics, 0 or 1 (default 1)
-b probability_estimates : whether to train a SVC or SVR model for probability estimates, 0 or 1 (default 0)
-wi weight : set the parameter C of class i to weight*C, for C-SVC (default 1)
-v n: n-fold cross validation mode
-q : quiet mode (no outputs)


The k in the -g option means the number of attributes in the input data.

option -v randomly splits the data into n parts and calculates cross
validation accuracy/mean squared error on them.

See libsvm FAQ for the meaning of outputs.

`svm-predict' Usage
===================

Usage: svm-predict [options] test_file model_file output_file
options:
-b probability_estimates: whether to predict probability estimates, 0 or 1 (default 0); for one-class SVM only 0 is supported

model_file is the model file generated by svm-train.
test_file is the test data you want to predict.
svm-predict will produce output in the output_file.

`svm-scale' Usage
=================

Usage: svm-scale [options] data_filename
options:
-l lower : x scaling lower limit (default -1)
-u upper : x scaling upper limit (default +1)
-y y_lower y_upper : y scaling limits (default: no y scaling)
-s save_filename : save scaling parameters to save_filename
-r restore_filename : restore scaling parameters from restore_filename

See 'Examples' in this file for examples.

Tips on Practical Use
=====================

* Scale your data. For example, scale each attribute to [0,1] or [-1,+1].
* For C-SVC, consider using the model selection tool in the tools directory.
* nu in nu-SVC/one-class-SVM/nu-SVR approximates the fraction of training
errors and support vectors.
* If data for classification are unbalanced (e.g. many positive and
few negative), try different penalty parameters C by -wi (see
examples below).
* Specify larger cache size (i.e., larger -m) for huge problems.

Examples
========

> svm-scale -l -1 -u 1 -s range train > train.scale
> svm-scale -r range test > test.scale

Scale each feature of the training data to be in [-1,1]. Scaling
factors are stored in the file range and then used for scaling the
test data.

> svm-train -s 0 -c 5 -t 2 -g 0.5 -e 0.1 data_file

Train a classifier with RBF kernel exp(-0.5|u-v|^2), C=10, and
stopping tolerance 0.1.

> svm-train -s 3 -p 0.1 -t 0 data_file

Solve SVM regression with linear kernel u'v and epsilon=0.1
in the loss function.

> svm-train -c 10 -w1 1 -w-2 5 -w4 2 data_file

Train a classifier with penalty 10 = 1 * 10 for class 1, penalty 50 =
5 * 10 for class -2, and penalty 20 = 2 * 10 for class 4.

> svm-train -s 0 -c 100 -g 0.1 -v 5 data_file

Do five-fold cross validation for the classifier using
the parameters C = 100 and gamma = 0.1

> svm-train -s 0 -b 1 data_file
> svm-predict -b 1 test_file data_file.model output_file

Obtain a model with probability information and predict test data with
probability estimates

Precomputed Kernels
===================

Users may precompute kernel values and input them as training and
testing files. Then libsvm does not need the original
training/testing sets.

Assume there are L training instances x1, ..., xL and.
Let K(x, y) be the kernel
value of two instances x and y. The input formats
are:

New training instance for xi:

<label> 0:i 1:K(xi,x1) ... L:K(xi,xL)

New testing instance for any x:

<label> 0:? 1:K(x,x1) ... L:K(x,xL)

That is, in the training file the first column must be the "ID" of
xi. In testing, ? can be any value.

All kernel values including ZEROs must be explicitly provided. Any
permutation or random subsets of the training/testing files are also
valid (see examples below).

Note: the format is slightly different from the precomputed kernel
package released in libsvmtools earlier.

Examples:

Assume the original training data has three four-feature
instances and testing data has one instance:

15 1:1 2:1 3:1 4:1
45 2:3 4:3
25 3:1

15 1:1 3:1

If the linear kernel is used, we have the following new
training/testing sets:

15 0:1 1:4 2:6 3:1
45 0:2 1:6 2:18 3:0
25 0:3 1:1 2:0 3:1
15 0:? 1:2 2:0 3:1

? can be any value.

Any subset of the above training file is also valid. For example,

25 0:3 1:1 2:0 3:1
45 0:2 1:6 2:18 3:0

implies that the kernel matrix is

[K(2,2) K(2,3)] = [18 0]
[K(3,2) K(3,3)] = [0 1]

Library Usage
=============

These functions and structures are declared in the header file
`svm.h'. You need to #include "svm.h" in your C/C++ source files and
link your program with `svm.cpp'. You can see `svm-train.c' and
`svm-predict.c' for examples showing how to use them. We define
LIBSVM_VERSION and declare `extern int libsvm_version; ' in svm.h, so
you can check the version number.

Before you classify test data, you need to construct an SVM model
(`svm_model') using training data. A model can also be saved in
a file for later use. Once an SVM model is available, you can use it
to classify new data.

- Function: struct svm_model *svm_train(const struct svm_problem *prob,
const struct svm_parameter *param);

This function constructs and returns an SVM model according to
the given training data and parameters.

struct svm_problem describes the problem:
struct svm_problem
{
int l;
double *y;
struct svm_node **x;
};
where `l' is the number of training data, and `y' is an array containing
their target values. (integers in classification, real numbers in
regression) `x' is an array of pointers, each of which points to a sparse
representation (array of svm_node) of one training vector.

For example, if we have the following training data:

LABEL ATTR1 ATTR2 ATTR3 ATTR4 ATTR5
----- ----- ----- ----- ----- -----
1 0 0.1 0.2 0 0
2 0 0.1 0.3 -1.2 0
1 0.4 0 0 0 0
2 0 0.1 0 1.4 0.5
3 -0.1 -0.2 0.1 1.1 0.1

then the components of svm_problem are:

l = 5

y -> 1 2 1 2 3

x -> [ ] -> (2,0.1) (3,0.2) (-1,?)
[ ] -> (2,0.1) (3,0.3) (4,-1.2) (-1,?)
[ ] -> (1,0.4) (-1,?)
[ ] -> (2,0.1) (4,1.4) (5,0.5) (-1,?)
[ ] -> (1,-0.1) (2,-0.2) (3,0.1) (4,1.1) (5,0.1) (-1,?)

where (index,value) is stored in the structure `svm_node':

struct svm_node
{
int index;
double value;
};

index = -1 indicates the end of one vector. Note that indices must
be in ASCENDING order.
struct svm_parameter describes the parameters of an SVM model:

struct svm_parameter
{
int svm_type;
int kernel_type;
int degree; /* for poly */
double gamma; /* for poly/rbf/sigmoid */
double coef0; /* for poly/sigmoid */

/* these are for training only */
double cache_size; /* in MB */
double eps; /* stopping criteria */
double C; /* for C_SVC, EPSILON_SVR, and NU_SVR */
int nr_weight; /* for C_SVC */
int *weight_label; /* for C_SVC */
double* weight; /* for C_SVC */
double nu; /* for NU_SVC, ONE_CLASS, and NU_SVR */
double p; /* for EPSILON_SVR */
int shrinking; /* use the shrinking heuristics */
int probability; /* do probability estimates */
};

svm_type can be one of C_SVC, NU_SVC, ONE_CLASS, EPSILON_SVR, NU_SVR.

C_SVC: C-SVM classification
NU_SVC: nu-SVM classification
ONE_CLASS: one-class-SVM
EPSILON_SVR: epsilon-SVM regression
NU_SVR: nu-SVM regression

kernel_type can be one of LINEAR, POLY, RBF, SIGMOID.

LINEAR: u'*v
POLY: (gamma*u'*v + coef0)^degree
RBF: exp(-gamma*|u-v|^2)
SIGMOID: tanh(gamma*u'*v + coef0)
PRECOMPUTED: kernel values in training_set_file

cache_size is the size of the kernel cache, specified in megabytes.
C is the cost of constraints violation.
eps is the stopping criterion. (we usually use 0.00001 in nu-SVC,
0.001 in others). nu is the parameter in nu-SVM, nu-SVR, and
one-class-SVM. p is the epsilon in epsilon-insensitive loss function
of epsilon-SVM regression. shrinking = 1 means shrinking is conducted;
= 0 otherwise. probability = 1 means model with probability
information is obtained; = 0 otherwise.

nr_weight, weight_label, and weight are used to change the penalty
for some classes (If the weight for a class is not changed, it is
set to 1). This is useful for training classifier using unbalanced
input data or with asymmetric misclassification cost.

nr_weight is the number of elements in the array weight_label and
weight. Each weight[i] corresponds to weight_label[i], meaning that
the penalty of class weight_label[i] is scaled by a factor of weight[i].
If you do not want to change penalty for any of the classes,
just set nr_weight to 0.

*NOTE* Because svm_model contains pointers to svm_problem, you can
not free the memory used by svm_problem if you are still using the
svm_model produced by svm_train().

*NOTE* To avoid wrong parameters, svm_check_parameter() should be
called before svm_train().

struct svm_model stores the model obtained from the training procedure.
It is not recommended to directly access entries in this structure.
Programmers should use the interface functions to get the values.

struct svm_model
{
struct svm_parameter param; /* parameter */
int nr_class; /* number of classes, = 2 in regression/one class svm */
int l; /* total #SV */
struct svm_node **SV; /* SVs (SV[l]) */
double **sv_coef; /* coefficients for SVs in decision functions (sv_coef[k-1][l]) */
double *rho; /* constants in decision functions (rho[k*(k-1)/2]) */
double *probA; /* pairwise probability information */
double *probB;
int *sv_indices; /* sv_indices[0,...,nSV-1] are values in [1,...,num_training_data] to indicate SVs in the training set */

/* for classification only */

int *label; /* label of each class (label[k]) */
int *nSV; /* number of SVs for each class (nSV[k]) */
/* nSV[0] + nSV[1] + ... + nSV[k-1] = l */
/* XXX */
int free_sv; /* 1 if svm_model is created by svm_load_model*/
/* 0 if svm_model is created by svm_train */
};

param describes the parameters used to obtain the model.

nr_class is the number of classes. It is 2 for regression and one-class SVM.

l is the number of support vectors. SV and sv_coef are support
vectors and the corresponding coefficients, respectively. Assume there are
k classes. For data in class j, the corresponding sv_coef includes (k-1) y*alpha vectors,
where alpha's are solutions of the following two class problems:
1 vs j, 2 vs j, ..., j-1 vs j, j vs j+1, j vs j+2, ..., j vs k
and y=1 for the first j-1 vectors, while y=-1 for the remaining k-j
vectors. For example, if there are 4 classes, sv_coef and SV are like:

+-+-+-+--------------------+
|1|1|1| |
|v|v|v| SVs from class 1 |
|2|3|4| |
+-+-+-+--------------------+
|1|2|2| |
|v|v|v| SVs from class 2 |
|2|3|4| |
+-+-+-+--------------------+
|1|2|3| |
|v|v|v| SVs from class 3 |
|3|3|4| |
+-+-+-+--------------------+
|1|2|3| |
|v|v|v| SVs from class 4 |
|4|4|4| |
+-+-+-+--------------------+

See svm_train() for an example of assigning values to sv_coef.

rho is the bias term (-b). probA and probB are parameters used in
probability outputs. If there are k classes, there are k*(k-1)/2
binary problems as well as rho, probA, and probB values. They are
aligned in the order of binary problems:
1 vs 2, 1 vs 3, ..., 1 vs k, 2 vs 3, ..., 2 vs k, ..., k-1 vs k.

sv_indices[0,...,nSV-1] are values in [1,...,num_training_data] to
indicate support vectors in the training set.

label contains labels in the training data.

nSV is the number of support vectors in each class.

free_sv is a flag used to determine whether the space of SV should
be released in free_model_content(struct svm_model*) and
free_and_destroy_model(struct svm_model**). If the model is
generated by svm_train(), then SV points to data in svm_problem
and should not be removed. For example, free_sv is 0 if svm_model
is created by svm_train, but is 1 if created by svm_load_model.

- Function: double svm_predict(const struct svm_model *model,
const struct svm_node *x);

This function does classification or regression on a test vector x
given a model.

For a classification model, the predicted class for x is returned.
For a regression model, the function value of x calculated using
the model is returned. For a one-class model, +1 or -1 is
returned.

- Function: void svm_cross_validation(const struct svm_problem *prob,
const struct svm_parameter *param, int nr_fold, double *target);

This function conducts cross validation. Data are separated to
nr_fold folds. Under given parameters, sequentially each fold is
validated using the model from training the remaining. Predicted
labels (of all prob's instances) in the validation process are
stored in the array called target.

The format of svm_prob is same as that for svm_train().

- Function: int svm_get_svm_type(const struct svm_model *model);

This function gives svm_type of the model. Possible values of
svm_type are defined in svm.h.

- Function: int svm_get_nr_class(const svm_model *model);

For a classification model, this function gives the number of
classes. For a regression or a one-class model, 2 is returned.

- Function: void svm_get_labels(const svm_model *model, int* label)
For a classification model, this function outputs the name of
labels into an array called label. For regression and one-class
models, label is unchanged.

- Function: void svm_get_sv_indices(const struct svm_model *model, int *sv_indices)

This function outputs indices of support vectors into an array called sv_indices.
The size of sv_indices is the number of support vectors and can be obtained by calling svm_get_nr_sv.
Each sv_indices[i] is in the range of [1, ..., num_training_data].

- Function: int svm_get_nr_sv(const struct svm_model *model)

This function gives the total number of support vectors.

- Function: double svm_get_svr_probability(const struct svm_model *model);

For a regression model with probability information, this function
outputs a value sigma > 0. For test data, we consider the
probability model: target value = predicted value + z, z: Laplace
distribution e^(-|z|/sigma)/(2sigma)

If the model is not for svr or does not contain required
information, 0 is returned.

- Function: double svm_predict_values(const svm_model *model,
const svm_node *x, double* dec_values)

This function gives decision values on a test vector x given a
model, and return the predicted label (classification) or
the function value (regression).

For a classification model with nr_class classes, this function
gives nr_class*(nr_class-1)/2 decision values in the array
dec_values, where nr_class can be obtained from the function
svm_get_nr_class. The order is label[0] vs. label[1], ...,
label[0] vs. label[nr_class-1], label[1] vs. label[2], ...,
label[nr_class-2] vs. label[nr_class-1], where label can be
obtained from the function svm_get_labels. The returned value is
the predicted class for x. Note that when nr_class = 1, this
function does not give any decision value.

For a regression model, dec_values[0] and the returned value are
both the function value of x calculated using the model. For a
one-class model, dec_values[0] is the decision value of x, while
the returned value is +1/-1.

- Function: double svm_predict_probability(const struct svm_model *model,
const struct svm_node *x, double* prob_estimates);
This function does classification or regression on a test vector x
given a model with probability information.

For a classification model with probability information, this
function gives nr_class probability estimates in the array
prob_estimates. nr_class can be obtained from the function
svm_get_nr_class. The class with the highest probability is
returned. For regression/one-class SVM, the array prob_estimates
is unchanged and the returned value is the same as that of
svm_predict.

- Function: const char *svm_check_parameter(const struct svm_problem *prob,
const struct svm_parameter *param);

This function checks whether the parameters are within the feasible
range of the problem. This function should be called before calling
svm_train() and svm_cross_validation(). It returns NULL if the
parameters are feasible, otherwise an error message is returned.

- Function: int svm_check_probability_model(const struct svm_model *model);

This function checks whether the model contains required
information to do probability estimates. If so, it returns
+1. Otherwise, 0 is returned. This function should be called
before calling svm_get_svr_probability and
svm_predict_probability.

- Function: int svm_save_model(const char *model_file_name,
const struct svm_model *model);

This function saves a model to a file; returns 0 on success, or -1
if an error occurs.

- Function: struct svm_model *svm_load_model(const char *model_file_name);

This function returns a pointer to the model read from the file,
or a null pointer if the model could not be loaded.

- Function: void svm_free_model_content(struct svm_model *model_ptr);

This function frees the memory used by the entries in a model structure.

- Function: void svm_free_and_destroy_model(struct svm_model **model_ptr_ptr);

This function frees the memory used by a model and destroys the model
structure. It is equivalent to svm_destroy_model, which
is deprecated after version 3.0.

- Function: void svm_destroy_param(struct svm_parameter *param);

This function frees the memory used by a parameter set.

- Function: void svm_set_print_string_function(void (*print_func)(const char *));

Users can specify their output format by a function. Use
svm_set_print_string_function(NULL);
for default printing to stdout.

Java Version
============

The pre-compiled java class archive `libsvm.jar' and its source files are
in the java directory. To run the programs, use

java -classpath libsvm.jar svm_train <arguments>
java -classpath libsvm.jar svm_predict <arguments>
java -classpath libsvm.jar svm_toy
java -classpath libsvm.jar svm_scale <arguments>

Note that you need Java 1.5 (5.0) or above to run it.

You may need to add Java runtime library (like classes.zip) to the classpath.
You may need to increase maximum Java heap size.

Library usages are similar to the C version. These functions are available:

public class svm {
public static final int LIBSVM_VERSION=322;
public static svm_model svm_train(svm_problem prob, svm_parameter param);
public static void svm_cross_validation(svm_problem prob, svm_parameter param, int nr_fold, double[] target);
public static int svm_get_svm_type(svm_model model);
public static int svm_get_nr_class(svm_model model);
public static void svm_get_labels(svm_model model, int[] label);
public static void svm_get_sv_indices(svm_model model, int[] indices);
public static int svm_get_nr_sv(svm_model model);
public static double svm_get_svr_probability(svm_model model);
public static double svm_predict_values(svm_model model, svm_node[] x, double[] dec_values);
public static double svm_predict(svm_model model, svm_node[] x);
public static double svm_predict_probability(svm_model model, svm_node[] x, double[] prob_estimates);
public static void svm_save_model(String model_file_name, svm_model model) throws IOException
public static svm_model svm_load_model(String model_file_name) throws IOException
public static String svm_check_parameter(svm_problem prob, svm_parameter param);
public static int svm_check_probability_model(svm_model model);
public static void svm_set_print_string_function(svm_print_interface print_func);
}

The library is in the "libsvm" package.
Note that in Java version, svm_node[] is not ended with a node whose index = -1.

Users can specify their output format by

your_print_func = new svm_print_interface()
{
public void print(String s)
{
// your own format
}
};
svm.svm_set_print_string_function(your_print_func);

Building Windows Binaries
=========================

Windows binaries are available in the directory `windows'. To re-build
them via Visual C++, use the following steps:

1. Open a DOS command box (or Visual Studio Command Prompt) and change
to libsvm directory. If environment variables of VC++ have not been
set, type

"C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\bin\amd64\vcvars64.bat"

You may have to modify the above command according to which version of
VC++ you have or where it is installed.

2. Type

nmake -f Makefile.win clean all

3. (optional) To build shared library libsvm.dll, type

nmake -f Makefile.win lib

4. (optional) To build 32-bit windows binaries, you must
(1) Setup "C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\bin\vcvars32.bat" instead of vcvars64.bat
(2) Change CFLAGS in Makefile.win: /D _WIN64 to /D _WIN32
Another way is to build them from Visual C++ environment. See details
in libsvm FAQ.

- Additional Tools: Sub-sampling, Parameter Selection, Format checking, etc.
============================================================================

See the README file in the tools directory.

MATLAB/OCTAVE Interface
=======================

Please check the file README in the directory `matlab'.

Python Interface
================

See the README file in python directory.

Additional Information
======================

If you find LIBSVM helpful, please cite it as

Chih-Chung Chang and Chih-Jen Lin, LIBSVM : a library for support
vector machines. ACM Transactions on Intelligent Systems and
Technology, 2:27:1--27:27, 2011. Software available at
http://www.csie.ntu.edu.tw/~cjlin/libsvm

LIBSVM implementation document is available at
http://www.csie.ntu.edu.tw/~cjlin/papers/libsvm.pdf

For any questions and comments, please email cjlin@csie.ntu.edu.tw

Acknowledgments:
This work was supported in part by the National Science
Council of Taiwan via the grant NSC 89-2213-E-002-013.
The authors thank their group members and users
for many helpful discussions and comments. They are listed in
http://www.csie.ntu.edu.tw/~cjlin/libsvm/acknowledgements


+ 270
- 0
gklearn/gedlib/lib/libsvm.3.22/heart_scale View File

@@ -0,0 +1,270 @@
+1 1:0.708333 2:1 3:1 4:-0.320755 5:-0.105023 6:-1 7:1 8:-0.419847 9:-1 10:-0.225806 12:1 13:-1
-1 1:0.583333 2:-1 3:0.333333 4:-0.603774 5:1 6:-1 7:1 8:0.358779 9:-1 10:-0.483871 12:-1 13:1
+1 1:0.166667 2:1 3:-0.333333 4:-0.433962 5:-0.383562 6:-1 7:-1 8:0.0687023 9:-1 10:-0.903226 11:-1 12:-1 13:1
-1 1:0.458333 2:1 3:1 4:-0.358491 5:-0.374429 6:-1 7:-1 8:-0.480916 9:1 10:-0.935484 12:-0.333333 13:1
-1 1:0.875 2:-1 3:-0.333333 4:-0.509434 5:-0.347032 6:-1 7:1 8:-0.236641 9:1 10:-0.935484 11:-1 12:-0.333333 13:-1
-1 1:0.5 2:1 3:1 4:-0.509434 5:-0.767123 6:-1 7:-1 8:0.0534351 9:-1 10:-0.870968 11:-1 12:-1 13:1
+1 1:0.125 2:1 3:0.333333 4:-0.320755 5:-0.406393 6:1 7:1 8:0.0839695 9:1 10:-0.806452 12:-0.333333 13:0.5
+1 1:0.25 2:1 3:1 4:-0.698113 5:-0.484018 6:-1 7:1 8:0.0839695 9:1 10:-0.612903 12:-0.333333 13:1
+1 1:0.291667 2:1 3:1 4:-0.132075 5:-0.237443 6:-1 7:1 8:0.51145 9:-1 10:-0.612903 12:0.333333 13:1
+1 1:0.416667 2:-1 3:1 4:0.0566038 5:0.283105 6:-1 7:1 8:0.267176 9:-1 10:0.290323 12:1 13:1
-1 1:0.25 2:1 3:1 4:-0.226415 5:-0.506849 6:-1 7:-1 8:0.374046 9:-1 10:-0.83871 12:-1 13:1
-1 2:1 3:1 4:-0.0943396 5:-0.543379 6:-1 7:1 8:-0.389313 9:1 10:-1 11:-1 12:-1 13:1
-1 1:-0.375 2:1 3:0.333333 4:-0.132075 5:-0.502283 6:-1 7:1 8:0.664122 9:-1 10:-1 11:-1 12:-1 13:-1
+1 1:0.333333 2:1 3:-1 4:-0.245283 5:-0.506849 6:-1 7:-1 8:0.129771 9:-1 10:-0.16129 12:0.333333 13:-1
-1 1:0.166667 2:-1 3:1 4:-0.358491 5:-0.191781 6:-1 7:1 8:0.343511 9:-1 10:-1 11:-1 12:-0.333333 13:-1
-1 1:0.75 2:-1 3:1 4:-0.660377 5:-0.894977 6:-1 7:-1 8:-0.175573 9:-1 10:-0.483871 12:-1 13:-1
+1 1:-0.291667 2:1 3:1 4:-0.132075 5:-0.155251 6:-1 7:-1 8:-0.251908 9:1 10:-0.419355 12:0.333333 13:1
+1 2:1 3:1 4:-0.132075 5:-0.648402 6:1 7:1 8:0.282443 9:1 11:1 12:-1 13:1
-1 1:0.458333 2:1 3:-1 4:-0.698113 5:-0.611872 6:-1 7:1 8:0.114504 9:1 10:-0.419355 12:-1 13:-1
-1 1:-0.541667 2:1 3:-1 4:-0.132075 5:-0.666667 6:-1 7:-1 8:0.633588 9:1 10:-0.548387 11:-1 12:-1 13:1
+1 1:0.583333 2:1 3:1 4:-0.509434 5:-0.52968 6:-1 7:1 8:-0.114504 9:1 10:-0.16129 12:0.333333 13:1
-1 1:-0.208333 2:1 3:-0.333333 4:-0.320755 5:-0.456621 6:-1 7:1 8:0.664122 9:-1 10:-0.935484 12:-1 13:-1
-1 1:-0.416667 2:1 3:1 4:-0.603774 5:-0.191781 6:-1 7:-1 8:0.679389 9:-1 10:-0.612903 12:-1 13:-1
-1 1:-0.25 2:1 3:1 4:-0.660377 5:-0.643836 6:-1 7:-1 8:0.0992366 9:-1 10:-0.967742 11:-1 12:-1 13:-1
-1 1:0.0416667 2:-1 3:-0.333333 4:-0.283019 5:-0.260274 6:1 7:1 8:0.343511 9:1 10:-1 11:-1 12:-0.333333 13:-1
-1 1:-0.208333 2:-1 3:0.333333 4:-0.320755 5:-0.319635 6:-1 7:-1 8:0.0381679 9:-1 10:-0.935484 11:-1 12:-1 13:-1
-1 1:-0.291667 2:-1 3:1 4:-0.169811 5:-0.465753 6:-1 7:1 8:0.236641 9:1 10:-1 12:-1 13:-1
-1 1:-0.0833333 2:-1 3:0.333333 4:-0.509434 5:-0.228311 6:-1 7:1 8:0.312977 9:-1 10:-0.806452 11:-1 12:-1 13:-1
+1 1:0.208333 2:1 3:0.333333 4:-0.660377 5:-0.525114 6:-1 7:1 8:0.435115 9:-1 10:-0.193548 12:-0.333333 13:1
-1 1:0.75 2:-1 3:0.333333 4:-0.698113 5:-0.365297 6:1 7:1 8:-0.0992366 9:-1 10:-1 11:-1 12:-0.333333 13:-1
+1 1:0.166667 2:1 3:0.333333 4:-0.358491 5:-0.52968 6:-1 7:1 8:0.206107 9:-1 10:-0.870968 12:-0.333333 13:1
-1 1:0.541667 2:1 3:1 4:0.245283 5:-0.534247 6:-1 7:1 8:0.0229008 9:-1 10:-0.258065 11:-1 12:-1 13:0.5
-1 1:-0.666667 2:-1 3:0.333333 4:-0.509434 5:-0.593607 6:-1 7:-1 8:0.51145 9:-1 10:-1 11:-1 12:-1 13:-1
+1 1:0.25 2:1 3:1 4:0.433962 5:-0.086758 6:-1 7:1 8:0.0534351 9:1 10:0.0967742 11:1 12:-1 13:1
+1 1:-0.125 2:1 3:1 4:-0.0566038 5:-0.6621 6:-1 7:1 8:-0.160305 9:1 10:-0.709677 12:-1 13:1
+1 1:-0.208333 2:1 3:1 4:-0.320755 5:-0.406393 6:1 7:1 8:0.206107 9:1 10:-1 11:-1 12:0.333333 13:1
+1 1:0.333333 2:1 3:1 4:-0.132075 5:-0.630137 6:-1 7:1 8:0.0229008 9:1 10:-0.387097 11:-1 12:-0.333333 13:1
+1 1:0.25 2:1 3:-1 4:0.245283 5:-0.328767 6:-1 7:1 8:-0.175573 9:-1 10:-1 11:-1 12:-1 13:-1
-1 1:-0.458333 2:1 3:0.333333 4:-0.320755 5:-0.753425 6:-1 7:-1 8:0.206107 9:-1 10:-1 11:-1 12:-1 13:-1
-1 1:-0.208333 2:1 3:1 4:-0.471698 5:-0.561644 6:-1 7:1 8:0.755725 9:-1 10:-1 11:-1 12:-1 13:-1
+1 1:-0.541667 2:1 3:1 4:0.0943396 5:-0.557078 6:-1 7:-1 8:0.679389 9:-1 10:-1 11:-1 12:-1 13:1
-1 1:0.375 2:-1 3:1 4:-0.433962 5:-0.621005 6:-1 7:-1 8:0.40458 9:-1 10:-1 11:-1 12:-1 13:-1
-1 1:-0.375 2:1 3:0.333333 4:-0.320755 5:-0.511416 6:-1 7:-1 8:0.648855 9:1 10:-0.870968 11:-1 12:-1 13:-1
-1 1:-0.291667 2:1 3:-0.333333 4:-0.867925 5:-0.675799 6:1 7:-1 8:0.29771 9:-1 10:-1 11:-1 12:-1 13:1
+1 1:0.25 2:1 3:0.333333 4:-0.396226 5:-0.579909 6:1 7:-1 8:-0.0381679 9:-1 10:-0.290323 12:-0.333333 13:0.5
-1 1:0.208333 2:1 3:0.333333 4:-0.132075 5:-0.611872 6:1 7:1 8:0.435115 9:-1 10:-1 11:-1 12:-1 13:-1
+1 1:-0.166667 2:1 3:0.333333 4:-0.54717 5:-0.894977 6:-1 7:1 8:-0.160305 9:-1 10:-0.741935 11:-1 12:1 13:-1
+1 1:-0.375 2:1 3:1 4:-0.698113 5:-0.675799 6:-1 7:1 8:0.618321 9:-1 10:-1 11:-1 12:-0.333333 13:-1
+1 1:0.541667 2:1 3:-0.333333 4:0.245283 5:-0.452055 6:-1 7:-1 8:-0.251908 9:1 10:-1 12:1 13:0.5
+1 1:0.5 2:-1 3:1 4:0.0566038 5:-0.547945 6:-1 7:1 8:-0.343511 9:-1 10:-0.677419 12:1 13:1
+1 1:-0.458333 2:1 3:1 4:-0.207547 5:-0.136986 6:-1 7:-1 8:-0.175573 9:1 10:-0.419355 12:-1 13:0.5
-1 1:-0.0416667 2:1 3:-0.333333 4:-0.358491 5:-0.639269 6:1 7:-1 8:0.725191 9:-1 10:-1 11:-1 12:-1 13:-1
-1 1:0.5 2:-1 3:0.333333 4:-0.132075 5:0.328767 6:1 7:1 8:0.312977 9:-1 10:-0.741935 11:-1 12:-0.333333 13:-1
-1 1:0.416667 2:-1 3:-0.333333 4:-0.132075 5:-0.684932 6:-1 7:-1 8:0.648855 9:-1 10:-1 11:-1 12:0.333333 13:-1
-1 1:-0.333333 2:-1 3:-0.333333 4:-0.320755 5:-0.506849 6:-1 7:1 8:0.587786 9:-1 10:-0.806452 12:-1 13:-1
-1 1:-0.5 2:-1 3:-0.333333 4:-0.792453 5:-0.671233 6:-1 7:-1 8:0.480916 9:-1 10:-1 11:-1 12:-0.333333 13:-1
+1 1:0.333333 2:1 3:1 4:-0.169811 5:-0.817352 6:-1 7:1 8:-0.175573 9:1 10:0.16129 12:-0.333333 13:-1
-1 1:0.291667 2:-1 3:0.333333 4:-0.509434 5:-0.762557 6:1 7:-1 8:-0.618321 9:-1 10:-1 11:-1 12:-1 13:-1
+1 1:0.25 2:-1 3:1 4:0.509434 5:-0.438356 6:-1 7:-1 8:0.0992366 9:1 10:-1 12:-1 13:-1
+1 1:0.375 2:1 3:-0.333333 4:-0.509434 5:-0.292237 6:-1 7:1 8:-0.51145 9:-1 10:-0.548387 12:-0.333333 13:1
-1 1:0.166667 2:1 3:0.333333 4:0.0566038 5:-1 6:1 7:-1 8:0.557252 9:-1 10:-0.935484 11:-1 12:-0.333333 13:1
+1 1:-0.0833333 2:-1 3:1 4:-0.320755 5:-0.182648 6:-1 7:-1 8:0.0839695 9:1 10:-0.612903 12:-1 13:1
-1 1:-0.375 2:1 3:0.333333 4:-0.509434 5:-0.543379 6:-1 7:-1 8:0.496183 9:-1 10:-1 11:-1 12:-1 13:-1
-1 1:0.291667 2:-1 3:-1 4:0.0566038 5:-0.479452 6:-1 7:-1 8:0.526718 9:-1 10:-0.709677 11:-1 12:-1 13:-1
-1 1:0.416667 2:1 3:-1 4:-0.0377358 5:-0.511416 6:1 7:1 8:0.206107 9:-1 10:-0.258065 11:1 12:-1 13:0.5
+1 1:0.166667 2:1 3:1 4:0.0566038 5:-0.315068 6:-1 7:1 8:-0.374046 9:1 10:-0.806452 12:-0.333333 13:0.5
-1 1:-0.0833333 2:1 3:1 4:-0.132075 5:-0.383562 6:-1 7:1 8:0.755725 9:1 10:-1 11:-1 12:-1 13:-1
+1 1:0.208333 2:-1 3:-0.333333 4:-0.207547 5:-0.118721 6:1 7:1 8:0.236641 9:-1 10:-1 11:-1 12:0.333333 13:-1
-1 1:-0.375 2:-1 3:0.333333 4:-0.54717 5:-0.47032 6:-1 7:-1 8:0.19084 9:-1 10:-0.903226 12:-0.333333 13:-1
+1 1:-0.25 2:1 3:0.333333 4:-0.735849 5:-0.465753 6:-1 7:-1 8:0.236641 9:-1 10:-1 11:-1 12:-1 13:-1
+1 1:0.333333 2:1 3:1 4:-0.509434 5:-0.388128 6:-1 7:-1 8:0.0534351 9:1 10:0.16129 12:-0.333333 13:1
-1 1:0.166667 2:-1 3:1 4:-0.509434 5:0.0410959 6:-1 7:-1 8:0.40458 9:1 10:-0.806452 11:-1 12:-1 13:-1
-1 1:0.708333 2:1 3:-0.333333 4:0.169811 5:-0.456621 6:-1 7:1 8:0.0992366 9:-1 10:-1 11:-1 12:-1 13:-1
-1 1:0.958333 2:-1 3:0.333333 4:-0.132075 5:-0.675799 6:-1 8:-0.312977 9:-1 10:-0.645161 12:-1 13:-1
-1 1:0.583333 2:-1 3:1 4:-0.773585 5:-0.557078 6:-1 7:-1 8:0.0839695 9:-1 10:-0.903226 11:-1 12:0.333333 13:-1
+1 1:-0.333333 2:1 3:1 4:-0.0943396 5:-0.164384 6:-1 7:1 8:0.160305 9:1 10:-1 12:1 13:1
-1 1:-0.333333 2:1 3:1 4:-0.811321 5:-0.625571 6:-1 7:1 8:0.175573 9:1 10:-0.0322581 12:-1 13:-1
-1 1:-0.583333 2:-1 3:0.333333 4:-1 5:-0.666667 6:-1 7:-1 8:0.648855 9:-1 10:-1 11:-1 12:-1 13:-1
-1 1:-0.458333 2:-1 3:0.333333 4:-0.509434 5:-0.621005 6:-1 7:-1 8:0.557252 9:-1 10:-1 12:-1 13:-1
-1 1:0.125 2:1 3:-0.333333 4:-0.509434 5:-0.497717 6:-1 7:-1 8:0.633588 9:-1 10:-0.741935 11:-1 12:-1 13:-1
+1 1:0.208333 2:1 3:1 4:-0.0188679 5:-0.579909 6:-1 7:-1 8:-0.480916 9:-1 10:-0.354839 12:-0.333333 13:1
+1 1:-0.75 2:1 3:1 4:-0.509434 5:-0.671233 6:-1 7:-1 8:-0.0992366 9:1 10:-0.483871 12:-1 13:1
+1 1:0.208333 2:1 3:1 4:0.0566038 5:-0.342466 6:-1 7:1 8:-0.389313 9:1 10:-0.741935 11:-1 12:-1 13:1
-1 1:-0.5 2:1 3:0.333333 4:-0.320755 5:-0.598174 6:-1 7:1 8:0.480916 9:-1 10:-0.354839 12:-1 13:-1
-1 1:0.166667 2:1 3:1 4:-0.698113 5:-0.657534 6:-1 7:-1 8:-0.160305 9:1 10:-0.516129 12:-1 13:0.5
-1 1:-0.458333 2:1 3:-1 4:0.0188679 5:-0.461187 6:-1 7:1 8:0.633588 9:-1 10:-0.741935 11:-1 12:0.333333 13:-1
-1 1:0.375 2:1 3:-0.333333 4:-0.358491 5:-0.625571 6:1 7:1 8:0.0534351 9:-1 10:-1 11:-1 12:-1 13:-1
-1 1:0.25 2:1 3:-1 4:0.584906 5:-0.342466 6:-1 7:1 8:0.129771 9:-1 10:0.354839 11:1 12:-1 13:1
-1 1:-0.5 2:-1 3:-0.333333 4:-0.396226 5:-0.178082 6:-1 7:-1 8:0.40458 9:-1 10:-1 11:-1 12:-1 13:-1
+1 1:-0.125 2:1 3:1 4:0.0566038 5:-0.465753 6:-1 7:1 8:-0.129771 9:-1 10:-0.16129 12:-1 13:1
-1 1:0.25 2:1 3:-0.333333 4:-0.132075 5:-0.56621 6:-1 7:-1 8:0.419847 9:1 10:-1 11:-1 12:-1 13:-1
+1 1:0.333333 2:-1 3:1 4:-0.320755 5:-0.0684932 6:-1 7:1 8:0.496183 9:-1 10:-1 11:-1 12:-1 13:-1
+1 1:0.0416667 2:1 3:1 4:-0.433962 5:-0.360731 6:-1 7:1 8:-0.419847 9:1 10:-0.290323 12:-0.333333 13:1
+1 1:0.0416667 2:1 3:1 4:-0.698113 5:-0.634703 6:-1 7:1 8:-0.435115 9:1 10:-1 12:-0.333333 13:-1
+1 1:-0.0416667 2:1 3:1 4:-0.415094 5:-0.607306 6:-1 7:-1 8:0.480916 9:-1 10:-0.677419 11:-1 12:0.333333 13:1
+1 1:-0.25 2:1 3:1 4:-0.698113 5:-0.319635 6:-1 7:1 8:-0.282443 9:1 10:-0.677419 12:-0.333333 13:-1
-1 1:0.541667 2:1 3:1 4:-0.509434 5:-0.196347 6:-1 7:1 8:0.221374 9:-1 10:-0.870968 12:-1 13:-1
+1 1:0.208333 2:1 3:1 4:-0.886792 5:-0.506849 6:-1 7:-1 8:0.29771 9:-1 10:-0.967742 11:-1 12:-0.333333 13:1
-1 1:0.458333 2:-1 3:0.333333 4:-0.132075 5:-0.146119 6:-1 7:-1 8:-0.0534351 9:-1 10:-0.935484 11:-1 12:-1 13:1
-1 1:-0.125 2:-1 3:-0.333333 4:-0.509434 5:-0.461187 6:-1 7:-1 8:0.389313 9:-1 10:-0.645161 11:-1 12:-1 13:-1
-1 1:-0.375 2:-1 3:0.333333 4:-0.735849 5:-0.931507 6:-1 7:-1 8:0.587786 9:-1 10:-0.806452 12:-1 13:-1
+1 1:0.583333 2:1 3:1 4:-0.509434 5:-0.493151 6:-1 7:-1 8:-1 9:-1 10:-0.677419 12:-1 13:-1
-1 1:-0.166667 2:-1 3:1 4:-0.320755 5:-0.347032 6:-1 7:-1 8:0.40458 9:-1 10:-1 11:-1 12:-1 13:-1
+1 1:0.166667 2:1 3:1 4:0.339623 5:-0.255708 6:1 7:1 8:-0.19084 9:-1 10:-0.677419 12:1 13:1
+1 1:0.416667 2:1 3:1 4:-0.320755 5:-0.415525 6:-1 7:1 8:0.160305 9:-1 10:-0.548387 12:-0.333333 13:1
+1 1:-0.208333 2:1 3:1 4:-0.433962 5:-0.324201 6:-1 7:1 8:0.450382 9:-1 10:-0.83871 12:-1 13:1
-1 1:-0.0833333 2:1 3:0.333333 4:-0.886792 5:-0.561644 6:-1 7:-1 8:0.0992366 9:1 10:-0.612903 12:-1 13:-1
+1 1:0.291667 2:-1 3:1 4:0.0566038 5:-0.39726 6:-1 7:1 8:0.312977 9:-1 10:-0.16129 12:0.333333 13:1
+1 1:0.25 2:1 3:1 4:-0.132075 5:-0.767123 6:-1 7:-1 8:0.389313 9:1 10:-1 11:-1 12:-0.333333 13:1
-1 1:-0.333333 2:-1 3:-0.333333 4:-0.660377 5:-0.844749 6:-1 7:-1 8:0.0229008 9:-1 10:-1 12:-1 13:-1
+1 1:0.0833333 2:-1 3:1 4:0.622642 5:-0.0821918 6:-1 8:-0.29771 9:1 10:0.0967742 12:-1 13:-1
-1 1:-0.5 2:1 3:-0.333333 4:-0.698113 5:-0.502283 6:-1 7:-1 8:0.251908 9:-1 10:-1 11:-1 12:-1 13:-1
+1 1:0.291667 2:-1 3:1 4:0.207547 5:-0.182648 6:-1 7:1 8:0.374046 9:-1 10:-1 11:-1 12:-1 13:-1
-1 1:0.0416667 2:-1 3:0.333333 4:-0.226415 5:-0.187215 6:1 7:-1 8:0.51145 9:-1 10:-1 11:-1 12:-1 13:-1
-1 1:-0.458333 2:1 3:-0.333333 4:-0.509434 5:-0.228311 6:-1 7:-1 8:0.389313 9:-1 10:-1 11:-1 12:-1 13:-1
-1 1:-0.166667 2:-1 3:-0.333333 4:-0.245283 5:-0.3379 6:-1 7:-1 8:0.389313 9:-1 10:-1 12:-1 13:-1
+1 1:-0.291667 2:1 3:1 4:-0.509434 5:-0.438356 6:-1 7:1 8:0.114504 9:-1 10:-0.741935 11:-1 12:-1 13:1
+1 1:0.125 2:-1 3:1 4:1 5:-0.260274 6:1 7:1 8:-0.0534351 9:1 10:0.290323 11:1 12:0.333333 13:1
-1 1:0.541667 2:-1 3:-1 4:0.0566038 5:-0.543379 6:-1 7:-1 8:-0.343511 9:-1 10:-0.16129 11:1 12:-1 13:-1
+1 1:0.125 2:1 3:1 4:-0.320755 5:-0.283105 6:1 7:1 8:-0.51145 9:1 10:-0.483871 11:1 12:-1 13:1
+1 1:-0.166667 2:1 3:0.333333 4:-0.509434 5:-0.716895 6:-1 7:-1 8:0.0381679 9:-1 10:-0.354839 12:1 13:1
+1 1:0.0416667 2:1 3:1 4:-0.471698 5:-0.269406 6:-1 7:1 8:-0.312977 9:1 10:0.0322581 12:0.333333 13:-1
+1 1:0.166667 2:1 3:1 4:0.0943396 5:-0.324201 6:-1 7:-1 8:-0.740458 9:1 10:-0.612903 12:-0.333333 13:1
-1 1:0.5 2:-1 3:0.333333 4:0.245283 5:0.0684932 6:-1 7:1 8:0.221374 9:-1 10:-0.741935 11:-1 12:-1 13:-1
-1 1:0.0416667 2:1 3:0.333333 4:-0.415094 5:-0.328767 6:-1 7:1 8:0.236641 9:-1 10:-0.83871 11:1 12:-0.333333 13:-1
-1 1:0.0416667 2:-1 3:0.333333 4:0.245283 5:-0.657534 6:-1 7:-1 8:0.40458 9:-1 10:-1 11:-1 12:-0.333333 13:-1
+1 1:0.375 2:1 3:1 4:-0.509434 5:-0.356164 6:-1 7:-1 8:-0.572519 9:1 10:-0.419355 12:0.333333 13:1
-1 1:-0.0416667 2:-1 3:0.333333 4:-0.207547 5:-0.680365 6:-1 7:1 8:0.496183 9:-1 10:-0.967742 12:-1 13:-1
-1 1:-0.0416667 2:1 3:-0.333333 4:-0.245283 5:-0.657534 6:-1 7:-1 8:0.328244 9:-1 10:-0.741935 11:-1 12:-0.333333 13:-1
+1 1:0.291667 2:1 3:1 4:-0.566038 5:-0.525114 6:1 7:-1 8:0.358779 9:1 10:-0.548387 11:-1 12:0.333333 13:1
+1 1:0.416667 2:-1 3:1 4:-0.735849 5:-0.347032 6:-1 7:-1 8:0.496183 9:1 10:-0.419355 12:0.333333 13:-1
+1 1:0.541667 2:1 3:1 4:-0.660377 5:-0.607306 6:-1 7:1 8:-0.0687023 9:1 10:-0.967742 11:-1 12:-0.333333 13:-1
-1 1:-0.458333 2:1 3:1 4:-0.132075 5:-0.543379 6:-1 7:-1 8:0.633588 9:-1 10:-1 11:-1 12:-1 13:-1
+1 1:0.458333 2:1 3:1 4:-0.509434 5:-0.452055 6:-1 7:1 8:-0.618321 9:1 10:-0.290323 11:1 12:-0.333333 13:-1
-1 1:0.0416667 2:1 3:0.333333 4:0.0566038 5:-0.515982 6:-1 7:1 8:0.435115 9:-1 10:-0.483871 11:-1 12:-1 13:1
-1 1:-0.291667 2:-1 3:0.333333 4:-0.0943396 5:-0.767123 6:-1 7:1 8:0.358779 9:1 10:-0.548387 11:1 12:-1 13:-1
-1 1:0.583333 2:-1 3:0.333333 4:0.0943396 5:-0.310502 6:-1 7:-1 8:0.541985 9:-1 10:-1 11:-1 12:-0.333333 13:-1
+1 1:0.125 2:1 3:1 4:-0.415094 5:-0.438356 6:1 7:1 8:0.114504 9:1 10:-0.612903 12:-0.333333 13:-1
-1 1:-0.791667 2:-1 3:-0.333333 4:-0.54717 5:-0.616438 6:-1 7:-1 8:0.847328 9:-1 10:-0.774194 11:-1 12:-1 13:-1
-1 1:0.166667 2:1 3:1 4:-0.283019 5:-0.630137 6:-1 7:-1 8:0.480916 9:1 10:-1 11:-1 12:-1 13:1
+1 1:0.458333 2:1 3:1 4:-0.0377358 5:-0.607306 6:-1 7:1 8:-0.0687023 9:-1 10:-0.354839 12:0.333333 13:0.5
-1 1:0.25 2:1 3:1 4:-0.169811 5:-0.3379 6:-1 7:1 8:0.694656 9:-1 10:-1 11:-1 12:-1 13:-1
+1 1:-0.125 2:1 3:0.333333 4:-0.132075 5:-0.511416 6:-1 7:-1 8:0.40458 9:-1 10:-0.806452 12:-0.333333 13:1
-1 1:-0.0833333 2:1 3:-1 4:-0.415094 5:-0.60274 6:-1 7:1 8:-0.175573 9:1 10:-0.548387 11:-1 12:-0.333333 13:-1
+1 1:0.0416667 2:1 3:-0.333333 4:0.849057 5:-0.283105 6:-1 7:1 8:0.89313 9:-1 10:-1 11:-1 12:-0.333333 13:1
+1 2:1 3:1 4:-0.45283 5:-0.287671 6:-1 7:-1 8:-0.633588 9:1 10:-0.354839 12:0.333333 13:1
+1 1:-0.0416667 2:1 3:1 4:-0.660377 5:-0.525114 6:-1 7:-1 8:0.358779 9:-1 10:-1 11:-1 12:-0.333333 13:-1
+1 1:-0.541667 2:1 3:1 4:-0.698113 5:-0.812785 6:-1 7:1 8:-0.343511 9:1 10:-0.354839 12:-1 13:1
+1 1:0.208333 2:1 3:0.333333 4:-0.283019 5:-0.552511 6:-1 7:1 8:0.557252 9:-1 10:0.0322581 11:-1 12:0.333333 13:1
-1 1:-0.5 2:-1 3:0.333333 4:-0.660377 5:-0.351598 6:-1 7:1 8:0.541985 9:1 10:-1 11:-1 12:-1 13:-1
-1 1:-0.5 2:1 3:0.333333 4:-0.660377 5:-0.43379 6:-1 7:-1 8:0.648855 9:-1 10:-1 11:-1 12:-1 13:-1
-1 1:-0.125 2:-1 3:0.333333 4:-0.509434 5:-0.575342 6:-1 7:-1 8:0.328244 9:-1 10:-0.483871 12:-1 13:-1
-1 1:0.0416667 2:-1 3:0.333333 4:-0.735849 5:-0.356164 6:-1 7:1 8:0.465649 9:-1 10:-1 11:-1 12:-1 13:-1
-1 1:0.458333 2:-1 3:1 4:-0.320755 5:-0.191781 6:-1 7:-1 8:-0.221374 9:-1 10:-0.354839 12:0.333333 13:-1
-1 1:-0.0833333 2:-1 3:0.333333 4:-0.320755 5:-0.406393 6:-1 7:1 8:0.19084 9:-1 10:-0.83871 11:-1 12:-1 13:-1
-1 1:-0.291667 2:-1 3:-0.333333 4:-0.792453 5:-0.643836 6:-1 7:-1 8:0.541985 9:-1 10:-1 11:-1 12:-1 13:-1
+1 1:0.0833333 2:1 3:1 4:-0.132075 5:-0.584475 6:-1 7:-1 8:-0.389313 9:1 10:0.806452 11:1 12:-1 13:1
-1 1:-0.333333 2:1 3:-0.333333 4:-0.358491 5:-0.16895 6:-1 7:1 8:0.51145 9:-1 10:-1 11:-1 12:-1 13:-1
-1 1:0.125 2:1 3:-1 4:-0.509434 5:-0.694064 6:-1 7:1 8:0.389313 9:-1 10:-0.387097 12:-1 13:1
+1 1:0.541667 2:-1 3:1 4:0.584906 5:-0.534247 6:1 7:-1 8:0.435115 9:1 10:-0.677419 12:0.333333 13:1
+1 1:-0.625 2:1 3:-1 4:-0.509434 5:-0.520548 6:-1 7:-1 8:0.694656 9:1 10:0.225806 12:-1 13:1
+1 1:0.375 2:-1 3:1 4:0.0566038 5:-0.461187 6:-1 7:-1 8:0.267176 9:1 10:-0.548387 12:-1 13:-1
-1 1:0.0833333 2:1 3:-0.333333 4:-0.320755 5:-0.378995 6:-1 7:-1 8:0.282443 9:-1 10:-1 11:-1 12:-1 13:-1
+1 1:0.208333 2:1 3:1 4:-0.358491 5:-0.392694 6:-1 7:1 8:-0.0992366 9:1 10:-0.0322581 12:0.333333 13:1
-1 1:-0.416667 2:1 3:1 4:-0.698113 5:-0.611872 6:-1 7:-1 8:0.374046 9:-1 10:-1 11:-1 12:-1 13:1
-1 1:0.458333 2:-1 3:1 4:0.622642 5:-0.0913242 6:-1 7:-1 8:0.267176 9:1 10:-1 11:-1 12:-1 13:-1
-1 1:-0.125 2:-1 3:1 4:-0.698113 5:-0.415525 6:-1 7:1 8:0.343511 9:-1 10:-1 11:-1 12:-1 13:-1
-1 2:1 3:0.333333 4:-0.320755 5:-0.675799 6:1 7:1 8:0.236641 9:-1 10:-0.612903 11:1 12:-1 13:-1
-1 1:-0.333333 2:-1 3:1 4:-0.169811 5:-0.497717 6:-1 7:1 8:0.236641 9:1 10:-0.935484 12:-1 13:-1
+1 1:0.5 2:1 3:-1 4:-0.169811 5:-0.287671 6:1 7:1 8:0.572519 9:-1 10:-0.548387 12:-0.333333 13:-1
-1 1:0.666667 2:1 3:-1 4:0.245283 5:-0.506849 6:1 7:1 8:-0.0839695 9:-1 10:-0.967742 12:-0.333333 13:-1
+1 1:0.666667 2:1 3:0.333333 4:-0.132075 5:-0.415525 6:-1 7:1 8:0.145038 9:-1 10:-0.354839 12:1 13:1
+1 1:0.583333 2:1 3:1 4:-0.886792 5:-0.210046 6:-1 7:1 8:-0.175573 9:1 10:-0.709677 12:0.333333 13:-1
-1 1:0.625 2:-1 3:0.333333 4:-0.509434 5:-0.611872 6:-1 7:1 8:-0.328244 9:-1 10:-0.516129 12:-1 13:-1
-1 1:-0.791667 2:1 3:-1 4:-0.54717 5:-0.744292 6:-1 7:1 8:0.572519 9:-1 10:-1 11:-1 12:-1 13:-1
+1 1:0.375 2:-1 3:1 4:-0.169811 5:-0.232877 6:1 7:-1 8:-0.465649 9:-1 10:-0.387097 12:1 13:-1
+1 1:-0.0833333 2:1 3:1 4:-0.132075 5:-0.214612 6:-1 7:-1 8:-0.221374 9:1 10:0.354839 12:1 13:1
+1 1:-0.291667 2:1 3:0.333333 4:0.0566038 5:-0.520548 6:-1 7:-1 8:0.160305 9:-1 10:0.16129 12:-1 13:-1
+1 1:0.583333 2:1 3:1 4:-0.415094 5:-0.415525 6:1 7:-1 8:0.40458 9:-1 10:-0.935484 12:0.333333 13:1
-1 1:-0.125 2:1 3:0.333333 4:-0.339623 5:-0.680365 6:-1 7:-1 8:0.40458 9:-1 10:-1 11:-1 12:-1 13:-1
-1 1:-0.458333 2:1 3:0.333333 4:-0.509434 5:-0.479452 6:1 7:-1 8:0.877863 9:-1 10:-0.741935 11:1 12:-1 13:1
+1 1:0.125 2:-1 3:1 4:-0.245283 5:0.292237 6:-1 7:1 8:0.206107 9:1 10:-0.387097 12:0.333333 13:1
+1 1:-0.5 2:1 3:1 4:-0.698113 5:-0.789954 6:-1 7:1 8:0.328244 9:-1 10:-1 11:-1 12:-1 13:1
-1 1:-0.458333 2:-1 3:1 4:-0.849057 5:-0.365297 6:-1 7:1 8:-0.221374 9:-1 10:-0.806452 12:-1 13:-1
-1 2:1 3:0.333333 4:-0.320755 5:-0.452055 6:1 7:1 8:0.557252 9:-1 10:-1 11:-1 12:1 13:-1
-1 1:-0.416667 2:1 3:0.333333 4:-0.320755 5:-0.136986 6:-1 7:-1 8:0.389313 9:-1 10:-0.387097 11:-1 12:-0.333333 13:-1
+1 1:0.125 2:1 3:1 4:-0.283019 5:-0.73516 6:-1 7:1 8:-0.480916 9:1 10:-0.322581 12:-0.333333 13:0.5
-1 1:-0.0416667 2:1 3:1 4:-0.735849 5:-0.511416 6:1 7:-1 8:0.160305 9:-1 10:-0.967742 11:-1 12:1 13:1
-1 1:0.375 2:-1 3:1 4:-0.132075 5:0.223744 6:-1 7:1 8:0.312977 9:-1 10:-0.612903 12:-1 13:-1
+1 1:0.708333 2:1 3:0.333333 4:0.245283 5:-0.347032 6:-1 7:-1 8:-0.374046 9:1 10:-0.0645161 12:-0.333333 13:1
-1 1:0.0416667 2:1 3:1 4:-0.132075 5:-0.484018 6:-1 7:-1 8:0.358779 9:-1 10:-0.612903 11:-1 12:-1 13:-1
+1 1:0.708333 2:1 3:1 4:-0.0377358 5:-0.780822 6:-1 7:-1 8:-0.175573 9:1 10:-0.16129 11:1 12:-1 13:1
-1 1:0.0416667 2:1 3:-0.333333 4:-0.735849 5:-0.164384 6:-1 7:-1 8:0.29771 9:-1 10:-1 11:-1 12:-1 13:1
+1 1:-0.75 2:1 3:1 4:-0.396226 5:-0.287671 6:-1 7:1 8:0.29771 9:1 10:-1 11:-1 12:-1 13:1
-1 1:-0.208333 2:1 3:0.333333 4:-0.433962 5:-0.410959 6:1 7:-1 8:0.587786 9:-1 10:-1 11:-1 12:0.333333 13:-1
-1 1:0.0833333 2:-1 3:-0.333333 4:-0.226415 5:-0.43379 6:-1 7:1 8:0.374046 9:-1 10:-0.548387 12:-1 13:-1
-1 1:0.208333 2:-1 3:1 4:-0.886792 5:-0.442922 6:-1 7:1 8:-0.221374 9:-1 10:-0.677419 12:-1 13:-1
-1 1:0.0416667 2:-1 3:0.333333 4:-0.698113 5:-0.598174 6:-1 7:-1 8:0.328244 9:-1 10:-0.483871 12:-1 13:-1
-1 1:0.666667 2:-1 3:-1 4:-0.132075 5:-0.484018 6:-1 7:-1 8:0.221374 9:-1 10:-0.419355 11:-1 12:0.333333 13:-1
+1 1:1 2:1 3:1 4:-0.415094 5:-0.187215 6:-1 7:1 8:0.389313 9:1 10:-1 11:-1 12:1 13:-1
-1 1:0.625 2:1 3:0.333333 4:-0.54717 5:-0.310502 6:-1 7:-1 8:0.221374 9:-1 10:-0.677419 11:-1 12:-0.333333 13:1
+1 1:0.208333 2:1 3:1 4:-0.415094 5:-0.205479 6:-1 7:1 8:0.526718 9:-1 10:-1 11:-1 12:0.333333 13:1
+1 1:0.291667 2:1 3:1 4:-0.415094 5:-0.39726 6:-1 7:1 8:0.0687023 9:1 10:-0.0967742 12:-0.333333 13:1
+1 1:-0.0833333 2:1 3:1 4:-0.132075 5:-0.210046 6:-1 7:-1 8:0.557252 9:1 10:-0.483871 11:-1 12:-1 13:1
+1 1:0.0833333 2:1 3:1 4:0.245283 5:-0.255708 6:-1 7:1 8:0.129771 9:1 10:-0.741935 12:-0.333333 13:1
-1 1:-0.0416667 2:1 3:-1 4:0.0943396 5:-0.214612 6:1 7:-1 8:0.633588 9:-1 10:-0.612903 12:-1 13:1
-1 1:0.291667 2:-1 3:0.333333 4:-0.849057 5:-0.123288 6:-1 7:-1 8:0.358779 9:-1 10:-1 11:-1 12:-0.333333 13:-1
-1 1:0.208333 2:1 3:0.333333 4:-0.792453 5:-0.479452 6:-1 7:1 8:0.267176 9:1 10:-0.806452 12:-1 13:1
+1 1:0.458333 2:1 3:0.333333 4:-0.415094 5:-0.164384 6:-1 7:-1 8:-0.0839695 9:1 10:-0.419355 12:-1 13:1
-1 1:-0.666667 2:1 3:0.333333 4:-0.320755 5:-0.43379 6:-1 7:-1 8:0.770992 9:-1 10:0.129032 11:1 12:-1 13:-1
+1 1:0.25 2:1 3:-1 4:0.433962 5:-0.260274 6:-1 7:1 8:0.343511 9:-1 10:-0.935484 12:-1 13:1
-1 1:-0.0833333 2:1 3:0.333333 4:-0.415094 5:-0.456621 6:1 7:1 8:0.450382 9:-1 10:-0.225806 12:-1 13:-1
-1 1:-0.416667 2:-1 3:0.333333 4:-0.471698 5:-0.60274 6:-1 7:-1 8:0.435115 9:-1 10:-0.935484 12:-1 13:-1
+1 1:0.208333 2:1 3:1 4:-0.358491 5:-0.589041 6:-1 7:1 8:-0.0839695 9:1 10:-0.290323 12:1 13:1
-1 1:-1 2:1 3:-0.333333 4:-0.320755 5:-0.643836 6:-1 7:1 8:1 9:-1 10:-1 11:-1 12:-1 13:-1
-1 1:-0.5 2:-1 3:-0.333333 4:-0.320755 5:-0.643836 6:-1 7:1 8:0.541985 9:-1 10:-0.548387 11:-1 12:-1 13:-1
-1 1:0.416667 2:-1 3:0.333333 4:-0.226415 5:-0.424658 6:-1 7:1 8:0.541985 9:-1 10:-1 11:-1 12:-1 13:-1
-1 1:-0.0833333 2:1 3:0.333333 4:-1 5:-0.538813 6:-1 7:-1 8:0.267176 9:1 10:-1 11:-1 12:-0.333333 13:1
-1 1:0.0416667 2:1 3:0.333333 4:-0.509434 5:-0.39726 6:-1 7:1 8:0.160305 9:-1 10:-0.870968 12:-1 13:1
-1 1:-0.375 2:1 3:-0.333333 4:-0.509434 5:-0.570776 6:-1 7:-1 8:0.51145 9:-1 10:-1 11:-1 12:-1 13:-1
+1 1:0.0416667 2:1 3:1 4:-0.698113 5:-0.484018 6:-1 7:-1 8:-0.160305 9:1 10:-0.0967742 12:-0.333333 13:1
+1 1:0.5 2:1 3:1 4:-0.226415 5:-0.415525 6:-1 7:1 8:-0.145038 9:-1 10:-0.0967742 12:-0.333333 13:1
-1 1:0.166667 2:1 3:0.333333 4:0.0566038 5:-0.808219 6:-1 7:-1 8:0.572519 9:-1 10:-0.483871 11:-1 12:-1 13:-1
+1 1:0.416667 2:1 3:1 4:-0.320755 5:-0.0684932 6:1 7:1 8:-0.0687023 9:1 10:-0.419355 11:-1 12:1 13:1
-1 1:-0.75 2:-1 3:1 4:-0.169811 5:-0.739726 6:-1 7:-1 8:0.694656 9:-1 10:-0.548387 11:-1 12:-1 13:-1
-1 1:-0.5 2:1 3:-0.333333 4:-0.226415 5:-0.648402 6:-1 7:-1 8:-0.0687023 9:-1 10:-1 12:-1 13:0.5
+1 1:0.375 2:-1 3:0.333333 4:-0.320755 5:-0.374429 6:-1 7:-1 8:-0.603053 9:-1 10:-0.612903 12:-0.333333 13:1
+1 1:-0.416667 2:-1 3:1 4:-0.283019 5:-0.0182648 6:1 7:1 8:-0.00763359 9:1 10:-0.0322581 12:-1 13:1
-1 1:0.208333 2:-1 3:-1 4:0.0566038 5:-0.283105 6:1 7:1 8:0.389313 9:-1 10:-0.677419 11:-1 12:-1 13:-1
-1 1:-0.0416667 2:1 3:-1 4:-0.54717 5:-0.726027 6:-1 7:1 8:0.816794 9:-1 10:-1 12:-1 13:0.5
+1 1:0.333333 2:-1 3:1 4:-0.0377358 5:-0.173516 6:-1 7:1 8:0.145038 9:1 10:-0.677419 12:-1 13:1
+1 1:-0.583333 2:1 3:1 4:-0.54717 5:-0.575342 6:-1 7:-1 8:0.0534351 9:-1 10:-0.612903 12:-1 13:1
-1 1:-0.333333 2:1 3:1 4:-0.603774 5:-0.388128 6:-1 7:1 8:0.740458 9:-1 10:-1 11:-1 12:-1 13:-1
+1 1:-0.0416667 2:1 3:1 4:-0.358491 5:-0.410959 6:-1 7:-1 8:0.374046 9:1 10:-1 11:-1 12:-0.333333 13:1
-1 1:0.375 2:1 3:0.333333 4:-0.320755 5:-0.520548 6:-1 7:-1 8:0.145038 9:-1 10:-0.419355 12:1 13:1
+1 1:0.375 2:-1 3:1 4:0.245283 5:-0.826484 6:-1 7:1 8:0.129771 9:-1 10:1 11:1 12:1 13:1
-1 2:-1 3:1 4:-0.169811 5:-0.506849 6:-1 7:1 8:0.358779 9:-1 10:-1 11:-1 12:-1 13:-1
+1 1:-0.416667 2:1 3:1 4:-0.509434 5:-0.767123 6:-1 7:1 8:-0.251908 9:1 10:-0.193548 12:-1 13:1
-1 1:-0.25 2:1 3:0.333333 4:-0.169811 5:-0.401826 6:-1 7:1 8:0.29771 9:-1 10:-1 11:-1 12:-1 13:-1
-1 1:-0.0416667 2:1 3:-0.333333 4:-0.509434 5:-0.0913242 6:-1 7:-1 8:0.541985 9:-1 10:-0.935484 11:-1 12:-1 13:-1
+1 1:0.625 2:1 3:0.333333 4:0.622642 5:-0.324201 6:1 7:1 8:0.206107 9:1 10:-0.483871 12:-1 13:1
-1 1:-0.583333 2:1 3:0.333333 4:-0.132075 5:-0.109589 6:-1 7:1 8:0.694656 9:-1 10:-1 11:-1 12:-1 13:-1
-1 2:-1 3:1 4:-0.320755 5:-0.369863 6:-1 7:1 8:0.0992366 9:-1 10:-0.870968 12:-1 13:-1
+1 1:0.375 2:-1 3:1 4:-0.132075 5:-0.351598 6:-1 7:1 8:0.358779 9:-1 10:0.16129 11:1 12:0.333333 13:-1
-1 1:-0.0833333 2:-1 3:0.333333 4:-0.132075 5:-0.16895 6:-1 7:1 8:0.0839695 9:-1 10:-0.516129 11:-1 12:-0.333333 13:-1
+1 1:0.291667 2:1 3:1 4:-0.320755 5:-0.420091 6:-1 7:-1 8:0.114504 9:1 10:-0.548387 11:-1 12:-0.333333 13:1
+1 1:0.5 2:1 3:1 4:-0.698113 5:-0.442922 6:-1 7:1 8:0.328244 9:-1 10:-0.806452 11:-1 12:0.333333 13:0.5
-1 1:0.5 2:-1 3:0.333333 4:0.150943 5:-0.347032 6:-1 7:-1 8:0.175573 9:-1 10:-0.741935 11:-1 12:-1 13:-1
+1 1:0.291667 2:1 3:0.333333 4:-0.132075 5:-0.730594 6:-1 7:1 8:0.282443 9:-1 10:-0.0322581 12:-1 13:-1
+1 1:0.291667 2:1 3:1 4:-0.0377358 5:-0.287671 6:-1 7:1 8:0.0839695 9:1 10:-0.0967742 12:0.333333 13:1
+1 1:0.0416667 2:1 3:1 4:-0.509434 5:-0.716895 6:-1 7:-1 8:-0.358779 9:-1 10:-0.548387 12:-0.333333 13:1
-1 1:-0.375 2:1 3:-0.333333 4:-0.320755 5:-0.575342 6:-1 7:1 8:0.78626 9:-1 10:-1 11:-1 12:-1 13:-1
+1 1:-0.375 2:1 3:1 4:-0.660377 5:-0.251142 6:-1 7:1 8:0.251908 9:-1 10:-1 11:-1 12:-0.333333 13:-1
-1 1:-0.0833333 2:1 3:0.333333 4:-0.698113 5:-0.776256 6:-1 7:-1 8:-0.206107 9:-1 10:-0.806452 11:-1 12:-1 13:-1
-1 1:0.25 2:1 3:0.333333 4:0.0566038 5:-0.607306 6:1 7:-1 8:0.312977 9:-1 10:-0.483871 11:-1 12:-1 13:-1
-1 1:0.75 2:-1 3:-0.333333 4:0.245283 5:-0.196347 6:-1 7:-1 8:0.389313 9:-1 10:-0.870968 11:-1 12:0.333333 13:-1
-1 1:0.333333 2:1 3:0.333333 4:0.0566038 5:-0.465753 6:1 7:-1 8:0.00763359 9:1 10:-0.677419 12:-1 13:-1
+1 1:0.0833333 2:1 3:1 4:-0.283019 5:0.0365297 6:-1 7:-1 8:-0.0687023 9:1 10:-0.612903 12:-0.333333 13:1
+1 1:0.458333 2:1 3:0.333333 4:-0.132075 5:-0.0456621 6:-1 7:-1 8:0.328244 9:-1 10:-1 11:-1 12:-1 13:-1
-1 1:-0.416667 2:1 3:1 4:0.0566038 5:-0.447489 6:-1 7:-1 8:0.526718 9:-1 10:-0.516129 11:-1 12:-1 13:-1
-1 1:0.208333 2:-1 3:0.333333 4:-0.509434 5:-0.0228311 6:-1 7:-1 8:0.541985 9:-1 10:-1 11:-1 12:-1 13:-1
+1 1:0.291667 2:1 3:1 4:-0.320755 5:-0.634703 6:-1 7:1 8:-0.0687023 9:1 10:-0.225806 12:0.333333 13:1
+1 1:0.208333 2:1 3:-0.333333 4:-0.509434 5:-0.278539 6:-1 7:1 8:0.358779 9:-1 10:-0.419355 12:-1 13:-1
-1 1:-0.166667 2:1 3:-0.333333 4:-0.320755 5:-0.360731 6:-1 7:-1 8:0.526718 9:-1 10:-0.806452 11:-1 12:-1 13:-1
+1 1:-0.208333 2:1 3:-0.333333 4:-0.698113 5:-0.52968 6:-1 7:-1 8:0.480916 9:-1 10:-0.677419 11:1 12:-1 13:1
-1 1:-0.0416667 2:1 3:0.333333 4:0.471698 5:-0.666667 6:1 7:-1 8:0.389313 9:-1 10:-0.83871 11:-1 12:-1 13:1
-1 1:-0.375 2:1 3:-0.333333 4:-0.509434 5:-0.374429 6:-1 7:-1 8:0.557252 9:-1 10:-1 11:-1 12:-1 13:1
-1 1:0.125 2:-1 3:-0.333333 4:-0.132075 5:-0.232877 6:-1 7:1 8:0.251908 9:-1 10:-0.580645 12:-1 13:-1
-1 1:0.166667 2:1 3:1 4:-0.132075 5:-0.69863 6:-1 7:-1 8:0.175573 9:-1 10:-0.870968 12:-1 13:0.5
+1 1:0.583333 2:1 3:1 4:0.245283 5:-0.269406 6:-1 7:1 8:-0.435115 9:1 10:-0.516129 12:1 13:-1

+ 26
- 0
gklearn/gedlib/lib/libsvm.3.22/java/Makefile View File

@@ -0,0 +1,26 @@
# Build rules for the libsvm Java port: compile the library classes and the
# command-line tools, then bundle everything into libsvm.jar.
# NOTE(review): recipe lines below were re-indented with tabs (required by
# make); the original indentation was lost in extraction — confirm against
# upstream libsvm 3.22.
.SUFFIXES: .class .java
FILES = libsvm/svm.class libsvm/svm_model.class libsvm/svm_node.class \
	libsvm/svm_parameter.class libsvm/svm_problem.class \
	libsvm/svm_print_interface.class \
	svm_train.class svm_predict.class svm_toy.class svm_scale.class

#JAVAC = jikes
JAVAC_FLAGS = -target 1.7 -source 1.7
JAVAC = javac
# JAVAC_FLAGS =
# Put the current directory on the classpath so the libsvm package resolves.
export CLASSPATH := .:$(CLASSPATH)

all: $(FILES)
	jar cvf libsvm.jar *.class libsvm/*.class

# Suffix rule: compile any .java file into its .class file.
.java.class:
	$(JAVAC) $(JAVAC_FLAGS) $<

# svm.java is generated from the m4 template (shared with the C sources).
libsvm/svm.java: libsvm/svm.m4
	m4 libsvm/svm.m4 > libsvm/svm.java

clean:
	rm -f libsvm/*.class *.class *.jar libsvm/*~ *~ libsvm/svm.java

# Full rebuild, then drop the loose .class files (the jar is kept).
dist: clean all
	rm *.class libsvm/*.class

BIN
gklearn/gedlib/lib/libsvm.3.22/java/libsvm.jar View File


+ 2860
- 0
gklearn/gedlib/lib/libsvm.3.22/java/libsvm/svm.java
File diff suppressed because it is too large
View File


+ 2860
- 0
gklearn/gedlib/lib/libsvm.3.22/java/libsvm/svm.m4
File diff suppressed because it is too large
View File


+ 22
- 0
gklearn/gedlib/lib/libsvm.3.22/java/libsvm/svm_model.java View File

@@ -0,0 +1,22 @@
//
// svm_model: a trained SVM model (support vectors, coefficients, and the
// training parameters); Serializable so it can be persisted and reloaded.
//
package libsvm;
public class svm_model implements java.io.Serializable
{
	public svm_parameter param;	// parameter
	public int nr_class;		// number of classes, = 2 in regression/one class svm
	public int l;			// total #SV
	public svm_node[][] SV;	// SVs (SV[l])
	public double[][] sv_coef;	// coefficients for SVs in decision functions (sv_coef[k-1][l])
	public double[] rho;		// constants in decision functions (rho[k*(k-1)/2])
	public double[] probA;		// pairwise probability information
	public double[] probB;
	public int[] sv_indices;	// sv_indices[0,...,nSV-1] are values in [1,...,num_training_data] to indicate SVs in the training set

	// for classification only

	public int[] label;		// label of each class (label[k])
	public int[] nSV;		// number of SVs for each class (nSV[k])
					// nSV[0] + nSV[1] + ... + nSV[k-1] = l
};

+ 6
- 0
gklearn/gedlib/lib/libsvm.3.22/java/libsvm/svm_node.java View File

@@ -0,0 +1,6 @@
package libsvm;
// One (index, value) entry of a sparse feature vector; a full instance is
// an svm_node[].  Indices start at 1 by libsvm convention (see svm_scale:
// "assumption: min index of attributes is 1").
public class svm_node implements java.io.Serializable
{
	public int index;	// feature index
	public double value;	// feature value
}

+ 47
- 0
gklearn/gedlib/lib/libsvm.3.22/java/libsvm/svm_parameter.java View File

@@ -0,0 +1,47 @@
package libsvm;
// Hyper-parameters controlling SVM training: the SVM formulation, the
// kernel and its parameters, and solver settings.  Cloneable so a caller
// can copy a configuration before modifying it.
public class svm_parameter implements Cloneable,java.io.Serializable
{
	/* svm_type */
	public static final int C_SVC = 0;
	public static final int NU_SVC = 1;
	public static final int ONE_CLASS = 2;
	public static final int EPSILON_SVR = 3;
	public static final int NU_SVR = 4;

	/* kernel_type */
	public static final int LINEAR = 0;
	public static final int POLY = 1;
	public static final int RBF = 2;
	public static final int SIGMOID = 3;
	public static final int PRECOMPUTED = 4;

	public int svm_type;	// one of the svm_type constants above
	public int kernel_type;	// one of the kernel_type constants above
	public int degree;	// for poly
	public double gamma;	// for poly/rbf/sigmoid
	public double coef0;	// for poly/sigmoid

	// these are for training only
	public double cache_size; // in MB
	public double eps;	// stopping criteria
	public double C;	// for C_SVC, EPSILON_SVR and NU_SVR
	public int nr_weight;		// for C_SVC
	public int[] weight_label;	// for C_SVC
	public double[] weight;		// for C_SVC
	public double nu;	// for NU_SVC, ONE_CLASS, and NU_SVR
	public double p;	// for EPSILON_SVR
	public int shrinking;	// use the shrinking heuristics
	public int probability; // do probability estimates

	// Field-by-field (shallow) copy via Object.clone(); returns null if
	// cloning is refused — unreachable here since the class is Cloneable.
	public Object clone()
	{
		try
		{
			return super.clone();
		} catch (CloneNotSupportedException e)
		{
			return null;
		}
	}

}

+ 5
- 0
gklearn/gedlib/lib/libsvm.3.22/java/libsvm/svm_print_interface.java View File

@@ -0,0 +1,5 @@
package libsvm;
// Callback used to redirect the library's informational output — e.g. to
// stdout, or to a no-op implementation for quiet mode (see svm_predict).
public interface svm_print_interface
{
	public void print(String s);
}

+ 7
- 0
gklearn/gedlib/lib/libsvm.3.22/java/libsvm/svm_problem.java View File

@@ -0,0 +1,7 @@
package libsvm;
// A data set of l labeled instances: target values y[i] paired with
// sparse feature vectors x[i].
public class svm_problem implements java.io.Serializable
{
	public int l;		// number of instances
	public double[] y;	// target values, y[l]
	public svm_node[][] x;	// sparse feature vectors, x[l]
}

+ 194
- 0
gklearn/gedlib/lib/libsvm.3.22/java/svm_predict.java View File

@@ -0,0 +1,194 @@
import libsvm.*;
import java.io.*;
import java.util.*;

// Command-line tool: predict labels/values for a test file with a trained
// model.  Usage: svm_predict [options] test_file model_file output_file.
// Fix vs. original: corrected the user-facing error message typo
// "probabiliy estimates" -> "probability estimates".
class svm_predict {
	// Output sink that discards everything; installed by the -q option.
	private static svm_print_interface svm_print_null = new svm_print_interface()
	{
		public void print(String s) {}
	};

	// Default output sink: forward to stdout.
	private static svm_print_interface svm_print_stdout = new svm_print_interface()
	{
		public void print(String s)
		{
			System.out.print(s);
		}
	};

	// Currently selected sink for informational messages.
	private static svm_print_interface svm_print_string = svm_print_stdout;

	static void info(String s)
	{
		svm_print_string.print(s);
	}

	// C-style string-to-double conversion helper.
	private static double atof(String s)
	{
		return Double.valueOf(s).doubleValue();
	}

	// C-style string-to-int conversion helper.
	private static int atoi(String s)
	{
		return Integer.parseInt(s);
	}

	// Read test instances in libsvm sparse format from `input`, predict each
	// with `model`, and write one prediction per line to `output`.  When
	// predict_probability == 1, probability estimates (classification) are
	// appended, or the Laplace-noise sigma is reported (regression).  Prints
	// accuracy (classification) or MSE and squared correlation (regression).
	private static void predict(BufferedReader input, DataOutputStream output, svm_model model, int predict_probability) throws IOException
	{
		int correct = 0;
		int total = 0;
		double error = 0;
		// Running sums for the squared correlation coefficient (regression).
		double sumv = 0, sumy = 0, sumvv = 0, sumyy = 0, sumvy = 0;

		int svm_type=svm.svm_get_svm_type(model);
		int nr_class=svm.svm_get_nr_class(model);
		double[] prob_estimates=null;

		if(predict_probability == 1)
		{
			if(svm_type == svm_parameter.EPSILON_SVR ||
			   svm_type == svm_parameter.NU_SVR)
			{
				svm_predict.info("Prob. model for test data: target value = predicted value + z,\nz: Laplace distribution e^(-|z|/sigma)/(2sigma),sigma="+svm.svm_get_svr_probability(model)+"\n");
			}
			else
			{
				// Classification: emit a header line naming the class labels
				// so the per-class probability columns can be interpreted.
				int[] labels=new int[nr_class];
				svm.svm_get_labels(model,labels);
				prob_estimates = new double[nr_class];
				output.writeBytes("labels");
				for(int j=0;j<nr_class;j++)
					output.writeBytes(" "+labels[j]);
				output.writeBytes("\n");
			}
		}
		while(true)
		{
			String line = input.readLine();
			if(line == null) break;

			StringTokenizer st = new StringTokenizer(line," \t\n\r\f:");

			// First token is the true target; the rest alternate index/value.
			double target = atof(st.nextToken());
			int m = st.countTokens()/2;
			svm_node[] x = new svm_node[m];
			for(int j=0;j<m;j++)
			{
				x[j] = new svm_node();
				x[j].index = atoi(st.nextToken());
				x[j].value = atof(st.nextToken());
			}

			double v;
			if (predict_probability==1 && (svm_type==svm_parameter.C_SVC || svm_type==svm_parameter.NU_SVC))
			{
				v = svm.svm_predict_probability(model,x,prob_estimates);
				output.writeBytes(v+" ");
				for(int j=0;j<nr_class;j++)
					output.writeBytes(prob_estimates[j]+" ");
				output.writeBytes("\n");
			}
			else
			{
				v = svm.svm_predict(model,x);
				output.writeBytes(v+"\n");
			}

			// Exact comparison is intended: classification labels round-trip
			// exactly through double.
			if(v == target)
				++correct;
			error += (v-target)*(v-target);
			sumv += v;
			sumy += target;
			sumvv += v*v;
			sumyy += target*target;
			sumvy += v*target;
			++total;
		}
		if(svm_type == svm_parameter.EPSILON_SVR ||
		   svm_type == svm_parameter.NU_SVR)
		{
			svm_predict.info("Mean squared error = "+error/total+" (regression)\n");
			svm_predict.info("Squared correlation coefficient = "+
				 ((total*sumvy-sumv*sumy)*(total*sumvy-sumv*sumy))/
				 ((total*sumvv-sumv*sumv)*(total*sumyy-sumy*sumy))+
				 " (regression)\n");
		}
		else
			svm_predict.info("Accuracy = "+(double)correct/total*100+
				 "% ("+correct+"/"+total+") (classification)\n");
	}

	private static void exit_with_help()
	{
		System.err.print("usage: svm_predict [options] test_file model_file output_file\n"
		+"options:\n"
		+"-b probability_estimates: whether to predict probability estimates, 0 or 1 (default 0); one-class SVM not supported yet\n"
		+"-q : quiet mode (no outputs)\n");
		System.exit(1);
	}

	public static void main(String argv[]) throws IOException
	{
		int i, predict_probability=0;
		svm_print_string = svm_print_stdout;

		// parse options
		for(i=0;i<argv.length;i++)
		{
			if(argv[i].charAt(0) != '-') break;
			++i;
			switch(argv[i-1].charAt(1))
			{
				case 'b':
					predict_probability = atoi(argv[i]);
					break;
				case 'q':
					svm_print_string = svm_print_null;
					i--;	// -q takes no argument; undo the lookahead
					break;
				default:
					System.err.print("Unknown option: " + argv[i-1] + "\n");
					exit_with_help();
			}
		}
		// Need exactly test_file, model_file, output_file after the options.
		if(i>=argv.length-2)
			exit_with_help();
		try
		{
			BufferedReader input = new BufferedReader(new FileReader(argv[i]));
			DataOutputStream output = new DataOutputStream(new BufferedOutputStream(new FileOutputStream(argv[i+2])));
			svm_model model = svm.svm_load_model(argv[i+1]);
			if (model == null)
			{
				System.err.print("can't open model file "+argv[i+1]+"\n");
				System.exit(1);
			}
			if(predict_probability == 1)
			{
				if(svm.svm_check_probability_model(model)==0)
				{
					System.err.print("Model does not support probability estimates\n");
					System.exit(1);
				}
			}
			else
			{
				if(svm.svm_check_probability_model(model)!=0)
				{
					svm_predict.info("Model supports probability estimates, but disabled in prediction.\n");
				}
			}
			predict(input,output,model,predict_probability);
			input.close();
			output.close();
		}
		catch(FileNotFoundException e)
		{
			exit_with_help();
		}
		catch(ArrayIndexOutOfBoundsException e)
		{
			exit_with_help();
		}
	}
}

+ 350
- 0
gklearn/gedlib/lib/libsvm.3.22/java/svm_scale.java View File

@@ -0,0 +1,350 @@
import libsvm.*;
import java.io.*;
import java.util.*;
import java.text.DecimalFormat;

// Command-line tool: linearly rescale each feature of a libsvm-format data
// file into [lower, upper] (and optionally the target into
// [y_lower, y_upper]).  Scaling parameters can be saved with -s and later
// re-applied to another file with -r.  Works in streaming passes over the
// file; scaled data is written to stdout.
class svm_scale
{
	private String line = null;		// last line read by readline()
	private double lower = -1.0;		// x scaling lower limit
	private double upper = 1.0;		// x scaling upper limit
	private double y_lower;
	private double y_upper;
	private boolean y_scaling = false;	// also scale the target value?
	private double[] feature_max;		// per-feature max, indexed by feature index
	private double[] feature_min;		// per-feature min, indexed by feature index
	private double y_max = -Double.MAX_VALUE;
	private double y_min = Double.MAX_VALUE;
	private int max_index;			// largest feature index seen
	private long num_nonzeros = 0;		// nonzero entries in the input
	private long new_num_nonzeros = 0;	// nonzero entries in the scaled output

	private static void exit_with_help()
	{
		System.out.print(
		"Usage: svm-scale [options] data_filename\n"
		+"options:\n"
		+"-l lower : x scaling lower limit (default -1)\n"
		+"-u upper : x scaling upper limit (default +1)\n"
		+"-y y_lower y_upper : y scaling limits (default: no y scaling)\n"
		+"-s save_filename : save scaling parameters to save_filename\n"
		+"-r restore_filename : restore scaling parameters from restore_filename\n"
		);
		System.exit(1);
	}

	// Restart reading from the beginning by closing and reopening the file.
	private BufferedReader rewind(BufferedReader fp, String filename) throws IOException
	{
		fp.close();
		return new BufferedReader(new FileReader(filename));
	}

	// Print the (optionally scaled) target value followed by a space.
	private void output_target(double value)
	{
		if(y_scaling)
		{
			// Map the endpoints exactly to avoid floating-point drift.
			if(value == y_min)
				value = y_lower;
			else if(value == y_max)
				value = y_upper;
			else
				value = y_lower + (y_upper-y_lower) *
					(value-y_min) / (y_max-y_min);
		}

		System.out.print(value + " ");
	}

	// Print one scaled feature as "index:value"; zero results are dropped
	// (sparse format) and single-valued features are skipped entirely.
	private void output(int index, double value)
	{
		/* skip single-valued attribute */
		if(feature_max[index] == feature_min[index])
			return;

		if(value == feature_min[index])
			value = lower;
		else if(value == feature_max[index])
			value = upper;
		else
			value = lower + (upper-lower) *
				(value-feature_min[index])/
				(feature_max[index]-feature_min[index]);

		if(value != 0)
		{
			System.out.print(index + ":" + value + " ");
			new_num_nonzeros++;
		}
	}

	// Read the next line into this.line; returns null at end of file.
	private String readline(BufferedReader fp) throws IOException
	{
		line = fp.readLine();
		return line;
	}

	private void run(String []argv) throws IOException
	{
		int i,index;
		BufferedReader fp = null, fp_restore = null;
		String save_filename = null;
		String restore_filename = null;
		String data_filename = null;


		// Parse command-line options; each takes one argument.
		for(i=0;i<argv.length;i++)
		{
			if (argv[i].charAt(0) != '-') break;
			++i;
			switch(argv[i-1].charAt(1))
			{
				case 'l': lower = Double.parseDouble(argv[i]);	break;
				case 'u': upper = Double.parseDouble(argv[i]);	break;
				case 'y':
					// -y takes two arguments: y_lower y_upper.
					y_lower = Double.parseDouble(argv[i]);
					++i;
					y_upper = Double.parseDouble(argv[i]);
					y_scaling = true;
					break;
				case 's': save_filename = argv[i];	break;
				case 'r': restore_filename = argv[i];	break;
				default:
					System.err.println("unknown option");
					exit_with_help();
			}
		}

		if(!(upper > lower) || (y_scaling && !(y_upper > y_lower)))
		{
			System.err.println("inconsistent lower/upper specification");
			System.exit(1);
		}
		if(restore_filename != null && save_filename != null)
		{
			System.err.println("cannot use -r and -s simultaneously");
			System.exit(1);
		}

		// Exactly one positional argument (the data file) must remain.
		if(argv.length != i+1)
			exit_with_help();

		data_filename = argv[i];
		try {
			fp = new BufferedReader(new FileReader(data_filename));
		} catch (Exception e) {
			System.err.println("can't open file " + data_filename);
			System.exit(1);
		}

		/* assumption: min index of attributes is 1 */
		/* pass 1: find out max index of attributes */
		max_index = 0;

		if(restore_filename != null)
		{
			// Only the max feature index is needed here; the actual min/max
			// values are restored later in pass 2.5.
			int idx, c;

			try {
				fp_restore = new BufferedReader(new FileReader(restore_filename));
			}
			catch (Exception e) {
				System.err.println("can't open file " + restore_filename);
				System.exit(1);
			}
			// Skip the optional "y" header block (3 lines) if present,
			// then the "x" header line and the lower/upper line.
			if((c = fp_restore.read()) == 'y')
			{
				fp_restore.readLine();
				fp_restore.readLine();
				fp_restore.readLine();
			}
			fp_restore.readLine();
			fp_restore.readLine();

			String restore_line = null;
			while((restore_line = fp_restore.readLine())!=null)
			{
				StringTokenizer st2 = new StringTokenizer(restore_line);
				idx = Integer.parseInt(st2.nextToken());
				max_index = Math.max(max_index, idx);
			}
			fp_restore = rewind(fp_restore, restore_filename);
		}

		while (readline(fp) != null)
		{
			StringTokenizer st = new StringTokenizer(line," \t\n\r\f:");
			st.nextToken();	// skip the target value
			while(st.hasMoreTokens())
			{
				index = Integer.parseInt(st.nextToken());
				max_index = Math.max(max_index, index);
				st.nextToken();	// skip the feature value in this pass
				num_nonzeros++;
			}
		}

		try {
			feature_max = new double[(max_index+1)];
			feature_min = new double[(max_index+1)];
		} catch(OutOfMemoryError e) {
			System.err.println("can't allocate enough memory");
			System.exit(1);
		}

		for(i=0;i<=max_index;i++)
		{
			feature_max[i] = -Double.MAX_VALUE;
			feature_min[i] = Double.MAX_VALUE;
		}

		fp = rewind(fp, data_filename);

		/* pass 2: find out min/max value */
		while(readline(fp) != null)
		{
			int next_index = 1;
			double target;
			double value;

			StringTokenizer st = new StringTokenizer(line," \t\n\r\f:");
			target = Double.parseDouble(st.nextToken());
			y_max = Math.max(y_max, target);
			y_min = Math.min(y_min, target);

			while (st.hasMoreTokens())
			{
				index = Integer.parseInt(st.nextToken());
				value = Double.parseDouble(st.nextToken());

				// Features absent from a sparse line implicitly have
				// value 0, which must count toward min/max.
				for (i = next_index; i<index; i++)
				{
					feature_max[i] = Math.max(feature_max[i], 0);
					feature_min[i] = Math.min(feature_min[i], 0);
				}

				feature_max[index] = Math.max(feature_max[index], value);
				feature_min[index] = Math.min(feature_min[index], value);
				next_index = index + 1;
			}

			for(i=next_index;i<=max_index;i++)
			{
				feature_max[i] = Math.max(feature_max[i], 0);
				feature_min[i] = Math.min(feature_min[i], 0);
			}
		}

		fp = rewind(fp, data_filename);

		/* pass 2.5: save/restore feature_min/feature_max */
		if(restore_filename != null)
		{
			// fp_restore rewinded in finding max_index
			int idx, c;
			double fmin, fmax;

			fp_restore.mark(2);			// for reset
			if((c = fp_restore.read()) == 'y')
			{
				fp_restore.readLine();		// pass the '\n' after 'y'
				StringTokenizer st = new StringTokenizer(fp_restore.readLine());
				y_lower = Double.parseDouble(st.nextToken());
				y_upper = Double.parseDouble(st.nextToken());
				st = new StringTokenizer(fp_restore.readLine());
				y_min = Double.parseDouble(st.nextToken());
				y_max = Double.parseDouble(st.nextToken());
				y_scaling = true;
			}
			else
				fp_restore.reset();	// no 'y' block; put the char back

			if(fp_restore.read() == 'x') {
				fp_restore.readLine();		// pass the '\n' after 'x'
				StringTokenizer st = new StringTokenizer(fp_restore.readLine());
				lower = Double.parseDouble(st.nextToken());
				upper = Double.parseDouble(st.nextToken());
				String restore_line = null;
				while((restore_line = fp_restore.readLine())!=null)
				{
					StringTokenizer st2 = new StringTokenizer(restore_line);
					idx = Integer.parseInt(st2.nextToken());
					fmin = Double.parseDouble(st2.nextToken());
					fmax = Double.parseDouble(st2.nextToken());
					// Indices beyond this file's max_index are ignored.
					if (idx <= max_index)
					{
						feature_min[idx] = fmin;
						feature_max[idx] = fmax;
					}
				}
			}
			fp_restore.close();
		}

		if(save_filename != null)
		{
			Formatter formatter = new Formatter(new StringBuilder());
			BufferedWriter fp_save = null;

			try {
				fp_save = new BufferedWriter(new FileWriter(save_filename));
			} catch(IOException e) {
				System.err.println("can't open file " + save_filename);
				System.exit(1);
			}

			// File format: optional "y" block, then "x", lower/upper,
			// then one "idx min max" line per varying feature.
			if(y_scaling)
			{
				formatter.format("y\n");
				formatter.format("%.16g %.16g\n", y_lower, y_upper);
				formatter.format("%.16g %.16g\n", y_min, y_max);
			}
			formatter.format("x\n");
			formatter.format("%.16g %.16g\n", lower, upper);
			for(i=1;i<=max_index;i++)
			{
				if(feature_min[i] != feature_max[i])
					formatter.format("%d %.16g %.16g\n", i, feature_min[i], feature_max[i]);
			}
			fp_save.write(formatter.toString());
			fp_save.close();
		}

		/* pass 3: scale */
		while(readline(fp) != null)
		{
			int next_index = 1;
			double target;
			double value;

			StringTokenizer st = new StringTokenizer(line," \t\n\r\f:");
			target = Double.parseDouble(st.nextToken());
			output_target(target);
			while(st.hasMoreElements())
			{
				index = Integer.parseInt(st.nextToken());
				value = Double.parseDouble(st.nextToken());
				// Implicit zeros may scale to nonzero; emit them too.
				for (i = next_index; i<index; i++)
					output(i, 0);
				output(index, value);
				next_index = index + 1;
			}

			for(i=next_index;i<= max_index;i++)
				output(i, 0);
			System.out.print("\n");
		}
		if (new_num_nonzeros > num_nonzeros)
			System.err.print(
			 "WARNING: original #nonzeros " + num_nonzeros+"\n"
			+"         new      #nonzeros " + new_num_nonzeros+"\n"
			+"Use -l 0 if many original feature values are zeros\n");

		fp.close();
	}

	public static void main(String argv[]) throws IOException
	{
		svm_scale s = new svm_scale();
		s.run(argv);
	}
}

+ 502
- 0
gklearn/gedlib/lib/libsvm.3.22/java/svm_toy.java View File

@@ -0,0 +1,502 @@
import libsvm.*;
import java.applet.*;
import java.awt.*;
import java.util.*;
import java.awt.event.*;
import java.io.*;

public class svm_toy extends Applet {

static final String DEFAULT_PARAM="-t 2 -c 100";
int XLEN;
int YLEN;

// off-screen buffer

Image buffer;
Graphics buffer_gc;

// pre-allocated colors

final static Color colors[] =
{
new Color(0,0,0),
new Color(0,120,120),
new Color(120,120,0),
new Color(120,0,120),
new Color(0,200,200),
new Color(200,200,0),
new Color(200,0,200)
};

class point {
point(double x, double y, byte value)
{
this.x = x;
this.y = y;
this.value = value;
}
double x, y;
byte value;
}

Vector<point> point_list = new Vector<point>();
byte current_value = 1;

public void init()
{
setSize(getSize());

final Button button_change = new Button("Change");
Button button_run = new Button("Run");
Button button_clear = new Button("Clear");
Button button_save = new Button("Save");
Button button_load = new Button("Load");
final TextField input_line = new TextField(DEFAULT_PARAM);

BorderLayout layout = new BorderLayout();
this.setLayout(layout);

Panel p = new Panel();
GridBagLayout gridbag = new GridBagLayout();
p.setLayout(gridbag);

GridBagConstraints c = new GridBagConstraints();
c.fill = GridBagConstraints.HORIZONTAL;
c.weightx = 1;
c.gridwidth = 1;
gridbag.setConstraints(button_change,c);
gridbag.setConstraints(button_run,c);
gridbag.setConstraints(button_clear,c);
gridbag.setConstraints(button_save,c);
gridbag.setConstraints(button_load,c);
c.weightx = 5;
c.gridwidth = 5;
gridbag.setConstraints(input_line,c);

button_change.setBackground(colors[current_value]);

p.add(button_change);
p.add(button_run);
p.add(button_clear);
p.add(button_save);
p.add(button_load);
p.add(input_line);
this.add(p,BorderLayout.SOUTH);

button_change.addActionListener(new ActionListener()
{ public void actionPerformed (ActionEvent e)
{ button_change_clicked(); button_change.setBackground(colors[current_value]); }});

button_run.addActionListener(new ActionListener()
{ public void actionPerformed (ActionEvent e)
{ button_run_clicked(input_line.getText()); }});

button_clear.addActionListener(new ActionListener()
{ public void actionPerformed (ActionEvent e)
{ button_clear_clicked(); }});

button_save.addActionListener(new ActionListener()
{ public void actionPerformed (ActionEvent e)
{ button_save_clicked(input_line.getText()); }});

button_load.addActionListener(new ActionListener()
{ public void actionPerformed (ActionEvent e)
{ button_load_clicked(); }});

input_line.addActionListener(new ActionListener()
{ public void actionPerformed (ActionEvent e)
{ button_run_clicked(input_line.getText()); }});

this.enableEvents(AWTEvent.MOUSE_EVENT_MASK);
}

// Paint one training point as a 4x4 square in its class color, into both
// the offscreen buffer and the visible window.
void draw_point(point p)
{
// p.value+3 indexes the colors[] table; assumes value stays in a range
// that keeps the index valid -- TODO confirm against point's definition.
Color c = colors[p.value+3];

Graphics window_gc = getGraphics();
buffer_gc.setColor(c);
buffer_gc.fillRect((int)(p.x*XLEN),(int)(p.y*YLEN),4,4);
window_gc.setColor(c);
window_gc.fillRect((int)(p.x*XLEN),(int)(p.y*YLEN),4,4);
}

// Forget all stored points, wipe the offscreen buffer to the background
// color, and schedule a repaint of the window.
void clear_all()
{
point_list.removeAllElements();
// buffer is created lazily in paint(), so it can still be null here.
if(buffer != null)
{
buffer_gc.setColor(colors[0]);
buffer_gc.fillRect(0,0,XLEN,YLEN);
}
repaint();
}

// Repaint every stored training point (used after the decision regions
// have been redrawn, so the points sit on top).
void draw_all_points()
{
final int count = point_list.size();
for(int idx = 0; idx < count; idx++)
draw_point(point_list.elementAt(idx));
}

// Cycle the currently selected class label through 1, 2, 3 (wraps after 3).
void button_change_clicked()
{
current_value = (current_value >= 3) ? 1 : current_value + 1;
}

// Parse a decimal string into a double.
// Uses Double.parseDouble directly instead of the original
// Double.valueOf(s).doubleValue(), which allocated a throwaway Double
// box. Behavior is identical: a NumberFormatException is still thrown
// for malformed input.
private static double atof(String s)
{
return Double.parseDouble(s);
}

// Parse a decimal integer string; thin wrapper over Integer.parseInt
// (throws NumberFormatException on malformed input).
private static int atoi(String text)
{
return Integer.parseInt(text);
}

// "Run" handler: parse the libsvm option string, train a model on the
// clicked points, and paint the decision regions (classification /
// one-class) or the regression curve into the buffer and the window.
void button_run_clicked(String args)
{
// guard
if(point_list.isEmpty()) return;

svm_parameter param = new svm_parameter();

// default values
param.svm_type = svm_parameter.C_SVC;
param.kernel_type = svm_parameter.RBF;
param.degree = 3;
param.gamma = 0;
param.coef0 = 0;
param.nu = 0.5;
param.cache_size = 40;
param.C = 1;
param.eps = 1e-3;
param.p = 0.1;
param.shrinking = 1;
param.probability = 0;
param.nr_weight = 0;
param.weight_label = new int[0];
param.weight = new double[0];

// parse options
StringTokenizer st = new StringTokenizer(args);
String[] argv = new String[st.countTokens()];
for(int i=0;i<argv.length;i++)
argv[i] = st.nextToken();

// Options come in "-x value" pairs: i is advanced past the value and
// argv[i-1] holds the flag currently being processed.
for(int i=0;i<argv.length;i++)
{
if(argv[i].charAt(0) != '-') break;
if(++i>=argv.length)
{
System.err.print("unknown option\n");
break;
}
switch(argv[i-1].charAt(1))
{
case 's':
param.svm_type = atoi(argv[i]);
break;
case 't':
param.kernel_type = atoi(argv[i]);
break;
case 'd':
param.degree = atoi(argv[i]);
break;
case 'g':
param.gamma = atof(argv[i]);
break;
case 'r':
param.coef0 = atof(argv[i]);
break;
case 'n':
param.nu = atof(argv[i]);
break;
case 'm':
param.cache_size = atof(argv[i]);
break;
case 'c':
param.C = atof(argv[i]);
break;
case 'e':
param.eps = atof(argv[i]);
break;
case 'p':
param.p = atof(argv[i]);
break;
case 'h':
param.shrinking = atoi(argv[i]);
break;
case 'b':
param.probability = atoi(argv[i]);
break;
case 'w':
// "-wN weight": grow both weight arrays by one and record the pair.
++param.nr_weight;
{
int[] old = param.weight_label;
param.weight_label = new int[param.nr_weight];
System.arraycopy(old,0,param.weight_label,0,param.nr_weight-1);
}

{
double[] old = param.weight;
param.weight = new double[param.nr_weight];
System.arraycopy(old,0,param.weight,0,param.nr_weight-1);
}

param.weight_label[param.nr_weight-1] = atoi(argv[i-1].substring(2));
param.weight[param.nr_weight-1] = atof(argv[i]);
break;
default:
System.err.print("unknown option\n");
}
}

// build problem
svm_problem prob = new svm_problem();
prob.l = point_list.size();
prob.y = new double[prob.l];

if(param.kernel_type == svm_parameter.PRECOMPUTED)
{
// A precomputed kernel cannot be built from mouse clicks; do nothing.
}
else if(param.svm_type == svm_parameter.EPSILON_SVR ||
param.svm_type == svm_parameter.NU_SVR)
{
// Regression mode: single feature (screen x), target is screen y.
if(param.gamma == 0) param.gamma = 1;
prob.x = new svm_node[prob.l][1];
for(int i=0;i<prob.l;i++)
{
point p = point_list.elementAt(i);
prob.x[i][0] = new svm_node();
prob.x[i][0].index = 1;
prob.x[i][0].value = p.x;
prob.y[i] = p.y;
}

// build model & classify
svm_model model = svm.svm_train(prob, param);
svm_node[] x = new svm_node[1];
x[0] = new svm_node();
x[0].index = 1;
int[] j = new int[XLEN];

Graphics window_gc = getGraphics();
// Evaluate the fitted function at every screen column.
for (int i = 0; i < XLEN; i++)
{
x[0].value = (double) i / XLEN;
j[i] = (int)(YLEN*svm.svm_predict(model, x));
}
buffer_gc.setColor(colors[0]);
buffer_gc.drawLine(0,0,0,YLEN-1);
window_gc.setColor(colors[0]);
window_gc.drawLine(0,0,0,YLEN-1);
// Half-width of the epsilon tube, in pixels.
int p = (int)(param.p * YLEN);
for(int i=1;i<XLEN;i++)
{
// Erase the column, then draw the next curve segment.
buffer_gc.setColor(colors[0]);
buffer_gc.drawLine(i,0,i,YLEN-1);
window_gc.setColor(colors[0]);
window_gc.drawLine(i,0,i,YLEN-1);

buffer_gc.setColor(colors[5]);
window_gc.setColor(colors[5]);
buffer_gc.drawLine(i-1,j[i-1],i,j[i]);
window_gc.drawLine(i-1,j[i-1],i,j[i]);

if(param.svm_type == svm_parameter.EPSILON_SVR)
{
// Draw the +epsilon and -epsilon tube around the regression curve.
buffer_gc.setColor(colors[2]);
window_gc.setColor(colors[2]);
buffer_gc.drawLine(i-1,j[i-1]+p,i,j[i]+p);
window_gc.drawLine(i-1,j[i-1]+p,i,j[i]+p);

buffer_gc.setColor(colors[2]);
window_gc.setColor(colors[2]);
buffer_gc.drawLine(i-1,j[i-1]-p,i,j[i]-p);
window_gc.drawLine(i-1,j[i-1]-p,i,j[i]-p);
}
}
}
else
{
// Classification / one-class mode: features are (x, y), label is value.
if(param.gamma == 0) param.gamma = 0.5;
prob.x = new svm_node [prob.l][2];
for(int i=0;i<prob.l;i++)
{
point p = point_list.elementAt(i);
prob.x[i][0] = new svm_node();
prob.x[i][0].index = 1;
prob.x[i][0].value = p.x;
prob.x[i][1] = new svm_node();
prob.x[i][1].index = 2;
prob.x[i][1].value = p.y;
prob.y[i] = p.value;
}

// build model & classify
svm_model model = svm.svm_train(prob, param);
svm_node[] x = new svm_node[2];
x[0] = new svm_node();
x[1] = new svm_node();
x[0].index = 1;
x[1].index = 2;

Graphics window_gc = getGraphics();
// Color every pixel by the predicted class of its normalized position.
for (int i = 0; i < XLEN; i++)
for (int j = 0; j < YLEN ; j++) {
x[0].value = (double) i / XLEN;
x[1].value = (double) j / YLEN;
double d = svm.svm_predict(model, x);
// One-class predicts +/-1; map the negative side to color index 2.
if (param.svm_type == svm_parameter.ONE_CLASS && d<0) d=2;
buffer_gc.setColor(colors[(int)d]);
window_gc.setColor(colors[(int)d]);
buffer_gc.drawLine(i,j,i,j);
window_gc.drawLine(i,j,i,j);
}
}

// Redraw the training points on top of the freshly painted regions.
draw_all_points();
}

// "Clear" handler: just delegates to clear_all().
void button_clear_clicked()
{
clear_all();
}

// "Save" handler: write the current points to a file in libsvm format.
// Regression types save "<y> 1:<x>"; all others save "<label> 1:<x> 2:<y>".
// The SVM type is recovered from the "-s" option in the option string.
void button_save_clicked(String args)
{
FileDialog dialog = new FileDialog(new Frame(),"Save",FileDialog.SAVE);
dialog.setVisible(true);
// BUGFIX: getFile() is null when the dialog is cancelled.  The original
// code tested the concatenated path for null, but "dir" + null yields
// the string "...null", so the check could never fire and a bogus file
// was created on cancel.
if (dialog.getFile() == null) return;
String filename = dialog.getDirectory() + dialog.getFile();
try {
DataOutputStream fp = new DataOutputStream(new BufferedOutputStream(new FileOutputStream(filename)));
try {
int svm_type = svm_parameter.C_SVC;
int svm_type_idx = args.indexOf("-s ");
if(svm_type_idx != -1)
{
StringTokenizer svm_str_st = new StringTokenizer(args.substring(svm_type_idx+2).trim());
svm_type = atoi(svm_str_st.nextToken());
}

int n = point_list.size();
if(svm_type == svm_parameter.EPSILON_SVR || svm_type == svm_parameter.NU_SVR)
{
// Regression: the single feature is x, the target is y.
for(int i=0;i<n;i++)
{
point p = point_list.elementAt(i);
fp.writeBytes(p.y+" 1:"+p.x+"\n");
}
}
else
{
// Classification: label is the class value, features are (x, y).
for(int i=0;i<n;i++)
{
point p = point_list.elementAt(i);
fp.writeBytes(p.value+" 1:"+p.x+" 2:"+p.y+"\n");
}
}
} finally {
// BUGFIX: close the stream even when a write throws, so the file
// handle is not leaked.
fp.close();
}
} catch (IOException e) { System.err.print(e); }
}

// "Load" handler: read points from a libsvm-format file.  Lines with
// five tokens ("<label> 1:<x> 2:<y>") are classification points; lines
// with three tokens ("<y> 1:<x>") are regression points (labelled with
// the currently selected class).  Stops at the first malformed line.
void button_load_clicked()
{
FileDialog dialog = new FileDialog(new Frame(),"Load",FileDialog.LOAD);
dialog.setVisible(true);
// BUGFIX: getFile() is null when the dialog is cancelled.  The original
// null test on the concatenated path could never fire ("dir" + null is
// the non-null string "...null"), so cancel wiped the canvas and then
// failed to open a file named "...null".
if (dialog.getFile() == null) return;
String filename = dialog.getDirectory() + dialog.getFile();
clear_all();
try {
BufferedReader fp = new BufferedReader(new FileReader(filename));
try {
String line;
while((line = fp.readLine()) != null)
{
StringTokenizer st = new StringTokenizer(line," \t\n\r\f:");
if(st.countTokens() == 5)
{
// classification line: <label> 1:<x> 2:<y>
byte value = (byte)atoi(st.nextToken());
st.nextToken();
double x = atof(st.nextToken());
st.nextToken();
double y = atof(st.nextToken());
point_list.addElement(new point(x,y,value));
}
else if(st.countTokens() == 3)
{
// regression line: <y> 1:<x>
double y = atof(st.nextToken());
st.nextToken();
double x = atof(st.nextToken());
point_list.addElement(new point(x,y,current_value));
}else
break;
}
} finally {
// BUGFIX: close the reader even if parsing throws.
fp.close();
}
} catch (IOException e) { System.err.print(e); }
draw_all_points();
}
// Record a mouse press inside the drawing area as a new training point
// (coordinates normalized to [0,1)) and paint it immediately.
protected void processMouseEvent(MouseEvent e)
{
if(e.getID() != MouseEvent.MOUSE_PRESSED) return;
int mx = e.getX();
int my = e.getY();
// Ignore clicks outside the canvas (e.g. on the control strip).
if(mx >= XLEN || my >= YLEN) return;
point p = new point((double)mx/XLEN, (double)my/YLEN, current_value);
point_list.addElement(p);
draw_point(p);
}

// Blit the offscreen buffer to the screen, creating it lazily (filled
// with the background color) on the first paint.
public void paint(Graphics g)
{
// create buffer first time
if(buffer == null) {
buffer = this.createImage(XLEN,YLEN);
buffer_gc = buffer.getGraphics();
buffer_gc.setColor(colors[0]);
buffer_gc.fillRect(0,0,XLEN,YLEN);
}
g.drawImage(buffer,0,0,this);
}

// Preferred size: the canvas (XLEN x YLEN) plus 50 px for the controls.
public Dimension getPreferredSize() { return new Dimension(XLEN,YLEN+50); }

// Delegate to setSize(int,int) so both overloads share the resize logic.
public void setSize(Dimension d) { setSize(d.width,d.height); }
// Resize: the bottom 50 px are reserved for the controls, so the canvas
// becomes w x (h-50).  The old buffer contents are stale after a resize,
// hence clear_all().
public void setSize(int w,int h) {
super.setSize(w,h);
XLEN = w;
YLEN = h-50;
clear_all();
}

// Stand-alone entry point: host the applet inside a plain AWT frame
// (500x500 canvas plus the 50 px control strip).
public static void main(String[] argv)
{
new AppletFrame("svm_toy",new svm_toy(),500,500+50);
}
}

// Minimal AWT Frame that hosts an Applet so it can also run as a
// desktop application (see svm_toy.main).
class AppletFrame extends Frame {
AppletFrame(String title, Applet applet, int width, int height)
{
super(title);
// Closing the window terminates the whole process.
this.addWindowListener(new WindowAdapter() {
public void windowClosing(WindowEvent e) {
System.exit(0);
}
});
// Drive the applet life cycle manually: init, size, then start.
applet.init();
applet.setSize(width,height);
applet.start();
this.add(applet);
this.pack();
this.setVisible(true);
}
}

+ 318
- 0
gklearn/gedlib/lib/libsvm.3.22/java/svm_train.java View File

@@ -0,0 +1,318 @@
import libsvm.*;
import java.io.*;
import java.util.*;

// Command-line trainer for libsvm: parses options, reads a problem file
// in svmlight format, then either runs n-fold cross validation or trains
// a model and saves it to disk.
class svm_train {
private svm_parameter param; // set by parse_command_line
private svm_problem prob; // set by read_problem
private svm_model model;
private String input_file_name; // set by parse_command_line
private String model_file_name; // set by parse_command_line
private String error_msg;
private int cross_validation;
private int nr_fold;

// Print sink that swallows everything; installed for "-q" (quiet mode).
private static svm_print_interface svm_print_null = new svm_print_interface()
{
public void print(String s) {}
};

// Print the usage text and terminate with a nonzero exit status.
private static void exit_with_help()
{
System.out.print(
"Usage: svm_train [options] training_set_file [model_file]\n"
+"options:\n"
+"-s svm_type : set type of SVM (default 0)\n"
+" 0 -- C-SVC (multi-class classification)\n"
+" 1 -- nu-SVC (multi-class classification)\n"
+" 2 -- one-class SVM\n"
+" 3 -- epsilon-SVR (regression)\n"
+" 4 -- nu-SVR (regression)\n"
+"-t kernel_type : set type of kernel function (default 2)\n"
+" 0 -- linear: u'*v\n"
+" 1 -- polynomial: (gamma*u'*v + coef0)^degree\n"
+" 2 -- radial basis function: exp(-gamma*|u-v|^2)\n"
+" 3 -- sigmoid: tanh(gamma*u'*v + coef0)\n"
+" 4 -- precomputed kernel (kernel values in training_set_file)\n"
+"-d degree : set degree in kernel function (default 3)\n"
+"-g gamma : set gamma in kernel function (default 1/num_features)\n"
+"-r coef0 : set coef0 in kernel function (default 0)\n"
+"-c cost : set the parameter C of C-SVC, epsilon-SVR, and nu-SVR (default 1)\n"
+"-n nu : set the parameter nu of nu-SVC, one-class SVM, and nu-SVR (default 0.5)\n"
+"-p epsilon : set the epsilon in loss function of epsilon-SVR (default 0.1)\n"
+"-m cachesize : set cache memory size in MB (default 100)\n"
+"-e epsilon : set tolerance of termination criterion (default 0.001)\n"
+"-h shrinking : whether to use the shrinking heuristics, 0 or 1 (default 1)\n"
+"-b probability_estimates : whether to train a SVC or SVR model for probability estimates, 0 or 1 (default 0)\n"
+"-wi weight : set the parameter C of class i to weight*C, for C-SVC (default 1)\n"
+"-v n : n-fold cross validation mode\n"
+"-q : quiet mode (no outputs)\n"
);
System.exit(1);
}

// Run nr_fold-fold cross validation on prob and print either MSE and
// squared correlation (regression) or accuracy (classification).
private void do_cross_validation()
{
int i;
int total_correct = 0;
double total_error = 0;
double sumv = 0, sumy = 0, sumvv = 0, sumyy = 0, sumvy = 0;
double[] target = new double[prob.l];

svm.svm_cross_validation(prob,param,nr_fold,target);
if(param.svm_type == svm_parameter.EPSILON_SVR ||
param.svm_type == svm_parameter.NU_SVR)
{
// Accumulate sums for MSE and the squared correlation coefficient.
for(i=0;i<prob.l;i++)
{
double y = prob.y[i];
double v = target[i];
total_error += (v-y)*(v-y);
sumv += v;
sumy += y;
sumvv += v*v;
sumyy += y*y;
sumvy += v*y;
}
System.out.print("Cross Validation Mean squared error = "+total_error/prob.l+"\n");
System.out.print("Cross Validation Squared correlation coefficient = "+
((prob.l*sumvy-sumv*sumy)*(prob.l*sumvy-sumv*sumy))/
((prob.l*sumvv-sumv*sumv)*(prob.l*sumyy-sumy*sumy))+"\n"
);
}
else
{
for(i=0;i<prob.l;i++)
if(target[i] == prob.y[i])
++total_correct;
System.out.print("Cross Validation Accuracy = "+100.0*total_correct/prob.l+"%\n");
}
}
// Main driver: parse options, read the problem, validate parameters,
// then cross-validate or train-and-save.
private void run(String argv[]) throws IOException
{
parse_command_line(argv);
read_problem();
error_msg = svm.svm_check_parameter(prob,param);

if(error_msg != null)
{
System.err.print("ERROR: "+error_msg+"\n");
System.exit(1);
}

if(cross_validation != 0)
{
do_cross_validation();
}
else
{
model = svm.svm_train(prob,param);
svm.svm_save_model(model_file_name,model);
}
}

public static void main(String argv[]) throws IOException
{
svm_train t = new svm_train();
t.run(argv);
}

// Parse a double, rejecting NaN/Infinity (exits the program on bad input).
private static double atof(String s)
{
double d = Double.valueOf(s).doubleValue();
if (Double.isNaN(d) || Double.isInfinite(d))
{
System.err.print("NaN or Infinity in input\n");
System.exit(1);
}
return(d);
}

private static int atoi(String s)
{
return Integer.parseInt(s);
}

// Fill param/cross_validation/nr_fold and the file names from argv.
// Options come in "-x value" pairs; anything not starting with '-'
// terminates option parsing and is taken as the input file name.
private void parse_command_line(String argv[])
{
int i;
svm_print_interface print_func = null; // default printing to stdout

param = new svm_parameter();
// default values
param.svm_type = svm_parameter.C_SVC;
param.kernel_type = svm_parameter.RBF;
param.degree = 3;
param.gamma = 0; // 1/num_features
param.coef0 = 0;
param.nu = 0.5;
param.cache_size = 100;
param.C = 1;
param.eps = 1e-3;
param.p = 0.1;
param.shrinking = 1;
param.probability = 0;
param.nr_weight = 0;
param.weight_label = new int[0];
param.weight = new double[0];
cross_validation = 0;

// parse options
for(i=0;i<argv.length;i++)
{
if(argv[i].charAt(0) != '-') break;
if(++i>=argv.length)
exit_with_help();
switch(argv[i-1].charAt(1))
{
case 's':
param.svm_type = atoi(argv[i]);
break;
case 't':
param.kernel_type = atoi(argv[i]);
break;
case 'd':
param.degree = atoi(argv[i]);
break;
case 'g':
param.gamma = atof(argv[i]);
break;
case 'r':
param.coef0 = atof(argv[i]);
break;
case 'n':
param.nu = atof(argv[i]);
break;
case 'm':
param.cache_size = atof(argv[i]);
break;
case 'c':
param.C = atof(argv[i]);
break;
case 'e':
param.eps = atof(argv[i]);
break;
case 'p':
param.p = atof(argv[i]);
break;
case 'h':
param.shrinking = atoi(argv[i]);
break;
case 'b':
param.probability = atoi(argv[i]);
break;
case 'q':
// "-q" takes no value: undo the pair-advance of i.
print_func = svm_print_null;
i--;
break;
case 'v':
cross_validation = 1;
nr_fold = atoi(argv[i]);
if(nr_fold < 2)
{
System.err.print("n-fold cross validation: n must >= 2\n");
exit_with_help();
}
break;
case 'w':
// "-wN weight": grow both weight arrays by one and record the pair.
++param.nr_weight;
{
int[] old = param.weight_label;
param.weight_label = new int[param.nr_weight];
System.arraycopy(old,0,param.weight_label,0,param.nr_weight-1);
}

{
double[] old = param.weight;
param.weight = new double[param.nr_weight];
System.arraycopy(old,0,param.weight,0,param.nr_weight-1);
}

param.weight_label[param.nr_weight-1] = atoi(argv[i-1].substring(2));
param.weight[param.nr_weight-1] = atof(argv[i]);
break;
default:
System.err.print("Unknown option: " + argv[i-1] + "\n");
exit_with_help();
}
}

svm.svm_set_print_string_function(print_func);

// determine filenames

if(i>=argv.length)
exit_with_help();

input_file_name = argv[i];

if(i<argv.length-1)
model_file_name = argv[i+1];
else
{
// Default model name: basename of the input file plus ".model".
int p = argv[i].lastIndexOf('/');
++p; // whew...
model_file_name = argv[i].substring(p)+".model";
}
}

// read in a problem (in svmlight format)

// Load input_file_name into prob; also sets gamma to 1/max_index when
// it was left at 0, and sanity-checks precomputed-kernel files.
private void read_problem() throws IOException
{
BufferedReader fp = new BufferedReader(new FileReader(input_file_name));
Vector<Double> vy = new Vector<Double>();
Vector<svm_node[]> vx = new Vector<svm_node[]>();
int max_index = 0;

while(true)
{
String line = fp.readLine();
if(line == null) break;

StringTokenizer st = new StringTokenizer(line," \t\n\r\f:");

// First token is the label; the rest are index:value pairs.
vy.addElement(atof(st.nextToken()));
int m = st.countTokens()/2;
svm_node[] x = new svm_node[m];
for(int j=0;j<m;j++)
{
x[j] = new svm_node();
x[j].index = atoi(st.nextToken());
x[j].value = atof(st.nextToken());
}
// Indices are ascending, so the last one is the row maximum.
if(m>0) max_index = Math.max(max_index, x[m-1].index);
vx.addElement(x);
}

prob = new svm_problem();
prob.l = vy.size();
prob.x = new svm_node[prob.l][];
for(int i=0;i<prob.l;i++)
prob.x[i] = vx.elementAt(i);
prob.y = new double[prob.l];
for(int i=0;i<prob.l;i++)
prob.y[i] = vy.elementAt(i);

if(param.gamma == 0 && max_index > 0)
param.gamma = 1.0/max_index;

if(param.kernel_type == svm_parameter.PRECOMPUTED)
for(int i=0;i<prob.l;i++)
{
if (prob.x[i][0].index != 0)
{
System.err.print("Wrong kernel matrix: first column must be 0:sample_serial_number\n");
System.exit(1);
}
if ((int)prob.x[i][0].value <= 0 || (int)prob.x[i][0].value > max_index)
{
System.err.print("Wrong input format: sample_serial_number out of range\n");
System.exit(1);
}
}

fp.close();
}
}

+ 1
- 0
gklearn/gedlib/lib/libsvm.3.22/java/test_applet.html View File

@@ -0,0 +1 @@
<APPLET code="svm_toy.class" archive="libsvm.jar" width=300 height=350></APPLET>

BIN
gklearn/gedlib/lib/libsvm.3.22/libsvm.so View File


+ 45
- 0
gklearn/gedlib/lib/libsvm.3.22/matlab/Makefile View File

@@ -0,0 +1,45 @@
# This Makefile is used under Linux

MATLABDIR ?= /usr/local/matlab
# for Mac
# MATLABDIR ?= /opt/local/matlab

CXX ?= g++
#CXX = g++-4.1
CFLAGS = -Wall -Wconversion -O3 -fPIC -I$(MATLABDIR)/extern/include -I..

MEX = $(MATLABDIR)/bin/mex
MEX_OPTION = CC="$(CXX)" CXX="$(CXX)" CFLAGS="$(CFLAGS)" CXXFLAGS="$(CFLAGS)"
# comment the following line if you use MATLAB on 32-bit computer
MEX_OPTION += -largeArrayDims
# mexext prints the platform-specific MEX file extension (e.g. mexa64)
MEX_EXT = $(shell $(MATLABDIR)/bin/mexext)

all: matlab

matlab: binary

octave:
	@echo "please type make under Octave"

# The four MEX binaries built from the C sources plus the shared svm.o
binary: svmpredict.$(MEX_EXT) svmtrain.$(MEX_EXT) libsvmread.$(MEX_EXT) libsvmwrite.$(MEX_EXT)

svmpredict.$(MEX_EXT): svmpredict.c ../svm.h ../svm.o svm_model_matlab.o
	$(MEX) $(MEX_OPTION) svmpredict.c ../svm.o svm_model_matlab.o

svmtrain.$(MEX_EXT): svmtrain.c ../svm.h ../svm.o svm_model_matlab.o
	$(MEX) $(MEX_OPTION) svmtrain.c ../svm.o svm_model_matlab.o

libsvmread.$(MEX_EXT): libsvmread.c
	$(MEX) $(MEX_OPTION) libsvmread.c

libsvmwrite.$(MEX_EXT): libsvmwrite.c
	$(MEX) $(MEX_OPTION) libsvmwrite.c

svm_model_matlab.o: svm_model_matlab.c ../svm.h
	$(CXX) $(CFLAGS) -c svm_model_matlab.c

# svm.o is owned by the parent directory's Makefile
../svm.o: ../svm.cpp ../svm.h
	make -C .. svm.o

clean:
	rm -f *~ *.o *.mex* *.obj ../svm.o

+ 245
- 0
gklearn/gedlib/lib/libsvm.3.22/matlab/README View File

@@ -0,0 +1,245 @@
-----------------------------------------
--- MATLAB/OCTAVE interface of LIBSVM ---
-----------------------------------------

Table of Contents
=================

- Introduction
- Installation
- Usage
- Returned Model Structure
- Other Utilities
- Examples
- Additional Information


Introduction
============

This tool provides a simple interface to LIBSVM, a library for support vector
machines (http://www.csie.ntu.edu.tw/~cjlin/libsvm). It is very easy to use as
the usage and the way of specifying parameters are the same as that of LIBSVM.

Installation
============

On Windows systems, pre-built binary files are already in the
directory '..\windows', so no need to conduct installation. Now we
provide binary files only for 64bit MATLAB on Windows. If you would
like to re-build the package, please rely on the following steps.

We recommend using make.m on both MATLAB and OCTAVE. Just type 'make'
to build 'libsvmread.mex', 'libsvmwrite.mex', 'svmtrain.mex', and
'svmpredict.mex'.

On MATLAB or Octave:

>> make

If make.m does not work on MATLAB (especially for Windows), try 'mex
-setup' to choose a suitable compiler for mex. Make sure your compiler
is accessible and workable. Then type 'make' to start the
installation.

Example:

matlab>> mex -setup
(ps: MATLAB will show the following messages to setup default compiler.)
Please choose your compiler for building external interface (MEX) files:
Would you like mex to locate installed compilers [y]/n? y
Select a compiler:
[1] Microsoft Visual C/C++ version 7.1 in C:\Program Files\Microsoft Visual Studio
[0] None
Compiler: 1
Please verify your choices:
Compiler: Microsoft Visual C/C++ 7.1
Location: C:\Program Files\Microsoft Visual Studio
Are these correct?([y]/n): y

matlab>> make

On Unix systems, if neither make.m nor 'mex -setup' works, please use
Makefile and type 'make' in a command window. Note that we assume
your MATLAB is installed in '/usr/local/matlab'. If not, please change
MATLABDIR in Makefile.

Example:
linux> make

To use octave, type 'make octave':

Example:
linux> make octave

For a list of supported/compatible compilers for MATLAB, please check
the following page:

http://www.mathworks.com/support/compilers/current_release/

Usage
=====

matlab> model = svmtrain(training_label_vector, training_instance_matrix [, 'libsvm_options']);

-training_label_vector:
An m by 1 vector of training labels (type must be double).
-training_instance_matrix:
An m by n matrix of m training instances with n features.
It can be dense or sparse (type must be double).
-libsvm_options:
A string of training options in the same format as that of LIBSVM.

matlab> [predicted_label, accuracy, decision_values/prob_estimates] = svmpredict(testing_label_vector, testing_instance_matrix, model [, 'libsvm_options']);
matlab> [predicted_label] = svmpredict(testing_label_vector, testing_instance_matrix, model [, 'libsvm_options']);

-testing_label_vector:
An m by 1 vector of prediction labels. If labels of test
data are unknown, simply use any random values. (type must be double)
-testing_instance_matrix:
An m by n matrix of m testing instances with n features.
It can be dense or sparse. (type must be double)
-model:
The output of svmtrain.
-libsvm_options:
A string of testing options in the same format as that of LIBSVM.

Returned Model Structure
========================

The 'svmtrain' function returns a model which can be used for future
prediction. It is a structure and is organized as [Parameters, nr_class,
totalSV, rho, Label, ProbA, ProbB, nSV, sv_coef, SVs]:

-Parameters: parameters
-nr_class: number of classes; = 2 for regression/one-class svm
-totalSV: total #SV
-rho: -b of the decision function(s) wx+b
-Label: label of each class; empty for regression/one-class SVM
-sv_indices: values in [1,...,num_training_data] to indicate SVs in the training set
-ProbA: pairwise probability information; empty if -b 0 or in one-class SVM
-ProbB: pairwise probability information; empty if -b 0 or in one-class SVM
-nSV: number of SVs for each class; empty for regression/one-class SVM
-sv_coef: coefficients for SVs in decision functions
-SVs: support vectors

If you do not use the option '-b 1', ProbA and ProbB are empty
matrices. If the '-v' option is specified, cross validation is
conducted and the returned model is just a scalar: cross-validation
accuracy for classification and mean-squared error for regression.

More details about this model can be found in LIBSVM FAQ
(http://www.csie.ntu.edu.tw/~cjlin/libsvm/faq.html) and LIBSVM
implementation document
(http://www.csie.ntu.edu.tw/~cjlin/papers/libsvm.pdf).

Result of Prediction
====================

The function 'svmpredict' has three outputs. The first one,
predicted_label, is a vector of predicted labels. The second output,
accuracy, is a vector including accuracy (for classification), mean
squared error, and squared correlation coefficient (for regression).
The third is a matrix containing decision values or probability
estimates (if '-b 1' is specified). If k is the number of classes
in training data, for decision values, each row includes results of
predicting k(k-1)/2 binary-class SVMs. For classification, k = 1 is a
special case. Decision value +1 is returned for each testing instance,
instead of an empty vector. For probabilities, each row contains k values
indicating the probability that the testing instance is in each class.
Note that the order of classes here is the same as 'Label' field
in the model structure.

Other Utilities
===============

A matlab function libsvmread reads files in LIBSVM format:

[label_vector, instance_matrix] = libsvmread('data.txt');

Two outputs are labels and instances, which can then be used as inputs
of svmtrain or svmpredict.

A matlab function libsvmwrite writes Matlab matrix to a file in LIBSVM format:

libsvmwrite('data.txt', label_vector, instance_matrix)

The instance_matrix must be a sparse matrix. (type must be double)
For 32bit and 64bit MATLAB on Windows, pre-built binary files are ready
in the directory `..\windows', but in future releases, we will only
include 64bit MATLAB binary files.

These codes are prepared by Rong-En Fan and Kai-Wei Chang from National
Taiwan University.

Examples
========

Train and test on the provided data heart_scale:

matlab> [heart_scale_label, heart_scale_inst] = libsvmread('../heart_scale');
matlab> model = svmtrain(heart_scale_label, heart_scale_inst, '-c 1 -g 0.07');
matlab> [predict_label, accuracy, dec_values] = svmpredict(heart_scale_label, heart_scale_inst, model); % test the training data

For probability estimates, you need '-b 1' for training and testing:

matlab> [heart_scale_label, heart_scale_inst] = libsvmread('../heart_scale');
matlab> model = svmtrain(heart_scale_label, heart_scale_inst, '-c 1 -g 0.07 -b 1');
matlab> [heart_scale_label, heart_scale_inst] = libsvmread('../heart_scale');
matlab> [predict_label, accuracy, prob_estimates] = svmpredict(heart_scale_label, heart_scale_inst, model, '-b 1');

To use precomputed kernel, you must include sample serial number as
the first column of the training and testing data (assume your kernel
matrix is K, # of instances is n):

matlab> K1 = [(1:n)', K]; % include sample serial number as first column
matlab> model = svmtrain(label_vector, K1, '-t 4');
matlab> [predict_label, accuracy, dec_values] = svmpredict(label_vector, K1, model); % test the training data

We give the following detailed example by splitting heart_scale into
150 training and 120 testing data. Constructing a linear kernel
matrix and then using the precomputed kernel gives exactly the same
testing error as using the LIBSVM built-in linear kernel.

matlab> [heart_scale_label, heart_scale_inst] = libsvmread('../heart_scale');
matlab>
matlab> % Split Data
matlab> train_data = heart_scale_inst(1:150,:);
matlab> train_label = heart_scale_label(1:150,:);
matlab> test_data = heart_scale_inst(151:270,:);
matlab> test_label = heart_scale_label(151:270,:);
matlab>
matlab> % Linear Kernel
matlab> model_linear = svmtrain(train_label, train_data, '-t 0');
matlab> [predict_label_L, accuracy_L, dec_values_L] = svmpredict(test_label, test_data, model_linear);
matlab>
matlab> % Precomputed Kernel
matlab> model_precomputed = svmtrain(train_label, [(1:150)', train_data*train_data'], '-t 4');
matlab> [predict_label_P, accuracy_P, dec_values_P] = svmpredict(test_label, [(1:120)', test_data*train_data'], model_precomputed);
matlab>
matlab> accuracy_L % Display the accuracy using linear kernel
matlab> accuracy_P % Display the accuracy using precomputed kernel

Note that for testing, you can put anything in the
testing_label_vector. For more details of precomputed kernels, please
read the section ``Precomputed Kernels'' in the README of the LIBSVM
package.

Additional Information
======================

This interface was initially written by Jun-Cheng Chen, Kuan-Jen Peng,
Chih-Yuan Yang and Chih-Huai Cheng from Department of Computer
Science, National Taiwan University. The current version was prepared
by Rong-En Fan and Ting-Fan Wu. If you find this tool useful, please
cite LIBSVM as follows

Chih-Chung Chang and Chih-Jen Lin, LIBSVM : a library for support
vector machines. ACM Transactions on Intelligent Systems and
Technology, 2:27:1--27:27, 2011. Software available at
http://www.csie.ntu.edu.tw/~cjlin/libsvm

For any question, please contact Chih-Jen Lin <cjlin@csie.ntu.edu.tw>,
or check the FAQ page:

http://www.csie.ntu.edu.tw/~cjlin/libsvm/faq.html#/Q10:_MATLAB_interface

+ 212
- 0
gklearn/gedlib/lib/libsvm.3.22/matlab/libsvmread.c View File

@@ -0,0 +1,212 @@
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <ctype.h>
#include <errno.h>

#include "mex.h"

#ifdef MX_API_VER
#if MX_API_VER < 0x07030000
typedef int mwIndex;
#endif
#endif
#ifndef max
#define max(x,y) (((x)>(y))?(x):(y))
#endif
#ifndef min
#define min(x,y) (((x)<(y))?(x):(y))
#endif

/* Print usage for the libsvmread MEX entry point. */
void exit_with_help()
{
mexPrintf(
"Usage: [label_vector, instance_matrix] = libsvmread('filename');\n"
);
}

/* Fill every requested output with an empty matrix so MATLAB does not
   error on unassigned outputs when we bail out early. */
static void fake_answer(int nlhs, mxArray *plhs[])
{
int i;
for(i=0;i<nlhs;i++)
plhs[i] = mxCreateDoubleMatrix(0, 0, mxREAL);
}

/* Shared line buffer for readline(); grows geometrically as needed. */
static char *line;
static int max_line_len;

/* Read one '\n'-terminated line from input into the global `line`
   buffer, doubling max_line_len until the whole line fits.
   Returns NULL at end of file. */
static char* readline(FILE *input)
{
int len;
if(fgets(line,max_line_len,input) == NULL)
return NULL;

/* No '\n' yet means the line was longer than the buffer: grow it and
   keep appending from where the previous read stopped. */
while(strrchr(line,'\n') == NULL)
{
max_line_len *= 2;
line = (char *) realloc(line, max_line_len);
len = (int) strlen(line);
if(fgets(line+len,max_line_len-len,input) == NULL)
break;
}
return line;
}

// read in a problem (in libsvm format)
/* Parse a libsvm-format file into plhs[0] (dense label vector, l x 1)
   and plhs[1] (sparse instance matrix, l x max_index).  On any error the
   outputs are replaced with empty matrices via fake_answer(). */
void read_problem(const char *filename, int nlhs, mxArray *plhs[])
{
int max_index, min_index, inst_max_index;
size_t elements, k, i, l=0;
FILE *fp = fopen(filename,"r");
char *endptr;
mwIndex *ir, *jc;
double *labels, *samples;

if(fp == NULL)
{
mexPrintf("can't open input file %s\n",filename);
fake_answer(nlhs, plhs);
return;
}

max_line_len = 1024;
line = (char *) malloc(max_line_len*sizeof(char));

/* First pass: count instances (l), nonzeros (elements) and the
   feature-index range, validating that indices are strictly
   increasing within each line. */
max_index = 0;
min_index = 1; // our index starts from 1
elements = 0;
while(readline(fp) != NULL)
{
char *idx, *val;
// features
int index = 0;

inst_max_index = -1; // strtol gives 0 if wrong format, and precomputed kernel has <index> start from 0
strtok(line," \t"); // label
while (1)
{
idx = strtok(NULL,":"); // index:value
val = strtok(NULL," \t");
if(val == NULL)
break;

errno = 0;
index = (int) strtol(idx,&endptr,10);
if(endptr == idx || errno != 0 || *endptr != '\0' || index <= inst_max_index)
{
mexPrintf("Wrong input format at line %d\n",l+1);
fake_answer(nlhs, plhs);
return;
}
else
inst_max_index = index;

min_index = min(min_index, index);
elements++;
}
max_index = max(max_index, inst_max_index);
l++;
}
rewind(fp);

// y
plhs[0] = mxCreateDoubleMatrix(l, 1, mxREAL);
// x^T
/* Built transposed (features x instances) because MATLAB sparse
   matrices are column-major; transposed back at the end. */
if (min_index <= 0)
plhs[1] = mxCreateSparse(max_index-min_index+1, l, elements, mxREAL);
else
plhs[1] = mxCreateSparse(max_index, l, elements, mxREAL);

labels = mxGetPr(plhs[0]);
samples = mxGetPr(plhs[1]);
ir = mxGetIr(plhs[1]);
jc = mxGetJc(plhs[1]);

/* Second pass: fill labels and the sparse structure; k counts
   nonzeros written so far. */
k=0;
for(i=0;i<l;i++)
{
char *idx, *val, *label;
jc[i] = k;

readline(fp);

label = strtok(line," \t\n");
if(label == NULL)
{
mexPrintf("Empty line at line %d\n",i+1);
fake_answer(nlhs, plhs);
return;
}
labels[i] = strtod(label,&endptr);
if(endptr == label || *endptr != '\0')
{
mexPrintf("Wrong input format at line %d\n",i+1);
fake_answer(nlhs, plhs);
return;
}

// features
while(1)
{
idx = strtok(NULL,":");
val = strtok(NULL," \t");
if(val == NULL)
break;

ir[k] = (mwIndex) (strtol(idx,&endptr,10) - min_index); // precomputed kernel has <index> start from 0

errno = 0;
samples[k] = strtod(val,&endptr);
if (endptr == val || errno != 0 || (*endptr != '\0' && !isspace(*endptr)))
{
mexPrintf("Wrong input format at line %d\n",i+1);
fake_answer(nlhs, plhs);
return;
}
++k;
}
}
jc[l] = k;

fclose(fp);
free(line);

/* Transpose back to instances x features via MATLAB's own transpose. */
{
mxArray *rhs[1], *lhs[1];
rhs[0] = plhs[1];
if(mexCallMATLAB(1, lhs, 1, rhs, "transpose"))
{
mexPrintf("Error: cannot transpose problem\n");
fake_answer(nlhs, plhs);
return;
}
plhs[1] = lhs[0];
}
}

/* MEX gateway for libsvmread: expects exactly one input (the filename)
   and two outputs (label vector, instance matrix). */
void mexFunction( int nlhs, mxArray *plhs[],
int nrhs, const mxArray *prhs[] )
{
char filename[256];

if(nrhs != 1 || nlhs != 2)
{
exit_with_help();
fake_answer(nlhs, plhs);
return;
}

/* BUGFIX: the original tested `filename == NULL` after mxGetString,
   but `filename` is a stack array whose address is never NULL, so the
   check was dead code.  mxGetString returns nonzero on failure (e.g. a
   non-string argument or a name longer than the buffer), so test that
   instead, and set empty outputs before bailing out. */
if(mxGetString(prhs[0], filename, mxGetN(prhs[0]) + 1) != 0)
{
mexPrintf("Error: could not read filename\n");
fake_answer(nlhs, plhs);
return;
}

read_problem(filename, nlhs, plhs);

return;
}


+ 119
- 0
gklearn/gedlib/lib/libsvm.3.22/matlab/libsvmwrite.c View File

@@ -0,0 +1,119 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "mex.h"

#ifdef MX_API_VER
#if MX_API_VER < 0x07030000
typedef int mwIndex;
#endif
#endif

/* Print usage for the libsvmwrite MEX entry point. */
void exit_with_help()
{
mexPrintf(
"Usage: libsvmwrite('filename', label_vector, instance_matrix);\n"
);
}

/* Fill every requested output with an empty matrix so MATLAB does not
   error on unassigned outputs when we bail out early. */
static void fake_answer(int nlhs, mxArray *plhs[])
{
int i;
for(i=0;i<nlhs;i++)
plhs[i] = mxCreateDoubleMatrix(0, 0, mxREAL);
}

/* Write a label vector and a sparse instance matrix to `filename` in
   libsvm format ("<label> <index+1>:<value> ...", one instance per
   line).  Prints a message and returns on any error. */
void libsvmwrite(const char *filename, const mxArray *label_vec, const mxArray *instance_mat)
{
FILE *fp = fopen(filename,"w");
mwIndex *ir, *jc, k, low, high;
size_t i, l, label_vector_row_num;
double *samples, *labels;
mxArray *instance_mat_col; // instance sparse matrix in column format

if(fp ==NULL)
{
mexPrintf("can't open output file %s\n",filename);
return;
}

// transpose instance matrix
/* MATLAB sparse storage is column-major, so transpose once up front to
   make each instance a contiguous column. */
{
mxArray *prhs[1], *plhs[1];
prhs[0] = mxDuplicateArray(instance_mat);
if(mexCallMATLAB(1, plhs, 1, prhs, "transpose"))
{
mexPrintf("Error: cannot transpose instance matrix\n");
return;
}
instance_mat_col = plhs[0];
mxDestroyArray(prhs[0]);
}

// the number of instance
l = mxGetN(instance_mat_col);
label_vector_row_num = mxGetM(label_vec);

if(label_vector_row_num!=l)
{
mexPrintf("Length of label vector does not match # of instances.\n");
return;
}

// each column is one instance
labels = mxGetPr(label_vec);
samples = mxGetPr(instance_mat_col);
ir = mxGetIr(instance_mat_col);
jc = mxGetJc(instance_mat_col);

for(i=0;i<l;i++)
{
fprintf(fp,"%g", labels[i]);

/* jc[i]..jc[i+1] delimits the nonzeros of column i; ir[k] is the
   0-based row (feature) index, printed 1-based. */
low = jc[i], high = jc[i+1];
for(k=low;k<high;k++)
fprintf(fp," %lu:%g", (size_t)ir[k]+1, samples[k]);

fprintf(fp,"\n");
}

fclose(fp);
return;
}

/*
 * MEX entry point for libsvmwrite.
 * Usage: libsvmwrite('filename', label_vector, instance_matrix)
 * No outputs are produced; argument/type errors are reported on the
 * console. The instance matrix must be sparse and double, the label
 * vector double.
 */
void mexFunction( int nlhs, mxArray *plhs[],
		int nrhs, const mxArray *prhs[] )
{
	if(nlhs > 0)
	{
		exit_with_help();
		fake_answer(nlhs, plhs);
		return;
	}
	// Transform the input Matrix to libsvm format
	if(nrhs == 3)
	{
		char filename[256];
		if(!mxIsDouble(prhs[1]) || !mxIsDouble(prhs[2]))
		{
			mexPrintf("Error: label vector and instance matrix must be double\n");
			return;
		}
		// Bound by the destination size: the original passed
		// mxGetN(prhs[0])+1, which overflows filename[] for long paths.
		mxGetString(prhs[0], filename, sizeof(filename));

		if(mxIsSparse(prhs[2]))
			libsvmwrite(filename, prhs[1], prhs[2]);
		else
		{
			mexPrintf("Instance_matrix must be sparse\n");
			return;
		}
	}
	else
	{
		exit_with_help();
		return;
	}
}

+ 22
- 0
gklearn/gedlib/lib/libsvm.3.22/matlab/make.m View File

@@ -0,0 +1,22 @@
% This make.m is for MATLAB and OCTAVE under Windows, Mac, and Unix
% Builds the four libsvm MEX binaries (libsvmread, libsvmwrite, svmtrain,
% svmpredict). Run from this directory inside MATLAB or Octave; svm.cpp and
% svm.h are expected one directory up.
function make()
try
	% This part is for OCTAVE
	if (exist ('OCTAVE_VERSION', 'builtin'))
		mex libsvmread.c
		mex libsvmwrite.c
		mex -I.. svmtrain.c ../svm.cpp svm_model_matlab.c
		mex -I.. svmpredict.c ../svm.cpp svm_model_matlab.c
	% This part is for MATLAB
	% Add -largeArrayDims on 64-bit machines of MATLAB
	else
		% -largeArrayDims selects the 64-bit mwIndex/mwSize MEX API
		mex CFLAGS="\$CFLAGS -std=c99" -largeArrayDims libsvmread.c
		mex CFLAGS="\$CFLAGS -std=c99" -largeArrayDims libsvmwrite.c
		mex CFLAGS="\$CFLAGS -std=c99" -I.. -largeArrayDims svmtrain.c ../svm.cpp svm_model_matlab.c
		mex CFLAGS="\$CFLAGS -std=c99" -I.. -largeArrayDims svmpredict.c ../svm.cpp svm_model_matlab.c
	end
catch err
	% err.stack(1) points at the mex invocation that failed
	fprintf('Error: %s failed (line %d)\n', err.stack(1).file, err.stack(1).line);
	disp(err.message);
	fprintf('=> Please check README for detailed instructions.\n');
end

+ 374
- 0
gklearn/gedlib/lib/libsvm.3.22/matlab/svm_model_matlab.c View File

@@ -0,0 +1,374 @@
#include <stdlib.h>
#include <string.h>
#include "svm.h"

#include "mex.h"

#ifdef MX_API_VER
#if MX_API_VER < 0x07030000
typedef int mwIndex;
#endif
#endif

// Number of fields in the MATLAB model struct exchanged with the caller.
#define NUM_OF_RETURN_FIELD 11

// Shorthand for a typed malloc of n elements.
#define Malloc(type,n) (type *)malloc((n)*sizeof(type))

// Field names of the MATLAB model struct. The order here must match both
// the fill order (out_id) in model_to_matlab_structure() and the read
// order (id) in matlab_matrix_to_model().
static const char *field_names[] = {
	"Parameters",
	"nr_class",
	"totalSV",
	"rho",
	"Label",
	"sv_indices",
	"ProbA",
	"ProbB",
	"nSV",
	"sv_coef",
	"SVs"
};

/*
 * Convert a trained libsvm model into the MATLAB struct returned in
 * plhs[0]. Fields are created in the exact order of field_names[],
 * tracked by out_id. num_of_feature is the training matrix column count;
 * for precomputed kernels it is overridden to 1 (each SV stores only the
 * sample serial number). Returns NULL on success or a static error
 * message string on failure.
 */
const char *model_to_matlab_structure(mxArray *plhs[], int num_of_feature, struct svm_model *model)
{
	int i, j, n;
	double *ptr;
	mxArray *return_model, **rhs;
	int out_id = 0;

	rhs = (mxArray **)mxMalloc(sizeof(mxArray *)*NUM_OF_RETURN_FIELD);

	// Parameters
	rhs[out_id] = mxCreateDoubleMatrix(5, 1, mxREAL);
	ptr = mxGetPr(rhs[out_id]);
	ptr[0] = model->param.svm_type;
	ptr[1] = model->param.kernel_type;
	ptr[2] = model->param.degree;
	ptr[3] = model->param.gamma;
	ptr[4] = model->param.coef0;
	out_id++;

	// nr_class
	rhs[out_id] = mxCreateDoubleMatrix(1, 1, mxREAL);
	ptr = mxGetPr(rhs[out_id]);
	ptr[0] = model->nr_class;
	out_id++;

	// total SV
	rhs[out_id] = mxCreateDoubleMatrix(1, 1, mxREAL);
	ptr = mxGetPr(rhs[out_id]);
	ptr[0] = model->l;
	out_id++;

	// rho: one value per class pair
	n = model->nr_class*(model->nr_class-1)/2;
	rhs[out_id] = mxCreateDoubleMatrix(n, 1, mxREAL);
	ptr = mxGetPr(rhs[out_id]);
	for(i = 0; i < n; i++)
		ptr[i] = model->rho[i];
	out_id++;

	// Label (empty matrix when the model carries no labels, e.g. regression)
	if(model->label)
	{
		rhs[out_id] = mxCreateDoubleMatrix(model->nr_class, 1, mxREAL);
		ptr = mxGetPr(rhs[out_id]);
		for(i = 0; i < model->nr_class; i++)
			ptr[i] = model->label[i];
	}
	else
		rhs[out_id] = mxCreateDoubleMatrix(0, 0, mxREAL);
	out_id++;

	// sv_indices
	if(model->sv_indices)
	{
		rhs[out_id] = mxCreateDoubleMatrix(model->l, 1, mxREAL);
		ptr = mxGetPr(rhs[out_id]);
		for(i = 0; i < model->l; i++)
			ptr[i] = model->sv_indices[i];
	}
	else
		rhs[out_id] = mxCreateDoubleMatrix(0, 0, mxREAL);
	out_id++;

	// probA (empty unless trained with -b 1)
	if(model->probA != NULL)
	{
		rhs[out_id] = mxCreateDoubleMatrix(n, 1, mxREAL);
		ptr = mxGetPr(rhs[out_id]);
		for(i = 0; i < n; i++)
			ptr[i] = model->probA[i];
	}
	else
		rhs[out_id] = mxCreateDoubleMatrix(0, 0, mxREAL);
	out_id ++;

	// probB (empty unless trained with -b 1)
	if(model->probB != NULL)
	{
		rhs[out_id] = mxCreateDoubleMatrix(n, 1, mxREAL);
		ptr = mxGetPr(rhs[out_id]);
		for(i = 0; i < n; i++)
			ptr[i] = model->probB[i];
	}
	else
		rhs[out_id] = mxCreateDoubleMatrix(0, 0, mxREAL);
	out_id++;

	// nSV
	if(model->nSV)
	{
		rhs[out_id] = mxCreateDoubleMatrix(model->nr_class, 1, mxREAL);
		ptr = mxGetPr(rhs[out_id]);
		for(i = 0; i < model->nr_class; i++)
			ptr[i] = model->nSV[i];
	}
	else
		rhs[out_id] = mxCreateDoubleMatrix(0, 0, mxREAL);
	out_id++;

	// sv_coef: l x (nr_class-1), stored column-major to match MATLAB
	rhs[out_id] = mxCreateDoubleMatrix(model->l, model->nr_class-1, mxREAL);
	ptr = mxGetPr(rhs[out_id]);
	for(i = 0; i < model->nr_class-1; i++)
		for(j = 0; j < model->l; j++)
			ptr[(i*(model->l))+j] = model->sv_coef[i][j];
	out_id++;

	// SVs: built as a sparse matrix in column layout, then transposed back
	{
		int ir_index, nonzero_element;
		mwIndex *ir, *jc;
		mxArray *pprhs[1], *pplhs[1];

		if(model->param.kernel_type == PRECOMPUTED)
		{
			nonzero_element = model->l;
			num_of_feature = 1;
		}
		else
		{
			// count nonzeros by walking each SV's -1-terminated node list
			nonzero_element = 0;
			for(i = 0; i < model->l; i++) {
				j = 0;
				while(model->SV[i][j].index != -1)
				{
					nonzero_element++;
					j++;
				}
			}
		}

		// SV in column, easier accessing
		rhs[out_id] = mxCreateSparse(num_of_feature, model->l, nonzero_element, mxREAL);
		ir = mxGetIr(rhs[out_id]);
		jc = mxGetJc(rhs[out_id]);
		ptr = mxGetPr(rhs[out_id]);
		jc[0] = ir_index = 0;
		for(i = 0;i < model->l; i++)
		{
			if(model->param.kernel_type == PRECOMPUTED)
			{
				// make a (1 x model->l) matrix
				ir[ir_index] = 0;
				ptr[ir_index] = model->SV[i][0].value;
				ir_index++;
				jc[i+1] = jc[i] + 1;
			}
			else
			{
				// libsvm node indices are 1-based; sparse rows are 0-based
				int x_index = 0;
				while (model->SV[i][x_index].index != -1)
				{
					ir[ir_index] = model->SV[i][x_index].index - 1;
					ptr[ir_index] = model->SV[i][x_index].value;
					ir_index++, x_index++;
				}
				jc[i+1] = jc[i] + x_index;
			}
		}
		// transpose back to SV in row
		pprhs[0] = rhs[out_id];
		if(mexCallMATLAB(1, pplhs, 1, pprhs, "transpose"))
			return "cannot transpose SV matrix";
		rhs[out_id] = pplhs[0];
		out_id++;
	}

	/* Create a struct matrix contains NUM_OF_RETURN_FIELD fields */
	return_model = mxCreateStructMatrix(1, 1, NUM_OF_RETURN_FIELD, field_names);

	/* Fill struct matrix with input arguments */
	for(i = 0; i < NUM_OF_RETURN_FIELD; i++)
		mxSetField(return_model,0,field_names[i],mxDuplicateArray(rhs[i]));
	/* return */
	plhs[0] = return_model;
	mxFree(rhs);

	return NULL;
}

/*
 * Rebuild an svm_model from the MATLAB struct produced by
 * model_to_matlab_structure(); fields are read in field_names[] order,
 * tracked by id. On failure returns NULL and points *msg at a static
 * error string. The caller owns the returned model (free_sv = 1 so
 * svm_free_and_destroy_model releases the SV storage too).
 */
struct svm_model *matlab_matrix_to_model(const mxArray *matlab_struct, const char **msg)
{
	int i, j, n, num_of_fields;
	double *ptr;
	int id = 0;
	struct svm_node *x_space;
	struct svm_model *model;
	mxArray **rhs;

	num_of_fields = mxGetNumberOfFields(matlab_struct);
	if(num_of_fields != NUM_OF_RETURN_FIELD)
	{
		*msg = "number of return field is not correct";
		return NULL;
	}
	rhs = (mxArray **) mxMalloc(sizeof(mxArray *)*num_of_fields);

	for(i=0;i<num_of_fields;i++)
		rhs[i] = mxGetFieldByNumber(matlab_struct, 0, i);

	model = Malloc(struct svm_model, 1);
	model->rho = NULL;
	model->probA = NULL;
	model->probB = NULL;
	model->label = NULL;
	model->sv_indices = NULL;
	model->nSV = NULL;
	model->free_sv = 1; // XXX

	// Parameters
	ptr = mxGetPr(rhs[id]);
	model->param.svm_type = (int)ptr[0];
	model->param.kernel_type = (int)ptr[1];
	model->param.degree = (int)ptr[2];
	model->param.gamma = ptr[3];
	model->param.coef0 = ptr[4];
	id++;

	// nr_class
	ptr = mxGetPr(rhs[id]);
	model->nr_class = (int)ptr[0];
	id++;

	// total SV
	ptr = mxGetPr(rhs[id]);
	model->l = (int)ptr[0];
	id++;

	// rho: one value per class pair
	n = model->nr_class * (model->nr_class-1)/2;
	model->rho = (double*) malloc(n*sizeof(double));
	ptr = mxGetPr(rhs[id]);
	for(i=0;i<n;i++)
		model->rho[i] = ptr[i];
	id++;

	// label (optional: empty for regression/one-class models)
	if(mxIsEmpty(rhs[id]) == 0)
	{
		model->label = (int*) malloc(model->nr_class*sizeof(int));
		ptr = mxGetPr(rhs[id]);
		for(i=0;i<model->nr_class;i++)
			model->label[i] = (int)ptr[i];
	}
	id++;

	// sv_indices (optional)
	if(mxIsEmpty(rhs[id]) == 0)
	{
		model->sv_indices = (int*) malloc(model->l*sizeof(int));
		ptr = mxGetPr(rhs[id]);
		for(i=0;i<model->l;i++)
			model->sv_indices[i] = (int)ptr[i];
	}
	id++;

	// probA (optional)
	if(mxIsEmpty(rhs[id]) == 0)
	{
		model->probA = (double*) malloc(n*sizeof(double));
		ptr = mxGetPr(rhs[id]);
		for(i=0;i<n;i++)
			model->probA[i] = ptr[i];
	}
	id++;

	// probB (optional)
	if(mxIsEmpty(rhs[id]) == 0)
	{
		model->probB = (double*) malloc(n*sizeof(double));
		ptr = mxGetPr(rhs[id]);
		for(i=0;i<n;i++)
			model->probB[i] = ptr[i];
	}
	id++;

	// nSV (optional)
	if(mxIsEmpty(rhs[id]) == 0)
	{
		model->nSV = (int*) malloc(model->nr_class*sizeof(int));
		ptr = mxGetPr(rhs[id]);
		for(i=0;i<model->nr_class;i++)
			model->nSV[i] = (int)ptr[i];
	}
	id++;

	// sv_coef
	ptr = mxGetPr(rhs[id]);
	// FIX: the element type is double*, not double; the original allocated
	// (nr_class-1)*sizeof(double), which is only accidentally large enough
	// on platforms where sizeof(double*) <= sizeof(double).
	model->sv_coef = (double**) malloc((model->nr_class-1)*sizeof(double*));
	for( i=0 ; i< model->nr_class -1 ; i++ )
		model->sv_coef[i] = (double*) malloc((model->l)*sizeof(double));
	for(i = 0; i < model->nr_class - 1; i++)
		for(j = 0; j < model->l; j++)
			model->sv_coef[i][j] = ptr[i*(model->l)+j];
	id++;

	// SV
	{
		int sr, elements;
		int num_samples;
		mwIndex *ir, *jc;
		mxArray *pprhs[1], *pplhs[1];

		// transpose SV so each column is one support vector
		pprhs[0] = rhs[id];
		if(mexCallMATLAB(1, pplhs, 1, pprhs, "transpose"))
		{
			svm_free_and_destroy_model(&model);
			*msg = "cannot transpose SV matrix";
			return NULL;
		}
		rhs[id] = pplhs[0];

		sr = (int)mxGetN(rhs[id]);

		ptr = mxGetPr(rhs[id]);
		ir = mxGetIr(rhs[id]);
		jc = mxGetJc(rhs[id]);

		num_samples = (int)mxGetNzmax(rhs[id]);

		// one extra node per SV for the -1 terminator
		elements = num_samples + sr;

		model->SV = (struct svm_node **) malloc(sr * sizeof(struct svm_node *));
		x_space = (struct svm_node *)malloc(elements * sizeof(struct svm_node));

		// SV is in column; convert 0-based sparse rows to 1-based indices
		for(i=0;i<sr;i++)
		{
			int low = (int)jc[i], high = (int)jc[i+1];
			int x_index = 0;
			model->SV[i] = &x_space[low+i];
			for(j=low;j<high;j++)
			{
				model->SV[i][x_index].index = (int)ir[j] + 1;
				model->SV[i][x_index].value = ptr[j];
				x_index++;
			}
			model->SV[i][x_index].index = -1;
		}

		id++;
	}
	mxFree(rhs);

	return model;
}

+ 2
- 0
gklearn/gedlib/lib/libsvm.3.22/matlab/svm_model_matlab.h View File

@@ -0,0 +1,2 @@
/* Convert a trained svm_model into the MATLAB struct stored in plhs[0].
 * Returns NULL on success or a static error-message string on failure. */
const char *model_to_matlab_structure(mxArray *plhs[], int num_of_feature, struct svm_model *model);
/* Rebuild an svm_model from the MATLAB struct; on failure returns NULL
 * and points *error_message at a static string. Caller owns the model. */
struct svm_model *matlab_matrix_to_model(const mxArray *matlab_struct, const char **error_message);

+ 370
- 0
gklearn/gedlib/lib/libsvm.3.22/matlab/svmpredict.c View File

@@ -0,0 +1,370 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "svm.h"

#include "mex.h"
#include "svm_model_matlab.h"

#ifdef MX_API_VER
#if MX_API_VER < 0x07030000
typedef int mwIndex;
#endif
#endif

#define CMD_LEN 2048

// Message sink for -q (quiet) mode; must match mexPrintf's
// int(const char *, ...) shape so it can share the `info` pointer.
// FIX: the original fell off the end of a non-void function; return a
// value explicitly (callers ignore it, matching mexPrintf's count).
int print_null(const char *s,...) {return 0;}
// Message hook: mexPrintf by default, swapped to print_null by -q.
int (*info)(const char *fmt,...) = &mexPrintf;

// Copy column `index` of the (already transposed) sparse instance matrix
// into the svm_node array x: sparse 0-based rows become 1-based libsvm
// indices, and the list is terminated with index = -1.
void read_sparse_instance(const mxArray *prhs, int index, struct svm_node *x)
{
	mwIndex *row_idx = mxGetIr(prhs);
	mwIndex *col_ptr = mxGetJc(prhs);
	double *vals = mxGetPr(prhs);
	int start = (int)col_ptr[index], stop = (int)col_ptr[index+1];
	int pos, out = 0;

	// each column is one instance
	for(pos = start; pos < stop; pos++, out++)
	{
		x[out].index = (int)row_idx[pos] + 1;
		x[out].value = vals[pos];
	}
	x[out].index = -1;
}

/* Populate all requested outputs with empty 0x0 double matrices; used on
 * every error path so plhs[] is never left undefined. */
static void fake_answer(int nlhs, mxArray *plhs[])
{
	int slot = 0;
	while(slot < nlhs)
	{
		plhs[slot] = mxCreateDoubleMatrix(0, 0, mxREAL);
		slot++;
	}
}

/*
 * Core prediction routine for svmpredict.
 * prhs[0] = true label vector, prhs[1] = instance matrix (dense or sparse).
 * Results are staged in tplhs[]: [0] predicted labels, [1] the 3-vector
 * [accuracy; MSE; squared correlation], [2] decision values or probability
 * estimates (depending on predict_probability); only the outputs the
 * caller asked for (nlhs) are copied to plhs[] at the end.
 */
void predict(int nlhs, mxArray *plhs[], const mxArray *prhs[], struct svm_model *model, const int predict_probability)
{
	int label_vector_row_num, label_vector_col_num;
	int feature_number, testing_instance_number;
	int instance_index;
	double *ptr_instance, *ptr_label, *ptr_predict_label;
	double *ptr_prob_estimates, *ptr_dec_values, *ptr;
	struct svm_node *x;
	mxArray *pplhs[1]; // transposed instance sparse matrix
	mxArray *tplhs[3]; // temporary storage for plhs[]

	// running statistics for accuracy / MSE / correlation
	int correct = 0;
	int total = 0;
	double error = 0;
	double sump = 0, sumt = 0, sumpp = 0, sumtt = 0, sumpt = 0;

	int svm_type=svm_get_svm_type(model);
	int nr_class=svm_get_nr_class(model);
	double *prob_estimates=NULL;

	// prhs[1] = testing instance matrix
	feature_number = (int)mxGetN(prhs[1]);
	testing_instance_number = (int)mxGetM(prhs[1]);
	label_vector_row_num = (int)mxGetM(prhs[0]);
	label_vector_col_num = (int)mxGetN(prhs[0]);

	if(label_vector_row_num!=testing_instance_number)
	{
		mexPrintf("Length of label vector does not match # of instances.\n");
		fake_answer(nlhs, plhs);
		return;
	}
	if(label_vector_col_num!=1)
	{
		mexPrintf("label (1st argument) should be a vector (# of column is 1).\n");
		fake_answer(nlhs, plhs);
		return;
	}

	ptr_instance = mxGetPr(prhs[1]);
	ptr_label = mxGetPr(prhs[0]);

	// transpose instance matrix
	if(mxIsSparse(prhs[1]))
	{
		if(model->param.kernel_type == PRECOMPUTED)
		{
			// precomputed kernel requires dense matrix, so we make one
			mxArray *rhs[1], *lhs[1];
			rhs[0] = mxDuplicateArray(prhs[1]);
			if(mexCallMATLAB(1, lhs, 1, rhs, "full"))
			{
				mexPrintf("Error: cannot full testing instance matrix\n");
				fake_answer(nlhs, plhs);
				return;
			}
			ptr_instance = mxGetPr(lhs[0]);
			mxDestroyArray(rhs[0]);
		}
		else
		{
			// pplhs[0] is only set (and later read) on this branch
			mxArray *pprhs[1];
			pprhs[0] = mxDuplicateArray(prhs[1]);
			if(mexCallMATLAB(1, pplhs, 1, pprhs, "transpose"))
			{
				mexPrintf("Error: cannot transpose testing instance matrix\n");
				fake_answer(nlhs, plhs);
				return;
			}
		}
	}

	if(predict_probability)
	{
		if(svm_type==NU_SVR || svm_type==EPSILON_SVR)
			info("Prob. model for test data: target value = predicted value + z,\nz: Laplace distribution e^(-|z|/sigma)/(2sigma),sigma=%g\n",svm_get_svr_probability(model));
		else
			prob_estimates = (double *) malloc(nr_class*sizeof(double));
	}

	tplhs[0] = mxCreateDoubleMatrix(testing_instance_number, 1, mxREAL);
	if(predict_probability)
	{
		// prob estimates are in plhs[2]
		if(svm_type==C_SVC || svm_type==NU_SVC)
			tplhs[2] = mxCreateDoubleMatrix(testing_instance_number, nr_class, mxREAL);
		else
			tplhs[2] = mxCreateDoubleMatrix(0, 0, mxREAL);
	}
	else
	{
		// decision values are in plhs[2]
		if(svm_type == ONE_CLASS ||
		   svm_type == EPSILON_SVR ||
		   svm_type == NU_SVR ||
		   nr_class == 1) // if only one class in training data, decision values are still returned.
			tplhs[2] = mxCreateDoubleMatrix(testing_instance_number, 1, mxREAL);
		else
			tplhs[2] = mxCreateDoubleMatrix(testing_instance_number, nr_class*(nr_class-1)/2, mxREAL);
	}

	ptr_predict_label = mxGetPr(tplhs[0]);
	// both aliases point at tplhs[2]; only one is used per mode
	ptr_prob_estimates = mxGetPr(tplhs[2]);
	ptr_dec_values = mxGetPr(tplhs[2]);
	x = (struct svm_node*)malloc((feature_number+1)*sizeof(struct svm_node) );
	for(instance_index=0;instance_index<testing_instance_number;instance_index++)
	{
		int i;
		double target_label, predict_label;

		target_label = ptr_label[instance_index];

		if(mxIsSparse(prhs[1]) && model->param.kernel_type != PRECOMPUTED) // prhs[1]^T is still sparse
			read_sparse_instance(pplhs[0], instance_index, x);
		else
		{
			// dense path: gather row instance_index (column-major storage)
			for(i=0;i<feature_number;i++)
			{
				x[i].index = i+1;
				x[i].value = ptr_instance[testing_instance_number*i+instance_index];
			}
			x[feature_number].index = -1;
		}

		if(predict_probability)
		{
			if(svm_type==C_SVC || svm_type==NU_SVC)
			{
				predict_label = svm_predict_probability(model, x, prob_estimates);
				ptr_predict_label[instance_index] = predict_label;
				for(i=0;i<nr_class;i++)
					ptr_prob_estimates[instance_index + i * testing_instance_number] = prob_estimates[i];
			} else {
				predict_label = svm_predict(model,x);
				ptr_predict_label[instance_index] = predict_label;
			}
		}
		else
		{
			if(svm_type == ONE_CLASS ||
			   svm_type == EPSILON_SVR ||
			   svm_type == NU_SVR)
			{
				double res;
				predict_label = svm_predict_values(model, x, &res);
				ptr_dec_values[instance_index] = res;
			}
			else
			{
				double *dec_values = (double *) malloc(sizeof(double) * nr_class*(nr_class-1)/2);
				predict_label = svm_predict_values(model, x, dec_values);
				if(nr_class == 1)
					ptr_dec_values[instance_index] = 1;
				else
					for(i=0;i<(nr_class*(nr_class-1))/2;i++)
						ptr_dec_values[instance_index + i * testing_instance_number] = dec_values[i];
				free(dec_values);
			}
			ptr_predict_label[instance_index] = predict_label;
		}

		if(predict_label == target_label)
			++correct;
		error += (predict_label-target_label)*(predict_label-target_label);
		sump += predict_label;
		sumt += target_label;
		sumpp += predict_label*predict_label;
		sumtt += target_label*target_label;
		sumpt += predict_label*target_label;
		++total;
	}
	if(svm_type==NU_SVR || svm_type==EPSILON_SVR)
	{
		info("Mean squared error = %g (regression)\n",error/total);
		info("Squared correlation coefficient = %g (regression)\n",
			((total*sumpt-sump*sumt)*(total*sumpt-sump*sumt))/
			((total*sumpp-sump*sump)*(total*sumtt-sumt*sumt))
			);
	}
	else
		info("Accuracy = %g%% (%d/%d) (classification)\n",
			(double)correct/total*100,correct,total);

	// return accuracy, mean squared error, squared correlation coefficient
	tplhs[1] = mxCreateDoubleMatrix(3, 1, mxREAL);
	ptr = mxGetPr(tplhs[1]);
	ptr[0] = (double)correct/total*100;
	ptr[1] = error/total;
	ptr[2] = ((total*sumpt-sump*sumt)*(total*sumpt-sump*sumt))/
				((total*sumpp-sump*sump)*(total*sumtt-sumt*sumt));

	free(x);
	if(prob_estimates != NULL)
		free(prob_estimates);

	// deliberate fallthrough: case 3 also assigns plhs[0]
	switch(nlhs)
	{
		case 3:
			plhs[2] = tplhs[2];
			plhs[1] = tplhs[1];
		case 1:
		case 0:
			plhs[0] = tplhs[0];
	}
}

/* Print the svmpredict usage summary to the MATLAB console. */
void exit_with_help()
{
	static const char usage[] =
		"Usage: [predicted_label, accuracy, decision_values/prob_estimates] = svmpredict(testing_label_vector, testing_instance_matrix, model, 'libsvm_options')\n"
		"       [predicted_label] = svmpredict(testing_label_vector, testing_instance_matrix, model, 'libsvm_options')\n"
		"Parameters:\n"
		"  model: SVM model structure from svmtrain.\n"
		"  libsvm_options:\n"
		"    -b probability_estimates: whether to predict probability estimates, 0 or 1 (default 0); one-class SVM not supported yet\n"
		"    -q : quiet mode (no outputs)\n"
		"Returns:\n"
		"  predicted_label: SVM prediction output vector.\n"
		"  accuracy: a vector with accuracy, mean squared error, squared correlation coefficient.\n"
		"  prob_estimates: If selected, probability estimate vector.\n";
	mexPrintf("%s", usage);
}

/*
 * MEX entry point for svmpredict.
 * prhs: [0] label vector, [1] instance matrix, [2] model struct,
 * [3] optional option string ("-b 0|1", "-q").
 * Parses options, rebuilds the svm_model from the struct, runs predict(),
 * and always frees the model before returning.
 */
void mexFunction( int nlhs, mxArray *plhs[],
		int nrhs, const mxArray *prhs[] )
{
	int prob_estimate_flag = 0;
	struct svm_model *model;
	info = &mexPrintf;

	// nlhs == 2 is rejected: outputs come as 1 (labels) or 3 (all)
	if(nlhs == 2 || nlhs > 3 || nrhs > 4 || nrhs < 3)
	{
		exit_with_help();
		fake_answer(nlhs, plhs);
		return;
	}

	if(!mxIsDouble(prhs[0]) || !mxIsDouble(prhs[1])) {
		mexPrintf("Error: label vector and instance matrix must be double\n");
		fake_answer(nlhs, plhs);
		return;
	}

	if(mxIsStruct(prhs[2]))
	{
		const char *error_msg;

		// parse options
		if(nrhs==4)
		{
			int i, argc = 1;
			char cmd[CMD_LEN], *argv[CMD_LEN/2];

			// put options in argv[]
			// FIX: bound the copy by the buffer size; the original passed
			// mxGetN(prhs[3])+1, which overflows cmd[] for long option strings
			// (mxGetString truncates safely at CMD_LEN-1).
			mxGetString(prhs[3], cmd, CMD_LEN);
			if((argv[argc] = strtok(cmd, " ")) != NULL)
				while((argv[++argc] = strtok(NULL, " ")) != NULL)
					;

			for(i=1;i<argc;i++)
			{
				if(argv[i][0] != '-') break;
				// every option except -q consumes a parameter
				if((++i>=argc) && argv[i-1][1] != 'q')
				{
					exit_with_help();
					fake_answer(nlhs, plhs);
					return;
				}
				switch(argv[i-1][1])
				{
					case 'b':
						prob_estimate_flag = atoi(argv[i]);
						break;
					case 'q':
						i--;	// -q has no parameter; undo the consume
						info = &print_null;
						break;
					default:
						mexPrintf("Unknown option: -%c\n", argv[i-1][1]);
						exit_with_help();
						fake_answer(nlhs, plhs);
						return;
				}
			}
		}

		model = matlab_matrix_to_model(prhs[2], &error_msg);
		if (model == NULL)
		{
			mexPrintf("Error: can't read model: %s\n", error_msg);
			fake_answer(nlhs, plhs);
			return;
		}

		if(prob_estimate_flag)
		{
			if(svm_check_probability_model(model)==0)
			{
				mexPrintf("Model does not support probabiliy estimates\n");
				fake_answer(nlhs, plhs);
				svm_free_and_destroy_model(&model);
				return;
			}
		}
		else
		{
			if(svm_check_probability_model(model)!=0)
				info("Model supports probability estimates, but disabled in predicton.\n");
		}

		predict(nlhs, plhs, prhs, model, prob_estimate_flag);
		// destroy model
		svm_free_and_destroy_model(&model);
	}
	else
	{
		mexPrintf("model file should be a struct array\n");
		fake_answer(nlhs, plhs);
	}

	return;
}

+ 495
- 0
gklearn/gedlib/lib/libsvm.3.22/matlab/svmtrain.c View File

@@ -0,0 +1,495 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include "svm.h"

#include "mex.h"
#include "svm_model_matlab.h"

#ifdef MX_API_VER
#if MX_API_VER < 0x07030000
typedef int mwIndex;
#endif
#endif

#define CMD_LEN 2048
#define Malloc(type,n) (type *)malloc((n)*sizeof(type))

// Message sink for -q (quiet) mode.
void print_null(const char *s) {}
// Forward libsvm's log messages to the MATLAB console.
// FIX: pass the message as an argument, not as the format string —
// mexPrintf(s) misinterprets any '%' inside libsvm output (format-string bug).
void print_string_matlab(const char *s) {mexPrintf("%s", s);}

/* Print the svmtrain usage and option summary to the MATLAB console. */
void exit_with_help()
{
	static const char usage[] =
		"Usage: model = svmtrain(training_label_vector, training_instance_matrix, 'libsvm_options');\n"
		"libsvm_options:\n"
		"-s svm_type : set type of SVM (default 0)\n"
		"	0 -- C-SVC		(multi-class classification)\n"
		"	1 -- nu-SVC		(multi-class classification)\n"
		"	2 -- one-class SVM\n"
		"	3 -- epsilon-SVR	(regression)\n"
		"	4 -- nu-SVR		(regression)\n"
		"-t kernel_type : set type of kernel function (default 2)\n"
		"	0 -- linear: u'*v\n"
		"	1 -- polynomial: (gamma*u'*v + coef0)^degree\n"
		"	2 -- radial basis function: exp(-gamma*|u-v|^2)\n"
		"	3 -- sigmoid: tanh(gamma*u'*v + coef0)\n"
		"	4 -- precomputed kernel (kernel values in training_instance_matrix)\n"
		"-d degree : set degree in kernel function (default 3)\n"
		"-g gamma : set gamma in kernel function (default 1/num_features)\n"
		"-r coef0 : set coef0 in kernel function (default 0)\n"
		"-c cost : set the parameter C of C-SVC, epsilon-SVR, and nu-SVR (default 1)\n"
		"-n nu : set the parameter nu of nu-SVC, one-class SVM, and nu-SVR (default 0.5)\n"
		"-p epsilon : set the epsilon in loss function of epsilon-SVR (default 0.1)\n"
		"-m cachesize : set cache memory size in MB (default 100)\n"
		"-e epsilon : set tolerance of termination criterion (default 0.001)\n"
		"-h shrinking : whether to use the shrinking heuristics, 0 or 1 (default 1)\n"
		"-b probability_estimates : whether to train a SVC or SVR model for probability estimates, 0 or 1 (default 0)\n"
		"-wi weight : set the parameter C of class i to weight*C, for C-SVC (default 1)\n"
		"-v n : n-fold cross validation mode\n"
		"-q : quiet mode (no outputs)\n";
	mexPrintf("%s", usage);
}

// svm arguments shared between parse_command_line(), read_problem_*()
// and mexFunction(); file-scope because the original libsvm CLI driver
// code was reused as-is.
struct svm_parameter param;		// set by parse_command_line
struct svm_problem prob;		// set by read_problem
struct svm_model *model;		// result of svm_train
struct svm_node *x_space;		// backing storage for prob.x node lists
int cross_validation;			// nonzero when -v was given
int nr_fold;				// folds for -v mode


// Run nr_fold cross validation on the global problem/parameters and
// return the MSE for regression models, or the accuracy percentage for
// classification models. Also prints the metrics to the console.
double do_cross_validation()
{
	double *target = Malloc(double,prob.l);
	double retval = 0.0;
	int i;

	svm_cross_validation(&prob,&param,nr_fold,target);

	if(param.svm_type == EPSILON_SVR || param.svm_type == NU_SVR)
	{
		// regression: accumulate squared error and correlation sums
		double total_error = 0;
		double sumv = 0, sumy = 0, sumvv = 0, sumyy = 0, sumvy = 0;
		for(i=0;i<prob.l;i++)
		{
			double y = prob.y[i];
			double v = target[i];
			total_error += (v-y)*(v-y);
			sumv += v;
			sumy += y;
			sumvv += v*v;
			sumyy += y*y;
			sumvy += v*y;
		}
		mexPrintf("Cross Validation Mean squared error = %g\n",total_error/prob.l);
		mexPrintf("Cross Validation Squared correlation coefficient = %g\n",
			((prob.l*sumvy-sumv*sumy)*(prob.l*sumvy-sumv*sumy))/
			((prob.l*sumvv-sumv*sumv)*(prob.l*sumyy-sumy*sumy))
			);
		retval = total_error/prob.l;
	}
	else
	{
		// classification: count exact label matches
		int total_correct = 0;
		for(i=0;i<prob.l;i++)
			total_correct += (target[i] == prob.y[i]);
		mexPrintf("Cross Validation Accuracy = %g%%\n",100.0*total_correct/prob.l);
		retval = 100.0*total_correct/prob.l;
	}

	free(target);
	return retval;
}

// Parse the svmtrain option string in prhs[2] into the global `param`
// (nrhs should be 3). Returns 0 on success, 1 on any parse error (caller
// then prints usage). model_file_name is unused in the MEX build and
// kept only for signature parity with the CLI svmtrain.
int parse_command_line(int nrhs, const mxArray *prhs[], char *model_file_name)
{
	int i, argc = 1;
	char cmd[CMD_LEN];
	char *argv[CMD_LEN/2];
	void (*print_func)(const char *) = print_string_matlab;	// default printing to matlab display

	// default values
	param.svm_type = C_SVC;
	param.kernel_type = RBF;
	param.degree = 3;
	param.gamma = 0;	// 1/num_features
	param.coef0 = 0;
	param.nu = 0.5;
	param.cache_size = 100;
	param.C = 1;
	param.eps = 1e-3;
	param.p = 0.1;
	param.shrinking = 1;
	param.probability = 0;
	param.nr_weight = 0;
	param.weight_label = NULL;
	param.weight = NULL;
	cross_validation = 0;

	if(nrhs <= 1)
		return 1;

	if(nrhs > 2)
	{
		// put options in argv[]
		// FIX: bound the copy by the buffer size; the original passed
		// mxGetN(prhs[2])+1, which overflows cmd[] for long option strings
		// (mxGetString truncates safely at CMD_LEN-1).
		mxGetString(prhs[2], cmd, CMD_LEN);
		if((argv[argc] = strtok(cmd, " ")) != NULL)
			while((argv[++argc] = strtok(NULL, " ")) != NULL)
				;
	}

	// parse options
	for(i=1;i<argc;i++)
	{
		if(argv[i][0] != '-') break;
		++i;
		if(i>=argc && argv[i-1][1] != 'q')	// since option -q has no parameter
			return 1;
		switch(argv[i-1][1])
		{
			case 's':
				param.svm_type = atoi(argv[i]);
				break;
			case 't':
				param.kernel_type = atoi(argv[i]);
				break;
			case 'd':
				param.degree = atoi(argv[i]);
				break;
			case 'g':
				param.gamma = atof(argv[i]);
				break;
			case 'r':
				param.coef0 = atof(argv[i]);
				break;
			case 'n':
				param.nu = atof(argv[i]);
				break;
			case 'm':
				param.cache_size = atof(argv[i]);
				break;
			case 'c':
				param.C = atof(argv[i]);
				break;
			case 'e':
				param.eps = atof(argv[i]);
				break;
			case 'p':
				param.p = atof(argv[i]);
				break;
			case 'h':
				param.shrinking = atoi(argv[i]);
				break;
			case 'b':
				param.probability = atoi(argv[i]);
				break;
			case 'q':
				print_func = &print_null;
				i--;	// -q consumed no parameter
				break;
			case 'v':
				cross_validation = 1;
				nr_fold = atoi(argv[i]);
				if(nr_fold < 2)
				{
					mexPrintf("n-fold cross validation: n must >= 2\n");
					return 1;
				}
				break;
			case 'w':
				// -wi weight: class label i is embedded in the option itself
				++param.nr_weight;
				param.weight_label = (int *)realloc(param.weight_label,sizeof(int)*param.nr_weight);
				param.weight = (double *)realloc(param.weight,sizeof(double)*param.nr_weight);
				param.weight_label[param.nr_weight-1] = atoi(&argv[i-1][2]);
				param.weight[param.nr_weight-1] = atof(argv[i]);
				break;
			default:
				mexPrintf("Unknown option -%c\n", argv[i-1][1]);
				return 1;
		}
	}

	svm_set_print_string_function(print_func);

	return 0;
}

// read in a problem (in svmlight format)
// Build the global `prob`/`x_space` from a dense label vector and instance
// matrix (instances are rows; MATLAB storage is column-major, hence the
// samples[k * l + i] addressing). Returns 0 on success, -1 on error.
int read_problem_dense(const mxArray *label_vec, const mxArray *instance_mat)
{
	// using size_t due to the output type of matlab functions
	size_t i, j, k, l;
	size_t elements, max_index, sc, label_vector_row_num;
	double *samples, *labels;

	prob.x = NULL;
	prob.y = NULL;
	x_space = NULL;

	labels = mxGetPr(label_vec);
	samples = mxGetPr(instance_mat);
	sc = mxGetN(instance_mat);

	elements = 0;
	// number of instances
	l = mxGetM(instance_mat);
	label_vector_row_num = mxGetM(label_vec);
	prob.l = (int)l;

	if(label_vector_row_num!=l)
	{
		mexPrintf("Length of label vector does not match # of instances.\n");
		return -1;
	}

	// count the svm_node slots needed: precomputed kernels store every
	// entry, otherwise only nonzeros (+1 terminator per instance)
	if(param.kernel_type == PRECOMPUTED)
		elements = l * (sc + 1);
	else
	{
		for(i = 0; i < l; i++)
		{
			for(k = 0; k < sc; k++)
				if(samples[k * l + i] != 0)
					elements++;
			// count the '-1' element
			elements++;
		}
	}

	prob.y = Malloc(double,l);
	prob.x = Malloc(struct svm_node *,l);
	x_space = Malloc(struct svm_node, elements);

	max_index = sc;
	j = 0;
	for(i = 0; i < l; i++)
	{
		prob.x[i] = &x_space[j];
		prob.y[i] = labels[i];

		for(k = 0; k < sc; k++)
		{
			if(param.kernel_type == PRECOMPUTED || samples[k * l + i] != 0)
			{
				x_space[j].index = (int)k + 1;	// 1-based feature index
				x_space[j].value = samples[k * l + i];
				j++;
			}
		}
		x_space[j++].index = -1;	// terminator
	}

	// default gamma = 1/num_features when -g was not given
	if(param.gamma == 0 && max_index > 0)
		param.gamma = (double)(1.0/max_index);

	// for precomputed kernels, column 0 holds the sample serial number
	if(param.kernel_type == PRECOMPUTED)
		for(i=0;i<l;i++)
		{
			if((int)prob.x[i][0].value <= 0 || (int)prob.x[i][0].value > (int)max_index)
			{
				mexPrintf("Wrong input format: sample_serial_number out of range\n");
				return -1;
			}
		}

	return 0;
}

// Build the global `prob`/`x_space` from a sparse instance matrix: the
// matrix is transposed first so each CSC column is one instance, then the
// nonzeros are copied into -1-terminated svm_node lists. Returns 0 on
// success, -1 on error.
int read_problem_sparse(const mxArray *label_vec, const mxArray *instance_mat)
{
	mwIndex *ir, *jc, low, high, k;
	// using size_t due to the output type of matlab functions
	size_t i, j, l, elements, max_index, label_vector_row_num;
	mwSize num_samples;
	double *samples, *labels;
	mxArray *instance_mat_col; // transposed instance sparse matrix

	prob.x = NULL;
	prob.y = NULL;
	x_space = NULL;

	// transpose instance matrix
	{
		mxArray *prhs[1], *plhs[1];
		prhs[0] = mxDuplicateArray(instance_mat);
		if(mexCallMATLAB(1, plhs, 1, prhs, "transpose"))
		{
			mexPrintf("Error: cannot transpose training instance matrix\n");
			return -1;
		}
		instance_mat_col = plhs[0];
		mxDestroyArray(prhs[0]);
	}

	// each column is one instance
	labels = mxGetPr(label_vec);
	samples = mxGetPr(instance_mat_col);
	ir = mxGetIr(instance_mat_col);
	jc = mxGetJc(instance_mat_col);

	num_samples = mxGetNzmax(instance_mat_col);

	// number of instances
	l = mxGetN(instance_mat_col);
	label_vector_row_num = mxGetM(label_vec);
	prob.l = (int) l;

	if(label_vector_row_num!=l)
	{
		mexPrintf("Length of label vector does not match # of instances.\n");
		return -1;
	}

	// one extra node per instance for the -1 terminator
	elements = num_samples + l;
	max_index = mxGetM(instance_mat_col);

	prob.y = Malloc(double,l);
	prob.x = Malloc(struct svm_node *,l);
	x_space = Malloc(struct svm_node, elements);

	j = 0;
	for(i=0;i<l;i++)
	{
		prob.x[i] = &x_space[j];
		prob.y[i] = labels[i];
		low = jc[i], high = jc[i+1];
		for(k=low;k<high;k++)
		{
			x_space[j].index = (int)ir[k] + 1;	// 0-based row -> 1-based feature
			x_space[j].value = samples[k];
			j++;
		}
		x_space[j++].index = -1;	// terminator
	}

	// default gamma = 1/num_features when -g was not given
	if(param.gamma == 0 && max_index > 0)
		param.gamma = (double)(1.0/max_index);

	return 0;
}

/* Return empty 0x0 double matrices for all requested outputs; invoked on
 * every error path so plhs[] is always well-defined. */
static void fake_answer(int nlhs, mxArray *plhs[])
{
	int slot;
	for(slot = 0; slot < nlhs; ++slot)
		plhs[slot] = mxCreateDoubleMatrix(0, 0, mxREAL);
}

// Interface function of matlab
// now assume prhs[0]: label prhs[1]: features
// Returns either a model struct (normal training) or a scalar
// cross-validation score (-v mode) in plhs[0].
void mexFunction( int nlhs, mxArray *plhs[],
		int nrhs, const mxArray *prhs[] )
{
	const char *error_msg;

	// fix random seed to have same results for each run
	// (for cross validation and probability estimation)
	srand(1);

	if(nlhs > 1)
	{
		exit_with_help();
		fake_answer(nlhs, plhs);
		return;
	}

	// Transform the input Matrix to libsvm format
	if(nrhs > 1 && nrhs < 4)
	{
		int err;

		if(!mxIsDouble(prhs[0]) || !mxIsDouble(prhs[1]))
		{
			mexPrintf("Error: label vector and instance matrix must be double\n");
			fake_answer(nlhs, plhs);
			return;
		}

		if(mxIsSparse(prhs[0]))
		{
			mexPrintf("Error: label vector should not be in sparse format\n");
			fake_answer(nlhs, plhs);
			return;
		}

		if(parse_command_line(nrhs, prhs, NULL))
		{
			exit_with_help();
			svm_destroy_param(&param);
			fake_answer(nlhs, plhs);
			return;
		}

		if(mxIsSparse(prhs[1]))
		{
			if(param.kernel_type == PRECOMPUTED)
			{
				// precomputed kernel requires dense matrix, so we make one
				mxArray *rhs[1], *lhs[1];

				rhs[0] = mxDuplicateArray(prhs[1]);
				if(mexCallMATLAB(1, lhs, 1, rhs, "full"))
				{
					mexPrintf("Error: cannot generate a full training instance matrix\n");
					svm_destroy_param(&param);
					fake_answer(nlhs, plhs);
					return;
				}
				err = read_problem_dense(prhs[0], lhs[0]);
				mxDestroyArray(lhs[0]);
				mxDestroyArray(rhs[0]);
			}
			else
				err = read_problem_sparse(prhs[0], prhs[1]);
		}
		else
			err = read_problem_dense(prhs[0], prhs[1]);

		// svmtrain's original code
		error_msg = svm_check_parameter(&prob, &param);

		// either failure frees everything allocated so far
		if(err || error_msg)
		{
			if (error_msg != NULL)
				mexPrintf("Error: %s\n", error_msg);
			svm_destroy_param(&param);
			free(prob.y);
			free(prob.x);
			free(x_space);
			fake_answer(nlhs, plhs);
			return;
		}

		if(cross_validation)
		{
			// -v mode: plhs[0] is a scalar score, no model is returned
			double *ptr;
			plhs[0] = mxCreateDoubleMatrix(1, 1, mxREAL);
			ptr = mxGetPr(plhs[0]);
			ptr[0] = do_cross_validation();
		}
		else
		{
			int nr_feat = (int)mxGetN(prhs[1]);
			// NOTE(review): this shadows the outer error_msg; harmless but
			// confusing — the outer one already holds NULL here
			const char *error_msg;
			model = svm_train(&prob, &param);
			error_msg = model_to_matlab_structure(plhs, nr_feat, model);
			if(error_msg)
				mexPrintf("Error: can't convert libsvm model to matrix structure: %s\n", error_msg);
			svm_free_and_destroy_model(&model);
		}
		svm_destroy_param(&param);
		free(prob.y);
		free(prob.x);
		free(x_space);
	}
	else
	{
		exit_with_help();
		fake_answer(nlhs, plhs);
		return;
	}
}

+ 4
- 0
gklearn/gedlib/lib/libsvm.3.22/python/Makefile View File

@@ -0,0 +1,4 @@
# Default goal: delegate to the parent directory's `lib` target, which
# builds the libsvm shared library used by the Python ctypes interface.
# FIX: `all = lib` was a variable assignment, not a rule, so `make all`
# failed (plain `make` only worked because `lib` was the first target).
all: lib

lib:
	make -C .. lib

+ 367
- 0
gklearn/gedlib/lib/libsvm.3.22/python/README View File

@@ -0,0 +1,367 @@
----------------------------------
--- Python interface of LIBSVM ---
----------------------------------

Table of Contents
=================

- Introduction
- Installation
- Quick Start
- Design Description
- Data Structures
- Utility Functions
- Additional Information

Introduction
============

Python (http://www.python.org/) is a programming language suitable for rapid
development. This tool provides a simple Python interface to LIBSVM, a library
for support vector machines (http://www.csie.ntu.edu.tw/~cjlin/libsvm). The
interface is very easy to use as the usage is the same as that of LIBSVM. The
interface is developed with the built-in Python library "ctypes."

Installation
============

On Unix systems, type

> make

The interface needs only LIBSVM shared library, which is generated by
the above command. We assume that the shared library is on the LIBSVM
main directory or in the system path.

For windows, the shared library libsvm.dll for 32-bit python is ready
in the directory `..\windows'. You can also copy it to the system
directory (e.g., `C:\WINDOWS\system32\' for Windows XP). To regenerate
the shared library, please follow the instruction of building windows
binaries in LIBSVM README.

Quick Start
===========

There are two levels of usage. The high-level one uses utility functions
in svmutil.py and the usage is the same as the LIBSVM MATLAB interface.

>>> from svmutil import *
# Read data in LIBSVM format
>>> y, x = svm_read_problem('../heart_scale')
>>> m = svm_train(y[:200], x[:200], '-c 4')
>>> p_label, p_acc, p_val = svm_predict(y[200:], x[200:], m)

# Construct problem in python format
# Dense data
>>> y, x = [1,-1], [[1,0,1], [-1,0,-1]]
# Sparse data
>>> y, x = [1,-1], [{1:1, 3:1}, {1:-1,3:-1}]
>>> prob = svm_problem(y, x)
>>> param = svm_parameter('-t 0 -c 4 -b 1')
>>> m = svm_train(prob, param)

# Precomputed kernel data (-t 4)
# Dense data
>>> y, x = [1,-1], [[1, 2, -2], [2, -2, 2]]
# Sparse data
>>> y, x = [1,-1], [{0:1, 1:2, 2:-2}, {0:2, 1:-2, 2:2}]
# isKernel=True must be set for precomputed kernel
>>> prob = svm_problem(y, x, isKernel=True)
>>> param = svm_parameter('-t 4 -c 4 -b 1')
>>> m = svm_train(prob, param)
# For the format of precomputed kernel, please read LIBSVM README.


# Other utility functions
>>> svm_save_model('heart_scale.model', m)
>>> m = svm_load_model('heart_scale.model')
>>> p_label, p_acc, p_val = svm_predict(y, x, m, '-b 1')
>>> ACC, MSE, SCC = evaluations(y, p_label)

# Getting online help
>>> help(svm_train)

The low-level use directly calls C interfaces imported by svm.py. Note that
all arguments and return values are in ctypes format. You need to handle them
carefully.

>>> from svm import *
>>> prob = svm_problem([1,-1], [{1:1, 3:1}, {1:-1,3:-1}])
>>> param = svm_parameter('-c 4')
>>> m = libsvm.svm_train(prob, param) # m is a ctype pointer to an svm_model
# Convert a Python-format instance to svm_nodearray, a ctypes structure
>>> x0, max_idx = gen_svm_nodearray({1:1, 3:1})
>>> label = libsvm.svm_predict(m, x0)

Design Description
==================

There are two files svm.py and svmutil.py, which respectively correspond to
low-level and high-level use of the interface.

In svm.py, we adopt the Python built-in library "ctypes," so that
Python can directly access C structures and interface functions defined
in svm.h.

While advanced users can use structures/functions in svm.py, to
avoid handling ctypes structures, in svmutil.py we provide some easy-to-use
functions. The usage is similar to LIBSVM MATLAB interface.

Data Structures
===============

Four data structures derived from svm.h are svm_node, svm_problem, svm_parameter,
and svm_model. They all contain fields with the same names in svm.h. Access
these fields carefully because you directly use a C structure instead of a
Python object. For svm_model, accessing the field directly is not recommended.
Programmers should use the interface functions or methods of svm_model class
in Python to get the values. The following description introduces additional
fields and methods.

Before using the data structures, execute the following command to load the
LIBSVM shared library:

>>> from svm import *

- class svm_node:

Construct an svm_node.

>>> node = svm_node(idx, val)

idx: an integer indicates the feature index.

val: a float indicates the feature value.

Show the index and the value of a node.

>>> print(node)

- Function: gen_svm_nodearray(xi [,feature_max=None [,isKernel=False]])

Generate a feature vector from a Python list/tuple or a dictionary:

>>> xi, max_idx = gen_svm_nodearray({1:1, 3:1, 5:-2})

xi: the returned svm_nodearray (a ctypes structure)

max_idx: the maximal feature index of xi

feature_max: if feature_max is assigned, features with indices larger than
feature_max are removed.
isKernel: if isKernel == True, the list index starts from 0 for precomputed
kernel. Otherwise, the list index starts from 1. The default
value is False.

- class svm_problem:

Construct an svm_problem instance

>>> prob = svm_problem(y, x)

y: a Python list/tuple of l labels (type must be int/double).

x: a Python list/tuple of l data instances. Each element of x must be
an instance of list/tuple/dictionary type.

Note that if your x contains sparse data (i.e., dictionary), the internal
ctypes data format is still sparse.

For pre-computed kernel, the isKernel flag should be set to True:

>>> prob = svm_problem(y, x, isKernel=True)

Please read LIBSVM README for more details of pre-computed kernel.

- class svm_parameter:

Construct an svm_parameter instance

>>> param = svm_parameter('training_options')

If 'training_options' is empty, LIBSVM default values are applied.

Set param to LIBSVM default values.

>>> param.set_to_default_values()

Parse a string of options.

>>> param.parse_options('training_options')

Show values of parameters.

>>> print(param)

- class svm_model:

There are two ways to obtain an instance of svm_model:

>>> model = svm_train(y, x)
>>> model = svm_load_model('model_file_name')

Note that the returned structure of interface functions
libsvm.svm_train and libsvm.svm_load_model is a ctypes pointer of
svm_model, which is different from the svm_model object returned
by svm_train and svm_load_model in svmutil.py. We provide a
function toPyModel for the conversion:

>>> model_ptr = libsvm.svm_train(prob, param)
>>> model = toPyModel(model_ptr)

If you obtain a model in a way other than the above approaches,
handle it carefully to avoid memory leak or segmentation fault.

Some interface functions to access LIBSVM models are wrapped as
members of the class svm_model:

>>> svm_type = model.get_svm_type()
>>> nr_class = model.get_nr_class()
>>> svr_probability = model.get_svr_probability()
>>> class_labels = model.get_labels()
>>> sv_indices = model.get_sv_indices()
>>> nr_sv = model.get_nr_sv()
>>> is_prob_model = model.is_probability_model()
>>> support_vector_coefficients = model.get_sv_coef()
>>> support_vectors = model.get_SV()

Utility Functions
=================

To use utility functions, type

>>> from svmutil import *

The above command loads
svm_train() : train an SVM model
svm_predict() : predict testing data
svm_read_problem() : read the data from a LIBSVM-format file.
svm_load_model() : load a LIBSVM model.
svm_save_model() : save model to a file.
evaluations() : evaluate prediction results.

- Function: svm_train

There are three ways to call svm_train()

>>> model = svm_train(y, x [, 'training_options'])
>>> model = svm_train(prob [, 'training_options'])
>>> model = svm_train(prob, param)

y: a list/tuple of l training labels (type must be int/double).

x: a list/tuple of l training instances. The feature vector of
each training instance is an instance of list/tuple or dictionary.

training_options: a string in the same form as that for LIBSVM command
mode.

prob: an svm_problem instance generated by calling
svm_problem(y, x).
For pre-computed kernel, you should use
svm_problem(y, x, isKernel=True)

param: an svm_parameter instance generated by calling
svm_parameter('training_options')

model: the returned svm_model instance. See svm.h for details of this
structure. If '-v' is specified, cross validation is
conducted and the returned model is just a scalar: cross-validation
accuracy for classification and mean-squared error for regression.

To train the same data many times with different
parameters, the second and the third ways should be faster.

Examples:

>>> y, x = svm_read_problem('../heart_scale')
>>> prob = svm_problem(y, x)
>>> param = svm_parameter('-s 3 -c 5 -h 0')
>>> m = svm_train(y, x, '-c 5')
>>> m = svm_train(prob, '-t 2 -c 5')
>>> m = svm_train(prob, param)
>>> CV_ACC = svm_train(y, x, '-v 3')

- Function: svm_predict

To predict testing data with a model, use

>>> p_labs, p_acc, p_vals = svm_predict(y, x, model [,'predicting_options'])

y: a list/tuple of l true labels (type must be int/double). It is used
for calculating the accuracy. Use [0]*len(x) if true labels are
unavailable.

x: a list/tuple of l predicting instances. The feature vector of
each predicting instance is an instance of list/tuple or dictionary.

predicting_options: a string of predicting options in the same format as
that of LIBSVM.

model: an svm_model instance.

p_labels: a list of predicted labels

p_acc: a tuple including accuracy (for classification), mean
squared error, and squared correlation coefficient (for
regression).

p_vals: a list of decision values or probability estimates (if '-b 1'
is specified). If k is the number of classes in training data,
for decision values, each element includes results of predicting
k(k-1)/2 binary-class SVMs. For classification, k = 1 is a
special case. Decision value [+1] is returned for each testing
instance, instead of an empty list.
For probabilities, each element contains k values indicating
the probability that the testing instance is in each class.
Note that the order of classes is the same as the 'model.label'
field in the model structure.

Example:

>>> m = svm_train(y, x, '-c 5')
>>> p_labels, p_acc, p_vals = svm_predict(y, x, m)

- Functions: svm_read_problem/svm_load_model/svm_save_model

See the usage by examples:

>>> y, x = svm_read_problem('data.txt')
>>> m = svm_load_model('model_file')
>>> svm_save_model('model_file', m)

- Function: evaluations

Calculate some evaluations using the true values (ty) and predicted
values (pv):

>>> (ACC, MSE, SCC) = evaluations(ty, pv)

ty: a list of true values.

pv: a list of predict values.

ACC: accuracy.

MSE: mean squared error.

SCC: squared correlation coefficient.


Additional Information
======================

This interface was written by Hsiang-Fu Yu from Department of Computer
Science, National Taiwan University. If you find this tool useful, please
cite LIBSVM as follows

Chih-Chung Chang and Chih-Jen Lin, LIBSVM : a library for support
vector machines. ACM Transactions on Intelligent Systems and
Technology, 2:27:1--27:27, 2011. Software available at
http://www.csie.ntu.edu.tw/~cjlin/libsvm

For any question, please contact Chih-Jen Lin <cjlin@csie.ntu.edu.tw>,
or check the FAQ page:

http://www.csie.ntu.edu.tw/~cjlin/libsvm/faq.html

+ 330
- 0
gklearn/gedlib/lib/libsvm.3.22/python/svm.py View File

@@ -0,0 +1,330 @@
#!/usr/bin/env python

from ctypes import *
from ctypes.util import find_library
from os import path
import sys

# Python 2 compatibility: this module uses xrange below; alias it on Python 3.
if sys.version_info[0] >= 3:
	xrange = range

# Public names re-exported by `from svm import *` (consumed by svmutil.py).
__all__ = ['libsvm', 'svm_problem', 'svm_parameter',
           'toPyModel', 'gen_svm_nodearray', 'print_null', 'svm_node', 'C_SVC',
           'EPSILON_SVR', 'LINEAR', 'NU_SVC', 'NU_SVR', 'ONE_CLASS',
           'POLY', 'PRECOMPUTED', 'PRINT_STRING_FUN', 'RBF',
           'SIGMOID', 'c_double', 'svm_model']

# Load the LIBSVM shared library.  First try the copy bundled relative to
# this file; on any failure fall back to searching the system paths.
# NOTE(review): the bare `except:` also hides unrelated errors (e.g. a
# missing __file__ in frozen environments), not just a failed CDLL load.
try:
	dirname = path.dirname(path.abspath(__file__))
	if sys.platform == 'win32':
		libsvm = CDLL(path.join(dirname, r'..\windows\libsvm.dll'))
	else:
		libsvm = CDLL(path.join(dirname, '../libsvm.so.2'))
except:
# For unix the prefix 'lib' is not considered.
	if find_library('svm'):
		libsvm = CDLL(find_library('svm'))
	elif find_library('libsvm'):
		libsvm = CDLL(find_library('libsvm'))
	else:
		raise Exception('LIBSVM library not found.')

# svm_type values (mirror the enum in svm.h).
C_SVC = 0
NU_SVC = 1
ONE_CLASS = 2
EPSILON_SVR = 3
NU_SVR = 4

# kernel_type values (mirror the enum in svm.h).
LINEAR = 0
POLY = 1
RBF = 2
SIGMOID = 3
PRECOMPUTED = 4

# C callback type for LIBSVM's print hook: takes one C string, returns nothing.
PRINT_STRING_FUN = CFUNCTYPE(None, c_char_p)
def print_null(s):
	"""Print hook that silently discards all LIBSVM output (used by '-q')."""
	return

def genFields(names, types):
	"""Pair field names with their ctypes types for a Structure's _fields_."""
	return [(name, ctype) for name, ctype in zip(names, types)]

def fillprototype(f, restype, argtypes):
	"""Attach the C return type and argument signature to a ctypes function."""
	f.argtypes = argtypes
	f.restype = restype

class svm_node(Structure):
	"""ctypes mirror of LIBSVM's struct svm_node: one sparse feature."""
	_names = ["index", "value"]
	_types = [c_int, c_double]
	_fields_ = list(zip(_names, _types))

	def __str__(self):
		"""Render the node in LIBSVM's sparse 'index:value' text form."""
		return '{0:d}:{1:g}'.format(self.index, self.value)

def gen_svm_nodearray(xi, feature_max=None, isKernel=None):
	"""Convert one instance (dict, list or tuple) into a C svm_node array.

	Returns (nodes, max_idx): a ctypes (svm_node * n) array terminated by an
	index of -1, and the largest feature index used (0 for an empty instance).
	Zero-valued features are dropped unless isKernel is true; indices above
	feature_max (when given) are dropped as well.
	"""
	if isinstance(xi, dict):
		indices = xi.keys()
	elif isinstance(xi, (list, tuple)):
		if not isKernel:
			xi = [0] + xi  # prepend a dummy so feature indices start from 1
		indices = range(len(xi))
	else:
		raise TypeError('xi should be a dictionary, list or tuple')

	if feature_max:
		assert(isinstance(feature_max, int))
		indices = (j for j in indices if j <= feature_max)
	if not isKernel:
		# Sparse representation: keep only non-zero features.
		indices = (j for j in indices if xi[j] != 0)

	indices = sorted(indices)
	nodes = (svm_node * (len(indices) + 1))()
	nodes[-1].index = -1  # C-side terminator sentinel
	for slot, j in enumerate(indices):
		nodes[slot].index = j
		nodes[slot].value = xi[j]
	max_idx = indices[-1] if indices else 0
	return nodes, max_idx

class svm_problem(Structure):
	"""ctypes mirror of LIBSVM's struct svm_problem: a whole data set.

	Python-side extras kept on the instance: x_space (the node arrays, held
	so the C pointers stay valid) and n (largest feature index seen).
	"""
	_names = ["l", "y", "x"]
	_types = [c_int, POINTER(c_double), POINTER(POINTER(svm_node))]
	_fields_ = list(zip(_names, _types))

	def __init__(self, y, x, isKernel=None):
		"""Build the C problem from labels y and instances x."""
		if len(y) != len(x):
			raise ValueError("len(y) != len(x)")
		num = len(y)
		self.l = num

		max_idx = 0
		x_space = self.x_space = []
		for inst in x:
			nodes, top = gen_svm_nodearray(inst, isKernel=isKernel)
			x_space.append(nodes)
			if top > max_idx:
				max_idx = top
		self.n = max_idx

		self.y = (c_double * num)()
		for row, label in enumerate(y):
			self.y[row] = label

		self.x = (POINTER(svm_node) * num)()
		for row, nodes in enumerate(x_space):
			self.x[row] = nodes

class svm_parameter(Structure):
	"""ctypes mirror of LIBSVM's struct svm_parameter: training settings.

	Python-side extras kept on the instance (not part of the C struct):
	cross_validation, nr_fold and print_func, consumed by svm_train().
	"""
	_names = ["svm_type", "kernel_type", "degree", "gamma", "coef0",
			"cache_size", "eps", "C", "nr_weight", "weight_label", "weight",
			"nu", "p", "shrinking", "probability"]
	_types = [c_int, c_int, c_int, c_double, c_double,
			c_double, c_double, c_double, c_int, POINTER(c_int), POINTER(c_double),
			c_double, c_double, c_int, c_int]
	_fields_ = genFields(_names, _types)

	def __init__(self, options = None):
		"""Parse a LIBSVM option string/list; None or '' keeps the defaults."""
		if options is None:  # fixed: compare to None with 'is', not '=='
			options = ''
		self.parse_options(options)

	def __str__(self):
		"""Return a readable dump of the C fields plus Python-side extras."""
		s = ''
		attrs = svm_parameter._names + list(self.__dict__.keys())
		values = map(lambda attr: getattr(self, attr), attrs)
		for attr, val in zip(attrs, values):
			s += (' %s: %s\n' % (attr, val))
		s = s.strip()

		return s

	def set_to_default_values(self):
		"""Reset every field to the LIBSVM command-line defaults."""
		self.svm_type = C_SVC
		self.kernel_type = RBF
		self.degree = 3
		self.gamma = 0			# 0 means "1/num_features", filled in by svm_train
		self.coef0 = 0
		self.nu = 0.5
		self.cache_size = 100
		self.C = 1
		self.eps = 0.001
		self.p = 0.1
		self.shrinking = 1
		self.probability = 0
		self.nr_weight = 0
		self.weight_label = None
		self.weight = None
		self.cross_validation = False
		self.nr_fold = 0
		self.print_func = cast(None, PRINT_STRING_FUN)

	def parse_options(self, options):
		"""Parse LIBSVM command-line style options into the fields.

		options: a str (split on whitespace) or an argv-style list.
		Raises ValueError on an unknown flag or an invalid -v fold count.
		"""
		if isinstance(options, list):
			argv = options
		elif isinstance(options, str):
			argv = options.split()
		else:
			raise TypeError("arg 1 should be a list or a str.")
		self.set_to_default_values()
		self.print_func = cast(None, PRINT_STRING_FUN)
		weight_label = []
		weight = []

		i = 0
		while i < len(argv):
			if argv[i] == "-s":
				i = i + 1
				self.svm_type = int(argv[i])
			elif argv[i] == "-t":
				i = i + 1
				self.kernel_type = int(argv[i])
			elif argv[i] == "-d":
				i = i + 1
				self.degree = int(argv[i])
			elif argv[i] == "-g":
				i = i + 1
				self.gamma = float(argv[i])
			elif argv[i] == "-r":
				i = i + 1
				self.coef0 = float(argv[i])
			elif argv[i] == "-n":
				i = i + 1
				self.nu = float(argv[i])
			elif argv[i] == "-m":
				i = i + 1
				self.cache_size = float(argv[i])
			elif argv[i] == "-c":
				i = i + 1
				self.C = float(argv[i])
			elif argv[i] == "-e":
				i = i + 1
				self.eps = float(argv[i])
			elif argv[i] == "-p":
				i = i + 1
				self.p = float(argv[i])
			elif argv[i] == "-h":
				i = i + 1
				self.shrinking = int(argv[i])
			elif argv[i] == "-b":
				i = i + 1
				self.probability = int(argv[i])
			elif argv[i] == "-q":
				# Quiet mode: route LIBSVM's output to the discarding hook.
				self.print_func = PRINT_STRING_FUN(print_null)
			elif argv[i] == "-v":
				i = i + 1
				self.cross_validation = 1
				self.nr_fold = int(argv[i])
				if self.nr_fold < 2:
					raise ValueError("n-fold cross validation: n must >= 2")
			elif argv[i].startswith("-w"):
				# Per-class weight: the class label is embedded in the flag (-wN).
				i = i + 1
				self.nr_weight += 1
				weight_label += [int(argv[i-1][2:])]
				weight += [float(argv[i])]
			else:
				raise ValueError("Wrong options")
			i += 1

		libsvm.svm_set_print_string_function(self.print_func)
		self.weight_label = (c_int*self.nr_weight)()
		self.weight = (c_double*self.nr_weight)()
		for i in range(self.nr_weight):
			self.weight[i] = weight[i]
			self.weight_label[i] = weight_label[i]

class svm_model(Structure):
	"""ctypes mirror of LIBSVM's struct svm_model plus Python accessors.

	Instances created from C (via toPyModel) free the underlying C memory
	in __del__; instances created in Python do not.
	"""
	_names = ['param', 'nr_class', 'l', 'SV', 'sv_coef', 'rho',
			'probA', 'probB', 'sv_indices', 'label', 'nSV', 'free_sv']
	_types = [svm_parameter, c_int, c_int, POINTER(POINTER(svm_node)),
			POINTER(POINTER(c_double)), POINTER(c_double),
			POINTER(c_double), POINTER(c_double), POINTER(c_int),
			POINTER(c_int), POINTER(c_int), c_int]
	_fields_ = genFields(_names, _types)

	def __init__(self):
		# Marks the owner of the underlying memory; see __del__.
		self.__createfrom__ = 'python'

	def __del__(self):
		# free memory created by C to avoid memory leak
		if hasattr(self, '__createfrom__') and self.__createfrom__ == 'C':
			libsvm.svm_free_and_destroy_model(pointer(self))

	def get_svm_type(self):
		"""Return the svm_type constant (C_SVC, NU_SVC, ...)."""
		return libsvm.svm_get_svm_type(self)

	def get_nr_class(self):
		"""Return the number of classes (2 for regression/one-class)."""
		return libsvm.svm_get_nr_class(self)

	def get_svr_probability(self):
		"""Return sigma of the Laplace noise model (SVR probability models)."""
		return libsvm.svm_get_svr_probability(self)

	def get_labels(self):
		"""Return the class labels as a Python list."""
		nr_class = self.get_nr_class()
		labels = (c_int * nr_class)()
		libsvm.svm_get_labels(self, labels)
		return labels[:nr_class]

	def get_sv_indices(self):
		"""Return 1-based indices of the support vectors in the training set."""
		total_sv = self.get_nr_sv()
		sv_indices = (c_int * total_sv)()
		libsvm.svm_get_sv_indices(self, sv_indices)
		return sv_indices[:total_sv]

	def get_nr_sv(self):
		"""Return the total number of support vectors."""
		return libsvm.svm_get_nr_sv(self)

	def is_probability_model(self):
		"""Return True if the model was trained with probability estimates."""
		return (libsvm.svm_check_probability_model(self) == 1)

	def get_sv_coef(self):
		"""Return sv_coef as a list of (nr_class - 1)-tuples, one per SV."""
		return [tuple(self.sv_coef[j][i] for j in xrange(self.nr_class - 1))
				for i in xrange(self.l)]

	def get_SV(self):
		"""Return the support vectors as a list of {index: value} dicts.

		NOTE(review): the terminating node (index == -1) is stored into the
		row before the loop breaks, so every returned dict also contains a
		-1 key — matches this LIBSVM version's behavior.
		"""
		result = []
		for sparse_sv in self.SV[:self.l]:
			row = dict()

			i = 0
			while True:
				row[sparse_sv[i].index] = sparse_sv[i].value
				if sparse_sv[i].index == -1:
					break
				i += 1

			result.append(row)
		return result

def toPyModel(model_ptr):
	"""
	toPyModel(model_ptr) -> svm_model

	Convert a ctypes POINTER(svm_model) to a Python svm_model.

	Marks the result as C-owned so its __del__ frees the C memory.
	Raises ValueError if model_ptr is a NULL pointer.
	"""
	if not model_ptr:  # NULL ctypes pointers are falsy
		raise ValueError("Null pointer")
	m = model_ptr.contents
	m.__createfrom__ = 'C'
	return m

# Declare C signatures for every LIBSVM entry point this module uses, so
# ctypes converts arguments and return values correctly.

# Training and cross validation.
fillprototype(libsvm.svm_train, POINTER(svm_model), [POINTER(svm_problem), POINTER(svm_parameter)])
fillprototype(libsvm.svm_cross_validation, None, [POINTER(svm_problem), POINTER(svm_parameter), c_int, POINTER(c_double)])

# Model persistence.
fillprototype(libsvm.svm_save_model, c_int, [c_char_p, POINTER(svm_model)])
fillprototype(libsvm.svm_load_model, POINTER(svm_model), [c_char_p])

# Model introspection.
fillprototype(libsvm.svm_get_svm_type, c_int, [POINTER(svm_model)])
fillprototype(libsvm.svm_get_nr_class, c_int, [POINTER(svm_model)])
fillprototype(libsvm.svm_get_labels, None, [POINTER(svm_model), POINTER(c_int)])
fillprototype(libsvm.svm_get_sv_indices, None, [POINTER(svm_model), POINTER(c_int)])
fillprototype(libsvm.svm_get_nr_sv, c_int, [POINTER(svm_model)])
fillprototype(libsvm.svm_get_svr_probability, c_double, [POINTER(svm_model)])

# Prediction.
fillprototype(libsvm.svm_predict_values, c_double, [POINTER(svm_model), POINTER(svm_node), POINTER(c_double)])
fillprototype(libsvm.svm_predict, c_double, [POINTER(svm_model), POINTER(svm_node)])
fillprototype(libsvm.svm_predict_probability, c_double, [POINTER(svm_model), POINTER(svm_node), POINTER(c_double)])

# Memory management.
fillprototype(libsvm.svm_free_model_content, None, [POINTER(svm_model)])
fillprototype(libsvm.svm_free_and_destroy_model, None, [POINTER(POINTER(svm_model))])
fillprototype(libsvm.svm_destroy_param, None, [POINTER(svm_parameter)])

# Validation and output control.
fillprototype(libsvm.svm_check_parameter, c_char_p, [POINTER(svm_problem), POINTER(svm_parameter)])
fillprototype(libsvm.svm_check_probability_model, c_int, [POINTER(svm_model)])
fillprototype(libsvm.svm_set_print_string_function, None, [PRINT_STRING_FUN])

+ 262
- 0
gklearn/gedlib/lib/libsvm.3.22/python/svmutil.py View File

@@ -0,0 +1,262 @@
#!/usr/bin/env python

import os
import sys
from svm import *
from svm import __all__ as svm_all


# Public API: the high-level helpers defined here plus everything
# re-exported from the low-level svm module.
__all__ = ['evaluations', 'svm_load_model', 'svm_predict', 'svm_read_problem',
           'svm_save_model', 'svm_train'] + svm_all

# Put this file's directory first on sys.path.
sys.path = [os.path.dirname(os.path.abspath(__file__))] + sys.path

def svm_read_problem(data_file_name):
	"""
	svm_read_problem(data_file_name) -> [y, x]

	Read LIBSVM-format data from data_file_name and return labels y
	(list of floats) and data instances x (list of {index: value} dicts).
	"""
	prob_y = []
	prob_x = []
	# 'with' closes the file deterministically (the original leaked the handle).
	with open(data_file_name) as fp:
		for line in fp:
			line = line.split(None, 1)
			# In case an instance with all zero features
			if len(line) == 1: line += ['']
			label, features = line
			xi = {}
			for e in features.split():
				ind, val = e.split(":")
				xi[int(ind)] = float(val)
			prob_y += [float(label)]
			prob_x += [xi]
	return (prob_y, prob_x)

def svm_load_model(model_file_name):
	"""
	svm_load_model(model_file_name) -> model

	Load a LIBSVM model from model_file_name; return None (after printing
	a message) when the file cannot be opened.
	"""
	model_ptr = libsvm.svm_load_model(model_file_name.encode())
	if not model_ptr:
		print("can't open model file %s" % model_file_name)
		return None
	return toPyModel(model_ptr)

def svm_save_model(model_file_name, model):
	"""
	svm_save_model(model_file_name, model) -> None

	Save a LIBSVM model to the file model_file_name.
	"""
	encoded_name = model_file_name.encode()
	libsvm.svm_save_model(encoded_name, model)

def evaluations(ty, pv):
	"""
	evaluations(ty, pv) -> (ACC, MSE, SCC)

	Calculate accuracy, mean squared error and squared correlation coefficient
	using the true values (ty) and predicted values (pv).

	ty: list of true values.
	pv: list of predicted values, same length as ty.

	ACC is a percentage.  SCC is float('nan') when its denominator is zero
	(e.g. when ty or pv is constant).  Raises ValueError on length mismatch.
	"""
	if len(ty) != len(pv):
		raise ValueError("len(ty) must equal to len(pv)")
	total_correct = total_error = 0
	sumv = sumy = sumvv = sumyy = sumvy = 0
	for v, y in zip(pv, ty):
		if y == v:
			total_correct += 1
		total_error += (v-y)*(v-y)
		sumv += v
		sumy += y
		sumvv += v*v
		sumyy += y*y
		sumvy += v*y
	l = len(ty)
	ACC = 100.0*total_correct/l
	MSE = total_error/l
	try:
		SCC = ((l*sumvy-sumv*sumy)*(l*sumvy-sumv*sumy))/((l*sumvv-sumv*sumv)*(l*sumyy-sumy*sumy))
	except ZeroDivisionError:
		# Narrowed from a bare except: only a degenerate variance is expected.
		SCC = float('nan')
	return (ACC, MSE, SCC)

def svm_train(arg1, arg2=None, arg3=None):
	"""
	svm_train(y, x [, options]) -> model | ACC | MSE
	svm_train(prob [, options]) -> model | ACC | MSE
	svm_train(prob, param) -> model | ACC| MSE

	Train an SVM model from data (y, x) or an svm_problem prob using
	'options' or an svm_parameter param.
	If '-v' is specified in 'options' (i.e., cross validation)
	either accuracy (ACC) or mean-squared error (MSE) is returned.
	options:
	    -s svm_type : set type of SVM (default 0)
	        0 -- C-SVC		(multi-class classification)
	        1 -- nu-SVC		(multi-class classification)
	        2 -- one-class SVM
	        3 -- epsilon-SVR	(regression)
	        4 -- nu-SVR		(regression)
	    -t kernel_type : set type of kernel function (default 2)
	        0 -- linear: u'*v
	        1 -- polynomial: (gamma*u'*v + coef0)^degree
	        2 -- radial basis function: exp(-gamma*|u-v|^2)
	        3 -- sigmoid: tanh(gamma*u'*v + coef0)
	        4 -- precomputed kernel (kernel values in training_set_file)
	    -d degree : set degree in kernel function (default 3)
	    -g gamma : set gamma in kernel function (default 1/num_features)
	    -r coef0 : set coef0 in kernel function (default 0)
	    -c cost : set the parameter C of C-SVC, epsilon-SVR, and nu-SVR (default 1)
	    -n nu : set the parameter nu of nu-SVC, one-class SVM, and nu-SVR (default 0.5)
	    -p epsilon : set the epsilon in loss function of epsilon-SVR (default 0.1)
	    -m cachesize : set cache memory size in MB (default 100)
	    -e epsilon : set tolerance of termination criterion (default 0.001)
	    -h shrinking : whether to use the shrinking heuristics, 0 or 1 (default 1)
	    -b probability_estimates : whether to train a SVC or SVR model for probability estimates, 0 or 1 (default 0)
	    -wi weight : set the parameter C of class i to weight*C, for C-SVC (default 1)
	    -v n: n-fold cross validation mode
	    -q : quiet mode (no outputs)
	"""
	prob, param = None, None
	# Dispatch on the calling convention: (y, x[, options]) vs (prob[, ...]).
	if isinstance(arg1, (list, tuple)):
		assert isinstance(arg2, (list, tuple))
		y, x, options = arg1, arg2, arg3
		param = svm_parameter(options)
		prob = svm_problem(y, x, isKernel=(param.kernel_type == PRECOMPUTED))
	elif isinstance(arg1, svm_problem):
		prob = arg1
		if isinstance(arg2, svm_parameter):
			param = arg2
		else:
			param = svm_parameter(arg2)
	if prob is None or param is None:  # fixed: identity check against None
		raise TypeError("Wrong types for the arguments")

	# Precomputed kernels require the first column of every row to be
	# 0:<sample_serial_number> with the serial number within [1, prob.n].
	if param.kernel_type == PRECOMPUTED:
		for xi in prob.x_space:
			idx, val = xi[0].index, xi[0].value
			if xi[0].index != 0:
				raise ValueError('Wrong input format: first column must be 0:sample_serial_number')
			if val <= 0 or val > prob.n:
				raise ValueError('Wrong input format: sample_serial_number out of range')

	# gamma == 0 means "use the 1/num_features default".
	if param.gamma == 0 and prob.n > 0:
		param.gamma = 1.0 / prob.n
	libsvm.svm_set_print_string_function(param.print_func)
	err_msg = libsvm.svm_check_parameter(prob, param)
	if err_msg:
		raise ValueError('Error: %s' % err_msg)

	if param.cross_validation:
		l, nr_fold = prob.l, param.nr_fold
		target = (c_double * l)()
		libsvm.svm_cross_validation(prob, param, nr_fold, target)
		ACC, MSE, SCC = evaluations(prob.y[:l], target[:l])
		if param.svm_type in [EPSILON_SVR, NU_SVR]:
			print("Cross Validation Mean squared error = %g" % MSE)
			print("Cross Validation Squared correlation coefficient = %g" % SCC)
			return MSE
		else:
			print("Cross Validation Accuracy = %g%%" % ACC)
			return ACC
	else:
		m = libsvm.svm_train(prob, param)
		m = toPyModel(m)

		# If prob is destroyed, data including SVs pointed by m can remain.
		m.x_space = prob.x_space
		return m

def svm_predict(y, x, m, options=""):
	"""
	svm_predict(y, x, m [, options]) -> (p_labels, p_acc, p_vals)

	Predict data (y, x) with the SVM model m.
	options:
	    -b probability_estimates: whether to predict probability estimates,
	        0 or 1 (default 0); for one-class SVM only 0 is supported.
	    -q : quiet mode (no outputs).

	The return tuple contains
	p_labels: a list of predicted labels
	p_acc: a tuple including accuracy (for classification), mean-squared
	       error, and squared correlation coefficient (for regression).
	p_vals: a list of decision values or probability estimates (if '-b 1'
	        is specified). If k is the number of classes, for decision values,
	        each element includes results of predicting k(k-1)/2 binary-class
	        SVMs. For probabilities, each element contains k values indicating
	        the probability that the testing instance is in each class.
	        Note that the order of classes here is the same as 'model.label'
	        field in the model structure.
	"""

	def info(s):
		# Default reporter; the local name is rebound to print_null for '-q'.
		print(s)

	predict_probability = 0
	argv = options.split()
	i = 0
	while i < len(argv):
		if argv[i] == '-b':
			i += 1
			predict_probability = int(argv[i])
		elif argv[i] == '-q':
			info = print_null
		else:
			raise ValueError("Wrong options")
		i+=1

	svm_type = m.get_svm_type()
	is_prob_model = m.is_probability_model()
	nr_class = m.get_nr_class()
	pred_labels = []
	pred_values = []

	if predict_probability:
		if not is_prob_model:
			raise ValueError("Model does not support probabiliy estimates")

		if svm_type in [NU_SVR, EPSILON_SVR]:
			info("Prob. model for test data: target value = predicted value + z,\n"
			"z: Laplace distribution e^(-|z|/sigma)/(2sigma),sigma=%g" % m.get_svr_probability());
			# Regression: no per-class estimates, so allocate a zero-length buffer.
			nr_class = 0

		prob_estimates = (c_double * nr_class)()
		for xi in x:
			xi, idx = gen_svm_nodearray(xi, isKernel=(m.param.kernel_type == PRECOMPUTED))
			label = libsvm.svm_predict_probability(m, xi, prob_estimates)
			values = prob_estimates[:nr_class]
			pred_labels += [label]
			pred_values += [values]
	else:
		if is_prob_model:
			info("Model supports probability estimates, but disabled in predicton.")
		# One decision value per binary classifier: 1 for one-class/SVR,
		# k(k-1)/2 for k-class classification.
		if svm_type in (ONE_CLASS, EPSILON_SVR, NU_SVC):
			nr_classifier = 1
		else:
			nr_classifier = nr_class*(nr_class-1)//2
		dec_values = (c_double * nr_classifier)()
		for xi in x:
			xi, idx = gen_svm_nodearray(xi, isKernel=(m.param.kernel_type == PRECOMPUTED))
			label = libsvm.svm_predict_values(m, xi, dec_values)
			# Special case: a single-class model reports decision value [1].
			if(nr_class == 1):
				values = [1]
			else:
				values = dec_values[:nr_classifier]
			pred_labels += [label]
			pred_values += [values]

	ACC, MSE, SCC = evaluations(y, pred_labels)
	l = len(y)
	if svm_type in [EPSILON_SVR, NU_SVR]:
		info("Mean squared error = %g (regression)" % MSE)
		info("Squared correlation coefficient = %g (regression)" % SCC)
	else:
		info("Accuracy = %g%% (%d/%d) (classification)" % (ACC, int(l*ACC/100), l))

	return pred_labels, (ACC, MSE, SCC), pred_values



+ 239
- 0
gklearn/gedlib/lib/libsvm.3.22/svm-predict.c View File

@@ -0,0 +1,239 @@
#include <stdio.h>
#include <ctype.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include "svm.h"

/* Message sink used for quiet mode (-q): discards everything. */
int print_null(const char *s,...)
{
	(void)s;
	return 0;
}

/* Reporter for progress/result messages; swapped to print_null by -q. */
static int (*info)(const char *fmt,...) = &printf;

/* Scratch node array for the instance being predicted; grown on demand. */
struct svm_node *x;
int max_nr_attr = 64;		/* current capacity of x, doubled as needed */

struct svm_model* model;	/* the loaded model, shared by predict() */
int predict_probability=0;	/* set by -b 1 */

/* Growable line buffer shared by readline(). */
static char *line = NULL;
static int max_line_len;

/* Read one whole input line into the global 'line' buffer, doubling
 * max_line_len until a '\n' is seen or EOF is reached.
 * Returns 'line', or NULL at end of input. */
static char* readline(FILE *input)
{
	int len;

	if(fgets(line,max_line_len,input) == NULL)
		return NULL;

	while(strrchr(line,'\n') == NULL)
	{
		/* Line did not fit: grow the buffer and append the next chunk. */
		max_line_len *= 2;
		line = (char *) realloc(line,max_line_len);	/* NOTE(review): realloc result unchecked */
		len = (int) strlen(line);
		if(fgets(line+len,max_line_len-len,input) == NULL)
			break;
	}
	return line;
}

/* Report a malformed test-file line (1-based) on stderr and abort. */
void exit_input_error(int line_num)
{
	fprintf(stderr,"Wrong input format at line %d\n", line_num);
	exit(1);
}

/* Read test instances from 'input' one line at a time, predict each with
 * the global 'model', write predictions to 'output', and report accuracy
 * (classification) or MSE/SCC (regression) via info(). */
void predict(FILE *input, FILE *output)
{
	int correct = 0;
	int total = 0;
	double error = 0;
	/* Running sums for the squared correlation coefficient. */
	double sump = 0, sumt = 0, sumpp = 0, sumtt = 0, sumpt = 0;

	int svm_type=svm_get_svm_type(model);
	int nr_class=svm_get_nr_class(model);
	double *prob_estimates=NULL;
	int j;

	if(predict_probability)
	{
		if (svm_type==NU_SVR || svm_type==EPSILON_SVR)
			info("Prob. model for test data: target value = predicted value + z,\nz: Laplace distribution e^(-|z|/sigma)/(2sigma),sigma=%g\n",svm_get_svr_probability(model));
		else
		{
			/* Classification: emit the label order once as a header line. */
			int *labels=(int *) malloc(nr_class*sizeof(int));
			svm_get_labels(model,labels);
			prob_estimates = (double *) malloc(nr_class*sizeof(double));
			fprintf(output,"labels");
			for(j=0;j<nr_class;j++)
				fprintf(output," %d",labels[j]);
			fprintf(output,"\n");
			free(labels);
		}
	}

	max_line_len = 1024;
	line = (char *)malloc(max_line_len*sizeof(char));
	while(readline(input) != NULL)
	{
		int i = 0;
		double target_label, predict_label;
		char *idx, *val, *label, *endptr;
		int inst_max_index = -1; // strtol gives 0 if wrong format, and precomputed kernel has <index> start from 0

		/* First token is the true label. */
		label = strtok(line," \t\n");
		if(label == NULL) // empty line
			exit_input_error(total+1);

		target_label = strtod(label,&endptr);
		if(endptr == label || *endptr != '\0')
			exit_input_error(total+1);

		/* Parse index:value features into the global node array x. */
		while(1)
		{
			if(i>=max_nr_attr-1)	// need one more for index = -1
			{
				max_nr_attr *= 2;
				x = (struct svm_node *) realloc(x,max_nr_attr*sizeof(struct svm_node));
			}

			idx = strtok(NULL,":");
			val = strtok(NULL," \t");

			if(val == NULL)
				break;
			errno = 0;
			x[i].index = (int) strtol(idx,&endptr,10);
			/* Indices must be strictly increasing within a line. */
			if(endptr == idx || errno != 0 || *endptr != '\0' || x[i].index <= inst_max_index)
				exit_input_error(total+1);
			else
				inst_max_index = x[i].index;

			errno = 0;
			x[i].value = strtod(val,&endptr);
			if(endptr == val || errno != 0 || (*endptr != '\0' && !isspace(*endptr)))
				exit_input_error(total+1);

			++i;
		}
		x[i].index = -1;	/* terminator expected by LIBSVM */

		if (predict_probability && (svm_type==C_SVC || svm_type==NU_SVC))
		{
			/* Write the label followed by the per-class probabilities. */
			predict_label = svm_predict_probability(model,x,prob_estimates);
			fprintf(output,"%g",predict_label);
			for(j=0;j<nr_class;j++)
				fprintf(output," %g",prob_estimates[j]);
			fprintf(output,"\n");
		}
		else
		{
			predict_label = svm_predict(model,x);
			fprintf(output,"%g\n",predict_label);
		}

		/* Accumulate statistics for the final summary. */
		if(predict_label == target_label)
			++correct;
		error += (predict_label-target_label)*(predict_label-target_label);
		sump += predict_label;
		sumt += target_label;
		sumpp += predict_label*predict_label;
		sumtt += target_label*target_label;
		sumpt += predict_label*target_label;
		++total;
	}
	if (svm_type==NU_SVR || svm_type==EPSILON_SVR)
	{
		info("Mean squared error = %g (regression)\n",error/total);
		info("Squared correlation coefficient = %g (regression)\n",
			((total*sumpt-sump*sumt)*(total*sumpt-sump*sumt))/
			((total*sumpp-sump*sump)*(total*sumtt-sumt*sumt))
			);
	}
	else
		info("Accuracy = %g%% (%d/%d) (classification)\n",
			(double)correct/total*100,correct,total);
	if(predict_probability)
		free(prob_estimates);
}

/* Print svm-predict usage to stdout and exit with failure status. */
void exit_with_help()
{
	printf(
	"Usage: svm-predict [options] test_file model_file output_file\n"
	"options:\n"
	"-b probability_estimates: whether to predict probability estimates, 0 or 1 (default 0); for one-class SVM only 0 is supported\n"
	"-q : quiet mode (no outputs)\n"
	);
	exit(1);
}

/* Entry point: parse -b/-q flags, open the test/model/output files,
 * run predict(), then release all resources. */
int main(int argc, char **argv)
{
	FILE *input, *output;
	int i;
	// parse options
	for(i=1;i<argc;i++)
	{
		if(argv[i][0] != '-') break;
		++i;	/* assume the flag consumes one argument... */
		switch(argv[i-1][1])
		{
			case 'b':
				predict_probability = atoi(argv[i]);
				break;
			case 'q':
				info = &print_null;
				i--;	/* ...but -q takes none, so step back */
				break;
			default:
				fprintf(stderr,"Unknown option: -%c\n", argv[i-1][1]);
				exit_with_help();
		}
	}

	/* Exactly three positional arguments must remain. */
	if(i>=argc-2)
		exit_with_help();

	input = fopen(argv[i],"r");
	if(input == NULL)
	{
		fprintf(stderr,"can't open input file %s\n",argv[i]);
		exit(1);
	}

	output = fopen(argv[i+2],"w");
	if(output == NULL)
	{
		fprintf(stderr,"can't open output file %s\n",argv[i+2]);
		exit(1);
	}

	if((model=svm_load_model(argv[i+1]))==0)
	{
		fprintf(stderr,"can't open model file %s\n",argv[i+1]);
		exit(1);
	}

	/* Initial feature buffer; predict() grows it as needed. */
	x = (struct svm_node *) malloc(max_nr_attr*sizeof(struct svm_node));
	if(predict_probability)
	{
		if(svm_check_probability_model(model)==0)
		{
			fprintf(stderr,"Model does not support probabiliy estimates\n");
			exit(1);
		}
	}
	else
	{
		if(svm_check_probability_model(model)!=0)
			info("Model supports probability estimates, but disabled in prediction.\n");
	}

	predict(input,output);
	svm_free_and_destroy_model(&model);
	free(x);
	free(line);
	fclose(input);
	fclose(output);
	return 0;
}

+ 397
- 0
gklearn/gedlib/lib/libsvm.3.22/svm-scale.c View File

@@ -0,0 +1,397 @@
#include <float.h>
#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>
#include <string.h>

/* Print the svm-scale usage message and terminate with a failure
 * exit status.  Never returns. */
void exit_with_help()
{
	fputs(
	"Usage: svm-scale [options] data_filename\n"
	"options:\n"
	"-l lower : x scaling lower limit (default -1)\n"
	"-u upper : x scaling upper limit (default +1)\n"
	"-y y_lower y_upper : y scaling limits (default: no y scaling)\n"
	"-s save_filename : save scaling parameters to save_filename\n"
	"-r restore_filename : restore scaling parameters from restore_filename\n"
	, stdout);
	exit(1);
}

char *line = NULL;		/* current input line; grown on demand by readline() */
int max_line_len = 1024;	/* current capacity of `line` */
double lower=-1.0,upper=1.0,y_lower,y_upper;	/* target ranges for x (-l/-u) and y (-y) */
int y_scaling = 0;		/* nonzero once -y was given or a 'y' block was restored */
double *feature_max;		/* per-feature observed maxima, indexed 1..max_index */
double *feature_min;		/* per-feature observed minima, same indexing */
double y_max = -DBL_MAX;	/* observed label maximum */
double y_min = DBL_MAX;		/* observed label minimum */
int max_index;			/* largest feature index seen in data/restore file */
int min_index;			/* smallest feature index seen (warned about if < 1) */
long int num_nonzeros = 0;	/* nonzero entries read from the input */
long int new_num_nonzeros = 0;	/* nonzero entries actually emitted after scaling */

#define max(x,y) (((x)>(y))?(x):(y))
#define min(x,y) (((x)<(y))?(x):(y))

void output_target(double value);
void output(int index, double value);
char* readline(FILE *input);
int clean_up(FILE *fp_restore, FILE *fp, const char *msg);

/* svm-scale entry point.
 *
 * Makes three passes over the data file:
 *   pass 1   - find the max/min feature index (also scans the -r file);
 *   pass 2   - record per-feature min/max (absent sparse entries count as 0);
 *   pass 2.5 - save (-s) or restore (-r) the scaling parameters;
 *   pass 3   - emit the scaled data set on stdout.
 * Returns 0 on success, -1 (via clean_up) on malformed input. */
int main(int argc,char **argv)
{
	int i,index;
	FILE *fp, *fp_restore = NULL;
	char *save_filename = NULL;
	char *restore_filename = NULL;

	/* parse options; each flag consumes one argument (-y consumes two) */
	for(i=1;i<argc;i++)
	{
		if(argv[i][0] != '-') break;
		++i;
		switch(argv[i-1][1])
		{
			case 'l': lower = atof(argv[i]); break;
			case 'u': upper = atof(argv[i]); break;
			case 'y':
				y_lower = atof(argv[i]);
				++i;
				y_upper = atof(argv[i]);
				y_scaling = 1;
				break;
			case 's': save_filename = argv[i]; break;
			case 'r': restore_filename = argv[i]; break;
			default:
				fprintf(stderr,"unknown option\n");
				exit_with_help();
		}
	}

	if(!(upper > lower) || (y_scaling && !(y_upper > y_lower)))
	{
		fprintf(stderr,"inconsistent lower/upper specification\n");
		exit(1);
	}
	if(restore_filename && save_filename)
	{
		fprintf(stderr,"cannot use -r and -s simultaneously\n");
		exit(1);
	}

	/* exactly one positional argument (the data file) must remain */
	if(argc != i+1)
		exit_with_help();

	fp=fopen(argv[i],"r");
	if(fp==NULL)
	{
		fprintf(stderr,"can't open file %s\n", argv[i]);
		exit(1);
	}

	line = (char *) malloc(max_line_len*sizeof(char));

/* cursor-advancing helpers used by the scanning loops below */
#define SKIP_TARGET\
	while(isspace(*p)) ++p;\
	while(!isspace(*p)) ++p;

#define SKIP_ELEMENT\
	while(*p!=':') ++p;\
	++p;\
	while(isspace(*p)) ++p;\
	while(*p && !isspace(*p)) ++p;

	/* assumption: min index of attributes is 1 */
	/* pass 1: find out max index of attributes */
	max_index = 0;
	min_index = 1;

	if(restore_filename)
	{
		int idx, c;

		fp_restore = fopen(restore_filename,"r");
		if(fp_restore==NULL)
		{
			fprintf(stderr,"can't open file %s\n", restore_filename);
			exit(1);
		}

		/* skip the optional 3-line 'y' block and the 2-line 'x' header,
		 * then harvest the largest feature index stored in the file */
		c = fgetc(fp_restore);
		if(c == 'y')
		{
			readline(fp_restore);
			readline(fp_restore);
			readline(fp_restore);
		}
		readline(fp_restore);
		readline(fp_restore);

		while(fscanf(fp_restore,"%d %*f %*f\n",&idx) == 1)
			max_index = max(idx,max_index);
		rewind(fp_restore);
	}

	while(readline(fp)!=NULL)
	{
		char *p=line;

		SKIP_TARGET

		while(sscanf(p,"%d:%*f",&index)==1)
		{
			max_index = max(max_index, index);
			min_index = min(min_index, index);
			SKIP_ELEMENT
			num_nonzeros++;
		}
	}

	if(min_index < 1)
		fprintf(stderr,
			"WARNING: minimal feature index is %d, but indices should start from 1\n", min_index);

	rewind(fp);

	feature_max = (double *)malloc((max_index+1)* sizeof(double));
	feature_min = (double *)malloc((max_index+1)* sizeof(double));

	if(feature_max == NULL || feature_min == NULL)
	{
		fprintf(stderr,"can't allocate enough memory\n");
		exit(1);
	}

	for(i=0;i<=max_index;i++)
	{
		feature_max[i]=-DBL_MAX;
		feature_min[i]=DBL_MAX;
	}

	/* pass 2: find out min/max value */
	while(readline(fp)!=NULL)
	{
		char *p=line;
		int next_index=1;
		double target;
		double value;

		if (sscanf(p,"%lf",&target) != 1)
			return clean_up(fp_restore, fp, "ERROR: failed to read labels\n");
		y_max = max(y_max,target);
		y_min = min(y_min,target);
		SKIP_TARGET

		while(sscanf(p,"%d:%lf",&index,&value)==2)
		{
			/* features skipped by a sparse line implicitly equal 0 */
			for(i=next_index;i<index;i++)
			{
				feature_max[i]=max(feature_max[i],0);
				feature_min[i]=min(feature_min[i],0);
			}
			feature_max[index]=max(feature_max[index],value);
			feature_min[index]=min(feature_min[index],value);

			SKIP_ELEMENT
			next_index=index+1;
		}

		for(i=next_index;i<=max_index;i++)
		{
			feature_max[i]=max(feature_max[i],0);
			feature_min[i]=min(feature_min[i],0);
		}
	}

	rewind(fp);

	/* pass 2.5: save/restore feature_min/feature_max */
	if(restore_filename)
	{
		/* fp_restore rewinded in finding max_index */
		int idx, c;
		double fmin, fmax;
		int next_index = 1;

		if((c = fgetc(fp_restore)) == 'y')
		{
			if(fscanf(fp_restore, "%lf %lf\n", &y_lower, &y_upper) != 2 ||
			   fscanf(fp_restore, "%lf %lf\n", &y_min, &y_max) != 2)
				return clean_up(fp_restore, fp, "ERROR: failed to read scaling parameters\n");
			y_scaling = 1;
		}
		else
			ungetc(c, fp_restore);

		if (fgetc(fp_restore) == 'x')
		{
			if(fscanf(fp_restore, "%lf %lf\n", &lower, &upper) != 2)
				return clean_up(fp_restore, fp, "ERROR: failed to read scaling parameters\n");
			while(fscanf(fp_restore,"%d %lf %lf\n",&idx,&fmin,&fmax)==3)
			{
				/* indices in the data but absent from the restore file
				 * cannot be scaled consistently - warn, don't abort */
				for(i = next_index;i<idx;i++)
					if(feature_min[i] != feature_max[i])
						fprintf(stderr,
							"WARNING: feature index %d appeared in file %s was not seen in the scaling factor file %s.\n",
							i, argv[argc-1], restore_filename);

				feature_min[idx] = fmin;
				feature_max[idx] = fmax;

				next_index = idx + 1;
			}
			for(i=next_index;i<=max_index;i++)
				if(feature_min[i] != feature_max[i])
					fprintf(stderr,
						"WARNING: feature index %d appeared in file %s was not seen in the scaling factor file %s.\n",
						i, argv[argc-1], restore_filename);
		}
		fclose(fp_restore);
	}

	if(save_filename)
	{
		FILE *fp_save = fopen(save_filename,"w");
		if(fp_save==NULL)
		{
			fprintf(stderr,"can't open file %s\n", save_filename);
			exit(1);
		}
		if(y_scaling)
		{
			fprintf(fp_save, "y\n");
			fprintf(fp_save, "%.16g %.16g\n", y_lower, y_upper);
			fprintf(fp_save, "%.16g %.16g\n", y_min, y_max);
		}
		fprintf(fp_save, "x\n");
		fprintf(fp_save, "%.16g %.16g\n", lower, upper);
		for(i=1;i<=max_index;i++)
		{
			/* single-valued features carry no information (output()
			 * drops them too), so they are not stored */
			if(feature_min[i]!=feature_max[i])
				fprintf(fp_save,"%d %.16g %.16g\n",i,feature_min[i],feature_max[i]);
		}

		if(min_index < 1)
			fprintf(stderr,
				"WARNING: scaling factors with indices smaller than 1 are not stored to the file %s.\n", save_filename);

		fclose(fp_save);
	}

	/* pass 3: scale */
	while(readline(fp)!=NULL)
	{
		char *p=line;
		int next_index=1;
		double target;
		double value;

		if (sscanf(p,"%lf",&target) != 1)
			return clean_up(NULL, fp, "ERROR: failed to read labels\n");
		output_target(target);

		SKIP_TARGET

		while(sscanf(p,"%d:%lf",&index,&value)==2)
		{
			for(i=next_index;i<index;i++)
				output(i,0);
			output(index,value);

			SKIP_ELEMENT
			next_index=index+1;
		}

		for(i=next_index;i<=max_index;i++)
			output(i,0);

		printf("\n");
	}

	if (new_num_nonzeros > num_nonzeros)
		fprintf(stderr,
			"WARNING: original #nonzeros %ld\n"
			" > new #nonzeros %ld\n"
			"If feature values are non-negative and sparse, use -l 0 rather than the default -l -1\n",
			num_nonzeros, new_num_nonzeros);

	free(line);
	free(feature_max);
	free(feature_min);
	fclose(fp);
	return 0;
}

/* Read one full line from `input` into the global buffer `line`,
 * doubling `max_line_len` as needed.  Returns `line`, or NULL at EOF. */
char* readline(FILE *input)
{
	int len;

	if(fgets(line,max_line_len,input) == NULL)
		return NULL;

	for(;;)
	{
		if(strrchr(line,'\n') != NULL)	/* a complete line is buffered */
			break;
		max_line_len *= 2;
		line = (char *) realloc(line, max_line_len);
		len = (int) strlen(line);
		if(fgets(line+len,max_line_len-len,input) == NULL)
			break;	/* EOF before a newline: return the partial line */
	}
	return line;
}

/* Print the (optionally rescaled) label followed by a space.
 * With y scaling on, y_min/y_max map exactly onto y_lower/y_upper. */
void output_target(double value)
{
	if(!y_scaling)
	{
		printf("%g ",value);
		return;
	}

	if(value == y_min)
		value = y_lower;
	else if(value == y_max)
		value = y_upper;
	else value = y_lower + (y_upper-y_lower) *
		     (value - y_min)/(y_max-y_min);

	printf("%g ",value);
}

/* Print one "index:value" pair after rescaling into [lower,upper].
 * Features whose min equals max are dropped entirely, and rescaled
 * zeros are suppressed to keep the output sparse. */
void output(int index, double value)
{
	/* skip single-valued attribute */
	if(feature_max[index] == feature_min[index])
		return;

	if(value == feature_min[index])
		value = lower;
	else if(value == feature_max[index])
		value = upper;
	else
		value = lower + (upper-lower) *
			(value-feature_min[index])/
			(feature_max[index]-feature_min[index]);

	if(value == 0)
		return;

	printf("%d:%g ",index, value);
	new_num_nonzeros++;
}

/* Report a fatal parse error, release every global resource and both
 * files (fp_restore may be NULL), and return -1 for main() to return. */
int clean_up(FILE *fp_restore, FILE *fp, const char* msg)
{
	fputs(msg, stderr);

	if (fp_restore != NULL)
		fclose(fp_restore);
	fclose(fp);

	free(feature_min);
	free(feature_max);
	free(line);

	return -1;
}


+ 22
- 0
gklearn/gedlib/lib/libsvm.3.22/svm-toy/gtk/Makefile View File

@@ -0,0 +1,22 @@
# Default the compilers only when the user has not already set them.
# Note: the assignment must be `?=`; `CC? = gcc` defines a variable
# literally named "CC?" and leaves CC/CXX at make's built-in defaults.
CC ?= gcc
CXX ?= g++
CFLAGS = -Wall -O3 -g `pkg-config --cflags gtk+-2.0`
LIBS = `pkg-config --libs gtk+-2.0`

svm-toy: main.o interface.o callbacks.o ../../svm.o
	$(CXX) $(CFLAGS) main.o interface.o callbacks.o ../../svm.o -o svm-toy $(LIBS)

main.o: main.c
	$(CC) $(CFLAGS) -c main.c

interface.o: interface.c interface.h
	$(CC) $(CFLAGS) -c interface.c

callbacks.o: callbacks.cpp callbacks.h
	$(CXX) $(CFLAGS) -c callbacks.cpp

../../svm.o: ../../svm.cpp ../../svm.h
	make -C ../.. svm.o

clean:
	rm -f *~ callbacks.o svm-toy main.o interface.o callbacks.o ../../svm.o

+ 447
- 0
gklearn/gedlib/lib/libsvm.3.22/svm-toy/gtk/callbacks.cpp View File

@@ -0,0 +1,447 @@
#include <gtk/gtk.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <list>
#include "callbacks.h"
#include "interface.h"
#include "../../svm.h"
using namespace std;

#define DEFAULT_PARAM "-t 2 -c 100"
#define XLEN 500
#define YLEN 500

/* Shared palette: [0] is the black background; [(int)d] for d=1..3 colors
 * the predicted decision regions; draw_point() uses [value+3], i.e. the
 * brighter [4..6] entries, for the user's points. */
GdkColor colors[] =
{
	{0,0,0,0},
	{0,0,120<<8,120<<8},
	{0,120<<8,120<<8,0},
	{0,120<<8,0,120<<8},
	{0,0,200<<8,200<<8},
	{0,200<<8,200<<8,0},
	{0,200<<8,0,200<<8},
};

GdkGC *gc;		/* shared graphics context, created in svm_toy_initialize() */
GdkPixmap *pixmap;	/* off-screen backing store for the drawing area */
/* widgets built in interface.c; extern "C" so the C side links to them */
extern "C" GtkWidget *draw_main;
GtkWidget *draw_main;
extern "C" GtkWidget *entry_option;
GtkWidget *entry_option;

/* one training sample: position normalised to [0,1) plus its class/value */
typedef struct {
	double x, y;
	signed char value;
} point;

list<point> point_list;		/* every point clicked so far */
int current_value = 1;		/* class assigned to the next click (cycles 1..3) */

/* One-time GUI setup, called from main() after the window is shown:
 * allocates the palette, creates the GC and backing pixmap from the
 * realised draw_main window, clears the canvas to black, and seeds the
 * option entry with DEFAULT_PARAM. */
extern "C" void svm_toy_initialize()
{
	gboolean success[7];

	/* allocate all 7 palette entries (best-effort match allowed) */
	gdk_colormap_alloc_colors(
		gdk_colormap_get_system(),
		colors,
		7,
		FALSE,
		TRUE,
		success);

	gc = gdk_gc_new(draw_main->window);
	pixmap = gdk_pixmap_new(draw_main->window,XLEN,YLEN,-1);
	gdk_gc_set_foreground(gc,&colors[0]);
	gdk_draw_rectangle(pixmap,gc,TRUE,0,0,XLEN,YLEN);
	gtk_entry_set_text(GTK_ENTRY(entry_option),DEFAULT_PARAM);
}

/* Copy the (x,y,w,h) rectangle of the backing pixmap onto the widget's
 * on-screen window. */
void redraw_area(GtkWidget* widget, int x, int y, int w, int h)
{
	gdk_draw_pixmap(widget->window,
			gc,
			pixmap,
			x,y,x,y,w,h);
}

/* Draw one 4x4 point in its class color (palette entry value+3), both
 * into the backing pixmap and directly on screen. */
void draw_point(const point& p)
{
	gdk_gc_set_foreground(gc,&colors[p.value+3]);
	gdk_draw_rectangle(pixmap, gc, TRUE,int(p.x*XLEN),int(p.y*YLEN),4,4);
	gdk_draw_rectangle(draw_main->window, gc, TRUE,int(p.x*XLEN),int(p.y*YLEN),4,4);
}

void draw_all_points()
{
for(list<point>::iterator p = point_list.begin(); p != point_list.end();p++)
draw_point(*p);
}

/* Discard every point and wipe both the backing pixmap and the screen
 * back to the background color. */
void clear_all()
{
	point_list.clear();
	gdk_gc_set_foreground(gc,&colors[0]);
	gdk_draw_rectangle(pixmap,gc,TRUE,0,0,XLEN,YLEN);
	redraw_area(draw_main,0,0,XLEN,YLEN);
}

/* "Change" button: cycle the class used for new points 1 -> 2 -> 3 -> 1. */
void
on_button_change_clicked               (GtkButton       *button,
                                        gpointer         user_data)
{
	int next = current_value + 1;
	current_value = (next > 3) ? 1 : next;
}

/* "Run" button: parse the option string (same flags as svm-train), train
 * an SVM on the clicked points, then paint the result into the pixmap -
 * decision regions for classification / one-class, or the fitted curve
 * (plus the epsilon tube for EPSILON_SVR) for regression. */
void
on_button_run_clicked                  (GtkButton       *button,
                                        gpointer         user_data)
{
	// guard: nothing to train on
	if(point_list.empty()) return;

	svm_parameter param;
	int i,j;

	// default values
	param.svm_type = C_SVC;
	param.kernel_type = RBF;
	param.degree = 3;
	param.gamma = 0;
	param.coef0 = 0;
	param.nu = 0.5;
	param.cache_size = 100;
	param.C = 1;
	param.eps = 1e-3;
	param.p = 0.1;
	param.shrinking = 1;
	param.probability = 0;
	param.nr_weight = 0;
	param.weight_label = NULL;
	param.weight = NULL;

	// parse options typed into the entry widget
	const char *p = gtk_entry_get_text(GTK_ENTRY(entry_option));

	while (1) {
		while (*p && *p != '-')
			p++;

		if (*p == '\0')
			break;

		p++;
		switch (*p++) {
			case 's':
				param.svm_type = atoi(p);
				break;
			case 't':
				param.kernel_type = atoi(p);
				break;
			case 'd':
				param.degree = atoi(p);
				break;
			case 'g':
				param.gamma = atof(p);
				break;
			case 'r':
				param.coef0 = atof(p);
				break;
			case 'n':
				param.nu = atof(p);
				break;
			case 'm':
				param.cache_size = atof(p);
				break;
			case 'c':
				param.C = atof(p);
				break;
			case 'e':
				param.eps = atof(p);
				break;
			case 'p':
				param.p = atof(p);
				break;
			case 'h':
				param.shrinking = atoi(p);
				break;
			case 'b':
				param.probability = atoi(p);
				break;
			case 'w':	// -wN W: append one (label, weight) pair
				++param.nr_weight;
				param.weight_label = (int *)realloc(param.weight_label,sizeof(int)*param.nr_weight);
				param.weight = (double *)realloc(param.weight,sizeof(double)*param.nr_weight);
				param.weight_label[param.nr_weight-1] = atoi(p);
				while(*p && !isspace(*p)) ++p;
				param.weight[param.nr_weight-1] = atof(p);
				break;
		}
	}

	// build problem
	svm_problem prob;

	prob.l = point_list.size();
	prob.y = new double[prob.l];

	if(param.kernel_type == PRECOMPUTED)
	{
		/* NOTE(review): deliberately unsupported here, but prob.y
		 * allocated above appears to leak on this path - confirm. */
	}
	else if(param.svm_type == EPSILON_SVR ||
		param.svm_type == NU_SVR)
	{
		/* regression: 1-D input (the x coordinate), target is y */
		if(param.gamma == 0) param.gamma = 1;
		svm_node *x_space = new svm_node[2 * prob.l];
		prob.x = new svm_node *[prob.l];

		i = 0;
		for (list <point>::iterator q = point_list.begin(); q != point_list.end(); q++, i++)
		{
			x_space[2 * i].index = 1;	// feature 1 = x coordinate
			x_space[2 * i].value = q->x;
			x_space[2 * i + 1].index = -1;	// terminator node
			prob.x[i] = &x_space[2 * i];
			prob.y[i] = q->y;
		}

		// build model & classify
		svm_model *model = svm_train(&prob, &param);
		svm_node x[2];
		x[0].index = 1;
		x[1].index = -1;
		int *j = new int[XLEN];		// NOTE: shadows the outer int j
		for (i = 0; i < XLEN; i++)
		{
			x[0].value = (double) i / XLEN;
			j[i] = (int)(YLEN*svm_predict(model, x));
		}

		gdk_gc_set_foreground(gc,&colors[0]);
		gdk_draw_line(pixmap,gc,0,0,0,YLEN-1);
		gdk_draw_line(draw_main->window,gc,0,0,0,YLEN-1);
		int p = (int)(param.p * YLEN);	// NOTE: shadows the option-string pointer p
		for(i = 1; i < XLEN; i++)
		{
			/* erase the column, then draw the curve segment */
			gdk_gc_set_foreground(gc,&colors[0]);
			gdk_draw_line(pixmap,gc,i,0,i,YLEN-1);
			gdk_draw_line(draw_main->window,gc,i,0,i,YLEN-1);
			gdk_gc_set_foreground(gc,&colors[5]);
			gdk_draw_line(pixmap,gc,i-1,j[i-1],i,j[i]);
			gdk_draw_line(draw_main->window,gc,i-1,j[i-1],i,j[i]);
			if(param.svm_type == EPSILON_SVR)
			{
				/* epsilon tube: curve shifted by +/- p pixels */
				gdk_gc_set_foreground(gc,&colors[2]);
				gdk_draw_line(pixmap,gc,i-1,j[i-1]+p,i,j[i]+p);
				gdk_draw_line(draw_main->window,gc,i-1,j[i-1]+p,i,j[i]+p);
				gdk_gc_set_foreground(gc,&colors[2]);
				gdk_draw_line(pixmap,gc,i-1,j[i-1]-p,i,j[i]-p);
				gdk_draw_line(draw_main->window,gc,i-1,j[i-1]-p,i,j[i]-p);
			}
		}

		svm_free_and_destroy_model(&model);
		delete[] j;
		delete[] x_space;
		delete[] prob.x;
		delete[] prob.y;
	}
	else
	{
		/* classification / one-class: 2-D input, target is the class */
		if(param.gamma == 0) param.gamma = 0.5;
		svm_node *x_space = new svm_node[3 * prob.l];
		prob.x = new svm_node *[prob.l];

		i = 0;
		for (list <point>::iterator q = point_list.begin(); q != point_list.end(); q++, i++)
		{
			x_space[3 * i].index = 1;
			x_space[3 * i].value = q->x;
			x_space[3 * i + 1].index = 2;
			x_space[3 * i + 1].value = q->y;
			x_space[3 * i + 2].index = -1;
			prob.x[i] = &x_space[3 * i];
			prob.y[i] = q->value;
		}

		// build model & classify
		svm_model *model = svm_train(&prob, &param);
		svm_node x[3];
		x[0].index = 1;
		x[1].index = 2;
		x[2].index = -1;
		/* color every pixel by its predicted class */
		for (i = 0; i < XLEN; i++)
			for (j = 0; j < YLEN; j++) {
				x[0].value = (double) i / XLEN;
				x[1].value = (double) j / YLEN;
				double d = svm_predict(model, x);
				if (param.svm_type == ONE_CLASS && d<0) d=2;	// map outliers to color 2
				gdk_gc_set_foreground(gc,&colors[(int)d]);
				gdk_draw_point(pixmap,gc,i,j);
				gdk_draw_point(draw_main->window,gc,i,j);
			}

		svm_free_and_destroy_model(&model);
		delete[] x_space;
		delete[] prob.x;
		delete[] prob.y;
	}
	free(param.weight_label);
	free(param.weight);
	draw_all_points();	// repaint the points over the freshly drawn background
}

/* "Clear" button: forget all points and wipe the canvas. */
void
on_button_clear_clicked                (GtkButton       *button,
                                        gpointer         user_data)
{
	clear_all();
}

/* Main window destroyed: terminate the application. */
void
on_window1_destroy                     (GtkObject       *object,
                                        gpointer         user_data)
{
	gtk_exit(0);
}

/* Mouse click on the canvas: record a point (coordinates normalised to
 * [0,1)) with the currently selected class and draw it immediately. */
gboolean
on_draw_main_button_press_event        (GtkWidget       *widget,
                                        GdkEventButton  *event,
                                        gpointer         user_data)
{
	point p = {(double)event->x/XLEN, (double)event->y/YLEN, current_value};
	point_list.push_back(p);
	draw_point(p);
	return FALSE;	/* let other handlers run too */
}

/* Expose event: restore the damaged rectangle from the backing pixmap. */
gboolean
on_draw_main_expose_event              (GtkWidget       *widget,
                                        GdkEventExpose  *event,
                                        gpointer         user_data)
{
	redraw_area(widget,
		    event->area.x, event->area.y,
		    event->area.width, event->area.height);
	return FALSE;
}

GtkWidget *fileselection;			/* the modal file dialog (created on demand) */
static enum { SAVE, LOAD } fileselection_flag;	/* tells the OK handler what to do */

/* Create and show the file dialog; both OK and Cancel also destroy the
 * dialog widget when clicked. */
void show_fileselection()
{
	fileselection = create_fileselection();
	gtk_signal_connect_object(
		GTK_OBJECT(GTK_FILE_SELECTION(fileselection)->ok_button),
		"clicked", GTK_SIGNAL_FUNC(gtk_widget_destroy),
		(GtkObject *) fileselection);
	gtk_signal_connect_object (GTK_OBJECT
		(GTK_FILE_SELECTION(fileselection)->cancel_button),
		"clicked", GTK_SIGNAL_FUNC(gtk_widget_destroy),
		(GtkObject *) fileselection);

	gtk_widget_show(fileselection);
}

/* "Save" button: open the file dialog in save mode. */
void
on_button_save_clicked                 (GtkButton       *button,
                                        gpointer         user_data)
{
	fileselection_flag = SAVE;
	show_fileselection();
}


/* "Load" button: open the file dialog in load mode. */
void
on_button_load_clicked                 (GtkButton       *button,
                                        gpointer         user_data)
{
	fileselection_flag = LOAD;
	show_fileselection();
}

/* OK pressed in the file dialog: depending on fileselection_flag, save
 * the point set to, or load one from, the chosen file.
 * File format matches svm-train input: "value 1:x 2:y" per line for
 * classification, "y 1:x" for regression (chosen by the -s flag found
 * in the option entry when saving, or by line shape when loading). */
void
on_filesel_ok_clicked                  (GtkButton       *button,
                                        gpointer         user_data)
{
	gtk_widget_hide(fileselection);
	const char *filename = gtk_file_selection_get_filename(GTK_FILE_SELECTION(fileselection));

	if(fileselection_flag == SAVE)
	{
		FILE *fp = fopen(filename,"w");
		/* the svm type in the option string decides the format */
		const char *p = gtk_entry_get_text(GTK_ENTRY(entry_option));
		const char* svm_type_str = strstr(p, "-s ");
		int svm_type = C_SVC;
		if(svm_type_str != NULL)
			sscanf(svm_type_str, "-s %d", &svm_type);
		if(fp)
		{
			if(svm_type == EPSILON_SVR || svm_type == NU_SVR)
			{
				for(list<point>::iterator p = point_list.begin(); p != point_list.end();p++)
					fprintf(fp,"%f 1:%f\n", p->y, p->x);
			}
			else
			{
				for(list<point>::iterator p = point_list.begin(); p != point_list.end();p++)
					fprintf(fp,"%d 1:%f 2:%f\n", p->value, p->x, p->y);
			}
			fclose(fp);
		}

	}
	else if(fileselection_flag == LOAD)
	{
		FILE *fp = fopen(filename,"r");
		if(fp)
		{
			clear_all();
			char buf[4096];
			while(fgets(buf,sizeof(buf),fp))
			{
				int v;
				double x,y;
				/* try the classification shape first, then regression;
				 * stop at the first line matching neither */
				if(sscanf(buf,"%d%*d:%lf%*d:%lf",&v,&x,&y)==3)
				{
					point p = {x,y,v};
					point_list.push_back(p);
				}
				else if(sscanf(buf,"%lf%*d:%lf",&y,&x)==2)
				{
					point p = {x,y,current_value};
					point_list.push_back(p);
				}
				else
					break;
			}
			fclose(fp);
			draw_all_points();
		}
	}
}

/* Dialog destroyed: nothing to clean up (intentionally empty). */
void
on_fileselection_destroy               (GtkObject       *object,
                                        gpointer         user_data)
{
}

/* Cancel pressed: no action needed; show_fileselection() already wired
 * the button to destroy the dialog (intentionally empty). */
void
on_filesel_cancel_clicked              (GtkButton       *button,
                                        gpointer         user_data)
{
}

+ 54
- 0
gklearn/gedlib/lib/libsvm.3.22/svm-toy/gtk/callbacks.h View File

@@ -0,0 +1,54 @@
#include <gtk/gtk.h>

#ifdef __cplusplus
extern "C" {
#endif

/* Signal handlers implemented in callbacks.cpp and connected by
 * interface.c (originally described in svm-toy.glade). */

/* main window destroyed: quit the application */
void
on_window1_destroy                     (GtkObject       *object,
                                        gpointer         user_data);

/* click on the canvas: add a point of the current class */
gboolean
on_draw_main_button_press_event        (GtkWidget       *widget,
                                        GdkEventButton  *event,
                                        gpointer         user_data);

/* expose: restore the damaged area from the backing pixmap */
gboolean
on_draw_main_expose_event              (GtkWidget       *widget,
                                        GdkEventExpose  *event,
                                        gpointer         user_data);

/* cycle the class used for new points (1..3) */
void
on_button_change_clicked               (GtkButton       *button,
                                        gpointer         user_data);

/* train on the current points and paint the result */
void
on_button_run_clicked                  (GtkButton       *button,
                                        gpointer         user_data);

/* forget all points and wipe the canvas */
void
on_button_clear_clicked                (GtkButton       *button,
                                        gpointer         user_data);

/* open the file dialog in save mode */
void
on_button_save_clicked                 (GtkButton       *button,
                                        gpointer         user_data);

/* open the file dialog in load mode */
void
on_button_load_clicked                 (GtkButton       *button,
                                        gpointer         user_data);

/* file dialog lifecycle handlers */
void
on_fileselection_destroy               (GtkObject       *object,
                                        gpointer         user_data);

void
on_filesel_ok_clicked                  (GtkButton       *button,
                                        gpointer         user_data);

void
on_filesel_cancel_clicked              (GtkButton       *button,
                                        gpointer         user_data);
#ifdef __cplusplus
}
#endif

+ 164
- 0
gklearn/gedlib/lib/libsvm.3.22/svm-toy/gtk/interface.c View File

@@ -0,0 +1,164 @@
/*
* DO NOT EDIT THIS FILE - it is generated by Glade.
*/

#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <string.h>

#include <gdk/gdkkeysyms.h>
#include <gtk/gtk.h>

#include "callbacks.h"
#include "interface.h"

/* Build the main window (Glade-generated code): a 500x500 drawing area
 * stacked above a row of buttons (Change/Run/Clear/Save/Load) and the
 * option entry, with every signal connected to callbacks.cpp.
 * Returns the unshown top-level window. */
GtkWidget*
create_window (void)
{
  GtkWidget *window;
  GtkWidget *vbox1;
  extern GtkWidget *draw_main;		/* defined in callbacks.cpp */
  GtkWidget *hbox1;
  GtkWidget *button_change;
  GtkWidget *button_run;
  GtkWidget *button_clear;
  GtkWidget *button_save;
  GtkWidget *button_load;
  extern GtkWidget *entry_option;	/* defined in callbacks.cpp */

  window = gtk_window_new (GTK_WINDOW_TOPLEVEL);
  gtk_object_set_data (GTK_OBJECT (window), "window", window);
  gtk_window_set_title (GTK_WINDOW (window), "SVM Toy");

  /* vertical layout: canvas on top, button row below */
  vbox1 = gtk_vbox_new (FALSE, 0);
  gtk_widget_ref (vbox1);
  gtk_object_set_data_full (GTK_OBJECT (window), "vbox1", vbox1,
                            (GtkDestroyNotify) gtk_widget_unref);
  gtk_widget_show (vbox1);
  gtk_container_add (GTK_CONTAINER (window), vbox1);

  /* the 500x500 drawing canvas */
  draw_main = gtk_drawing_area_new ();
  gtk_widget_ref (draw_main);
  gtk_object_set_data_full (GTK_OBJECT (window), "draw_main", draw_main,
                            (GtkDestroyNotify) gtk_widget_unref);
  gtk_widget_show (draw_main);
  gtk_box_pack_start (GTK_BOX (vbox1), draw_main, TRUE, TRUE, 0);
  gtk_widget_set_usize (draw_main, 500, 500);
  gtk_widget_set_events (draw_main, GDK_EXPOSURE_MASK | GDK_BUTTON_PRESS_MASK);

  hbox1 = gtk_hbox_new (FALSE, 0);
  gtk_widget_ref (hbox1);
  gtk_object_set_data_full (GTK_OBJECT (window), "hbox1", hbox1,
                            (GtkDestroyNotify) gtk_widget_unref);
  gtk_widget_show (hbox1);
  gtk_box_pack_start (GTK_BOX (vbox1), hbox1, FALSE, FALSE, 0);

  /* the five action buttons */
  button_change = gtk_button_new_with_label ("Change");
  gtk_widget_ref (button_change);
  gtk_object_set_data_full (GTK_OBJECT (window), "button_change", button_change,
                            (GtkDestroyNotify) gtk_widget_unref);
  gtk_widget_show (button_change);
  gtk_box_pack_start (GTK_BOX (hbox1), button_change, FALSE, FALSE, 0);

  button_run = gtk_button_new_with_label ("Run");
  gtk_widget_ref (button_run);
  gtk_object_set_data_full (GTK_OBJECT (window), "button_run", button_run,
                            (GtkDestroyNotify) gtk_widget_unref);
  gtk_widget_show (button_run);
  gtk_box_pack_start (GTK_BOX (hbox1), button_run, FALSE, FALSE, 0);

  button_clear = gtk_button_new_with_label ("Clear");
  gtk_widget_ref (button_clear);
  gtk_object_set_data_full (GTK_OBJECT (window), "button_clear", button_clear,
                            (GtkDestroyNotify) gtk_widget_unref);
  gtk_widget_show (button_clear);
  gtk_box_pack_start (GTK_BOX (hbox1), button_clear, FALSE, FALSE, 0);

  button_save = gtk_button_new_with_label ("Save");
  gtk_widget_ref (button_save);
  gtk_object_set_data_full (GTK_OBJECT (window), "button_save", button_save,
                            (GtkDestroyNotify) gtk_widget_unref);
  gtk_widget_show (button_save);
  gtk_box_pack_start (GTK_BOX (hbox1), button_save, FALSE, FALSE, 0);

  button_load = gtk_button_new_with_label ("Load");
  gtk_widget_ref (button_load);
  gtk_object_set_data_full (GTK_OBJECT (window), "button_load", button_load,
                            (GtkDestroyNotify) gtk_widget_unref);
  gtk_widget_show (button_load);
  gtk_box_pack_start (GTK_BOX (hbox1), button_load, FALSE, FALSE, 0);

  /* free-text svm-train options */
  entry_option = gtk_entry_new ();
  gtk_widget_ref (entry_option);
  gtk_object_set_data_full (GTK_OBJECT (window), "entry_option", entry_option,
                            (GtkDestroyNotify) gtk_widget_unref);
  gtk_widget_show (entry_option);
  gtk_box_pack_start (GTK_BOX (hbox1), entry_option, TRUE, TRUE, 0);

  /* wire every signal to the handlers in callbacks.cpp;
     pressing Enter in the entry triggers "Run" as well */
  gtk_signal_connect (GTK_OBJECT (window), "destroy",
                      GTK_SIGNAL_FUNC (on_window1_destroy),
                      NULL);
  gtk_signal_connect (GTK_OBJECT (draw_main), "button_press_event",
                      GTK_SIGNAL_FUNC (on_draw_main_button_press_event),
                      NULL);
  gtk_signal_connect (GTK_OBJECT (draw_main), "expose_event",
                      GTK_SIGNAL_FUNC (on_draw_main_expose_event),
                      NULL);
  gtk_signal_connect (GTK_OBJECT (button_change), "clicked",
                      GTK_SIGNAL_FUNC (on_button_change_clicked),
                      NULL);
  gtk_signal_connect (GTK_OBJECT (button_run), "clicked",
                      GTK_SIGNAL_FUNC (on_button_run_clicked),
                      NULL);
  gtk_signal_connect (GTK_OBJECT (button_clear), "clicked",
                      GTK_SIGNAL_FUNC (on_button_clear_clicked),
                      NULL);
  gtk_signal_connect (GTK_OBJECT (button_save), "clicked",
                      GTK_SIGNAL_FUNC (on_button_save_clicked),
                      NULL);
  gtk_signal_connect (GTK_OBJECT (button_load), "clicked",
                      GTK_SIGNAL_FUNC (on_button_load_clicked),
                      NULL);
  gtk_signal_connect (GTK_OBJECT (entry_option), "activate",
                      GTK_SIGNAL_FUNC (on_button_run_clicked),
                      NULL);

  return window;
}

/* Build the modal "Select File" dialog (Glade-generated code) and wire
 * its OK/Cancel/destroy signals to callbacks.cpp.  Returns the unshown
 * dialog; show_fileselection() adds the destroy-on-click behaviour. */
GtkWidget*
create_fileselection (void)
{
  GtkWidget *fileselection;
  GtkWidget *filesel_ok;
  GtkWidget *filesel_cancel;

  fileselection = gtk_file_selection_new ("Select File");
  gtk_object_set_data (GTK_OBJECT (fileselection), "fileselection", fileselection);
  gtk_container_set_border_width (GTK_CONTAINER (fileselection), 10);
  gtk_window_set_modal (GTK_WINDOW (fileselection), TRUE);

  filesel_ok = GTK_FILE_SELECTION (fileselection)->ok_button;
  gtk_object_set_data (GTK_OBJECT (fileselection), "filesel_ok", filesel_ok);
  gtk_widget_show (filesel_ok);
  GTK_WIDGET_SET_FLAGS (filesel_ok, GTK_CAN_DEFAULT);

  filesel_cancel = GTK_FILE_SELECTION (fileselection)->cancel_button;
  gtk_object_set_data (GTK_OBJECT (fileselection), "filesel_cancel", filesel_cancel);
  gtk_widget_show (filesel_cancel);
  GTK_WIDGET_SET_FLAGS (filesel_cancel, GTK_CAN_DEFAULT);

  gtk_signal_connect (GTK_OBJECT (fileselection), "destroy",
                      GTK_SIGNAL_FUNC (on_fileselection_destroy),
                      NULL);
  gtk_signal_connect (GTK_OBJECT (filesel_ok), "clicked",
                      GTK_SIGNAL_FUNC (on_filesel_ok_clicked),
                      NULL);
  gtk_signal_connect (GTK_OBJECT (filesel_cancel), "clicked",
                      GTK_SIGNAL_FUNC (on_filesel_cancel_clicked),
                      NULL);

  return fileselection;
}


+ 14
- 0
gklearn/gedlib/lib/libsvm.3.22/svm-toy/gtk/interface.h View File

@@ -0,0 +1,14 @@
/*
* DO NOT EDIT THIS FILE - it is generated by Glade.
*/

#ifdef __cplusplus
extern "C" {
#endif

GtkWidget* create_window (void);
GtkWidget* create_fileselection (void);

#ifdef __cplusplus
}
#endif

+ 23
- 0
gklearn/gedlib/lib/libsvm.3.22/svm-toy/gtk/main.c View File

@@ -0,0 +1,23 @@
/*
* Initial main.c file generated by Glade. Edit as required.
* Glade will not overwrite this file.
*/

#include <gtk/gtk.h>
#include "interface.h"
void svm_toy_initialize();

/* Program entry point: initialise GTK, build and show the main window,
 * run the one-time drawing setup, then enter the GTK main loop. */
int main (int argc, char *argv[])
{
  GtkWidget *window;

  gtk_set_locale ();
  gtk_init (&argc, &argv);

  window = create_window ();
  gtk_widget_show (window);

  /* must run after gtk_widget_show: it uses draw_main->window */
  svm_toy_initialize();
  gtk_main ();
  return 0;
}

+ 238
- 0
gklearn/gedlib/lib/libsvm.3.22/svm-toy/gtk/svm-toy.glade View File

@@ -0,0 +1,238 @@
<?xml version="1.0"?>
<GTK-Interface>

<project>
<name>svm-toy</name>
<program_name>svm-toy</program_name>
<directory></directory>
<source_directory>src</source_directory>
<pixmaps_directory>pixmaps</pixmaps_directory>
<language>C</language>
<gnome_support>False</gnome_support>
<gettext_support>False</gettext_support>
<use_widget_names>False</use_widget_names>
<output_main_file>True</output_main_file>
<output_support_files>True</output_support_files>
<output_build_files>True</output_build_files>
<backup_source_files>False</backup_source_files>
<main_source_file>interface.c</main_source_file>
<main_header_file>interface.h</main_header_file>
<handler_source_file>callbacks.c</handler_source_file>
<handler_header_file>callbacks.h</handler_header_file>
<support_source_file>support.c</support_source_file>
<support_header_file>support.h</support_header_file>
<translatable_strings_file></translatable_strings_file>
</project>

<widget>
<class>GtkWindow</class>
<name>window</name>
<signal>
<name>destroy</name>
<handler>on_window1_destroy</handler>
<last_modification_time>Sun, 16 Apr 2000 09:47:10 GMT</last_modification_time>
</signal>
<title>SVM Toy</title>
<type>GTK_WINDOW_TOPLEVEL</type>
<position>GTK_WIN_POS_NONE</position>
<modal>False</modal>
<allow_shrink>False</allow_shrink>
<allow_grow>True</allow_grow>
<auto_shrink>False</auto_shrink>

<widget>
<class>GtkVBox</class>
<name>vbox1</name>
<homogeneous>False</homogeneous>
<spacing>0</spacing>

<widget>
<class>GtkDrawingArea</class>
<name>draw_main</name>
<width>500</width>
<height>500</height>
<events>GDK_EXPOSURE_MASK | GDK_BUTTON_PRESS_MASK</events>
<signal>
<name>button_press_event</name>
<handler>on_draw_main_button_press_event</handler>
<last_modification_time>Sun, 16 Apr 2000 13:02:05 GMT</last_modification_time>
</signal>
<signal>
<name>expose_event</name>
<handler>on_draw_main_expose_event</handler>
<last_modification_time>Sun, 16 Apr 2000 14:27:05 GMT</last_modification_time>
</signal>
<child>
<padding>0</padding>
<expand>True</expand>
<fill>True</fill>
</child>
</widget>

<widget>
<class>GtkHBox</class>
<name>hbox1</name>
<homogeneous>False</homogeneous>
<spacing>0</spacing>
<child>
<padding>0</padding>
<expand>False</expand>
<fill>False</fill>
</child>

<widget>
<class>GtkButton</class>
<name>button_change</name>
<can_focus>True</can_focus>
<signal>
<name>clicked</name>
<handler>on_button_change_clicked</handler>
<last_modification_time>Sun, 16 Apr 2000 09:40:18 GMT</last_modification_time>
</signal>
<label>Change</label>
<child>
<padding>0</padding>
<expand>False</expand>
<fill>False</fill>
</child>
</widget>

<widget>
<class>GtkButton</class>
<name>button_run</name>
<can_focus>True</can_focus>
<signal>
<name>clicked</name>
<handler>on_button_run_clicked</handler>
<last_modification_time>Sun, 16 Apr 2000 09:40:37 GMT</last_modification_time>
</signal>
<label>Run</label>
<child>
<padding>0</padding>
<expand>False</expand>
<fill>False</fill>
</child>
</widget>

<widget>
<class>GtkButton</class>
<name>button_clear</name>
<can_focus>True</can_focus>
<signal>
<name>clicked</name>
<handler>on_button_clear_clicked</handler>
<last_modification_time>Sun, 16 Apr 2000 09:40:44 GMT</last_modification_time>
</signal>
<label>Clear</label>
<child>
<padding>0</padding>
<expand>False</expand>
<fill>False</fill>
</child>
</widget>

<widget>
<class>GtkButton</class>
<name>button_save</name>
<can_focus>True</can_focus>
<signal>
<name>clicked</name>
<handler>on_button_save_clicked</handler>
<last_modification_time>Fri, 16 Jun 2000 18:23:46 GMT</last_modification_time>
</signal>
<label>Save</label>
<child>
<padding>0</padding>
<expand>False</expand>
<fill>False</fill>
</child>
</widget>

<widget>
<class>GtkButton</class>
<name>button_load</name>
<can_focus>True</can_focus>
<signal>
<name>clicked</name>
<handler>on_button_load_clicked</handler>
<last_modification_time>Fri, 16 Jun 2000 18:23:56 GMT</last_modification_time>
</signal>
<label>Load</label>
<child>
<padding>0</padding>
<expand>False</expand>
<fill>False</fill>
</child>
</widget>

<widget>
<class>GtkEntry</class>
<name>entry_option</name>
<can_focus>True</can_focus>
<signal>
<name>activate</name>
<handler>on_button_run_clicked</handler>
<last_modification_time>Sun, 16 Apr 2000 09:42:46 GMT</last_modification_time>
</signal>
<editable>True</editable>
<text_visible>True</text_visible>
<text_max_length>0</text_max_length>
<text></text>
<child>
<padding>0</padding>
<expand>True</expand>
<fill>True</fill>
</child>
</widget>
</widget>
</widget>
</widget>

<widget>
<class>GtkFileSelection</class>
<name>fileselection</name>
<border_width>10</border_width>
<signal>
<name>destroy</name>
<handler>on_fileselection_destroy</handler>
<last_modification_time>Fri, 16 Jun 2000 18:11:28 GMT</last_modification_time>
</signal>
<title>Select File</title>
<type>GTK_WINDOW_TOPLEVEL</type>
<position>GTK_WIN_POS_NONE</position>
<modal>True</modal>
<allow_shrink>False</allow_shrink>
<allow_grow>True</allow_grow>
<auto_shrink>False</auto_shrink>
<show_file_op_buttons>True</show_file_op_buttons>

<widget>
<class>GtkButton</class>
<child_name>FileSel:ok_button</child_name>
<name>filesel_ok</name>
<can_default>True</can_default>
<can_focus>True</can_focus>
<signal>
<name>clicked</name>
<handler>on_filesel_ok_clicked</handler>
<last_modification_time>Fri, 16 Jun 2000 18:09:56 GMT</last_modification_time>
</signal>
<label>OK</label>
</widget>

<widget>
<class>GtkButton</class>
<child_name>FileSel:cancel_button</child_name>
<name>filesel_cancel</name>
<can_default>True</can_default>
<can_focus>True</can_focus>
<signal>
<name>clicked</name>
<handler>on_filesel_cancel_clicked</handler>
<last_modification_time>Fri, 16 Jun 2000 18:09:46 GMT</last_modification_time>
</signal>
<label>Cancel</label>
</widget>
</widget>

</GTK-Interface>

+ 18
- 0
gklearn/gedlib/lib/libsvm.3.22/svm-toy/qt/Makefile View File

@@ -0,0 +1,18 @@
# Build the Qt4 front end of the LIBSVM svm-toy demo.
# FIX: the original "CXX? = g++" defines a variable literally named "CXX?"
# (and $(CXX) then falls back to make's built-in default); the conditional
# assignment operator is "?=" with no space before the "?".
CXX ?= g++
INCLUDE = /usr/include/qt4
CFLAGS = -Wall -O3 -I$(INCLUDE) -I$(INCLUDE)/QtGui -I$(INCLUDE)/QtCore
LIB = -lQtGui -lQtCore
MOC = /usr/bin/moc-qt4

svm-toy: svm-toy.cpp svm-toy.moc ../../svm.o
	$(CXX) $(CFLAGS) svm-toy.cpp ../../svm.o -o svm-toy $(LIB)

svm-toy.moc: svm-toy.cpp
	$(MOC) svm-toy.cpp -o svm-toy.moc

../../svm.o: ../../svm.cpp ../../svm.h
	$(MAKE) -C ../.. svm.o

clean:
	rm -f *~ svm-toy svm-toy.moc ../../svm.o


+ 437
- 0
gklearn/gedlib/lib/libsvm.3.22/svm-toy/qt/svm-toy.cpp View File

@@ -0,0 +1,437 @@
#include <QtGui>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <list>
#include "../../svm.h"
using namespace std;

#define DEFAULT_PARAM "-t 2 -c 100" // initial contents of the option line edit
#define XLEN 500 // canvas width in pixels
#define YLEN 500 // canvas height in pixels

// Palette used below: colors[0] is the black background, colors[1..3]
// shade the predicted class regions, colors[4..6] are the three point
// icons; colors[5] also draws the SVR curve and colors[2] its tube.
QRgb colors[] =
{
qRgb(0,0,0),
qRgb(0,120,120),
qRgb(120,120,0),
qRgb(120,0,120),
qRgb(0,200,200),
qRgb(200,200,0),
qRgb(200,0,200)
};

class SvmToyWindow : public QWidget
{

Q_OBJECT

public:
SvmToyWindow();
~SvmToyWindow();
protected:
virtual void mousePressEvent( QMouseEvent* );
virtual void paintEvent( QPaintEvent* );

private:
QPixmap buffer;
QPixmap icon1;
QPixmap icon2;
QPixmap icon3;
QPushButton button_change_icon;
QPushButton button_run;
QPushButton button_clear;
QPushButton button_save;
QPushButton button_load;
QLineEdit input_line;
QPainter buffer_painter;
struct point {
double x, y;
signed char value;
};
list<point> point_list;
int current_value;
const QPixmap& choose_icon(int v)
{
if(v==1) return icon1;
else if(v==2) return icon2;
else return icon3;
}
void clear_all()
{
point_list.clear();
buffer.fill(Qt::black);
repaint();
}
void draw_point(const point& p)
{
const QPixmap& icon = choose_icon(p.value);
buffer_painter.drawPixmap((int)(p.x*XLEN),(int)(p.y*YLEN),icon);
repaint();
}
void draw_all_points()
{
for(list<point>::iterator p = point_list.begin(); p != point_list.end();p++)
draw_point(*p);
}
private slots:
void button_change_icon_clicked()
{
++current_value;
if(current_value > 3) current_value = 1;
button_change_icon.setIcon(choose_icon(current_value));
}
void button_run_clicked()
{
// guard
if(point_list.empty()) return;

svm_parameter param;
int i,j;

// default values
param.svm_type = C_SVC;
param.kernel_type = RBF;
param.degree = 3;
param.gamma = 0;
param.coef0 = 0;
param.nu = 0.5;
param.cache_size = 100;
param.C = 1;
param.eps = 1e-3;
param.p = 0.1;
param.shrinking = 1;
param.probability = 0;
param.nr_weight = 0;
param.weight_label = NULL;
param.weight = NULL;

// parse options
const char *p = input_line.text().toAscii().constData();

while (1) {
while (*p && *p != '-')
p++;

if (*p == '\0')
break;

p++;
switch (*p++) {
case 's':
param.svm_type = atoi(p);
break;
case 't':
param.kernel_type = atoi(p);
break;
case 'd':
param.degree = atoi(p);
break;
case 'g':
param.gamma = atof(p);
break;
case 'r':
param.coef0 = atof(p);
break;
case 'n':
param.nu = atof(p);
break;
case 'm':
param.cache_size = atof(p);
break;
case 'c':
param.C = atof(p);
break;
case 'e':
param.eps = atof(p);
break;
case 'p':
param.p = atof(p);
break;
case 'h':
param.shrinking = atoi(p);
break;
case 'b':
param.probability = atoi(p);
break;
case 'w':
++param.nr_weight;
param.weight_label = (int *)realloc(param.weight_label,sizeof(int)*param.nr_weight);
param.weight = (double *)realloc(param.weight,sizeof(double)*param.nr_weight);
param.weight_label[param.nr_weight-1] = atoi(p);
while(*p && !isspace(*p)) ++p;
param.weight[param.nr_weight-1] = atof(p);
break;
}
}
// build problem
svm_problem prob;

prob.l = point_list.size();
prob.y = new double[prob.l];

if(param.kernel_type == PRECOMPUTED)
{
}
else if(param.svm_type == EPSILON_SVR ||
param.svm_type == NU_SVR)
{
if(param.gamma == 0) param.gamma = 1;
svm_node *x_space = new svm_node[2 * prob.l];
prob.x = new svm_node *[prob.l];

i = 0;
for (list <point>::iterator q = point_list.begin(); q != point_list.end(); q++, i++)
{
x_space[2 * i].index = 1;
x_space[2 * i].value = q->x;
x_space[2 * i + 1].index = -1;
prob.x[i] = &x_space[2 * i];
prob.y[i] = q->y;
}

// build model & classify
svm_model *model = svm_train(&prob, &param);
svm_node x[2];
x[0].index = 1;
x[1].index = -1;
int *j = new int[XLEN];

for (i = 0; i < XLEN; i++)
{
x[0].value = (double) i / XLEN;
j[i] = (int)(YLEN*svm_predict(model, x));
}
buffer_painter.setPen(colors[0]);
buffer_painter.drawLine(0,0,0,YLEN-1);

int p = (int)(param.p * YLEN);
for(i = 1; i < XLEN; i++)
{
buffer_painter.setPen(colors[0]);
buffer_painter.drawLine(i,0,i,YLEN-1);
buffer_painter.setPen(colors[5]);
buffer_painter.drawLine(i-1,j[i-1],i,j[i]);
if(param.svm_type == EPSILON_SVR)
{
buffer_painter.setPen(colors[2]);
buffer_painter.drawLine(i-1,j[i-1]+p,i,j[i]+p);

buffer_painter.setPen(colors[2]);
buffer_painter.drawLine(i-1,j[i-1]-p,i,j[i]-p);
}
}

svm_free_and_destroy_model(&model);
delete[] j;
delete[] x_space;
delete[] prob.x;
delete[] prob.y;
}
else
{
if(param.gamma == 0) param.gamma = 0.5;
svm_node *x_space = new svm_node[3 * prob.l];
prob.x = new svm_node *[prob.l];

i = 0;
for (list <point>::iterator q = point_list.begin(); q != point_list.end(); q++, i++)
{
x_space[3 * i].index = 1;
x_space[3 * i].value = q->x;
x_space[3 * i + 1].index = 2;
x_space[3 * i + 1].value = q->y;
x_space[3 * i + 2].index = -1;
prob.x[i] = &x_space[3 * i];
prob.y[i] = q->value;
}

// build model & classify
svm_model *model = svm_train(&prob, &param);
svm_node x[3];
x[0].index = 1;
x[1].index = 2;
x[2].index = -1;

for (i = 0; i < XLEN; i++)
for (j = 0; j < YLEN ; j++) {
x[0].value = (double) i / XLEN;
x[1].value = (double) j / YLEN;
double d = svm_predict(model, x);
if (param.svm_type == ONE_CLASS && d<0) d=2;
buffer_painter.setPen(colors[(int)d]);
buffer_painter.drawPoint(i,j);
}

svm_free_and_destroy_model(&model);
delete[] x_space;
delete[] prob.x;
delete[] prob.y;
}
free(param.weight_label);
free(param.weight);
draw_all_points();
}
void button_clear_clicked()
{
clear_all();
}
void button_save_clicked()
{
QString filename = QFileDialog::getSaveFileName();
if(!filename.isNull())
{
FILE *fp = fopen(filename.toAscii().constData(),"w");
const char *p = input_line.text().toAscii().constData();
const char* svm_type_str = strstr(p, "-s ");
int svm_type = C_SVC;
if(svm_type_str != NULL)
sscanf(svm_type_str, "-s %d", &svm_type);
if(fp)
{
if(svm_type == EPSILON_SVR || svm_type == NU_SVR)
{
for(list<point>::iterator p = point_list.begin(); p != point_list.end();p++)
fprintf(fp,"%f 1:%f\n", p->y, p->x);
}
else
{
for(list<point>::iterator p = point_list.begin(); p != point_list.end();p++)
fprintf(fp,"%d 1:%f 2:%f\n", p->value, p->x, p->y);
}
fclose(fp);
}
}
}
void button_load_clicked()
{
QString filename = QFileDialog::getOpenFileName();
if(!filename.isNull())
{
FILE *fp = fopen(filename.toAscii().constData(),"r");
if(fp)
{
clear_all();
char buf[4096];
while(fgets(buf,sizeof(buf),fp))
{
int v;
double x,y;
if(sscanf(buf,"%d%*d:%lf%*d:%lf",&v,&x,&y)==3)
{
point p = {x,y,v};
point_list.push_back(p);
}
else if(sscanf(buf,"%lf%*d:%lf",&y,&x)==2)
{
point p = {x,y,current_value};
point_list.push_back(p);
}
else
break;
}
fclose(fp);
draw_all_points();
}
}
}
};

#include "svm-toy.moc"

// Construct the window: wire the buttons and the line edit to their slots,
// build the three 4x4 class icons, and lay the controls out in a row
// directly under the 500x500 canvas.
SvmToyWindow::SvmToyWindow()
:button_change_icon(this)
,button_run("Run",this)
,button_clear("Clear",this)
,button_save("Save",this)
,button_load("Load",this)
,input_line(this)
,current_value(1)
{
buffer = QPixmap(XLEN,YLEN);
buffer.fill(Qt::black);

// The painter stays open on the off-screen buffer until the destructor.
buffer_painter.begin(&buffer);

QObject::connect(&button_change_icon, SIGNAL(clicked()), this,
SLOT(button_change_icon_clicked()));
QObject::connect(&button_run, SIGNAL(clicked()), this,
SLOT(button_run_clicked()));
QObject::connect(&button_clear, SIGNAL(clicked()), this,
SLOT(button_clear_clicked()));
QObject::connect(&button_save, SIGNAL(clicked()), this,
SLOT(button_save_clicked()));
QObject::connect(&button_load, SIGNAL(clicked()), this,
SLOT(button_load_clicked()));
// Pressing Return in the option box behaves like clicking "Run".
QObject::connect(&input_line, SIGNAL(returnPressed()), this,
SLOT(button_run_clicked()));

// don't blank the window before repainting
setAttribute(Qt::WA_NoBackground);
// 4x4 solid-color icons for the three point classes (colors[4..6]).
icon1 = QPixmap(4,4);
icon2 = QPixmap(4,4);
icon3 = QPixmap(4,4);
QPainter painter;
painter.begin(&icon1);
painter.fillRect(0,0,4,4,QBrush(colors[4]));
painter.end();

painter.begin(&icon2);
painter.fillRect(0,0,4,4,QBrush(colors[5]));
painter.end();

painter.begin(&icon3);
painter.fillRect(0,0,4,4,QBrush(colors[6]));
painter.end();

// Button row sits at y = YLEN, below the canvas.
button_change_icon.setGeometry( 0, YLEN, 50, 25 );
button_run.setGeometry( 50, YLEN, 50, 25 );
button_clear.setGeometry( 100, YLEN, 50, 25 );
button_save.setGeometry( 150, YLEN, 50, 25);
button_load.setGeometry( 200, YLEN, 50, 25);
input_line.setGeometry( 250, YLEN, 250, 25);
input_line.setText(DEFAULT_PARAM);
button_change_icon.setIcon(icon1);
}

// Close the painter that was opened on `buffer` in the constructor.
SvmToyWindow::~SvmToyWindow()
{
buffer_painter.end();
}

// Record a click as a new sample: coordinates normalized to [0,1),
// labelled with the currently selected class, drawn immediately.
void SvmToyWindow::mousePressEvent( QMouseEvent* event )
{
point p = {(double)event->x()/XLEN, (double)event->y()/YLEN, current_value};
point_list.push_back(p);
draw_point(p);
}

// Blit the off-screen buffer onto the widget.
void SvmToyWindow::paintEvent( QPaintEvent* )
{
// copy the image from the buffer pixmap to the window
QPainter p(this);
p.drawPixmap(0, 0, buffer);
}

// Entry point: create the toy window sized to the canvas plus the button row.
int main( int argc, char* argv[] )
{
	QApplication myapp( argc, argv );

	// FIX: the original heap-allocated the window and never deleted it;
	// an automatic object behaves identically and is destroyed on return.
	SvmToyWindow mywidget;
	mywidget.setGeometry( 100, 100, XLEN, YLEN+25 );

	mywidget.show();
	return myapp.exec();
}

+ 482
- 0
gklearn/gedlib/lib/libsvm.3.22/svm-toy/windows/svm-toy.cpp View File

@@ -0,0 +1,482 @@
#include <windows.h>
#include <windowsx.h>
#include <stdio.h>
#include <string.h>
#include <ctype.h>
#include <list>
#include "../../svm.h"
using namespace std;

#define DEFAULT_PARAM "-t 2 -c 100" // initial contents of the option edit box
#define XLEN 500 // canvas width in pixels
#define YLEN 500 // canvas height in pixels
// Draw a line on device context dc from (x1,y1) to (x2,y2) in color c.
// The do/while(0) wrapper makes the macro usable as a single statement;
// a solid pen is created, selected, used and destroyed on every call.
#define DrawLine(dc,x1,y1,x2,y2,c) \
do { \
HPEN hpen = CreatePen(PS_SOLID,0,c); \
HPEN horig = SelectPen(dc,hpen); \
MoveToEx(dc,x1,y1,NULL); \
LineTo(dc,x2,y2); \
SelectPen(dc,horig); \
DeletePen(hpen); \
} while(0)

using namespace std; // NOTE(review): duplicate of the directive above; harmless

// Palette: colors[0] is the black background, colors[1..3] shade the
// predicted class regions, colors[4..6] back the brushes for the three
// point classes; colors[5]/colors[2] also draw the SVR curve and tube.
COLORREF colors[] =
{
RGB(0,0,0),
RGB(0,120,120),
RGB(120,120,0),
RGB(120,0,120),
RGB(0,200,200),
RGB(200,200,0),
RGB(200,0,200)
};

HWND main_window;
HBITMAP buffer; // off-screen bitmap restored to the window on WM_PAINT
HDC window_dc;
HDC buffer_dc;
HBRUSH brush1, brush2, brush3; // brushes for point classes 1..3
HWND edit; // option-string edit box

// Child-control IDs dispatched in WndProc's WM_COMMAND handler.
enum {
ID_BUTTON_CHANGE, ID_BUTTON_RUN, ID_BUTTON_CLEAR,
ID_BUTTON_LOAD, ID_BUTTON_SAVE, ID_EDIT
};

// One training sample; x and y are normalized to [0,1).
struct point {
double x, y;
signed char value; // class label 1..3
};

list<point> point_list;
int current_value = 1; // label assigned to the next click

LRESULT CALLBACK WndProc(HWND, UINT, WPARAM, LPARAM);

// Register the window class, create the main window with its five buttons
// and the option edit box, set up the off-screen bitmap used for drawing,
// and pump messages until WM_QUIT.
int WINAPI WinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance,
PSTR szCmdLine, int iCmdShow)
{
static char szAppName[] = "SvmToy";
MSG msg;
WNDCLASSEX wndclass;

wndclass.cbSize = sizeof(wndclass);
wndclass.style = CS_HREDRAW | CS_VREDRAW;
wndclass.lpfnWndProc = WndProc;
wndclass.cbClsExtra = 0;
wndclass.cbWndExtra = 0;
wndclass.hInstance = hInstance;
wndclass.hIcon = LoadIcon(NULL, IDI_APPLICATION);
wndclass.hCursor = LoadCursor(NULL, IDC_ARROW);
wndclass.hbrBackground = (HBRUSH) GetStockObject(BLACK_BRUSH);
wndclass.lpszMenuName = NULL;
wndclass.lpszClassName = szAppName;
wndclass.hIconSm = LoadIcon(NULL, IDI_APPLICATION);

RegisterClassEx(&wndclass);

main_window = CreateWindow(szAppName, // window class name
"SVM Toy", // window caption
WS_OVERLAPPEDWINDOW,// window style
CW_USEDEFAULT, // initial x position
CW_USEDEFAULT, // initial y position
XLEN, // initial x size
YLEN+52, // initial y size (canvas plus button row)
NULL, // parent window handle
NULL, // window menu handle
hInstance, // program instance handle
NULL); // creation parameters

ShowWindow(main_window, iCmdShow);
UpdateWindow(main_window);

// Button row sits directly under the 500x500 canvas (y = YLEN).
CreateWindow("button", "Change", WS_CHILD | WS_VISIBLE | BS_PUSHBUTTON,
0, YLEN, 50, 25, main_window, (HMENU) ID_BUTTON_CHANGE, hInstance, NULL);
CreateWindow("button", "Run", WS_CHILD | WS_VISIBLE | BS_PUSHBUTTON,
50, YLEN, 50, 25, main_window, (HMENU) ID_BUTTON_RUN, hInstance, NULL);
CreateWindow("button", "Clear", WS_CHILD | WS_VISIBLE | BS_PUSHBUTTON,
100, YLEN, 50, 25, main_window, (HMENU) ID_BUTTON_CLEAR, hInstance, NULL);
CreateWindow("button", "Save", WS_CHILD | WS_VISIBLE | BS_PUSHBUTTON,
150, YLEN, 50, 25, main_window, (HMENU) ID_BUTTON_SAVE, hInstance, NULL);
CreateWindow("button", "Load", WS_CHILD | WS_VISIBLE | BS_PUSHBUTTON,
200, YLEN, 50, 25, main_window, (HMENU) ID_BUTTON_LOAD, hInstance, NULL);

edit = CreateWindow("edit", NULL, WS_CHILD | WS_VISIBLE,
250, YLEN, 250, 25, main_window, (HMENU) ID_EDIT, hInstance, NULL);

Edit_SetText(edit,DEFAULT_PARAM);

brush1 = CreateSolidBrush(colors[4]);
brush2 = CreateSolidBrush(colors[5]);
brush3 = CreateSolidBrush(colors[6]);

// Off-screen bitmap mirrors the window so WM_PAINT can restore it.
window_dc = GetDC(main_window);
buffer = CreateCompatibleBitmap(window_dc, XLEN, YLEN);
buffer_dc = CreateCompatibleDC(window_dc);
SelectObject(buffer_dc, buffer);
PatBlt(buffer_dc, 0, 0, XLEN, YLEN, BLACKNESS);

while (GetMessage(&msg, NULL, 0, 0)) {
TranslateMessage(&msg);
DispatchMessage(&msg);
}
return msg.wParam;
}

// Show the common Open (save==0) or Save (save!=0) dialog and copy the
// chosen path into filename (capacity len).  Returns nonzero on success,
// 0 if the user cancelled.
int getfilename( HWND hWnd , char *filename, int len, int save)
{
	filename[0] = '\0';

	OPENFILENAME ofn;
	memset(&ofn, 0, sizeof(ofn));
	ofn.lStructSize = sizeof(OPENFILENAME);
	ofn.hwndOwner = hWnd;
	ofn.lpstrFile = filename;
	ofn.nMaxFile = len;
	ofn.Flags = 0;

	if(save)
		return GetSaveFileName(&ofn);
	return GetOpenFileName(&ofn);
}

// Forget all samples, blank the backing bitmap, and request a repaint.
void clear_all()
{
point_list.clear();
PatBlt(buffer_dc, 0, 0, XLEN, YLEN, BLACKNESS);
InvalidateRect(main_window, 0, 0);
}

// Map a class label (1..3) to its brush; any other value gets brush3.
HBRUSH choose_brush(int v)
{
	switch(v)
	{
	case 1:
		return brush1;
	case 2:
		return brush2;
	default:
		return brush3;
	}
}

void draw_point(const point & p)
{
RECT rect;
rect.left = int(p.x*XLEN);
rect.top = int(p.y*YLEN);
rect.right = int(p.x*XLEN) + 3;
rect.bottom = int(p.y*YLEN) + 3;
FillRect(window_dc, &rect, choose_brush(p.value));
FillRect(buffer_dc, &rect, choose_brush(p.value));
}

void draw_all_points()
{
for(list<point>::iterator p = point_list.begin(); p != point_list.end(); p++)
draw_point(*p);
}

// Train on the collected points using the options typed in the edit box
// and render the model: one predicted value per column for regression,
// one predicted class per pixel otherwise.  The option parsing mirrors
// svm-train.c's command-line switches.
void button_run_clicked()
{
// guard
if(point_list.empty()) return;

svm_parameter param;
int i,j;
// default values (same as svm-train.c)
param.svm_type = C_SVC;
param.kernel_type = RBF;
param.degree = 3;
param.gamma = 0;
param.coef0 = 0;
param.nu = 0.5;
param.cache_size = 100;
param.C = 1;
param.eps = 1e-3;
param.p = 0.1;
param.shrinking = 1;
param.probability = 0;
param.nr_weight = 0;
param.weight_label = NULL;
param.weight = NULL;

// parse options
char str[1024];
Edit_GetLine(edit, 0, str, sizeof(str));
const char *p = str;

while (1) {
while (*p && *p != '-')
p++;

if (*p == '\0')
break;

p++;
switch (*p++) {
case 's':
param.svm_type = atoi(p);
break;
case 't':
param.kernel_type = atoi(p);
break;
case 'd':
param.degree = atoi(p);
break;
case 'g':
param.gamma = atof(p);
break;
case 'r':
param.coef0 = atof(p);
break;
case 'n':
param.nu = atof(p);
break;
case 'm':
param.cache_size = atof(p);
break;
case 'c':
param.C = atof(p);
break;
case 'e':
param.eps = atof(p);
break;
case 'p':
param.p = atof(p);
break;
case 'h':
param.shrinking = atoi(p);
break;
case 'b':
param.probability = atoi(p);
break;
case 'w':
// -wN weight: label parsed here, weight value after the space
++param.nr_weight;
param.weight_label = (int *)realloc(param.weight_label,sizeof(int)*param.nr_weight);
param.weight = (double *)realloc(param.weight,sizeof(double)*param.nr_weight);
param.weight_label[param.nr_weight-1] = atoi(p);
while(*p && !isspace(*p)) ++p;
param.weight[param.nr_weight-1] = atof(p);
break;
}
}
// build problem
svm_problem prob;

prob.l = point_list.size();
prob.y = new double[prob.l];

// NOTE(review): for PRECOMPUTED kernels nothing is trained or drawn.
if(param.kernel_type == PRECOMPUTED)
{
}
else if(param.svm_type == EPSILON_SVR ||
param.svm_type == NU_SVR)
{
// Regression: x is the only feature, the regression target is y.
if(param.gamma == 0) param.gamma = 1;
svm_node *x_space = new svm_node[2 * prob.l];
prob.x = new svm_node *[prob.l];

i = 0;
for (list<point>::iterator q = point_list.begin(); q != point_list.end(); q++, i++)
{
x_space[2 * i].index = 1;
x_space[2 * i].value = q->x;
x_space[2 * i + 1].index = -1;
prob.x[i] = &x_space[2 * i];
prob.y[i] = q->y;
}

// build model & classify
svm_model *model = svm_train(&prob, &param);
svm_node x[2];
x[0].index = 1;
x[1].index = -1;
// predicted y (in pixels) for every canvas column; shadows the outer j
int *j = new int[XLEN];

for (i = 0; i < XLEN; i++)
{
x[0].value = (double) i / XLEN;
j[i] = (int)(YLEN*svm_predict(model, x));
}
DrawLine(buffer_dc,0,0,0,YLEN,colors[0]);
DrawLine(window_dc,0,0,0,YLEN,colors[0]);
// epsilon tube half-width in pixels; shadows the option pointer p
int p = (int)(param.p * YLEN);
for(int i=1; i < XLEN; i++)
{
DrawLine(buffer_dc,i,0,i,YLEN,colors[0]);
DrawLine(window_dc,i,0,i,YLEN,colors[0]);
DrawLine(buffer_dc,i-1,j[i-1],i,j[i],colors[5]);
DrawLine(window_dc,i-1,j[i-1],i,j[i],colors[5]);

if(param.svm_type == EPSILON_SVR)
{
// epsilon tube at +/- p pixels around the regression curve
DrawLine(buffer_dc,i-1,j[i-1]+p,i,j[i]+p,colors[2]);
DrawLine(window_dc,i-1,j[i-1]+p,i,j[i]+p,colors[2]);

DrawLine(buffer_dc,i-1,j[i-1]-p,i,j[i]-p,colors[2]);
DrawLine(window_dc,i-1,j[i-1]-p,i,j[i]-p,colors[2]);
}
}
svm_free_and_destroy_model(&model);
delete[] j;
delete[] x_space;
delete[] prob.x;
delete[] prob.y;
}
else
{
// Classification / one-class: both coordinates are features, the
// label is the point's class; predict every pixel of the canvas.
if(param.gamma == 0) param.gamma = 0.5;
svm_node *x_space = new svm_node[3 * prob.l];
prob.x = new svm_node *[prob.l];

i = 0;
for (list<point>::iterator q = point_list.begin(); q != point_list.end(); q++, i++)
{
x_space[3 * i].index = 1;
x_space[3 * i].value = q->x;
x_space[3 * i + 1].index = 2;
x_space[3 * i + 1].value = q->y;
x_space[3 * i + 2].index = -1;
prob.x[i] = &x_space[3 * i];
prob.y[i] = q->value;
}

// build model & classify
svm_model *model = svm_train(&prob, &param);
svm_node x[3];
x[0].index = 1;
x[1].index = 2;
x[2].index = -1;

for (i = 0; i < XLEN; i++)
for (j = 0; j < YLEN; j++) {
x[0].value = (double) i / XLEN;
x[1].value = (double) j / YLEN;
double d = svm_predict(model, x);
// one-class "outside" prediction (-1) is shown with colors[2]
if (param.svm_type == ONE_CLASS && d<0) d=2;
SetPixel(window_dc, i, j, colors[(int)d]);
SetPixel(buffer_dc, i, j, colors[(int)d]);
}

svm_free_and_destroy_model(&model);
delete[] x_space;
delete[] prob.x;
delete[] prob.y;
}
free(param.weight_label);
free(param.weight);
draw_all_points();
}

// Window procedure: a left click adds a sample of the current class,
// WM_PAINT blits the backing bitmap, WM_COMMAND dispatches the five
// buttons (change/run/clear/save/load), WM_DESTROY quits the loop.
LRESULT CALLBACK WndProc(HWND hwnd, UINT iMsg, WPARAM wParam, LPARAM lParam)
{
HDC hdc;
PAINTSTRUCT ps;

switch (iMsg) {
case WM_LBUTTONDOWN:
{
// record the click normalized to [0,1), tagged with current_value
int x = LOWORD(lParam);
int y = HIWORD(lParam);
point p = {(double)x/XLEN, (double)y/YLEN, current_value};
point_list.push_back(p);
draw_point(p);
}
return 0;
case WM_PAINT:
{
hdc = BeginPaint(hwnd, &ps);
BitBlt(hdc, 0, 0, XLEN, YLEN, buffer_dc, 0, 0, SRCCOPY);
EndPaint(hwnd, &ps);
}
return 0;
case WM_COMMAND:
{
int id = LOWORD(wParam);
switch (id) {
case ID_BUTTON_CHANGE:
// cycle the class label 1 -> 2 -> 3 -> 1
++current_value;
if(current_value > 3) current_value = 1;
break;
case ID_BUTTON_RUN:
button_run_clicked();
break;
case ID_BUTTON_CLEAR:
clear_all();
break;
case ID_BUTTON_SAVE:
{
// save points in svmlight format: "y 1:x" for SVR (decided by the
// -s value in the option box), "label 1:x 2:y" otherwise
char filename[1024];
if(getfilename(hwnd,filename,1024,1))
{
FILE *fp = fopen(filename,"w");

char str[1024];
Edit_GetLine(edit, 0, str, sizeof(str));
const char *p = str;
const char* svm_type_str = strstr(p, "-s ");
int svm_type = C_SVC;
if(svm_type_str != NULL)
sscanf(svm_type_str, "-s %d", &svm_type);

if(fp)
{
if(svm_type == EPSILON_SVR || svm_type == NU_SVR)
{
for(list<point>::iterator p = point_list.begin(); p != point_list.end();p++)
fprintf(fp,"%f 1:%f\n", p->y, p->x);
}
else
{
for(list<point>::iterator p = point_list.begin(); p != point_list.end();p++)
fprintf(fp,"%d 1:%f 2:%f\n", p->value, p->x, p->y);
}
fclose(fp);
}
}
}
break;
case ID_BUTTON_LOAD:
{
// load points saved by the branch above and redraw them
char filename[1024];
if(getfilename(hwnd,filename,1024,0))
{
FILE *fp = fopen(filename,"r");
if(fp)
{
clear_all();
char buf[4096];
while(fgets(buf,sizeof(buf),fp))
{
int v;
double x,y;
if(sscanf(buf,"%d%*d:%lf%*d:%lf",&v,&x,&y)==3)
{
point p = {x,y,v};
point_list.push_back(p);
}
else if(sscanf(buf,"%lf%*d:%lf",&y,&x)==2)
{
point p = {x,y,current_value};
point_list.push_back(p);
}
else
break;
}
fclose(fp);
draw_all_points();
}
}
}
break;
}
}
return 0;
case WM_DESTROY:
PostQuitMessage(0);
return 0;
}

return DefWindowProc(hwnd, iMsg, wParam, lParam);
}

+ 380
- 0
gklearn/gedlib/lib/libsvm.3.22/svm-train.c View File

@@ -0,0 +1,380 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <errno.h>
#include "svm.h"
#define Malloc(type,n) (type *)malloc((n)*sizeof(type))

void print_null(const char *s) {} /* swallows library output; installed for -q (quiet) mode */

/* Print the command-line usage text and terminate with status 1. */
void exit_with_help()
{
printf(
"Usage: svm-train [options] training_set_file [model_file]\n"
"options:\n"
"-s svm_type : set type of SVM (default 0)\n"
"	0 -- C-SVC		(multi-class classification)\n"
"	1 -- nu-SVC		(multi-class classification)\n"
"	2 -- one-class SVM\n"
"	3 -- epsilon-SVR	(regression)\n"
"	4 -- nu-SVR		(regression)\n"
"-t kernel_type : set type of kernel function (default 2)\n"
"	0 -- linear: u'*v\n"
"	1 -- polynomial: (gamma*u'*v + coef0)^degree\n"
"	2 -- radial basis function: exp(-gamma*|u-v|^2)\n"
"	3 -- sigmoid: tanh(gamma*u'*v + coef0)\n"
"	4 -- precomputed kernel (kernel values in training_set_file)\n"
"-d degree : set degree in kernel function (default 3)\n"
"-g gamma : set gamma in kernel function (default 1/num_features)\n"
"-r coef0 : set coef0 in kernel function (default 0)\n"
"-c cost : set the parameter C of C-SVC, epsilon-SVR, and nu-SVR (default 1)\n"
"-n nu : set the parameter nu of nu-SVC, one-class SVM, and nu-SVR (default 0.5)\n"
"-p epsilon : set the epsilon in loss function of epsilon-SVR (default 0.1)\n"
"-m cachesize : set cache memory size in MB (default 100)\n"
"-e epsilon : set tolerance of termination criterion (default 0.001)\n"
"-h shrinking : whether to use the shrinking heuristics, 0 or 1 (default 1)\n"
"-b probability_estimates : whether to train a SVC or SVR model for probability estimates, 0 or 1 (default 0)\n"
"-wi weight : set the parameter C of class i to weight*C, for C-SVC (default 1)\n"
"-v n: n-fold cross validation mode\n"
"-q : quiet mode (no outputs)\n"
);
exit(1);
}

/* Report a malformed line of the training file (1-based) and abort. */
void exit_input_error(int line_num)
{
fprintf(stderr,"Wrong input format at line %d\n", line_num);
exit(1);
}

void parse_command_line(int argc, char **argv, char *input_file_name, char *model_file_name);
void read_problem(const char *filename);
void do_cross_validation();

struct svm_parameter param; // set by parse_command_line
struct svm_problem prob; // set by read_problem
struct svm_model *model;
struct svm_node *x_space; // backing storage for all feature nodes of prob
int cross_validation; // nonzero when -v was given
int nr_fold;

// Growable line buffer shared by readline() and read_problem().
static char *line = NULL;
static int max_line_len;

/* Read one complete line from `input` into the global buffer `line`,
 * doubling `max_line_len` (via realloc) until the newline fits.
 * Returns `line`, or NULL at end of file. */
static char* readline(FILE *input)
{
	if(fgets(line,max_line_len,input) == NULL)
		return NULL;

	/* No '\n' yet means the line was truncated: grow and keep reading. */
	for(;;)
	{
		if(strrchr(line,'\n') != NULL)
			break;
		max_line_len *= 2;
		line = (char *) realloc(line,max_line_len);
		int used = (int) strlen(line);
		if(fgets(line+used,max_line_len-used,input) == NULL)
			break;
	}
	return line;
}

/* Program entry: parse options, load the training file, validate the
 * parameters, then either cross-validate or train and save the model. */
int main(int argc, char **argv)
{
char input_file_name[1024];
char model_file_name[1024];
const char *error_msg;

parse_command_line(argc, argv, input_file_name, model_file_name);
read_problem(input_file_name);
error_msg = svm_check_parameter(&prob,&param);

if(error_msg)
{
fprintf(stderr,"ERROR: %s\n",error_msg);
exit(1);
}

if(cross_validation)
{
do_cross_validation();
}
else
{
model = svm_train(&prob,&param);
if(svm_save_model(model_file_name,model))
{
fprintf(stderr, "can't save model to file %s\n", model_file_name);
exit(1);
}
svm_free_and_destroy_model(&model);
}
/* release everything allocated by parse_command_line/read_problem */
svm_destroy_param(&param);
free(prob.y);
free(prob.x);
free(x_space);
free(line);

return 0;
}

/* Run nr_fold-fold cross validation on the loaded problem and print
 * MSE plus squared correlation for regression, accuracy otherwise. */
void do_cross_validation()
{
int i;
int total_correct = 0;
double total_error = 0;
double sumv = 0, sumy = 0, sumvv = 0, sumyy = 0, sumvy = 0;
double *target = Malloc(double,prob.l);

svm_cross_validation(&prob,&param,nr_fold,target);
if(param.svm_type == EPSILON_SVR ||
param.svm_type == NU_SVR)
{
for(i=0;i<prob.l;i++)
{
double y = prob.y[i];
double v = target[i];
total_error += (v-y)*(v-y);
sumv += v;
sumy += y;
sumvv += v*v;
sumyy += y*y;
sumvy += v*y;
}
printf("Cross Validation Mean squared error = %g\n",total_error/prob.l);
/* squared Pearson correlation between predicted and true targets */
printf("Cross Validation Squared correlation coefficient = %g\n",
((prob.l*sumvy-sumv*sumy)*(prob.l*sumvy-sumv*sumy))/
((prob.l*sumvv-sumv*sumv)*(prob.l*sumyy-sumy*sumy))
);
}
else
{
for(i=0;i<prob.l;i++)
if(target[i] == prob.y[i])
++total_correct;
printf("Cross Validation Accuracy = %g%%\n",100.0*total_correct/prob.l);
}
free(target);
}

/* Fill the global `param` and the two caller-provided file-name buffers
 * from argv.  Options mirror exit_with_help(); -q installs a null print
 * function, -v turns on cross-validation mode.  Exits via
 * exit_with_help() on any unknown or malformed option.
 * NOTE(review): model_file_name is written with sprintf and is a
 * 1024-byte buffer in main(); a very long input path would overflow it
 * -- confirm whether this matters for the intended usage. */
void parse_command_line(int argc, char **argv, char *input_file_name, char *model_file_name)
{
int i;
void (*print_func)(const char*) = NULL;	// default printing to stdout

// default values
param.svm_type = C_SVC;
param.kernel_type = RBF;
param.degree = 3;
param.gamma = 0;	// 1/num_features
param.coef0 = 0;
param.nu = 0.5;
param.cache_size = 100;
param.C = 1;
param.eps = 1e-3;
param.p = 0.1;
param.shrinking = 1;
param.probability = 0;
param.nr_weight = 0;
param.weight_label = NULL;
param.weight = NULL;
cross_validation = 0;

// parse options; each option consumes the following argv entry as its value
for(i=1;i<argc;i++)
{
if(argv[i][0] != '-') break;
if(++i>=argc)
exit_with_help();
switch(argv[i-1][1])
{
case 's':
param.svm_type = atoi(argv[i]);
break;
case 't':
param.kernel_type = atoi(argv[i]);
break;
case 'd':
param.degree = atoi(argv[i]);
break;
case 'g':
param.gamma = atof(argv[i]);
break;
case 'r':
param.coef0 = atof(argv[i]);
break;
case 'n':
param.nu = atof(argv[i]);
break;
case 'm':
param.cache_size = atof(argv[i]);
break;
case 'c':
param.C = atof(argv[i]);
break;
case 'e':
param.eps = atof(argv[i]);
break;
case 'p':
param.p = atof(argv[i]);
break;
case 'h':
param.shrinking = atoi(argv[i]);
break;
case 'b':
param.probability = atoi(argv[i]);
break;
case 'q':
print_func = &print_null;
i--;	// -q takes no value; undo the argv consumption above
break;
case 'v':
cross_validation = 1;
nr_fold = atoi(argv[i]);
if(nr_fold < 2)
{
fprintf(stderr,"n-fold cross validation: n must >= 2\n");
exit_with_help();
}
break;
case 'w':
// -wi weight: class label i is embedded in the option itself
++param.nr_weight;
param.weight_label = (int *)realloc(param.weight_label,sizeof(int)*param.nr_weight);
param.weight = (double *)realloc(param.weight,sizeof(double)*param.nr_weight);
param.weight_label[param.nr_weight-1] = atoi(&argv[i-1][2]);
param.weight[param.nr_weight-1] = atof(argv[i]);
break;
default:
fprintf(stderr,"Unknown option: -%c\n", argv[i-1][1]);
exit_with_help();
}
}

svm_set_print_string_function(print_func);

// determine filenames

if(i>=argc)
exit_with_help();

strcpy(input_file_name, argv[i]);

if(i<argc-1)
strcpy(model_file_name,argv[i+1]);
else
{
// default model name: basename of the training file plus ".model"
char *p = strrchr(argv[i],'/');
if(p==NULL)
p = argv[i];
else
++p;
sprintf(model_file_name,"%s.model",p);
}
}

// read in a problem (in svmlight format)

/* Load an svmlight-format training file into the globals `prob` and
 * `x_space`.  First pass counts instances and feature entries to size
 * the allocations; second pass parses labels and index:value pairs.
 * Any malformed line aborts via exit_input_error(). */
void read_problem(const char *filename)
{
int max_index, inst_max_index, i;
size_t elements, j;
FILE *fp = fopen(filename,"r");
char *endptr;
char *idx, *val, *label;

if(fp == NULL)
{
fprintf(stderr,"can't open input file %s\n",filename);
exit(1);
}

prob.l = 0;
elements = 0;

max_line_len = 1024;
line = Malloc(char,max_line_len);
// pass 1: count lines (instances) and feature tokens (elements)
while(readline(fp)!=NULL)
{
char *p = strtok(line," \t"); // label

// features
while(1)
{
p = strtok(NULL," \t");
if(p == NULL || *p == '\n') // check '\n' as ' ' may be after the last feature
break;
++elements;
}
++elements; // one extra slot per instance for the index = -1 terminator
++prob.l;
}
rewind(fp);

prob.y = Malloc(double,prob.l);
prob.x = Malloc(struct svm_node *,prob.l);
x_space = Malloc(struct svm_node,elements);

// pass 2: parse every line into prob.y / x_space
max_index = 0;
j=0;
for(i=0;i<prob.l;i++)
{
inst_max_index = -1; // strtol gives 0 if wrong format, and precomputed kernel has <index> start from 0
readline(fp);
prob.x[i] = &x_space[j];
label = strtok(line," \t\n");
if(label == NULL) // empty line
exit_input_error(i+1);

prob.y[i] = strtod(label,&endptr);
if(endptr == label || *endptr != '\0')
exit_input_error(i+1);

while(1)
{
idx = strtok(NULL,":");
val = strtok(NULL," \t");

if(val == NULL)
break;

// indices must be strictly increasing within one instance
errno = 0;
x_space[j].index = (int) strtol(idx,&endptr,10);
if(endptr == idx || errno != 0 || *endptr != '\0' || x_space[j].index <= inst_max_index)
exit_input_error(i+1);
else
inst_max_index = x_space[j].index;

errno = 0;
x_space[j].value = strtod(val,&endptr);
if(endptr == val || errno != 0 || (*endptr != '\0' && !isspace(*endptr)))
exit_input_error(i+1);

++j;
}

if(inst_max_index > max_index)
max_index = inst_max_index;
x_space[j++].index = -1; // terminate this instance's node list
}

// default gamma = 1/num_features when -g was not given
if(param.gamma == 0 && max_index > 0)
param.gamma = 1.0/max_index;

// precomputed kernels: column 0 must hold the sample serial number
if(param.kernel_type == PRECOMPUTED)
for(i=0;i<prob.l;i++)
{
if (prob.x[i][0].index != 0)
{
fprintf(stderr,"Wrong input format: first column must be 0:sample_serial_number\n");
exit(1);
}
if ((int)prob.x[i][0].value <= 0 || (int)prob.x[i][0].value > max_index)
{
fprintf(stderr,"Wrong input format: sample_serial_number out of range\n");
exit(1);
}
}

fclose(fp);
}

+ 3181
- 0
gklearn/gedlib/lib/libsvm.3.22/svm.cpp
File diff suppressed because it is too large
View File


+ 21
- 0
gklearn/gedlib/lib/libsvm.3.22/svm.def View File

@@ -0,0 +1,21 @@
LIBRARY libsvm
EXPORTS
svm_train @1
svm_cross_validation @2
svm_save_model @3
svm_load_model @4
svm_get_svm_type @5
svm_get_nr_class @6
svm_get_labels @7
svm_get_svr_probability @8
svm_predict_values @9
svm_predict @10
svm_predict_probability @11
svm_free_model_content @12
svm_free_and_destroy_model @13
svm_destroy_param @14
svm_check_parameter @15
svm_check_probability_model @16
svm_set_print_string_function @17
svm_get_sv_indices @18
svm_get_nr_sv @19

+ 104
- 0
gklearn/gedlib/lib/libsvm.3.22/svm.h View File

@@ -0,0 +1,104 @@
#ifndef _LIBSVM_H
#define _LIBSVM_H

#define LIBSVM_VERSION 322

#ifdef __cplusplus
extern "C" {
#endif

extern int libsvm_version;

struct svm_node
{
int index;
double value;
};

struct svm_problem
{
int l;
double *y;
struct svm_node **x;
};

enum { C_SVC, NU_SVC, ONE_CLASS, EPSILON_SVR, NU_SVR }; /* svm_type */
enum { LINEAR, POLY, RBF, SIGMOID, PRECOMPUTED }; /* kernel_type */

struct svm_parameter
{
int svm_type;
int kernel_type;
int degree; /* for poly */
double gamma; /* for poly/rbf/sigmoid */
double coef0; /* for poly/sigmoid */

/* these are for training only */
double cache_size; /* in MB */
double eps; /* stopping criteria */
double C; /* for C_SVC, EPSILON_SVR and NU_SVR */
int nr_weight; /* for C_SVC */
int *weight_label; /* for C_SVC */
double* weight; /* for C_SVC */
double nu; /* for NU_SVC, ONE_CLASS, and NU_SVR */
double p; /* for EPSILON_SVR */
int shrinking; /* use the shrinking heuristics */
int probability; /* do probability estimates */
};

//
// svm_model
//
struct svm_model
{
struct svm_parameter param; /* parameter */
int nr_class; /* number of classes, = 2 in regression/one class svm */
int l; /* total #SV */
struct svm_node **SV; /* SVs (SV[l]) */
double **sv_coef; /* coefficients for SVs in decision functions (sv_coef[k-1][l]) */
double *rho; /* constants in decision functions (rho[k*(k-1)/2]) */
double *probA; /* pairwise probability information */
double *probB;
int *sv_indices; /* sv_indices[0,...,nSV-1] are values in [1,...,num_training_data] to indicate SVs in the training set */

/* for classification only */

int *label; /* label of each class (label[k]) */
int *nSV; /* number of SVs for each class (nSV[k]) */
/* nSV[0] + nSV[1] + ... + nSV[k-1] = l */
/* XXX */
int free_sv; /* 1 if svm_model is created by svm_load_model*/
/* 0 if svm_model is created by svm_train */
};

/* ---- libsvm public C API (remainder of header) ---- */

/* Training and cross validation. */
struct svm_model *svm_train(const struct svm_problem *prob, const struct svm_parameter *param);
void svm_cross_validation(const struct svm_problem *prob, const struct svm_parameter *param, int nr_fold, double *target);

/* Model persistence. */
int svm_save_model(const char *model_file_name, const struct svm_model *model);
struct svm_model *svm_load_model(const char *model_file_name);

/* Model introspection. */
int svm_get_svm_type(const struct svm_model *model);
int svm_get_nr_class(const struct svm_model *model);
void svm_get_labels(const struct svm_model *model, int *label);
void svm_get_sv_indices(const struct svm_model *model, int *sv_indices);
int svm_get_nr_sv(const struct svm_model *model);
double svm_get_svr_probability(const struct svm_model *model);

/* Prediction: decision values, predicted label, probability estimates. */
double svm_predict_values(const struct svm_model *model, const struct svm_node *x, double* dec_values);
double svm_predict(const struct svm_model *model, const struct svm_node *x);
double svm_predict_probability(const struct svm_model *model, const struct svm_node *x, double* prob_estimates);

/* Deallocation of models and parameters. */
void svm_free_model_content(struct svm_model *model_ptr);
void svm_free_and_destroy_model(struct svm_model **model_ptr_ptr);
void svm_destroy_param(struct svm_parameter *param);

/* Validation helpers. NOTE(review): svm_check_parameter presumably returns
   NULL when param is acceptable and an error string otherwise -- confirm
   against svm.cpp; only the declaration is visible here. */
const char *svm_check_parameter(const struct svm_problem *prob, const struct svm_parameter *param);
int svm_check_probability_model(const struct svm_model *model);

/* Redirect libsvm's informational console output. */
void svm_set_print_string_function(void (*print_func)(const char *));

/* closes the extern "C" opened earlier in this header */
#ifdef __cplusplus
}
#endif

#endif /* _LIBSVM_H */

+ 210
- 0
gklearn/gedlib/lib/libsvm.3.22/tools/README View File

@@ -0,0 +1,210 @@
This directory includes some useful codes:

1. subset selection tools.
2. parameter selection tools.
3. LIBSVM format checking tools

Part I: Subset selection tools

Introduction
============

Training large data is time consuming. Sometimes one should work on a
smaller subset first. The python script subset.py randomly selects a
specified number of samples. For classification data, we provide a
stratified selection to ensure the same class distribution in the
subset.

Usage: subset.py [options] dataset number [output1] [output2]

This script selects a subset of the given data set.

options:
-s method : method of selection (default 0)
0 -- stratified selection (classification only)
1 -- random selection

output1 : the subset (optional)
output2 : the rest of data (optional)

If output1 is omitted, the subset will be printed on the screen.

Example
=======

> python subset.py heart_scale 100 file1 file2

From heart_scale 100 samples are randomly selected and stored in
file1. All remaining instances are stored in file2.


Part II: Parameter Selection Tools

Introduction
============

grid.py is a parameter selection tool for C-SVM classification using
the RBF (radial basis function) kernel. It uses cross validation (CV)
technique to estimate the accuracy of each parameter combination in
the specified range and helps you to decide the best parameters for
your problem.

grid.py directly executes libsvm binaries (so no python binding is needed)
for cross validation and then draw contour of CV accuracy using gnuplot.
You must have libsvm and gnuplot installed before using it. The package
gnuplot is available at http://www.gnuplot.info/

On Mac OS X, the precompiled gnuplot file needs the library AquaTerm,
which thus must be installed as well. In addition, this version of
gnuplot does not support png, so you need to change "set term png
transparent small" and use other image formats. For example, you may
have "set term pbm small color".

Usage: grid.py [grid_options] [svm_options] dataset

grid_options :
-log2c {begin,end,step | "null"} : set the range of c (default -5,15,2)
begin,end,step -- c_range = 2^{begin,...,begin+k*step,...,end}
"null" -- do not grid with c
-log2g {begin,end,step | "null"} : set the range of g (default 3,-15,-2)
begin,end,step -- g_range = 2^{begin,...,begin+k*step,...,end}
"null" -- do not grid with g
-v n : n-fold cross validation (default 5)
-svmtrain pathname : set svm executable path and name
-gnuplot {pathname | "null"} :
pathname -- set gnuplot executable path and name
"null" -- do not plot
-out {pathname | "null"} : (default dataset.out)
pathname -- set output file path and name
"null" -- do not output file
-png pathname : set graphic output file path and name (default dataset.png)
-resume [pathname] : resume the grid task using an existing output file (default pathname is dataset.out)
Use this option only if some parameters have been checked for the SAME data.

svm_options : additional options for svm-train

The program conducts v-fold cross validation using parameter C (and gamma)
= 2^begin, 2^(begin+step), ..., 2^end.

You can specify where the libsvm executable and gnuplot are using the
-svmtrain and -gnuplot parameters.

For windows users, please use pgnuplot.exe. If you are using gnuplot
3.7.1, please upgrade to version 3.7.3 or higher. The version 3.7.1
has a bug. If you use cygwin on windows, please use gnuplot-x11.

If the task is terminated accidentally or you would like to change the
range of parameters, you can apply '-resume' to save time by re-using
previous results. You may specify the output file of a previous run
or use the default (i.e., dataset.out) without giving a name. Please
note that the same condition must be used in two runs. For example,
you cannot use '-v 10' earlier and resume the task with '-v 5'.

The value of some options can be "null". For example, `-log2c -1,0,1
-log2g "null"' means that C=2^-1,2^0,2^1 and g=LIBSVM's default gamma
value. That is, you do not conduct parameter selection on gamma.

Example
=======

> python grid.py -log2c -5,5,1 -log2g -4,0,1 -v 5 -m 300 heart_scale

Users (in particular MS Windows users) may need to specify the path of
executable files. You can either change paths in the beginning of
grid.py or specify them in the command line. For example,

> grid.py -log2c -5,5,1 -svmtrain "c:\Program Files\libsvm\windows\svm-train.exe" -gnuplot c:\tmp\gnuplot\binary\pgnuplot.exe -v 10 heart_scale

Output: two files
dataset.png: the CV accuracy contour plot generated by gnuplot
dataset.out: the CV accuracy at each (log2(C),log2(gamma))

The following example saves running time by loading the output file of a previous run.

> python grid.py -log2c -7,7,1 -log2g -5,2,1 -v 5 -resume heart_scale.out heart_scale

Parallel grid search
====================

You can conduct a parallel grid search by dispatching jobs to a
cluster of computers which share the same file system. First, you add
machine names in grid.py:

ssh_workers = ["linux1", "linux5", "linux5"]

and then setup your ssh so that the authentication works without
asking a password.

The same machine (e.g., linux5 here) can be listed more than once if
it has multiple CPUs or has more RAM. If the local machine is the
best, you can also enlarge the nr_local_worker. For example:

nr_local_worker = 2

Example:

> python grid.py heart_scale
[local] -1 -1 78.8889 (best c=0.5, g=0.5, rate=78.8889)
[linux5] -1 -7 83.3333 (best c=0.5, g=0.0078125, rate=83.3333)
[linux5] 5 -1 77.037 (best c=0.5, g=0.0078125, rate=83.3333)
[linux1] 5 -7 83.3333 (best c=0.5, g=0.0078125, rate=83.3333)
.
.
.

If -log2c, -log2g, or -v is not specified, default values are used.

If your system uses telnet instead of ssh, you list the computer names
in telnet_workers.

Calling grid in Python
======================

In addition to using grid.py as a command-line tool, you can use it as a
Python module.

>>> rate, param = find_parameters(dataset, options)

You need to specify `dataset' and `options' (default ''). See the following example.

> python

>>> from grid import *
>>> rate, param = find_parameters('../heart_scale', '-log2c -1,1,1 -log2g -1,1,1')
[local] 0.0 0.0 rate=74.8148 (best c=1.0, g=1.0, rate=74.8148)
[local] 0.0 -1.0 rate=77.037 (best c=1.0, g=0.5, rate=77.037)
.
.
[local] -1.0 -1.0 rate=78.8889 (best c=0.5, g=0.5, rate=78.8889)
.
.
>>> rate
78.8889
>>> param
{'c': 0.5, 'g': 0.5}


Part III: LIBSVM format checking tools

Introduction
============

`svm-train' conducts only a simple check of the input data. To do a
detailed check, we provide a python script `checkdata.py.'

Usage: checkdata.py dataset

Exit status (returned value): 1 if there are errors, 0 otherwise.

This tool is written by Rong-En Fan at National Taiwan University.

Example
=======

> cat bad_data
1 3:1 2:4
> python checkdata.py bad_data
line 1: feature indices must be in an ascending order, previous/current features 3:1 2:4
Found 1 lines with error.



+ 108
- 0
gklearn/gedlib/lib/libsvm.3.22/tools/checkdata.py View File

@@ -0,0 +1,108 @@
#!/usr/bin/env python

#
# A format checker for LIBSVM
#

#
# Copyright (c) 2007, Rong-En Fan
#
# All rights reserved.
#
# This program is distributed under the same license of the LIBSVM package.
#

from sys import argv, exit
import os.path

def err(line_no, msg):
    """Report a format problem found on the given (1-based) line."""
    print("line %s: %s" % (line_no, msg))

# works like float() but does not accept nan and inf
def my_float(x):
    """Convert *x* to float, raising ValueError for nan/inf spellings."""
    lowered = x.lower()
    if "nan" in lowered or "inf" in lowered:
        raise ValueError

    return float(x)

def main():
    """Validate the LIBSVM-format dataset named on the command line.

    Prints one message per malformed line (via err()) and a summary.
    Returns 1 if any error was found, 0 otherwise; exits early with
    status 1 on usage errors or a missing file.
    """
    if len(argv) != 2:
        print("Usage: {0} dataset".format(argv[0]))
        exit(1)

    dataset = argv[1]

    if not os.path.exists(dataset):
        print("dataset {0} not found".format(dataset))
        exit(1)

    line_no = 1
    error_line_count = 0
    # 'with' guarantees the file is closed even if checking aborts midway
    # (the original left the handle open until interpreter exit).
    with open(dataset, 'r') as data_file:
        for line in data_file:
            line_error = False

            # each line must end with a newline character
            if line[-1] != '\n':
                err(line_no, "missing a newline character in the end")
                line_error = True

            nodes = line.split()

            # check label
            try:
                label = nodes.pop(0)
                if label.find(',') != -1:
                    # multi-label format: every comma-separated part must parse
                    try:
                        for part in label.split(','):
                            my_float(part)
                    except ValueError:
                        err(line_no, "label {0} is not a valid multi-label form".format(label))
                        line_error = True
                else:
                    try:
                        label = my_float(label)
                    except ValueError:
                        err(line_no, "label {0} is not a number".format(label))
                        line_error = True
            except IndexError:
                # nodes.pop(0) on an empty list
                err(line_no, "missing label, perhaps an empty line?")
                line_error = True

            # check features
            prev_index = -1
            for i in range(len(nodes)):
                try:
                    (index, value) = nodes[i].split(':')

                    index = int(index)
                    value = my_float(value)

                    # precomputed kernel's index starts from 0 and LIBSVM
                    # checks it. Hence, don't treat index 0 as an error.
                    if index < 0:
                        err(line_no, "feature index must be positive; wrong feature {0}".format(nodes[i]))
                        line_error = True
                    elif index <= prev_index:
                        err(line_no, "feature indices must be in an ascending order, previous/current features {0} {1}".format(nodes[i-1], nodes[i]))
                        line_error = True
                    prev_index = index
                except ValueError:
                    # raised by the tuple unpack, int() or my_float() above
                    err(line_no, "feature '{0}' not an <index>:<value> pair, <index> integer, <value> real number ".format(nodes[i]))
                    line_error = True

            line_no += 1

            if line_error:
                error_line_count += 1

    if error_line_count > 0:
        print("Found {0} lines with error.".format(error_line_count))
        return 1
    else:
        print("No error.")
        return 0

if __name__ == "__main__":
    exit(main())

+ 79
- 0
gklearn/gedlib/lib/libsvm.3.22/tools/easy.py View File

@@ -0,0 +1,79 @@
#!/usr/bin/env python

import sys
import os
from subprocess import *

# --- command-line handling: training file required, testing file optional ---
if len(sys.argv) <= 1:
    print('Usage: {0} training_file [testing_file]'.format(sys.argv[0]))
    raise SystemExit

# svm, grid, and gnuplot executable files

is_win32 = (sys.platform == 'win32')
if not is_win32:
    svmscale_exe = "../svm-scale"
    svmtrain_exe = "../svm-train"
    svmpredict_exe = "../svm-predict"
    grid_py = "./grid.py"
    gnuplot_exe = "/usr/bin/gnuplot"
else:
    # example for windows
    svmscale_exe = r"..\windows\svm-scale.exe"
    svmtrain_exe = r"..\windows\svm-train.exe"
    svmpredict_exe = r"..\windows\svm-predict.exe"
    gnuplot_exe = r"c:\tmp\gnuplot\binary\pgnuplot.exe"
    grid_py = r".\grid.py"

assert os.path.exists(svmscale_exe), "svm-scale executable not found"
assert os.path.exists(svmtrain_exe), "svm-train executable not found"
assert os.path.exists(svmpredict_exe), "svm-predict executable not found"
assert os.path.exists(gnuplot_exe), "gnuplot executable not found"
assert os.path.exists(grid_py), "grid.py not found"

train_pathname = sys.argv[1]
assert os.path.exists(train_pathname), "training file not found"
file_name = os.path.split(train_pathname)[1]
scaled_file = file_name + ".scale"
model_file = file_name + ".model"
range_file = file_name + ".range"

if len(sys.argv) > 2:
    test_pathname = sys.argv[2]
    file_name = os.path.split(test_pathname)[1]
    assert os.path.exists(test_pathname), "testing file not found"
    scaled_test_file = file_name + ".scale"
    predict_test_file = file_name + ".predict"

# step 1: scale the training data, saving the scaling parameters
command = '{0} -s "{1}" "{2}" > "{3}"'.format(svmscale_exe, range_file, train_pathname, scaled_file)
print('Scaling training data...')
Popen(command, shell = True, stdout = PIPE).communicate()

# step 2: grid-search C and gamma via grid.py
command = '{0} -svmtrain "{1}" -gnuplot "{2}" "{3}"'.format(grid_py, svmtrain_exe, gnuplot_exe, scaled_file)
print('Cross validation...')
grid_output = Popen(command, shell = True, stdout = PIPE).stdout

# the winning "c g rate" triple is the last line grid.py prints
current = ''
while True:
    previous = current
    current = grid_output.readline()
    if not current: break
c, g, rate = map(float, previous.split())

print('Best c={0}, g={1} CV rate={2}'.format(c,g,rate))

# step 3: train the final model with the best parameters
command = '{0} -c {1} -g {2} "{3}" "{4}"'.format(svmtrain_exe,c,g,scaled_file,model_file)
print('Training...')
Popen(command, shell = True, stdout = PIPE).communicate()

print('Output model: {0}'.format(model_file))
if len(sys.argv) > 2:
    # step 4: scale the testing data with the training ranges, then predict
    command = '{0} -r "{1}" "{2}" > "{3}"'.format(svmscale_exe, range_file, test_pathname, scaled_test_file)
    print('Scaling testing data...')
    Popen(command, shell = True, stdout = PIPE).communicate()

    command = '{0} "{1}" "{2}" "{3}"'.format(svmpredict_exe, scaled_test_file, model_file, predict_test_file)
    print('Testing...')
    Popen(command, shell = True).communicate()

    print('Output prediction: {0}'.format(predict_test_file))

+ 500
- 0
gklearn/gedlib/lib/libsvm.3.22/tools/grid.py View File

@@ -0,0 +1,500 @@
#!/usr/bin/env python
__all__ = ['find_parameters']

import os, sys, traceback, getpass, time, re
from threading import Thread
from subprocess import *

if sys.version_info[0] < 3:
from Queue import Queue
else:
from queue import Queue

telnet_workers = []
ssh_workers = []
nr_local_worker = 1

class GridOption:
    """Settings for one grid search.

    Defaults are established in __init__, then overridden by the option
    string (or list) handed to parse_options().
    """
    def __init__(self, dataset_pathname, options):
        dirname = os.path.dirname(__file__)
        # platform-specific defaults for svm-train and gnuplot binaries
        if sys.platform != 'win32':
            self.svmtrain_pathname = os.path.join(dirname, '../svm-train')
            self.gnuplot_pathname = '/usr/bin/gnuplot'
        else:
            # example for windows
            self.svmtrain_pathname = os.path.join(dirname, r'..\windows\svm-train.exe')
            # svmtrain_pathname = r'c:\Program Files\libsvm\windows\svm-train.exe'
            self.gnuplot_pathname = r'c:\tmp\gnuplot\binary\pgnuplot.exe'
        self.fold = 5
        self.c_begin, self.c_end, self.c_step = -5, 15, 2
        self.g_begin, self.g_end, self.g_step = 3, -15, -2
        self.grid_with_c, self.grid_with_g = True, True
        self.dataset_pathname = dataset_pathname
        self.dataset_title = os.path.split(dataset_pathname)[1]
        self.out_pathname = '{0}.out'.format(self.dataset_title)
        self.png_pathname = '{0}.png'.format(self.dataset_title)
        self.pass_through_string = ' '
        self.resume_pathname = None
        self.parse_options(options)

    def parse_options(self, options):
        """Apply command-line style options; unknown ones are passed through."""
        if type(options) == str:
            options = options.split()
        unknown = []
        pos = 0
        while pos < len(options):
            flag = options[pos]
            if flag == '-log2c':
                pos = pos + 1
                if options[pos] == 'null':
                    self.grid_with_c = False
                else:
                    self.c_begin, self.c_end, self.c_step = map(float, options[pos].split(','))
            elif flag == '-log2g':
                pos = pos + 1
                if options[pos] == 'null':
                    self.grid_with_g = False
                else:
                    self.g_begin, self.g_end, self.g_step = map(float, options[pos].split(','))
            elif flag == '-v':
                pos = pos + 1
                self.fold = options[pos]
            elif flag in ('-c', '-g'):
                raise ValueError('Use -log2c and -log2g.')
            elif flag == '-svmtrain':
                pos = pos + 1
                self.svmtrain_pathname = options[pos]
            elif flag == '-gnuplot':
                pos = pos + 1
                self.gnuplot_pathname = None if options[pos] == 'null' else options[pos]
            elif flag == '-out':
                pos = pos + 1
                self.out_pathname = None if options[pos] == 'null' else options[pos]
            elif flag == '-png':
                pos = pos + 1
                self.png_pathname = options[pos]
            elif flag == '-resume':
                # the pathname argument is optional; default to <dataset>.out
                if pos == (len(options) - 1) or options[pos + 1].startswith('-'):
                    self.resume_pathname = self.dataset_title + '.out'
                else:
                    pos = pos + 1
                    self.resume_pathname = options[pos]
            else:
                unknown.append(flag)
            pos = pos + 1

        # everything unrecognized is forwarded verbatim to svm-train
        self.pass_through_string = ' '.join(unknown)
        if not os.path.exists(self.svmtrain_pathname):
            raise IOError('svm-train executable not found')
        if not os.path.exists(self.dataset_pathname):
            raise IOError('dataset not found')
        if self.resume_pathname and not os.path.exists(self.resume_pathname):
            raise IOError('file for resumption not found')
        if not self.grid_with_c and not self.grid_with_g:
            raise ValueError('-log2c and -log2g should not be null simultaneously')
        if self.gnuplot_pathname and not os.path.exists(self.gnuplot_pathname):
            sys.stderr.write('gnuplot executable not found\n')
            self.gnuplot_pathname = None

def redraw(db,best_param,gnuplot,options,tofile=False):
if len(db) == 0: return
begin_level = round(max(x[2] for x in db)) - 3
step_size = 0.5

best_log2c,best_log2g,best_rate = best_param

# if newly obtained c, g, or cv values are the same,
# then stop redrawing the contour.
if all(x[0] == db[0][0] for x in db): return
if all(x[1] == db[0][1] for x in db): return
if all(x[2] == db[0][2] for x in db): return

if tofile:
gnuplot.write(b"set term png transparent small linewidth 2 medium enhanced\n")
gnuplot.write("set output \"{0}\"\n".format(options.png_pathname.replace('\\','\\\\')).encode())
#gnuplot.write(b"set term postscript color solid\n")
#gnuplot.write("set output \"{0}.ps\"\n".format(options.dataset_title).encode().encode())
elif sys.platform == 'win32':
gnuplot.write(b"set term windows\n")
else:
gnuplot.write( b"set term x11\n")
gnuplot.write(b"set xlabel \"log2(C)\"\n")
gnuplot.write(b"set ylabel \"log2(gamma)\"\n")
gnuplot.write("set xrange [{0}:{1}]\n".format(options.c_begin,options.c_end).encode())
gnuplot.write("set yrange [{0}:{1}]\n".format(options.g_begin,options.g_end).encode())
gnuplot.write(b"set contour\n")
gnuplot.write("set cntrparam levels incremental {0},{1},100\n".format(begin_level,step_size).encode())
gnuplot.write(b"unset surface\n")
gnuplot.write(b"unset ztics\n")
gnuplot.write(b"set view 0,0\n")
gnuplot.write("set title \"{0}\"\n".format(options.dataset_title).encode())
gnuplot.write(b"unset label\n")
gnuplot.write("set label \"Best log2(C) = {0} log2(gamma) = {1} accuracy = {2}%\" \
at screen 0.5,0.85 center\n". \
format(best_log2c, best_log2g, best_rate).encode())
gnuplot.write("set label \"C = {0} gamma = {1}\""
" at screen 0.5,0.8 center\n".format(2**best_log2c, 2**best_log2g).encode())
gnuplot.write(b"set key at screen 0.9,0.9\n")
gnuplot.write(b"splot \"-\" with lines\n")
db.sort(key = lambda x:(x[0], -x[1]))

prevc = db[0][0]
for line in db:
if prevc != line[0]:
gnuplot.write(b"\n")
prevc = line[0]
gnuplot.write("{0[0]} {0[1]} {0[2]}\n".format(line).encode())
gnuplot.write(b"e\n")
gnuplot.write(b"\n") # force gnuplot back to prompt when term set failure
gnuplot.flush()


def calculate_jobs(options):
    """Build the list of (log2c, log2g) jobs in coarse-to-fine order.

    Returns (jobs, resumed_jobs): jobs is a list of "lines", each a list
    of (c, g) pairs; resumed_jobs maps (c, g) -> rate parsed from a
    previous output file when -resume was given.
    """
    def _spread(begin, end, step):
        # like range(), but tolerates non-integer endpoints and steps
        values = []
        current = begin
        while not (step > 0 and current > end) and not (step < 0 and current < end):
            values.append(current)
            current = current + step
        return values

    def _interleave(seq):
        # visit the middle first, then alternately interleave both halves,
        # so early jobs cover the grid coarsely
        count = len(seq)
        if count <= 1:
            return seq
        mid = count // 2
        left = _interleave(seq[:mid])
        right = _interleave(seq[mid + 1:])
        ordered = [seq[mid]]
        while left or right:
            if left:
                ordered.append(left.pop(0))
            if right:
                ordered.append(right.pop(0))
        return ordered

    c_seq = _interleave(_spread(options.c_begin, options.c_end, options.c_step))
    g_seq = _interleave(_spread(options.g_begin, options.g_end, options.g_step))

    if not options.grid_with_c:
        c_seq = [None]
    if not options.grid_with_g:
        g_seq = [None]

    nr_c = float(len(c_seq))
    nr_g = float(len(g_seq))
    i, j = 0, 0
    jobs = []

    # alternately refine whichever axis is proportionally behind
    while i < nr_c or j < nr_g:
        if i / nr_c < j / nr_g:
            # increase C resolution
            line = [(c_seq[i], g_seq[k]) for k in range(0, j)]
            i = i + 1
        else:
            # increase g resolution
            line = [(c_seq[k], g_seq[j]) for k in range(0, i)]
            j = j + 1
        jobs.append(line)

    resumed_jobs = {}
    if options.resume_pathname is None:
        return jobs, resumed_jobs

    for raw in open(options.resume_pathname, 'r'):
        raw = raw.strip()
        found_rate = re.findall(r'rate=([0-9.]+)', raw)
        if not found_rate:
            continue
        rate = float(found_rate[0])

        c, g = None, None
        found_c = re.findall(r'log2c=([0-9.-]+)', raw)
        if found_c:
            c = float(found_c[0])
        found_g = re.findall(r'log2g=([0-9.-]+)', raw)
        if found_g:
            g = float(found_g[0])

        resumed_jobs[(c, g)] = rate

    return jobs, resumed_jobs

class WorkerStopToken:
    """Queue sentinel used to notify a worker to stop (or mark one dead)."""
    pass

class Worker(Thread):
    """Base class for cross-validation workers.

    Pulls (log2c, log2g) jobs from job_queue, evaluates them through
    run_one() (supplied by subclasses) and pushes
    (name, log2c, log2g, rate) tuples onto result_queue.
    """
    def __init__(self, name, job_queue, result_queue, options):
        Thread.__init__(self)
        self.name = name
        self.job_queue = job_queue
        self.result_queue = result_queue
        self.options = options

    def run(self):
        while True:
            (cexp, gexp) = self.job_queue.get()
            if cexp is WorkerStopToken:
                # put the token back so sibling workers stop as well
                self.job_queue.put((cexp, gexp))
                break
            try:
                c = 2.0**cexp if cexp != None else None
                g = 2.0**gexp if gexp != None else None
                rate = self.run_one(c, g)
                if rate is None:
                    raise RuntimeError('get no rate')
            except:
                # we failed: re-queue the job for another worker and quit
                traceback.print_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2])
                self.job_queue.put((cexp, gexp))
                sys.stderr.write('worker {0} quit.\n'.format(self.name))
                break
            else:
                self.result_queue.put((self.name, cexp, gexp, rate))

    def get_cmd(self, c, g):
        """Assemble the svm-train command line for one (c, g) candidate."""
        options = self.options
        cmdline = '"' + options.svmtrain_pathname + '"'
        if options.grid_with_c:
            cmdline += ' -c {0} '.format(c)
        if options.grid_with_g:
            cmdline += ' -g {0} '.format(g)
        cmdline += ' -v {0} {1} {2} '.format(
            options.fold, options.pass_through_string, options.dataset_pathname)
        return cmdline
class LocalWorker(Worker):
    """Worker that runs svm-train on the local machine."""
    def run_one(self, c, g):
        cmdline = self.get_cmd(c, g)
        out = Popen(cmdline, shell=True, stdout=PIPE, stderr=PIPE, stdin=PIPE).stdout
        for raw in out.readlines():
            # svm-train prints a line containing "Cross" with the CV rate
            # as its last token, e.g. "... = 83.3333%"
            if str(raw).find('Cross') != -1:
                return float(raw.split()[-1][0:-1])

class SSHWorker(Worker):
    """Worker that runs svm-train on a remote host over ssh (shared FS)."""
    def __init__(self, name, job_queue, result_queue, host, options):
        Worker.__init__(self, name, job_queue, result_queue, options)
        self.host = host
        self.cwd = os.getcwd()

    def run_one(self, c, g):
        # run the command remotely in the same working directory
        cmdline = 'ssh -x -t -t {0} "cd {1}; {2}"'.format(
            self.host, self.cwd, self.get_cmd(c, g))
        out = Popen(cmdline, shell=True, stdout=PIPE, stderr=PIPE, stdin=PIPE).stdout
        for raw in out.readlines():
            if str(raw).find('Cross') != -1:
                return float(raw.split()[-1][0:-1])

class TelnetWorker(Worker):
    """Worker that runs svm-train on a remote host over telnet.

    Python 3's telnetlib speaks bytes, not str, so every outgoing string
    is encoded and the captured output is decoded before parsing (the
    original passed str objects, which raises TypeError on Python 3;
    .encode()/.decode() also work on Python 2).
    """
    def __init__(self, name, job_queue, result_queue, host, username, password, options):
        Worker.__init__(self, name, job_queue, result_queue, options)
        self.host = host
        self.username = username
        self.password = password

    def run(self):
        import telnetlib
        self.tn = tn = telnetlib.Telnet(self.host)
        tn.read_until(b'login: ')
        tn.write((self.username + '\n').encode())
        tn.read_until(b'Password: ')
        tn.write((self.password + '\n').encode())

        # XXX: how to know whether login is successful?
        tn.read_until(self.username.encode())
        #
        print('login ok', self.host)
        tn.write(('cd ' + os.getcwd() + '\n').encode())
        Worker.run(self)
        tn.write(b'exit\n')

    def run_one(self, c, g):
        cmdline = self.get_cmd(c, g)
        self.tn.write((cmdline + '\n').encode())
        # block until the "Cross Validation ..." line appears in the session
        (idx, matchm, output) = self.tn.expect([b'Cross.*\n'])
        for line in output.decode(errors='replace').split('\n'):
            if str(line).find('Cross') != -1:
                return float(line.split()[-1][0:-1])
def find_parameters(dataset_pathname, options=''):
    """Run a (possibly distributed) grid search over log2(C)/log2(gamma).

    Spawns telnet/ssh/local workers that invoke svm-train with -v,
    gathers cross-validation rates, optionally streams the contour plot
    to gnuplot, and returns (best_rate, best_param) where best_param
    maps 'c'/'g' to the best parameter values found.
    """
    def update_param(c, g, rate, best_c, best_g, best_rate, worker, resumed):
        # keep the incumbent unless strictly better (ties broken by smaller c)
        if (rate > best_rate) or (rate == best_rate and g == best_g and c < best_c):
            best_rate, best_c, best_g = rate, c, g
        stdout_str = '[{0}] {1} {2} (best '.format(worker, ' '.join(str(x) for x in [c, g] if x is not None), rate)
        output_str = ''
        if c != None:
            stdout_str += 'c={0}, '.format(2.0**best_c)
            output_str += 'log2c={0} '.format(c)
        if g != None:
            stdout_str += 'g={0}, '.format(2.0**best_g)
            output_str += 'log2g={0} '.format(g)
        stdout_str += 'rate={0})'.format(best_rate)
        print(stdout_str)
        # resumed results were already recorded in the output file
        if options.out_pathname and not resumed:
            output_str += 'rate={0}\n'.format(rate)
            result_file.write(output_str)
            result_file.flush()
        return best_c, best_g, best_rate

    options = GridOption(dataset_pathname, options);

    if options.gnuplot_pathname:
        gnuplot = Popen(options.gnuplot_pathname, stdin = PIPE, stdout = PIPE, stderr = PIPE).stdin
    else:
        gnuplot = None

    # put jobs in queue

    jobs, resumed_jobs = calculate_jobs(options)
    job_queue = Queue(0)
    result_queue = Queue(0)

    for (c, g) in resumed_jobs:
        result_queue.put(('resumed', c, g, resumed_jobs[(c, g)]))

    for line in jobs:
        for (c, g) in line:
            if (c, g) not in resumed_jobs:
                job_queue.put((c, g))

    # hack the queue to become a stack --
    # this is important when some thread
    # failed and re-put a job. If we still
    # use FIFO, the job will be put
    # into the end of the queue, and the graph
    # will only be updated in the end
    job_queue._put = job_queue.queue.appendleft

    # fire telnet workers

    if telnet_workers:
        nr_telnet_worker = len(telnet_workers)
        username = getpass.getuser()
        password = getpass.getpass()
        for host in telnet_workers:
            worker = TelnetWorker(host, job_queue, result_queue,
                                  host, username, password, options)
            worker.start()

    # fire ssh workers

    if ssh_workers:
        for host in ssh_workers:
            worker = SSHWorker(host, job_queue, result_queue, host, options)
            worker.start()

    # fire local workers

    for i in range(nr_local_worker):
        worker = LocalWorker('local', job_queue, result_queue, options)
        worker.start()

    # gather results

    done_jobs = {}

    if options.out_pathname:
        if options.resume_pathname:
            result_file = open(options.out_pathname, 'a')
        else:
            result_file = open(options.out_pathname, 'w')

    db = []
    best_rate = -1
    best_c, best_g = None, None

    for (c, g) in resumed_jobs:
        rate = resumed_jobs[(c, g)]
        best_c, best_g, best_rate = update_param(c, g, rate, best_c, best_g, best_rate, 'resumed', True)

    for line in jobs:
        for (c, g) in line:
            while (c, g) not in done_jobs:
                (worker, c1, g1, rate1) = result_queue.get()
                done_jobs[(c1, g1)] = rate1
                if (c1, g1) not in resumed_jobs:
                    best_c, best_g, best_rate = update_param(c1, g1, rate1, best_c, best_g, best_rate, worker, False)
            db.append((c, g, done_jobs[(c, g)]))
            # redraw the contour after every completed grid line
            if gnuplot and options.grid_with_c and options.grid_with_g:
                redraw(db, [best_c, best_g, best_rate], gnuplot, options)
                redraw(db, [best_c, best_g, best_rate], gnuplot, options, True)

    if options.out_pathname:
        result_file.close()
    # the token is re-queued by each worker, so one put stops them all
    job_queue.put((WorkerStopToken, None))
    best_param, best_cg = {}, []
    if best_c != None:
        best_param['c'] = 2.0**best_c
        best_cg += [2.0**best_c]
    if best_g != None:
        best_param['g'] = 2.0**best_g
        best_cg += [2.0**best_g]
    print('{0} {1}'.format(' '.join(map(str, best_cg)), best_rate))

    return best_rate, best_param


if __name__ == '__main__':

    def exit_with_help():
        """Print command-line usage and terminate with exit status 1."""
        print("""\
Usage: grid.py [grid_options] [svm_options] dataset

grid_options :
-log2c {begin,end,step | "null"} : set the range of c (default -5,15,2)
begin,end,step -- c_range = 2^{begin,...,begin+k*step,...,end}
"null" -- do not grid with c
-log2g {begin,end,step | "null"} : set the range of g (default 3,-15,-2)
begin,end,step -- g_range = 2^{begin,...,begin+k*step,...,end}
"null" -- do not grid with g
-v n : n-fold cross validation (default 5)
-svmtrain pathname : set svm executable path and name
-gnuplot {pathname | "null"} :
pathname -- set gnuplot executable path and name
"null" -- do not plot
-out {pathname | "null"} : (default dataset.out)
pathname -- set output file path and name
"null" -- do not output file
-png pathname : set graphic output file path and name (default dataset.png)
-resume [pathname] : resume the grid task using an existing output file (default pathname is dataset.out)
This is experimental. Try this option only if some parameters have been checked for the SAME data.

svm_options : additional options for svm-train""")
        sys.exit(1)

    # the last argument is the dataset; everything before it is options
    if len(sys.argv) < 2:
        exit_with_help()
    dataset_pathname = sys.argv[-1]
    options = sys.argv[1:-1]
    try:
        find_parameters(dataset_pathname, options)
    except (IOError, ValueError) as e:
        sys.stderr.write(str(e) + '\n')
        sys.stderr.write('Try "grid.py" for more information.\n')
        sys.exit(1)

+ 120
- 0
gklearn/gedlib/lib/libsvm.3.22/tools/subset.py View File

@@ -0,0 +1,120 @@
#!/usr/bin/env python

import os, sys, math, random
from collections import defaultdict

if sys.version_info[0] >= 3:
xrange = range

def exit_with_help(argv):
    """Print subset.py usage and terminate with exit status 1."""
    print("""\
Usage: {0} [options] dataset subset_size [output1] [output2]

This script randomly selects a subset of the dataset.

options:
-s method : method of selection (default 0)
0 -- stratified selection (classification only)
1 -- random selection

output1 : the subset (optional)
output2 : rest of the data (optional)
If output1 is omitted, the subset will be printed on the screen.""".format(argv[0]))
    exit(1)

def process_options(argv):
    """Parse subset.py command-line arguments.

    Returns (dataset, subset_size, method, subset_file, rest_file):
    method is 0 (stratified) or 1 (random); subset_file defaults to
    stdout and rest_file to None when the optional outputs are omitted.
    """
    argc = len(argv)
    if argc < 3:
        exit_with_help(argv)

    # defaults: stratified selection, subset to stdout, remainder dropped
    method = 0
    subset_file = sys.stdout
    rest_file = None

    pos = 1
    while pos < argc:
        if argv[pos][0] != "-":
            break
        if argv[pos] == "-s":
            pos = pos + 1
            method = int(argv[pos])
            if method not in [0, 1]:
                print("Unknown selection method {0}".format(method))
                exit_with_help(argv)
        pos = pos + 1

    dataset = argv[pos]
    subset_size = int(argv[pos + 1])
    if pos + 2 < argc:
        subset_file = open(argv[pos + 2], 'w')
    if pos + 3 < argc:
        rest_file = open(argv[pos + 3], 'w')

    return dataset, subset_size, method, subset_file, rest_file

def random_selection(dataset, subset_size):
    """Return *subset_size* sorted, distinct 0-based line indices of *dataset*."""
    total = sum(1 for _ in open(dataset, 'r'))
    return sorted(random.sample(range(total), subset_size))

def stratified_selection(dataset, subset_size):
    """Pick *subset_size* line indices so every label keeps its share.

    Exits with an error when some class cannot get at least one
    representative (e.g. regression data or too small a subset).
    """
    labels = [line.split(None, 1)[0] for line in open(dataset)]
    by_label = defaultdict(list)
    for idx, lab in enumerate(labels):
        by_label[lab].append(idx)

    total = len(labels)
    remaining = subset_size
    chosen = []

    # classes with fewer data are sampled first; otherwise
    # some rare classes may not be selected
    for lab in sorted(by_label, key=lambda k: len(by_label[k])):
        linenums = by_label[lab]
        class_size = len(linenums)
        # at least one instance per class
        take = int(min(remaining, max(1, math.ceil(class_size * (float(subset_size) / total)))))
        if take == 0:
            sys.stderr.write('''\
Error: failed to have at least one instance per class
1. You may have regression data.
2. Your classification data is unbalanced or too small.
Please use -s 1.
''')
            sys.exit(-1)
        remaining -= take
        chosen += [linenums[i] for i in random.sample(range(class_size), take)]
    return sorted(chosen)

def main(argv=sys.argv):
    """Entry point: split the dataset into a subset and optional remainder.

    Line numbers to keep come from stratified_selection() or
    random_selection(); the dataset is then replayed once, routing each
    line either to subset_file or to rest_file.
    """
    dataset, subset_size, method, subset_file, rest_file = process_options(argv)
    #uncomment the following line to fix the random seed
    #random.seed(0)
    selected_lines = []

    if method == 0:
        selected_lines = stratified_selection(dataset, subset_size)
    elif method == 1:
        selected_lines = random_selection(dataset, subset_size)

    #select instances based on selected_lines
    dataset = open(dataset, 'r')
    prev_selected_linenum = -1
    for i in xrange(len(selected_lines)):
        # copy the unselected gap before each selected line to rest_file
        for cnt in xrange(selected_lines[i] - prev_selected_linenum - 1):
            line = dataset.readline()
            if rest_file:
                rest_file.write(line)
        subset_file.write(dataset.readline())
        prev_selected_linenum = selected_lines[i]
    subset_file.close()

    if rest_file:
        # everything after the last selected line also goes to rest_file
        for line in dataset:
            rest_file.write(line)
        rest_file.close()
    dataset.close()

if __name__ == '__main__':
    main(sys.argv)


BIN
gklearn/gedlib/lib/libsvm.3.22/windows/libsvmread.mexw64 View File


BIN
gklearn/gedlib/lib/libsvm.3.22/windows/libsvmwrite.mexw64 View File


BIN
gklearn/gedlib/lib/libsvm.3.22/windows/svmpredict.mexw64 View File


BIN
gklearn/gedlib/lib/libsvm.3.22/windows/svmtrain.mexw64 View File


BIN
gklearn/gedlib/lib/nomad/libnomad.so View File


BIN
gklearn/gedlib/lib/nomad/libsgtelib.so View File


Some files were not shown because too many files changed in this diff

Loading…
Cancel
Save