diff --git a/.gitignore b/.gitignore index b38480d..4e41d57 100644 --- a/.gitignore +++ b/.gitignore @@ -18,15 +18,19 @@ notebooks/results/* notebooks/check_gm/* notebooks/test_parallel/* requirements/* -pygraph/model.py -pygraph/kernels/*_sym.py +gklearn/model.py +gklearn/kernels/*_sym.py *.npy *.eps *.dat *.pyc -preimage/* -!preimage/*.py +gklearn/preimage/* +!gklearn/preimage/*.py __pycache__ ##*# + +docs/build/* +!docs/build/latex/*.pdf +docs/log* diff --git a/README.md b/README.md index 0503a35..2d467d9 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ -# py-graph -[![Build Status](https://travis-ci.org/jajupmochi/py-graph.svg?branch=ljia)](https://travis-ci.org/jajupmochi/py-graph) -[![codecov](https://codecov.io/gh/jajupmochi/py-graph/branch/ljia/graph/badge.svg)](https://codecov.io/gh/jajupmochi/py-graph) -[![Documentation Status](https://readthedocs.org/projects/py-graph/badge/?version=ljia)](https://py-graph.readthedocs.io/en/ljia/?badge=ljia) +# graphkit-learn +[![Build Status](https://travis-ci.org/jajupmochi/graphkit-learn.svg?branch=ljia)](https://travis-ci.org/jajupmochi/graphkit-learn) +[![codecov](https://codecov.io/gh/jajupmochi/graphkit-learn/branch/ljia/graph/badge.svg)](https://codecov.io/gh/jajupmochi/graphkit-learn) +[![Documentation Status](https://readthedocs.org/projects/graphkit-learn/badge/?version=ljia)](https://graphkit-learn.readthedocs.io/en/ljia/?badge=ljia) A python package for graph kernels. @@ -20,11 +20,11 @@ A python package for graph kernels. ## How to use? -Simply clone this repository and voilà! 
Then check [`notebooks`](https://github.com/jajupmochi/py-graph/tree/ljia/notebooks) directory for demos: -* [`notebooks`](https://github.com/jajupmochi/py-graph/tree/ljia/notebooks) directory includes test codes of graph kernels based on linear patterns; -* [`notebooks/tests`](https://github.com/jajupmochi/py-graph/tree/ljia/notebooks/tests) directory includes codes that test some libraries and functions; -* [`notebooks/utils`](https://github.com/jajupmochi/py-graph/tree/ljia/notebooks/utils) directory includes some useful tools, such as a Gram matrix checker and a function to get properties of datasets; -* [`notebooks/else`](https://github.com/jajupmochi/py-graph/tree/ljia/notebooks/else) directory includes other codes that we used for experiments. +Simply clone this repository and voilà! Then check [`notebooks`](https://github.com/jajupmochi/graphkit-learn/tree/ljia/notebooks) directory for demos: +* [`notebooks`](https://github.com/jajupmochi/graphkit-learn/tree/ljia/notebooks) directory includes test codes of graph kernels based on linear patterns; +* [`notebooks/tests`](https://github.com/jajupmochi/graphkit-learn/tree/ljia/notebooks/tests) directory includes codes that test some libraries and functions; +* [`notebooks/utils`](https://github.com/jajupmochi/graphkit-learn/tree/ljia/notebooks/utils) directory includes some useful tools, such as a Gram matrix checker and a function to get properties of datasets; +* [`notebooks/else`](https://github.com/jajupmochi/graphkit-learn/tree/ljia/notebooks/else) directory includes other codes that we used for experiments. ## List of graph kernels @@ -77,6 +77,8 @@ Check this paper for detailed description of graph kernels and experimental resu Linlin Jia, Benoit Gaüzère, and Paul Honeine. Graph Kernels Based on Linear Patterns: Theoretical and Experimental Comparisons. working paper or preprint, March 2019. URL https://hal-normandie-univ.archives-ouvertes.fr/hal-02053946. 
+A comparison of performances of graph kernels on benchmark datasets can be found [here](https://graphkit-learn.readthedocs.io/en/ljia/index.html#experiments). + ## References [1] Thomas Gärtner, Peter Flach, and Stefan Wrobel. On graph kernels: Hardness results and efficient alternatives. Learning Theory and Kernel Machines, pages 129–143, 2003. diff --git a/docs/Makefile b/docs/Makefile index 298ea9e..69fe55e 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -4,8 +4,8 @@ # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build -SOURCEDIR = . -BUILDDIR = _build +SOURCEDIR = source +BUILDDIR = build # Put it first so that "make" without argument is like "make help". help: diff --git a/docs/_build/doctrees/environment.pickle b/docs/_build/doctrees/environment.pickle deleted file mode 100644 index 56cced3..0000000 Binary files a/docs/_build/doctrees/environment.pickle and /dev/null differ diff --git a/docs/_build/doctrees/index.doctree b/docs/_build/doctrees/index.doctree deleted file mode 100644 index 1579d82..0000000 Binary files a/docs/_build/doctrees/index.doctree and /dev/null differ diff --git a/docs/_build/doctrees/modules.doctree b/docs/_build/doctrees/modules.doctree deleted file mode 100644 index c46a2dc..0000000 Binary files a/docs/_build/doctrees/modules.doctree and /dev/null differ diff --git a/docs/_build/doctrees/pygraph.doctree b/docs/_build/doctrees/pygraph.doctree deleted file mode 100644 index 7c2a62b..0000000 Binary files a/docs/_build/doctrees/pygraph.doctree and /dev/null differ diff --git a/docs/_build/doctrees/pygraph.utils.doctree b/docs/_build/doctrees/pygraph.utils.doctree deleted file mode 100644 index 2c2141e..0000000 Binary files a/docs/_build/doctrees/pygraph.utils.doctree and /dev/null differ diff --git a/docs/_build/html/.buildinfo b/docs/_build/html/.buildinfo deleted file mode 100644 index 7f6bd34..0000000 --- a/docs/_build/html/.buildinfo +++ /dev/null @@ -1,4 +0,0 @@ -# Sphinx build info version 
1 -# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. -config: b72b368ba10131aed8c3edbb863096bd -tags: 645f666f9bcd5a90fca523b33c5a78b7 diff --git a/docs/_build/html/_modules/index.html b/docs/_build/html/_modules/index.html deleted file mode 100644 index a0e2ef3..0000000 --- a/docs/_build/html/_modules/index.html +++ /dev/null @@ -1,196 +0,0 @@ - - - - - - - - - - - Overview: module code — py-graph documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - -
- -
    - -
  • Docs »
  • - -
  • Overview: module code
  • - - -
  • - -
  • - -
- - -
-
- - - -
-
- -
- -
- - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_build/html/_modules/pygraph/utils/graphdataset.html b/docs/_build/html/_modules/pygraph/utils/graphdataset.html deleted file mode 100644 index f7515d7..0000000 --- a/docs/_build/html/_modules/pygraph/utils/graphdataset.html +++ /dev/null @@ -1,502 +0,0 @@ - - - - - - - - - - - pygraph.utils.graphdataset — py-graph documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - -
- -
    - -
  • Docs »
  • - -
  • Module code »
  • - -
  • pygraph.utils.graphdataset
  • - - -
  • - -
  • - -
- - -
-
-
-
- -

Source code for pygraph.utils.graphdataset

-""" Obtain all kinds of attributes of a graph dataset.
-"""
-
-
-
[docs]def get_dataset_attributes(Gn, - target=None, - attr_names=[], - node_label=None, - edge_label=None): - """Returns the structure and property information of the graph dataset Gn. - - Parameters - ---------- - Gn : List of NetworkX graph - List of graphs whose information will be returned. - target : list - The list of classification targets corresponding to Gn. Only works for - classification problems. - attr_names : list - List of strings which indicate which informations will be returned. The - possible choices includes: - 'substructures': sub-structures Gn contains, including 'linear', 'non - linear' and 'cyclic'. - 'node_labeled': whether vertices have symbolic labels. - 'edge_labeled': whether egdes have symbolic labels. - 'is_directed': whether graphs in Gn are directed. - 'dataset_size': number of graphs in Gn. - 'ave_node_num': average number of vertices of graphs in Gn. - 'min_node_num': minimum number of vertices of graphs in Gn. - 'max_node_num': maximum number of vertices of graphs in Gn. - 'ave_edge_num': average number of edges of graphs in Gn. - 'min_edge_num': minimum number of edges of graphs in Gn. - 'max_edge_num': maximum number of edges of graphs in Gn. - 'ave_node_degree': average vertex degree of graphs in Gn. - 'min_node_degree': minimum vertex degree of graphs in Gn. - 'max_node_degree': maximum vertex degree of graphs in Gn. - 'ave_fill_factor': average fill factor (number_of_edges / - (number_of_nodes ** 2)) of graphs in Gn. - 'min_fill_factor': minimum fill factor of graphs in Gn. - 'max_fill_factor': maximum fill factor of graphs in Gn. - 'node_label_num': number of symbolic vertex labels. - 'edge_label_num': number of symbolic edge labels. - 'node_attr_dim': number of dimensions of non-symbolic vertex labels. - Extracted from the 'attributes' attribute of graph nodes. - 'edge_attr_dim': number of dimensions of non-symbolic edge labels. - Extracted from the 'attributes' attribute of graph edges. 
- 'class_number': number of classes. Only available for classification - problems. - node_label : string - Node attribute used as label. The default node label is atom. Mandatory - when 'node_labeled' or 'node_label_num' is required. - edge_label : string - Edge attribute used as label. The default edge label is bond_type. - Mandatory when 'edge_labeled' or 'edge_label_num' is required. - - Return - ------ - attrs : dict - Value for each property. - """ - import networkx as nx - import numpy as np - - attrs = {} - - def get_dataset_size(Gn): - return len(Gn) - - def get_all_node_num(Gn): - return [nx.number_of_nodes(G) for G in Gn] - - def get_ave_node_num(all_node_num): - return np.mean(all_node_num) - - def get_min_node_num(all_node_num): - return np.amin(all_node_num) - - def get_max_node_num(all_node_num): - return np.amax(all_node_num) - - def get_all_edge_num(Gn): - return [nx.number_of_edges(G) for G in Gn] - - def get_ave_edge_num(all_edge_num): - return np.mean(all_edge_num) - - def get_min_edge_num(all_edge_num): - return np.amin(all_edge_num) - - def get_max_edge_num(all_edge_num): - return np.amax(all_edge_num) - - def is_node_labeled(Gn): - return False if node_label is None else True - - def get_node_label_num(Gn): - nl = set() - for G in Gn: - nl = nl | set(nx.get_node_attributes(G, node_label).values()) - return len(nl) - - def is_edge_labeled(Gn): - return False if edge_label is None else True - - def get_edge_label_num(Gn): - el = set() - for G in Gn: - el = el | set(nx.get_edge_attributes(G, edge_label).values()) - return len(el) - - def is_directed(Gn): - return nx.is_directed(Gn[0]) - - def get_ave_node_degree(Gn): - return np.mean([np.mean(list(dict(G.degree()).values())) for G in Gn]) - - def get_max_node_degree(Gn): - return np.amax([np.mean(list(dict(G.degree()).values())) for G in Gn]) - - def get_min_node_degree(Gn): - return np.amin([np.mean(list(dict(G.degree()).values())) for G in Gn]) - - # get fill factor, the number of non-zero 
entries in the adjacency matrix. - def get_ave_fill_factor(Gn): - return np.mean([nx.number_of_edges(G) / (nx.number_of_nodes(G) - * nx.number_of_nodes(G)) for G in Gn]) - - def get_max_fill_factor(Gn): - return np.amax([nx.number_of_edges(G) / (nx.number_of_nodes(G) - * nx.number_of_nodes(G)) for G in Gn]) - - def get_min_fill_factor(Gn): - return np.amin([nx.number_of_edges(G) / (nx.number_of_nodes(G) - * nx.number_of_nodes(G)) for G in Gn]) - - def get_substructures(Gn): - subs = set() - for G in Gn: - degrees = list(dict(G.degree()).values()) - if any(i == 2 for i in degrees): - subs.add('linear') - if np.amax(degrees) >= 3: - subs.add('non linear') - if 'linear' in subs and 'non linear' in subs: - break - - if is_directed(Gn): - for G in Gn: - if len(list(nx.find_cycle(G))) > 0: - subs.add('cyclic') - break - # else: - # # @todo: this method does not work for big graph with large amount of edges like D&D, try a better way. - # upper = np.amin([nx.number_of_edges(G) for G in Gn]) * 2 + 10 - # for G in Gn: - # if (nx.number_of_edges(G) < upper): - # cyc = list(nx.simple_cycles(G.to_directed())) - # if any(len(i) > 2 for i in cyc): - # subs.add('cyclic') - # break - # if 'cyclic' not in subs: - # for G in Gn: - # cyc = list(nx.simple_cycles(G.to_directed())) - # if any(len(i) > 2 for i in cyc): - # subs.add('cyclic') - # break - - return subs - - def get_class_num(target): - return len(set(target)) - - def get_node_attr_dim(Gn): - for G in Gn: - for n in G.nodes(data=True): - if 'attributes' in n[1]: - return len(n[1]['attributes']) - return 0 - - def get_edge_attr_dim(Gn): - for G in Gn: - if nx.number_of_edges(G) > 0: - for e in G.edges(data=True): - if 'attributes' in e[2]: - return len(e[2]['attributes']) - return 0 - - if attr_names == []: - attr_names = [ - 'substructures', - 'node_labeled', - 'edge_labeled', - 'is_directed', - 'dataset_size', - 'ave_node_num', - 'min_node_num', - 'max_node_num', - 'ave_edge_num', - 'min_edge_num', - 'max_edge_num', - 
'ave_node_degree', - 'min_node_degree', - 'max_node_degree', - 'ave_fill_factor', - 'min_fill_factor', - 'max_fill_factor', - 'node_label_num', - 'edge_label_num', - 'node_attr_dim', - 'edge_attr_dim', - 'class_number', - ] - - # dataset size - if 'dataset_size' in attr_names: - - attrs.update({'dataset_size': get_dataset_size(Gn)}) - - # graph node number - if any(i in attr_names - for i in ['ave_node_num', 'min_node_num', 'max_node_num']): - - all_node_num = get_all_node_num(Gn) - - if 'ave_node_num' in attr_names: - - attrs.update({'ave_node_num': get_ave_node_num(all_node_num)}) - - if 'min_node_num' in attr_names: - - attrs.update({'min_node_num': get_min_node_num(all_node_num)}) - - if 'max_node_num' in attr_names: - - attrs.update({'max_node_num': get_max_node_num(all_node_num)}) - - # graph edge number - if any(i in attr_names for i in - ['ave_edge_num', 'min_edge_num', 'max_edge_num']): - - all_edge_num = get_all_edge_num(Gn) - - if 'ave_edge_num' in attr_names: - - attrs.update({'ave_edge_num': get_ave_edge_num(all_edge_num)}) - - if 'max_edge_num' in attr_names: - - attrs.update({'max_edge_num': get_max_edge_num(all_edge_num)}) - - if 'min_edge_num' in attr_names: - - attrs.update({'min_edge_num': get_min_edge_num(all_edge_num)}) - - # label number - if any(i in attr_names for i in ['node_labeled', 'node_label_num']): - is_nl = is_node_labeled(Gn) - node_label_num = get_node_label_num(Gn) - - if 'node_labeled' in attr_names: - # graphs are considered node unlabeled if all nodes have the same label. - attrs.update({'node_labeled': is_nl if node_label_num > 1 else False}) - - if 'node_label_num' in attr_names: - attrs.update({'node_label_num': node_label_num}) - - if any(i in attr_names for i in ['edge_labeled', 'edge_label_num']): - is_el = is_edge_labeled(Gn) - edge_label_num = get_edge_label_num(Gn) - - if 'edge_labeled' in attr_names: - # graphs are considered edge unlabeled if all edges have the same label. 
- attrs.update({'edge_labeled': is_el if edge_label_num > 1 else False}) - - if 'edge_label_num' in attr_names: - attrs.update({'edge_label_num': edge_label_num}) - - if 'is_directed' in attr_names: - attrs.update({'is_directed': is_directed(Gn)}) - - if 'ave_node_degree' in attr_names: - attrs.update({'ave_node_degree': get_ave_node_degree(Gn)}) - - if 'max_node_degree' in attr_names: - attrs.update({'max_node_degree': get_max_node_degree(Gn)}) - - if 'min_node_degree' in attr_names: - attrs.update({'min_node_degree': get_min_node_degree(Gn)}) - - if 'ave_fill_factor' in attr_names: - attrs.update({'ave_fill_factor': get_ave_fill_factor(Gn)}) - - if 'max_fill_factor' in attr_names: - attrs.update({'max_fill_factor': get_max_fill_factor(Gn)}) - - if 'min_fill_factor' in attr_names: - attrs.update({'min_fill_factor': get_min_fill_factor(Gn)}) - - if 'substructures' in attr_names: - attrs.update({'substructures': get_substructures(Gn)}) - - if 'class_number' in attr_names: - attrs.update({'class_number': get_class_num(target)}) - - if 'node_attr_dim' in attr_names: - attrs['node_attr_dim'] = get_node_attr_dim(Gn) - - if 'edge_attr_dim' in attr_names: - attrs['edge_attr_dim'] = get_edge_attr_dim(Gn) - - from collections import OrderedDict - return OrderedDict( - sorted(attrs.items(), key=lambda i: attr_names.index(i[0])))
-
- -
- -
- - -
-
- -
- -
- - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_build/html/_modules/pygraph/utils/graphfiles.html b/docs/_build/html/_modules/pygraph/utils/graphfiles.html deleted file mode 100644 index 08c1fb7..0000000 --- a/docs/_build/html/_modules/pygraph/utils/graphfiles.html +++ /dev/null @@ -1,811 +0,0 @@ - - - - - - - - - - - pygraph.utils.graphfiles — py-graph documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - -
- -
    - -
  • Docs »
  • - -
  • Module code »
  • - -
  • pygraph.utils.graphfiles
  • - - -
  • - -
  • - -
- - -
-
-
-
- -

Source code for pygraph.utils.graphfiles

-""" Utilities function to manage graph files
-"""
-from os.path import dirname, splitext
-
-
[docs]def loadCT(filename): - """load data from a Chemical Table (.ct) file. - - Notes - ------ - a typical example of data in .ct is like this: - - 3 2 <- number of nodes and edges - 0.0000 0.0000 0.0000 C <- each line describes a node (x,y,z + label) - 0.0000 0.0000 0.0000 C - 0.0000 0.0000 0.0000 O - 1 3 1 1 <- each line describes an edge : to, from, bond type, bond stereo - 2 3 1 1 - - Check https://www.google.com/url?sa=t&rct=j&q=&esrc=s&source=web&cd=10&ved=2ahUKEwivhaSdjsTlAhVhx4UKHczHA8gQFjAJegQIARAC&url=https%3A%2F%2Fwww.daylight.com%2Fmeetings%2Fmug05%2FKappler%2Fctfile.pdf&usg=AOvVaw1cDNrrmMClkFPqodlF2inS - for detailed format discription. - """ - import networkx as nx - from os.path import basename - g = nx.Graph() - with open(filename) as f: - content = f.read().splitlines() - g = nx.Graph( - name = str(content[0]), - filename = basename(filename)) # set name of the graph - tmp = content[1].split(" ") - if tmp[0] == '': - nb_nodes = int(tmp[1]) # number of the nodes - nb_edges = int(tmp[2]) # number of the edges - else: - nb_nodes = int(tmp[0]) - nb_edges = int(tmp[1]) - # patch for compatibility : label will be removed later - for i in range(0, nb_nodes): - tmp = content[i + 2].split(" ") - tmp = [x for x in tmp if x != ''] - g.add_node(i, atom=tmp[3].strip(), - label=[item.strip() for item in tmp[3:]], - attributes=[item.strip() for item in tmp[0:3]]) - for i in range(0, nb_edges): - tmp = content[i + g.number_of_nodes() + 2].split(" ") - tmp = [x for x in tmp if x != ''] - g.add_edge(int(tmp[0]) - 1, int(tmp[1]) - 1, - bond_type=tmp[2].strip(), - label=[item.strip() for item in tmp[2:]]) - return g
- - -
[docs]def loadGXL(filename): - from os.path import basename - import networkx as nx - import xml.etree.ElementTree as ET - - tree = ET.parse(filename) - root = tree.getroot() - index = 0 - g = nx.Graph(filename=basename(filename), name=root[0].attrib['id']) - dic = {} # used to retrieve incident nodes of edges - for node in root.iter('node'): - dic[node.attrib['id']] = index - labels = {} - for attr in node.iter('attr'): - labels[attr.attrib['name']] = attr[0].text - if 'chem' in labels: - labels['label'] = labels['chem'] - labels['atom'] = labels['chem'] - g.add_node(index, **labels) - index += 1 - - for edge in root.iter('edge'): - labels = {} - for attr in edge.iter('attr'): - labels[attr.attrib['name']] = attr[0].text - if 'valence' in labels: - labels['label'] = labels['valence'] - labels['bond_type'] = labels['valence'] - g.add_edge(dic[edge.attrib['from']], dic[edge.attrib['to']], **labels) - return g
- - -
[docs]def saveGXL(graph, filename, method='benoit'): - if method == 'benoit': - import xml.etree.ElementTree as ET - root_node = ET.Element('gxl') - attr = dict() - attr['id'] = str(graph.graph['name']) - attr['edgeids'] = 'true' - attr['edgemode'] = 'undirected' - graph_node = ET.SubElement(root_node, 'graph', attrib=attr) - - for v in graph: - current_node = ET.SubElement(graph_node, 'node', attrib={'id': str(v)}) - for attr in graph.nodes[v].keys(): - cur_attr = ET.SubElement( - current_node, 'attr', attrib={'name': attr}) - cur_value = ET.SubElement(cur_attr, - graph.nodes[v][attr].__class__.__name__) - cur_value.text = graph.nodes[v][attr] - - for v1 in graph: - for v2 in graph[v1]: - if (v1 < v2): # Non oriented graphs - cur_edge = ET.SubElement( - graph_node, - 'edge', - attrib={ - 'from': str(v1), - 'to': str(v2) - }) - for attr in graph[v1][v2].keys(): - cur_attr = ET.SubElement( - cur_edge, 'attr', attrib={'name': attr}) - cur_value = ET.SubElement( - cur_attr, graph[v1][v2][attr].__class__.__name__) - cur_value.text = str(graph[v1][v2][attr]) - - tree = ET.ElementTree(root_node) - tree.write(filename) - elif method == 'gedlib': - # reference: https://github.com/dbblumenthal/gedlib/blob/master/data/generate_molecules.py#L22 -# pass - gxl_file = open(filename, 'w') - gxl_file.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n") - gxl_file.write("<!DOCTYPE gxl SYSTEM \"http://www.gupro.de/GXL/gxl-1.0.dtd\">\n") - gxl_file.write("<gxl xmlns:xlink=\"http://www.w3.org/1999/xlink\">\n") - gxl_file.write("<graph id=\"" + str(graph.graph['name']) + "\" edgeids=\"true\" edgemode=\"undirected\">\n") - for v, attrs in graph.nodes(data=True): - gxl_file.write("<node id=\"_" + str(v) + "\">") - gxl_file.write("<attr name=\"" + "chem" + "\"><int>" + str(attrs['chem']) + "</int></attr>") - gxl_file.write("</node>\n") - for v1, v2, attrs in graph.edges(data=True): - gxl_file.write("<edge from=\"_" + str(v1) + "\" to=\"_" + str(v2) + "\">") - gxl_file.write("<attr 
name=\"valence\"><int>" + str(attrs['valence']) + "</int></attr>") -# gxl_file.write("<attr name=\"valence\"><int>" + "1" + "</int></attr>") - gxl_file.write("</edge>\n") - gxl_file.write("</graph>\n") - gxl_file.write("</gxl>") - gxl_file.close() - elif method == 'gedlib-letter': - # reference: https://github.com/dbblumenthal/gedlib/blob/master/data/generate_molecules.py#L22 - # and https://github.com/dbblumenthal/gedlib/blob/master/data/datasets/Letter/HIGH/AP1_0000.gxl - gxl_file = open(filename, 'w') - gxl_file.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n") - gxl_file.write("<!DOCTYPE gxl SYSTEM \"http://www.gupro.de/GXL/gxl-1.0.dtd\">\n") - gxl_file.write("<gxl xmlns:xlink=\"http://www.w3.org/1999/xlink\">\n") - gxl_file.write("<graph id=\"" + str(graph.graph['name']) + "\" edgeids=\"false\" edgemode=\"undirected\">\n") - for v, attrs in graph.nodes(data=True): - gxl_file.write("<node id=\"_" + str(v) + "\">") - gxl_file.write("<attr name=\"x\"><float>" + str(attrs['attributes'][0]) + "</float></attr>") - gxl_file.write("<attr name=\"y\"><float>" + str(attrs['attributes'][1]) + "</float></attr>") - gxl_file.write("</node>\n") - for v1, v2, attrs in graph.edges(data=True): - gxl_file.write("<edge from=\"_" + str(v1) + "\" to=\"_" + str(v2) + "\"/>\n") - gxl_file.write("</graph>\n") - gxl_file.write("</gxl>") - gxl_file.close()
- - -
[docs]def loadSDF(filename): - """load data from structured data file (.sdf file). - - Notes - ------ - A SDF file contains a group of molecules, represented in the similar way as in MOL format. - Check http://www.nonlinear.com/progenesis/sdf-studio/v0.9/faq/sdf-file-format-guidance.aspx, 2018 for detailed structure. - """ - import networkx as nx - from os.path import basename - from tqdm import tqdm - import sys - data = [] - with open(filename) as f: - content = f.read().splitlines() - index = 0 - pbar = tqdm(total=len(content) + 1, desc='load SDF', file=sys.stdout) - while index < len(content): - index_old = index - - g = nx.Graph(name=content[index].strip()) # set name of the graph - - tmp = content[index + 3] - nb_nodes = int(tmp[:3]) # number of the nodes - nb_edges = int(tmp[3:6]) # number of the edges - - for i in range(0, nb_nodes): - tmp = content[i + index + 4] - g.add_node(i, atom=tmp[31:34].strip()) - - for i in range(0, nb_edges): - tmp = content[i + index + g.number_of_nodes() + 4] - tmp = [tmp[i:i + 3] for i in range(0, len(tmp), 3)] - g.add_edge( - int(tmp[0]) - 1, int(tmp[1]) - 1, bond_type=tmp[2].strip()) - - data.append(g) - - index += 4 + g.number_of_nodes() + g.number_of_edges() - while content[index].strip() != '$$$$': # seperator - index += 1 - index += 1 - - pbar.update(index - index_old) - pbar.update(1) - pbar.close() - - return data
- - -
[docs]def loadMAT(filename, extra_params): - """Load graph data from a MATLAB (up to version 7.1) .mat file. - - Notes - ------ - A MAT file contains a struct array containing graphs, and a column vector lx containing a class label for each graph. - Check README in downloadable file in http://mlcb.is.tuebingen.mpg.de/Mitarbeiter/Nino/WL/, 2018 for detailed structure. - """ - from scipy.io import loadmat - import numpy as np - import networkx as nx - data = [] - content = loadmat(filename) - order = extra_params['am_sp_al_nl_el'] - # print(content) - # print('----') - for key, value in content.items(): - if key[0] == 'l': # class label - y = np.transpose(value)[0].tolist() - # print(y) - elif key[0] != '_': - # print(value[0][0][0]) - # print() - # print(value[0][0][1]) - # print() - # print(value[0][0][2]) - # print() - # if len(value[0][0]) > 3: - # print(value[0][0][3]) - # print('----') - # if adjacency matrix is not compressed / edge label exists - if order[1] == 0: - for i, item in enumerate(value[0]): - # print(item) - # print('------') - g = nx.Graph(name=i) # set name of the graph - nl = np.transpose(item[order[3]][0][0][0]) # node label - # print(item[order[3]]) - # print() - for index, label in enumerate(nl[0]): - g.add_node(index, atom=str(label)) - el = item[order[4]][0][0][0] # edge label - for edge in el: - g.add_edge( - edge[0] - 1, edge[1] - 1, bond_type=str(edge[2])) - data.append(g) - else: - from scipy.sparse import csc_matrix - for i, item in enumerate(value[0]): - # print(item) - # print('------') - g = nx.Graph(name=i) # set name of the graph - nl = np.transpose(item[order[3]][0][0][0]) # node label - # print(nl) - # print() - for index, label in enumerate(nl[0]): - g.add_node(index, atom=str(label)) - sam = item[order[0]] # sparse adjacency matrix - index_no0 = sam.nonzero() - for col, row in zip(index_no0[0], index_no0[1]): - # print(col) - # print(row) - g.add_edge(col, row) - data.append(g) - # print(g.edges(data=True)) - return data, y
- - -
[docs]def loadTXT(dirname_dataset): - """Load graph data from a .txt file. - - Notes - ------ - The graph data is loaded from separate files. - Check README in downloadable file http://tiny.cc/PK_MLJ_data, 2018 for detailed structure. - """ - import numpy as np - import networkx as nx - from os import listdir - from os.path import dirname - - # load data file names - for name in listdir(dirname_dataset): - if '_A' in name: - fam = dirname_dataset + '/' + name - elif '_graph_indicator' in name: - fgi = dirname_dataset + '/' + name - elif '_graph_labels' in name: - fgl = dirname_dataset + '/' + name - elif '_node_labels' in name: - fnl = dirname_dataset + '/' + name - elif '_edge_labels' in name: - fel = dirname_dataset + '/' + name - elif '_edge_attributes' in name: - fea = dirname_dataset + '/' + name - elif '_node_attributes' in name: - fna = dirname_dataset + '/' + name - elif '_graph_attributes' in name: - fga = dirname_dataset + '/' + name - # this is supposed to be the node attrs, make sure to put this as the last 'elif' - elif '_attributes' in name: - fna = dirname_dataset + '/' + name - - content_gi = open(fgi).read().splitlines() # graph indicator - content_am = open(fam).read().splitlines() # adjacency matrix - content_gl = open(fgl).read().splitlines() # lass labels - - # create graphs and add nodes - data = [nx.Graph(name=i) for i in range(0, len(content_gl))] - if 'fnl' in locals(): - content_nl = open(fnl).read().splitlines() # node labels - for i, line in enumerate(content_gi): - # transfer to int first in case of unexpected blanks - data[int(line) - 1].add_node(i, atom=str(int(content_nl[i]))) - else: - for i, line in enumerate(content_gi): - data[int(line) - 1].add_node(i) - - # add edges - for line in content_am: - tmp = line.split(',') - n1 = int(tmp[0]) - 1 - n2 = int(tmp[1]) - 1 - # ignore edge weight here. 
- g = int(content_gi[n1]) - 1 - data[g].add_edge(n1, n2) - - # add edge labels - if 'fel' in locals(): - content_el = open(fel).read().splitlines() - for index, line in enumerate(content_el): - label = line.strip() - n = [int(i) - 1 for i in content_am[index].split(',')] - g = int(content_gi[n[0]]) - 1 - data[g].edges[n[0], n[1]]['bond_type'] = label - - # add node attributes - if 'fna' in locals(): - content_na = open(fna).read().splitlines() - for i, line in enumerate(content_na): - attrs = [i.strip() for i in line.split(',')] - g = int(content_gi[i]) - 1 - data[g].nodes[i]['attributes'] = attrs - - # add edge attributes - if 'fea' in locals(): - content_ea = open(fea).read().splitlines() - for index, line in enumerate(content_ea): - attrs = [i.strip() for i in line.split(',')] - n = [int(i) - 1 for i in content_am[index].split(',')] - g = int(content_gi[n[0]]) - 1 - data[g].edges[n[0], n[1]]['attributes'] = attrs - - # load y - y = [int(i) for i in content_gl] - - return data, y
- - -
[docs]def loadDataset(filename, filename_y=None, extra_params=None): - """Read graph data from filename and load them as NetworkX graphs. - - Parameters - ---------- - filename : string - The name of the file from where the dataset is read. - filename_y : string - The name of file of the targets corresponding to graphs. - extra_params : dict - Extra parameters only designated to '.mat' format. - - Return - ------ - data : List of NetworkX graph. - y : List - Targets corresponding to graphs. - - Notes - ----- - This function supports following graph dataset formats: - 'ds': load data from .ds file. See comments of function loadFromDS for a example. - 'cxl': load data from Graph eXchange Language file (.cxl file). See - http://www.gupro.de/GXL/Introduction/background.html, 2019 for detail. - 'sdf': load data from structured data file (.sdf file). See - http://www.nonlinear.com/progenesis/sdf-studio/v0.9/faq/sdf-file-format-guidance.aspx, - 2018 for details. - 'mat': Load graph data from a MATLAB (up to version 7.1) .mat file. See - README in downloadable file in http://mlcb.is.tuebingen.mpg.de/Mitarbeiter/Nino/WL/, - 2018 for details. - 'txt': Load graph data from a special .txt file. See - https://ls11-www.cs.tu-dortmund.de/staff/morris/graphkerneldatasets, - 2019 for details. Note here filename is the name of either .txt file in - the dataset directory. 
- """ - extension = splitext(filename)[1][1:] - if extension == "ds": - data, y = loadFromDS(filename, filename_y) - elif extension == "cxl": - import xml.etree.ElementTree as ET - - dirname_dataset = dirname(filename) - tree = ET.parse(filename) - root = tree.getroot() - data = [] - y = [] - for graph in root.iter('graph'): - mol_filename = graph.attrib['file'] - mol_class = graph.attrib['class'] - data.append(loadGXL(dirname_dataset + '/' + mol_filename)) - y.append(mol_class) - elif extension == 'xml': - data, y = loadFromXML(filename, extra_params) - elif extension == "sdf": - import numpy as np - from tqdm import tqdm - import sys - - data = loadSDF(filename) - - y_raw = open(filename_y).read().splitlines() - y_raw.pop(0) - tmp0 = [] - tmp1 = [] - for i in range(0, len(y_raw)): - tmp = y_raw[i].split(',') - tmp0.append(tmp[0]) - tmp1.append(tmp[1].strip()) - - y = [] - for i in tqdm(range(0, len(data)), desc='ajust data', file=sys.stdout): - try: - y.append(tmp1[tmp0.index(data[i].name)].strip()) - except ValueError: # if data[i].name not in tmp0 - data[i] = [] - data = list(filter(lambda a: a != [], data)) - elif extension == "mat": - data, y = loadMAT(filename, extra_params) - elif extension == 'txt': - dirname_dataset = dirname(filename) - data, y = loadTXT(dirname_dataset) - # print(len(y)) - # print(y) - # print(data[0].nodes(data=True)) - # print('----') - # print(data[0].edges(data=True)) - # for g in data: - # print(g.nodes(data=True)) - # print('----') - # print(g.edges(data=True)) - - return data, y
- - -
[docs]def loadFromXML(filename, extra_params): - import xml.etree.ElementTree as ET - - if extra_params: - dirname_dataset = extra_params - else: - dirname_dataset = dirname(filename) - tree = ET.parse(filename) - root = tree.getroot() - data = [] - y = [] - for graph in root.iter('graph'): - mol_filename = graph.attrib['file'] - mol_class = graph.attrib['class'] - data.append(loadGXL(dirname_dataset + '/' + mol_filename)) - y.append(mol_class) - - return data, y
- - -
[docs]def loadFromDS(filename, filename_y): - """Load data from .ds file. - Possible graph formats include: - '.ct': see function loadCT for detail. - '.gxl': see dunction loadGXL for detail. - Note these graph formats are checked automatically by the extensions of - graph files. - """ - dirname_dataset = dirname(filename) - data = [] - y = [] - content = open(filename).read().splitlines() - extension = splitext(content[0].split(' ')[0])[1][1:] - if filename_y is None or filename_y == '': - if extension == 'ct': - for i in range(0, len(content)): - tmp = content[i].split(' ') - # remove the '#'s in file names - data.append( - loadCT(dirname_dataset + '/' + tmp[0].replace('#', '', 1))) - y.append(float(tmp[1])) - elif extension == 'gxl': - for i in range(0, len(content)): - tmp = content[i].split(' ') - # remove the '#'s in file names - data.append( - loadGXL(dirname_dataset + '/' + tmp[0].replace('#', '', 1))) - y.append(float(tmp[1])) - else: # y in a seperate file - if extension == 'ct': - for i in range(0, len(content)): - tmp = content[i] - # remove the '#'s in file names - data.append( - loadCT(dirname_dataset + '/' + tmp.replace('#', '', 1))) - elif extension == 'gxl': - for i in range(0, len(content)): - tmp = content[i] - # remove the '#'s in file names - data.append( - loadGXL(dirname_dataset + '/' + tmp.replace('#', '', 1))) - - content_y = open(filename_y).read().splitlines() - # assume entries in filename and filename_y have the same order. - for item in content_y: - tmp = item.split(' ') - # assume the 3rd entry in a line is y (for Alkane dataset) - y.append(float(tmp[2])) - - return data, y
- - -
[docs]def saveDataset(Gn, y, gformat='gxl', group=None, filename='gfile', xparams=None): - """Save list of graphs. - """ - import os - dirname_ds = os.path.dirname(filename) - if dirname_ds != '': - dirname_ds += '/' - if not os.path.exists(dirname_ds) : - os.makedirs(dirname_ds) - - if 'graph_dir' in xparams: - graph_dir = xparams['graph_dir'] + '/' - if not os.path.exists(graph_dir): - os.makedirs(graph_dir) - else: - graph_dir = dirname_ds - - if group == 'xml' and gformat == 'gxl': - with open(filename + '.xml', 'w') as fgroup: - fgroup.write("<?xml version=\"1.0\"?>") - fgroup.write("\n<!DOCTYPE GraphCollection SYSTEM \"http://www.inf.unibz.it/~blumenthal/dtd/GraphCollection.dtd\">") - fgroup.write("\n<GraphCollection>") - for idx, g in enumerate(Gn): - fname_tmp = "graph" + str(idx) + ".gxl" - saveGXL(g, graph_dir + fname_tmp, method=xparams['method']) - fgroup.write("\n\t<graph file=\"" + fname_tmp + "\" class=\"" + str(y[idx]) + "\"/>") - fgroup.write("\n</GraphCollection>") - fgroup.close()
- - -if __name__ == '__main__': -# ### Load dataset from .ds file. -# # .ct files. -# ds = {'name': 'Alkane', 'dataset': '../../datasets/Alkane/dataset.ds', -# 'dataset_y': '../../datasets/Alkane/dataset_boiling_point_names.txt'} -# Gn, y = loadDataset(ds['dataset'], filename_y=ds['dataset_y']) -## ds = {'name': 'Acyclic', 'dataset': '../../datasets/acyclic/dataset_bps.ds'} # node symb -## Gn, y = loadDataset(ds['dataset']) -## ds = {'name': 'MAO', 'dataset': '../../datasets/MAO/dataset.ds'} # node/edge symb -## Gn, y = loadDataset(ds['dataset']) -## ds = {'name': 'PAH', 'dataset': '../../datasets/PAH/dataset.ds'} # unlabeled -## Gn, y = loadDataset(ds['dataset']) -# print(Gn[1].nodes(data=True)) -# print(Gn[1].edges(data=True)) -# print(y[1]) - -# # .gxl file. -# ds = {'name': 'monoterpenoides', -# 'dataset': '../../datasets/monoterpenoides/dataset_10+.ds'} # node/edge symb -# Gn, y = loadDataset(ds['dataset']) -# print(Gn[1].nodes(data=True)) -# print(Gn[1].edges(data=True)) -# print(y[1]) - - ### Convert graph from one format to another. - # .gxl file. - import networkx as nx - ds = {'name': 'monoterpenoides', - 'dataset': '../../datasets/monoterpenoides/dataset_10+.ds'} # node/edge symb - Gn, y = loadDataset(ds['dataset']) - y = [int(i) for i in y] - print(Gn[1].nodes(data=True)) - print(Gn[1].edges(data=True)) - print(y[1]) - # Convert a graph to the proper NetworkX format that can be recognized by library gedlib. 
- Gn_new = [] - for G in Gn: - G_new = nx.Graph() - for nd, attrs in G.nodes(data=True): - G_new.add_node(str(nd), chem=attrs['atom']) - for nd1, nd2, attrs in G.edges(data=True): - G_new.add_edge(str(nd1), str(nd2), valence=attrs['bond_type']) -# G_new.add_edge(str(nd1), str(nd2)) - Gn_new.append(G_new) - print(Gn_new[1].nodes(data=True)) - print(Gn_new[1].edges(data=True)) - print(Gn_new[1]) - filename = '/media/ljia/DATA/research-repo/codes/others/gedlib/tests_linlin/generated_datsets/monoterpenoides/gxl/monoterpenoides' - xparams = {'method': 'gedlib'} - saveDataset(Gn, y, gformat='gxl', group='xml', filename=filename, xparams=xparams) - -# ds = {'name': 'MUTAG', 'dataset': '../../datasets/MUTAG/MUTAG.mat', -# 'extra_params': {'am_sp_al_nl_el': [0, 0, 3, 1, 2]}} # node/edge symb -# Gn, y = loadDataset(ds['dataset'], extra_params=ds['extra_params']) -# saveDataset(Gn, y, group='xml', filename='temp/temp') -
- -
- -
- - -
-
- -
- -
- - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_build/html/_modules/pygraph/utils/isNotebook.html b/docs/_build/html/_modules/pygraph/utils/isNotebook.html deleted file mode 100644 index ebc4a18..0000000 --- a/docs/_build/html/_modules/pygraph/utils/isNotebook.html +++ /dev/null @@ -1,205 +0,0 @@ - - - - - - - - - - - pygraph.utils.isNotebook — py-graph documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - -
- -
    - -
  • Docs »
  • - -
  • Module code »
  • - -
  • pygraph.utils.isNotebook
  • - - -
  • - -
  • - -
- - -
-
-
-
- -

Source code for pygraph.utils.isNotebook

-""" Functions for python system.
-"""
-
-
[docs]def isNotebook(): - """check if code is executed in the IPython notebook. - """ - try: - shell = get_ipython().__class__.__name__ - if shell == 'ZMQInteractiveShell': - return True # Jupyter notebook or qtconsole - elif shell == 'TerminalInteractiveShell': - return False # Terminal running IPython - else: - return False # Other type (?) - except NameError: - return False # Probably standard Python interpreter
-
- -
- -
- - -
-
- -
- -
- - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_build/html/_modules/pygraph/utils/kernels.html b/docs/_build/html/_modules/pygraph/utils/kernels.html deleted file mode 100644 index 487f6bf..0000000 --- a/docs/_build/html/_modules/pygraph/utils/kernels.html +++ /dev/null @@ -1,347 +0,0 @@ - - - - - - - - - - - pygraph.utils.kernels — py-graph documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - -
- - - - -
-
-
-
- -

Source code for pygraph.utils.kernels

-"""Those who are not graph kernels. We can be kernels for nodes or edges!
-These kernels are defined between pairs of vectors.
-"""
-import numpy as np
-
-
[docs]def deltakernel(x, y): - """Delta kernel. Return 1 if x == y, 0 otherwise. - - Parameters - ---------- - x, y : any - Two parts to compare. - - Return - ------ - kernel : integer - Delta kernel. - - References - ---------- - [1] H. Kashima, K. Tsuda, and A. Inokuchi. Marginalized kernels between - labeled graphs. In Proceedings of the 20th International Conference on - Machine Learning, Washington, DC, United States, 2003. - """ - return x == y #(1 if condition else 0)
- - -
[docs]def gaussiankernel(x, y, gamma=None): - """Gaussian kernel. - Compute the rbf (gaussian) kernel between x and y: - - K(x, y) = exp(-gamma ||x-y||^2). - - Read more in the :ref:`User Guide <rbf_kernel>`. - - Parameters - ---------- - x, y : array - - gamma : float, default None - If None, defaults to 1.0 / n_features - - Returns - ------- - kernel : float - """ - if gamma is None: - gamma = 1.0 / len(x) - - xt = np.array([float(itm) for itm in x]) - yt = np.array([float(itm) for itm in y]) - kernel = xt - yt - kernel = kernel ** 2 - kernel = np.sum(kernel) - kernel *= -gamma - kernel = np.exp(kernel) - return kernel
- - -
[docs]def polynomialkernel(x, y, d=1, c=0): - """Polynomial kernel. - Compute the polynomial kernel between x and y: - - K(x, y) = <x, y> ^d + c. - - Parameters - ---------- - x, y : array - - d : integer, default 1 - - c : float, default 0 - - Returns - ------- - kernel : float - """ - return np.dot(x, y) ** d + c
- - -
[docs]def linearkernel(x, y): - """Polynomial kernel. - Compute the polynomial kernel between x and y: - - K(x, y) = <x, y>. - - Parameters - ---------- - x, y : array - - d : integer, default 1 - - c : float, default 0 - - Returns - ------- - kernel : float - """ - return np.dot(x, y)
- - -
[docs]def kernelsum(k1, k2, d11, d12, d21=None, d22=None, lamda1=1, lamda2=1): - """Sum of a pair of kernels. - - k = lamda1 * k1(d11, d12) + lamda2 * k2(d21, d22) - - Parameters - ---------- - k1, k2 : function - A pair of kernel functions. - d11, d12: - Inputs of k1. If d21 or d22 is None, apply d11, d12 to both k1 and k2. - d21, d22: - Inputs of k2. - lamda1, lamda2: float - Coefficients of the product. - - Return - ------ - kernel : integer - - """ - if d21 == None or d22 == None: - kernel = lamda1 * k1(d11, d12) + lamda2 * k2(d11, d12) - else: - kernel = lamda1 * k1(d11, d12) + lamda2 * k2(d21, d22) - return kernel
- - -
[docs]def kernelproduct(k1, k2, d11, d12, d21=None, d22=None, lamda=1): - """Product of a pair of kernels. - - k = lamda * k1(d11, d12) * k2(d21, d22) - - Parameters - ---------- - k1, k2 : function - A pair of kernel functions. - d11, d12: - Inputs of k1. If d21 or d22 is None, apply d11, d12 to both k1 and k2. - d21, d22: - Inputs of k2. - lamda: float - Coefficient of the product. - - Return - ------ - kernel : integer - """ - if d21 == None or d22 == None: - kernel = lamda * k1(d11, d12) * k2(d11, d12) - else: - kernel = lamda * k1(d11, d12) * k2(d21, d22) - return kernel
- - -if __name__ == '__main__': - o = polynomialkernel([1, 2], [3, 4], 2, 3) -
- -
- -
- - -
-
- -
- -
- - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_build/html/_modules/pygraph/utils/logger2file.html b/docs/_build/html/_modules/pygraph/utils/logger2file.html deleted file mode 100644 index ce45a0d..0000000 --- a/docs/_build/html/_modules/pygraph/utils/logger2file.html +++ /dev/null @@ -1,216 +0,0 @@ - - - - - - - - - - - pygraph.utils.logger2file — py-graph documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - -
- -
    - -
  • Docs »
  • - -
  • Module code »
  • - -
  • pygraph.utils.logger2file
  • - - -
  • - -
  • - -
- - -
-
-
-
- -

Source code for pygraph.utils.logger2file

-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Created on Fri Nov  8 14:21:25 2019
-
-@author: ljia
-"""
-
-import sys
-import time
-
-
[docs]class Logger(object): - def __init__(self): - self.terminal = sys.stdout - self.log = open("log." + str(time.time()) + ".log", "a") - -
[docs] def write(self, message): - self.terminal.write(message) - self.log.write(message)
- -
[docs] def flush(self): - #this flush method is needed for python 3 compatibility. - #this handles the flush command by doing nothing. - #you might want to specify some extra behavior here. - pass
- -sys.stdout = Logger() -
- -
- -
- - -
-
- -
- -
- - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_build/html/_modules/pygraph/utils/model_selection_precomputed.html b/docs/_build/html/_modules/pygraph/utils/model_selection_precomputed.html deleted file mode 100644 index b467f7f..0000000 --- a/docs/_build/html/_modules/pygraph/utils/model_selection_precomputed.html +++ /dev/null @@ -1,1148 +0,0 @@ - - - - - - - - - - - pygraph.utils.model_selection_precomputed — py-graph documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - -
- -
    - -
  • Docs »
  • - -
  • Module code »
  • - -
  • pygraph.utils.model_selection_precomputed
  • - - -
  • - -
  • - -
- - -
-
-
-
- -

Source code for pygraph.utils.model_selection_precomputed

-import numpy as np
-import matplotlib
-matplotlib.use('Agg')
-from matplotlib import pyplot as plt
-from sklearn.kernel_ridge import KernelRidge
-from sklearn.svm import SVC
-from sklearn.metrics import accuracy_score, mean_squared_error
-from sklearn.model_selection import KFold, train_test_split, ParameterGrid
-
-#from joblib import Parallel, delayed
-from multiprocessing import Pool, Array
-from functools import partial
-import sys
-sys.path.insert(0, "../")
-import os
-import time
-import datetime
-#from os.path import basename, splitext
-from pygraph.utils.graphfiles import loadDataset
-from tqdm import tqdm
-
-#from memory_profiler import profile
-
-#@profile
-
[docs]def model_selection_for_precomputed_kernel(datafile, - estimator, - param_grid_precomputed, - param_grid, - model_type, - NUM_TRIALS=30, - datafile_y=None, - extra_params=None, - ds_name='ds-unknown', - n_jobs=1, - read_gm_from_file=False, - verbose=True): - """Perform model selection, fitting and testing for precomputed kernels - using nested CV. Print out neccessary data during the process then finally - the results. - - Parameters - ---------- - datafile : string - Path of dataset file. - estimator : function - kernel function used to estimate. This function needs to return a gram matrix. - param_grid_precomputed : dictionary - Dictionary with names (string) of parameters used to calculate gram - matrices as keys and lists of parameter settings to try as values. This - enables searching over any sequence of parameter settings. Params with - length 1 will be omitted. - param_grid : dictionary - Dictionary with names (string) of parameters used as penelties as keys - and lists of parameter settings to try as values. This enables - searching over any sequence of parameter settings. Params with length 1 - will be omitted. - model_type : string - Type of the problem, can be 'regression' or 'classification'. - NUM_TRIALS : integer - Number of random trials of outer cv loop. The default is 30. - datafile_y : string - Path of file storing y data. This parameter is optional depending on - the given dataset file. - extra_params : dict - Extra parameters for loading dataset. See function pygraph.utils. - graphfiles.loadDataset for detail. - ds_name : string - Name of the dataset. - n_jobs : int - Number of jobs for parallelization. - read_gm_from_file : boolean - Whether gram matrices are loaded from a file. 
- - Examples - -------- - >>> import numpy as np - >>> import sys - >>> sys.path.insert(0, "../") - >>> from pygraph.utils.model_selection_precomputed import model_selection_for_precomputed_kernel - >>> from pygraph.kernels.untilHPathKernel import untilhpathkernel - >>> - >>> datafile = '../datasets/MUTAG/MUTAG_A.txt' - >>> estimator = untilhpathkernel - >>> param_grid_precomputed = {’depth’: np.linspace(1, 10, 10), ’k_func’: - [’MinMax’, ’tanimoto’], ’compute_method’: [’trie’]} - >>> # ’C’ for classification problems and ’alpha’ for regression problems. - >>> param_grid = [{’C’: np.logspace(-10, 10, num=41, base=10)}, {’alpha’: - np.logspace(-10, 10, num=41, base=10)}] - >>> - >>> model_selection_for_precomputed_kernel(datafile, estimator, - param_grid_precomputed, param_grid[0], 'classification', ds_name=’MUTAG’) - """ - tqdm.monitor_interval = 0 - - results_dir = '../notebooks/results/' + estimator.__name__ - if not os.path.exists(results_dir): - os.makedirs(results_dir) - # a string to save all the results. - str_fw = '###################### log time: ' + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + '. ######################\n\n' - str_fw += '# This file contains results of ' + estimator.__name__ + ' on dataset ' + ds_name + ',\n# including gram matrices, serial numbers for gram matrix figures and performance.\n\n' - - # setup the model type - model_type = model_type.lower() - if model_type != 'regression' and model_type != 'classification': - raise Exception( - 'The model type is incorrect! Please choose from regression or classification.' - ) - if verbose: - print() - print('--- This is a %s problem ---' % model_type) - str_fw += 'This is a %s problem.\n' % model_type - - # calculate gram matrices rather than read them from file. - if read_gm_from_file == False: - # Load the dataset - if verbose: - print() - print('\n1. 
Loading dataset from file...') - if isinstance(datafile, str): - dataset, y_all = loadDataset( - datafile, filename_y=datafile_y, extra_params=extra_params) - else: # load data directly from variable. - dataset = datafile - y_all = datafile_y - - # import matplotlib.pyplot as plt - # import networkx as nx - # nx.draw_networkx(dataset[30]) - # plt.show() - - # Grid of parameters with a discrete number of values for each. - param_list_precomputed = list(ParameterGrid(param_grid_precomputed)) - param_list = list(ParameterGrid(param_grid)) - - gram_matrices = [ - ] # a list to store gram matrices for all param_grid_precomputed - gram_matrix_time = [ - ] # a list to store time to calculate gram matrices - param_list_pre_revised = [ - ] # list to store param grids precomputed ignoring the useless ones - - # calculate all gram matrices - if verbose: - print() - print('2. Calculating gram matrices. This could take a while...') - str_fw += '\nII. Gram matrices.\n\n' - tts = time.time() # start training time - nb_gm_ignore = 0 # the number of gram matrices those should not be considered, as they may contain elements that are not numbers (NaN) - for idx, params_out in enumerate(param_list_precomputed): - y = y_all[:] - params_out['n_jobs'] = n_jobs - params_out['verbose'] = verbose -# print(dataset) -# import networkx as nx -# nx.draw_networkx(dataset[1]) -# plt.show() - rtn_data = estimator(dataset[:], **params_out) - Kmatrix = rtn_data[0] - current_run_time = rtn_data[1] - # for some kernels, some graphs in datasets may not meet the - # kernels' requirements for graph structure. These graphs are trimmed. - if len(rtn_data) == 3: - idx_trim = rtn_data[2] # the index of trimmed graph list - y = [y[idxt] for idxt in idx_trim] # trim y accordingly -# Kmatrix = np.random.rand(2250, 2250) -# current_run_time = 0.1 - - # remove graphs whose kernels with themselves are zeros - # @todo: y not changed accordingly? 
- Kmatrix_diag = Kmatrix.diagonal().copy() - nb_g_ignore = 0 - for idxk, diag in enumerate(Kmatrix_diag): - if diag == 0: - Kmatrix = np.delete(Kmatrix, (idxk - nb_g_ignore), axis=0) - Kmatrix = np.delete(Kmatrix, (idxk - nb_g_ignore), axis=1) - nb_g_ignore += 1 - # normalization - # @todo: works only for undirected graph? - Kmatrix_diag = Kmatrix.diagonal().copy() - for i in range(len(Kmatrix)): - for j in range(i, len(Kmatrix)): - Kmatrix[i][j] /= np.sqrt(Kmatrix_diag[i] * Kmatrix_diag[j]) - Kmatrix[j][i] = Kmatrix[i][j] - if verbose: - print() - if params_out == {}: - if verbose: - print('the gram matrix is: ') - str_fw += 'the gram matrix is:\n\n' - else: - if verbose: - print('the gram matrix with parameters', params_out, 'is: \n\n') - str_fw += 'the gram matrix with parameters %s is:\n\n' % params_out - if len(Kmatrix) < 2: - nb_gm_ignore += 1 - if verbose: - print('ignored, as at most only one of all its diagonal value is non-zero.') - str_fw += 'ignored, as at most only one of all its diagonal value is non-zero.\n\n' - else: - if np.isnan(Kmatrix).any( - ): # if the matrix contains elements that are not numbers - nb_gm_ignore += 1 - if verbose: - print('ignored, as it contains elements that are not numbers.') - str_fw += 'ignored, as it contains elements that are not numbers.\n\n' - else: -# print(Kmatrix) - str_fw += np.array2string( - Kmatrix, - separator=',') + '\n\n' -# separator=',', -# threshold=np.inf, -# floatmode='unique') + '\n\n' - - fig_file_name = results_dir + '/GM[ds]' + ds_name - if params_out != {}: - fig_file_name += '[params]' + str(idx) - plt.imshow(Kmatrix) - plt.colorbar() - plt.savefig(fig_file_name + '.eps', format='eps', dpi=300) -# plt.show() - plt.clf() - gram_matrices.append(Kmatrix) - gram_matrix_time.append(current_run_time) - param_list_pre_revised.append(params_out) - if nb_g_ignore > 0: - if verbose: - print(', where %d graphs are ignored as their graph kernels with themselves are zeros.' 
% nb_g_ignore) - str_fw += ', where %d graphs are ignored as their graph kernels with themselves are zeros.' % nb_g_ignore - if verbose: - print() - print( - '{} gram matrices are calculated, {} of which are ignored.'.format( - len(param_list_precomputed), nb_gm_ignore)) - str_fw += '{} gram matrices are calculated, {} of which are ignored.\n\n'.format(len(param_list_precomputed), nb_gm_ignore) - str_fw += 'serial numbers of gram matrix figures and their corresponding parameters settings:\n\n' - str_fw += ''.join([ - '{}: {}\n'.format(idx, params_out) - for idx, params_out in enumerate(param_list_precomputed) - ]) - - if verbose: - print() - if len(gram_matrices) == 0: - if verbose: - print('all gram matrices are ignored, no results obtained.') - str_fw += '\nall gram matrices are ignored, no results obtained.\n\n' - else: - # save gram matrices to file. -# np.savez(results_dir + '/' + ds_name + '.gm', -# gms=gram_matrices, params=param_list_pre_revised, y=y, -# gmtime=gram_matrix_time) - if verbose: - print( - '3. Fitting and predicting using nested cross validation. This could really take a while...' - ) - - # ---- use pool.imap_unordered to parallel and track progress. 
---- -# train_pref = [] -# val_pref = [] -# test_pref = [] -# def func_assign(result, var_to_assign): -# for idx, itm in enumerate(var_to_assign): -# itm.append(result[idx]) -# trial_do_partial = partial(trial_do, param_list_pre_revised, param_list, y, model_type) -# -# parallel_me(trial_do_partial, range(NUM_TRIALS), func_assign, -# [train_pref, val_pref, test_pref], glbv=gram_matrices, -# method='imap_unordered', n_jobs=n_jobs, chunksize=1, -# itr_desc='cross validation') - - def init_worker(gms_toshare): - global G_gms - G_gms = gms_toshare - -# gram_matrices = np.array(gram_matrices) -# gms_shape = gram_matrices.shape -# gms_array = Array('d', np.reshape(gram_matrices.copy(), -1, order='C')) -# pool = Pool(processes=n_jobs, initializer=init_worker, initargs=(gms_array, gms_shape)) - pool = Pool(processes=n_jobs, initializer=init_worker, initargs=(gram_matrices,)) - trial_do_partial = partial(parallel_trial_do, param_list_pre_revised, param_list, y, model_type) - train_pref = [] - val_pref = [] - test_pref = [] -# if NUM_TRIALS < 1000 * n_jobs: -# chunksize = int(NUM_TRIALS / n_jobs) + 1 -# else: -# chunksize = 1000 - chunksize = 1 - if verbose: - iterator = tqdm(pool.imap_unordered(trial_do_partial, - range(NUM_TRIALS), chunksize), desc='cross validation', file=sys.stdout) - else: - iterator = pool.imap_unordered(trial_do_partial, range(NUM_TRIALS), chunksize) - for o1, o2, o3 in iterator: - train_pref.append(o1) - val_pref.append(o2) - test_pref.append(o3) - pool.close() - pool.join() - -# # ---- use pool.map to parallel. ---- -# pool = Pool(n_jobs) -# trial_do_partial = partial(trial_do, param_list_pre_revised, param_list, gram_matrices, y[0:250], model_type) -# result_perf = pool.map(trial_do_partial, range(NUM_TRIALS)) -# train_pref = [item[0] for item in result_perf] -# val_pref = [item[1] for item in result_perf] -# test_pref = [item[2] for item in result_perf] - -# # ---- direct running, normally use a single CPU core. 
---- -# train_pref = [] -# val_pref = [] -# test_pref = [] -# for i in tqdm(range(NUM_TRIALS), desc='cross validation', file=sys.stdout): -# o1, o2, o3 = trial_do(param_list_pre_revised, param_list, gram_matrices, y, model_type, i) -# train_pref.append(o1) -# val_pref.append(o2) -# test_pref.append(o3) -# print() - - if verbose: - print() - print('4. Getting final performance...') - str_fw += '\nIII. Performance.\n\n' - # averages and confidences of performances on outer trials for each combination of parameters - average_train_scores = np.mean(train_pref, axis=0) -# print('val_pref: ', val_pref[0][0]) - average_val_scores = np.mean(val_pref, axis=0) -# print('test_pref: ', test_pref[0][0]) - average_perf_scores = np.mean(test_pref, axis=0) - # sample std is used here - std_train_scores = np.std(train_pref, axis=0, ddof=1) - std_val_scores = np.std(val_pref, axis=0, ddof=1) - std_perf_scores = np.std(test_pref, axis=0, ddof=1) - - if model_type == 'regression': - best_val_perf = np.amin(average_val_scores) - else: - best_val_perf = np.amax(average_val_scores) -# print('average_val_scores: ', average_val_scores) -# print('best_val_perf: ', best_val_perf) -# print() - best_params_index = np.where(average_val_scores == best_val_perf) - # find smallest val std with best val perf. 
- best_val_stds = [ - std_val_scores[value][best_params_index[1][idx]] - for idx, value in enumerate(best_params_index[0]) - ] - min_val_std = np.amin(best_val_stds) - best_params_index = np.where(std_val_scores == min_val_std) - best_params_out = [ - param_list_pre_revised[i] for i in best_params_index[0] - ] - best_params_in = [param_list[i] for i in best_params_index[1]] - if verbose: - print('best_params_out: ', best_params_out) - print('best_params_in: ', best_params_in) - print() - print('best_val_perf: ', best_val_perf) - print('best_val_std: ', min_val_std) - str_fw += 'best settings of hyper-params to build gram matrix: %s\n' % best_params_out - str_fw += 'best settings of other hyper-params: %s\n\n' % best_params_in - str_fw += 'best_val_perf: %s\n' % best_val_perf - str_fw += 'best_val_std: %s\n' % min_val_std - -# print(best_params_index) -# print(best_params_index[0]) -# print(average_perf_scores) - final_performance = [ - average_perf_scores[value][best_params_index[1][idx]] - for idx, value in enumerate(best_params_index[0]) - ] - final_confidence = [ - std_perf_scores[value][best_params_index[1][idx]] - for idx, value in enumerate(best_params_index[0]) - ] - if verbose: - print('final_performance: ', final_performance) - print('final_confidence: ', final_confidence) - str_fw += 'final_performance: %s\n' % final_performance - str_fw += 'final_confidence: %s\n' % final_confidence - train_performance = [ - average_train_scores[value][best_params_index[1][idx]] - for idx, value in enumerate(best_params_index[0]) - ] - train_std = [ - std_train_scores[value][best_params_index[1][idx]] - for idx, value in enumerate(best_params_index[0]) - ] - if verbose: - print('train_performance: %s' % train_performance) - print('train_std: ', train_std) - str_fw += 'train_performance: %s\n' % train_performance - str_fw += 'train_std: %s\n\n' % train_std - - if verbose: - print() - tt_total = time.time() - tts # training time for all hyper-parameters - 
average_gram_matrix_time = np.mean(gram_matrix_time) - std_gram_matrix_time = np.std(gram_matrix_time, ddof=1) if len(gram_matrix_time) > 1 else 0 - best_gram_matrix_time = [ - gram_matrix_time[i] for i in best_params_index[0] - ] - ave_bgmt = np.mean(best_gram_matrix_time) - std_bgmt = np.std(best_gram_matrix_time, ddof=1) if len(best_gram_matrix_time) > 1 else 0 - if verbose: - print('time to calculate gram matrix with different hyper-params: {:.2f}±{:.2f}s' - .format(average_gram_matrix_time, std_gram_matrix_time)) - print('time to calculate best gram matrix: {:.2f}±{:.2f}s'.format( - ave_bgmt, std_bgmt)) - print('total training time with all hyper-param choices: {:.2f}s'.format( - tt_total)) - str_fw += 'time to calculate gram matrix with different hyper-params: {:.2f}±{:.2f}s\n'.format(average_gram_matrix_time, std_gram_matrix_time) - str_fw += 'time to calculate best gram matrix: {:.2f}±{:.2f}s\n'.format(ave_bgmt, std_bgmt) - str_fw += 'total training time with all hyper-param choices: {:.2f}s\n\n'.format(tt_total) - - # # save results to file - # np.savetxt(results_name_pre + 'average_train_scores.dt', - # average_train_scores) - # np.savetxt(results_name_pre + 'average_val_scores', average_val_scores) - # np.savetxt(results_name_pre + 'average_perf_scores.dt', - # average_perf_scores) - # np.savetxt(results_name_pre + 'std_train_scores.dt', std_train_scores) - # np.savetxt(results_name_pre + 'std_val_scores.dt', std_val_scores) - # np.savetxt(results_name_pre + 'std_perf_scores.dt', std_perf_scores) - - # np.save(results_name_pre + 'best_params_index', best_params_index) - # np.save(results_name_pre + 'best_params_pre.dt', best_params_out) - # np.save(results_name_pre + 'best_params_in.dt', best_params_in) - # np.save(results_name_pre + 'best_val_perf.dt', best_val_perf) - # np.save(results_name_pre + 'best_val_std.dt', best_val_std) - # np.save(results_name_pre + 'final_performance.dt', final_performance) - # np.save(results_name_pre + 
'final_confidence.dt', final_confidence) - # np.save(results_name_pre + 'train_performance.dt', train_performance) - # np.save(results_name_pre + 'train_std.dt', train_std) - - # np.save(results_name_pre + 'gram_matrix_time.dt', gram_matrix_time) - # np.save(results_name_pre + 'average_gram_matrix_time.dt', - # average_gram_matrix_time) - # np.save(results_name_pre + 'std_gram_matrix_time.dt', - # std_gram_matrix_time) - # np.save(results_name_pre + 'best_gram_matrix_time.dt', - # best_gram_matrix_time) - - # read gram matrices from file. - else: - # Grid of parameters with a discrete number of values for each. -# param_list_precomputed = list(ParameterGrid(param_grid_precomputed)) - param_list = list(ParameterGrid(param_grid)) - - # read gram matrices from file. - if verbose: - print() - print('2. Reading gram matrices from file...') - str_fw += '\nII. Gram matrices.\n\nGram matrices are read from file, see last log for detail.\n' - gmfile = np.load(results_dir + '/' + ds_name + '.gm.npz') - gram_matrices = gmfile['gms'] # a list to store gram matrices for all param_grid_precomputed - gram_matrix_time = gmfile['gmtime'] # time used to compute the gram matrices - param_list_pre_revised = gmfile['params'] # list to store param grids precomputed ignoring the useless ones - y = gmfile['y'].tolist() - - tts = time.time() # start training time -# nb_gm_ignore = 0 # the number of gram matrices those should not be considered, as they may contain elements that are not numbers (NaN) - if verbose: - print( - '3. Fitting and predicting using nested cross validation. This could really take a while...' - ) - - # ---- use pool.imap_unordered to parallel and track progress. 
---- - def init_worker(gms_toshare): - global G_gms - G_gms = gms_toshare - - pool = Pool(processes=n_jobs, initializer=init_worker, initargs=(gram_matrices,)) - trial_do_partial = partial(parallel_trial_do, param_list_pre_revised, param_list, y, model_type) - train_pref = [] - val_pref = [] - test_pref = [] - chunksize = 1 - if verbose: - iterator = tqdm(pool.imap_unordered(trial_do_partial, - range(NUM_TRIALS), chunksize), desc='cross validation', file=sys.stdout) - else: - iterator = pool.imap_unordered(trial_do_partial, range(NUM_TRIALS), chunksize) - for o1, o2, o3 in iterator: - train_pref.append(o1) - val_pref.append(o2) - test_pref.append(o3) - pool.close() - pool.join() - - # # ---- use pool.map to parallel. ---- - # result_perf = pool.map(trial_do_partial, range(NUM_TRIALS)) - # train_pref = [item[0] for item in result_perf] - # val_pref = [item[1] for item in result_perf] - # test_pref = [item[2] for item in result_perf] - - # # ---- use joblib.Parallel to parallel and track progress. ---- - # trial_do_partial = partial(trial_do, param_list_pre_revised, param_list, gram_matrices, y, model_type) - # result_perf = Parallel(n_jobs=n_jobs, verbose=10)(delayed(trial_do_partial)(trial) for trial in range(NUM_TRIALS)) - # train_pref = [item[0] for item in result_perf] - # val_pref = [item[1] for item in result_perf] - # test_pref = [item[2] for item in result_perf] - -# # ---- direct running, normally use a single CPU core. ---- -# train_pref = [] -# val_pref = [] -# test_pref = [] -# for i in tqdm(range(NUM_TRIALS), desc='cross validation', file=sys.stdout): -# o1, o2, o3 = trial_do(param_list_pre_revised, param_list, gram_matrices, y, model_type, i) -# train_pref.append(o1) -# val_pref.append(o2) -# test_pref.append(o3) - - if verbose: - print() - print('4. Getting final performance...') - str_fw += '\nIII. 
Performance.\n\n' - # averages and confidences of performances on outer trials for each combination of parameters - average_train_scores = np.mean(train_pref, axis=0) - average_val_scores = np.mean(val_pref, axis=0) - average_perf_scores = np.mean(test_pref, axis=0) - # sample std is used here - std_train_scores = np.std(train_pref, axis=0, ddof=1) - std_val_scores = np.std(val_pref, axis=0, ddof=1) - std_perf_scores = np.std(test_pref, axis=0, ddof=1) - - if model_type == 'regression': - best_val_perf = np.amin(average_val_scores) - else: - best_val_perf = np.amax(average_val_scores) - best_params_index = np.where(average_val_scores == best_val_perf) - # find smallest val std with best val perf. - best_val_stds = [ - std_val_scores[value][best_params_index[1][idx]] - for idx, value in enumerate(best_params_index[0]) - ] - min_val_std = np.amin(best_val_stds) - best_params_index = np.where(std_val_scores == min_val_std) - best_params_out = [ - param_list_pre_revised[i] for i in best_params_index[0] - ] - best_params_in = [param_list[i] for i in best_params_index[1]] - if verbose: - print('best_params_out: ', best_params_out) - print('best_params_in: ', best_params_in) - print() - print('best_val_perf: ', best_val_perf) - print('best_val_std: ', min_val_std) - str_fw += 'best settings of hyper-params to build gram matrix: %s\n' % best_params_out - str_fw += 'best settings of other hyper-params: %s\n\n' % best_params_in - str_fw += 'best_val_perf: %s\n' % best_val_perf - str_fw += 'best_val_std: %s\n' % min_val_std - - final_performance = [ - average_perf_scores[value][best_params_index[1][idx]] - for idx, value in enumerate(best_params_index[0]) - ] - final_confidence = [ - std_perf_scores[value][best_params_index[1][idx]] - for idx, value in enumerate(best_params_index[0]) - ] - if verbose: - print('final_performance: ', final_performance) - print('final_confidence: ', final_confidence) - str_fw += 'final_performance: %s\n' % final_performance - str_fw += 
'final_confidence: %s\n' % final_confidence - train_performance = [ - average_train_scores[value][best_params_index[1][idx]] - for idx, value in enumerate(best_params_index[0]) - ] - train_std = [ - std_train_scores[value][best_params_index[1][idx]] - for idx, value in enumerate(best_params_index[0]) - ] - if verbose: - print('train_performance: %s' % train_performance) - print('train_std: ', train_std) - str_fw += 'train_performance: %s\n' % train_performance - str_fw += 'train_std: %s\n\n' % train_std - - if verbose: - print() - average_gram_matrix_time = np.mean(gram_matrix_time) - std_gram_matrix_time = np.std(gram_matrix_time, ddof=1) if len(gram_matrix_time) > 1 else 0 - best_gram_matrix_time = [ - gram_matrix_time[i] for i in best_params_index[0] - ] - ave_bgmt = np.mean(best_gram_matrix_time) - std_bgmt = np.std(best_gram_matrix_time, ddof=1) if len(best_gram_matrix_time) > 1 else 0 - if verbose: - print( - 'time to calculate gram matrix with different hyper-params: {:.2f}±{:.2f}s' - .format(average_gram_matrix_time, std_gram_matrix_time)) - print('time to calculate best gram matrix: {:.2f}±{:.2f}s'.format( - ave_bgmt, std_bgmt)) - tt_poster = time.time() - tts # training time with hyper-param choices who did not participate in calculation of gram matrices - if verbose: - print( - 'training time with hyper-param choices who did not participate in calculation of gram matrices: {:.2f}s'.format( - tt_poster)) - print('total training time with all hyper-param choices: {:.2f}s'.format( - tt_poster + np.sum(gram_matrix_time))) -# str_fw += 'time to calculate gram matrix with different hyper-params: {:.2f}±{:.2f}s\n'.format(average_gram_matrix_time, std_gram_matrix_time) -# str_fw += 'time to calculate best gram matrix: {:.2f}±{:.2f}s\n'.format(ave_bgmt, std_bgmt) - str_fw += 'training time with hyper-param choices who did not participate in calculation of gram matrices: {:.2f}s\n\n'.format(tt_poster) - - # open file to save all results for this dataset. 
- if not os.path.exists(results_dir): - os.makedirs(results_dir) - - # print out results as table. - str_fw += printResultsInTable(param_list, param_list_pre_revised, average_val_scores, - std_val_scores, average_perf_scores, std_perf_scores, - average_train_scores, std_train_scores, gram_matrix_time, - model_type, verbose) - - # open file to save all results for this dataset. - if not os.path.exists(results_dir + '/' + ds_name + '.output.txt'): - with open(results_dir + '/' + ds_name + '.output.txt', 'w') as f: - f.write(str_fw) - else: - with open(results_dir + '/' + ds_name + '.output.txt', 'r+') as f: - content = f.read() - f.seek(0, 0) - f.write(str_fw + '\n\n\n' + content)
- - -
[docs]def trial_do(param_list_pre_revised, param_list, gram_matrices, y, model_type, trial): # Test set level - -# # get gram matrices from global variables. -# gram_matrices = np.reshape(G_gms.copy(), G_gms_shape, order='C') - - # Arrays to store scores - train_pref = np.zeros((len(param_list_pre_revised), len(param_list))) - val_pref = np.zeros((len(param_list_pre_revised), len(param_list))) - test_pref = np.zeros((len(param_list_pre_revised), len(param_list))) - - # randomness added to seeds of split function below. "high" is "size" times - # 10 so that at least 10 different random output will be yielded. Remove - # these lines if identical outputs is required. - rdm_out = np.random.RandomState(seed=None) - rdm_seed_out_l = rdm_out.uniform(high=len(param_list_pre_revised) * 10, - size=len(param_list_pre_revised)) -# print(trial, rdm_seed_out_l) -# print() - # loop for each outer param tuple - for index_out, params_out in enumerate(param_list_pre_revised): - # get gram matrices from global variables. -# gm_now = G_gms[index_out * G_gms_shape[1] * G_gms_shape[2]:(index_out + 1) * G_gms_shape[1] * G_gms_shape[2]] -# gm_now = np.reshape(gm_now.copy(), (G_gms_shape[1], G_gms_shape[2]), order='C') - gm_now = gram_matrices[index_out].copy() - - # split gram matrix and y to app and test sets. - indices = range(len(y)) - # The argument "random_state" in function "train_test_split" can not be - # set to None, because it will use RandomState instance used by - # np.random, which is possible for multiple subprocesses to inherit the - # same seed if they forked at the same time, leading to identical - # random variates for different subprocesses. Instead, we use "trial" - # and "index_out" parameters to generate different seeds for different - # trials/subprocesses and outer loops. "rdm_seed_out_l" is used to add - # randomness into seeds, so that it yields a different output every - # time the program is run. 
To yield identical outputs every time, - # remove the second line below. Same method is used to the "KFold" - # function in the inner loop. - rdm_seed_out = (trial + 1) * (index_out + 1) - rdm_seed_out = (rdm_seed_out + int(rdm_seed_out_l[index_out])) % (2 ** 32 - 1) -# print(trial, rdm_seed_out) - X_app, X_test, y_app, y_test, idx_app, idx_test = train_test_split( - gm_now, y, indices, test_size=0.1, - random_state=rdm_seed_out, shuffle=True) -# print(trial, idx_app, idx_test) -# print() - X_app = X_app[:, idx_app] - X_test = X_test[:, idx_app] - y_app = np.array(y_app) - y_test = np.array(y_test) - - rdm_seed_in_l = rdm_out.uniform(high=len(param_list) * 10, - size=len(param_list)) - # loop for each inner param tuple - for index_in, params_in in enumerate(param_list): -# if trial == 0: -# print(index_out, index_in) -# print('params_in: ', params_in) -# st = time.time() - rdm_seed_in = (trial + 1) * (index_out + 1) * (index_in + 1) -# print("rdm_seed_in1: ", trial, index_in, rdm_seed_in) - rdm_seed_in = (rdm_seed_in + int(rdm_seed_in_l[index_in])) % (2 ** 32 - 1) -# print("rdm_seed_in2: ", trial, index_in, rdm_seed_in) - inner_cv = KFold(n_splits=10, shuffle=True, random_state=rdm_seed_in) - current_train_perf = [] - current_valid_perf = [] - current_test_perf = [] - - # For regression use the Kernel Ridge method -# try: - if model_type == 'regression': - kr = KernelRidge(kernel='precomputed', **params_in) - # loop for each split on validation set level - # validation set level - for train_index, valid_index in inner_cv.split(X_app): -# print("train_index, valid_index: ", trial, index_in, train_index, valid_index) -# if trial == 0: -# print('train_index: ', train_index) -# print('valid_index: ', valid_index) -# print('idx_test: ', idx_test) -# print('y_app[train_index]: ', y_app[train_index]) -# print('X_app[train_index, :][:, train_index]: ', X_app[train_index, :][:, train_index]) -# print('X_app[valid_index, :][:, train_index]: ', X_app[valid_index, :][:, 
train_index]) - kr.fit(X_app[train_index, :][:, train_index], - y_app[train_index]) - - # predict on the train, validation and test set - y_pred_train = kr.predict( - X_app[train_index, :][:, train_index]) - y_pred_valid = kr.predict( - X_app[valid_index, :][:, train_index]) -# if trial == 0: -# print('y_pred_valid: ', y_pred_valid) -# print() - y_pred_test = kr.predict( - X_test[:, train_index]) - - # root mean squared errors - current_train_perf.append( - np.sqrt( - mean_squared_error( - y_app[train_index], y_pred_train))) - current_valid_perf.append( - np.sqrt( - mean_squared_error( - y_app[valid_index], y_pred_valid))) -# if trial == 0: -# print(mean_squared_error( -# y_app[valid_index], y_pred_valid)) - current_test_perf.append( - np.sqrt( - mean_squared_error( - y_test, y_pred_test))) - # For clcassification use SVM - else: - svc = SVC(kernel='precomputed', cache_size=200, - verbose=False, **params_in) - # loop for each split on validation set level - # validation set level - for train_index, valid_index in inner_cv.split(X_app): -# np.savez("bug.npy",X_app[train_index, :][:, train_index],y_app[train_index]) -# if trial == 0: -# print('train_index: ', train_index) -# print('valid_index: ', valid_index) -# print('idx_test: ', idx_test) -# print('y_app[train_index]: ', y_app[train_index]) -# print('X_app[train_index, :][:, train_index]: ', X_app[train_index, :][:, train_index]) -# print('X_app[valid_index, :][:, train_index]: ', X_app[valid_index, :][:, train_index]) - svc.fit(X_app[train_index, :][:, train_index], - y_app[train_index]) - - # predict on the train, validation and test set - y_pred_train = svc.predict( - X_app[train_index, :][:, train_index]) - y_pred_valid = svc.predict( - X_app[valid_index, :][:, train_index]) - y_pred_test = svc.predict( - X_test[:, train_index]) - - # root mean squared errors - current_train_perf.append( - accuracy_score(y_app[train_index], - y_pred_train)) - current_valid_perf.append( - accuracy_score(y_app[valid_index], - 
y_pred_valid)) - current_test_perf.append( - accuracy_score(y_test, y_pred_test)) -# except ValueError: -# print(sys.exc_info()[0]) -# print(params_out, params_in) - - # average performance on inner splits - train_pref[index_out][index_in] = np.mean( - current_train_perf) - val_pref[index_out][index_in] = np.mean( - current_valid_perf) - test_pref[index_out][index_in] = np.mean( - current_test_perf) -# print(time.time() - st) -# if trial == 0: -# print('val_pref: ', val_pref) -# print('test_pref: ', test_pref) - - return train_pref, val_pref, test_pref
- -
[docs]def parallel_trial_do(param_list_pre_revised, param_list, y, model_type, trial): - train_pref, val_pref, test_pref = trial_do(param_list_pre_revised, - param_list, G_gms, y, - model_type, trial) - return train_pref, val_pref, test_pref
- - -
[docs]def compute_gram_matrices(dataset, y, estimator, param_list_precomputed, - results_dir, ds_name, - n_jobs=1, str_fw='', verbose=True): - gram_matrices = [ - ] # a list to store gram matrices for all param_grid_precomputed - gram_matrix_time = [ - ] # a list to store time to calculate gram matrices - param_list_pre_revised = [ - ] # list to store param grids precomputed ignoring the useless ones - - nb_gm_ignore = 0 # the number of gram matrices those should not be considered, as they may contain elements that are not numbers (NaN) - for idx, params_out in enumerate(param_list_precomputed): - params_out['n_jobs'] = n_jobs -# print(dataset) -# import networkx as nx -# nx.draw_networkx(dataset[1]) -# plt.show() - rtn_data = estimator(dataset[:], **params_out) - Kmatrix = rtn_data[0] - current_run_time = rtn_data[1] - # for some kernels, some graphs in datasets may not meet the - # kernels' requirements for graph structure. These graphs are trimmed. - if len(rtn_data) == 3: - idx_trim = rtn_data[2] # the index of trimmed graph list - y = [y[idxt] for idxt in idx_trim] # trim y accordingly - - Kmatrix_diag = Kmatrix.diagonal().copy() - # remove graphs whose kernels with themselves are zeros - nb_g_ignore = 0 - for idxk, diag in enumerate(Kmatrix_diag): - if diag == 0: - Kmatrix = np.delete(Kmatrix, (idxk - nb_g_ignore), axis=0) - Kmatrix = np.delete(Kmatrix, (idxk - nb_g_ignore), axis=1) - nb_g_ignore += 1 - # normalization - for i in range(len(Kmatrix)): - for j in range(i, len(Kmatrix)): - Kmatrix[i][j] /= np.sqrt(Kmatrix_diag[i] * Kmatrix_diag[j]) - Kmatrix[j][i] = Kmatrix[i][j] - - if verbose: - print() - if params_out == {}: - if verbose: - print('the gram matrix is: ') - str_fw += 'the gram matrix is:\n\n' - else: - if verbose: - print('the gram matrix with parameters', params_out, 'is: ') - str_fw += 'the gram matrix with parameters %s is:\n\n' % params_out - if len(Kmatrix) < 2: - nb_gm_ignore += 1 - if verbose: - print('ignored, as at most only one of all 
its diagonal value is non-zero.') - str_fw += 'ignored, as at most only one of all its diagonal value is non-zero.\n\n' - else: - if np.isnan(Kmatrix).any( - ): # if the matrix contains elements that are not numbers - nb_gm_ignore += 1 - if verbose: - print('ignored, as it contains elements that are not numbers.') - str_fw += 'ignored, as it contains elements that are not numbers.\n\n' - else: -# print(Kmatrix) - str_fw += np.array2string( - Kmatrix, - separator=',') + '\n\n' -# separator=',', -# threshold=np.inf, -# floatmode='unique') + '\n\n' - - fig_file_name = results_dir + '/GM[ds]' + ds_name - if params_out != {}: - fig_file_name += '[params]' + str(idx) - plt.imshow(Kmatrix) - plt.colorbar() - plt.savefig(fig_file_name + '.eps', format='eps', dpi=300) -# plt.show() - plt.clf() - gram_matrices.append(Kmatrix) - gram_matrix_time.append(current_run_time) - param_list_pre_revised.append(params_out) - if nb_g_ignore > 0: - if verbose: - print(', where %d graphs are ignored as their graph kernels with themselves are zeros.' % nb_g_ignore) - str_fw += ', where %d graphs are ignored as their graph kernels with themselves are zeros.' % nb_g_ignore - if verbose: - print() - print( - '{} gram matrices are calculated, {} of which are ignored.'.format( - len(param_list_precomputed), nb_gm_ignore)) - str_fw += '{} gram matrices are calculated, {} of which are ignored.\n\n'.format(len(param_list_precomputed), nb_gm_ignore) - str_fw += 'serial numbers of gram matrix figures and their corresponding parameters settings:\n\n' - str_fw += ''.join([ - '{}: {}\n'.format(idx, params_out) - for idx, params_out in enumerate(param_list_precomputed) - ]) - - return gram_matrices, gram_matrix_time, param_list_pre_revised, y, str_fw
- - -
[docs]def read_gram_matrices_from_file(results_dir, ds_name): - gmfile = np.load(results_dir + '/' + ds_name + '.gm.npz') - gram_matrices = gmfile['gms'] # a list to store gram matrices for all param_grid_precomputed - param_list_pre_revised = gmfile['params'] # list to store param grids precomputed ignoring the useless ones - y = gmfile['y'].tolist() - return gram_matrices, param_list_pre_revised, y
- - -
[docs]def printResultsInTable(param_list, param_list_pre_revised, average_val_scores, - std_val_scores, average_perf_scores, std_perf_scores, - average_train_scores, std_train_scores, gram_matrix_time, - model_type, verbose): - from collections import OrderedDict - from tabulate import tabulate - table_dict = {} - if model_type == 'regression': - for param_in in param_list: - param_in['alpha'] = '{:.2e}'.format(param_in['alpha']) - else: - for param_in in param_list: - param_in['C'] = '{:.2e}'.format(param_in['C']) - table_dict['params'] = [{**param_out, **param_in} - for param_in in param_list for param_out in param_list_pre_revised] - table_dict['gram_matrix_time'] = [ - '{:.2f}'.format(gram_matrix_time[index_out]) - for param_in in param_list - for index_out, _ in enumerate(param_list_pre_revised) - ] - table_dict['valid_perf'] = [ - '{:.2f}±{:.2f}'.format(average_val_scores[index_out][index_in], - std_val_scores[index_out][index_in]) - for index_in, _ in enumerate(param_list) - for index_out, _ in enumerate(param_list_pre_revised) - ] - table_dict['test_perf'] = [ - '{:.2f}±{:.2f}'.format(average_perf_scores[index_out][index_in], - std_perf_scores[index_out][index_in]) - for index_in, _ in enumerate(param_list) - for index_out, _ in enumerate(param_list_pre_revised) - ] - table_dict['train_perf'] = [ - '{:.2f}±{:.2f}'.format(average_train_scores[index_out][index_in], - std_train_scores[index_out][index_in]) - for index_in, _ in enumerate(param_list) - for index_out, _ in enumerate(param_list_pre_revised) - ] - - keyorder = [ - 'params', 'train_perf', 'valid_perf', 'test_perf', - 'gram_matrix_time' - ] - if verbose: - print() - tb_print = tabulate(OrderedDict(sorted(table_dict.items(), - key=lambda i: keyorder.index(i[0]))), headers='keys') -# print(tb_print) - return 'table of performance v.s. hyper-params:\n\n%s\n\n' % tb_print
-
- -
- -
- - -
-
- -
- -
- - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_build/html/_modules/pygraph/utils/parallel.html b/docs/_build/html/_modules/pygraph/utils/parallel.html deleted file mode 100644 index b707fea..0000000 --- a/docs/_build/html/_modules/pygraph/utils/parallel.html +++ /dev/null @@ -1,252 +0,0 @@ - - - - - - - - - - - pygraph.utils.parallel — py-graph documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - -
- - - - -
-
-
-
- -

Source code for pygraph.utils.parallel

-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Created on Tue Dec 11 11:39:46 2018
-Parallel aid functions.
-@author: ljia
-"""
-import multiprocessing
-from multiprocessing import Pool
-from tqdm import tqdm
-import sys
-
-
[docs]def parallel_me(func, func_assign, var_to_assign, itr, len_itr=None, init_worker=None, - glbv=None, method=None, n_jobs=None, chunksize=None, itr_desc='', - verbose=True): - ''' - ''' - if method == 'imap_unordered': - if glbv: # global varibles required. -# def init_worker(v_share): -# global G_var -# G_var = v_share - if n_jobs == None: - n_jobs = multiprocessing.cpu_count() - with Pool(processes=n_jobs, initializer=init_worker, - initargs=glbv) as pool: - if chunksize == None: - if len_itr < 100 * n_jobs: - chunksize = int(len_itr / n_jobs) + 1 - else: - chunksize = 100 - for result in (tqdm(pool.imap_unordered(func, itr, chunksize), - desc=itr_desc, file=sys.stdout) if verbose else - pool.imap_unordered(func, itr, chunksize)): - func_assign(result, var_to_assign) - else: - if n_jobs == None: - n_jobs = multiprocessing.cpu_count() - with Pool(processes=n_jobs) as pool: - if chunksize == None: - if len_itr < 100 * n_jobs: - chunksize = int(len_itr / n_jobs) + 1 - else: - chunksize = 100 - for result in (tqdm(pool.imap_unordered(func, itr, chunksize), - desc=itr_desc, file=sys.stdout) if verbose else - pool.imap_unordered(func, itr, chunksize)): - func_assign(result, var_to_assign)
- - - -
[docs]def parallel_gm(func, Kmatrix, Gn, init_worker=None, glbv=None, - method='imap_unordered', n_jobs=None, chunksize=None, - verbose=True): - from itertools import combinations_with_replacement - def func_assign(result, var_to_assign): - var_to_assign[result[0]][result[1]] = result[2] - var_to_assign[result[1]][result[0]] = result[2] - itr = combinations_with_replacement(range(0, len(Gn)), 2) - len_itr = int(len(Gn) * (len(Gn) + 1) / 2) - parallel_me(func, func_assign, Kmatrix, itr, len_itr=len_itr, - init_worker=init_worker, glbv=glbv, method=method, n_jobs=n_jobs, - chunksize=chunksize, itr_desc='calculating kernels', verbose=verbose)
-
- -
- -
- - -
-
- -
- -
- - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_build/html/_modules/pygraph/utils/trie.html b/docs/_build/html/_modules/pygraph/utils/trie.html deleted file mode 100644 index cf29a6a..0000000 --- a/docs/_build/html/_modules/pygraph/utils/trie.html +++ /dev/null @@ -1,300 +0,0 @@ - - - - - - - - - - - pygraph.utils.trie — py-graph documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - -
- - - - -
-
-
-
- -

Source code for pygraph.utils.trie

-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Created on Wed Jan 30 10:48:49 2019
-
-Trie (prefix tree)
-@author: ljia
-@references:
-        https://viblo.asia/p/nlp-build-a-trie-data-structure-from-scratch-with-python-3P0lPzroKox, 2019.1
-"""
-
-import pickle
-import json
-
-""" Trie class
-"""
-
[docs]class Trie: - # init Trie class - def __init__(self): - self.root = self.getNode() - -
[docs] def getNode(self): - return {"isEndOfWord": False, "children": {}}
- -
[docs] def insertWord(self, word): - current = self.root - for ch in word: - - if ch in current["children"]: - node = current["children"][ch] - else: - node = self.getNode() - current["children"][ch] = node - - current = node - current["isEndOfWord"] = True - if 'count' in current: - current['count'] += 1 - else: - current['count'] = 1
- -
[docs] def searchWord(self, word): - current = self.root - for ch in word: - if ch not in current["children"]: - return 0 - node = current["children"][ch] - - current = node - if 'count' in current: - return current["count"] - else: - return 0
- -
[docs] def searchWordPrefix(self, word): - current = self.root - for ch in word: - if not current["children"].has_key(ch): - return False - node = current["children"][ch] - - current = node - # return True if children contain keys and values - return bool(current["children"])
- -
[docs] def deleteWord(self, word): - self._delete(self.root, word, 0)
- - def _delete(self, current, word, index): - if(index == len(word)): - if not current["isEndOfWord"]: - return False - current["isEndOfWord"] = False - return len(current["children"].keys()) == 0 - - ch = word[index] - if not current["children"].has_key(ch): - return False - node = current["children"][ch] - - should_delete_current_node = self._delete(node, word, index + 1) - - if should_delete_current_node: - current["children"].pop(ch) - return len(current["children"].keys()) == 0 - - return False - -
[docs] def save_to_pickle(self, file_name): - f = open(file_name + ".pkl", "wb") - pickle.dump(self.root, f) - f.close()
- -
[docs] def load_from_pickle(self, file_name): - f = open(file_name + ".pkl", "rb") - self.root = pickle.load(f) - f.close()
- -
[docs] def to_json(self): - return json.dump(self.root)
- -
[docs] def save_to_json(self, file_name): - json_data = json.dumps(self.root) - f = open(file_name + ".json", "w") - f.write(json_data) - f.close()
- -
[docs] def load_from_json(self, file_name): - json_file = open(file_name + ".json", "r") - self.root = json.load(json_file) - json_file.close()
-
- -
- -
- - -
-
- -
- -
- - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_build/html/_modules/pygraph/utils/utils.html b/docs/_build/html/_modules/pygraph/utils/utils.html deleted file mode 100644 index 5679427..0000000 --- a/docs/_build/html/_modules/pygraph/utils/utils.html +++ /dev/null @@ -1,451 +0,0 @@ - - - - - - - - - - - pygraph.utils.utils — py-graph documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - -
- - - - -
-
-
-
- -

Source code for pygraph.utils.utils

-import networkx as nx
-import numpy as np
-from copy import deepcopy
-#from itertools import product
-
-# from tqdm import tqdm
-
-
-
[docs]def getSPLengths(G1): - sp = nx.shortest_path(G1) - distances = np.zeros((G1.number_of_nodes(), G1.number_of_nodes())) - for i in sp.keys(): - for j in sp[i].keys(): - distances[i, j] = len(sp[i][j]) - 1 - return distances
- - -
[docs]def getSPGraph(G, edge_weight=None): - """Transform graph G to its corresponding shortest-paths graph. - - Parameters - ---------- - G : NetworkX graph - The graph to be tramsformed. - edge_weight : string - edge attribute corresponding to the edge weight. - - Return - ------ - S : NetworkX graph - The shortest-paths graph corresponding to G. - - Notes - ------ - For an input graph G, its corresponding shortest-paths graph S contains the same set of nodes as G, while there exists an edge between all nodes in S which are connected by a walk in G. Every edge in S between two nodes is labeled by the shortest distance between these two nodes. - - References - ---------- - [1] Borgwardt KM, Kriegel HP. Shortest-path kernels on graphs. InData Mining, Fifth IEEE International Conference on 2005 Nov 27 (pp. 8-pp). IEEE. - """ - return floydTransformation(G, edge_weight=edge_weight)
- - -
[docs]def floydTransformation(G, edge_weight=None): - """Transform graph G to its corresponding shortest-paths graph using Floyd-transformation. - - Parameters - ---------- - G : NetworkX graph - The graph to be tramsformed. - edge_weight : string - edge attribute corresponding to the edge weight. The default edge weight is bond_type. - - Return - ------ - S : NetworkX graph - The shortest-paths graph corresponding to G. - - References - ---------- - [1] Borgwardt KM, Kriegel HP. Shortest-path kernels on graphs. InData Mining, Fifth IEEE International Conference on 2005 Nov 27 (pp. 8-pp). IEEE. - """ - spMatrix = nx.floyd_warshall_numpy(G, weight=edge_weight) - S = nx.Graph() - S.add_nodes_from(G.nodes(data=True)) - ns = list(G.nodes()) - for i in range(0, G.number_of_nodes()): - for j in range(i + 1, G.number_of_nodes()): - if spMatrix[i, j] != np.inf: - S.add_edge(ns[i], ns[j], cost=spMatrix[i, j]) - return S
- - -
[docs]def untotterTransformation(G, node_label, edge_label): - """Transform graph G according to Mahé et al.'s work to filter out tottering patterns of marginalized kernel and tree pattern kernel. - - Parameters - ---------- - G : NetworkX graph - The graph to be tramsformed. - node_label : string - node attribute used as label. The default node label is 'atom'. - edge_label : string - edge attribute used as label. The default edge label is 'bond_type'. - - Return - ------ - gt : NetworkX graph - The transformed graph corresponding to G. - - References - ---------- - [1] Pierre Mahé, Nobuhisa Ueda, Tatsuya Akutsu, Jean-Luc Perret, and Jean-Philippe Vert. Extensions of marginalized graph kernels. In Proceedings of the twenty-first international conference on Machine learning, page 70. ACM, 2004. - """ - # arrange all graphs in a list - G = G.to_directed() - gt = nx.Graph() - gt.graph = G.graph - gt.add_nodes_from(G.nodes(data=True)) - for edge in G.edges(): - gt.add_node(edge) - gt.node[edge].update({node_label: G.node[edge[1]][node_label]}) - gt.add_edge(edge[0], edge) - gt.edges[edge[0], edge].update({ - edge_label: - G[edge[0]][edge[1]][edge_label] - }) - for neighbor in G[edge[1]]: - if neighbor != edge[0]: - gt.add_edge(edge, (edge[1], neighbor)) - gt.edges[edge, (edge[1], neighbor)].update({ - edge_label: - G[edge[1]][neighbor][edge_label] - }) - # nx.draw_networkx(gt) - # plt.show() - - # relabel nodes using consecutive integers for convenience of kernel calculation. - gt = nx.convert_node_labels_to_integers( - gt, first_label=0, label_attribute='label_orignal') - return gt
- - -
[docs]def direct_product(G1, G2, node_label, edge_label): - """Return the direct/tensor product of directed graphs G1 and G2. - - Parameters - ---------- - G1, G2 : NetworkX graph - The original graphs. - node_label : string - node attribute used as label. The default node label is 'atom'. - edge_label : string - edge attribute used as label. The default edge label is 'bond_type'. - - Return - ------ - gt : NetworkX graph - The direct product graph of G1 and G2. - - Notes - ----- - This method differs from networkx.tensor_product in that this method only adds nodes and edges in G1 and G2 that have the same labels to the direct product graph. - - References - ---------- - [1] Thomas Gärtner, Peter Flach, and Stefan Wrobel. On graph kernels: Hardness results and efficient alternatives. Learning Theory and Kernel Machines, pages 129–143, 2003. - """ - # arrange all graphs in a list - from itertools import product - # G = G.to_directed() - gt = nx.DiGraph() - # add nodes - for u, v in product(G1, G2): - if G1.nodes[u][node_label] == G2.nodes[v][node_label]: - gt.add_node((u, v)) - gt.nodes[(u, v)].update({node_label: G1.nodes[u][node_label]}) - # add edges, faster for sparse graphs (no so many edges), which is the most case for now. - for (u1, v1), (u2, v2) in product(G1.edges, G2.edges): - if (u1, u2) in gt and ( - v1, v2 - ) in gt and G1.edges[u1, v1][edge_label] == G2.edges[u2, - v2][edge_label]: - gt.add_edge((u1, u2), (v1, v2)) - gt.edges[(u1, u2), (v1, v2)].update({ - edge_label: - G1.edges[u1, v1][edge_label] - }) - - # # add edges, faster for dense graphs (a lot of edges, complete graph would be super). 
- # for u, v in product(gt, gt): - # if (u[0], v[0]) in G1.edges and ( - # u[1], v[1] - # ) in G2.edges and G1.edges[u[0], - # v[0]][edge_label] == G2.edges[u[1], - # v[1]][edge_label]: - # gt.add_edge((u[0], u[1]), (v[0], v[1])) - # gt.edges[(u[0], u[1]), (v[0], v[1])].update({ - # edge_label: - # G1.edges[u[0], v[0]][edge_label] - # }) - - # relabel nodes using consecutive integers for convenience of kernel calculation. - # gt = nx.convert_node_labels_to_integers( - # gt, first_label=0, label_attribute='label_orignal') - return gt
- - -
[docs]def graph_deepcopy(G): - """Deep copy a graph, including deep copy of all nodes, edges and - attributes of the graph, nodes and edges. - - Note - ---- - It is the same as the NetworkX function graph.copy(), as far as I know. - """ - # add graph attributes. - labels = {} - for k, v in G.graph.items(): - labels[k] = deepcopy(v) - if G.is_directed(): - G_copy = nx.DiGraph(**labels) - else: - G_copy = nx.Graph(**labels) - - # add nodes - for nd, attrs in G.nodes(data=True): - labels = {} - for k, v in attrs.items(): - labels[k] = deepcopy(v) - G_copy.add_node(nd, **labels) - - # add edges. - for nd1, nd2, attrs in G.edges(data=True): - labels = {} - for k, v in attrs.items(): - labels[k] = deepcopy(v) - G_copy.add_edge(nd1, nd2, **labels) - - return G_copy
- - -
[docs]def graph_isIdentical(G1, G2): - """Check if two graphs are identical, including: same nodes, edges, node - labels/attributes, edge labels/attributes. - - Notes - ---- - 1. The type of graphs has to be the same. - 2. Global/Graph attributes are neglected as they may contain names for graphs. - """ - # check nodes. - nlist1 = [n for n in G1.nodes(data=True)] - nlist2 = [n for n in G2.nodes(data=True)] - if not nlist1 == nlist2: - return False - # check edges. - elist1 = [n for n in G1.edges(data=True)] - elist2 = [n for n in G2.edges(data=True)] - if not elist1 == elist2: - return False - # check graph attributes. - - return True
- - -
[docs]def get_node_labels(Gn, node_label): - """Get node labels of dataset Gn. - """ - nl = set() - for G in Gn: - nl = nl | set(nx.get_node_attributes(G, node_label).values()) - return nl
- - -
[docs]def get_edge_labels(Gn, edge_label): - """Get edge labels of dataset Gn. - """ - el = set() - for G in Gn: - el = el | set(nx.get_edge_attributes(G, edge_label).values()) - return el
-
- -
- -
- - -
-
- -
- -
- - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_build/html/_sources/index.rst.txt b/docs/_build/html/_sources/index.rst.txt deleted file mode 100644 index d2d2ed9..0000000 --- a/docs/_build/html/_sources/index.rst.txt +++ /dev/null @@ -1,20 +0,0 @@ -.. py-graph documentation master file, created by - sphinx-quickstart on Tue Jan 28 17:13:42 2020. - You can adapt this file completely to your liking, but it should at least - contain the root `toctree` directive. - -Welcome to py-graph's documentation! -==================================== - -.. toctree:: - :maxdepth: 2 - :caption: Contents: - - - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`modindex` -* :ref:`search` diff --git a/docs/_build/html/_sources/modules.rst.txt b/docs/_build/html/_sources/modules.rst.txt deleted file mode 100644 index a563717..0000000 --- a/docs/_build/html/_sources/modules.rst.txt +++ /dev/null @@ -1,7 +0,0 @@ -py-graph -======== - -.. toctree:: - :maxdepth: 4 - - pygraph diff --git a/docs/_build/html/_sources/pygraph.rst.txt b/docs/_build/html/_sources/pygraph.rst.txt deleted file mode 100644 index 592eb54..0000000 --- a/docs/_build/html/_sources/pygraph.rst.txt +++ /dev/null @@ -1,17 +0,0 @@ -pygraph package -=============== - -Subpackages ------------ - -.. toctree:: - - pygraph.utils - -Module contents ---------------- - -.. automodule:: pygraph - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_build/html/_sources/pygraph.utils.rst.txt b/docs/_build/html/_sources/pygraph.utils.rst.txt deleted file mode 100644 index 07ec351..0000000 --- a/docs/_build/html/_sources/pygraph.utils.rst.txt +++ /dev/null @@ -1,94 +0,0 @@ -pygraph.utils package -===================== - -Submodules ----------- - -pygraph.utils.graphdataset module ---------------------------------- - -.. 
automodule:: pygraph.utils.graphdataset - :members: - :undoc-members: - :show-inheritance: - -pygraph.utils.graphfiles module -------------------------------- - -.. automodule:: pygraph.utils.graphfiles - :members: - :undoc-members: - :show-inheritance: - -pygraph.utils.ipython\_log module ---------------------------------- - -.. automodule:: pygraph.utils.ipython_log - :members: - :undoc-members: - :show-inheritance: - -pygraph.utils.isNotebook module -------------------------------- - -.. automodule:: pygraph.utils.isNotebook - :members: - :undoc-members: - :show-inheritance: - -pygraph.utils.kernels module ----------------------------- - -.. automodule:: pygraph.utils.kernels - :members: - :undoc-members: - :show-inheritance: - -pygraph.utils.logger2file module --------------------------------- - -.. automodule:: pygraph.utils.logger2file - :members: - :undoc-members: - :show-inheritance: - -pygraph.utils.model\_selection\_precomputed module --------------------------------------------------- - -.. automodule:: pygraph.utils.model_selection_precomputed - :members: - :undoc-members: - :show-inheritance: - -pygraph.utils.parallel module ------------------------------ - -.. automodule:: pygraph.utils.parallel - :members: - :undoc-members: - :show-inheritance: - -pygraph.utils.trie module -------------------------- - -.. automodule:: pygraph.utils.trie - :members: - :undoc-members: - :show-inheritance: - -pygraph.utils.utils module --------------------------- - -.. automodule:: pygraph.utils.utils - :members: - :undoc-members: - :show-inheritance: - - -Module contents ---------------- - -.. 
automodule:: pygraph.utils - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_build/html/_static/ajax-loader.gif b/docs/_build/html/_static/ajax-loader.gif deleted file mode 100644 index 61faf8c..0000000 Binary files a/docs/_build/html/_static/ajax-loader.gif and /dev/null differ diff --git a/docs/_build/html/_static/alabaster.css b/docs/_build/html/_static/alabaster.css deleted file mode 100644 index 25e7738..0000000 --- a/docs/_build/html/_static/alabaster.css +++ /dev/null @@ -1,688 +0,0 @@ -@import url("basic.css"); - -/* -- page layout ----------------------------------------------------------- */ - -body { - font-family: Georgia, serif; - font-size: 17px; - background-color: #fff; - color: #000; - margin: 0; - padding: 0; -} - - -div.document { - width: 940px; - margin: 30px auto 0 auto; -} - -div.documentwrapper { - float: left; - width: 100%; -} - -div.bodywrapper { - margin: 0 0 0 220px; -} - -div.sphinxsidebar { - width: 220px; - font-size: 14px; - line-height: 1.5; -} - -hr { - border: 1px solid #B1B4B6; -} - -div.body { - background-color: #fff; - color: #3E4349; - padding: 0 30px 0 30px; -} - -div.body > .section { - text-align: left; -} - -div.footer { - width: 940px; - margin: 20px auto 30px auto; - font-size: 14px; - color: #888; - text-align: right; -} - -div.footer a { - color: #888; -} - -p.caption { - font-family: inherit; - font-size: inherit; -} - - -div.relations { - display: none; -} - - -div.sphinxsidebar a { - color: #444; - text-decoration: none; - border-bottom: 1px dotted #999; -} - -div.sphinxsidebar a:hover { - border-bottom: 1px solid #999; -} - -div.sphinxsidebarwrapper { - padding: 18px 10px; -} - -div.sphinxsidebarwrapper p.logo { - padding: 0; - margin: -10px 0 0 0px; - text-align: center; -} - -div.sphinxsidebarwrapper h1.logo { - margin-top: -10px; - text-align: center; - margin-bottom: 5px; - text-align: left; -} - -div.sphinxsidebarwrapper h1.logo-name { - margin-top: 0px; -} - -div.sphinxsidebarwrapper 
p.blurb { - margin-top: 0; - font-style: normal; -} - -div.sphinxsidebar h3, -div.sphinxsidebar h4 { - font-family: Georgia, serif; - color: #444; - font-size: 24px; - font-weight: normal; - margin: 0 0 5px 0; - padding: 0; -} - -div.sphinxsidebar h4 { - font-size: 20px; -} - -div.sphinxsidebar h3 a { - color: #444; -} - -div.sphinxsidebar p.logo a, -div.sphinxsidebar h3 a, -div.sphinxsidebar p.logo a:hover, -div.sphinxsidebar h3 a:hover { - border: none; -} - -div.sphinxsidebar p { - color: #555; - margin: 10px 0; -} - -div.sphinxsidebar ul { - margin: 10px 0; - padding: 0; - color: #000; -} - -div.sphinxsidebar ul li.toctree-l1 > a { - font-size: 120%; -} - -div.sphinxsidebar ul li.toctree-l2 > a { - font-size: 110%; -} - -div.sphinxsidebar input { - border: 1px solid #CCC; - font-family: Georgia, serif; - font-size: 1em; -} - -div.sphinxsidebar hr { - border: none; - height: 1px; - color: #AAA; - background: #AAA; - - text-align: left; - margin-left: 0; - width: 50%; -} - -/* -- body styles ----------------------------------------------------------- */ - -a { - color: #004B6B; - text-decoration: underline; -} - -a:hover { - color: #6D4100; - text-decoration: underline; -} - -div.body h1, -div.body h2, -div.body h3, -div.body h4, -div.body h5, -div.body h6 { - font-family: Georgia, serif; - font-weight: normal; - margin: 30px 0px 10px 0px; - padding: 0; -} - -div.body h1 { margin-top: 0; padding-top: 0; font-size: 240%; } -div.body h2 { font-size: 180%; } -div.body h3 { font-size: 150%; } -div.body h4 { font-size: 130%; } -div.body h5 { font-size: 100%; } -div.body h6 { font-size: 100%; } - -a.headerlink { - color: #DDD; - padding: 0 4px; - text-decoration: none; -} - -a.headerlink:hover { - color: #444; - background: #EAEAEA; -} - -div.body p, div.body dd, div.body li { - line-height: 1.4em; -} - -div.admonition { - margin: 20px 0px; - padding: 10px 30px; - background-color: #EEE; - border: 1px solid #CCC; -} - -div.admonition tt.xref, div.admonition code.xref, 
div.admonition a tt { - background-color: #FBFBFB; - border-bottom: 1px solid #fafafa; -} - -div.admonition p.admonition-title { - font-family: Georgia, serif; - font-weight: normal; - font-size: 24px; - margin: 0 0 10px 0; - padding: 0; - line-height: 1; -} - -div.admonition p.last { - margin-bottom: 0; -} - -div.highlight { - background-color: #fff; -} - -dt:target, .highlight { - background: #FAF3E8; -} - -div.warning { - background-color: #FCC; - border: 1px solid #FAA; -} - -div.danger { - background-color: #FCC; - border: 1px solid #FAA; - -moz-box-shadow: 2px 2px 4px #D52C2C; - -webkit-box-shadow: 2px 2px 4px #D52C2C; - box-shadow: 2px 2px 4px #D52C2C; -} - -div.error { - background-color: #FCC; - border: 1px solid #FAA; - -moz-box-shadow: 2px 2px 4px #D52C2C; - -webkit-box-shadow: 2px 2px 4px #D52C2C; - box-shadow: 2px 2px 4px #D52C2C; -} - -div.caution { - background-color: #FCC; - border: 1px solid #FAA; -} - -div.attention { - background-color: #FCC; - border: 1px solid #FAA; -} - -div.important { - background-color: #EEE; - border: 1px solid #CCC; -} - -div.note { - background-color: #EEE; - border: 1px solid #CCC; -} - -div.tip { - background-color: #EEE; - border: 1px solid #CCC; -} - -div.hint { - background-color: #EEE; - border: 1px solid #CCC; -} - -div.seealso { - background-color: #EEE; - border: 1px solid #CCC; -} - -div.topic { - background-color: #EEE; -} - -p.admonition-title { - display: inline; -} - -p.admonition-title:after { - content: ":"; -} - -pre, tt, code { - font-family: 'Consolas', 'Menlo', 'Deja Vu Sans Mono', 'Bitstream Vera Sans Mono', monospace; - font-size: 0.9em; -} - -.hll { - background-color: #FFC; - margin: 0 -12px; - padding: 0 12px; - display: block; -} - -img.screenshot { -} - -tt.descname, tt.descclassname, code.descname, code.descclassname { - font-size: 0.95em; -} - -tt.descname, code.descname { - padding-right: 0.08em; -} - -img.screenshot { - -moz-box-shadow: 2px 2px 4px #EEE; - -webkit-box-shadow: 2px 2px 4px 
#EEE; - box-shadow: 2px 2px 4px #EEE; -} - -table.docutils { - border: 1px solid #888; - -moz-box-shadow: 2px 2px 4px #EEE; - -webkit-box-shadow: 2px 2px 4px #EEE; - box-shadow: 2px 2px 4px #EEE; -} - -table.docutils td, table.docutils th { - border: 1px solid #888; - padding: 0.25em 0.7em; -} - -table.field-list, table.footnote { - border: none; - -moz-box-shadow: none; - -webkit-box-shadow: none; - box-shadow: none; -} - -table.footnote { - margin: 15px 0; - width: 100%; - border: 1px solid #EEE; - background: #FDFDFD; - font-size: 0.9em; -} - -table.footnote + table.footnote { - margin-top: -15px; - border-top: none; -} - -table.field-list th { - padding: 0 0.8em 0 0; -} - -table.field-list td { - padding: 0; -} - -table.field-list p { - margin-bottom: 0.8em; -} - -/* Cloned from - * https://github.com/sphinx-doc/sphinx/commit/ef60dbfce09286b20b7385333d63a60321784e68 - */ -.field-name { - -moz-hyphens: manual; - -ms-hyphens: manual; - -webkit-hyphens: manual; - hyphens: manual; -} - -table.footnote td.label { - width: .1px; - padding: 0.3em 0 0.3em 0.5em; -} - -table.footnote td { - padding: 0.3em 0.5em; -} - -dl { - margin: 0; - padding: 0; -} - -dl dd { - margin-left: 30px; -} - -blockquote { - margin: 0 0 0 30px; - padding: 0; -} - -ul, ol { - /* Matches the 30px from the narrow-screen "li > ul" selector below */ - margin: 10px 0 10px 30px; - padding: 0; -} - -pre { - background: #EEE; - padding: 7px 30px; - margin: 15px 0px; - line-height: 1.3em; -} - -div.viewcode-block:target { - background: #ffd; -} - -dl pre, blockquote pre, li pre { - margin-left: 0; - padding-left: 30px; -} - -tt, code { - background-color: #ecf0f3; - color: #222; - /* padding: 1px 2px; */ -} - -tt.xref, code.xref, a tt { - background-color: #FBFBFB; - border-bottom: 1px solid #fff; -} - -a.reference { - text-decoration: none; - border-bottom: 1px dotted #004B6B; -} - -/* Don't put an underline on images */ -a.image-reference, a.image-reference:hover { - border-bottom: none; -} - 
-a.reference:hover { - border-bottom: 1px solid #6D4100; -} - -a.footnote-reference { - text-decoration: none; - font-size: 0.7em; - vertical-align: top; - border-bottom: 1px dotted #004B6B; -} - -a.footnote-reference:hover { - border-bottom: 1px solid #6D4100; -} - -a:hover tt, a:hover code { - background: #EEE; -} - - -@media screen and (max-width: 870px) { - - div.sphinxsidebar { - display: none; - } - - div.document { - width: 100%; - - } - - div.documentwrapper { - margin-left: 0; - margin-top: 0; - margin-right: 0; - margin-bottom: 0; - } - - div.bodywrapper { - margin-top: 0; - margin-right: 0; - margin-bottom: 0; - margin-left: 0; - } - - ul { - margin-left: 0; - } - - li > ul { - /* Matches the 30px from the "ul, ol" selector above */ - margin-left: 30px; - } - - .document { - width: auto; - } - - .footer { - width: auto; - } - - .bodywrapper { - margin: 0; - } - - .footer { - width: auto; - } - - .github { - display: none; - } - - - -} - - - -@media screen and (max-width: 875px) { - - body { - margin: 0; - padding: 20px 30px; - } - - div.documentwrapper { - float: none; - background: #fff; - } - - div.sphinxsidebar { - display: block; - float: none; - width: 102.5%; - margin: 50px -30px -20px -30px; - padding: 10px 20px; - background: #333; - color: #FFF; - } - - div.sphinxsidebar h3, div.sphinxsidebar h4, div.sphinxsidebar p, - div.sphinxsidebar h3 a { - color: #fff; - } - - div.sphinxsidebar a { - color: #AAA; - } - - div.sphinxsidebar p.logo { - display: none; - } - - div.document { - width: 100%; - margin: 0; - } - - div.footer { - display: none; - } - - div.bodywrapper { - margin: 0; - } - - div.body { - min-height: 0; - padding: 0; - } - - .rtd_doc_footer { - display: none; - } - - .document { - width: auto; - } - - .footer { - width: auto; - } - - .footer { - width: auto; - } - - .github { - display: none; - } -} - - -/* misc. 
*/ - -.revsys-inline { - display: none!important; -} - -/* Make nested-list/multi-paragraph items look better in Releases changelog - * pages. Without this, docutils' magical list fuckery causes inconsistent - * formatting between different release sub-lists. - */ -div#changelog > div.section > ul > li > p:only-child { - margin-bottom: 0; -} - -/* Hide fugly table cell borders in ..bibliography:: directive output */ -table.docutils.citation, table.docutils.citation td, table.docutils.citation th { - border: none; - /* Below needed in some edge cases; if not applied, bottom shadows appear */ - -moz-box-shadow: none; - -webkit-box-shadow: none; - box-shadow: none; -} - - -/* relbar */ - -.related { - line-height: 30px; - width: 100%; - font-size: 0.9rem; -} - -.related.top { - border-bottom: 1px solid #EEE; - margin-bottom: 20px; -} - -.related.bottom { - border-top: 1px solid #EEE; -} - -.related ul { - padding: 0; - margin: 0; - list-style: none; -} - -.related li { - display: inline; -} - -nav#rellinks { - float: right; -} - -nav#rellinks li+li:before { - content: "|"; -} - -nav#breadcrumbs li+li:before { - content: "\00BB"; -} - -/* Hide certain items when printing */ -@media print { - div.related { - display: none; - } -} \ No newline at end of file diff --git a/docs/_build/html/_static/basic.css b/docs/_build/html/_static/basic.css deleted file mode 100644 index 104f076..0000000 --- a/docs/_build/html/_static/basic.css +++ /dev/null @@ -1,676 +0,0 @@ -/* - * basic.css - * ~~~~~~~~~ - * - * Sphinx stylesheet -- basic theme. - * - * :copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS. - * :license: BSD, see LICENSE for details. 
- * - */ - -/* -- main layout ----------------------------------------------------------- */ - -div.clearer { - clear: both; -} - -/* -- relbar ---------------------------------------------------------------- */ - -div.related { - width: 100%; - font-size: 90%; -} - -div.related h3 { - display: none; -} - -div.related ul { - margin: 0; - padding: 0 0 0 10px; - list-style: none; -} - -div.related li { - display: inline; -} - -div.related li.right { - float: right; - margin-right: 5px; -} - -/* -- sidebar --------------------------------------------------------------- */ - -div.sphinxsidebarwrapper { - padding: 10px 5px 0 10px; -} - -div.sphinxsidebar { - float: left; - width: 230px; - margin-left: -100%; - font-size: 90%; - word-wrap: break-word; - overflow-wrap : break-word; -} - -div.sphinxsidebar ul { - list-style: none; -} - -div.sphinxsidebar ul ul, -div.sphinxsidebar ul.want-points { - margin-left: 20px; - list-style: square; -} - -div.sphinxsidebar ul ul { - margin-top: 0; - margin-bottom: 0; -} - -div.sphinxsidebar form { - margin-top: 10px; -} - -div.sphinxsidebar input { - border: 1px solid #98dbcc; - font-family: sans-serif; - font-size: 1em; -} - -div.sphinxsidebar #searchbox form.search { - overflow: hidden; -} - -div.sphinxsidebar #searchbox input[type="text"] { - float: left; - width: 80%; - padding: 0.25em; - box-sizing: border-box; -} - -div.sphinxsidebar #searchbox input[type="submit"] { - float: left; - width: 20%; - border-left: none; - padding: 0.25em; - box-sizing: border-box; -} - - -img { - border: 0; - max-width: 100%; -} - -/* -- search page ----------------------------------------------------------- */ - -ul.search { - margin: 10px 0 0 20px; - padding: 0; -} - -ul.search li { - padding: 5px 0 5px 20px; - background-image: url(file.png); - background-repeat: no-repeat; - background-position: 0 7px; -} - -ul.search li a { - font-weight: bold; -} - -ul.search li div.context { - color: #888; - margin: 2px 0 0 30px; - text-align: left; -} - 
-ul.keywordmatches li.goodmatch a { - font-weight: bold; -} - -/* -- index page ------------------------------------------------------------ */ - -table.contentstable { - width: 90%; - margin-left: auto; - margin-right: auto; -} - -table.contentstable p.biglink { - line-height: 150%; -} - -a.biglink { - font-size: 1.3em; -} - -span.linkdescr { - font-style: italic; - padding-top: 5px; - font-size: 90%; -} - -/* -- general index --------------------------------------------------------- */ - -table.indextable { - width: 100%; -} - -table.indextable td { - text-align: left; - vertical-align: top; -} - -table.indextable ul { - margin-top: 0; - margin-bottom: 0; - list-style-type: none; -} - -table.indextable > tbody > tr > td > ul { - padding-left: 0em; -} - -table.indextable tr.pcap { - height: 10px; -} - -table.indextable tr.cap { - margin-top: 10px; - background-color: #f2f2f2; -} - -img.toggler { - margin-right: 3px; - margin-top: 3px; - cursor: pointer; -} - -div.modindex-jumpbox { - border-top: 1px solid #ddd; - border-bottom: 1px solid #ddd; - margin: 1em 0 1em 0; - padding: 0.4em; -} - -div.genindex-jumpbox { - border-top: 1px solid #ddd; - border-bottom: 1px solid #ddd; - margin: 1em 0 1em 0; - padding: 0.4em; -} - -/* -- domain module index --------------------------------------------------- */ - -table.modindextable td { - padding: 2px; - border-collapse: collapse; -} - -/* -- general body styles --------------------------------------------------- */ - -div.body { - min-width: 450px; - max-width: 800px; -} - -div.body p, div.body dd, div.body li, div.body blockquote { - -moz-hyphens: auto; - -ms-hyphens: auto; - -webkit-hyphens: auto; - hyphens: auto; -} - -a.headerlink { - visibility: hidden; -} - -h1:hover > a.headerlink, -h2:hover > a.headerlink, -h3:hover > a.headerlink, -h4:hover > a.headerlink, -h5:hover > a.headerlink, -h6:hover > a.headerlink, -dt:hover > a.headerlink, -caption:hover > a.headerlink, -p.caption:hover > a.headerlink, 
-div.code-block-caption:hover > a.headerlink { - visibility: visible; -} - -div.body p.caption { - text-align: inherit; -} - -div.body td { - text-align: left; -} - -.first { - margin-top: 0 !important; -} - -p.rubric { - margin-top: 30px; - font-weight: bold; -} - -img.align-left, .figure.align-left, object.align-left { - clear: left; - float: left; - margin-right: 1em; -} - -img.align-right, .figure.align-right, object.align-right { - clear: right; - float: right; - margin-left: 1em; -} - -img.align-center, .figure.align-center, object.align-center { - display: block; - margin-left: auto; - margin-right: auto; -} - -.align-left { - text-align: left; -} - -.align-center { - text-align: center; -} - -.align-right { - text-align: right; -} - -/* -- sidebars -------------------------------------------------------------- */ - -div.sidebar { - margin: 0 0 0.5em 1em; - border: 1px solid #ddb; - padding: 7px 7px 0 7px; - background-color: #ffe; - width: 40%; - float: right; -} - -p.sidebar-title { - font-weight: bold; -} - -/* -- topics ---------------------------------------------------------------- */ - -div.topic { - border: 1px solid #ccc; - padding: 7px 7px 0 7px; - margin: 10px 0 10px 0; -} - -p.topic-title { - font-size: 1.1em; - font-weight: bold; - margin-top: 10px; -} - -/* -- admonitions ----------------------------------------------------------- */ - -div.admonition { - margin-top: 10px; - margin-bottom: 10px; - padding: 7px; -} - -div.admonition dt { - font-weight: bold; -} - -div.admonition dl { - margin-bottom: 0; -} - -p.admonition-title { - margin: 0px 10px 5px 0px; - font-weight: bold; -} - -div.body p.centered { - text-align: center; - margin-top: 25px; -} - -/* -- tables ---------------------------------------------------------------- */ - -table.docutils { - border: 0; - border-collapse: collapse; -} - -table.align-center { - margin-left: auto; - margin-right: auto; -} - -table caption span.caption-number { - font-style: italic; -} - -table caption 
span.caption-text { -} - -table.docutils td, table.docutils th { - padding: 1px 8px 1px 5px; - border-top: 0; - border-left: 0; - border-right: 0; - border-bottom: 1px solid #aaa; -} - -table.footnote td, table.footnote th { - border: 0 !important; -} - -th { - text-align: left; - padding-right: 5px; -} - -table.citation { - border-left: solid 1px gray; - margin-left: 1px; -} - -table.citation td { - border-bottom: none; -} - -/* -- figures --------------------------------------------------------------- */ - -div.figure { - margin: 0.5em; - padding: 0.5em; -} - -div.figure p.caption { - padding: 0.3em; -} - -div.figure p.caption span.caption-number { - font-style: italic; -} - -div.figure p.caption span.caption-text { -} - -/* -- field list styles ----------------------------------------------------- */ - -table.field-list td, table.field-list th { - border: 0 !important; -} - -.field-list ul { - margin: 0; - padding-left: 1em; -} - -.field-list p { - margin: 0; -} - -.field-name { - -moz-hyphens: manual; - -ms-hyphens: manual; - -webkit-hyphens: manual; - hyphens: manual; -} - -/* -- hlist styles ---------------------------------------------------------- */ - -table.hlist td { - vertical-align: top; -} - - -/* -- other body styles ----------------------------------------------------- */ - -ol.arabic { - list-style: decimal; -} - -ol.loweralpha { - list-style: lower-alpha; -} - -ol.upperalpha { - list-style: upper-alpha; -} - -ol.lowerroman { - list-style: lower-roman; -} - -ol.upperroman { - list-style: upper-roman; -} - -dl { - margin-bottom: 15px; -} - -dd p { - margin-top: 0px; -} - -dd ul, dd table { - margin-bottom: 10px; -} - -dd { - margin-top: 3px; - margin-bottom: 10px; - margin-left: 30px; -} - -dt:target, span.highlighted { - background-color: #fbe54e; -} - -rect.highlighted { - fill: #fbe54e; -} - -dl.glossary dt { - font-weight: bold; - font-size: 1.1em; -} - -.optional { - font-size: 1.3em; -} - -.sig-paren { - font-size: larger; -} - 
-.versionmodified { - font-style: italic; -} - -.system-message { - background-color: #fda; - padding: 5px; - border: 3px solid red; -} - -.footnote:target { - background-color: #ffa; -} - -.line-block { - display: block; - margin-top: 1em; - margin-bottom: 1em; -} - -.line-block .line-block { - margin-top: 0; - margin-bottom: 0; - margin-left: 1.5em; -} - -.guilabel, .menuselection { - font-family: sans-serif; -} - -.accelerator { - text-decoration: underline; -} - -.classifier { - font-style: oblique; -} - -abbr, acronym { - border-bottom: dotted 1px; - cursor: help; -} - -/* -- code displays --------------------------------------------------------- */ - -pre { - overflow: auto; - overflow-y: hidden; /* fixes display issues on Chrome browsers */ -} - -span.pre { - -moz-hyphens: none; - -ms-hyphens: none; - -webkit-hyphens: none; - hyphens: none; -} - -td.linenos pre { - padding: 5px 0px; - border: 0; - background-color: transparent; - color: #aaa; -} - -table.highlighttable { - margin-left: 0.5em; -} - -table.highlighttable td { - padding: 0 0.5em 0 0.5em; -} - -div.code-block-caption { - padding: 2px 5px; - font-size: small; -} - -div.code-block-caption code { - background-color: transparent; -} - -div.code-block-caption + div > div.highlight > pre { - margin-top: 0; -} - -div.code-block-caption span.caption-number { - padding: 0.1em 0.3em; - font-style: italic; -} - -div.code-block-caption span.caption-text { -} - -div.literal-block-wrapper { - padding: 1em 1em 0; -} - -div.literal-block-wrapper div.highlight { - margin: 0; -} - -code.descname { - background-color: transparent; - font-weight: bold; - font-size: 1.2em; -} - -code.descclassname { - background-color: transparent; -} - -code.xref, a code { - background-color: transparent; - font-weight: bold; -} - -h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { - background-color: transparent; -} - -.viewcode-link { - float: right; -} - -.viewcode-back { - float: right; - font-family: sans-serif; -} - 
-div.viewcode-block:target { - margin: -1px -10px; - padding: 0 10px; -} - -/* -- math display ---------------------------------------------------------- */ - -img.math { - vertical-align: middle; -} - -div.body div.math p { - text-align: center; -} - -span.eqno { - float: right; -} - -span.eqno a.headerlink { - position: relative; - left: 0px; - z-index: 1; -} - -div.math:hover a.headerlink { - visibility: visible; -} - -/* -- printout stylesheet --------------------------------------------------- */ - -@media print { - div.document, - div.documentwrapper, - div.bodywrapper { - margin: 0 !important; - width: 100%; - } - - div.sphinxsidebar, - div.related, - div.footer, - #top-link { - display: none; - } -} \ No newline at end of file diff --git a/docs/_build/html/_static/comment-bright.png b/docs/_build/html/_static/comment-bright.png deleted file mode 100644 index 15e27ed..0000000 Binary files a/docs/_build/html/_static/comment-bright.png and /dev/null differ diff --git a/docs/_build/html/_static/comment-close.png b/docs/_build/html/_static/comment-close.png deleted file mode 100644 index 4d91bcf..0000000 Binary files a/docs/_build/html/_static/comment-close.png and /dev/null differ diff --git a/docs/_build/html/_static/comment.png b/docs/_build/html/_static/comment.png deleted file mode 100644 index dfbc0cb..0000000 Binary files a/docs/_build/html/_static/comment.png and /dev/null differ diff --git a/docs/_build/html/_static/css/badge_only.css b/docs/_build/html/_static/css/badge_only.css deleted file mode 100644 index 3c33cef..0000000 --- a/docs/_build/html/_static/css/badge_only.css +++ /dev/null @@ -1 +0,0 @@ -.fa:before{-webkit-font-smoothing:antialiased}.clearfix{*zoom:1}.clearfix:before,.clearfix:after{display:table;content:""}.clearfix:after{clear:both}@font-face{font-family:FontAwesome;font-weight:normal;font-style:normal;src:url("../fonts/fontawesome-webfont.eot");src:url("../fonts/fontawesome-webfont.eot?#iefix") 
format("embedded-opentype"),url("../fonts/fontawesome-webfont.woff") format("woff"),url("../fonts/fontawesome-webfont.ttf") format("truetype"),url("../fonts/fontawesome-webfont.svg#FontAwesome") format("svg")}.fa:before{display:inline-block;font-family:FontAwesome;font-style:normal;font-weight:normal;line-height:1;text-decoration:inherit}a .fa{display:inline-block;text-decoration:inherit}li .fa{display:inline-block}li .fa-large:before,li .fa-large:before{width:1.875em}ul.fas{list-style-type:none;margin-left:2em;text-indent:-0.8em}ul.fas li .fa{width:.8em}ul.fas li .fa-large:before,ul.fas li .fa-large:before{vertical-align:baseline}.fa-book:before{content:""}.icon-book:before{content:""}.fa-caret-down:before{content:""}.icon-caret-down:before{content:""}.fa-caret-up:before{content:""}.icon-caret-up:before{content:""}.fa-caret-left:before{content:""}.icon-caret-left:before{content:""}.fa-caret-right:before{content:""}.icon-caret-right:before{content:""}.rst-versions{position:fixed;bottom:0;left:0;width:300px;color:#fcfcfc;background:#1f1d1d;font-family:"Lato","proxima-nova","Helvetica Neue",Arial,sans-serif;z-index:400}.rst-versions a{color:#2980B9;text-decoration:none}.rst-versions .rst-badge-small{display:none}.rst-versions .rst-current-version{padding:12px;background-color:#272525;display:block;text-align:right;font-size:90%;cursor:pointer;color:#27AE60;*zoom:1}.rst-versions .rst-current-version:before,.rst-versions .rst-current-version:after{display:table;content:""}.rst-versions .rst-current-version:after{clear:both}.rst-versions .rst-current-version .fa{color:#fcfcfc}.rst-versions .rst-current-version .fa-book{float:left}.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version.rst-out-of-date{background-color:#E74C3C;color:#fff}.rst-versions .rst-current-version.rst-active-old-version{background-color:#F1C40F;color:#000}.rst-versions.shift-up{height:auto;max-height:100%;overflow-y:scroll}.rst-versions.shift-up 
.rst-other-versions{display:block}.rst-versions .rst-other-versions{font-size:90%;padding:12px;color:gray;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 0;padding:0;border-top:solid 1px #413d3d}.rst-versions .rst-other-versions dd{display:inline-block;margin:0}.rst-versions .rst-other-versions dd a{display:inline-block;padding:6px;color:#fcfcfc}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px;max-height:90%}.rst-versions.rst-badge .icon-book{float:none}.rst-versions.rst-badge .fa-book{float:none}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .fa-book{float:left}.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge .rst-current-version{width:auto;height:30px;line-height:30px;padding:0 6px;display:block;text-align:center}@media screen and (max-width: 768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}} diff --git a/docs/_build/html/_static/css/theme.css b/docs/_build/html/_static/css/theme.css deleted file mode 100644 index aed8cef..0000000 --- a/docs/_build/html/_static/css/theme.css +++ /dev/null @@ -1,6 +0,0 @@ -/* sphinx_rtd_theme version 0.4.3 | MIT license */ -/* Built 20190212 16:02 */ -*{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}article,aside,details,figcaption,figure,footer,header,hgroup,nav,section{display:block}audio,canvas,video{display:inline-block;*display:inline;*zoom:1}audio:not([controls]){display:none}[hidden]{display:none}*{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}html{font-size:100%;-webkit-text-size-adjust:100%;-ms-text-size-adjust:100%}body{margin:0}a:hover,a:active{outline:0}abbr[title]{border-bottom:1px 
dotted}b,strong{font-weight:bold}blockquote{margin:0}dfn{font-style:italic}ins{background:#ff9;color:#000;text-decoration:none}mark{background:#ff0;color:#000;font-style:italic;font-weight:bold}pre,code,.rst-content tt,.rst-content code,kbd,samp{font-family:monospace,serif;_font-family:"courier new",monospace;font-size:1em}pre{white-space:pre}q{quotes:none}q:before,q:after{content:"";content:none}small{font-size:85%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sup{top:-0.5em}sub{bottom:-0.25em}ul,ol,dl{margin:0;padding:0;list-style:none;list-style-image:none}li{list-style:none}dd{margin:0}img{border:0;-ms-interpolation-mode:bicubic;vertical-align:middle;max-width:100%}svg:not(:root){overflow:hidden}figure{margin:0}form{margin:0}fieldset{border:0;margin:0;padding:0}label{cursor:pointer}legend{border:0;*margin-left:-7px;padding:0;white-space:normal}button,input,select,textarea{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle}button,input{line-height:normal}button,input[type="button"],input[type="reset"],input[type="submit"]{cursor:pointer;-webkit-appearance:button;*overflow:visible}button[disabled],input[disabled]{cursor:default}input[type="checkbox"],input[type="radio"]{box-sizing:border-box;padding:0;*width:13px;*height:13px}input[type="search"]{-webkit-appearance:textfield;-moz-box-sizing:content-box;-webkit-box-sizing:content-box;box-sizing:content-box}input[type="search"]::-webkit-search-decoration,input[type="search"]::-webkit-search-cancel-button{-webkit-appearance:none}button::-moz-focus-inner,input::-moz-focus-inner{border:0;padding:0}textarea{overflow:auto;vertical-align:top;resize:vertical}table{border-collapse:collapse;border-spacing:0}td{vertical-align:top}.chromeframe{margin:.2em 0;background:#ccc;color:#000;padding:.2em 0}.ir{display:block;border:0;text-indent:-999em;overflow:hidden;background-color:transparent;background-repeat:no-repeat;text-align:left;direction:ltr;*line-height:0}.ir 
br{display:none}.hidden{display:none !important;visibility:hidden}.visuallyhidden{border:0;clip:rect(0 0 0 0);height:1px;margin:-1px;overflow:hidden;padding:0;position:absolute;width:1px}.visuallyhidden.focusable:active,.visuallyhidden.focusable:focus{clip:auto;height:auto;margin:0;overflow:visible;position:static;width:auto}.invisible{visibility:hidden}.relative{position:relative}big,small{font-size:100%}@media print{html,body,section{background:none !important}*{box-shadow:none !important;text-shadow:none !important;filter:none !important;-ms-filter:none !important}a,a:visited{text-decoration:underline}.ir a:after,a[href^="javascript:"]:after,a[href^="#"]:after{content:""}pre,blockquote{page-break-inside:avoid}thead{display:table-header-group}tr,img{page-break-inside:avoid}img{max-width:100% !important}@page{margin:.5cm}p,h2,.rst-content .toctree-wrapper p.caption,h3{orphans:3;widows:3}h2,.rst-content .toctree-wrapper p.caption,h3{page-break-after:avoid}}.fa:before,.wy-menu-vertical li span.toctree-expand:before,.wy-menu-vertical li.on a span.toctree-expand:before,.wy-menu-vertical li.current>a span.toctree-expand:before,.rst-content .admonition-title:before,.rst-content h1 .headerlink:before,.rst-content h2 .headerlink:before,.rst-content h3 .headerlink:before,.rst-content h4 .headerlink:before,.rst-content h5 .headerlink:before,.rst-content h6 .headerlink:before,.rst-content dl dt .headerlink:before,.rst-content p.caption .headerlink:before,.rst-content table>caption .headerlink:before,.rst-content .code-block-caption .headerlink:before,.rst-content tt.download span:first-child:before,.rst-content code.download span:first-child:before,.icon:before,.wy-dropdown .caret:before,.wy-inline-validate.wy-inline-validate-success .wy-input-context:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before,.wy-inline-validate.wy-inline-validate-info 
.wy-input-context:before,.wy-alert,.rst-content .note,.rst-content .attention,.rst-content .caution,.rst-content .danger,.rst-content .error,.rst-content .hint,.rst-content .important,.rst-content .tip,.rst-content .warning,.rst-content .seealso,.rst-content .admonition-todo,.rst-content .admonition,.btn,input[type="text"],input[type="password"],input[type="email"],input[type="url"],input[type="date"],input[type="month"],input[type="time"],input[type="datetime"],input[type="datetime-local"],input[type="week"],input[type="number"],input[type="search"],input[type="tel"],input[type="color"],select,textarea,.wy-menu-vertical li.on a,.wy-menu-vertical li.current>a,.wy-side-nav-search>a,.wy-side-nav-search .wy-dropdown>a,.wy-nav-top a{-webkit-font-smoothing:antialiased}.clearfix{*zoom:1}.clearfix:before,.clearfix:after{display:table;content:""}.clearfix:after{clear:both}/*! - * Font Awesome 4.7.0 by @davegandy - http://fontawesome.io - @fontawesome - * License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License) - */@font-face{font-family:'FontAwesome';src:url("../fonts/fontawesome-webfont.eot?v=4.7.0");src:url("../fonts/fontawesome-webfont.eot?#iefix&v=4.7.0") format("embedded-opentype"),url("../fonts/fontawesome-webfont.woff2?v=4.7.0") format("woff2"),url("../fonts/fontawesome-webfont.woff?v=4.7.0") format("woff"),url("../fonts/fontawesome-webfont.ttf?v=4.7.0") format("truetype"),url("../fonts/fontawesome-webfont.svg?v=4.7.0#fontawesomeregular") format("svg");font-weight:normal;font-style:normal}.fa,.wy-menu-vertical li span.toctree-expand,.wy-menu-vertical li.on a span.toctree-expand,.wy-menu-vertical li.current>a span.toctree-expand,.rst-content .admonition-title,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content dl dt .headerlink,.rst-content p.caption .headerlink,.rst-content table>caption .headerlink,.rst-content 
.code-block-caption .headerlink,.rst-content tt.download span:first-child,.rst-content code.download span:first-child,.icon{display:inline-block;font:normal normal normal 14px/1 FontAwesome;font-size:inherit;text-rendering:auto;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.fa-lg{font-size:1.3333333333em;line-height:.75em;vertical-align:-15%}.fa-2x{font-size:2em}.fa-3x{font-size:3em}.fa-4x{font-size:4em}.fa-5x{font-size:5em}.fa-fw{width:1.2857142857em;text-align:center}.fa-ul{padding-left:0;margin-left:2.1428571429em;list-style-type:none}.fa-ul>li{position:relative}.fa-li{position:absolute;left:-2.1428571429em;width:2.1428571429em;top:.1428571429em;text-align:center}.fa-li.fa-lg{left:-1.8571428571em}.fa-border{padding:.2em .25em .15em;border:solid 0.08em #eee;border-radius:.1em}.fa-pull-left{float:left}.fa-pull-right{float:right}.fa.fa-pull-left,.wy-menu-vertical li span.fa-pull-left.toctree-expand,.wy-menu-vertical li.on a span.fa-pull-left.toctree-expand,.wy-menu-vertical li.current>a span.fa-pull-left.toctree-expand,.rst-content .fa-pull-left.admonition-title,.rst-content h1 .fa-pull-left.headerlink,.rst-content h2 .fa-pull-left.headerlink,.rst-content h3 .fa-pull-left.headerlink,.rst-content h4 .fa-pull-left.headerlink,.rst-content h5 .fa-pull-left.headerlink,.rst-content h6 .fa-pull-left.headerlink,.rst-content dl dt .fa-pull-left.headerlink,.rst-content p.caption .fa-pull-left.headerlink,.rst-content table>caption .fa-pull-left.headerlink,.rst-content .code-block-caption .fa-pull-left.headerlink,.rst-content tt.download span.fa-pull-left:first-child,.rst-content code.download span.fa-pull-left:first-child,.fa-pull-left.icon{margin-right:.3em}.fa.fa-pull-right,.wy-menu-vertical li span.fa-pull-right.toctree-expand,.wy-menu-vertical li.on a span.fa-pull-right.toctree-expand,.wy-menu-vertical li.current>a span.fa-pull-right.toctree-expand,.rst-content .fa-pull-right.admonition-title,.rst-content h1 .fa-pull-right.headerlink,.rst-content h2 
.fa-pull-right.headerlink,.rst-content h3 .fa-pull-right.headerlink,.rst-content h4 .fa-pull-right.headerlink,.rst-content h5 .fa-pull-right.headerlink,.rst-content h6 .fa-pull-right.headerlink,.rst-content dl dt .fa-pull-right.headerlink,.rst-content p.caption .fa-pull-right.headerlink,.rst-content table>caption .fa-pull-right.headerlink,.rst-content .code-block-caption .fa-pull-right.headerlink,.rst-content tt.download span.fa-pull-right:first-child,.rst-content code.download span.fa-pull-right:first-child,.fa-pull-right.icon{margin-left:.3em}.pull-right{float:right}.pull-left{float:left}.fa.pull-left,.wy-menu-vertical li span.pull-left.toctree-expand,.wy-menu-vertical li.on a span.pull-left.toctree-expand,.wy-menu-vertical li.current>a span.pull-left.toctree-expand,.rst-content .pull-left.admonition-title,.rst-content h1 .pull-left.headerlink,.rst-content h2 .pull-left.headerlink,.rst-content h3 .pull-left.headerlink,.rst-content h4 .pull-left.headerlink,.rst-content h5 .pull-left.headerlink,.rst-content h6 .pull-left.headerlink,.rst-content dl dt .pull-left.headerlink,.rst-content p.caption .pull-left.headerlink,.rst-content table>caption .pull-left.headerlink,.rst-content .code-block-caption .pull-left.headerlink,.rst-content tt.download span.pull-left:first-child,.rst-content code.download span.pull-left:first-child,.pull-left.icon{margin-right:.3em}.fa.pull-right,.wy-menu-vertical li span.pull-right.toctree-expand,.wy-menu-vertical li.on a span.pull-right.toctree-expand,.wy-menu-vertical li.current>a span.pull-right.toctree-expand,.rst-content .pull-right.admonition-title,.rst-content h1 .pull-right.headerlink,.rst-content h2 .pull-right.headerlink,.rst-content h3 .pull-right.headerlink,.rst-content h4 .pull-right.headerlink,.rst-content h5 .pull-right.headerlink,.rst-content h6 .pull-right.headerlink,.rst-content dl dt .pull-right.headerlink,.rst-content p.caption .pull-right.headerlink,.rst-content table>caption .pull-right.headerlink,.rst-content 
.code-block-caption .pull-right.headerlink,.rst-content tt.download span.pull-right:first-child,.rst-content code.download span.pull-right:first-child,.pull-right.icon{margin-left:.3em}.fa-spin{-webkit-animation:fa-spin 2s infinite linear;animation:fa-spin 2s infinite linear}.fa-pulse{-webkit-animation:fa-spin 1s infinite steps(8);animation:fa-spin 1s infinite steps(8)}@-webkit-keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}100%{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}@keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}100%{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}.fa-rotate-90{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=1)";-webkit-transform:rotate(90deg);-ms-transform:rotate(90deg);transform:rotate(90deg)}.fa-rotate-180{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2)";-webkit-transform:rotate(180deg);-ms-transform:rotate(180deg);transform:rotate(180deg)}.fa-rotate-270{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=3)";-webkit-transform:rotate(270deg);-ms-transform:rotate(270deg);transform:rotate(270deg)}.fa-flip-horizontal{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1)";-webkit-transform:scale(-1, 1);-ms-transform:scale(-1, 1);transform:scale(-1, 1)}.fa-flip-vertical{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1)";-webkit-transform:scale(1, -1);-ms-transform:scale(1, -1);transform:scale(1, -1)}:root .fa-rotate-90,:root .fa-rotate-180,:root .fa-rotate-270,:root .fa-flip-horizontal,:root 
.fa-flip-vertical{filter:none}.fa-stack{position:relative;display:inline-block;width:2em;height:2em;line-height:2em;vertical-align:middle}.fa-stack-1x,.fa-stack-2x{position:absolute;left:0;width:100%;text-align:center}.fa-stack-1x{line-height:inherit}.fa-stack-2x{font-size:2em}.fa-inverse{color:#fff}.fa-glass:before{content:""}.fa-music:before{content:""}.fa-search:before,.icon-search:before{content:""}.fa-envelope-o:before{content:""}.fa-heart:before{content:""}.fa-star:before{content:""}.fa-star-o:before{content:""}.fa-user:before{content:""}.fa-film:before{content:""}.fa-th-large:before{content:""}.fa-th:before{content:""}.fa-th-list:before{content:""}.fa-check:before{content:""}.fa-remove:before,.fa-close:before,.fa-times:before{content:""}.fa-search-plus:before{content:""}.fa-search-minus:before{content:""}.fa-power-off:before{content:""}.fa-signal:before{content:""}.fa-gear:before,.fa-cog:before{content:""}.fa-trash-o:before{content:""}.fa-home:before,.icon-home:before{content:""}.fa-file-o:before{content:""}.fa-clock-o:before{content:""}.fa-road:before{content:""}.fa-download:before,.rst-content tt.download span:first-child:before,.rst-content code.download 
span:first-child:before{content:""}.fa-arrow-circle-o-down:before{content:""}.fa-arrow-circle-o-up:before{content:""}.fa-inbox:before{content:""}.fa-play-circle-o:before{content:""}.fa-rotate-right:before,.fa-repeat:before{content:""}.fa-refresh:before{content:""}.fa-list-alt:before{content:""}.fa-lock:before{content:""}.fa-flag:before{content:""}.fa-headphones:before{content:""}.fa-volume-off:before{content:""}.fa-volume-down:before{content:""}.fa-volume-up:before{content:""}.fa-qrcode:before{content:""}.fa-barcode:before{content:""}.fa-tag:before{content:""}.fa-tags:before{content:""}.fa-book:before,.icon-book:before{content:""}.fa-bookmark:before{content:""}.fa-print:before{content:""}.fa-camera:before{content:""}.fa-font:before{content:""}.fa-bold:before{content:""}.fa-italic:before{content:""}.fa-text-height:before{content:""}.fa-text-width:before{content:""}.fa-align-left:before{content:""}.fa-align-center:before{content:""}.fa-align-right:before{content:""}.fa-align-justify:before{content:""}.fa-list:before{content:""}.fa-dedent:before,.fa-outdent:before{content:""}.fa-indent:before{content:""}.fa-video-camera:before{content:""}.fa-photo:before,.fa-image:before,.fa-picture-o:before{content:""}.fa-pencil:before{content:""}.fa-map-marker:before{content:""}.fa-adjust:before{content:""}.fa-tint:before{content:""}.fa-edit:before,.fa-pencil-square-o:before{content:""}.fa-share-square-o:before{content:""}.fa-check-square-o:before{content:""}.fa-arrows:before{content:""}.fa-step-backward:before{content:""}.fa-fast-backward:before{content:""}.fa-backward:before{content:""}.fa-play:before{content:""}.fa-pause:before{content:""}.fa-stop:before{content:""}.fa-forward:before{content:""}.fa-fast-forward:before{content:""}.fa-step-forward:before{content:""}.fa-eject:before{content:""}.fa-chevron-left:before{content:""}.fa-chevron-right:before{content:""}.fa-plus-circle:before{content:""}.fa-minus-circle:before{content
:""}.fa-times-circle:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before{content:""}.fa-check-circle:before,.wy-inline-validate.wy-inline-validate-success .wy-input-context:before{content:""}.fa-question-circle:before{content:""}.fa-info-circle:before{content:""}.fa-crosshairs:before{content:""}.fa-times-circle-o:before{content:""}.fa-check-circle-o:before{content:""}.fa-ban:before{content:""}.fa-arrow-left:before{content:""}.fa-arrow-right:before{content:""}.fa-arrow-up:before{content:""}.fa-arrow-down:before{content:""}.fa-mail-forward:before,.fa-share:before{content:""}.fa-expand:before{content:""}.fa-compress:before{content:""}.fa-plus:before{content:""}.fa-minus:before{content:""}.fa-asterisk:before{content:""}.fa-exclamation-circle:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,.rst-content .admonition-title:before{content:""}.fa-gift:before{content:""}.fa-leaf:before{content:""}.fa-fire:before,.icon-fire:before{content:""}.fa-eye:before{content:""}.fa-eye-slash:before{content:""}.fa-warning:before,.fa-exclamation-triangle:before{content:""}.fa-plane:before{content:""}.fa-calendar:before{content:""}.fa-random:before{content:""}.fa-comment:before{content:""}.fa-magnet:before{content:""}.fa-chevron-up:before{content:""}.fa-chevron-down:before{content:""}.fa-retweet:before{content:""}.fa-shopping-cart:before{content:""}.fa-folder:before{content:""}.fa-folder-open:before{content:""}.fa-arrows-v:before{content:""}.fa-arrows-h:before{content:""}.fa-bar-chart-o:before,.fa-bar-chart:before{content:""}.fa-twitter-square:before{content:""}.fa-facebook-square:before{content:""}.fa-camera-retro:before{content:""}.fa-key:before{content:""}.fa-gears:before,.fa-cogs:before{content:""}.fa-comments:before{content:""}.fa-thumbs-o-up:before{content:""}.fa-thumbs-o-down:before{content:""}.fa-star-half:before
{content:""}.fa-heart-o:before{content:""}.fa-sign-out:before{content:""}.fa-linkedin-square:before{content:""}.fa-thumb-tack:before{content:""}.fa-external-link:before{content:""}.fa-sign-in:before{content:""}.fa-trophy:before{content:""}.fa-github-square:before{content:""}.fa-upload:before{content:""}.fa-lemon-o:before{content:""}.fa-phone:before{content:""}.fa-square-o:before{content:""}.fa-bookmark-o:before{content:""}.fa-phone-square:before{content:""}.fa-twitter:before{content:""}.fa-facebook-f:before,.fa-facebook:before{content:""}.fa-github:before,.icon-github:before{content:""}.fa-unlock:before{content:""}.fa-credit-card:before{content:""}.fa-feed:before,.fa-rss:before{content:""}.fa-hdd-o:before{content:""}.fa-bullhorn:before{content:""}.fa-bell:before{content:""}.fa-certificate:before{content:""}.fa-hand-o-right:before{content:""}.fa-hand-o-left:before{content:""}.fa-hand-o-up:before{content:""}.fa-hand-o-down:before{content:""}.fa-arrow-circle-left:before,.icon-circle-arrow-left:before{content:""}.fa-arrow-circle-right:before,.icon-circle-arrow-right:before{content:""}.fa-arrow-circle-up:before{content:""}.fa-arrow-circle-down:before{content:""}.fa-globe:before{content:""}.fa-wrench:before{content:""}.fa-tasks:before{content:""}.fa-filter:before{content:""}.fa-briefcase:before{content:""}.fa-arrows-alt:before{content:""}.fa-group:before,.fa-users:before{content:""}.fa-chain:before,.fa-link:before,.icon-link:before{content:""}.fa-cloud:before{content:""}.fa-flask:before{content:""}.fa-cut:before,.fa-scissors:before{content:""}.fa-copy:before,.fa-files-o:before{content:""}.fa-paperclip:before{content:""}.fa-save:before,.fa-floppy-o:before{content:""}.fa-square:before{content:""}.fa-navicon:before,.fa-reorder:before,.fa-bars:before{content:""}.fa-list-ul:before{content:""}.fa-list-ol:before{content:""}.fa-strikethrough:before{content:""}.fa-underline:before{content:""}.fa-table:before{content:""}.fa-magi
c:before{content:""}.fa-truck:before{content:""}.fa-pinterest:before{content:""}.fa-pinterest-square:before{content:""}.fa-google-plus-square:before{content:""}.fa-google-plus:before{content:""}.fa-money:before{content:""}.fa-caret-down:before,.wy-dropdown .caret:before,.icon-caret-down:before{content:""}.fa-caret-up:before{content:""}.fa-caret-left:before{content:""}.fa-caret-right:before{content:""}.fa-columns:before{content:""}.fa-unsorted:before,.fa-sort:before{content:""}.fa-sort-down:before,.fa-sort-desc:before{content:""}.fa-sort-up:before,.fa-sort-asc:before{content:""}.fa-envelope:before{content:""}.fa-linkedin:before{content:""}.fa-rotate-left:before,.fa-undo:before{content:""}.fa-legal:before,.fa-gavel:before{content:""}.fa-dashboard:before,.fa-tachometer:before{content:""}.fa-comment-o:before{content:""}.fa-comments-o:before{content:""}.fa-flash:before,.fa-bolt:before{content:""}.fa-sitemap:before{content:""}.fa-umbrella:before{content:""}.fa-paste:before,.fa-clipboard:before{content:""}.fa-lightbulb-o:before{content:""}.fa-exchange:before{content:""}.fa-cloud-download:before{content:""}.fa-cloud-upload:before{content:""}.fa-user-md:before{content:""}.fa-stethoscope:before{content:""}.fa-suitcase:before{content:""}.fa-bell-o:before{content:""}.fa-coffee:before{content:""}.fa-cutlery:before{content:""}.fa-file-text-o:before{content:""}.fa-building-o:before{content:""}.fa-hospital-o:before{content:""}.fa-ambulance:before{content:""}.fa-medkit:before{content:""}.fa-fighter-jet:before{content:""}.fa-beer:before{content:""}.fa-h-square:before{content:""}.fa-plus-square:before{content:""}.fa-angle-double-left:before{content:""}.fa-angle-double-right:before{content:""}.fa-angle-double-up:before{content:""}.fa-angle-double-down:before{content:""}.fa-angle-left:before{content:""}.fa-angle-right:before{content:""}.fa-angle-up:before{content:""}.fa-angle-down:before{content:""}.fa-desktop:before{content:""}.fa-l
aptop:before{content:""}.fa-tablet:before{content:""}.fa-mobile-phone:before,.fa-mobile:before{content:""}.fa-circle-o:before{content:""}.fa-quote-left:before{content:""}.fa-quote-right:before{content:""}.fa-spinner:before{content:""}.fa-circle:before{content:""}.fa-mail-reply:before,.fa-reply:before{content:""}.fa-github-alt:before{content:""}.fa-folder-o:before{content:""}.fa-folder-open-o:before{content:""}.fa-smile-o:before{content:""}.fa-frown-o:before{content:""}.fa-meh-o:before{content:""}.fa-gamepad:before{content:""}.fa-keyboard-o:before{content:""}.fa-flag-o:before{content:""}.fa-flag-checkered:before{content:""}.fa-terminal:before{content:""}.fa-code:before{content:""}.fa-mail-reply-all:before,.fa-reply-all:before{content:""}.fa-star-half-empty:before,.fa-star-half-full:before,.fa-star-half-o:before{content:""}.fa-location-arrow:before{content:""}.fa-crop:before{content:""}.fa-code-fork:before{content:""}.fa-unlink:before,.fa-chain-broken:before{content:""}.fa-question:before{content:""}.fa-info:before{content:""}.fa-exclamation:before{content:""}.fa-superscript:before{content:""}.fa-subscript:before{content:""}.fa-eraser:before{content:""}.fa-puzzle-piece:before{content:""}.fa-microphone:before{content:""}.fa-microphone-slash:before{content:""}.fa-shield:before{content:""}.fa-calendar-o:before{content:""}.fa-fire-extinguisher:before{content:""}.fa-rocket:before{content:""}.fa-maxcdn:before{content:""}.fa-chevron-circle-left:before{content:""}.fa-chevron-circle-right:before{content:""}.fa-chevron-circle-up:before{content:""}.fa-chevron-circle-down:before{content:""}.fa-html5:before{content:""}.fa-css3:before{content:""}.fa-anchor:before{content:""}.fa-unlock-alt:before{content:""}.fa-bullseye:before{content:""}.fa-ellipsis-h:before{content:""}.fa-ellipsis-v:before{content:""}.fa-rss-square:before{content:""}.fa-play-circle:before{content:""}.fa-ticket:before{content:""}.fa-minus-square:before{content:
""}.fa-minus-square-o:before,.wy-menu-vertical li.on a span.toctree-expand:before,.wy-menu-vertical li.current>a span.toctree-expand:before{content:""}.fa-level-up:before{content:""}.fa-level-down:before{content:""}.fa-check-square:before{content:""}.fa-pencil-square:before{content:""}.fa-external-link-square:before{content:""}.fa-share-square:before{content:""}.fa-compass:before{content:""}.fa-toggle-down:before,.fa-caret-square-o-down:before{content:""}.fa-toggle-up:before,.fa-caret-square-o-up:before{content:""}.fa-toggle-right:before,.fa-caret-square-o-right:before{content:""}.fa-euro:before,.fa-eur:before{content:""}.fa-gbp:before{content:""}.fa-dollar:before,.fa-usd:before{content:""}.fa-rupee:before,.fa-inr:before{content:""}.fa-cny:before,.fa-rmb:before,.fa-yen:before,.fa-jpy:before{content:""}.fa-ruble:before,.fa-rouble:before,.fa-rub:before{content:""}.fa-won:before,.fa-krw:before{content:""}.fa-bitcoin:before,.fa-btc:before{content:""}.fa-file:before{content:""}.fa-file-text:before{content:""}.fa-sort-alpha-asc:before{content:""}.fa-sort-alpha-desc:before{content:""}.fa-sort-amount-asc:before{content:""}.fa-sort-amount-desc:before{content:""}.fa-sort-numeric-asc:before{content:""}.fa-sort-numeric-desc:before{content:""}.fa-thumbs-up:before{content:""}.fa-thumbs-down:before{content:""}.fa-youtube-square:before{content:""}.fa-youtube:before{content:""}.fa-xing:before{content:""}.fa-xing-square:before{content:""}.fa-youtube-play:before{content:""}.fa-dropbox:before{content:""}.fa-stack-overflow:before{content:""}.fa-instagram:before{content:""}.fa-flickr:before{content:""}.fa-adn:before{content:""}.fa-bitbucket:before,.icon-bitbucket:before{content:""}.fa-bitbucket-square:before{content:""}.fa-tumblr:before{content:""}.fa-tumblr-square:before{content:""}.fa-long-arrow-down:before{content:""}.fa-long-arrow-up:before{content:""}.fa-long-arrow-left:before{content:""}.fa-long-arrow-right:before{content:""}.fa-apple
:before{content:""}.fa-windows:before{content:""}.fa-android:before{content:""}.fa-linux:before{content:""}.fa-dribbble:before{content:""}.fa-skype:before{content:""}.fa-foursquare:before{content:""}.fa-trello:before{content:""}.fa-female:before{content:""}.fa-male:before{content:""}.fa-gittip:before,.fa-gratipay:before{content:""}.fa-sun-o:before{content:""}.fa-moon-o:before{content:""}.fa-archive:before{content:""}.fa-bug:before{content:""}.fa-vk:before{content:""}.fa-weibo:before{content:""}.fa-renren:before{content:""}.fa-pagelines:before{content:""}.fa-stack-exchange:before{content:""}.fa-arrow-circle-o-right:before{content:""}.fa-arrow-circle-o-left:before{content:""}.fa-toggle-left:before,.fa-caret-square-o-left:before{content:""}.fa-dot-circle-o:before{content:""}.fa-wheelchair:before{content:""}.fa-vimeo-square:before{content:""}.fa-turkish-lira:before,.fa-try:before{content:""}.fa-plus-square-o:before,.wy-menu-vertical li span.toctree-expand:before{content:""}.fa-space-shuttle:before{content:""}.fa-slack:before{content:""}.fa-envelope-square:before{content:""}.fa-wordpress:before{content:""}.fa-openid:before{content:""}.fa-institution:before,.fa-bank:before,.fa-university:before{content:""}.fa-mortar-board:before,.fa-graduation-cap:before{content:""}.fa-yahoo:before{content:""}.fa-google:before{content:""}.fa-reddit:before{content:""}.fa-reddit-square:before{content:""}.fa-stumbleupon-circle:before{content:""}.fa-stumbleupon:before{content:""}.fa-delicious:before{content:""}.fa-digg:before{content:""}.fa-pied-piper-pp:before{content:""}.fa-pied-piper-alt:before{content:""}.fa-drupal:before{content:""}.fa-joomla:before{content:""}.fa-language:before{content:""}.fa-fax:before{content:""}.fa-building:before{content:""}.fa-child:before{content:""}.fa-paw:before{content:""}.fa-spoon:before{content:""}.fa-cube:before{content:""}.fa-cubes:before{content:""}.fa-behance:before{content:""}.fa-behance-square:bef
ore{content:""}.fa-steam:before{content:""}.fa-steam-square:before{content:""}.fa-recycle:before{content:""}.fa-automobile:before,.fa-car:before{content:""}.fa-cab:before,.fa-taxi:before{content:""}.fa-tree:before{content:""}.fa-spotify:before{content:""}.fa-deviantart:before{content:""}.fa-soundcloud:before{content:""}.fa-database:before{content:""}.fa-file-pdf-o:before{content:""}.fa-file-word-o:before{content:""}.fa-file-excel-o:before{content:""}.fa-file-powerpoint-o:before{content:""}.fa-file-photo-o:before,.fa-file-picture-o:before,.fa-file-image-o:before{content:""}.fa-file-zip-o:before,.fa-file-archive-o:before{content:""}.fa-file-sound-o:before,.fa-file-audio-o:before{content:""}.fa-file-movie-o:before,.fa-file-video-o:before{content:""}.fa-file-code-o:before{content:""}.fa-vine:before{content:""}.fa-codepen:before{content:""}.fa-jsfiddle:before{content:""}.fa-life-bouy:before,.fa-life-buoy:before,.fa-life-saver:before,.fa-support:before,.fa-life-ring:before{content:""}.fa-circle-o-notch:before{content:""}.fa-ra:before,.fa-resistance:before,.fa-rebel:before{content:""}.fa-ge:before,.fa-empire:before{content:""}.fa-git-square:before{content:""}.fa-git:before{content:""}.fa-y-combinator-square:before,.fa-yc-square:before,.fa-hacker-news:before{content:""}.fa-tencent-weibo:before{content:""}.fa-qq:before{content:""}.fa-wechat:before,.fa-weixin:before{content:""}.fa-send:before,.fa-paper-plane:before{content:""}.fa-send-o:before,.fa-paper-plane-o:before{content:""}.fa-history:before{content:""}.fa-circle-thin:before{content:""}.fa-header:before{content:""}.fa-paragraph:before{content:""}.fa-sliders:before{content:""}.fa-share-alt:before{content:""}.fa-share-alt-square:before{content:""}.fa-bomb:before{content:""}.fa-soccer-ball-o:before,.fa-futbol-o:before{content:""}.fa-tty:before{content:""}.fa-binoculars:before{content:""}.fa-plug:before{content:""}.fa-slideshare:before{content:""}.fa-twitch:before{content:""}
.fa-yelp:before{content:""}.fa-newspaper-o:before{content:""}.fa-wifi:before{content:""}.fa-calculator:before{content:""}.fa-paypal:before{content:""}.fa-google-wallet:before{content:""}.fa-cc-visa:before{content:""}.fa-cc-mastercard:before{content:""}.fa-cc-discover:before{content:""}.fa-cc-amex:before{content:""}.fa-cc-paypal:before{content:""}.fa-cc-stripe:before{content:""}.fa-bell-slash:before{content:""}.fa-bell-slash-o:before{content:""}.fa-trash:before{content:""}.fa-copyright:before{content:""}.fa-at:before{content:""}.fa-eyedropper:before{content:""}.fa-paint-brush:before{content:""}.fa-birthday-cake:before{content:""}.fa-area-chart:before{content:""}.fa-pie-chart:before{content:""}.fa-line-chart:before{content:""}.fa-lastfm:before{content:""}.fa-lastfm-square:before{content:""}.fa-toggle-off:before{content:""}.fa-toggle-on:before{content:""}.fa-bicycle:before{content:""}.fa-bus:before{content:""}.fa-ioxhost:before{content:""}.fa-angellist:before{content:""}.fa-cc:before{content:""}.fa-shekel:before,.fa-sheqel:before,.fa-ils:before{content:""}.fa-meanpath:before{content:""}.fa-buysellads:before{content:""}.fa-connectdevelop:before{content:""}.fa-dashcube:before{content:""}.fa-forumbee:before{content:""}.fa-leanpub:before{content:""}.fa-sellsy:before{content:""}.fa-shirtsinbulk:before{content:""}.fa-simplybuilt:before{content:""}.fa-skyatlas:before{content:""}.fa-cart-plus:before{content:""}.fa-cart-arrow-down:before{content:""}.fa-diamond:before{content:""}.fa-ship:before{content:""}.fa-user-secret:before{content:""}.fa-motorcycle:before{content:""}.fa-street-view:before{content:""}.fa-heartbeat:before{content:""}.fa-venus:before{content:""}.fa-mars:before{content:""}.fa-mercury:before{content:""}.fa-intersex:before,.fa-transgender:before{content:""}.fa-transgender-alt:before{content:""}.fa-venus-double:before{content:""}.fa-mars-double:before{content:""}.fa-venus-mars:before{content:""}.fa-mars-st
roke:before{content:""}.fa-mars-stroke-v:before{content:""}.fa-mars-stroke-h:before{content:""}.fa-neuter:before{content:""}.fa-genderless:before{content:""}.fa-facebook-official:before{content:""}.fa-pinterest-p:before{content:""}.fa-whatsapp:before{content:""}.fa-server:before{content:""}.fa-user-plus:before{content:""}.fa-user-times:before{content:""}.fa-hotel:before,.fa-bed:before{content:""}.fa-viacoin:before{content:""}.fa-train:before{content:""}.fa-subway:before{content:""}.fa-medium:before{content:""}.fa-yc:before,.fa-y-combinator:before{content:""}.fa-optin-monster:before{content:""}.fa-opencart:before{content:""}.fa-expeditedssl:before{content:""}.fa-battery-4:before,.fa-battery:before,.fa-battery-full:before{content:""}.fa-battery-3:before,.fa-battery-three-quarters:before{content:""}.fa-battery-2:before,.fa-battery-half:before{content:""}.fa-battery-1:before,.fa-battery-quarter:before{content:""}.fa-battery-0:before,.fa-battery-empty:before{content:""}.fa-mouse-pointer:before{content:""}.fa-i-cursor:before{content:""}.fa-object-group:before{content:""}.fa-object-ungroup:before{content:""}.fa-sticky-note:before{content:""}.fa-sticky-note-o:before{content:""}.fa-cc-jcb:before{content:""}.fa-cc-diners-club:before{content:""}.fa-clone:before{content:""}.fa-balance-scale:before{content:""}.fa-hourglass-o:before{content:""}.fa-hourglass-1:before,.fa-hourglass-start:before{content:""}.fa-hourglass-2:before,.fa-hourglass-half:before{content:""}.fa-hourglass-3:before,.fa-hourglass-end:before{content:""}.fa-hourglass:before{content:""}.fa-hand-grab-o:before,.fa-hand-rock-o:before{content:""}.fa-hand-stop-o:before,.fa-hand-paper-o:before{content:""}.fa-hand-scissors-o:before{content:""}.fa-hand-lizard-o:before{content:""}.fa-hand-spock-o:before{content:""}.fa-hand-pointer-o:before{content:""}.fa-hand-peace-o:before{content:""}.fa-trademark:before{content:""}.fa-registered:before{content:""}.fa-creative-commons:befor
e{content:""}.fa-gg:before{content:""}.fa-gg-circle:before{content:""}.fa-tripadvisor:before{content:""}.fa-odnoklassniki:before{content:""}.fa-odnoklassniki-square:before{content:""}.fa-get-pocket:before{content:""}.fa-wikipedia-w:before{content:""}.fa-safari:before{content:""}.fa-chrome:before{content:""}.fa-firefox:before{content:""}.fa-opera:before{content:""}.fa-internet-explorer:before{content:""}.fa-tv:before,.fa-television:before{content:""}.fa-contao:before{content:""}.fa-500px:before{content:""}.fa-amazon:before{content:""}.fa-calendar-plus-o:before{content:""}.fa-calendar-minus-o:before{content:""}.fa-calendar-times-o:before{content:""}.fa-calendar-check-o:before{content:""}.fa-industry:before{content:""}.fa-map-pin:before{content:""}.fa-map-signs:before{content:""}.fa-map-o:before{content:""}.fa-map:before{content:""}.fa-commenting:before{content:""}.fa-commenting-o:before{content:""}.fa-houzz:before{content:""}.fa-vimeo:before{content:""}.fa-black-tie:before{content:""}.fa-fonticons:before{content:""}.fa-reddit-alien:before{content:""}.fa-edge:before{content:""}.fa-credit-card-alt:before{content:""}.fa-codiepie:before{content:""}.fa-modx:before{content:""}.fa-fort-awesome:before{content:""}.fa-usb:before{content:""}.fa-product-hunt:before{content:""}.fa-mixcloud:before{content:""}.fa-scribd:before{content:""}.fa-pause-circle:before{content:""}.fa-pause-circle-o:before{content:""}.fa-stop-circle:before{content:""}.fa-stop-circle-o:before{content:""}.fa-shopping-bag:before{content:""}.fa-shopping-basket:before{content:""}.fa-hashtag:before{content:""}.fa-bluetooth:before{content:""}.fa-bluetooth-b:before{content:""}.fa-percent:before{content:""}.fa-gitlab:before,.icon-gitlab:before{content:""}.fa-wpbeginner:before{content:""}.fa-wpforms:before{content:""}.fa-envira:before{content:""}.fa-universal-access:before{content:""}.fa-wheelchair-alt:before{content:""}.fa-question-circle-o:before{content:""}
.fa-blind:before{content:""}.fa-audio-description:before{content:""}.fa-volume-control-phone:before{content:""}.fa-braille:before{content:""}.fa-assistive-listening-systems:before{content:""}.fa-asl-interpreting:before,.fa-american-sign-language-interpreting:before{content:""}.fa-deafness:before,.fa-hard-of-hearing:before,.fa-deaf:before{content:""}.fa-glide:before{content:""}.fa-glide-g:before{content:""}.fa-signing:before,.fa-sign-language:before{content:""}.fa-low-vision:before{content:""}.fa-viadeo:before{content:""}.fa-viadeo-square:before{content:""}.fa-snapchat:before{content:""}.fa-snapchat-ghost:before{content:""}.fa-snapchat-square:before{content:""}.fa-pied-piper:before{content:""}.fa-first-order:before{content:""}.fa-yoast:before{content:""}.fa-themeisle:before{content:""}.fa-google-plus-circle:before,.fa-google-plus-official:before{content:""}.fa-fa:before,.fa-font-awesome:before{content:""}.fa-handshake-o:before{content:""}.fa-envelope-open:before{content:""}.fa-envelope-open-o:before{content:""}.fa-linode:before{content:""}.fa-address-book:before{content:""}.fa-address-book-o:before{content:""}.fa-vcard:before,.fa-address-card:before{content:""}.fa-vcard-o:before,.fa-address-card-o:before{content:""}.fa-user-circle:before{content:""}.fa-user-circle-o:before{content:""}.fa-user-o:before{content:""}.fa-id-badge:before{content:""}.fa-drivers-license:before,.fa-id-card:before{content:""}.fa-drivers-license-o:before,.fa-id-card-o:before{content:""}.fa-quora:before{content:""}.fa-free-code-camp:before{content:""}.fa-telegram:before{content:""}.fa-thermometer-4:before,.fa-thermometer:before,.fa-thermometer-full:before{content:""}.fa-thermometer-3:before,.fa-thermometer-three-quarters:before{content:""}.fa-thermometer-2:before,.fa-thermometer-half:before{content:""}.fa-thermometer-1:before,.fa-thermometer-quarter:before{content:""}.fa-thermometer-0:before,.fa-thermometer-empty:before{content:""}.fa-shower:before{con
tent:""}.fa-bathtub:before,.fa-s15:before,.fa-bath:before{content:""}.fa-podcast:before{content:""}.fa-window-maximize:before{content:""}.fa-window-minimize:before{content:""}.fa-window-restore:before{content:""}.fa-times-rectangle:before,.fa-window-close:before{content:""}.fa-times-rectangle-o:before,.fa-window-close-o:before{content:""}.fa-bandcamp:before{content:""}.fa-grav:before{content:""}.fa-etsy:before{content:""}.fa-imdb:before{content:""}.fa-ravelry:before{content:""}.fa-eercast:before{content:""}.fa-microchip:before{content:""}.fa-snowflake-o:before{content:""}.fa-superpowers:before{content:""}.fa-wpexplorer:before{content:""}.fa-meetup:before{content:""}.sr-only{position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0, 0, 0, 0);border:0}.sr-only-focusable:active,.sr-only-focusable:focus{position:static;width:auto;height:auto;margin:0;overflow:visible;clip:auto}.fa,.wy-menu-vertical li span.toctree-expand,.wy-menu-vertical li.on a span.toctree-expand,.wy-menu-vertical li.current>a span.toctree-expand,.rst-content .admonition-title,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content dl dt .headerlink,.rst-content p.caption .headerlink,.rst-content table>caption .headerlink,.rst-content .code-block-caption .headerlink,.rst-content tt.download span:first-child,.rst-content code.download span:first-child,.icon,.wy-dropdown .caret,.wy-inline-validate.wy-inline-validate-success .wy-input-context,.wy-inline-validate.wy-inline-validate-danger .wy-input-context,.wy-inline-validate.wy-inline-validate-warning .wy-input-context,.wy-inline-validate.wy-inline-validate-info .wy-input-context{font-family:inherit}.fa:before,.wy-menu-vertical li span.toctree-expand:before,.wy-menu-vertical li.on a span.toctree-expand:before,.wy-menu-vertical li.current>a span.toctree-expand:before,.rst-content 
.admonition-title:before,.rst-content h1 .headerlink:before,.rst-content h2 .headerlink:before,.rst-content h3 .headerlink:before,.rst-content h4 .headerlink:before,.rst-content h5 .headerlink:before,.rst-content h6 .headerlink:before,.rst-content dl dt .headerlink:before,.rst-content p.caption .headerlink:before,.rst-content table>caption .headerlink:before,.rst-content .code-block-caption .headerlink:before,.rst-content tt.download span:first-child:before,.rst-content code.download span:first-child:before,.icon:before,.wy-dropdown .caret:before,.wy-inline-validate.wy-inline-validate-success .wy-input-context:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before{font-family:"FontAwesome";display:inline-block;font-style:normal;font-weight:normal;line-height:1;text-decoration:inherit}a .fa,a .wy-menu-vertical li span.toctree-expand,.wy-menu-vertical li a span.toctree-expand,.wy-menu-vertical li.on a span.toctree-expand,.wy-menu-vertical li.current>a span.toctree-expand,a .rst-content .admonition-title,.rst-content a .admonition-title,a .rst-content h1 .headerlink,.rst-content h1 a .headerlink,a .rst-content h2 .headerlink,.rst-content h2 a .headerlink,a .rst-content h3 .headerlink,.rst-content h3 a .headerlink,a .rst-content h4 .headerlink,.rst-content h4 a .headerlink,a .rst-content h5 .headerlink,.rst-content h5 a .headerlink,a .rst-content h6 .headerlink,.rst-content h6 a .headerlink,a .rst-content dl dt .headerlink,.rst-content dl dt a .headerlink,a .rst-content p.caption .headerlink,.rst-content p.caption a .headerlink,a .rst-content table>caption .headerlink,.rst-content table>caption a .headerlink,a .rst-content .code-block-caption .headerlink,.rst-content .code-block-caption a .headerlink,a .rst-content tt.download span:first-child,.rst-content tt.download a span:first-child,a .rst-content 
code.download span:first-child,.rst-content code.download a span:first-child,a .icon{display:inline-block;text-decoration:inherit}.btn .fa,.btn .wy-menu-vertical li span.toctree-expand,.wy-menu-vertical li .btn span.toctree-expand,.btn .wy-menu-vertical li.on a span.toctree-expand,.wy-menu-vertical li.on a .btn span.toctree-expand,.btn .wy-menu-vertical li.current>a span.toctree-expand,.wy-menu-vertical li.current>a .btn span.toctree-expand,.btn .rst-content .admonition-title,.rst-content .btn .admonition-title,.btn .rst-content h1 .headerlink,.rst-content h1 .btn .headerlink,.btn .rst-content h2 .headerlink,.rst-content h2 .btn .headerlink,.btn .rst-content h3 .headerlink,.rst-content h3 .btn .headerlink,.btn .rst-content h4 .headerlink,.rst-content h4 .btn .headerlink,.btn .rst-content h5 .headerlink,.rst-content h5 .btn .headerlink,.btn .rst-content h6 .headerlink,.rst-content h6 .btn .headerlink,.btn .rst-content dl dt .headerlink,.rst-content dl dt .btn .headerlink,.btn .rst-content p.caption .headerlink,.rst-content p.caption .btn .headerlink,.btn .rst-content table>caption .headerlink,.rst-content table>caption .btn .headerlink,.btn .rst-content .code-block-caption .headerlink,.rst-content .code-block-caption .btn .headerlink,.btn .rst-content tt.download span:first-child,.rst-content tt.download .btn span:first-child,.btn .rst-content code.download span:first-child,.rst-content code.download .btn span:first-child,.btn .icon,.nav .fa,.nav .wy-menu-vertical li span.toctree-expand,.wy-menu-vertical li .nav span.toctree-expand,.nav .wy-menu-vertical li.on a span.toctree-expand,.wy-menu-vertical li.on a .nav span.toctree-expand,.nav .wy-menu-vertical li.current>a span.toctree-expand,.wy-menu-vertical li.current>a .nav span.toctree-expand,.nav .rst-content .admonition-title,.rst-content .nav .admonition-title,.nav .rst-content h1 .headerlink,.rst-content h1 .nav .headerlink,.nav .rst-content h2 .headerlink,.rst-content h2 .nav .headerlink,.nav .rst-content h3 
.headerlink,.rst-content h3 .nav .headerlink,.nav .rst-content h4 .headerlink,.rst-content h4 .nav .headerlink,.nav .rst-content h5 .headerlink,.rst-content h5 .nav .headerlink,.nav .rst-content h6 .headerlink,.rst-content h6 .nav .headerlink,.nav .rst-content dl dt .headerlink,.rst-content dl dt .nav .headerlink,.nav .rst-content p.caption .headerlink,.rst-content p.caption .nav .headerlink,.nav .rst-content table>caption .headerlink,.rst-content table>caption .nav .headerlink,.nav .rst-content .code-block-caption .headerlink,.rst-content .code-block-caption .nav .headerlink,.nav .rst-content tt.download span:first-child,.rst-content tt.download .nav span:first-child,.nav .rst-content code.download span:first-child,.rst-content code.download .nav span:first-child,.nav .icon{display:inline}.btn .fa.fa-large,.btn .wy-menu-vertical li span.fa-large.toctree-expand,.wy-menu-vertical li .btn span.fa-large.toctree-expand,.btn .rst-content .fa-large.admonition-title,.rst-content .btn .fa-large.admonition-title,.btn .rst-content h1 .fa-large.headerlink,.rst-content h1 .btn .fa-large.headerlink,.btn .rst-content h2 .fa-large.headerlink,.rst-content h2 .btn .fa-large.headerlink,.btn .rst-content h3 .fa-large.headerlink,.rst-content h3 .btn .fa-large.headerlink,.btn .rst-content h4 .fa-large.headerlink,.rst-content h4 .btn .fa-large.headerlink,.btn .rst-content h5 .fa-large.headerlink,.rst-content h5 .btn .fa-large.headerlink,.btn .rst-content h6 .fa-large.headerlink,.rst-content h6 .btn .fa-large.headerlink,.btn .rst-content dl dt .fa-large.headerlink,.rst-content dl dt .btn .fa-large.headerlink,.btn .rst-content p.caption .fa-large.headerlink,.rst-content p.caption .btn .fa-large.headerlink,.btn .rst-content table>caption .fa-large.headerlink,.rst-content table>caption .btn .fa-large.headerlink,.btn .rst-content .code-block-caption .fa-large.headerlink,.rst-content .code-block-caption .btn .fa-large.headerlink,.btn .rst-content tt.download 
span.fa-large:first-child,.rst-content tt.download .btn span.fa-large:first-child,.btn .rst-content code.download span.fa-large:first-child,.rst-content code.download .btn span.fa-large:first-child,.btn .fa-large.icon,.nav .fa.fa-large,.nav .wy-menu-vertical li span.fa-large.toctree-expand,.wy-menu-vertical li .nav span.fa-large.toctree-expand,.nav .rst-content .fa-large.admonition-title,.rst-content .nav .fa-large.admonition-title,.nav .rst-content h1 .fa-large.headerlink,.rst-content h1 .nav .fa-large.headerlink,.nav .rst-content h2 .fa-large.headerlink,.rst-content h2 .nav .fa-large.headerlink,.nav .rst-content h3 .fa-large.headerlink,.rst-content h3 .nav .fa-large.headerlink,.nav .rst-content h4 .fa-large.headerlink,.rst-content h4 .nav .fa-large.headerlink,.nav .rst-content h5 .fa-large.headerlink,.rst-content h5 .nav .fa-large.headerlink,.nav .rst-content h6 .fa-large.headerlink,.rst-content h6 .nav .fa-large.headerlink,.nav .rst-content dl dt .fa-large.headerlink,.rst-content dl dt .nav .fa-large.headerlink,.nav .rst-content p.caption .fa-large.headerlink,.rst-content p.caption .nav .fa-large.headerlink,.nav .rst-content table>caption .fa-large.headerlink,.rst-content table>caption .nav .fa-large.headerlink,.nav .rst-content .code-block-caption .fa-large.headerlink,.rst-content .code-block-caption .nav .fa-large.headerlink,.nav .rst-content tt.download span.fa-large:first-child,.rst-content tt.download .nav span.fa-large:first-child,.nav .rst-content code.download span.fa-large:first-child,.rst-content code.download .nav span.fa-large:first-child,.nav .fa-large.icon{line-height:.9em}.btn .fa.fa-spin,.btn .wy-menu-vertical li span.fa-spin.toctree-expand,.wy-menu-vertical li .btn span.fa-spin.toctree-expand,.btn .rst-content .fa-spin.admonition-title,.rst-content .btn .fa-spin.admonition-title,.btn .rst-content h1 .fa-spin.headerlink,.rst-content h1 .btn .fa-spin.headerlink,.btn .rst-content h2 .fa-spin.headerlink,.rst-content h2 .btn .fa-spin.headerlink,.btn 
.rst-content h3 .fa-spin.headerlink,.rst-content h3 .btn .fa-spin.headerlink,.btn .rst-content h4 .fa-spin.headerlink,.rst-content h4 .btn .fa-spin.headerlink,.btn .rst-content h5 .fa-spin.headerlink,.rst-content h5 .btn .fa-spin.headerlink,.btn .rst-content h6 .fa-spin.headerlink,.rst-content h6 .btn .fa-spin.headerlink,.btn .rst-content dl dt .fa-spin.headerlink,.rst-content dl dt .btn .fa-spin.headerlink,.btn .rst-content p.caption .fa-spin.headerlink,.rst-content p.caption .btn .fa-spin.headerlink,.btn .rst-content table>caption .fa-spin.headerlink,.rst-content table>caption .btn .fa-spin.headerlink,.btn .rst-content .code-block-caption .fa-spin.headerlink,.rst-content .code-block-caption .btn .fa-spin.headerlink,.btn .rst-content tt.download span.fa-spin:first-child,.rst-content tt.download .btn span.fa-spin:first-child,.btn .rst-content code.download span.fa-spin:first-child,.rst-content code.download .btn span.fa-spin:first-child,.btn .fa-spin.icon,.nav .fa.fa-spin,.nav .wy-menu-vertical li span.fa-spin.toctree-expand,.wy-menu-vertical li .nav span.fa-spin.toctree-expand,.nav .rst-content .fa-spin.admonition-title,.rst-content .nav .fa-spin.admonition-title,.nav .rst-content h1 .fa-spin.headerlink,.rst-content h1 .nav .fa-spin.headerlink,.nav .rst-content h2 .fa-spin.headerlink,.rst-content h2 .nav .fa-spin.headerlink,.nav .rst-content h3 .fa-spin.headerlink,.rst-content h3 .nav .fa-spin.headerlink,.nav .rst-content h4 .fa-spin.headerlink,.rst-content h4 .nav .fa-spin.headerlink,.nav .rst-content h5 .fa-spin.headerlink,.rst-content h5 .nav .fa-spin.headerlink,.nav .rst-content h6 .fa-spin.headerlink,.rst-content h6 .nav .fa-spin.headerlink,.nav .rst-content dl dt .fa-spin.headerlink,.rst-content dl dt .nav .fa-spin.headerlink,.nav .rst-content p.caption .fa-spin.headerlink,.rst-content p.caption .nav .fa-spin.headerlink,.nav .rst-content table>caption .fa-spin.headerlink,.rst-content table>caption .nav .fa-spin.headerlink,.nav .rst-content 
.code-block-caption .fa-spin.headerlink,.rst-content .code-block-caption .nav .fa-spin.headerlink,.nav .rst-content tt.download span.fa-spin:first-child,.rst-content tt.download .nav span.fa-spin:first-child,.nav .rst-content code.download span.fa-spin:first-child,.rst-content code.download .nav span.fa-spin:first-child,.nav .fa-spin.icon{display:inline-block}.btn.fa:before,.wy-menu-vertical li span.btn.toctree-expand:before,.rst-content .btn.admonition-title:before,.rst-content h1 .btn.headerlink:before,.rst-content h2 .btn.headerlink:before,.rst-content h3 .btn.headerlink:before,.rst-content h4 .btn.headerlink:before,.rst-content h5 .btn.headerlink:before,.rst-content h6 .btn.headerlink:before,.rst-content dl dt .btn.headerlink:before,.rst-content p.caption .btn.headerlink:before,.rst-content table>caption .btn.headerlink:before,.rst-content .code-block-caption .btn.headerlink:before,.rst-content tt.download span.btn:first-child:before,.rst-content code.download span.btn:first-child:before,.btn.icon:before{opacity:.5;-webkit-transition:opacity .05s ease-in;-moz-transition:opacity .05s ease-in;transition:opacity .05s ease-in}.btn.fa:hover:before,.wy-menu-vertical li span.btn.toctree-expand:hover:before,.rst-content .btn.admonition-title:hover:before,.rst-content h1 .btn.headerlink:hover:before,.rst-content h2 .btn.headerlink:hover:before,.rst-content h3 .btn.headerlink:hover:before,.rst-content h4 .btn.headerlink:hover:before,.rst-content h5 .btn.headerlink:hover:before,.rst-content h6 .btn.headerlink:hover:before,.rst-content dl dt .btn.headerlink:hover:before,.rst-content p.caption .btn.headerlink:hover:before,.rst-content table>caption .btn.headerlink:hover:before,.rst-content .code-block-caption .btn.headerlink:hover:before,.rst-content tt.download span.btn:first-child:hover:before,.rst-content code.download span.btn:first-child:hover:before,.btn.icon:hover:before{opacity:1}.btn-mini .fa:before,.btn-mini .wy-menu-vertical li 
span.toctree-expand:before,.wy-menu-vertical li .btn-mini span.toctree-expand:before,.btn-mini .rst-content .admonition-title:before,.rst-content .btn-mini .admonition-title:before,.btn-mini .rst-content h1 .headerlink:before,.rst-content h1 .btn-mini .headerlink:before,.btn-mini .rst-content h2 .headerlink:before,.rst-content h2 .btn-mini .headerlink:before,.btn-mini .rst-content h3 .headerlink:before,.rst-content h3 .btn-mini .headerlink:before,.btn-mini .rst-content h4 .headerlink:before,.rst-content h4 .btn-mini .headerlink:before,.btn-mini .rst-content h5 .headerlink:before,.rst-content h5 .btn-mini .headerlink:before,.btn-mini .rst-content h6 .headerlink:before,.rst-content h6 .btn-mini .headerlink:before,.btn-mini .rst-content dl dt .headerlink:before,.rst-content dl dt .btn-mini .headerlink:before,.btn-mini .rst-content p.caption .headerlink:before,.rst-content p.caption .btn-mini .headerlink:before,.btn-mini .rst-content table>caption .headerlink:before,.rst-content table>caption .btn-mini .headerlink:before,.btn-mini .rst-content .code-block-caption .headerlink:before,.rst-content .code-block-caption .btn-mini .headerlink:before,.btn-mini .rst-content tt.download span:first-child:before,.rst-content tt.download .btn-mini span:first-child:before,.btn-mini .rst-content code.download span:first-child:before,.rst-content code.download .btn-mini span:first-child:before,.btn-mini .icon:before{font-size:14px;vertical-align:-15%}.wy-alert,.rst-content .note,.rst-content .attention,.rst-content .caution,.rst-content .danger,.rst-content .error,.rst-content .hint,.rst-content .important,.rst-content .tip,.rst-content .warning,.rst-content .seealso,.rst-content .admonition-todo,.rst-content .admonition{padding:12px;line-height:24px;margin-bottom:24px;background:#e7f2fa}.wy-alert-title,.rst-content .admonition-title{color:#fff;font-weight:bold;display:block;color:#fff;background:#6ab0de;margin:-12px;padding:6px 
12px;margin-bottom:12px}.wy-alert.wy-alert-danger,.rst-content .wy-alert-danger.note,.rst-content .wy-alert-danger.attention,.rst-content .wy-alert-danger.caution,.rst-content .danger,.rst-content .error,.rst-content .wy-alert-danger.hint,.rst-content .wy-alert-danger.important,.rst-content .wy-alert-danger.tip,.rst-content .wy-alert-danger.warning,.rst-content .wy-alert-danger.seealso,.rst-content .wy-alert-danger.admonition-todo,.rst-content .wy-alert-danger.admonition{background:#fdf3f2}.wy-alert.wy-alert-danger .wy-alert-title,.rst-content .wy-alert-danger.note .wy-alert-title,.rst-content .wy-alert-danger.attention .wy-alert-title,.rst-content .wy-alert-danger.caution .wy-alert-title,.rst-content .danger .wy-alert-title,.rst-content .error .wy-alert-title,.rst-content .wy-alert-danger.hint .wy-alert-title,.rst-content .wy-alert-danger.important .wy-alert-title,.rst-content .wy-alert-danger.tip .wy-alert-title,.rst-content .wy-alert-danger.warning .wy-alert-title,.rst-content .wy-alert-danger.seealso .wy-alert-title,.rst-content .wy-alert-danger.admonition-todo .wy-alert-title,.rst-content .wy-alert-danger.admonition .wy-alert-title,.wy-alert.wy-alert-danger .rst-content .admonition-title,.rst-content .wy-alert.wy-alert-danger .admonition-title,.rst-content .wy-alert-danger.note .admonition-title,.rst-content .wy-alert-danger.attention .admonition-title,.rst-content .wy-alert-danger.caution .admonition-title,.rst-content .danger .admonition-title,.rst-content .error .admonition-title,.rst-content .wy-alert-danger.hint .admonition-title,.rst-content .wy-alert-danger.important .admonition-title,.rst-content .wy-alert-danger.tip .admonition-title,.rst-content .wy-alert-danger.warning .admonition-title,.rst-content .wy-alert-danger.seealso .admonition-title,.rst-content .wy-alert-danger.admonition-todo .admonition-title,.rst-content .wy-alert-danger.admonition .admonition-title{background:#f29f97}.wy-alert.wy-alert-warning,.rst-content 
.wy-alert-warning.note,.rst-content .attention,.rst-content .caution,.rst-content .wy-alert-warning.danger,.rst-content .wy-alert-warning.error,.rst-content .wy-alert-warning.hint,.rst-content .wy-alert-warning.important,.rst-content .wy-alert-warning.tip,.rst-content .warning,.rst-content .wy-alert-warning.seealso,.rst-content .admonition-todo,.rst-content .wy-alert-warning.admonition{background:#ffedcc}.wy-alert.wy-alert-warning .wy-alert-title,.rst-content .wy-alert-warning.note .wy-alert-title,.rst-content .attention .wy-alert-title,.rst-content .caution .wy-alert-title,.rst-content .wy-alert-warning.danger .wy-alert-title,.rst-content .wy-alert-warning.error .wy-alert-title,.rst-content .wy-alert-warning.hint .wy-alert-title,.rst-content .wy-alert-warning.important .wy-alert-title,.rst-content .wy-alert-warning.tip .wy-alert-title,.rst-content .warning .wy-alert-title,.rst-content .wy-alert-warning.seealso .wy-alert-title,.rst-content .admonition-todo .wy-alert-title,.rst-content .wy-alert-warning.admonition .wy-alert-title,.wy-alert.wy-alert-warning .rst-content .admonition-title,.rst-content .wy-alert.wy-alert-warning .admonition-title,.rst-content .wy-alert-warning.note .admonition-title,.rst-content .attention .admonition-title,.rst-content .caution .admonition-title,.rst-content .wy-alert-warning.danger .admonition-title,.rst-content .wy-alert-warning.error .admonition-title,.rst-content .wy-alert-warning.hint .admonition-title,.rst-content .wy-alert-warning.important .admonition-title,.rst-content .wy-alert-warning.tip .admonition-title,.rst-content .warning .admonition-title,.rst-content .wy-alert-warning.seealso .admonition-title,.rst-content .admonition-todo .admonition-title,.rst-content .wy-alert-warning.admonition .admonition-title{background:#f0b37e}.wy-alert.wy-alert-info,.rst-content .note,.rst-content .wy-alert-info.attention,.rst-content .wy-alert-info.caution,.rst-content .wy-alert-info.danger,.rst-content .wy-alert-info.error,.rst-content 
.wy-alert-info.hint,.rst-content .wy-alert-info.important,.rst-content .wy-alert-info.tip,.rst-content .wy-alert-info.warning,.rst-content .seealso,.rst-content .wy-alert-info.admonition-todo,.rst-content .wy-alert-info.admonition{background:#e7f2fa}.wy-alert.wy-alert-info .wy-alert-title,.rst-content .note .wy-alert-title,.rst-content .wy-alert-info.attention .wy-alert-title,.rst-content .wy-alert-info.caution .wy-alert-title,.rst-content .wy-alert-info.danger .wy-alert-title,.rst-content .wy-alert-info.error .wy-alert-title,.rst-content .wy-alert-info.hint .wy-alert-title,.rst-content .wy-alert-info.important .wy-alert-title,.rst-content .wy-alert-info.tip .wy-alert-title,.rst-content .wy-alert-info.warning .wy-alert-title,.rst-content .seealso .wy-alert-title,.rst-content .wy-alert-info.admonition-todo .wy-alert-title,.rst-content .wy-alert-info.admonition .wy-alert-title,.wy-alert.wy-alert-info .rst-content .admonition-title,.rst-content .wy-alert.wy-alert-info .admonition-title,.rst-content .note .admonition-title,.rst-content .wy-alert-info.attention .admonition-title,.rst-content .wy-alert-info.caution .admonition-title,.rst-content .wy-alert-info.danger .admonition-title,.rst-content .wy-alert-info.error .admonition-title,.rst-content .wy-alert-info.hint .admonition-title,.rst-content .wy-alert-info.important .admonition-title,.rst-content .wy-alert-info.tip .admonition-title,.rst-content .wy-alert-info.warning .admonition-title,.rst-content .seealso .admonition-title,.rst-content .wy-alert-info.admonition-todo .admonition-title,.rst-content .wy-alert-info.admonition .admonition-title{background:#6ab0de}.wy-alert.wy-alert-success,.rst-content .wy-alert-success.note,.rst-content .wy-alert-success.attention,.rst-content .wy-alert-success.caution,.rst-content .wy-alert-success.danger,.rst-content .wy-alert-success.error,.rst-content .hint,.rst-content .important,.rst-content .tip,.rst-content .wy-alert-success.warning,.rst-content 
.wy-alert-success.seealso,.rst-content .wy-alert-success.admonition-todo,.rst-content .wy-alert-success.admonition{background:#dbfaf4}.wy-alert.wy-alert-success .wy-alert-title,.rst-content .wy-alert-success.note .wy-alert-title,.rst-content .wy-alert-success.attention .wy-alert-title,.rst-content .wy-alert-success.caution .wy-alert-title,.rst-content .wy-alert-success.danger .wy-alert-title,.rst-content .wy-alert-success.error .wy-alert-title,.rst-content .hint .wy-alert-title,.rst-content .important .wy-alert-title,.rst-content .tip .wy-alert-title,.rst-content .wy-alert-success.warning .wy-alert-title,.rst-content .wy-alert-success.seealso .wy-alert-title,.rst-content .wy-alert-success.admonition-todo .wy-alert-title,.rst-content .wy-alert-success.admonition .wy-alert-title,.wy-alert.wy-alert-success .rst-content .admonition-title,.rst-content .wy-alert.wy-alert-success .admonition-title,.rst-content .wy-alert-success.note .admonition-title,.rst-content .wy-alert-success.attention .admonition-title,.rst-content .wy-alert-success.caution .admonition-title,.rst-content .wy-alert-success.danger .admonition-title,.rst-content .wy-alert-success.error .admonition-title,.rst-content .hint .admonition-title,.rst-content .important .admonition-title,.rst-content .tip .admonition-title,.rst-content .wy-alert-success.warning .admonition-title,.rst-content .wy-alert-success.seealso .admonition-title,.rst-content .wy-alert-success.admonition-todo .admonition-title,.rst-content .wy-alert-success.admonition .admonition-title{background:#1abc9c}.wy-alert.wy-alert-neutral,.rst-content .wy-alert-neutral.note,.rst-content .wy-alert-neutral.attention,.rst-content .wy-alert-neutral.caution,.rst-content .wy-alert-neutral.danger,.rst-content .wy-alert-neutral.error,.rst-content .wy-alert-neutral.hint,.rst-content .wy-alert-neutral.important,.rst-content .wy-alert-neutral.tip,.rst-content .wy-alert-neutral.warning,.rst-content .wy-alert-neutral.seealso,.rst-content 
.wy-alert-neutral.admonition-todo,.rst-content .wy-alert-neutral.admonition{background:#f3f6f6}.wy-alert.wy-alert-neutral .wy-alert-title,.rst-content .wy-alert-neutral.note .wy-alert-title,.rst-content .wy-alert-neutral.attention .wy-alert-title,.rst-content .wy-alert-neutral.caution .wy-alert-title,.rst-content .wy-alert-neutral.danger .wy-alert-title,.rst-content .wy-alert-neutral.error .wy-alert-title,.rst-content .wy-alert-neutral.hint .wy-alert-title,.rst-content .wy-alert-neutral.important .wy-alert-title,.rst-content .wy-alert-neutral.tip .wy-alert-title,.rst-content .wy-alert-neutral.warning .wy-alert-title,.rst-content .wy-alert-neutral.seealso .wy-alert-title,.rst-content .wy-alert-neutral.admonition-todo .wy-alert-title,.rst-content .wy-alert-neutral.admonition .wy-alert-title,.wy-alert.wy-alert-neutral .rst-content .admonition-title,.rst-content .wy-alert.wy-alert-neutral .admonition-title,.rst-content .wy-alert-neutral.note .admonition-title,.rst-content .wy-alert-neutral.attention .admonition-title,.rst-content .wy-alert-neutral.caution .admonition-title,.rst-content .wy-alert-neutral.danger .admonition-title,.rst-content .wy-alert-neutral.error .admonition-title,.rst-content .wy-alert-neutral.hint .admonition-title,.rst-content .wy-alert-neutral.important .admonition-title,.rst-content .wy-alert-neutral.tip .admonition-title,.rst-content .wy-alert-neutral.warning .admonition-title,.rst-content .wy-alert-neutral.seealso .admonition-title,.rst-content .wy-alert-neutral.admonition-todo .admonition-title,.rst-content .wy-alert-neutral.admonition .admonition-title{color:#404040;background:#e1e4e5}.wy-alert.wy-alert-neutral a,.rst-content .wy-alert-neutral.note a,.rst-content .wy-alert-neutral.attention a,.rst-content .wy-alert-neutral.caution a,.rst-content .wy-alert-neutral.danger a,.rst-content .wy-alert-neutral.error a,.rst-content .wy-alert-neutral.hint a,.rst-content .wy-alert-neutral.important a,.rst-content .wy-alert-neutral.tip a,.rst-content 
.wy-alert-neutral.warning a,.rst-content .wy-alert-neutral.seealso a,.rst-content .wy-alert-neutral.admonition-todo a,.rst-content .wy-alert-neutral.admonition a{color:#2980B9}.wy-alert p:last-child,.rst-content .note p:last-child,.rst-content .attention p:last-child,.rst-content .caution p:last-child,.rst-content .danger p:last-child,.rst-content .error p:last-child,.rst-content .hint p:last-child,.rst-content .important p:last-child,.rst-content .tip p:last-child,.rst-content .warning p:last-child,.rst-content .seealso p:last-child,.rst-content .admonition-todo p:last-child,.rst-content .admonition p:last-child{margin-bottom:0}.wy-tray-container{position:fixed;bottom:0px;left:0;z-index:600}.wy-tray-container li{display:block;width:300px;background:transparent;color:#fff;text-align:center;box-shadow:0 5px 5px 0 rgba(0,0,0,0.1);padding:0 24px;min-width:20%;opacity:0;height:0;line-height:56px;overflow:hidden;-webkit-transition:all .3s ease-in;-moz-transition:all .3s ease-in;transition:all .3s ease-in}.wy-tray-container li.wy-tray-item-success{background:#27AE60}.wy-tray-container li.wy-tray-item-info{background:#2980B9}.wy-tray-container li.wy-tray-item-warning{background:#E67E22}.wy-tray-container li.wy-tray-item-danger{background:#E74C3C}.wy-tray-container li.on{opacity:1;height:56px}@media screen and (max-width: 768px){.wy-tray-container{bottom:auto;top:0;width:100%}.wy-tray-container li{width:100%}}button{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle;cursor:pointer;line-height:normal;-webkit-appearance:button;*overflow:visible}button::-moz-focus-inner,input::-moz-focus-inner{border:0;padding:0}button[disabled]{cursor:default}.btn{display:inline-block;border-radius:2px;line-height:normal;white-space:nowrap;text-align:center;cursor:pointer;font-size:100%;padding:6px 12px 8px 12px;color:#fff;border:1px solid rgba(0,0,0,0.1);background-color:#27AE60;text-decoration:none;font-weight:normal;font-family:"Lato","proxima-nova","Helvetica 
Neue",Arial,sans-serif;box-shadow:0px 1px 2px -1px rgba(255,255,255,0.5) inset,0px -2px 0px 0px rgba(0,0,0,0.1) inset;outline-none:false;vertical-align:middle;*display:inline;zoom:1;-webkit-user-drag:none;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;-webkit-transition:all .1s linear;-moz-transition:all .1s linear;transition:all .1s linear}.btn-hover{background:#2e8ece;color:#fff}.btn:hover{background:#2cc36b;color:#fff}.btn:focus{background:#2cc36b;outline:0}.btn:active{box-shadow:0px -1px 0px 0px rgba(0,0,0,0.05) inset,0px 2px 0px 0px rgba(0,0,0,0.1) inset;padding:8px 12px 6px 12px}.btn:visited{color:#fff}.btn:disabled{background-image:none;filter:progid:DXImageTransform.Microsoft.gradient(enabled = false);filter:alpha(opacity=40);opacity:.4;cursor:not-allowed;box-shadow:none}.btn-disabled{background-image:none;filter:progid:DXImageTransform.Microsoft.gradient(enabled = false);filter:alpha(opacity=40);opacity:.4;cursor:not-allowed;box-shadow:none}.btn-disabled:hover,.btn-disabled:focus,.btn-disabled:active{background-image:none;filter:progid:DXImageTransform.Microsoft.gradient(enabled = false);filter:alpha(opacity=40);opacity:.4;cursor:not-allowed;box-shadow:none}.btn::-moz-focus-inner{padding:0;border:0}.btn-small{font-size:80%}.btn-info{background-color:#2980B9 !important}.btn-info:hover{background-color:#2e8ece !important}.btn-neutral{background-color:#f3f6f6 !important;color:#404040 !important}.btn-neutral:hover{background-color:#e5ebeb !important;color:#404040}.btn-neutral:visited{color:#404040 !important}.btn-success{background-color:#27AE60 !important}.btn-success:hover{background-color:#295 !important}.btn-danger{background-color:#E74C3C !important}.btn-danger:hover{background-color:#ea6153 !important}.btn-warning{background-color:#E67E22 !important}.btn-warning:hover{background-color:#e98b39 !important}.btn-invert{background-color:#222}.btn-invert:hover{background-color:#2f2f2f 
!important}.btn-link{background-color:transparent !important;color:#2980B9;box-shadow:none;border-color:transparent !important}.btn-link:hover{background-color:transparent !important;color:#409ad5 !important;box-shadow:none}.btn-link:active{background-color:transparent !important;color:#409ad5 !important;box-shadow:none}.btn-link:visited{color:#9B59B6}.wy-btn-group .btn,.wy-control .btn{vertical-align:middle}.wy-btn-group{margin-bottom:24px;*zoom:1}.wy-btn-group:before,.wy-btn-group:after{display:table;content:""}.wy-btn-group:after{clear:both}.wy-dropdown{position:relative;display:inline-block}.wy-dropdown-active .wy-dropdown-menu{display:block}.wy-dropdown-menu{position:absolute;left:0;display:none;float:left;top:100%;min-width:100%;background:#fcfcfc;z-index:100;border:solid 1px #cfd7dd;box-shadow:0 2px 2px 0 rgba(0,0,0,0.1);padding:12px}.wy-dropdown-menu>dd>a{display:block;clear:both;color:#404040;white-space:nowrap;font-size:90%;padding:0 12px;cursor:pointer}.wy-dropdown-menu>dd>a:hover{background:#2980B9;color:#fff}.wy-dropdown-menu>dd.divider{border-top:solid 1px #cfd7dd;margin:6px 0}.wy-dropdown-menu>dd.search{padding-bottom:12px}.wy-dropdown-menu>dd.search input[type="search"]{width:100%}.wy-dropdown-menu>dd.call-to-action{background:#e3e3e3;text-transform:uppercase;font-weight:500;font-size:80%}.wy-dropdown-menu>dd.call-to-action:hover{background:#e3e3e3}.wy-dropdown-menu>dd.call-to-action .btn{color:#fff}.wy-dropdown.wy-dropdown-up .wy-dropdown-menu{bottom:100%;top:auto;left:auto;right:0}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu{background:#fcfcfc;margin-top:2px}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu a{padding:6px 12px}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu a:hover{background:#2980B9;color:#fff}.wy-dropdown.wy-dropdown-left .wy-dropdown-menu{right:0;left:auto;text-align:right}.wy-dropdown-arrow:before{content:" ";border-bottom:5px solid #f5f5f5;border-left:5px solid transparent;border-right:5px solid 
transparent;position:absolute;display:block;top:-4px;left:50%;margin-left:-3px}.wy-dropdown-arrow.wy-dropdown-arrow-left:before{left:11px}.wy-form-stacked select{display:block}.wy-form-aligned input,.wy-form-aligned textarea,.wy-form-aligned select,.wy-form-aligned .wy-help-inline,.wy-form-aligned label{display:inline-block;*display:inline;*zoom:1;vertical-align:middle}.wy-form-aligned .wy-control-group>label{display:inline-block;vertical-align:middle;width:10em;margin:6px 12px 0 0;float:left}.wy-form-aligned .wy-control{float:left}.wy-form-aligned .wy-control label{display:block}.wy-form-aligned .wy-control select{margin-top:6px}fieldset{border:0;margin:0;padding:0}legend{display:block;width:100%;border:0;padding:0;white-space:normal;margin-bottom:24px;font-size:150%;*margin-left:-7px}label{display:block;margin:0 0 .3125em 0;color:#333;font-size:90%}input,select,textarea{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle}.wy-control-group{margin-bottom:24px;*zoom:1;max-width:68em;margin-left:auto;margin-right:auto;*zoom:1}.wy-control-group:before,.wy-control-group:after{display:table;content:""}.wy-control-group:after{clear:both}.wy-control-group:before,.wy-control-group:after{display:table;content:""}.wy-control-group:after{clear:both}.wy-control-group.wy-control-group-required>label:after{content:" *";color:#E74C3C}.wy-control-group .wy-form-full,.wy-control-group .wy-form-halves,.wy-control-group .wy-form-thirds{padding-bottom:12px}.wy-control-group .wy-form-full select,.wy-control-group .wy-form-halves select,.wy-control-group .wy-form-thirds select{width:100%}.wy-control-group .wy-form-full input[type="text"],.wy-control-group .wy-form-full input[type="password"],.wy-control-group .wy-form-full input[type="email"],.wy-control-group .wy-form-full input[type="url"],.wy-control-group .wy-form-full input[type="date"],.wy-control-group .wy-form-full input[type="month"],.wy-control-group .wy-form-full input[type="time"],.wy-control-group 
.wy-form-full input[type="datetime"],.wy-control-group .wy-form-full input[type="datetime-local"],.wy-control-group .wy-form-full input[type="week"],.wy-control-group .wy-form-full input[type="number"],.wy-control-group .wy-form-full input[type="search"],.wy-control-group .wy-form-full input[type="tel"],.wy-control-group .wy-form-full input[type="color"],.wy-control-group .wy-form-halves input[type="text"],.wy-control-group .wy-form-halves input[type="password"],.wy-control-group .wy-form-halves input[type="email"],.wy-control-group .wy-form-halves input[type="url"],.wy-control-group .wy-form-halves input[type="date"],.wy-control-group .wy-form-halves input[type="month"],.wy-control-group .wy-form-halves input[type="time"],.wy-control-group .wy-form-halves input[type="datetime"],.wy-control-group .wy-form-halves input[type="datetime-local"],.wy-control-group .wy-form-halves input[type="week"],.wy-control-group .wy-form-halves input[type="number"],.wy-control-group .wy-form-halves input[type="search"],.wy-control-group .wy-form-halves input[type="tel"],.wy-control-group .wy-form-halves input[type="color"],.wy-control-group .wy-form-thirds input[type="text"],.wy-control-group .wy-form-thirds input[type="password"],.wy-control-group .wy-form-thirds input[type="email"],.wy-control-group .wy-form-thirds input[type="url"],.wy-control-group .wy-form-thirds input[type="date"],.wy-control-group .wy-form-thirds input[type="month"],.wy-control-group .wy-form-thirds input[type="time"],.wy-control-group .wy-form-thirds input[type="datetime"],.wy-control-group .wy-form-thirds input[type="datetime-local"],.wy-control-group .wy-form-thirds input[type="week"],.wy-control-group .wy-form-thirds input[type="number"],.wy-control-group .wy-form-thirds input[type="search"],.wy-control-group .wy-form-thirds input[type="tel"],.wy-control-group .wy-form-thirds input[type="color"]{width:100%}.wy-control-group 
.wy-form-full{float:left;display:block;margin-right:2.3576515979%;width:100%;margin-right:0}.wy-control-group .wy-form-full:last-child{margin-right:0}.wy-control-group .wy-form-halves{float:left;display:block;margin-right:2.3576515979%;width:48.821174201%}.wy-control-group .wy-form-halves:last-child{margin-right:0}.wy-control-group .wy-form-halves:nth-of-type(2n){margin-right:0}.wy-control-group .wy-form-halves:nth-of-type(2n+1){clear:left}.wy-control-group .wy-form-thirds{float:left;display:block;margin-right:2.3576515979%;width:31.7615656014%}.wy-control-group .wy-form-thirds:last-child{margin-right:0}.wy-control-group .wy-form-thirds:nth-of-type(3n){margin-right:0}.wy-control-group .wy-form-thirds:nth-of-type(3n+1){clear:left}.wy-control-group.wy-control-group-no-input .wy-control{margin:6px 0 0 0;font-size:90%}.wy-control-no-input{display:inline-block;margin:6px 0 0 0;font-size:90%}.wy-control-group.fluid-input input[type="text"],.wy-control-group.fluid-input input[type="password"],.wy-control-group.fluid-input input[type="email"],.wy-control-group.fluid-input input[type="url"],.wy-control-group.fluid-input input[type="date"],.wy-control-group.fluid-input input[type="month"],.wy-control-group.fluid-input input[type="time"],.wy-control-group.fluid-input input[type="datetime"],.wy-control-group.fluid-input input[type="datetime-local"],.wy-control-group.fluid-input input[type="week"],.wy-control-group.fluid-input input[type="number"],.wy-control-group.fluid-input input[type="search"],.wy-control-group.fluid-input input[type="tel"],.wy-control-group.fluid-input input[type="color"]{width:100%}.wy-form-message-inline{display:inline-block;padding-left:.3em;color:#666;vertical-align:middle;font-size:90%}.wy-form-message{display:block;color:#999;font-size:70%;margin-top:.3125em;font-style:italic}.wy-form-message p{font-size:inherit;font-style:italic;margin-bottom:6px}.wy-form-message 
p:last-child{margin-bottom:0}input{line-height:normal}input[type="button"],input[type="reset"],input[type="submit"]{-webkit-appearance:button;cursor:pointer;font-family:"Lato","proxima-nova","Helvetica Neue",Arial,sans-serif;*overflow:visible}input[type="text"],input[type="password"],input[type="email"],input[type="url"],input[type="date"],input[type="month"],input[type="time"],input[type="datetime"],input[type="datetime-local"],input[type="week"],input[type="number"],input[type="search"],input[type="tel"],input[type="color"]{-webkit-appearance:none;padding:6px;display:inline-block;border:1px solid #ccc;font-size:80%;font-family:"Lato","proxima-nova","Helvetica Neue",Arial,sans-serif;box-shadow:inset 0 1px 3px #ddd;border-radius:0;-webkit-transition:border .3s linear;-moz-transition:border .3s linear;transition:border .3s linear}input[type="datetime-local"]{padding:.34375em .625em}input[disabled]{cursor:default}input[type="checkbox"],input[type="radio"]{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box;padding:0;margin-right:.3125em;*height:13px;*width:13px}input[type="search"]{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}input[type="search"]::-webkit-search-cancel-button,input[type="search"]::-webkit-search-decoration{-webkit-appearance:none}input[type="text"]:focus,input[type="password"]:focus,input[type="email"]:focus,input[type="url"]:focus,input[type="date"]:focus,input[type="month"]:focus,input[type="time"]:focus,input[type="datetime"]:focus,input[type="datetime-local"]:focus,input[type="week"]:focus,input[type="number"]:focus,input[type="search"]:focus,input[type="tel"]:focus,input[type="color"]:focus{outline:0;outline:thin dotted \9;border-color:#333}input.no-focus:focus{border-color:#ccc !important}input[type="file"]:focus,input[type="radio"]:focus,input[type="checkbox"]:focus{outline:thin dotted #333;outline:1px auto 
#129FEA}input[type="text"][disabled],input[type="password"][disabled],input[type="email"][disabled],input[type="url"][disabled],input[type="date"][disabled],input[type="month"][disabled],input[type="time"][disabled],input[type="datetime"][disabled],input[type="datetime-local"][disabled],input[type="week"][disabled],input[type="number"][disabled],input[type="search"][disabled],input[type="tel"][disabled],input[type="color"][disabled]{cursor:not-allowed;background-color:#fafafa}input:focus:invalid,textarea:focus:invalid,select:focus:invalid{color:#E74C3C;border:1px solid #E74C3C}input:focus:invalid:focus,textarea:focus:invalid:focus,select:focus:invalid:focus{border-color:#E74C3C}input[type="file"]:focus:invalid:focus,input[type="radio"]:focus:invalid:focus,input[type="checkbox"]:focus:invalid:focus{outline-color:#E74C3C}input.wy-input-large{padding:12px;font-size:100%}textarea{overflow:auto;vertical-align:top;width:100%;font-family:"Lato","proxima-nova","Helvetica Neue",Arial,sans-serif}select,textarea{padding:.5em .625em;display:inline-block;border:1px solid #ccc;font-size:80%;box-shadow:inset 0 1px 3px #ddd;-webkit-transition:border .3s linear;-moz-transition:border .3s linear;transition:border .3s linear}select{border:1px solid #ccc;background-color:#fff}select[multiple]{height:auto}select:focus,textarea:focus{outline:0}select[disabled],textarea[disabled],input[readonly],select[readonly],textarea[readonly]{cursor:not-allowed;background-color:#fafafa}input[type="radio"][disabled],input[type="checkbox"][disabled]{cursor:not-allowed}.wy-checkbox,.wy-radio{margin:6px 0;color:#404040;display:block}.wy-checkbox input,.wy-radio input{vertical-align:baseline}.wy-form-message-inline{display:inline-block;*display:inline;*zoom:1;vertical-align:middle}.wy-input-prefix,.wy-input-suffix{white-space:nowrap;padding:6px}.wy-input-prefix .wy-input-context,.wy-input-suffix .wy-input-context{line-height:27px;padding:0 
8px;display:inline-block;font-size:80%;background-color:#f3f6f6;border:solid 1px #ccc;color:#999}.wy-input-suffix .wy-input-context{border-left:0}.wy-input-prefix .wy-input-context{border-right:0}.wy-switch{position:relative;display:block;height:24px;margin-top:12px;cursor:pointer}.wy-switch:before{position:absolute;content:"";display:block;left:0;top:0;width:36px;height:12px;border-radius:4px;background:#ccc;-webkit-transition:all .2s ease-in-out;-moz-transition:all .2s ease-in-out;transition:all .2s ease-in-out}.wy-switch:after{position:absolute;content:"";display:block;width:18px;height:18px;border-radius:4px;background:#999;left:-3px;top:-3px;-webkit-transition:all .2s ease-in-out;-moz-transition:all .2s ease-in-out;transition:all .2s ease-in-out}.wy-switch span{position:absolute;left:48px;display:block;font-size:12px;color:#ccc;line-height:1}.wy-switch.active:before{background:#1e8449}.wy-switch.active:after{left:24px;background:#27AE60}.wy-switch.disabled{cursor:not-allowed;opacity:.8}.wy-control-group.wy-control-group-error .wy-form-message,.wy-control-group.wy-control-group-error>label{color:#E74C3C}.wy-control-group.wy-control-group-error input[type="text"],.wy-control-group.wy-control-group-error input[type="password"],.wy-control-group.wy-control-group-error input[type="email"],.wy-control-group.wy-control-group-error input[type="url"],.wy-control-group.wy-control-group-error input[type="date"],.wy-control-group.wy-control-group-error input[type="month"],.wy-control-group.wy-control-group-error input[type="time"],.wy-control-group.wy-control-group-error input[type="datetime"],.wy-control-group.wy-control-group-error input[type="datetime-local"],.wy-control-group.wy-control-group-error input[type="week"],.wy-control-group.wy-control-group-error input[type="number"],.wy-control-group.wy-control-group-error input[type="search"],.wy-control-group.wy-control-group-error input[type="tel"],.wy-control-group.wy-control-group-error 
input[type="color"]{border:solid 1px #E74C3C}.wy-control-group.wy-control-group-error textarea{border:solid 1px #E74C3C}.wy-inline-validate{white-space:nowrap}.wy-inline-validate .wy-input-context{padding:.5em .625em;display:inline-block;font-size:80%}.wy-inline-validate.wy-inline-validate-success .wy-input-context{color:#27AE60}.wy-inline-validate.wy-inline-validate-danger .wy-input-context{color:#E74C3C}.wy-inline-validate.wy-inline-validate-warning .wy-input-context{color:#E67E22}.wy-inline-validate.wy-inline-validate-info .wy-input-context{color:#2980B9}.rotate-90{-webkit-transform:rotate(90deg);-moz-transform:rotate(90deg);-ms-transform:rotate(90deg);-o-transform:rotate(90deg);transform:rotate(90deg)}.rotate-180{-webkit-transform:rotate(180deg);-moz-transform:rotate(180deg);-ms-transform:rotate(180deg);-o-transform:rotate(180deg);transform:rotate(180deg)}.rotate-270{-webkit-transform:rotate(270deg);-moz-transform:rotate(270deg);-ms-transform:rotate(270deg);-o-transform:rotate(270deg);transform:rotate(270deg)}.mirror{-webkit-transform:scaleX(-1);-moz-transform:scaleX(-1);-ms-transform:scaleX(-1);-o-transform:scaleX(-1);transform:scaleX(-1)}.mirror.rotate-90{-webkit-transform:scaleX(-1) rotate(90deg);-moz-transform:scaleX(-1) rotate(90deg);-ms-transform:scaleX(-1) rotate(90deg);-o-transform:scaleX(-1) rotate(90deg);transform:scaleX(-1) rotate(90deg)}.mirror.rotate-180{-webkit-transform:scaleX(-1) rotate(180deg);-moz-transform:scaleX(-1) rotate(180deg);-ms-transform:scaleX(-1) rotate(180deg);-o-transform:scaleX(-1) rotate(180deg);transform:scaleX(-1) rotate(180deg)}.mirror.rotate-270{-webkit-transform:scaleX(-1) rotate(270deg);-moz-transform:scaleX(-1) rotate(270deg);-ms-transform:scaleX(-1) rotate(270deg);-o-transform:scaleX(-1) rotate(270deg);transform:scaleX(-1) rotate(270deg)}@media only screen and (max-width: 480px){.wy-form button[type="submit"]{margin:.7em 0 0}.wy-form input[type="text"],.wy-form input[type="password"],.wy-form input[type="email"],.wy-form 
input[type="url"],.wy-form input[type="date"],.wy-form input[type="month"],.wy-form input[type="time"],.wy-form input[type="datetime"],.wy-form input[type="datetime-local"],.wy-form input[type="week"],.wy-form input[type="number"],.wy-form input[type="search"],.wy-form input[type="tel"],.wy-form input[type="color"]{margin-bottom:.3em;display:block}.wy-form label{margin-bottom:.3em;display:block}.wy-form input[type="password"],.wy-form input[type="email"],.wy-form input[type="url"],.wy-form input[type="date"],.wy-form input[type="month"],.wy-form input[type="time"],.wy-form input[type="datetime"],.wy-form input[type="datetime-local"],.wy-form input[type="week"],.wy-form input[type="number"],.wy-form input[type="search"],.wy-form input[type="tel"],.wy-form input[type="color"]{margin-bottom:0}.wy-form-aligned .wy-control-group label{margin-bottom:.3em;text-align:left;display:block;width:100%}.wy-form-aligned .wy-control{margin:1.5em 0 0 0}.wy-form .wy-help-inline,.wy-form-message-inline,.wy-form-message{display:block;font-size:80%;padding:6px 0}}@media screen and (max-width: 768px){.tablet-hide{display:none}}@media screen and (max-width: 480px){.mobile-hide{display:none}}.float-left{float:left}.float-right{float:right}.full-width{width:100%}.wy-table,.rst-content table.docutils,.rst-content table.field-list{border-collapse:collapse;border-spacing:0;empty-cells:show;margin-bottom:24px}.wy-table caption,.rst-content table.docutils caption,.rst-content table.field-list caption{color:#000;font:italic 85%/1 arial,sans-serif;padding:1em 0;text-align:center}.wy-table td,.rst-content table.docutils td,.rst-content table.field-list td,.wy-table th,.rst-content table.docutils th,.rst-content table.field-list th{font-size:90%;margin:0;overflow:visible;padding:8px 16px}.wy-table td:first-child,.rst-content table.docutils td:first-child,.rst-content table.field-list td:first-child,.wy-table th:first-child,.rst-content table.docutils th:first-child,.rst-content table.field-list 
th:first-child{border-left-width:0}.wy-table thead,.rst-content table.docutils thead,.rst-content table.field-list thead{color:#000;text-align:left;vertical-align:bottom;white-space:nowrap}.wy-table thead th,.rst-content table.docutils thead th,.rst-content table.field-list thead th{font-weight:bold;border-bottom:solid 2px #e1e4e5}.wy-table td,.rst-content table.docutils td,.rst-content table.field-list td{background-color:transparent;vertical-align:middle}.wy-table td p,.rst-content table.docutils td p,.rst-content table.field-list td p{line-height:18px}.wy-table td p:last-child,.rst-content table.docutils td p:last-child,.rst-content table.field-list td p:last-child{margin-bottom:0}.wy-table .wy-table-cell-min,.rst-content table.docutils .wy-table-cell-min,.rst-content table.field-list .wy-table-cell-min{width:1%;padding-right:0}.wy-table .wy-table-cell-min input[type=checkbox],.rst-content table.docutils .wy-table-cell-min input[type=checkbox],.rst-content table.field-list .wy-table-cell-min input[type=checkbox],.wy-table .wy-table-cell-min input[type=checkbox],.rst-content table.docutils .wy-table-cell-min input[type=checkbox],.rst-content table.field-list .wy-table-cell-min input[type=checkbox]{margin:0}.wy-table-secondary{color:gray;font-size:90%}.wy-table-tertiary{color:gray;font-size:80%}.wy-table-odd td,.wy-table-striped tr:nth-child(2n-1) td,.rst-content table.docutils:not(.field-list) tr:nth-child(2n-1) td{background-color:#f3f6f6}.wy-table-backed{background-color:#f3f6f6}.wy-table-bordered-all,.rst-content table.docutils{border:1px solid #e1e4e5}.wy-table-bordered-all td,.rst-content table.docutils td{border-bottom:1px solid #e1e4e5;border-left:1px solid #e1e4e5}.wy-table-bordered-all tbody>tr:last-child td,.rst-content table.docutils tbody>tr:last-child td{border-bottom-width:0}.wy-table-bordered{border:1px solid #e1e4e5}.wy-table-bordered-rows td{border-bottom:1px solid #e1e4e5}.wy-table-bordered-rows tbody>tr:last-child 
td{border-bottom-width:0}.wy-table-horizontal tbody>tr:last-child td{border-bottom-width:0}.wy-table-horizontal td,.wy-table-horizontal th{border-width:0 0 1px 0;border-bottom:1px solid #e1e4e5}.wy-table-horizontal tbody>tr:last-child td{border-bottom-width:0}.wy-table-responsive{margin-bottom:24px;max-width:100%;overflow:auto}.wy-table-responsive table{margin-bottom:0 !important}.wy-table-responsive table td,.wy-table-responsive table th{white-space:nowrap}a{color:#2980B9;text-decoration:none;cursor:pointer}a:hover{color:#3091d1}a:visited{color:#9B59B6}html{height:100%;overflow-x:hidden}body{font-family:"Lato","proxima-nova","Helvetica Neue",Arial,sans-serif;font-weight:normal;color:#404040;min-height:100%;overflow-x:hidden;background:#edf0f2}.wy-text-left{text-align:left}.wy-text-center{text-align:center}.wy-text-right{text-align:right}.wy-text-large{font-size:120%}.wy-text-normal{font-size:100%}.wy-text-small,small{font-size:80%}.wy-text-strike{text-decoration:line-through}.wy-text-warning{color:#E67E22 !important}a.wy-text-warning:hover{color:#eb9950 !important}.wy-text-info{color:#2980B9 !important}a.wy-text-info:hover{color:#409ad5 !important}.wy-text-success{color:#27AE60 !important}a.wy-text-success:hover{color:#36d278 !important}.wy-text-danger{color:#E74C3C !important}a.wy-text-danger:hover{color:#ed7669 !important}.wy-text-neutral{color:#404040 !important}a.wy-text-neutral:hover{color:#595959 !important}h1,h2,.rst-content .toctree-wrapper p.caption,h3,h4,h5,h6,legend{margin-top:0;font-weight:700;font-family:"Roboto Slab","ff-tisa-web-pro","Georgia",Arial,sans-serif}p{line-height:24px;margin:0;font-size:16px;margin-bottom:24px}h1{font-size:175%}h2,.rst-content .toctree-wrapper p.caption{font-size:150%}h3{font-size:125%}h4{font-size:115%}h5{font-size:110%}h6{font-size:100%}hr{display:block;height:1px;border:0;border-top:1px solid #e1e4e5;margin:24px 0;padding:0}code,.rst-content tt,.rst-content 
code{white-space:nowrap;max-width:100%;background:#fff;border:solid 1px #e1e4e5;font-size:75%;padding:0 5px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",Courier,monospace;color:#E74C3C;overflow-x:auto}code.code-large,.rst-content tt.code-large{font-size:90%}.wy-plain-list-disc,.rst-content .section ul,.rst-content .toctree-wrapper ul,article ul{list-style:disc;line-height:24px;margin-bottom:24px}.wy-plain-list-disc li,.rst-content .section ul li,.rst-content .toctree-wrapper ul li,article ul li{list-style:disc;margin-left:24px}.wy-plain-list-disc li p:last-child,.rst-content .section ul li p:last-child,.rst-content .toctree-wrapper ul li p:last-child,article ul li p:last-child{margin-bottom:0}.wy-plain-list-disc li ul,.rst-content .section ul li ul,.rst-content .toctree-wrapper ul li ul,article ul li ul{margin-bottom:0}.wy-plain-list-disc li li,.rst-content .section ul li li,.rst-content .toctree-wrapper ul li li,article ul li li{list-style:circle}.wy-plain-list-disc li li li,.rst-content .section ul li li li,.rst-content .toctree-wrapper ul li li li,article ul li li li{list-style:square}.wy-plain-list-disc li ol li,.rst-content .section ul li ol li,.rst-content .toctree-wrapper ul li ol li,article ul li ol li{list-style:decimal}.wy-plain-list-decimal,.rst-content .section ol,.rst-content ol.arabic,article ol{list-style:decimal;line-height:24px;margin-bottom:24px}.wy-plain-list-decimal li,.rst-content .section ol li,.rst-content ol.arabic li,article ol li{list-style:decimal;margin-left:24px}.wy-plain-list-decimal li p:last-child,.rst-content .section ol li p:last-child,.rst-content ol.arabic li p:last-child,article ol li p:last-child{margin-bottom:0}.wy-plain-list-decimal li ul,.rst-content .section ol li ul,.rst-content ol.arabic li ul,article ol li ul{margin-bottom:0}.wy-plain-list-decimal li ul li,.rst-content .section ol li ul li,.rst-content ol.arabic li ul li,article ol li ul 
li{list-style:disc}.wy-breadcrumbs{*zoom:1}.wy-breadcrumbs:before,.wy-breadcrumbs:after{display:table;content:""}.wy-breadcrumbs:after{clear:both}.wy-breadcrumbs li{display:inline-block}.wy-breadcrumbs li.wy-breadcrumbs-aside{float:right}.wy-breadcrumbs li a{display:inline-block;padding:5px}.wy-breadcrumbs li a:first-child{padding-left:0}.wy-breadcrumbs li code,.wy-breadcrumbs li .rst-content tt,.rst-content .wy-breadcrumbs li tt{padding:5px;border:none;background:none}.wy-breadcrumbs li code.literal,.wy-breadcrumbs li .rst-content tt.literal,.rst-content .wy-breadcrumbs li tt.literal{color:#404040}.wy-breadcrumbs-extra{margin-bottom:0;color:#b3b3b3;font-size:80%;display:inline-block}@media screen and (max-width: 480px){.wy-breadcrumbs-extra{display:none}.wy-breadcrumbs li.wy-breadcrumbs-aside{display:none}}@media print{.wy-breadcrumbs li.wy-breadcrumbs-aside{display:none}}html{font-size:16px}.wy-affix{position:fixed;top:1.618em}.wy-menu a:hover{text-decoration:none}.wy-menu-horiz{*zoom:1}.wy-menu-horiz:before,.wy-menu-horiz:after{display:table;content:""}.wy-menu-horiz:after{clear:both}.wy-menu-horiz ul,.wy-menu-horiz li{display:inline-block}.wy-menu-horiz li:hover{background:rgba(255,255,255,0.1)}.wy-menu-horiz li.divide-left{border-left:solid 1px #404040}.wy-menu-horiz li.divide-right{border-right:solid 1px #404040}.wy-menu-horiz a{height:32px;display:inline-block;line-height:32px;padding:0 16px}.wy-menu-vertical{width:300px}.wy-menu-vertical header,.wy-menu-vertical p.caption{color:#3a7ca8;height:32px;display:inline-block;line-height:32px;padding:0 1.618em;margin:12px 0 0 0;display:block;font-weight:bold;text-transform:uppercase;font-size:85%;white-space:nowrap}.wy-menu-vertical ul{margin-bottom:0}.wy-menu-vertical li.divide-top{border-top:solid 1px #404040}.wy-menu-vertical li.divide-bottom{border-bottom:solid 1px #404040}.wy-menu-vertical li.current{background:#e3e3e3}.wy-menu-vertical li.current a{color:gray;border-right:solid 1px #c9c9c9;padding:.4045em 
2.427em}.wy-menu-vertical li.current a:hover{background:#d6d6d6}.wy-menu-vertical li code,.wy-menu-vertical li .rst-content tt,.rst-content .wy-menu-vertical li tt{border:none;background:inherit;color:inherit;padding-left:0;padding-right:0}.wy-menu-vertical li span.toctree-expand{display:block;float:left;margin-left:-1.2em;font-size:.8em;line-height:1.6em;color:#4d4d4d}.wy-menu-vertical li.on a,.wy-menu-vertical li.current>a{color:#404040;padding:.4045em 1.618em;font-weight:bold;position:relative;background:#fcfcfc;border:none;padding-left:1.618em -4px}.wy-menu-vertical li.on a:hover,.wy-menu-vertical li.current>a:hover{background:#fcfcfc}.wy-menu-vertical li.on a:hover span.toctree-expand,.wy-menu-vertical li.current>a:hover span.toctree-expand{color:gray}.wy-menu-vertical li.on a span.toctree-expand,.wy-menu-vertical li.current>a span.toctree-expand{display:block;font-size:.8em;line-height:1.6em;color:#333}.wy-menu-vertical li.toctree-l1.current>a{border-bottom:solid 1px #c9c9c9;border-top:solid 1px #c9c9c9}.wy-menu-vertical li.toctree-l2 a,.wy-menu-vertical li.toctree-l3 a,.wy-menu-vertical li.toctree-l4 a{color:#404040}.wy-menu-vertical li.toctree-l1.current li.toctree-l2>ul,.wy-menu-vertical li.toctree-l2.current li.toctree-l3>ul{display:none}.wy-menu-vertical li.toctree-l1.current li.toctree-l2.current>ul,.wy-menu-vertical li.toctree-l2.current li.toctree-l3.current>ul{display:block}.wy-menu-vertical li.toctree-l2.current>a{background:#c9c9c9;padding:.4045em 2.427em}.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a{display:block;background:#c9c9c9;padding:.4045em 4.045em}.wy-menu-vertical li.toctree-l2 a:hover span.toctree-expand{color:gray}.wy-menu-vertical li.toctree-l2 span.toctree-expand{color:#a3a3a3}.wy-menu-vertical li.toctree-l3{font-size:.9em}.wy-menu-vertical li.toctree-l3.current>a{background:#bdbdbd;padding:.4045em 4.045em}.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a{display:block;background:#bdbdbd;padding:.4045em 
5.663em}.wy-menu-vertical li.toctree-l3 a:hover span.toctree-expand{color:gray}.wy-menu-vertical li.toctree-l3 span.toctree-expand{color:#969696}.wy-menu-vertical li.toctree-l4{font-size:.9em}.wy-menu-vertical li.current ul{display:block}.wy-menu-vertical li ul{margin-bottom:0;display:none}.wy-menu-vertical li ul li a{margin-bottom:0;color:#d9d9d9;font-weight:normal}.wy-menu-vertical a{display:inline-block;line-height:18px;padding:.4045em 1.618em;display:block;position:relative;font-size:90%;color:#d9d9d9}.wy-menu-vertical a:hover{background-color:#4e4a4a;cursor:pointer}.wy-menu-vertical a:hover span.toctree-expand{color:#d9d9d9}.wy-menu-vertical a:active{background-color:#2980B9;cursor:pointer;color:#fff}.wy-menu-vertical a:active span.toctree-expand{color:#fff}.wy-side-nav-search{display:block;width:300px;padding:.809em;margin-bottom:.809em;z-index:200;background-color:#2980B9;text-align:center;padding:.809em;display:block;color:#fcfcfc;margin-bottom:.809em}.wy-side-nav-search input[type=text]{width:100%;border-radius:50px;padding:6px 12px;border-color:#2472a4}.wy-side-nav-search img{display:block;margin:auto auto .809em auto;height:45px;width:45px;background-color:#2980B9;padding:5px;border-radius:100%}.wy-side-nav-search>a,.wy-side-nav-search .wy-dropdown>a{color:#fcfcfc;font-size:100%;font-weight:bold;display:inline-block;padding:4px 6px;margin-bottom:.809em}.wy-side-nav-search>a:hover,.wy-side-nav-search .wy-dropdown>a:hover{background:rgba(255,255,255,0.1)}.wy-side-nav-search>a img.logo,.wy-side-nav-search .wy-dropdown>a img.logo{display:block;margin:0 auto;height:auto;width:auto;border-radius:0;max-width:100%;background:transparent}.wy-side-nav-search>a.icon img.logo,.wy-side-nav-search .wy-dropdown>a.icon img.logo{margin-top:.85em}.wy-side-nav-search>div.version{margin-top:-.4045em;margin-bottom:.809em;font-weight:normal;color:rgba(255,255,255,0.3)}.wy-nav .wy-menu-vertical header{color:#2980B9}.wy-nav .wy-menu-vertical a{color:#b3b3b3}.wy-nav 
.wy-menu-vertical a:hover{background-color:#2980B9;color:#fff}[data-menu-wrap]{-webkit-transition:all .2s ease-in;-moz-transition:all .2s ease-in;transition:all .2s ease-in;position:absolute;opacity:1;width:100%;opacity:0}[data-menu-wrap].move-center{left:0;right:auto;opacity:1}[data-menu-wrap].move-left{right:auto;left:-100%;opacity:0}[data-menu-wrap].move-right{right:-100%;left:auto;opacity:0}.wy-body-for-nav{background:#fcfcfc}.wy-grid-for-nav{position:absolute;width:100%;height:100%}.wy-nav-side{position:fixed;top:0;bottom:0;left:0;padding-bottom:2em;width:300px;overflow-x:hidden;overflow-y:hidden;min-height:100%;color:#9b9b9b;background:#343131;z-index:200}.wy-side-scroll{width:320px;position:relative;overflow-x:hidden;overflow-y:scroll;height:100%}.wy-nav-top{display:none;background:#2980B9;color:#fff;padding:.4045em .809em;position:relative;line-height:50px;text-align:center;font-size:100%;*zoom:1}.wy-nav-top:before,.wy-nav-top:after{display:table;content:""}.wy-nav-top:after{clear:both}.wy-nav-top a{color:#fff;font-weight:bold}.wy-nav-top img{margin-right:12px;height:45px;width:45px;background-color:#2980B9;padding:5px;border-radius:100%}.wy-nav-top i{font-size:30px;float:left;cursor:pointer;padding-top:inherit}.wy-nav-content-wrap{margin-left:300px;background:#fcfcfc;min-height:100%}.wy-nav-content{padding:1.618em 3.236em;height:100%;max-width:800px;margin:auto}.wy-body-mask{position:fixed;width:100%;height:100%;background:rgba(0,0,0,0.2);display:none;z-index:499}.wy-body-mask.on{display:block}footer{color:gray}footer p{margin-bottom:12px}footer span.commit code,footer span.commit .rst-content tt,.rst-content footer span.commit tt{padding:0px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier 
New",Courier,monospace;font-size:1em;background:none;border:none;color:gray}.rst-footer-buttons{*zoom:1}.rst-footer-buttons:before,.rst-footer-buttons:after{width:100%}.rst-footer-buttons:before,.rst-footer-buttons:after{display:table;content:""}.rst-footer-buttons:after{clear:both}.rst-breadcrumbs-buttons{margin-top:12px;*zoom:1}.rst-breadcrumbs-buttons:before,.rst-breadcrumbs-buttons:after{display:table;content:""}.rst-breadcrumbs-buttons:after{clear:both}#search-results .search li{margin-bottom:24px;border-bottom:solid 1px #e1e4e5;padding-bottom:24px}#search-results .search li:first-child{border-top:solid 1px #e1e4e5;padding-top:24px}#search-results .search li a{font-size:120%;margin-bottom:12px;display:inline-block}#search-results .context{color:gray;font-size:90%}.genindextable li>ul{margin-left:24px}@media screen and (max-width: 768px){.wy-body-for-nav{background:#fcfcfc}.wy-nav-top{display:block}.wy-nav-side{left:-300px}.wy-nav-side.shift{width:85%;left:0}.wy-side-scroll{width:auto}.wy-side-nav-search{width:auto}.wy-menu.wy-menu-vertical{width:auto}.wy-nav-content-wrap{margin-left:0}.wy-nav-content-wrap .wy-nav-content{padding:1.618em}.wy-nav-content-wrap.shift{position:fixed;min-width:100%;left:85%;top:0;height:100%;overflow:hidden}}@media screen and (min-width: 1100px){.wy-nav-content-wrap{background:rgba(0,0,0,0.05)}.wy-nav-content{margin:0;background:#fcfcfc}}@media print{.rst-versions,footer,.wy-nav-side{display:none}.wy-nav-content-wrap{margin-left:0}}.rst-versions{position:fixed;bottom:0;left:0;width:300px;color:#fcfcfc;background:#1f1d1d;font-family:"Lato","proxima-nova","Helvetica Neue",Arial,sans-serif;z-index:400}.rst-versions a{color:#2980B9;text-decoration:none}.rst-versions .rst-badge-small{display:none}.rst-versions .rst-current-version{padding:12px;background-color:#272525;display:block;text-align:right;font-size:90%;cursor:pointer;color:#27AE60;*zoom:1}.rst-versions .rst-current-version:before,.rst-versions 
.rst-current-version:after{display:table;content:""}.rst-versions .rst-current-version:after{clear:both}.rst-versions .rst-current-version .fa,.rst-versions .rst-current-version .wy-menu-vertical li span.toctree-expand,.wy-menu-vertical li .rst-versions .rst-current-version span.toctree-expand,.rst-versions .rst-current-version .rst-content .admonition-title,.rst-content .rst-versions .rst-current-version .admonition-title,.rst-versions .rst-current-version .rst-content h1 .headerlink,.rst-content h1 .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content h2 .headerlink,.rst-content h2 .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content h3 .headerlink,.rst-content h3 .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content h4 .headerlink,.rst-content h4 .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content h5 .headerlink,.rst-content h5 .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content h6 .headerlink,.rst-content h6 .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content dl dt .headerlink,.rst-content dl dt .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content p.caption .headerlink,.rst-content p.caption .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content table>caption .headerlink,.rst-content table>caption .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content .code-block-caption .headerlink,.rst-content .code-block-caption .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content tt.download span:first-child,.rst-content tt.download .rst-versions .rst-current-version span:first-child,.rst-versions .rst-current-version .rst-content code.download 
span:first-child,.rst-content code.download .rst-versions .rst-current-version span:first-child,.rst-versions .rst-current-version .icon{color:#fcfcfc}.rst-versions .rst-current-version .fa-book,.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version.rst-out-of-date{background-color:#E74C3C;color:#fff}.rst-versions .rst-current-version.rst-active-old-version{background-color:#F1C40F;color:#000}.rst-versions.shift-up{height:auto;max-height:100%;overflow-y:scroll}.rst-versions.shift-up .rst-other-versions{display:block}.rst-versions .rst-other-versions{font-size:90%;padding:12px;color:gray;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 0;padding:0;border-top:solid 1px #413d3d}.rst-versions .rst-other-versions dd{display:inline-block;margin:0}.rst-versions .rst-other-versions dd a{display:inline-block;padding:6px;color:#fcfcfc}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px;max-height:90%}.rst-versions.rst-badge .icon-book{float:none}.rst-versions.rst-badge .fa-book,.rst-versions.rst-badge .icon-book{float:none}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .fa-book,.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge .rst-current-version{width:auto;height:30px;line-height:30px;padding:0 6px;display:block;text-align:center}@media screen and (max-width: 768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}}.rst-content img{max-width:100%;height:auto}.rst-content div.figure{margin-bottom:24px}.rst-content div.figure p.caption{font-style:italic}.rst-content div.figure p:last-child.caption{margin-bottom:0px}.rst-content 
div.figure.align-center{text-align:center}.rst-content .section>img,.rst-content .section>a>img{margin-bottom:24px}.rst-content abbr[title]{text-decoration:none}.rst-content.style-external-links a.reference.external:after{font-family:FontAwesome;content:"";color:#b3b3b3;vertical-align:super;font-size:60%;margin:0 .2em}.rst-content blockquote{margin-left:24px;line-height:24px;margin-bottom:24px}.rst-content pre.literal-block{white-space:pre;margin:0;padding:12px 12px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",Courier,monospace;display:block;overflow:auto}.rst-content pre.literal-block,.rst-content div[class^='highlight']{border:1px solid #e1e4e5;overflow-x:auto;margin:1px 0 24px 0}.rst-content pre.literal-block div[class^='highlight'],.rst-content div[class^='highlight'] div[class^='highlight']{padding:0px;border:none;margin:0}.rst-content div[class^='highlight'] td.code{width:100%}.rst-content .linenodiv pre{border-right:solid 1px #e6e9ea;margin:0;padding:12px 12px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",Courier,monospace;user-select:none;pointer-events:none}.rst-content div[class^='highlight'] pre{white-space:pre;margin:0;padding:12px 12px;display:block;overflow:auto}.rst-content div[class^='highlight'] pre .hll{display:block;margin:0 -12px;padding:0 12px}.rst-content pre.literal-block,.rst-content div[class^='highlight'] pre,.rst-content .linenodiv pre{font-family:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",Courier,monospace;font-size:12px;line-height:1.4}.rst-content .code-block-caption{font-style:italic;font-size:85%;line-height:1;padding:1em 0;text-align:center}@media print{.rst-content .codeblock,.rst-content div[class^='highlight'],.rst-content div[class^='highlight'] pre{white-space:pre-wrap}}.rst-content .note .last,.rst-content .attention .last,.rst-content .caution .last,.rst-content .danger .last,.rst-content .error .last,.rst-content .hint 
.last,.rst-content .important .last,.rst-content .tip .last,.rst-content .warning .last,.rst-content .seealso .last,.rst-content .admonition-todo .last,.rst-content .admonition .last{margin-bottom:0}.rst-content .admonition-title:before{margin-right:4px}.rst-content .admonition table{border-color:rgba(0,0,0,0.1)}.rst-content .admonition table td,.rst-content .admonition table th{background:transparent !important;border-color:rgba(0,0,0,0.1) !important}.rst-content .section ol.loweralpha,.rst-content .section ol.loweralpha li{list-style:lower-alpha}.rst-content .section ol.upperalpha,.rst-content .section ol.upperalpha li{list-style:upper-alpha}.rst-content .section ol p,.rst-content .section ul p{margin-bottom:12px}.rst-content .section ol p:last-child,.rst-content .section ul p:last-child{margin-bottom:24px}.rst-content .line-block{margin-left:0px;margin-bottom:24px;line-height:24px}.rst-content .line-block .line-block{margin-left:24px;margin-bottom:0px}.rst-content .topic-title{font-weight:bold;margin-bottom:12px}.rst-content .toc-backref{color:#404040}.rst-content .align-right{float:right;margin:0px 0px 24px 24px}.rst-content .align-left{float:left;margin:0px 24px 24px 0px}.rst-content .align-center{margin:auto}.rst-content .align-center:not(table){display:block}.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content .toctree-wrapper p.caption .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content dl dt .headerlink,.rst-content p.caption .headerlink,.rst-content table>caption .headerlink,.rst-content .code-block-caption .headerlink{visibility:hidden;font-size:14px}.rst-content h1 .headerlink:after,.rst-content h2 .headerlink:after,.rst-content .toctree-wrapper p.caption .headerlink:after,.rst-content h3 .headerlink:after,.rst-content h4 .headerlink:after,.rst-content h5 .headerlink:after,.rst-content h6 .headerlink:after,.rst-content dl dt 
.headerlink:after,.rst-content p.caption .headerlink:after,.rst-content table>caption .headerlink:after,.rst-content .code-block-caption .headerlink:after{content:"";font-family:FontAwesome}.rst-content h1:hover .headerlink:after,.rst-content h2:hover .headerlink:after,.rst-content .toctree-wrapper p.caption:hover .headerlink:after,.rst-content h3:hover .headerlink:after,.rst-content h4:hover .headerlink:after,.rst-content h5:hover .headerlink:after,.rst-content h6:hover .headerlink:after,.rst-content dl dt:hover .headerlink:after,.rst-content p.caption:hover .headerlink:after,.rst-content table>caption:hover .headerlink:after,.rst-content .code-block-caption:hover .headerlink:after{visibility:visible}.rst-content table>caption .headerlink:after{font-size:12px}.rst-content .centered{text-align:center}.rst-content .sidebar{float:right;width:40%;display:block;margin:0 0 24px 24px;padding:24px;background:#f3f6f6;border:solid 1px #e1e4e5}.rst-content .sidebar p,.rst-content .sidebar ul,.rst-content .sidebar dl{font-size:90%}.rst-content .sidebar .last{margin-bottom:0}.rst-content .sidebar .sidebar-title{display:block;font-family:"Roboto Slab","ff-tisa-web-pro","Georgia",Arial,sans-serif;font-weight:bold;background:#e1e4e5;padding:6px 12px;margin:-24px;margin-bottom:24px;font-size:100%}.rst-content .highlighted{background:#F1C40F;display:inline-block;font-weight:bold;padding:0 6px}.rst-content .footnote-reference,.rst-content .citation-reference{vertical-align:baseline;position:relative;top:-0.4em;line-height:0;font-size:90%}.rst-content table.docutils.citation,.rst-content table.docutils.footnote{background:none;border:none;color:gray}.rst-content table.docutils.citation td,.rst-content table.docutils.citation tr,.rst-content table.docutils.footnote td,.rst-content table.docutils.footnote tr{border:none;background-color:transparent !important;white-space:normal}.rst-content table.docutils.citation td.label,.rst-content table.docutils.footnote 
td.label{padding-left:0;padding-right:0;vertical-align:top}.rst-content table.docutils.citation tt,.rst-content table.docutils.citation code,.rst-content table.docutils.footnote tt,.rst-content table.docutils.footnote code{color:#555}.rst-content .wy-table-responsive.citation,.rst-content .wy-table-responsive.footnote{margin-bottom:0}.rst-content .wy-table-responsive.citation+:not(.citation),.rst-content .wy-table-responsive.footnote+:not(.footnote){margin-top:24px}.rst-content .wy-table-responsive.citation:last-child,.rst-content .wy-table-responsive.footnote:last-child{margin-bottom:24px}.rst-content table.docutils th{border-color:#e1e4e5}.rst-content table.docutils td .last,.rst-content table.docutils td .last :last-child{margin-bottom:0}.rst-content table.field-list{border:none}.rst-content table.field-list td{border:none}.rst-content table.field-list td p{font-size:inherit;line-height:inherit}.rst-content table.field-list td>strong{display:inline-block}.rst-content table.field-list .field-name{padding-right:10px;text-align:left;white-space:nowrap}.rst-content table.field-list .field-body{text-align:left}.rst-content tt,.rst-content tt,.rst-content code{color:#000;font-family:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",Courier,monospace;padding:2px 5px}.rst-content tt big,.rst-content tt em,.rst-content tt big,.rst-content code big,.rst-content tt em,.rst-content code em{font-size:100% !important;line-height:normal}.rst-content tt.literal,.rst-content tt.literal,.rst-content code.literal{color:#E74C3C}.rst-content tt.xref,a .rst-content tt,.rst-content tt.xref,.rst-content code.xref,a .rst-content tt,a .rst-content code{font-weight:bold;color:#404040}.rst-content pre,.rst-content kbd,.rst-content samp{font-family:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",Courier,monospace}.rst-content a tt,.rst-content a tt,.rst-content a code{color:#2980B9}.rst-content dl{margin-bottom:24px}.rst-content dl 
dt{font-weight:bold;margin-bottom:12px}.rst-content dl p,.rst-content dl table,.rst-content dl ul,.rst-content dl ol{margin-bottom:12px !important}.rst-content dl dd{margin:0 0 12px 24px;line-height:24px}.rst-content dl:not(.docutils){margin-bottom:24px}.rst-content dl:not(.docutils) dt{display:table;margin:6px 0;font-size:90%;line-height:normal;background:#e7f2fa;color:#2980B9;border-top:solid 3px #6ab0de;padding:6px;position:relative}.rst-content dl:not(.docutils) dt:before{color:#6ab0de}.rst-content dl:not(.docutils) dt .headerlink{color:#404040;font-size:100% !important}.rst-content dl:not(.docutils) dl dt{margin-bottom:6px;border:none;border-left:solid 3px #ccc;background:#f0f0f0;color:#555}.rst-content dl:not(.docutils) dl dt .headerlink{color:#404040;font-size:100% !important}.rst-content dl:not(.docutils) dt:first-child{margin-top:0}.rst-content dl:not(.docutils) tt,.rst-content dl:not(.docutils) tt,.rst-content dl:not(.docutils) code{font-weight:bold}.rst-content dl:not(.docutils) tt.descname,.rst-content dl:not(.docutils) tt.descclassname,.rst-content dl:not(.docutils) tt.descname,.rst-content dl:not(.docutils) code.descname,.rst-content dl:not(.docutils) tt.descclassname,.rst-content dl:not(.docutils) code.descclassname{background-color:transparent;border:none;padding:0;font-size:100% !important}.rst-content dl:not(.docutils) tt.descname,.rst-content dl:not(.docutils) tt.descname,.rst-content dl:not(.docutils) code.descname{font-weight:bold}.rst-content dl:not(.docutils) .optional{display:inline-block;padding:0 4px;color:#000;font-weight:bold}.rst-content dl:not(.docutils) .property{display:inline-block;padding-right:8px}.rst-content .viewcode-link,.rst-content .viewcode-back{display:inline-block;color:#27AE60;font-size:80%;padding-left:24px}.rst-content .viewcode-back{display:block;float:right}.rst-content p.rubric{margin-bottom:12px;font-weight:bold}.rst-content tt.download,.rst-content 
code.download{background:inherit;padding:inherit;font-weight:normal;font-family:inherit;font-size:inherit;color:inherit;border:inherit;white-space:inherit}.rst-content tt.download span:first-child,.rst-content code.download span:first-child{-webkit-font-smoothing:subpixel-antialiased}.rst-content tt.download span:first-child:before,.rst-content code.download span:first-child:before{margin-right:4px}.rst-content .guilabel{border:1px solid #7fbbe3;background:#e7f2fa;font-size:80%;font-weight:700;border-radius:4px;padding:2.4px 6px;margin:auto 2px}.rst-content .versionmodified{font-style:italic}@media screen and (max-width: 480px){.rst-content .sidebar{width:100%}}span[id*='MathJax-Span']{color:#404040}.math{text-align:center}@font-face{font-family:"Lato";src:url("../fonts/Lato/lato-regular.eot");src:url("../fonts/Lato/lato-regular.eot?#iefix") format("embedded-opentype"),url("../fonts/Lato/lato-regular.woff2") format("woff2"),url("../fonts/Lato/lato-regular.woff") format("woff"),url("../fonts/Lato/lato-regular.ttf") format("truetype");font-weight:400;font-style:normal}@font-face{font-family:"Lato";src:url("../fonts/Lato/lato-bold.eot");src:url("../fonts/Lato/lato-bold.eot?#iefix") format("embedded-opentype"),url("../fonts/Lato/lato-bold.woff2") format("woff2"),url("../fonts/Lato/lato-bold.woff") format("woff"),url("../fonts/Lato/lato-bold.ttf") format("truetype");font-weight:700;font-style:normal}@font-face{font-family:"Lato";src:url("../fonts/Lato/lato-bolditalic.eot");src:url("../fonts/Lato/lato-bolditalic.eot?#iefix") format("embedded-opentype"),url("../fonts/Lato/lato-bolditalic.woff2") format("woff2"),url("../fonts/Lato/lato-bolditalic.woff") format("woff"),url("../fonts/Lato/lato-bolditalic.ttf") format("truetype");font-weight:700;font-style:italic}@font-face{font-family:"Lato";src:url("../fonts/Lato/lato-italic.eot");src:url("../fonts/Lato/lato-italic.eot?#iefix") format("embedded-opentype"),url("../fonts/Lato/lato-italic.woff2") 
format("woff2"),url("../fonts/Lato/lato-italic.woff") format("woff"),url("../fonts/Lato/lato-italic.ttf") format("truetype");font-weight:400;font-style:italic}@font-face{font-family:"Roboto Slab";font-style:normal;font-weight:400;src:url("../fonts/RobotoSlab/roboto-slab.eot");src:url("../fonts/RobotoSlab/roboto-slab-v7-regular.eot?#iefix") format("embedded-opentype"),url("../fonts/RobotoSlab/roboto-slab-v7-regular.woff2") format("woff2"),url("../fonts/RobotoSlab/roboto-slab-v7-regular.woff") format("woff"),url("../fonts/RobotoSlab/roboto-slab-v7-regular.ttf") format("truetype")}@font-face{font-family:"Roboto Slab";font-style:normal;font-weight:700;src:url("../fonts/RobotoSlab/roboto-slab-v7-bold.eot");src:url("../fonts/RobotoSlab/roboto-slab-v7-bold.eot?#iefix") format("embedded-opentype"),url("../fonts/RobotoSlab/roboto-slab-v7-bold.woff2") format("woff2"),url("../fonts/RobotoSlab/roboto-slab-v7-bold.woff") format("woff"),url("../fonts/RobotoSlab/roboto-slab-v7-bold.ttf") format("truetype")} diff --git a/docs/_build/html/_static/custom.css b/docs/_build/html/_static/custom.css deleted file mode 100644 index 2a924f1..0000000 --- a/docs/_build/html/_static/custom.css +++ /dev/null @@ -1 +0,0 @@ -/* This file intentionally left blank. */ diff --git a/docs/_build/html/_static/doctools.js b/docs/_build/html/_static/doctools.js deleted file mode 100644 index ffadbec..0000000 --- a/docs/_build/html/_static/doctools.js +++ /dev/null @@ -1,315 +0,0 @@ -/* - * doctools.js - * ~~~~~~~~~~~ - * - * Sphinx JavaScript utilities for all documentation. - * - * :copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS. - * :license: BSD, see LICENSE for details. 
- * - */ - -/** - * select a different prefix for underscore - */ -$u = _.noConflict(); - -/** - * make the code below compatible with browsers without - * an installed firebug like debugger -if (!window.console || !console.firebug) { - var names = ["log", "debug", "info", "warn", "error", "assert", "dir", - "dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace", - "profile", "profileEnd"]; - window.console = {}; - for (var i = 0; i < names.length; ++i) - window.console[names[i]] = function() {}; -} - */ - -/** - * small helper function to urldecode strings - */ -jQuery.urldecode = function(x) { - return decodeURIComponent(x).replace(/\+/g, ' '); -}; - -/** - * small helper function to urlencode strings - */ -jQuery.urlencode = encodeURIComponent; - -/** - * This function returns the parsed url parameters of the - * current request. Multiple values per key are supported, - * it will always return arrays of strings for the value parts. - */ -jQuery.getQueryParameters = function(s) { - if (typeof s === 'undefined') - s = document.location.search; - var parts = s.substr(s.indexOf('?') + 1).split('&'); - var result = {}; - for (var i = 0; i < parts.length; i++) { - var tmp = parts[i].split('=', 2); - var key = jQuery.urldecode(tmp[0]); - var value = jQuery.urldecode(tmp[1]); - if (key in result) - result[key].push(value); - else - result[key] = [value]; - } - return result; -}; - -/** - * highlight a given string on a jquery object by wrapping it in - * span elements with the given class name. 
- */ -jQuery.fn.highlightText = function(text, className) { - function highlight(node, addItems) { - if (node.nodeType === 3) { - var val = node.nodeValue; - var pos = val.toLowerCase().indexOf(text); - if (pos >= 0 && - !jQuery(node.parentNode).hasClass(className) && - !jQuery(node.parentNode).hasClass("nohighlight")) { - var span; - var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg"); - if (isInSVG) { - span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); - } else { - span = document.createElement("span"); - span.className = className; - } - span.appendChild(document.createTextNode(val.substr(pos, text.length))); - node.parentNode.insertBefore(span, node.parentNode.insertBefore( - document.createTextNode(val.substr(pos + text.length)), - node.nextSibling)); - node.nodeValue = val.substr(0, pos); - if (isInSVG) { - var bbox = span.getBBox(); - var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect"); - rect.x.baseVal.value = bbox.x; - rect.y.baseVal.value = bbox.y; - rect.width.baseVal.value = bbox.width; - rect.height.baseVal.value = bbox.height; - rect.setAttribute('class', className); - var parentOfText = node.parentNode.parentNode; - addItems.push({ - "parent": node.parentNode, - "target": rect}); - } - } - } - else if (!jQuery(node).is("button, select, textarea")) { - jQuery.each(node.childNodes, function() { - highlight(this, addItems); - }); - } - } - var addItems = []; - var result = this.each(function() { - highlight(this, addItems); - }); - for (var i = 0; i < addItems.length; ++i) { - jQuery(addItems[i].parent).before(addItems[i].target); - } - return result; -}; - -/* - * backward compatibility for jQuery.browser - * This will be supported until firefox bug is fixed. 
- */ -if (!jQuery.browser) { - jQuery.uaMatch = function(ua) { - ua = ua.toLowerCase(); - - var match = /(chrome)[ \/]([\w.]+)/.exec(ua) || - /(webkit)[ \/]([\w.]+)/.exec(ua) || - /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) || - /(msie) ([\w.]+)/.exec(ua) || - ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) || - []; - - return { - browser: match[ 1 ] || "", - version: match[ 2 ] || "0" - }; - }; - jQuery.browser = {}; - jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true; -} - -/** - * Small JavaScript module for the documentation. - */ -var Documentation = { - - init : function() { - this.fixFirefoxAnchorBug(); - this.highlightSearchWords(); - this.initIndexTable(); - if (DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) { - this.initOnKeyListeners(); - } - }, - - /** - * i18n support - */ - TRANSLATIONS : {}, - PLURAL_EXPR : function(n) { return n === 1 ? 0 : 1; }, - LOCALE : 'unknown', - - // gettext and ngettext don't access this so that the functions - // can safely bound to a different name (_ = Documentation.gettext) - gettext : function(string) { - var translated = Documentation.TRANSLATIONS[string]; - if (typeof translated === 'undefined') - return string; - return (typeof translated === 'string') ? translated : translated[0]; - }, - - ngettext : function(singular, plural, n) { - var translated = Documentation.TRANSLATIONS[singular]; - if (typeof translated === 'undefined') - return (n == 1) ? singular : plural; - return translated[Documentation.PLURALEXPR(n)]; - }, - - addTranslations : function(catalog) { - for (var key in catalog.messages) - this.TRANSLATIONS[key] = catalog.messages[key]; - this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')'); - this.LOCALE = catalog.locale; - }, - - /** - * add context elements like header anchor links - */ - addContextElements : function() { - $('div[id] > :header:first').each(function() { - $('\u00B6'). - attr('href', '#' + this.id). 
- attr('title', _('Permalink to this headline')). - appendTo(this); - }); - $('dt[id]').each(function() { - $('\u00B6'). - attr('href', '#' + this.id). - attr('title', _('Permalink to this definition')). - appendTo(this); - }); - }, - - /** - * workaround a firefox stupidity - * see: https://bugzilla.mozilla.org/show_bug.cgi?id=645075 - */ - fixFirefoxAnchorBug : function() { - if (document.location.hash && $.browser.mozilla) - window.setTimeout(function() { - document.location.href += ''; - }, 10); - }, - - /** - * highlight the search words provided in the url in the text - */ - highlightSearchWords : function() { - var params = $.getQueryParameters(); - var terms = (params.highlight) ? params.highlight[0].split(/\s+/) : []; - if (terms.length) { - var body = $('div.body'); - if (!body.length) { - body = $('body'); - } - window.setTimeout(function() { - $.each(terms, function() { - body.highlightText(this.toLowerCase(), 'highlighted'); - }); - }, 10); - $('') - .appendTo($('#searchbox')); - } - }, - - /** - * init the domain index toggle buttons - */ - initIndexTable : function() { - var togglers = $('img.toggler').click(function() { - var src = $(this).attr('src'); - var idnum = $(this).attr('id').substr(7); - $('tr.cg-' + idnum).toggle(); - if (src.substr(-9) === 'minus.png') - $(this).attr('src', src.substr(0, src.length-9) + 'plus.png'); - else - $(this).attr('src', src.substr(0, src.length-8) + 'minus.png'); - }).css('display', ''); - if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) { - togglers.click(); - } - }, - - /** - * helper function to hide the search marks again - */ - hideSearchWords : function() { - $('#searchbox .highlight-link').fadeOut(300); - $('span.highlighted').removeClass('highlighted'); - }, - - /** - * make the url absolute - */ - makeURL : function(relativeURL) { - return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL; - }, - - /** - * get the current relative url - */ - getCurrentURL : function() { - var path = document.location.pathname; 
- var parts = path.split(/\//); - $.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() { - if (this === '..') - parts.pop(); - }); - var url = parts.join('/'); - return path.substring(url.lastIndexOf('/') + 1, path.length - 1); - }, - - initOnKeyListeners: function() { - $(document).keyup(function(event) { - var activeElementType = document.activeElement.tagName; - // don't navigate when in search box or textarea - if (activeElementType !== 'TEXTAREA' && activeElementType !== 'INPUT' && activeElementType !== 'SELECT') { - switch (event.keyCode) { - case 37: // left - var prevHref = $('link[rel="prev"]').prop('href'); - if (prevHref) { - window.location.href = prevHref; - return false; - } - case 39: // right - var nextHref = $('link[rel="next"]').prop('href'); - if (nextHref) { - window.location.href = nextHref; - return false; - } - } - } - }); - } -}; - -// quick alias for translations -_ = Documentation.gettext; - -$(document).ready(function() { - Documentation.init(); -}); diff --git a/docs/_build/html/_static/documentation_options.js b/docs/_build/html/_static/documentation_options.js deleted file mode 100644 index 168d437..0000000 --- a/docs/_build/html/_static/documentation_options.js +++ /dev/null @@ -1,296 +0,0 @@ -var DOCUMENTATION_OPTIONS = { - URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'), - VERSION: '', - LANGUAGE: 'None', - COLLAPSE_INDEX: false, - FILE_SUFFIX: '.html', - HAS_SOURCE: true, - SOURCELINK_SUFFIX: '.txt', - NAVIGATION_WITH_KEYS: false, - SEARCH_LANGUAGE_STOP_WORDS: ["a","and","are","as","at","be","but","by","for","if","in","into","is","it","near","no","not","of","on","or","such","that","the","their","then","there","these","they","this","to","was","will","with"] -}; - - - -/* Non-minified version JS is _stemmer.js if file is provided */ -/** - * Porter Stemmer - */ -var Stemmer = function() { - - var step2list = { - ational: 'ate', - tional: 'tion', - enci: 'ence', - anci: 'ance', - izer: 
'ize', - bli: 'ble', - alli: 'al', - entli: 'ent', - eli: 'e', - ousli: 'ous', - ization: 'ize', - ation: 'ate', - ator: 'ate', - alism: 'al', - iveness: 'ive', - fulness: 'ful', - ousness: 'ous', - aliti: 'al', - iviti: 'ive', - biliti: 'ble', - logi: 'log' - }; - - var step3list = { - icate: 'ic', - ative: '', - alize: 'al', - iciti: 'ic', - ical: 'ic', - ful: '', - ness: '' - }; - - var c = "[^aeiou]"; // consonant - var v = "[aeiouy]"; // vowel - var C = c + "[^aeiouy]*"; // consonant sequence - var V = v + "[aeiou]*"; // vowel sequence - - var mgr0 = "^(" + C + ")?" + V + C; // [C]VC... is m>0 - var meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$"; // [C]VC[V] is m=1 - var mgr1 = "^(" + C + ")?" + V + C + V + C; // [C]VCVC... is m>1 - var s_v = "^(" + C + ")?" + v; // vowel in stem - - this.stemWord = function (w) { - var stem; - var suffix; - var firstch; - var origword = w; - - if (w.length < 3) - return w; - - var re; - var re2; - var re3; - var re4; - - firstch = w.substr(0,1); - if (firstch == "y") - w = firstch.toUpperCase() + w.substr(1); - - // Step 1a - re = /^(.+?)(ss|i)es$/; - re2 = /^(.+?)([^s])s$/; - - if (re.test(w)) - w = w.replace(re,"$1$2"); - else if (re2.test(w)) - w = w.replace(re2,"$1$2"); - - // Step 1b - re = /^(.+?)eed$/; - re2 = /^(.+?)(ed|ing)$/; - if (re.test(w)) { - var fp = re.exec(w); - re = new RegExp(mgr0); - if (re.test(fp[1])) { - re = /.$/; - w = w.replace(re,""); - } - } - else if (re2.test(w)) { - var fp = re2.exec(w); - stem = fp[1]; - re2 = new RegExp(s_v); - if (re2.test(stem)) { - w = stem; - re2 = /(at|bl|iz)$/; - re3 = new RegExp("([^aeiouylsz])\\1$"); - re4 = new RegExp("^" + C + v + "[^aeiouwxy]$"); - if (re2.test(w)) - w = w + "e"; - else if (re3.test(w)) { - re = /.$/; - w = w.replace(re,""); - } - else if (re4.test(w)) - w = w + "e"; - } - } - - // Step 1c - re = /^(.+?)y$/; - if (re.test(w)) { - var fp = re.exec(w); - stem = fp[1]; - re = new RegExp(s_v); - if (re.test(stem)) - w = stem + "i"; - } - - // Step 2 
- re = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/; - if (re.test(w)) { - var fp = re.exec(w); - stem = fp[1]; - suffix = fp[2]; - re = new RegExp(mgr0); - if (re.test(stem)) - w = stem + step2list[suffix]; - } - - // Step 3 - re = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/; - if (re.test(w)) { - var fp = re.exec(w); - stem = fp[1]; - suffix = fp[2]; - re = new RegExp(mgr0); - if (re.test(stem)) - w = stem + step3list[suffix]; - } - - // Step 4 - re = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/; - re2 = /^(.+?)(s|t)(ion)$/; - if (re.test(w)) { - var fp = re.exec(w); - stem = fp[1]; - re = new RegExp(mgr1); - if (re.test(stem)) - w = stem; - } - else if (re2.test(w)) { - var fp = re2.exec(w); - stem = fp[1] + fp[2]; - re2 = new RegExp(mgr1); - if (re2.test(stem)) - w = stem; - } - - // Step 5 - re = /^(.+?)e$/; - if (re.test(w)) { - var fp = re.exec(w); - stem = fp[1]; - re = new RegExp(mgr1); - re2 = new RegExp(meq1); - re3 = new RegExp("^" + C + v + "[^aeiouwxy]$"); - if (re.test(stem) || (re2.test(stem) && !(re3.test(stem)))) - w = stem; - } - re = /ll$/; - re2 = new RegExp(mgr1); - if (re.test(w) && re2.test(w)) { - re = /.$/; - w = w.replace(re,""); - } - - // and turn initial Y back to y - if (firstch == "y") - w = firstch.toLowerCase() + w.substr(1); - return w; - } -} - - - - - -var splitChars = (function() { - var result = {}; - var singles = [96, 180, 187, 191, 215, 247, 749, 885, 903, 907, 909, 930, 1014, 1648, - 1748, 1809, 2416, 2473, 2481, 2526, 2601, 2609, 2612, 2615, 2653, 2702, - 2706, 2729, 2737, 2740, 2857, 2865, 2868, 2910, 2928, 2948, 2961, 2971, - 2973, 3085, 3089, 3113, 3124, 3213, 3217, 3241, 3252, 3295, 3341, 3345, - 3369, 3506, 3516, 3633, 3715, 3721, 3736, 3744, 3748, 3750, 3756, 3761, - 3781, 3912, 4239, 4347, 4681, 4695, 4697, 4745, 4785, 4799, 4801, 4823, - 4881, 5760, 5901, 5997, 6313, 7405, 
8024, 8026, 8028, 8030, 8117, 8125, - 8133, 8181, 8468, 8485, 8487, 8489, 8494, 8527, 11311, 11359, 11687, 11695, - 11703, 11711, 11719, 11727, 11735, 12448, 12539, 43010, 43014, 43019, 43587, - 43696, 43713, 64286, 64297, 64311, 64317, 64319, 64322, 64325, 65141]; - var i, j, start, end; - for (i = 0; i < singles.length; i++) { - result[singles[i]] = true; - } - var ranges = [[0, 47], [58, 64], [91, 94], [123, 169], [171, 177], [182, 184], [706, 709], - [722, 735], [741, 747], [751, 879], [888, 889], [894, 901], [1154, 1161], - [1318, 1328], [1367, 1368], [1370, 1376], [1416, 1487], [1515, 1519], [1523, 1568], - [1611, 1631], [1642, 1645], [1750, 1764], [1767, 1773], [1789, 1790], [1792, 1807], - [1840, 1868], [1958, 1968], [1970, 1983], [2027, 2035], [2038, 2041], [2043, 2047], - [2070, 2073], [2075, 2083], [2085, 2087], [2089, 2307], [2362, 2364], [2366, 2383], - [2385, 2391], [2402, 2405], [2419, 2424], [2432, 2436], [2445, 2446], [2449, 2450], - [2483, 2485], [2490, 2492], [2494, 2509], [2511, 2523], [2530, 2533], [2546, 2547], - [2554, 2564], [2571, 2574], [2577, 2578], [2618, 2648], [2655, 2661], [2672, 2673], - [2677, 2692], [2746, 2748], [2750, 2767], [2769, 2783], [2786, 2789], [2800, 2820], - [2829, 2830], [2833, 2834], [2874, 2876], [2878, 2907], [2914, 2917], [2930, 2946], - [2955, 2957], [2966, 2968], [2976, 2978], [2981, 2983], [2987, 2989], [3002, 3023], - [3025, 3045], [3059, 3076], [3130, 3132], [3134, 3159], [3162, 3167], [3170, 3173], - [3184, 3191], [3199, 3204], [3258, 3260], [3262, 3293], [3298, 3301], [3312, 3332], - [3386, 3388], [3390, 3423], [3426, 3429], [3446, 3449], [3456, 3460], [3479, 3481], - [3518, 3519], [3527, 3584], [3636, 3647], [3655, 3663], [3674, 3712], [3717, 3718], - [3723, 3724], [3726, 3731], [3752, 3753], [3764, 3772], [3774, 3775], [3783, 3791], - [3802, 3803], [3806, 3839], [3841, 3871], [3892, 3903], [3949, 3975], [3980, 4095], - [4139, 4158], [4170, 4175], [4182, 4185], [4190, 4192], [4194, 4196], [4199, 4205], - 
[4209, 4212], [4226, 4237], [4250, 4255], [4294, 4303], [4349, 4351], [4686, 4687], - [4702, 4703], [4750, 4751], [4790, 4791], [4806, 4807], [4886, 4887], [4955, 4968], - [4989, 4991], [5008, 5023], [5109, 5120], [5741, 5742], [5787, 5791], [5867, 5869], - [5873, 5887], [5906, 5919], [5938, 5951], [5970, 5983], [6001, 6015], [6068, 6102], - [6104, 6107], [6109, 6111], [6122, 6127], [6138, 6159], [6170, 6175], [6264, 6271], - [6315, 6319], [6390, 6399], [6429, 6469], [6510, 6511], [6517, 6527], [6572, 6592], - [6600, 6607], [6619, 6655], [6679, 6687], [6741, 6783], [6794, 6799], [6810, 6822], - [6824, 6916], [6964, 6980], [6988, 6991], [7002, 7042], [7073, 7085], [7098, 7167], - [7204, 7231], [7242, 7244], [7294, 7400], [7410, 7423], [7616, 7679], [7958, 7959], - [7966, 7967], [8006, 8007], [8014, 8015], [8062, 8063], [8127, 8129], [8141, 8143], - [8148, 8149], [8156, 8159], [8173, 8177], [8189, 8303], [8306, 8307], [8314, 8318], - [8330, 8335], [8341, 8449], [8451, 8454], [8456, 8457], [8470, 8472], [8478, 8483], - [8506, 8507], [8512, 8516], [8522, 8525], [8586, 9311], [9372, 9449], [9472, 10101], - [10132, 11263], [11493, 11498], [11503, 11516], [11518, 11519], [11558, 11567], - [11622, 11630], [11632, 11647], [11671, 11679], [11743, 11822], [11824, 12292], - [12296, 12320], [12330, 12336], [12342, 12343], [12349, 12352], [12439, 12444], - [12544, 12548], [12590, 12592], [12687, 12689], [12694, 12703], [12728, 12783], - [12800, 12831], [12842, 12880], [12896, 12927], [12938, 12976], [12992, 13311], - [19894, 19967], [40908, 40959], [42125, 42191], [42238, 42239], [42509, 42511], - [42540, 42559], [42592, 42593], [42607, 42622], [42648, 42655], [42736, 42774], - [42784, 42785], [42889, 42890], [42893, 43002], [43043, 43055], [43062, 43071], - [43124, 43137], [43188, 43215], [43226, 43249], [43256, 43258], [43260, 43263], - [43302, 43311], [43335, 43359], [43389, 43395], [43443, 43470], [43482, 43519], - [43561, 43583], [43596, 43599], [43610, 43615], [43639, 
43641], [43643, 43647], - [43698, 43700], [43703, 43704], [43710, 43711], [43715, 43738], [43742, 43967], - [44003, 44015], [44026, 44031], [55204, 55215], [55239, 55242], [55292, 55295], - [57344, 63743], [64046, 64047], [64110, 64111], [64218, 64255], [64263, 64274], - [64280, 64284], [64434, 64466], [64830, 64847], [64912, 64913], [64968, 65007], - [65020, 65135], [65277, 65295], [65306, 65312], [65339, 65344], [65371, 65381], - [65471, 65473], [65480, 65481], [65488, 65489], [65496, 65497]]; - for (i = 0; i < ranges.length; i++) { - start = ranges[i][0]; - end = ranges[i][1]; - for (j = start; j <= end; j++) { - result[j] = true; - } - } - return result; -})(); - -function splitQuery(query) { - var result = []; - var start = -1; - for (var i = 0; i < query.length; i++) { - if (splitChars[query.charCodeAt(i)]) { - if (start !== -1) { - result.push(query.slice(start, i)); - start = -1; - } - } else if (start === -1) { - start = i; - } - } - if (start !== -1) { - result.push(query.slice(start)); - } - return result; -} - - diff --git a/docs/_build/html/_static/down-pressed.png b/docs/_build/html/_static/down-pressed.png deleted file mode 100644 index 5756c8c..0000000 Binary files a/docs/_build/html/_static/down-pressed.png and /dev/null differ diff --git a/docs/_build/html/_static/down.png b/docs/_build/html/_static/down.png deleted file mode 100644 index 1b3bdad..0000000 Binary files a/docs/_build/html/_static/down.png and /dev/null differ diff --git a/docs/_build/html/_static/file.png b/docs/_build/html/_static/file.png deleted file mode 100644 index a858a41..0000000 Binary files a/docs/_build/html/_static/file.png and /dev/null differ diff --git a/docs/_build/html/_static/fonts/Inconsolata-Bold.ttf b/docs/_build/html/_static/fonts/Inconsolata-Bold.ttf deleted file mode 100644 index 809c1f5..0000000 Binary files a/docs/_build/html/_static/fonts/Inconsolata-Bold.ttf and /dev/null differ diff --git a/docs/_build/html/_static/fonts/Inconsolata-Regular.ttf 
b/docs/_build/html/_static/fonts/Inconsolata-Regular.ttf deleted file mode 100644 index fc981ce..0000000 Binary files a/docs/_build/html/_static/fonts/Inconsolata-Regular.ttf and /dev/null differ diff --git a/docs/_build/html/_static/fonts/Inconsolata.ttf b/docs/_build/html/_static/fonts/Inconsolata.ttf deleted file mode 100644 index 4b8a36d..0000000 Binary files a/docs/_build/html/_static/fonts/Inconsolata.ttf and /dev/null differ diff --git a/docs/_build/html/_static/fonts/Lato-Bold.ttf b/docs/_build/html/_static/fonts/Lato-Bold.ttf deleted file mode 100644 index 1d23c70..0000000 Binary files a/docs/_build/html/_static/fonts/Lato-Bold.ttf and /dev/null differ diff --git a/docs/_build/html/_static/fonts/Lato-Regular.ttf b/docs/_build/html/_static/fonts/Lato-Regular.ttf deleted file mode 100644 index 0f3d0f8..0000000 Binary files a/docs/_build/html/_static/fonts/Lato-Regular.ttf and /dev/null differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-bold.eot b/docs/_build/html/_static/fonts/Lato/lato-bold.eot deleted file mode 100644 index 3361183..0000000 Binary files a/docs/_build/html/_static/fonts/Lato/lato-bold.eot and /dev/null differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-bold.ttf b/docs/_build/html/_static/fonts/Lato/lato-bold.ttf deleted file mode 100644 index 29f691d..0000000 Binary files a/docs/_build/html/_static/fonts/Lato/lato-bold.ttf and /dev/null differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-bold.woff b/docs/_build/html/_static/fonts/Lato/lato-bold.woff deleted file mode 100644 index c6dff51..0000000 Binary files a/docs/_build/html/_static/fonts/Lato/lato-bold.woff and /dev/null differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-bold.woff2 b/docs/_build/html/_static/fonts/Lato/lato-bold.woff2 deleted file mode 100644 index bb19504..0000000 Binary files a/docs/_build/html/_static/fonts/Lato/lato-bold.woff2 and /dev/null differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-bolditalic.eot 
b/docs/_build/html/_static/fonts/Lato/lato-bolditalic.eot deleted file mode 100644 index 3d41549..0000000 Binary files a/docs/_build/html/_static/fonts/Lato/lato-bolditalic.eot and /dev/null differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-bolditalic.ttf b/docs/_build/html/_static/fonts/Lato/lato-bolditalic.ttf deleted file mode 100644 index f402040..0000000 Binary files a/docs/_build/html/_static/fonts/Lato/lato-bolditalic.ttf and /dev/null differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-bolditalic.woff b/docs/_build/html/_static/fonts/Lato/lato-bolditalic.woff deleted file mode 100644 index 88ad05b..0000000 Binary files a/docs/_build/html/_static/fonts/Lato/lato-bolditalic.woff and /dev/null differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-bolditalic.woff2 b/docs/_build/html/_static/fonts/Lato/lato-bolditalic.woff2 deleted file mode 100644 index c4e3d80..0000000 Binary files a/docs/_build/html/_static/fonts/Lato/lato-bolditalic.woff2 and /dev/null differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-italic.eot b/docs/_build/html/_static/fonts/Lato/lato-italic.eot deleted file mode 100644 index 3f82642..0000000 Binary files a/docs/_build/html/_static/fonts/Lato/lato-italic.eot and /dev/null differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-italic.ttf b/docs/_build/html/_static/fonts/Lato/lato-italic.ttf deleted file mode 100644 index b4bfc9b..0000000 Binary files a/docs/_build/html/_static/fonts/Lato/lato-italic.ttf and /dev/null differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-italic.woff b/docs/_build/html/_static/fonts/Lato/lato-italic.woff deleted file mode 100644 index 76114bc..0000000 Binary files a/docs/_build/html/_static/fonts/Lato/lato-italic.woff and /dev/null differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-italic.woff2 b/docs/_build/html/_static/fonts/Lato/lato-italic.woff2 deleted file mode 100644 index 3404f37..0000000 Binary files 
a/docs/_build/html/_static/fonts/Lato/lato-italic.woff2 and /dev/null differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-regular.eot b/docs/_build/html/_static/fonts/Lato/lato-regular.eot deleted file mode 100644 index 11e3f2a..0000000 Binary files a/docs/_build/html/_static/fonts/Lato/lato-regular.eot and /dev/null differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-regular.ttf b/docs/_build/html/_static/fonts/Lato/lato-regular.ttf deleted file mode 100644 index 74decd9..0000000 Binary files a/docs/_build/html/_static/fonts/Lato/lato-regular.ttf and /dev/null differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-regular.woff b/docs/_build/html/_static/fonts/Lato/lato-regular.woff deleted file mode 100644 index ae1307f..0000000 Binary files a/docs/_build/html/_static/fonts/Lato/lato-regular.woff and /dev/null differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-regular.woff2 b/docs/_build/html/_static/fonts/Lato/lato-regular.woff2 deleted file mode 100644 index 3bf9843..0000000 Binary files a/docs/_build/html/_static/fonts/Lato/lato-regular.woff2 and /dev/null differ diff --git a/docs/_build/html/_static/fonts/RobotoSlab-Bold.ttf b/docs/_build/html/_static/fonts/RobotoSlab-Bold.ttf deleted file mode 100644 index df5d1df..0000000 Binary files a/docs/_build/html/_static/fonts/RobotoSlab-Bold.ttf and /dev/null differ diff --git a/docs/_build/html/_static/fonts/RobotoSlab-Regular.ttf b/docs/_build/html/_static/fonts/RobotoSlab-Regular.ttf deleted file mode 100644 index eb52a79..0000000 Binary files a/docs/_build/html/_static/fonts/RobotoSlab-Regular.ttf and /dev/null differ diff --git a/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.eot b/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.eot deleted file mode 100644 index 79dc8ef..0000000 Binary files a/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.eot and /dev/null differ diff --git 
a/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.ttf b/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.ttf deleted file mode 100644 index df5d1df..0000000 Binary files a/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.ttf and /dev/null differ diff --git a/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff b/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff deleted file mode 100644 index 6cb6000..0000000 Binary files a/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff and /dev/null differ diff --git a/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff2 b/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff2 deleted file mode 100644 index 7059e23..0000000 Binary files a/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff2 and /dev/null differ diff --git a/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.eot b/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.eot deleted file mode 100644 index 2f7ca78..0000000 Binary files a/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.eot and /dev/null differ diff --git a/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.ttf b/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.ttf deleted file mode 100644 index eb52a79..0000000 Binary files a/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.ttf and /dev/null differ diff --git a/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff b/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff deleted file mode 100644 index f815f63..0000000 Binary files a/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff and /dev/null differ diff --git a/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff2 b/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff2 deleted file mode 100644 index 
f2c76e5..0000000 Binary files a/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff2 and /dev/null differ diff --git a/docs/_build/html/_static/fonts/fontawesome-webfont.eot b/docs/_build/html/_static/fonts/fontawesome-webfont.eot deleted file mode 100644 index e9f60ca..0000000 Binary files a/docs/_build/html/_static/fonts/fontawesome-webfont.eot and /dev/null differ diff --git a/docs/_build/html/_static/fonts/fontawesome-webfont.svg b/docs/_build/html/_static/fonts/fontawesome-webfont.svg deleted file mode 100644 index 855c845..0000000 --- a/docs/_build/html/_static/fonts/fontawesome-webfont.svg +++ /dev/null @@ -1,2671 +0,0 @@ - - - - -Created by FontForge 20120731 at Mon Oct 24 17:37:40 2016 - By ,,, -Copyright Dave Gandy 2016. All rights reserved. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/docs/_build/html/_static/fonts/fontawesome-webfont.ttf b/docs/_build/html/_static/fonts/fontawesome-webfont.ttf deleted file mode 100644 index 35acda2..0000000 Binary files a/docs/_build/html/_static/fonts/fontawesome-webfont.ttf and /dev/null differ diff --git a/docs/_build/html/_static/fonts/fontawesome-webfont.woff b/docs/_build/html/_static/fonts/fontawesome-webfont.woff deleted file mode 100644 index 400014a..0000000 Binary files a/docs/_build/html/_static/fonts/fontawesome-webfont.woff and /dev/null differ diff --git a/docs/_build/html/_static/fonts/fontawesome-webfont.woff2 b/docs/_build/html/_static/fonts/fontawesome-webfont.woff2 deleted file mode 100644 index 4d13fc6..0000000 Binary files a/docs/_build/html/_static/fonts/fontawesome-webfont.woff2 and /dev/null differ diff --git a/docs/_build/html/_static/jquery-3.2.1.js b/docs/_build/html/_static/jquery-3.2.1.js deleted file mode 100644 index d2d8ca4..0000000 --- a/docs/_build/html/_static/jquery-3.2.1.js +++ /dev/null @@ -1,10253 +0,0 @@ -/*! - * jQuery JavaScript Library v3.2.1 - * https://jquery.com/ - * - * Includes Sizzle.js - * https://sizzlejs.com/ - * - * Copyright JS Foundation and other contributors - * Released under the MIT license - * https://jquery.org/license - * - * Date: 2017-03-20T18:59Z - */ -( function( global, factory ) { - - "use strict"; - - if ( typeof module === "object" && typeof module.exports === "object" ) { - - // For CommonJS and CommonJS-like environments where a proper `window` - // is present, execute the factory and get jQuery. - // For environments that do not have a `window` with a `document` - // (such as Node.js), expose a factory as module.exports. - // This accentuates the need for the creation of a real `window`. - // e.g. 
var jQuery = require("jquery")(window); - // See ticket #14549 for more info. - module.exports = global.document ? - factory( global, true ) : - function( w ) { - if ( !w.document ) { - throw new Error( "jQuery requires a window with a document" ); - } - return factory( w ); - }; - } else { - factory( global ); - } - -// Pass this if window is not defined yet -} )( typeof window !== "undefined" ? window : this, function( window, noGlobal ) { - -// Edge <= 12 - 13+, Firefox <=18 - 45+, IE 10 - 11, Safari 5.1 - 9+, iOS 6 - 9.1 -// throw exceptions when non-strict code (e.g., ASP.NET 4.5) accesses strict mode -// arguments.callee.caller (trac-13335). But as of jQuery 3.0 (2016), strict mode should be common -// enough that all such attempts are guarded in a try block. -"use strict"; - -var arr = []; - -var document = window.document; - -var getProto = Object.getPrototypeOf; - -var slice = arr.slice; - -var concat = arr.concat; - -var push = arr.push; - -var indexOf = arr.indexOf; - -var class2type = {}; - -var toString = class2type.toString; - -var hasOwn = class2type.hasOwnProperty; - -var fnToString = hasOwn.toString; - -var ObjectFunctionString = fnToString.call( Object ); - -var support = {}; - - - - function DOMEval( code, doc ) { - doc = doc || document; - - var script = doc.createElement( "script" ); - - script.text = code; - doc.head.appendChild( script ).parentNode.removeChild( script ); - } -/* global Symbol */ -// Defining this global in .eslintrc.json would create a danger of using the global -// unguarded in another place, it seems safer to define global only for this module - - - -var - version = "3.2.1", - - // Define a local copy of jQuery - jQuery = function( selector, context ) { - - // The jQuery object is actually just the init constructor 'enhanced' - // Need init if jQuery is called (just allow error to be thrown if not included) - return new jQuery.fn.init( selector, context ); - }, - - // Support: Android <=4.0 only - // Make sure we trim BOM 
and NBSP - rtrim = /^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g, - - // Matches dashed string for camelizing - rmsPrefix = /^-ms-/, - rdashAlpha = /-([a-z])/g, - - // Used by jQuery.camelCase as callback to replace() - fcamelCase = function( all, letter ) { - return letter.toUpperCase(); - }; - -jQuery.fn = jQuery.prototype = { - - // The current version of jQuery being used - jquery: version, - - constructor: jQuery, - - // The default length of a jQuery object is 0 - length: 0, - - toArray: function() { - return slice.call( this ); - }, - - // Get the Nth element in the matched element set OR - // Get the whole matched element set as a clean array - get: function( num ) { - - // Return all the elements in a clean array - if ( num == null ) { - return slice.call( this ); - } - - // Return just the one element from the set - return num < 0 ? this[ num + this.length ] : this[ num ]; - }, - - // Take an array of elements and push it onto the stack - // (returning the new matched element set) - pushStack: function( elems ) { - - // Build a new jQuery matched element set - var ret = jQuery.merge( this.constructor(), elems ); - - // Add the old object onto the stack (as a reference) - ret.prevObject = this; - - // Return the newly-formed element set - return ret; - }, - - // Execute a callback for every element in the matched set. - each: function( callback ) { - return jQuery.each( this, callback ); - }, - - map: function( callback ) { - return this.pushStack( jQuery.map( this, function( elem, i ) { - return callback.call( elem, i, elem ); - } ) ); - }, - - slice: function() { - return this.pushStack( slice.apply( this, arguments ) ); - }, - - first: function() { - return this.eq( 0 ); - }, - - last: function() { - return this.eq( -1 ); - }, - - eq: function( i ) { - var len = this.length, - j = +i + ( i < 0 ? len : 0 ); - return this.pushStack( j >= 0 && j < len ? 
[ this[ j ] ] : [] ); - }, - - end: function() { - return this.prevObject || this.constructor(); - }, - - // For internal use only. - // Behaves like an Array's method, not like a jQuery method. - push: push, - sort: arr.sort, - splice: arr.splice -}; - -jQuery.extend = jQuery.fn.extend = function() { - var options, name, src, copy, copyIsArray, clone, - target = arguments[ 0 ] || {}, - i = 1, - length = arguments.length, - deep = false; - - // Handle a deep copy situation - if ( typeof target === "boolean" ) { - deep = target; - - // Skip the boolean and the target - target = arguments[ i ] || {}; - i++; - } - - // Handle case when target is a string or something (possible in deep copy) - if ( typeof target !== "object" && !jQuery.isFunction( target ) ) { - target = {}; - } - - // Extend jQuery itself if only one argument is passed - if ( i === length ) { - target = this; - i--; - } - - for ( ; i < length; i++ ) { - - // Only deal with non-null/undefined values - if ( ( options = arguments[ i ] ) != null ) { - - // Extend the base object - for ( name in options ) { - src = target[ name ]; - copy = options[ name ]; - - // Prevent never-ending loop - if ( target === copy ) { - continue; - } - - // Recurse if we're merging plain objects or arrays - if ( deep && copy && ( jQuery.isPlainObject( copy ) || - ( copyIsArray = Array.isArray( copy ) ) ) ) { - - if ( copyIsArray ) { - copyIsArray = false; - clone = src && Array.isArray( src ) ? src : []; - - } else { - clone = src && jQuery.isPlainObject( src ) ? 
src : {}; - } - - // Never move original objects, clone them - target[ name ] = jQuery.extend( deep, clone, copy ); - - // Don't bring in undefined values - } else if ( copy !== undefined ) { - target[ name ] = copy; - } - } - } - } - - // Return the modified object - return target; -}; - -jQuery.extend( { - - // Unique for each copy of jQuery on the page - expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ), - - // Assume jQuery is ready without the ready module - isReady: true, - - error: function( msg ) { - throw new Error( msg ); - }, - - noop: function() {}, - - isFunction: function( obj ) { - return jQuery.type( obj ) === "function"; - }, - - isWindow: function( obj ) { - return obj != null && obj === obj.window; - }, - - isNumeric: function( obj ) { - - // As of jQuery 3.0, isNumeric is limited to - // strings and numbers (primitives or objects) - // that can be coerced to finite numbers (gh-2662) - var type = jQuery.type( obj ); - return ( type === "number" || type === "string" ) && - - // parseFloat NaNs numeric-cast false positives ("") - // ...but misinterprets leading-number strings, particularly hex literals ("0x...") - // subtraction forces infinities to NaN - !isNaN( obj - parseFloat( obj ) ); - }, - - isPlainObject: function( obj ) { - var proto, Ctor; - - // Detect obvious negatives - // Use toString instead of jQuery.type to catch host objects - if ( !obj || toString.call( obj ) !== "[object Object]" ) { - return false; - } - - proto = getProto( obj ); - - // Objects with no prototype (e.g., `Object.create( null )`) are plain - if ( !proto ) { - return true; - } - - // Objects with prototype are plain iff they were constructed by a global Object function - Ctor = hasOwn.call( proto, "constructor" ) && proto.constructor; - return typeof Ctor === "function" && fnToString.call( Ctor ) === ObjectFunctionString; - }, - - isEmptyObject: function( obj ) { - - /* eslint-disable no-unused-vars */ - // See 
https://github.com/eslint/eslint/issues/6125 - var name; - - for ( name in obj ) { - return false; - } - return true; - }, - - type: function( obj ) { - if ( obj == null ) { - return obj + ""; - } - - // Support: Android <=2.3 only (functionish RegExp) - return typeof obj === "object" || typeof obj === "function" ? - class2type[ toString.call( obj ) ] || "object" : - typeof obj; - }, - - // Evaluates a script in a global context - globalEval: function( code ) { - DOMEval( code ); - }, - - // Convert dashed to camelCase; used by the css and data modules - // Support: IE <=9 - 11, Edge 12 - 13 - // Microsoft forgot to hump their vendor prefix (#9572) - camelCase: function( string ) { - return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); - }, - - each: function( obj, callback ) { - var length, i = 0; - - if ( isArrayLike( obj ) ) { - length = obj.length; - for ( ; i < length; i++ ) { - if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { - break; - } - } - } else { - for ( i in obj ) { - if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { - break; - } - } - } - - return obj; - }, - - // Support: Android <=4.0 only - trim: function( text ) { - return text == null ? - "" : - ( text + "" ).replace( rtrim, "" ); - }, - - // results is for internal usage only - makeArray: function( arr, results ) { - var ret = results || []; - - if ( arr != null ) { - if ( isArrayLike( Object( arr ) ) ) { - jQuery.merge( ret, - typeof arr === "string" ? - [ arr ] : arr - ); - } else { - push.call( ret, arr ); - } - } - - return ret; - }, - - inArray: function( elem, arr, i ) { - return arr == null ? 
-1 : indexOf.call( arr, elem, i ); - }, - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - merge: function( first, second ) { - var len = +second.length, - j = 0, - i = first.length; - - for ( ; j < len; j++ ) { - first[ i++ ] = second[ j ]; - } - - first.length = i; - - return first; - }, - - grep: function( elems, callback, invert ) { - var callbackInverse, - matches = [], - i = 0, - length = elems.length, - callbackExpect = !invert; - - // Go through the array, only saving the items - // that pass the validator function - for ( ; i < length; i++ ) { - callbackInverse = !callback( elems[ i ], i ); - if ( callbackInverse !== callbackExpect ) { - matches.push( elems[ i ] ); - } - } - - return matches; - }, - - // arg is for internal usage only - map: function( elems, callback, arg ) { - var length, value, - i = 0, - ret = []; - - // Go through the array, translating each of the items to their new values - if ( isArrayLike( elems ) ) { - length = elems.length; - for ( ; i < length; i++ ) { - value = callback( elems[ i ], i, arg ); - - if ( value != null ) { - ret.push( value ); - } - } - - // Go through every key on the object, - } else { - for ( i in elems ) { - value = callback( elems[ i ], i, arg ); - - if ( value != null ) { - ret.push( value ); - } - } - } - - // Flatten any nested arrays - return concat.apply( [], ret ); - }, - - // A global GUID counter for objects - guid: 1, - - // Bind a function to a context, optionally partially applying any - // arguments. - proxy: function( fn, context ) { - var tmp, args, proxy; - - if ( typeof context === "string" ) { - tmp = fn[ context ]; - context = fn; - fn = tmp; - } - - // Quick check to determine if target is callable, in the spec - // this throws a TypeError, but we will just return undefined. 
- if ( !jQuery.isFunction( fn ) ) { - return undefined; - } - - // Simulated bind - args = slice.call( arguments, 2 ); - proxy = function() { - return fn.apply( context || this, args.concat( slice.call( arguments ) ) ); - }; - - // Set the guid of unique handler to the same of original handler, so it can be removed - proxy.guid = fn.guid = fn.guid || jQuery.guid++; - - return proxy; - }, - - now: Date.now, - - // jQuery.support is not used in Core but other projects attach their - // properties to it so it needs to exist. - support: support -} ); - -if ( typeof Symbol === "function" ) { - jQuery.fn[ Symbol.iterator ] = arr[ Symbol.iterator ]; -} - -// Populate the class2type map -jQuery.each( "Boolean Number String Function Array Date RegExp Object Error Symbol".split( " " ), -function( i, name ) { - class2type[ "[object " + name + "]" ] = name.toLowerCase(); -} ); - -function isArrayLike( obj ) { - - // Support: real iOS 8.2 only (not reproducible in simulator) - // `in` check used to prevent JIT error (gh-2145) - // hasOwn isn't used here due to false negatives - // regarding Nodelist length in IE - var length = !!obj && "length" in obj && obj.length, - type = jQuery.type( obj ); - - if ( type === "function" || jQuery.isWindow( obj ) ) { - return false; - } - - return type === "array" || length === 0 || - typeof length === "number" && length > 0 && ( length - 1 ) in obj; -} -var Sizzle = -/*! 
- * Sizzle CSS Selector Engine v2.3.3 - * https://sizzlejs.com/ - * - * Copyright jQuery Foundation and other contributors - * Released under the MIT license - * http://jquery.org/license - * - * Date: 2016-08-08 - */ -(function( window ) { - -var i, - support, - Expr, - getText, - isXML, - tokenize, - compile, - select, - outermostContext, - sortInput, - hasDuplicate, - - // Local document vars - setDocument, - document, - docElem, - documentIsHTML, - rbuggyQSA, - rbuggyMatches, - matches, - contains, - - // Instance-specific data - expando = "sizzle" + 1 * new Date(), - preferredDoc = window.document, - dirruns = 0, - done = 0, - classCache = createCache(), - tokenCache = createCache(), - compilerCache = createCache(), - sortOrder = function( a, b ) { - if ( a === b ) { - hasDuplicate = true; - } - return 0; - }, - - // Instance methods - hasOwn = ({}).hasOwnProperty, - arr = [], - pop = arr.pop, - push_native = arr.push, - push = arr.push, - slice = arr.slice, - // Use a stripped-down indexOf as it's faster than native - // https://jsperf.com/thor-indexof-vs-for/5 - indexOf = function( list, elem ) { - var i = 0, - len = list.length; - for ( ; i < len; i++ ) { - if ( list[i] === elem ) { - return i; - } - } - return -1; - }, - - booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped", - - // Regular expressions - - // http://www.w3.org/TR/css3-selectors/#whitespace - whitespace = "[\\x20\\t\\r\\n\\f]", - - // http://www.w3.org/TR/CSS21/syndata.html#value-def-identifier - identifier = "(?:\\\\.|[\\w-]|[^\0-\\xa0])+", - - // Attribute selectors: http://www.w3.org/TR/selectors/#attribute-selectors - attributes = "\\[" + whitespace + "*(" + identifier + ")(?:" + whitespace + - // Operator (capture 2) - "*([*^$|!~]?=)" + whitespace + - // "Attribute values must be CSS identifiers [capture 5] or strings [capture 3 or capture 4]" - "*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" 
+ identifier + "))|)" + whitespace + - "*\\]", - - pseudos = ":(" + identifier + ")(?:\\((" + - // To reduce the number of selectors needing tokenize in the preFilter, prefer arguments: - // 1. quoted (capture 3; capture 4 or capture 5) - "('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" + - // 2. simple (capture 6) - "((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" + - // 3. anything else (capture 2) - ".*" + - ")\\)|)", - - // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter - rwhitespace = new RegExp( whitespace + "+", "g" ), - rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + whitespace + "+$", "g" ), - - rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ), - rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace + "*" ), - - rattributeQuotes = new RegExp( "=" + whitespace + "*([^\\]'\"]*?)" + whitespace + "*\\]", "g" ), - - rpseudo = new RegExp( pseudos ), - ridentifier = new RegExp( "^" + identifier + "$" ), - - matchExpr = { - "ID": new RegExp( "^#(" + identifier + ")" ), - "CLASS": new RegExp( "^\\.(" + identifier + ")" ), - "TAG": new RegExp( "^(" + identifier + "|[*])" ), - "ATTR": new RegExp( "^" + attributes ), - "PSEUDO": new RegExp( "^" + pseudos ), - "CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" + whitespace + - "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + whitespace + - "*(\\d+)|))" + whitespace + "*\\)|)", "i" ), - "bool": new RegExp( "^(?:" + booleans + ")$", "i" ), - // For use in libraries implementing .is() - // We use this for POS matching in `select` - "needsContext": new RegExp( "^" + whitespace + "*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + - whitespace + "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" ) - }, - - rinputs = /^(?:input|select|textarea|button)$/i, - rheader = /^h\d$/i, - - rnative = /^[^{]+\{\s*\[native \w/, - - // 
Easily-parseable/retrievable ID or TAG or CLASS selectors - rquickExpr = /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/, - - rsibling = /[+~]/, - - // CSS escapes - // http://www.w3.org/TR/CSS21/syndata.html#escaped-characters - runescape = new RegExp( "\\\\([\\da-f]{1,6}" + whitespace + "?|(" + whitespace + ")|.)", "ig" ), - funescape = function( _, escaped, escapedWhitespace ) { - var high = "0x" + escaped - 0x10000; - // NaN means non-codepoint - // Support: Firefox<24 - // Workaround erroneous numeric interpretation of +"0x" - return high !== high || escapedWhitespace ? - escaped : - high < 0 ? - // BMP codepoint - String.fromCharCode( high + 0x10000 ) : - // Supplemental Plane codepoint (surrogate pair) - String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 ); - }, - - // CSS string/identifier serialization - // https://drafts.csswg.org/cssom/#common-serializing-idioms - rcssescape = /([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g, - fcssescape = function( ch, asCodePoint ) { - if ( asCodePoint ) { - - // U+0000 NULL becomes U+FFFD REPLACEMENT CHARACTER - if ( ch === "\0" ) { - return "\uFFFD"; - } - - // Control characters and (dependent upon position) numbers get escaped as code points - return ch.slice( 0, -1 ) + "\\" + ch.charCodeAt( ch.length - 1 ).toString( 16 ) + " "; - } - - // Other potentially-special ASCII characters get backslash-escaped - return "\\" + ch; - }, - - // Used for iframes - // See setDocument() - // Removing the function wrapper causes a "Permission Denied" - // error in IE - unloadHandler = function() { - setDocument(); - }, - - disabledAncestor = addCombinator( - function( elem ) { - return elem.disabled === true && ("form" in elem || "label" in elem); - }, - { dir: "parentNode", next: "legend" } - ); - -// Optimize for push.apply( _, NodeList ) -try { - push.apply( - (arr = slice.call( preferredDoc.childNodes )), - preferredDoc.childNodes - ); - // Support: Android<4.0 - // Detect silently failing push.apply - arr[ 
preferredDoc.childNodes.length ].nodeType; -} catch ( e ) { - push = { apply: arr.length ? - - // Leverage slice if possible - function( target, els ) { - push_native.apply( target, slice.call(els) ); - } : - - // Support: IE<9 - // Otherwise append directly - function( target, els ) { - var j = target.length, - i = 0; - // Can't trust NodeList.length - while ( (target[j++] = els[i++]) ) {} - target.length = j - 1; - } - }; -} - -function Sizzle( selector, context, results, seed ) { - var m, i, elem, nid, match, groups, newSelector, - newContext = context && context.ownerDocument, - - // nodeType defaults to 9, since context defaults to document - nodeType = context ? context.nodeType : 9; - - results = results || []; - - // Return early from calls with invalid selector or context - if ( typeof selector !== "string" || !selector || - nodeType !== 1 && nodeType !== 9 && nodeType !== 11 ) { - - return results; - } - - // Try to shortcut find operations (as opposed to filters) in HTML documents - if ( !seed ) { - - if ( ( context ? 
context.ownerDocument || context : preferredDoc ) !== document ) { - setDocument( context ); - } - context = context || document; - - if ( documentIsHTML ) { - - // If the selector is sufficiently simple, try using a "get*By*" DOM method - // (excepting DocumentFragment context, where the methods don't exist) - if ( nodeType !== 11 && (match = rquickExpr.exec( selector )) ) { - - // ID selector - if ( (m = match[1]) ) { - - // Document context - if ( nodeType === 9 ) { - if ( (elem = context.getElementById( m )) ) { - - // Support: IE, Opera, Webkit - // TODO: identify versions - // getElementById can match elements by name instead of ID - if ( elem.id === m ) { - results.push( elem ); - return results; - } - } else { - return results; - } - - // Element context - } else { - - // Support: IE, Opera, Webkit - // TODO: identify versions - // getElementById can match elements by name instead of ID - if ( newContext && (elem = newContext.getElementById( m )) && - contains( context, elem ) && - elem.id === m ) { - - results.push( elem ); - return results; - } - } - - // Type selector - } else if ( match[2] ) { - push.apply( results, context.getElementsByTagName( selector ) ); - return results; - - // Class selector - } else if ( (m = match[3]) && support.getElementsByClassName && - context.getElementsByClassName ) { - - push.apply( results, context.getElementsByClassName( m ) ); - return results; - } - } - - // Take advantage of querySelectorAll - if ( support.qsa && - !compilerCache[ selector + " " ] && - (!rbuggyQSA || !rbuggyQSA.test( selector )) ) { - - if ( nodeType !== 1 ) { - newContext = context; - newSelector = selector; - - // qSA looks outside Element context, which is not what we want - // Thanks to Andrew Dupont for this workaround technique - // Support: IE <=8 - // Exclude object elements - } else if ( context.nodeName.toLowerCase() !== "object" ) { - - // Capture the context ID, setting it first if necessary - if ( (nid = context.getAttribute( "id" )) ) 
{ - nid = nid.replace( rcssescape, fcssescape ); - } else { - context.setAttribute( "id", (nid = expando) ); - } - - // Prefix every selector in the list - groups = tokenize( selector ); - i = groups.length; - while ( i-- ) { - groups[i] = "#" + nid + " " + toSelector( groups[i] ); - } - newSelector = groups.join( "," ); - - // Expand context for sibling selectors - newContext = rsibling.test( selector ) && testContext( context.parentNode ) || - context; - } - - if ( newSelector ) { - try { - push.apply( results, - newContext.querySelectorAll( newSelector ) - ); - return results; - } catch ( qsaError ) { - } finally { - if ( nid === expando ) { - context.removeAttribute( "id" ); - } - } - } - } - } - } - - // All others - return select( selector.replace( rtrim, "$1" ), context, results, seed ); -} - -/** - * Create key-value caches of limited size - * @returns {function(string, object)} Returns the Object data after storing it on itself with - * property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength) - * deleting the oldest entry - */ -function createCache() { - var keys = []; - - function cache( key, value ) { - // Use (key + " ") to avoid collision with native prototype properties (see Issue #157) - if ( keys.push( key + " " ) > Expr.cacheLength ) { - // Only keep the most recent entries - delete cache[ keys.shift() ]; - } - return (cache[ key + " " ] = value); - } - return cache; -} - -/** - * Mark a function for special use by Sizzle - * @param {Function} fn The function to mark - */ -function markFunction( fn ) { - fn[ expando ] = true; - return fn; -} - -/** - * Support testing using an element - * @param {Function} fn Passed the created element and returns a boolean result - */ -function assert( fn ) { - var el = document.createElement("fieldset"); - - try { - return !!fn( el ); - } catch (e) { - return false; - } finally { - // Remove from its parent by default - if ( el.parentNode ) { - el.parentNode.removeChild( el ); 
- } - // release memory in IE - el = null; - } -} - -/** - * Adds the same handler for all of the specified attrs - * @param {String} attrs Pipe-separated list of attributes - * @param {Function} handler The method that will be applied - */ -function addHandle( attrs, handler ) { - var arr = attrs.split("|"), - i = arr.length; - - while ( i-- ) { - Expr.attrHandle[ arr[i] ] = handler; - } -} - -/** - * Checks document order of two siblings - * @param {Element} a - * @param {Element} b - * @returns {Number} Returns less than 0 if a precedes b, greater than 0 if a follows b - */ -function siblingCheck( a, b ) { - var cur = b && a, - diff = cur && a.nodeType === 1 && b.nodeType === 1 && - a.sourceIndex - b.sourceIndex; - - // Use IE sourceIndex if available on both nodes - if ( diff ) { - return diff; - } - - // Check if b follows a - if ( cur ) { - while ( (cur = cur.nextSibling) ) { - if ( cur === b ) { - return -1; - } - } - } - - return a ? 1 : -1; -} - -/** - * Returns a function to use in pseudos for input types - * @param {String} type - */ -function createInputPseudo( type ) { - return function( elem ) { - var name = elem.nodeName.toLowerCase(); - return name === "input" && elem.type === type; - }; -} - -/** - * Returns a function to use in pseudos for buttons - * @param {String} type - */ -function createButtonPseudo( type ) { - return function( elem ) { - var name = elem.nodeName.toLowerCase(); - return (name === "input" || name === "button") && elem.type === type; - }; -} - -/** - * Returns a function to use in pseudos for :enabled/:disabled - * @param {Boolean} disabled true for :disabled; false for :enabled - */ -function createDisabledPseudo( disabled ) { - - // Known :disabled false positives: fieldset[disabled] > legend:nth-of-type(n+2) :can-disable - return function( elem ) { - - // Only certain elements can match :enabled or :disabled - // https://html.spec.whatwg.org/multipage/scripting.html#selector-enabled - // 
https://html.spec.whatwg.org/multipage/scripting.html#selector-disabled - if ( "form" in elem ) { - - // Check for inherited disabledness on relevant non-disabled elements: - // * listed form-associated elements in a disabled fieldset - // https://html.spec.whatwg.org/multipage/forms.html#category-listed - // https://html.spec.whatwg.org/multipage/forms.html#concept-fe-disabled - // * option elements in a disabled optgroup - // https://html.spec.whatwg.org/multipage/forms.html#concept-option-disabled - // All such elements have a "form" property. - if ( elem.parentNode && elem.disabled === false ) { - - // Option elements defer to a parent optgroup if present - if ( "label" in elem ) { - if ( "label" in elem.parentNode ) { - return elem.parentNode.disabled === disabled; - } else { - return elem.disabled === disabled; - } - } - - // Support: IE 6 - 11 - // Use the isDisabled shortcut property to check for disabled fieldset ancestors - return elem.isDisabled === disabled || - - // Where there is no isDisabled, check manually - /* jshint -W018 */ - elem.isDisabled !== !disabled && - disabledAncestor( elem ) === disabled; - } - - return elem.disabled === disabled; - - // Try to winnow out elements that can't be disabled before trusting the disabled property. - // Some victims get caught in our net (label, legend, menu, track), but it shouldn't - // even exist on them, let alone have a boolean value. 
- } else if ( "label" in elem ) { - return elem.disabled === disabled; - } - - // Remaining elements are neither :enabled nor :disabled - return false; - }; -} - -/** - * Returns a function to use in pseudos for positionals - * @param {Function} fn - */ -function createPositionalPseudo( fn ) { - return markFunction(function( argument ) { - argument = +argument; - return markFunction(function( seed, matches ) { - var j, - matchIndexes = fn( [], seed.length, argument ), - i = matchIndexes.length; - - // Match elements found at the specified indexes - while ( i-- ) { - if ( seed[ (j = matchIndexes[i]) ] ) { - seed[j] = !(matches[j] = seed[j]); - } - } - }); - }); -} - -/** - * Checks a node for validity as a Sizzle context - * @param {Element|Object=} context - * @returns {Element|Object|Boolean} The input node if acceptable, otherwise a falsy value - */ -function testContext( context ) { - return context && typeof context.getElementsByTagName !== "undefined" && context; -} - -// Expose support vars for convenience -support = Sizzle.support = {}; - -/** - * Detects XML nodes - * @param {Element|Object} elem An element or a document - * @returns {Boolean} True iff elem is a non-HTML XML node - */ -isXML = Sizzle.isXML = function( elem ) { - // documentElement is verified for cases where it doesn't yet exist - // (such as loading iframes in IE - #4833) - var documentElement = elem && (elem.ownerDocument || elem).documentElement; - return documentElement ? documentElement.nodeName !== "HTML" : false; -}; - -/** - * Sets document-related variables once based on the current document - * @param {Element|Object} [doc] An element or document object to use to set the document - * @returns {Object} Returns the current document - */ -setDocument = Sizzle.setDocument = function( node ) { - var hasCompare, subWindow, - doc = node ? 
node.ownerDocument || node : preferredDoc; - - // Return early if doc is invalid or already selected - if ( doc === document || doc.nodeType !== 9 || !doc.documentElement ) { - return document; - } - - // Update global variables - document = doc; - docElem = document.documentElement; - documentIsHTML = !isXML( document ); - - // Support: IE 9-11, Edge - // Accessing iframe documents after unload throws "permission denied" errors (jQuery #13936) - if ( preferredDoc !== document && - (subWindow = document.defaultView) && subWindow.top !== subWindow ) { - - // Support: IE 11, Edge - if ( subWindow.addEventListener ) { - subWindow.addEventListener( "unload", unloadHandler, false ); - - // Support: IE 9 - 10 only - } else if ( subWindow.attachEvent ) { - subWindow.attachEvent( "onunload", unloadHandler ); - } - } - - /* Attributes - ---------------------------------------------------------------------- */ - - // Support: IE<8 - // Verify that getAttribute really returns attributes and not properties - // (excepting IE8 booleans) - support.attributes = assert(function( el ) { - el.className = "i"; - return !el.getAttribute("className"); - }); - - /* getElement(s)By* - ---------------------------------------------------------------------- */ - - // Check if getElementsByTagName("*") returns only elements - support.getElementsByTagName = assert(function( el ) { - el.appendChild( document.createComment("") ); - return !el.getElementsByTagName("*").length; - }); - - // Support: IE<9 - support.getElementsByClassName = rnative.test( document.getElementsByClassName ); - - // Support: IE<10 - // Check if getElementById returns elements by name - // The broken getElementById methods don't pick up programmatically-set names, - // so use a roundabout getElementsByName test - support.getById = assert(function( el ) { - docElem.appendChild( el ).id = expando; - return !document.getElementsByName || !document.getElementsByName( expando ).length; - }); - - // ID filter and find - if ( 
support.getById ) { - Expr.filter["ID"] = function( id ) { - var attrId = id.replace( runescape, funescape ); - return function( elem ) { - return elem.getAttribute("id") === attrId; - }; - }; - Expr.find["ID"] = function( id, context ) { - if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { - var elem = context.getElementById( id ); - return elem ? [ elem ] : []; - } - }; - } else { - Expr.filter["ID"] = function( id ) { - var attrId = id.replace( runescape, funescape ); - return function( elem ) { - var node = typeof elem.getAttributeNode !== "undefined" && - elem.getAttributeNode("id"); - return node && node.value === attrId; - }; - }; - - // Support: IE 6 - 7 only - // getElementById is not reliable as a find shortcut - Expr.find["ID"] = function( id, context ) { - if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { - var node, i, elems, - elem = context.getElementById( id ); - - if ( elem ) { - - // Verify the id attribute - node = elem.getAttributeNode("id"); - if ( node && node.value === id ) { - return [ elem ]; - } - - // Fall back on getElementsByName - elems = context.getElementsByName( id ); - i = 0; - while ( (elem = elems[i++]) ) { - node = elem.getAttributeNode("id"); - if ( node && node.value === id ) { - return [ elem ]; - } - } - } - - return []; - } - }; - } - - // Tag - Expr.find["TAG"] = support.getElementsByTagName ? 
- function( tag, context ) { - if ( typeof context.getElementsByTagName !== "undefined" ) { - return context.getElementsByTagName( tag ); - - // DocumentFragment nodes don't have gEBTN - } else if ( support.qsa ) { - return context.querySelectorAll( tag ); - } - } : - - function( tag, context ) { - var elem, - tmp = [], - i = 0, - // By happy coincidence, a (broken) gEBTN appears on DocumentFragment nodes too - results = context.getElementsByTagName( tag ); - - // Filter out possible comments - if ( tag === "*" ) { - while ( (elem = results[i++]) ) { - if ( elem.nodeType === 1 ) { - tmp.push( elem ); - } - } - - return tmp; - } - return results; - }; - - // Class - Expr.find["CLASS"] = support.getElementsByClassName && function( className, context ) { - if ( typeof context.getElementsByClassName !== "undefined" && documentIsHTML ) { - return context.getElementsByClassName( className ); - } - }; - - /* QSA/matchesSelector - ---------------------------------------------------------------------- */ - - // QSA and matchesSelector support - - // matchesSelector(:active) reports false when true (IE9/Opera 11.5) - rbuggyMatches = []; - - // qSa(:focus) reports false when true (Chrome 21) - // We allow this because of a bug in IE8/9 that throws an error - // whenever `document.activeElement` is accessed on an iframe - // So, we allow :focus to pass through QSA all the time to avoid the IE error - // See https://bugs.jquery.com/ticket/13378 - rbuggyQSA = []; - - if ( (support.qsa = rnative.test( document.querySelectorAll )) ) { - // Build QSA regex - // Regex strategy adopted from Diego Perini - assert(function( el ) { - // Select is set to empty string on purpose - // This is to test IE's treatment of not explicitly - // setting a boolean content attribute, - // since its presence should be enough - // https://bugs.jquery.com/ticket/12359 - docElem.appendChild( el ).innerHTML = "" + - ""; - - // Support: IE8, Opera 11-12.16 - // Nothing should be selected when empty 
strings follow ^= or $= or *= - // The test attribute must be unknown in Opera but "safe" for WinRT - // https://msdn.microsoft.com/en-us/library/ie/hh465388.aspx#attribute_section - if ( el.querySelectorAll("[msallowcapture^='']").length ) { - rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:''|\"\")" ); - } - - // Support: IE8 - // Boolean attributes and "value" are not treated correctly - if ( !el.querySelectorAll("[selected]").length ) { - rbuggyQSA.push( "\\[" + whitespace + "*(?:value|" + booleans + ")" ); - } - - // Support: Chrome<29, Android<4.4, Safari<7.0+, iOS<7.0+, PhantomJS<1.9.8+ - if ( !el.querySelectorAll( "[id~=" + expando + "-]" ).length ) { - rbuggyQSA.push("~="); - } - - // Webkit/Opera - :checked should return selected option elements - // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked - // IE8 throws error here and will not see later tests - if ( !el.querySelectorAll(":checked").length ) { - rbuggyQSA.push(":checked"); - } - - // Support: Safari 8+, iOS 8+ - // https://bugs.webkit.org/show_bug.cgi?id=136851 - // In-page `selector#id sibling-combinator selector` fails - if ( !el.querySelectorAll( "a#" + expando + "+*" ).length ) { - rbuggyQSA.push(".#.+[+~]"); - } - }); - - assert(function( el ) { - el.innerHTML = "" + - ""; - - // Support: Windows 8 Native Apps - // The type and name attributes are restricted during .innerHTML assignment - var input = document.createElement("input"); - input.setAttribute( "type", "hidden" ); - el.appendChild( input ).setAttribute( "name", "D" ); - - // Support: IE8 - // Enforce case-sensitivity of name attribute - if ( el.querySelectorAll("[name=d]").length ) { - rbuggyQSA.push( "name" + whitespace + "*[*^$|!~]?=" ); - } - - // FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled) - // IE8 throws error here and will not see later tests - if ( el.querySelectorAll(":enabled").length !== 2 ) { - rbuggyQSA.push( ":enabled", ":disabled" ); - } - - // Support: IE9-11+ - // 
IE's :disabled selector does not pick up the children of disabled fieldsets - docElem.appendChild( el ).disabled = true; - if ( el.querySelectorAll(":disabled").length !== 2 ) { - rbuggyQSA.push( ":enabled", ":disabled" ); - } - - // Opera 10-11 does not throw on post-comma invalid pseudos - el.querySelectorAll("*,:x"); - rbuggyQSA.push(",.*:"); - }); - } - - if ( (support.matchesSelector = rnative.test( (matches = docElem.matches || - docElem.webkitMatchesSelector || - docElem.mozMatchesSelector || - docElem.oMatchesSelector || - docElem.msMatchesSelector) )) ) { - - assert(function( el ) { - // Check to see if it's possible to do matchesSelector - // on a disconnected node (IE 9) - support.disconnectedMatch = matches.call( el, "*" ); - - // This should fail with an exception - // Gecko does not error, returns false instead - matches.call( el, "[s!='']:x" ); - rbuggyMatches.push( "!=", pseudos ); - }); - } - - rbuggyQSA = rbuggyQSA.length && new RegExp( rbuggyQSA.join("|") ); - rbuggyMatches = rbuggyMatches.length && new RegExp( rbuggyMatches.join("|") ); - - /* Contains - ---------------------------------------------------------------------- */ - hasCompare = rnative.test( docElem.compareDocumentPosition ); - - // Element contains another - // Purposefully self-exclusive - // As in, an element does not contain itself - contains = hasCompare || rnative.test( docElem.contains ) ? - function( a, b ) { - var adown = a.nodeType === 9 ? a.documentElement : a, - bup = b && b.parentNode; - return a === bup || !!( bup && bup.nodeType === 1 && ( - adown.contains ? - adown.contains( bup ) : - a.compareDocumentPosition && a.compareDocumentPosition( bup ) & 16 - )); - } : - function( a, b ) { - if ( b ) { - while ( (b = b.parentNode) ) { - if ( b === a ) { - return true; - } - } - } - return false; - }; - - /* Sorting - ---------------------------------------------------------------------- */ - - // Document order sorting - sortOrder = hasCompare ? 
- function( a, b ) { - - // Flag for duplicate removal - if ( a === b ) { - hasDuplicate = true; - return 0; - } - - // Sort on method existence if only one input has compareDocumentPosition - var compare = !a.compareDocumentPosition - !b.compareDocumentPosition; - if ( compare ) { - return compare; - } - - // Calculate position if both inputs belong to the same document - compare = ( a.ownerDocument || a ) === ( b.ownerDocument || b ) ? - a.compareDocumentPosition( b ) : - - // Otherwise we know they are disconnected - 1; - - // Disconnected nodes - if ( compare & 1 || - (!support.sortDetached && b.compareDocumentPosition( a ) === compare) ) { - - // Choose the first element that is related to our preferred document - if ( a === document || a.ownerDocument === preferredDoc && contains(preferredDoc, a) ) { - return -1; - } - if ( b === document || b.ownerDocument === preferredDoc && contains(preferredDoc, b) ) { - return 1; - } - - // Maintain original order - return sortInput ? - ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : - 0; - } - - return compare & 4 ? -1 : 1; - } : - function( a, b ) { - // Exit early if the nodes are identical - if ( a === b ) { - hasDuplicate = true; - return 0; - } - - var cur, - i = 0, - aup = a.parentNode, - bup = b.parentNode, - ap = [ a ], - bp = [ b ]; - - // Parentless nodes are either documents or disconnected - if ( !aup || !bup ) { - return a === document ? -1 : - b === document ? 1 : - aup ? -1 : - bup ? 1 : - sortInput ? 
- ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : - 0; - - // If the nodes are siblings, we can do a quick check - } else if ( aup === bup ) { - return siblingCheck( a, b ); - } - - // Otherwise we need full lists of their ancestors for comparison - cur = a; - while ( (cur = cur.parentNode) ) { - ap.unshift( cur ); - } - cur = b; - while ( (cur = cur.parentNode) ) { - bp.unshift( cur ); - } - - // Walk down the tree looking for a discrepancy - while ( ap[i] === bp[i] ) { - i++; - } - - return i ? - // Do a sibling check if the nodes have a common ancestor - siblingCheck( ap[i], bp[i] ) : - - // Otherwise nodes in our document sort first - ap[i] === preferredDoc ? -1 : - bp[i] === preferredDoc ? 1 : - 0; - }; - - return document; -}; - -Sizzle.matches = function( expr, elements ) { - return Sizzle( expr, null, null, elements ); -}; - -Sizzle.matchesSelector = function( elem, expr ) { - // Set document vars if needed - if ( ( elem.ownerDocument || elem ) !== document ) { - setDocument( elem ); - } - - // Make sure that attribute selectors are quoted - expr = expr.replace( rattributeQuotes, "='$1']" ); - - if ( support.matchesSelector && documentIsHTML && - !compilerCache[ expr + " " ] && - ( !rbuggyMatches || !rbuggyMatches.test( expr ) ) && - ( !rbuggyQSA || !rbuggyQSA.test( expr ) ) ) { - - try { - var ret = matches.call( elem, expr ); - - // IE 9's matchesSelector returns false on disconnected nodes - if ( ret || support.disconnectedMatch || - // As well, disconnected nodes are said to be in a document - // fragment in IE 9 - elem.document && elem.document.nodeType !== 11 ) { - return ret; - } - } catch (e) {} - } - - return Sizzle( expr, document, null, [ elem ] ).length > 0; -}; - -Sizzle.contains = function( context, elem ) { - // Set document vars if needed - if ( ( context.ownerDocument || context ) !== document ) { - setDocument( context ); - } - return contains( context, elem ); -}; - -Sizzle.attr = function( elem, name ) { - // Set document vars if 
needed - if ( ( elem.ownerDocument || elem ) !== document ) { - setDocument( elem ); - } - - var fn = Expr.attrHandle[ name.toLowerCase() ], - // Don't get fooled by Object.prototype properties (jQuery #13807) - val = fn && hasOwn.call( Expr.attrHandle, name.toLowerCase() ) ? - fn( elem, name, !documentIsHTML ) : - undefined; - - return val !== undefined ? - val : - support.attributes || !documentIsHTML ? - elem.getAttribute( name ) : - (val = elem.getAttributeNode(name)) && val.specified ? - val.value : - null; -}; - -Sizzle.escape = function( sel ) { - return (sel + "").replace( rcssescape, fcssescape ); -}; - -Sizzle.error = function( msg ) { - throw new Error( "Syntax error, unrecognized expression: " + msg ); -}; - -/** - * Document sorting and removing duplicates - * @param {ArrayLike} results - */ -Sizzle.uniqueSort = function( results ) { - var elem, - duplicates = [], - j = 0, - i = 0; - - // Unless we *know* we can detect duplicates, assume their presence - hasDuplicate = !support.detectDuplicates; - sortInput = !support.sortStable && results.slice( 0 ); - results.sort( sortOrder ); - - if ( hasDuplicate ) { - while ( (elem = results[i++]) ) { - if ( elem === results[ i ] ) { - j = duplicates.push( i ); - } - } - while ( j-- ) { - results.splice( duplicates[ j ], 1 ); - } - } - - // Clear input after sorting to release objects - // See https://github.com/jquery/sizzle/pull/225 - sortInput = null; - - return results; -}; - -/** - * Utility function for retrieving the text value of an array of DOM nodes - * @param {Array|Element} elem - */ -getText = Sizzle.getText = function( elem ) { - var node, - ret = "", - i = 0, - nodeType = elem.nodeType; - - if ( !nodeType ) { - // If no nodeType, this is expected to be an array - while ( (node = elem[i++]) ) { - // Do not traverse comment nodes - ret += getText( node ); - } - } else if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) { - // Use textContent for elements - // innerText usage removed for 
consistency of new lines (jQuery #11153) - if ( typeof elem.textContent === "string" ) { - return elem.textContent; - } else { - // Traverse its children - for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { - ret += getText( elem ); - } - } - } else if ( nodeType === 3 || nodeType === 4 ) { - return elem.nodeValue; - } - // Do not include comment or processing instruction nodes - - return ret; -}; - -Expr = Sizzle.selectors = { - - // Can be adjusted by the user - cacheLength: 50, - - createPseudo: markFunction, - - match: matchExpr, - - attrHandle: {}, - - find: {}, - - relative: { - ">": { dir: "parentNode", first: true }, - " ": { dir: "parentNode" }, - "+": { dir: "previousSibling", first: true }, - "~": { dir: "previousSibling" } - }, - - preFilter: { - "ATTR": function( match ) { - match[1] = match[1].replace( runescape, funescape ); - - // Move the given value to match[3] whether quoted or unquoted - match[3] = ( match[3] || match[4] || match[5] || "" ).replace( runescape, funescape ); - - if ( match[2] === "~=" ) { - match[3] = " " + match[3] + " "; - } - - return match.slice( 0, 4 ); - }, - - "CHILD": function( match ) { - /* matches from matchExpr["CHILD"] - 1 type (only|nth|...) - 2 what (child|of-type) - 3 argument (even|odd|\d*|\d*n([+-]\d+)?|...) - 4 xn-component of xn+y argument ([+-]?\d*n|) - 5 sign of xn-component - 6 x of xn-component - 7 sign of y-component - 8 y of y-component - */ - match[1] = match[1].toLowerCase(); - - if ( match[1].slice( 0, 3 ) === "nth" ) { - // nth-* requires argument - if ( !match[3] ) { - Sizzle.error( match[0] ); - } - - // numeric x and y parameters for Expr.filter.CHILD - // remember that false/true cast respectively to 0/1 - match[4] = +( match[4] ? 
match[5] + (match[6] || 1) : 2 * ( match[3] === "even" || match[3] === "odd" ) ); - match[5] = +( ( match[7] + match[8] ) || match[3] === "odd" ); - - // other types prohibit arguments - } else if ( match[3] ) { - Sizzle.error( match[0] ); - } - - return match; - }, - - "PSEUDO": function( match ) { - var excess, - unquoted = !match[6] && match[2]; - - if ( matchExpr["CHILD"].test( match[0] ) ) { - return null; - } - - // Accept quoted arguments as-is - if ( match[3] ) { - match[2] = match[4] || match[5] || ""; - - // Strip excess characters from unquoted arguments - } else if ( unquoted && rpseudo.test( unquoted ) && - // Get excess from tokenize (recursively) - (excess = tokenize( unquoted, true )) && - // advance to the next closing parenthesis - (excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length) ) { - - // excess is a negative index - match[0] = match[0].slice( 0, excess ); - match[2] = unquoted.slice( 0, excess ); - } - - // Return only captures needed by the pseudo filter method (type and argument) - return match.slice( 0, 3 ); - } - }, - - filter: { - - "TAG": function( nodeNameSelector ) { - var nodeName = nodeNameSelector.replace( runescape, funescape ).toLowerCase(); - return nodeNameSelector === "*" ? 
- function() { return true; } : - function( elem ) { - return elem.nodeName && elem.nodeName.toLowerCase() === nodeName; - }; - }, - - "CLASS": function( className ) { - var pattern = classCache[ className + " " ]; - - return pattern || - (pattern = new RegExp( "(^|" + whitespace + ")" + className + "(" + whitespace + "|$)" )) && - classCache( className, function( elem ) { - return pattern.test( typeof elem.className === "string" && elem.className || typeof elem.getAttribute !== "undefined" && elem.getAttribute("class") || "" ); - }); - }, - - "ATTR": function( name, operator, check ) { - return function( elem ) { - var result = Sizzle.attr( elem, name ); - - if ( result == null ) { - return operator === "!="; - } - if ( !operator ) { - return true; - } - - result += ""; - - return operator === "=" ? result === check : - operator === "!=" ? result !== check : - operator === "^=" ? check && result.indexOf( check ) === 0 : - operator === "*=" ? check && result.indexOf( check ) > -1 : - operator === "$=" ? check && result.slice( -check.length ) === check : - operator === "~=" ? ( " " + result.replace( rwhitespace, " " ) + " " ).indexOf( check ) > -1 : - operator === "|=" ? result === check || result.slice( 0, check.length + 1 ) === check + "-" : - false; - }; - }, - - "CHILD": function( type, what, argument, first, last ) { - var simple = type.slice( 0, 3 ) !== "nth", - forward = type.slice( -4 ) !== "last", - ofType = what === "of-type"; - - return first === 1 && last === 0 ? - - // Shortcut for :nth-*(n) - function( elem ) { - return !!elem.parentNode; - } : - - function( elem, context, xml ) { - var cache, uniqueCache, outerCache, node, nodeIndex, start, - dir = simple !== forward ? 
"nextSibling" : "previousSibling", - parent = elem.parentNode, - name = ofType && elem.nodeName.toLowerCase(), - useCache = !xml && !ofType, - diff = false; - - if ( parent ) { - - // :(first|last|only)-(child|of-type) - if ( simple ) { - while ( dir ) { - node = elem; - while ( (node = node[ dir ]) ) { - if ( ofType ? - node.nodeName.toLowerCase() === name : - node.nodeType === 1 ) { - - return false; - } - } - // Reverse direction for :only-* (if we haven't yet done so) - start = dir = type === "only" && !start && "nextSibling"; - } - return true; - } - - start = [ forward ? parent.firstChild : parent.lastChild ]; - - // non-xml :nth-child(...) stores cache data on `parent` - if ( forward && useCache ) { - - // Seek `elem` from a previously-cached index - - // ...in a gzip-friendly way - node = parent; - outerCache = node[ expando ] || (node[ expando ] = {}); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ node.uniqueID ] || - (outerCache[ node.uniqueID ] = {}); - - cache = uniqueCache[ type ] || []; - nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; - diff = nodeIndex && cache[ 2 ]; - node = nodeIndex && parent.childNodes[ nodeIndex ]; - - while ( (node = ++nodeIndex && node && node[ dir ] || - - // Fallback to seeking `elem` from the start - (diff = nodeIndex = 0) || start.pop()) ) { - - // When found, cache indexes on `parent` and break - if ( node.nodeType === 1 && ++diff && node === elem ) { - uniqueCache[ type ] = [ dirruns, nodeIndex, diff ]; - break; - } - } - - } else { - // Use previously-cached element index if available - if ( useCache ) { - // ...in a gzip-friendly way - node = elem; - outerCache = node[ expando ] || (node[ expando ] = {}); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ node.uniqueID ] || - (outerCache[ node.uniqueID ] = {}); - - cache = uniqueCache[ type ] || []; - nodeIndex = cache[ 0 ] === dirruns && 
cache[ 1 ]; - diff = nodeIndex; - } - - // xml :nth-child(...) - // or :nth-last-child(...) or :nth(-last)?-of-type(...) - if ( diff === false ) { - // Use the same loop as above to seek `elem` from the start - while ( (node = ++nodeIndex && node && node[ dir ] || - (diff = nodeIndex = 0) || start.pop()) ) { - - if ( ( ofType ? - node.nodeName.toLowerCase() === name : - node.nodeType === 1 ) && - ++diff ) { - - // Cache the index of each encountered element - if ( useCache ) { - outerCache = node[ expando ] || (node[ expando ] = {}); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ node.uniqueID ] || - (outerCache[ node.uniqueID ] = {}); - - uniqueCache[ type ] = [ dirruns, diff ]; - } - - if ( node === elem ) { - break; - } - } - } - } - } - - // Incorporate the offset, then check against cycle size - diff -= last; - return diff === first || ( diff % first === 0 && diff / first >= 0 ); - } - }; - }, - - "PSEUDO": function( pseudo, argument ) { - // pseudo-class names are case-insensitive - // http://www.w3.org/TR/selectors/#pseudo-classes - // Prioritize by case sensitivity in case custom pseudos are added with uppercase letters - // Remember that setFilters inherits from pseudos - var args, - fn = Expr.pseudos[ pseudo ] || Expr.setFilters[ pseudo.toLowerCase() ] || - Sizzle.error( "unsupported pseudo: " + pseudo ); - - // The user may use createPseudo to indicate that - // arguments are needed to create the filter function - // just as Sizzle does - if ( fn[ expando ] ) { - return fn( argument ); - } - - // But maintain support for old signatures - if ( fn.length > 1 ) { - args = [ pseudo, pseudo, "", argument ]; - return Expr.setFilters.hasOwnProperty( pseudo.toLowerCase() ) ? 
- markFunction(function( seed, matches ) { - var idx, - matched = fn( seed, argument ), - i = matched.length; - while ( i-- ) { - idx = indexOf( seed, matched[i] ); - seed[ idx ] = !( matches[ idx ] = matched[i] ); - } - }) : - function( elem ) { - return fn( elem, 0, args ); - }; - } - - return fn; - } - }, - - pseudos: { - // Potentially complex pseudos - "not": markFunction(function( selector ) { - // Trim the selector passed to compile - // to avoid treating leading and trailing - // spaces as combinators - var input = [], - results = [], - matcher = compile( selector.replace( rtrim, "$1" ) ); - - return matcher[ expando ] ? - markFunction(function( seed, matches, context, xml ) { - var elem, - unmatched = matcher( seed, null, xml, [] ), - i = seed.length; - - // Match elements unmatched by `matcher` - while ( i-- ) { - if ( (elem = unmatched[i]) ) { - seed[i] = !(matches[i] = elem); - } - } - }) : - function( elem, context, xml ) { - input[0] = elem; - matcher( input, null, xml, results ); - // Don't keep the element (issue #299) - input[0] = null; - return !results.pop(); - }; - }), - - "has": markFunction(function( selector ) { - return function( elem ) { - return Sizzle( selector, elem ).length > 0; - }; - }), - - "contains": markFunction(function( text ) { - text = text.replace( runescape, funescape ); - return function( elem ) { - return ( elem.textContent || elem.innerText || getText( elem ) ).indexOf( text ) > -1; - }; - }), - - // "Whether an element is represented by a :lang() selector - // is based solely on the element's language value - // being equal to the identifier C, - // or beginning with the identifier C immediately followed by "-". - // The matching of C against the element's language value is performed case-insensitively. - // The identifier C does not have to be a valid language name." 
- // http://www.w3.org/TR/selectors/#lang-pseudo - "lang": markFunction( function( lang ) { - // lang value must be a valid identifier - if ( !ridentifier.test(lang || "") ) { - Sizzle.error( "unsupported lang: " + lang ); - } - lang = lang.replace( runescape, funescape ).toLowerCase(); - return function( elem ) { - var elemLang; - do { - if ( (elemLang = documentIsHTML ? - elem.lang : - elem.getAttribute("xml:lang") || elem.getAttribute("lang")) ) { - - elemLang = elemLang.toLowerCase(); - return elemLang === lang || elemLang.indexOf( lang + "-" ) === 0; - } - } while ( (elem = elem.parentNode) && elem.nodeType === 1 ); - return false; - }; - }), - - // Miscellaneous - "target": function( elem ) { - var hash = window.location && window.location.hash; - return hash && hash.slice( 1 ) === elem.id; - }, - - "root": function( elem ) { - return elem === docElem; - }, - - "focus": function( elem ) { - return elem === document.activeElement && (!document.hasFocus || document.hasFocus()) && !!(elem.type || elem.href || ~elem.tabIndex); - }, - - // Boolean properties - "enabled": createDisabledPseudo( false ), - "disabled": createDisabledPseudo( true ), - - "checked": function( elem ) { - // In CSS3, :checked should return both checked and selected elements - // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked - var nodeName = elem.nodeName.toLowerCase(); - return (nodeName === "input" && !!elem.checked) || (nodeName === "option" && !!elem.selected); - }, - - "selected": function( elem ) { - // Accessing this property makes selected-by-default - // options in Safari work properly - if ( elem.parentNode ) { - elem.parentNode.selectedIndex; - } - - return elem.selected === true; - }, - - // Contents - "empty": function( elem ) { - // http://www.w3.org/TR/selectors/#empty-pseudo - // :empty is negated by element (1) or content nodes (text: 3; cdata: 4; entity ref: 5), - // but not by others (comment: 8; processing instruction: 7; etc.) 
- // nodeType < 6 works because attributes (2) do not appear as children - for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { - if ( elem.nodeType < 6 ) { - return false; - } - } - return true; - }, - - "parent": function( elem ) { - return !Expr.pseudos["empty"]( elem ); - }, - - // Element/input types - "header": function( elem ) { - return rheader.test( elem.nodeName ); - }, - - "input": function( elem ) { - return rinputs.test( elem.nodeName ); - }, - - "button": function( elem ) { - var name = elem.nodeName.toLowerCase(); - return name === "input" && elem.type === "button" || name === "button"; - }, - - "text": function( elem ) { - var attr; - return elem.nodeName.toLowerCase() === "input" && - elem.type === "text" && - - // Support: IE<8 - // New HTML5 attribute values (e.g., "search") appear with elem.type === "text" - ( (attr = elem.getAttribute("type")) == null || attr.toLowerCase() === "text" ); - }, - - // Position-in-collection - "first": createPositionalPseudo(function() { - return [ 0 ]; - }), - - "last": createPositionalPseudo(function( matchIndexes, length ) { - return [ length - 1 ]; - }), - - "eq": createPositionalPseudo(function( matchIndexes, length, argument ) { - return [ argument < 0 ? argument + length : argument ]; - }), - - "even": createPositionalPseudo(function( matchIndexes, length ) { - var i = 0; - for ( ; i < length; i += 2 ) { - matchIndexes.push( i ); - } - return matchIndexes; - }), - - "odd": createPositionalPseudo(function( matchIndexes, length ) { - var i = 1; - for ( ; i < length; i += 2 ) { - matchIndexes.push( i ); - } - return matchIndexes; - }), - - "lt": createPositionalPseudo(function( matchIndexes, length, argument ) { - var i = argument < 0 ? argument + length : argument; - for ( ; --i >= 0; ) { - matchIndexes.push( i ); - } - return matchIndexes; - }), - - "gt": createPositionalPseudo(function( matchIndexes, length, argument ) { - var i = argument < 0 ? 
argument + length : argument; - for ( ; ++i < length; ) { - matchIndexes.push( i ); - } - return matchIndexes; - }) - } -}; - -Expr.pseudos["nth"] = Expr.pseudos["eq"]; - -// Add button/input type pseudos -for ( i in { radio: true, checkbox: true, file: true, password: true, image: true } ) { - Expr.pseudos[ i ] = createInputPseudo( i ); -} -for ( i in { submit: true, reset: true } ) { - Expr.pseudos[ i ] = createButtonPseudo( i ); -} - -// Easy API for creating new setFilters -function setFilters() {} -setFilters.prototype = Expr.filters = Expr.pseudos; -Expr.setFilters = new setFilters(); - -tokenize = Sizzle.tokenize = function( selector, parseOnly ) { - var matched, match, tokens, type, - soFar, groups, preFilters, - cached = tokenCache[ selector + " " ]; - - if ( cached ) { - return parseOnly ? 0 : cached.slice( 0 ); - } - - soFar = selector; - groups = []; - preFilters = Expr.preFilter; - - while ( soFar ) { - - // Comma and first run - if ( !matched || (match = rcomma.exec( soFar )) ) { - if ( match ) { - // Don't consume trailing commas as valid - soFar = soFar.slice( match[0].length ) || soFar; - } - groups.push( (tokens = []) ); - } - - matched = false; - - // Combinators - if ( (match = rcombinators.exec( soFar )) ) { - matched = match.shift(); - tokens.push({ - value: matched, - // Cast descendant combinators to space - type: match[0].replace( rtrim, " " ) - }); - soFar = soFar.slice( matched.length ); - } - - // Filters - for ( type in Expr.filter ) { - if ( (match = matchExpr[ type ].exec( soFar )) && (!preFilters[ type ] || - (match = preFilters[ type ]( match ))) ) { - matched = match.shift(); - tokens.push({ - value: matched, - type: type, - matches: match - }); - soFar = soFar.slice( matched.length ); - } - } - - if ( !matched ) { - break; - } - } - - // Return the length of the invalid excess - // if we're just parsing - // Otherwise, throw an error or return tokens - return parseOnly ? - soFar.length : - soFar ? 
- Sizzle.error( selector ) : - // Cache the tokens - tokenCache( selector, groups ).slice( 0 ); -}; - -function toSelector( tokens ) { - var i = 0, - len = tokens.length, - selector = ""; - for ( ; i < len; i++ ) { - selector += tokens[i].value; - } - return selector; -} - -function addCombinator( matcher, combinator, base ) { - var dir = combinator.dir, - skip = combinator.next, - key = skip || dir, - checkNonElements = base && key === "parentNode", - doneName = done++; - - return combinator.first ? - // Check against closest ancestor/preceding element - function( elem, context, xml ) { - while ( (elem = elem[ dir ]) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - return matcher( elem, context, xml ); - } - } - return false; - } : - - // Check against all ancestor/preceding elements - function( elem, context, xml ) { - var oldCache, uniqueCache, outerCache, - newCache = [ dirruns, doneName ]; - - // We can't set arbitrary data on XML nodes, so they don't benefit from combinator caching - if ( xml ) { - while ( (elem = elem[ dir ]) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - if ( matcher( elem, context, xml ) ) { - return true; - } - } - } - } else { - while ( (elem = elem[ dir ]) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - outerCache = elem[ expando ] || (elem[ expando ] = {}); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ elem.uniqueID ] || (outerCache[ elem.uniqueID ] = {}); - - if ( skip && skip === elem.nodeName.toLowerCase() ) { - elem = elem[ dir ] || elem; - } else if ( (oldCache = uniqueCache[ key ]) && - oldCache[ 0 ] === dirruns && oldCache[ 1 ] === doneName ) { - - // Assign to newCache so results back-propagate to previous elements - return (newCache[ 2 ] = oldCache[ 2 ]); - } else { - // Reuse newcache so results back-propagate to previous elements - uniqueCache[ key ] = newCache; - - // A match means we're done; a fail means we have to keep checking - 
if ( (newCache[ 2 ] = matcher( elem, context, xml )) ) { - return true; - } - } - } - } - } - return false; - }; -} - -function elementMatcher( matchers ) { - return matchers.length > 1 ? - function( elem, context, xml ) { - var i = matchers.length; - while ( i-- ) { - if ( !matchers[i]( elem, context, xml ) ) { - return false; - } - } - return true; - } : - matchers[0]; -} - -function multipleContexts( selector, contexts, results ) { - var i = 0, - len = contexts.length; - for ( ; i < len; i++ ) { - Sizzle( selector, contexts[i], results ); - } - return results; -} - -function condense( unmatched, map, filter, context, xml ) { - var elem, - newUnmatched = [], - i = 0, - len = unmatched.length, - mapped = map != null; - - for ( ; i < len; i++ ) { - if ( (elem = unmatched[i]) ) { - if ( !filter || filter( elem, context, xml ) ) { - newUnmatched.push( elem ); - if ( mapped ) { - map.push( i ); - } - } - } - } - - return newUnmatched; -} - -function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postSelector ) { - if ( postFilter && !postFilter[ expando ] ) { - postFilter = setMatcher( postFilter ); - } - if ( postFinder && !postFinder[ expando ] ) { - postFinder = setMatcher( postFinder, postSelector ); - } - return markFunction(function( seed, results, context, xml ) { - var temp, i, elem, - preMap = [], - postMap = [], - preexisting = results.length, - - // Get initial elements from seed or context - elems = seed || multipleContexts( selector || "*", context.nodeType ? [ context ] : context, [] ), - - // Prefilter to get matcher input, preserving a map for seed-results synchronization - matcherIn = preFilter && ( seed || !selector ) ? - condense( elems, preMap, preFilter, context, xml ) : - elems, - - matcherOut = matcher ? - // If we have a postFinder, or filtered seed, or non-seed postFilter or preexisting results, - postFinder || ( seed ? preFilter : preexisting || postFilter ) ? 
- - // ...intermediate processing is necessary - [] : - - // ...otherwise use results directly - results : - matcherIn; - - // Find primary matches - if ( matcher ) { - matcher( matcherIn, matcherOut, context, xml ); - } - - // Apply postFilter - if ( postFilter ) { - temp = condense( matcherOut, postMap ); - postFilter( temp, [], context, xml ); - - // Un-match failing elements by moving them back to matcherIn - i = temp.length; - while ( i-- ) { - if ( (elem = temp[i]) ) { - matcherOut[ postMap[i] ] = !(matcherIn[ postMap[i] ] = elem); - } - } - } - - if ( seed ) { - if ( postFinder || preFilter ) { - if ( postFinder ) { - // Get the final matcherOut by condensing this intermediate into postFinder contexts - temp = []; - i = matcherOut.length; - while ( i-- ) { - if ( (elem = matcherOut[i]) ) { - // Restore matcherIn since elem is not yet a final match - temp.push( (matcherIn[i] = elem) ); - } - } - postFinder( null, (matcherOut = []), temp, xml ); - } - - // Move matched elements from seed to results to keep them synchronized - i = matcherOut.length; - while ( i-- ) { - if ( (elem = matcherOut[i]) && - (temp = postFinder ? indexOf( seed, elem ) : preMap[i]) > -1 ) { - - seed[temp] = !(results[temp] = elem); - } - } - } - - // Add elements to results, through postFinder if defined - } else { - matcherOut = condense( - matcherOut === results ? - matcherOut.splice( preexisting, matcherOut.length ) : - matcherOut - ); - if ( postFinder ) { - postFinder( null, results, matcherOut, xml ); - } else { - push.apply( results, matcherOut ); - } - } - }); -} - -function matcherFromTokens( tokens ) { - var checkContext, matcher, j, - len = tokens.length, - leadingRelative = Expr.relative[ tokens[0].type ], - implicitRelative = leadingRelative || Expr.relative[" "], - i = leadingRelative ? 
1 : 0, - - // The foundational matcher ensures that elements are reachable from top-level context(s) - matchContext = addCombinator( function( elem ) { - return elem === checkContext; - }, implicitRelative, true ), - matchAnyContext = addCombinator( function( elem ) { - return indexOf( checkContext, elem ) > -1; - }, implicitRelative, true ), - matchers = [ function( elem, context, xml ) { - var ret = ( !leadingRelative && ( xml || context !== outermostContext ) ) || ( - (checkContext = context).nodeType ? - matchContext( elem, context, xml ) : - matchAnyContext( elem, context, xml ) ); - // Avoid hanging onto element (issue #299) - checkContext = null; - return ret; - } ]; - - for ( ; i < len; i++ ) { - if ( (matcher = Expr.relative[ tokens[i].type ]) ) { - matchers = [ addCombinator(elementMatcher( matchers ), matcher) ]; - } else { - matcher = Expr.filter[ tokens[i].type ].apply( null, tokens[i].matches ); - - // Return special upon seeing a positional matcher - if ( matcher[ expando ] ) { - // Find the next relative operator (if any) for proper handling - j = ++i; - for ( ; j < len; j++ ) { - if ( Expr.relative[ tokens[j].type ] ) { - break; - } - } - return setMatcher( - i > 1 && elementMatcher( matchers ), - i > 1 && toSelector( - // If the preceding token was a descendant combinator, insert an implicit any-element `*` - tokens.slice( 0, i - 1 ).concat({ value: tokens[ i - 2 ].type === " " ? 
"*" : "" }) - ).replace( rtrim, "$1" ), - matcher, - i < j && matcherFromTokens( tokens.slice( i, j ) ), - j < len && matcherFromTokens( (tokens = tokens.slice( j )) ), - j < len && toSelector( tokens ) - ); - } - matchers.push( matcher ); - } - } - - return elementMatcher( matchers ); -} - -function matcherFromGroupMatchers( elementMatchers, setMatchers ) { - var bySet = setMatchers.length > 0, - byElement = elementMatchers.length > 0, - superMatcher = function( seed, context, xml, results, outermost ) { - var elem, j, matcher, - matchedCount = 0, - i = "0", - unmatched = seed && [], - setMatched = [], - contextBackup = outermostContext, - // We must always have either seed elements or outermost context - elems = seed || byElement && Expr.find["TAG"]( "*", outermost ), - // Use integer dirruns iff this is the outermost matcher - dirrunsUnique = (dirruns += contextBackup == null ? 1 : Math.random() || 0.1), - len = elems.length; - - if ( outermost ) { - outermostContext = context === document || context || outermost; - } - - // Add elements passing elementMatchers directly to results - // Support: IE<9, Safari - // Tolerate NodeList properties (IE: "length"; Safari: ) matching elements by id - for ( ; i !== len && (elem = elems[i]) != null; i++ ) { - if ( byElement && elem ) { - j = 0; - if ( !context && elem.ownerDocument !== document ) { - setDocument( elem ); - xml = !documentIsHTML; - } - while ( (matcher = elementMatchers[j++]) ) { - if ( matcher( elem, context || document, xml) ) { - results.push( elem ); - break; - } - } - if ( outermost ) { - dirruns = dirrunsUnique; - } - } - - // Track unmatched elements for set filters - if ( bySet ) { - // They will have gone through all possible matchers - if ( (elem = !matcher && elem) ) { - matchedCount--; - } - - // Lengthen the array for every element, matched or not - if ( seed ) { - unmatched.push( elem ); - } - } - } - - // `i` is now the count of elements visited above, and adding it to `matchedCount` - // 
makes the latter nonnegative. - matchedCount += i; - - // Apply set filters to unmatched elements - // NOTE: This can be skipped if there are no unmatched elements (i.e., `matchedCount` - // equals `i`), unless we didn't visit _any_ elements in the above loop because we have - // no element matchers and no seed. - // Incrementing an initially-string "0" `i` allows `i` to remain a string only in that - // case, which will result in a "00" `matchedCount` that differs from `i` but is also - // numerically zero. - if ( bySet && i !== matchedCount ) { - j = 0; - while ( (matcher = setMatchers[j++]) ) { - matcher( unmatched, setMatched, context, xml ); - } - - if ( seed ) { - // Reintegrate element matches to eliminate the need for sorting - if ( matchedCount > 0 ) { - while ( i-- ) { - if ( !(unmatched[i] || setMatched[i]) ) { - setMatched[i] = pop.call( results ); - } - } - } - - // Discard index placeholder values to get only actual matches - setMatched = condense( setMatched ); - } - - // Add matches to results - push.apply( results, setMatched ); - - // Seedless set matches succeeding multiple successful matchers stipulate sorting - if ( outermost && !seed && setMatched.length > 0 && - ( matchedCount + setMatchers.length ) > 1 ) { - - Sizzle.uniqueSort( results ); - } - } - - // Override manipulation of globals by nested matchers - if ( outermost ) { - dirruns = dirrunsUnique; - outermostContext = contextBackup; - } - - return unmatched; - }; - - return bySet ? 
- markFunction( superMatcher ) : - superMatcher; -} - -compile = Sizzle.compile = function( selector, match /* Internal Use Only */ ) { - var i, - setMatchers = [], - elementMatchers = [], - cached = compilerCache[ selector + " " ]; - - if ( !cached ) { - // Generate a function of recursive functions that can be used to check each element - if ( !match ) { - match = tokenize( selector ); - } - i = match.length; - while ( i-- ) { - cached = matcherFromTokens( match[i] ); - if ( cached[ expando ] ) { - setMatchers.push( cached ); - } else { - elementMatchers.push( cached ); - } - } - - // Cache the compiled function - cached = compilerCache( selector, matcherFromGroupMatchers( elementMatchers, setMatchers ) ); - - // Save selector and tokenization - cached.selector = selector; - } - return cached; -}; - -/** - * A low-level selection function that works with Sizzle's compiled - * selector functions - * @param {String|Function} selector A selector or a pre-compiled - * selector function built with Sizzle.compile - * @param {Element} context - * @param {Array} [results] - * @param {Array} [seed] A set of elements to match against - */ -select = Sizzle.select = function( selector, context, results, seed ) { - var i, tokens, token, type, find, - compiled = typeof selector === "function" && selector, - match = !seed && tokenize( (selector = compiled.selector || selector) ); - - results = results || []; - - // Try to minimize operations if there is only one selector in the list and no seed - // (the latter of which guarantees us context) - if ( match.length === 1 ) { - - // Reduce context if the leading compound selector is an ID - tokens = match[0] = match[0].slice( 0 ); - if ( tokens.length > 2 && (token = tokens[0]).type === "ID" && - context.nodeType === 9 && documentIsHTML && Expr.relative[ tokens[1].type ] ) { - - context = ( Expr.find["ID"]( token.matches[0].replace(runescape, funescape), context ) || [] )[0]; - if ( !context ) { - return results; - - // Precompiled 
matchers will still verify ancestry, so step up a level - } else if ( compiled ) { - context = context.parentNode; - } - - selector = selector.slice( tokens.shift().value.length ); - } - - // Fetch a seed set for right-to-left matching - i = matchExpr["needsContext"].test( selector ) ? 0 : tokens.length; - while ( i-- ) { - token = tokens[i]; - - // Abort if we hit a combinator - if ( Expr.relative[ (type = token.type) ] ) { - break; - } - if ( (find = Expr.find[ type ]) ) { - // Search, expanding context for leading sibling combinators - if ( (seed = find( - token.matches[0].replace( runescape, funescape ), - rsibling.test( tokens[0].type ) && testContext( context.parentNode ) || context - )) ) { - - // If seed is empty or no tokens remain, we can return early - tokens.splice( i, 1 ); - selector = seed.length && toSelector( tokens ); - if ( !selector ) { - push.apply( results, seed ); - return results; - } - - break; - } - } - } - } - - // Compile and execute a filtering function if one is not provided - // Provide `match` to avoid retokenization if we modified the selector above - ( compiled || compile( selector, match ) )( - seed, - context, - !documentIsHTML, - results, - !context || rsibling.test( selector ) && testContext( context.parentNode ) || context - ); - return results; -}; - -// One-time assignments - -// Sort stability -support.sortStable = expando.split("").sort( sortOrder ).join("") === expando; - -// Support: Chrome 14-35+ -// Always assume duplicates if they aren't passed to the comparison function -support.detectDuplicates = !!hasDuplicate; - -// Initialize against the default document -setDocument(); - -// Support: Webkit<537.32 - Safari 6.0.3/Chrome 25 (fixed in Chrome 27) -// Detached nodes confoundingly follow *each other* -support.sortDetached = assert(function( el ) { - // Should return 1, but returns 4 (following) - return el.compareDocumentPosition( document.createElement("fieldset") ) & 1; -}); - -// Support: IE<8 -// Prevent 
attribute/property "interpolation" -// https://msdn.microsoft.com/en-us/library/ms536429%28VS.85%29.aspx -if ( !assert(function( el ) { - el.innerHTML = ""; - return el.firstChild.getAttribute("href") === "#" ; -}) ) { - addHandle( "type|href|height|width", function( elem, name, isXML ) { - if ( !isXML ) { - return elem.getAttribute( name, name.toLowerCase() === "type" ? 1 : 2 ); - } - }); -} - -// Support: IE<9 -// Use defaultValue in place of getAttribute("value") -if ( !support.attributes || !assert(function( el ) { - el.innerHTML = ""; - el.firstChild.setAttribute( "value", "" ); - return el.firstChild.getAttribute( "value" ) === ""; -}) ) { - addHandle( "value", function( elem, name, isXML ) { - if ( !isXML && elem.nodeName.toLowerCase() === "input" ) { - return elem.defaultValue; - } - }); -} - -// Support: IE<9 -// Use getAttributeNode to fetch booleans when getAttribute lies -if ( !assert(function( el ) { - return el.getAttribute("disabled") == null; -}) ) { - addHandle( booleans, function( elem, name, isXML ) { - var val; - if ( !isXML ) { - return elem[ name ] === true ? name.toLowerCase() : - (val = elem.getAttributeNode( name )) && val.specified ? 
- val.value : - null; - } - }); -} - -return Sizzle; - -})( window ); - - - -jQuery.find = Sizzle; -jQuery.expr = Sizzle.selectors; - -// Deprecated -jQuery.expr[ ":" ] = jQuery.expr.pseudos; -jQuery.uniqueSort = jQuery.unique = Sizzle.uniqueSort; -jQuery.text = Sizzle.getText; -jQuery.isXMLDoc = Sizzle.isXML; -jQuery.contains = Sizzle.contains; -jQuery.escapeSelector = Sizzle.escape; - - - - -var dir = function( elem, dir, until ) { - var matched = [], - truncate = until !== undefined; - - while ( ( elem = elem[ dir ] ) && elem.nodeType !== 9 ) { - if ( elem.nodeType === 1 ) { - if ( truncate && jQuery( elem ).is( until ) ) { - break; - } - matched.push( elem ); - } - } - return matched; -}; - - -var siblings = function( n, elem ) { - var matched = []; - - for ( ; n; n = n.nextSibling ) { - if ( n.nodeType === 1 && n !== elem ) { - matched.push( n ); - } - } - - return matched; -}; - - -var rneedsContext = jQuery.expr.match.needsContext; - - - -function nodeName( elem, name ) { - - return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase(); - -}; -var rsingleTag = ( /^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i ); - - - -var risSimple = /^.[^:#\[\.,]*$/; - -// Implement the identical functionality for filter and not -function winnow( elements, qualifier, not ) { - if ( jQuery.isFunction( qualifier ) ) { - return jQuery.grep( elements, function( elem, i ) { - return !!qualifier.call( elem, i, elem ) !== not; - } ); - } - - // Single element - if ( qualifier.nodeType ) { - return jQuery.grep( elements, function( elem ) { - return ( elem === qualifier ) !== not; - } ); - } - - // Arraylike of elements (jQuery, arguments, Array) - if ( typeof qualifier !== "string" ) { - return jQuery.grep( elements, function( elem ) { - return ( indexOf.call( qualifier, elem ) > -1 ) !== not; - } ); - } - - // Simple selector that can be filtered directly, removing non-Elements - if ( risSimple.test( qualifier ) ) { - return jQuery.filter( 
qualifier, elements, not ); - } - - // Complex selector, compare the two sets, removing non-Elements - qualifier = jQuery.filter( qualifier, elements ); - return jQuery.grep( elements, function( elem ) { - return ( indexOf.call( qualifier, elem ) > -1 ) !== not && elem.nodeType === 1; - } ); -} - -jQuery.filter = function( expr, elems, not ) { - var elem = elems[ 0 ]; - - if ( not ) { - expr = ":not(" + expr + ")"; - } - - if ( elems.length === 1 && elem.nodeType === 1 ) { - return jQuery.find.matchesSelector( elem, expr ) ? [ elem ] : []; - } - - return jQuery.find.matches( expr, jQuery.grep( elems, function( elem ) { - return elem.nodeType === 1; - } ) ); -}; - -jQuery.fn.extend( { - find: function( selector ) { - var i, ret, - len = this.length, - self = this; - - if ( typeof selector !== "string" ) { - return this.pushStack( jQuery( selector ).filter( function() { - for ( i = 0; i < len; i++ ) { - if ( jQuery.contains( self[ i ], this ) ) { - return true; - } - } - } ) ); - } - - ret = this.pushStack( [] ); - - for ( i = 0; i < len; i++ ) { - jQuery.find( selector, self[ i ], ret ); - } - - return len > 1 ? jQuery.uniqueSort( ret ) : ret; - }, - filter: function( selector ) { - return this.pushStack( winnow( this, selector || [], false ) ); - }, - not: function( selector ) { - return this.pushStack( winnow( this, selector || [], true ) ); - }, - is: function( selector ) { - return !!winnow( - this, - - // If this is a positional/relative selector, check membership in the returned set - // so $("p:first").is("p:last") won't return true for a doc with two "p". - typeof selector === "string" && rneedsContext.test( selector ) ? 
- jQuery( selector ) : - selector || [], - false - ).length; - } -} ); - - -// Initialize a jQuery object - - -// A central reference to the root jQuery(document) -var rootjQuery, - - // A simple way to check for HTML strings - // Prioritize #id over to avoid XSS via location.hash (#9521) - // Strict HTML recognition (#11290: must start with <) - // Shortcut simple #id case for speed - rquickExpr = /^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/, - - init = jQuery.fn.init = function( selector, context, root ) { - var match, elem; - - // HANDLE: $(""), $(null), $(undefined), $(false) - if ( !selector ) { - return this; - } - - // Method init() accepts an alternate rootjQuery - // so migrate can support jQuery.sub (gh-2101) - root = root || rootjQuery; - - // Handle HTML strings - if ( typeof selector === "string" ) { - if ( selector[ 0 ] === "<" && - selector[ selector.length - 1 ] === ">" && - selector.length >= 3 ) { - - // Assume that strings that start and end with <> are HTML and skip the regex check - match = [ null, selector, null ]; - - } else { - match = rquickExpr.exec( selector ); - } - - // Match html or make sure no context is specified for #id - if ( match && ( match[ 1 ] || !context ) ) { - - // HANDLE: $(html) -> $(array) - if ( match[ 1 ] ) { - context = context instanceof jQuery ? context[ 0 ] : context; - - // Option to run scripts is true for back-compat - // Intentionally let the error be thrown if parseHTML is not present - jQuery.merge( this, jQuery.parseHTML( - match[ 1 ], - context && context.nodeType ? 
context.ownerDocument || context : document, - true - ) ); - - // HANDLE: $(html, props) - if ( rsingleTag.test( match[ 1 ] ) && jQuery.isPlainObject( context ) ) { - for ( match in context ) { - - // Properties of context are called as methods if possible - if ( jQuery.isFunction( this[ match ] ) ) { - this[ match ]( context[ match ] ); - - // ...and otherwise set as attributes - } else { - this.attr( match, context[ match ] ); - } - } - } - - return this; - - // HANDLE: $(#id) - } else { - elem = document.getElementById( match[ 2 ] ); - - if ( elem ) { - - // Inject the element directly into the jQuery object - this[ 0 ] = elem; - this.length = 1; - } - return this; - } - - // HANDLE: $(expr, $(...)) - } else if ( !context || context.jquery ) { - return ( context || root ).find( selector ); - - // HANDLE: $(expr, context) - // (which is just equivalent to: $(context).find(expr) - } else { - return this.constructor( context ).find( selector ); - } - - // HANDLE: $(DOMElement) - } else if ( selector.nodeType ) { - this[ 0 ] = selector; - this.length = 1; - return this; - - // HANDLE: $(function) - // Shortcut for document ready - } else if ( jQuery.isFunction( selector ) ) { - return root.ready !== undefined ? 
- root.ready( selector ) : - - // Execute immediately if ready is not present - selector( jQuery ); - } - - return jQuery.makeArray( selector, this ); - }; - -// Give the init function the jQuery prototype for later instantiation -init.prototype = jQuery.fn; - -// Initialize central reference -rootjQuery = jQuery( document ); - - -var rparentsprev = /^(?:parents|prev(?:Until|All))/, - - // Methods guaranteed to produce a unique set when starting from a unique set - guaranteedUnique = { - children: true, - contents: true, - next: true, - prev: true - }; - -jQuery.fn.extend( { - has: function( target ) { - var targets = jQuery( target, this ), - l = targets.length; - - return this.filter( function() { - var i = 0; - for ( ; i < l; i++ ) { - if ( jQuery.contains( this, targets[ i ] ) ) { - return true; - } - } - } ); - }, - - closest: function( selectors, context ) { - var cur, - i = 0, - l = this.length, - matched = [], - targets = typeof selectors !== "string" && jQuery( selectors ); - - // Positional selectors never match, since there's no _selection_ context - if ( !rneedsContext.test( selectors ) ) { - for ( ; i < l; i++ ) { - for ( cur = this[ i ]; cur && cur !== context; cur = cur.parentNode ) { - - // Always skip document fragments - if ( cur.nodeType < 11 && ( targets ? - targets.index( cur ) > -1 : - - // Don't pass non-elements to Sizzle - cur.nodeType === 1 && - jQuery.find.matchesSelector( cur, selectors ) ) ) { - - matched.push( cur ); - break; - } - } - } - } - - return this.pushStack( matched.length > 1 ? jQuery.uniqueSort( matched ) : matched ); - }, - - // Determine the position of an element within the set - index: function( elem ) { - - // No argument, return index in parent - if ( !elem ) { - return ( this[ 0 ] && this[ 0 ].parentNode ) ? 
this.first().prevAll().length : -1; - } - - // Index in selector - if ( typeof elem === "string" ) { - return indexOf.call( jQuery( elem ), this[ 0 ] ); - } - - // Locate the position of the desired element - return indexOf.call( this, - - // If it receives a jQuery object, the first element is used - elem.jquery ? elem[ 0 ] : elem - ); - }, - - add: function( selector, context ) { - return this.pushStack( - jQuery.uniqueSort( - jQuery.merge( this.get(), jQuery( selector, context ) ) - ) - ); - }, - - addBack: function( selector ) { - return this.add( selector == null ? - this.prevObject : this.prevObject.filter( selector ) - ); - } -} ); - -function sibling( cur, dir ) { - while ( ( cur = cur[ dir ] ) && cur.nodeType !== 1 ) {} - return cur; -} - -jQuery.each( { - parent: function( elem ) { - var parent = elem.parentNode; - return parent && parent.nodeType !== 11 ? parent : null; - }, - parents: function( elem ) { - return dir( elem, "parentNode" ); - }, - parentsUntil: function( elem, i, until ) { - return dir( elem, "parentNode", until ); - }, - next: function( elem ) { - return sibling( elem, "nextSibling" ); - }, - prev: function( elem ) { - return sibling( elem, "previousSibling" ); - }, - nextAll: function( elem ) { - return dir( elem, "nextSibling" ); - }, - prevAll: function( elem ) { - return dir( elem, "previousSibling" ); - }, - nextUntil: function( elem, i, until ) { - return dir( elem, "nextSibling", until ); - }, - prevUntil: function( elem, i, until ) { - return dir( elem, "previousSibling", until ); - }, - siblings: function( elem ) { - return siblings( ( elem.parentNode || {} ).firstChild, elem ); - }, - children: function( elem ) { - return siblings( elem.firstChild ); - }, - contents: function( elem ) { - if ( nodeName( elem, "iframe" ) ) { - return elem.contentDocument; - } - - // Support: IE 9 - 11 only, iOS 7 only, Android Browser <=4.3 only - // Treat the template element as a regular one in browsers that - // don't support it. 
- if ( nodeName( elem, "template" ) ) { - elem = elem.content || elem; - } - - return jQuery.merge( [], elem.childNodes ); - } -}, function( name, fn ) { - jQuery.fn[ name ] = function( until, selector ) { - var matched = jQuery.map( this, fn, until ); - - if ( name.slice( -5 ) !== "Until" ) { - selector = until; - } - - if ( selector && typeof selector === "string" ) { - matched = jQuery.filter( selector, matched ); - } - - if ( this.length > 1 ) { - - // Remove duplicates - if ( !guaranteedUnique[ name ] ) { - jQuery.uniqueSort( matched ); - } - - // Reverse order for parents* and prev-derivatives - if ( rparentsprev.test( name ) ) { - matched.reverse(); - } - } - - return this.pushStack( matched ); - }; -} ); -var rnothtmlwhite = ( /[^\x20\t\r\n\f]+/g ); - - - -// Convert String-formatted options into Object-formatted ones -function createOptions( options ) { - var object = {}; - jQuery.each( options.match( rnothtmlwhite ) || [], function( _, flag ) { - object[ flag ] = true; - } ); - return object; -} - -/* - * Create a callback list using the following parameters: - * - * options: an optional list of space-separated options that will change how - * the callback list behaves or a more traditional option object - * - * By default a callback list will act like an event callback list and can be - * "fired" multiple times. - * - * Possible options: - * - * once: will ensure the callback list can only be fired once (like a Deferred) - * - * memory: will keep track of previous values and will call any callback added - * after the list has been fired right away with the latest "memorized" - * values (like a Deferred) - * - * unique: will ensure a callback can only be added once (no duplicate in the list) - * - * stopOnFalse: interrupt callings when a callback returns false - * - */ -jQuery.Callbacks = function( options ) { - - // Convert options from String-formatted to Object-formatted if needed - // (we check in cache first) - options = typeof options === "string" ? 
- createOptions( options ) : - jQuery.extend( {}, options ); - - var // Flag to know if list is currently firing - firing, - - // Last fire value for non-forgettable lists - memory, - - // Flag to know if list was already fired - fired, - - // Flag to prevent firing - locked, - - // Actual callback list - list = [], - - // Queue of execution data for repeatable lists - queue = [], - - // Index of currently firing callback (modified by add/remove as needed) - firingIndex = -1, - - // Fire callbacks - fire = function() { - - // Enforce single-firing - locked = locked || options.once; - - // Execute callbacks for all pending executions, - // respecting firingIndex overrides and runtime changes - fired = firing = true; - for ( ; queue.length; firingIndex = -1 ) { - memory = queue.shift(); - while ( ++firingIndex < list.length ) { - - // Run callback and check for early termination - if ( list[ firingIndex ].apply( memory[ 0 ], memory[ 1 ] ) === false && - options.stopOnFalse ) { - - // Jump to end and forget the data so .add doesn't re-fire - firingIndex = list.length; - memory = false; - } - } - } - - // Forget the data if we're done with it - if ( !options.memory ) { - memory = false; - } - - firing = false; - - // Clean up if we're done firing for good - if ( locked ) { - - // Keep an empty list if we have data for future add calls - if ( memory ) { - list = []; - - // Otherwise, this object is spent - } else { - list = ""; - } - } - }, - - // Actual Callbacks object - self = { - - // Add a callback or a collection of callbacks to the list - add: function() { - if ( list ) { - - // If we have memory from a past run, we should fire after adding - if ( memory && !firing ) { - firingIndex = list.length - 1; - queue.push( memory ); - } - - ( function add( args ) { - jQuery.each( args, function( _, arg ) { - if ( jQuery.isFunction( arg ) ) { - if ( !options.unique || !self.has( arg ) ) { - list.push( arg ); - } - } else if ( arg && arg.length && jQuery.type( arg ) !== 
"string" ) { - - // Inspect recursively - add( arg ); - } - } ); - } )( arguments ); - - if ( memory && !firing ) { - fire(); - } - } - return this; - }, - - // Remove a callback from the list - remove: function() { - jQuery.each( arguments, function( _, arg ) { - var index; - while ( ( index = jQuery.inArray( arg, list, index ) ) > -1 ) { - list.splice( index, 1 ); - - // Handle firing indexes - if ( index <= firingIndex ) { - firingIndex--; - } - } - } ); - return this; - }, - - // Check if a given callback is in the list. - // If no argument is given, return whether or not list has callbacks attached. - has: function( fn ) { - return fn ? - jQuery.inArray( fn, list ) > -1 : - list.length > 0; - }, - - // Remove all callbacks from the list - empty: function() { - if ( list ) { - list = []; - } - return this; - }, - - // Disable .fire and .add - // Abort any current/pending executions - // Clear all callbacks and values - disable: function() { - locked = queue = []; - list = memory = ""; - return this; - }, - disabled: function() { - return !list; - }, - - // Disable .fire - // Also disable .add unless we have memory (since it would have no effect) - // Abort any pending executions - lock: function() { - locked = queue = []; - if ( !memory && !firing ) { - list = memory = ""; - } - return this; - }, - locked: function() { - return !!locked; - }, - - // Call all callbacks with the given context and arguments - fireWith: function( context, args ) { - if ( !locked ) { - args = args || []; - args = [ context, args.slice ? 
args.slice() : args ]; - queue.push( args ); - if ( !firing ) { - fire(); - } - } - return this; - }, - - // Call all the callbacks with the given arguments - fire: function() { - self.fireWith( this, arguments ); - return this; - }, - - // To know if the callbacks have already been called at least once - fired: function() { - return !!fired; - } - }; - - return self; -}; - - -function Identity( v ) { - return v; -} -function Thrower( ex ) { - throw ex; -} - -function adoptValue( value, resolve, reject, noValue ) { - var method; - - try { - - // Check for promise aspect first to privilege synchronous behavior - if ( value && jQuery.isFunction( ( method = value.promise ) ) ) { - method.call( value ).done( resolve ).fail( reject ); - - // Other thenables - } else if ( value && jQuery.isFunction( ( method = value.then ) ) ) { - method.call( value, resolve, reject ); - - // Other non-thenables - } else { - - // Control `resolve` arguments by letting Array#slice cast boolean `noValue` to integer: - // * false: [ value ].slice( 0 ) => resolve( value ) - // * true: [ value ].slice( 1 ) => resolve() - resolve.apply( undefined, [ value ].slice( noValue ) ); - } - - // For Promises/A+, convert exceptions into rejections - // Since jQuery.when doesn't unwrap thenables, we can skip the extra checks appearing in - // Deferred#then to conditionally suppress rejection. - } catch ( value ) { - - // Support: Android 4.0 only - // Strict mode functions invoked without .call/.apply get global-object context - reject.apply( undefined, [ value ] ); - } -} - -jQuery.extend( { - - Deferred: function( func ) { - var tuples = [ - - // action, add listener, callbacks, - // ... 
.then handlers, argument index, [final state] - [ "notify", "progress", jQuery.Callbacks( "memory" ), - jQuery.Callbacks( "memory" ), 2 ], - [ "resolve", "done", jQuery.Callbacks( "once memory" ), - jQuery.Callbacks( "once memory" ), 0, "resolved" ], - [ "reject", "fail", jQuery.Callbacks( "once memory" ), - jQuery.Callbacks( "once memory" ), 1, "rejected" ] - ], - state = "pending", - promise = { - state: function() { - return state; - }, - always: function() { - deferred.done( arguments ).fail( arguments ); - return this; - }, - "catch": function( fn ) { - return promise.then( null, fn ); - }, - - // Keep pipe for back-compat - pipe: function( /* fnDone, fnFail, fnProgress */ ) { - var fns = arguments; - - return jQuery.Deferred( function( newDefer ) { - jQuery.each( tuples, function( i, tuple ) { - - // Map tuples (progress, done, fail) to arguments (done, fail, progress) - var fn = jQuery.isFunction( fns[ tuple[ 4 ] ] ) && fns[ tuple[ 4 ] ]; - - // deferred.progress(function() { bind to newDefer or newDefer.notify }) - // deferred.done(function() { bind to newDefer or newDefer.resolve }) - // deferred.fail(function() { bind to newDefer or newDefer.reject }) - deferred[ tuple[ 1 ] ]( function() { - var returned = fn && fn.apply( this, arguments ); - if ( returned && jQuery.isFunction( returned.promise ) ) { - returned.promise() - .progress( newDefer.notify ) - .done( newDefer.resolve ) - .fail( newDefer.reject ); - } else { - newDefer[ tuple[ 0 ] + "With" ]( - this, - fn ? 
[ returned ] : arguments - ); - } - } ); - } ); - fns = null; - } ).promise(); - }, - then: function( onFulfilled, onRejected, onProgress ) { - var maxDepth = 0; - function resolve( depth, deferred, handler, special ) { - return function() { - var that = this, - args = arguments, - mightThrow = function() { - var returned, then; - - // Support: Promises/A+ section 2.3.3.3.3 - // https://promisesaplus.com/#point-59 - // Ignore double-resolution attempts - if ( depth < maxDepth ) { - return; - } - - returned = handler.apply( that, args ); - - // Support: Promises/A+ section 2.3.1 - // https://promisesaplus.com/#point-48 - if ( returned === deferred.promise() ) { - throw new TypeError( "Thenable self-resolution" ); - } - - // Support: Promises/A+ sections 2.3.3.1, 3.5 - // https://promisesaplus.com/#point-54 - // https://promisesaplus.com/#point-75 - // Retrieve `then` only once - then = returned && - - // Support: Promises/A+ section 2.3.4 - // https://promisesaplus.com/#point-64 - // Only check objects and functions for thenability - ( typeof returned === "object" || - typeof returned === "function" ) && - returned.then; - - // Handle a returned thenable - if ( jQuery.isFunction( then ) ) { - - // Special processors (notify) just wait for resolution - if ( special ) { - then.call( - returned, - resolve( maxDepth, deferred, Identity, special ), - resolve( maxDepth, deferred, Thrower, special ) - ); - - // Normal processors (resolve) also hook into progress - } else { - - // ...and disregard older resolution values - maxDepth++; - - then.call( - returned, - resolve( maxDepth, deferred, Identity, special ), - resolve( maxDepth, deferred, Thrower, special ), - resolve( maxDepth, deferred, Identity, - deferred.notifyWith ) - ); - } - - // Handle all other returned values - } else { - - // Only substitute handlers pass on context - // and multiple values (non-spec behavior) - if ( handler !== Identity ) { - that = undefined; - args = [ returned ]; - } - - // Process the 
value(s) - // Default process is resolve - ( special || deferred.resolveWith )( that, args ); - } - }, - - // Only normal processors (resolve) catch and reject exceptions - process = special ? - mightThrow : - function() { - try { - mightThrow(); - } catch ( e ) { - - if ( jQuery.Deferred.exceptionHook ) { - jQuery.Deferred.exceptionHook( e, - process.stackTrace ); - } - - // Support: Promises/A+ section 2.3.3.3.4.1 - // https://promisesaplus.com/#point-61 - // Ignore post-resolution exceptions - if ( depth + 1 >= maxDepth ) { - - // Only substitute handlers pass on context - // and multiple values (non-spec behavior) - if ( handler !== Thrower ) { - that = undefined; - args = [ e ]; - } - - deferred.rejectWith( that, args ); - } - } - }; - - // Support: Promises/A+ section 2.3.3.3.1 - // https://promisesaplus.com/#point-57 - // Re-resolve promises immediately to dodge false rejection from - // subsequent errors - if ( depth ) { - process(); - } else { - - // Call an optional hook to record the stack, in case of exception - // since it's otherwise lost when execution goes async - if ( jQuery.Deferred.getStackHook ) { - process.stackTrace = jQuery.Deferred.getStackHook(); - } - window.setTimeout( process ); - } - }; - } - - return jQuery.Deferred( function( newDefer ) { - - // progress_handlers.add( ... ) - tuples[ 0 ][ 3 ].add( - resolve( - 0, - newDefer, - jQuery.isFunction( onProgress ) ? - onProgress : - Identity, - newDefer.notifyWith - ) - ); - - // fulfilled_handlers.add( ... ) - tuples[ 1 ][ 3 ].add( - resolve( - 0, - newDefer, - jQuery.isFunction( onFulfilled ) ? - onFulfilled : - Identity - ) - ); - - // rejected_handlers.add( ... ) - tuples[ 2 ][ 3 ].add( - resolve( - 0, - newDefer, - jQuery.isFunction( onRejected ) ? - onRejected : - Thrower - ) - ); - } ).promise(); - }, - - // Get a promise for this deferred - // If obj is provided, the promise aspect is added to the object - promise: function( obj ) { - return obj != null ? 
jQuery.extend( obj, promise ) : promise; - } - }, - deferred = {}; - - // Add list-specific methods - jQuery.each( tuples, function( i, tuple ) { - var list = tuple[ 2 ], - stateString = tuple[ 5 ]; - - // promise.progress = list.add - // promise.done = list.add - // promise.fail = list.add - promise[ tuple[ 1 ] ] = list.add; - - // Handle state - if ( stateString ) { - list.add( - function() { - - // state = "resolved" (i.e., fulfilled) - // state = "rejected" - state = stateString; - }, - - // rejected_callbacks.disable - // fulfilled_callbacks.disable - tuples[ 3 - i ][ 2 ].disable, - - // progress_callbacks.lock - tuples[ 0 ][ 2 ].lock - ); - } - - // progress_handlers.fire - // fulfilled_handlers.fire - // rejected_handlers.fire - list.add( tuple[ 3 ].fire ); - - // deferred.notify = function() { deferred.notifyWith(...) } - // deferred.resolve = function() { deferred.resolveWith(...) } - // deferred.reject = function() { deferred.rejectWith(...) } - deferred[ tuple[ 0 ] ] = function() { - deferred[ tuple[ 0 ] + "With" ]( this === deferred ? undefined : this, arguments ); - return this; - }; - - // deferred.notifyWith = list.fireWith - // deferred.resolveWith = list.fireWith - // deferred.rejectWith = list.fireWith - deferred[ tuple[ 0 ] + "With" ] = list.fireWith; - } ); - - // Make the deferred a promise - promise.promise( deferred ); - - // Call given func if any - if ( func ) { - func.call( deferred, deferred ); - } - - // All done! 
- return deferred; - }, - - // Deferred helper - when: function( singleValue ) { - var - - // count of uncompleted subordinates - remaining = arguments.length, - - // count of unprocessed arguments - i = remaining, - - // subordinate fulfillment data - resolveContexts = Array( i ), - resolveValues = slice.call( arguments ), - - // the master Deferred - master = jQuery.Deferred(), - - // subordinate callback factory - updateFunc = function( i ) { - return function( value ) { - resolveContexts[ i ] = this; - resolveValues[ i ] = arguments.length > 1 ? slice.call( arguments ) : value; - if ( !( --remaining ) ) { - master.resolveWith( resolveContexts, resolveValues ); - } - }; - }; - - // Single- and empty arguments are adopted like Promise.resolve - if ( remaining <= 1 ) { - adoptValue( singleValue, master.done( updateFunc( i ) ).resolve, master.reject, - !remaining ); - - // Use .then() to unwrap secondary thenables (cf. gh-3000) - if ( master.state() === "pending" || - jQuery.isFunction( resolveValues[ i ] && resolveValues[ i ].then ) ) { - - return master.then(); - } - } - - // Multiple arguments are aggregated like Promise.all array elements - while ( i-- ) { - adoptValue( resolveValues[ i ], updateFunc( i ), master.reject ); - } - - return master.promise(); - } -} ); - - -// These usually indicate a programmer mistake during development, -// warn about them ASAP rather than swallowing them by default. 
-var rerrorNames = /^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/; - -jQuery.Deferred.exceptionHook = function( error, stack ) { - - // Support: IE 8 - 9 only - // Console exists when dev tools are open, which can happen at any time - if ( window.console && window.console.warn && error && rerrorNames.test( error.name ) ) { - window.console.warn( "jQuery.Deferred exception: " + error.message, error.stack, stack ); - } -}; - - - - -jQuery.readyException = function( error ) { - window.setTimeout( function() { - throw error; - } ); -}; - - - - -// The deferred used on DOM ready -var readyList = jQuery.Deferred(); - -jQuery.fn.ready = function( fn ) { - - readyList - .then( fn ) - - // Wrap jQuery.readyException in a function so that the lookup - // happens at the time of error handling instead of callback - // registration. - .catch( function( error ) { - jQuery.readyException( error ); - } ); - - return this; -}; - -jQuery.extend( { - - // Is the DOM ready to be used? Set to true once it occurs. - isReady: false, - - // A counter to track how many items to wait for before - // the ready event fires. See #6781 - readyWait: 1, - - // Handle when the DOM is ready - ready: function( wait ) { - - // Abort if there are pending holds or we're already ready - if ( wait === true ? 
--jQuery.readyWait : jQuery.isReady ) { - return; - } - - // Remember that the DOM is ready - jQuery.isReady = true; - - // If a normal DOM Ready event fired, decrement, and wait if need be - if ( wait !== true && --jQuery.readyWait > 0 ) { - return; - } - - // If there are functions bound, to execute - readyList.resolveWith( document, [ jQuery ] ); - } -} ); - -jQuery.ready.then = readyList.then; - -// The ready event handler and self cleanup method -function completed() { - document.removeEventListener( "DOMContentLoaded", completed ); - window.removeEventListener( "load", completed ); - jQuery.ready(); -} - -// Catch cases where $(document).ready() is called -// after the browser event has already occurred. -// Support: IE <=9 - 10 only -// Older IE sometimes signals "interactive" too soon -if ( document.readyState === "complete" || - ( document.readyState !== "loading" && !document.documentElement.doScroll ) ) { - - // Handle it asynchronously to allow scripts the opportunity to delay ready - window.setTimeout( jQuery.ready ); - -} else { - - // Use the handy event callback - document.addEventListener( "DOMContentLoaded", completed ); - - // A fallback to window.onload, that will always work - window.addEventListener( "load", completed ); -} - - - - -// Multifunctional method to get and set values of a collection -// The value/s can optionally be executed if it's a function -var access = function( elems, fn, key, value, chainable, emptyGet, raw ) { - var i = 0, - len = elems.length, - bulk = key == null; - - // Sets many values - if ( jQuery.type( key ) === "object" ) { - chainable = true; - for ( i in key ) { - access( elems, fn, i, key[ i ], true, emptyGet, raw ); - } - - // Sets one value - } else if ( value !== undefined ) { - chainable = true; - - if ( !jQuery.isFunction( value ) ) { - raw = true; - } - - if ( bulk ) { - - // Bulk operations run against the entire set - if ( raw ) { - fn.call( elems, value ); - fn = null; - - // ...except when executing 
function values - } else { - bulk = fn; - fn = function( elem, key, value ) { - return bulk.call( jQuery( elem ), value ); - }; - } - } - - if ( fn ) { - for ( ; i < len; i++ ) { - fn( - elems[ i ], key, raw ? - value : - value.call( elems[ i ], i, fn( elems[ i ], key ) ) - ); - } - } - } - - if ( chainable ) { - return elems; - } - - // Gets - if ( bulk ) { - return fn.call( elems ); - } - - return len ? fn( elems[ 0 ], key ) : emptyGet; -}; -var acceptData = function( owner ) { - - // Accepts only: - // - Node - // - Node.ELEMENT_NODE - // - Node.DOCUMENT_NODE - // - Object - // - Any - return owner.nodeType === 1 || owner.nodeType === 9 || !( +owner.nodeType ); -}; - - - - -function Data() { - this.expando = jQuery.expando + Data.uid++; -} - -Data.uid = 1; - -Data.prototype = { - - cache: function( owner ) { - - // Check if the owner object already has a cache - var value = owner[ this.expando ]; - - // If not, create one - if ( !value ) { - value = {}; - - // We can accept data for non-element nodes in modern browsers, - // but we should not, see #8335. - // Always return an empty object. 
- if ( acceptData( owner ) ) { - - // If it is a node unlikely to be stringify-ed or looped over - // use plain assignment - if ( owner.nodeType ) { - owner[ this.expando ] = value; - - // Otherwise secure it in a non-enumerable property - // configurable must be true to allow the property to be - // deleted when data is removed - } else { - Object.defineProperty( owner, this.expando, { - value: value, - configurable: true - } ); - } - } - } - - return value; - }, - set: function( owner, data, value ) { - var prop, - cache = this.cache( owner ); - - // Handle: [ owner, key, value ] args - // Always use camelCase key (gh-2257) - if ( typeof data === "string" ) { - cache[ jQuery.camelCase( data ) ] = value; - - // Handle: [ owner, { properties } ] args - } else { - - // Copy the properties one-by-one to the cache object - for ( prop in data ) { - cache[ jQuery.camelCase( prop ) ] = data[ prop ]; - } - } - return cache; - }, - get: function( owner, key ) { - return key === undefined ? - this.cache( owner ) : - - // Always use camelCase key (gh-2257) - owner[ this.expando ] && owner[ this.expando ][ jQuery.camelCase( key ) ]; - }, - access: function( owner, key, value ) { - - // In cases where either: - // - // 1. No key was specified - // 2. A string key was specified, but no value provided - // - // Take the "read" path and allow the get method to determine - // which value to return, respectively either: - // - // 1. The entire cache object - // 2. The data stored at the key - // - if ( key === undefined || - ( ( key && typeof key === "string" ) && value === undefined ) ) { - - return this.get( owner, key ); - } - - // When the key is not a string, or both a key and value - // are specified, set or extend (existing objects) with either: - // - // 1. An object of properties - // 2. 
A key and value - // - this.set( owner, key, value ); - - // Since the "set" path can have two possible entry points - // return the expected data based on which path was taken[*] - return value !== undefined ? value : key; - }, - remove: function( owner, key ) { - var i, - cache = owner[ this.expando ]; - - if ( cache === undefined ) { - return; - } - - if ( key !== undefined ) { - - // Support array or space separated string of keys - if ( Array.isArray( key ) ) { - - // If key is an array of keys... - // We always set camelCase keys, so remove that. - key = key.map( jQuery.camelCase ); - } else { - key = jQuery.camelCase( key ); - - // If a key with the spaces exists, use it. - // Otherwise, create an array by matching non-whitespace - key = key in cache ? - [ key ] : - ( key.match( rnothtmlwhite ) || [] ); - } - - i = key.length; - - while ( i-- ) { - delete cache[ key[ i ] ]; - } - } - - // Remove the expando if there's no more data - if ( key === undefined || jQuery.isEmptyObject( cache ) ) { - - // Support: Chrome <=35 - 45 - // Webkit & Blink performance suffers when deleting properties - // from DOM nodes, so set to undefined instead - // https://bugs.chromium.org/p/chromium/issues/detail?id=378607 (bug restricted) - if ( owner.nodeType ) { - owner[ this.expando ] = undefined; - } else { - delete owner[ this.expando ]; - } - } - }, - hasData: function( owner ) { - var cache = owner[ this.expando ]; - return cache !== undefined && !jQuery.isEmptyObject( cache ); - } -}; -var dataPriv = new Data(); - -var dataUser = new Data(); - - - -// Implementation Summary -// -// 1. Enforce API surface and semantic compatibility with 1.9.x branch -// 2. Improve the module's maintainability by reducing the storage -// paths to a single mechanism. -// 3. Use the same single mechanism to support "private" and "user" data. -// 4. _Never_ expose "private" data to user code (TODO: Drop _data, _removeData) -// 5. Avoid exposing implementation details on user objects (eg. 
expando properties) -// 6. Provide a clear path for implementation upgrade to WeakMap in 2014 - -var rbrace = /^(?:\{[\w\W]*\}|\[[\w\W]*\])$/, - rmultiDash = /[A-Z]/g; - -function getData( data ) { - if ( data === "true" ) { - return true; - } - - if ( data === "false" ) { - return false; - } - - if ( data === "null" ) { - return null; - } - - // Only convert to a number if it doesn't change the string - if ( data === +data + "" ) { - return +data; - } - - if ( rbrace.test( data ) ) { - return JSON.parse( data ); - } - - return data; -} - -function dataAttr( elem, key, data ) { - var name; - - // If nothing was found internally, try to fetch any - // data from the HTML5 data-* attribute - if ( data === undefined && elem.nodeType === 1 ) { - name = "data-" + key.replace( rmultiDash, "-$&" ).toLowerCase(); - data = elem.getAttribute( name ); - - if ( typeof data === "string" ) { - try { - data = getData( data ); - } catch ( e ) {} - - // Make sure we set the data so it isn't changed later - dataUser.set( elem, key, data ); - } else { - data = undefined; - } - } - return data; -} - -jQuery.extend( { - hasData: function( elem ) { - return dataUser.hasData( elem ) || dataPriv.hasData( elem ); - }, - - data: function( elem, name, data ) { - return dataUser.access( elem, name, data ); - }, - - removeData: function( elem, name ) { - dataUser.remove( elem, name ); - }, - - // TODO: Now that all calls to _data and _removeData have been replaced - // with direct calls to dataPriv methods, these can be deprecated. 
- _data: function( elem, name, data ) { - return dataPriv.access( elem, name, data ); - }, - - _removeData: function( elem, name ) { - dataPriv.remove( elem, name ); - } -} ); - -jQuery.fn.extend( { - data: function( key, value ) { - var i, name, data, - elem = this[ 0 ], - attrs = elem && elem.attributes; - - // Gets all values - if ( key === undefined ) { - if ( this.length ) { - data = dataUser.get( elem ); - - if ( elem.nodeType === 1 && !dataPriv.get( elem, "hasDataAttrs" ) ) { - i = attrs.length; - while ( i-- ) { - - // Support: IE 11 only - // The attrs elements can be null (#14894) - if ( attrs[ i ] ) { - name = attrs[ i ].name; - if ( name.indexOf( "data-" ) === 0 ) { - name = jQuery.camelCase( name.slice( 5 ) ); - dataAttr( elem, name, data[ name ] ); - } - } - } - dataPriv.set( elem, "hasDataAttrs", true ); - } - } - - return data; - } - - // Sets multiple values - if ( typeof key === "object" ) { - return this.each( function() { - dataUser.set( this, key ); - } ); - } - - return access( this, function( value ) { - var data; - - // The calling jQuery object (element matches) is not empty - // (and therefore has an element appears at this[ 0 ]) and the - // `value` parameter was not undefined. An empty jQuery object - // will result in `undefined` for elem = this[ 0 ] which will - // throw an exception if an attempt to read a data cache is made. - if ( elem && value === undefined ) { - - // Attempt to get data from the cache - // The key will always be camelCased in Data - data = dataUser.get( elem, key ); - if ( data !== undefined ) { - return data; - } - - // Attempt to "discover" the data in - // HTML5 custom data-* attrs - data = dataAttr( elem, key ); - if ( data !== undefined ) { - return data; - } - - // We tried really hard, but the data doesn't exist. - return; - } - - // Set the data... 
- this.each( function() { - - // We always store the camelCased key - dataUser.set( this, key, value ); - } ); - }, null, value, arguments.length > 1, null, true ); - }, - - removeData: function( key ) { - return this.each( function() { - dataUser.remove( this, key ); - } ); - } -} ); - - -jQuery.extend( { - queue: function( elem, type, data ) { - var queue; - - if ( elem ) { - type = ( type || "fx" ) + "queue"; - queue = dataPriv.get( elem, type ); - - // Speed up dequeue by getting out quickly if this is just a lookup - if ( data ) { - if ( !queue || Array.isArray( data ) ) { - queue = dataPriv.access( elem, type, jQuery.makeArray( data ) ); - } else { - queue.push( data ); - } - } - return queue || []; - } - }, - - dequeue: function( elem, type ) { - type = type || "fx"; - - var queue = jQuery.queue( elem, type ), - startLength = queue.length, - fn = queue.shift(), - hooks = jQuery._queueHooks( elem, type ), - next = function() { - jQuery.dequeue( elem, type ); - }; - - // If the fx queue is dequeued, always remove the progress sentinel - if ( fn === "inprogress" ) { - fn = queue.shift(); - startLength--; - } - - if ( fn ) { - - // Add a progress sentinel to prevent the fx queue from being - // automatically dequeued - if ( type === "fx" ) { - queue.unshift( "inprogress" ); - } - - // Clear up the last queue stop function - delete hooks.stop; - fn.call( elem, next, hooks ); - } - - if ( !startLength && hooks ) { - hooks.empty.fire(); - } - }, - - // Not public - generate a queueHooks object, or return the current one - _queueHooks: function( elem, type ) { - var key = type + "queueHooks"; - return dataPriv.get( elem, key ) || dataPriv.access( elem, key, { - empty: jQuery.Callbacks( "once memory" ).add( function() { - dataPriv.remove( elem, [ type + "queue", key ] ); - } ) - } ); - } -} ); - -jQuery.fn.extend( { - queue: function( type, data ) { - var setter = 2; - - if ( typeof type !== "string" ) { - data = type; - type = "fx"; - setter--; - } - - if ( 
arguments.length < setter ) { - return jQuery.queue( this[ 0 ], type ); - } - - return data === undefined ? - this : - this.each( function() { - var queue = jQuery.queue( this, type, data ); - - // Ensure a hooks for this queue - jQuery._queueHooks( this, type ); - - if ( type === "fx" && queue[ 0 ] !== "inprogress" ) { - jQuery.dequeue( this, type ); - } - } ); - }, - dequeue: function( type ) { - return this.each( function() { - jQuery.dequeue( this, type ); - } ); - }, - clearQueue: function( type ) { - return this.queue( type || "fx", [] ); - }, - - // Get a promise resolved when queues of a certain type - // are emptied (fx is the type by default) - promise: function( type, obj ) { - var tmp, - count = 1, - defer = jQuery.Deferred(), - elements = this, - i = this.length, - resolve = function() { - if ( !( --count ) ) { - defer.resolveWith( elements, [ elements ] ); - } - }; - - if ( typeof type !== "string" ) { - obj = type; - type = undefined; - } - type = type || "fx"; - - while ( i-- ) { - tmp = dataPriv.get( elements[ i ], type + "queueHooks" ); - if ( tmp && tmp.empty ) { - count++; - tmp.empty.add( resolve ); - } - } - resolve(); - return defer.promise( obj ); - } -} ); -var pnum = ( /[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/ ).source; - -var rcssNum = new RegExp( "^(?:([+-])=|)(" + pnum + ")([a-z%]*)$", "i" ); - - -var cssExpand = [ "Top", "Right", "Bottom", "Left" ]; - -var isHiddenWithinTree = function( elem, el ) { - - // isHiddenWithinTree might be called from jQuery#filter function; - // in that case, element will be second argument - elem = el || elem; - - // Inline style trumps all - return elem.style.display === "none" || - elem.style.display === "" && - - // Otherwise, check computed style - // Support: Firefox <=43 - 45 - // Disconnected elements can have computed display: none, so first confirm that elem is - // in the document. 
- jQuery.contains( elem.ownerDocument, elem ) && - - jQuery.css( elem, "display" ) === "none"; - }; - -var swap = function( elem, options, callback, args ) { - var ret, name, - old = {}; - - // Remember the old values, and insert the new ones - for ( name in options ) { - old[ name ] = elem.style[ name ]; - elem.style[ name ] = options[ name ]; - } - - ret = callback.apply( elem, args || [] ); - - // Revert the old values - for ( name in options ) { - elem.style[ name ] = old[ name ]; - } - - return ret; -}; - - - - -function adjustCSS( elem, prop, valueParts, tween ) { - var adjusted, - scale = 1, - maxIterations = 20, - currentValue = tween ? - function() { - return tween.cur(); - } : - function() { - return jQuery.css( elem, prop, "" ); - }, - initial = currentValue(), - unit = valueParts && valueParts[ 3 ] || ( jQuery.cssNumber[ prop ] ? "" : "px" ), - - // Starting value computation is required for potential unit mismatches - initialInUnit = ( jQuery.cssNumber[ prop ] || unit !== "px" && +initial ) && - rcssNum.exec( jQuery.css( elem, prop ) ); - - if ( initialInUnit && initialInUnit[ 3 ] !== unit ) { - - // Trust units reported by jQuery.css - unit = unit || initialInUnit[ 3 ]; - - // Make sure we update the tween properties later on - valueParts = valueParts || []; - - // Iteratively approximate from a nonzero starting point - initialInUnit = +initial || 1; - - do { - - // If previous iteration zeroed out, double until we get *something*. - // Use string for doubling so we don't accidentally see scale as unchanged below - scale = scale || ".5"; - - // Adjust and apply - initialInUnit = initialInUnit / scale; - jQuery.style( elem, prop, initialInUnit + unit ); - - // Update scale, tolerating zero or NaN from tween.cur() - // Break the loop if scale is unchanged or perfect, or if we've just had enough. 
- } while ( - scale !== ( scale = currentValue() / initial ) && scale !== 1 && --maxIterations - ); - } - - if ( valueParts ) { - initialInUnit = +initialInUnit || +initial || 0; - - // Apply relative offset (+=/-=) if specified - adjusted = valueParts[ 1 ] ? - initialInUnit + ( valueParts[ 1 ] + 1 ) * valueParts[ 2 ] : - +valueParts[ 2 ]; - if ( tween ) { - tween.unit = unit; - tween.start = initialInUnit; - tween.end = adjusted; - } - } - return adjusted; -} - - -var defaultDisplayMap = {}; - -function getDefaultDisplay( elem ) { - var temp, - doc = elem.ownerDocument, - nodeName = elem.nodeName, - display = defaultDisplayMap[ nodeName ]; - - if ( display ) { - return display; - } - - temp = doc.body.appendChild( doc.createElement( nodeName ) ); - display = jQuery.css( temp, "display" ); - - temp.parentNode.removeChild( temp ); - - if ( display === "none" ) { - display = "block"; - } - defaultDisplayMap[ nodeName ] = display; - - return display; -} - -function showHide( elements, show ) { - var display, elem, - values = [], - index = 0, - length = elements.length; - - // Determine new display value for elements that need to change - for ( ; index < length; index++ ) { - elem = elements[ index ]; - if ( !elem.style ) { - continue; - } - - display = elem.style.display; - if ( show ) { - - // Since we force visibility upon cascade-hidden elements, an immediate (and slow) - // check is required in this first loop unless we have a nonempty display value (either - // inline or about-to-be-restored) - if ( display === "none" ) { - values[ index ] = dataPriv.get( elem, "display" ) || null; - if ( !values[ index ] ) { - elem.style.display = ""; - } - } - if ( elem.style.display === "" && isHiddenWithinTree( elem ) ) { - values[ index ] = getDefaultDisplay( elem ); - } - } else { - if ( display !== "none" ) { - values[ index ] = "none"; - - // Remember what we're overwriting - dataPriv.set( elem, "display", display ); - } - } - } - - // Set the display of the elements in a 
second loop to avoid constant reflow - for ( index = 0; index < length; index++ ) { - if ( values[ index ] != null ) { - elements[ index ].style.display = values[ index ]; - } - } - - return elements; -} - -jQuery.fn.extend( { - show: function() { - return showHide( this, true ); - }, - hide: function() { - return showHide( this ); - }, - toggle: function( state ) { - if ( typeof state === "boolean" ) { - return state ? this.show() : this.hide(); - } - - return this.each( function() { - if ( isHiddenWithinTree( this ) ) { - jQuery( this ).show(); - } else { - jQuery( this ).hide(); - } - } ); - } -} ); -var rcheckableType = ( /^(?:checkbox|radio)$/i ); - -var rtagName = ( /<([a-z][^\/\0>\x20\t\r\n\f]+)/i ); - -var rscriptType = ( /^$|\/(?:java|ecma)script/i ); - - - -// We have to close these tags to support XHTML (#13200) -var wrapMap = { - - // Support: IE <=9 only - option: [ 1, "" ], - - // XHTML parsers do not magically insert elements in the - // same way that tag soup parsers do. So we cannot shorten - // this by omitting or other required elements. - thead: [ 1, "", "
" ], - col: [ 2, "", "
" ], - tr: [ 2, "", "
" ], - td: [ 3, "", "
" ], - - _default: [ 0, "", "" ] -}; - -// Support: IE <=9 only -wrapMap.optgroup = wrapMap.option; - -wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead; -wrapMap.th = wrapMap.td; - - -function getAll( context, tag ) { - - // Support: IE <=9 - 11 only - // Use typeof to avoid zero-argument method invocation on host objects (#15151) - var ret; - - if ( typeof context.getElementsByTagName !== "undefined" ) { - ret = context.getElementsByTagName( tag || "*" ); - - } else if ( typeof context.querySelectorAll !== "undefined" ) { - ret = context.querySelectorAll( tag || "*" ); - - } else { - ret = []; - } - - if ( tag === undefined || tag && nodeName( context, tag ) ) { - return jQuery.merge( [ context ], ret ); - } - - return ret; -} - - -// Mark scripts as having already been evaluated -function setGlobalEval( elems, refElements ) { - var i = 0, - l = elems.length; - - for ( ; i < l; i++ ) { - dataPriv.set( - elems[ i ], - "globalEval", - !refElements || dataPriv.get( refElements[ i ], "globalEval" ) - ); - } -} - - -var rhtml = /<|&#?\w+;/; - -function buildFragment( elems, context, scripts, selection, ignored ) { - var elem, tmp, tag, wrap, contains, j, - fragment = context.createDocumentFragment(), - nodes = [], - i = 0, - l = elems.length; - - for ( ; i < l; i++ ) { - elem = elems[ i ]; - - if ( elem || elem === 0 ) { - - // Add nodes directly - if ( jQuery.type( elem ) === "object" ) { - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - jQuery.merge( nodes, elem.nodeType ? 
[ elem ] : elem ); - - // Convert non-html into a text node - } else if ( !rhtml.test( elem ) ) { - nodes.push( context.createTextNode( elem ) ); - - // Convert html into DOM nodes - } else { - tmp = tmp || fragment.appendChild( context.createElement( "div" ) ); - - // Deserialize a standard representation - tag = ( rtagName.exec( elem ) || [ "", "" ] )[ 1 ].toLowerCase(); - wrap = wrapMap[ tag ] || wrapMap._default; - tmp.innerHTML = wrap[ 1 ] + jQuery.htmlPrefilter( elem ) + wrap[ 2 ]; - - // Descend through wrappers to the right content - j = wrap[ 0 ]; - while ( j-- ) { - tmp = tmp.lastChild; - } - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - jQuery.merge( nodes, tmp.childNodes ); - - // Remember the top-level container - tmp = fragment.firstChild; - - // Ensure the created nodes are orphaned (#12392) - tmp.textContent = ""; - } - } - } - - // Remove wrapper from fragment - fragment.textContent = ""; - - i = 0; - while ( ( elem = nodes[ i++ ] ) ) { - - // Skip elements already in the context collection (trac-4087) - if ( selection && jQuery.inArray( elem, selection ) > -1 ) { - if ( ignored ) { - ignored.push( elem ); - } - continue; - } - - contains = jQuery.contains( elem.ownerDocument, elem ); - - // Append to fragment - tmp = getAll( fragment.appendChild( elem ), "script" ); - - // Preserve script evaluation history - if ( contains ) { - setGlobalEval( tmp ); - } - - // Capture executables - if ( scripts ) { - j = 0; - while ( ( elem = tmp[ j++ ] ) ) { - if ( rscriptType.test( elem.type || "" ) ) { - scripts.push( elem ); - } - } - } - } - - return fragment; -} - - -( function() { - var fragment = document.createDocumentFragment(), - div = fragment.appendChild( document.createElement( "div" ) ), - input = document.createElement( "input" ); - - // Support: Android 4.0 - 4.3 only - // Check state lost if the name is set (#11217) - // Support: Windows Web Apps (WWA) - // `name` and `type` must use 
.setAttribute for WWA (#14901) - input.setAttribute( "type", "radio" ); - input.setAttribute( "checked", "checked" ); - input.setAttribute( "name", "t" ); - - div.appendChild( input ); - - // Support: Android <=4.1 only - // Older WebKit doesn't clone checked state correctly in fragments - support.checkClone = div.cloneNode( true ).cloneNode( true ).lastChild.checked; - - // Support: IE <=11 only - // Make sure textarea (and checkbox) defaultValue is properly cloned - div.innerHTML = ""; - support.noCloneChecked = !!div.cloneNode( true ).lastChild.defaultValue; -} )(); -var documentElement = document.documentElement; - - - -var - rkeyEvent = /^key/, - rmouseEvent = /^(?:mouse|pointer|contextmenu|drag|drop)|click/, - rtypenamespace = /^([^.]*)(?:\.(.+)|)/; - -function returnTrue() { - return true; -} - -function returnFalse() { - return false; -} - -// Support: IE <=9 only -// See #13393 for more info -function safeActiveElement() { - try { - return document.activeElement; - } catch ( err ) { } -} - -function on( elem, types, selector, data, fn, one ) { - var origFn, type; - - // Types can be a map of types/handlers - if ( typeof types === "object" ) { - - // ( types-Object, selector, data ) - if ( typeof selector !== "string" ) { - - // ( types-Object, data ) - data = data || selector; - selector = undefined; - } - for ( type in types ) { - on( elem, type, selector, data, types[ type ], one ); - } - return elem; - } - - if ( data == null && fn == null ) { - - // ( types, fn ) - fn = selector; - data = selector = undefined; - } else if ( fn == null ) { - if ( typeof selector === "string" ) { - - // ( types, selector, fn ) - fn = data; - data = undefined; - } else { - - // ( types, data, fn ) - fn = data; - data = selector; - selector = undefined; - } - } - if ( fn === false ) { - fn = returnFalse; - } else if ( !fn ) { - return elem; - } - - if ( one === 1 ) { - origFn = fn; - fn = function( event ) { - - // Can use an empty set, since event contains the info - 
jQuery().off( event ); - return origFn.apply( this, arguments ); - }; - - // Use same guid so caller can remove using origFn - fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ ); - } - return elem.each( function() { - jQuery.event.add( this, types, fn, data, selector ); - } ); -} - -/* - * Helper functions for managing events -- not part of the public interface. - * Props to Dean Edwards' addEvent library for many of the ideas. - */ -jQuery.event = { - - global: {}, - - add: function( elem, types, handler, data, selector ) { - - var handleObjIn, eventHandle, tmp, - events, t, handleObj, - special, handlers, type, namespaces, origType, - elemData = dataPriv.get( elem ); - - // Don't attach events to noData or text/comment nodes (but allow plain objects) - if ( !elemData ) { - return; - } - - // Caller can pass in an object of custom data in lieu of the handler - if ( handler.handler ) { - handleObjIn = handler; - handler = handleObjIn.handler; - selector = handleObjIn.selector; - } - - // Ensure that invalid selectors throw exceptions at attach time - // Evaluate against documentElement in case elem is a non-element node (e.g., document) - if ( selector ) { - jQuery.find.matchesSelector( documentElement, selector ); - } - - // Make sure that the handler has a unique ID, used to find/remove it later - if ( !handler.guid ) { - handler.guid = jQuery.guid++; - } - - // Init the element's event structure and main handler, if this is the first - if ( !( events = elemData.events ) ) { - events = elemData.events = {}; - } - if ( !( eventHandle = elemData.handle ) ) { - eventHandle = elemData.handle = function( e ) { - - // Discard the second event of a jQuery.event.trigger() and - // when an event is called after a page has unloaded - return typeof jQuery !== "undefined" && jQuery.event.triggered !== e.type ? 
- jQuery.event.dispatch.apply( elem, arguments ) : undefined; - }; - } - - // Handle multiple events separated by a space - types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; - t = types.length; - while ( t-- ) { - tmp = rtypenamespace.exec( types[ t ] ) || []; - type = origType = tmp[ 1 ]; - namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); - - // There *must* be a type, no attaching namespace-only handlers - if ( !type ) { - continue; - } - - // If event changes its type, use the special event handlers for the changed type - special = jQuery.event.special[ type ] || {}; - - // If selector defined, determine special event api type, otherwise given type - type = ( selector ? special.delegateType : special.bindType ) || type; - - // Update special based on newly reset type - special = jQuery.event.special[ type ] || {}; - - // handleObj is passed to all event handlers - handleObj = jQuery.extend( { - type: type, - origType: origType, - data: data, - handler: handler, - guid: handler.guid, - selector: selector, - needsContext: selector && jQuery.expr.match.needsContext.test( selector ), - namespace: namespaces.join( "." 
) - }, handleObjIn ); - - // Init the event handler queue if we're the first - if ( !( handlers = events[ type ] ) ) { - handlers = events[ type ] = []; - handlers.delegateCount = 0; - - // Only use addEventListener if the special events handler returns false - if ( !special.setup || - special.setup.call( elem, data, namespaces, eventHandle ) === false ) { - - if ( elem.addEventListener ) { - elem.addEventListener( type, eventHandle ); - } - } - } - - if ( special.add ) { - special.add.call( elem, handleObj ); - - if ( !handleObj.handler.guid ) { - handleObj.handler.guid = handler.guid; - } - } - - // Add to the element's handler list, delegates in front - if ( selector ) { - handlers.splice( handlers.delegateCount++, 0, handleObj ); - } else { - handlers.push( handleObj ); - } - - // Keep track of which events have ever been used, for event optimization - jQuery.event.global[ type ] = true; - } - - }, - - // Detach an event or set of events from an element - remove: function( elem, types, handler, selector, mappedTypes ) { - - var j, origCount, tmp, - events, t, handleObj, - special, handlers, type, namespaces, origType, - elemData = dataPriv.hasData( elem ) && dataPriv.get( elem ); - - if ( !elemData || !( events = elemData.events ) ) { - return; - } - - // Once for each type.namespace in types; type may be omitted - types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; - t = types.length; - while ( t-- ) { - tmp = rtypenamespace.exec( types[ t ] ) || []; - type = origType = tmp[ 1 ]; - namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); - - // Unbind all events (on this namespace, if provided) for the element - if ( !type ) { - for ( type in events ) { - jQuery.event.remove( elem, type + types[ t ], handler, selector, true ); - } - continue; - } - - special = jQuery.event.special[ type ] || {}; - type = ( selector ? 
special.delegateType : special.bindType ) || type; - handlers = events[ type ] || []; - tmp = tmp[ 2 ] && - new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ); - - // Remove matching events - origCount = j = handlers.length; - while ( j-- ) { - handleObj = handlers[ j ]; - - if ( ( mappedTypes || origType === handleObj.origType ) && - ( !handler || handler.guid === handleObj.guid ) && - ( !tmp || tmp.test( handleObj.namespace ) ) && - ( !selector || selector === handleObj.selector || - selector === "**" && handleObj.selector ) ) { - handlers.splice( j, 1 ); - - if ( handleObj.selector ) { - handlers.delegateCount--; - } - if ( special.remove ) { - special.remove.call( elem, handleObj ); - } - } - } - - // Remove generic event handler if we removed something and no more handlers exist - // (avoids potential for endless recursion during removal of special event handlers) - if ( origCount && !handlers.length ) { - if ( !special.teardown || - special.teardown.call( elem, namespaces, elemData.handle ) === false ) { - - jQuery.removeEvent( elem, type, elemData.handle ); - } - - delete events[ type ]; - } - } - - // Remove data and the expando if it's no longer used - if ( jQuery.isEmptyObject( events ) ) { - dataPriv.remove( elem, "handle events" ); - } - }, - - dispatch: function( nativeEvent ) { - - // Make a writable jQuery.Event from the native event object - var event = jQuery.event.fix( nativeEvent ); - - var i, j, ret, matched, handleObj, handlerQueue, - args = new Array( arguments.length ), - handlers = ( dataPriv.get( this, "events" ) || {} )[ event.type ] || [], - special = jQuery.event.special[ event.type ] || {}; - - // Use the fix-ed jQuery.Event rather than the (read-only) native event - args[ 0 ] = event; - - for ( i = 1; i < arguments.length; i++ ) { - args[ i ] = arguments[ i ]; - } - - event.delegateTarget = this; - - // Call the preDispatch hook for the mapped type, and let it bail if desired - if ( special.preDispatch && 
special.preDispatch.call( this, event ) === false ) { - return; - } - - // Determine handlers - handlerQueue = jQuery.event.handlers.call( this, event, handlers ); - - // Run delegates first; they may want to stop propagation beneath us - i = 0; - while ( ( matched = handlerQueue[ i++ ] ) && !event.isPropagationStopped() ) { - event.currentTarget = matched.elem; - - j = 0; - while ( ( handleObj = matched.handlers[ j++ ] ) && - !event.isImmediatePropagationStopped() ) { - - // Triggered event must either 1) have no namespace, or 2) have namespace(s) - // a subset or equal to those in the bound event (both can have no namespace). - if ( !event.rnamespace || event.rnamespace.test( handleObj.namespace ) ) { - - event.handleObj = handleObj; - event.data = handleObj.data; - - ret = ( ( jQuery.event.special[ handleObj.origType ] || {} ).handle || - handleObj.handler ).apply( matched.elem, args ); - - if ( ret !== undefined ) { - if ( ( event.result = ret ) === false ) { - event.preventDefault(); - event.stopPropagation(); - } - } - } - } - } - - // Call the postDispatch hook for the mapped type - if ( special.postDispatch ) { - special.postDispatch.call( this, event ); - } - - return event.result; - }, - - handlers: function( event, handlers ) { - var i, handleObj, sel, matchedHandlers, matchedSelectors, - handlerQueue = [], - delegateCount = handlers.delegateCount, - cur = event.target; - - // Find delegate handlers - if ( delegateCount && - - // Support: IE <=9 - // Black-hole SVG instance trees (trac-13180) - cur.nodeType && - - // Support: Firefox <=42 - // Suppress spec-violating clicks indicating a non-primary pointer button (trac-3861) - // https://www.w3.org/TR/DOM-Level-3-Events/#event-type-click - // Support: IE 11 only - // ...but not arrow key "clicks" of radio inputs, which can have `button` -1 (gh-2343) - !( event.type === "click" && event.button >= 1 ) ) { - - for ( ; cur !== this; cur = cur.parentNode || this ) { - - // Don't check non-elements (#13208) - 
// Don't process clicks on disabled elements (#6911, #8165, #11382, #11764) - if ( cur.nodeType === 1 && !( event.type === "click" && cur.disabled === true ) ) { - matchedHandlers = []; - matchedSelectors = {}; - for ( i = 0; i < delegateCount; i++ ) { - handleObj = handlers[ i ]; - - // Don't conflict with Object.prototype properties (#13203) - sel = handleObj.selector + " "; - - if ( matchedSelectors[ sel ] === undefined ) { - matchedSelectors[ sel ] = handleObj.needsContext ? - jQuery( sel, this ).index( cur ) > -1 : - jQuery.find( sel, this, null, [ cur ] ).length; - } - if ( matchedSelectors[ sel ] ) { - matchedHandlers.push( handleObj ); - } - } - if ( matchedHandlers.length ) { - handlerQueue.push( { elem: cur, handlers: matchedHandlers } ); - } - } - } - } - - // Add the remaining (directly-bound) handlers - cur = this; - if ( delegateCount < handlers.length ) { - handlerQueue.push( { elem: cur, handlers: handlers.slice( delegateCount ) } ); - } - - return handlerQueue; - }, - - addProp: function( name, hook ) { - Object.defineProperty( jQuery.Event.prototype, name, { - enumerable: true, - configurable: true, - - get: jQuery.isFunction( hook ) ? - function() { - if ( this.originalEvent ) { - return hook( this.originalEvent ); - } - } : - function() { - if ( this.originalEvent ) { - return this.originalEvent[ name ]; - } - }, - - set: function( value ) { - Object.defineProperty( this, name, { - enumerable: true, - configurable: true, - writable: true, - value: value - } ); - } - } ); - }, - - fix: function( originalEvent ) { - return originalEvent[ jQuery.expando ] ? 
- originalEvent : - new jQuery.Event( originalEvent ); - }, - - special: { - load: { - - // Prevent triggered image.load events from bubbling to window.load - noBubble: true - }, - focus: { - - // Fire native event if possible so blur/focus sequence is correct - trigger: function() { - if ( this !== safeActiveElement() && this.focus ) { - this.focus(); - return false; - } - }, - delegateType: "focusin" - }, - blur: { - trigger: function() { - if ( this === safeActiveElement() && this.blur ) { - this.blur(); - return false; - } - }, - delegateType: "focusout" - }, - click: { - - // For checkbox, fire native event so checked state will be right - trigger: function() { - if ( this.type === "checkbox" && this.click && nodeName( this, "input" ) ) { - this.click(); - return false; - } - }, - - // For cross-browser consistency, don't fire native .click() on links - _default: function( event ) { - return nodeName( event.target, "a" ); - } - }, - - beforeunload: { - postDispatch: function( event ) { - - // Support: Firefox 20+ - // Firefox doesn't alert if the returnValue field is not set. - if ( event.result !== undefined && event.originalEvent ) { - event.originalEvent.returnValue = event.result; - } - } - } - } -}; - -jQuery.removeEvent = function( elem, type, handle ) { - - // This "if" is needed for plain objects - if ( elem.removeEventListener ) { - elem.removeEventListener( type, handle ); - } -}; - -jQuery.Event = function( src, props ) { - - // Allow instantiation without the 'new' keyword - if ( !( this instanceof jQuery.Event ) ) { - return new jQuery.Event( src, props ); - } - - // Event object - if ( src && src.type ) { - this.originalEvent = src; - this.type = src.type; - - // Events bubbling up the document may have been marked as prevented - // by a handler lower down the tree; reflect the correct value. - this.isDefaultPrevented = src.defaultPrevented || - src.defaultPrevented === undefined && - - // Support: Android <=2.3 only - src.returnValue === false ? 
- returnTrue : - returnFalse; - - // Create target properties - // Support: Safari <=6 - 7 only - // Target should not be a text node (#504, #13143) - this.target = ( src.target && src.target.nodeType === 3 ) ? - src.target.parentNode : - src.target; - - this.currentTarget = src.currentTarget; - this.relatedTarget = src.relatedTarget; - - // Event type - } else { - this.type = src; - } - - // Put explicitly provided properties onto the event object - if ( props ) { - jQuery.extend( this, props ); - } - - // Create a timestamp if incoming event doesn't have one - this.timeStamp = src && src.timeStamp || jQuery.now(); - - // Mark it as fixed - this[ jQuery.expando ] = true; -}; - -// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding -// https://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html -jQuery.Event.prototype = { - constructor: jQuery.Event, - isDefaultPrevented: returnFalse, - isPropagationStopped: returnFalse, - isImmediatePropagationStopped: returnFalse, - isSimulated: false, - - preventDefault: function() { - var e = this.originalEvent; - - this.isDefaultPrevented = returnTrue; - - if ( e && !this.isSimulated ) { - e.preventDefault(); - } - }, - stopPropagation: function() { - var e = this.originalEvent; - - this.isPropagationStopped = returnTrue; - - if ( e && !this.isSimulated ) { - e.stopPropagation(); - } - }, - stopImmediatePropagation: function() { - var e = this.originalEvent; - - this.isImmediatePropagationStopped = returnTrue; - - if ( e && !this.isSimulated ) { - e.stopImmediatePropagation(); - } - - this.stopPropagation(); - } -}; - -// Includes all common event props including KeyEvent and MouseEvent specific props -jQuery.each( { - altKey: true, - bubbles: true, - cancelable: true, - changedTouches: true, - ctrlKey: true, - detail: true, - eventPhase: true, - metaKey: true, - pageX: true, - pageY: true, - shiftKey: true, - view: true, - "char": true, - charCode: true, - key: true, - 
keyCode: true, - button: true, - buttons: true, - clientX: true, - clientY: true, - offsetX: true, - offsetY: true, - pointerId: true, - pointerType: true, - screenX: true, - screenY: true, - targetTouches: true, - toElement: true, - touches: true, - - which: function( event ) { - var button = event.button; - - // Add which for key events - if ( event.which == null && rkeyEvent.test( event.type ) ) { - return event.charCode != null ? event.charCode : event.keyCode; - } - - // Add which for click: 1 === left; 2 === middle; 3 === right - if ( !event.which && button !== undefined && rmouseEvent.test( event.type ) ) { - if ( button & 1 ) { - return 1; - } - - if ( button & 2 ) { - return 3; - } - - if ( button & 4 ) { - return 2; - } - - return 0; - } - - return event.which; - } -}, jQuery.event.addProp ); - -// Create mouseenter/leave events using mouseover/out and event-time checks -// so that event delegation works in jQuery. -// Do the same for pointerenter/pointerleave and pointerover/pointerout -// -// Support: Safari 7 only -// Safari sends mouseenter too often; see: -// https://bugs.chromium.org/p/chromium/issues/detail?id=470258 -// for the description of the bug (it existed in older Chrome versions as well). -jQuery.each( { - mouseenter: "mouseover", - mouseleave: "mouseout", - pointerenter: "pointerover", - pointerleave: "pointerout" -}, function( orig, fix ) { - jQuery.event.special[ orig ] = { - delegateType: fix, - bindType: fix, - - handle: function( event ) { - var ret, - target = this, - related = event.relatedTarget, - handleObj = event.handleObj; - - // For mouseenter/leave call the handler if related is outside the target. 
- // NB: No relatedTarget if the mouse left/entered the browser window - if ( !related || ( related !== target && !jQuery.contains( target, related ) ) ) { - event.type = handleObj.origType; - ret = handleObj.handler.apply( this, arguments ); - event.type = fix; - } - return ret; - } - }; -} ); - -jQuery.fn.extend( { - - on: function( types, selector, data, fn ) { - return on( this, types, selector, data, fn ); - }, - one: function( types, selector, data, fn ) { - return on( this, types, selector, data, fn, 1 ); - }, - off: function( types, selector, fn ) { - var handleObj, type; - if ( types && types.preventDefault && types.handleObj ) { - - // ( event ) dispatched jQuery.Event - handleObj = types.handleObj; - jQuery( types.delegateTarget ).off( - handleObj.namespace ? - handleObj.origType + "." + handleObj.namespace : - handleObj.origType, - handleObj.selector, - handleObj.handler - ); - return this; - } - if ( typeof types === "object" ) { - - // ( types-object [, selector] ) - for ( type in types ) { - this.off( type, selector, types[ type ] ); - } - return this; - } - if ( selector === false || typeof selector === "function" ) { - - // ( types [, fn] ) - fn = selector; - selector = undefined; - } - if ( fn === false ) { - fn = returnFalse; - } - return this.each( function() { - jQuery.event.remove( this, types, fn, selector ); - } ); - } -} ); - - -var - - /* eslint-disable max-len */ - - // See https://github.com/eslint/eslint/issues/3229 - rxhtmlTag = /<(?!area|br|col|embed|hr|img|input|link|meta|param)(([a-z][^\/\0>\x20\t\r\n\f]*)[^>]*)\/>/gi, - - /* eslint-enable */ - - // Support: IE <=10 - 11, Edge 12 - 13 - // In IE/Edge using regex groups here causes severe slowdowns. - // See https://connect.microsoft.com/IE/feedback/details/1736512/ - rnoInnerhtml = /\s*$/g; - -// Prefer a tbody over its parent table for containing new rows -function manipulationTarget( elem, content ) { - if ( nodeName( elem, "table" ) && - nodeName( content.nodeType !== 11 ? 
content : content.firstChild, "tr" ) ) { - - return jQuery( ">tbody", elem )[ 0 ] || elem; - } - - return elem; -} - -// Replace/restore the type attribute of script elements for safe DOM manipulation -function disableScript( elem ) { - elem.type = ( elem.getAttribute( "type" ) !== null ) + "/" + elem.type; - return elem; -} -function restoreScript( elem ) { - var match = rscriptTypeMasked.exec( elem.type ); - - if ( match ) { - elem.type = match[ 1 ]; - } else { - elem.removeAttribute( "type" ); - } - - return elem; -} - -function cloneCopyEvent( src, dest ) { - var i, l, type, pdataOld, pdataCur, udataOld, udataCur, events; - - if ( dest.nodeType !== 1 ) { - return; - } - - // 1. Copy private data: events, handlers, etc. - if ( dataPriv.hasData( src ) ) { - pdataOld = dataPriv.access( src ); - pdataCur = dataPriv.set( dest, pdataOld ); - events = pdataOld.events; - - if ( events ) { - delete pdataCur.handle; - pdataCur.events = {}; - - for ( type in events ) { - for ( i = 0, l = events[ type ].length; i < l; i++ ) { - jQuery.event.add( dest, type, events[ type ][ i ] ); - } - } - } - } - - // 2. Copy user data - if ( dataUser.hasData( src ) ) { - udataOld = dataUser.access( src ); - udataCur = jQuery.extend( {}, udataOld ); - - dataUser.set( dest, udataCur ); - } -} - -// Fix IE bugs, see support tests -function fixInput( src, dest ) { - var nodeName = dest.nodeName.toLowerCase(); - - // Fails to persist the checked state of a cloned checkbox or radio button. 
- if ( nodeName === "input" && rcheckableType.test( src.type ) ) { - dest.checked = src.checked; - - // Fails to return the selected option to the default selected state when cloning options - } else if ( nodeName === "input" || nodeName === "textarea" ) { - dest.defaultValue = src.defaultValue; - } -} - -function domManip( collection, args, callback, ignored ) { - - // Flatten any nested arrays - args = concat.apply( [], args ); - - var fragment, first, scripts, hasScripts, node, doc, - i = 0, - l = collection.length, - iNoClone = l - 1, - value = args[ 0 ], - isFunction = jQuery.isFunction( value ); - - // We can't cloneNode fragments that contain checked, in WebKit - if ( isFunction || - ( l > 1 && typeof value === "string" && - !support.checkClone && rchecked.test( value ) ) ) { - return collection.each( function( index ) { - var self = collection.eq( index ); - if ( isFunction ) { - args[ 0 ] = value.call( this, index, self.html() ); - } - domManip( self, args, callback, ignored ); - } ); - } - - if ( l ) { - fragment = buildFragment( args, collection[ 0 ].ownerDocument, false, collection, ignored ); - first = fragment.firstChild; - - if ( fragment.childNodes.length === 1 ) { - fragment = first; - } - - // Require either new content or an interest in ignored elements to invoke the callback - if ( first || ignored ) { - scripts = jQuery.map( getAll( fragment, "script" ), disableScript ); - hasScripts = scripts.length; - - // Use the original fragment for the last item - // instead of the first because it can end up - // being emptied incorrectly in certain situations (#8070). 
- for ( ; i < l; i++ ) { - node = fragment; - - if ( i !== iNoClone ) { - node = jQuery.clone( node, true, true ); - - // Keep references to cloned scripts for later restoration - if ( hasScripts ) { - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - jQuery.merge( scripts, getAll( node, "script" ) ); - } - } - - callback.call( collection[ i ], node, i ); - } - - if ( hasScripts ) { - doc = scripts[ scripts.length - 1 ].ownerDocument; - - // Reenable scripts - jQuery.map( scripts, restoreScript ); - - // Evaluate executable scripts on first document insertion - for ( i = 0; i < hasScripts; i++ ) { - node = scripts[ i ]; - if ( rscriptType.test( node.type || "" ) && - !dataPriv.access( node, "globalEval" ) && - jQuery.contains( doc, node ) ) { - - if ( node.src ) { - - // Optional AJAX dependency, but won't run scripts if not present - if ( jQuery._evalUrl ) { - jQuery._evalUrl( node.src ); - } - } else { - DOMEval( node.textContent.replace( rcleanScript, "" ), doc ); - } - } - } - } - } - } - - return collection; -} - -function remove( elem, selector, keepData ) { - var node, - nodes = selector ? 
jQuery.filter( selector, elem ) : elem, - i = 0; - - for ( ; ( node = nodes[ i ] ) != null; i++ ) { - if ( !keepData && node.nodeType === 1 ) { - jQuery.cleanData( getAll( node ) ); - } - - if ( node.parentNode ) { - if ( keepData && jQuery.contains( node.ownerDocument, node ) ) { - setGlobalEval( getAll( node, "script" ) ); - } - node.parentNode.removeChild( node ); - } - } - - return elem; -} - -jQuery.extend( { - htmlPrefilter: function( html ) { - return html.replace( rxhtmlTag, "<$1>" ); - }, - - clone: function( elem, dataAndEvents, deepDataAndEvents ) { - var i, l, srcElements, destElements, - clone = elem.cloneNode( true ), - inPage = jQuery.contains( elem.ownerDocument, elem ); - - // Fix IE cloning issues - if ( !support.noCloneChecked && ( elem.nodeType === 1 || elem.nodeType === 11 ) && - !jQuery.isXMLDoc( elem ) ) { - - // We eschew Sizzle here for performance reasons: https://jsperf.com/getall-vs-sizzle/2 - destElements = getAll( clone ); - srcElements = getAll( elem ); - - for ( i = 0, l = srcElements.length; i < l; i++ ) { - fixInput( srcElements[ i ], destElements[ i ] ); - } - } - - // Copy the events from the original to the clone - if ( dataAndEvents ) { - if ( deepDataAndEvents ) { - srcElements = srcElements || getAll( elem ); - destElements = destElements || getAll( clone ); - - for ( i = 0, l = srcElements.length; i < l; i++ ) { - cloneCopyEvent( srcElements[ i ], destElements[ i ] ); - } - } else { - cloneCopyEvent( elem, clone ); - } - } - - // Preserve script evaluation history - destElements = getAll( clone, "script" ); - if ( destElements.length > 0 ) { - setGlobalEval( destElements, !inPage && getAll( elem, "script" ) ); - } - - // Return the cloned set - return clone; - }, - - cleanData: function( elems ) { - var data, elem, type, - special = jQuery.event.special, - i = 0; - - for ( ; ( elem = elems[ i ] ) !== undefined; i++ ) { - if ( acceptData( elem ) ) { - if ( ( data = elem[ dataPriv.expando ] ) ) { - if ( data.events ) { - for ( 
type in data.events ) { - if ( special[ type ] ) { - jQuery.event.remove( elem, type ); - - // This is a shortcut to avoid jQuery.event.remove's overhead - } else { - jQuery.removeEvent( elem, type, data.handle ); - } - } - } - - // Support: Chrome <=35 - 45+ - // Assign undefined instead of using delete, see Data#remove - elem[ dataPriv.expando ] = undefined; - } - if ( elem[ dataUser.expando ] ) { - - // Support: Chrome <=35 - 45+ - // Assign undefined instead of using delete, see Data#remove - elem[ dataUser.expando ] = undefined; - } - } - } - } -} ); - -jQuery.fn.extend( { - detach: function( selector ) { - return remove( this, selector, true ); - }, - - remove: function( selector ) { - return remove( this, selector ); - }, - - text: function( value ) { - return access( this, function( value ) { - return value === undefined ? - jQuery.text( this ) : - this.empty().each( function() { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - this.textContent = value; - } - } ); - }, null, value, arguments.length ); - }, - - append: function() { - return domManip( this, arguments, function( elem ) { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - var target = manipulationTarget( this, elem ); - target.appendChild( elem ); - } - } ); - }, - - prepend: function() { - return domManip( this, arguments, function( elem ) { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - var target = manipulationTarget( this, elem ); - target.insertBefore( elem, target.firstChild ); - } - } ); - }, - - before: function() { - return domManip( this, arguments, function( elem ) { - if ( this.parentNode ) { - this.parentNode.insertBefore( elem, this ); - } - } ); - }, - - after: function() { - return domManip( this, arguments, function( elem ) { - if ( this.parentNode ) { - this.parentNode.insertBefore( elem, this.nextSibling ); - } - } ); - }, - - empty: function() { - var elem, - i = 0; - - for ( ; ( 
elem = this[ i ] ) != null; i++ ) { - if ( elem.nodeType === 1 ) { - - // Prevent memory leaks - jQuery.cleanData( getAll( elem, false ) ); - - // Remove any remaining nodes - elem.textContent = ""; - } - } - - return this; - }, - - clone: function( dataAndEvents, deepDataAndEvents ) { - dataAndEvents = dataAndEvents == null ? false : dataAndEvents; - deepDataAndEvents = deepDataAndEvents == null ? dataAndEvents : deepDataAndEvents; - - return this.map( function() { - return jQuery.clone( this, dataAndEvents, deepDataAndEvents ); - } ); - }, - - html: function( value ) { - return access( this, function( value ) { - var elem = this[ 0 ] || {}, - i = 0, - l = this.length; - - if ( value === undefined && elem.nodeType === 1 ) { - return elem.innerHTML; - } - - // See if we can take a shortcut and just use innerHTML - if ( typeof value === "string" && !rnoInnerhtml.test( value ) && - !wrapMap[ ( rtagName.exec( value ) || [ "", "" ] )[ 1 ].toLowerCase() ] ) { - - value = jQuery.htmlPrefilter( value ); - - try { - for ( ; i < l; i++ ) { - elem = this[ i ] || {}; - - // Remove element nodes and prevent memory leaks - if ( elem.nodeType === 1 ) { - jQuery.cleanData( getAll( elem, false ) ); - elem.innerHTML = value; - } - } - - elem = 0; - - // If using innerHTML throws an exception, use the fallback method - } catch ( e ) {} - } - - if ( elem ) { - this.empty().append( value ); - } - }, null, value, arguments.length ); - }, - - replaceWith: function() { - var ignored = []; - - // Make the changes, replacing each non-ignored context element with the new content - return domManip( this, arguments, function( elem ) { - var parent = this.parentNode; - - if ( jQuery.inArray( this, ignored ) < 0 ) { - jQuery.cleanData( getAll( this ) ); - if ( parent ) { - parent.replaceChild( elem, this ); - } - } - - // Force callback invocation - }, ignored ); - } -} ); - -jQuery.each( { - appendTo: "append", - prependTo: "prepend", - insertBefore: "before", - insertAfter: "after", - 
replaceAll: "replaceWith" -}, function( name, original ) { - jQuery.fn[ name ] = function( selector ) { - var elems, - ret = [], - insert = jQuery( selector ), - last = insert.length - 1, - i = 0; - - for ( ; i <= last; i++ ) { - elems = i === last ? this : this.clone( true ); - jQuery( insert[ i ] )[ original ]( elems ); - - // Support: Android <=4.0 only, PhantomJS 1 only - // .get() because push.apply(_, arraylike) throws on ancient WebKit - push.apply( ret, elems.get() ); - } - - return this.pushStack( ret ); - }; -} ); -var rmargin = ( /^margin/ ); - -var rnumnonpx = new RegExp( "^(" + pnum + ")(?!px)[a-z%]+$", "i" ); - -var getStyles = function( elem ) { - - // Support: IE <=11 only, Firefox <=30 (#15098, #14150) - // IE throws on elements created in popups - // FF meanwhile throws on frame elements through "defaultView.getComputedStyle" - var view = elem.ownerDocument.defaultView; - - if ( !view || !view.opener ) { - view = window; - } - - return view.getComputedStyle( elem ); - }; - - - -( function() { - - // Executing both pixelPosition & boxSizingReliable tests require only one layout - // so they're executed at the same time to save the second computation. 
- function computeStyleTests() { - - // This is a singleton, we need to execute it only once - if ( !div ) { - return; - } - - div.style.cssText = - "box-sizing:border-box;" + - "position:relative;display:block;" + - "margin:auto;border:1px;padding:1px;" + - "top:1%;width:50%"; - div.innerHTML = ""; - documentElement.appendChild( container ); - - var divStyle = window.getComputedStyle( div ); - pixelPositionVal = divStyle.top !== "1%"; - - // Support: Android 4.0 - 4.3 only, Firefox <=3 - 44 - reliableMarginLeftVal = divStyle.marginLeft === "2px"; - boxSizingReliableVal = divStyle.width === "4px"; - - // Support: Android 4.0 - 4.3 only - // Some styles come back with percentage values, even though they shouldn't - div.style.marginRight = "50%"; - pixelMarginRightVal = divStyle.marginRight === "4px"; - - documentElement.removeChild( container ); - - // Nullify the div so it wouldn't be stored in the memory and - // it will also be a sign that checks already performed - div = null; - } - - var pixelPositionVal, boxSizingReliableVal, pixelMarginRightVal, reliableMarginLeftVal, - container = document.createElement( "div" ), - div = document.createElement( "div" ); - - // Finish early in limited (non-browser) environments - if ( !div.style ) { - return; - } - - // Support: IE <=9 - 11 only - // Style of cloned element affects source element cloned (#8908) - div.style.backgroundClip = "content-box"; - div.cloneNode( true ).style.backgroundClip = ""; - support.clearCloneStyle = div.style.backgroundClip === "content-box"; - - container.style.cssText = "border:0;width:8px;height:0;top:0;left:-9999px;" + - "padding:0;margin-top:1px;position:absolute"; - container.appendChild( div ); - - jQuery.extend( support, { - pixelPosition: function() { - computeStyleTests(); - return pixelPositionVal; - }, - boxSizingReliable: function() { - computeStyleTests(); - return boxSizingReliableVal; - }, - pixelMarginRight: function() { - computeStyleTests(); - return pixelMarginRightVal; - 
}, - reliableMarginLeft: function() { - computeStyleTests(); - return reliableMarginLeftVal; - } - } ); -} )(); - - -function curCSS( elem, name, computed ) { - var width, minWidth, maxWidth, ret, - - // Support: Firefox 51+ - // Retrieving style before computed somehow - // fixes an issue with getting wrong values - // on detached elements - style = elem.style; - - computed = computed || getStyles( elem ); - - // getPropertyValue is needed for: - // .css('filter') (IE 9 only, #12537) - // .css('--customProperty) (#3144) - if ( computed ) { - ret = computed.getPropertyValue( name ) || computed[ name ]; - - if ( ret === "" && !jQuery.contains( elem.ownerDocument, elem ) ) { - ret = jQuery.style( elem, name ); - } - - // A tribute to the "awesome hack by Dean Edwards" - // Android Browser returns percentage for some values, - // but width seems to be reliably pixels. - // This is against the CSSOM draft spec: - // https://drafts.csswg.org/cssom/#resolved-values - if ( !support.pixelMarginRight() && rnumnonpx.test( ret ) && rmargin.test( name ) ) { - - // Remember the original values - width = style.width; - minWidth = style.minWidth; - maxWidth = style.maxWidth; - - // Put in the new values to get a computed value out - style.minWidth = style.maxWidth = style.width = ret; - ret = computed.width; - - // Revert the changed values - style.width = width; - style.minWidth = minWidth; - style.maxWidth = maxWidth; - } - } - - return ret !== undefined ? - - // Support: IE <=9 - 11 only - // IE returns zIndex value as an integer. - ret + "" : - ret; -} - - -function addGetHookIf( conditionFn, hookFn ) { - - // Define the hook, we'll check on the first run if it's really needed. - return { - get: function() { - if ( conditionFn() ) { - - // Hook not needed (or it's not possible to use it due - // to missing dependency), remove it. - delete this.get; - return; - } - - // Hook needed; redefine it so that the support test is not executed again. 
- return ( this.get = hookFn ).apply( this, arguments ); - } - }; -} - - -var - - // Swappable if display is none or starts with table - // except "table", "table-cell", or "table-caption" - // See here for display values: https://developer.mozilla.org/en-US/docs/CSS/display - rdisplayswap = /^(none|table(?!-c[ea]).+)/, - rcustomProp = /^--/, - cssShow = { position: "absolute", visibility: "hidden", display: "block" }, - cssNormalTransform = { - letterSpacing: "0", - fontWeight: "400" - }, - - cssPrefixes = [ "Webkit", "Moz", "ms" ], - emptyStyle = document.createElement( "div" ).style; - -// Return a css property mapped to a potentially vendor prefixed property -function vendorPropName( name ) { - - // Shortcut for names that are not vendor prefixed - if ( name in emptyStyle ) { - return name; - } - - // Check for vendor prefixed names - var capName = name[ 0 ].toUpperCase() + name.slice( 1 ), - i = cssPrefixes.length; - - while ( i-- ) { - name = cssPrefixes[ i ] + capName; - if ( name in emptyStyle ) { - return name; - } - } -} - -// Return a property mapped along what jQuery.cssProps suggests or to -// a vendor prefixed property. -function finalPropName( name ) { - var ret = jQuery.cssProps[ name ]; - if ( !ret ) { - ret = jQuery.cssProps[ name ] = vendorPropName( name ) || name; - } - return ret; -} - -function setPositiveNumber( elem, value, subtract ) { - - // Any relative (+/-) values have already been - // normalized at this point - var matches = rcssNum.exec( value ); - return matches ? - - // Guard against undefined "subtract", e.g., when used as in cssHooks - Math.max( 0, matches[ 2 ] - ( subtract || 0 ) ) + ( matches[ 3 ] || "px" ) : - value; -} - -function augmentWidthOrHeight( elem, name, extra, isBorderBox, styles ) { - var i, - val = 0; - - // If we already have the right measurement, avoid augmentation - if ( extra === ( isBorderBox ? 
"border" : "content" ) ) { - i = 4; - - // Otherwise initialize for horizontal or vertical properties - } else { - i = name === "width" ? 1 : 0; - } - - for ( ; i < 4; i += 2 ) { - - // Both box models exclude margin, so add it if we want it - if ( extra === "margin" ) { - val += jQuery.css( elem, extra + cssExpand[ i ], true, styles ); - } - - if ( isBorderBox ) { - - // border-box includes padding, so remove it if we want content - if ( extra === "content" ) { - val -= jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); - } - - // At this point, extra isn't border nor margin, so remove border - if ( extra !== "margin" ) { - val -= jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - } - } else { - - // At this point, extra isn't content, so add padding - val += jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); - - // At this point, extra isn't content nor padding, so add border - if ( extra !== "padding" ) { - val += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - } - } - } - - return val; -} - -function getWidthOrHeight( elem, name, extra ) { - - // Start with computed style - var valueIsBorderBox, - styles = getStyles( elem ), - val = curCSS( elem, name, styles ), - isBorderBox = jQuery.css( elem, "boxSizing", false, styles ) === "border-box"; - - // Computed unit is not pixels. Stop here and return. 
- if ( rnumnonpx.test( val ) ) { - return val; - } - - // Check for style in case a browser which returns unreliable values - // for getComputedStyle silently falls back to the reliable elem.style - valueIsBorderBox = isBorderBox && - ( support.boxSizingReliable() || val === elem.style[ name ] ); - - // Fall back to offsetWidth/Height when value is "auto" - // This happens for inline elements with no explicit setting (gh-3571) - if ( val === "auto" ) { - val = elem[ "offset" + name[ 0 ].toUpperCase() + name.slice( 1 ) ]; - } - - // Normalize "", auto, and prepare for extra - val = parseFloat( val ) || 0; - - // Use the active box-sizing model to add/subtract irrelevant styles - return ( val + - augmentWidthOrHeight( - elem, - name, - extra || ( isBorderBox ? "border" : "content" ), - valueIsBorderBox, - styles - ) - ) + "px"; -} - -jQuery.extend( { - - // Add in style property hooks for overriding the default - // behavior of getting and setting a style property - cssHooks: { - opacity: { - get: function( elem, computed ) { - if ( computed ) { - - // We should always get a number back from opacity - var ret = curCSS( elem, "opacity" ); - return ret === "" ? 
"1" : ret; - } - } - } - }, - - // Don't automatically add "px" to these possibly-unitless properties - cssNumber: { - "animationIterationCount": true, - "columnCount": true, - "fillOpacity": true, - "flexGrow": true, - "flexShrink": true, - "fontWeight": true, - "lineHeight": true, - "opacity": true, - "order": true, - "orphans": true, - "widows": true, - "zIndex": true, - "zoom": true - }, - - // Add in properties whose names you wish to fix before - // setting or getting the value - cssProps: { - "float": "cssFloat" - }, - - // Get and set the style property on a DOM Node - style: function( elem, name, value, extra ) { - - // Don't set styles on text and comment nodes - if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 || !elem.style ) { - return; - } - - // Make sure that we're working with the right name - var ret, type, hooks, - origName = jQuery.camelCase( name ), - isCustomProp = rcustomProp.test( name ), - style = elem.style; - - // Make sure that we're working with the right name. We don't - // want to query the value if it is a CSS custom property - // since they are user-defined. - if ( !isCustomProp ) { - name = finalPropName( origName ); - } - - // Gets hook for the prefixed version, then unprefixed version - hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; - - // Check if we're setting a value - if ( value !== undefined ) { - type = typeof value; - - // Convert "+=" or "-=" to relative numbers (#7345) - if ( type === "string" && ( ret = rcssNum.exec( value ) ) && ret[ 1 ] ) { - value = adjustCSS( elem, name, ret ); - - // Fixes bug #9237 - type = "number"; - } - - // Make sure that null and NaN values aren't set (#7116) - if ( value == null || value !== value ) { - return; - } - - // If a number was passed in, add the unit (except for certain CSS properties) - if ( type === "number" ) { - value += ret && ret[ 3 ] || ( jQuery.cssNumber[ origName ] ? 
"" : "px" ); - } - - // background-* props affect original clone's values - if ( !support.clearCloneStyle && value === "" && name.indexOf( "background" ) === 0 ) { - style[ name ] = "inherit"; - } - - // If a hook was provided, use that value, otherwise just set the specified value - if ( !hooks || !( "set" in hooks ) || - ( value = hooks.set( elem, value, extra ) ) !== undefined ) { - - if ( isCustomProp ) { - style.setProperty( name, value ); - } else { - style[ name ] = value; - } - } - - } else { - - // If a hook was provided get the non-computed value from there - if ( hooks && "get" in hooks && - ( ret = hooks.get( elem, false, extra ) ) !== undefined ) { - - return ret; - } - - // Otherwise just get the value from the style object - return style[ name ]; - } - }, - - css: function( elem, name, extra, styles ) { - var val, num, hooks, - origName = jQuery.camelCase( name ), - isCustomProp = rcustomProp.test( name ); - - // Make sure that we're working with the right name. We don't - // want to modify the value if it is a CSS custom property - // since they are user-defined. - if ( !isCustomProp ) { - name = finalPropName( origName ); - } - - // Try prefixed name followed by the unprefixed name - hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; - - // If a hook was provided get the computed value from there - if ( hooks && "get" in hooks ) { - val = hooks.get( elem, true, extra ); - } - - // Otherwise, if a way to get the computed value exists, use that - if ( val === undefined ) { - val = curCSS( elem, name, styles ); - } - - // Convert "normal" to computed value - if ( val === "normal" && name in cssNormalTransform ) { - val = cssNormalTransform[ name ]; - } - - // Make numeric if forced or a qualifier was provided and val looks numeric - if ( extra === "" || extra ) { - num = parseFloat( val ); - return extra === true || isFinite( num ) ? 
num || 0 : val; - } - - return val; - } -} ); - -jQuery.each( [ "height", "width" ], function( i, name ) { - jQuery.cssHooks[ name ] = { - get: function( elem, computed, extra ) { - if ( computed ) { - - // Certain elements can have dimension info if we invisibly show them - // but it must have a current display style that would benefit - return rdisplayswap.test( jQuery.css( elem, "display" ) ) && - - // Support: Safari 8+ - // Table columns in Safari have non-zero offsetWidth & zero - // getBoundingClientRect().width unless display is changed. - // Support: IE <=11 only - // Running getBoundingClientRect on a disconnected node - // in IE throws an error. - ( !elem.getClientRects().length || !elem.getBoundingClientRect().width ) ? - swap( elem, cssShow, function() { - return getWidthOrHeight( elem, name, extra ); - } ) : - getWidthOrHeight( elem, name, extra ); - } - }, - - set: function( elem, value, extra ) { - var matches, - styles = extra && getStyles( elem ), - subtract = extra && augmentWidthOrHeight( - elem, - name, - extra, - jQuery.css( elem, "boxSizing", false, styles ) === "border-box", - styles - ); - - // Convert to pixels if value adjustment is needed - if ( subtract && ( matches = rcssNum.exec( value ) ) && - ( matches[ 3 ] || "px" ) !== "px" ) { - - elem.style[ name ] = value; - value = jQuery.css( elem, name ); - } - - return setPositiveNumber( elem, value, subtract ); - } - }; -} ); - -jQuery.cssHooks.marginLeft = addGetHookIf( support.reliableMarginLeft, - function( elem, computed ) { - if ( computed ) { - return ( parseFloat( curCSS( elem, "marginLeft" ) ) || - elem.getBoundingClientRect().left - - swap( elem, { marginLeft: 0 }, function() { - return elem.getBoundingClientRect().left; - } ) - ) + "px"; - } - } -); - -// These hooks are used by animate to expand properties -jQuery.each( { - margin: "", - padding: "", - border: "Width" -}, function( prefix, suffix ) { - jQuery.cssHooks[ prefix + suffix ] = { - expand: function( value ) { - var i 
= 0, - expanded = {}, - - // Assumes a single number if not a string - parts = typeof value === "string" ? value.split( " " ) : [ value ]; - - for ( ; i < 4; i++ ) { - expanded[ prefix + cssExpand[ i ] + suffix ] = - parts[ i ] || parts[ i - 2 ] || parts[ 0 ]; - } - - return expanded; - } - }; - - if ( !rmargin.test( prefix ) ) { - jQuery.cssHooks[ prefix + suffix ].set = setPositiveNumber; - } -} ); - -jQuery.fn.extend( { - css: function( name, value ) { - return access( this, function( elem, name, value ) { - var styles, len, - map = {}, - i = 0; - - if ( Array.isArray( name ) ) { - styles = getStyles( elem ); - len = name.length; - - for ( ; i < len; i++ ) { - map[ name[ i ] ] = jQuery.css( elem, name[ i ], false, styles ); - } - - return map; - } - - return value !== undefined ? - jQuery.style( elem, name, value ) : - jQuery.css( elem, name ); - }, name, value, arguments.length > 1 ); - } -} ); - - -function Tween( elem, options, prop, end, easing ) { - return new Tween.prototype.init( elem, options, prop, end, easing ); -} -jQuery.Tween = Tween; - -Tween.prototype = { - constructor: Tween, - init: function( elem, options, prop, end, easing, unit ) { - this.elem = elem; - this.prop = prop; - this.easing = easing || jQuery.easing._default; - this.options = options; - this.start = this.now = this.cur(); - this.end = end; - this.unit = unit || ( jQuery.cssNumber[ prop ] ? "" : "px" ); - }, - cur: function() { - var hooks = Tween.propHooks[ this.prop ]; - - return hooks && hooks.get ? 
- hooks.get( this ) : - Tween.propHooks._default.get( this ); - }, - run: function( percent ) { - var eased, - hooks = Tween.propHooks[ this.prop ]; - - if ( this.options.duration ) { - this.pos = eased = jQuery.easing[ this.easing ]( - percent, this.options.duration * percent, 0, 1, this.options.duration - ); - } else { - this.pos = eased = percent; - } - this.now = ( this.end - this.start ) * eased + this.start; - - if ( this.options.step ) { - this.options.step.call( this.elem, this.now, this ); - } - - if ( hooks && hooks.set ) { - hooks.set( this ); - } else { - Tween.propHooks._default.set( this ); - } - return this; - } -}; - -Tween.prototype.init.prototype = Tween.prototype; - -Tween.propHooks = { - _default: { - get: function( tween ) { - var result; - - // Use a property on the element directly when it is not a DOM element, - // or when there is no matching style property that exists. - if ( tween.elem.nodeType !== 1 || - tween.elem[ tween.prop ] != null && tween.elem.style[ tween.prop ] == null ) { - return tween.elem[ tween.prop ]; - } - - // Passing an empty string as a 3rd parameter to .css will automatically - // attempt a parseFloat and fallback to a string if the parse fails. - // Simple values such as "10px" are parsed to Float; - // complex values such as "rotate(1rad)" are returned as-is. - result = jQuery.css( tween.elem, tween.prop, "" ); - - // Empty strings, null, undefined and "auto" are converted to 0. - return !result || result === "auto" ? 0 : result; - }, - set: function( tween ) { - - // Use step hook for back compat. - // Use cssHook if its there. - // Use .style if available and use plain properties where available. 
- if ( jQuery.fx.step[ tween.prop ] ) { - jQuery.fx.step[ tween.prop ]( tween ); - } else if ( tween.elem.nodeType === 1 && - ( tween.elem.style[ jQuery.cssProps[ tween.prop ] ] != null || - jQuery.cssHooks[ tween.prop ] ) ) { - jQuery.style( tween.elem, tween.prop, tween.now + tween.unit ); - } else { - tween.elem[ tween.prop ] = tween.now; - } - } - } -}; - -// Support: IE <=9 only -// Panic based approach to setting things on disconnected nodes -Tween.propHooks.scrollTop = Tween.propHooks.scrollLeft = { - set: function( tween ) { - if ( tween.elem.nodeType && tween.elem.parentNode ) { - tween.elem[ tween.prop ] = tween.now; - } - } -}; - -jQuery.easing = { - linear: function( p ) { - return p; - }, - swing: function( p ) { - return 0.5 - Math.cos( p * Math.PI ) / 2; - }, - _default: "swing" -}; - -jQuery.fx = Tween.prototype.init; - -// Back compat <1.8 extension point -jQuery.fx.step = {}; - - - - -var - fxNow, inProgress, - rfxtypes = /^(?:toggle|show|hide)$/, - rrun = /queueHooks$/; - -function schedule() { - if ( inProgress ) { - if ( document.hidden === false && window.requestAnimationFrame ) { - window.requestAnimationFrame( schedule ); - } else { - window.setTimeout( schedule, jQuery.fx.interval ); - } - - jQuery.fx.tick(); - } -} - -// Animations created synchronously will run synchronously -function createFxNow() { - window.setTimeout( function() { - fxNow = undefined; - } ); - return ( fxNow = jQuery.now() ); -} - -// Generate parameters to create a standard animation -function genFx( type, includeWidth ) { - var which, - i = 0, - attrs = { height: type }; - - // If we include width, step value is 1 to do all cssExpand values, - // otherwise step value is 2 to skip over Left and Right - includeWidth = includeWidth ? 
1 : 0; - for ( ; i < 4; i += 2 - includeWidth ) { - which = cssExpand[ i ]; - attrs[ "margin" + which ] = attrs[ "padding" + which ] = type; - } - - if ( includeWidth ) { - attrs.opacity = attrs.width = type; - } - - return attrs; -} - -function createTween( value, prop, animation ) { - var tween, - collection = ( Animation.tweeners[ prop ] || [] ).concat( Animation.tweeners[ "*" ] ), - index = 0, - length = collection.length; - for ( ; index < length; index++ ) { - if ( ( tween = collection[ index ].call( animation, prop, value ) ) ) { - - // We're done with this property - return tween; - } - } -} - -function defaultPrefilter( elem, props, opts ) { - var prop, value, toggle, hooks, oldfire, propTween, restoreDisplay, display, - isBox = "width" in props || "height" in props, - anim = this, - orig = {}, - style = elem.style, - hidden = elem.nodeType && isHiddenWithinTree( elem ), - dataShow = dataPriv.get( elem, "fxshow" ); - - // Queue-skipping animations hijack the fx hooks - if ( !opts.queue ) { - hooks = jQuery._queueHooks( elem, "fx" ); - if ( hooks.unqueued == null ) { - hooks.unqueued = 0; - oldfire = hooks.empty.fire; - hooks.empty.fire = function() { - if ( !hooks.unqueued ) { - oldfire(); - } - }; - } - hooks.unqueued++; - - anim.always( function() { - - // Ensure the complete handler is called before this completes - anim.always( function() { - hooks.unqueued--; - if ( !jQuery.queue( elem, "fx" ).length ) { - hooks.empty.fire(); - } - } ); - } ); - } - - // Detect show/hide animations - for ( prop in props ) { - value = props[ prop ]; - if ( rfxtypes.test( value ) ) { - delete props[ prop ]; - toggle = toggle || value === "toggle"; - if ( value === ( hidden ? 
"hide" : "show" ) ) { - - // Pretend to be hidden if this is a "show" and - // there is still data from a stopped show/hide - if ( value === "show" && dataShow && dataShow[ prop ] !== undefined ) { - hidden = true; - - // Ignore all other no-op show/hide data - } else { - continue; - } - } - orig[ prop ] = dataShow && dataShow[ prop ] || jQuery.style( elem, prop ); - } - } - - // Bail out if this is a no-op like .hide().hide() - propTween = !jQuery.isEmptyObject( props ); - if ( !propTween && jQuery.isEmptyObject( orig ) ) { - return; - } - - // Restrict "overflow" and "display" styles during box animations - if ( isBox && elem.nodeType === 1 ) { - - // Support: IE <=9 - 11, Edge 12 - 13 - // Record all 3 overflow attributes because IE does not infer the shorthand - // from identically-valued overflowX and overflowY - opts.overflow = [ style.overflow, style.overflowX, style.overflowY ]; - - // Identify a display type, preferring old show/hide data over the CSS cascade - restoreDisplay = dataShow && dataShow.display; - if ( restoreDisplay == null ) { - restoreDisplay = dataPriv.get( elem, "display" ); - } - display = jQuery.css( elem, "display" ); - if ( display === "none" ) { - if ( restoreDisplay ) { - display = restoreDisplay; - } else { - - // Get nonempty value(s) by temporarily forcing visibility - showHide( [ elem ], true ); - restoreDisplay = elem.style.display || restoreDisplay; - display = jQuery.css( elem, "display" ); - showHide( [ elem ] ); - } - } - - // Animate inline elements as inline-block - if ( display === "inline" || display === "inline-block" && restoreDisplay != null ) { - if ( jQuery.css( elem, "float" ) === "none" ) { - - // Restore the original display value at the end of pure show/hide animations - if ( !propTween ) { - anim.done( function() { - style.display = restoreDisplay; - } ); - if ( restoreDisplay == null ) { - display = style.display; - restoreDisplay = display === "none" ? 
"" : display; - } - } - style.display = "inline-block"; - } - } - } - - if ( opts.overflow ) { - style.overflow = "hidden"; - anim.always( function() { - style.overflow = opts.overflow[ 0 ]; - style.overflowX = opts.overflow[ 1 ]; - style.overflowY = opts.overflow[ 2 ]; - } ); - } - - // Implement show/hide animations - propTween = false; - for ( prop in orig ) { - - // General show/hide setup for this element animation - if ( !propTween ) { - if ( dataShow ) { - if ( "hidden" in dataShow ) { - hidden = dataShow.hidden; - } - } else { - dataShow = dataPriv.access( elem, "fxshow", { display: restoreDisplay } ); - } - - // Store hidden/visible for toggle so `.stop().toggle()` "reverses" - if ( toggle ) { - dataShow.hidden = !hidden; - } - - // Show elements before animating them - if ( hidden ) { - showHide( [ elem ], true ); - } - - /* eslint-disable no-loop-func */ - - anim.done( function() { - - /* eslint-enable no-loop-func */ - - // The final step of a "hide" animation is actually hiding the element - if ( !hidden ) { - showHide( [ elem ] ); - } - dataPriv.remove( elem, "fxshow" ); - for ( prop in orig ) { - jQuery.style( elem, prop, orig[ prop ] ); - } - } ); - } - - // Per-property setup - propTween = createTween( hidden ? 
dataShow[ prop ] : 0, prop, anim ); - if ( !( prop in dataShow ) ) { - dataShow[ prop ] = propTween.start; - if ( hidden ) { - propTween.end = propTween.start; - propTween.start = 0; - } - } - } -} - -function propFilter( props, specialEasing ) { - var index, name, easing, value, hooks; - - // camelCase, specialEasing and expand cssHook pass - for ( index in props ) { - name = jQuery.camelCase( index ); - easing = specialEasing[ name ]; - value = props[ index ]; - if ( Array.isArray( value ) ) { - easing = value[ 1 ]; - value = props[ index ] = value[ 0 ]; - } - - if ( index !== name ) { - props[ name ] = value; - delete props[ index ]; - } - - hooks = jQuery.cssHooks[ name ]; - if ( hooks && "expand" in hooks ) { - value = hooks.expand( value ); - delete props[ name ]; - - // Not quite $.extend, this won't overwrite existing keys. - // Reusing 'index' because we have the correct "name" - for ( index in value ) { - if ( !( index in props ) ) { - props[ index ] = value[ index ]; - specialEasing[ index ] = easing; - } - } - } else { - specialEasing[ name ] = easing; - } - } -} - -function Animation( elem, properties, options ) { - var result, - stopped, - index = 0, - length = Animation.prefilters.length, - deferred = jQuery.Deferred().always( function() { - - // Don't match elem in the :animated selector - delete tick.elem; - } ), - tick = function() { - if ( stopped ) { - return false; - } - var currentTime = fxNow || createFxNow(), - remaining = Math.max( 0, animation.startTime + animation.duration - currentTime ), - - // Support: Android 2.3 only - // Archaic crash bug won't allow us to use `1 - ( 0.5 || 0 )` (#12497) - temp = remaining / animation.duration || 0, - percent = 1 - temp, - index = 0, - length = animation.tweens.length; - - for ( ; index < length; index++ ) { - animation.tweens[ index ].run( percent ); - } - - deferred.notifyWith( elem, [ animation, percent, remaining ] ); - - // If there's more to do, yield - if ( percent < 1 && length ) { - return 
remaining; - } - - // If this was an empty animation, synthesize a final progress notification - if ( !length ) { - deferred.notifyWith( elem, [ animation, 1, 0 ] ); - } - - // Resolve the animation and report its conclusion - deferred.resolveWith( elem, [ animation ] ); - return false; - }, - animation = deferred.promise( { - elem: elem, - props: jQuery.extend( {}, properties ), - opts: jQuery.extend( true, { - specialEasing: {}, - easing: jQuery.easing._default - }, options ), - originalProperties: properties, - originalOptions: options, - startTime: fxNow || createFxNow(), - duration: options.duration, - tweens: [], - createTween: function( prop, end ) { - var tween = jQuery.Tween( elem, animation.opts, prop, end, - animation.opts.specialEasing[ prop ] || animation.opts.easing ); - animation.tweens.push( tween ); - return tween; - }, - stop: function( gotoEnd ) { - var index = 0, - - // If we are going to the end, we want to run all the tweens - // otherwise we skip this part - length = gotoEnd ? 
animation.tweens.length : 0; - if ( stopped ) { - return this; - } - stopped = true; - for ( ; index < length; index++ ) { - animation.tweens[ index ].run( 1 ); - } - - // Resolve when we played the last frame; otherwise, reject - if ( gotoEnd ) { - deferred.notifyWith( elem, [ animation, 1, 0 ] ); - deferred.resolveWith( elem, [ animation, gotoEnd ] ); - } else { - deferred.rejectWith( elem, [ animation, gotoEnd ] ); - } - return this; - } - } ), - props = animation.props; - - propFilter( props, animation.opts.specialEasing ); - - for ( ; index < length; index++ ) { - result = Animation.prefilters[ index ].call( animation, elem, props, animation.opts ); - if ( result ) { - if ( jQuery.isFunction( result.stop ) ) { - jQuery._queueHooks( animation.elem, animation.opts.queue ).stop = - jQuery.proxy( result.stop, result ); - } - return result; - } - } - - jQuery.map( props, createTween, animation ); - - if ( jQuery.isFunction( animation.opts.start ) ) { - animation.opts.start.call( elem, animation ); - } - - // Attach callbacks from options - animation - .progress( animation.opts.progress ) - .done( animation.opts.done, animation.opts.complete ) - .fail( animation.opts.fail ) - .always( animation.opts.always ); - - jQuery.fx.timer( - jQuery.extend( tick, { - elem: elem, - anim: animation, - queue: animation.opts.queue - } ) - ); - - return animation; -} - -jQuery.Animation = jQuery.extend( Animation, { - - tweeners: { - "*": [ function( prop, value ) { - var tween = this.createTween( prop, value ); - adjustCSS( tween.elem, prop, rcssNum.exec( value ), tween ); - return tween; - } ] - }, - - tweener: function( props, callback ) { - if ( jQuery.isFunction( props ) ) { - callback = props; - props = [ "*" ]; - } else { - props = props.match( rnothtmlwhite ); - } - - var prop, - index = 0, - length = props.length; - - for ( ; index < length; index++ ) { - prop = props[ index ]; - Animation.tweeners[ prop ] = Animation.tweeners[ prop ] || []; - Animation.tweeners[ prop 
].unshift( callback ); - } - }, - - prefilters: [ defaultPrefilter ], - - prefilter: function( callback, prepend ) { - if ( prepend ) { - Animation.prefilters.unshift( callback ); - } else { - Animation.prefilters.push( callback ); - } - } -} ); - -jQuery.speed = function( speed, easing, fn ) { - var opt = speed && typeof speed === "object" ? jQuery.extend( {}, speed ) : { - complete: fn || !fn && easing || - jQuery.isFunction( speed ) && speed, - duration: speed, - easing: fn && easing || easing && !jQuery.isFunction( easing ) && easing - }; - - // Go to the end state if fx are off - if ( jQuery.fx.off ) { - opt.duration = 0; - - } else { - if ( typeof opt.duration !== "number" ) { - if ( opt.duration in jQuery.fx.speeds ) { - opt.duration = jQuery.fx.speeds[ opt.duration ]; - - } else { - opt.duration = jQuery.fx.speeds._default; - } - } - } - - // Normalize opt.queue - true/undefined/null -> "fx" - if ( opt.queue == null || opt.queue === true ) { - opt.queue = "fx"; - } - - // Queueing - opt.old = opt.complete; - - opt.complete = function() { - if ( jQuery.isFunction( opt.old ) ) { - opt.old.call( this ); - } - - if ( opt.queue ) { - jQuery.dequeue( this, opt.queue ); - } - }; - - return opt; -}; - -jQuery.fn.extend( { - fadeTo: function( speed, to, easing, callback ) { - - // Show any hidden elements after setting opacity to 0 - return this.filter( isHiddenWithinTree ).css( "opacity", 0 ).show() - - // Animate to the value specified - .end().animate( { opacity: to }, speed, easing, callback ); - }, - animate: function( prop, speed, easing, callback ) { - var empty = jQuery.isEmptyObject( prop ), - optall = jQuery.speed( speed, easing, callback ), - doAnimation = function() { - - // Operate on a copy of prop so per-property easing won't be lost - var anim = Animation( this, jQuery.extend( {}, prop ), optall ); - - // Empty animations, or finishing resolves immediately - if ( empty || dataPriv.get( this, "finish" ) ) { - anim.stop( true ); - } - }; - 
doAnimation.finish = doAnimation; - - return empty || optall.queue === false ? - this.each( doAnimation ) : - this.queue( optall.queue, doAnimation ); - }, - stop: function( type, clearQueue, gotoEnd ) { - var stopQueue = function( hooks ) { - var stop = hooks.stop; - delete hooks.stop; - stop( gotoEnd ); - }; - - if ( typeof type !== "string" ) { - gotoEnd = clearQueue; - clearQueue = type; - type = undefined; - } - if ( clearQueue && type !== false ) { - this.queue( type || "fx", [] ); - } - - return this.each( function() { - var dequeue = true, - index = type != null && type + "queueHooks", - timers = jQuery.timers, - data = dataPriv.get( this ); - - if ( index ) { - if ( data[ index ] && data[ index ].stop ) { - stopQueue( data[ index ] ); - } - } else { - for ( index in data ) { - if ( data[ index ] && data[ index ].stop && rrun.test( index ) ) { - stopQueue( data[ index ] ); - } - } - } - - for ( index = timers.length; index--; ) { - if ( timers[ index ].elem === this && - ( type == null || timers[ index ].queue === type ) ) { - - timers[ index ].anim.stop( gotoEnd ); - dequeue = false; - timers.splice( index, 1 ); - } - } - - // Start the next in the queue if the last step wasn't forced. - // Timers currently will call their complete callbacks, which - // will dequeue but only if they were gotoEnd. - if ( dequeue || !gotoEnd ) { - jQuery.dequeue( this, type ); - } - } ); - }, - finish: function( type ) { - if ( type !== false ) { - type = type || "fx"; - } - return this.each( function() { - var index, - data = dataPriv.get( this ), - queue = data[ type + "queue" ], - hooks = data[ type + "queueHooks" ], - timers = jQuery.timers, - length = queue ? 
queue.length : 0; - - // Enable finishing flag on private data - data.finish = true; - - // Empty the queue first - jQuery.queue( this, type, [] ); - - if ( hooks && hooks.stop ) { - hooks.stop.call( this, true ); - } - - // Look for any active animations, and finish them - for ( index = timers.length; index--; ) { - if ( timers[ index ].elem === this && timers[ index ].queue === type ) { - timers[ index ].anim.stop( true ); - timers.splice( index, 1 ); - } - } - - // Look for any animations in the old queue and finish them - for ( index = 0; index < length; index++ ) { - if ( queue[ index ] && queue[ index ].finish ) { - queue[ index ].finish.call( this ); - } - } - - // Turn off finishing flag - delete data.finish; - } ); - } -} ); - -jQuery.each( [ "toggle", "show", "hide" ], function( i, name ) { - var cssFn = jQuery.fn[ name ]; - jQuery.fn[ name ] = function( speed, easing, callback ) { - return speed == null || typeof speed === "boolean" ? - cssFn.apply( this, arguments ) : - this.animate( genFx( name, true ), speed, easing, callback ); - }; -} ); - -// Generate shortcuts for custom animations -jQuery.each( { - slideDown: genFx( "show" ), - slideUp: genFx( "hide" ), - slideToggle: genFx( "toggle" ), - fadeIn: { opacity: "show" }, - fadeOut: { opacity: "hide" }, - fadeToggle: { opacity: "toggle" } -}, function( name, props ) { - jQuery.fn[ name ] = function( speed, easing, callback ) { - return this.animate( props, speed, easing, callback ); - }; -} ); - -jQuery.timers = []; -jQuery.fx.tick = function() { - var timer, - i = 0, - timers = jQuery.timers; - - fxNow = jQuery.now(); - - for ( ; i < timers.length; i++ ) { - timer = timers[ i ]; - - // Run the timer and safely remove it when done (allowing for external removal) - if ( !timer() && timers[ i ] === timer ) { - timers.splice( i--, 1 ); - } - } - - if ( !timers.length ) { - jQuery.fx.stop(); - } - fxNow = undefined; -}; - -jQuery.fx.timer = function( timer ) { - jQuery.timers.push( timer ); - 
jQuery.fx.start(); -}; - -jQuery.fx.interval = 13; -jQuery.fx.start = function() { - if ( inProgress ) { - return; - } - - inProgress = true; - schedule(); -}; - -jQuery.fx.stop = function() { - inProgress = null; -}; - -jQuery.fx.speeds = { - slow: 600, - fast: 200, - - // Default speed - _default: 400 -}; - - -// Based off of the plugin by Clint Helfers, with permission. -// https://web.archive.org/web/20100324014747/http://blindsignals.com/index.php/2009/07/jquery-delay/ -jQuery.fn.delay = function( time, type ) { - time = jQuery.fx ? jQuery.fx.speeds[ time ] || time : time; - type = type || "fx"; - - return this.queue( type, function( next, hooks ) { - var timeout = window.setTimeout( next, time ); - hooks.stop = function() { - window.clearTimeout( timeout ); - }; - } ); -}; - - -( function() { - var input = document.createElement( "input" ), - select = document.createElement( "select" ), - opt = select.appendChild( document.createElement( "option" ) ); - - input.type = "checkbox"; - - // Support: Android <=4.3 only - // Default value for a checkbox should be "on" - support.checkOn = input.value !== ""; - - // Support: IE <=11 only - // Must access selectedIndex to make default options select - support.optSelected = opt.selected; - - // Support: IE <=11 only - // An input loses its value after becoming a radio - input = document.createElement( "input" ); - input.value = "t"; - input.type = "radio"; - support.radioValue = input.value === "t"; -} )(); - - -var boolHook, - attrHandle = jQuery.expr.attrHandle; - -jQuery.fn.extend( { - attr: function( name, value ) { - return access( this, jQuery.attr, name, value, arguments.length > 1 ); - }, - - removeAttr: function( name ) { - return this.each( function() { - jQuery.removeAttr( this, name ); - } ); - } -} ); - -jQuery.extend( { - attr: function( elem, name, value ) { - var ret, hooks, - nType = elem.nodeType; - - // Don't get/set attributes on text, comment and attribute nodes - if ( nType === 3 || nType === 8 || 
nType === 2 ) { - return; - } - - // Fallback to prop when attributes are not supported - if ( typeof elem.getAttribute === "undefined" ) { - return jQuery.prop( elem, name, value ); - } - - // Attribute hooks are determined by the lowercase version - // Grab necessary hook if one is defined - if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { - hooks = jQuery.attrHooks[ name.toLowerCase() ] || - ( jQuery.expr.match.bool.test( name ) ? boolHook : undefined ); - } - - if ( value !== undefined ) { - if ( value === null ) { - jQuery.removeAttr( elem, name ); - return; - } - - if ( hooks && "set" in hooks && - ( ret = hooks.set( elem, value, name ) ) !== undefined ) { - return ret; - } - - elem.setAttribute( name, value + "" ); - return value; - } - - if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { - return ret; - } - - ret = jQuery.find.attr( elem, name ); - - // Non-existent attributes return null, we normalize to undefined - return ret == null ? undefined : ret; - }, - - attrHooks: { - type: { - set: function( elem, value ) { - if ( !support.radioValue && value === "radio" && - nodeName( elem, "input" ) ) { - var val = elem.value; - elem.setAttribute( "type", value ); - if ( val ) { - elem.value = val; - } - return value; - } - } - } - }, - - removeAttr: function( elem, value ) { - var name, - i = 0, - - // Attribute names can contain non-HTML whitespace characters - // https://html.spec.whatwg.org/multipage/syntax.html#attributes-2 - attrNames = value && value.match( rnothtmlwhite ); - - if ( attrNames && elem.nodeType === 1 ) { - while ( ( name = attrNames[ i++ ] ) ) { - elem.removeAttribute( name ); - } - } - } -} ); - -// Hooks for boolean attributes -boolHook = { - set: function( elem, value, name ) { - if ( value === false ) { - - // Remove boolean attributes when set to false - jQuery.removeAttr( elem, name ); - } else { - elem.setAttribute( name, name ); - } - return name; - } -}; - -jQuery.each( 
jQuery.expr.match.bool.source.match( /\w+/g ), function( i, name ) { - var getter = attrHandle[ name ] || jQuery.find.attr; - - attrHandle[ name ] = function( elem, name, isXML ) { - var ret, handle, - lowercaseName = name.toLowerCase(); - - if ( !isXML ) { - - // Avoid an infinite loop by temporarily removing this function from the getter - handle = attrHandle[ lowercaseName ]; - attrHandle[ lowercaseName ] = ret; - ret = getter( elem, name, isXML ) != null ? - lowercaseName : - null; - attrHandle[ lowercaseName ] = handle; - } - return ret; - }; -} ); - - - - -var rfocusable = /^(?:input|select|textarea|button)$/i, - rclickable = /^(?:a|area)$/i; - -jQuery.fn.extend( { - prop: function( name, value ) { - return access( this, jQuery.prop, name, value, arguments.length > 1 ); - }, - - removeProp: function( name ) { - return this.each( function() { - delete this[ jQuery.propFix[ name ] || name ]; - } ); - } -} ); - -jQuery.extend( { - prop: function( elem, name, value ) { - var ret, hooks, - nType = elem.nodeType; - - // Don't get/set properties on text, comment and attribute nodes - if ( nType === 3 || nType === 8 || nType === 2 ) { - return; - } - - if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { - - // Fix name and attach hooks - name = jQuery.propFix[ name ] || name; - hooks = jQuery.propHooks[ name ]; - } - - if ( value !== undefined ) { - if ( hooks && "set" in hooks && - ( ret = hooks.set( elem, value, name ) ) !== undefined ) { - return ret; - } - - return ( elem[ name ] = value ); - } - - if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { - return ret; - } - - return elem[ name ]; - }, - - propHooks: { - tabIndex: { - get: function( elem ) { - - // Support: IE <=9 - 11 only - // elem.tabIndex doesn't always return the - // correct value when it hasn't been explicitly set - // https://web.archive.org/web/20141116233347/http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabindex-values-with-javascript/ - // Use 
proper attribute retrieval(#12072) - var tabindex = jQuery.find.attr( elem, "tabindex" ); - - if ( tabindex ) { - return parseInt( tabindex, 10 ); - } - - if ( - rfocusable.test( elem.nodeName ) || - rclickable.test( elem.nodeName ) && - elem.href - ) { - return 0; - } - - return -1; - } - } - }, - - propFix: { - "for": "htmlFor", - "class": "className" - } -} ); - -// Support: IE <=11 only -// Accessing the selectedIndex property -// forces the browser to respect setting selected -// on the option -// The getter ensures a default option is selected -// when in an optgroup -// eslint rule "no-unused-expressions" is disabled for this code -// since it considers such accessions noop -if ( !support.optSelected ) { - jQuery.propHooks.selected = { - get: function( elem ) { - - /* eslint no-unused-expressions: "off" */ - - var parent = elem.parentNode; - if ( parent && parent.parentNode ) { - parent.parentNode.selectedIndex; - } - return null; - }, - set: function( elem ) { - - /* eslint no-unused-expressions: "off" */ - - var parent = elem.parentNode; - if ( parent ) { - parent.selectedIndex; - - if ( parent.parentNode ) { - parent.parentNode.selectedIndex; - } - } - } - }; -} - -jQuery.each( [ - "tabIndex", - "readOnly", - "maxLength", - "cellSpacing", - "cellPadding", - "rowSpan", - "colSpan", - "useMap", - "frameBorder", - "contentEditable" -], function() { - jQuery.propFix[ this.toLowerCase() ] = this; -} ); - - - - - // Strip and collapse whitespace according to HTML spec - // https://html.spec.whatwg.org/multipage/infrastructure.html#strip-and-collapse-whitespace - function stripAndCollapse( value ) { - var tokens = value.match( rnothtmlwhite ) || []; - return tokens.join( " " ); - } - - -function getClass( elem ) { - return elem.getAttribute && elem.getAttribute( "class" ) || ""; -} - -jQuery.fn.extend( { - addClass: function( value ) { - var classes, elem, cur, curValue, clazz, j, finalValue, - i = 0; - - if ( jQuery.isFunction( value ) ) { - return this.each( 
function( j ) { - jQuery( this ).addClass( value.call( this, j, getClass( this ) ) ); - } ); - } - - if ( typeof value === "string" && value ) { - classes = value.match( rnothtmlwhite ) || []; - - while ( ( elem = this[ i++ ] ) ) { - curValue = getClass( elem ); - cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); - - if ( cur ) { - j = 0; - while ( ( clazz = classes[ j++ ] ) ) { - if ( cur.indexOf( " " + clazz + " " ) < 0 ) { - cur += clazz + " "; - } - } - - // Only assign if different to avoid unneeded rendering. - finalValue = stripAndCollapse( cur ); - if ( curValue !== finalValue ) { - elem.setAttribute( "class", finalValue ); - } - } - } - } - - return this; - }, - - removeClass: function( value ) { - var classes, elem, cur, curValue, clazz, j, finalValue, - i = 0; - - if ( jQuery.isFunction( value ) ) { - return this.each( function( j ) { - jQuery( this ).removeClass( value.call( this, j, getClass( this ) ) ); - } ); - } - - if ( !arguments.length ) { - return this.attr( "class", "" ); - } - - if ( typeof value === "string" && value ) { - classes = value.match( rnothtmlwhite ) || []; - - while ( ( elem = this[ i++ ] ) ) { - curValue = getClass( elem ); - - // This expression is here for better compressibility (see addClass) - cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); - - if ( cur ) { - j = 0; - while ( ( clazz = classes[ j++ ] ) ) { - - // Remove *all* instances - while ( cur.indexOf( " " + clazz + " " ) > -1 ) { - cur = cur.replace( " " + clazz + " ", " " ); - } - } - - // Only assign if different to avoid unneeded rendering. - finalValue = stripAndCollapse( cur ); - if ( curValue !== finalValue ) { - elem.setAttribute( "class", finalValue ); - } - } - } - } - - return this; - }, - - toggleClass: function( value, stateVal ) { - var type = typeof value; - - if ( typeof stateVal === "boolean" && type === "string" ) { - return stateVal ? 
this.addClass( value ) : this.removeClass( value ); - } - - if ( jQuery.isFunction( value ) ) { - return this.each( function( i ) { - jQuery( this ).toggleClass( - value.call( this, i, getClass( this ), stateVal ), - stateVal - ); - } ); - } - - return this.each( function() { - var className, i, self, classNames; - - if ( type === "string" ) { - - // Toggle individual class names - i = 0; - self = jQuery( this ); - classNames = value.match( rnothtmlwhite ) || []; - - while ( ( className = classNames[ i++ ] ) ) { - - // Check each className given, space separated list - if ( self.hasClass( className ) ) { - self.removeClass( className ); - } else { - self.addClass( className ); - } - } - - // Toggle whole class name - } else if ( value === undefined || type === "boolean" ) { - className = getClass( this ); - if ( className ) { - - // Store className if set - dataPriv.set( this, "__className__", className ); - } - - // If the element has a class name or if we're passed `false`, - // then remove the whole classname (if there was one, the above saved it). - // Otherwise bring back whatever was previously saved (if anything), - // falling back to the empty string if nothing was stored. - if ( this.setAttribute ) { - this.setAttribute( "class", - className || value === false ? 
- "" : - dataPriv.get( this, "__className__" ) || "" - ); - } - } - } ); - }, - - hasClass: function( selector ) { - var className, elem, - i = 0; - - className = " " + selector + " "; - while ( ( elem = this[ i++ ] ) ) { - if ( elem.nodeType === 1 && - ( " " + stripAndCollapse( getClass( elem ) ) + " " ).indexOf( className ) > -1 ) { - return true; - } - } - - return false; - } -} ); - - - - -var rreturn = /\r/g; - -jQuery.fn.extend( { - val: function( value ) { - var hooks, ret, isFunction, - elem = this[ 0 ]; - - if ( !arguments.length ) { - if ( elem ) { - hooks = jQuery.valHooks[ elem.type ] || - jQuery.valHooks[ elem.nodeName.toLowerCase() ]; - - if ( hooks && - "get" in hooks && - ( ret = hooks.get( elem, "value" ) ) !== undefined - ) { - return ret; - } - - ret = elem.value; - - // Handle most common string cases - if ( typeof ret === "string" ) { - return ret.replace( rreturn, "" ); - } - - // Handle cases where value is null/undef or number - return ret == null ? "" : ret; - } - - return; - } - - isFunction = jQuery.isFunction( value ); - - return this.each( function( i ) { - var val; - - if ( this.nodeType !== 1 ) { - return; - } - - if ( isFunction ) { - val = value.call( this, i, jQuery( this ).val() ); - } else { - val = value; - } - - // Treat null/undefined as ""; convert numbers to string - if ( val == null ) { - val = ""; - - } else if ( typeof val === "number" ) { - val += ""; - - } else if ( Array.isArray( val ) ) { - val = jQuery.map( val, function( value ) { - return value == null ? "" : value + ""; - } ); - } - - hooks = jQuery.valHooks[ this.type ] || jQuery.valHooks[ this.nodeName.toLowerCase() ]; - - // If set returns undefined, fall back to normal setting - if ( !hooks || !( "set" in hooks ) || hooks.set( this, val, "value" ) === undefined ) { - this.value = val; - } - } ); - } -} ); - -jQuery.extend( { - valHooks: { - option: { - get: function( elem ) { - - var val = jQuery.find.attr( elem, "value" ); - return val != null ? 
- val : - - // Support: IE <=10 - 11 only - // option.text throws exceptions (#14686, #14858) - // Strip and collapse whitespace - // https://html.spec.whatwg.org/#strip-and-collapse-whitespace - stripAndCollapse( jQuery.text( elem ) ); - } - }, - select: { - get: function( elem ) { - var value, option, i, - options = elem.options, - index = elem.selectedIndex, - one = elem.type === "select-one", - values = one ? null : [], - max = one ? index + 1 : options.length; - - if ( index < 0 ) { - i = max; - - } else { - i = one ? index : 0; - } - - // Loop through all the selected options - for ( ; i < max; i++ ) { - option = options[ i ]; - - // Support: IE <=9 only - // IE8-9 doesn't update selected after form reset (#2551) - if ( ( option.selected || i === index ) && - - // Don't return options that are disabled or in a disabled optgroup - !option.disabled && - ( !option.parentNode.disabled || - !nodeName( option.parentNode, "optgroup" ) ) ) { - - // Get the specific value for the option - value = jQuery( option ).val(); - - // We don't need an array for one selects - if ( one ) { - return value; - } - - // Multi-Selects return an array - values.push( value ); - } - } - - return values; - }, - - set: function( elem, value ) { - var optionSet, option, - options = elem.options, - values = jQuery.makeArray( value ), - i = options.length; - - while ( i-- ) { - option = options[ i ]; - - /* eslint-disable no-cond-assign */ - - if ( option.selected = - jQuery.inArray( jQuery.valHooks.option.get( option ), values ) > -1 - ) { - optionSet = true; - } - - /* eslint-enable no-cond-assign */ - } - - // Force browsers to behave consistently when non-matching value is set - if ( !optionSet ) { - elem.selectedIndex = -1; - } - return values; - } - } - } -} ); - -// Radios and checkboxes getter/setter -jQuery.each( [ "radio", "checkbox" ], function() { - jQuery.valHooks[ this ] = { - set: function( elem, value ) { - if ( Array.isArray( value ) ) { - return ( elem.checked = 
jQuery.inArray( jQuery( elem ).val(), value ) > -1 ); - } - } - }; - if ( !support.checkOn ) { - jQuery.valHooks[ this ].get = function( elem ) { - return elem.getAttribute( "value" ) === null ? "on" : elem.value; - }; - } -} ); - - - - -// Return jQuery for attributes-only inclusion - - -var rfocusMorph = /^(?:focusinfocus|focusoutblur)$/; - -jQuery.extend( jQuery.event, { - - trigger: function( event, data, elem, onlyHandlers ) { - - var i, cur, tmp, bubbleType, ontype, handle, special, - eventPath = [ elem || document ], - type = hasOwn.call( event, "type" ) ? event.type : event, - namespaces = hasOwn.call( event, "namespace" ) ? event.namespace.split( "." ) : []; - - cur = tmp = elem = elem || document; - - // Don't do events on text and comment nodes - if ( elem.nodeType === 3 || elem.nodeType === 8 ) { - return; - } - - // focus/blur morphs to focusin/out; ensure we're not firing them right now - if ( rfocusMorph.test( type + jQuery.event.triggered ) ) { - return; - } - - if ( type.indexOf( "." ) > -1 ) { - - // Namespaced trigger; create a regexp to match event type in handle() - namespaces = type.split( "." ); - type = namespaces.shift(); - namespaces.sort(); - } - ontype = type.indexOf( ":" ) < 0 && "on" + type; - - // Caller can pass in a jQuery.Event object, Object, or just an event type string - event = event[ jQuery.expando ] ? - event : - new jQuery.Event( type, typeof event === "object" && event ); - - // Trigger bitmask: & 1 for native handlers; & 2 for jQuery (always true) - event.isTrigger = onlyHandlers ? 2 : 3; - event.namespace = namespaces.join( "." ); - event.rnamespace = event.namespace ? - new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ) : - null; - - // Clean up the event in case it is being reused - event.result = undefined; - if ( !event.target ) { - event.target = elem; - } - - // Clone any incoming data and prepend the event, creating the handler arg list - data = data == null ? 
- [ event ] : - jQuery.makeArray( data, [ event ] ); - - // Allow special events to draw outside the lines - special = jQuery.event.special[ type ] || {}; - if ( !onlyHandlers && special.trigger && special.trigger.apply( elem, data ) === false ) { - return; - } - - // Determine event propagation path in advance, per W3C events spec (#9951) - // Bubble up to document, then to window; watch for a global ownerDocument var (#9724) - if ( !onlyHandlers && !special.noBubble && !jQuery.isWindow( elem ) ) { - - bubbleType = special.delegateType || type; - if ( !rfocusMorph.test( bubbleType + type ) ) { - cur = cur.parentNode; - } - for ( ; cur; cur = cur.parentNode ) { - eventPath.push( cur ); - tmp = cur; - } - - // Only add window if we got to document (e.g., not plain obj or detached DOM) - if ( tmp === ( elem.ownerDocument || document ) ) { - eventPath.push( tmp.defaultView || tmp.parentWindow || window ); - } - } - - // Fire handlers on the event path - i = 0; - while ( ( cur = eventPath[ i++ ] ) && !event.isPropagationStopped() ) { - - event.type = i > 1 ? - bubbleType : - special.bindType || type; - - // jQuery handler - handle = ( dataPriv.get( cur, "events" ) || {} )[ event.type ] && - dataPriv.get( cur, "handle" ); - if ( handle ) { - handle.apply( cur, data ); - } - - // Native handler - handle = ontype && cur[ ontype ]; - if ( handle && handle.apply && acceptData( cur ) ) { - event.result = handle.apply( cur, data ); - if ( event.result === false ) { - event.preventDefault(); - } - } - } - event.type = type; - - // If nobody prevented the default action, do it now - if ( !onlyHandlers && !event.isDefaultPrevented() ) { - - if ( ( !special._default || - special._default.apply( eventPath.pop(), data ) === false ) && - acceptData( elem ) ) { - - // Call a native DOM method on the target with the same name as the event. 
- // Don't do default actions on window, that's where global variables be (#6170) - if ( ontype && jQuery.isFunction( elem[ type ] ) && !jQuery.isWindow( elem ) ) { - - // Don't re-trigger an onFOO event when we call its FOO() method - tmp = elem[ ontype ]; - - if ( tmp ) { - elem[ ontype ] = null; - } - - // Prevent re-triggering of the same event, since we already bubbled it above - jQuery.event.triggered = type; - elem[ type ](); - jQuery.event.triggered = undefined; - - if ( tmp ) { - elem[ ontype ] = tmp; - } - } - } - } - - return event.result; - }, - - // Piggyback on a donor event to simulate a different one - // Used only for `focus(in | out)` events - simulate: function( type, elem, event ) { - var e = jQuery.extend( - new jQuery.Event(), - event, - { - type: type, - isSimulated: true - } - ); - - jQuery.event.trigger( e, null, elem ); - } - -} ); - -jQuery.fn.extend( { - - trigger: function( type, data ) { - return this.each( function() { - jQuery.event.trigger( type, data, this ); - } ); - }, - triggerHandler: function( type, data ) { - var elem = this[ 0 ]; - if ( elem ) { - return jQuery.event.trigger( type, data, elem, true ); - } - } -} ); - - -jQuery.each( ( "blur focus focusin focusout resize scroll click dblclick " + - "mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave " + - "change select submit keydown keypress keyup contextmenu" ).split( " " ), - function( i, name ) { - - // Handle event binding - jQuery.fn[ name ] = function( data, fn ) { - return arguments.length > 0 ? 
- this.on( name, null, data, fn ) : - this.trigger( name ); - }; -} ); - -jQuery.fn.extend( { - hover: function( fnOver, fnOut ) { - return this.mouseenter( fnOver ).mouseleave( fnOut || fnOver ); - } -} ); - - - - -support.focusin = "onfocusin" in window; - - -// Support: Firefox <=44 -// Firefox doesn't have focus(in | out) events -// Related ticket - https://bugzilla.mozilla.org/show_bug.cgi?id=687787 -// -// Support: Chrome <=48 - 49, Safari <=9.0 - 9.1 -// focus(in | out) events fire after focus & blur events, -// which is spec violation - http://www.w3.org/TR/DOM-Level-3-Events/#events-focusevent-event-order -// Related ticket - https://bugs.chromium.org/p/chromium/issues/detail?id=449857 -if ( !support.focusin ) { - jQuery.each( { focus: "focusin", blur: "focusout" }, function( orig, fix ) { - - // Attach a single capturing handler on the document while someone wants focusin/focusout - var handler = function( event ) { - jQuery.event.simulate( fix, event.target, jQuery.event.fix( event ) ); - }; - - jQuery.event.special[ fix ] = { - setup: function() { - var doc = this.ownerDocument || this, - attaches = dataPriv.access( doc, fix ); - - if ( !attaches ) { - doc.addEventListener( orig, handler, true ); - } - dataPriv.access( doc, fix, ( attaches || 0 ) + 1 ); - }, - teardown: function() { - var doc = this.ownerDocument || this, - attaches = dataPriv.access( doc, fix ) - 1; - - if ( !attaches ) { - doc.removeEventListener( orig, handler, true ); - dataPriv.remove( doc, fix ); - - } else { - dataPriv.access( doc, fix, attaches ); - } - } - }; - } ); -} -var location = window.location; - -var nonce = jQuery.now(); - -var rquery = ( /\?/ ); - - - -// Cross-browser xml parsing -jQuery.parseXML = function( data ) { - var xml; - if ( !data || typeof data !== "string" ) { - return null; - } - - // Support: IE 9 - 11 only - // IE throws on parseFromString with invalid input. 
- try { - xml = ( new window.DOMParser() ).parseFromString( data, "text/xml" ); - } catch ( e ) { - xml = undefined; - } - - if ( !xml || xml.getElementsByTagName( "parsererror" ).length ) { - jQuery.error( "Invalid XML: " + data ); - } - return xml; -}; - - -var - rbracket = /\[\]$/, - rCRLF = /\r?\n/g, - rsubmitterTypes = /^(?:submit|button|image|reset|file)$/i, - rsubmittable = /^(?:input|select|textarea|keygen)/i; - -function buildParams( prefix, obj, traditional, add ) { - var name; - - if ( Array.isArray( obj ) ) { - - // Serialize array item. - jQuery.each( obj, function( i, v ) { - if ( traditional || rbracket.test( prefix ) ) { - - // Treat each array item as a scalar. - add( prefix, v ); - - } else { - - // Item is non-scalar (array or object), encode its numeric index. - buildParams( - prefix + "[" + ( typeof v === "object" && v != null ? i : "" ) + "]", - v, - traditional, - add - ); - } - } ); - - } else if ( !traditional && jQuery.type( obj ) === "object" ) { - - // Serialize object item. - for ( name in obj ) { - buildParams( prefix + "[" + name + "]", obj[ name ], traditional, add ); - } - - } else { - - // Serialize scalar item. - add( prefix, obj ); - } -} - -// Serialize an array of form elements or a set of -// key/values into a query string -jQuery.param = function( a, traditional ) { - var prefix, - s = [], - add = function( key, valueOrFunction ) { - - // If value is a function, invoke it and use its return value - var value = jQuery.isFunction( valueOrFunction ) ? - valueOrFunction() : - valueOrFunction; - - s[ s.length ] = encodeURIComponent( key ) + "=" + - encodeURIComponent( value == null ? "" : value ); - }; - - // If an array was passed in, assume that it is an array of form elements. 
- if ( Array.isArray( a ) || ( a.jquery && !jQuery.isPlainObject( a ) ) ) { - - // Serialize the form elements - jQuery.each( a, function() { - add( this.name, this.value ); - } ); - - } else { - - // If traditional, encode the "old" way (the way 1.3.2 or older - // did it), otherwise encode params recursively. - for ( prefix in a ) { - buildParams( prefix, a[ prefix ], traditional, add ); - } - } - - // Return the resulting serialization - return s.join( "&" ); -}; - -jQuery.fn.extend( { - serialize: function() { - return jQuery.param( this.serializeArray() ); - }, - serializeArray: function() { - return this.map( function() { - - // Can add propHook for "elements" to filter or add form elements - var elements = jQuery.prop( this, "elements" ); - return elements ? jQuery.makeArray( elements ) : this; - } ) - .filter( function() { - var type = this.type; - - // Use .is( ":disabled" ) so that fieldset[disabled] works - return this.name && !jQuery( this ).is( ":disabled" ) && - rsubmittable.test( this.nodeName ) && !rsubmitterTypes.test( type ) && - ( this.checked || !rcheckableType.test( type ) ); - } ) - .map( function( i, elem ) { - var val = jQuery( this ).val(); - - if ( val == null ) { - return null; - } - - if ( Array.isArray( val ) ) { - return jQuery.map( val, function( val ) { - return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; - } ); - } - - return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; - } ).get(); - } -} ); - - -var - r20 = /%20/g, - rhash = /#.*$/, - rantiCache = /([?&])_=[^&]*/, - rheaders = /^(.*?):[ \t]*([^\r\n]*)$/mg, - - // #7653, #8125, #8152: local protocol detection - rlocalProtocol = /^(?:about|app|app-storage|.+-extension|file|res|widget):$/, - rnoContent = /^(?:GET|HEAD)$/, - rprotocol = /^\/\//, - - /* Prefilters - * 1) They are useful to introduce custom dataTypes (see ajax/jsonp.js for an example) - * 2) These are called: - * - BEFORE asking for a transport - * - AFTER param serialization (s.data is a 
string if s.processData is true) - * 3) key is the dataType - * 4) the catchall symbol "*" can be used - * 5) execution will start with transport dataType and THEN continue down to "*" if needed - */ - prefilters = {}, - - /* Transports bindings - * 1) key is the dataType - * 2) the catchall symbol "*" can be used - * 3) selection will start with transport dataType and THEN go to "*" if needed - */ - transports = {}, - - // Avoid comment-prolog char sequence (#10098); must appease lint and evade compression - allTypes = "*/".concat( "*" ), - - // Anchor tag for parsing the document origin - originAnchor = document.createElement( "a" ); - originAnchor.href = location.href; - -// Base "constructor" for jQuery.ajaxPrefilter and jQuery.ajaxTransport -function addToPrefiltersOrTransports( structure ) { - - // dataTypeExpression is optional and defaults to "*" - return function( dataTypeExpression, func ) { - - if ( typeof dataTypeExpression !== "string" ) { - func = dataTypeExpression; - dataTypeExpression = "*"; - } - - var dataType, - i = 0, - dataTypes = dataTypeExpression.toLowerCase().match( rnothtmlwhite ) || []; - - if ( jQuery.isFunction( func ) ) { - - // For each dataType in the dataTypeExpression - while ( ( dataType = dataTypes[ i++ ] ) ) { - - // Prepend if requested - if ( dataType[ 0 ] === "+" ) { - dataType = dataType.slice( 1 ) || "*"; - ( structure[ dataType ] = structure[ dataType ] || [] ).unshift( func ); - - // Otherwise append - } else { - ( structure[ dataType ] = structure[ dataType ] || [] ).push( func ); - } - } - } - }; -} - -// Base inspection function for prefilters and transports -function inspectPrefiltersOrTransports( structure, options, originalOptions, jqXHR ) { - - var inspected = {}, - seekingTransport = ( structure === transports ); - - function inspect( dataType ) { - var selected; - inspected[ dataType ] = true; - jQuery.each( structure[ dataType ] || [], function( _, prefilterOrFactory ) { - var dataTypeOrTransport = 
prefilterOrFactory( options, originalOptions, jqXHR ); - if ( typeof dataTypeOrTransport === "string" && - !seekingTransport && !inspected[ dataTypeOrTransport ] ) { - - options.dataTypes.unshift( dataTypeOrTransport ); - inspect( dataTypeOrTransport ); - return false; - } else if ( seekingTransport ) { - return !( selected = dataTypeOrTransport ); - } - } ); - return selected; - } - - return inspect( options.dataTypes[ 0 ] ) || !inspected[ "*" ] && inspect( "*" ); -} - -// A special extend for ajax options -// that takes "flat" options (not to be deep extended) -// Fixes #9887 -function ajaxExtend( target, src ) { - var key, deep, - flatOptions = jQuery.ajaxSettings.flatOptions || {}; - - for ( key in src ) { - if ( src[ key ] !== undefined ) { - ( flatOptions[ key ] ? target : ( deep || ( deep = {} ) ) )[ key ] = src[ key ]; - } - } - if ( deep ) { - jQuery.extend( true, target, deep ); - } - - return target; -} - -/* Handles responses to an ajax request: - * - finds the right dataType (mediates between content-type and expected dataType) - * - returns the corresponding response - */ -function ajaxHandleResponses( s, jqXHR, responses ) { - - var ct, type, finalDataType, firstDataType, - contents = s.contents, - dataTypes = s.dataTypes; - - // Remove auto dataType and get content-type in the process - while ( dataTypes[ 0 ] === "*" ) { - dataTypes.shift(); - if ( ct === undefined ) { - ct = s.mimeType || jqXHR.getResponseHeader( "Content-Type" ); - } - } - - // Check if we're dealing with a known content-type - if ( ct ) { - for ( type in contents ) { - if ( contents[ type ] && contents[ type ].test( ct ) ) { - dataTypes.unshift( type ); - break; - } - } - } - - // Check to see if we have a response for the expected dataType - if ( dataTypes[ 0 ] in responses ) { - finalDataType = dataTypes[ 0 ]; - } else { - - // Try convertible dataTypes - for ( type in responses ) { - if ( !dataTypes[ 0 ] || s.converters[ type + " " + dataTypes[ 0 ] ] ) { - finalDataType = 
type; - break; - } - if ( !firstDataType ) { - firstDataType = type; - } - } - - // Or just use first one - finalDataType = finalDataType || firstDataType; - } - - // If we found a dataType - // We add the dataType to the list if needed - // and return the corresponding response - if ( finalDataType ) { - if ( finalDataType !== dataTypes[ 0 ] ) { - dataTypes.unshift( finalDataType ); - } - return responses[ finalDataType ]; - } -} - -/* Chain conversions given the request and the original response - * Also sets the responseXXX fields on the jqXHR instance - */ -function ajaxConvert( s, response, jqXHR, isSuccess ) { - var conv2, current, conv, tmp, prev, - converters = {}, - - // Work with a copy of dataTypes in case we need to modify it for conversion - dataTypes = s.dataTypes.slice(); - - // Create converters map with lowercased keys - if ( dataTypes[ 1 ] ) { - for ( conv in s.converters ) { - converters[ conv.toLowerCase() ] = s.converters[ conv ]; - } - } - - current = dataTypes.shift(); - - // Convert to each sequential dataType - while ( current ) { - - if ( s.responseFields[ current ] ) { - jqXHR[ s.responseFields[ current ] ] = response; - } - - // Apply the dataFilter if provided - if ( !prev && isSuccess && s.dataFilter ) { - response = s.dataFilter( response, s.dataType ); - } - - prev = current; - current = dataTypes.shift(); - - if ( current ) { - - // There's only work to do if current dataType is non-auto - if ( current === "*" ) { - - current = prev; - - // Convert response if prev dataType is non-auto and differs from current - } else if ( prev !== "*" && prev !== current ) { - - // Seek a direct converter - conv = converters[ prev + " " + current ] || converters[ "* " + current ]; - - // If none found, seek a pair - if ( !conv ) { - for ( conv2 in converters ) { - - // If conv2 outputs current - tmp = conv2.split( " " ); - if ( tmp[ 1 ] === current ) { - - // If prev can be converted to accepted input - conv = converters[ prev + " " + tmp[ 0 ] ] 
|| - converters[ "* " + tmp[ 0 ] ]; - if ( conv ) { - - // Condense equivalence converters - if ( conv === true ) { - conv = converters[ conv2 ]; - - // Otherwise, insert the intermediate dataType - } else if ( converters[ conv2 ] !== true ) { - current = tmp[ 0 ]; - dataTypes.unshift( tmp[ 1 ] ); - } - break; - } - } - } - } - - // Apply converter (if not an equivalence) - if ( conv !== true ) { - - // Unless errors are allowed to bubble, catch and return them - if ( conv && s.throws ) { - response = conv( response ); - } else { - try { - response = conv( response ); - } catch ( e ) { - return { - state: "parsererror", - error: conv ? e : "No conversion from " + prev + " to " + current - }; - } - } - } - } - } - } - - return { state: "success", data: response }; -} - -jQuery.extend( { - - // Counter for holding the number of active queries - active: 0, - - // Last-Modified header cache for next request - lastModified: {}, - etag: {}, - - ajaxSettings: { - url: location.href, - type: "GET", - isLocal: rlocalProtocol.test( location.protocol ), - global: true, - processData: true, - async: true, - contentType: "application/x-www-form-urlencoded; charset=UTF-8", - - /* - timeout: 0, - data: null, - dataType: null, - username: null, - password: null, - cache: null, - throws: false, - traditional: false, - headers: {}, - */ - - accepts: { - "*": allTypes, - text: "text/plain", - html: "text/html", - xml: "application/xml, text/xml", - json: "application/json, text/javascript" - }, - - contents: { - xml: /\bxml\b/, - html: /\bhtml/, - json: /\bjson\b/ - }, - - responseFields: { - xml: "responseXML", - text: "responseText", - json: "responseJSON" - }, - - // Data converters - // Keys separate source (or catchall "*") and destination types with a single space - converters: { - - // Convert anything to text - "* text": String, - - // Text to html (true = no transformation) - "text html": true, - - // Evaluate text as a json expression - "text json": JSON.parse, - - // Parse 
text as xml - "text xml": jQuery.parseXML - }, - - // For options that shouldn't be deep extended: - // you can add your own custom options here if - // and when you create one that shouldn't be - // deep extended (see ajaxExtend) - flatOptions: { - url: true, - context: true - } - }, - - // Creates a full fledged settings object into target - // with both ajaxSettings and settings fields. - // If target is omitted, writes into ajaxSettings. - ajaxSetup: function( target, settings ) { - return settings ? - - // Building a settings object - ajaxExtend( ajaxExtend( target, jQuery.ajaxSettings ), settings ) : - - // Extending ajaxSettings - ajaxExtend( jQuery.ajaxSettings, target ); - }, - - ajaxPrefilter: addToPrefiltersOrTransports( prefilters ), - ajaxTransport: addToPrefiltersOrTransports( transports ), - - // Main method - ajax: function( url, options ) { - - // If url is an object, simulate pre-1.5 signature - if ( typeof url === "object" ) { - options = url; - url = undefined; - } - - // Force options to be an object - options = options || {}; - - var transport, - - // URL without anti-cache param - cacheURL, - - // Response headers - responseHeadersString, - responseHeaders, - - // timeout handle - timeoutTimer, - - // Url cleanup var - urlAnchor, - - // Request state (becomes false upon send and true upon completion) - completed, - - // To know if global events are to be dispatched - fireGlobals, - - // Loop variable - i, - - // uncached part of the url - uncached, - - // Create the final options object - s = jQuery.ajaxSetup( {}, options ), - - // Callbacks context - callbackContext = s.context || s, - - // Context for global events is callbackContext if it is a DOM node or jQuery collection - globalEventContext = s.context && - ( callbackContext.nodeType || callbackContext.jquery ) ? 
- jQuery( callbackContext ) : - jQuery.event, - - // Deferreds - deferred = jQuery.Deferred(), - completeDeferred = jQuery.Callbacks( "once memory" ), - - // Status-dependent callbacks - statusCode = s.statusCode || {}, - - // Headers (they are sent all at once) - requestHeaders = {}, - requestHeadersNames = {}, - - // Default abort message - strAbort = "canceled", - - // Fake xhr - jqXHR = { - readyState: 0, - - // Builds headers hashtable if needed - getResponseHeader: function( key ) { - var match; - if ( completed ) { - if ( !responseHeaders ) { - responseHeaders = {}; - while ( ( match = rheaders.exec( responseHeadersString ) ) ) { - responseHeaders[ match[ 1 ].toLowerCase() ] = match[ 2 ]; - } - } - match = responseHeaders[ key.toLowerCase() ]; - } - return match == null ? null : match; - }, - - // Raw string - getAllResponseHeaders: function() { - return completed ? responseHeadersString : null; - }, - - // Caches the header - setRequestHeader: function( name, value ) { - if ( completed == null ) { - name = requestHeadersNames[ name.toLowerCase() ] = - requestHeadersNames[ name.toLowerCase() ] || name; - requestHeaders[ name ] = value; - } - return this; - }, - - // Overrides response content-type header - overrideMimeType: function( type ) { - if ( completed == null ) { - s.mimeType = type; - } - return this; - }, - - // Status-dependent callbacks - statusCode: function( map ) { - var code; - if ( map ) { - if ( completed ) { - - // Execute the appropriate callbacks - jqXHR.always( map[ jqXHR.status ] ); - } else { - - // Lazy-add the new callbacks in a way that preserves old ones - for ( code in map ) { - statusCode[ code ] = [ statusCode[ code ], map[ code ] ]; - } - } - } - return this; - }, - - // Cancel the request - abort: function( statusText ) { - var finalText = statusText || strAbort; - if ( transport ) { - transport.abort( finalText ); - } - done( 0, finalText ); - return this; - } - }; - - // Attach deferreds - deferred.promise( jqXHR ); - - // 
Add protocol if not provided (prefilters might expect it) - // Handle falsy url in the settings object (#10093: consistency with old signature) - // We also use the url parameter if available - s.url = ( ( url || s.url || location.href ) + "" ) - .replace( rprotocol, location.protocol + "//" ); - - // Alias method option to type as per ticket #12004 - s.type = options.method || options.type || s.method || s.type; - - // Extract dataTypes list - s.dataTypes = ( s.dataType || "*" ).toLowerCase().match( rnothtmlwhite ) || [ "" ]; - - // A cross-domain request is in order when the origin doesn't match the current origin. - if ( s.crossDomain == null ) { - urlAnchor = document.createElement( "a" ); - - // Support: IE <=8 - 11, Edge 12 - 13 - // IE throws exception on accessing the href property if url is malformed, - // e.g. http://example.com:80x/ - try { - urlAnchor.href = s.url; - - // Support: IE <=8 - 11 only - // Anchor's host property isn't correctly set when s.url is relative - urlAnchor.href = urlAnchor.href; - s.crossDomain = originAnchor.protocol + "//" + originAnchor.host !== - urlAnchor.protocol + "//" + urlAnchor.host; - } catch ( e ) { - - // If there is an error parsing the URL, assume it is crossDomain, - // it can be rejected by the transport if it is invalid - s.crossDomain = true; - } - } - - // Convert data if not already a string - if ( s.data && s.processData && typeof s.data !== "string" ) { - s.data = jQuery.param( s.data, s.traditional ); - } - - // Apply prefilters - inspectPrefiltersOrTransports( prefilters, s, options, jqXHR ); - - // If request was aborted inside a prefilter, stop there - if ( completed ) { - return jqXHR; - } - - // We can fire global events as of now if asked to - // Don't fire events if jQuery.event is undefined in an AMD-usage scenario (#15118) - fireGlobals = jQuery.event && s.global; - - // Watch for a new set of requests - if ( fireGlobals && jQuery.active++ === 0 ) { - jQuery.event.trigger( "ajaxStart" ); - } - - // 
Uppercase the type - s.type = s.type.toUpperCase(); - - // Determine if request has content - s.hasContent = !rnoContent.test( s.type ); - - // Save the URL in case we're toying with the If-Modified-Since - // and/or If-None-Match header later on - // Remove hash to simplify url manipulation - cacheURL = s.url.replace( rhash, "" ); - - // More options handling for requests with no content - if ( !s.hasContent ) { - - // Remember the hash so we can put it back - uncached = s.url.slice( cacheURL.length ); - - // If data is available, append data to url - if ( s.data ) { - cacheURL += ( rquery.test( cacheURL ) ? "&" : "?" ) + s.data; - - // #9682: remove data so that it's not used in an eventual retry - delete s.data; - } - - // Add or update anti-cache param if needed - if ( s.cache === false ) { - cacheURL = cacheURL.replace( rantiCache, "$1" ); - uncached = ( rquery.test( cacheURL ) ? "&" : "?" ) + "_=" + ( nonce++ ) + uncached; - } - - // Put hash and anti-cache on the URL that will be requested (gh-1732) - s.url = cacheURL + uncached; - - // Change '%20' to '+' if this is encoded form body content (gh-2658) - } else if ( s.data && s.processData && - ( s.contentType || "" ).indexOf( "application/x-www-form-urlencoded" ) === 0 ) { - s.data = s.data.replace( r20, "+" ); - } - - // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. 
- if ( s.ifModified ) { - if ( jQuery.lastModified[ cacheURL ] ) { - jqXHR.setRequestHeader( "If-Modified-Since", jQuery.lastModified[ cacheURL ] ); - } - if ( jQuery.etag[ cacheURL ] ) { - jqXHR.setRequestHeader( "If-None-Match", jQuery.etag[ cacheURL ] ); - } - } - - // Set the correct header, if data is being sent - if ( s.data && s.hasContent && s.contentType !== false || options.contentType ) { - jqXHR.setRequestHeader( "Content-Type", s.contentType ); - } - - // Set the Accepts header for the server, depending on the dataType - jqXHR.setRequestHeader( - "Accept", - s.dataTypes[ 0 ] && s.accepts[ s.dataTypes[ 0 ] ] ? - s.accepts[ s.dataTypes[ 0 ] ] + - ( s.dataTypes[ 0 ] !== "*" ? ", " + allTypes + "; q=0.01" : "" ) : - s.accepts[ "*" ] - ); - - // Check for headers option - for ( i in s.headers ) { - jqXHR.setRequestHeader( i, s.headers[ i ] ); - } - - // Allow custom headers/mimetypes and early abort - if ( s.beforeSend && - ( s.beforeSend.call( callbackContext, jqXHR, s ) === false || completed ) ) { - - // Abort if not done already and return - return jqXHR.abort(); - } - - // Aborting is no longer a cancellation - strAbort = "abort"; - - // Install callbacks on deferreds - completeDeferred.add( s.complete ); - jqXHR.done( s.success ); - jqXHR.fail( s.error ); - - // Get transport - transport = inspectPrefiltersOrTransports( transports, s, options, jqXHR ); - - // If no transport, we auto-abort - if ( !transport ) { - done( -1, "No Transport" ); - } else { - jqXHR.readyState = 1; - - // Send global event - if ( fireGlobals ) { - globalEventContext.trigger( "ajaxSend", [ jqXHR, s ] ); - } - - // If request was aborted inside ajaxSend, stop there - if ( completed ) { - return jqXHR; - } - - // Timeout - if ( s.async && s.timeout > 0 ) { - timeoutTimer = window.setTimeout( function() { - jqXHR.abort( "timeout" ); - }, s.timeout ); - } - - try { - completed = false; - transport.send( requestHeaders, done ); - } catch ( e ) { - - // Rethrow post-completion 
exceptions - if ( completed ) { - throw e; - } - - // Propagate others as results - done( -1, e ); - } - } - - // Callback for when everything is done - function done( status, nativeStatusText, responses, headers ) { - var isSuccess, success, error, response, modified, - statusText = nativeStatusText; - - // Ignore repeat invocations - if ( completed ) { - return; - } - - completed = true; - - // Clear timeout if it exists - if ( timeoutTimer ) { - window.clearTimeout( timeoutTimer ); - } - - // Dereference transport for early garbage collection - // (no matter how long the jqXHR object will be used) - transport = undefined; - - // Cache response headers - responseHeadersString = headers || ""; - - // Set readyState - jqXHR.readyState = status > 0 ? 4 : 0; - - // Determine if successful - isSuccess = status >= 200 && status < 300 || status === 304; - - // Get response data - if ( responses ) { - response = ajaxHandleResponses( s, jqXHR, responses ); - } - - // Convert no matter what (that way responseXXX fields are always set) - response = ajaxConvert( s, response, jqXHR, isSuccess ); - - // If successful, handle type chaining - if ( isSuccess ) { - - // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. 
- if ( s.ifModified ) { - modified = jqXHR.getResponseHeader( "Last-Modified" ); - if ( modified ) { - jQuery.lastModified[ cacheURL ] = modified; - } - modified = jqXHR.getResponseHeader( "etag" ); - if ( modified ) { - jQuery.etag[ cacheURL ] = modified; - } - } - - // if no content - if ( status === 204 || s.type === "HEAD" ) { - statusText = "nocontent"; - - // if not modified - } else if ( status === 304 ) { - statusText = "notmodified"; - - // If we have data, let's convert it - } else { - statusText = response.state; - success = response.data; - error = response.error; - isSuccess = !error; - } - } else { - - // Extract error from statusText and normalize for non-aborts - error = statusText; - if ( status || !statusText ) { - statusText = "error"; - if ( status < 0 ) { - status = 0; - } - } - } - - // Set data for the fake xhr object - jqXHR.status = status; - jqXHR.statusText = ( nativeStatusText || statusText ) + ""; - - // Success/Error - if ( isSuccess ) { - deferred.resolveWith( callbackContext, [ success, statusText, jqXHR ] ); - } else { - deferred.rejectWith( callbackContext, [ jqXHR, statusText, error ] ); - } - - // Status-dependent callbacks - jqXHR.statusCode( statusCode ); - statusCode = undefined; - - if ( fireGlobals ) { - globalEventContext.trigger( isSuccess ? "ajaxSuccess" : "ajaxError", - [ jqXHR, s, isSuccess ? 
success : error ] ); - } - - // Complete - completeDeferred.fireWith( callbackContext, [ jqXHR, statusText ] ); - - if ( fireGlobals ) { - globalEventContext.trigger( "ajaxComplete", [ jqXHR, s ] ); - - // Handle the global AJAX counter - if ( !( --jQuery.active ) ) { - jQuery.event.trigger( "ajaxStop" ); - } - } - } - - return jqXHR; - }, - - getJSON: function( url, data, callback ) { - return jQuery.get( url, data, callback, "json" ); - }, - - getScript: function( url, callback ) { - return jQuery.get( url, undefined, callback, "script" ); - } -} ); - -jQuery.each( [ "get", "post" ], function( i, method ) { - jQuery[ method ] = function( url, data, callback, type ) { - - // Shift arguments if data argument was omitted - if ( jQuery.isFunction( data ) ) { - type = type || callback; - callback = data; - data = undefined; - } - - // The url can be an options object (which then must have .url) - return jQuery.ajax( jQuery.extend( { - url: url, - type: method, - dataType: type, - data: data, - success: callback - }, jQuery.isPlainObject( url ) && url ) ); - }; -} ); - - -jQuery._evalUrl = function( url ) { - return jQuery.ajax( { - url: url, - - // Make this explicit, since user can override this through ajaxSetup (#11264) - type: "GET", - dataType: "script", - cache: true, - async: false, - global: false, - "throws": true - } ); -}; - - -jQuery.fn.extend( { - wrapAll: function( html ) { - var wrap; - - if ( this[ 0 ] ) { - if ( jQuery.isFunction( html ) ) { - html = html.call( this[ 0 ] ); - } - - // The elements to wrap the target around - wrap = jQuery( html, this[ 0 ].ownerDocument ).eq( 0 ).clone( true ); - - if ( this[ 0 ].parentNode ) { - wrap.insertBefore( this[ 0 ] ); - } - - wrap.map( function() { - var elem = this; - - while ( elem.firstElementChild ) { - elem = elem.firstElementChild; - } - - return elem; - } ).append( this ); - } - - return this; - }, - - wrapInner: function( html ) { - if ( jQuery.isFunction( html ) ) { - return this.each( function( i ) 
{ - jQuery( this ).wrapInner( html.call( this, i ) ); - } ); - } - - return this.each( function() { - var self = jQuery( this ), - contents = self.contents(); - - if ( contents.length ) { - contents.wrapAll( html ); - - } else { - self.append( html ); - } - } ); - }, - - wrap: function( html ) { - var isFunction = jQuery.isFunction( html ); - - return this.each( function( i ) { - jQuery( this ).wrapAll( isFunction ? html.call( this, i ) : html ); - } ); - }, - - unwrap: function( selector ) { - this.parent( selector ).not( "body" ).each( function() { - jQuery( this ).replaceWith( this.childNodes ); - } ); - return this; - } -} ); - - -jQuery.expr.pseudos.hidden = function( elem ) { - return !jQuery.expr.pseudos.visible( elem ); -}; -jQuery.expr.pseudos.visible = function( elem ) { - return !!( elem.offsetWidth || elem.offsetHeight || elem.getClientRects().length ); -}; - - - - -jQuery.ajaxSettings.xhr = function() { - try { - return new window.XMLHttpRequest(); - } catch ( e ) {} -}; - -var xhrSuccessStatus = { - - // File protocol always yields status code 0, assume 200 - 0: 200, - - // Support: IE <=9 only - // #1450: sometimes IE returns 1223 when it should be 204 - 1223: 204 - }, - xhrSupported = jQuery.ajaxSettings.xhr(); - -support.cors = !!xhrSupported && ( "withCredentials" in xhrSupported ); -support.ajax = xhrSupported = !!xhrSupported; - -jQuery.ajaxTransport( function( options ) { - var callback, errorCallback; - - // Cross domain only allowed if supported through XMLHttpRequest - if ( support.cors || xhrSupported && !options.crossDomain ) { - return { - send: function( headers, complete ) { - var i, - xhr = options.xhr(); - - xhr.open( - options.type, - options.url, - options.async, - options.username, - options.password - ); - - // Apply custom fields if provided - if ( options.xhrFields ) { - for ( i in options.xhrFields ) { - xhr[ i ] = options.xhrFields[ i ]; - } - } - - // Override mime type if needed - if ( options.mimeType && 
xhr.overrideMimeType ) { - xhr.overrideMimeType( options.mimeType ); - } - - // X-Requested-With header - // For cross-domain requests, seeing as conditions for a preflight are - // akin to a jigsaw puzzle, we simply never set it to be sure. - // (it can always be set on a per-request basis or even using ajaxSetup) - // For same-domain requests, won't change header if already provided. - if ( !options.crossDomain && !headers[ "X-Requested-With" ] ) { - headers[ "X-Requested-With" ] = "XMLHttpRequest"; - } - - // Set headers - for ( i in headers ) { - xhr.setRequestHeader( i, headers[ i ] ); - } - - // Callback - callback = function( type ) { - return function() { - if ( callback ) { - callback = errorCallback = xhr.onload = - xhr.onerror = xhr.onabort = xhr.onreadystatechange = null; - - if ( type === "abort" ) { - xhr.abort(); - } else if ( type === "error" ) { - - // Support: IE <=9 only - // On a manual native abort, IE9 throws - // errors on any property access that is not readyState - if ( typeof xhr.status !== "number" ) { - complete( 0, "error" ); - } else { - complete( - - // File: protocol always yields status 0; see #8605, #14207 - xhr.status, - xhr.statusText - ); - } - } else { - complete( - xhrSuccessStatus[ xhr.status ] || xhr.status, - xhr.statusText, - - // Support: IE <=9 only - // IE9 has no XHR2 but throws on binary (trac-11426) - // For XHR2 non-text, let the caller handle it (gh-2498) - ( xhr.responseType || "text" ) !== "text" || - typeof xhr.responseText !== "string" ? 
- { binary: xhr.response } : - { text: xhr.responseText }, - xhr.getAllResponseHeaders() - ); - } - } - }; - }; - - // Listen to events - xhr.onload = callback(); - errorCallback = xhr.onerror = callback( "error" ); - - // Support: IE 9 only - // Use onreadystatechange to replace onabort - // to handle uncaught aborts - if ( xhr.onabort !== undefined ) { - xhr.onabort = errorCallback; - } else { - xhr.onreadystatechange = function() { - - // Check readyState before timeout as it changes - if ( xhr.readyState === 4 ) { - - // Allow onerror to be called first, - // but that will not handle a native abort - // Also, save errorCallback to a variable - // as xhr.onerror cannot be accessed - window.setTimeout( function() { - if ( callback ) { - errorCallback(); - } - } ); - } - }; - } - - // Create the abort callback - callback = callback( "abort" ); - - try { - - // Do send the request (this may raise an exception) - xhr.send( options.hasContent && options.data || null ); - } catch ( e ) { - - // #14683: Only rethrow if this hasn't been notified as an error yet - if ( callback ) { - throw e; - } - } - }, - - abort: function() { - if ( callback ) { - callback(); - } - } - }; - } -} ); - - - - -// Prevent auto-execution of scripts when no explicit dataType was provided (See gh-2432) -jQuery.ajaxPrefilter( function( s ) { - if ( s.crossDomain ) { - s.contents.script = false; - } -} ); - -// Install script dataType -jQuery.ajaxSetup( { - accepts: { - script: "text/javascript, application/javascript, " + - "application/ecmascript, application/x-ecmascript" - }, - contents: { - script: /\b(?:java|ecma)script\b/ - }, - converters: { - "text script": function( text ) { - jQuery.globalEval( text ); - return text; - } - } -} ); - -// Handle cache's special case and crossDomain -jQuery.ajaxPrefilter( "script", function( s ) { - if ( s.cache === undefined ) { - s.cache = false; - } - if ( s.crossDomain ) { - s.type = "GET"; - } -} ); - -// Bind script tag hack transport 
-jQuery.ajaxTransport( "script", function( s ) { - - // This transport only deals with cross domain requests - if ( s.crossDomain ) { - var script, callback; - return { - send: function( _, complete ) { - script = jQuery( " - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - -
- -
    - -
  • Docs »
  • - -
  • Index
  • - - -
  • - - - -
  • - -
- - -
-
-
-
- - -

Index

- -
- C - | D - | F - | G - | I - | K - | L - | M - | P - | R - | S - | T - | U - | W - -
-

C

- - -
- -

D

- - - -
- -

F

- - - -
- -

G

- - - -
- -

I

- - - -
- -

K

- - - -
- -

L

- - - -
- -

M

- - -
- -

P

- - - -
- -

R

- - -
- -

S

- - - -
- -

T

- - - -
- -

U

- - -
- -

W

- - -
- - - -
- -
- - -
-
- -
- -
- - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_build/html/index.html b/docs/_build/html/index.html deleted file mode 100644 index 92ad201..0000000 --- a/docs/_build/html/index.html +++ /dev/null @@ -1,207 +0,0 @@ - - - - - - - - - - - Welcome to py-graph’s documentation! — py-graph documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - -
- - - - -
-
-
-
- -
-

Welcome to py-graph’s documentation!

-
-
-
-
-

Indices and tables

- -
- - -
- -
- - -
-
- -
- -
- - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_build/html/modules.html b/docs/_build/html/modules.html deleted file mode 100644 index feba6b4..0000000 --- a/docs/_build/html/modules.html +++ /dev/null @@ -1,222 +0,0 @@ - - - - - - - - - - - py-graph — py-graph documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - - - -
- -
- - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_build/html/objects.inv b/docs/_build/html/objects.inv deleted file mode 100644 index 66410ed..0000000 Binary files a/docs/_build/html/objects.inv and /dev/null differ diff --git a/docs/_build/html/py-modindex.html b/docs/_build/html/py-modindex.html deleted file mode 100644 index 5ffb88e..0000000 --- a/docs/_build/html/py-modindex.html +++ /dev/null @@ -1,257 +0,0 @@ - - - - - - - - - - - Python Module Index — py-graph documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - -
- -
    - -
  • Docs »
  • - -
  • Python Module Index
  • - - -
  • - -
  • - -
- - -
-
-
-
- - -

Python Module Index

- -
- p -
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
 
- p
- pygraph -
    - pygraph.utils -
    - pygraph.utils.graphdataset -
    - pygraph.utils.graphfiles -
    - pygraph.utils.isNotebook -
    - pygraph.utils.kernels -
    - pygraph.utils.logger2file -
    - pygraph.utils.model_selection_precomputed -
    - pygraph.utils.parallel -
    - pygraph.utils.trie -
    - pygraph.utils.utils -
- - -
- -
- - -
-
- -
- -
- - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_build/html/pygraph.html b/docs/_build/html/pygraph.html deleted file mode 100644 index bc4d2b6..0000000 --- a/docs/_build/html/pygraph.html +++ /dev/null @@ -1,237 +0,0 @@ - - - - - - - - - - - pygraph package — py-graph documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - -
- - - - -
-
-
-
- -
-

pygraph package

- -
-

Module contents

-

Pygraph

-
-
This package contains 4 sub packages :
-
    -
  • c_ext : binders to C++ code
  • -
  • ged : allows to compute graph edit distance between networkX graphs
  • -
  • kernels : computation of graph kernels, ie graph similarity measure compatible with SVM
  • -
  • notebooks : examples of code using this library
  • -
  • utils : Diverse computation on graphs
  • -
-
-
-
-
- - -
- -
- - -
-
- -
- -
- - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_build/html/pygraph.utils.html b/docs/_build/html/pygraph.utils.html deleted file mode 100644 index 67b0d8e..0000000 --- a/docs/_build/html/pygraph.utils.html +++ /dev/null @@ -1,816 +0,0 @@ - - - - - - - - - - - pygraph.utils package — py-graph documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - -
- - - - -
-
-
-
- -
-

pygraph.utils package

-
-

Submodules

-
-
-

pygraph.utils.graphdataset module

-

Obtain all kinds of attributes of a graph dataset.

-
-
-pygraph.utils.graphdataset.get_dataset_attributes(Gn, target=None, attr_names=[], node_label=None, edge_label=None)[source]
-

Returns the structure and property information of the graph dataset Gn.

-
-
Gn : List of NetworkX graph
-
List of graphs whose information will be returned.
-
target : list
-
The list of classification targets corresponding to Gn. Only works for -classification problems.
-
attr_names : list
-

List of strings which indicate which informations will be returned. The -possible choices includes: -‘substructures’: sub-structures Gn contains, including ‘linear’, ‘non

-
-
linear’ and ‘cyclic’.
-

‘node_labeled’: whether vertices have symbolic labels. -‘edge_labeled’: whether egdes have symbolic labels. -‘is_directed’: whether graphs in Gn are directed. -‘dataset_size’: number of graphs in Gn. -‘ave_node_num’: average number of vertices of graphs in Gn. -‘min_node_num’: minimum number of vertices of graphs in Gn. -‘max_node_num’: maximum number of vertices of graphs in Gn. -‘ave_edge_num’: average number of edges of graphs in Gn. -‘min_edge_num’: minimum number of edges of graphs in Gn. -‘max_edge_num’: maximum number of edges of graphs in Gn. -‘ave_node_degree’: average vertex degree of graphs in Gn. -‘min_node_degree’: minimum vertex degree of graphs in Gn. -‘max_node_degree’: maximum vertex degree of graphs in Gn. -‘ave_fill_factor’: average fill factor (number_of_edges /

-
-
(number_of_nodes ** 2)) of graphs in Gn.
-

‘min_fill_factor’: minimum fill factor of graphs in Gn. -‘max_fill_factor’: maximum fill factor of graphs in Gn. -‘node_label_num’: number of symbolic vertex labels. -‘edge_label_num’: number of symbolic edge labels. -‘node_attr_dim’: number of dimensions of non-symbolic vertex labels.

-
-
Extracted from the ‘attributes’ attribute of graph nodes.
-
-
‘edge_attr_dim’: number of dimensions of non-symbolic edge labels.
-
Extracted from the ‘attributes’ attribute of graph edges.
-
‘class_number’: number of classes. Only available for classification
-
problems.
-
-
-
node_label : string
-
Node attribute used as label. The default node label is atom. Mandatory -when ‘node_labeled’ or ‘node_label_num’ is required.
-
edge_label : string
-
Edge attribute used as label. The default edge label is bond_type. -Mandatory when ‘edge_labeled’ or ‘edge_label_num’ is required.
-
-
-
attrs : dict
-
Value for each property.
-
-
- -
-
-

pygraph.utils.graphfiles module

-

Utilities function to manage graph files

-
-
-pygraph.utils.graphfiles.loadCT(filename)[source]
-

load data from a Chemical Table (.ct) file.

-

a typical example of data in .ct is like this:

-
-
-
3 2 <- number of nodes and edges
-
-
0.0000 0.0000 0.0000 C <- each line describes a node (x,y,z + label) -0.0000 0.0000 0.0000 C -0.0000 0.0000 0.0000 O
-

1 3 1 1 <- each line describes an edge : to, from, bond type, bond stereo -2 3 1 1

-
-
-
-

Check https://www.google.com/url?sa=t&rct=j&q=&esrc=s&source=web&cd=10&ved=2ahUKEwivhaSdjsTlAhVhx4UKHczHA8gQFjAJegQIARAC&url=https%3A%2F%2Fwww.daylight.com%2Fmeetings%2Fmug05%2FKappler%2Fctfile.pdf&usg=AOvVaw1cDNrrmMClkFPqodlF2inS -for detailed format discription.

-
- -
-
-pygraph.utils.graphfiles.loadDataset(filename, filename_y=None, extra_params=None)[source]
-

Read graph data from filename and load them as NetworkX graphs.

-
-
filename : string
-
The name of the file from where the dataset is read.
-
filename_y : string
-
The name of file of the targets corresponding to graphs.
-
extra_params : dict
-
Extra parameters only designated to ‘.mat’ format.
-
-

data : List of NetworkX graph. -y : List

-
-
Targets corresponding to graphs.
-

This function supports following graph dataset formats: -‘ds’: load data from .ds file. See comments of function loadFromDS for a example. -‘cxl’: load data from Graph eXchange Language file (.cxl file). See

-
-
-
-
‘sdf’: load data from structured data file (.sdf file). See
-
http://www.nonlinear.com/progenesis/sdf-studio/v0.9/faq/sdf-file-format-guidance.aspx, -2018 for details.
-
‘mat’: Load graph data from a MATLAB (up to version 7.1) .mat file. See
-
README in downloadable file in http://mlcb.is.tuebingen.mpg.de/Mitarbeiter/Nino/WL/, -2018 for details.
-
‘txt’: Load graph data from a special .txt file. See
-
https://ls11-www.cs.tu-dortmund.de/staff/morris/graphkerneldatasets, -2019 for details. Note here filename is the name of either .txt file in -the dataset directory.
-
-
- -
-
-pygraph.utils.graphfiles.loadFromDS(filename, filename_y)[source]
-

Load data from .ds file. -Possible graph formats include:

-
-
‘.ct’: see function loadCT for detail. -‘.gxl’: see dunction loadGXL for detail.
-

Note these graph formats are checked automatically by the extensions of -graph files.

-
- -
-
-pygraph.utils.graphfiles.loadFromXML(filename, extra_params)[source]
-
- -
-
-pygraph.utils.graphfiles.loadGXL(filename)[source]
-
- -
-
-pygraph.utils.graphfiles.loadMAT(filename, extra_params)[source]
-

Load graph data from a MATLAB (up to version 7.1) .mat file.

-

A MAT file contains a struct array containing graphs, and a column vector lx containing a class label for each graph. -Check README in downloadable file in http://mlcb.is.tuebingen.mpg.de/Mitarbeiter/Nino/WL/, 2018 for detailed structure.

-
- -
-
-pygraph.utils.graphfiles.loadSDF(filename)[source]
-

load data from structured data file (.sdf file).

-

A SDF file contains a group of molecules, represented in the similar way as in MOL format. -Check http://www.nonlinear.com/progenesis/sdf-studio/v0.9/faq/sdf-file-format-guidance.aspx, 2018 for detailed structure.

-
- -
-
-pygraph.utils.graphfiles.loadTXT(dirname_dataset)[source]
-

Load graph data from a .txt file.

-

The graph data is loaded from separate files. -Check README in downloadable file http://tiny.cc/PK_MLJ_data, 2018 for detailed structure.

-
- -
-
-pygraph.utils.graphfiles.saveDataset(Gn, y, gformat='gxl', group=None, filename='gfile', xparams=None)[source]
-

Save list of graphs.

-
- -
-
-pygraph.utils.graphfiles.saveGXL(graph, filename, method='benoit')[source]
-
- -
-
-

pygraph.utils.ipython_log module

-
-
-

pygraph.utils.isNotebook module

-

Functions for python system.

-
-
-pygraph.utils.isNotebook.isNotebook()[source]
-

check if code is executed in the IPython notebook.

-
- -
-
-

pygraph.utils.kernels module

-

Those who are not graph kernels. We can be kernels for nodes or edges! -These kernels are defined between pairs of vectors.

-
-
-pygraph.utils.kernels.deltakernel(x, y)[source]
-

Delta kernel. Return 1 if x == y, 0 otherwise.

-
-
x, y : any
-
Two parts to compare.
-
-
-
kernel : integer
-
Delta kernel.
-
-

[1] H. Kashima, K. Tsuda, and A. Inokuchi. Marginalized kernels between -labeled graphs. In Proceedings of the 20th International Conference on -Machine Learning, Washington, DC, United States, 2003.

-
- -
-
-pygraph.utils.kernels.gaussiankernel(x, y, gamma=None)[source]
-

Gaussian kernel. -Compute the rbf (gaussian) kernel between x and y:

-
-
K(x, y) = exp(-gamma ||x-y||^2).
-

Read more in the User Guide.

-

x, y : array

-
-
gamma : float, default None
-
If None, defaults to 1.0 / n_features
-
-

kernel : float

-
- -
-
-pygraph.utils.kernels.kernelproduct(k1, k2, d11, d12, d21=None, d22=None, lamda=1)[source]
-

Product of a pair of kernels.

-

k = lamda * k1(d11, d12) * k2(d21, d22)

-
-
k1, k2 : function
-
A pair of kernel functions.
-
d11, d12:
-
Inputs of k1. If d21 or d22 is None, apply d11, d12 to both k1 and k2.
-
d21, d22:
-
Inputs of k2.
-
lamda: float
-
Coefficient of the product.
-
-

kernel : integer

-
- -
-
-pygraph.utils.kernels.kernelsum(k1, k2, d11, d12, d21=None, d22=None, lamda1=1, lamda2=1)[source]
-

Sum of a pair of kernels.

-

k = lamda1 * k1(d11, d12) + lamda2 * k2(d21, d22)

-
-
k1, k2 : function
-
A pair of kernel functions.
-
d11, d12:
-
Inputs of k1. If d21 or d22 is None, apply d11, d12 to both k1 and k2.
-
d21, d22:
-
Inputs of k2.
-
lamda1, lamda2: float
-
Coefficients of the product.
-
-

kernel : integer

-
- -
-
-pygraph.utils.kernels.linearkernel(x, y)[source]
-

Polynomial kernel. -Compute the polynomial kernel between x and y:

-
-
K(x, y) = <x, y>.
-

x, y : array

-

d : integer, default 1

-

c : float, default 0

-

kernel : float

-
- -
-
-pygraph.utils.kernels.polynomialkernel(x, y, d=1, c=0)[source]
-

Polynomial kernel. -Compute the polynomial kernel between x and y:

-
-
K(x, y) = <x, y> ^d + c.
-

x, y : array

-

d : integer, default 1

-

c : float, default 0

-

kernel : float

-
- -
-
-

pygraph.utils.logger2file module

-

Created on Fri Nov 8 14:21:25 2019

-

@author: ljia

-
-
-class pygraph.utils.logger2file.Logger[source]
-

Bases: object

-
-
-flush()[source]
-
- -
-
-write(message)[source]
-
- -
- -
-
-

pygraph.utils.model_selection_precomputed module

-
-
-pygraph.utils.model_selection_precomputed.compute_gram_matrices(dataset, y, estimator, param_list_precomputed, results_dir, ds_name, n_jobs=1, str_fw='', verbose=True)[source]
-
- -
-
-pygraph.utils.model_selection_precomputed.model_selection_for_precomputed_kernel(datafile, estimator, param_grid_precomputed, param_grid, model_type, NUM_TRIALS=30, datafile_y=None, extra_params=None, ds_name='ds-unknown', n_jobs=1, read_gm_from_file=False, verbose=True)[source]
-

Perform model selection, fitting and testing for precomputed kernels -using nested CV. Print out neccessary data during the process then finally -the results.

-
-
datafile : string
-
Path of dataset file.
-
estimator : function
-
kernel function used to estimate. This function needs to return a gram matrix.
-
param_grid_precomputed : dictionary
-
Dictionary with names (string) of parameters used to calculate gram -matrices as keys and lists of parameter settings to try as values. This -enables searching over any sequence of parameter settings. Params with -length 1 will be omitted.
-
param_grid : dictionary
-
Dictionary with names (string) of parameters used as penelties as keys -and lists of parameter settings to try as values. This enables -searching over any sequence of parameter settings. Params with length 1 -will be omitted.
-
model_type : string
-
Type of the problem, can be ‘regression’ or ‘classification’.
-
NUM_TRIALS : integer
-
Number of random trials of outer cv loop. The default is 30.
-
datafile_y : string
-
Path of file storing y data. This parameter is optional depending on -the given dataset file.
-
extra_params : dict
-
Extra parameters for loading dataset. See function pygraph.utils. -graphfiles.loadDataset for detail.
-
ds_name : string
-
Name of the dataset.
-
n_jobs : int
-
Number of jobs for parallelization.
-
read_gm_from_file : boolean
-
Whether gram matrices are loaded from a file.
-
-
>>> import numpy as np
->>> import sys
->>> sys.path.insert(0, "../")
->>> from pygraph.utils.model_selection_precomputed import model_selection_for_precomputed_kernel
->>> from pygraph.kernels.untilHPathKernel import untilhpathkernel
->>>
->>> datafile = '../datasets/MUTAG/MUTAG_A.txt'
->>> estimator = untilhpathkernel
->>> param_grid_precomputed = {’depth’:  np.linspace(1, 10, 10), ’k_func’:
-        [’MinMax’, ’tanimoto’], ’compute_method’:  [’trie’]}
->>> # ’C’ for classification problems and ’alpha’ for regression problems.
->>> param_grid = [{’C’: np.logspace(-10, 10, num=41, base=10)}, {’alpha’:
-        np.logspace(-10, 10, num=41, base=10)}]
->>>
->>> model_selection_for_precomputed_kernel(datafile, estimator, 
-        param_grid_precomputed, param_grid[0], 'classification', ds_name=’MUTAG’)
-
-
-
- -
-
-pygraph.utils.model_selection_precomputed.parallel_trial_do(param_list_pre_revised, param_list, y, model_type, trial)[source]
-
- -
-
-pygraph.utils.model_selection_precomputed.printResultsInTable(param_list, param_list_pre_revised, average_val_scores, std_val_scores, average_perf_scores, std_perf_scores, average_train_scores, std_train_scores, gram_matrix_time, model_type, verbose)[source]
-
- -
-
-pygraph.utils.model_selection_precomputed.read_gram_matrices_from_file(results_dir, ds_name)[source]
-
- -
-
-pygraph.utils.model_selection_precomputed.trial_do(param_list_pre_revised, param_list, gram_matrices, y, model_type, trial)[source]
-
- -
-
-

pygraph.utils.parallel module

-

Created on Tue Dec 11 11:39:46 2018 -Parallel aid functions. -@author: ljia

-
-
-pygraph.utils.parallel.parallel_gm(func, Kmatrix, Gn, init_worker=None, glbv=None, method='imap_unordered', n_jobs=None, chunksize=None, verbose=True)[source]
-
- -
-
-pygraph.utils.parallel.parallel_me(func, func_assign, var_to_assign, itr, len_itr=None, init_worker=None, glbv=None, method=None, n_jobs=None, chunksize=None, itr_desc='', verbose=True)[source]
-
- -
-
-

pygraph.utils.trie module

-

Created on Wed Jan 30 10:48:49 2019

-

Trie (prefix tree) -@author: ljia -@references:

-
-
-
-
-class pygraph.utils.trie.Trie[source]
-

Bases: object

-
-
-deleteWord(word)[source]
-
- -
-
-getNode()[source]
-
- -
-
-insertWord(word)[source]
-
- -
-
-load_from_json(file_name)[source]
-
- -
-
-load_from_pickle(file_name)[source]
-
- -
-
-save_to_json(file_name)[source]
-
- -
-
-save_to_pickle(file_name)[source]
-
- -
-
-searchWord(word)[source]
-
- -
-
-searchWordPrefix(word)[source]
-
- -
-
-to_json()[source]
-
- -
- -
-
-

pygraph.utils.utils module

-
-
-pygraph.utils.utils.direct_product(G1, G2, node_label, edge_label)[source]
-

Return the direct/tensor product of directed graphs G1 and G2.

-
-
G1, G2 : NetworkX graph
-
The original graphs.
-
node_label : string
-
node attribute used as label. The default node label is ‘atom’.
-
edge_label : string
-
edge attribute used as label. The default edge label is ‘bond_type’.
-
-
-
gt : NetworkX graph
-
The direct product graph of G1 and G2.
-
-

This method differs from networkx.tensor_product in that this method only adds nodes and edges in G1 and G2 that have the same labels to the direct product graph.

-

[1] Thomas Gärtner, Peter Flach, and Stefan Wrobel. On graph kernels: Hardness results and efficient alternatives. Learning Theory and Kernel Machines, pages 129–143, 2003.

-
- -
-
-pygraph.utils.utils.floydTransformation(G, edge_weight=None)[source]
-

Transform graph G to its corresponding shortest-paths graph using Floyd-transformation.

-
-
G : NetworkX graph
-
The graph to be tramsformed.
-
edge_weight : string
-
edge attribute corresponding to the edge weight. The default edge weight is bond_type.
-
-
-
S : NetworkX graph
-
The shortest-paths graph corresponding to G.
-
-

[1] Borgwardt KM, Kriegel HP. Shortest-path kernels on graphs. InData Mining, Fifth IEEE International Conference on 2005 Nov 27 (pp. 8-pp). IEEE.

-
- -
-
-pygraph.utils.utils.getSPGraph(G, edge_weight=None)[source]
-

Transform graph G to its corresponding shortest-paths graph.

-
-
G : NetworkX graph
-
The graph to be tramsformed.
-
edge_weight : string
-
edge attribute corresponding to the edge weight.
-
-
-
S : NetworkX graph
-
The shortest-paths graph corresponding to G.
-
-

For an input graph G, its corresponding shortest-paths graph S contains the same set of nodes as G, while there exists an edge between all nodes in S which are connected by a walk in G. Every edge in S between two nodes is labeled by the shortest distance between these two nodes.

-

[1] Borgwardt KM, Kriegel HP. Shortest-path kernels on graphs. InData Mining, Fifth IEEE International Conference on 2005 Nov 27 (pp. 8-pp). IEEE.

-
- -
-
-pygraph.utils.utils.getSPLengths(G1)[source]
-
- -
-
-pygraph.utils.utils.get_edge_labels(Gn, edge_label)[source]
-

Get edge labels of dataset Gn.

-
- -
-
-pygraph.utils.utils.get_node_labels(Gn, node_label)[source]
-

Get node labels of dataset Gn.

-
- -
-
-pygraph.utils.utils.graph_deepcopy(G)[source]
-

Deep copy a graph, including deep copy of all nodes, edges and -attributes of the graph, nodes and edges.

-

It is the same as the NetworkX function graph.copy(), as far as I know.

-
- -
-
-pygraph.utils.utils.graph_isIdentical(G1, G2)[source]
-

Check if two graphs are identical, including: same nodes, edges, node -labels/attributes, edge labels/attributes.

-
    -
  1. The type of graphs has to be the same.
  2. -
  3. Global/Graph attributes are neglected as they may contain names for graphs.
  4. -
-
- -
-
-pygraph.utils.utils.untotterTransformation(G, node_label, edge_label)[source]
-

Transform graph G according to Mahé et al.’s work to filter out tottering patterns of marginalized kernel and tree pattern kernel.

-
-
G : NetworkX graph
-
The graph to be tramsformed.
-
node_label : string
-
node attribute used as label. The default node label is ‘atom’.
-
edge_label : string
-
edge attribute used as label. The default edge label is ‘bond_type’.
-
-
-
gt : NetworkX graph
-
The transformed graph corresponding to G.
-
-

[1] Pierre Mahé, Nobuhisa Ueda, Tatsuya Akutsu, Jean-Luc Perret, and Jean-Philippe Vert. Extensions of marginalized graph kernels. In Proceedings of the twenty-first international conference on Machine learning, page 70. ACM, 2004.

-
- -
-
-

Module contents

-

Pygraph - utils module

-
-
Implement some methods to manage graphs
-
graphfiles.py : load .gxl and .ct files -utils.py : compute some properties on networkX graphs
-
-
-
- - -
- -
- - -
-
- -
- -
- - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_build/html/search.html b/docs/_build/html/search.html deleted file mode 100644 index 191258f..0000000 --- a/docs/_build/html/search.html +++ /dev/null @@ -1,208 +0,0 @@ - - - - - - - - - - - Search — py-graph documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - -
- - - - - -
- -
- - - - - - - - - - - - - - - - - -
- -
    - -
  • Docs »
  • - -
  • Search
  • - - -
  • - - - -
  • - -
- - -
-
-
-
- - - - -
- -
- -
- -
- - -
-
- -
- -
- - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_build/html/searchindex.js b/docs/_build/html/searchindex.js deleted file mode 100644 index 2183d83..0000000 --- a/docs/_build/html/searchindex.js +++ /dev/null @@ -1 +0,0 @@ -Search.setIndex({docnames:["index","modules","pygraph","pygraph.utils"],envversion:{"sphinx.domains.c":1,"sphinx.domains.changeset":1,"sphinx.domains.cpp":1,"sphinx.domains.javascript":1,"sphinx.domains.math":1,"sphinx.domains.python":1,"sphinx.domains.rst":1,"sphinx.domains.std":1,"sphinx.ext.viewcode":1,sphinx:55},filenames:["index.rst","modules.rst","pygraph.rst","pygraph.utils.rst"],objects:{"":{pygraph:[2,0,0,"-"]},"pygraph.utils":{graphdataset:[3,0,0,"-"],graphfiles:[3,0,0,"-"],isNotebook:[3,0,0,"-"],kernels:[3,0,0,"-"],logger2file:[3,0,0,"-"],model_selection_precomputed:[3,0,0,"-"],parallel:[3,0,0,"-"],trie:[3,0,0,"-"],utils:[3,0,0,"-"]},"pygraph.utils.graphdataset":{get_dataset_attributes:[3,1,1,""]},"pygraph.utils.graphfiles":{loadCT:[3,1,1,""],loadDataset:[3,1,1,""],loadFromDS:[3,1,1,""],loadFromXML:[3,1,1,""],loadGXL:[3,1,1,""],loadMAT:[3,1,1,""],loadSDF:[3,1,1,""],loadTXT:[3,1,1,""],saveDataset:[3,1,1,""],saveGXL:[3,1,1,""]},"pygraph.utils.isNotebook":{isNotebook:[3,1,1,""]},"pygraph.utils.kernels":{deltakernel:[3,1,1,""],gaussiankernel:[3,1,1,""],kernelproduct:[3,1,1,""],kernelsum:[3,1,1,""],linearkernel:[3,1,1,""],polynomialkernel:[3,1,1,""]},"pygraph.utils.logger2file":{Logger:[3,2,1,""]},"pygraph.utils.logger2file.Logger":{flush:[3,3,1,""],write:[3,3,1,""]},"pygraph.utils.model_selection_precomputed":{compute_gram_matrices:[3,1,1,""],model_selection_for_precomputed_kernel:[3,1,1,""],parallel_trial_do:[3,1,1,""],printResultsInTable:[3,1,1,""],read_gram_matrices_from_file:[3,1,1,""],trial_do:[3,1,1,""]},"pygraph.utils.parallel":{parallel_gm:[3,1,1,""],parallel_me:[3,1,1,""]},"pygraph.utils.trie":{Trie:[3,2,1,""]},"pygraph.utils.trie.Trie":{deleteWord:[3,3,1,""],getNode:[3,3,1,""],insertWord:[3,3,1,""]
,load_from_json:[3,3,1,""],load_from_pickle:[3,3,1,""],save_to_json:[3,3,1,""],save_to_pickle:[3,3,1,""],searchWord:[3,3,1,""],searchWordPrefix:[3,3,1,""],to_json:[3,3,1,""]},"pygraph.utils.utils":{direct_product:[3,1,1,""],floydTransformation:[3,1,1,""],getSPGraph:[3,1,1,""],getSPLengths:[3,1,1,""],get_edge_labels:[3,1,1,""],get_node_labels:[3,1,1,""],graph_deepcopy:[3,1,1,""],graph_isIdentical:[3,1,1,""],untotterTransformation:[3,1,1,""]},pygraph:{utils:[3,0,0,"-"]}},objnames:{"0":["py","module","Python module"],"1":["py","function","Python function"],"2":["py","class","Python class"],"3":["py","method","Python method"]},objtypes:{"0":"py:module","1":"py:function","2":"py:class","3":"py:method"},terms:{"20th":3,"2ahukewivhasdjstlahvhx4ukhczha8gqfjajegqiarac":3,"2fctfile":3,"2fkappler":3,"2fmeet":3,"2fmug05":3,"2fwww":3,"3p0lpzrokox":3,"boolean":3,"class":3,"default":3,"final":3,"float":3,"function":3,"g\u00e4rtner":3,"import":3,"int":3,"mah\u00e9":3,"return":3,"true":3,"try":3,"while":3,For:3,The:3,These:3,accord:3,acm:3,add:3,aid:3,akutsu:3,all:3,allow:2,alpha:3,altern:3,ani:3,aovvaw1cdnrrmmclkfpqodlf2in:3,appli:3,arrai:3,asia:3,aspx:3,atom:3,attr:3,attr_nam:3,attribut:3,author:3,automat:3,avail:3,ave_edge_num:3,ave_fill_factor:3,ave_node_degre:3,ave_node_num:3,averag:3,average_perf_scor:3,average_train_scor:3,average_val_scor:3,background:3,base:3,benoit:3,between:[2,3],binder:2,bond:3,bond_typ:3,borgwardt:3,both:3,build:3,c_ext:2,calcul:3,can:3,check:3,chemic:3,choic:3,chunksiz:3,class_numb:3,classif:3,code:[2,3],coeffici:3,column:3,com:3,comment:3,compar:3,compat:2,comput:[2,3],compute_gram_matric:3,compute_method:3,confer:3,connect:3,contain:[2,3],content:1,copi:3,correspond:3,creat:3,cxl:3,cyclic:3,d11:3,d12:3,d21:3,d22:3,data:3,datafil:3,datafile_i:3,dataset:3,dataset_s:3,daylight:3,dec:3,deep:3,defin:3,degre:3,deleteword:3,delta:3,deltakernel:3,depend:3,depth:3,describ:3,design:3,detail:3,dict:3,dictionari:3,differ:3,dimens:3,direct:3,direct_product:3,dire
ctori:3,dirname_dataset:3,discript:3,distanc:[2,3],divers:2,dortmund:3,download:3,ds_name:3,dunction:3,dure:3,each:3,edg:3,edge_attr_dim:3,edge_label:3,edge_label_num:3,edge_weight:3,edit:2,effici:3,egd:3,either:3,enabl:3,esrc:3,estim:3,everi:3,exampl:[2,3],exchang:3,execut:3,exist:3,exp:3,extens:3,extra:3,extra_param:3,extract:3,factor:3,fals:3,faq:3,far:3,fifth:3,file:3,file_nam:3,filenam:3,filename_i:3,fill:3,filter:3,first:3,fit:3,flach:3,floyd:3,floydtransform:3,flush:3,follow:3,format:3,fri:3,from:3,func:3,func_assign:3,gamma:3,gaussian:3,gaussiankernel:3,ged:2,get:3,get_dataset_attribut:3,get_edge_label:3,get_node_label:3,getnod:3,getspgraph:3,getsplength:3,gfile:3,gformat:3,given:3,glbv:3,global:3,googl:3,gram:3,gram_matric:3,gram_matrix_tim:3,graph:[2,3],graph_deepcopi:3,graph_isident:3,graphdataset:[1,2],graphfil:[1,2],graphkerneldataset:3,group:3,guid:3,guidanc:3,gupro:3,gxl:3,hard:3,has:3,have:3,here:3,html:3,http:3,ident:3,ieee:3,imap_unord:3,implement:3,includ:3,indata:3,index:0,indic:3,inform:3,init_work:3,inokuchi:3,input:3,insert:3,insertword:3,integ:3,intern:3,introduct:3,ipython:3,ipython_log:[1,2],is_direct:3,isnotebook:[1,2],itr:3,itr_desc:3,its:3,jan:3,jean:3,job:3,k_func:3,kashima:3,kei:3,kernel:[1,2],kernelproduct:3,kernelsum:3,kind:3,kmatrix:3,know:3,kriegel:3,label:3,lamda1:3,lamda2:3,lamda:3,languag:3,learn:3,len_itr:3,length:3,librari:2,like:3,line:3,linear:3,linearkernel:3,linspac:3,list:3,ljia:3,load:3,load_from_json:3,load_from_pickl:3,loadct:3,loaddataset:3,loadfromd:3,loadfromxml:3,loadgxl:3,loadmat:3,loadsdf:3,loadtxt:3,logger2fil:[1,2],logger:3,logspac:3,loop:3,ls11:3,luc:3,machin:3,mai:3,manag:3,mandatori:3,margin:3,mat:3,matlab:3,matric:3,matrix:3,max_edge_num:3,max_fill_factor:3,max_node_degre:3,max_node_num:3,maximum:3,measur:2,messag:3,method:3,min_edge_num:3,min_fill_factor:3,min_node_degre:3,min_node_num:3,mine:3,minimum:3,minmax:3,mitarbeit:3,mlcb:3,model:3,model_selection_for_precomputed_kernel:3,model_selection_precomput:
[1,2],model_typ:3,modul:[0,1],mol:3,molecul:3,more:3,morri:3,mpg:3,mutag:3,mutag_a:3,n_featur:3,n_job:3,name:3,neccessari:3,need:3,neglect:3,nest:3,networkx:[2,3],nino:3,nlp:3,nobuhisa:3,node:3,node_attr_dim:3,node_label:3,node_label_num:3,non:3,none:3,nonlinear:3,note:3,notebook:[2,3],nov:3,num:3,num_trial:3,number:3,number_of_edg:3,number_of_nod:3,numpi:3,object:3,obtain:3,omit:3,onli:3,option:3,origin:3,otherwis:3,out:3,outer:3,over:3,packag:1,page:[0,3],pair:3,parallel:[1,2],parallel_gm:3,parallel_m:3,parallel_trial_do:3,param:3,param_grid:3,param_grid_precomput:3,param_list:3,param_list_pre_revis:3,param_list_precomput:3,paramet:3,part:3,path:3,pattern:3,pdf:3,penelti:3,perform:3,perret:3,peter:3,philipp:3,pierr:3,pk_mlj_data:3,polynomi:3,polynomialkernel:3,possibl:3,precomput:3,prefix:3,print:3,printresultsint:3,problem:3,proceed:3,process:3,product:3,progenesi:3,properti:3,pygraph:1,python:3,random:3,rbf:3,rct:3,read:3,read_gm_from_fil:3,read_gram_matrices_from_fil:3,readm:3,refer:3,regress:3,repres:3,requir:3,result:3,results_dir:3,same:3,save:3,save_to_json:3,save_to_pickl:3,savedataset:3,savegxl:3,scratch:3,sdf:3,search:[0,3],searchword:3,searchwordprefix:3,see:3,select:3,separ:3,sequenc:3,set:3,shortest:3,similar:[2,3],some:3,sourc:3,special:3,staff:3,state:3,std_perf_scor:3,std_train_scor:3,std_val_scor:3,stefan:3,stereo:3,store:3,str_fw:3,string:3,struct:3,structur:3,studio:3,sub:[2,3],submodul:[1,2],subpackag:1,substructur:3,sum:3,support:3,svm:2,symbol:3,sys:3,system:3,tabl:3,tanimoto:3,target:3,tatsuya:3,tensor:3,tensor_product:3,test:3,thei:3,them:3,theori:3,thi:[2,3],thoma:3,those:3,tini:3,to_json:3,totter:3,tramsform:3,transform:3,tree:3,trial:3,trial_do:3,trie:[1,2],tsuda:3,tue:3,tuebingen:3,twenti:3,two:3,txt:3,type:3,typic:3,ueda:3,unit:3,unknown:3,untilhpathkernel:3,untottertransform:3,url:3,used:3,user:3,usg:3,using:[2,3],util:[1,2],valu:3,var_to_assign:3,vector:3,ved:3,verbos:3,version:3,vert:3,vertex:3,vertic:3,viblo:3,wai:3,walk:3,washingt
on:3,web:3,wed:3,weight:3,when:3,where:3,whether:3,which:3,who:3,whose:3,word:3,work:3,write:3,wrobel:3,www:3,xparam:3},titles:["Welcome to py-graph\u2019s documentation!","py-graph","pygraph package","pygraph.utils package"],titleterms:{content:[2,3],document:0,graph:[0,1],graphdataset:3,graphfil:3,indic:0,ipython_log:3,isnotebook:3,kernel:3,logger2fil:3,model_selection_precomput:3,modul:[2,3],packag:[2,3],parallel:3,pygraph:[2,3],submodul:3,subpackag:2,tabl:0,trie:3,util:3,welcom:0}}) \ No newline at end of file diff --git a/docs/_build/latex/LICRcyr2utf8.xdy b/docs/_build/latex/LICRcyr2utf8.xdy deleted file mode 100644 index a9ca1c8..0000000 --- a/docs/_build/latex/LICRcyr2utf8.xdy +++ /dev/null @@ -1,101 +0,0 @@ -;; -*- coding: utf-8; mode: Lisp; -*- -;; style file for xindy -;; filename: LICRcyr2utf8.xdy -;; description: style file for xindy which maps back LaTeX Internal -;; Character Representation of Cyrillic to utf-8 -;; usage: for use with pdflatex produced .idx files. -;; Contributed by the Sphinx team, July 2018. 
-(merge-rule "\IeC {\'\CYRG }" "Ѓ" :string) -(merge-rule "\IeC {\'\CYRK }" "Ќ" :string) -(merge-rule "\IeC {\'\cyrg }" "ѓ" :string) -(merge-rule "\IeC {\'\cyrk }" "ќ" :string) -(merge-rule "\IeC {\CYRA }" "А" :string) -(merge-rule "\IeC {\CYRB }" "Б" :string) -(merge-rule "\IeC {\CYRC }" "Ц" :string) -(merge-rule "\IeC {\CYRCH }" "Ч" :string) -(merge-rule "\IeC {\CYRD }" "Д" :string) -(merge-rule "\IeC {\CYRDJE }" "Ђ" :string) -(merge-rule "\IeC {\CYRDZE }" "Ѕ" :string) -(merge-rule "\IeC {\CYRDZHE }" "Џ" :string) -(merge-rule "\IeC {\CYRE }" "Е" :string) -(merge-rule "\IeC {\CYREREV }" "Э" :string) -(merge-rule "\IeC {\CYRERY }" "Ы" :string) -(merge-rule "\IeC {\CYRF }" "Ф" :string) -(merge-rule "\IeC {\CYRG }" "Г" :string) -(merge-rule "\IeC {\CYRGUP }" "Ґ" :string) -(merge-rule "\IeC {\CYRH }" "Х" :string) -(merge-rule "\IeC {\CYRHRDSN }" "Ъ" :string) -(merge-rule "\IeC {\CYRI }" "И" :string) -(merge-rule "\IeC {\CYRIE }" "Є" :string) -(merge-rule "\IeC {\CYRII }" "І" :string) -(merge-rule "\IeC {\CYRISHRT }" "Й" :string) -(merge-rule "\IeC {\CYRJE }" "Ј" :string) -(merge-rule "\IeC {\CYRK }" "К" :string) -(merge-rule "\IeC {\CYRL }" "Л" :string) -(merge-rule "\IeC {\CYRLJE }" "Љ" :string) -(merge-rule "\IeC {\CYRM }" "М" :string) -(merge-rule "\IeC {\CYRN }" "Н" :string) -(merge-rule "\IeC {\CYRNJE }" "Њ" :string) -(merge-rule "\IeC {\CYRO }" "О" :string) -(merge-rule "\IeC {\CYRP }" "П" :string) -(merge-rule "\IeC {\CYRR }" "Р" :string) -(merge-rule "\IeC {\CYRS }" "С" :string) -(merge-rule "\IeC {\CYRSFTSN }" "Ь" :string) -(merge-rule "\IeC {\CYRSH }" "Ш" :string) -(merge-rule "\IeC {\CYRSHCH }" "Щ" :string) -(merge-rule "\IeC {\CYRT }" "Т" :string) -(merge-rule "\IeC {\CYRTSHE }" "Ћ" :string) -(merge-rule "\IeC {\CYRU }" "У" :string) -(merge-rule "\IeC {\CYRUSHRT }" "Ў" :string) -(merge-rule "\IeC {\CYRV }" "В" :string) -(merge-rule "\IeC {\CYRYA }" "Я" :string) -(merge-rule "\IeC {\CYRYI }" "Ї" :string) -(merge-rule "\IeC {\CYRYO }" "Ё" :string) 
-(merge-rule "\IeC {\CYRYU }" "Ю" :string) -(merge-rule "\IeC {\CYRZ }" "З" :string) -(merge-rule "\IeC {\CYRZH }" "Ж" :string) -(merge-rule "\IeC {\cyra }" "а" :string) -(merge-rule "\IeC {\cyrb }" "б" :string) -(merge-rule "\IeC {\cyrc }" "ц" :string) -(merge-rule "\IeC {\cyrch }" "ч" :string) -(merge-rule "\IeC {\cyrd }" "д" :string) -(merge-rule "\IeC {\cyrdje }" "ђ" :string) -(merge-rule "\IeC {\cyrdze }" "ѕ" :string) -(merge-rule "\IeC {\cyrdzhe }" "џ" :string) -(merge-rule "\IeC {\cyre }" "е" :string) -(merge-rule "\IeC {\cyrerev }" "э" :string) -(merge-rule "\IeC {\cyrery }" "ы" :string) -(merge-rule "\IeC {\cyrf }" "ф" :string) -(merge-rule "\IeC {\cyrg }" "г" :string) -(merge-rule "\IeC {\cyrgup }" "ґ" :string) -(merge-rule "\IeC {\cyrh }" "х" :string) -(merge-rule "\IeC {\cyrhrdsn }" "ъ" :string) -(merge-rule "\IeC {\cyri }" "и" :string) -(merge-rule "\IeC {\cyrie }" "є" :string) -(merge-rule "\IeC {\cyrii }" "і" :string) -(merge-rule "\IeC {\cyrishrt }" "й" :string) -(merge-rule "\IeC {\cyrje }" "ј" :string) -(merge-rule "\IeC {\cyrk }" "к" :string) -(merge-rule "\IeC {\cyrl }" "л" :string) -(merge-rule "\IeC {\cyrlje }" "љ" :string) -(merge-rule "\IeC {\cyrm }" "м" :string) -(merge-rule "\IeC {\cyrn }" "н" :string) -(merge-rule "\IeC {\cyrnje }" "њ" :string) -(merge-rule "\IeC {\cyro }" "о" :string) -(merge-rule "\IeC {\cyrp }" "п" :string) -(merge-rule "\IeC {\cyrr }" "р" :string) -(merge-rule "\IeC {\cyrs }" "с" :string) -(merge-rule "\IeC {\cyrsftsn }" "ь" :string) -(merge-rule "\IeC {\cyrsh }" "ш" :string) -(merge-rule "\IeC {\cyrshch }" "щ" :string) -(merge-rule "\IeC {\cyrt }" "т" :string) -(merge-rule "\IeC {\cyrtshe }" "ћ" :string) -(merge-rule "\IeC {\cyru }" "у" :string) -(merge-rule "\IeC {\cyrushrt }" "ў" :string) -(merge-rule "\IeC {\cyrv }" "в" :string) -(merge-rule "\IeC {\cyrya }" "я" :string) -(merge-rule "\IeC {\cyryi }" "ї" :string) -(merge-rule "\IeC {\cyryo }" "ё" :string) -(merge-rule "\IeC {\cyryu }" "ю" :string) -(merge-rule 
"\IeC {\cyrz }" "з" :string) -(merge-rule "\IeC {\cyrzh }" "ж" :string) diff --git a/docs/_build/latex/LICRlatin2utf8.xdy b/docs/_build/latex/LICRlatin2utf8.xdy deleted file mode 100644 index 60a24b4..0000000 --- a/docs/_build/latex/LICRlatin2utf8.xdy +++ /dev/null @@ -1,236 +0,0 @@ -;; style file for xindy -;; filename: LICRlatin2utf8.xdy -;; description: style file for xindy which maps back LaTeX Internal -;; Character Representation of letters (as arising in .idx index -;; file) to UTF-8 encoding for correct sorting by xindy. -;; usage: for use with the pdflatex engine, -;; *not* for use with xelatex or lualatex. -;; -;; This is based upon xindy's distributed file tex/inputenc/utf8.xdy. -;; The modifications include: -;; -;; - Updates for compatibility with current LaTeX macro encoding. -;; -;; - Systematic usage of the \IeC {...} mark-up, because mark-up in -;; tex/inputenc/utf8.xdy was using it on seemingly random basis, and -;; Sphinx coercing of xindy usability for both Latin and Cyrillic scripts -;; with pdflatex requires its systematic presence here. -;; -;; - Support for some extra letters: Ÿ, Ŋ, ŋ, Œ, œ, IJ, ij, ȷ and ẞ. -;; -;; Indeed Sphinx needs to support for pdflatex engine all Unicode letters -;; available in TeX T1 font encoding. The above letters are found in -;; that encoding but not in the Latin1, 2, 3 charsets which are those -;; covered by original tex/inputenc/utf8.xdy. -;; -;; - There is a problem that ȷ is not supported out-of-the box by LaTeX -;; with inputenc, one must add explicitely -;; \DeclareUnicodeCharacter{0237}{\j} -;; to preamble of LaTeX document. However this character is not supported -;; by the TeX "times" font used by default by Sphinx for pdflatex engine. -;; -;; - ẞ needs \DeclareUnicodeCharacter{1E9E}{\SS} (but ß needs no extra set-up). 
-;; -;; - U+02DB (˛) and U+02D9 (˙) are also not supported by inputenc -;; out of the box and require -;; \DeclareUnicodeCharacter{02DB}{\k{}} -;; \DeclareUnicodeCharacter{02D9}{\.{}} -;; to be added to preamble. -;; -;; - U+0127 ħ and U+0126 Ħ are absent from TeX T1+TS1 font encodings. -;; -;; - Characters Ŋ and ŋ are not supported by TeX font "times" used by -;; default by Sphinx for pdflatex engine but they are supported by -;; some TeX fonts, in particular by the default LaTeX font for T1 -;; encoding. -;; -;; - " and ~ must be escaped as ~" and resp. ~~ in xindy merge rules. -;; -;; Contributed by the Sphinx team, July 2018. -;; -;; See sphinx.xdy for superior figures, as they are escaped by LaTeX writer. -(merge-rule "\IeC {\textonesuperior }" "¹" :string) -(merge-rule "\IeC {\texttwosuperior }" "²" :string) -(merge-rule "\IeC {\textthreesuperior }" "³" :string) -(merge-rule "\IeC {\'a}" "á" :string) -(merge-rule "\IeC {\'A}" "Á" :string) -(merge-rule "\IeC {\`a}" "à" :string) -(merge-rule "\IeC {\`A}" "À" :string) -(merge-rule "\IeC {\^a}" "â" :string) -(merge-rule "\IeC {\^A}" "Â" :string) -(merge-rule "\IeC {\~"a}" "ä" :string) -(merge-rule "\IeC {\~"A}" "Ä" :string) -(merge-rule "\IeC {\~~a}" "ã" :string) -(merge-rule "\IeC {\~~A}" "Ã" :string) -(merge-rule "\IeC {\c c}" "ç" :string) -(merge-rule "\IeC {\c C}" "Ç" :string) -(merge-rule "\IeC {\'c}" "ć" :string) -(merge-rule "\IeC {\'C}" "Ć" :string) -(merge-rule "\IeC {\^c}" "ĉ" :string) -(merge-rule "\IeC {\^C}" "Ĉ" :string) -(merge-rule "\IeC {\.c}" "ċ" :string) -(merge-rule "\IeC {\.C}" "Ċ" :string) -(merge-rule "\IeC {\c s}" "ş" :string) -(merge-rule "\IeC {\c S}" "Ş" :string) -(merge-rule "\IeC {\c t}" "ţ" :string) -(merge-rule "\IeC {\c T}" "Ţ" :string) -(merge-rule "\IeC {\-}" "­" :string); soft hyphen -(merge-rule "\IeC {\textdiv }" "÷" :string) -(merge-rule "\IeC {\'e}" "é" :string) -(merge-rule "\IeC {\'E}" "É" :string) -(merge-rule "\IeC {\`e}" "è" :string) -(merge-rule "\IeC {\`E}" "È" 
:string) -(merge-rule "\IeC {\^e}" "ê" :string) -(merge-rule "\IeC {\^E}" "Ê" :string) -(merge-rule "\IeC {\~"e}" "ë" :string) -(merge-rule "\IeC {\~"E}" "Ë" :string) -(merge-rule "\IeC {\^g}" "ĝ" :string) -(merge-rule "\IeC {\^G}" "Ĝ" :string) -(merge-rule "\IeC {\.g}" "ġ" :string) -(merge-rule "\IeC {\.G}" "Ġ" :string) -(merge-rule "\IeC {\^h}" "ĥ" :string) -(merge-rule "\IeC {\^H}" "Ĥ" :string) -(merge-rule "\IeC {\H o}" "ő" :string) -(merge-rule "\IeC {\H O}" "Ő" :string) -(merge-rule "\IeC {\textacutedbl }" "˝" :string) -(merge-rule "\IeC {\H u}" "ű" :string) -(merge-rule "\IeC {\H U}" "Ű" :string) -(merge-rule "\IeC {\ae }" "æ" :string) -(merge-rule "\IeC {\AE }" "Æ" :string) -(merge-rule "\IeC {\textcopyright }" "©" :string) -(merge-rule "\IeC {\c \ }" "¸" :string) -(merge-rule "\IeC {\dh }" "ð" :string) -(merge-rule "\IeC {\DH }" "Ð" :string) -(merge-rule "\IeC {\dj }" "đ" :string) -(merge-rule "\IeC {\DJ }" "Đ" :string) -(merge-rule "\IeC {\guillemotleft }" "«" :string) -(merge-rule "\IeC {\guillemotright }" "»" :string) -(merge-rule "\IeC {\'\i }" "í" :string) -(merge-rule "\IeC {\`\i }" "ì" :string) -(merge-rule "\IeC {\^\i }" "î" :string) -(merge-rule "\IeC {\~"\i }" "ï" :string) -(merge-rule "\IeC {\i }" "ı" :string) -(merge-rule "\IeC {\^\j }" "ĵ" :string) -(merge-rule "\IeC {\k {}}" "˛" :string) -(merge-rule "\IeC {\l }" "ł" :string) -(merge-rule "\IeC {\L }" "Ł" :string) -(merge-rule "\IeC {\nobreakspace }" " " :string) -(merge-rule "\IeC {\o }" "ø" :string) -(merge-rule "\IeC {\O }" "Ø" :string) -(merge-rule "\IeC {\textsterling }" "£" :string) -(merge-rule "\IeC {\textparagraph }" "¶" :string) -(merge-rule "\IeC {\ss }" "ß" :string) -(merge-rule "\IeC {\textsection }" "§" :string) -(merge-rule "\IeC {\textbrokenbar }" "¦" :string) -(merge-rule "\IeC {\textcent }" "¢" :string) -(merge-rule "\IeC {\textcurrency }" "¤" :string) -(merge-rule "\IeC {\textdegree }" "°" :string) -(merge-rule "\IeC {\textexclamdown }" "¡" :string) -(merge-rule "\IeC 
{\texthbar }" "ħ" :string) -(merge-rule "\IeC {\textHbar }" "Ħ" :string) -(merge-rule "\IeC {\textonehalf }" "½" :string) -(merge-rule "\IeC {\textonequarter }" "¼" :string) -(merge-rule "\IeC {\textordfeminine }" "ª" :string) -(merge-rule "\IeC {\textordmasculine }" "º" :string) -(merge-rule "\IeC {\textperiodcentered }" "·" :string) -(merge-rule "\IeC {\textquestiondown }" "¿" :string) -(merge-rule "\IeC {\textregistered }" "®" :string) -(merge-rule "\IeC {\textthreequarters }" "¾" :string) -(merge-rule "\IeC {\textyen }" "¥" :string) -(merge-rule "\IeC {\th }" "þ" :string) -(merge-rule "\IeC {\TH }" "Þ" :string) -(merge-rule "\IeC {\'I}" "Í" :string) -(merge-rule "\IeC {\`I}" "Ì" :string) -(merge-rule "\IeC {\^I}" "Î" :string) -(merge-rule "\IeC {\~"I}" "Ï" :string) -(merge-rule "\IeC {\.I}" "İ" :string) -(merge-rule "\IeC {\^J}" "Ĵ" :string) -(merge-rule "\IeC {\k a}" "ą" :string) -(merge-rule "\IeC {\k A}" "Ą" :string) -(merge-rule "\IeC {\k e}" "ę" :string) -(merge-rule "\IeC {\k E}" "Ę" :string) -(merge-rule "\IeC {\'l}" "ĺ" :string) -(merge-rule "\IeC {\'L}" "Ĺ" :string) -(merge-rule "\IeC {\textlnot }" "¬" :string) -(merge-rule "\IeC {\textmu }" "µ" :string) -(merge-rule "\IeC {\'n}" "ń" :string) -(merge-rule "\IeC {\'N}" "Ń" :string) -(merge-rule "\IeC {\~~n}" "ñ" :string) -(merge-rule "\IeC {\~~N}" "Ñ" :string) -(merge-rule "\IeC {\'o}" "ó" :string) -(merge-rule "\IeC {\'O}" "Ó" :string) -(merge-rule "\IeC {\`o}" "ò" :string) -(merge-rule "\IeC {\`O}" "Ò" :string) -(merge-rule "\IeC {\^o}" "ô" :string) -(merge-rule "\IeC {\^O}" "Ô" :string) -(merge-rule "\IeC {\~"o}" "ö" :string) -(merge-rule "\IeC {\~"O}" "Ö" :string) -(merge-rule "\IeC {\~~o}" "õ" :string) -(merge-rule "\IeC {\~~O}" "Õ" :string) -(merge-rule "\IeC {\textpm }" "±" :string) -(merge-rule "\IeC {\r a}" "å" :string) -(merge-rule "\IeC {\r A}" "Å" :string) -(merge-rule "\IeC {\'r}" "ŕ" :string) -(merge-rule "\IeC {\'R}" "Ŕ" :string) -(merge-rule "\IeC {\r u}" "ů" :string) -(merge-rule "\IeC 
{\r U}" "Ů" :string) -(merge-rule "\IeC {\'s}" "ś" :string) -(merge-rule "\IeC {\'S}" "Ś" :string) -(merge-rule "\IeC {\^s}" "ŝ" :string) -(merge-rule "\IeC {\^S}" "Ŝ" :string) -(merge-rule "\IeC {\textasciidieresis }" "¨" :string) -(merge-rule "\IeC {\textasciimacron }" "¯" :string) -(merge-rule "\IeC {\.{}}" "˙" :string) -(merge-rule "\IeC {\textasciiacute }" "´" :string) -(merge-rule "\IeC {\texttimes }" "×" :string) -(merge-rule "\IeC {\u a}" "ă" :string) -(merge-rule "\IeC {\u A}" "Ă" :string) -(merge-rule "\IeC {\u g}" "ğ" :string) -(merge-rule "\IeC {\u G}" "Ğ" :string) -(merge-rule "\IeC {\textasciibreve }" "˘" :string) -(merge-rule "\IeC {\'u}" "ú" :string) -(merge-rule "\IeC {\'U}" "Ú" :string) -(merge-rule "\IeC {\`u}" "ù" :string) -(merge-rule "\IeC {\`U}" "Ù" :string) -(merge-rule "\IeC {\^u}" "û" :string) -(merge-rule "\IeC {\^U}" "Û" :string) -(merge-rule "\IeC {\~"u}" "ü" :string) -(merge-rule "\IeC {\~"U}" "Ü" :string) -(merge-rule "\IeC {\u u}" "ŭ" :string) -(merge-rule "\IeC {\u U}" "Ŭ" :string) -(merge-rule "\IeC {\v c}" "č" :string) -(merge-rule "\IeC {\v C}" "Č" :string) -(merge-rule "\IeC {\v d}" "ď" :string) -(merge-rule "\IeC {\v D}" "Ď" :string) -(merge-rule "\IeC {\v e}" "ě" :string) -(merge-rule "\IeC {\v E}" "Ě" :string) -(merge-rule "\IeC {\v l}" "ľ" :string) -(merge-rule "\IeC {\v L}" "Ľ" :string) -(merge-rule "\IeC {\v n}" "ň" :string) -(merge-rule "\IeC {\v N}" "Ň" :string) -(merge-rule "\IeC {\v r}" "ř" :string) -(merge-rule "\IeC {\v R}" "Ř" :string) -(merge-rule "\IeC {\v s}" "š" :string) -(merge-rule "\IeC {\v S}" "Š" :string) -(merge-rule "\IeC {\textasciicaron }" "ˇ" :string) -(merge-rule "\IeC {\v t}" "ť" :string) -(merge-rule "\IeC {\v T}" "Ť" :string) -(merge-rule "\IeC {\v z}" "ž" :string) -(merge-rule "\IeC {\v Z}" "Ž" :string) -(merge-rule "\IeC {\'y}" "ý" :string) -(merge-rule "\IeC {\'Y}" "Ý" :string) -(merge-rule "\IeC {\~"y}" "ÿ" :string) -(merge-rule "\IeC {\'z}" "ź" :string) -(merge-rule "\IeC {\'Z}" "Ź" :string) 
-(merge-rule "\IeC {\.z}" "ż" :string) -(merge-rule "\IeC {\.Z}" "Ż" :string) -;; letters not in Latin1, 2, 3 but available in TeX T1 font encoding -(merge-rule "\IeC {\~"Y}" "Ÿ" :string) -(merge-rule "\IeC {\NG }" "Ŋ" :string) -(merge-rule "\IeC {\ng }" "ŋ" :string) -(merge-rule "\IeC {\OE }" "Œ" :string) -(merge-rule "\IeC {\oe }" "œ" :string) -(merge-rule "\IeC {\IJ }" "IJ" :string) -(merge-rule "\IeC {\ij }" "ij" :string) -(merge-rule "\IeC {\j }" "ȷ" :string) -(merge-rule "\IeC {\SS }" "ẞ" :string) diff --git a/docs/_build/latex/LatinRules.xdy b/docs/_build/latex/LatinRules.xdy deleted file mode 100644 index 99f14a2..0000000 --- a/docs/_build/latex/LatinRules.xdy +++ /dev/null @@ -1,607 +0,0 @@ -;; style file for xindy -;; filename: LatinRules.xdy -;; -;; It is based upon xindy's files lang/general/utf8.xdy and -;; lang/general/utf8-lang.xdy which implement -;; "a general sorting order for Western European languages" -;; -;; The aim for Sphinx is to be able to index in a Cyrillic document -;; also terms using the Latin alphabets, inclusive of letters -;; with diacritics. To this effect the xindy rules from lang/general -;; got manually re-coded to avoid collisions with the encoding -;; done by xindy for sorting words in Cyrillic languages, which was -;; observed not to use bytes with octal encoding 0o266 or higher. -;; -;; So here we use only 0o266 or higher bytes. -;; (Ŋ, ŋ, IJ, and ij are absent from -;; lang/general/utf8.xdy and not included here) -;; Contributed by the Sphinx team, 2018. 
- -(define-letter-group "A" :prefixes ("")) -(define-letter-group "B" :after "A" :prefixes ("")) -(define-letter-group "C" :after "B" :prefixes ("")) -(define-letter-group "D" :after "C" :prefixes ("")) -(define-letter-group "E" :after "D" :prefixes ("")) -(define-letter-group "F" :after "E" :prefixes ("")) -(define-letter-group "G" :after "F" :prefixes ("")) -(define-letter-group "H" :after "G" :prefixes ("")) -(define-letter-group "I" :after "H" :prefixes ("")) -(define-letter-group "J" :after "I" :prefixes ("")) -(define-letter-group "K" :after "J" :prefixes ("")) -(define-letter-group "L" :after "K" :prefixes ("")) -(define-letter-group "M" :after "L" :prefixes ("")) -(define-letter-group "N" :after "M" :prefixes ("")) -(define-letter-group "O" :after "N" :prefixes ("")) -(define-letter-group "P" :after "O" :prefixes ("")) -(define-letter-group "Q" :after "P" :prefixes ("")) -(define-letter-group "R" :after "Q" :prefixes ("")) -(define-letter-group "S" :after "R" :prefixes ("")) -(define-letter-group "T" :after "S" :prefixes ("")) -(define-letter-group "U" :after "T" :prefixes ("")) -(define-letter-group "V" :after "U" :prefixes ("")) -(define-letter-group "W" :after "V" :prefixes ("")) -(define-letter-group "X" :after "W" :prefixes ("")) -(define-letter-group "Y" :after "X" :prefixes ("")) -(define-letter-group "Z" :after "Y" :prefixes ("")) - -(define-rule-set "sphinx-xy-alphabetize" - - :rules (("À" "" :string) - ("Ă" "" :string) - ("â" "" :string) - ("Ä" "" :string) - ("à" "" :string) - ("Å" "" :string) - ("Ã" "" :string) - ("Á" "" :string) - ("á" "" :string) - ("ã" "" :string) - ("Â" "" :string) - ("ă" "" :string) - ("å" "" :string) - ("ą" "" :string) - ("ä" "" :string) - ("Ą" "" :string) - ("æ" "" :string) - ("Æ" "" :string) - ("ć" "" :string) - ("ĉ" "" :string) - ("ç" "" :string) - ("Č" "" :string) - ("č" "" :string) - ("Ĉ" "" :string) - ("Ç" "" :string) - ("Ć" "" :string) - ("ď" "" :string) - ("Đ" "" :string) - ("Ď" "" :string) - ("đ" "" :string) - ("ê" 
"" :string) - ("Ę" "" :string) - ("Ě" "" :string) - ("ë" "" :string) - ("ě" "" :string) - ("é" "" :string) - ("È" "" :string) - ("Ë" "" :string) - ("É" "" :string) - ("è" "" :string) - ("Ê" "" :string) - ("ę" "" :string) - ("ĝ" "" :string) - ("ğ" "" :string) - ("Ğ" "" :string) - ("Ĝ" "" :string) - ("ĥ" "" :string) - ("Ĥ" "" :string) - ("Ï" "" :string) - ("Í" "" :string) - ("ï" "" :string) - ("Î" "" :string) - ("î" "" :string) - ("ı" "" :string) - ("İ" "" :string) - ("í" "" :string) - ("Ì" "" :string) - ("ì" "" :string) - ("Ĵ" "" :string) - ("ĵ" "" :string) - ("ł" "" :string) - ("Ł" "" :string) - ("ľ" "" :string) - ("Ľ" "" :string) - ("ń" "" :string) - ("Ń" "" :string) - ("ñ" "" :string) - ("ň" "" :string) - ("Ñ" "" :string) - ("Ň" "" :string) - ("Õ" "" :string) - ("Ő" "" :string) - ("ó" "" :string) - ("ö" "" :string) - ("ô" "" :string) - ("ő" "" :string) - ("Ø" "" :string) - ("Ö" "" :string) - ("õ" "" :string) - ("Ô" "" :string) - ("ø" "" :string) - ("Ó" "" :string) - ("Ò" "" :string) - ("ò" "" :string) - ("œ" "ĺ" :string) - ("Œ" "ĺ" :string) - ("Ř" "" :string) - ("ř" "" :string) - ("Ŕ" "" :string) - ("ŕ" "" :string) - ("ŝ" "" :string) - ("Ś" "" :string) - ("ș" "" :string) - ("ş" "" :string) - ("Ŝ" "" :string) - ("ś" "" :string) - ("Ș" "" :string) - ("š" "" :string) - ("Ş" "" :string) - ("Š" "" :string) - ("ß" "" :string) - ("Ț" "" :string) - ("Ť" "" :string) - ("ț" "" :string) - ("ť" "" :string) - ("û" "" :string) - ("ŭ" "" :string) - ("ů" "" :string) - ("ű" "" :string) - ("ù" "" :string) - ("Ŭ" "" :string) - ("Ù" "" :string) - ("Ű" "" :string) - ("Ü" "" :string) - ("Ů" "" :string) - ("ú" "" :string) - ("Ú" "" :string) - ("Û" "" :string) - ("ü" "" :string) - ("ÿ" "" :string) - ("Ý" "" :string) - ("Ÿ" "" :string) - ("ý" "" :string) - ("Ż" "" :string) - ("Ž" "" :string) - ("Ź" "" :string) - ("ž" "" :string) - ("ż" "" :string) - ("ź" "" :string) - ("a" "" :string) - ("A" "" :string) - ("b" "" :string) - ("B" "" :string) - ("c" "" :string) - ("C" "" :string) - ("d" "" 
:string) - ("D" "" :string) - ("e" "" :string) - ("E" "" :string) - ("F" "" :string) - ("f" "" :string) - ("G" "" :string) - ("g" "" :string) - ("H" "" :string) - ("h" "" :string) - ("i" "" :string) - ("I" "" :string) - ("J" "" :string) - ("j" "" :string) - ("K" "" :string) - ("k" "" :string) - ("L" "" :string) - ("l" "" :string) - ("M" "" :string) - ("m" "" :string) - ("n" "" :string) - ("N" "" :string) - ("O" "" :string) - ("o" "" :string) - ("p" "" :string) - ("P" "" :string) - ("Q" "" :string) - ("q" "" :string) - ("r" "" :string) - ("R" "" :string) - ("S" "" :string) - ("s" "" :string) - ("t" "" :string) - ("T" "" :string) - ("u" "" :string) - ("U" "" :string) - ("v" "" :string) - ("V" "" :string) - ("W" "" :string) - ("w" "" :string) - ("x" "" :string) - ("X" "" :string) - ("Y" "" :string) - ("y" "" :string) - ("z" "" :string) - ("Z" "" :string) - )) - -(define-rule-set "sphinx-xy-resolve-diacritics" - - :rules (("Ĥ" "" :string) - ("ó" "" :string) - ("ľ" "" :string) - ("Ř" "" :string) - ("ĝ" "" :string) - ("ď" "" :string) - ("Ě" "" :string) - ("ĥ" "" :string) - ("Č" "" :string) - ("Ĵ" "" :string) - ("ě" "" :string) - ("ž" "" :string) - ("Ď" "" :string) - ("ř" "" :string) - ("Ž" "" :string) - ("ı" "" :string) - ("Ť" "" :string) - ("á" "" :string) - ("č" "" :string) - ("Á" "" :string) - ("ň" "" :string) - ("Š" "" :string) - ("Ň" "" :string) - ("ĵ" "" :string) - ("ť" "" :string) - ("Ó" "" :string) - ("ý" "" :string) - ("Ĝ" "" :string) - ("Ú" "" :string) - ("Ľ" "" :string) - ("š" "" :string) - ("Ý" "" :string) - ("ú" "" :string) - ("Ś" "" :string) - ("ć" "" :string) - ("Ł" "" :string) - ("ł" "" :string) - ("ń" "" :string) - ("À" "" :string) - ("Ź" "" :string) - ("à" "" :string) - ("Ń" "" :string) - ("Đ" "" :string) - ("ÿ" "" :string) - ("ś" "" :string) - ("Ğ" "" :string) - ("ğ" "" :string) - ("Ù" "" :string) - ("İ" "" :string) - ("đ" "" :string) - ("ù" "" :string) - ("Ț" "" :string) - ("é" "" :string) - ("ŕ" "" :string) - ("Ć" "" :string) - ("ț" "" :string) - 
("ò" "" :string) - ("ź" "" :string) - ("Ò" "" :string) - ("Ÿ" "" :string) - ("Ŕ" "" :string) - ("É" "" :string) - ("ĉ" "" :string) - ("ô" "" :string) - ("Í" "" :string) - ("ŝ" "" :string) - ("Ż" "" :string) - ("Ă" "" :string) - ("Ŝ" "" :string) - ("ñ" "" :string) - ("ŭ" "" :string) - ("í" "" :string) - ("È" "" :string) - ("Ô" "" :string) - ("Ŭ" "" :string) - ("ż" "" :string) - ("Ñ" "" :string) - ("è" "" :string) - ("Ĉ" "" :string) - ("ă" "" :string) - ("â" "" :string) - ("û" "" :string) - ("ê" "" :string) - ("Õ" "" :string) - ("õ" "" :string) - ("ș" "" :string) - ("ç" "" :string) - ("Â" "" :string) - ("Ê" "" :string) - ("Û" "" :string) - ("Ç" "" :string) - ("ì" "" :string) - ("Ì" "" :string) - ("Ș" "" :string) - ("ö" "" :string) - ("Ö" "" :string) - ("ş" "" :string) - ("ů" "" :string) - ("ë" "" :string) - ("ã" "" :string) - ("î" "" :string) - ("Î" "" :string) - ("Ã" "" :string) - ("Ş" "" :string) - ("Ů" "" :string) - ("Ë" "" :string) - ("ï" "" :string) - ("Ő" "" :string) - ("Ï" "" :string) - ("Ę" "" :string) - ("ő" "" :string) - ("Ü" "" :string) - ("Å" "" :string) - ("ü" "" :string) - ("ę" "" :string) - ("å" "" :string) - ("Ä" "" :string) - ("ű" "" :string) - ("Ø" "" :string) - ("ø" "" :string) - ("Ű" "" :string) - ("ä" "" :string) - ("Ą" "" :string) - ("ą" "" :string) - ("œ" "" :string) - ("ß" "" :string) - ("Æ" "" :string) - ("Œ" "" :string) - ("æ" "" :string) - ("e" "" :string) - ("t" "" :string) - ("L" "" :string) - ("Y" "" :string) - ("J" "" :string) - ("a" "" :string) - ("p" "" :string) - ("u" "" :string) - ("j" "" :string) - ("b" "" :string) - ("G" "" :string) - ("U" "" :string) - ("F" "" :string) - ("H" "" :string) - ("i" "" :string) - ("z" "" :string) - ("c" "" :string) - ("l" "" :string) - ("A" "" :string) - ("Q" "" :string) - ("w" "" :string) - ("D" "" :string) - ("R" "" :string) - ("d" "" :string) - ("s" "" :string) - ("r" "" :string) - ("k" "" :string) - ("v" "" :string) - ("m" "" :string) - ("P" "" :string) - ("y" "" :string) - ("K" "" :string) - ("q" 
"" :string) - ("S" "" :string) - ("I" "" :string) - ("C" "" :string) - ("M" "" :string) - ("Z" "" :string) - ("T" "" :string) - ("W" "" :string) - ("B" "" :string) - ("h" "" :string) - ("x" "" :string) - ("X" "" :string) - ("f" "" :string) - ("E" "" :string) - ("V" "" :string) - ("N" "" :string) - ("O" "" :string) - ("o" "" :string) - ("g" "" :string) - ("n" "" :string) - )) - -(define-rule-set "sphinx-xy-resolve-case" - - :rules (("Ú" "8" :string) - ("Ÿ" "8" :string) - ("Ç" "8" :string) - ("Ĉ" "8" :string) - ("Ŕ" "8" :string) - ("Ľ" "8" :string) - ("Ů" "8" :string) - ("Ý" "8" :string) - ("É" "8" :string) - ("Ë" "8" :string) - ("Ș" "8" :string) - ("Ì" "8" :string) - ("Ê" "8" :string) - ("Ň" "8" :string) - ("Ą" "8" :string) - ("Š" "8" :string) - ("Û" "8" :string) - ("Ş" "8" :string) - ("Ć" "8" :string) - ("Ò" "8" :string) - ("Ĝ" "8" :string) - ("Ñ" "8" :string) - ("Ó" "8" :string) - ("Î" "8" :string) - ("Á" "8" :string) - ("Ã" "8" :string) - ("Ț" "8" :string) - ("Å" "8" :string) - ("Ğ" "8" :string) - ("Ü" "8" :string) - ("È" "8" :string) - ("Ô" "8" :string) - ("İ" "8" :string) - ("Ű" "8" :string) - ("Ù" "8" :string) - ("Ŭ" "8" :string) - ("Â" "8" :string) - ("Ť" "8" :string) - ("Ń" "8" :string) - ("Ď" "8" :string) - ("Ź" "8" :string) - ("Ž" "8" :string) - ("Đ" "8" :string) - ("Ŝ" "8" :string) - ("Č" "8" :string) - ("Ĵ" "8" :string) - ("Ö" "8" :string) - ("Ø" "8" :string) - ("Ż" "8" :string) - ("Ł" "8" :string) - ("Ă" "8" :string) - ("Ě" "8" :string) - ("Ő" "8" :string) - ("Õ" "8" :string) - ("Ę" "8" :string) - ("Ï" "8" :string) - ("À" "8" :string) - ("Ĥ" "8" :string) - ("Ä" "8" :string) - ("Ś" "8" :string) - ("Ř" "8" :string) - ("Í" "8" :string) - ("Œ" "89" :string) - ("Æ" "89" :string) - ("ì" "9" :string) - ("è" "9" :string) - ("ą" "9" :string) - ("š" "9" :string) - ("ú" "9" :string) - ("å" "9" :string) - ("ă" "9" :string) - ("ę" "9" :string) - ("ü" "9" :string) - ("ź" "9" :string) - ("ò" "9" :string) - ("ť" "9" :string) - ("ț" "9" :string) - ("ĵ" "9" :string) - 
("ŕ" "9" :string) - ("ż" "9" :string) - ("ä" "9" :string) - ("ý" "9" :string) - ("ù" "9" :string) - ("á" "9" :string) - ("é" "9" :string) - ("č" "9" :string) - ("ň" "9" :string) - ("ś" "9" :string) - ("ø" "9" :string) - ("í" "9" :string) - ("đ" "9" :string) - ("ı" "9" :string) - ("ğ" "9" :string) - ("î" "9" :string) - ("ã" "9" :string) - ("à" "9" :string) - ("ř" "9" :string) - ("ő" "9" :string) - ("ů" "9" :string) - ("ș" "9" :string) - ("ÿ" "9" :string) - ("ë" "9" :string) - ("ŭ" "9" :string) - ("ç" "9" :string) - ("ű" "9" :string) - ("ñ" "9" :string) - ("õ" "9" :string) - ("ě" "9" :string) - ("ş" "9" :string) - ("ž" "9" :string) - ("ĝ" "9" :string) - ("ŝ" "9" :string) - ("ń" "9" :string) - ("û" "9" :string) - ("ł" "9" :string) - ("ď" "9" :string) - ("ĥ" "9" :string) - ("ê" "9" :string) - ("ô" "9" :string) - ("ĉ" "9" :string) - ("â" "9" :string) - ("ć" "9" :string) - ("ï" "9" :string) - ("ö" "9" :string) - ("ľ" "9" :string) - ("ó" "9" :string) - ("æ" "99" :string) - ("ß" "99" :string) - ("œ" "99" :string) - ("N" "8" :string) - ("V" "8" :string) - ("O" "8" :string) - ("X" "8" :string) - ("E" "8" :string) - ("P" "8" :string) - ("K" "8" :string) - ("T" "8" :string) - ("Z" "8" :string) - ("M" "8" :string) - ("C" "8" :string) - ("I" "8" :string) - ("S" "8" :string) - ("B" "8" :string) - ("W" "8" :string) - ("D" "8" :string) - ("R" "8" :string) - ("H" "8" :string) - ("F" "8" :string) - ("Q" "8" :string) - ("A" "8" :string) - ("G" "8" :string) - ("U" "8" :string) - ("J" "8" :string) - ("Y" "8" :string) - ("L" "8" :string) - ("o" "9" :string) - ("n" "9" :string) - ("g" "9" :string) - ("x" "9" :string) - ("f" "9" :string) - ("y" "9" :string) - ("q" "9" :string) - ("h" "9" :string) - ("w" "9" :string) - ("s" "9" :string) - ("d" "9" :string) - ("v" "9" :string) - ("k" "9" :string) - ("r" "9" :string) - ("m" "9" :string) - ("z" "9" :string) - ("c" "9" :string) - ("i" "9" :string) - ("l" "9" :string) - ("b" "9" :string) - ("j" "9" :string) - ("a" "9" :string) - ("p" "9" 
:string) - ("u" "9" :string) - ("t" "9" :string) - ("e" "9" :string) - )) - -(use-rule-set :run 0 - :rule-set ("sphinx-xy-alphabetize")) -(use-rule-set :run 1 - :rule-set ("sphinx-xy-resolve-diacritics")) -(use-rule-set :run 2 - :rule-set ("sphinx-xy-resolve-case")) diff --git a/docs/_build/latex/Makefile b/docs/_build/latex/Makefile deleted file mode 100644 index c561680..0000000 --- a/docs/_build/latex/Makefile +++ /dev/null @@ -1,68 +0,0 @@ -# Makefile for Sphinx LaTeX output - -ALLDOCS = $(basename $(wildcard *.tex)) -ALLPDF = $(addsuffix .pdf,$(ALLDOCS)) -ALLDVI = $(addsuffix .dvi,$(ALLDOCS)) -ALLXDV = -ALLPS = $(addsuffix .ps,$(ALLDOCS)) -ALLIMGS = $(wildcard *.png *.gif *.jpg *.jpeg) - -# Prefix for archive names -ARCHIVEPREFIX = -# Additional LaTeX options (passed via variables in latexmkrc/latexmkjarc file) -export LATEXOPTS = -# Additional latexmk options -LATEXMKOPTS = -# format: pdf or dvi (used only by archive targets) -FMT = pdf - -LATEX = latexmk -dvi -PDFLATEX = latexmk -pdf -dvi- -ps- - - -%.png %.gif %.jpg %.jpeg: FORCE_MAKE - extractbb '$@' - -%.dvi: %.tex FORCE_MAKE - $(LATEX) $(LATEXMKOPTS) '$<' - -%.ps: %.dvi - dvips '$<' - -%.pdf: %.tex FORCE_MAKE - $(PDFLATEX) $(LATEXMKOPTS) '$<' - -all: $(ALLPDF) - -all-dvi: $(ALLDVI) - -all-ps: $(ALLPS) - -all-pdf: $(ALLPDF) - -zip: all-$(FMT) - mkdir $(ARCHIVEPREFIX)docs-$(FMT) - cp $(ALLPDF) $(ARCHIVEPREFIX)docs-$(FMT) - zip -q -r -9 $(ARCHIVEPREFIX)docs-$(FMT).zip $(ARCHIVEPREFIX)docs-$(FMT) - rm -r $(ARCHIVEPREFIX)docs-$(FMT) - -tar: all-$(FMT) - mkdir $(ARCHIVEPREFIX)docs-$(FMT) - cp $(ALLPDF) $(ARCHIVEPREFIX)docs-$(FMT) - tar cf $(ARCHIVEPREFIX)docs-$(FMT).tar $(ARCHIVEPREFIX)docs-$(FMT) - rm -r $(ARCHIVEPREFIX)docs-$(FMT) - -gz: tar - gzip -9 < $(ARCHIVEPREFIX)docs-$(FMT).tar > $(ARCHIVEPREFIX)docs-$(FMT).tar.gz - -bz2: tar - bzip2 -9 -k $(ARCHIVEPREFIX)docs-$(FMT).tar - -xz: tar - xz -9 -k $(ARCHIVEPREFIX)docs-$(FMT).tar - -clean: - rm -f *.log *.ind *.aux *.toc *.syn *.idx *.out *.ilg *.pla *.ps 
*.tar *.tar.gz *.tar.bz2 *.tar.xz $(ALLPDF) $(ALLDVI) $(ALLXDV) *.fls *.fdb_latexmk - -.PHONY: all all-pdf all-dvi all-ps clean zip tar gz bz2 xz -.PHONY: FORCE_MAKE \ No newline at end of file diff --git a/docs/_build/latex/footnotehyper-sphinx.sty b/docs/_build/latex/footnotehyper-sphinx.sty deleted file mode 100644 index b6692cf..0000000 --- a/docs/_build/latex/footnotehyper-sphinx.sty +++ /dev/null @@ -1,269 +0,0 @@ -\NeedsTeXFormat{LaTeX2e} -\ProvidesPackage{footnotehyper-sphinx}% - [2017/10/27 v1.7 hyperref aware footnote.sty for sphinx (JFB)] -%% -%% Package: footnotehyper-sphinx -%% Version: based on footnotehyper.sty 2017/03/07 v1.0 -%% as available at https://www.ctan.org/pkg/footnotehyper -%% License: the one applying to Sphinx -%% -%% Refer to the PDF documentation at https://www.ctan.org/pkg/footnotehyper for -%% the code comments. -%% -%% Differences: -%% 1. a partial tabulary compatibility layer added (enough for Sphinx mark-up), -%% 2. use of \spx@opt@BeforeFootnote from sphinx.sty, -%% 3. use of \sphinxunactivateextrasandspace from sphinx.sty, -%% 4. macro definition \sphinxfootnotemark, -%% 5. macro definition \sphinxlongtablepatch -%% 6. 
replaced an \undefined by \@undefined -\DeclareOption*{\PackageWarning{footnotehyper-sphinx}{Option `\CurrentOption' is unknown}}% -\ProcessOptions\relax -\newbox\FNH@notes -\newdimen\FNH@width -\let\FNH@colwidth\columnwidth -\newif\ifFNH@savingnotes -\AtBeginDocument {% - \let\FNH@latex@footnote \footnote - \let\FNH@latex@footnotetext\footnotetext - \let\FNH@H@@footnotetext \@footnotetext - \newenvironment{savenotes} - {\FNH@savenotes\ignorespaces}{\FNH@spewnotes\ignorespacesafterend}% - \let\spewnotes \FNH@spewnotes - \let\footnote \FNH@footnote - \let\footnotetext \FNH@footnotetext - \let\endfootnote \FNH@endfntext - \let\endfootnotetext\FNH@endfntext - \@ifpackageloaded{hyperref} - {\ifHy@hyperfootnotes - \let\FNH@H@@footnotetext\H@@footnotetext - \else - \let\FNH@hyper@fntext\FNH@nohyp@fntext - \fi}% - {\let\FNH@hyper@fntext\FNH@nohyp@fntext}% -}% -\def\FNH@hyper@fntext{\FNH@fntext\FNH@hyper@fntext@i}% -\def\FNH@nohyp@fntext{\FNH@fntext\FNH@nohyp@fntext@i}% -\def\FNH@fntext #1{% - \ifx\ifmeasuring@\@undefined - \expandafter\@secondoftwo\else\expandafter\@firstofone\fi -% these two lines modified for Sphinx (tabulary compatibility): - {\ifmeasuring@\expandafter\@gobbletwo\else\expandafter\@firstofone\fi}% - {\ifx\equation$\expandafter\@gobbletwo\fi #1}%$ -}% -\long\def\FNH@hyper@fntext@i#1{% - \global\setbox\FNH@notes\vbox - {\unvbox\FNH@notes - \FNH@startnote - \@makefntext - {\rule\z@\footnotesep\ignorespaces - \ifHy@nesting\expandafter\ltx@firstoftwo - \else\expandafter\ltx@secondoftwo - \fi - {\expandafter\hyper@@anchor\expandafter{\Hy@footnote@currentHref}{#1}}% - {\Hy@raisedlink - {\expandafter\hyper@@anchor\expandafter{\Hy@footnote@currentHref}% - {\relax}}% - \let\@currentHref\Hy@footnote@currentHref - \let\@currentlabelname\@empty - #1}% - \@finalstrut\strutbox - }% - \FNH@endnote - }% -}% -\long\def\FNH@nohyp@fntext@i#1{% - \global\setbox\FNH@notes\vbox - {\unvbox\FNH@notes - \FNH@startnote - 
\@makefntext{\rule\z@\footnotesep\ignorespaces#1\@finalstrut\strutbox}% - \FNH@endnote - }% -}% -\def\FNH@startnote{% - \hsize\FNH@colwidth - \interlinepenalty\interfootnotelinepenalty - \reset@font\footnotesize - \floatingpenalty\@MM - \@parboxrestore - \protected@edef\@currentlabel{\csname p@\@mpfn\endcsname\@thefnmark}% - \color@begingroup -}% -\def\FNH@endnote{\color@endgroup}% -\def\FNH@savenotes{% - \begingroup - \ifFNH@savingnotes\else - \FNH@savingnotestrue - \let\@footnotetext \FNH@hyper@fntext - \let\@mpfootnotetext \FNH@hyper@fntext - \let\H@@mpfootnotetext\FNH@nohyp@fntext - \FNH@width\columnwidth - \let\FNH@colwidth\FNH@width - \global\setbox\FNH@notes\box\voidb@x - \let\FNH@thempfn\thempfn - \let\FNH@mpfn\@mpfn - \ifx\@minipagerestore\relax\let\@minipagerestore\@empty\fi - \expandafter\def\expandafter\@minipagerestore\expandafter{% - \@minipagerestore - \let\thempfn\FNH@thempfn - \let\@mpfn\FNH@mpfn - }% - \fi -}% -\def\FNH@spewnotes {% - \endgroup - \ifFNH@savingnotes\else - \ifvoid\FNH@notes\else - \begingroup - \let\@makefntext\@empty - \let\@finalstrut\@gobble - \let\rule\@gobbletwo - \FNH@H@@footnotetext{\unvbox\FNH@notes}% - \endgroup - \fi - \fi -}% -\def\FNH@footnote@envname {footnote}% -\def\FNH@footnotetext@envname{footnotetext}% -\def\FNH@footnote{% -% this line added for Sphinx: - \spx@opt@BeforeFootnote - \ifx\@currenvir\FNH@footnote@envname - \expandafter\FNH@footnoteenv - \else - \expandafter\FNH@latex@footnote - \fi -}% -\def\FNH@footnoteenv{% -% this line added for Sphinx (footnotes in parsed literal blocks): - \catcode13=5 \sphinxunactivateextrasandspace - \@ifnextchar[% - \FNH@footnoteenv@i %] - {\stepcounter\@mpfn - \protected@xdef\@thefnmark{\thempfn}% - \@footnotemark - \def\FNH@endfntext@fntext{\@footnotetext}% - \FNH@startfntext}% -}% -\def\FNH@footnoteenv@i[#1]{% - \begingroup - \csname c@\@mpfn\endcsname #1\relax - \unrestored@protected@xdef\@thefnmark{\thempfn}% - \endgroup - \@footnotemark - 
\def\FNH@endfntext@fntext{\@footnotetext}% - \FNH@startfntext -}% -\def\FNH@footnotetext{% - \ifx\@currenvir\FNH@footnotetext@envname - \expandafter\FNH@footnotetextenv - \else - \expandafter\FNH@latex@footnotetext - \fi -}% -\def\FNH@footnotetextenv{% - \@ifnextchar[% - \FNH@footnotetextenv@i %] - {\protected@xdef\@thefnmark{\thempfn}% - \def\FNH@endfntext@fntext{\@footnotetext}% - \FNH@startfntext}% -}% -\def\FNH@footnotetextenv@i[#1]{% - \begingroup - \csname c@\@mpfn\endcsname #1\relax - \unrestored@protected@xdef\@thefnmark{\thempfn}% - \endgroup - \ifFNH@savingnotes - \def\FNH@endfntext@fntext{\FNH@nohyp@fntext}% - \else - \def\FNH@endfntext@fntext{\FNH@H@@footnotetext}% - \fi - \FNH@startfntext -}% -\def\FNH@startfntext{% - \setbox\z@\vbox\bgroup - \FNH@startnote - \FNH@prefntext - \rule\z@\footnotesep\ignorespaces -}% -\def\FNH@endfntext {% - \@finalstrut\strutbox - \FNH@postfntext - \FNH@endnote - \egroup - \begingroup - \let\@makefntext\@empty\let\@finalstrut\@gobble\let\rule\@gobbletwo - \FNH@endfntext@fntext {\unvbox\z@}% - \endgroup -}% -\AtBeginDocument{% - \let\FNH@@makefntext\@makefntext - \ifx\@makefntextFB\@undefined - \expandafter\@gobble\else\expandafter\@firstofone\fi - {\ifFBFrenchFootnotes \let\FNH@@makefntext\@makefntextFB \else - \let\FNH@@makefntext\@makefntextORI\fi}% - \expandafter\FNH@check@a\FNH@@makefntext{1.2!3?4,}% - \FNH@@@1.2!3?4,\FNH@@@\relax -}% -\long\def\FNH@check@a #11.2!3?4,#2\FNH@@@#3{% - \ifx\relax#3\expandafter\@firstoftwo\else\expandafter\@secondoftwo\fi - \FNH@bad@makefntext@alert - {\def\FNH@prefntext{#1}\def\FNH@postfntext{#2}\FNH@check@b}% -}% -\def\FNH@check@b #1\relax{% - \expandafter\expandafter\expandafter\FNH@check@c - \expandafter\meaning\expandafter\FNH@prefntext - \meaning\FNH@postfntext1.2!3?4,\FNH@check@c\relax -}% -\def\FNH@check@c #11.2!3?4,#2#3\relax{% - \ifx\FNH@check@c#2\expandafter\@gobble\fi\FNH@bad@makefntext@alert -}% -% slight reformulation for Sphinx -\def\FNH@bad@makefntext@alert{% - 
\PackageWarningNoLine{footnotehyper-sphinx}% - {Footnotes will be sub-optimal, sorry. This is due to the document class or^^J - some package modifying macro \string\@makefntext.^^J - You can try to report this incompatibility at^^J - https://github.com/sphinx-doc/sphinx with this info:}% - \typeout{\meaning\@makefntext}% - \let\FNH@prefntext\@empty\let\FNH@postfntext\@empty -}% -% this macro from original footnote.sty is not used anymore by Sphinx -% but for simplicity sake let's just keep it as is -\def\makesavenoteenv{\@ifnextchar[\FNH@msne@ii\FNH@msne@i}%] -\def\FNH@msne@i #1{% - \expandafter\let\csname FNH$#1\expandafter\endcsname %$ - \csname #1\endcsname - \expandafter\let\csname endFNH$#1\expandafter\endcsname %$ - \csname end#1\endcsname - \FNH@msne@ii[#1]{FNH$#1}%$ -}% -\def\FNH@msne@ii[#1]#2{% - \expandafter\edef\csname#1\endcsname{% - \noexpand\savenotes - \expandafter\noexpand\csname#2\endcsname - }% - \expandafter\edef\csname end#1\endcsname{% - \expandafter\noexpand\csname end#2\endcsname - \noexpand\expandafter - \noexpand\spewnotes - \noexpand\if@endpe\noexpand\@endpetrue\noexpand\fi - }% -}% -% end of footnotehyper 2017/02/16 v0.99 -% some extras for Sphinx : -% \sphinxfootnotemark: usable in section titles and silently removed from TOCs. -\def\sphinxfootnotemark [#1]% - {\ifx\thepage\relax\else\protect\spx@opt@BeforeFootnote - \protect\footnotemark[#1]\fi}% -\AtBeginDocument{% - % let hyperref less complain - \pdfstringdefDisableCommands{\def\sphinxfootnotemark [#1]{}}% - % to obtain hyperlinked footnotes in longtable environment we must replace - % hyperref's patch of longtable's patch of \@footnotetext by our own - \let\LT@p@ftntext\FNH@hyper@fntext - % this *requires* longtable to be used always wrapped in savenotes environment -}% -\endinput -%% -%% End of file `footnotehyper-sphinx.sty'. 
diff --git a/docs/_build/latex/latexmkjarc b/docs/_build/latex/latexmkjarc deleted file mode 100644 index 93c7fc7..0000000 --- a/docs/_build/latex/latexmkjarc +++ /dev/null @@ -1,24 +0,0 @@ -$latex = 'platex ' . $ENV{'LATEXOPTS'} . ' -kanji=utf8 %O %S'; -$dvipdf = 'dvipdfmx %O -o %D %S'; -$makeindex = 'internal mendex %S %B %D'; -sub mendex { - my ($source, $basename, $destination) = @_; - my $dictfile = $basename . ".dic"; - unlink($destination); - if (-f $dictfile) { - system("mendex", "-U", "-f", "-d", $dictfile, "-s", "python.ist", $source); - if ($? > 0) { - print("mendex exited with error code $? (ignored)\n"); - } - } - if (!-e $destination) { - # create an empty .ind file if nothing - open(FH, ">" . $destination); - close(FH); - } - return 0; -} -add_cus_dep( "glo", "gls", 0, "makeglo" ); -sub makeglo { - return system( "mendex -J -f -s gglo.ist -o '$_[0].gls' '$_[0].glo'" ); -} diff --git a/docs/_build/latex/latexmkrc b/docs/_build/latex/latexmkrc deleted file mode 100644 index bba17fa..0000000 --- a/docs/_build/latex/latexmkrc +++ /dev/null @@ -1,9 +0,0 @@ -$latex = 'latex ' . $ENV{'LATEXOPTS'} . ' %O %S'; -$pdflatex = 'pdflatex ' . $ENV{'LATEXOPTS'} . ' %O %S'; -$lualatex = 'lualatex ' . $ENV{'LATEXOPTS'} . ' %O %S'; -$xelatex = 'xelatex --no-pdf ' . $ENV{'LATEXOPTS'} . 
' %O %S'; -$makeindex = 'makeindex -s python.ist %O -o %D %S'; -add_cus_dep( "glo", "gls", 0, "makeglo" ); -sub makeglo { - return system( "makeindex -s gglo.ist -o '$_[0].gls' '$_[0].glo'" ); -} \ No newline at end of file diff --git a/docs/_build/latex/make.bat b/docs/_build/latex/make.bat deleted file mode 100644 index 94bda21..0000000 --- a/docs/_build/latex/make.bat +++ /dev/null @@ -1,31 +0,0 @@ -@ECHO OFF - -REM Command file for Sphinx documentation - -pushd %~dp0 - -set PDFLATEX=latexmk -pdf -dvi- -ps- - -set "LATEXOPTS= " - -if "%1" == "" goto all-pdf - -if "%1" == "all-pdf" ( - :all-pdf - for %%i in (*.tex) do ( - %PDFLATEX% %LATEXMKOPTS% %%i - ) - goto end -) - -if "%1" == "all-pdf-ja" ( - goto all-pdf -) - -if "%1" == "clean" ( - del /q /s *.dvi *.log *.ind *.aux *.toc *.syn *.idx *.out *.ilg *.pla *.ps *.tar *.tar.gz *.tar.bz2 *.tar.xz *.fls *.fdb_latexmk - goto end -) - -:end -popd \ No newline at end of file diff --git a/docs/_build/latex/py-graph.tex b/docs/_build/latex/py-graph.tex deleted file mode 100644 index 68140df..0000000 --- a/docs/_build/latex/py-graph.tex +++ /dev/null @@ -1,87 +0,0 @@ -%% Generated by Sphinx. 
-\def\sphinxdocclass{report} -\documentclass[letterpaper,10pt,english]{sphinxmanual} -\ifdefined\pdfpxdimen - \let\sphinxpxdimen\pdfpxdimen\else\newdimen\sphinxpxdimen -\fi \sphinxpxdimen=.75bp\relax - -\PassOptionsToPackage{warn}{textcomp} -\usepackage[utf8]{inputenc} -\ifdefined\DeclareUnicodeCharacter -% support both utf8 and utf8x syntaxes -\edef\sphinxdqmaybe{\ifdefined\DeclareUnicodeCharacterAsOptional\string"\fi} - \DeclareUnicodeCharacter{\sphinxdqmaybe00A0}{\nobreakspace} - \DeclareUnicodeCharacter{\sphinxdqmaybe2500}{\sphinxunichar{2500}} - \DeclareUnicodeCharacter{\sphinxdqmaybe2502}{\sphinxunichar{2502}} - \DeclareUnicodeCharacter{\sphinxdqmaybe2514}{\sphinxunichar{2514}} - \DeclareUnicodeCharacter{\sphinxdqmaybe251C}{\sphinxunichar{251C}} - \DeclareUnicodeCharacter{\sphinxdqmaybe2572}{\textbackslash} -\fi -\usepackage{cmap} -\usepackage[T1]{fontenc} -\usepackage{amsmath,amssymb,amstext} -\usepackage{babel} -\usepackage{times} -\usepackage[Bjarne]{fncychap} -\usepackage{sphinx} - -\fvset{fontsize=\small} -\usepackage{geometry} - -% Include hyperref last. -\usepackage{hyperref} -% Fix anchor placement for figures with captions. -\usepackage{hypcap}% it must be loaded after hyperref. -% Set up styles of URL: it should be placed after hyperref. 
-\urlstyle{same} -\addto\captionsenglish{\renewcommand{\contentsname}{Contents:}} - -\addto\captionsenglish{\renewcommand{\figurename}{Fig.}} -\addto\captionsenglish{\renewcommand{\tablename}{Table}} -\addto\captionsenglish{\renewcommand{\literalblockname}{Listing}} - -\addto\captionsenglish{\renewcommand{\literalblockcontinuedname}{continued from previous page}} -\addto\captionsenglish{\renewcommand{\literalblockcontinuesname}{continues on next page}} -\addto\captionsenglish{\renewcommand{\sphinxnonalphabeticalgroupname}{Non-alphabetical}} -\addto\captionsenglish{\renewcommand{\sphinxsymbolsname}{Symbols}} -\addto\captionsenglish{\renewcommand{\sphinxnumbersname}{Numbers}} - -\addto\extrasenglish{\def\pageautorefname{page}} - -\setcounter{tocdepth}{1} - - - -\title{py-graph Documentation} -\date{Jan 28, 2020} -\release{} -\author{Linlin Jia} -\newcommand{\sphinxlogo}{\vbox{}} -\renewcommand{\releasename}{} -\makeindex -\begin{document} - -\maketitle -\sphinxtableofcontents -\phantomsection\label{\detokenize{index::doc}} - - - -\chapter{Indices and tables} -\label{\detokenize{index:indices-and-tables}}\begin{itemize} -\item {} -\DUrole{xref,std,std-ref}{genindex} - -\item {} -\DUrole{xref,std,std-ref}{modindex} - -\item {} -\DUrole{xref,std,std-ref}{search} - -\end{itemize} - - - -\renewcommand{\indexname}{Index} -\printindex -\end{document} \ No newline at end of file diff --git a/docs/_build/latex/python.ist b/docs/_build/latex/python.ist deleted file mode 100644 index 7a1c06f..0000000 --- a/docs/_build/latex/python.ist +++ /dev/null @@ -1,13 +0,0 @@ -line_max 100 -headings_flag 1 -heading_prefix " \\bigletter " - -preamble "\\begin{sphinxtheindex} -\\let\\bigletter\\sphinxstyleindexlettergroup - -" - -postamble "\n\n\\end{sphinxtheindex}\n" - -symhead_positive "{\\sphinxsymbolsname}" -numhead_positive "{\\sphinxnumbersname}" diff --git a/docs/_build/latex/sphinx.sty b/docs/_build/latex/sphinx.sty deleted file mode 100644 index fc75bf3..0000000 --- 
a/docs/_build/latex/sphinx.sty +++ /dev/null @@ -1,1684 +0,0 @@ -% -% sphinx.sty -% -% Adapted from the old python.sty, mostly written by Fred Drake, -% by Georg Brandl. -% - -\NeedsTeXFormat{LaTeX2e}[1995/12/01] -\ProvidesPackage{sphinx}[2018/07/18 v1.8 LaTeX package (Sphinx markup)] - -% provides \ltx@ifundefined -% (many packages load ltxcmds: graphicx does for pdftex and lualatex but -% not xelatex, and anyhow kvoptions does, but it may be needed in future to -% use \sphinxdeprecationwarning earlier, and it needs \ltx@ifundefined) -\RequirePackage{ltxcmds} - -%% for deprecation warnings -\newcommand\sphinxdeprecationwarning[4]{% #1 the deprecated macro or name, -% #2 = when deprecated, #3 = when removed, #4 = additional info - \edef\spx@tempa{\detokenize{#1}}% - \ltx@ifundefined{sphinx_depr_\spx@tempa}{% - \global\expandafter\let\csname sphinx_depr_\spx@tempa\endcsname\spx@tempa - \expandafter\AtEndDocument\expandafter{\expandafter\let\expandafter - \sphinxdeprecatedmacro\csname sphinx_depr_\spx@tempa\endcsname - \PackageWarningNoLine{sphinx}{^^J**** SPHINX DEPRECATION WARNING:^^J - \sphinxdeprecatedmacro^^J - \@spaces- is deprecated at Sphinx #2^^J - \@spaces- and removed at Sphinx #3.^^J - #4^^J****}}% - }{% warning already emitted (at end of latex log), don't repeat - }} - - -%% PACKAGES -% -% we delay handling of options to after having loaded packages, because -% of the need to use \definecolor. 
-\RequirePackage{graphicx} -\@ifclassloaded{memoir}{}{\RequirePackage{fancyhdr}} -% for \text macro and \iffirstchoice@ conditional even if amsmath not loaded -\RequirePackage{amstext} -\RequirePackage{textcomp}% "warn" option issued from template -\RequirePackage{titlesec} -\@ifpackagelater{titlesec}{2016/03/15}% - {\@ifpackagelater{titlesec}{2016/03/21}% - {}% - {\newif\ifsphinx@ttlpatch@ok - \IfFileExists{etoolbox.sty}{% - \RequirePackage{etoolbox}% - \patchcmd{\ttlh@hang}{\parindent\z@}{\parindent\z@\leavevmode}% - {\sphinx@ttlpatch@oktrue}{}% - \ifsphinx@ttlpatch@ok - \patchcmd{\ttlh@hang}{\noindent}{}{}{\sphinx@ttlpatch@okfalse}% - \fi - }{}% - \ifsphinx@ttlpatch@ok - \typeout{^^J Package Sphinx Info: ^^J - **** titlesec 2.10.1 successfully patched for bugfix ****^^J}% - \else - \AtEndDocument{\PackageWarningNoLine{sphinx}{^^J% -******** titlesec 2.10.1 has a bug, (section numbers disappear) ......|^^J% -******** and Sphinx could not patch it, perhaps because your local ...|^^J% -******** copy is already fixed without a changed release date. .......|^^J% -******** If not, you must update titlesec! ...........................|}}% - \fi - }% - }{} -\RequirePackage{tabulary} -% tabulary has a bug with its re-definition of \multicolumn in its first pass -% which is not \long. But now Sphinx does not use LaTeX's \multicolumn but its -% own macro. Hence we don't even need to patch tabulary. 
See sphinxmulticell.sty -% X or S (Sphinx) may have meanings if some table package is loaded hence -% \X was chosen to avoid possibility of conflict -\newcolumntype{\X}[2]{p{\dimexpr - (\linewidth-\arrayrulewidth)*#1/#2-\tw@\tabcolsep-\arrayrulewidth\relax}} -\newcolumntype{\Y}[1]{p{\dimexpr - #1\dimexpr\linewidth-\arrayrulewidth\relax-\tw@\tabcolsep-\arrayrulewidth\relax}} -% using here T (for Tabulary) feels less of a problem than the X could be -\newcolumntype{T}{J}% -% For tables allowing pagebreaks -\RequirePackage{longtable} -% User interface to set-up whitespace before and after tables: -\newcommand*\sphinxtablepre {0pt}% -\newcommand*\sphinxtablepost{\medskipamount}% -\newcommand*\sphinxbelowcaptionspace{.5\sphinxbaselineskip}% -% as one can not use \baselineskip from inside longtable (it is zero there) -% we need \sphinxbaselineskip, which defaults to \baselineskip -\def\sphinxbaselineskip{\baselineskip}% -% These commands are inserted by the table templates -\def\sphinxatlongtablestart - {\par - \vskip\parskip - \vskip\dimexpr\sphinxtablepre\relax % adjust vertical position - \vbox{}% get correct baseline from above - \LTpre\z@skip\LTpost\z@skip % set to zero longtable's own skips - \edef\sphinxbaselineskip{\dimexpr\the\dimexpr\baselineskip\relax\relax}% - }% -\def\sphinxatlongtableend{\prevdepth\z@\vskip\sphinxtablepost\relax}% -\def\sphinxlongtablecapskipadjust - {\dimexpr-\dp\strutbox-\sphinxbaselineskip+\sphinxbelowcaptionspace\relax}% -% Now for tables not using longtable -\def\sphinxattablestart - {\par - \vskip\dimexpr\sphinxtablepre\relax - }% -\let\sphinxattableend\sphinxatlongtableend -% longtable's wraps captions to a maximal width of \LTcapwidth -% so we do the same for all tables -\newcommand*\sphinxcapstartof[1]{% - \vskip\parskip - \vbox{}% force baselineskip for good positioning by capstart of hyperanchor - \def\@captype{#1}% - \capstart -% move back vertically to compensate space inserted by next paragraph - 
\vskip-\baselineskip\vskip-\parskip -}% -% use \LTcapwidth (default is 4in) to wrap caption (if line width is bigger) -\newcommand\sphinxcaption[2][\LTcapwidth]{% - \noindent\hb@xt@\linewidth{\hss - \vtop{\@tempdima\dimexpr#1\relax -% don't exceed linewidth for the caption width - \ifdim\@tempdima>\linewidth\hsize\linewidth\else\hsize\@tempdima\fi -% longtable ignores \abovecaptionskip/\belowcaptionskip, so add hooks here -% to uniformize control of caption distance to tables - \abovecaptionskip\sphinxabovecaptionskip - \belowcaptionskip\sphinxbelowcaptionskip - \caption[{#2}]% - {\strut\ignorespaces#2\ifhmode\unskip\@finalstrut\strutbox\fi}% - }\hss}% - \par\prevdepth\dp\strutbox -}% -\def\spx@abovecaptionskip{\abovecaptionskip} -\newcommand*\sphinxabovecaptionskip{\z@skip} -\newcommand*\sphinxbelowcaptionskip{\z@skip} - -\newcommand\sphinxaftercaption -{% this default definition serves with a caption *above* a table, to make sure - % its last baseline is \sphinxbelowcaptionspace above table top - \nobreak - \vskip\dimexpr\sphinxbelowcaptionspace\relax - \vskip-\baselineskip\vskip-\parskip -}% -% varwidth is crucial for our handling of general contents in merged cells -\RequirePackage{varwidth} -% but addition of a compatibility patch with hyperref is needed -% (tested with varwidth v 0.92 Mar 2009) -\AtBeginDocument {% - \let\@@vwid@Hy@raisedlink\Hy@raisedlink - \long\def\@vwid@Hy@raisedlink#1{\@vwid@wrap{\@@vwid@Hy@raisedlink{#1}}}% - \edef\@vwid@setup{% - \let\noexpand\Hy@raisedlink\noexpand\@vwid@Hy@raisedlink % HYPERREF ! - \unexpanded\expandafter{\@vwid@setup}}% -}% -% Homemade package to handle merged cells -\RequirePackage{sphinxmulticell} -\RequirePackage{makeidx} -% For framing code-blocks and warning type notices, and shadowing topics -\RequirePackage{framed} -% The xcolor package draws better fcolorboxes around verbatim code -\IfFileExists{xcolor.sty}{ - \RequirePackage{xcolor} -}{ - \RequirePackage{color} -} -% For highlighted code. 
-\RequirePackage{fancyvrb} -\define@key{FV}{hllines}{\def\sphinx@verbatim@checkifhl##1{\in@{, ##1,}{#1}}} -% For hyperlinked footnotes in tables; also for gathering footnotes from -% topic and warning blocks. Also to allow code-blocks in footnotes. -\RequirePackage{footnotehyper-sphinx} -% For the H specifier. Do not \restylefloat{figure}, it breaks Sphinx code -% for allowing figures in tables. -\RequirePackage{float} -% For floating figures in the text. Better to load after float. -\RequirePackage{wrapfig} -% Separate paragraphs by space by default. -\RequirePackage{parskip} -% For parsed-literal blocks. -\RequirePackage{alltt} -% Display "real" single quotes in literal blocks. -\RequirePackage{upquote} -% control caption around literal-block -\RequirePackage{capt-of} -\RequirePackage{needspace} -% LaTeX 2018-04-01 and later provides \@removefromreset -\ltx@ifundefined{@removefromreset} - {\RequirePackage{remreset}} - {}% avoid warning -% to make pdf with correct encoded bookmarks in Japanese -% this should precede the hyperref package -\ifx\kanjiskip\@undefined -% for non-Japanese: make sure bookmarks are ok also with lualatex - \PassOptionsToPackage{pdfencoding=unicode}{hyperref} -\else - \RequirePackage{atbegshi} - \ifx\ucs\@undefined - \ifnum 42146=\euc"A4A2 - \AtBeginShipoutFirst{\special{pdf:tounicode EUC-UCS2}} - \else - \AtBeginShipoutFirst{\special{pdf:tounicode 90ms-RKSJ-UCS2}} - \fi - \else - \AtBeginShipoutFirst{\special{pdf:tounicode UTF8-UCS2}} - \fi -\fi - -\ifx\@jsc@uplatextrue\@undefined\else - \PassOptionsToPackage{setpagesize=false}{hyperref} -\fi - -% These options can be overriden inside 'hyperref' key -% or by later use of \hypersetup. 
-\PassOptionsToPackage{colorlinks,breaklinks,% - linkcolor=InnerLinkColor,filecolor=OuterLinkColor,% - menucolor=OuterLinkColor,urlcolor=OuterLinkColor,% - citecolor=InnerLinkColor}{hyperref} - -% stylesheet for highlighting with pygments -\RequirePackage{sphinxhighlight} -% fix baseline increase from Pygments latex formatter in case of error tokens -% and keep \fboxsep's scope local via added braces -\def\PYG@tok@err{% - \def\PYG@bc##1{{\setlength{\fboxsep}{-\fboxrule}% - \fcolorbox[rgb]{1.00,0.00,0.00}{1,1,1}{\strut ##1}}}% -} -\def\PYG@tok@cs{% - \def\PYG@tc##1{\textcolor[rgb]{0.25,0.50,0.56}{##1}}% - \def\PYG@bc##1{{\setlength{\fboxsep}{0pt}% - \colorbox[rgb]{1.00,0.94,0.94}{\strut ##1}}}% -}% - - -%% OPTIONS -% -% Handle options via "kvoptions" (later loaded by hyperref anyhow) -\RequirePackage{kvoptions} -\SetupKeyvalOptions{prefix=spx@opt@} % use \spx@opt@ prefix - -% Sphinx legacy text layout: 1in margins on all four sides -\ifx\@jsc@uplatextrue\@undefined -\DeclareStringOption[1in]{hmargin} -\DeclareStringOption[1in]{vmargin} -\DeclareStringOption[.5in]{marginpar} -\else -% Japanese standard document classes handle \mag in a special way -\DeclareStringOption[\inv@mag in]{hmargin} -\DeclareStringOption[\inv@mag in]{vmargin} -\DeclareStringOption[.5\dimexpr\inv@mag in\relax]{marginpar} -\fi - -\DeclareStringOption[0]{maxlistdepth}% \newcommand*\spx@opt@maxlistdepth{0} -\DeclareStringOption[-1]{numfigreset} -\DeclareBoolOption[false]{nonumfigreset} -\DeclareBoolOption[false]{mathnumfig} -% \DeclareBoolOption[false]{usespart}% not used -% dimensions, we declare the \dimen registers here. -\newdimen\sphinxverbatimsep -\newdimen\sphinxverbatimborder -\newdimen\sphinxshadowsep -\newdimen\sphinxshadowsize -\newdimen\sphinxshadowrule -% \DeclareStringOption is not convenient for the handling of these dimensions -% because we want to assign the values to the corresponding registers. 
Even if -% we added the code to the key handler it would be too late for the initial -% set-up and we would need to do initial assignments explicitely. We end up -% using \define@key directly. -% verbatim -\sphinxverbatimsep=\fboxsep - \define@key{sphinx}{verbatimsep}{\sphinxverbatimsep\dimexpr #1\relax} -\sphinxverbatimborder=\fboxrule - \define@key{sphinx}{verbatimborder}{\sphinxverbatimborder\dimexpr #1\relax} -% topic boxes -\sphinxshadowsep =5pt - \define@key{sphinx}{shadowsep}{\sphinxshadowsep\dimexpr #1\relax} -\sphinxshadowsize=4pt - \define@key{sphinx}{shadowsize}{\sphinxshadowsize\dimexpr #1\relax} -\sphinxshadowrule=\fboxrule - \define@key{sphinx}{shadowrule}{\sphinxshadowrule\dimexpr #1\relax} -% verbatim -\DeclareBoolOption[true]{verbatimwithframe} -\DeclareBoolOption[true]{verbatimwrapslines} -\DeclareBoolOption[true]{verbatimhintsturnover} -\DeclareBoolOption[true]{inlineliteralwraps} -\DeclareStringOption[t]{literalblockcappos} -\DeclareStringOption[r]{verbatimcontinuedalign} -\DeclareStringOption[r]{verbatimcontinuesalign} -% parsed literal -\DeclareBoolOption[true]{parsedliteralwraps} -% \textvisiblespace for compatibility with fontspec+XeTeX/LuaTeX -\DeclareStringOption[\textcolor{red}{\textvisiblespace}]{verbatimvisiblespace} -\DeclareStringOption % must use braces to hide the brackets - [{\makebox[2\fontcharwd\font`\x][r]{\textcolor{red}{\tiny$\m@th\hookrightarrow$}}}]% - {verbatimcontinued} -% notices/admonitions -% the dimensions for notices/admonitions are kept as macros and assigned to -% \spx@notice@border at time of use, hence \DeclareStringOption is ok for this -\newdimen\spx@notice@border -\DeclareStringOption[0.5pt]{noteborder} -\DeclareStringOption[0.5pt]{hintborder} -\DeclareStringOption[0.5pt]{importantborder} -\DeclareStringOption[0.5pt]{tipborder} -\DeclareStringOption[1pt]{warningborder} -\DeclareStringOption[1pt]{cautionborder} -\DeclareStringOption[1pt]{attentionborder} -\DeclareStringOption[1pt]{dangerborder} 
-\DeclareStringOption[1pt]{errorborder} -% footnotes -\DeclareStringOption[\mbox{ }]{AtStartFootnote} -% we need a public macro name for direct use in latex file -\newcommand*{\sphinxAtStartFootnote}{\spx@opt@AtStartFootnote} -% no such need for this one, as it is used inside other macros -\DeclareStringOption[\leavevmode\unskip]{BeforeFootnote} -% some font styling. -\DeclareStringOption[\sffamily\bfseries]{HeaderFamily} -% colours -% same problems as for dimensions: we want the key handler to use \definecolor. -% first, some colours with no prefix, for backwards compatibility -\newcommand*{\sphinxDeclareColorOption}[2]{% - \definecolor{#1}#2% - \define@key{sphinx}{#1}{\definecolor{#1}##1}% -}% -\sphinxDeclareColorOption{TitleColor}{{rgb}{0.126,0.263,0.361}} -\sphinxDeclareColorOption{InnerLinkColor}{{rgb}{0.208,0.374,0.486}} -\sphinxDeclareColorOption{OuterLinkColor}{{rgb}{0.216,0.439,0.388}} -\sphinxDeclareColorOption{VerbatimColor}{{rgb}{1,1,1}} -\sphinxDeclareColorOption{VerbatimBorderColor}{{rgb}{0,0,0}} -% now the colours defined with "sphinx" prefix in their names -\newcommand*{\sphinxDeclareSphinxColorOption}[2]{% - % set the initial default - \definecolor{sphinx#1}#2% - % set the key handler. The "value" ##1 must be acceptable by \definecolor. - \define@key{sphinx}{#1}{\definecolor{sphinx#1}##1}% -}% -% Default color chosen to be as in minted.sty LaTeX package! 
-\sphinxDeclareSphinxColorOption{VerbatimHighlightColor}{{rgb}{0.878,1,1}} -% admonition boxes, "light" style -\sphinxDeclareSphinxColorOption{noteBorderColor}{{rgb}{0,0,0}} -\sphinxDeclareSphinxColorOption{hintBorderColor}{{rgb}{0,0,0}} -\sphinxDeclareSphinxColorOption{importantBorderColor}{{rgb}{0,0,0}} -\sphinxDeclareSphinxColorOption{tipBorderColor}{{rgb}{0,0,0}} -% admonition boxes, "heavy" style -\sphinxDeclareSphinxColorOption{warningBorderColor}{{rgb}{0,0,0}} -\sphinxDeclareSphinxColorOption{cautionBorderColor}{{rgb}{0,0,0}} -\sphinxDeclareSphinxColorOption{attentionBorderColor}{{rgb}{0,0,0}} -\sphinxDeclareSphinxColorOption{dangerBorderColor}{{rgb}{0,0,0}} -\sphinxDeclareSphinxColorOption{errorBorderColor}{{rgb}{0,0,0}} -\sphinxDeclareSphinxColorOption{warningBgColor}{{rgb}{1,1,1}} -\sphinxDeclareSphinxColorOption{cautionBgColor}{{rgb}{1,1,1}} -\sphinxDeclareSphinxColorOption{attentionBgColor}{{rgb}{1,1,1}} -\sphinxDeclareSphinxColorOption{dangerBgColor}{{rgb}{1,1,1}} -\sphinxDeclareSphinxColorOption{errorBgColor}{{rgb}{1,1,1}} - -\DeclareDefaultOption{\@unknownoptionerror} -\ProcessKeyvalOptions* -% don't allow use of maxlistdepth via \sphinxsetup. -\DisableKeyvalOption{sphinx}{maxlistdepth} -\DisableKeyvalOption{sphinx}{numfigreset} -\DisableKeyvalOption{sphinx}{nonumfigreset} -\DisableKeyvalOption{sphinx}{mathnumfig} -% user interface: options can be changed midway in a document! -\newcommand\sphinxsetup[1]{\setkeys{sphinx}{#1}} - - -%% MAXLISTDEPTH -% -% remove LaTeX's cap on nesting depth if 'maxlistdepth' key used. -% This is a hack, which works with the standard classes: it assumes \@toodeep -% is always used in "true" branches: "\if ... \@toodeep \else .. \fi." - -% will force use the "false" branch (if there is one) -\def\spx@toodeep@hack{\fi\iffalse} - -% do nothing if 'maxlistdepth' key not used or if package enumitem loaded. 
-\ifnum\spx@opt@maxlistdepth=\z@\expandafter\@gobbletwo\fi -\AtBeginDocument{% -\@ifpackageloaded{enumitem}{\remove@to@nnil}{}% - \let\spx@toodeepORI\@toodeep - \def\@toodeep{% - \ifnum\@listdepth<\spx@opt@maxlistdepth\relax - \expandafter\spx@toodeep@hack - \else - \expandafter\spx@toodeepORI - \fi}% -% define all missing \@list... macros - \count@\@ne - \loop - \ltx@ifundefined{@list\romannumeral\the\count@} - {\iffalse}{\iftrue\advance\count@\@ne}% - \repeat - \loop - \ifnum\count@>\spx@opt@maxlistdepth\relax\else - \expandafter\let - \csname @list\romannumeral\the\count@\expandafter\endcsname - \csname @list\romannumeral\the\numexpr\count@-\@ne\endcsname - % workaround 2.6--3.2d babel-french issue (fixed in 3.2e; no change needed) - \ltx@ifundefined{leftmargin\romannumeral\the\count@} - {\expandafter\let - \csname leftmargin\romannumeral\the\count@\expandafter\endcsname - \csname leftmargin\romannumeral\the\numexpr\count@-\@ne\endcsname}{}% - \advance\count@\@ne - \repeat -% define all missing enum... counters and \labelenum... macros and \p@enum.. - \count@\@ne - \loop - \ltx@ifundefined{c@enum\romannumeral\the\count@} - {\iffalse}{\iftrue\advance\count@\@ne}% - \repeat - \loop - \ifnum\count@>\spx@opt@maxlistdepth\relax\else - \newcounter{enum\romannumeral\the\count@}% - \expandafter\def - \csname labelenum\romannumeral\the\count@\expandafter\endcsname - \expandafter - {\csname theenum\romannumeral\the\numexpr\count@\endcsname.}% - \expandafter\def - \csname p@enum\romannumeral\the\count@\expandafter\endcsname - \expandafter - {\csname p@enum\romannumeral\the\numexpr\count@-\@ne\expandafter - \endcsname\csname theenum\romannumeral\the\numexpr\count@-\@ne\endcsname.}% - \advance\count@\@ne - \repeat -% define all missing labelitem... 
macros - \count@\@ne - \loop - \ltx@ifundefined{labelitem\romannumeral\the\count@} - {\iffalse}{\iftrue\advance\count@\@ne}% - \repeat - \loop - \ifnum\count@>\spx@opt@maxlistdepth\relax\else - \expandafter\let - \csname labelitem\romannumeral\the\count@\expandafter\endcsname - \csname labelitem\romannumeral\the\numexpr\count@-\@ne\endcsname - \advance\count@\@ne - \repeat - \PackageInfo{sphinx}{maximal list depth extended to \spx@opt@maxlistdepth}% -\@gobble\@nnil -} - - -%% INDEX, BIBLIOGRAPHY, APPENDIX, TABLE OF CONTENTS -% -% fix the double index and bibliography on the table of contents -% in jsclasses (Japanese standard document classes) -\ifx\@jsc@uplatextrue\@undefined\else - \renewenvironment{sphinxtheindex} - {\cleardoublepage\phantomsection - \begin{theindex}} - {\end{theindex}} - - \renewenvironment{sphinxthebibliography}[1] - {\cleardoublepage% \phantomsection % not needed here since TeXLive 2010's hyperref - \begin{thebibliography}{#1}} - {\end{thebibliography}} -\fi - -% disable \@chappos in Appendix in pTeX -\ifx\kanjiskip\@undefined\else - \let\py@OldAppendix=\appendix - \renewcommand{\appendix}{ - \py@OldAppendix - \gdef\@chappos{} - } -\fi - -% make commands known to non-Sphinx document classes -\providecommand*{\sphinxtableofcontents}{\tableofcontents} -\ltx@ifundefined{sphinxthebibliography} - {\newenvironment - {sphinxthebibliography}{\begin{thebibliography}}{\end{thebibliography}}% - } - {}% else clause of \ltx@ifundefined -\ltx@ifundefined{sphinxtheindex} - {\newenvironment{sphinxtheindex}{\begin{theindex}}{\end{theindex}}}% - {}% else clause of \ltx@ifundefined - -% for usage with xindy: this string gets internationalized in preamble -\newcommand*{\sphinxnonalphabeticalgroupname}{} -% redefined in preamble, headings for makeindex produced index -\newcommand*{\sphinxsymbolsname}{} -\newcommand*{\sphinxnumbersname}{} - -%% COLOR (general) -% -% FIXME: \normalcolor should probably be used in place of \py@NormalColor -% elsewhere, and 
\py@NormalColor should never be defined. \normalcolor -% switches to the colour from last \color call in preamble. -\def\py@NormalColor{\color{black}} -% FIXME: it is probably better to use \color{TitleColor}, as TitleColor -% can be customized from 'sphinxsetup', and drop usage of \py@TitleColor -\def\py@TitleColor{\color{TitleColor}} -% FIXME: this line should be dropped, as "9" is default anyhow. -\ifdefined\pdfcompresslevel\pdfcompresslevel = 9 \fi - - -%% PAGE STYLING -% -% Style parameters and macros used by most documents here -\raggedbottom -\sloppy -\hbadness = 5000 % don't print trivial gripes - -\pagestyle{empty} % start this way - -% Redefine the 'normal' header/footer style when using "fancyhdr" package: -% Note: this presupposes "twoside". If "oneside" class option, there will be warnings. -\ltx@ifundefined{fancyhf}{}{ - % Use \pagestyle{normal} as the primary pagestyle for text. - \fancypagestyle{normal}{ - \fancyhf{} -% (for \py@HeaderFamily cf "TITLES") - \fancyfoot[LE,RO]{{\py@HeaderFamily\thepage}} - \fancyfoot[LO]{{\py@HeaderFamily\nouppercase{\rightmark}}} - \fancyfoot[RE]{{\py@HeaderFamily\nouppercase{\leftmark}}} - \fancyhead[LE,RO]{{\py@HeaderFamily \@title\sphinxheadercomma\py@release}} - \renewcommand{\headrulewidth}{0.4pt} - \renewcommand{\footrulewidth}{0.4pt} - % define chaptermark with \@chappos when \@chappos is available for Japanese - \ltx@ifundefined{@chappos}{} - {\def\chaptermark##1{\markboth{\@chapapp\space\thechapter\space\@chappos\space ##1}{}}} - } - % Update the plain style so we get the page number & footer line, - % but not a chapter or section title. This is to keep the first - % page of a chapter and the blank page between chapters `clean.' 
- \fancypagestyle{plain}{ - \fancyhf{} - \fancyfoot[LE,RO]{{\py@HeaderFamily\thepage}} - \renewcommand{\headrulewidth}{0pt} - \renewcommand{\footrulewidth}{0.4pt} - } -} - -% geometry -\ifx\kanjiskip\@undefined - \PassOptionsToPackage{% - hmargin={\unexpanded{\spx@opt@hmargin}},% - vmargin={\unexpanded{\spx@opt@vmargin}},% - marginpar=\unexpanded{\spx@opt@marginpar}} - {geometry} -\else - % set text width for Japanese documents to be integer multiple of 1zw - % and text height to be integer multiple of \baselineskip - % the execution is delayed to \sphinxsetup then geometry.sty - \normalsize\normalfont - \newcommand*\sphinxtextwidthja[1]{% - \if@twocolumn\tw@\fi - \dimexpr - \numexpr\dimexpr\paperwidth-\tw@\dimexpr#1\relax\relax/ - \dimexpr\if@twocolumn\tw@\else\@ne\fi zw\relax - zw\relax}% - \newcommand*\sphinxmarginparwidthja[1]{% - \dimexpr\numexpr\dimexpr#1\relax/\dimexpr1zw\relax zw\relax}% - \newcommand*\sphinxtextlinesja[1]{% - \numexpr\@ne+\dimexpr\paperheight-\topskip-\tw@\dimexpr#1\relax\relax/ - \baselineskip\relax}% - \ifx\@jsc@uplatextrue\@undefined\else - % the way we found in order for the papersize special written by - % geometry in the dvi file to be correct in case of jsbook class - \ifnum\mag=\@m\else % do nothing special if nomag class option or 10pt - \PassOptionsToPackage{truedimen}{geometry}% - \fi - \fi - \PassOptionsToPackage{% - hmarginratio={1:1},% - textwidth=\unexpanded{\sphinxtextwidthja{\spx@opt@hmargin}},% - vmarginratio={1:1},% - lines=\unexpanded{\sphinxtextlinesja{\spx@opt@vmargin}},% - marginpar=\unexpanded{\sphinxmarginparwidthja{\spx@opt@marginpar}},% - footskip=2\baselineskip,% - }{geometry}% - \AtBeginDocument - {% update a dimension used by the jsclasses - \ifx\@jsc@uplatextrue\@undefined\else\fullwidth\textwidth\fi - % for some reason, jreport normalizes all dimensions with \@settopoint - \@ifclassloaded{jreport} - {\@settopoint\textwidth\@settopoint\textheight\@settopoint\marginparwidth} - {}% <-- "false" clause of 
\@ifclassloaded - }% -\fi - -% fix fncychap's bug which uses prematurely the \textwidth value -\@ifpackagewith{fncychap}{Bjornstrup} - {\AtBeginDocument{\mylen\textwidth\advance\mylen-2\myhi}}% - {}% <-- "false" clause of \@ifpackagewith - - -%% TITLES -% -% Since Sphinx 1.5, users should use HeaderFamily key to 'sphinxsetup' rather -% than defining their own \py@HeaderFamily command (which is still possible). -% Memo: \py@HeaderFamily is also used by \maketitle as defined in -% sphinxmanual.cls/sphinxhowto.cls -\newcommand{\py@HeaderFamily}{\spx@opt@HeaderFamily} - -% This sets up the fancy chapter headings that make the documents look -% at least a little better than the usual LaTeX output. -\@ifpackagewith{fncychap}{Bjarne}{ - \ChNameVar {\raggedleft\normalsize \py@HeaderFamily} - \ChNumVar {\raggedleft\Large \py@HeaderFamily} - \ChTitleVar{\raggedleft\Large \py@HeaderFamily} - % This creates (numbered) chapter heads without the leading \vspace*{}: - \def\@makechapterhead#1{% - {\parindent \z@ \raggedright \normalfont - \ifnum \c@secnumdepth >\m@ne - \if@mainmatter - \DOCH - \fi - \fi - \interlinepenalty\@M - \if@mainmatter - \DOTI{#1}% - \else% - \DOTIS{#1}% - \fi - }} -}{}% <-- "false" clause of \@ifpackagewith - -% Augment the sectioning commands used to get our own font family in place, -% and reset some internal data items (\titleformat from titlesec package) -\titleformat{\section}{\Large\py@HeaderFamily}% - {\py@TitleColor\thesection}{0.5em}{\py@TitleColor}{\py@NormalColor} -\titleformat{\subsection}{\large\py@HeaderFamily}% - {\py@TitleColor\thesubsection}{0.5em}{\py@TitleColor}{\py@NormalColor} -\titleformat{\subsubsection}{\py@HeaderFamily}% - {\py@TitleColor\thesubsubsection}{0.5em}{\py@TitleColor}{\py@NormalColor} -% By default paragraphs (and subsubsections) will not be numbered because -% sphinxmanual.cls and sphinxhowto.cls set secnumdepth to 2 -\titleformat{\paragraph}{\py@HeaderFamily}% - 
{\py@TitleColor\theparagraph}{0.5em}{\py@TitleColor}{\py@NormalColor} -\titleformat{\subparagraph}{\py@HeaderFamily}% - {\py@TitleColor\thesubparagraph}{0.5em}{\py@TitleColor}{\py@NormalColor} - - -%% GRAPHICS -% -% \sphinxincludegraphics defined to resize images larger than the line width, -% except if height or width option present. -% -% If scale is present, rescale before fitting to line width. (since 1.5) -\newbox\spx@image@box -\newcommand*{\sphinxincludegraphics}[2][]{% - \in@{height}{#1}\ifin@\else\in@{width}{#1}\fi - \ifin@ % height or width present - \includegraphics[#1]{#2}% - \else % no height nor width (but #1 may be "scale=...") - \setbox\spx@image@box\hbox{\includegraphics[#1,draft]{#2}}% - \ifdim \wd\spx@image@box>\linewidth - \setbox\spx@image@box\box\voidb@x % clear memory - \includegraphics[#1,width=\linewidth]{#2}% - \else - \includegraphics[#1]{#2}% - \fi - \fi -} - - -%% FIGURE IN TABLE -% -\newenvironment{sphinxfigure-in-table}[1][\linewidth]{% - \def\@captype{figure}% - \sphinxsetvskipsforfigintablecaption - \begin{minipage}{#1}% -}{\end{minipage}} -% store original \caption macro for use with figures in longtable and tabulary -\AtBeginDocument{\let\spx@originalcaption\caption} -\newcommand*\sphinxfigcaption - {\ifx\equation$%$% this is trick to identify tabulary first pass - \firstchoice@false\else\firstchoice@true\fi - \spx@originalcaption } -\newcommand*\sphinxsetvskipsforfigintablecaption - {\abovecaptionskip\smallskipamount - \belowcaptionskip\smallskipamount} - - -%% CITATIONS -% -\protected\def\sphinxcite{\cite} - -%% FOOTNOTES -% -% Support large numbered footnotes in minipage -% But now obsolete due to systematic use of \savenotes/\spewnotes -% when minipages are in use in the various macro definitions next. 
-\def\thempfootnote{\arabic{mpfootnote}} - - -%% NUMBERING OF FIGURES, TABLES, AND LITERAL BLOCKS -\ltx@ifundefined{c@chapter} - {\newcounter{literalblock}}% - {\newcounter{literalblock}[chapter]% - \def\theliteralblock{\ifnum\c@chapter>\z@\arabic{chapter}.\fi - \arabic{literalblock}}% - }% -\ifspx@opt@nonumfigreset - \ltx@ifundefined{c@chapter}{}{% - \@removefromreset{figure}{chapter}% - \@removefromreset{table}{chapter}% - \@removefromreset{literalblock}{chapter}% - \ifspx@opt@mathnumfig - \@removefromreset{equation}{chapter}% - \fi - }% - \def\thefigure{\arabic{figure}}% - \def\thetable {\arabic{table}}% - \def\theliteralblock{\arabic{literalblock}}% - \ifspx@opt@mathnumfig - \def\theequation{\arabic{equation}}% - \fi -\else -\let\spx@preAthefigure\@empty -\let\spx@preBthefigure\@empty -% \ifspx@opt@usespart % <-- LaTeX writer could pass such a 'usespart' boolean -% % as sphinx.sty package option -% If document uses \part, (triggered in Sphinx by latex_toplevel_sectioning) -% LaTeX core per default does not reset chapter or section -% counters at each part. -% But if we modify this, we need to redefine \thechapter, \thesection to -% include the part number and this will cause problems in table of contents -% because of too wide numbering. Simplest is to do nothing. 
-% \fi -\ifnum\spx@opt@numfigreset>0 - \ltx@ifundefined{c@chapter} - {} - {\g@addto@macro\spx@preAthefigure{\ifnum\c@chapter>\z@\arabic{chapter}.}% - \g@addto@macro\spx@preBthefigure{\fi}}% -\fi -\ifnum\spx@opt@numfigreset>1 - \@addtoreset{figure}{section}% - \@addtoreset{table}{section}% - \@addtoreset{literalblock}{section}% - \ifspx@opt@mathnumfig - \@addtoreset{equation}{section}% - \fi - \g@addto@macro\spx@preAthefigure{\ifnum\c@section>\z@\arabic{section}.}% - \g@addto@macro\spx@preBthefigure{\fi}% -\fi -\ifnum\spx@opt@numfigreset>2 - \@addtoreset{figure}{subsection}% - \@addtoreset{table}{subsection}% - \@addtoreset{literalblock}{subsection}% - \ifspx@opt@mathnumfig - \@addtoreset{equation}{subsection}% - \fi - \g@addto@macro\spx@preAthefigure{\ifnum\c@subsection>\z@\arabic{subsection}.}% - \g@addto@macro\spx@preBthefigure{\fi}% -\fi -\ifnum\spx@opt@numfigreset>3 - \@addtoreset{figure}{subsubsection}% - \@addtoreset{table}{subsubsection}% - \@addtoreset{literalblock}{subsubsection}% - \ifspx@opt@mathnumfig - \@addtoreset{equation}{subsubsection}% - \fi - \g@addto@macro\spx@preAthefigure{\ifnum\c@subsubsection>\z@\arabic{subsubsection}.}% - \g@addto@macro\spx@preBthefigure{\fi}% -\fi -\ifnum\spx@opt@numfigreset>4 - \@addtoreset{figure}{paragraph}% - \@addtoreset{table}{paragraph}% - \@addtoreset{literalblock}{paragraph}% - \ifspx@opt@mathnumfig - \@addtoreset{equation}{paragraph}% - \fi - \g@addto@macro\spx@preAthefigure{\ifnum\c@subparagraph>\z@\arabic{subparagraph}.}% - \g@addto@macro\spx@preBthefigure{\fi}% -\fi -\ifnum\spx@opt@numfigreset>5 - \@addtoreset{figure}{subparagraph}% - \@addtoreset{table}{subparagraph}% - \@addtoreset{literalblock}{subparagraph}% - \ifspx@opt@mathnumfig - \@addtoreset{equation}{subparagraph}% - \fi - \g@addto@macro\spx@preAthefigure{\ifnum\c@subsubparagraph>\z@\arabic{subsubparagraph}.}% - \g@addto@macro\spx@preBthefigure{\fi}% -\fi -\expandafter\g@addto@macro -\expandafter\spx@preAthefigure\expandafter{\spx@preBthefigure}% 
-\let\thefigure\spx@preAthefigure -\let\thetable\spx@preAthefigure -\let\theliteralblock\spx@preAthefigure -\g@addto@macro\thefigure{\arabic{figure}}% -\g@addto@macro\thetable{\arabic{table}}% -\g@addto@macro\theliteralblock{\arabic{literalblock}}% - \ifspx@opt@mathnumfig - \let\theequation\spx@preAthefigure - \g@addto@macro\theequation{\arabic{equation}}% - \fi -\fi - - -%% LITERAL BLOCKS -% -% Based on use of "fancyvrb.sty"'s Verbatim. -% - with framing allowing page breaks ("framed.sty") -% - with breaking of long lines (exploits Pygments mark-up), -% - with possibly of a top caption, non-separable by pagebreak. -% - and usable inside tables or footnotes ("footnotehyper-sphinx"). - -% For extensions which use \OriginalVerbatim and compatibility with Sphinx < -% 1.5, we define and use these when (unmodified) Verbatim will be needed. But -% Sphinx >= 1.5 does not modify the \Verbatim macro anymore. -\let\OriginalVerbatim \Verbatim -\let\endOriginalVerbatim\endVerbatim - -% for captions of literal blocks -% at start of caption title -\newcommand*{\fnum@literalblock}{\literalblockname\nobreakspace\theliteralblock} -% this will be overwritten in document preamble by Babel translation -\newcommand*{\literalblockname}{Listing } -% file extension needed for \caption's good functioning, the file is created -% only if a \listof{literalblock}{foo} command is encountered, which is -% analogous to \listoffigures, but for the code listings (foo = chosen title.) -\newcommand*{\ext@literalblock}{lol} - -\newif\ifspx@inframed % flag set if we are already in a framed environment -% if forced use of minipage encapsulation is needed (e.g. 
table cells) -\newif\ifsphinxverbatimwithminipage \sphinxverbatimwithminipagefalse - -% Framing macro for use with framed.sty's \FrameCommand -% - it obeys current indentation, -% - frame is \fboxsep separated from the contents, -% - the contents use the full available text width, -% - #1 = color of frame, #2 = color of background, -% - #3 = above frame, #4 = below frame, #5 = within frame, -% - #3 and #4 must be already typeset boxes; they must issue \normalcolor -% or similar, else, they are under scope of color #1 -\long\def\spx@fcolorbox #1#2#3#4#5{% - \hskip\@totalleftmargin - \hskip-\fboxsep\hskip-\fboxrule - % use of \color@b@x here is compatible with both xcolor.sty and color.sty - \color@b@x {\color{#1}\spx@CustomFBox{#3}{#4}}{\color{#2}}{#5}% - \hskip-\fboxsep\hskip-\fboxrule - \hskip-\linewidth \hskip-\@totalleftmargin \hskip\columnwidth -}% -% #1 = for material above frame, such as a caption or a "continued" hint -% #2 = for material below frame, such as a caption or "continues on next page" -% #3 = actual contents, which will be typeset with a background color -\long\def\spx@CustomFBox#1#2#3{% - \begingroup - \setbox\@tempboxa\hbox{{#3}}% inner braces to avoid color leaks - \vbox{#1% above frame - % draw frame border _latest_ to avoid pdf viewer issue - \kern\fboxrule - \hbox{\kern\fboxrule - \copy\@tempboxa - \kern-\wd\@tempboxa\kern-\fboxrule - \vrule\@width\fboxrule - \kern\wd\@tempboxa - \vrule\@width\fboxrule}% - \kern-\dimexpr\ht\@tempboxa+\dp\@tempboxa+\fboxrule\relax - \hrule\@height\fboxrule - \kern\dimexpr\ht\@tempboxa+\dp\@tempboxa\relax - \hrule\@height\fboxrule - #2% below frame - }% - \endgroup -}% -\def\spx@fcolorbox@put@c#1{% hide width from framed.sty measuring - \moveright\dimexpr\fboxrule+.5\wd\@tempboxa\hb@xt@\z@{\hss#1\hss}% -}% -\def\spx@fcolorbox@put@r#1{% right align with contents, width hidden - \moveright\dimexpr\fboxrule+\wd\@tempboxa-\fboxsep\hb@xt@\z@{\hss#1}% -}% -\def\spx@fcolorbox@put@l#1{% left align with contents, 
width hidden - \moveright\dimexpr\fboxrule+\fboxsep\hb@xt@\z@{#1\hss}% -}% -% -\def\sphinxVerbatim@Continued - {\csname spx@fcolorbox@put@\spx@opt@verbatimcontinuedalign\endcsname - {\normalcolor\sphinxstylecodecontinued\literalblockcontinuedname}}% -\def\sphinxVerbatim@Continues - {\csname spx@fcolorbox@put@\spx@opt@verbatimcontinuesalign\endcsname - {\normalcolor\sphinxstylecodecontinues\literalblockcontinuesname}}% -\def\sphinxVerbatim@Title - {\spx@fcolorbox@put@c{\unhcopy\sphinxVerbatim@TitleBox}}% -\let\sphinxVerbatim@Before\@empty -\let\sphinxVerbatim@After\@empty -% Defaults are redefined in document preamble according to language -\newcommand*\literalblockcontinuedname{continued from previous page}% -\newcommand*\literalblockcontinuesname{continues on next page}% -% -\def\spx@verbatimfcolorbox{\spx@fcolorbox{VerbatimBorderColor}{VerbatimColor}}% -\def\sphinxVerbatim@FrameCommand - {\spx@verbatimfcolorbox\sphinxVerbatim@Before\sphinxVerbatim@After}% -\def\sphinxVerbatim@FirstFrameCommand - {\spx@verbatimfcolorbox\sphinxVerbatim@Before\sphinxVerbatim@Continues}% -\def\sphinxVerbatim@MidFrameCommand - {\spx@verbatimfcolorbox\sphinxVerbatim@Continued\sphinxVerbatim@Continues}% -\def\sphinxVerbatim@LastFrameCommand - {\spx@verbatimfcolorbox\sphinxVerbatim@Continued\sphinxVerbatim@After}% - -% For linebreaks inside Verbatim environment from package fancyvrb. -\newbox\sphinxcontinuationbox -\newbox\sphinxvisiblespacebox -\newcommand*\sphinxafterbreak {\copy\sphinxcontinuationbox} - -% Take advantage of the already applied Pygments mark-up to insert -% potential linebreaks for TeX processing. -% {, <, #, %, $, ' and ": go to next line. -% _, }, ^, &, >, - and ~: stay at end of broken line. -% Use of \textquotesingle for straight quote. -% FIXME: convert this to package options ? 
-\newcommand*\sphinxbreaksbeforelist {% - \do\PYGZob\{\do\PYGZlt\<\do\PYGZsh\#\do\PYGZpc\%% {, <, #, %, - \do\PYGZdl\$\do\PYGZdq\"% $, " - \def\PYGZsq - {\discretionary{}{\sphinxafterbreak\textquotesingle}{\textquotesingle}}% ' -} -\newcommand*\sphinxbreaksafterlist {% - \do\PYGZus\_\do\PYGZcb\}\do\PYGZca\^\do\PYGZam\&% _, }, ^, &, - \do\PYGZgt\>\do\PYGZhy\-\do\PYGZti\~% >, -, ~ -} -\newcommand*\sphinxbreaksatspecials {% - \def\do##1##2% - {\def##1{\discretionary{}{\sphinxafterbreak\char`##2}{\char`##2}}}% - \sphinxbreaksbeforelist - \def\do##1##2% - {\def##1{\discretionary{\char`##2}{\sphinxafterbreak}{\char`##2}}}% - \sphinxbreaksafterlist -} - -\def\sphinx@verbatim@nolig@list {\do \`}% -% Some characters . , ; ? ! / are not pygmentized. -% This macro makes them "active" and they will insert potential linebreaks. -% Not compatible with math mode (cf \sphinxunactivateextras). -\newcommand*\sphinxbreaksbeforeactivelist {}% none -\newcommand*\sphinxbreaksafteractivelist {\do\.\do\,\do\;\do\?\do\!\do\/} -\newcommand*\sphinxbreaksviaactive {% - \def\do##1{\lccode`\~`##1% - \lowercase{\def~}{\discretionary{}{\sphinxafterbreak\char`##1}{\char`##1}}% - \catcode`##1\active}% - \sphinxbreaksbeforeactivelist - \def\do##1{\lccode`\~`##1% - \lowercase{\def~}{\discretionary{\char`##1}{\sphinxafterbreak}{\char`##1}}% - \catcode`##1\active}% - \sphinxbreaksafteractivelist - \lccode`\~`\~ -} - -% If the linebreak is at a space, the latter will be displayed as visible -% space at end of first line, and a continuation symbol starts next line. 
-\def\spx@verbatim@space {% - \nobreak\hskip\z@skip - \discretionary{\copy\sphinxvisiblespacebox}{\sphinxafterbreak} - {\kern\fontdimen2\font}% -}% - -% if the available space on page is less than \literalblockneedspace, insert pagebreak -\newcommand{\sphinxliteralblockneedspace}{5\baselineskip} -\newcommand{\sphinxliteralblockwithoutcaptionneedspace}{1.5\baselineskip} -% The title (caption) is specified from outside as macro \sphinxVerbatimTitle. -% \sphinxVerbatimTitle is reset to empty after each use of Verbatim. -\newcommand*\sphinxVerbatimTitle {} -% This box to typeset the caption before framed.sty multiple passes for framing. -\newbox\sphinxVerbatim@TitleBox -% This is a workaround to a "feature" of French lists, when literal block -% follows immediately; usable generally (does only \par then), a priori... -\newcommand*\sphinxvspacefixafterfrenchlists{% - \ifvmode\ifdim\lastskip<\z@ \vskip\parskip\fi\else\par\fi -} -% Holder macro for labels of literal blocks. Set-up by LaTeX writer. 
-\newcommand*\sphinxLiteralBlockLabel {} -\newcommand*\sphinxSetupCaptionForVerbatim [1] -{% - \sphinxvspacefixafterfrenchlists - \needspace{\sphinxliteralblockneedspace}% -% insert a \label via \sphinxLiteralBlockLabel -% reset to normal the color for the literal block caption - \def\sphinxVerbatimTitle - {\py@NormalColor\sphinxcaption{\sphinxLiteralBlockLabel #1}}% -} -\newcommand*\sphinxSetupCodeBlockInFootnote {% - \fvset{fontsize=\footnotesize}\let\caption\sphinxfigcaption - \sphinxverbatimwithminipagetrue % reduces vertical spaces - % we counteract (this is in a group) the \@normalsize from \caption - \let\normalsize\footnotesize\let\@parboxrestore\relax - \def\spx@abovecaptionskip{\sphinxverbatimsmallskipamount}% -} -% needed to create wrapper environments of fancyvrb's Verbatim -\newcommand*{\sphinxVerbatimEnvironment}{\gdef\FV@EnvironName{sphinxVerbatim}} -\newcommand*{\sphinxverbatimsmallskipamount}{\smallskipamount} -% serves to implement line highlighting and line wrapping -\newcommand\sphinxFancyVerbFormatLine[1]{% - \expandafter\sphinx@verbatim@checkifhl\expandafter{\the\FV@CodeLineNo}% - \ifin@ - \sphinxVerbatimHighlightLine{#1}% - \else - \sphinxVerbatimFormatLine{#1}% - \fi -}% -\newcommand\sphinxVerbatimHighlightLine[1]{% - \edef\sphinxrestorefboxsep{\fboxsep\the\fboxsep\relax}% - \fboxsep0pt\relax % cf LaTeX bug graphics/4524 - \colorbox{sphinxVerbatimHighlightColor}% - {\sphinxrestorefboxsep\sphinxVerbatimFormatLine{#1}}% - % no need to restore \fboxsep here, as this ends up in a \hbox from fancyvrb -}% -% \sphinxVerbatimFormatLine will be set locally to one of those two: -\newcommand\sphinxVerbatimFormatLineWrap[1]{% - \hsize\linewidth - \vtop{\raggedright\hyphenpenalty\z@\exhyphenpenalty\z@ - \doublehyphendemerits\z@\finalhyphendemerits\z@ - \strut #1\strut}% -}% -\newcommand\sphinxVerbatimFormatLineNoWrap[1]{\hb@xt@\linewidth{\strut #1\hss}}% -\g@addto@macro\FV@SetupFont{% - \sbox\sphinxcontinuationbox {\spx@opt@verbatimcontinued}% - 
\sbox\sphinxvisiblespacebox {\spx@opt@verbatimvisiblespace}% -}% -\newenvironment{sphinxVerbatim}{% - % first, let's check if there is a caption - \ifx\sphinxVerbatimTitle\empty - \sphinxvspacefixafterfrenchlists - \parskip\z@skip - \vskip\sphinxverbatimsmallskipamount - % there was no caption. Check if nevertheless a label was set. - \ifx\sphinxLiteralBlockLabel\empty\else - % we require some space to be sure hyperlink target from \phantomsection - % will not be separated from upcoming verbatim by a page break - \needspace{\sphinxliteralblockwithoutcaptionneedspace}% - \phantomsection\sphinxLiteralBlockLabel - \fi - \else - \parskip\z@skip - \if t\spx@opt@literalblockcappos - \vskip\spx@abovecaptionskip - \def\sphinxVerbatim@Before - {\sphinxVerbatim@Title\nointerlineskip - \kern\dimexpr-\dp\strutbox+\sphinxbelowcaptionspace\relax}% - \else - \vskip\sphinxverbatimsmallskipamount - \def\sphinxVerbatim@After - {\nointerlineskip\kern\dp\strutbox\sphinxVerbatim@Title}% - \fi - \def\@captype{literalblock}% - \capstart - % \sphinxVerbatimTitle must reset color - \setbox\sphinxVerbatim@TitleBox - \hbox{\begin{minipage}{\linewidth}% - \sphinxVerbatimTitle - \end{minipage}}% - \fi - \global\let\sphinxLiteralBlockLabel\empty - \global\let\sphinxVerbatimTitle\empty - \fboxsep\sphinxverbatimsep \fboxrule\sphinxverbatimborder - \ifspx@opt@verbatimwithframe\else\fboxrule\z@\fi - \let\FrameCommand \sphinxVerbatim@FrameCommand - \let\FirstFrameCommand\sphinxVerbatim@FirstFrameCommand - \let\MidFrameCommand \sphinxVerbatim@MidFrameCommand - \let\LastFrameCommand \sphinxVerbatim@LastFrameCommand - \ifspx@opt@verbatimhintsturnover\else - \let\sphinxVerbatim@Continued\@empty - \let\sphinxVerbatim@Continues\@empty - \fi - \ifspx@opt@verbatimwrapslines - % fancyvrb's Verbatim puts each input line in (unbreakable) horizontal boxes. - % This customization wraps each line from the input in a \vtop, thus - % allowing it to wrap and display on two or more lines in the latex output. 
- % - The codeline counter will be increased only once. - % - The wrapped material will not break across pages, it is impossible - % to achieve this without extensive rewrite of fancyvrb. - % - The (not used in sphinx) obeytabs option to Verbatim is - % broken by this change (showtabs and tabspace work). - \let\sphinxVerbatimFormatLine\sphinxVerbatimFormatLineWrap - \let\FV@Space\spx@verbatim@space - % Allow breaks at special characters using \PYG... macros. - \sphinxbreaksatspecials - % Breaks at punctuation characters . , ; ? ! and / (needs catcode activation) - \fvset{codes*=\sphinxbreaksviaactive}% - \else % end of conditional code for wrapping long code lines - \let\sphinxVerbatimFormatLine\sphinxVerbatimFormatLineNoWrap - \fi - \let\FancyVerbFormatLine\sphinxFancyVerbFormatLine - % workaround to fancyvrb's check of \@currenvir - \let\VerbatimEnvironment\sphinxVerbatimEnvironment - % workaround to fancyvrb's check of current list depth - \def\@toodeep {\advance\@listdepth\@ne}% - % The list environment is needed to control perfectly the vertical space. - % Note: \OuterFrameSep used by framed.sty is later set to \topsep hence 0pt. - % - if caption: distance from last text baseline to caption baseline is - % A+(B-F)+\ht\strutbox, A = \abovecaptionskip (default 10pt), B = - % \baselineskip, F is the framed.sty \FrameHeightAdjust macro, default 6pt. - % Formula valid for F < 10pt. - % - distance of baseline of caption to top of frame is like for tables: - % \sphinxbelowcaptionspace (=0.5\baselineskip) - % - if no caption: distance of last text baseline to code frame is S+(B-F), - % with S = \sphinxverbatimtopskip (=\smallskip) - % - and distance from bottom of frame to next text baseline is - % \baselineskip+\parskip. - % The \trivlist is used to avoid possible "too deeply nested" error. 
- \itemsep \z@skip - \topsep \z@skip - \partopsep \z@skip - % trivlist will set \parsep to \parskip = zero - % \leftmargin will be set to zero by trivlist - \rightmargin\z@ - \parindent \z@% becomes \itemindent. Default zero, but perhaps overwritten. - \trivlist\item\relax - \ifsphinxverbatimwithminipage\spx@inframedtrue\fi - % use a minipage if we are already inside a framed environment - \ifspx@inframed\noindent\begin{minipage}{\linewidth}\fi - \MakeFramed {% adapted over from framed.sty's snugshade environment - \advance\hsize-\width\@totalleftmargin\z@\linewidth\hsize\@setminipage - }% - % For grid placement from \strut's in \FancyVerbFormatLine - \lineskip\z@skip - % active comma should not be overwritten by \@noligs - \ifspx@opt@verbatimwrapslines - \let\verbatim@nolig@list \sphinx@verbatim@nolig@list - \fi - % will fetch its optional arguments if any - \OriginalVerbatim -} -{% - \endOriginalVerbatim - \par\unskip\@minipagefalse\endMakeFramed % from framed.sty snugshade - \ifspx@inframed\end{minipage}\fi - \endtrivlist -} -\newenvironment {sphinxVerbatimNoFrame} - {\spx@opt@verbatimwithframefalse - % needed for fancyvrb as literal code will end in \end{sphinxVerbatimNoFrame} - \def\sphinxVerbatimEnvironment{\gdef\FV@EnvironName{sphinxVerbatimNoFrame}}% - \begin{sphinxVerbatim}} - {\end{sphinxVerbatim}} -\newenvironment {sphinxVerbatimintable} - {% don't use a frame if in a table cell - \spx@opt@verbatimwithframefalse - \sphinxverbatimwithminipagetrue - % the literal block caption uses \sphinxcaption which is wrapper of \caption, - % but \caption must be modified because longtable redefines it to work only - % for the own table caption, and tabulary has multiple passes - \let\caption\sphinxfigcaption - % reduce above caption skip - \def\spx@abovecaptionskip{\sphinxverbatimsmallskipamount}% - \def\sphinxVerbatimEnvironment{\gdef\FV@EnvironName{sphinxVerbatimintable}}% - \begin{sphinxVerbatim}} - {\end{sphinxVerbatim}} - - -%% PARSED LITERALS -% allow long lines 
to wrap like they do in code-blocks - -% this should be kept in sync with definitions in sphinx.util.texescape -\newcommand*\sphinxbreaksattexescapedchars{% - \def\do##1##2% put potential break point before character - {\def##1{\discretionary{}{\sphinxafterbreak\char`##2}{\char`##2}}}% - \do\{\{\do\textless\<\do\#\#\do\%\%\do\$\$% {, <, #, %, $ - \def\do##1##2% put potential break point after character - {\def##1{\discretionary{\char`##2}{\sphinxafterbreak}{\char`##2}}}% - \do\_\_\do\}\}\do\textasciicircum\^\do\&\&% _, }, ^, &, - \do\textgreater\>\do\textasciitilde\~% >, ~ -} -\newcommand*\sphinxbreaksviaactiveinparsedliteral{% - \sphinxbreaksviaactive % by default handles . , ; ? ! / - \do\-% we need also the hyphen character (ends up "as is" in parsed-literal) - \lccode`\~`\~ % - % update \dospecials as it is used by \url - % but deactivation will already have been done hence this is unneeded: - % \expandafter\def\expandafter\dospecials\expandafter{\dospecials - % \sphinxbreaksbeforeactivelist\sphinxbreaksafteractivelist\do\-}% -} -\newcommand*\sphinxbreaksatspaceinparsedliteral{% - \lccode`~32 \lowercase{\let~}\spx@verbatim@space\lccode`\~`\~ -} -\newcommand*{\sphinxunactivateextras}{\let\do\@makeother - \sphinxbreaksbeforeactivelist\sphinxbreaksafteractivelist\do\-}% -% the \catcode13=5\relax (deactivate end of input lines) is left to callers -\newcommand*{\sphinxunactivateextrasandspace}{\catcode32=10\relax - \sphinxunactivateextras}% -% now for the modified alltt environment -\newenvironment{sphinxalltt} -{% at start of next line to workaround Emacs/AUCTeX issue with this file -\begin{alltt}% - \ifspx@opt@parsedliteralwraps - \sbox\sphinxcontinuationbox {\spx@opt@verbatimcontinued}% - \sbox\sphinxvisiblespacebox {\spx@opt@verbatimvisiblespace}% - \sphinxbreaksattexescapedchars - \sphinxbreaksviaactiveinparsedliteral - \sphinxbreaksatspaceinparsedliteral -% alltt takes care of the ' as derivative ("prime") in math mode - 
\everymath\expandafter{\the\everymath\sphinxunactivateextrasandspace - \catcode`\<=12\catcode`\>=12\catcode`\^=7\catcode`\_=8 }% -% not sure if displayed math (align,...) can end up in parsed-literal, anyway - \everydisplay\expandafter{\the\everydisplay - \catcode13=5 \sphinxunactivateextrasandspace - \catcode`\<=12\catcode`\>=12\catcode`\^=7\catcode`\_=8 }% - \fi } -{\end{alltt}} - -% Protect \href's first argument in contexts such as sphinxalltt (or -% \sphinxcode). Sphinx uses \#, \%, \& ... always inside \sphinxhref. -\protected\def\sphinxhref#1#2{{% - \sphinxunactivateextrasandspace % never do \scantokens with active space! - \endlinechar\m@ne\everyeof{{#2}}% keep catcode regime for #2 - \scantokens{\href{#1}}% normalise it for #1 during \href expansion -}} -% Same for \url. And also \nolinkurl for coherence. -\protected\def\sphinxurl#1{{% - \sphinxunactivateextrasandspace\everyeof{}% (<- precaution for \scantokens) - \endlinechar\m@ne\scantokens{\url{#1}}% -}} -\protected\def\sphinxnolinkurl#1{{% - \sphinxunactivateextrasandspace\everyeof{}% - \endlinechar\m@ne\scantokens{\nolinkurl{#1}}% -}} - - -%% TOPIC AND CONTENTS BOXES -% -% Again based on use of "framed.sty", this allows breakable framed boxes. 
-\long\def\spx@ShadowFBox#1{% - \leavevmode\begingroup - % first we frame the box #1 - \setbox\@tempboxa - \hbox{\vrule\@width\sphinxshadowrule - \vbox{\hrule\@height\sphinxshadowrule - \kern\sphinxshadowsep - \hbox{\kern\sphinxshadowsep #1\kern\sphinxshadowsep}% - \kern\sphinxshadowsep - \hrule\@height\sphinxshadowrule}% - \vrule\@width\sphinxshadowrule}% - % Now we add the shadow, like \shadowbox from fancybox.sty would do - \dimen@\dimexpr.5\sphinxshadowrule+\sphinxshadowsize\relax - \hbox{\vbox{\offinterlineskip - \hbox{\copy\@tempboxa\kern-.5\sphinxshadowrule - % add shadow on right side - \lower\sphinxshadowsize - \hbox{\vrule\@height\ht\@tempboxa \@width\dimen@}% - }% - \kern-\dimen@ % shift back vertically to bottom of frame - % and add shadow at bottom - \moveright\sphinxshadowsize - \vbox{\hrule\@width\wd\@tempboxa \@height\dimen@}% - }% - % move left by the size of right shadow so shadow adds no width - \kern-\sphinxshadowsize - }% - \endgroup -} - -% use framed.sty to allow page breaks in frame+shadow -% works well inside Lists and Quote-like environments -% produced by ``topic'' directive (or local contents) -% could nest if LaTeX writer authorized it -\newenvironment{sphinxShadowBox} - {\def\FrameCommand {\spx@ShadowFBox }% - % configure framed.sty not to add extra vertical spacing - \ltx@ifundefined{OuterFrameSep}{}{\OuterFrameSep\z@skip}% - % the \trivlist will add the vertical spacing on top and bottom which is - % typical of center environment as used in Sphinx <= 1.4.1 - % the \noindent has the effet of an extra blank line on top, to - % imitate closely the layout from Sphinx <= 1.4.1; the \FrameHeightAdjust - % will put top part of frame on this baseline. 
- \def\FrameHeightAdjust {\baselineskip}% - % use package footnote to handle footnotes - \savenotes - \trivlist\item\noindent - % use a minipage if we are already inside a framed environment - \ifspx@inframed\begin{minipage}{\linewidth}\fi - \MakeFramed {\spx@inframedtrue - % framed.sty puts into "\width" the added width (=2shadowsep+2shadowrule) - % adjust \hsize to what the contents must use - \advance\hsize-\width - % adjust LaTeX parameters to behave properly in indented/quoted contexts - \FrameRestore - % typeset the contents as in a minipage (Sphinx <= 1.4.1 used a minipage and - % itemize/enumerate are therein typeset more tightly, we want to keep - % that). We copy-paste from LaTeX source code but don't do a real minipage. - \@pboxswfalse - \let\@listdepth\@mplistdepth \@mplistdepth\z@ - \@minipagerestore - \@setminipage - }% - }% - {% insert the "endminipage" code - \par\unskip - \@minipagefalse - \endMakeFramed - \ifspx@inframed\end{minipage}\fi - \endtrivlist - % output the stored footnotes - \spewnotes - } - - -%% NOTICES AND ADMONITIONS -% -% Some are quite plain -% the spx@notice@bordercolor etc are set in the sphinxadmonition environment -\newenvironment{sphinxlightbox}{% - \par\allowbreak - \noindent{\color{spx@notice@bordercolor}% - \rule{\linewidth}{\spx@notice@border}}\par\nobreak - {\parskip\z@skip\noindent}% - } - {% - % counteract previous possible negative skip (French lists!): - % (we can't cancel that any earlier \vskip introduced a potential pagebreak) - \sphinxvspacefixafterfrenchlists - \nobreak\vbox{\noindent\kern\@totalleftmargin - {\color{spx@notice@bordercolor}% - \rule[\dimexpr.4\baselineskip-\spx@notice@border\relax] - {\linewidth}{\spx@notice@border}}\hss}\allowbreak - }% end of sphinxlightbox environment definition -% may be renewenvironment'd by user for complete customization -\newenvironment{sphinxnote}[1] - {\begin{sphinxlightbox}\sphinxstrong{#1} }{\end{sphinxlightbox}} -\newenvironment{sphinxhint}[1] - 
{\begin{sphinxlightbox}\sphinxstrong{#1} }{\end{sphinxlightbox}} -\newenvironment{sphinximportant}[1] - {\begin{sphinxlightbox}\sphinxstrong{#1} }{\end{sphinxlightbox}} -\newenvironment{sphinxtip}[1] - {\begin{sphinxlightbox}\sphinxstrong{#1} }{\end{sphinxlightbox}} -% or just use the package options -% these are needed for common handling by notice environment of lightbox -% and heavybox but they are currently not used by lightbox environment -% and there is consequently no corresponding package option -\definecolor{sphinxnoteBgColor}{rgb}{1,1,1} -\definecolor{sphinxhintBgColor}{rgb}{1,1,1} -\definecolor{sphinximportantBgColor}{rgb}{1,1,1} -\definecolor{sphinxtipBgColor}{rgb}{1,1,1} - -% Others get more distinction -% Code adapted from framed.sty's "snugshade" environment. -% Nesting works (inner frames do not allow page breaks). -\newenvironment{sphinxheavybox}{\par - \setlength{\FrameRule}{\spx@notice@border}% - \setlength{\FrameSep}{\dimexpr.6\baselineskip-\FrameRule\relax} - % configure framed.sty's parameters to obtain same vertical spacing - % as for "light" boxes. We need for this to manually insert parskip glue and - % revert a skip done by framed before the frame. 
- \ltx@ifundefined{OuterFrameSep}{}{\OuterFrameSep\z@skip}% - \vspace{\FrameHeightAdjust} - % copied/adapted from framed.sty's snugshade - \def\FrameCommand##1{\hskip\@totalleftmargin - \fboxsep\FrameSep \fboxrule\FrameRule - \fcolorbox{spx@notice@bordercolor}{spx@notice@bgcolor}{##1}% - \hskip-\linewidth \hskip-\@totalleftmargin \hskip\columnwidth}% - \savenotes - % use a minipage if we are already inside a framed environment - \ifspx@inframed - \noindent\begin{minipage}{\linewidth} - \else - % handle case where notice is first thing in a list item (or is quoted) - \if@inlabel - \noindent\par\vspace{-\baselineskip} - \else - \vspace{\parskip} - \fi - \fi - \MakeFramed {\spx@inframedtrue - \advance\hsize-\width \@totalleftmargin\z@ \linewidth\hsize - % minipage initialization copied from LaTeX source code. - \@pboxswfalse - \let\@listdepth\@mplistdepth \@mplistdepth\z@ - \@minipagerestore - \@setminipage }% - } - {% - \par\unskip - \@minipagefalse - \endMakeFramed - \ifspx@inframed\end{minipage}\fi - % set footnotes at bottom of page - \spewnotes - % arrange for similar spacing below frame as for "light" boxes. 
- \vskip .4\baselineskip - }% end of sphinxheavybox environment definition -% may be renewenvironment'd by user for complete customization -\newenvironment{sphinxwarning}[1] - {\begin{sphinxheavybox}\sphinxstrong{#1} }{\end{sphinxheavybox}} -\newenvironment{sphinxcaution}[1] - {\begin{sphinxheavybox}\sphinxstrong{#1} }{\end{sphinxheavybox}} -\newenvironment{sphinxattention}[1] - {\begin{sphinxheavybox}\sphinxstrong{#1} }{\end{sphinxheavybox}} -\newenvironment{sphinxdanger}[1] - {\begin{sphinxheavybox}\sphinxstrong{#1} }{\end{sphinxheavybox}} -\newenvironment{sphinxerror}[1] - {\begin{sphinxheavybox}\sphinxstrong{#1} }{\end{sphinxheavybox}} -% or just use package options - -% the \colorlet of xcolor (if at all loaded) is overkill for our use case -\newcommand{\sphinxcolorlet}[2] - {\expandafter\let\csname\@backslashchar color@#1\expandafter\endcsname - \csname\@backslashchar color@#2\endcsname } - -% the main dispatch for all types of notices -\newenvironment{sphinxadmonition}[2]{% #1=type, #2=heading - % can't use #1 directly in definition of end part - \def\spx@noticetype {#1}% - % set parameters of heavybox/lightbox - \sphinxcolorlet{spx@notice@bordercolor}{sphinx#1BorderColor}% - \sphinxcolorlet{spx@notice@bgcolor}{sphinx#1BgColor}% - \spx@notice@border \dimexpr\csname spx@opt@#1border\endcsname\relax - % start specific environment, passing the heading as argument - \begin{sphinx#1}{#2}} - % workaround some LaTeX "feature" of \end command - {\edef\spx@temp{\noexpand\end{sphinx\spx@noticetype}}\spx@temp} - - -%% PYTHON DOCS MACROS AND ENVIRONMENTS -% (some macros here used by \maketitle in sphinxmanual.cls and sphinxhowto.cls) - -% \moduleauthor{name}{email} -\newcommand{\moduleauthor}[2]{} - -% \sectionauthor{name}{email} -\newcommand{\sectionauthor}[2]{} - -% Allow the release number to be specified independently of the -% \date{}. This allows the date to reflect the document's date and -% release to specify the release that is documented. 
-% -\newcommand{\py@release}{\releasename\space\version} -\newcommand{\version}{}% part of \py@release, used by title page and headers -% \releaseinfo is used on titlepage (sphinxmanual.cls, sphinxhowto.cls) -\newcommand{\releaseinfo}{} -\newcommand{\setreleaseinfo}[1]{\renewcommand{\releaseinfo}{#1}} -% this is inserted via template and #1=release config variable -\newcommand{\release}[1]{\renewcommand{\version}{#1}} -% this is defined by template to 'releasename' latex_elements key -\newcommand{\releasename}{} -% Fix issue in case release and releasename deliberately left blank -\newcommand{\sphinxheadercomma}{, }% used in fancyhdr header definition -\newcommand{\sphinxifemptyorblank}[1]{% -% test after one expansion of macro #1 if contents is empty or spaces - \if&\expandafter\@firstofone\detokenize\expandafter{#1}&% - \expandafter\@firstoftwo\else\expandafter\@secondoftwo\fi}% -\AtBeginDocument {% - \sphinxifemptyorblank{\releasename} - {\sphinxifemptyorblank{\version}{\let\sphinxheadercomma\empty}{}} - {}% -}% - -% Allow specification of the author's address separately from the -% author's name. This can be used to format them differently, which -% is a good thing. -% -\newcommand{\py@authoraddress}{} -\newcommand{\authoraddress}[1]{\renewcommand{\py@authoraddress}{#1}} - -% {fulllineitems} is the main environment for object descriptions. 
-% -\newcommand{\py@itemnewline}[1]{% - \kern\labelsep - \@tempdima\linewidth - \advance\@tempdima \labelwidth\makebox[\@tempdima][l]{#1}% - \kern-\labelsep -} - -\newenvironment{fulllineitems}{% - \begin{list}{}{\labelwidth \leftmargin - \rightmargin \z@ \topsep -\parskip \partopsep \parskip - \itemsep -\parsep - \let\makelabel=\py@itemnewline}% -}{\end{list}} - -% Signatures, possibly multi-line -% -\newlength{\py@argswidth} -\newcommand{\py@sigparams}[2]{% - \parbox[t]{\py@argswidth}{#1\sphinxcode{)}#2}} -\newcommand{\pysigline}[1]{\item[{#1}]} -\newcommand{\pysiglinewithargsret}[3]{% - \settowidth{\py@argswidth}{#1\sphinxcode{(}}% - \addtolength{\py@argswidth}{-2\py@argswidth}% - \addtolength{\py@argswidth}{\linewidth}% - \item[{#1\sphinxcode{(}\py@sigparams{#2}{#3}}]} -\newcommand{\pysigstartmultiline}{% - \def\pysigstartmultiline{\vskip\smallskipamount\parskip\z@skip\itemsep\z@skip}% - \edef\pysigstopmultiline - {\noexpand\leavevmode\parskip\the\parskip\relax\itemsep\the\itemsep\relax}% - \parskip\z@skip\itemsep\z@skip -} - -% Production lists -% -\newenvironment{productionlist}{% -% \def\sphinxoptional##1{{\Large[}##1{\Large]}} - \def\production##1##2{\\\sphinxcode{\sphinxupquote{##1}}&::=&\sphinxcode{\sphinxupquote{##2}}}% - \def\productioncont##1{\\& &\sphinxcode{\sphinxupquote{##1}}}% - \parindent=2em - \indent - \setlength{\LTpre}{0pt}% - \setlength{\LTpost}{0pt}% - \begin{longtable}[l]{lcl} -}{% - \end{longtable} -} - -% Definition lists; requested by AMK for HOWTO documents. Probably useful -% elsewhere as well, so keep in in the general style support. -% -\newenvironment{definitions}{% - \begin{description}% - \def\term##1{\item[{##1}]\mbox{}\\*[0mm]}% -}{% - \end{description}% -} - -%% FROM DOCTUTILS LATEX WRITER -% -% The following is stuff copied from docutils' latex writer. 
-% -\newcommand{\optionlistlabel}[1]{\normalfont\bfseries #1 \hfill}% \bf deprecated -\newenvironment{optionlist}[1] -{\begin{list}{} - {\setlength{\labelwidth}{#1} - \setlength{\rightmargin}{1cm} - \setlength{\leftmargin}{\rightmargin} - \addtolength{\leftmargin}{\labelwidth} - \addtolength{\leftmargin}{\labelsep} - \renewcommand{\makelabel}{\optionlistlabel}} -}{\end{list}} - -\newlength{\lineblockindentation} -\setlength{\lineblockindentation}{2.5em} -\newenvironment{lineblock}[1] -{\begin{list}{} - {\setlength{\partopsep}{\parskip} - \addtolength{\partopsep}{\baselineskip} - \topsep0pt\itemsep0.15\baselineskip\parsep0pt - \leftmargin#1\relax} - \raggedright} -{\end{list}} - -% From docutils.writers.latex2e -% inline markup (custom roles) -% \DUrole{#1}{#2} tries \DUrole#1{#2} -\providecommand*{\DUrole}[2]{% - \ifcsname DUrole\detokenize{#1}\endcsname - \csname DUrole\detokenize{#1}\endcsname{#2}% - \else% backwards compatibility: try \docutilsrole#1{#2} - \ifcsname docutilsrole\detokenize{#1}\endcsname - \csname docutilsrole\detokenize{#1}\endcsname{#2}% - \else - #2% - \fi - \fi -} - -\providecommand*{\DUprovidelength}[2]{% - \ifdefined#1\else\newlength{#1}\setlength{#1}{#2}\fi -} - -\DUprovidelength{\DUlineblockindent}{2.5em} -\ifdefined\DUlineblock\else - \newenvironment{DUlineblock}[1]{% - \list{}{\setlength{\partopsep}{\parskip} - \addtolength{\partopsep}{\baselineskip} - \setlength{\topsep}{0pt} - \setlength{\itemsep}{0.15\baselineskip} - \setlength{\parsep}{0pt} - \setlength{\leftmargin}{#1}} - \raggedright - } - {\endlist} -\fi - -%% TEXT STYLING -% -% to obtain straight quotes we execute \@noligs as patched by upquote, and -% \scantokens is needed in cases where it would be too late for the macro to -% first set catcodes and then fetch its argument. We also make the contents -% breakable at non-escaped . , ; ? ! / using \sphinxbreaksviaactive. 
-% the macro must be protected if it ends up used in moving arguments, -% in 'alltt' \@noligs is done already, and the \scantokens must be avoided. -\protected\def\sphinxupquote#1{{\def\@tempa{alltt}% - \ifx\@tempa\@currenvir\else - \ifspx@opt@inlineliteralwraps - \sphinxbreaksviaactive\let\sphinxafterbreak\empty - % do not overwrite the comma set-up - \let\verbatim@nolig@list\sphinx@literal@nolig@list - \fi - % fix a space-gobbling issue due to LaTeX's original \do@noligs - \let\do@noligs\sphinx@do@noligs - \@noligs\endlinechar\m@ne\everyeof{}% (<- in case inside \sphinxhref) - \expandafter\scantokens - \fi {{#1}}}}% extra brace pair to fix end-space gobbling issue... -\def\sphinx@do@noligs #1{\catcode`#1\active\begingroup\lccode`\~`#1\relax - \lowercase{\endgroup\def~{\leavevmode\kern\z@\char`#1 }}} -\def\sphinx@literal@nolig@list {\do\`\do\<\do\>\do\'\do\-}% - -% Some custom font markup commands. -\protected\def\sphinxstrong#1{\textbf{#1}} -\protected\def\sphinxcode#1{\texttt{#1}} -\protected\def\sphinxbfcode#1{\textbf{\sphinxcode{#1}}} -\protected\def\sphinxemail#1{\textsf{#1}} -\protected\def\sphinxtablecontinued#1{\textsf{#1}} -\protected\def\sphinxtitleref#1{\emph{#1}} -\protected\def\sphinxmenuselection#1{\emph{#1}} -\protected\def\sphinxguilabel#1{\emph{#1}} -\protected\def\sphinxaccelerator#1{\underline{#1}} -\protected\def\sphinxcrossref#1{\emph{#1}} -\protected\def\sphinxtermref#1{\emph{#1}} -% \optional is used for ``[, arg]``, i.e. desc_optional nodes. 
-\long\protected\def\sphinxoptional#1{% - {\textnormal{\Large[}}{#1}\hspace{0.5mm}{\textnormal{\Large]}}} - -% additional customizable styling -\def\sphinxstyleindexentry #1{\texttt{#1}} -\def\sphinxstyleindexextra #1{ \emph{(#1)}} -\def\sphinxstyleindexpageref #1{, \pageref{#1}} -\def\sphinxstyleindexlettergroup #1% - {{\Large\sffamily#1}\nopagebreak\vspace{1mm}} -\def\sphinxstyleindexlettergroupDefault #1% - {{\Large\sffamily\sphinxnonalphabeticalgroupname}\nopagebreak\vspace{1mm}} -\protected\def\sphinxstyletopictitle #1{\textbf{#1}\par\medskip} -\let\sphinxstylesidebartitle\sphinxstyletopictitle -\protected\def\sphinxstyleothertitle #1{\textbf{#1}} -\protected\def\sphinxstylesidebarsubtitle #1{~\\\textbf{#1} \smallskip} -% \text.. commands do not allow multiple paragraphs -\protected\def\sphinxstyletheadfamily {\sffamily} -\protected\def\sphinxstyleemphasis #1{\emph{#1}} -\protected\def\sphinxstyleliteralemphasis#1{\emph{\sphinxcode{#1}}} -\protected\def\sphinxstylestrong #1{\textbf{#1}} -\protected\def\sphinxstyleliteralstrong#1{\sphinxbfcode{#1}} -\protected\def\sphinxstyleabbreviation #1{\textsc{#1}} -\protected\def\sphinxstyleliteralintitle#1{\sphinxcode{#1}} -\newcommand*\sphinxstylecodecontinued[1]{\footnotesize(#1)}% -\newcommand*\sphinxstylecodecontinues[1]{\footnotesize(#1)}% -% figure legend comes after caption and may contain arbitrary body elements -\newenvironment{sphinxlegend}{\par\small}{\par} -% reduce hyperref "Token not allowed in a PDF string" warnings on PDF builds -\AtBeginDocument{\pdfstringdefDisableCommands{% -% all "protected" macros possibly ending up in section titles should be here - \let\sphinxstyleemphasis \@firstofone - \let\sphinxstyleliteralemphasis \@firstofone - \let\sphinxstylestrong \@firstofone - \let\sphinxstyleliteralstrong \@firstofone - \let\sphinxstyleabbreviation \@firstofone - \let\sphinxstyleliteralintitle \@firstofone - \let\sphinxupquote \@firstofone - \let\sphinxstrong \@firstofone - \let\sphinxcode \@firstofone 
- \let\sphinxbfcode \@firstofone - \let\sphinxemail \@firstofone - \let\sphinxcrossref \@firstofone - \let\sphinxtermref \@firstofone -}} - -% For curly braces inside \index macro -\def\sphinxleftcurlybrace{\{} -\def\sphinxrightcurlybrace{\}} - -% Declare Unicode characters used by linux tree command to pdflatex utf8/utf8x -\def\spx@bd#1#2{% - \leavevmode - \begingroup - \ifx\spx@bd@height \@undefined\def\spx@bd@height{\baselineskip}\fi - \ifx\spx@bd@width \@undefined\setbox0\hbox{0}\def\spx@bd@width{\wd0 }\fi - \ifx\spx@bd@thickness\@undefined\def\spx@bd@thickness{.6\p@}\fi - \ifx\spx@bd@lower \@undefined\def\spx@bd@lower{\dp\strutbox}\fi - \lower\spx@bd@lower#1{#2}% - \endgroup -}% -\@namedef{sphinx@u2500}% BOX DRAWINGS LIGHT HORIZONTAL - {\spx@bd{\vbox to\spx@bd@height} - {\vss\hrule\@height\spx@bd@thickness - \@width\spx@bd@width\vss}}% -\@namedef{sphinx@u2502}% BOX DRAWINGS LIGHT VERTICAL - {\spx@bd{\hb@xt@\spx@bd@width} - {\hss\vrule\@height\spx@bd@height - \@width \spx@bd@thickness\hss}}% -\@namedef{sphinx@u2514}% BOX DRAWINGS LIGHT UP AND RIGHT - {\spx@bd{\hb@xt@\spx@bd@width} - {\hss\raise.5\spx@bd@height - \hb@xt@\z@{\hss\vrule\@height.5\spx@bd@height - \@width \spx@bd@thickness\hss}% - \vbox to\spx@bd@height{\vss\hrule\@height\spx@bd@thickness - \@width.5\spx@bd@width\vss}}}% -\@namedef{sphinx@u251C}% BOX DRAWINGS LIGHT VERTICAL AND RIGHT - {\spx@bd{\hb@xt@\spx@bd@width} - {\hss - \hb@xt@\z@{\hss\vrule\@height\spx@bd@height - \@width \spx@bd@thickness\hss}% - \vbox to\spx@bd@height{\vss\hrule\@height\spx@bd@thickness - \@width.5\spx@bd@width\vss}}}% -\protected\def\sphinxunichar#1{\@nameuse{sphinx@u#1}}% - -% Tell TeX about pathological hyphenation cases: -\hyphenation{Base-HTTP-Re-quest-Hand-ler} -\endinput diff --git a/docs/_build/latex/sphinx.xdy b/docs/_build/latex/sphinx.xdy deleted file mode 100644 index 17b0656..0000000 --- a/docs/_build/latex/sphinx.xdy +++ /dev/null @@ -1,203 +0,0 @@ -;;; -*- mode: lisp; coding: utf-8; -*- - -;; Unfortunately 
xindy is out-of-the-box hyperref-incompatible. This -;; configuration is a workaround, which requires to pass option -;; hyperindex=false to hyperref. -;; textit and emph not currently used by Sphinx LaTeX writer. -(define-attributes (("textbf" "textit" "emph" "default"))) -(markup-locref :open "\textbf{\hyperpage{" :close "}}" :attr "textbf") -(markup-locref :open "\textit{\hyperpage{" :close "}}" :attr "textit") -(markup-locref :open "\emph{\hyperpage{" :close "}}" :attr "emph") -(markup-locref :open "\hyperpage{" :close "}" :attr "default") - -(require "numeric-sort.xdy") - -;; xindy base module latex.xdy loads tex.xdy and the latter instructs -;; xindy to ignore **all** TeX macros in .idx entries, except those -;; explicitely described in merge rule. But when after applying all -;; merge rules an empty string results, xindy raises an error: - -;; ERROR: CHAR: index 0 should be less than the length of the string - -;; For example when using pdflatex with utf-8 characters the index -;; file will contain \IeC macros and they will get ignored except if -;; suitable merge rules are loaded early. The texindy script coming -;; with xindy provides this, but only for Latin scripts. The texindy -;; man page says to use rather xelatex or lualatex in case of Cyrillic -;; scripts. - -;; Sphinx contributes LICRcyr2utf8.xdy to provide support for Cyrillic -;; scripts for the pdflatex engine. - -;; Another issue caused by xindy ignoring all TeX macros except those -;; explicitely declared reveals itself when attempting to index ">>>", -;; as the ">" is converted to "\textgreater{}" by Sphinx's LaTeX -;; escaping. - -;; To fix this, Sphinx does **not** use texindy, and does not even -;; load the xindy latex.xdy base module. - -;(require "latex.xdy") - -;; Rather it incorporates some suitable extracts from latex.xdy and -;; tex.xdy with additional Sphinx contributed rules. 
- -;; But, this means for pdflatex and Latin scripts that the xindy file -;; tex/inputenc/uf8.xdy is not usable because it refers to the macro -;; \IeC only sporadically, and as tex.xdy is not loaded, a rule such as -;; (merge-rule "\'e" "é" :string) -;; does not work, it must be -;; (merge-rule "\IeC {\'e}" "é" :string) -;; So Sphinx contributes LICRlatin2utf8.xdy to mitigate that problem. - -;;;;;;;; extracts from tex.xdy (discarding most original comments): - -;;; -;;; TeX conventions -;;; - -;; Discard leading and trailing white space. Collapse multiple white -;; space characters to blank. - -(merge-rule "^ +" "" :eregexp) -(merge-rule " +$" "" :eregexp) -(merge-rule " +" " " :eregexp) - -;; Handle TeX markup - -(merge-rule "\\([{}$%&#])" "\1" :eregexp) - -;;;;;;;; end of extracts from xindy's tex.xdy - -;;;;;;;; extracts from latex.xdy: - -;; Standard location classes: arabic and roman numbers, and alphabets. - -(define-location-class "arabic-page-numbers" ("arabic-numbers")) -(define-location-class "roman-page-numbers" ("roman-numbers-lowercase")) -(define-location-class "Roman-page-numbers" ("roman-numbers-uppercase")) -(define-location-class "alpha-page-numbers" ("alpha")) -(define-location-class "Alpha-page-numbers" ("ALPHA")) - -;; Output Markup - -(markup-letter-group-list :sep "~n~n \indexspace~n") - -(markup-indexentry :open "~n \item " :depth 0) -(markup-indexentry :open "~n \subitem " :depth 1) -(markup-indexentry :open "~n \subsubitem " :depth 2) - -(markup-locclass-list :open ", " :sep ", ") -(markup-locref-list :sep ", ") - -;;;;;;;; end of extracts from latex.xdy - -;; The LaTeX \index command turns \ into normal character so the TeX macros -;; written to .idx files are not followed by a blank. 
This is different -;; from non-ascii letters which end up (with pdflatex) as \IeC macros in .idx -;; file, with a blank space after \IeC - -;; Details of the syntax are explained at -;; http://xindy.sourceforge.net/doc/manual-3.html -;; In absence of :string, "xindy uses an auto-detection mechanism to decide, -;; if the pattern is a regular expression or not". But it is not obvious to -;; guess, for example "\\_" is not detected as RE but "\\P\{\}" is, so for -;; being sure we apply the :string switch everywhere and do not use \\ etc... - -;; Go back from sphinx.util.texescape TeX macros to UTF-8 - -(merge-rule "\sphinxleftcurlybrace{}" "{" :string) -(merge-rule "\sphinxrightcurlybrace{}" "}" :string) -(merge-rule "\_" "_" :string) -(merge-rule "{[}" "[" :string) -(merge-rule "{]}" "]" :string) -(merge-rule "{}`" "`" :string) -(merge-rule "\textbackslash{}" "\" :string) ; " for Emacs syntax highlighting -(merge-rule "\textasciitilde{}" "~~" :string); the ~~ escape is needed here -(merge-rule "\textless{}" "<" :string) -(merge-rule "\textgreater{}" ">" :string) -(merge-rule "\textasciicircum{}" "^" :string) -(merge-rule "\P{}" "¶" :string) -(merge-rule "\S{}" "§" :string) -(merge-rule "\texteuro{}" "€" :string) -(merge-rule "\(\infty\)" "∞" :string) -(merge-rule "\(\pm\)" "±" :string) -(merge-rule "\(\rightarrow\)" "→" :string) -(merge-rule "\(\checkmark\)" "✓" :string) -(merge-rule "\textendash{}" "–" :string) -(merge-rule "\textbar{}" "|" :string) -(merge-rule "\(\sp{\text{0}}\)" "⁰" :string) -(merge-rule "\(\sp{\text{1}}\)" "¹" :string) -(merge-rule "\(\sp{\text{2}}\)" "²" :string) -(merge-rule "\(\sp{\text{3}}\)" "³" :string) -(merge-rule "\(\sp{\text{4}}\)" "⁴" :string) -(merge-rule "\(\sp{\text{5}}\)" "⁵" :string) -(merge-rule "\(\sp{\text{6}}\)" "⁶" :string) -(merge-rule "\(\sp{\text{7}}\)" "⁷" :string) -(merge-rule "\(\sp{\text{8}}\)" "⁸" :string) -(merge-rule "\(\sp{\text{9}}\)" "⁹" :string) -(merge-rule "\(\sb{\text{0}}\)" "₀" :string) -(merge-rule 
"\(\sb{\text{1}}\)" "₁" :string) -(merge-rule "\(\sb{\text{2}}\)" "₂" :string) -(merge-rule "\(\sb{\text{3}}\)" "₃" :string) -(merge-rule "\(\sb{\text{4}}\)" "₄" :string) -(merge-rule "\(\sb{\text{5}}\)" "₅" :string) -(merge-rule "\(\sb{\text{6}}\)" "₆" :string) -(merge-rule "\(\sb{\text{7}}\)" "₇" :string) -(merge-rule "\(\sb{\text{8}}\)" "₈" :string) -(merge-rule "\(\sb{\text{9}}\)" "₉" :string) -(merge-rule "\(\alpha\)" "α" :string) -(merge-rule "\(\beta\)" "β" :string) -(merge-rule "\(\gamma\)" "γ" :string) -(merge-rule "\(\delta\)" "δ" :string) -(merge-rule "\(\epsilon\)" "ε" :string) -(merge-rule "\(\zeta\)" "ζ" :string) -(merge-rule "\(\eta\)" "η" :string) -(merge-rule "\(\theta\)" "θ" :string) -(merge-rule "\(\iota\)" "ι" :string) -(merge-rule "\(\kappa\)" "κ" :string) -(merge-rule "\(\lambda\)" "λ" :string) -(merge-rule "\(\mu\)" "μ" :string) -(merge-rule "\(\nu\)" "ν" :string) -(merge-rule "\(\xi\)" "ξ" :string) -(merge-rule "\(\pi\)" "π" :string) -(merge-rule "\(\rho\)" "ρ" :string) -(merge-rule "\(\sigma\)" "σ" :string) -(merge-rule "\(\tau\)" "τ" :string) -(merge-rule "\(\upsilon\)" "υ" :string) -(merge-rule "\(\phi\)" "φ" :string) -(merge-rule "\(\chi\)" "χ" :string) -(merge-rule "\(\psi\)" "ψ" :string) -(merge-rule "\(\omega\)" "ω" :string) -(merge-rule "\(\Gamma\)" "Γ" :string) -(merge-rule "\(\Delta\)" "Δ" :string) -(merge-rule "\(\Theta\)" "Θ" :string) -(merge-rule "\(\Lambda\)" "Λ" :string) -(merge-rule "\(\Xi\)" "Ξ" :string) -(merge-rule "\(\Pi\)" "Π" :string) -(merge-rule "\(\Sigma\)" "Σ" :string) -(merge-rule "\(\Upsilon\)" "Υ" :string) -(merge-rule "\(\Phi\)" "Φ" :string) -(merge-rule "\(\Psi\)" "Ψ" :string) -(merge-rule "\(\Omega\)" "Ω" :string) - -;; This xindy module provides some basic support for "see" -(require "makeindex.xdy") - -;; This creates one-letter headings and works fine with utf-8 letters. 
-;; For Cyrillic with pdflatex works thanks to LICRcyr2utf8.xdy -(require "latin-lettergroups.xdy") - -;; currently we don't (know how to easily) separate "Numbers" from -;; "Symbols" with xindy as is the case with makeindex. -(markup-index :open "\begin{sphinxtheindex} -\let\lettergroup\sphinxstyleindexlettergroup -\let\lettergroupDefault\sphinxstyleindexlettergroupDefault - -" - :close " - -\end{sphinxtheindex} -" - :tree) - diff --git a/docs/_build/latex/sphinxhighlight.sty b/docs/_build/latex/sphinxhighlight.sty deleted file mode 100644 index 1557ce6..0000000 --- a/docs/_build/latex/sphinxhighlight.sty +++ /dev/null @@ -1,105 +0,0 @@ -\NeedsTeXFormat{LaTeX2e}[1995/12/01] -\ProvidesPackage{sphinxhighlight}[2016/05/29 stylesheet for highlighting with pygments] - - -\makeatletter -\def\PYG@reset{\let\PYG@it=\relax \let\PYG@bf=\relax% - \let\PYG@ul=\relax \let\PYG@tc=\relax% - \let\PYG@bc=\relax \let\PYG@ff=\relax} -\def\PYG@tok#1{\csname PYG@tok@#1\endcsname} -\def\PYG@toks#1+{\ifx\relax#1\empty\else% - \PYG@tok{#1}\expandafter\PYG@toks\fi} -\def\PYG@do#1{\PYG@bc{\PYG@tc{\PYG@ul{% - \PYG@it{\PYG@bf{\PYG@ff{#1}}}}}}} -\def\PYG#1#2{\PYG@reset\PYG@toks#1+\relax+\PYG@do{#2}} - -\expandafter\def\csname PYG@tok@w\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.73,0.73,0.73}{##1}}} -\expandafter\def\csname PYG@tok@c\endcsname{\let\PYG@it=\textit\def\PYG@tc##1{\textcolor[rgb]{0.25,0.50,0.56}{##1}}} -\expandafter\def\csname PYG@tok@cp\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.00,0.44,0.13}{##1}}} -\expandafter\def\csname PYG@tok@cs\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.25,0.50,0.56}{##1}}\def\PYG@bc##1{\setlength{\fboxsep}{0pt}\colorbox[rgb]{1.00,0.94,0.94}{\strut ##1}}} -\expandafter\def\csname PYG@tok@k\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.44,0.13}{##1}}} -\expandafter\def\csname PYG@tok@kp\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.00,0.44,0.13}{##1}}} -\expandafter\def\csname 
PYG@tok@kt\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.56,0.13,0.00}{##1}}} -\expandafter\def\csname PYG@tok@o\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}} -\expandafter\def\csname PYG@tok@ow\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.44,0.13}{##1}}} -\expandafter\def\csname PYG@tok@nb\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.00,0.44,0.13}{##1}}} -\expandafter\def\csname PYG@tok@nf\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.02,0.16,0.49}{##1}}} -\expandafter\def\csname PYG@tok@nc\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.05,0.52,0.71}{##1}}} -\expandafter\def\csname PYG@tok@nn\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.05,0.52,0.71}{##1}}} -\expandafter\def\csname PYG@tok@ne\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.00,0.44,0.13}{##1}}} -\expandafter\def\csname PYG@tok@nv\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.73,0.38,0.84}{##1}}} -\expandafter\def\csname PYG@tok@no\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.38,0.68,0.84}{##1}}} -\expandafter\def\csname PYG@tok@nl\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.13,0.44}{##1}}} -\expandafter\def\csname PYG@tok@ni\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.84,0.33,0.22}{##1}}} -\expandafter\def\csname PYG@tok@na\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.25,0.44,0.63}{##1}}} -\expandafter\def\csname PYG@tok@nt\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.02,0.16,0.45}{##1}}} -\expandafter\def\csname PYG@tok@nd\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.33,0.33,0.33}{##1}}} -\expandafter\def\csname PYG@tok@s\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.25,0.44,0.63}{##1}}} -\expandafter\def\csname PYG@tok@sd\endcsname{\let\PYG@it=\textit\def\PYG@tc##1{\textcolor[rgb]{0.25,0.44,0.63}{##1}}} -\expandafter\def\csname PYG@tok@si\endcsname{\let\PYG@it=\textit\def\PYG@tc##1{\textcolor[rgb]{0.44,0.63,0.82}{##1}}} -\expandafter\def\csname 
PYG@tok@se\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.25,0.44,0.63}{##1}}} -\expandafter\def\csname PYG@tok@sr\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.14,0.33,0.53}{##1}}} -\expandafter\def\csname PYG@tok@ss\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.32,0.47,0.09}{##1}}} -\expandafter\def\csname PYG@tok@sx\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.78,0.36,0.04}{##1}}} -\expandafter\def\csname PYG@tok@m\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.13,0.50,0.31}{##1}}} -\expandafter\def\csname PYG@tok@gh\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.00,0.50}{##1}}} -\expandafter\def\csname PYG@tok@gu\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.50,0.00,0.50}{##1}}} -\expandafter\def\csname PYG@tok@gd\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.63,0.00,0.00}{##1}}} -\expandafter\def\csname PYG@tok@gi\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.00,0.63,0.00}{##1}}} -\expandafter\def\csname PYG@tok@gr\endcsname{\def\PYG@tc##1{\textcolor[rgb]{1.00,0.00,0.00}{##1}}} -\expandafter\def\csname PYG@tok@ge\endcsname{\let\PYG@it=\textit} -\expandafter\def\csname PYG@tok@gs\endcsname{\let\PYG@bf=\textbf} -\expandafter\def\csname PYG@tok@gp\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.78,0.36,0.04}{##1}}} -\expandafter\def\csname PYG@tok@go\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.20,0.20,0.20}{##1}}} -\expandafter\def\csname PYG@tok@gt\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.00,0.27,0.87}{##1}}} -\expandafter\def\csname PYG@tok@err\endcsname{\def\PYG@bc##1{\setlength{\fboxsep}{0pt}\fcolorbox[rgb]{1.00,0.00,0.00}{1,1,1}{\strut ##1}}} -\expandafter\def\csname PYG@tok@kc\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.44,0.13}{##1}}} -\expandafter\def\csname PYG@tok@kd\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.44,0.13}{##1}}} -\expandafter\def\csname PYG@tok@kn\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.44,0.13}{##1}}} 
-\expandafter\def\csname PYG@tok@kr\endcsname{\let\PYG@bf=\textbf\def\PYG@tc##1{\textcolor[rgb]{0.00,0.44,0.13}{##1}}} -\expandafter\def\csname PYG@tok@bp\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.00,0.44,0.13}{##1}}} -\expandafter\def\csname PYG@tok@fm\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.02,0.16,0.49}{##1}}} -\expandafter\def\csname PYG@tok@vc\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.73,0.38,0.84}{##1}}} -\expandafter\def\csname PYG@tok@vg\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.73,0.38,0.84}{##1}}} -\expandafter\def\csname PYG@tok@vi\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.73,0.38,0.84}{##1}}} -\expandafter\def\csname PYG@tok@vm\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.73,0.38,0.84}{##1}}} -\expandafter\def\csname PYG@tok@sa\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.25,0.44,0.63}{##1}}} -\expandafter\def\csname PYG@tok@sb\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.25,0.44,0.63}{##1}}} -\expandafter\def\csname PYG@tok@sc\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.25,0.44,0.63}{##1}}} -\expandafter\def\csname PYG@tok@dl\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.25,0.44,0.63}{##1}}} -\expandafter\def\csname PYG@tok@s2\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.25,0.44,0.63}{##1}}} -\expandafter\def\csname PYG@tok@sh\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.25,0.44,0.63}{##1}}} -\expandafter\def\csname PYG@tok@s1\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.25,0.44,0.63}{##1}}} -\expandafter\def\csname PYG@tok@mb\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.13,0.50,0.31}{##1}}} -\expandafter\def\csname PYG@tok@mf\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.13,0.50,0.31}{##1}}} -\expandafter\def\csname PYG@tok@mh\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.13,0.50,0.31}{##1}}} -\expandafter\def\csname PYG@tok@mi\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.13,0.50,0.31}{##1}}} -\expandafter\def\csname PYG@tok@il\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.13,0.50,0.31}{##1}}} -\expandafter\def\csname 
PYG@tok@mo\endcsname{\def\PYG@tc##1{\textcolor[rgb]{0.13,0.50,0.31}{##1}}} -\expandafter\def\csname PYG@tok@ch\endcsname{\let\PYG@it=\textit\def\PYG@tc##1{\textcolor[rgb]{0.25,0.50,0.56}{##1}}} -\expandafter\def\csname PYG@tok@cm\endcsname{\let\PYG@it=\textit\def\PYG@tc##1{\textcolor[rgb]{0.25,0.50,0.56}{##1}}} -\expandafter\def\csname PYG@tok@cpf\endcsname{\let\PYG@it=\textit\def\PYG@tc##1{\textcolor[rgb]{0.25,0.50,0.56}{##1}}} -\expandafter\def\csname PYG@tok@c1\endcsname{\let\PYG@it=\textit\def\PYG@tc##1{\textcolor[rgb]{0.25,0.50,0.56}{##1}}} - -\def\PYGZbs{\char`\\} -\def\PYGZus{\char`\_} -\def\PYGZob{\char`\{} -\def\PYGZcb{\char`\}} -\def\PYGZca{\char`\^} -\def\PYGZam{\char`\&} -\def\PYGZlt{\char`\<} -\def\PYGZgt{\char`\>} -\def\PYGZsh{\char`\#} -\def\PYGZpc{\char`\%} -\def\PYGZdl{\char`\$} -\def\PYGZhy{\char`\-} -\def\PYGZsq{\char`\'} -\def\PYGZdq{\char`\"} -\def\PYGZti{\char`\~} -% for compatibility with earlier versions -\def\PYGZat{@} -\def\PYGZlb{[} -\def\PYGZrb{]} -\makeatother - -\renewcommand\PYGZsq{\textquotesingle} diff --git a/docs/_build/latex/sphinxhowto.cls b/docs/_build/latex/sphinxhowto.cls deleted file mode 100644 index 8b0530a..0000000 --- a/docs/_build/latex/sphinxhowto.cls +++ /dev/null @@ -1,95 +0,0 @@ -% -% sphinxhowto.cls for Sphinx (http://sphinx-doc.org/) -% - -\NeedsTeXFormat{LaTeX2e}[1995/12/01] -\ProvidesClass{sphinxhowto}[2017/03/26 v1.6 Document class (Sphinx HOWTO)] - -% 'oneside' option overriding the 'twoside' default -\newif\if@oneside -\DeclareOption{oneside}{\@onesidetrue} -% Pass remaining document options to the parent class. -\DeclareOption*{\PassOptionsToClass{\CurrentOption}{\sphinxdocclass}} -\ProcessOptions\relax - -% Default to two-side document -\if@oneside -% nothing to do (oneside is the default) -\else -\PassOptionsToClass{twoside}{\sphinxdocclass} -\fi - -\LoadClass{\sphinxdocclass} - -% Set some sane defaults for section numbering depth and TOC depth. You can -% reset these counters in your preamble. 
-% -\setcounter{secnumdepth}{2} -\setcounter{tocdepth}{2}% i.e. section and subsection - -% Change the title page to look a bit better, and fit in with the fncychap -% ``Bjarne'' style a bit better. -% -\renewcommand{\maketitle}{% - \noindent\rule{\textwidth}{1pt}\par - \begingroup % for PDF information dictionary - \def\endgraf{ }\def\and{\& }% - \pdfstringdefDisableCommands{\def\\{, }}% overwrite hyperref setup - \hypersetup{pdfauthor={\@author}, pdftitle={\@title}}% - \endgroup - \begin{flushright} - \sphinxlogo - \py@HeaderFamily - {\Huge \@title }\par - {\itshape\large \py@release \releaseinfo}\par - \vspace{25pt} - {\Large - \begin{tabular}[t]{c} - \@author - \end{tabular}}\par - \vspace{25pt} - \@date \par - \py@authoraddress \par - \end{flushright} - \@thanks - \setcounter{footnote}{0} - \let\thanks\relax\let\maketitle\relax - %\gdef\@thanks{}\gdef\@author{}\gdef\@title{} -} - -\newcommand{\sphinxtableofcontents}{ - \begingroup - \parskip = 0mm - \tableofcontents - \endgroup - \rule{\textwidth}{1pt} - \vspace{12pt} -} - -\@ifundefined{fancyhf}{ - \pagestyle{plain}}{ - \pagestyle{normal}} % start this way; change for -\pagenumbering{arabic} % ToC & chapters - -\thispagestyle{empty} - -% Fix the bibliography environment to add an entry to the Table of -% Contents. -% For an article document class this environment is a section, -% so no page break before it. -% -\newenvironment{sphinxthebibliography}[1]{% - % \phantomsection % not needed here since TeXLive 2010's hyperref - \begin{thebibliography}{#1}% - \addcontentsline{toc}{section}{\ifdefined\refname\refname\else\ifdefined\bibname\bibname\fi\fi}}{\end{thebibliography}} - - -% Same for the indices. -% The memoir class already does this, so we don't duplicate it in that case. -% -\@ifclassloaded{memoir} - {\newenvironment{sphinxtheindex}{\begin{theindex}}{\end{theindex}}} - {\newenvironment{sphinxtheindex}{% - \phantomsection % needed because no chapter, section, ... 
is created by theindex - \begin{theindex}% - \addcontentsline{toc}{section}{\indexname}}{\end{theindex}}} diff --git a/docs/_build/latex/sphinxmanual.cls b/docs/_build/latex/sphinxmanual.cls deleted file mode 100644 index 94c71d7..0000000 --- a/docs/_build/latex/sphinxmanual.cls +++ /dev/null @@ -1,114 +0,0 @@ -% -% sphinxmanual.cls for Sphinx (http://sphinx-doc.org/) -% - -\NeedsTeXFormat{LaTeX2e}[1995/12/01] -\ProvidesClass{sphinxmanual}[2017/03/26 v1.6 Document class (Sphinx manual)] - -% chapters starting at odd pages (overridden by 'openany' document option) -\PassOptionsToClass{openright}{\sphinxdocclass} - -% 'oneside' option overriding the 'twoside' default -\newif\if@oneside -\DeclareOption{oneside}{\@onesidetrue} -% Pass remaining document options to the parent class. -\DeclareOption*{\PassOptionsToClass{\CurrentOption}{\sphinxdocclass}} -\ProcessOptions\relax - -% Defaults two-side document -\if@oneside -% nothing to do (oneside is the default) -\else -\PassOptionsToClass{twoside}{\sphinxdocclass} -\fi - -\LoadClass{\sphinxdocclass} - -% Set some sane defaults for section numbering depth and TOC depth. You can -% reset these counters in your preamble. -% -\setcounter{secnumdepth}{2} -\setcounter{tocdepth}{1} - -% Change the title page to look a bit better, and fit in with the fncychap -% ``Bjarne'' style a bit better. 
-% -\renewcommand{\maketitle}{% - \let\spx@tempa\relax - \ifHy@pageanchor\def\spx@tempa{\Hy@pageanchortrue}\fi - \hypersetup{pageanchor=false}% avoid duplicate destination warnings - \begin{titlepage}% - \let\footnotesize\small - \let\footnoterule\relax - \noindent\rule{\textwidth}{1pt}\par - \begingroup % for PDF information dictionary - \def\endgraf{ }\def\and{\& }% - \pdfstringdefDisableCommands{\def\\{, }}% overwrite hyperref setup - \hypersetup{pdfauthor={\@author}, pdftitle={\@title}}% - \endgroup - \begin{flushright}% - \sphinxlogo - \py@HeaderFamily - {\Huge \@title \par} - {\itshape\LARGE \py@release\releaseinfo \par} - \vfill - {\LARGE - \begin{tabular}[t]{c} - \@author - \end{tabular} - \par} - \vfill\vfill - {\large - \@date \par - \vfill - \py@authoraddress \par - }% - \end{flushright}%\par - \@thanks - \end{titlepage}% - \setcounter{footnote}{0}% - \let\thanks\relax\let\maketitle\relax - %\gdef\@thanks{}\gdef\@author{}\gdef\@title{} - \if@openright\cleardoublepage\else\clearpage\fi - \spx@tempa -} - -\newcommand{\sphinxtableofcontents}{% - \pagenumbering{roman}% - \pagestyle{plain}% - \begingroup - \parskip \z@skip - \tableofcontents - \endgroup - % before resetting page counter, let's do the right thing. - \if@openright\cleardoublepage\else\clearpage\fi - \pagenumbering{arabic}% - \ifdefined\fancyhf\pagestyle{normal}\fi -} - -% This is needed to get the width of the section # area wide enough in the -% library reference. Doing it here keeps it the same for all the manuals. -% -\renewcommand*\l@section{\@dottedtocline{1}{1.5em}{2.6em}} -\renewcommand*\l@subsection{\@dottedtocline{2}{4.1em}{3.5em}} - -% Fix the bibliography environment to add an entry to the Table of -% Contents. -% For a report document class this environment is a chapter. 
-% -\newenvironment{sphinxthebibliography}[1]{% - \if@openright\cleardoublepage\else\clearpage\fi - % \phantomsection % not needed here since TeXLive 2010's hyperref - \begin{thebibliography}{#1}% - \addcontentsline{toc}{chapter}{\bibname}}{\end{thebibliography}} - -% Same for the indices. -% The memoir class already does this, so we don't duplicate it in that case. -% -\@ifclassloaded{memoir} - {\newenvironment{sphinxtheindex}{\begin{theindex}}{\end{theindex}}} - {\newenvironment{sphinxtheindex}{% - \if@openright\cleardoublepage\else\clearpage\fi - \phantomsection % needed as no chapter, section, ... created - \begin{theindex}% - \addcontentsline{toc}{chapter}{\indexname}}{\end{theindex}}} diff --git a/docs/_build/latex/sphinxmulticell.sty b/docs/_build/latex/sphinxmulticell.sty deleted file mode 100644 index f0d11b1..0000000 --- a/docs/_build/latex/sphinxmulticell.sty +++ /dev/null @@ -1,317 +0,0 @@ -\NeedsTeXFormat{LaTeX2e} -\ProvidesPackage{sphinxmulticell}% - [2017/02/23 v1.6 better span rows and columns of a table (Sphinx team)]% -\DeclareOption*{\PackageWarning{sphinxmulticell}{Option `\CurrentOption' is unknown}}% -\ProcessOptions\relax -% -% --- MULTICOLUMN --- -% standard LaTeX's \multicolumn -% 1. does not allow verbatim contents, -% 2. interacts very poorly with tabulary. -% -% It is needed to write own macros for Sphinx: to allow code-blocks in merged -% cells rendered by tabular/longtable, and to allow multi-column cells with -% paragraphs to be taken into account sanely by tabulary algorithm for column -% widths. -% -% This requires quite a bit of hacking. First, in Sphinx, the multi-column -% contents will *always* be wrapped in a varwidth environment. The issue -% becomes to pass it the correct target width. We must trick tabulary into -% believing the multicolumn is simply separate columns, else tabulary does not -% incorporate the contents in its algorithm. But then we must clear the -% vertical rules... 
-% -% configuration of tabulary -\setlength{\tymin}{3\fontcharwd\font`0 }% minimal width of "squeezed" columns -\setlength{\tymax}{10000pt}% allow enough room for paragraphs to "compete" -% we need access to tabulary's final computed width. \@tempdima is too volatile -% to hope it has kept tabulary's value when \sphinxcolwidth needs it. -\newdimen\sphinx@TY@tablewidth -\def\tabulary{% - \def\TY@final{\sphinx@TY@tablewidth\@tempdima\tabular}% - \let\endTY@final\endtabular - \TY@tabular}% -% next hack is needed only if user has set latex_use_latex_multicolumn to True: -% it fixes tabulary's bug with \multicolumn defined "short" in first pass. (if -% upstream tabulary adds a \long, our extra one causes no harm) -\def\sphinx@tempa #1\def\multicolumn#2#3#4#5#6#7#8#9\sphinx@tempa - {\def\TY@tab{#1\long\def\multicolumn####1####2####3{\multispan####1\relax}#9}}% -\expandafter\sphinx@tempa\TY@tab\sphinx@tempa -% -% TN. 1: as \omit is never executed, Sphinx multicolumn does not need to worry -% like standard multicolumn about |l| vs l|. On the other hand it assumes -% columns are separated by a | ... (if not it will add extraneous -% \arrayrulewidth space for each column separation in its estimate of available -% width). -% -% TN. 1b: as Sphinx multicolumn uses neither \omit nor \span, it can not -% (easily) get rid of extra macros from >{...} or <{...} between columns. At -% least, it has been made compatible with colortbl's \columncolor. -% -% TN. 2: tabulary's second pass is handled like tabular/longtable's single -% pass, with the difference that we hacked \TY@final to set in -% \sphinx@TY@tablewidth the final target width as computed by tabulary. This is -% needed only to handle columns with a "horizontal" specifier: "p" type columns -% (inclusive of tabulary's LJRC) holds the target column width in the -% \linewidth dimension. -% -% TN. 
3: use of \begin{sphinxmulticolumn}...\end{sphinxmulticolumn} mark-up -% would need some hacking around the fact that groups can not span across table -% cells (the code does inserts & tokens, see TN1b). It was decided to keep it -% simple with \sphinxstartmulticolumn...\sphinxstopmulticolumn. -% -% MEMO about nesting: if sphinxmulticolumn is encountered in a nested tabular -% inside a tabulary it will think to be at top level in the tabulary. But -% Sphinx generates no nested tables, and if some LaTeX macro uses internally a -% tabular this will not have a \sphinxstartmulticolumn within it! -% -\def\sphinxstartmulticolumn{% - \ifx\equation$% $ tabulary's first pass - \expandafter\sphinx@TYI@start@multicolumn - \else % either not tabulary or tabulary's second pass - \expandafter\sphinx@start@multicolumn - \fi -}% -\def\sphinxstopmulticolumn{% - \ifx\equation$% $ tabulary's first pass - \expandafter\sphinx@TYI@stop@multicolumn - \else % either not tabulary or tabulary's second pass - \ignorespaces - \fi -}% -\def\sphinx@TYI@start@multicolumn#1{% - % use \gdef always to avoid stack space build up - \gdef\sphinx@tempa{#1}\begingroup\setbox\z@\hbox\bgroup -}% -\def\sphinx@TYI@stop@multicolumn{\egroup % varwidth was used with \tymax - \xdef\sphinx@tempb{\the\dimexpr\wd\z@/\sphinx@tempa}% per column width - \endgroup - \expandafter\sphinx@TYI@multispan\expandafter{\sphinx@tempa}% -}% -\def\sphinx@TYI@multispan #1{% - \kern\sphinx@tempb\ignorespaces % the per column occupied width - \ifnum#1>\@ne % repeat, taking into account subtleties of TeX's & ... - \expandafter\sphinx@TYI@multispan@next\expandafter{\the\numexpr#1-\@ne\expandafter}% - \fi -}% -\def\sphinx@TYI@multispan@next{&\relax\sphinx@TYI@multispan}% -% -% Now the branch handling either the second pass of tabulary or the single pass -% of tabular/longtable. This is the delicate part where we gather the -% dimensions from the p columns either set-up by tabulary or by user p column -% or Sphinx \X, \Y columns. 
The difficulty is that to get the said width, the -% template must be inserted (other hacks would be horribly complicated except -% if we rewrote crucial parts of LaTeX's \@array !) and we can not do -% \omit\span like standard \multicolumn's easy approach. Thus we must cancel -% the \vrule separators. Also, perhaps the column specifier is of the l, c, r -% type, then we attempt an ad hoc rescue to give varwidth a reasonable target -% width. -\def\sphinx@start@multicolumn#1{% - \gdef\sphinx@multiwidth{0pt}\gdef\sphinx@tempa{#1}\sphinx@multispan{#1}% -}% -\def\sphinx@multispan #1{% - \ifnum#1=\@ne\expandafter\sphinx@multispan@end - \else\expandafter\sphinx@multispan@next - \fi {#1}% -}% -\def\sphinx@multispan@next #1{% - % trick to recognize L, C, R, J or p, m, b type columns - \ifdim\baselineskip>\z@ - \gdef\sphinx@tempb{\linewidth}% - \else - % if in an l, r, c type column, try and hope for the best - \xdef\sphinx@tempb{\the\dimexpr(\ifx\TY@final\@undefined\linewidth\else - \sphinx@TY@tablewidth\fi-\arrayrulewidth)/\sphinx@tempa - -\tw@\tabcolsep-\arrayrulewidth\relax}% - \fi - \noindent\kern\sphinx@tempb\relax - \xdef\sphinx@multiwidth - {\the\dimexpr\sphinx@multiwidth+\sphinx@tempb+\tw@\tabcolsep+\arrayrulewidth}% - % hack the \vline and the colortbl macros - \sphinx@hack@vline\sphinx@hack@CT&\relax - % repeat - \expandafter\sphinx@multispan\expandafter{\the\numexpr#1-\@ne}% -}% -% packages like colortbl add group levels, we need to "climb back up" to be -% able to hack the \vline and also the colortbl inserted tokens. 
This creates -% empty space whether or not the columns were | separated: -\def\sphinx@hack@vline{\ifnum\currentgrouptype=6\relax - \kern\arrayrulewidth\arrayrulewidth\z@\else\aftergroup\sphinx@hack@vline\fi}% -\def\sphinx@hack@CT{\ifnum\currentgrouptype=6\relax - \let\CT@setup\sphinx@CT@setup\else\aftergroup\sphinx@hack@CT\fi}% -% It turns out \CT@row@color is not expanded contrarily to \CT@column@color -% during LaTeX+colortbl preamble preparation, hence it would be possible for -% \sphinx@CT@setup to discard only the column color and choose to obey or not -% row color and cell color. It would even be possible to propagate cell color -% to row color for the duration of the Sphinx multicolumn... the (provisional?) -% choice has been made to cancel the colortbl colours for the multicolumn -% duration. -\def\sphinx@CT@setup #1\endgroup{\endgroup}% hack to remove colour commands -\def\sphinx@multispan@end#1{% - % first, trace back our steps horizontally - \noindent\kern-\dimexpr\sphinx@multiwidth\relax - % and now we set the final computed width for the varwidth environment - \ifdim\baselineskip>\z@ - \xdef\sphinx@multiwidth{\the\dimexpr\sphinx@multiwidth+\linewidth}% - \else - \xdef\sphinx@multiwidth{\the\dimexpr\sphinx@multiwidth+ - (\ifx\TY@final\@undefined\linewidth\else - \sphinx@TY@tablewidth\fi-\arrayrulewidth)/\sphinx@tempa - -\tw@\tabcolsep-\arrayrulewidth\relax}% - \fi - % we need to remove colour set-up also for last cell of the multi-column - \aftergroup\sphinx@hack@CT -}% -\newcommand*\sphinxcolwidth[2]{% - % this dimension will always be used for varwidth, and serves as maximum - % width when cells are merged either via multirow or multicolumn or both, - % as always their contents is wrapped in varwidth environment. 
- \ifnum#1>\@ne % multi-column (and possibly also multi-row) - % we wrote our own multicolumn code especially to handle that (and allow - % verbatim contents) - \ifx\equation$%$ - \tymax % first pass of tabulary (cf MEMO above regarding nesting) - \else % the \@gobble thing is for compatibility with standard \multicolumn - \sphinx@multiwidth\@gobble{#1/#2}% - \fi - \else % single column multirow - \ifx\TY@final\@undefined % not a tabulary. - \ifdim\baselineskip>\z@ - % in a p{..} type column, \linewidth is the target box width - \linewidth - \else - % l, c, r columns. Do our best. - \dimexpr(\linewidth-\arrayrulewidth)/#2- - \tw@\tabcolsep-\arrayrulewidth\relax - \fi - \else % in tabulary - \ifx\equation$%$% first pass - \tymax % it is set to a big value so that paragraphs can express themselves - \else - % second pass. - \ifdim\baselineskip>\z@ - \linewidth % in a L, R, C, J column or a p, \X, \Y ... - \else - % we have hacked \TY@final to put in \sphinx@TY@tablewidth the table width - \dimexpr(\sphinx@TY@tablewidth-\arrayrulewidth)/#2- - \tw@\tabcolsep-\arrayrulewidth\relax - \fi - \fi - \fi - \fi -}% -% fallback default in case user has set latex_use_latex_multicolumn to True: -% \sphinxcolwidth will use this only inside LaTeX's standard \multicolumn -\def\sphinx@multiwidth #1#2{\dimexpr % #1 to gobble the \@gobble (!) - (\ifx\TY@final\@undefined\linewidth\else\sphinx@TY@tablewidth\fi - -\arrayrulewidth)*#2-\tw@\tabcolsep-\arrayrulewidth\relax}% -% -% --- MULTIROW --- -% standard \multirow -% 1. does not allow verbatim contents, -% 2. does not allow blank lines in its argument, -% 3. its * specifier means to typeset "horizontally" which is very -% bad for paragraph content. 2016 version has = specifier but it -% must be used with p type columns only, else results are bad, -% 4. it requires manual intervention if the contents is too long to fit -% in the asked-for number of rows. -% 5. 
colour panels (either from \rowcolor or \columncolor) will hide -% the bottom part of multirow text, hence manual tuning is needed -% to put the multirow insertion at the _bottom_. -% -% The Sphinx solution consists in always having contents wrapped -% in a varwidth environment so that it makes sense to estimate how many -% lines it will occupy, and then ensure by insertion of suitable struts -% that the table rows have the needed height. The needed mark-up is done -% by LaTeX writer, which has its own id for the merged cells. -% -% The colour issue is solved by clearing colour panels in all cells, -% whether or not the multirow is single-column or multi-column. -% -% In passing we obtain baseline alignements across rows (only if -% \arraylinestretch is 1, as LaTeX's does not obey \arraylinestretch in "p" -% multi-line contents, only first and last line...) -% -% TODO: examine the situation with \arraylinestretch > 1. The \extrarowheight -% is hopeless for multirow anyhow, it makes baseline alignment strictly -% impossible. -\newcommand\sphinxmultirow[2]{\begingroup - % #1 = nb of spanned rows, #2 = Sphinx id of "cell", #3 = contents - % but let's fetch #3 in a way allowing verbatim contents ! - \def\sphinx@nbofrows{#1}\def\sphinx@cellid{#2}% - \afterassignment\sphinx@multirow\let\next= -}% -\def\sphinx@multirow {% - \setbox\z@\hbox\bgroup\aftergroup\sphinx@@multirow\strut -}% -\def\sphinx@@multirow {% - % The contents, which is a varwidth environment, has been captured in - % \box0 (a \hbox). - % We have with \sphinx@cellid an assigned unique id. The goal is to give - % about the same height to all the involved rows. - % For this Sphinx will insert a \sphinxtablestrut{cell_id} mark-up - % in LaTeX file and the expansion of the latter will do the suitable thing. 
- \dimen@\dp\z@ - \dimen\tw@\ht\@arstrutbox - \advance\dimen@\dimen\tw@ - \advance\dimen\tw@\dp\@arstrutbox - \count@=\dimen@ % type conversion dim -> int - \count\tw@=\dimen\tw@ - \divide\count@\count\tw@ % TeX division truncates - \advance\dimen@-\count@\dimen\tw@ - % 1300sp is about 0.02pt. For comparison a rule default width is 0.4pt. - % (note that if \count@ holds 0, surely \dimen@>1300sp) - \ifdim\dimen@>1300sp \advance\count@\@ne \fi - % now \count@ holds the count L of needed "lines" - % and \sphinx@nbofrows holds the number N of rows - % we have L >= 1 and N >= 1 - % if L is a multiple of N, ... clear what to do ! - % else write L = qN + r, 1 <= r < N and we will - % arrange for each row to have enough space for: - % q+1 "lines" in each of the first r rows - % q "lines" in each of the (N-r) bottom rows - % for a total of (q+1) * r + q * (N-r) = q * N + r = L - % It is possible that q == 0. - \count\tw@\count@ - % the TeX division truncates - \divide\count\tw@\sphinx@nbofrows\relax - \count4\count\tw@ % q - \multiply\count\tw@\sphinx@nbofrows\relax - \advance\count@-\count\tw@ % r - \expandafter\xdef\csname sphinx@tablestrut_\sphinx@cellid\endcsname - {\noexpand\sphinx@tablestrut{\the\count4}{\the\count@}{\sphinx@cellid}}% - \dp\z@\z@ - % this will use the real height if it is >\ht\@arstrutbox - \sphinxtablestrut{\sphinx@cellid}\box\z@ - \endgroup % group was opened in \sphinxmultirow -}% -\newcommand*\sphinxtablestrut[1]{% - % #1 is a "cell_id", i.e. the id of a merged group of table cells - \csname sphinx@tablestrut_#1\endcsname -}% -% LaTeX typesets the table row by row, hence each execution can do -% an update for the next row. 
-\newcommand*\sphinx@tablestrut[3]{\begingroup - % #1 = q, #2 = (initially) r, #3 = cell_id, q+1 lines in first r rows - % if #2 = 0, create space for max(q,1) table lines - % if #2 > 0, create space for q+1 lines and decrement #2 - \leavevmode - \count@#1\relax - \ifnum#2=\z@ - \ifnum\count@=\z@\count@\@ne\fi - \else - % next row will be with a #2 decremented by one - \expandafter\xdef\csname sphinx@tablestrut_#3\endcsname - {\noexpand\sphinx@tablestrut{#1}{\the\numexpr#2-\@ne}{#3}}% - \advance\count@\@ne - \fi - \vrule\@height\ht\@arstrutbox - \@depth\dimexpr\count@\ht\@arstrutbox+\count@\dp\@arstrutbox-\ht\@arstrutbox\relax - \@width\z@ - \endgroup - % we need this to avoid colour panels hiding bottom parts of multirow text - \sphinx@hack@CT -}% -\endinput -%% -%% End of file `sphinxmulticell.sty'. diff --git a/docs/_build/linkcheck/output.txt b/docs/_build/linkcheck/output.txt deleted file mode 100644 index e69de29..0000000 diff --git a/docs/commands.md b/docs/commands.md new file mode 100644 index 0000000..ff7cc4c --- /dev/null +++ b/docs/commands.md @@ -0,0 +1,5 @@ +sphinx-apidoc -o docs/ gklearn/ --separate + +sphinx-apidoc -o source/ ../gklearn/ --separate --force --module-first --no-toc + +make html diff --git a/docs/index.rst b/docs/index.rst deleted file mode 100644 index d2d2ed9..0000000 --- a/docs/index.rst +++ /dev/null @@ -1,20 +0,0 @@ -.. py-graph documentation master file, created by - sphinx-quickstart on Tue Jan 28 17:13:42 2020. - You can adapt this file completely to your liking, but it should at least - contain the root `toctree` directive. - -Welcome to py-graph's documentation! -==================================== - -.. 
toctree:: - :maxdepth: 2 - :caption: Contents: - - - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`modindex` -* :ref:`search` diff --git a/docs/log.1580230429.3064938.log b/docs/log.1580230429.3064938.log deleted file mode 100644 index e69de29..0000000 diff --git a/docs/make.bat b/docs/make.bat index 27f573b..543c6b1 100644 --- a/docs/make.bat +++ b/docs/make.bat @@ -7,8 +7,8 @@ REM Command file for Sphinx documentation if "%SPHINXBUILD%" == "" ( set SPHINXBUILD=sphinx-build ) -set SOURCEDIR=. -set BUILDDIR=_build +set SOURCEDIR=source +set BUILDDIR=build if "%1" == "" goto help diff --git a/docs/pygraph.rst b/docs/pygraph.rst deleted file mode 100644 index 592eb54..0000000 --- a/docs/pygraph.rst +++ /dev/null @@ -1,17 +0,0 @@ -pygraph package -=============== - -Subpackages ------------ - -.. toctree:: - - pygraph.utils - -Module contents ---------------- - -.. automodule:: pygraph - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/pygraph.utils.rst b/docs/pygraph.utils.rst deleted file mode 100644 index 07ec351..0000000 --- a/docs/pygraph.utils.rst +++ /dev/null @@ -1,94 +0,0 @@ -pygraph.utils package -===================== - -Submodules ----------- - -pygraph.utils.graphdataset module ---------------------------------- - -.. automodule:: pygraph.utils.graphdataset - :members: - :undoc-members: - :show-inheritance: - -pygraph.utils.graphfiles module -------------------------------- - -.. automodule:: pygraph.utils.graphfiles - :members: - :undoc-members: - :show-inheritance: - -pygraph.utils.ipython\_log module ---------------------------------- - -.. automodule:: pygraph.utils.ipython_log - :members: - :undoc-members: - :show-inheritance: - -pygraph.utils.isNotebook module -------------------------------- - -.. automodule:: pygraph.utils.isNotebook - :members: - :undoc-members: - :show-inheritance: - -pygraph.utils.kernels module ----------------------------- - -.. 
automodule:: pygraph.utils.kernels - :members: - :undoc-members: - :show-inheritance: - -pygraph.utils.logger2file module --------------------------------- - -.. automodule:: pygraph.utils.logger2file - :members: - :undoc-members: - :show-inheritance: - -pygraph.utils.model\_selection\_precomputed module --------------------------------------------------- - -.. automodule:: pygraph.utils.model_selection_precomputed - :members: - :undoc-members: - :show-inheritance: - -pygraph.utils.parallel module ------------------------------ - -.. automodule:: pygraph.utils.parallel - :members: - :undoc-members: - :show-inheritance: - -pygraph.utils.trie module -------------------------- - -.. automodule:: pygraph.utils.trie - :members: - :undoc-members: - :show-inheritance: - -pygraph.utils.utils module --------------------------- - -.. automodule:: pygraph.utils.utils - :members: - :undoc-members: - :show-inheritance: - - -Module contents ---------------- - -.. automodule:: pygraph.utils - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/conf.py b/docs/source/conf.py similarity index 85% rename from docs/conf.py rename to docs/source/conf.py index 59646dc..8c0bf5d 100644 --- a/docs/conf.py +++ b/docs/source/conf.py @@ -15,19 +15,19 @@ import os import sys # sys.path.insert(0, os.path.abspath('.')) -sys.path.insert(0, os.path.abspath('..')) - +# sys.path.insert(0, os.path.abspath('..')) +sys.path.insert(0, '../') # -- Project information ----------------------------------------------------- -project = 'py-graph' +project = 'graphkit-learn' copyright = '2020, Linlin Jia' author = 'Linlin Jia' # The short X.Y version -version = '1.0' +version = '' # The full version, including alpha/beta/rc tags -release = '' +release = '1.0.0' # -- General configuration --------------------------------------------------- @@ -41,9 +41,13 @@ release = '' # ones. 
extensions = [ 'sphinx.ext.autodoc', + 'sphinx.ext.doctest', + 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.mathjax', + 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode', + 'm2r', ] # Add any paths that contain templates here, relative to this directory. @@ -52,8 +56,8 @@ templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # -# source_suffix = ['.rst', '.md'] -source_suffix = '.rst' +source_suffix = ['.rst', '.md'] +# source_suffix = '.rst' # The master toctree document. master_doc = 'index' @@ -68,7 +72,7 @@ language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. -exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] +exclude_patterns = [] # The name of the Pygments (syntax highlighting) style to use. pygments_style = None @@ -107,7 +111,7 @@ html_static_path = ['_static'] # -- Options for HTMLHelp output --------------------------------------------- # Output file base name for HTML help builder. -htmlhelp_basename = 'py-graphdoc' +htmlhelp_basename = 'graphkit-learndoc' # -- Options for LaTeX output ------------------------------------------------ @@ -134,7 +138,7 @@ latex_elements = { # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ - (master_doc, 'py-graph.tex', 'py-graph Documentation', + (master_doc, 'graphkit-learn.tex', 'graphkit-learn Documentation', 'Linlin Jia', 'manual'), ] @@ -144,7 +148,7 @@ latex_documents = [ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). 
man_pages = [ - (master_doc, 'py-graph', 'py-graph Documentation', + (master_doc, 'graphkit-learn', 'graphkit-learn Documentation', [author], 1) ] @@ -155,8 +159,8 @@ man_pages = [ # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ - (master_doc, 'py-graph', 'py-graph Documentation', - author, 'py-graph', 'One line description of project.', + (master_doc, 'graphkit-learn', 'graphkit-learn Documentation', + author, 'graphkit-learn', 'One line description of project.', 'Miscellaneous'), ] @@ -180,3 +184,10 @@ epub_exclude_files = ['search.html'] # -- Extension configuration ------------------------------------------------- + +# -- Options for todo extension ---------------------------------------------- + +# If true, `todo` and `todoList` produce output, else they produce nothing. +todo_include_todos = True + +add_module_names = False diff --git a/docs/source/experiments.rst b/docs/source/experiments.rst new file mode 100644 index 0000000..151cb54 --- /dev/null +++ b/docs/source/experiments.rst @@ -0,0 +1,23 @@ +Experiments +=========== + +To exhibit the effectiveness and practicability of `graphkit-learn` library, we tested it on several benchmark datasets. See `(Kersting et al., 2016) __ for details on these datasets. + +A two-layer nested cross-validation (CV) is applied to select and evaluate models, where outer CV randomly splits the dataset into 10 folds with 9 as validation set, and inner CV then randomly splits validation set to 10 folds with 9 as training set. The whole procedure is performed 30 times, and the average performance is computed over these trails. Possible parameters of a graph kernel are also tuned during this procedure. + +The machine used to execute the experiments is a cluster with 28 CPU cores of Intel(R) Xeon(R) E5-2680 v4 @ 2.40GHz, 252GB memory, and 64-bit operating system CentOS Linux release 7.3.1611. All results were run with Python 3.5.2. 
+ +The figure below exhibits accuracies achieved by graph kernels implemented in `graphkit-learn` library. Each row corresponds to a dataset and each column to a graph kernel. Accuracies are in percentage for classification and in terms of errors of boiling points for regression (Alkane and +Acyclic datasets). Red color indicates a worse result and green a better one. Gray cells with the “inf” marker indicate that the computation of the graph kernel on the dataset is neglected due to much higher consumption of computational resources than other kernels. + +.. image:: figures/all_test_accuracy.svg + :width: 600 + :alt: accuracies + +The figure below displays computational time consumed to compute Gram matrices of each graph +kernels (in :math:`log10` of seconds) on each dataset. Colors have the same meaning as in the figure above. + +.. image:: figures/all_ave_gm_times.svg + :width: 600 + :alt: computational time + diff --git a/docs/source/figures/all_ave_gm_times.svg b/docs/source/figures/all_ave_gm_times.svg new file mode 100644 index 0000000..d102164 --- /dev/null +++ b/docs/source/figures/all_ave_gm_times.svg @@ -0,0 +1,3788 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/docs/source/figures/all_test_accuracy.svg b/docs/source/figures/all_test_accuracy.svg new file mode 100644 index 0000000..96b052c --- /dev/null +++ b/docs/source/figures/all_test_accuracy.svg @@ -0,0 +1,3841 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/docs/source/gklearn.kernels.commonWalkKernel.rst b/docs/source/gklearn.kernels.commonWalkKernel.rst new file mode 100644 index 0000000..1b4b4d8 --- /dev/null +++ b/docs/source/gklearn.kernels.commonWalkKernel.rst @@ -0,0 +1,7 @@ +gklearn.kernels.commonWalkKernel 
+================================ + +.. automodule:: gklearn.kernels.commonWalkKernel + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/gklearn.kernels.marginalizedKernel.rst b/docs/source/gklearn.kernels.marginalizedKernel.rst new file mode 100644 index 0000000..70141f7 --- /dev/null +++ b/docs/source/gklearn.kernels.marginalizedKernel.rst @@ -0,0 +1,7 @@ +gklearn.kernels.marginalizedKernel +================================== + +.. automodule:: gklearn.kernels.marginalizedKernel + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/gklearn.kernels.randomWalkKernel.rst b/docs/source/gklearn.kernels.randomWalkKernel.rst new file mode 100644 index 0000000..f6a24d6 --- /dev/null +++ b/docs/source/gklearn.kernels.randomWalkKernel.rst @@ -0,0 +1,7 @@ +gklearn.kernels.randomWalkKernel +================================ + +.. automodule:: gklearn.kernels.randomWalkKernel + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/gklearn.kernels.rst b/docs/source/gklearn.kernels.rst new file mode 100644 index 0000000..404d2d3 --- /dev/null +++ b/docs/source/gklearn.kernels.rst @@ -0,0 +1,19 @@ +gklearn.kernels +=============== + +.. automodule:: gklearn.kernels + :members: + :undoc-members: + :show-inheritance: + +.. toctree:: + + gklearn.kernels.commonWalkKernel + gklearn.kernels.marginalizedKernel + gklearn.kernels.randomWalkKernel + gklearn.kernels.spKernel + gklearn.kernels.structuralspKernel + gklearn.kernels.treeletKernel + gklearn.kernels.untilHPathKernel + gklearn.kernels.weisfeilerLehmanKernel + diff --git a/docs/source/gklearn.kernels.spKernel.rst b/docs/source/gklearn.kernels.spKernel.rst new file mode 100644 index 0000000..d9da9bc --- /dev/null +++ b/docs/source/gklearn.kernels.spKernel.rst @@ -0,0 +1,7 @@ +gklearn.kernels.spKernel +======================== + +.. 
automodule:: gklearn.kernels.spKernel + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/gklearn.kernels.structuralspKernel.rst b/docs/source/gklearn.kernels.structuralspKernel.rst new file mode 100644 index 0000000..90c0fe3 --- /dev/null +++ b/docs/source/gklearn.kernels.structuralspKernel.rst @@ -0,0 +1,7 @@ +gklearn.kernels.structuralspKernel +================================== + +.. automodule:: gklearn.kernels.structuralspKernel + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/gklearn.kernels.treeletKernel.rst b/docs/source/gklearn.kernels.treeletKernel.rst new file mode 100644 index 0000000..c88016d --- /dev/null +++ b/docs/source/gklearn.kernels.treeletKernel.rst @@ -0,0 +1,7 @@ +gklearn.kernels.treeletKernel +============================= + +.. automodule:: gklearn.kernels.treeletKernel + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/gklearn.kernels.untilHPathKernel.rst b/docs/source/gklearn.kernels.untilHPathKernel.rst new file mode 100644 index 0000000..76f3910 --- /dev/null +++ b/docs/source/gklearn.kernels.untilHPathKernel.rst @@ -0,0 +1,7 @@ +gklearn.kernels.untilHPathKernel +================================ + +.. automodule:: gklearn.kernels.untilHPathKernel + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/gklearn.kernels.weisfeilerLehmanKernel.rst b/docs/source/gklearn.kernels.weisfeilerLehmanKernel.rst new file mode 100644 index 0000000..f5797a2 --- /dev/null +++ b/docs/source/gklearn.kernels.weisfeilerLehmanKernel.rst @@ -0,0 +1,7 @@ +gklearn.kernels.weisfeilerLehmanKernel +====================================== + +.. automodule:: gklearn.kernels.weisfeilerLehmanKernel + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/gklearn.rst b/docs/source/gklearn.rst new file mode 100644 index 0000000..d7de14a --- /dev/null +++ b/docs/source/gklearn.rst @@ -0,0 +1,13 @@ +gklearn +======= + +.. 
automodule:: gklearn + :members: + :undoc-members: + :show-inheritance: + +.. toctree:: + + gklearn.kernels + gklearn.utils + diff --git a/docs/source/gklearn.utils.graphdataset.rst b/docs/source/gklearn.utils.graphdataset.rst new file mode 100644 index 0000000..4e2aae1 --- /dev/null +++ b/docs/source/gklearn.utils.graphdataset.rst @@ -0,0 +1,7 @@ +gklearn.utils.graphdataset +========================== + +.. automodule:: gklearn.utils.graphdataset + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/gklearn.utils.graphfiles.rst b/docs/source/gklearn.utils.graphfiles.rst new file mode 100644 index 0000000..48b5e06 --- /dev/null +++ b/docs/source/gklearn.utils.graphfiles.rst @@ -0,0 +1,7 @@ +gklearn.utils.graphfiles +======================== + +.. automodule:: gklearn.utils.graphfiles + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/gklearn.utils.kernels.rst b/docs/source/gklearn.utils.kernels.rst new file mode 100644 index 0000000..023cb3e --- /dev/null +++ b/docs/source/gklearn.utils.kernels.rst @@ -0,0 +1,7 @@ +gklearn.utils.kernels +===================== + +.. automodule:: gklearn.utils.kernels + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/gklearn.utils.model_selection_precomputed.rst b/docs/source/gklearn.utils.model_selection_precomputed.rst new file mode 100644 index 0000000..b80e8fc --- /dev/null +++ b/docs/source/gklearn.utils.model_selection_precomputed.rst @@ -0,0 +1,7 @@ +gklearn.utils.model\_selection\_precomputed +=========================================== + +.. automodule:: gklearn.utils.model_selection_precomputed + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/gklearn.utils.parallel.rst b/docs/source/gklearn.utils.parallel.rst new file mode 100644 index 0000000..8469b0a --- /dev/null +++ b/docs/source/gklearn.utils.parallel.rst @@ -0,0 +1,7 @@ +gklearn.utils.parallel +====================== + +.. 
automodule:: gklearn.utils.parallel + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/gklearn.utils.rst b/docs/source/gklearn.utils.rst new file mode 100644 index 0000000..3d8a0e6 --- /dev/null +++ b/docs/source/gklearn.utils.rst @@ -0,0 +1,19 @@ +gklearn.utils +============= + +.. automodule:: gklearn.utils + :members: + :undoc-members: + :show-inheritance: + + +.. toctree:: + + gklearn.utils.graphdataset + gklearn.utils.graphfiles + gklearn.utils.kernels + gklearn.utils.model_selection_precomputed + gklearn.utils.parallel + gklearn.utils.trie + gklearn.utils.utils + diff --git a/docs/source/gklearn.utils.trie.rst b/docs/source/gklearn.utils.trie.rst new file mode 100644 index 0000000..1310cb1 --- /dev/null +++ b/docs/source/gklearn.utils.trie.rst @@ -0,0 +1,7 @@ +gklearn.utils.trie +================== + +.. automodule:: gklearn.utils.trie + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/gklearn.utils.utils.rst b/docs/source/gklearn.utils.utils.rst new file mode 100644 index 0000000..004db58 --- /dev/null +++ b/docs/source/gklearn.utils.utils.rst @@ -0,0 +1,7 @@ +gklearn.utils.utils +=================== + +.. automodule:: gklearn.utils.utils + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/index.rst b/docs/source/index.rst new file mode 100644 index 0000000..b531ba1 --- /dev/null +++ b/docs/source/index.rst @@ -0,0 +1,24 @@ +.. graphkit-learn documentation master file, created by + sphinx-quickstart on Wed Feb 12 15:06:37 2020. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +.. mdinclude:: ../../README.md + +Documentation +------------- + +.. 
toctree:: + :maxdepth: 1 + + modules.rst + experiments.rst + + + +Indices and tables +------------------ + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` diff --git a/docs/modules.rst b/docs/source/modules.rst similarity index 51% rename from docs/modules.rst rename to docs/source/modules.rst index a563717..536f81b 100644 --- a/docs/modules.rst +++ b/docs/source/modules.rst @@ -1,7 +1,7 @@ -py-graph -======== +Modules +======= .. toctree:: :maxdepth: 4 - pygraph + gklearn diff --git a/pygraph/__init__.py b/gklearn/__init__.py similarity index 85% rename from pygraph/__init__.py rename to gklearn/__init__.py index c29a3e7..c607b26 100644 --- a/pygraph/__init__.py +++ b/gklearn/__init__.py @@ -1,6 +1,6 @@ # -*-coding:utf-8 -*- """ -Pygraph +gklearn This package contains 4 sub packages : * c_ext : binders to C++ code @@ -16,6 +16,6 @@ __author__ = "Benoit Gaüzère" __date__ = "November 2017" # import sub modules -# from pygraph import c_ext -# from pygraph import ged -from pygraph import utils +# from gklearn import c_ext +# from gklearn import ged +from gklearn import utils diff --git a/gklearn/kernels/.tags b/gklearn/kernels/.tags new file mode 100644 index 0000000..0663235 --- /dev/null +++ b/gklearn/kernels/.tags @@ -0,0 +1,188 @@ +!_TAG_FILE_FORMAT 2 /extended format; --format=1 will not append ;" to lines/ +!_TAG_FILE_SORTED 0 /0=unsorted, 1=sorted, 2=foldcase/ +!_TAG_PROGRAM_AUTHOR Darren Hiebert /dhiebert@users.sourceforge.net/ +!_TAG_PROGRAM_NAME Exuberant Ctags // +!_TAG_PROGRAM_URL http://ctags.sourceforge.net /official site/ +!_TAG_PROGRAM_VERSION 5.9~svn20110310 // +commonwalkkernel /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/commonWalkKernel.py /^def commonwalkkernel(*args,$/;" function line:23 +compute_method /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/commonWalkKernel.py /^ compute_method = compute_method.lower()$/;" variable line:67 +Gn 
/media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/commonWalkKernel.py /^ Gn = args[0] if len(args) == 1 else [args[0], args[1]]$/;" variable line:69 +len_gn /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/commonWalkKernel.py /^ len_gn = len(Gn)$/;" variable line:72 +Gn /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/commonWalkKernel.py /^ Gn = [(idx, G) for idx, G in enumerate(Gn) if nx.number_of_nodes(G) != 1]$/;" variable line:73 +idx /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/commonWalkKernel.py /^ idx = [G[0] for G in Gn]$/;" variable line:74 +Gn /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/commonWalkKernel.py /^ Gn = [G[1] for G in Gn]$/;" variable line:75 +ds_attrs /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/commonWalkKernel.py /^ ds_attrs = get_dataset_attributes($/;" variable line:81 +attr_names /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/commonWalkKernel.py /^ attr_names=['node_labeled', 'edge_labeled', 'is_directed'],$/;" variable line:83 +Gn /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/commonWalkKernel.py /^ Gn = [G.to_directed() for G in Gn]$/;" variable line:92 +start_time /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/commonWalkKernel.py /^ start_time = time.time()$/;" variable line:94 +Kmatrix /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/commonWalkKernel.py /^ Kmatrix = np.zeros((len(Gn), len(Gn)))$/;" variable line:96 +init_worker /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/commonWalkKernel.py /^ def init_worker(gn_toshare):$/;" function line:99 +run_time /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/commonWalkKernel.py /^ run_time = time.time() - start_time$/;" variable line:173 +_commonwalkkernel_exp 
/media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/commonWalkKernel.py /^def _commonwalkkernel_exp(g1, g2, node_label, edge_label, beta):$/;" function line:181 +wrapper_cw_exp /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/commonWalkKernel.py /^def wrapper_cw_exp(node_label, edge_label, beta, itr):$/;" function line:249 +_commonwalkkernel_geo /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/commonWalkKernel.py /^def _commonwalkkernel_geo(g1, g2, node_label, edge_label, gamma):$/;" function line:255 +wrapper_cw_geo /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/commonWalkKernel.py /^def wrapper_cw_geo(node_label, edge_label, gama, itr):$/;" function line:290 +_commonwalkkernel_brute /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/commonWalkKernel.py /^def _commonwalkkernel_brute(walks1,$/;" function line:296 +find_all_walks_until_length /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/commonWalkKernel.py /^def find_all_walks_until_length(G,$/;" function line:336 +find_walks /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/commonWalkKernel.py /^def find_walks(G, source_node, length):$/;" function line:388 +find_all_walks /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/commonWalkKernel.py /^def find_all_walks(G, length):$/;" function line:412 +randomwalkkernel /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/else/rwalk_sym.py /^def randomwalkkernel(*args,$/;" function line:27 +_sylvester_equation /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/else/rwalk_sym.py /^def _sylvester_equation(Gn, lmda, p, q, eweight, n_jobs):$/;" function line:150 +init_worker /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/else/rwalk_sym.py /^ def init_worker(Awl_toshare):$/;" function line:184 function:_sylvester_equation +wrapper_se_do 
/media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/else/rwalk_sym.py /^def wrapper_se_do(lmda, itr):$/;" function line:214 +_se_do /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/else/rwalk_sym.py /^def _se_do(A_wave1, A_wave2, lmda):$/;" function line:220 +_conjugate_gradient /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/else/rwalk_sym.py /^def _conjugate_gradient(Gn, lmda, p, q, ds_attrs, node_kernels, edge_kernels, $/;" function line:236 +init_worker /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/else/rwalk_sym.py /^ def init_worker(gn_toshare):$/;" function line:280 function:_conjugate_gradient +wrapper_cg_unlabled_do /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/else/rwalk_sym.py /^def wrapper_cg_unlabled_do(lmda, itr):$/;" function line:302 +_cg_unlabled_do /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/else/rwalk_sym.py /^def _cg_unlabled_do(A_wave1, A_wave2, lmda):$/;" function line:308 +wrapper_cg_labled_do /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/else/rwalk_sym.py /^def wrapper_cg_labled_do(ds_attrs, node_kernels, node_label, edge_kernels, $/;" function line:320 +_cg_labled_do /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/else/rwalk_sym.py /^def _cg_labled_do(g1, g2, ds_attrs, node_kernels, node_label, $/;" function line:328 +_fixed_point /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/else/rwalk_sym.py /^def _fixed_point(Gn, lmda, p, q, ds_attrs, node_kernels, edge_kernels, $/;" function line:351 +init_worker /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/else/rwalk_sym.py /^ def init_worker(gn_toshare):$/;" function line:408 function:_fixed_point +wrapper_fp_labled_do /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/else/rwalk_sym.py /^def wrapper_fp_labled_do(ds_attrs, node_kernels, node_label, edge_kernels, $/;" 
function line:418 +_fp_labled_do /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/else/rwalk_sym.py /^def _fp_labled_do(g1, g2, ds_attrs, node_kernels, node_label, $/;" function line:426 +func_fp /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/else/rwalk_sym.py /^def func_fp(x, p_times, lmda, w_times):$/;" function line:448 +_spectral_decomposition /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/else/rwalk_sym.py /^def _spectral_decomposition(Gn, weight, p, q, sub_kernel, eweight, n_jobs):$/;" function line:456 +init_worker /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/else/rwalk_sym.py /^ def init_worker(q_T_toshare, P_toshare, D_toshare):$/;" function line:492 function:_spectral_decomposition +wrapper_sd_do /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/else/rwalk_sym.py /^def wrapper_sd_do(weight, sub_kernel, itr):$/;" function line:516 +_sd_do /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/else/rwalk_sym.py /^def _sd_do(q_T1, q_T2, P1, P2, D1, D2, weight, sub_kernel): $/;" function line:523 +_randomwalkkernel_kron /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/else/rwalk_sym.py /^def _randomwalkkernel_kron(G1, G2, node_label, edge_label):$/;" function line:540 +getLabels /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/else/rwalk_sym.py /^def getLabels(Gn, node_label, edge_label, directed):$/;" function line:561 +filterGramMatrix /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/else/rwalk_sym.py /^def filterGramMatrix(gmt, label_dict, label, directed):$/;" function line:581 +computeVK /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/else/rwalk_sym.py /^def computeVK(g1, g2, ds_attrs, node_kernels, node_label):$/;" function line:593 +computeW /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/else/rwalk_sym.py /^def computeW(g1, g2, 
vk_dict, ds_attrs, edge_kernels, edge_label):$/;" function line:627 +spkernel /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/else/sp_sym.py /^def spkernel(*args,$/;" function line:24 +init_worker /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/else/sp_sym.py /^ def init_worker(gn_toshare):$/;" function line:115 function:spkernel +spkernel_do /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/else/sp_sym.py /^def spkernel_do(g1, g2, ds_attrs, node_label, node_kernels):$/;" function line:130 +wrapper_sp_do /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/else/sp_sym.py /^def wrapper_sp_do(ds_attrs, node_label, node_kernels, itr):$/;" function line:191 +wrapper_getSPGraph /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/else/sp_sym.py /^def wrapper_getSPGraph(weight, itr_item):$/;" function line:197 +structuralspkernel /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/else/ssp_sym.py /^def structuralspkernel(*args,$/;" function line:25 +init_worker /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/else/ssp_sym.py /^ def init_worker(spl_toshare, gs_toshare):$/;" function line:177 function:structuralspkernel +structuralspkernel_do /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/else/ssp_sym.py /^def structuralspkernel_do(g1, g2, spl1, spl2, ds_attrs, node_label, edge_label,$/;" function line:265 +wrapper_ssp_do /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/else/ssp_sym.py /^def wrapper_ssp_do(ds_attrs, node_label, edge_label, node_kernels, $/;" function line:417 +get_shortest_paths /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/else/ssp_sym.py /^def get_shortest_paths(G, weight, directed):$/;" function line:426 +wrapper_getSP /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/else/ssp_sym.py /^def wrapper_getSP(weight, directed, itr_item):$/;" 
function line:461 +marginalizedkernel /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/marginalizedKernel.py /^def marginalizedkernel(*args,$/;" function line:31 +init_worker /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/marginalizedKernel.py /^ def init_worker(gn_toshare):$/;" function line:114 function:marginalizedkernel +_marginalizedkernel_do /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/marginalizedKernel.py /^def _marginalizedkernel_do(g1, g2, node_label, edge_label, p_quit, n_iteration):$/;" function line:144 +wrapper_marg_do /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/marginalizedKernel.py /^def wrapper_marg_do(node_label, edge_label, p_quit, n_iteration, itr):$/;" function line:290 +wrapper_untotter /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/marginalizedKernel.py /^def wrapper_untotter(Gn, node_label, edge_label, i):$/;" function line:296 +randomwalkkernel /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/randomWalkKernel.py /^def randomwalkkernel(*args,$/;" function line:21 +_sylvester_equation /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/randomWalkKernel.py /^def _sylvester_equation(Gn, lmda, p, q, eweight, n_jobs, verbose=True):$/;" function line:197 +init_worker /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/randomWalkKernel.py /^ def init_worker(Awl_toshare):$/;" function line:232 function:_sylvester_equation +wrapper_se_do /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/randomWalkKernel.py /^def wrapper_se_do(lmda, itr):$/;" function line:262 +_se_do /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/randomWalkKernel.py /^def _se_do(A_wave1, A_wave2, lmda):$/;" function line:268 +_conjugate_gradient /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/randomWalkKernel.py /^def _conjugate_gradient(Gn, lmda, p, q, 
ds_attrs, node_kernels, edge_kernels, $/;" function line:284 +init_worker /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/randomWalkKernel.py /^ def init_worker(gn_toshare):$/;" function line:328 function:_conjugate_gradient +wrapper_cg_unlabled_do /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/randomWalkKernel.py /^def wrapper_cg_unlabled_do(lmda, itr):$/;" function line:350 +_cg_unlabled_do /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/randomWalkKernel.py /^def _cg_unlabled_do(A_wave1, A_wave2, lmda):$/;" function line:356 +wrapper_cg_labled_do /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/randomWalkKernel.py /^def wrapper_cg_labled_do(ds_attrs, node_kernels, node_label, edge_kernels, $/;" function line:368 +_cg_labled_do /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/randomWalkKernel.py /^def _cg_labled_do(g1, g2, ds_attrs, node_kernels, node_label, $/;" function line:376 +_fixed_point /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/randomWalkKernel.py /^def _fixed_point(Gn, lmda, p, q, ds_attrs, node_kernels, edge_kernels, $/;" function line:399 +init_worker /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/randomWalkKernel.py /^ def init_worker(gn_toshare):$/;" function line:456 function:_fixed_point +wrapper_fp_labled_do /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/randomWalkKernel.py /^def wrapper_fp_labled_do(ds_attrs, node_kernels, node_label, edge_kernels, $/;" function line:466 +_fp_labled_do /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/randomWalkKernel.py /^def _fp_labled_do(g1, g2, ds_attrs, node_kernels, node_label, $/;" function line:474 +func_fp /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/randomWalkKernel.py /^def func_fp(x, p_times, lmda, w_times):$/;" function line:496 +_spectral_decomposition 
/media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/randomWalkKernel.py /^def _spectral_decomposition(Gn, weight, p, q, sub_kernel, eweight, n_jobs, verbose=True):$/;" function line:504 +init_worker /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/randomWalkKernel.py /^ def init_worker(q_T_toshare, P_toshare, D_toshare):$/;" function line:541 function:_spectral_decomposition +wrapper_sd_do /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/randomWalkKernel.py /^def wrapper_sd_do(weight, sub_kernel, itr):$/;" function line:566 +_sd_do /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/randomWalkKernel.py /^def _sd_do(q_T1, q_T2, P1, P2, D1, D2, weight, sub_kernel): $/;" function line:573 +_randomwalkkernel_kron /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/randomWalkKernel.py /^def _randomwalkkernel_kron(G1, G2, node_label, edge_label):$/;" function line:590 +getLabels /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/randomWalkKernel.py /^def getLabels(Gn, node_label, edge_label, directed):$/;" function line:611 +filterGramMatrix /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/randomWalkKernel.py /^def filterGramMatrix(gmt, label_dict, label, directed):$/;" function line:631 +computeVK /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/randomWalkKernel.py /^def computeVK(g1, g2, ds_attrs, node_kernels, node_label):$/;" function line:643 +computeW /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/randomWalkKernel.py /^def computeW(g1, g2, vk_dict, ds_attrs, edge_kernels, edge_label):$/;" function line:677 +spkernel /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/spKernel.py /^def spkernel(*args,$/;" function line:22 +init_worker /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/spKernel.py /^ def init_worker(gn_toshare):$/;" function line:157 
function:spkernel +spkernel_do /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/spKernel.py /^def spkernel_do(g1, g2, ds_attrs, node_label, node_kernels):$/;" function line:207 +wrapper_sp_do /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/spKernel.py /^def wrapper_sp_do(ds_attrs, node_label, node_kernels, itr):$/;" function line:297 +wrapper_getSPGraph /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/spKernel.py /^def wrapper_getSPGraph(weight, itr_item):$/;" function line:310 +structuralspkernel /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/structuralspKernel.py /^def structuralspkernel(*args,$/;" function line:28 +init_worker /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/structuralspKernel.py /^ def init_worker(spl_toshare, gs_toshare):$/;" function line:179 function:structuralspkernel +structuralspkernel_do /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/structuralspKernel.py /^def structuralspkernel_do(g1, g2, spl1, spl2, ds_attrs, node_label, edge_label,$/;" function line:258 +wrapper_ssp_do /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/structuralspKernel.py /^def wrapper_ssp_do(ds_attrs, node_label, edge_label, node_kernels, $/;" function line:346 +ssp_do_trie /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/structuralspKernel.py /^def ssp_do_trie(g1, g2, trie1, trie2, ds_attrs, node_label, edge_label,$/;" function line:355 +wrapper_ssp_do_trie /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/structuralspKernel.py /^def wrapper_ssp_do_trie(ds_attrs, node_label, edge_label, node_kernels, $/;" function line:463 +getAllNodeKernels /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/structuralspKernel.py /^def getAllNodeKernels(g1, g2, node_kernels, node_label, ds_attrs):$/;" function line:471 +getAllEdgeKernels 
/media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/structuralspKernel.py /^def getAllEdgeKernels(g1, g2, edge_kernels, edge_label, ds_attrs):$/;" function line:505 +traverseBothTriem /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/structuralspKernel.py /^def traverseBothTriem(root, trie2, kernel, vk_dict, ek_dict, pcurrent=[]):$/;" function line:551 +traverseTrie2m /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/structuralspKernel.py /^def traverseTrie2m(root, p1, kernel, vk_dict, ek_dict, pcurrent=[]):$/;" function line:568 +traverseBothTriev /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/structuralspKernel.py /^def traverseBothTriev(root, trie2, kernel, vk_dict, ek_dict, pcurrent=[]):$/;" function line:592 +traverseTrie2v /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/structuralspKernel.py /^def traverseTrie2v(root, p1, kernel, vk_dict, ek_dict, pcurrent=[]):$/;" function line:609 +traverseBothTriee /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/structuralspKernel.py /^def traverseBothTriee(root, trie2, kernel, vk_dict, ek_dict, pcurrent=[]):$/;" function line:631 +traverseTrie2e /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/structuralspKernel.py /^def traverseTrie2e(root, p1, kernel, vk_dict, ek_dict, pcurrent=[]):$/;" function line:648 +traverseBothTrieu /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/structuralspKernel.py /^def traverseBothTrieu(root, trie2, kernel, vk_dict, ek_dict, pcurrent=[]):$/;" function line:673 +traverseTrie2u /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/structuralspKernel.py /^def traverseTrie2u(root, p1, kernel, vk_dict, ek_dict, pcurrent=[]):$/;" function line:690 +get_shortest_paths /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/structuralspKernel.py /^def get_shortest_paths(G, weight, directed):$/;" function line:748 
+wrapper_getSP_naive /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/structuralspKernel.py /^def wrapper_getSP_naive(weight, directed, itr_item):$/;" function line:783 +get_sps_as_trie /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/structuralspKernel.py /^def get_sps_as_trie(G, weight, directed):$/;" function line:789 +wrapper_getSP_trie /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/structuralspKernel.py /^def wrapper_getSP_trie(weight, directed, itr_item):$/;" function line:830 +treeletkernel /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/treeletKernel.py /^def treeletkernel(*args, $/;" function line:23 +init_worker /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/treeletKernel.py /^ def init_worker(canonkeys_toshare):$/;" function line:105 function:treeletkernel +_treeletkernel_do /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/treeletKernel.py /^def _treeletkernel_do(canonkey1, canonkey2, sub_kernel):$/;" function line:140 +wrapper_treeletkernel_do /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/treeletKernel.py /^def wrapper_treeletkernel_do(sub_kernel, itr):$/;" function line:160 +get_canonkeys /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/treeletKernel.py /^def get_canonkeys(G, node_label, edge_label, labeled, is_directed):$/;" function line:166 +wrapper_get_canonkeys /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/treeletKernel.py /^def wrapper_get_canonkeys(node_label, edge_label, labeled, is_directed, itr_item):$/;" function line:418 +find_paths /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/treeletKernel.py /^def find_paths(G, source_node, length):$/;" function line:424 +find_all_paths /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/treeletKernel.py /^def find_all_paths(G, length, is_directed):$/;" function line:449 
+cyclicpatternkernel /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/unfinished/cyclicPatternKernel.py /^def cyclicpatternkernel(*args, node_label = 'atom', edge_label = 'bond_type', labeled = True, cycle_bound = None):$/;" function line:20 +_cyclicpatternkernel_do /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/unfinished/cyclicPatternKernel.py /^def _cyclicpatternkernel_do(patterns1, patterns2):$/;" function line:63 +get_patterns /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/unfinished/cyclicPatternKernel.py /^def get_patterns(G, node_label = 'atom', edge_label = 'bond_type', labeled = True, cycle_bound = None):$/;" function line:87 +pathkernel /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/unfinished/pathKernel.py /^def pathkernel(*args, node_label='atom', edge_label='bond_type'):$/;" function line:20 +_pathkernel_do_l /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/unfinished/pathKernel.py /^def _pathkernel_do_l(G1, G2, sp1, sp2, node_label, edge_label):$/;" function line:107 +_pathkernel_do_nl /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/unfinished/pathKernel.py /^def _pathkernel_do_nl(G1, G2, sp1, sp2, node_label):$/;" function line:148 +_pathkernel_do_el /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/unfinished/pathKernel.py /^def _pathkernel_do_el(G1, G2, sp1, sp2, edge_label):$/;" function line:171 +_pathkernel_do_unl /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/unfinished/pathKernel.py /^def _pathkernel_do_unl(G1, G2, sp1, sp2):$/;" function line:196 +get_shortest_paths /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/unfinished/pathKernel.py /^def get_shortest_paths(G, weight):$/;" function line:211 +treepatternkernel /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/unfinished/treePatternKernel.py /^def treepatternkernel(*args,$/;" function 
line:21 +_treepatternkernel_do /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/unfinished/treePatternKernel.py /^def _treepatternkernel_do(G1, G2, node_label, edge_label, labeled, kernel_type,$/;" function line:90 +matchingset /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/unfinished/treePatternKernel.py /^ def matchingset(n1, n2):$/;" function line:119 function:_treepatternkernel_do +mset_com /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/unfinished/treePatternKernel.py /^ def mset_com(allpairs, length):$/;" function line:123 function:_treepatternkernel_do.matchingset +kernel_h /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/unfinished/treePatternKernel.py /^ def kernel_h(h):$/;" function line:165 function:_treepatternkernel_do +weisfeilerlehmankernel /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/unfinished/weisfeilerLehmanKernel.py /^def weisfeilerlehmankernel(*args, node_label = 'atom', edge_label = 'bond_type', height = 0, base_kernel = 'subtree'):$/;" function line:18 +_wl_subtreekernel_do /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/unfinished/weisfeilerLehmanKernel.py /^def _wl_subtreekernel_do(Gn, node_label, edge_label, height):$/;" function line:75 +_wl_spkernel_do /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/unfinished/weisfeilerLehmanKernel.py /^def _wl_spkernel_do(Gn, node_label, edge_label, height):$/;" function line:183 +_wl_edgekernel_do /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/unfinished/weisfeilerLehmanKernel.py /^def _wl_edgekernel_do(Gn, node_label, edge_label, height):$/;" function line:264 +_wl_userkernel_do /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/unfinished/weisfeilerLehmanKernel.py /^def _wl_userkernel_do(Gn, node_label, edge_label, height, base_kernel):$/;" function line:340 +untilhpathkernel 
/media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/untilHPathKernel.py /^def untilhpathkernel(*args,$/;" function line:25 +init_worker /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/untilHPathKernel.py /^ def init_worker(trie_toshare):$/;" function line:142 function:untilhpathkernel +init_worker /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/untilHPathKernel.py /^ def init_worker(plist_toshare):$/;" function line:149 function:untilhpathkernel +init_worker /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/untilHPathKernel.py /^ def init_worker(plist_toshare):$/;" function line:156 function:untilhpathkernel +_untilhpathkernel_do_trie /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/untilHPathKernel.py /^def _untilhpathkernel_do_trie(trie1, trie2, k_func):$/;" function line:207 +traverseTrie1t /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/untilHPathKernel.py /^ def traverseTrie1t(root, trie2, setlist, pcurrent=[]):$/;" function line:226 function:_untilhpathkernel_do_trie +traverseTrie2t /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/untilHPathKernel.py /^ def traverseTrie2t(root, trie1, setlist, pcurrent=[]):$/;" function line:244 function:_untilhpathkernel_do_trie +traverseTrie1m /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/untilHPathKernel.py /^ def traverseTrie1m(root, trie2, sumlist, pcurrent=[]):$/;" function line:271 function:_untilhpathkernel_do_trie +traverseTrie2m /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/untilHPathKernel.py /^ def traverseTrie2m(root, trie1, sumlist, pcurrent=[]):$/;" function line:289 function:_untilhpathkernel_do_trie +wrapper_uhpath_do_trie /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/untilHPathKernel.py /^def wrapper_uhpath_do_trie(k_func, itr):$/;" function line:316 +_untilhpathkernel_do_naive 
/media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/untilHPathKernel.py /^def _untilhpathkernel_do_naive(paths1, paths2, k_func):$/;" function line:322 +wrapper_uhpath_do_naive /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/untilHPathKernel.py /^def wrapper_uhpath_do_naive(k_func, itr):$/;" function line:365 +_untilhpathkernel_do_kernelless /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/untilHPathKernel.py /^def _untilhpathkernel_do_kernelless(paths1, paths2, k_func):$/;" function line:371 +wrapper_uhpath_do_kernelless /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/untilHPathKernel.py /^def wrapper_uhpath_do_kernelless(k_func, itr):$/;" function line:414 +find_all_paths_until_length /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/untilHPathKernel.py /^def find_all_paths_until_length(G,$/;" function line:421 +wrapper_find_all_paths_until_length /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/untilHPathKernel.py /^def wrapper_find_all_paths_until_length(length, ds_attrs, node_label, $/;" function line:492 +find_all_path_as_trie /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/untilHPathKernel.py /^def find_all_path_as_trie(G,$/;" function line:501 +traverseGraph /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/untilHPathKernel.py /^ def traverseGraph(root, ptrie, length, G, ds_attrs, node_label, edge_label,$/;" function line:542 function:find_all_path_as_trie +wrapper_find_all_path_as_trie /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/untilHPathKernel.py /^def wrapper_find_all_path_as_trie(length, ds_attrs, node_label, $/;" function line:593 +paths2labelseqs /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/untilHPathKernel.py /^def paths2labelseqs(plist, G, ds_attrs, node_label, edge_label):$/;" function line:601 +weisfeilerlehmankernel 
/media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/weisfeilerLehmanKernel.py /^def weisfeilerlehmankernel(*args, $/;" function line:25 +base_kernel /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/weisfeilerLehmanKernel.py /^ base_kernel = base_kernel.lower()$/;" variable line:74 +Gn /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/weisfeilerLehmanKernel.py /^ Gn = args[0] if len(args) == 1 else [args[0], args[1]] # arrange all graphs in a list$/;" variable line:75 +Gn /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/weisfeilerLehmanKernel.py /^ Gn = [g.copy() for g in Gn]$/;" variable line:76 +ds_attrs /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/weisfeilerLehmanKernel.py /^ ds_attrs = get_dataset_attributes(Gn, attr_names=['node_labeled'], $/;" variable line:77 +node_label /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/weisfeilerLehmanKernel.py /^ node_label=node_label)$/;" variable line:78 +start_time /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/weisfeilerLehmanKernel.py /^ start_time = time.time()$/;" variable line:83 +Kmatrix /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/weisfeilerLehmanKernel.py /^ Kmatrix = _wl_kernel_do(Gn, node_label, edge_label, height, parallel, n_jobs, verbose)$/;" variable line:87 +Kmatrix /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/weisfeilerLehmanKernel.py /^ Kmatrix = _wl_spkernel_do(Gn, node_label, edge_label, height)$/;" variable line:91 +Kmatrix /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/weisfeilerLehmanKernel.py /^ Kmatrix = _wl_edgekernel_do(Gn, node_label, edge_label, height)$/;" variable line:95 +Kmatrix /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/weisfeilerLehmanKernel.py /^ Kmatrix = _wl_userkernel_do(Gn, node_label, edge_label, height, base_kernel)$/;" variable line:99 +run_time 
/media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/weisfeilerLehmanKernel.py /^ run_time = time.time() - start_time$/;" variable line:101 +_wl_kernel_do /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/weisfeilerLehmanKernel.py /^def _wl_kernel_do(Gn, node_label, edge_label, height, parallel, n_jobs, verbose):$/;" function line:109 +wl_iteration /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/weisfeilerLehmanKernel.py /^def wl_iteration(G, node_label):$/;" function line:256 +wrapper_wl_iteration /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/weisfeilerLehmanKernel.py /^def wrapper_wl_iteration(node_label, itr_item):$/;" function line:293 +compute_kernel_matrix /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/weisfeilerLehmanKernel.py /^def compute_kernel_matrix(Kmatrix, all_num_of_each_label, Gn, parallel, n_jobs, verbose):$/;" function line:300 +init_worker /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/weisfeilerLehmanKernel.py /^ def init_worker(alllabels_toshare):$/;" function line:305 function:compute_kernel_matrix +compute_subtree_kernel /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/weisfeilerLehmanKernel.py /^def compute_subtree_kernel(num_of_each_label1, num_of_each_label2, kernel):$/;" function line:319 +wrapper_compute_subtree_kernel /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/weisfeilerLehmanKernel.py /^def wrapper_compute_subtree_kernel(Kmatrix, itr):$/;" function line:333 +_wl_spkernel_do /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/weisfeilerLehmanKernel.py /^def _wl_spkernel_do(Gn, node_label, edge_label, height):$/;" function line:339 +_wl_edgekernel_do /media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/weisfeilerLehmanKernel.py /^def _wl_edgekernel_do(Gn, node_label, edge_label, height):$/;" function line:421 +_wl_userkernel_do 
/media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/weisfeilerLehmanKernel.py /^def _wl_userkernel_do(Gn, node_label, edge_label, height, base_kernel):$/;" function line:498 diff --git a/gklearn/kernels/__init__.py b/gklearn/kernels/__init__.py new file mode 100644 index 0000000..bbd4c48 --- /dev/null +++ b/gklearn/kernels/__init__.py @@ -0,0 +1,8 @@ +# -*-coding:utf-8 -*- +"""gklearn - kernels module +""" + +# info +__version__ = "0.1" +__author__ = "Linlin Jia" +__date__ = "November 2018" diff --git a/pygraph/kernels/commonWalkKernel.py b/gklearn/kernels/commonWalkKernel.py similarity index 98% rename from pygraph/kernels/commonWalkKernel.py rename to gklearn/kernels/commonWalkKernel.py index 9609ad7..8690958 100644 --- a/pygraph/kernels/commonWalkKernel.py +++ b/gklearn/kernels/commonWalkKernel.py @@ -1,9 +1,11 @@ """ @author: linlin -@references: + +@references: + [1] Thomas Gärtner, Peter Flach, and Stefan Wrobel. On graph kernels: - Hardness results and efficient alternatives. Learning Theory and Kernel - Machines, pages 129–143, 2003. + Hardness results and efficient alternatives. Learning Theory and Kernel + Machines, pages 129–143, 2003. """ import sys @@ -15,9 +17,9 @@ import networkx as nx import numpy as np sys.path.insert(0, "../") -from pygraph.utils.utils import direct_product -from pygraph.utils.graphdataset import get_dataset_attributes -from pygraph.utils.parallel import parallel_gm +from gklearn.utils.utils import direct_product +from gklearn.utils.graphdataset import get_dataset_attributes +from gklearn.utils.parallel import parallel_gm def commonwalkkernel(*args, @@ -29,32 +31,33 @@ def commonwalkkernel(*args, n_jobs=None, verbose=True): """Calculate common walk graph kernels between graphs. + Parameters ---------- Gn : List of NetworkX graph List of graphs between which the kernels are calculated. - / + G1, G2 : NetworkX graphs Two graphs between which the kernel is calculated. 
node_label : string Node attribute used as symbolic label. The default node label is 'atom'. edge_label : string Edge attribute used as symbolic label. The default edge label is 'bond_type'. -# n : integer -# Longest length of walks. Only useful when applying the 'brute' method. weight: integer Weight coefficient of different lengths of walks, which represents beta in 'exp' method and gamma in 'geo'. compute_method : string Method used to compute walk kernel. The Following choices are available: + 'exp': method based on exponential serials applied on the direct product graph, as shown in reference [1]. The time complexity is O(n^6) for graphs with n vertices. + 'geo': method based on geometric serials applied on the direct product graph, as shown in reference [1]. The time complexity is O(n^6) for graphs with n vertices. -# 'brute': brute force, simply search for all walks and compare them. + n_jobs : int Number of jobs for parallelization. @@ -64,6 +67,9 @@ def commonwalkkernel(*args, Kernel matrix, each element of which is a common walk kernel between 2 graphs. """ +# n : integer +# Longest length of walks. Only useful when applying the 'brute' method. +# 'brute': brute force, simply search for all walks and compare them. 
compute_method = compute_method.lower() # arrange all graphs in a list Gn = args[0] if len(args) == 1 else [args[0], args[1]] diff --git a/pygraph/kernels/else/rwalk_sym.py b/gklearn/kernels/else/rwalk_sym.py similarity index 99% rename from pygraph/kernels/else/rwalk_sym.py rename to gklearn/kernels/else/rwalk_sym.py index f45cd9a..e9db9fd 100644 --- a/pygraph/kernels/else/rwalk_sym.py +++ b/gklearn/kernels/else/rwalk_sym.py @@ -21,8 +21,8 @@ from scipy.sparse import identity, kron from scipy.sparse.linalg import cg from scipy.optimize import fixed_point -from pygraph.utils.graphdataset import get_dataset_attributes -from pygraph.utils.parallel import parallel_gm +from gklearn.utils.graphdataset import get_dataset_attributes +from gklearn.utils.parallel import parallel_gm def randomwalkkernel(*args, # params for all method. diff --git a/pygraph/kernels/else/sp_sym.py b/gklearn/kernels/else/sp_sym.py similarity index 97% rename from pygraph/kernels/else/sp_sym.py rename to gklearn/kernels/else/sp_sym.py index 649c65f..0da15ee 100644 --- a/pygraph/kernels/else/sp_sym.py +++ b/gklearn/kernels/else/sp_sym.py @@ -16,9 +16,9 @@ from tqdm import tqdm import networkx as nx import numpy as np -from pygraph.utils.utils import getSPGraph -from pygraph.utils.graphdataset import get_dataset_attributes -from pygraph.utils.parallel import parallel_gm +from gklearn.utils.utils import getSPGraph +from gklearn.utils.graphdataset import get_dataset_attributes +from gklearn.utils.parallel import parallel_gm sys.path.insert(0, "../") def spkernel(*args, diff --git a/pygraph/kernels/else/ssp_sym.py b/gklearn/kernels/else/ssp_sym.py similarity index 99% rename from pygraph/kernels/else/ssp_sym.py rename to gklearn/kernels/else/ssp_sym.py index 2ce50ca..d0cf9ca 100644 --- a/pygraph/kernels/else/ssp_sym.py +++ b/gklearn/kernels/else/ssp_sym.py @@ -16,8 +16,8 @@ from tqdm import tqdm import networkx as nx import numpy as np -from pygraph.utils.graphdataset import get_dataset_attributes 
-from pygraph.utils.parallel import parallel_gm +from gklearn.utils.graphdataset import get_dataset_attributes +from gklearn.utils.parallel import parallel_gm sys.path.insert(0, "../") diff --git a/pygraph/kernels/marginalizedKernel.py b/gklearn/kernels/marginalizedKernel.py similarity index 98% rename from pygraph/kernels/marginalizedKernel.py rename to gklearn/kernels/marginalizedKernel.py index 8b5fdf6..43b1809 100644 --- a/pygraph/kernels/marginalizedKernel.py +++ b/gklearn/kernels/marginalizedKernel.py @@ -1,9 +1,12 @@ """ @author: linlin + @references: + [1] H. Kashima, K. Tsuda, and A. Inokuchi. Marginalized kernels between labeled graphs. In Proceedings of the 20th International Conference on Machine Learning, Washington, DC, United States, 2003. + [2] Pierre Mahé, Nobuhisa Ueda, Tatsuya Akutsu, Jean-Luc Perret, and Jean-Philippe Vert. Extensions of marginalized graph kernels. In Proceedings of the twenty-first international conference on Machine @@ -21,10 +24,10 @@ tqdm.monitor_interval = 0 import networkx as nx import numpy as np -from pygraph.utils.kernels import deltakernel -from pygraph.utils.utils import untotterTransformation -from pygraph.utils.graphdataset import get_dataset_attributes -from pygraph.utils.parallel import parallel_gm +from gklearn.utils.kernels import deltakernel +from gklearn.utils.utils import untotterTransformation +from gklearn.utils.graphdataset import get_dataset_attributes +from gklearn.utils.parallel import parallel_gm sys.path.insert(0, "../") @@ -42,20 +45,26 @@ def marginalizedkernel(*args, ---------- Gn : List of NetworkX graph List of graphs between which the kernels are calculated. - / + G1, G2 : NetworkX graphs Two graphs between which the kernel is calculated. + node_label : string Node attribute used as symbolic label. The default node label is 'atom'. + edge_label : string Edge attribute used as symbolic label. The default edge label is 'bond_type'. 
+ p_quit : integer The termination probability in the random walks generating step. + n_iteration : integer Time of iterations to calculate R_inf. + remove_totters : boolean Whether to remove totterings by method introduced in [2]. The default value is False. + n_jobs : int Number of jobs for parallelization. @@ -294,4 +303,4 @@ def wrapper_marg_do(node_label, edge_label, p_quit, n_iteration, itr): def wrapper_untotter(Gn, node_label, edge_label, i): - return i, untotterTransformation(Gn[i], node_label, edge_label) \ No newline at end of file + return i, untotterTransformation(Gn[i], node_label, edge_label) diff --git a/pygraph/kernels/randomWalkKernel.py b/gklearn/kernels/randomWalkKernel.py similarity index 99% rename from pygraph/kernels/randomWalkKernel.py rename to gklearn/kernels/randomWalkKernel.py index 568f5c5..2299403 100644 --- a/pygraph/kernels/randomWalkKernel.py +++ b/gklearn/kernels/randomWalkKernel.py @@ -1,6 +1,9 @@ """ @author: linlin -@references: S Vichy N Vishwanathan, Nicol N Schraudolph, Risi Kondor, and Karsten M Borgwardt. Graph kernels. Journal of Machine Learning Research, 11(Apr):1201–1242, 2010. + +@references: + + [1] S Vichy N Vishwanathan, Nicol N Schraudolph, Risi Kondor, and Karsten M Borgwardt. Graph kernels. Journal of Machine Learning Research, 11(Apr):1201–1242, 2010. """ import sys @@ -15,8 +18,8 @@ from scipy.sparse import identity, kron from scipy.sparse.linalg import cg from scipy.optimize import fixed_point -from pygraph.utils.graphdataset import get_dataset_attributes -from pygraph.utils.parallel import parallel_gm +from gklearn.utils.graphdataset import get_dataset_attributes +from gklearn.utils.parallel import parallel_gm def randomwalkkernel(*args, # params for all method. @@ -35,31 +38,42 @@ def randomwalkkernel(*args, n_jobs=None, verbose=True): """Calculate random walk graph kernels. + Parameters ---------- Gn : List of NetworkX graph List of graphs between which the kernels are calculated. 
- / + G1, G2 : NetworkX graphs Two graphs between which the kernel is calculated. + compute_method : string Method used to compute kernel. The Following choices are available: + 'sylvester' - Sylvester equation method. + 'conjugate' - conjugate gradient method. + 'fp' - fixed-point iterations. + 'spectral' - spectral decomposition. + weight : float A constant weight set for random walks of length h. + p : None Initial probability distribution on the unlabeled direct product graph of two graphs. It is set to be uniform over all vertices in the direct product graph. + q : None Stopping probability distribution on the unlabeled direct product graph of two graphs. It is set to be uniform over all vertices in the direct product graph. - edge_weight: float + + edge_weight : float + Edge attribute name corresponding to the edge weight. node_kernels: dict @@ -71,6 +85,7 @@ def randomwalkkernel(*args, dimension array (n_samples, n_features). Each function returns a number as the kernel value. Ignored when nodes are unlabeled. This argument is designated to conjugate gradient method and fixed-point iterations. + edge_kernels: dict A dictionary of kernel functions for edges, including 3 items: 'symb' for symbolic edge labels, 'nsymb' for non-symbolic edge labels, 'mix' @@ -80,10 +95,12 @@ def randomwalkkernel(*args, dimension array (n_samples, n_features). Each function returns a number as the kernel value. Ignored when edges are unlabeled. This argument is designated to conjugate gradient method and fixed-point iterations. + node_label: string Node attribute used as label. The default node label is atom. This argument is designated to conjugate gradient method and fixed-point iterations. + edge_label : string Edge attribute used as label. The default edge label is bond_type. 
This argument is designated to conjugate gradient method and fixed-point @@ -889,4 +906,4 @@ def computeW(g1, g2, vk_dict, ds_attrs, edge_kernels, edge_label): e1[1] * nx.number_of_nodes(g2) + e2[0]) w_times[w_idx2[0], w_idx2[1]] = w_times[w_idx[0], w_idx[1]] w_times[w_idx2[1], w_idx2[0]] = w_times[w_idx[0], w_idx[1]] - return w_times, w_dim \ No newline at end of file + return w_times, w_dim diff --git a/pygraph/kernels/spKernel.py b/gklearn/kernels/spKernel.py similarity index 88% rename from pygraph/kernels/spKernel.py rename to gklearn/kernels/spKernel.py index 17fc9d9..74f6c55 100644 --- a/pygraph/kernels/spKernel.py +++ b/gklearn/kernels/spKernel.py @@ -1,7 +1,10 @@ """ @author: linlin -@references: Borgwardt KM, Kriegel HP. Shortest-path kernels on graphs. InData -Mining, Fifth IEEE International Conference on 2005 Nov 27 (pp. 8-pp). IEEE. + +@references: + + [1] Borgwardt KM, Kriegel HP. Shortest-path kernels on graphs. InData + Mining, Fifth IEEE International Conference on 2005 Nov 27 (pp. 8-pp). IEEE. """ import sys @@ -14,15 +17,16 @@ from tqdm import tqdm import networkx as nx import numpy as np -from pygraph.utils.utils import getSPGraph -from pygraph.utils.graphdataset import get_dataset_attributes -from pygraph.utils.parallel import parallel_gm +from gklearn.utils.utils import getSPGraph +from gklearn.utils.graphdataset import get_dataset_attributes +from gklearn.utils.parallel import parallel_gm sys.path.insert(0, "../") def spkernel(*args, node_label='atom', edge_weight=None, node_kernels=None, + parallel='imap_unordered', n_jobs=None, verbose=True): """Calculate shortest-path kernels between graphs. @@ -31,13 +35,16 @@ def spkernel(*args, ---------- Gn : List of NetworkX graph List of graphs between which the kernels are calculated. - / + G1, G2 : NetworkX graphs Two graphs between which the kernel is calculated. + node_label : string Node attribute used as label. The default node label is atom. 
+ edge_weight : string Edge attribute name corresponding to the edge weight. + node_kernels : dict A dictionary of kernel functions for nodes, including 3 items: 'symb' for symbolic node labels, 'nsymb' for non-symbolic node labels, 'mix' @@ -46,6 +53,7 @@ def spkernel(*args, non-symbolic label for each the two nodes. Each label is in form of 2-D dimension array (n_samples, n_features). Each function returns an number as the kernel value. Ignored when nodes are unlabeled. + n_jobs : int Number of jobs for parallelization. @@ -95,28 +103,31 @@ def spkernel(*args, start_time = time.time() - pool = Pool(n_jobs) - # get shortest path graphs of Gn - getsp_partial = partial(wrapper_getSPGraph, weight) - itr = zip(Gn, range(0, len(Gn))) - if len(Gn) < 100 * n_jobs: -# # use default chunksize as pool.map when iterable is less than 100 -# chunksize, extra = divmod(len(Gn), n_jobs * 4) -# if extra: -# chunksize += 1 - chunksize = int(len(Gn) / n_jobs) + 1 - else: - chunksize = 100 - if verbose: - iterator = tqdm(pool.imap_unordered(getsp_partial, itr, chunksize), - desc='getting sp graphs', file=sys.stdout) - else: - iterator = pool.imap_unordered(getsp_partial, itr, chunksize) - for i, g in iterator: - Gn[i] = g - pool.close() - pool.join() + if parallel == 'imap_unordered': + pool = Pool(n_jobs) + # get shortest path graphs of Gn + getsp_partial = partial(wrapper_getSPGraph, weight) + itr = zip(Gn, range(0, len(Gn))) + if len(Gn) < 100 * n_jobs: + # # use default chunksize as pool.map when iterable is less than 100 + # chunksize, extra = divmod(len(Gn), n_jobs * 4) + # if extra: + # chunksize += 1 + chunksize = int(len(Gn) / n_jobs) + 1 + else: + chunksize = 100 + if verbose: + iterator = tqdm(pool.imap_unordered(getsp_partial, itr, chunksize), + desc='getting sp graphs', file=sys.stdout) + else: + iterator = pool.imap_unordered(getsp_partial, itr, chunksize) + for i, g in iterator: + Gn[i] = g + pool.close() + pool.join() + elif parallel is None: + pass # # ---- direct 
running, normally use single CPU core. ---- # for i in tqdm(range(len(Gn)), desc='getting sp graphs', file=sys.stdout): # i, Gn[i] = wrapper_getSPGraph(weight, (Gn[i], i)) @@ -311,4 +322,4 @@ def wrapper_getSPGraph(weight, itr_item): g = itr_item[0] i = itr_item[1] return i, getSPGraph(g, edge_weight=weight) - # return i, nx.floyd_warshall_numpy(g, weight=weight) \ No newline at end of file + # return i, nx.floyd_warshall_numpy(g, weight=weight) diff --git a/pygraph/kernels/structuralspKernel.py b/gklearn/kernels/structuralspKernel.py similarity index 98% rename from pygraph/kernels/structuralspKernel.py rename to gklearn/kernels/structuralspKernel.py index ba2f86a..3a2970a 100644 --- a/pygraph/kernels/structuralspKernel.py +++ b/gklearn/kernels/structuralspKernel.py @@ -4,8 +4,11 @@ Created on Thu Sep 27 10:56:23 2018 @author: linlin -@references: Suard F, Rakotomamonjy A, Bensrhair A. Kernel on Bag of Paths For -Measuring Similarity of Shapes. InESANN 2007 Apr 25 (pp. 355-360). + +@references: + + [1] Suard F, Rakotomamonjy A, Bensrhair A. Kernel on Bag of Paths For + Measuring Similarity of Shapes. InESANN 2007 Apr 25 (pp. 355-360). """ import sys @@ -18,9 +21,9 @@ from tqdm import tqdm import networkx as nx import numpy as np -from pygraph.utils.graphdataset import get_dataset_attributes -from pygraph.utils.parallel import parallel_gm -from pygraph.utils.trie import Trie +from gklearn.utils.graphdataset import get_dataset_attributes +from gklearn.utils.parallel import parallel_gm +from gklearn.utils.trie import Trie sys.path.insert(0, "../") @@ -32,8 +35,8 @@ def structuralspkernel(*args, node_kernels=None, edge_kernels=None, compute_method='naive', -# parallel='imap_unordered', - parallel=None, + parallel='imap_unordered', +# parallel=None, n_jobs=None, verbose=True): """Calculate mean average structural shortest path kernels between graphs. 
@@ -42,16 +45,20 @@ def structuralspkernel(*args, ---------- Gn : List of NetworkX graph List of graphs between which the kernels are calculated. - / + G1, G2 : NetworkX graphs Two graphs between which the kernel is calculated. + node_label : string Node attribute used as label. The default node label is atom. + edge_weight : string Edge attribute name corresponding to the edge weight. Applied for the computation of the shortest paths. + edge_label : string Edge attribute used as label. The default edge label is bond_type. + node_kernels : dict A dictionary of kernel functions for nodes, including 3 items: 'symb' for symbolic node labels, 'nsymb' for non-symbolic node labels, 'mix' @@ -60,6 +67,7 @@ def structuralspkernel(*args, non-symbolic label for each the two nodes. Each label is in form of 2-D dimension array (n_samples, n_features). Each function returns a number as the kernel value. Ignored when nodes are unlabeled. + edge_kernels : dict A dictionary of kernel functions for edges, including 3 items: 'symb' for symbolic edge labels, 'nsymb' for non-symbolic edge labels, 'mix' @@ -68,11 +76,15 @@ def structuralspkernel(*args, non-symbolic label for each the two edges. Each label is in form of 2-D dimension array (n_samples, n_features). Each function returns a number as the kernel value. Ignored when edges are unlabeled. + compute_method : string Computation method to store the shortest paths and compute the graph kernel. The Following choices are available: + 'trie': store paths as tries. + 'naive': store paths to lists. + n_jobs : int Number of jobs for parallelization. @@ -138,7 +150,7 @@ def structuralspkernel(*args, pool.close() pool.join() # ---- direct running, normally use single CPU core. 
---- - elif parallel == None: + elif parallel is None: splist = [] if verbose: iterator = tqdm(Gn, desc='getting sp graphs', file=sys.stdout) @@ -200,7 +212,7 @@ def structuralspkernel(*args, parallel_gm(do_partial, Kmatrix, Gn, init_worker=init_worker, glbv=(splist, Gn), n_jobs=n_jobs, verbose=verbose) # ---- direct running, normally use single CPU core. ---- - elif parallel == None: + elif parallel is None: from itertools import combinations_with_replacement itr = combinations_with_replacement(range(0, len(Gn)), 2) if verbose: @@ -849,4 +861,4 @@ def get_sps_as_trie(G, weight, directed): def wrapper_getSP_trie(weight, directed, itr_item): g = itr_item[0] i = itr_item[1] - return i, get_sps_as_trie(g, weight, directed) \ No newline at end of file + return i, get_sps_as_trie(g, weight, directed) diff --git a/pygraph/kernels/treeletKernel.py b/gklearn/kernels/treeletKernel.py similarity index 99% rename from pygraph/kernels/treeletKernel.py rename to gklearn/kernels/treeletKernel.py index 0417752..e003823 100644 --- a/pygraph/kernels/treeletKernel.py +++ b/gklearn/kernels/treeletKernel.py @@ -1,6 +1,8 @@ """ @author: linlin + @references: + [1] Gaüzère B, Brun L, Villemin D. Two new graphs kernels in chemoinformatics. Pattern Recognition Letters. 2012 Nov 1;33(15):2038-47. """ @@ -17,8 +19,8 @@ from tqdm import tqdm import networkx as nx import numpy as np -from pygraph.utils.graphdataset import get_dataset_attributes -from pygraph.utils.parallel import parallel_gm +from gklearn.utils.graphdataset import get_dataset_attributes +from gklearn.utils.parallel import parallel_gm def treeletkernel(*args, sub_kernel, @@ -33,22 +35,29 @@ def treeletkernel(*args, ---------- Gn : List of NetworkX graph List of graphs between which the kernels are calculated. - / + G1, G2 : NetworkX graphs Two graphs between which the kernel is calculated. + sub_kernel : function The sub-kernel between 2 real number vectors. Each vector counts the numbers of isomorphic treelets in a graph. 
+ node_label : string Node attribute used as label. The default node label is atom. + edge_label : string Edge attribute used as label. The default edge label is bond_type. + parallel : string/None Which paralleliztion method is applied to compute the kernel. The Following choices are available: + 'imap_unordered': use Python's multiprocessing.Pool.imap_unordered method. + None: no parallelization is applied. + n_jobs : int Number of jobs for parallelization. The default is to use all computational cores. This argument is only valid when one of the @@ -477,4 +486,4 @@ def find_all_paths(G, length, is_directed): break all_paths = list(filter(lambda a: a != [], all_paths)) - return all_paths \ No newline at end of file + return all_paths diff --git a/pygraph/kernels/unfinished/cyclicPatternKernel.py b/gklearn/kernels/unfinished/cyclicPatternKernel.py similarity index 100% rename from pygraph/kernels/unfinished/cyclicPatternKernel.py rename to gklearn/kernels/unfinished/cyclicPatternKernel.py diff --git a/pygraph/kernels/unfinished/pathKernel.py b/gklearn/kernels/unfinished/pathKernel.py similarity index 98% rename from pygraph/kernels/unfinished/pathKernel.py rename to gklearn/kernels/unfinished/pathKernel.py index 73db1d9..3511f2c 100644 --- a/pygraph/kernels/unfinished/pathKernel.py +++ b/gklearn/kernels/unfinished/pathKernel.py @@ -13,8 +13,8 @@ from tqdm import tqdm import networkx as nx import numpy as np -from pygraph.kernels.deltaKernel import deltakernel -from pygraph.utils.graphdataset import get_dataset_attributes +from gklearn.kernels.deltaKernel import deltakernel +from gklearn.utils.graphdataset import get_dataset_attributes def pathkernel(*args, node_label='atom', edge_label='bond_type'): diff --git a/pygraph/kernels/unfinished/treePatternKernel.py b/gklearn/kernels/unfinished/treePatternKernel.py similarity index 99% rename from pygraph/kernels/unfinished/treePatternKernel.py rename to gklearn/kernels/unfinished/treePatternKernel.py index 
c40187c..5e90a1d 100644 --- a/pygraph/kernels/unfinished/treePatternKernel.py +++ b/gklearn/kernels/unfinished/treePatternKernel.py @@ -15,7 +15,7 @@ from collections import Counter from tqdm import tqdm tqdm.monitor_interval = 0 -from pygraph.utils.utils import untotterTransformation +from gklearn.utils.utils import untotterTransformation def treepatternkernel(*args, diff --git a/pygraph/kernels/unfinished/weisfeilerLehmanKernel.py b/gklearn/kernels/unfinished/weisfeilerLehmanKernel.py similarity index 99% rename from pygraph/kernels/unfinished/weisfeilerLehmanKernel.py rename to gklearn/kernels/unfinished/weisfeilerLehmanKernel.py index 8c520a5..f5b903c 100644 --- a/pygraph/kernels/unfinished/weisfeilerLehmanKernel.py +++ b/gklearn/kernels/unfinished/weisfeilerLehmanKernel.py @@ -13,7 +13,7 @@ import networkx as nx import numpy as np import time -from pygraph.kernels.pathKernel import pathkernel +from gklearn.kernels.pathKernel import pathkernel def weisfeilerlehmankernel(*args, node_label = 'atom', edge_label = 'bond_type', height = 0, base_kernel = 'subtree'): """Calculate Weisfeiler-Lehman kernels between graphs. @@ -199,7 +199,7 @@ def _wl_spkernel_do(Gn, node_label, edge_label, height): Kmatrix : Numpy matrix Kernel matrix, each element of which is the Weisfeiler-Lehman kernel between 2 praphs. """ - from pygraph.utils.utils import getSPGraph + from gklearn.utils.utils import getSPGraph # init. height = int(height) diff --git a/pygraph/kernels/untilHPathKernel.py b/gklearn/kernels/untilHPathKernel.py similarity index 81% rename from pygraph/kernels/untilHPathKernel.py rename to gklearn/kernels/untilHPathKernel.py index 6c391f1..f8030f9 100644 --- a/pygraph/kernels/untilHPathKernel.py +++ b/gklearn/kernels/untilHPathKernel.py @@ -1,8 +1,11 @@ """ @author: linlin -@references: Liva Ralaivola, Sanjay J Swamidass, Hiroto Saigo, and Pierre -Baldi. Graph kernels for chemical informatics. Neural networks, -18(8):1093–1110, 2005. 
+ +@references: + + [1] Liva Ralaivola, Sanjay J Swamidass, Hiroto Saigo, and Pierre + Baldi. Graph kernels for chemical informatics. Neural networks, + 18(8):1093–1110, 2005. """ import sys @@ -17,9 +20,9 @@ from tqdm import tqdm import networkx as nx import numpy as np -from pygraph.utils.graphdataset import get_dataset_attributes -from pygraph.utils.parallel import parallel_gm -from pygraph.utils.trie import Trie +from gklearn.utils.graphdataset import get_dataset_attributes +from gklearn.utils.parallel import parallel_gm +from gklearn.utils.trie import Trie def untilhpathkernel(*args, @@ -28,6 +31,7 @@ def untilhpathkernel(*args, depth=10, k_func='MinMax', compute_method='trie', + parallel=True, n_jobs=None, verbose=True): """Calculate path graph kernels up to depth/hight h between graphs. @@ -36,27 +40,38 @@ def untilhpathkernel(*args, ---------- Gn : List of NetworkX graph List of graphs between which the kernels are calculated. - / + G1, G2 : NetworkX graphs Two graphs between which the kernel is calculated. + node_label : string Node attribute used as label. The default node label is atom. + edge_label : string Edge attribute used as label. The default edge label is bond_type. + depth : integer Depth of search. Longest length of paths. + k_func : function A kernel function applied using different notions of fingerprint similarity, defining the type of feature map and normalization method applied for the graph kernel. The Following choices are available: + 'MinMax': use the MiniMax kernel and counting feature map. + 'tanimoto': use the Tanimoto kernel and binary feature map. + None: no sub-kernel is used, the kernel is computed directly. + compute_method : string Computation method to store paths and compute the graph kernel. The Following choices are available: + 'trie': store paths as tries. + 'naive': store paths to lists. + n_jobs : int Number of jobs for parallelization. 
@@ -86,34 +101,35 @@ def untilhpathkernel(*args, start_time = time.time() - # ---- use pool.imap_unordered to parallel and track progress. ---- - # get all paths of all graphs before calculating kernels to save time, - # but this may cost a lot of memory for large datasets. - pool = Pool(n_jobs) - itr = zip(Gn, range(0, len(Gn))) - if len(Gn) < 100 * n_jobs: - chunksize = int(len(Gn) / n_jobs) + 1 - else: - chunksize = 100 - all_paths = [[] for _ in range(len(Gn))] - if compute_method == 'trie' and k_func != None: - getps_partial = partial(wrapper_find_all_path_as_trie, depth, - ds_attrs, node_label, edge_label) - elif compute_method != 'trie' and k_func != None: - getps_partial = partial(wrapper_find_all_paths_until_length, depth, - ds_attrs, node_label, edge_label, True) - else: - getps_partial = partial(wrapper_find_all_paths_until_length, depth, - ds_attrs, node_label, edge_label, False) - if verbose: - iterator = tqdm(pool.imap_unordered(getps_partial, itr, chunksize), - desc='getting paths', file=sys.stdout) - else: - iterator = pool.imap_unordered(getps_partial, itr, chunksize) - for i, ps in iterator: - all_paths[i] = ps - pool.close() - pool.join() + if parallel == 'imap_unordered': + # ---- use pool.imap_unordered to parallel and track progress. ---- + # get all paths of all graphs before calculating kernels to save time, + # but this may cost a lot of memory for large datasets. 
+ pool = Pool(n_jobs) + itr = zip(Gn, range(0, len(Gn))) + if len(Gn) < 100 * n_jobs: + chunksize = int(len(Gn) / n_jobs) + 1 + else: + chunksize = 100 + all_paths = [[] for _ in range(len(Gn))] + if compute_method == 'trie' and k_func != None: + getps_partial = partial(wrapper_find_all_path_as_trie, depth, + ds_attrs, node_label, edge_label) + elif compute_method != 'trie' and k_func != None: + getps_partial = partial(wrapper_find_all_paths_until_length, depth, + ds_attrs, node_label, edge_label, True) + else: + getps_partial = partial(wrapper_find_all_paths_until_length, depth, + ds_attrs, node_label, edge_label, False) + if verbose: + iterator = tqdm(pool.imap_unordered(getps_partial, itr, chunksize), + desc='getting paths', file=sys.stdout) + else: + iterator = pool.imap_unordered(getps_partial, itr, chunksize) + for i, ps in iterator: + all_paths[i] = ps + pool.close() + pool.join() # for g in Gn: # if compute_method == 'trie' and k_func != None: @@ -138,62 +154,76 @@ def untilhpathkernel(*args, ## all_paths[i] = ps ## print(time.time() - ttt) - if compute_method == 'trie' and k_func != None: - def init_worker(trie_toshare): - global G_trie - G_trie = trie_toshare - do_partial = partial(wrapper_uhpath_do_trie, k_func) - parallel_gm(do_partial, Kmatrix, Gn, init_worker=init_worker, - glbv=(all_paths,), n_jobs=n_jobs, verbose=verbose) - elif compute_method != 'trie' and k_func != None: - def init_worker(plist_toshare): - global G_plist - G_plist = plist_toshare - do_partial = partial(wrapper_uhpath_do_naive, k_func) - parallel_gm(do_partial, Kmatrix, Gn, init_worker=init_worker, - glbv=(all_paths,), n_jobs=n_jobs, verbose=verbose) - else: - def init_worker(plist_toshare): - global G_plist - G_plist = plist_toshare - do_partial = partial(wrapper_uhpath_do_kernelless, ds_attrs, edge_kernels) - parallel_gm(do_partial, Kmatrix, Gn, init_worker=init_worker, - glbv=(all_paths,), n_jobs=n_jobs, verbose=verbose) + if compute_method == 'trie' and k_func != None: + def 
init_worker(trie_toshare): + global G_trie + G_trie = trie_toshare + do_partial = partial(wrapper_uhpath_do_trie, k_func) + parallel_gm(do_partial, Kmatrix, Gn, init_worker=init_worker, + glbv=(all_paths,), n_jobs=n_jobs, verbose=verbose) + elif compute_method != 'trie' and k_func != None: + def init_worker(plist_toshare): + global G_plist + G_plist = plist_toshare + do_partial = partial(wrapper_uhpath_do_naive, k_func) + parallel_gm(do_partial, Kmatrix, Gn, init_worker=init_worker, + glbv=(all_paths,), n_jobs=n_jobs, verbose=verbose) + else: + def init_worker(plist_toshare): + global G_plist + G_plist = plist_toshare + do_partial = partial(wrapper_uhpath_do_kernelless, ds_attrs, edge_kernels) + parallel_gm(do_partial, Kmatrix, Gn, init_worker=init_worker, + glbv=(all_paths,), n_jobs=n_jobs, verbose=verbose) + elif parallel == None: + from pympler import asizeof + # ---- direct running, normally use single CPU core. ---- +# print(asizeof.asized(all_paths, detail=1).format()) -# # ---- direct running, normally use single CPU core. 
---- -# all_paths = [ -# find_all_paths_until_length( -# Gn[i], -# depth, -# ds_attrs, -# node_label=node_label, -# edge_label=edge_label) for i in tqdm( -# range(0, len(Gn)), desc='getting paths', file=sys.stdout) -# ] -# -# if compute_method == 'trie': -# pbar = tqdm( -# total=((len(Gn) + 1) * len(Gn) / 2), -# desc='calculating kernels', -# file=sys.stdout) -# for i in range(0, len(Gn)): -# for j in range(i, len(Gn)): -# Kmatrix[i][j] = _untilhpathkernel_do_trie(all_paths[i], -# all_paths[j], k_func) -# Kmatrix[j][i] = Kmatrix[i][j] -# pbar.update(1) -# else: -# pbar = tqdm( -# total=((len(Gn) + 1) * len(Gn) / 2), -# desc='calculating kernels', -# file=sys.stdout) -# for i in range(0, len(Gn)): -# for j in range(i, len(Gn)): -# Kmatrix[i][j] = _untilhpathkernel_do_naive(all_paths[i], all_paths[j], -# k_func) -# Kmatrix[j][i] = Kmatrix[i][j] -# pbar.update(1) + if compute_method == 'trie': + all_paths = [ + find_all_path_as_trie(Gn[i], + depth, + ds_attrs, + node_label=node_label, + edge_label=edge_label) for i in tqdm( + range(0, len(Gn)), desc='getting paths', file=sys.stdout) + ] +# sizeof_allpaths = asizeof.asizeof(all_paths) +# print(sizeof_allpaths) + pbar = tqdm( + total=((len(Gn) + 1) * len(Gn) / 2), + desc='calculating kernels', + file=sys.stdout) + for i in range(0, len(Gn)): + for j in range(i, len(Gn)): + Kmatrix[i][j] = _untilhpathkernel_do_trie(all_paths[i], + all_paths[j], k_func) + Kmatrix[j][i] = Kmatrix[i][j] + pbar.update(1) + else: + all_paths = [ + find_all_paths_until_length( + Gn[i], + depth, + ds_attrs, + node_label=node_label, + edge_label=edge_label) for i in tqdm( + range(0, len(Gn)), desc='getting paths', file=sys.stdout) + ] +# sizeof_allpaths = asizeof.asizeof(all_paths) +# print(sizeof_allpaths) + pbar = tqdm( + total=((len(Gn) + 1) * len(Gn) / 2), + desc='calculating kernels', + file=sys.stdout) + for i in range(0, len(Gn)): + for j in range(i, len(Gn)): + Kmatrix[i][j] = _untilhpathkernel_do_naive(all_paths[i], all_paths[j], + 
k_func) + Kmatrix[j][i] = Kmatrix[i][j] + pbar.update(1) run_time = time.time() - start_time if verbose: @@ -201,7 +231,7 @@ def untilhpathkernel(*args, % (depth, len(Gn), run_time)) # print(Kmatrix[0][0:10]) - return Kmatrix, run_time + return Kmatrix, run_time, sizeof_allpaths def _untilhpathkernel_do_trie(trie1, trie2, k_func): @@ -484,7 +514,7 @@ def find_all_paths_until_length(G, # consider labels # print(paths2labelseqs(all_paths, G, ds_attrs, node_label, edge_label)) - print() +# print() return (paths2labelseqs(all_paths, G, ds_attrs, node_label, edge_label) if tolabelseqs else all_paths) diff --git a/pygraph/kernels/weisfeilerLehmanKernel.py b/gklearn/kernels/weisfeilerLehmanKernel.py similarity index 99% rename from pygraph/kernels/weisfeilerLehmanKernel.py rename to gklearn/kernels/weisfeilerLehmanKernel.py index 93ad3ae..4a223a4 100644 --- a/pygraph/kernels/weisfeilerLehmanKernel.py +++ b/gklearn/kernels/weisfeilerLehmanKernel.py @@ -1,6 +1,8 @@ """ @author: linlin + @references: + [1] Shervashidze N, Schweitzer P, Leeuwen EJ, Mehlhorn K, Borgwardt KM. Weisfeiler-lehman graph kernels. Journal of Machine Learning Research. 2011;12(Sep):2539-61. @@ -17,9 +19,9 @@ from tqdm import tqdm import networkx as nx import numpy as np -#from pygraph.kernels.pathKernel import pathkernel -from pygraph.utils.graphdataset import get_dataset_attributes -from pygraph.utils.parallel import parallel_gm +#from gklearn.kernels.pathKernel import pathkernel +from gklearn.utils.graphdataset import get_dataset_attributes +from gklearn.utils.parallel import parallel_gm # @todo: support edge kernel, sp kernel, user-defined kernel. def weisfeilerlehmankernel(*args, @@ -36,26 +38,27 @@ def weisfeilerlehmankernel(*args, ---------- Gn : List of NetworkX graph List of graphs between which the kernels are calculated. - / + G1, G2 : NetworkX graphs Two graphs between which the kernel is calculated. + node_label : string Node attribute used as label. The default node label is atom. 
+ edge_label : string Edge attribute used as label. The default edge label is bond_type. + height : int Subtree height. + base_kernel : string Base kernel used in each iteration of WL kernel. Only default 'subtree' kernel can be applied for now. -# The default base -# kernel is subtree kernel. For user-defined kernel, base_kernel is the -# name of the base kernel function used in each iteration of WL kernel. -# This function returns a Numpy matrix, each element of which is the -# user-defined Weisfeiler-Lehman kernel between 2 praphs. + parallel : None Which paralleliztion method is applied to compute the kernel. No parallelization can be applied for now. + n_jobs : int Number of jobs for parallelization. The default is to use all computational cores. This argument is only valid when one of the @@ -70,6 +73,11 @@ def weisfeilerlehmankernel(*args, ----- This function now supports WL subtree kernel only. """ +# The default base +# kernel is subtree kernel. For user-defined kernel, base_kernel is the +# name of the base kernel function used in each iteration of WL kernel. +# This function returns a Numpy matrix, each element of which is the +# user-defined Weisfeiler-Lehman kernel between 2 praphs. # pre-process base_kernel = base_kernel.lower() Gn = args[0] if len(args) == 1 else [args[0], args[1]] # arrange all graphs in a list @@ -356,7 +364,7 @@ def _wl_spkernel_do(Gn, node_label, edge_label, height): Kernel matrix, each element of which is the Weisfeiler-Lehman kernel between 2 praphs. """ pass - from pygraph.utils.utils import getSPGraph + from gklearn.utils.utils import getSPGraph # init. 
height = int(height) diff --git a/preimage/find_best_k.py b/gklearn/preimage/find_best_k.py similarity index 99% rename from preimage/find_best_k.py rename to gklearn/preimage/find_best_k.py index ed1ef44..6ef87d6 100644 --- a/preimage/find_best_k.py +++ b/gklearn/preimage/find_best_k.py @@ -11,7 +11,7 @@ import csv import sys sys.path.insert(0, "../") -from pygraph.utils.graphfiles import loadDataset +from gklearn.utils.graphfiles import loadDataset from preimage.test_k_closest_graphs import median_on_k_closest_graphs def find_best_k(): diff --git a/preimage/fitDistance.py b/gklearn/preimage/fitDistance.py similarity index 70% rename from preimage/fitDistance.py rename to gklearn/preimage/fitDistance.py index 458a102..90db40b 100644 --- a/preimage/fitDistance.py +++ b/gklearn/preimage/fitDistance.py @@ -20,16 +20,16 @@ import cvxpy as cp import sys sys.path.insert(0, "../") -from preimage.ged import GED, get_nb_edit_operations, get_nb_edit_operations_letter +from preimage.ged import GED, get_nb_edit_operations, get_nb_edit_operations_letter, get_nb_edit_operations_nonsymbolic from preimage.utils import kernel_distance_matrix def fit_GED_to_kernel_distance(Gn, node_label, edge_label, gkernel, itr_max, params_ged={'lib': 'gedlibpy', 'cost': 'CONSTANT', 'method': 'IPFP', 'stabilizer': None}, init_costs=[3, 3, 1, 3, 3, 1], - dataset='monoterpenoides', + dataset='monoterpenoides', Kmatrix=None, parallel=True): - dataset = dataset.lower() +# dataset = dataset.lower() # c_vi, c_vr, c_vs, c_ei, c_er, c_es or parts of them. # random.seed(1) @@ -44,7 +44,8 @@ def fit_GED_to_kernel_distance(Gn, node_label, edge_label, gkernel, itr_max, # idx_cost_nonzeros = [i for i, item in enumerate(edit_costs) if item != 0] # compute distances in feature space. 
- dis_k_mat, _, _, _ = kernel_distance_matrix(Gn, node_label, edge_label, gkernel=gkernel) + dis_k_mat, _, _, _ = kernel_distance_matrix(Gn, node_label, edge_label, + Kmatrix=Kmatrix, gkernel=gkernel) dis_k_vec = [] for i in range(len(dis_k_mat)): # for j in range(i, len(dis_k_mat)): @@ -57,8 +58,7 @@ def fit_GED_to_kernel_distance(Gn, node_label, edge_label, gkernel, itr_max, time0 = time.time() params_ged['dataset'] = dataset params_ged['edit_cost_constant'] = init_costs - ged_vec_init, ged_mat, n_edit_operations = compute_geds(Gn, params_ged, - dataset, + ged_vec_init, ged_mat, n_edit_operations = compute_geds(Gn, params_ged, parallel=parallel) residual_list = [np.sqrt(np.sum(np.square(np.array(ged_vec_init) - dis_k_vec)))] time_list = [time.time() - time0] @@ -73,6 +73,10 @@ def fit_GED_to_kernel_distance(Gn, node_label, edge_label, gkernel, itr_max, time0 = time.time() # "fit" geds to distances in feature space by tuning edit costs using the # Least Squares Method. + np.savez('results/xp_fit_method/fit_data_debug' + str(itr) + '.gm', + nb_cost_mat=nb_cost_mat, dis_k_vec=dis_k_vec, + n_edit_operations=n_edit_operations, ged_vec_init=ged_vec_init, + ged_mat=ged_mat) edit_costs_new, residual = update_costs(nb_cost_mat, dis_k_vec, dataset=dataset, cost=params_ged['cost']) for i in range(len(edit_costs_new)): @@ -87,7 +91,6 @@ def fit_GED_to_kernel_distance(Gn, node_label, edge_label, gkernel, itr_max, # compute new GEDs and numbers of edit operations. 
params_ged['edit_cost_constant'] = edit_costs_new # np.array([edit_costs_new[0], edit_costs_new[1], 0.75]) ged_vec, ged_mat, n_edit_operations = compute_geds(Gn, params_ged, - dataset, parallel=parallel) residual_list.append(np.sqrt(np.sum(np.square(np.array(ged_vec) - dis_k_vec)))) time_list.append(time.time() - time0) @@ -101,8 +104,14 @@ def fit_GED_to_kernel_distance(Gn, node_label, edge_label, gkernel, itr_max, time_list, nb_cost_mat_list -def compute_geds(Gn, params_ged, dataset, parallel=False): - get_nb_eo = get_nb_edit_operations_letter if dataset == 'letter' else get_nb_edit_operations +def compute_geds(Gn, params_ged, parallel=False): + edit_cost_name = params_ged['cost'] + if edit_cost_name == 'LETTER' or edit_cost_name == 'LETTER2': + get_nb_eo = get_nb_edit_operations_letter + elif edit_cost_name == 'NON_SYMBOLIC': + get_nb_eo = get_nb_edit_operations_nonsymbolic + else: + get_nb_eo = get_nb_edit_operations ged_mat = np.zeros((len(Gn), len(Gn))) if parallel: # print('parallel') @@ -166,10 +175,10 @@ def _compute_ged_parallel(g1, g2, params_ged, get_nb_eo): def update_costs(nb_cost_mat, dis_k_vec, dataset='monoterpenoides', - cost='CONSTANT', rw_constraints='2constraints'): - if dataset.lower() == 'letter': - if cost == 'LETTER': - pass + cost='CONSTANT', rw_constraints='inequality'): +# if dataset == 'Letter-high': + if cost == 'LETTER': + pass # # method 1: set alpha automatically, just tune c_vir and c_eir by # # LMS using cvxpy. # alpha = 0.5 @@ -192,7 +201,7 @@ def update_costs(nb_cost_mat, dis_k_vec, dataset='monoterpenoides', # edit_costs_new = x.value # edit_costs_new = np.array([edit_costs_new[0], edit_costs_new[1], alpha]) # residual = np.sqrt(prob.value) - + # # method 2: tune c_vir, c_eir and alpha by nonlinear programming by # # scipy.optimize.minimize. 
# w0 = nb_cost_mat[:,0] + nb_cost_mat[:,1] @@ -205,10 +214,10 @@ def update_costs(nb_cost_mat, dis_k_vec, dataset='monoterpenoides', # res = minimize(func_min, [0.9, 1.7, 0.75, 10], bounds=bounds) # edit_costs_new = res.x[0:3] # residual = res.fun - - # method 3: tune c_vir, c_eir and alpha by nonlinear programming using cvxpy. - - + + # method 3: tune c_vir, c_eir and alpha by nonlinear programming using cvxpy. + + # # method 4: tune c_vir, c_eir and alpha by QP function # # scipy.optimize.least_squares. An initial guess is required. # w0 = nb_cost_mat[:,0] + nb_cost_mat[:,1] @@ -220,7 +229,7 @@ def update_costs(nb_cost_mat, dis_k_vec, dataset='monoterpenoides', # res = optimize.root(func, [0.9, 1.7, 0.75, 100]) # edit_costs_new = res.x # residual = None - elif cost == 'LETTER2': + elif cost == 'LETTER2': # # 1. if c_vi != c_vr, c_ei != c_er. # nb_cost_mat_new = nb_cost_mat[:,[0,1,3,4,5]] # x = cp.Variable(nb_cost_mat_new.shape[1]) @@ -247,44 +256,118 @@ def update_costs(nb_cost_mat, dis_k_vec, dataset='monoterpenoides', # edit_costs_new = [x.value[0], x.value[0], x.value[1], x.value[2], x.value[2]] # edit_costs_new = np.array(edit_costs_new) # residual = np.sqrt(prob.value) - if rw_constraints == 'inequality': - # c_vs <= c_vi + c_vr. - nb_cost_mat_new = nb_cost_mat[:,[0,1,3,4,5]] + if rw_constraints == 'inequality': + # c_vs <= c_vi + c_vr. + nb_cost_mat_new = nb_cost_mat[:,[0,1,3,4,5]] + x = cp.Variable(nb_cost_mat_new.shape[1]) + cost_fun = cp.sum_squares(nb_cost_mat_new * x - dis_k_vec) + constraints = [x >= [0.01 for i in range(nb_cost_mat_new.shape[1])], + np.array([1.0, 1.0, -1.0, 0.0, 0.0]).T@x >= 0.0] + prob = cp.Problem(cp.Minimize(cost_fun), constraints) + prob.solve() + edit_costs_new = x.value + residual = np.sqrt(prob.value) + elif rw_constraints == '2constraints': + # c_vs <= c_vi + c_vr and c_vi == c_vr, c_ei == c_er. 
+ nb_cost_mat_new = nb_cost_mat[:,[0,1,3,4,5]] + x = cp.Variable(nb_cost_mat_new.shape[1]) + cost_fun = cp.sum_squares(nb_cost_mat_new * x - dis_k_vec) + constraints = [x >= [0.01 for i in range(nb_cost_mat_new.shape[1])], + np.array([1.0, 1.0, -1.0, 0.0, 0.0]).T@x >= 0.0, + np.array([1.0, -1.0, 0.0, 0.0, 0.0]).T@x == 0.0, + np.array([0.0, 0.0, 0.0, 1.0, -1.0]).T@x == 0.0] + prob = cp.Problem(cp.Minimize(cost_fun), constraints) + prob.solve() + edit_costs_new = x.value + residual = np.sqrt(prob.value) + elif rw_constraints == 'no-constraint': + # no constraint. + nb_cost_mat_new = nb_cost_mat[:,[0,1,3,4,5]] + x = cp.Variable(nb_cost_mat_new.shape[1]) + cost_fun = cp.sum_squares(nb_cost_mat_new * x - dis_k_vec) + constraints = [x >= [0.01 for i in range(nb_cost_mat_new.shape[1])]] + prob = cp.Problem(cp.Minimize(cost_fun), constraints) + prob.solve() + edit_costs_new = x.value + residual = np.sqrt(prob.value) +# elif method == 'inequality_modified': +# # c_vs <= c_vi + c_vr. +# nb_cost_mat_new = nb_cost_mat[:,[0,1,3,4,5]] +# x = cp.Variable(nb_cost_mat_new.shape[1]) +# cost_fun = cp.sum_squares(nb_cost_mat_new * x - dis_k_vec) +# constraints = [x >= [0.0 for i in range(nb_cost_mat_new.shape[1])], +# np.array([1.0, 1.0, -1.0, 0.0, 0.0]).T@x >= 0.0] +# prob = cp.Problem(cp.Minimize(cost_fun), constraints) +# prob.solve() +# # use same costs for insertion and removal rather than the fitted costs. 
+# edit_costs_new = [x.value[0], x.value[0], x.value[1], x.value[2], x.value[2]] +# edit_costs_new = np.array(edit_costs_new) +# residual = np.sqrt(prob.value) + elif cost == 'NON_SYMBOLIC': + is_n_attr = np.count_nonzero(nb_cost_mat[:,2]) + is_e_attr = np.count_nonzero(nb_cost_mat[:,5]) + + if dataset == 'SYNTHETICnew': +# nb_cost_mat_new = nb_cost_mat[:,[0,1,2,3,4]] + nb_cost_mat_new = nb_cost_mat[:,[2,3,4]] + x = cp.Variable(nb_cost_mat_new.shape[1]) + cost_fun = cp.sum_squares(nb_cost_mat_new * x - dis_k_vec) +# constraints = [x >= [0.0 for i in range(nb_cost_mat_new.shape[1])], +# np.array([0.0, 0.0, 0.0, 1.0, -1.0]).T@x == 0.0] +# constraints = [x >= [0.0001 for i in range(nb_cost_mat_new.shape[1])]] + constraints = [x >= [0.0001 for i in range(nb_cost_mat_new.shape[1])], + np.array([0.0, 1.0, -1.0]).T@x == 0.0] + prob = cp.Problem(cp.Minimize(cost_fun), constraints) + prob.solve() +# print(x.value) + edit_costs_new = np.concatenate((np.array([0.0, 0.0]), x.value, + np.array([0.0]))) + residual = np.sqrt(prob.value) + + elif rw_constraints == 'inequality': + # c_vs <= c_vi + c_vr. + if is_n_attr and is_e_attr: + nb_cost_mat_new = nb_cost_mat[:,[0,1,2,3,4,5]] x = cp.Variable(nb_cost_mat_new.shape[1]) cost_fun = cp.sum_squares(nb_cost_mat_new * x - dis_k_vec) constraints = [x >= [0.01 for i in range(nb_cost_mat_new.shape[1])], - np.array([1.0, 1.0, -1.0, 0.0, 0.0]).T@x >= 0.0] + np.array([1.0, 1.0, -1.0, 0.0, 0.0, 0.0]).T@x >= 0.0, + np.array([0.0, 0.0, 0.0, 1.0, 1.0, -1.0]).T@x >= 0.0] prob = cp.Problem(cp.Minimize(cost_fun), constraints) prob.solve() edit_costs_new = x.value residual = np.sqrt(prob.value) - elif rw_constraints == '2constraints': - # c_vs <= c_vi + c_vr and c_vi == c_vr, c_ei == c_er. 
+ elif is_n_attr and not is_e_attr: + nb_cost_mat_new = nb_cost_mat[:,[0,1,2,3,4]] + x = cp.Variable(nb_cost_mat_new.shape[1]) + cost_fun = cp.sum_squares(nb_cost_mat_new * x - dis_k_vec) + constraints = [x >= [0.001 for i in range(nb_cost_mat_new.shape[1])], + np.array([1.0, 1.0, -1.0, 0.0, 0.0]).T@x >= 0.0] + prob = cp.Problem(cp.Minimize(cost_fun), constraints) + prob.solve() + print(x.value) + edit_costs_new = np.concatenate((x.value, np.array([0.0]))) + residual = np.sqrt(prob.value) + elif not is_n_attr and is_e_attr: nb_cost_mat_new = nb_cost_mat[:,[0,1,3,4,5]] x = cp.Variable(nb_cost_mat_new.shape[1]) cost_fun = cp.sum_squares(nb_cost_mat_new * x - dis_k_vec) constraints = [x >= [0.01 for i in range(nb_cost_mat_new.shape[1])], - np.array([1.0, 1.0, -1.0, 0.0, 0.0]).T@x >= 0.0, - np.array([1.0, -1.0, 0.0, 0.0, 0.0]).T@x == 0.0, - np.array([0.0, 0.0, 0.0, 1.0, -1.0]).T@x == 0.0] + np.array([0.0, 0.0, 1.0, 1.0, -1.0]).T@x >= 0.0] prob = cp.Problem(cp.Minimize(cost_fun), constraints) prob.solve() - edit_costs_new = x.value + edit_costs_new = np.concatenate((x.value[0:2], np.array([0.0]), x.value[2:])) + residual = np.sqrt(prob.value) + else: + nb_cost_mat_new = nb_cost_mat[:,[0,1,3,4]] + x = cp.Variable(nb_cost_mat_new.shape[1]) + cost_fun = cp.sum_squares(nb_cost_mat_new * x - dis_k_vec) + constraints = [x >= [0.01 for i in range(nb_cost_mat_new.shape[1])]] + prob = cp.Problem(cp.Minimize(cost_fun), constraints) + prob.solve() + edit_costs_new = np.concatenate((x.value[0:2], np.array([0.0]), + x.value[2:], np.array([0.0]))) residual = np.sqrt(prob.value) -# elif method == 'inequality_modified': -# # c_vs <= c_vi + c_vr. 
-# nb_cost_mat_new = nb_cost_mat[:,[0,1,3,4,5]] -# x = cp.Variable(nb_cost_mat_new.shape[1]) -# cost_fun = cp.sum_squares(nb_cost_mat_new * x - dis_k_vec) -# constraints = [x >= [0.0 for i in range(nb_cost_mat_new.shape[1])], -# np.array([1.0, 1.0, -1.0, 0.0, 0.0]).T@x >= 0.0] -# prob = cp.Problem(cp.Minimize(cost_fun), constraints) -# prob.solve() -# # use same costs for insertion and removal rather than the fitted costs. -# edit_costs_new = [x.value[0], x.value[0], x.value[1], x.value[2], x.value[2]] -# edit_costs_new = np.array(edit_costs_new) -# residual = np.sqrt(prob.value) - else: # # method 1: simple least square method. # edit_costs_new, residual, _, _ = np.linalg.lstsq(nb_cost_mat, dis_k_vec, diff --git a/preimage/ged.py b/gklearn/preimage/ged.py similarity index 83% rename from preimage/ged.py rename to gklearn/preimage/ged.py index 156036f..d6baa46 100644 --- a/preimage/ged.py +++ b/gklearn/preimage/ged.py @@ -21,22 +21,26 @@ def GED(g1, g2, dataset='monoterpenoides', lib='gedlibpy', cost='CHEM_1', method """ Compute GED for 2 graphs. """ - def convertGraph(G, dataset): + def convertGraph(G, cost): """Convert a graph to the proper NetworkX format that can be recognized by library gedlibpy. 
""" G_new = nx.Graph() - if dataset == 'monoterpenoides': - for nd, attrs in G.nodes(data=True): - G_new.add_node(str(nd), chem=attrs['atom']) - for nd1, nd2, attrs in G.edges(data=True): - G_new.add_edge(str(nd1), str(nd2), valence=attrs['bond_type']) - elif dataset == 'letter': + if cost == 'LETTER' or cost == 'LETTER2': for nd, attrs in G.nodes(data=True): G_new.add_node(str(nd), x=str(attrs['attributes'][0]), y=str(attrs['attributes'][1])) for nd1, nd2, attrs in G.edges(data=True): G_new.add_edge(str(nd1), str(nd2)) + elif cost == 'NON_SYMBOLIC': + for nd, attrs in G.nodes(data=True): + G_new.add_node(str(nd)) + for a_name in G.graph['node_attrs']: + G_new.nodes[str(nd)][a_name] = str(attrs[a_name]) + for nd1, nd2, attrs in G.edges(data=True): + G_new.add_edge(str(nd1), str(nd2)) + for a_name in G.graph['edge_attrs']: + G_new.edges[str(nd1), str(nd2)][a_name] = str(attrs[a_name]) else: for nd, attrs in G.nodes(data=True): G_new.add_node(str(nd), chem=attrs['atom']) @@ -47,12 +51,12 @@ def GED(g1, g2, dataset='monoterpenoides', lib='gedlibpy', cost='CHEM_1', method return G_new - dataset = dataset.lower() +# dataset = dataset.lower() if lib == 'gedlibpy': gedlibpy.restart_env() - gedlibpy.add_nx_graph(convertGraph(g1, dataset), "") - gedlibpy.add_nx_graph(convertGraph(g2, dataset), "") + gedlibpy.add_nx_graph(convertGraph(g1, cost), "") + gedlibpy.add_nx_graph(convertGraph(g2, cost), "") listID = gedlibpy.get_all_graph_ids() gedlibpy.set_edit_cost(cost, edit_cost_constant=edit_cost_constant) @@ -127,7 +131,7 @@ def GED(g1, g2, dataset='monoterpenoides', lib='gedlibpy', cost='CHEM_1', method import sys import os sys.path.insert(0, "../") - from pygraph.utils.graphfiles import saveDataset + from gklearn.utils.graphfiles import saveDataset tmp_dir = '/media/ljia/DATA/research-repo/codes/others/gedlib/tests_linlin/output/tmp_ged/' if not os.path.exists(tmp_dir): @@ -370,8 +374,8 @@ def get_nb_edit_operations_letter(g1, g2, forward_map, backward_map): n_vr += 1 else: 
n_vs += 1 - diff_x = float(g1.nodes[i]['x']) - float(g2.nodes[map_i]['x']) - diff_y = float(g1.nodes[i]['y']) - float(g2.nodes[map_i]['y']) + diff_x = float(g1.nodes[nodes1[i]]['x']) - float(g2.nodes[map_i]['x']) + diff_y = float(g1.nodes[nodes1[i]]['y']) - float(g2.nodes[map_i]['y']) sod_vs += np.sqrt(np.square(diff_x) + np.square(diff_y)) for map_i in backward_map: if map_i == np.inf: @@ -399,5 +403,66 @@ def get_nb_edit_operations_letter(g1, g2, forward_map, backward_map): return n_vi, n_vr, n_vs, sod_vs, n_ei, n_er +def get_nb_edit_operations_nonsymbolic(g1, g2, forward_map, backward_map): + """Compute the number of each edit operations. + """ + n_vi = 0 + n_vr = 0 + n_vs = 0 + sod_vs = 0 + n_ei = 0 + n_er = 0 + n_es = 0 + sod_es = 0 + + nodes1 = [n for n in g1.nodes()] + for i, map_i in enumerate(forward_map): + if map_i == np.inf: + n_vr += 1 + else: + n_vs += 1 + sum_squares = 0 + for a_name in g1.graph['node_attrs']: + diff = float(g1.nodes[nodes1[i]][a_name]) - float(g2.nodes[map_i][a_name]) + sum_squares += np.square(diff) + sod_vs += np.sqrt(sum_squares) + for map_i in backward_map: + if map_i == np.inf: + n_vi += 1 + +# idx_nodes1 = range(0, len(node1)) + + edges1 = [e for e in g1.edges()] + for n1, n2 in edges1: + idx1 = nodes1.index(n1) + idx2 = nodes1.index(n2) + n1_g2 = forward_map[idx1] + n2_g2 = forward_map[idx2] + # one of the nodes is removed, thus the edge is removed. + if n1_g2 == np.inf or n2_g2 == np.inf: + n_er += 1 + # corresponding edge is in g2. 
+ elif (n1_g2, n2_g2) in g2.edges(): + n_es += 1 + sum_squares = 0 + for a_name in g1.graph['edge_attrs']: + diff = float(g1.edges[n1, n2][a_name]) - float(g2.nodes[n1_g2, n2_g2][a_name]) + sum_squares += np.square(diff) + sod_es += np.sqrt(sum_squares) + elif (n2_g2, n1_g2) in g2.edges(): + n_es += 1 + sum_squares = 0 + for a_name in g1.graph['edge_attrs']: + diff = float(g1.edges[n2, n1][a_name]) - float(g2.nodes[n2_g2, n1_g2][a_name]) + sum_squares += np.square(diff) + sod_es += np.sqrt(sum_squares) + # corresponding nodes are in g2, however the edge is removed. + else: + n_er += 1 + n_ei = nx.number_of_edges(g2) - n_es + + return n_vi, n_vr, sod_vs, n_ei, n_er, sod_es + + if __name__ == '__main__': print('check test_ged.py') \ No newline at end of file diff --git a/preimage/iam.py b/gklearn/preimage/iam.py similarity index 98% rename from preimage/iam.py rename to gklearn/preimage/iam.py index 19b646c..327e7cf 100644 --- a/preimage/iam.py +++ b/gklearn/preimage/iam.py @@ -13,8 +13,8 @@ from tqdm import tqdm import sys sys.path.insert(0, "../") -from pygraph.utils.graphdataset import get_dataset_attributes -from pygraph.utils.utils import graph_isIdentical, get_node_labels, get_edge_labels +from gklearn.utils.graphdataset import get_dataset_attributes +from gklearn.utils.utils import graph_isIdentical, get_node_labels, get_edge_labels from ged import GED, ged_median @@ -436,9 +436,9 @@ def iam_upgraded(Gn_median, Gn_candidate, c_ei=3, c_er=3, c_es=1, ite_max=50, return G_gen_median_list, sod_gen_median, sod_list, G_set_median_list, sod_set_median -def iam_bash(Gn_names, edit_cost_constant, cost='CONSTANT', +def iam_bash(Gn_names, edit_cost_constant, cost='CONSTANT', initial_solutions=1, dataset='monoterpenoides', - graph_dir='/media/ljia/DATA/research-repo/codes/Linlin/py-graph/datasets/monoterpenoides/'): + graph_dir='/media/ljia/DATA/research-repo/codes/Linlin/graphkit-learn/datasets/monoterpenoides/'): """Compute the iam by c++ implementation (gedlib) through 
bash. """ import os @@ -465,15 +465,17 @@ def iam_bash(Gn_names, edit_cost_constant, cost='CONSTANT', tmp_dir = '/media/ljia/DATA/research-repo/codes/others/gedlib/tests_linlin/output/tmp_ged/' fn_collection = tmp_dir + 'collection.' + str(time.time()) + str(random.randint(0, 1e9)) createCollectionFile(Gn_names, ['dummy'] * len(Gn_names), fn_collection) +# fn_collection = tmp_dir + 'collection_for_debug' # graph_dir = '/media/ljia/DATA/research-repo/codes/others/gedlib/tests_linlin/generated_datsets/monoterpenoides/gxl' - +# if dataset == 'Letter-high' or dataset == 'Fingerprint': +# dataset = 'letter' command = 'GEDLIB_HOME=\'/media/ljia/DATA/research-repo/codes/Linlin/gedlib\'\n' command += 'LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$GEDLIB_HOME/lib\n' command += 'export LD_LIBRARY_PATH\n' command += 'cd \'/media/ljia/DATA/research-repo/codes/others/gedlib/tests_linlin/bin\'\n' command += './iam_for_python_bash ' + dataset + ' ' + fn_collection \ - + ' \'' + graph_dir + '\' ' + ' ' + cost + ' ' + + ' \'' + graph_dir + '\' ' + ' ' + cost + ' ' + str(initial_solutions) + ' ' if edit_cost_constant is None: command += 'None' else: @@ -763,7 +765,7 @@ def test_iam_with_more_graphs_as_init(Gn, G_candidate, c_ei=3, c_er=3, c_es=1, ############################################################################### if __name__ == '__main__': - from pygraph.utils.graphfiles import loadDataset + from gklearn.utils.graphfiles import loadDataset ds = {'name': 'MUTAG', 'dataset': '../datasets/MUTAG/MUTAG.mat', 'extra_params': {'am_sp_al_nl_el': [0, 0, 3, 1, 2]}} # node/edge symb # ds = {'name': 'Letter-high', 'dataset': '../datasets/Letter-high/Letter-high_A.txt', diff --git a/gklearn/preimage/knn.py b/gklearn/preimage/knn.py new file mode 100644 index 0000000..887bd46 --- /dev/null +++ b/gklearn/preimage/knn.py @@ -0,0 +1,116 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Fri Jan 10 13:22:04 2020 + +@author: ljia +""" +import numpy as np +#import matplotlib.pyplot as 
plt +from tqdm import tqdm +import random +#import csv +from shutil import copyfile + + +import sys +sys.path.insert(0, "../") +from preimage.iam import iam_bash +from gklearn.utils.graphfiles import loadDataset, loadGXL +from preimage.ged import GED +from preimage.utils import get_same_item_indices + +def test_knn(): + ds = {'name': 'monoterpenoides', + 'dataset': '../datasets/monoterpenoides/dataset_10+.ds'} # node/edge symb + Gn, y_all = loadDataset(ds['dataset']) +# Gn = Gn[0:50] +# gkernel = 'treeletkernel' +# node_label = 'atom' +# edge_label = 'bond_type' +# ds_name = 'mono' + dir_output = 'results/knn/' + graph_dir='/media/ljia/DATA/research-repo/codes/Linlin/graphkit-learn/datasets/monoterpenoides/' + + k_nn = 1 + percent = 0.1 + repeats = 50 + edit_cost_constant = [3, 3, 1, 3, 3, 1] + + # get indices by classes. + y_idx = get_same_item_indices(y_all) + sod_sm_list_list + for repeat in range(0, repeats): + print('\n---------------------------------') + print('repeat =', repeat) + accuracy_sm_list = [] + accuracy_gm_list = [] + sod_sm_list = [] + sod_gm_list = [] + + random.seed(repeat) + set_median_list = [] + gen_median_list = [] + train_y_set = [] + for y, values in y_idx.items(): + print('\ny =', y) + size_median_set = int(len(values) * percent) + median_set_idx = random.sample(values, size_median_set) + print('median set: ', median_set_idx) + + # compute set median and gen median using IAM (C++ through bash). 
+ # Gn_median = [Gn[idx] for idx in median_set_idx] + group_fnames = [Gn[g].graph['filename'] for g in median_set_idx] + sod_sm, sod_gm, fname_sm, fname_gm = iam_bash(group_fnames, edit_cost_constant, + graph_dir=graph_dir) + print('sod_sm, sod_gm:', sod_sm, sod_gm) + sod_sm_list.append(sod_sm) + sod_gm_list.append(sod_gm) + fname_sm_new = dir_output + 'medians/set_median.y' + str(int(y)) + '.repeat' + str(repeat) + '.gxl' + copyfile(fname_sm, fname_sm_new) + fname_gm_new = dir_output + 'medians/gen_median.y' + str(int(y)) + '.repeat' + str(repeat) + '.gxl' + copyfile(fname_gm, fname_gm_new) + set_median_list.append(loadGXL(fname_sm_new)) + gen_median_list.append(loadGXL(fname_gm_new)) + train_y_set.append(int(y)) + + print(sod_sm, sod_gm) + + # do 1-nn. + test_y_set = [int(y) for y in y_all] + accuracy_sm = knn(set_median_list, train_y_set, Gn, test_y_set, k=k_nn, distance='ged') + accuracy_gm = knn(set_median_list, train_y_set, Gn, test_y_set, k=k_nn, distance='ged') + accuracy_sm_list.append(accuracy_sm) + accuracy_gm_list.append(accuracy_gm) + print('current accuracy sm and gm:', accuracy_sm, accuracy_gm) + + # output + accuracy_sm_mean = np.mean(accuracy_sm_list) + accuracy_gm_mean = np.mean(accuracy_gm_list) + print('\ntotal average accuracy sm and gm:', accuracy_sm_mean, accuracy_gm_mean) + + +def knn(train_set, train_y_set, test_set, test_y_set, k=1, distance='ged'): + if k == 1 and distance == 'ged': + algo_options = '--threads 8 --initial-solutions 40 --ratio-runs-from-initial-solutions 1' + params_ged = {'lib': 'gedlibpy', 'cost': 'CONSTANT', 'method': 'IPFP', + 'algo_options': algo_options, 'stabilizer': None} + accuracy = 0 + for idx_test, g_test in tqdm(enumerate(test_set), desc='computing 1-nn', + file=sys.stdout): + dis = np.inf + for idx_train, g_train in enumerate(train_set): + dis_cur, _, _ = GED(g_test, g_train, **params_ged) + if dis_cur < dis: + dis = dis_cur + test_y_cur = train_y_set[idx_train] + if test_y_cur == test_y_set[idx_test]: + 
accuracy += 1 + accuracy = accuracy / len(test_set) + + return accuracy + + + +if __name__ == '__main__': + test_knn() \ No newline at end of file diff --git a/gklearn/preimage/libs.py b/gklearn/preimage/libs.py new file mode 100644 index 0000000..3b8e623 --- /dev/null +++ b/gklearn/preimage/libs.py @@ -0,0 +1,6 @@ +import sys +import pathlib + +# insert gedlibpy library. +sys.path.insert(0, "../../") +from gedlibpy import librariesImport, gedlibpy diff --git a/preimage/median.py b/gklearn/preimage/median.py similarity index 95% rename from preimage/median.py rename to gklearn/preimage/median.py index ed3e6cd..1c5bb0f 100644 --- a/preimage/median.py +++ b/gklearn/preimage/median.py @@ -8,8 +8,8 @@ import time from gedlibpy import librariesImport, gedlibpy #import script sys.path.insert(0, "/home/bgauzere/dev/optim-graphes/") -import pygraph -from pygraph.utils.graphfiles import loadDataset +import gklearn +from gklearn.utils.graphfiles import loadDataset def replace_graph_in_env(script, graph, old_id, label='median'): """ @@ -199,7 +199,7 @@ if __name__ == "__main__": script.PySetMethod("IPFP", "") script.PyInitMethod() - dataset,my_y = pygraph.utils.graphfiles.loadDataset("/home/bgauzere/dev/gedlib/data/datasets/Letter/HIGH/Letter_Z.cxl") + dataset,my_y = gklearn.utils.graphfiles.loadDataset("/home/bgauzere/dev/gedlib/data/datasets/Letter/HIGH/Letter_Z.cxl") listID = script.PyGetAllGraphIds() median, sod = compute_median(script,listID,dataset,verbose=True) diff --git a/gklearn/preimage/median_benoit.py b/gklearn/preimage/median_benoit.py new file mode 100644 index 0000000..6712196 --- /dev/null +++ b/gklearn/preimage/median_benoit.py @@ -0,0 +1,201 @@ +import sys +import pathlib +import numpy as np +import networkx as nx + +import librariesImport +import script +sys.path.insert(0, "/home/bgauzere/dev/optim-graphes/") +import gklearn + +def replace_graph_in_env(script, graph, old_id, label='median'): + """ + Replace a graph in script + + If old_id is -1, add a new 
graph to the environnemt + + """ + if(old_id > -1): + script.PyClearGraph(old_id) + new_id = script.PyAddGraph(label) + for i in graph.nodes(): + script.PyAddNode(new_id,str(i),graph.node[i]) # !! strings are required bt gedlib + for e in graph.edges: + script.PyAddEdge(new_id, str(e[0]),str(e[1]), {}) + script.PyInitEnv() + script.PySetMethod("IPFP", "") + script.PyInitMethod() + + return new_id + +#Dessin median courrant +def draw_Letter_graph(graph): + import numpy as np + import networkx as nx + import matplotlib.pyplot as plt + plt.figure() + pos = {} + for n in graph.nodes: + pos[n] = np.array([float(graph.node[n]['x']),float(graph.node[n]['y'])]) + nx.draw_networkx(graph,pos) + plt.show() + +#compute new mappings +def update_mappings(script,median_id,listID): + med_distances = {} + med_mappings = {} + sod = 0 + for i in range(0,len(listID)): + script.PyRunMethod(median_id,listID[i]) + med_distances[i] = script.PyGetUpperBound(median_id,listID[i]) + med_mappings[i] = script.PyGetForwardMap(median_id,listID[i]) + sod += med_distances[i] + return med_distances, med_mappings, sod + +def calcul_Sij(all_mappings, all_graphs,i,j): + s_ij = 0 + for k in range(0,len(all_mappings)): + cur_graph = all_graphs[k] + cur_mapping = all_mappings[k] + size_graph = cur_graph.order() + if ((cur_mapping[i] < size_graph) and + (cur_mapping[j] < size_graph) and + (cur_graph.has_edge(cur_mapping[i], cur_mapping[j]) == True)): + s_ij += 1 + + return s_ij + +# def update_median_nodes_L1(median,listIdSet,median_id,dataset, mappings): +# from scipy.stats.mstats import gmean + +# for i in median.nodes(): +# for k in listIdSet: +# vectors = [] #np.zeros((len(listIdSet),2)) +# if(k != median_id): +# phi_i = mappings[k][i] +# if(phi_i < dataset[k].order()): +# vectors.append([float(dataset[k].node[phi_i]['x']),float(dataset[k].node[phi_i]['y'])]) + +# new_labels = gmean(vectors) +# median.node[i]['x'] = str(new_labels[0]) +# median.node[i]['y'] = str(new_labels[1]) +# return median + +def 
update_median_nodes(median,dataset,mappings): + #update node attributes + for i in median.nodes(): + nb_sub=0 + mean_label = {'x' : 0, 'y' : 0} + for k in range(0,len(mappings)): + phi_i = mappings[k][i] + if ( phi_i < dataset[k].order() ): + nb_sub += 1 + mean_label['x'] += 0.75*float(dataset[k].node[phi_i]['x']) + mean_label['y'] += 0.75*float(dataset[k].node[phi_i]['y']) + median.node[i]['x'] = str((1/0.75)*(mean_label['x']/nb_sub)) + median.node[i]['y'] = str((1/0.75)*(mean_label['y']/nb_sub)) + return median + +def update_median_edges(dataset, mappings, median, cei=0.425,cer=0.425): +#for letter high, ceir = 1.7, alpha = 0.75 + size_dataset = len(dataset) + ratio_cei_cer = cer/(cei + cer) + threshold = size_dataset*ratio_cei_cer + order_graph_median = median.order() + for i in range(0,order_graph_median): + for j in range(i+1,order_graph_median): + s_ij = calcul_Sij(mappings,dataset,i,j) + if(s_ij > threshold): + median.add_edge(i,j) + else: + if(median.has_edge(i,j)): + median.remove_edge(i,j) + return median + + + +def compute_median(script, listID, dataset,verbose=False): + """Compute a graph median of a dataset according to an environment + + Parameters + + script : An gedlib initialized environnement + listID (list): a list of ID in script: encodes the dataset + dataset (list): corresponding graphs in networkX format. 
We assume that graph + listID[i] corresponds to dataset[i] + + Returns: + A networkX graph, which is the median, with corresponding sod + """ + print(len(listID)) + median_set_index, median_set_sod = compute_median_set(script, listID) + print(median_set_index) + print(median_set_sod) + sods = [] + #Ajout median dans environnement + set_median = dataset[median_set_index].copy() + median = dataset[median_set_index].copy() + cur_med_id = replace_graph_in_env(script,median,-1) + med_distances, med_mappings, cur_sod = update_mappings(script,cur_med_id,listID) + sods.append(cur_sod) + if(verbose): + print(cur_sod) + ite_max = 50 + old_sod = cur_sod * 2 + ite = 0 + epsilon = 0.001 + + best_median + while((ite < ite_max) and (np.abs(old_sod - cur_sod) > epsilon )): + median = update_median_nodes(median,dataset, med_mappings) + median = update_median_edges(dataset,med_mappings,median) + + cur_med_id = replace_graph_in_env(script,median,cur_med_id) + med_distances, med_mappings, cur_sod = update_mappings(script,cur_med_id,listID) + + + sods.append(cur_sod) + if(verbose): + print(cur_sod) + ite += 1 + return median, cur_sod, sods, set_median + + draw_Letter_graph(median) + + +def compute_median_set(script,listID): + 'Returns the id in listID corresponding to median set' + #Calcul median set + N=len(listID) + map_id_to_index = {} + map_index_to_id = {} + for i in range(0,len(listID)): + map_id_to_index[listID[i]] = i + map_index_to_id[i] = listID[i] + + distances = np.zeros((N,N)) + for i in listID: + for j in listID: + script.PyRunMethod(i,j) + distances[map_id_to_index[i],map_id_to_index[j]] = script.PyGetUpperBound(i,j) + + median_set_index = np.argmin(np.sum(distances,0)) + sod = np.min(np.sum(distances,0)) + + return median_set_index, sod + +if __name__ == "__main__": + #Chargement du dataset + script.PyLoadGXLGraph('/home/bgauzere/dev/gedlib/data/datasets/Letter/HIGH/', '/home/bgauzere/dev/gedlib/data/collections/Letter_Z.xml') + script.PySetEditCost("LETTER") + 
script.PyInitEnv() + script.PySetMethod("IPFP", "") + script.PyInitMethod() + + dataset,my_y = gklearn.utils.graphfiles.loadDataset("/home/bgauzere/dev/gedlib/data/datasets/Letter/HIGH/Letter_Z.cxl") + + listID = script.PyGetAllGraphIds() + median, sod = compute_median(script,listID,dataset,verbose=True) + + print(sod) + draw_Letter_graph(median) diff --git a/gklearn/preimage/median_linlin.py b/gklearn/preimage/median_linlin.py new file mode 100644 index 0000000..6139558 --- /dev/null +++ b/gklearn/preimage/median_linlin.py @@ -0,0 +1,215 @@ +import sys +import pathlib +import numpy as np +import networkx as nx + +from gedlibpy import librariesImport, gedlibpy +sys.path.insert(0, "/home/bgauzere/dev/optim-graphes/") +import gklearn + +def replace_graph_in_env(script, graph, old_id, label='median'): + """ + Replace a graph in script + + If old_id is -1, add a new graph to the environnemt + + """ + if(old_id > -1): + script.PyClearGraph(old_id) + new_id = script.PyAddGraph(label) + for i in graph.nodes(): + script.PyAddNode(new_id,str(i),graph.node[i]) # !! 
strings are required bt gedlib + for e in graph.edges: + script.PyAddEdge(new_id, str(e[0]),str(e[1]), {}) + script.PyInitEnv() + script.PySetMethod("IPFP", "") + script.PyInitMethod() + + return new_id + +#Dessin median courrant +def draw_Letter_graph(graph): + import numpy as np + import networkx as nx + import matplotlib.pyplot as plt + plt.figure() + pos = {} + for n in graph.nodes: + pos[n] = np.array([float(graph.node[n]['x']),float(graph.node[n]['y'])]) + nx.draw_networkx(graph,pos) + plt.show() + +#compute new mappings +def update_mappings(script,median_id,listID): + med_distances = {} + med_mappings = {} + sod = 0 + for i in range(0,len(listID)): + script.PyRunMethod(median_id,listID[i]) + med_distances[i] = script.PyGetUpperBound(median_id,listID[i]) + med_mappings[i] = script.PyGetForwardMap(median_id,listID[i]) + sod += med_distances[i] + return med_distances, med_mappings, sod + +def calcul_Sij(all_mappings, all_graphs,i,j): + s_ij = 0 + for k in range(0,len(all_mappings)): + cur_graph = all_graphs[k] + cur_mapping = all_mappings[k] + size_graph = cur_graph.order() + if ((cur_mapping[i] < size_graph) and + (cur_mapping[j] < size_graph) and + (cur_graph.has_edge(cur_mapping[i], cur_mapping[j]) == True)): + s_ij += 1 + + return s_ij + +# def update_median_nodes_L1(median,listIdSet,median_id,dataset, mappings): +# from scipy.stats.mstats import gmean + +# for i in median.nodes(): +# for k in listIdSet: +# vectors = [] #np.zeros((len(listIdSet),2)) +# if(k != median_id): +# phi_i = mappings[k][i] +# if(phi_i < dataset[k].order()): +# vectors.append([float(dataset[k].node[phi_i]['x']),float(dataset[k].node[phi_i]['y'])]) + +# new_labels = gmean(vectors) +# median.node[i]['x'] = str(new_labels[0]) +# median.node[i]['y'] = str(new_labels[1]) +# return median + +def update_median_nodes(median,dataset,mappings): + #update node attributes + for i in median.nodes(): + nb_sub=0 + mean_label = {'x' : 0, 'y' : 0} + for k in range(0,len(mappings)): + phi_i = 
mappings[k][i] + if ( phi_i < dataset[k].order() ): + nb_sub += 1 + mean_label['x'] += 0.75*float(dataset[k].node[phi_i]['x']) + mean_label['y'] += 0.75*float(dataset[k].node[phi_i]['y']) + median.node[i]['x'] = str((1/0.75)*(mean_label['x']/nb_sub)) + median.node[i]['y'] = str((1/0.75)*(mean_label['y']/nb_sub)) + return median + +def update_median_edges(dataset, mappings, median, cei=0.425,cer=0.425): +#for letter high, ceir = 1.7, alpha = 0.75 + size_dataset = len(dataset) + ratio_cei_cer = cer/(cei + cer) + threshold = size_dataset*ratio_cei_cer + order_graph_median = median.order() + for i in range(0,order_graph_median): + for j in range(i+1,order_graph_median): + s_ij = calcul_Sij(mappings,dataset,i,j) + if(s_ij > threshold): + median.add_edge(i,j) + else: + if(median.has_edge(i,j)): + median.remove_edge(i,j) + return median + + + +def compute_median(script, listID, dataset,verbose=False): + """Compute a graph median of a dataset according to an environment + + Parameters + + script : An gedlib initialized environnement + listID (list): a list of ID in script: encodes the dataset + dataset (list): corresponding graphs in networkX format. 
We assume that graph + listID[i] corresponds to dataset[i] + + Returns: + A networkX graph, which is the median, with corresponding sod + """ + print(len(listID)) + median_set_index, median_set_sod = compute_median_set(script, listID) + print(median_set_index) + print(median_set_sod) + sods = [] + #Ajout median dans environnement + set_median = dataset[median_set_index].copy() + median = dataset[median_set_index].copy() + cur_med_id = replace_graph_in_env(script,median,-1) + med_distances, med_mappings, cur_sod = update_mappings(script,cur_med_id,listID) + sods.append(cur_sod) + if(verbose): + print(cur_sod) + ite_max = 50 + old_sod = cur_sod * 2 + ite = 0 + epsilon = 0.001 + + best_median + while((ite < ite_max) and (np.abs(old_sod - cur_sod) > epsilon )): + median = update_median_nodes(median,dataset, med_mappings) + median = update_median_edges(dataset,med_mappings,median) + + cur_med_id = replace_graph_in_env(script,median,cur_med_id) + med_distances, med_mappings, cur_sod = update_mappings(script,cur_med_id,listID) + + + sods.append(cur_sod) + if(verbose): + print(cur_sod) + ite += 1 + return median, cur_sod, sods, set_median + + draw_Letter_graph(median) + + +def compute_median_set(script,listID): + 'Returns the id in listID corresponding to median set' + #Calcul median set + N=len(listID) + map_id_to_index = {} + map_index_to_id = {} + for i in range(0,len(listID)): + map_id_to_index[listID[i]] = i + map_index_to_id[i] = listID[i] + + distances = np.zeros((N,N)) + for i in listID: + for j in listID: + script.PyRunMethod(i,j) + distances[map_id_to_index[i],map_id_to_index[j]] = script.PyGetUpperBound(i,j) + + median_set_index = np.argmin(np.sum(distances,0)) + sod = np.min(np.sum(distances,0)) + + return median_set_index, sod + +def _convertGraph(G): + """Convert a graph to the proper NetworkX format that can be + recognized by library gedlibpy. 
+ """ + G_new = nx.Graph() + for nd, attrs in G.nodes(data=True): + G_new.add_node(str(nd), chem=attrs['atom']) +# G_new.add_node(str(nd), x=str(attrs['attributes'][0]), +# y=str(attrs['attributes'][1])) + for nd1, nd2, attrs in G.edges(data=True): + G_new.add_edge(str(nd1), str(nd2), valence=attrs['bond_type']) +# G_new.add_edge(str(nd1), str(nd2)) + + return G_new + +if __name__ == "__main__": + #Chargement du dataset + gedlibpy.PyLoadGXLGraph('/home/bgauzere/dev/gedlib/data/datasets/Letter/HIGH/', '/home/bgauzere/dev/gedlib/data/collections/Letter_Z.xml') + gedlibpy.PySetEditCost("LETTER") + gedlibpy.PyInitEnv() + gedlibpy.PySetMethod("IPFP", "") + gedlibpy.PyInitMethod() + + dataset,my_y = gklearn.utils.graphfiles.loadDataset("/home/bgauzere/dev/gedlib/data/datasets/Letter/HIGH/Letter_Z.cxl") + + listID = gedlibpy.PyGetAllGraphIds() + median, sod = compute_median(gedlibpy,listID,dataset,verbose=True) + + print(sod) + draw_Letter_graph(median) diff --git a/preimage/pathfrequency.py b/gklearn/preimage/pathfrequency.py similarity index 100% rename from preimage/pathfrequency.py rename to gklearn/preimage/pathfrequency.py diff --git a/preimage/preimage_iam.py b/gklearn/preimage/preimage_iam.py similarity index 100% rename from preimage/preimage_iam.py rename to gklearn/preimage/preimage_iam.py diff --git a/preimage/preimage_random.py b/gklearn/preimage/preimage_random.py similarity index 99% rename from preimage/preimage_random.py rename to gklearn/preimage/preimage_random.py index a8a24d9..00a9e1a 100644 --- a/preimage/preimage_random.py +++ b/gklearn/preimage/preimage_random.py @@ -161,7 +161,7 @@ def preimage_random(Gn_init, Gn_median, alpha, idx_gi, Kmatrix, k, r_max, l, gke if __name__ == '__main__': - from pygraph.utils.graphfiles import loadDataset + from gklearn.utils.graphfiles import loadDataset # ds = {'name': 'MUTAG', 'dataset': '../datasets/MUTAG/MUTAG_A.txt', # 'extra_params': {}} # node/edge symb diff --git a/preimage/test.py 
b/gklearn/preimage/test.py similarity index 98% rename from preimage/test.py rename to gklearn/preimage/test.py index f3a13c8..e67c2ce 100644 --- a/preimage/test.py +++ b/gklearn/preimage/test.py @@ -54,7 +54,7 @@ def convertGraph(G): def testNxGrapĥ(): import sys sys.path.insert(0, "../") - from pygraph.utils.graphfiles import loadDataset + from gklearn.utils.graphfiles import loadDataset ds = {'name': 'MUTAG', 'dataset': '../datasets/MUTAG/MUTAG_A.txt', 'extra_params': {}} # node/edge symb Gn, y_all = loadDataset(ds['dataset'], extra_params=ds['extra_params']) diff --git a/preimage/test_fitDistance.py b/gklearn/preimage/test_fitDistance.py similarity index 95% rename from preimage/test_fitDistance.py rename to gklearn/preimage/test_fitDistance.py index 2f2907d..dd42bb6 100644 --- a/preimage/test_fitDistance.py +++ b/gklearn/preimage/test_fitDistance.py @@ -11,12 +11,41 @@ from tqdm import tqdm import sys sys.path.insert(0, "../") -from pygraph.utils.graphfiles import loadDataset +from gklearn.utils.graphfiles import loadDataset from utils import remove_edges from fitDistance import fit_GED_to_kernel_distance from utils import normalize_distance_matrix +def test_update_costs(): + from preimage.fitDistance import update_costs + import cvxpy as cp + + ds = np.load('results/xp_fit_method/fit_data_debug4.gm.npz') + nb_cost_mat = ds['nb_cost_mat'] + dis_k_vec = ds['dis_k_vec'] + n_edit_operations = ds['n_edit_operations'] + ged_vec_init = ds['ged_vec_init'] + ged_mat = ds['ged_mat'] + + nb_cost_mat_new = nb_cost_mat[:,[2,3,4]] + x = cp.Variable(nb_cost_mat_new.shape[1]) + cost_fun = cp.sum_squares(nb_cost_mat_new * x - dis_k_vec) +# constraints = [x >= [0.000 for i in range(nb_cost_mat_new.shape[1])], +# np.array([1.0, 1.0, -1.0, 0.0, 0.0]).T@x >= 0.0] +# constraints = [x >= [0.000 for i in range(nb_cost_mat_new.shape[1])], +# np.array([1.0, 1.0, -1.0, 0.0, 0.0]).T@x >= 0.0, +# np.array([0.0, 0.0, 0.0, 1.0, -1.0]).T@x == 0.0] + constraints = [x >= [0.00 for i in 
range(nb_cost_mat_new.shape[1])], + np.array([0.0, 1.0, -1.0]).T@x == 0.0] +# constraints = [x >= [0.00000 for i in range(nb_cost_mat_new.shape[1])]] + prob = cp.Problem(cp.Minimize(cost_fun), constraints) + prob.solve() + print(x.value) + edit_costs_new = np.concatenate((x.value, np.array([0.0]))) + residual = np.sqrt(prob.value) + + def median_paper_clcpc_python_best(): """c_vs <= c_vi + c_vr, c_es <= c_ei + c_er with ged computation with python invoking the c++ code by bash command (with updated library). @@ -613,7 +642,9 @@ if __name__ == '__main__': # test_cs_leq_ci_plus_cr_python_bash_cpp() # median_paper_clcpc_python_bash_cpp() - median_paper_clcpc_python_best() +# median_paper_clcpc_python_best() # x = np.array([[1,2,3],[4,5,6],[7,8,9]]) -# xx = pairwise_substitution(x) \ No newline at end of file +# xx = pairwise_substitution(x) + + test_update_costs() \ No newline at end of file diff --git a/gklearn/preimage/test_ged.py b/gklearn/preimage/test_ged.py new file mode 100644 index 0000000..c5daee5 --- /dev/null +++ b/gklearn/preimage/test_ged.py @@ -0,0 +1,531 @@ +#export LD_LIBRARY_PATH=.:/export/home/lambertn/Documents/gedlibpy/lib/fann/:/export/home/lambertn/Documents/gedlibpy/lib/libsvm.3.22:/export/home/lambertn/Documents/gedlibpy/lib/nomad + +#Pour que "import script" trouve les librairies qu'a besoin GedLib +#Equivalent à définir la variable d'environnement LD_LIBRARY_PATH sur un bash +#import gedlibpy_linlin.librariesImport +#from gedlibpy_linlin import gedlibpy +from libs import * +import networkx as nx +import numpy as np +from tqdm import tqdm +import sys + + +def test_NON_SYMBOLIC_cost(): + """Test edit cost LETTER2. 
+ """ + import sys + sys.path.insert(0, "../") + from preimage.ged import GED, get_nb_edit_operations_nonsymbolic, get_nb_edit_operations_letter + from preimage.test_k_closest_graphs import reform_attributes + from gklearn.utils.graphfiles import loadDataset + + dataset = '/media/ljia/DATA/research-repo/codes/Linlin/graphkit-learn/datasets/Letter-high/Letter-high_A.txt' + Gn, y_all = loadDataset(dataset) + + g1 = Gn[200] + g2 = Gn[1780] + reform_attributes(g1) + reform_attributes(g2) + + c_vi = 0.675 + c_vr = 0.675 + c_vs = 0.75 + c_ei = 0.425 + c_er = 0.425 + c_es = 0 + + edit_cost_constant = [c_vi, c_vr, c_vs, c_ei, c_er, c_es] + dis, pi_forward, pi_backward = GED(g1, g2, lib='gedlibpy', + cost='NON_SYMBOLIC', method='IPFP', edit_cost_constant=edit_cost_constant, + algo_options='', stabilizer=None) + n_vi, n_vr, sod_vs, n_ei, n_er, sod_es = get_nb_edit_operations_nonsymbolic(g1, g2, + pi_forward, pi_backward) + + print('# of operations:', n_vi, n_vr, sod_vs, n_ei, n_er, sod_es) + print('c_vi, c_vr, c_vs, c_ei, c_er:', c_vi, c_vr, c_vs, c_ei, c_er, c_es) + cost_computed = c_vi * n_vi + c_vr * n_vr + c_vs * sod_vs \ + + c_ei * n_ei + c_er * n_er + c_es * sod_es + print('dis (cost computed by GED):', dis) + print('cost computed by # of operations and edit cost constants:', cost_computed) + + +def test_LETTER2_cost(): + """Test edit cost LETTER2. 
+ """ + import sys + sys.path.insert(0, "../") + from preimage.ged import GED, get_nb_edit_operations_letter + from preimage.test_k_closest_graphs import reform_attributes + from gklearn.utils.graphfiles import loadDataset + + ds = {'dataset': '/media/ljia/DATA/research-repo/codes/others/gedlib/tests_linlin/data/collections/Letter.xml', + 'graph_dir': '/media/ljia/DATA/research-repo/codes/others/gedlib/tests_linlin/data/datasets/Letter/HIGH/'} # node/edge symb + Gn, y_all = loadDataset(ds['dataset'], extra_params=ds['graph_dir']) + + g1 = Gn[200] + g2 = Gn[1780] + reform_attributes(g1) + reform_attributes(g2) + + c_vi = 0.675 + c_vr = 0.675 + c_vs = 0.75 + c_ei = 0.425 + c_er = 0.425 + + edit_cost_constant = [c_vi, c_vr, c_vs, c_ei, c_er] + dis, pi_forward, pi_backward = GED(g1, g2, dataset='letter', lib='gedlibpy', + cost='LETTER2', method='IPFP', edit_cost_constant=edit_cost_constant, + algo_options='', stabilizer=None) + n_vi, n_vr, n_vs, sod_vs, n_ei, n_er = get_nb_edit_operations_letter(g1, g2, + pi_forward, pi_backward) + + print('# of operations:', n_vi, n_vr, n_vs, sod_vs, n_ei, n_er) + print('c_vi, c_vr, c_vs, c_ei, c_er:', c_vi, c_vr, c_vs, c_ei, c_er) + cost_computed = c_vi * n_vi + c_vr * n_vr + c_vs * sod_vs \ + + c_ei * n_ei + c_er * n_er + print('dis (cost computed by GED):', dis) + print('cost computed by # of operations and edit cost constants:', cost_computed) + + + +def test_get_nb_edit_operations_letter(): + """Test whether function preimage.ged.get_nb_edit_operations_letter returns + correct numbers of edit operations. The distance/cost computed by GED + should be the same as the cost computed by number of operations and edit + cost constants. 
+ """ + import sys + sys.path.insert(0, "../") + from preimage.ged import GED, get_nb_edit_operations_letter + from preimage.test_k_closest_graphs import reform_attributes + from gklearn.utils.graphfiles import loadDataset + + ds = {'dataset': '/media/ljia/DATA/research-repo/codes/others/gedlib/tests_linlin/data/collections/Letter.xml', + 'graph_dir': '/media/ljia/DATA/research-repo/codes/others/gedlib/tests_linlin/data/datasets/Letter/HIGH/'} # node/edge symb + Gn, y_all = loadDataset(ds['dataset'], extra_params=ds['graph_dir']) + + g1 = Gn[200] + g2 = Gn[1780] + reform_attributes(g1) + reform_attributes(g2) + + c_vir = 0.9 + c_eir = 1.7 + alpha = 0.75 + + edit_cost_constant = [c_vir, c_eir, alpha] + dis, pi_forward, pi_backward = GED(g1, g2, dataset='letter', lib='gedlibpy', + cost='LETTER', method='IPFP', edit_cost_constant=edit_cost_constant, + algo_options='', stabilizer=None) + n_vi, n_vr, n_vs, c_vs, n_ei, n_er = get_nb_edit_operations_letter(g1, g2, + pi_forward, pi_backward) + + print('# of operations and costs:', n_vi, n_vr, n_vs, c_vs, n_ei, n_er) + print('c_vir, c_eir, alpha:', c_vir, c_eir, alpha) + cost_computed = alpha * c_vir * (n_vi + n_vr) \ + + alpha * c_vs \ + + (1 - alpha) * c_eir * (n_ei + n_er) + print('dis (cost computed by GED):', dis) + print('cost computed by # of operations and edit cost constants:', cost_computed) + + +def test_get_nb_edit_operations(): + """Test whether function preimage.ged.get_nb_edit_operations returns correct + numbers of edit operations. The distance/cost computed by GED should be the + same as the cost computed by number of operations and edit cost constants. 
+ """ + import sys + sys.path.insert(0, "../") + from preimage.ged import GED, get_nb_edit_operations + from gklearn.utils.graphfiles import loadDataset + + ds = {'dataset': '../datasets/monoterpenoides/dataset_10+.ds', + 'graph_dir': '/media/ljia/DATA/research-repo/codes/Linlin/graphkit-learn/datasets/monoterpenoides/'} # node/edge symb + Gn, y_all = loadDataset(ds['dataset']) + + g1 = Gn[20] + g2 = Gn[108] + + c_vi = 3 + c_vr = 3 + c_vs = 1 + c_ei = 3 + c_er = 3 + c_es = 1 + + edit_cost_constant = [c_vi, c_vr, c_vs, c_ei, c_er, c_es] + dis, pi_forward, pi_backward = GED(g1, g2, dataset='monoterpenoides', lib='gedlibpy', + cost='CONSTANT', method='IPFP', edit_cost_constant=edit_cost_constant, + algo_options='', stabilizer=None) + n_vi, n_vr, n_vs, n_ei, n_er, n_es = get_nb_edit_operations(g1, g2, + pi_forward, pi_backward) + + print('# of operations and costs:', n_vi, n_vr, n_vs, n_ei, n_er, n_es) + print('edit costs:', c_vi, c_vr, c_vs, c_ei, c_er, c_es) + cost_computed = n_vi * c_vi + n_vr * c_vr + n_vs * c_vs \ + + n_ei * c_ei + n_er * c_er + n_es * c_es + print('dis (cost computed by GED):', dis) + print('cost computed by # of operations and edit cost constants:', cost_computed) + + +def test_ged_python_bash_cpp(): + """Test ged computation with python invoking the c++ code by bash command (with updated library). 
+ """ + sys.path.insert(0, "../") + from gklearn.utils.graphfiles import loadDataset + from preimage.ged import GED + + data_dir_prefix = '/media/ljia/DATA/research-repo/codes/others/gedlib/tests_linlin/' +# collection_file = data_dir_prefix + 'generated_datsets/monoterpenoides/gxl/monoterpenoides.xml' + collection_file = data_dir_prefix + 'generated_datsets/monoterpenoides/monoterpenoides_3_20.xml' + graph_dir = data_dir_prefix +'generated_datsets/monoterpenoides/gxl/' + + Gn, y = loadDataset(collection_file, extra_params=graph_dir) + + algo_options = '--threads 8 --initial-solutions 40 --ratio-runs-from-initial-solutions 1' + + for repeat in range(0, 3): + # Generate the result file. + ged_filename = data_dir_prefix + 'output/test_ged/ged_mat_python_bash_' + str(repeat) + '_init40.3_20.txt' +# runtime_filename = data_dir_prefix + 'output/test_ged/runtime_mat_python_min_' + str(repeat) + '.txt' + + ged_file = open(ged_filename, 'a') +# runtime_file = open(runtime_filename, 'a') + + ged_mat = np.empty((len(Gn), len(Gn))) +# runtime_mat = np.empty((len(Gn), len(Gn))) + + for i in tqdm(range(len(Gn)), desc='computing GEDs', file=sys.stdout): + for j in range(len(Gn)): + print(i, j) + g1 = Gn[i] + g2 = Gn[j] + upper_bound, _, _ = GED(g1, g2, lib='gedlib-bash', cost='CONSTANT', + method='IPFP', + edit_cost_constant=[3.0, 3.0, 1.0, 3.0, 3.0, 1.0], + algo_options=algo_options) +# runtime = gedlibpy.get_runtime(g1, g2) + ged_mat[i][j] = upper_bound +# runtime_mat[i][j] = runtime + + # Write to files. + ged_file.write(str(int(upper_bound)) + ' ') +# runtime_file.write(str(runtime) + ' ') + + ged_file.write('\n') +# runtime_file.write('\n') + + ged_file.close() +# runtime_file.close() + + print('ged_mat') + print(ged_mat) +# print('runtime_mat:') +# print(runtime_mat) + + return + + + +def test_ged_best_settings_updated(): + """Test ged computation with best settings the same as in the C++ code (with updated library). 
+ """ + + data_dir_prefix = '/media/ljia/DATA/research-repo/codes/others/gedlib/tests_linlin/' + collection_file = data_dir_prefix + 'generated_datsets/monoterpenoides/gxl/monoterpenoides.xml' +# collection_file = data_dir_prefix + 'generated_datsets/monoterpenoides/monoterpenoides_3_20.xml' + + graph_dir = data_dir_prefix +'generated_datsets/monoterpenoides/gxl/' + + algo_options = '--threads 8 --initial-solutions 40 --ratio-runs-from-initial-solutions 1' + + for repeat in range(0, 3): + # Generate the result file. + ged_filename = data_dir_prefix + 'output/test_ged/ged_mat_python_updated_' + str(repeat) + '_init40.txt' + runtime_filename = data_dir_prefix + 'output/test_ged/runtime_mat_python_updated_' + str(repeat) + '_init40.txt' + + gedlibpy.restart_env() + gedlibpy.load_GXL_graphs(graph_dir, collection_file) + listID = gedlibpy.get_all_graph_ids() + gedlibpy.set_edit_cost('CONSTANT', [3.0, 3.0, 1.0, 3.0, 3.0, 1.0]) + gedlibpy.init() + gedlibpy.set_method("IPFP", algo_options) + gedlibpy.init_method() + + ged_mat = np.empty((len(listID), len(listID))) + runtime_mat = np.empty((len(listID), len(listID))) + + for i in tqdm(range(len(listID)), desc='computing GEDs', file=sys.stdout): + ged_file = open(ged_filename, 'a') + runtime_file = open(runtime_filename, 'a') + + for j in range(len(listID)): + g1 = listID[i] + g2 = listID[j] + gedlibpy.run_method(g1, g2) + upper_bound = gedlibpy.get_upper_bound(g1, g2) + runtime = gedlibpy.get_runtime(g1, g2) + ged_mat[i][j] = upper_bound + runtime_mat[i][j] = runtime + + # Write to files. + ged_file.write(str(int(upper_bound)) + ' ') + runtime_file.write(str(runtime) + ' ') + + ged_file.write('\n') + runtime_file.write('\n') + + ged_file.close() + runtime_file.close() + + print('ged_mat') + print(ged_mat) + print('runtime_mat:') + print(runtime_mat) + + return + + +def test_ged_best_settings(): + """Test ged computation with best settings the same as in the C++ code. 
+ """ + + data_dir_prefix = '/media/ljia/DATA/research-repo/codes/others/gedlib/tests_linlin/' + collection_file = data_dir_prefix + 'generated_datsets/monoterpenoides/gxl/monoterpenoides.xml' + graph_dir = data_dir_prefix +'generated_datsets/monoterpenoides/gxl/' + + algo_options = '--threads 6 --initial-solutions 10 --ratio-runs-from-initial-solutions .5' + + for repeat in range(0, 3): + # Generate the result file. + ged_filename = data_dir_prefix + 'output/test_ged/ged_mat_python_best_settings_' + str(repeat) + '.txt' + runtime_filename = data_dir_prefix + 'output/test_ged/runtime_mat_python_best_settings_' + str(repeat) + '.txt' + + ged_file = open(ged_filename, 'a') + runtime_file = open(runtime_filename, 'a') + + gedlibpy.restart_env() + gedlibpy.load_GXL_graphs(graph_dir, collection_file) + listID = gedlibpy.get_all_graph_ids() + gedlibpy.set_edit_cost('CONSTANT', [3.0, 3.0, 1.0, 3.0, 3.0, 1.0]) + gedlibpy.init() + gedlibpy.set_method("IPFP", algo_options) + gedlibpy.init_method() + + ged_mat = np.empty((len(listID), len(listID))) + runtime_mat = np.empty((len(listID), len(listID))) + + for i in tqdm(range(len(listID)), desc='computing GEDs', file=sys.stdout): + for j in range(len(listID)): + g1 = listID[i] + g2 = listID[j] + gedlibpy.run_method(g1, g2) + upper_bound = gedlibpy.get_upper_bound(g1, g2) + runtime = gedlibpy.get_runtime(g1, g2) + ged_mat[i][j] = upper_bound + runtime_mat[i][j] = runtime + + # Write to files. + ged_file.write(str(int(upper_bound)) + ' ') + runtime_file.write(str(runtime) + ' ') + + ged_file.write('\n') + runtime_file.write('\n') + + ged_file.close() + runtime_file.close() + + print('ged_mat') + print(ged_mat) + print('runtime_mat:') + print(runtime_mat) + + return + + + +def test_ged_default(): + """Test ged computation with default settings. 
+ """ + + data_dir_prefix = '/media/ljia/DATA/research-repo/codes/others/gedlib/tests_linlin/' + collection_file = data_dir_prefix + 'generated_datsets/monoterpenoides/gxl/monoterpenoides.xml' + graph_dir = data_dir_prefix +'generated_datsets/monoterpenoides/gxl/' + + for repeat in range(3): + # Generate the result file. + ged_filename = data_dir_prefix + 'output/test_ged/ged_mat_python_default_' + str(repeat) + '.txt' + runtime_filename = data_dir_prefix + 'output/test_ged/runtime_mat_python_default_' + str(repeat) + '.txt' + + ged_file = open(ged_filename, 'a') + runtime_file = open(runtime_filename, 'a') + + gedlibpy.restart_env() + gedlibpy.load_GXL_graphs(graph_dir, collection_file) + listID = gedlibpy.get_all_graph_ids() + gedlibpy.set_edit_cost('CONSTANT', [3.0, 3.0, 1.0, 3.0, 3.0, 1.0]) + gedlibpy.init() + gedlibpy.set_method("IPFP", "") + gedlibpy.init_method() + + ged_mat = np.empty((len(listID), len(listID))) + runtime_mat = np.empty((len(listID), len(listID))) + + for i in tqdm(range(len(listID)), desc='computing GEDs', file=sys.stdout): + for j in range(len(listID)): + g1 = listID[i] + g2 = listID[j] + gedlibpy.run_method(g1, g2) + upper_bound = gedlibpy.get_upper_bound(g1, g2) + runtime = gedlibpy.get_runtime(g1, g2) + ged_mat[i][j] = upper_bound + runtime_mat[i][j] = runtime + + # Write to files. + ged_file.write(str(int(upper_bound)) + ' ') + runtime_file.write(str(runtime) + ' ') + + ged_file.write('\n') + runtime_file.write('\n') + + ged_file.close() + runtime_file.close() + + print('ged_mat') + print(ged_mat) + print('runtime_mat:') + print(runtime_mat) + + return + + +def test_ged_min(): + """Test ged computation with the "min" stabilizer. 
+ """ + sys.path.insert(0, "../") + from gklearn.utils.graphfiles import loadDataset + from preimage.ged import GED + + data_dir_prefix = '/media/ljia/DATA/research-repo/codes/others/gedlib/tests_linlin/' + collection_file = data_dir_prefix + 'generated_datsets/monoterpenoides/gxl/monoterpenoides.xml' + graph_dir = data_dir_prefix +'generated_datsets/monoterpenoides/gxl/' + + Gn, y = loadDataset(collection_file, extra_params=graph_dir) + +# algo_options = '--threads 6 --initial-solutions 10 --ratio-runs-from-initial-solutions .5' + + for repeat in range(0, 3): + # Generate the result file. + ged_filename = data_dir_prefix + 'output/test_ged/ged_mat_python_min_' + str(repeat) + '.txt' +# runtime_filename = data_dir_prefix + 'output/test_ged/runtime_mat_python_min_' + str(repeat) + '.txt' + + ged_file = open(ged_filename, 'a') +# runtime_file = open(runtime_filename, 'a') + + ged_mat = np.empty((len(Gn), len(Gn))) +# runtime_mat = np.empty((len(Gn), len(Gn))) + + for i in tqdm(range(len(Gn)), desc='computing GEDs', file=sys.stdout): + for j in range(len(Gn)): + g1 = Gn[i] + g2 = Gn[j] + upper_bound, _, _ = GED(g1, g2, lib='gedlibpy', cost='CONSTANT', + method='IPFP', + edit_cost_constant=[3.0, 3.0, 1.0, 3.0, 3.0, 1.0], + stabilizer='min', repeat=10) +# runtime = gedlibpy.get_runtime(g1, g2) + ged_mat[i][j] = upper_bound +# runtime_mat[i][j] = runtime + + # Write to files. 
+ ged_file.write(str(int(upper_bound)) + ' ') +# runtime_file.write(str(runtime) + ' ') + + ged_file.write('\n') +# runtime_file.write('\n') + + ged_file.close() +# runtime_file.close() + + print('ged_mat') + print(ged_mat) +# print('runtime_mat:') +# print(runtime_mat) + + return + + +def init() : + print("List of Edit Cost Options : ") + for i in gedlibpy.list_of_edit_cost_options : + print (i) + print("") + + print("List of Method Options : ") + for j in gedlibpy.list_of_method_options : + print (j) + print("") + + print("List of Init Options : ") + for k in gedlibpy.list_of_init_options : + print (k) + print("") + + + + +def convertGraph(G): + G_new = nx.Graph() + for nd, attrs in G.nodes(data=True): + G_new.add_node(str(nd), chem=attrs['atom']) + for nd1, nd2, attrs in G.edges(data=True): + G_new.add_edge(str(nd1), str(nd2), valence=attrs['bond_type']) + + return G_new + + +def testNxGrapĥ(): + import sys + sys.path.insert(0, "../") + from gklearn.utils.graphfiles import loadDataset + ds = {'name': 'MUTAG', 'dataset': '../datasets/MUTAG/MUTAG_A.txt', + 'extra_params': {}} # node/edge symb + Gn, y_all = loadDataset(ds['dataset'], extra_params=ds['extra_params']) + + gedlibpy.restart_env() + for graph in Gn: + g_new = convertGraph(graph) + gedlibpy.add_nx_graph(g_new, "") + + listID = gedlibpy.get_all_graph_ids() + gedlibpy.set_edit_cost("CHEM_1") + gedlibpy.init() + gedlibpy.set_method("IPFP", "") + gedlibpy.init_method() + + print(listID) + g = listID[0] + h = listID[1] + + gedlibpy.run_method(g, h) + + print("Node Map : ", gedlibpy.get_node_map(g, h)) + print("Forward map : " , gedlibpy.get_forward_map(g, h), ", Backward map : ", gedlibpy.get_backward_map(g, h)) + print ("Upper Bound = " + str(gedlibpy.get_upper_bound(g, h)) + ", Lower Bound = " + str(gedlibpy.get_lower_bound(g, h)) + ", Runtime = " + str(gedlibpy.get_runtime(g, h))) + +if __name__ == '__main__': +# test_ged_default() +# test_ged_min() +# test_ged_best_settings() +# 
test_ged_best_settings_updated() +# test_ged_python_bash_cpp() +# test_get_nb_edit_operations() +# test_get_nb_edit_operations_letter() +# test_LETTER2_cost() + test_NON_SYMBOLIC_cost() + + + #init() + #testNxGrapĥ() diff --git a/preimage/test_iam.py b/gklearn/preimage/test_iam.py similarity index 99% rename from preimage/test_iam.py rename to gklearn/preimage/test_iam.py index 5d286cc..81f4c98 100644 --- a/preimage/test_iam.py +++ b/gklearn/preimage/test_iam.py @@ -16,8 +16,8 @@ import random #import os import sys sys.path.insert(0, "../") -from pygraph.utils.graphfiles import loadDataset -#from pygraph.utils.logger2file import * +from gklearn.utils.graphfiles import loadDataset +#from gklearn.utils.logger2file import * from iam import iam_upgraded from utils import remove_edges, compute_kernel, get_same_item_indices, dis_gstar #from ged import ged_median diff --git a/preimage/test_k_closest_graphs.py b/gklearn/preimage/test_k_closest_graphs.py similarity index 62% rename from preimage/test_k_closest_graphs.py rename to gklearn/preimage/test_k_closest_graphs.py index 39301aa..4cf1afa 100644 --- a/preimage/test_k_closest_graphs.py +++ b/gklearn/preimage/test_k_closest_graphs.py @@ -20,123 +20,143 @@ from functools import partial #import os import sys sys.path.insert(0, "../") -from pygraph.utils.graphfiles import loadDataset, loadGXL -#from pygraph.utils.logger2file import * +from gklearn.utils.graphfiles import loadDataset, loadGXL +#from gklearn.utils.logger2file import * from iam import iam_upgraded, iam_bash from utils import compute_kernel, dis_gstar, kernel_distance_matrix from fitDistance import fit_GED_to_kernel_distance #from ged import ged_median -def median_on_k_closest_graphs(Gn, node_label, edge_label, gkernel, k, fit_method, - graph_dir='/media/ljia/DATA/research-repo/codes/Linlin/py-graph/datasets/monoterpenoides/', - edit_costs=None, group_min=None, dataset='monoterpenoides', - cost='CONSTANT', parallel=True): - dataset = dataset.lower() - -# # 
compute distances in kernel space. -# dis_mat, _, _, _ = kernel_distance_matrix(Gn, node_label, edge_label, -# Kmatrix=None, gkernel=gkernel) -# # ged. -# gmfile = np.load('results/test_k_closest_graphs/ged_mat.fit_on_whole_dataset.with_medians.gm.npz') -# ged_mat = gmfile['ged_mat'] -# dis_mat = ged_mat[0:len(Gn), 0:len(Gn)] - -# # choose k closest graphs -# time0 = time.time() -# sod_ks_min, group_min = get_closest_k_graphs(dis_mat, k, parallel) -# time_spent = time.time() - time0 -# print('closest graphs:', sod_ks_min, group_min) -# print('time spent:', time_spent) -# group_min = (12, 13, 22, 29) # closest w.r.t path kernel -# group_min = (77, 85, 160, 171) # closest w.r.t ged -# group_min = (0,1,2,3,4,5,6,7,8,9,10,11) # closest w.r.t treelet kernel - Gn_median = [Gn[g].copy() for g in group_min] - - - # fit edit costs. +def fit_edit_cost_constants(fit_method, edit_cost_name, + edit_cost_constants=None, initial_solutions=1, + Gn_median=None, node_label=None, edge_label=None, + gkernel=None, dataset=None, + Gn=None, Kmatrix_median=None): + """fit edit cost constants. 
+ """ if fit_method == 'random': # random - if cost == 'LETTER': - edit_cost_constant = random.sample(range(1, 10), 3) - edit_cost_constant = [item * 0.1 for item in edit_cost_constant] - elif cost == 'LETTER2': + if edit_cost_name == 'LETTER': + edit_cost_constants = random.sample(range(1, 10), 3) + edit_cost_constants = [item * 0.1 for item in edit_cost_constants] + elif edit_cost_name == 'LETTER2': random.seed(time.time()) - edit_cost_constant = random.sample(range(1, 10), 5) -# edit_cost_constant = [item * 0.1 for item in edit_cost_constant] + edit_cost_constants = random.sample(range(1, 10), 5) +# edit_cost_constants = [item * 0.1 for item in edit_cost_constants] + elif edit_cost_name == 'NON_SYMBOLIC': + edit_cost_constants = random.sample(range(1, 10), 6) + if Gn_median[0].graph['node_attrs'] == []: + edit_cost_constants[2] = 0 + if Gn_median[0].graph['edge_attrs'] == []: + edit_cost_constants[5] = 0 else: - edit_cost_constant = random.sample(range(1, 10), 6) - print('edit costs used:', edit_cost_constant) + edit_cost_constants = random.sample(range(1, 10), 6) + print('edit cost constants used:', edit_cost_constants) elif fit_method == 'expert': # expert - edit_cost_constant = [3, 3, 1, 3, 3, 1] + if edit_cost_name == 'LETTER': + edit_cost_constants = [0.9, 1.7, 0.75] + elif edit_cost_name == 'LETTER2': + edit_cost_constants = [0.675, 0.675, 0.75, 0.425, 0.425] + else: + edit_cost_constants = [3, 3, 1, 3, 3, 1] elif fit_method == 'k-graphs': itr_max = 6 - if cost == 'LETTER': + if edit_cost_name == 'LETTER': init_costs = [0.9, 1.7, 0.75] - elif cost == 'LETTER2': + elif edit_cost_name == 'LETTER2': init_costs = [0.675, 0.675, 0.75, 0.425, 0.425] + elif edit_cost_name == 'NON_SYMBOLIC': + init_costs = [0, 0, 1, 1, 1, 0] + if Gn_median[0].graph['node_attrs'] == []: + init_costs[2] = 0 + if Gn_median[0].graph['edge_attrs'] == []: + init_costs[5] = 0 else: init_costs = [3, 3, 1, 3, 3, 1] - algo_options = '--threads 1 --initial-solutions 40 
--ratio-runs-from-initial-solutions 1' - params_ged = {'lib': 'gedlibpy', 'cost': cost, 'method': 'IPFP', + algo_options = '--threads 1 --initial-solutions ' \ + + str(initial_solutions) + ' --ratio-runs-from-initial-solutions 1' + params_ged = {'lib': 'gedlibpy', 'cost': edit_cost_name, 'method': 'IPFP', 'algo_options': algo_options, 'stabilizer': None} # fit on k-graph subset - edit_cost_constant, _, _, _, _, _, _ = fit_GED_to_kernel_distance(Gn_median, + edit_cost_constants, _, _, _, _, _, _ = fit_GED_to_kernel_distance(Gn_median, node_label, edge_label, gkernel, itr_max, params_ged=params_ged, - init_costs=init_costs, dataset=dataset, parallel=True) + init_costs=init_costs, dataset=dataset, Kmatrix=Kmatrix_median, + parallel=True) elif fit_method == 'whole-dataset': itr_max = 6 - if cost == 'LETTER': + if edit_cost_name == 'LETTER': init_costs = [0.9, 1.7, 0.75] - elif cost == 'LETTER2': + elif edit_cost_name == 'LETTER2': init_costs = [0.675, 0.675, 0.75, 0.425, 0.425] else: init_costs = [3, 3, 1, 3, 3, 1] - algo_options = '--threads 1 --initial-solutions 40 --ratio-runs-from-initial-solutions 1' - params_ged = {'lib': 'gedlibpy', 'cost': cost, 'method': 'IPFP', + algo_options = '--threads 1 --initial-solutions ' \ + + str(initial_solutions) + ' --ratio-runs-from-initial-solutions 1' + params_ged = {'lib': 'gedlibpy', 'cost': edit_cost_name, 'method': 'IPFP', 'algo_options': algo_options, 'stabilizer': None} # fit on all subset - edit_cost_constant, _, _, _, _, _, _ = fit_GED_to_kernel_distance(Gn, + edit_cost_constants, _, _, _, _, _, _ = fit_GED_to_kernel_distance(Gn, node_label, edge_label, gkernel, itr_max, params_ged=params_ged, init_costs=init_costs, dataset=dataset, parallel=True) elif fit_method == 'precomputed': - edit_cost_constant = edit_costs - - - # compute set median and gen median using IAM (C++ through bash). 
- group_fnames = [Gn[g].graph['filename'] for g in group_min] - sod_sm, sod_gm, fname_sm, fname_gm = iam_bash(group_fnames, edit_cost_constant, - cost=cost, graph_dir=graph_dir, - dataset=dataset) + pass - - # compute distances in kernel space. - Gn_median = [Gn[g].copy() for g in group_min] + return edit_cost_constants + + +def compute_distances_to_true_median(Gn_median, fname_sm, fname_gm, + gkernel, edit_cost_name, + Kmatrix_median=None): + # reform graphs. set_median = loadGXL(fname_sm) gen_median = loadGXL(fname_gm) # print(gen_median.nodes(data=True)) # print(gen_median.edges(data=True)) - if dataset == 'letter': - for g in Gn_median: - reform_attributes(g) - reform_attributes(set_median) - reform_attributes(gen_median) + if edit_cost_name == 'LETTER' or edit_cost_name == 'LETTER2' or edit_cost_name == 'NON_SYMBOLIC': +# dataset == 'Fingerprint': +# for g in Gn_median: +# reform_attributes(g) + reform_attributes(set_median, Gn_median[0].graph['node_attrs'], + Gn_median[0].graph['edge_attrs']) + reform_attributes(gen_median, Gn_median[0].graph['node_attrs'], + Gn_median[0].graph['edge_attrs']) - # compute distance in kernel space for set median. - Kmatrix_sm = compute_kernel([set_median] + Gn_median, gkernel, - None if dataset == 'letter' else 'chem', - None if dataset == 'letter' else 'valence', - False) + if edit_cost_name == 'LETTER' or edit_cost_name == 'LETTER2' or edit_cost_name == 'NON_SYMBOLIC': + node_label = None + edge_label = None + else: + node_label = 'chem' + edge_label = 'valence' + + # compute Gram matrix for median set. + if Kmatrix_median is None: + Kmatrix_median = compute_kernel(Gn_median, gkernel, node_label, edge_label, False) + + # compute distance in kernel space for set median. 
+ kernel_sm = [] + for G_median in Gn_median: + km_tmp = compute_kernel([set_median, G_median], gkernel, node_label, edge_label, False) + kernel_sm.append(km_tmp[0, 1]) + Kmatrix_sm = np.concatenate((np.array([kernel_sm]), np.copy(Kmatrix_median)), axis=0) + Kmatrix_sm = np.concatenate((np.array([[km_tmp[0, 0]] + kernel_sm]).T, Kmatrix_sm), axis=1) +# Kmatrix_sm = compute_kernel([set_median] + Gn_median, gkernel, +# node_label, edge_label, False) dis_k_sm = dis_gstar(0, range(1, 1+len(Gn_median)), [1 / len(Gn_median)] * len(Gn_median), Kmatrix_sm, withterm3=False) # print(gen_median.nodes(data=True)) # print(gen_median.edges(data=True)) # print(set_median.nodes(data=True)) # print(set_median.edges(data=True)) + # compute distance in kernel space for generalized median. - Kmatrix_gm = compute_kernel([gen_median] + Gn_median, gkernel, - None if dataset == 'letter' else 'chem', - None if dataset == 'letter' else 'valence', - False) + kernel_gm = [] + for G_median in Gn_median: + km_tmp = compute_kernel([gen_median, G_median], gkernel, node_label, edge_label, False) + kernel_gm.append(km_tmp[0, 1]) + Kmatrix_gm = np.concatenate((np.array([kernel_gm]), np.copy(Kmatrix_median)), axis=0) + Kmatrix_gm = np.concatenate((np.array([[km_tmp[0, 0]] + kernel_gm]).T, Kmatrix_gm), axis=1) +# Kmatrix_gm = compute_kernel([gen_median] + Gn_median, gkernel, +# node_label, edge_label, False) dis_k_gm = dis_gstar(0, range(1, 1+len(Gn_median)), [1 / len(Gn_median)] * len(Gn_median), Kmatrix_gm, withterm3=False) @@ -145,23 +165,93 @@ def median_on_k_closest_graphs(Gn, node_label, edge_label, gkernel, k, fit_metho for idx in range(len(Gn_median)): dis_k_gi.append(dis_gstar(idx+1, range(1, 1+len(Gn_median)), [1 / len(Gn_median)] * len(Gn_median), Kmatrix_gm, withterm3=False)) - - print('sod_sm:', sod_sm) - print('sod_gm:', sod_gm) + print('dis_k_sm:', dis_k_sm) print('dis_k_gm:', dis_k_gm) print('dis_k_gi:', dis_k_gi) idx_dis_k_gi_min = np.argmin(dis_k_gi) dis_k_gi_min = 
dis_k_gi[idx_dis_k_gi_min] - print('index min dis_k_gi:', group_min[idx_dis_k_gi_min]) print('min dis_k_gi:', dis_k_gi_min) - return sod_sm, sod_gm, dis_k_sm, dis_k_gm, dis_k_gi, dis_k_gi_min, group_min[idx_dis_k_gi_min] + return dis_k_sm, dis_k_gm, dis_k_gi, dis_k_gi_min, idx_dis_k_gi_min + + +def median_on_k_closest_graphs(Gn, node_label, edge_label, gkernel, k, fit_method, + graph_dir=None, initial_solutions=1, + edit_cost_constants=None, group_min=None, + dataset=None, edit_cost_name=None, + Kmatrix=None, parallel=True): +# dataset = dataset.lower() + +# # compute distances in kernel space. +# dis_mat, _, _, _ = kernel_distance_matrix(Gn, node_label, edge_label, +# Kmatrix=None, gkernel=gkernel) +# # ged. +# gmfile = np.load('results/test_k_closest_graphs/ged_mat.fit_on_whole_dataset.with_medians.gm.npz') +# ged_mat = gmfile['ged_mat'] +# dis_mat = ged_mat[0:len(Gn), 0:len(Gn)] + +# # choose k closest graphs +# time0 = time.time() +# sod_ks_min, group_min = get_closest_k_graphs(dis_mat, k, parallel) +# time_spent = time.time() - time0 +# print('closest graphs:', sod_ks_min, group_min) +# print('time spent:', time_spent) +# group_min = (12, 13, 22, 29) # closest w.r.t path kernel +# group_min = (77, 85, 160, 171) # closest w.r.t ged +# group_min = (0,1,2,3,4,5,6,7,8,9,10,11) # closest w.r.t treelet kernel + Gn_median = [Gn[g].copy() for g in group_min] + if Kmatrix is not None: + Kmatrix_median = np.copy(Kmatrix[group_min,:]) + Kmatrix_median = Kmatrix_median[:,group_min] + + + # 1. fit edit cost constants. + time0 = time.time() + edit_cost_constants = fit_edit_cost_constants(fit_method, edit_cost_name, + edit_cost_constants=edit_cost_constants, initial_solutions=initial_solutions, + Gn_median=Gn_median, node_label=node_label, edge_label=edge_label, + gkernel=gkernel, dataset=dataset, + Gn=Gn, Kmatrix_median=Kmatrix_median) + time_fitting = time.time() - time0 + + + # 2. compute set median and gen median using IAM (C++ through bash). 
+ print('\nstart computing set median and gen median using IAM (C++ through bash)...\n') + group_fnames = [Gn[g].graph['filename'] for g in group_min] + time0 = time.time() + sod_sm, sod_gm, fname_sm, fname_gm = iam_bash(group_fnames, edit_cost_constants, + cost=edit_cost_name, initial_solutions=initial_solutions, + graph_dir=graph_dir, dataset=dataset) + time_generating = time.time() - time0 + print('\nmedians computed.\n') + + + # 3. compute distances to real median. + print('\nstart computing distances to true median....\n') + Gn_median = [Gn[g].copy() for g in group_min] + dis_k_sm, dis_k_gm, dis_k_gi, dis_k_gi_min, idx_dis_k_gi_min = \ + compute_distances_to_true_median(Gn_median, fname_sm, fname_gm, + gkernel, edit_cost_name, + Kmatrix_median=Kmatrix_median) + idx_dis_k_gi_min = group_min[idx_dis_k_gi_min] + print('index min dis_k_gi:', idx_dis_k_gi_min) + print('sod_sm:', sod_sm) + print('sod_gm:', sod_gm) + + # collect return values. + return (sod_sm, sod_gm), \ + (dis_k_sm, dis_k_gm, dis_k_gi, dis_k_gi_min, idx_dis_k_gi_min), \ + (time_fitting, time_generating) -def reform_attributes(G): - for node in G.nodes: - G.nodes[node]['attributes'] = [G.nodes[node]['x'], G.nodes[node]['y']] +def reform_attributes(G, na_names=[], ea_names=[]): + if not na_names == []: + for node in G.nodes: + G.nodes[node]['attributes'] = [G.node[node][a_name] for a_name in na_names] + if not ea_names == []: + for edge in G.edges: + G.edges[edge]['attributes'] = [G.edge[edge][a_name] for a_name in ea_names] def get_closest_k_graphs(dis_mat, k, parallel): diff --git a/preimage/test_others.py b/gklearn/preimage/test_others.py similarity index 99% rename from preimage/test_others.py rename to gklearn/preimage/test_others.py index 24dc4c5..3b6a5bd 100644 --- a/preimage/test_others.py +++ b/gklearn/preimage/test_others.py @@ -13,7 +13,7 @@ from tqdm import tqdm import sys sys.path.insert(0, "../") -from pygraph.utils.graphfiles import loadDataset +from gklearn.utils.graphfiles import 
loadDataset from median import draw_Letter_graph from ged import GED, ged_median from utils import get_same_item_indices, compute_kernel, gram2distances, \ diff --git a/preimage/test_preimage_iam.py b/gklearn/preimage/test_preimage_iam.py similarity index 99% rename from preimage/test_preimage_iam.py rename to gklearn/preimage/test_preimage_iam.py index 34973fd..554d154 100644 --- a/preimage/test_preimage_iam.py +++ b/gklearn/preimage/test_preimage_iam.py @@ -16,7 +16,7 @@ import random #import os import sys sys.path.insert(0, "../") -from pygraph.utils.graphfiles import loadDataset +from gklearn.utils.graphfiles import loadDataset from utils import remove_edges, compute_kernel, get_same_item_indices from ged import ged_median diff --git a/preimage/test_preimage_mix.py b/gklearn/preimage/test_preimage_mix.py similarity index 99% rename from preimage/test_preimage_mix.py rename to gklearn/preimage/test_preimage_mix.py index ab6f8b4..9f00a37 100644 --- a/preimage/test_preimage_mix.py +++ b/gklearn/preimage/test_preimage_mix.py @@ -16,7 +16,7 @@ import random #import os import sys sys.path.insert(0, "../") -from pygraph.utils.graphfiles import loadDataset +from gklearn.utils.graphfiles import loadDataset from ged import ged_median from utils import compute_kernel, get_same_item_indices, remove_edges from preimage_iam import preimage_iam_random_mix diff --git a/preimage/test_preimage_random.py b/gklearn/preimage/test_preimage_random.py similarity index 99% rename from preimage/test_preimage_random.py rename to gklearn/preimage/test_preimage_random.py index 53d991b..b1d262a 100644 --- a/preimage/test_preimage_random.py +++ b/gklearn/preimage/test_preimage_random.py @@ -16,7 +16,7 @@ import random #import os import sys sys.path.insert(0, "../") -from pygraph.utils.graphfiles import loadDataset +from gklearn.utils.graphfiles import loadDataset from preimage_random import preimage_random from ged import ged_median diff --git a/preimage/utils.py b/gklearn/preimage/utils.py 
similarity index 81% rename from preimage/utils.py rename to gklearn/preimage/utils.py index ed6959e..567bc78 100644 --- a/preimage/utils.py +++ b/gklearn/preimage/utils.py @@ -13,14 +13,14 @@ import numpy as np import sys sys.path.insert(0, "../") -from pygraph.kernels.marginalizedKernel import marginalizedkernel -from pygraph.kernels.untilHPathKernel import untilhpathkernel -from pygraph.kernels.spKernel import spkernel +from gklearn.kernels.marginalizedKernel import marginalizedkernel +from gklearn.kernels.untilHPathKernel import untilhpathkernel +from gklearn.kernels.spKernel import spkernel import functools -from pygraph.utils.kernels import deltakernel, gaussiankernel, kernelproduct, polynomialkernel -from pygraph.kernels.structuralspKernel import structuralspkernel -from pygraph.kernels.treeletKernel import treeletkernel -from pygraph.kernels.weisfeilerLehmanKernel import weisfeilerlehmankernel +from gklearn.utils.kernels import deltakernel, gaussiankernel, kernelproduct, polynomialkernel +from gklearn.kernels.structuralspKernel import structuralspkernel +from gklearn.kernels.treeletKernel import treeletkernel +from gklearn.kernels.weisfeilerLehmanKernel import weisfeilerlehmankernel def remove_edges(Gn): @@ -52,14 +52,22 @@ def compute_kernel(Gn, graph_kernel, node_label, edge_label, verbose): n_jobs=multiprocessing.cpu_count(), verbose=verbose) elif graph_kernel == 'spkernel': mixkernel = functools.partial(kernelproduct, deltakernel, gaussiankernel) - Kmatrix, _, _ = spkernel(Gn, node_label=node_label, node_kernels= + Kmatrix = np.empty((len(Gn), len(Gn))) +# Kmatrix[:] = np.nan + Kmatrix, _, idx = spkernel(Gn, node_label=node_label, node_kernels= {'symb': deltakernel, 'nsymb': gaussiankernel, 'mix': mixkernel}, n_jobs=multiprocessing.cpu_count(), verbose=verbose) +# for i, row in enumerate(idx): +# for j, col in enumerate(idx): +# Kmatrix[row, col] = Kmatrix_tmp[i, j] elif graph_kernel == 'structuralspkernel': mixkernel = functools.partial(kernelproduct, 
deltakernel, gaussiankernel) - Kmatrix, _ = structuralspkernel(Gn, node_label=node_label, node_kernels= - {'symb': deltakernel, 'nsymb': gaussiankernel, 'mix': mixkernel}, - n_jobs=multiprocessing.cpu_count(), verbose=verbose) + sub_kernels = {'symb': deltakernel, 'nsymb': gaussiankernel, 'mix': mixkernel} + Kmatrix, _ = structuralspkernel(Gn, node_label=node_label, + edge_label=edge_label, node_kernels=sub_kernels, + edge_kernels=sub_kernels, + parallel=None, n_jobs=multiprocessing.cpu_count(), + verbose=verbose) elif graph_kernel == 'treeletkernel': pkernel = functools.partial(polynomialkernel, d=2, c=1e5) # pkernel = functools.partial(gaussiankernel, gamma=1e-6) @@ -90,10 +98,11 @@ def gram2distances(Kmatrix): return dmatrix -def kernel_distance_matrix(Gn, node_label, edge_label, Kmatrix=None, gkernel=None): +def kernel_distance_matrix(Gn, node_label, edge_label, Kmatrix=None, + gkernel=None, verbose=True): dis_mat = np.empty((len(Gn), len(Gn))) if Kmatrix is None: - Kmatrix = compute_kernel(Gn, gkernel, node_label, edge_label, True) + Kmatrix = compute_kernel(Gn, gkernel, node_label, edge_label, verbose) for i in range(len(Gn)): for j in range(i, len(Gn)): dis = Kmatrix[i, i] + Kmatrix[j, j] - 2 * Kmatrix[i, j] diff --git a/gklearn/preimage/visualization.py b/gklearn/preimage/visualization.py new file mode 100644 index 0000000..ff28dc3 --- /dev/null +++ b/gklearn/preimage/visualization.py @@ -0,0 +1,586 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Thu Dec 19 17:16:23 2019 + +@author: ljia +""" +import numpy as np +from sklearn.manifold import TSNE, Isomap +import matplotlib.pyplot as plt +from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes, mark_inset +from tqdm import tqdm + + +import sys +sys.path.insert(0, "../") +from gklearn.utils.graphfiles import loadDataset, loadGXL +from utils import kernel_distance_matrix, compute_kernel, dis_gstar, get_same_item_indices + + +def visualize_graph_dataset(dis_measure, 
visual_method, draw_figure, + draw_params={}, dis_mat=None, Gn=None, + median_set=None): + + + def draw_zoomed_axes(Gn_embedded, ax): + margin = 0.01 + if dis_measure == 'graph-kernel': + index = -2 + elif dis_measure == 'ged': + index = -1 + x1 = np.min(Gn_embedded[median_set + [index], 0]) - margin * np.max(Gn_embedded) + x2 = np.max(Gn_embedded[median_set + [index], 0]) + margin * np.max(Gn_embedded) + y1 = np.min(Gn_embedded[median_set + [index], 1]) - margin * np.max(Gn_embedded) + y2 = np.max(Gn_embedded[median_set + [index], 1]) + margin * np.max(Gn_embedded) + if (x1 < 0 and y1 < 0) or ((x1 > 0 and y1 > 0)): + loc = 2 + else: + loc = 3 + axins = zoomed_inset_axes(ax, 4, loc=loc) # zoom-factor: 2.5, location: upper-left + draw_figure(axins, Gn_embedded, dis_measure=dis_measure, + median_set=median_set, **draw_params) + axins.set_xlim(x1, x2) # apply the x-limits + axins.set_ylim(y1, y2) # apply the y-limits + plt.yticks(visible=False) + plt.xticks(visible=False) + loc1 = 1 if loc == 2 else 3 + mark_inset(ax, axins, loc1=2, loc2=4, fc="none", ec="0.5") + + + if dis_mat is None: + if dis_measure == 'graph-kernel': + gkernel = 'untilhpathkernel' + node_label = 'atom' + edge_label = 'bond_type' + dis_mat, _, _, _ = kernel_distance_matrix(Gn, node_label, edge_label, + Kmatrix=None, gkernel=gkernel) + elif dis_measure == 'ged': + pass + + if visual_method == 'tsne': + Gn_embedded = TSNE(n_components=2, metric='precomputed').fit_transform(dis_mat) + elif visual_method == 'isomap': + Gn_embedded = Isomap(n_components=2, metric='precomputed').fit_transform(dis_mat) + print(Gn_embedded.shape) + fig, ax = plt.subplots() + draw_figure(plt, Gn_embedded, dis_measure=dis_measure, legend=True, + median_set=median_set, **draw_params) +# draw_zoomed_axes(Gn_embedded, ax) + plt.show() + plt.clf() + + return + + +def draw_figure(ax, Gn_embedded, dis_measure=None, y_idx=None, legend=False, + median_set=None): + from matplotlib import colors as mcolors + colors = 
list(dict(mcolors.BASE_COLORS, **mcolors.CSS4_COLORS)) +# colors = ['#08306b', '#08519c', '#2171b5', '#4292c6', '#6baed6', '#9ecae1', +# '#c6dbef', '#deebf7'] +# for i, values in enumerate(y_idx.values()): +# for item in values: +## ax.scatter(Gn_embedded[item,0], Gn_embedded[item,1], c=colors[i]) # , c='b') +# ax.scatter(Gn_embedded[item,0], Gn_embedded[item,1], c='b') +# ax.scatter(Gn_embedded[:,0], Gn_embedded[:,1], c='b') + h1 = ax.scatter(Gn_embedded[median_set, 0], Gn_embedded[median_set, 1], c='b') + if dis_measure == 'graph-kernel': + h2 = ax.scatter(Gn_embedded[-1, 0], Gn_embedded[-1, 1], c='darkorchid') # \psi + h3 = ax.scatter(Gn_embedded[-2, 0], Gn_embedded[-2, 1], c='gold') # gen median + h4 = ax.scatter(Gn_embedded[-3, 0], Gn_embedded[-3, 1], c='r') #c='g', marker='+') # set median + elif dis_measure == 'ged': + h3 = ax.scatter(Gn_embedded[-1, 0], Gn_embedded[-1, 1], c='gold') # gen median + h4 = ax.scatter(Gn_embedded[-2, 0], Gn_embedded[-2, 1], c='r') #c='g', marker='+') # set median + if legend: +# fig.subplots_adjust(bottom=0.17) + if dis_measure == 'graph-kernel': + ax.legend([h1, h2, h3, h4], + ['k closest graphs', 'true median', 'gen median', 'set median']) + elif dis_measure == 'ged': + ax.legend([h1, h3, h4], ['k closest graphs', 'gen median', 'set median']) +# fig.legend(handles, labels, loc='lower center', ncol=2, frameon=False) # , ncol=5, labelspacing=0.1, handletextpad=0.4, columnspacing=0.6) +# plt.savefig('symbolic_and_non_comparison_vertical_short.eps', format='eps', dpi=300, transparent=True, +# bbox_inches='tight') +# plt.show() + + +############################################################################### + +def visualize_distances_in_kernel(): + + ds = {'name': 'monoterpenoides', + 'dataset': '../datasets/monoterpenoides/dataset_10+.ds'} # node/edge symb + Gn, y_all = loadDataset(ds['dataset']) +# Gn = Gn[0:50] + fname_medians = 'expert.treelet' + # add set median. 
+ fname_sm = '/media/ljia/DATA/research-repo/codes/Linlin/graphkit-learn/preimage/results/test_k_closest_graphs/set_median.' + fname_medians + '.gxl' + set_median = loadGXL(fname_sm) + Gn.append(set_median) + # add generalized median (estimated pre-image.) + fname_gm = '/media/ljia/DATA/research-repo/codes/Linlin/graphkit-learn/preimage/results/test_k_closest_graphs/gen_median.' + fname_medians + '.gxl' + gen_median = loadGXL(fname_gm) + Gn.append(gen_median) + + # compute distance matrix + median_set = [22, 29, 54, 74] + gkernel = 'treeletkernel' + node_label = 'atom' + edge_label = 'bond_type' + Gn_median_set = [Gn[i].copy() for i in median_set] + Kmatrix_median = compute_kernel(Gn + Gn_median_set, gkernel, node_label, + edge_label, True) + Kmatrix = Kmatrix_median[0:len(Gn), 0:len(Gn)] + dis_mat, _, _, _ = kernel_distance_matrix(Gn, node_label, edge_label, + Kmatrix=Kmatrix, gkernel=gkernel) + print('average distances: ', np.mean(np.mean(dis_mat[0:len(Gn)-2, 0:len(Gn)-2]))) + print('min distances: ', np.min(np.min(dis_mat[0:len(Gn)-2, 0:len(Gn)-2]))) + print('max distances: ', np.max(np.max(dis_mat[0:len(Gn)-2, 0:len(Gn)-2]))) + + # add distances for the image of exact median \psi. + dis_k_median_list = [] + for idx, g in enumerate(Gn): + dis_k_median_list.append(dis_gstar(idx, range(len(Gn), len(Gn) + len(Gn_median_set)), + [1 / len(Gn_median_set)] * len(Gn_median_set), + Kmatrix_median, withterm3=False)) + dis_mat_median = np.zeros((len(Gn) + 1, len(Gn) + 1)) + for i in range(len(Gn)): + for j in range(i, len(Gn)): + dis_mat_median[i, j] = dis_mat[i, j] + dis_mat_median[j, i] = dis_mat_median[i, j] + for i in range(len(Gn)): + dis_mat_median[i, -1] = dis_k_median_list[i] + dis_mat_median[-1, i] = dis_k_median_list[i] + + # get indices by classes. + y_idx = get_same_item_indices(y_all) + + # visualization. 
+# visualize_graph_dataset('graph-kernel', 'tsne', Gn) +# visualize_graph_dataset('graph-kernel', 'tsne', draw_figure, +# draw_params={'y_idx': y_idx}, dis_mat=dis_mat_median) + visualize_graph_dataset('graph-kernel', 'tsne', draw_figure, + draw_params={'y_idx': y_idx}, dis_mat=dis_mat_median, + median_set=median_set) + + +def visualize_distances_in_ged(): + from fitDistance import compute_geds + from ged import GED + ds = {'name': 'monoterpenoides', + 'dataset': '../datasets/monoterpenoides/dataset_10+.ds'} # node/edge symb + Gn, y_all = loadDataset(ds['dataset']) +# Gn = Gn[0:50] + # add set median. + fname_medians = 'expert.treelet' + fname_sm = '/media/ljia/DATA/research-repo/codes/Linlin/graphkit-learn/preimage/results/test_k_closest_graphs/set_median.' + fname_medians + '.gxl' + set_median = loadGXL(fname_sm) + Gn.append(set_median) + # add generalized median (estimated pre-image.) + fname_gm = '/media/ljia/DATA/research-repo/codes/Linlin/graphkit-learn/preimage/results/test_k_closest_graphs/gen_median.' + fname_medians + '.gxl' + gen_median = loadGXL(fname_gm) + Gn.append(gen_median) + + # compute/load ged matrix. +# # compute. +## k = 4 +## edit_costs = [0.16229209837639536, 0.06612870523413916, 0.04030113378793905, 0.20723547009415202, 0.3338607220394598, 0.27054392518077297] +# edit_costs = [3, 3, 1, 3, 3, 1] +## edit_costs = [7, 3, 5, 9, 2, 6] +# algo_options = '--threads 8 --initial-solutions 40 --ratio-runs-from-initial-solutions 1' +# params_ged = {'lib': 'gedlibpy', 'cost': 'CONSTANT', 'method': 'IPFP', +# 'algo_options': algo_options, 'stabilizer': None, +# 'edit_cost_constant': edit_costs} +# _, ged_mat, _ = compute_geds(Gn, params_ged=params_ged, parallel=True) +# np.savez('results/test_k_closest_graphs/ged_mat.' + fname_medians + '.with_medians.gm', ged_mat=ged_mat) + # load from file. + gmfile = np.load('results/test_k_closest_graphs/ged_mat.' + fname_medians + '.with_medians.gm.npz') + ged_mat = gmfile['ged_mat'] +# # change medians. 
+# edit_costs = [3, 3, 1, 3, 3, 1] +# algo_options = '--threads 8 --initial-solutions 40 --ratio-runs-from-initial-solutions 1' +# params_ged = {'lib': 'gedlibpy', 'cost': 'CONSTANT', 'method': 'IPFP', +# 'algo_options': algo_options, 'stabilizer': None, +# 'edit_cost_constant': edit_costs} +# for idx in tqdm(range(len(Gn) - 2), desc='computing GEDs', file=sys.stdout): +# dis, _, _ = GED(Gn[idx], set_median, **params_ged) +# ged_mat[idx, -2] = dis +# ged_mat[-2, idx] = dis +# dis, _, _ = GED(Gn[idx], gen_median, **params_ged) +# ged_mat[idx, -1] = dis +# ged_mat[-1, idx] = dis +# np.savez('results/test_k_closest_graphs/ged_mat.' + fname_medians + '.with_medians.gm', +# ged_mat=ged_mat) + + + # get indices by classes. + y_idx = get_same_item_indices(y_all) + + # visualization. + median_set = [22, 29, 54, 74] + visualize_graph_dataset('ged', 'tsne', draw_figure, + draw_params={'y_idx': y_idx}, dis_mat=ged_mat, + median_set=median_set) + +############################################################################### + + +def visualize_distances_in_kernel_monoterpenoides(): + + ds = {'dataset': '../datasets/monoterpenoides/dataset_10+.ds', + 'graph_dir': '/media/ljia/DATA/research-repo/codes/Linlin/graphkit-learn/datasets/monoterpenoides/'} # node/edge symb + Gn_original, y_all = loadDataset(ds['dataset']) +# Gn = Gn[0:50] + + # compute distance matrix +# median_set = [22, 29, 54, 74] + gkernel = 'treeletkernel' + fit_method = 'expert' + node_label = 'atom' + edge_label = 'bond_type' + ds_name = 'monoterpenoides' + fname_medians = fit_method + '.' + gkernel + dir_output = 'results/xp_monoterpenoides/' + repeat = 0 + + # get indices by classes. + y_idx = get_same_item_indices(y_all) + for i, (y, values) in enumerate(y_idx.items()): + print('\ny =', y) + k = len(values) + + Gn = [Gn_original[g].copy() for g in values] + # add set median. 
+ fname_sm = dir_output + 'medians/' + str(int(y)) + '/set_median.k' + str(int(k)) \ + + '.y' + str(int(y)) + '.repeat' + str(repeat) + '.gxl' + set_median = loadGXL(fname_sm) + Gn.append(set_median) + # add generalized median (estimated pre-image.) + fname_gm = dir_output + 'medians/' + str(int(y)) + '/gen_median.k' + str(int(k)) \ + + '.y' + str(int(y)) + '.repeat' + str(repeat) + '.gxl' + gen_median = loadGXL(fname_gm) + Gn.append(gen_median) + + # compute distance matrix + median_set = range(0, len(values)) + + Gn_median_set = [Gn[i].copy() for i in median_set] + Kmatrix_median = compute_kernel(Gn + Gn_median_set, gkernel, node_label, + edge_label, False) + Kmatrix = Kmatrix_median[0:len(Gn), 0:len(Gn)] + dis_mat, _, _, _ = kernel_distance_matrix(Gn, node_label, edge_label, + Kmatrix=Kmatrix, gkernel=gkernel) + print('average distances: ', np.mean(np.mean(dis_mat[0:len(Gn)-2, 0:len(Gn)-2]))) + print('min distances: ', np.min(np.min(dis_mat[0:len(Gn)-2, 0:len(Gn)-2]))) + print('max distances: ', np.max(np.max(dis_mat[0:len(Gn)-2, 0:len(Gn)-2]))) + + # add distances for the image of exact median \psi. + dis_k_median_list = [] + for idx, g in enumerate(Gn): + dis_k_median_list.append(dis_gstar(idx, range(len(Gn), len(Gn) + len(Gn_median_set)), + [1 / len(Gn_median_set)] * len(Gn_median_set), + Kmatrix_median, withterm3=False)) + dis_mat_median = np.zeros((len(Gn) + 1, len(Gn) + 1)) + for i in range(len(Gn)): + for j in range(i, len(Gn)): + dis_mat_median[i, j] = dis_mat[i, j] + dis_mat_median[j, i] = dis_mat_median[i, j] + for i in range(len(Gn)): + dis_mat_median[i, -1] = dis_k_median_list[i] + dis_mat_median[-1, i] = dis_k_median_list[i] + + + # visualization. 
+# visualize_graph_dataset('graph-kernel', 'tsne', Gn) +# visualize_graph_dataset('graph-kernel', 'tsne', draw_figure, +# draw_params={'y_idx': y_idx}, dis_mat=dis_mat_median) + visualize_graph_dataset('graph-kernel', 'tsne', draw_figure, + draw_params={'y_idx': y_idx}, dis_mat=dis_mat_median, + median_set=median_set) + + +def visualize_distances_in_ged_monoterpenoides(): + from fitDistance import compute_geds + from ged import GED + + ds = {'dataset': '../datasets/monoterpenoides/dataset_10+.ds', + 'graph_dir': '/media/ljia/DATA/research-repo/codes/Linlin/graphkit-learn/datasets/monoterpenoides/'} # node/edge symb + Gn_original, y_all = loadDataset(ds['dataset']) +# Gn = Gn[0:50] + + # compute distance matrix +# median_set = [22, 29, 54, 74] + gkernel = 'treeletkernel' + fit_method = 'expert' + ds_name = 'monoterpenoides' + fname_medians = fit_method + '.' + gkernel + dir_output = 'results/xp_monoterpenoides/' + repeat = 0 +# edit_costs = [0.16229209837639536, 0.06612870523413916, 0.04030113378793905, 0.20723547009415202, 0.3338607220394598, 0.27054392518077297] + edit_costs = [3, 3, 1, 3, 3, 1] +# edit_costs = [7, 3, 5, 9, 2, 6] + + # get indices by classes. + y_idx = get_same_item_indices(y_all) + for i, (y, values) in enumerate(y_idx.items()): + print('\ny =', y) + k = len(values) + + Gn = [Gn_original[g].copy() for g in values] + # add set median. + fname_sm = dir_output + 'medians/' + str(int(y)) + '/set_median.k' + str(int(k)) \ + + '.y' + str(int(y)) + '.repeat' + str(repeat) + '.gxl' + set_median = loadGXL(fname_sm) + Gn.append(set_median) + # add generalized median (estimated pre-image.) + fname_gm = dir_output + 'medians/' + str(int(y)) + '/gen_median.k' + str(int(k)) \ + + '.y' + str(int(y)) + '.repeat' + str(repeat) + '.gxl' + gen_median = loadGXL(fname_gm) + Gn.append(gen_median) + + + # compute/load ged matrix. + # compute. 
+        algo_options = '--threads 1 --initial-solutions 40 --ratio-runs-from-initial-solutions 1'
+        params_ged = {'dataset': ds_name, 'lib': 'gedlibpy', 'cost': 'CONSTANT', 
+                      'method': 'IPFP', 'algo_options': algo_options, 
+                      'stabilizer': None, 'edit_cost_constant': edit_costs}
+        _, ged_mat, _ = compute_geds(Gn, params_ged=params_ged, parallel=True)
+        np.savez(dir_output + 'ged_mat.' + fname_medians + '.y' + str(int(y)) \
+                 + '.with_medians.gm', ged_mat=ged_mat)
+#        # load from file.
+#        gmfile = np.load(dir_output + 'ged_mat.' + fname_medians + '.y' + str(int(y)) + '.with_medians.gm.npz')
+#        ged_mat = gmfile['ged_mat']
+#        # change medians.
+#        algo_options = '--threads 1 --initial-solutions 40 --ratio-runs-from-initial-solutions 1'
+#        params_ged = {'lib': 'gedlibpy', 'cost': 'CONSTANT', 'method': 'IPFP', 
+#                      'algo_options': algo_options, 'stabilizer': None, 
+#                      'edit_cost_constant': edit_costs}
+#        for idx in tqdm(range(len(Gn) - 2), desc='computing GEDs', file=sys.stdout):
+#            dis, _, _ = GED(Gn[idx], set_median, **params_ged)
+#            ged_mat[idx, -2] = dis
+#            ged_mat[-2, idx] = dis
+#            dis, _, _ = GED(Gn[idx], gen_median, **params_ged)
+#            ged_mat[idx, -1] = dis
+#            ged_mat[-1, idx] = dis
+#        np.savez(dir_output + 'ged_mat.' + fname_medians + '.y' + str(int(y)) + '.with_medians.gm', 
+#                 ged_mat=ged_mat)
+
+        # visualization.
+ median_set = range(0, len(values)) + visualize_graph_dataset('ged', 'tsne', draw_figure, + draw_params={'y_idx': y_idx}, dis_mat=ged_mat, + median_set=median_set) + + +############################################################################### + + +def visualize_distances_in_kernel_letter_h(): + + ds = {'dataset': '/media/ljia/DATA/research-repo/codes/others/gedlib/tests_linlin/data/collections/Letter.xml', + 'graph_dir': '/media/ljia/DATA/research-repo/codes/others/gedlib/tests_linlin/data/datasets/Letter/HIGH/'} # node/edge symb + Gn_original, y_all = loadDataset(ds['dataset'], extra_params=ds['graph_dir']) +# Gn = Gn[0:50] + + # compute distance matrix +# median_set = [22, 29, 54, 74] + gkernel = 'structuralspkernel' + fit_method = 'expert' + node_label = None + edge_label = None + ds_name = 'letter-h' + fname_medians = fit_method + '.' + gkernel + dir_output = 'results/xp_letter_h/' + k = 150 + repeat = 0 + + # get indices by classes. + y_idx = get_same_item_indices(y_all) + for i, (y, values) in enumerate(y_idx.items()): + print('\ny =', y) + + Gn = [Gn_original[g].copy() for g in values] + # add set median. + fname_sm = dir_output + 'medians/' + y + '/set_median.k' + str(int(k)) \ + + '.y' + y + '.repeat' + str(repeat) + '.gxl' + set_median = loadGXL(fname_sm) + Gn.append(set_median) + # add generalized median (estimated pre-image.) 
+ fname_gm = dir_output + 'medians/' + y + '/gen_median.k' + str(int(k)) \ + + '.y' + y + '.repeat' + str(repeat) + '.gxl' + gen_median = loadGXL(fname_gm) + Gn.append(gen_median) + + # compute distance matrix + median_set = range(0, len(values)) + + Gn_median_set = [Gn[i].copy() for i in median_set] + Kmatrix_median = compute_kernel(Gn + Gn_median_set, gkernel, node_label, + edge_label, False) + Kmatrix = Kmatrix_median[0:len(Gn), 0:len(Gn)] + dis_mat, _, _, _ = kernel_distance_matrix(Gn, node_label, edge_label, + Kmatrix=Kmatrix, gkernel=gkernel) + print('average distances: ', np.mean(np.mean(dis_mat[0:len(Gn)-2, 0:len(Gn)-2]))) + print('min distances: ', np.min(np.min(dis_mat[0:len(Gn)-2, 0:len(Gn)-2]))) + print('max distances: ', np.max(np.max(dis_mat[0:len(Gn)-2, 0:len(Gn)-2]))) + + # add distances for the image of exact median \psi. + dis_k_median_list = [] + for idx, g in enumerate(Gn): + dis_k_median_list.append(dis_gstar(idx, range(len(Gn), len(Gn) + len(Gn_median_set)), + [1 / len(Gn_median_set)] * len(Gn_median_set), + Kmatrix_median, withterm3=False)) + dis_mat_median = np.zeros((len(Gn) + 1, len(Gn) + 1)) + for i in range(len(Gn)): + for j in range(i, len(Gn)): + dis_mat_median[i, j] = dis_mat[i, j] + dis_mat_median[j, i] = dis_mat_median[i, j] + for i in range(len(Gn)): + dis_mat_median[i, -1] = dis_k_median_list[i] + dis_mat_median[-1, i] = dis_k_median_list[i] + + + # visualization. 
+# visualize_graph_dataset('graph-kernel', 'tsne', Gn) +# visualize_graph_dataset('graph-kernel', 'tsne', draw_figure, +# draw_params={'y_idx': y_idx}, dis_mat=dis_mat_median) + visualize_graph_dataset('graph-kernel', 'tsne', draw_figure, + draw_params={'y_idx': y_idx}, dis_mat=dis_mat_median, + median_set=median_set) + + +def visualize_distances_in_ged_letter_h(): + from fitDistance import compute_geds + from preimage.test_k_closest_graphs import reform_attributes + + ds = {'dataset': '/media/ljia/DATA/research-repo/codes/others/gedlib/tests_linlin/data/collections/Letter.xml', + 'graph_dir': '/media/ljia/DATA/research-repo/codes/others/gedlib/tests_linlin/data/datasets/Letter/HIGH/'} # node/edge symb + Gn_original, y_all = loadDataset(ds['dataset'], extra_params=ds['graph_dir']) +# Gn = Gn[0:50] + + # compute distance matrix +# median_set = [22, 29, 54, 74] + gkernel = 'structuralspkernel' + fit_method = 'expert' + ds_name = 'letter-h' + fname_medians = fit_method + '.' + gkernel + dir_output = 'results/xp_letter_h/' + k = 150 + repeat = 0 +# edit_costs = [0.16229209837639536, 0.06612870523413916, 0.04030113378793905, 0.20723547009415202, 0.3338607220394598, 0.27054392518077297] + edit_costs = [3, 3, 1, 3, 3, 1] +# edit_costs = [7, 3, 5, 9, 2, 6] + + # get indices by classes. + y_idx = get_same_item_indices(y_all) + for i, (y, values) in enumerate(y_idx.items()): + print('\ny =', y) + + Gn = [Gn_original[g].copy() for g in values] + # add set median. + fname_sm = dir_output + 'medians/' + y + '/set_median.k' + str(int(k)) \ + + '.y' + y + '.repeat' + str(repeat) + '.gxl' + set_median = loadGXL(fname_sm) + Gn.append(set_median) + # add generalized median (estimated pre-image.) + fname_gm = dir_output + 'medians/' + y + '/gen_median.k' + str(int(k)) \ + + '.y' + y + '.repeat' + str(repeat) + '.gxl' + gen_median = loadGXL(fname_gm) + Gn.append(gen_median) + + + # compute/load ged matrix. + # compute. 
+        algo_options = '--threads 1 --initial-solutions 40 --ratio-runs-from-initial-solutions 1'
+        params_ged = {'dataset': 'Letter', 'lib': 'gedlibpy', 'cost': 'CONSTANT', 
+                      'method': 'IPFP', 'algo_options': algo_options, 
+                      'stabilizer': None, 'edit_cost_constant': edit_costs}
+        for g in Gn:
+            reform_attributes(g)
+        _, ged_mat, _ = compute_geds(Gn, params_ged=params_ged, parallel=True)
+        np.savez(dir_output + 'ged_mat.' + fname_medians + '.y' + y + '.with_medians.gm', ged_mat=ged_mat)
+#        # load from file.
+#        gmfile = np.load(dir_output + 'ged_mat.' + fname_medians + '.y' + y + '.with_medians.gm.npz')
+#        ged_mat = gmfile['ged_mat']
+#        # change medians.
+#        algo_options = '--threads 1 --initial-solutions 40 --ratio-runs-from-initial-solutions 1'
+#        params_ged = {'lib': 'gedlibpy', 'cost': 'CONSTANT', 'method': 'IPFP', 
+#                      'algo_options': algo_options, 'stabilizer': None, 
+#                      'edit_cost_constant': edit_costs}
+#        for idx in tqdm(range(len(Gn) - 2), desc='computing GEDs', file=sys.stdout):
+#            dis, _, _ = GED(Gn[idx], set_median, **params_ged)
+#            ged_mat[idx, -2] = dis
+#            ged_mat[-2, idx] = dis
+#            dis, _, _ = GED(Gn[idx], gen_median, **params_ged)
+#            ged_mat[idx, -1] = dis
+#            ged_mat[-1, idx] = dis
+#        np.savez(dir_output + 'ged_mat.' + fname_medians + '.y' + y + '.with_medians.gm', 
+#                 ged_mat=ged_mat)
+
+
+        # visualization.
+ median_set = range(0, len(values)) + visualize_graph_dataset('ged', 'tsne', draw_figure, + draw_params={'y_idx': y_idx}, dis_mat=ged_mat, + median_set=median_set) + + +if __name__ == '__main__': + visualize_distances_in_kernel_letter_h() +# visualize_distances_in_ged_letter_h() +# visualize_distances_in_kernel_monoterpenoides() +# visualize_distances_in_kernel_monoterpenoides() +# visualize_distances_in_kernel() +# visualize_distances_in_ged() + + + + + + + +#def draw_figure_dis_k(ax, Gn_embedded, y_idx=None, legend=False): +# from matplotlib import colors as mcolors +# colors = list(dict(mcolors.BASE_COLORS, **mcolors.CSS4_COLORS)) +## colors = ['#08306b', '#08519c', '#2171b5', '#4292c6', '#6baed6', '#9ecae1', +## '#c6dbef', '#deebf7'] +# for i, values in enumerate(y_idx.values()): +# for item in values: +## ax.scatter(Gn_embedded[item,0], Gn_embedded[item,1], c=colors[i]) # , c='b') +# ax.scatter(Gn_embedded[item,0], Gn_embedded[item,1], c='b') +# h1 = ax.scatter(Gn_embedded[[12, 13, 22, 29], 0], Gn_embedded[[12, 13, 22, 29], 1], c='r') +# h2 = ax.scatter(Gn_embedded[-1, 0], Gn_embedded[-1, 1], c='darkorchid') # \psi +# h3 = ax.scatter(Gn_embedded[-2, 0], Gn_embedded[-2, 1], c='gold') # gen median +# h4 = ax.scatter(Gn_embedded[-3, 0], Gn_embedded[-3, 1], c='r', marker='+') # set median +# if legend: +## fig.subplots_adjust(bottom=0.17) +# ax.legend([h1, h2, h3, h4], ['k clostest graphs', 'true median', 'gen median', 'set median']) +## fig.legend(handles, labels, loc='lower center', ncol=2, frameon=False) # , ncol=5, labelspacing=0.1, handletextpad=0.4, columnspacing=0.6) +## plt.savefig('symbolic_and_non_comparison_vertical_short.eps', format='eps', dpi=300, transparent=True, +## bbox_inches='tight') +## plt.show() + + + +#def draw_figure_ged(ax, Gn_embedded, y_idx=None, legend=False): +# from matplotlib import colors as mcolors +# colors = list(dict(mcolors.BASE_COLORS, **mcolors.CSS4_COLORS)) +## colors = ['#08306b', '#08519c', '#2171b5', '#4292c6', 
'#6baed6', '#9ecae1', +## '#c6dbef', '#deebf7'] +# for i, values in enumerate(y_idx.values()): +# for item in values: +## ax.scatter(Gn_embedded[item,0], Gn_embedded[item,1], c=colors[i]) # , c='b') +# ax.scatter(Gn_embedded[item,0], Gn_embedded[item,1], c='b') +# h1 = ax.scatter(Gn_embedded[[12, 13, 22, 29], 0], Gn_embedded[[12, 13, 22, 29], 1], c='r') +## h2 = ax.scatter(Gn_embedded[-1, 0], Gn_embedded[-1, 1], c='darkorchid') # \psi +# h3 = ax.scatter(Gn_embedded[-1, 0], Gn_embedded[-1, 1], c='gold') # gen median +# h4 = ax.scatter(Gn_embedded[-2, 0], Gn_embedded[-2, 1], c='r', marker='+') # set median +# if legend: +## fig.subplots_adjust(bottom=0.17) +# ax.legend([h1, h3, h4], ['k clostest graphs', 'gen median', 'set median']) +## fig.legend(handles, labels, loc='lower center', ncol=2, frameon=False) # , ncol=5, labelspacing=0.1, handletextpad=0.4, columnspacing=0.6) +## plt.savefig('symbolic_and_non_comparison_vertical_short.eps', format='eps', dpi=300, transparent=True, +## bbox_inches='tight') +## plt.show() \ No newline at end of file diff --git a/gklearn/preimage/xp_fit_method.py b/gklearn/preimage/xp_fit_method.py new file mode 100644 index 0000000..e1b6c6f --- /dev/null +++ b/gklearn/preimage/xp_fit_method.py @@ -0,0 +1,563 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Tue Jan 14 15:39:29 2020 + +@author: ljia +""" +import numpy as np +import random +import csv +from shutil import copyfile +import networkx as nx +import matplotlib.pyplot as plt + +import sys +sys.path.insert(0, "../") +from gklearn.utils.graphfiles import loadDataset, loadGXL, saveGXL +from preimage.test_k_closest_graphs import median_on_k_closest_graphs, reform_attributes +from preimage.utils import get_same_item_indices, kernel_distance_matrix, compute_kernel +from preimage.find_best_k import getRelations + + +def get_dataset(ds_name): + if ds_name == 'Letter-high': # node non-symb + dataset = 
'/media/ljia/DATA/research-repo/codes/others/gedlib/tests_linlin/data/collections/Letter.xml' + graph_dir = '/media/ljia/DATA/research-repo/codes/others/gedlib/tests_linlin/data/datasets/Letter/HIGH/' + Gn, y_all = loadDataset(dataset, extra_params=graph_dir) + for G in Gn: + reform_attributes(G) + elif ds_name == 'Fingerprint': + dataset = '/media/ljia/DATA/research-repo/codes/Linlin/gedlib/data/collections/Fingerprint.xml' + graph_dir = '/media/ljia/DATA/research-repo/codes/Linlin/gedlib/data/datasets/Fingerprint/data/' + Gn, y_all = loadDataset(dataset, extra_params=graph_dir) + for G in Gn: + reform_attributes(G) + elif ds_name == 'SYNTHETIC': + pass + elif ds_name == 'SYNTHETICnew': + dataset = '/media/ljia/DATA/research-repo/codes/Linlin/graphkit-learn/datasets/SYNTHETICnew/SYNTHETICnew_A.txt' + graph_dir = '/media/ljia/DATA/research-repo/codes/others/gedlib/tests_linlin/generated_datsets/SYNTHETICnew' +# dataset = '/media/ljia/DATA/research-repo/codes/Linlin/graphkit-learn/datasets/Letter-high/Letter-high_A.txt' +# graph_dir = '/media/ljia/DATA/research-repo/codes/others/gedlib/tests_linlin/data/datasets/Letter/HIGH/' + Gn, y_all = loadDataset(dataset) + elif ds_name == 'Synthie': + pass + elif ds_name == 'COIL-RAG': + pass + elif ds_name == 'COLORS-3': + pass + elif ds_name == 'FRANKENSTEIN': + pass + + return Gn, y_all, graph_dir + + +def init_output_file(ds_name, gkernel, fit_method, dir_output): +# fn_output_detail = 'results_detail.' + ds_name + '.' + gkernel + '.' + fit_method + '.csv' + fn_output_detail = 'results_detail.' + ds_name + '.' 
+ gkernel + '.csv' + f_detail = open(dir_output + fn_output_detail, 'a') + csv.writer(f_detail).writerow(['dataset', 'graph kernel', 'edit cost', + 'GED method', 'attr distance', 'fit method', 'k', + 'target', 'repeat', 'SOD SM', 'SOD GM', 'dis_k SM', 'dis_k GM', + 'min dis_k gi', 'SOD SM -> GM', 'dis_k SM -> GM', 'dis_k gi -> SM', + 'dis_k gi -> GM', 'fitting time', 'generating time', 'total time', + 'median set']) + f_detail.close() + +# fn_output_summary = 'results_summary.' + ds_name + '.' + gkernel + '.' + fit_method + '.csv' + fn_output_summary = 'results_summary.' + ds_name + '.' + gkernel + '.csv' + f_summary = open(dir_output + fn_output_summary, 'a') + csv.writer(f_summary).writerow(['dataset', 'graph kernel', 'edit cost', + 'GED method', 'attr distance', 'fit method', 'k', + 'target', 'SOD SM', 'SOD GM', 'dis_k SM', 'dis_k GM', + 'min dis_k gi', 'SOD SM -> GM', 'dis_k SM -> GM', 'dis_k gi -> SM', + 'dis_k gi -> GM', 'fitting time', 'generating time', 'total time', + '# SOD SM -> GM', '# dis_k SM -> GM', + '# dis_k gi -> SM', '# dis_k gi -> GM', 'repeats better SOD SM -> GM', + 'repeats better dis_k SM -> GM', 'repeats better dis_k gi -> SM', + 'repeats better dis_k gi -> GM']) + f_summary.close() + + return fn_output_detail, fn_output_summary + + +def xp_fit_method_for_non_symbolic(parameters, save_results=True, initial_solutions=1, + Gn_data=None, k_dis_data=None, Kmatrix=None): + + # 1. set parameters. + print('1. setting parameters...') + ds_name = parameters['ds_name'] + gkernel = parameters['gkernel'] + edit_cost_name = parameters['edit_cost_name'] + ged_method = parameters['ged_method'] + attr_distance = parameters['attr_distance'] + fit_method = parameters['fit_method'] + + node_label = None + edge_label = None + dir_output = 'results/xp_fit_method/' + + + # 2. get dataset. + print('2. 
getting dataset...') + if Gn_data is None: + Gn, y_all, graph_dir = get_dataset(ds_name) + else: + Gn = Gn_data[0] + y_all = Gn_data[1] + graph_dir = Gn_data[2] + + + # 3. compute kernel distance matrix. + print('3. computing kernel distance matrix...') + if k_dis_data is None: + dis_mat, dis_max, dis_min, dis_mean = kernel_distance_matrix(Gn, None, + None, Kmatrix=Kmatrix, gkernel=gkernel) + else: + dis_mat = k_dis_data[0] + dis_max = k_dis_data[1] + dis_min = k_dis_data[2] + dis_mean = k_dis_data[3] + print('pair distances - dis_max, dis_min, dis_mean:', dis_max, dis_min, dis_mean) + + + if save_results: + # create result files. + print('creating output files...') + fn_output_detail, fn_output_summary = init_output_file(ds_name, gkernel, + fit_method, dir_output) + + + # start repeats. + repeats = 1 +# k_list = range(2, 11) + k_list = [0] + # get indices by classes. + y_idx = get_same_item_indices(y_all) + random.seed(1) + rdn_seed_list = random.sample(range(0, repeats * 100), repeats) + + for k in k_list: +# print('\n--------- k =', k, '----------') + + sod_sm_mean_list = [] + sod_gm_mean_list = [] + dis_k_sm_mean_list = [] + dis_k_gm_mean_list = [] + dis_k_gi_min_mean_list = [] + time_fitting_mean_list = [] + time_generating_mean_list = [] + time_total_mean_list = [] + + # 3. start generating and computing over targets. + print('4. 
starting generating and computing over targets......') + for i, (y, values) in enumerate(y_idx.items()): +# y = 'I' +# values = y_idx[y] +# values = values[0:10] + print('\ny =', y) +# if y.strip() == 'A': +# continue + + k = len(values) + print('\n--------- k =', k, '----------') + + sod_sm_list = [] + sod_gm_list = [] + dis_k_sm_list = [] + dis_k_gm_list = [] + dis_k_gi_min_list = [] + time_fitting_list = [] + time_generating_list = [] + time_total_list = [] + nb_sod_sm2gm = [0, 0, 0] + nb_dis_k_sm2gm = [0, 0, 0] + nb_dis_k_gi2sm = [0, 0, 0] + nb_dis_k_gi2gm = [0, 0, 0] + repeats_better_sod_sm2gm = [] + repeats_better_dis_k_sm2gm = [] + repeats_better_dis_k_gi2sm = [] + repeats_better_dis_k_gi2gm = [] + + # get Gram matrix for this part of data. + if Kmatrix is not None: + Kmatrix_sub = Kmatrix[values,:] + Kmatrix_sub = Kmatrix_sub[:,values] + + for repeat in range(repeats): + print('\nrepeat =', repeat) + random.seed(rdn_seed_list[repeat]) + median_set_idx_idx = random.sample(range(0, len(values)), k) + median_set_idx = [values[idx] for idx in median_set_idx_idx] + print('median set: ', median_set_idx) + Gn_median = [Gn[g] for g in values] +# from notebooks.utils.plot_all_graphs import draw_Fingerprint_graph +# for Gn in Gn_median: +# draw_Fingerprint_graph(Gn, save=None) + + # GENERATING & COMPUTING!! + res_sods, res_dis_ks, res_times = median_on_k_closest_graphs(Gn_median, + node_label, edge_label, + gkernel, k, fit_method=fit_method, graph_dir=graph_dir, + edit_cost_constants=None, group_min=median_set_idx_idx, + dataset=ds_name, initial_solutions=initial_solutions, + edit_cost_name=edit_cost_name, + Kmatrix=Kmatrix_sub, parallel=False) + sod_sm = res_sods[0] + sod_gm = res_sods[1] + dis_k_sm = res_dis_ks[0] + dis_k_gm = res_dis_ks[1] + dis_k_gi = res_dis_ks[2] + dis_k_gi_min = res_dis_ks[3] + idx_dis_k_gi_min = res_dis_ks[4] + time_fitting = res_times[0] + time_generating = res_times[1] + + # write result detail. 
+ sod_sm2gm = getRelations(np.sign(sod_gm - sod_sm)) + dis_k_sm2gm = getRelations(np.sign(dis_k_gm - dis_k_sm)) + dis_k_gi2sm = getRelations(np.sign(dis_k_sm - dis_k_gi_min)) + dis_k_gi2gm = getRelations(np.sign(dis_k_gm - dis_k_gi_min)) + if save_results: + f_detail = open(dir_output + fn_output_detail, 'a') + csv.writer(f_detail).writerow([ds_name, gkernel, + edit_cost_name, ged_method, attr_distance, + fit_method, k, y, repeat, + sod_sm, sod_gm, dis_k_sm, dis_k_gm, + dis_k_gi_min, sod_sm2gm, dis_k_sm2gm, dis_k_gi2sm, + dis_k_gi2gm, time_fitting, time_generating, + time_fitting + time_generating, median_set_idx]) + f_detail.close() + + # compute result summary. + sod_sm_list.append(sod_sm) + sod_gm_list.append(sod_gm) + dis_k_sm_list.append(dis_k_sm) + dis_k_gm_list.append(dis_k_gm) + dis_k_gi_min_list.append(dis_k_gi_min) + time_fitting_list.append(time_fitting) + time_generating_list.append(time_generating) + time_total_list.append(time_fitting + time_generating) + # # SOD SM -> GM + if sod_sm > sod_gm: + nb_sod_sm2gm[0] += 1 + repeats_better_sod_sm2gm.append(repeat) + elif sod_sm == sod_gm: + nb_sod_sm2gm[1] += 1 + elif sod_sm < sod_gm: + nb_sod_sm2gm[2] += 1 + # # dis_k SM -> GM + if dis_k_sm > dis_k_gm: + nb_dis_k_sm2gm[0] += 1 + repeats_better_dis_k_sm2gm.append(repeat) + elif dis_k_sm == dis_k_gm: + nb_dis_k_sm2gm[1] += 1 + elif dis_k_sm < dis_k_gm: + nb_dis_k_sm2gm[2] += 1 + # # dis_k gi -> SM + if dis_k_gi_min > dis_k_sm: + nb_dis_k_gi2sm[0] += 1 + repeats_better_dis_k_gi2sm.append(repeat) + elif dis_k_gi_min == dis_k_sm: + nb_dis_k_gi2sm[1] += 1 + elif dis_k_gi_min < dis_k_sm: + nb_dis_k_gi2sm[2] += 1 + # # dis_k gi -> GM + if dis_k_gi_min > dis_k_gm: + nb_dis_k_gi2gm[0] += 1 + repeats_better_dis_k_gi2gm.append(repeat) + elif dis_k_gi_min == dis_k_gm: + nb_dis_k_gi2gm[1] += 1 + elif dis_k_gi_min < dis_k_gm: + nb_dis_k_gi2gm[2] += 1 + + # save median graphs. 
+ fname_sm = '/media/ljia/DATA/research-repo/codes/others/gedlib/tests_linlin/output/tmp_ged/set_median.gxl' + fn_pre_sm_new = dir_output + 'medians/set_median.' + fit_method \ + + '.k' + str(int(k)) + '.y' + str(y) + '.repeat' + str(repeat) + copyfile(fname_sm, fn_pre_sm_new + '.gxl') + fname_gm = '/media/ljia/DATA/research-repo/codes/others/gedlib/tests_linlin/output/tmp_ged/gen_median.gxl' + fn_pre_gm_new = dir_output + 'medians/gen_median.' + fit_method \ + + '.k' + str(int(k)) + '.y' + str(y) + '.repeat' + str(repeat) + copyfile(fname_gm, fn_pre_gm_new + '.gxl') + G_best_kernel = Gn_median[idx_dis_k_gi_min].copy() +# reform_attributes(G_best_kernel) + fn_pre_g_best_kernel = dir_output + 'medians/g_best_kernel.' + fit_method \ + + '.k' + str(int(k)) + '.y' + str(y) + '.repeat' + str(repeat) + saveGXL(G_best_kernel, fn_pre_g_best_kernel + '.gxl', method='default') + + # plot median graphs. + if ds_name == 'Letter-high': + set_median = loadGXL(fn_pre_sm_new + '.gxl') + gen_median = loadGXL(fn_pre_gm_new + '.gxl') + draw_Letter_graph(set_median, fn_pre_sm_new) + draw_Letter_graph(gen_median, fn_pre_gm_new) + draw_Letter_graph(G_best_kernel, fn_pre_g_best_kernel) + + # write result summary for each letter. 
+ sod_sm_mean_list.append(np.mean(sod_sm_list)) + sod_gm_mean_list.append(np.mean(sod_gm_list)) + dis_k_sm_mean_list.append(np.mean(dis_k_sm_list)) + dis_k_gm_mean_list.append(np.mean(dis_k_gm_list)) + dis_k_gi_min_mean_list.append(np.mean(dis_k_gi_min_list)) + time_fitting_mean_list.append(np.mean(time_fitting_list)) + time_generating_mean_list.append(np.mean(time_generating_list)) + time_total_mean_list.append(np.mean(time_total_list)) + sod_sm2gm_mean = getRelations(np.sign(sod_gm_mean_list[-1] - sod_sm_mean_list[-1])) + dis_k_sm2gm_mean = getRelations(np.sign(dis_k_gm_mean_list[-1] - dis_k_sm_mean_list[-1])) + dis_k_gi2sm_mean = getRelations(np.sign(dis_k_sm_mean_list[-1] - dis_k_gi_min_mean_list[-1])) + dis_k_gi2gm_mean = getRelations(np.sign(dis_k_gm_mean_list[-1] - dis_k_gi_min_mean_list[-1])) + if save_results: + f_summary = open(dir_output + fn_output_summary, 'a') + csv.writer(f_summary).writerow([ds_name, gkernel, + edit_cost_name, ged_method, attr_distance, + fit_method, k, y, + sod_sm_mean_list[-1], sod_gm_mean_list[-1], + dis_k_sm_mean_list[-1], dis_k_gm_mean_list[-1], + dis_k_gi_min_mean_list[-1], sod_sm2gm_mean, dis_k_sm2gm_mean, + dis_k_gi2sm_mean, dis_k_gi2gm_mean, + time_fitting_mean_list[-1], time_generating_mean_list[-1], + time_total_mean_list[-1], nb_sod_sm2gm, + nb_dis_k_sm2gm, nb_dis_k_gi2sm, nb_dis_k_gi2gm, + repeats_better_sod_sm2gm, repeats_better_dis_k_sm2gm, + repeats_better_dis_k_gi2sm, repeats_better_dis_k_gi2gm]) + f_summary.close() + + + # write result summary for each letter. 
+ sod_sm_mean = np.mean(sod_sm_mean_list) + sod_gm_mean = np.mean(sod_gm_mean_list) + dis_k_sm_mean = np.mean(dis_k_sm_mean_list) + dis_k_gm_mean = np.mean(dis_k_gm_mean_list) + dis_k_gi_min_mean = np.mean(dis_k_gi_min_list) + time_fitting_mean = np.mean(time_fitting_list) + time_generating_mean = np.mean(time_generating_list) + time_total_mean = np.mean(time_total_list) + sod_sm2gm_mean = getRelations(np.sign(sod_gm_mean - sod_sm_mean)) + dis_k_sm2gm_mean = getRelations(np.sign(dis_k_gm_mean - dis_k_sm_mean)) + dis_k_gi2sm_mean = getRelations(np.sign(dis_k_sm_mean - dis_k_gi_min_mean)) + dis_k_gi2gm_mean = getRelations(np.sign(dis_k_gm_mean - dis_k_gi_min_mean)) + if save_results: + f_summary = open(dir_output + fn_output_summary, 'a') + csv.writer(f_summary).writerow([ds_name, gkernel, + edit_cost_name, ged_method, attr_distance, + fit_method, k, 'all', + sod_sm_mean, sod_gm_mean, dis_k_sm_mean, dis_k_gm_mean, + dis_k_gi_min_mean, sod_sm2gm_mean, dis_k_sm2gm_mean, + dis_k_gi2sm_mean, dis_k_gi2gm_mean, + time_fitting_mean, time_generating_mean, time_total_mean]) + f_summary.close() + + print('\ncomplete.') + + +#Dessin median courrant +def draw_Letter_graph(graph, file_prefix): + plt.figure() + pos = {} + for n in graph.nodes: + pos[n] = np.array([float(graph.node[n]['x']),float(graph.node[n]['y'])]) + nx.draw_networkx(graph, pos) + plt.savefig(file_prefix + '.eps', format='eps', dpi=300) +# plt.show() + plt.clf() + + +if __name__ == "__main__": +# #### xp 1: Letter-high, spkernel. +# # load dataset. +# print('getting dataset and computing kernel distance matrix first...') +# ds_name = 'Letter-high' +# gkernel = 'spkernel' +# Gn, y_all, graph_dir = get_dataset(ds_name) +# # remove graphs without edges. +# Gn = [(idx, G) for idx, G in enumerate(Gn) if nx.number_of_edges(G) != 0] +# idx = [G[0] for G in Gn] +# Gn = [G[1] for G in Gn] +# y_all = [y_all[i] for i in idx] +## Gn = Gn[0:50] +## y_all = y_all[0:50] +# # compute pair distances. 
+# dis_mat, dis_max, dis_min, dis_mean = kernel_distance_matrix(Gn, None, None, +# Kmatrix=None, gkernel=gkernel, verbose=True) +## dis_mat, dis_max, dis_min, dis_mean = 0, 0, 0, 0 +# # fitting and computing. +# fit_methods = ['random', 'expert', 'k-graphs'] +# for fit_method in fit_methods: +# print('\n-------------------------------------') +# print('fit method:', fit_method) +# parameters = {'ds_name': ds_name, +# 'gkernel': gkernel, +# 'edit_cost_name': 'LETTER2', +# 'ged_method': 'mIPFP', +# 'attr_distance': 'euclidean', +# 'fit_method': fit_method} +# xp_fit_method_for_non_symbolic(parameters, save_results=True, +# initial_solutions=40, +# Gn_data = [Gn, y_all, graph_dir], +# k_dis_data = [dis_mat, dis_max, dis_min, dis_mean]) + + +# #### xp 2: Letter-high, sspkernel. +# # load dataset. +# print('getting dataset and computing kernel distance matrix first...') +# ds_name = 'Letter-high' +# gkernel = 'structuralspkernel' +# Gn, y_all, graph_dir = get_dataset(ds_name) +## Gn = Gn[0:50] +## y_all = y_all[0:50] +# # compute pair distances. +# dis_mat, dis_max, dis_min, dis_mean = kernel_distance_matrix(Gn, None, None, +# Kmatrix=None, gkernel=gkernel, verbose=True) +## dis_mat, dis_max, dis_min, dis_mean = 0, 0, 0, 0 +# # fitting and computing. +# fit_methods = ['random', 'expert', 'k-graphs'] +# for fit_method in fit_methods: +# print('\n-------------------------------------') +# print('fit method:', fit_method) +# parameters = {'ds_name': ds_name, +# 'gkernel': gkernel, +# 'edit_cost_name': 'LETTER2', +# 'ged_method': 'mIPFP', +# 'attr_distance': 'euclidean', +# 'fit_method': fit_method} +# print('parameters: ', parameters) +# xp_fit_method_for_non_symbolic(parameters, save_results=True, +# initial_solutions=40, +# Gn_data = [Gn, y_all, graph_dir], +# k_dis_data = [dis_mat, dis_max, dis_min, dis_mean]) + + +# #### xp 3: Fingerprint, sspkernel, using LETTER2. +# # load dataset. 
+# print('getting dataset and computing kernel distance matrix first...') +# ds_name = 'Fingerprint' +# gkernel = 'structuralspkernel' +# Gn, y_all, graph_dir = get_dataset(ds_name) +# # remove graphs without nodes and edges. +# Gn = [(idx, G) for idx, G in enumerate(Gn) if (nx.number_of_edges(G) != 0 +# and nx.number_of_edges(G) != 0)] +# idx = [G[0] for G in Gn] +# Gn = [G[1] for G in Gn] +# y_all = [y_all[i] for i in idx] +## Gn = Gn[0:50] +## y_all = y_all[0:50] +# # compute pair distances. +## dis_mat, dis_max, dis_min, dis_mean = kernel_distance_matrix(Gn, None, None, +## Kmatrix=None, gkernel=gkernel, verbose=True) +# dis_mat, dis_max, dis_min, dis_mean = 0, 0, 0, 0 +# # fitting and computing. +# fit_methods = ['k-graphs', 'expert', 'random', 'random', 'random'] +# for fit_method in fit_methods: +# print('\n-------------------------------------') +# print('fit method:', fit_method) +# parameters = {'ds_name': ds_name, +# 'gkernel': gkernel, +# 'edit_cost_name': 'LETTER2', +# 'ged_method': 'mIPFP', +# 'attr_distance': 'euclidean', +# 'fit_method': fit_method} +# xp_fit_method_for_non_symbolic(parameters, save_results=True, +# initial_solutions=40, +# Gn_data = [Gn, y_all, graph_dir], +# k_dis_data = [dis_mat, dis_max, dis_min, dis_mean]) + + +# #### xp 4: SYNTHETICnew, sspkernel, using NON_SYMBOLIC. +# # load dataset. +# print('getting dataset and computing kernel distance matrix first...') +# ds_name = 'SYNTHETICnew' +# gkernel = 'structuralspkernel' +# Gn, y_all, graph_dir = get_dataset(ds_name) +# # remove graphs without nodes and edges. +# Gn = [(idx, G) for idx, G in enumerate(Gn) if (nx.number_of_edges(G) != 0 +# and nx.number_of_edges(G) != 0)] +# idx = [G[0] for G in Gn] +# Gn = [G[1] for G in Gn] +# y_all = [y_all[i] for i in idx] +# Gn = Gn[0:10] +# y_all = y_all[0:10] +# for G in Gn: +# G.graph['filename'] = 'graph' + str(G.graph['name']) + '.gxl' +# # compute pair distances. 
+# dis_mat, dis_max, dis_min, dis_mean = kernel_distance_matrix(Gn, None, None, +# Kmatrix=None, gkernel=gkernel, verbose=True) +## dis_mat, dis_max, dis_min, dis_mean = 0, 0, 0, 0 +# # fitting and computing. +# fit_methods = ['k-graphs', 'random', 'random', 'random'] +# for fit_method in fit_methods: +# print('\n-------------------------------------') +# print('fit method:', fit_method) +# parameters = {'ds_name': ds_name, +# 'gkernel': gkernel, +# 'edit_cost_name': 'NON_SYMBOLIC', +# 'ged_method': 'mIPFP', +# 'attr_distance': 'euclidean', +# 'fit_method': fit_method} +# xp_fit_method_for_non_symbolic(parameters, save_results=True, +# initial_solutions=40, +# Gn_data = [Gn, y_all, graph_dir], +# k_dis_data = [dis_mat, dis_max, dis_min, dis_mean]) + + + ### xp 5: SYNTHETICnew, spkernel, using NON_SYMBOLIC. + gmfile = np.load('results/xp_fit_method/Kmatrix.SYNTHETICnew.spkernel.gm.npz') + Kmatrix = gmfile['Kmatrix'] + # normalization + Kmatrix_diag = Kmatrix.diagonal().copy() + for i in range(len(Kmatrix)): + for j in range(i, len(Kmatrix)): + Kmatrix[i][j] /= np.sqrt(Kmatrix_diag[i] * Kmatrix_diag[j]) + Kmatrix[j][i] = Kmatrix[i][j] + run_time = 21821.35 + np.savez('results/xp_fit_method/Kmatrix.SYNTHETICnew.spkernel.gm', + Kmatrix=Kmatrix, run_time=run_time) + + # load dataset. + print('getting dataset and computing kernel distance matrix first...') + ds_name = 'SYNTHETICnew' + gkernel = 'spkernel' + Gn, y_all, graph_dir = get_dataset(ds_name) +# # remove graphs without nodes and edges. +# Gn = [(idx, G) for idx, G in enumerate(Gn) if (nx.number_of_edges(G) != 0 +# and nx.number_of_edges(G) != 0)] +# idx = [G[0] for G in Gn] +# Gn = [G[1] for G in Gn] +# y_all = [y_all[i] for i in idx] +# Gn = Gn[0:5] +# y_all = y_all[0:5] + for G in Gn: + G.graph['filename'] = 'graph' + str(G.graph['name']) + '.gxl' + + # compute/read Gram matrix and pair distances. +# Kmatrix = compute_kernel(Gn, gkernel, None, None, True) +# np.savez('results/xp_fit_method/Kmatrix.' 
+ ds_name + '.' + gkernel + '.gm', +# Kmatrix=Kmatrix) + gmfile = np.load('results/xp_fit_method/Kmatrix.' + ds_name + '.' + gkernel + '.gm.npz') + Kmatrix = gmfile['Kmatrix'] + run_time = gmfile['run_time'] +# Kmatrix = Kmatrix[[0,1,2,3,4],:] +# Kmatrix = Kmatrix[:,[0,1,2,3,4]] + print('\nTime to compute Gram matrix for the whole dataset: ', run_time) + dis_mat, dis_max, dis_min, dis_mean = kernel_distance_matrix(Gn, None, None, + Kmatrix=Kmatrix, gkernel=gkernel, verbose=True) +# Kmatrix = np.zeros((len(Gn), len(Gn))) +# dis_mat, dis_max, dis_min, dis_mean = 0, 0, 0, 0 + + # fitting and computing. + fit_methods = ['k-graphs', 'random', 'random', 'random'] + for fit_method in fit_methods: + print('\n-------------------------------------') + print('fit method:', fit_method) + parameters = {'ds_name': ds_name, + 'gkernel': gkernel, + 'edit_cost_name': 'NON_SYMBOLIC', + 'ged_method': 'mIPFP', + 'attr_distance': 'euclidean', + 'fit_method': fit_method} + xp_fit_method_for_non_symbolic(parameters, save_results=True, + initial_solutions=1, + Gn_data=[Gn, y_all, graph_dir], + k_dis_data=[dis_mat, dis_max, dis_min, dis_mean], + Kmatrix=Kmatrix) \ No newline at end of file diff --git a/preimage/xp_letter_h.py b/gklearn/preimage/xp_letter_h.py similarity index 98% rename from preimage/xp_letter_h.py rename to gklearn/preimage/xp_letter_h.py index 4a707af..9a4121b 100644 --- a/preimage/xp_letter_h.py +++ b/gklearn/preimage/xp_letter_h.py @@ -14,9 +14,9 @@ import matplotlib.pyplot as plt import sys sys.path.insert(0, "../") -from pygraph.utils.graphfiles import loadDataset, loadGXL, saveGXL +from gklearn.utils.graphfiles import loadDataset, loadGXL, saveGXL from preimage.test_k_closest_graphs import median_on_k_closest_graphs, reform_attributes -from preimage.utils import get_same_item_indices +from preimage.utils import get_same_item_indices, kernel_distance_matrix from preimage.find_best_k import getRelations @@ -24,6 +24,8 @@ def xp_letter_h_LETTER2_cost(): ds = 
{'dataset': '/media/ljia/DATA/research-repo/codes/others/gedlib/tests_linlin/data/collections/Letter.xml', 'graph_dir': '/media/ljia/DATA/research-repo/codes/others/gedlib/tests_linlin/data/datasets/Letter/HIGH/'} # node/edge symb Gn, y_all = loadDataset(ds['dataset'], extra_params=ds['graph_dir']) + + dis_mat, dis_max, dis_min, dis_mean = kernel_distance_matrix(Gn, None, None, Kmatrix=None, gkernel='structuralspkernel') for G in Gn: reform_attributes(G) # ds = {'name': 'Letter-high', @@ -469,7 +471,7 @@ def draw_Letter_graph(graph, file_prefix): plt.savefig(file_prefix + '.eps', format='eps', dpi=300) # plt.show() plt.clf() - + if __name__ == "__main__": # xp_letter_h() diff --git a/gklearn/preimage/xp_monoterpenoides.py b/gklearn/preimage/xp_monoterpenoides.py new file mode 100644 index 0000000..b3f2b82 --- /dev/null +++ b/gklearn/preimage/xp_monoterpenoides.py @@ -0,0 +1,249 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Thu Jan 16 11:03:11 2020 + +@author: ljia +""" + +import numpy as np +import random +import csv +from shutil import copyfile +import networkx as nx +import matplotlib.pyplot as plt + +import sys +sys.path.insert(0, "../") +from gklearn.utils.graphfiles import loadDataset, loadGXL, saveGXL +from preimage.test_k_closest_graphs import median_on_k_closest_graphs, reform_attributes +from preimage.utils import get_same_item_indices +from preimage.find_best_k import getRelations + +def xp_monoterpenoides(): + ds = {'dataset': '../datasets/monoterpenoides/dataset_10+.ds', + 'graph_dir': '/media/ljia/DATA/research-repo/codes/Linlin/graphkit-learn/datasets/monoterpenoides/'} # node/edge symb + Gn, y_all = loadDataset(ds['dataset']) +# ds = {'name': 'Letter-high', +# 'dataset': '../datasets/Letter-high/Letter-high_A.txt'} # node/edge symb +# Gn, y_all = loadDataset(ds['dataset']) +# Gn = Gn[0:50] + gkernel = 'treeletkernel' + node_label = 'atom' + edge_label = 'bond_type' + ds_name = 'monoterpenoides' + dir_output = 
'results/xp_monoterpenoides/' + + repeats = 1 +# k_list = range(2, 11) + k_list = [0] + fit_method = 'k-graphs' + # get indices by classes. + y_idx = get_same_item_indices(y_all) + + # create result files. + fn_output_detail = 'results_detail.' + ds_name + '.' + gkernel + '.' + fit_method + '.csv' + f_detail = open(dir_output + fn_output_detail, 'a') + csv.writer(f_detail).writerow(['dataset', 'graph kernel', 'fit method', 'k', + 'target', 'repeat', 'SOD SM', 'SOD GM', 'dis_k SM', 'dis_k GM', + 'min dis_k gi', 'SOD SM -> GM', 'dis_k SM -> GM', 'dis_k gi -> SM', + 'dis_k gi -> GM', 'median set']) + f_detail.close() + fn_output_summary = 'results_summary.' + ds_name + '.' + gkernel + '.' + fit_method + '.csv' + f_summary = open(dir_output + fn_output_summary, 'a') + csv.writer(f_summary).writerow(['dataset', 'graph kernel', 'fit method', 'k', + 'target', 'SOD SM', 'SOD GM', 'dis_k SM', 'dis_k GM', + 'min dis_k gi', 'SOD SM -> GM', 'dis_k SM -> GM', 'dis_k gi -> SM', + 'dis_k gi -> GM', '# SOD SM -> GM', '# dis_k SM -> GM', + '# dis_k gi -> SM', '# dis_k gi -> GM', 'repeats better SOD SM -> GM', + 'repeats better dis_k SM -> GM', 'repeats better dis_k gi -> SM', + 'repeats better dis_k gi -> GM']) + f_summary.close() + + random.seed(1) + rdn_seed_list = random.sample(range(0, repeats * 100), repeats) + + for k in k_list: + print('\n--------- k =', k, '----------') + + sod_sm_mean_list = [] + sod_gm_mean_list = [] + dis_k_sm_mean_list = [] + dis_k_gm_mean_list = [] + dis_k_gi_min_mean_list = [] +# nb_sod_sm2gm = [0, 0, 0] +# nb_dis_k_sm2gm = [0, 0, 0] +# nb_dis_k_gi2sm = [0, 0, 0] +# nb_dis_k_gi2gm = [0, 0, 0] +# repeats_better_sod_sm2gm = [] +# repeats_better_dis_k_sm2gm = [] +# repeats_better_dis_k_gi2sm = [] +# repeats_better_dis_k_gi2gm = [] + + for i, (y, values) in enumerate(y_idx.items()): + print('\ny =', y) +# y = 'I' +# values = y_idx[y] + + k = len(values) +# k = kkk + + sod_sm_list = [] + sod_gm_list = [] + dis_k_sm_list = [] + dis_k_gm_list = [] + 
dis_k_gi_min_list = [] + nb_sod_sm2gm = [0, 0, 0] + nb_dis_k_sm2gm = [0, 0, 0] + nb_dis_k_gi2sm = [0, 0, 0] + nb_dis_k_gi2gm = [0, 0, 0] + repeats_better_sod_sm2gm = [] + repeats_better_dis_k_sm2gm = [] + repeats_better_dis_k_gi2sm = [] + repeats_better_dis_k_gi2gm = [] + + for repeat in range(repeats): + print('\nrepeat =', repeat) + random.seed(rdn_seed_list[repeat]) + median_set_idx_idx = random.sample(range(0, len(values)), k) + median_set_idx = [values[idx] for idx in median_set_idx_idx] + print('median set: ', median_set_idx) + Gn_median = [Gn[g] for g in values] + + sod_sm, sod_gm, dis_k_sm, dis_k_gm, dis_k_gi, dis_k_gi_min, idx_dis_k_gi_min \ + = median_on_k_closest_graphs(Gn_median, node_label, edge_label, + gkernel, k, fit_method=fit_method, graph_dir=ds['graph_dir'], + edit_costs=None, group_min=median_set_idx_idx, + dataset=ds_name, parallel=False) + + # write result detail. + sod_sm2gm = getRelations(np.sign(sod_gm - sod_sm)) + dis_k_sm2gm = getRelations(np.sign(dis_k_gm - dis_k_sm)) + dis_k_gi2sm = getRelations(np.sign(dis_k_sm - dis_k_gi_min)) + dis_k_gi2gm = getRelations(np.sign(dis_k_gm - dis_k_gi_min)) + f_detail = open(dir_output + fn_output_detail, 'a') + csv.writer(f_detail).writerow([ds_name, gkernel, fit_method, k, + y, repeat, + sod_sm, sod_gm, dis_k_sm, dis_k_gm, + dis_k_gi_min, sod_sm2gm, dis_k_sm2gm, dis_k_gi2sm, + dis_k_gi2gm, median_set_idx]) + f_detail.close() + + # compute result summary. 
+ sod_sm_list.append(sod_sm) + sod_gm_list.append(sod_gm) + dis_k_sm_list.append(dis_k_sm) + dis_k_gm_list.append(dis_k_gm) + dis_k_gi_min_list.append(dis_k_gi_min) + # # SOD SM -> GM + if sod_sm > sod_gm: + nb_sod_sm2gm[0] += 1 + repeats_better_sod_sm2gm.append(repeat) + elif sod_sm == sod_gm: + nb_sod_sm2gm[1] += 1 + elif sod_sm < sod_gm: + nb_sod_sm2gm[2] += 1 + # # dis_k SM -> GM + if dis_k_sm > dis_k_gm: + nb_dis_k_sm2gm[0] += 1 + repeats_better_dis_k_sm2gm.append(repeat) + elif dis_k_sm == dis_k_gm: + nb_dis_k_sm2gm[1] += 1 + elif dis_k_sm < dis_k_gm: + nb_dis_k_sm2gm[2] += 1 + # # dis_k gi -> SM + if dis_k_gi_min > dis_k_sm: + nb_dis_k_gi2sm[0] += 1 + repeats_better_dis_k_gi2sm.append(repeat) + elif dis_k_gi_min == dis_k_sm: + nb_dis_k_gi2sm[1] += 1 + elif dis_k_gi_min < dis_k_sm: + nb_dis_k_gi2sm[2] += 1 + # # dis_k gi -> GM + if dis_k_gi_min > dis_k_gm: + nb_dis_k_gi2gm[0] += 1 + repeats_better_dis_k_gi2gm.append(repeat) + elif dis_k_gi_min == dis_k_gm: + nb_dis_k_gi2gm[1] += 1 + elif dis_k_gi_min < dis_k_gm: + nb_dis_k_gi2gm[2] += 1 + + # save median graphs. + fname_sm = '/media/ljia/DATA/research-repo/codes/others/gedlib/tests_linlin/output/tmp_ged/set_median.gxl' + fn_pre_sm_new = dir_output + 'medians/set_median.' + fit_method \ + + '.k' + str(int(k)) + '.y' + str(int(y)) + '.repeat' + str(repeat) + copyfile(fname_sm, fn_pre_sm_new + '.gxl') + fname_gm = '/media/ljia/DATA/research-repo/codes/others/gedlib/tests_linlin/output/tmp_ged/gen_median.gxl' + fn_pre_gm_new = dir_output + 'medians/gen_median.' + fit_method \ + + '.k' + str(int(k)) + '.y' + str(int(y)) + '.repeat' + str(repeat) + copyfile(fname_gm, fn_pre_gm_new + '.gxl') + G_best_kernel = Gn_median[idx_dis_k_gi_min].copy() +# reform_attributes(G_best_kernel) + fn_pre_g_best_kernel = dir_output + 'medians/g_best_kernel.' 
+ fit_method \ + + '.k' + str(int(k)) + '.y' + str(int(y)) + '.repeat' + str(repeat) + saveGXL(G_best_kernel, fn_pre_g_best_kernel + '.gxl', method='gedlib') + +# # plot median graphs. +# set_median = loadGXL(fn_pre_sm_new + '.gxl') +# gen_median = loadGXL(fn_pre_gm_new + '.gxl') +# draw_Letter_graph(set_median, fn_pre_sm_new) +# draw_Letter_graph(gen_median, fn_pre_gm_new) +# draw_Letter_graph(G_best_kernel, fn_pre_g_best_kernel) + + # write result summary for each letter. + sod_sm_mean_list.append(np.mean(sod_sm_list)) + sod_gm_mean_list.append(np.mean(sod_gm_list)) + dis_k_sm_mean_list.append(np.mean(dis_k_sm_list)) + dis_k_gm_mean_list.append(np.mean(dis_k_gm_list)) + dis_k_gi_min_mean_list.append(np.mean(dis_k_gi_min_list)) + sod_sm2gm_mean = getRelations(np.sign(sod_gm_mean_list[-1] - sod_sm_mean_list[-1])) + dis_k_sm2gm_mean = getRelations(np.sign(dis_k_gm_mean_list[-1] - dis_k_sm_mean_list[-1])) + dis_k_gi2sm_mean = getRelations(np.sign(dis_k_sm_mean_list[-1] - dis_k_gi_min_mean_list[-1])) + dis_k_gi2gm_mean = getRelations(np.sign(dis_k_gm_mean_list[-1] - dis_k_gi_min_mean_list[-1])) + f_summary = open(dir_output + fn_output_summary, 'a') + csv.writer(f_summary).writerow([ds_name, gkernel, fit_method, k, y, + sod_sm_mean_list[-1], sod_gm_mean_list[-1], + dis_k_sm_mean_list[-1], dis_k_gm_mean_list[-1], + dis_k_gi_min_mean_list[-1], sod_sm2gm_mean, dis_k_sm2gm_mean, + dis_k_gi2sm_mean, dis_k_gi2gm_mean, nb_sod_sm2gm, + nb_dis_k_sm2gm, nb_dis_k_gi2sm, nb_dis_k_gi2gm, + repeats_better_sod_sm2gm, repeats_better_dis_k_sm2gm, + repeats_better_dis_k_gi2sm, repeats_better_dis_k_gi2gm]) + f_summary.close() + + + # write result summary for each letter. 
+ sod_sm_mean = np.mean(sod_sm_mean_list) + sod_gm_mean = np.mean(sod_gm_mean_list) + dis_k_sm_mean = np.mean(dis_k_sm_mean_list) + dis_k_gm_mean = np.mean(dis_k_gm_mean_list) + dis_k_gi_min_mean = np.mean(dis_k_gi_min_list) + sod_sm2gm_mean = getRelations(np.sign(sod_gm_mean - sod_sm_mean)) + dis_k_sm2gm_mean = getRelations(np.sign(dis_k_gm_mean - dis_k_sm_mean)) + dis_k_gi2sm_mean = getRelations(np.sign(dis_k_sm_mean - dis_k_gi_min_mean)) + dis_k_gi2gm_mean = getRelations(np.sign(dis_k_gm_mean - dis_k_gi_min_mean)) + f_summary = open(dir_output + fn_output_summary, 'a') + csv.writer(f_summary).writerow([ds_name, gkernel, fit_method, k, 'all', + sod_sm_mean, sod_gm_mean, dis_k_sm_mean, dis_k_gm_mean, + dis_k_gi_min_mean, sod_sm2gm_mean, dis_k_sm2gm_mean, + dis_k_gi2sm_mean, dis_k_gi2gm_mean]) + f_summary.close() + + + print('\ncomplete.') + + +#Dessin median courrant +def draw_Letter_graph(graph, file_prefix): + plt.figure() + pos = {} + for n in graph.nodes: + pos[n] = np.array([float(graph.node[n]['x']),float(graph.node[n]['y'])]) + nx.draw_networkx(graph, pos) + plt.savefig(file_prefix + '.eps', format='eps', dpi=300) +# plt.show() + plt.clf() + + +if __name__ == "__main__": + xp_monoterpenoides() \ No newline at end of file diff --git a/pygraph/utils/__init__.py b/gklearn/utils/__init__.py similarity index 92% rename from pygraph/utils/__init__.py rename to gklearn/utils/__init__.py index 78012f0..ea9c418 100644 --- a/pygraph/utils/__init__.py +++ b/gklearn/utils/__init__.py @@ -1,5 +1,5 @@ # -*-coding:utf-8 -*- -"""Pygraph - utils module +"""gklearn - utils module Implement some methods to manage graphs graphfiles.py : load .gxl and .ct files diff --git a/pygraph/utils/graphdataset.py b/gklearn/utils/graphdataset.py similarity index 97% rename from pygraph/utils/graphdataset.py rename to gklearn/utils/graphdataset.py index f74532e..be23461 100644 --- a/pygraph/utils/graphdataset.py +++ b/gklearn/utils/graphdataset.py @@ -13,42 +13,67 @@ def 
get_dataset_attributes(Gn, ---------- Gn : List of NetworkX graph List of graphs whose information will be returned. + target : list The list of classification targets corresponding to Gn. Only works for classification problems. + attr_names : list List of strings which indicate which informations will be returned. The possible choices includes: + 'substructures': sub-structures Gn contains, including 'linear', 'non - linear' and 'cyclic'. + linear' and 'cyclic'. + 'node_labeled': whether vertices have symbolic labels. + 'edge_labeled': whether egdes have symbolic labels. + 'is_directed': whether graphs in Gn are directed. + 'dataset_size': number of graphs in Gn. + 'ave_node_num': average number of vertices of graphs in Gn. + 'min_node_num': minimum number of vertices of graphs in Gn. + 'max_node_num': maximum number of vertices of graphs in Gn. + 'ave_edge_num': average number of edges of graphs in Gn. + 'min_edge_num': minimum number of edges of graphs in Gn. + 'max_edge_num': maximum number of edges of graphs in Gn. + 'ave_node_degree': average vertex degree of graphs in Gn. + 'min_node_degree': minimum vertex degree of graphs in Gn. + 'max_node_degree': maximum vertex degree of graphs in Gn. + 'ave_fill_factor': average fill factor (number_of_edges / - (number_of_nodes ** 2)) of graphs in Gn. + (number_of_nodes ** 2)) of graphs in Gn. + 'min_fill_factor': minimum fill factor of graphs in Gn. + 'max_fill_factor': maximum fill factor of graphs in Gn. + 'node_label_num': number of symbolic vertex labels. + 'edge_label_num': number of symbolic edge labels. + 'node_attr_dim': number of dimensions of non-symbolic vertex labels. - Extracted from the 'attributes' attribute of graph nodes. - 'edge_attr_dim': number of dimensions of non-symbolic edge labels. - Extracted from the 'attributes' attribute of graph edges. - 'class_number': number of classes. Only available for classification - problems. + Extracted from the 'attributes' attribute of graph nodes. 
+ + 'edge_attr_dim': number of dimensions of non-symbolic edge labels. + Extracted from the 'attributes' attribute of graph edges. + + 'class_number': number of classes. Only available for classification problems. + node_label : string Node attribute used as label. The default node label is atom. Mandatory when 'node_labeled' or 'node_label_num' is required. + edge_label : string Edge attribute used as label. The default edge label is bond_type. Mandatory when 'edge_labeled' or 'edge_label_num' is required. diff --git a/pygraph/utils/graphfiles.py b/gklearn/utils/graphfiles.py similarity index 64% rename from pygraph/utils/graphfiles.py rename to gklearn/utils/graphfiles.py index 48583dd..c183276 100644 --- a/pygraph/utils/graphfiles.py +++ b/gklearn/utils/graphfiles.py @@ -9,14 +9,19 @@ def loadCT(filename): ------ a typical example of data in .ct is like this: - 3 2 <- number of nodes and edges - 0.0000 0.0000 0.0000 C <- each line describes a node (x,y,z + label) - 0.0000 0.0000 0.0000 C - 0.0000 0.0000 0.0000 O - 1 3 1 1 <- each line describes an edge : to, from, bond type, bond stereo - 2 3 1 1 + 3 2 <- number of nodes and edges + + 0.0000 0.0000 0.0000 C <- each line describes a node (x,y,z + label) + + 0.0000 0.0000 0.0000 C + + 0.0000 0.0000 0.0000 O + + 1 3 1 1 <- each line describes an edge : to, from, bond type, bond stereo + + 2 3 1 1 - Check https://www.google.com/url?sa=t&rct=j&q=&esrc=s&source=web&cd=10&ved=2ahUKEwivhaSdjsTlAhVhx4UKHczHA8gQFjAJegQIARAC&url=https%3A%2F%2Fwww.daylight.com%2Fmeetings%2Fmug05%2FKappler%2Fctfile.pdf&usg=AOvVaw1cDNrrmMClkFPqodlF2inS + Check `CTFile Formats file `__ for detailed format discription. 
""" import networkx as nx @@ -82,8 +87,41 @@ def loadGXL(filename): return g -def saveGXL(graph, filename, method='benoit'): - if method == 'benoit': +def saveGXL(graph, filename, method='default'): + if method == 'default': + gxl_file = open(filename, 'w') + gxl_file.write("\n") + gxl_file.write("\n") + gxl_file.write("\n") + gxl_file.write("\n") + for v, attrs in graph.nodes(data=True): + if graph.graph['node_labels'] == [] and graph.graph['node_attrs'] == []: + gxl_file.write("\n") + else: + gxl_file.write("") + for l_name in graph.graph['node_labels']: + gxl_file.write("" + + str(attrs[l_name]) + "") + for a_name in graph.graph['node_attrs']: + gxl_file.write("" + + str(attrs[a_name]) + "") + gxl_file.write("\n") + for v1, v2, attrs in graph.edges(data=True): + if graph.graph['edge_labels'] == [] and graph.graph['edge_attrs'] == []: + gxl_file.write("\n") + else: + gxl_file.write("") + for l_name in graph.graph['edge_labels']: + gxl_file.write("" + + str(attrs[l_name]) + "") + for a_name in graph.graph['edge_attrs']: + gxl_file.write("" + + str(attrs[a_name]) + "") + gxl_file.write("\n") + gxl_file.write("\n") + gxl_file.write("") + gxl_file.close() + elif method == 'benoit': import xml.etree.ElementTree as ET root_node = ET.Element('gxl') attr = dict() @@ -166,7 +204,7 @@ def loadSDF(filename): Notes ------ A SDF file contains a group of molecules, represented in the similar way as in MOL format. - Check http://www.nonlinear.com/progenesis/sdf-studio/v0.9/faq/sdf-file-format-guidance.aspx, 2018 for detailed structure. + Check `here `__ for detailed structure. """ import networkx as nx from os.path import basename @@ -216,7 +254,7 @@ def loadMAT(filename, extra_params): Notes ------ A MAT file contains a struct array containing graphs, and a column vector lx containing a class label for each graph. - Check README in downloadable file in http://mlcb.is.tuebingen.mpg.de/Mitarbeiter/Nino/WL/, 2018 for detailed structure. 
+ Check README in `downloadable file `__ for detailed structure. """ from scipy.io import loadmat import numpy as np @@ -278,52 +316,111 @@ def loadMAT(filename, extra_params): return data, y -def loadTXT(dirname_dataset): +def loadTXT(filename): """Load graph data from a .txt file. Notes ------ The graph data is loaded from separate files. - Check README in downloadable file http://tiny.cc/PK_MLJ_data, 2018 for detailed structure. + Check README in `downloadable file `__, 2018 for detailed structure. """ - import numpy as np +# import numpy as np import networkx as nx from os import listdir - from os.path import dirname + from os.path import dirname, basename + + + def get_label_names(frm): + """Get label names from DS_label_readme.txt file. + """ + + def get_names_from_line(line): + """Get names of labels/attributes from a line. + """ + str_names = line.split('[')[1].split(']')[0] + names = str_names.split(',') + names = [attr.strip() for attr in names] + return names + + + label_names = {'node_labels': [], 'node_attrs': [], + 'edge_labels': [], 'edge_attrs': []} + content_rm = open(frm).read().splitlines() + for line in content_rm: + line = line.strip() + if line.startswith('Node labels:'): + label_names['node_labels'] = get_names_from_line(line) + elif line.startswith('Node attributes:'): + label_names['node_attrs'] = get_names_from_line(line) + elif line.startswith('Edge labels:'): + label_names['edge_labels'] = get_names_from_line(line) + elif line.startswith('Edge attributes:'): + label_names['edge_attrs'] = get_names_from_line(line) + return label_names + + # get dataset name. 
+ dirname_dataset = dirname(filename) + filename = basename(filename) + fn_split = filename.split('_A') + ds_name = fn_split[0].strip() + # load data file names for name in listdir(dirname_dataset): - if '_A' in name: + if ds_name + '_A' in name: fam = dirname_dataset + '/' + name - elif '_graph_indicator' in name: + elif ds_name + '_graph_indicator' in name: fgi = dirname_dataset + '/' + name - elif '_graph_labels' in name: + elif ds_name + '_graph_labels' in name: fgl = dirname_dataset + '/' + name - elif '_node_labels' in name: + elif ds_name + '_node_labels' in name: fnl = dirname_dataset + '/' + name - elif '_edge_labels' in name: + elif ds_name + '_edge_labels' in name: fel = dirname_dataset + '/' + name - elif '_edge_attributes' in name: + elif ds_name + '_edge_attributes' in name: fea = dirname_dataset + '/' + name - elif '_node_attributes' in name: + elif ds_name + '_node_attributes' in name: fna = dirname_dataset + '/' + name - elif '_graph_attributes' in name: + elif ds_name + '_graph_attributes' in name: fga = dirname_dataset + '/' + name + elif ds_name + '_label_readme' in name: + frm = dirname_dataset + '/' + name # this is supposed to be the node attrs, make sure to put this as the last 'elif' - elif '_attributes' in name: + elif ds_name + '_attributes' in name: fna = dirname_dataset + '/' + name + + # get labels and attributes names. 
+ if 'frm' in locals(): + label_names = get_label_names(frm) + else: + label_names = {'node_labels': [], 'node_attrs': [], + 'edge_labels': [], 'edge_attrs': []} content_gi = open(fgi).read().splitlines() # graph indicator content_am = open(fam).read().splitlines() # adjacency matrix - content_gl = open(fgl).read().splitlines() # lass labels + content_gl = open(fgl).read().splitlines() # graph labels # create graphs and add nodes - data = [nx.Graph(name=i) for i in range(0, len(content_gl))] + data = [nx.Graph(name=str(i), + node_labels=label_names['node_labels'], + node_attrs=label_names['node_attrs'], + edge_labels=label_names['edge_labels'], + edge_attrs=label_names['edge_attrs']) for i in range(0, len(content_gl))] if 'fnl' in locals(): content_nl = open(fnl).read().splitlines() # node labels - for i, line in enumerate(content_gi): + for idx, line in enumerate(content_gi): # transfer to int first in case of unexpected blanks - data[int(line) - 1].add_node(i, atom=str(int(content_nl[i]))) + data[int(line) - 1].add_node(idx) + labels = [l.strip() for l in content_nl[idx].split(',')] + data[int(line) - 1].nodes[idx]['atom'] = str(int(labels[0])) # @todo: this should be removed after. 
+ if data[int(line) - 1].graph['node_labels'] == []: + for i, label in enumerate(labels): + l_name = 'label_' + str(i) + data[int(line) - 1].nodes[idx][l_name] = label + data[int(line) - 1].graph['node_labels'].append(l_name) + else: + for i, l_name in enumerate(data[int(line) - 1].graph['node_labels']): + data[int(line) - 1].nodes[idx][l_name] = labels[i] else: for i, line in enumerate(content_gi): data[int(line) - 1].add_node(i) @@ -340,28 +437,52 @@ def loadTXT(dirname_dataset): # add edge labels if 'fel' in locals(): content_el = open(fel).read().splitlines() - for index, line in enumerate(content_el): - label = line.strip() - n = [int(i) - 1 for i in content_am[index].split(',')] + for idx, line in enumerate(content_el): + labels = [l.strip() for l in line.split(',')] + n = [int(i) - 1 for i in content_am[idx].split(',')] g = int(content_gi[n[0]]) - 1 - data[g].edges[n[0], n[1]]['bond_type'] = label + data[g].edges[n[0], n[1]]['bond_type'] = labels[0] # @todo: this should be removed after. + if data[g].graph['edge_labels'] == []: + for i, label in enumerate(labels): + l_name = 'label_' + str(i) + data[g].edges[n[0], n[1]][l_name] = label + data[g].graph['edge_labels'].append(l_name) + else: + for i, l_name in enumerate(data[g].graph['edge_labels']): + data[g].edges[n[0], n[1]][l_name] = labels[i] # add node attributes if 'fna' in locals(): content_na = open(fna).read().splitlines() - for i, line in enumerate(content_na): - attrs = [i.strip() for i in line.split(',')] - g = int(content_gi[i]) - 1 - data[g].nodes[i]['attributes'] = attrs + for idx, line in enumerate(content_na): + attrs = [a.strip() for a in line.split(',')] + g = int(content_gi[idx]) - 1 + data[g].nodes[idx]['attributes'] = attrs # @todo: this should be removed after. 
+ if data[g].graph['node_attrs'] == []: + for i, attr in enumerate(attrs): + a_name = 'attr_' + str(i) + data[g].nodes[idx][a_name] = attr + data[g].graph['node_attrs'].append(a_name) + else: + for i, a_name in enumerate(data[g].graph['node_attrs']): + data[g].nodes[idx][a_name] = attrs[i] # add edge attributes if 'fea' in locals(): content_ea = open(fea).read().splitlines() - for index, line in enumerate(content_ea): - attrs = [i.strip() for i in line.split(',')] - n = [int(i) - 1 for i in content_am[index].split(',')] + for idx, line in enumerate(content_ea): + attrs = [a.strip() for a in line.split(',')] + n = [int(i) - 1 for i in content_am[idx].split(',')] g = int(content_gi[n[0]]) - 1 - data[g].edges[n[0], n[1]]['attributes'] = attrs + data[g].edges[n[0], n[1]]['attributes'] = attrs # @todo: this should be removed after. + if data[g].graph['edge_attrs'] == []: + for i, attr in enumerate(attrs): + a_name = 'attr_' + str(i) + data[g].edges[n[0], n[1]][a_name] = attr + data[g].graph['edge_attrs'].append(a_name) + else: + for i, a_name in enumerate(data[g].graph['edge_attrs']): + data[g].edges[n[0], n[1]][a_name] = attrs[i] # load y y = [int(i) for i in content_gl] @@ -384,25 +505,32 @@ def loadDataset(filename, filename_y=None, extra_params=None): Return ------ data : List of NetworkX graph. + y : List - Targets corresponding to graphs. + + Targets corresponding to graphs. Notes ----- This function supports following graph dataset formats: + 'ds': load data from .ds file. See comments of function loadFromDS for a example. + 'cxl': load data from Graph eXchange Language file (.cxl file). See - http://www.gupro.de/GXL/Introduction/background.html, 2019 for detail. + `here `__ for detail. + 'sdf': load data from structured data file (.sdf file). See - http://www.nonlinear.com/progenesis/sdf-studio/v0.9/faq/sdf-file-format-guidance.aspx, - 2018 for details. + `here `__ + for details. + 'mat': Load graph data from a MATLAB (up to version 7.1) .mat file. 
See - README in downloadable file in http://mlcb.is.tuebingen.mpg.de/Mitarbeiter/Nino/WL/, - 2018 for details. + README in `downloadable file `__ + for details. + 'txt': Load graph data from a special .txt file. See - https://ls11-www.cs.tu-dortmund.de/staff/morris/graphkerneldatasets, - 2019 for details. Note here filename is the name of either .txt file in - the dataset directory. + `here `__ + for details. Note here filename is the name of either .txt file in + the dataset directory. """ extension = splitext(filename)[1][1:] if extension == "ds": @@ -423,7 +551,7 @@ def loadDataset(filename, filename_y=None, extra_params=None): elif extension == 'xml': data, y = loadFromXML(filename, extra_params) elif extension == "sdf": - import numpy as np +# import numpy as np from tqdm import tqdm import sys @@ -448,8 +576,7 @@ def loadDataset(filename, filename_y=None, extra_params=None): elif extension == "mat": data, y = loadMAT(filename, extra_params) elif extension == 'txt': - dirname_dataset = dirname(filename) - data, y = loadTXT(dirname_dataset) + data, y = loadTXT(filename) # print(len(y)) # print(y) # print(data[0].nodes(data=True)) @@ -485,9 +612,13 @@ def loadFromXML(filename, extra_params): def loadFromDS(filename, filename_y): """Load data from .ds file. + Possible graph formats include: - '.ct': see function loadCT for detail. - '.gxl': see dunction loadGXL for detail. + + '.ct': see function loadCT for detail. + + '.gxl': see dunction loadGXL for detail. + Note these graph formats are checked automatically by the extensions of graph files. 
""" @@ -545,7 +676,7 @@ def saveDataset(Gn, y, gformat='gxl', group=None, filename='gfile', xparams=None if not os.path.exists(dirname_ds) : os.makedirs(dirname_ds) - if 'graph_dir' in xparams: + if xparams is not None and 'graph_dir' in xparams: graph_dir = xparams['graph_dir'] + '/' if not os.path.exists(graph_dir): os.makedirs(graph_dir) @@ -553,13 +684,14 @@ def saveDataset(Gn, y, gformat='gxl', group=None, filename='gfile', xparams=None graph_dir = dirname_ds if group == 'xml' and gformat == 'gxl': + kwargs = {'method': xparams['method']} if xparams is not None else {} with open(filename + '.xml', 'w') as fgroup: fgroup.write("") fgroup.write("\n") fgroup.write("\n") for idx, g in enumerate(Gn): fname_tmp = "graph" + str(idx) + ".gxl" - saveGXL(g, graph_dir + fname_tmp, method=xparams['method']) + saveGXL(g, graph_dir + fname_tmp, **kwargs) fgroup.write("\n\t") fgroup.write("\n") fgroup.close() @@ -589,34 +721,47 @@ if __name__ == '__main__': # print(Gn[1].edges(data=True)) # print(y[1]) - ### Convert graph from one format to another. - # .gxl file. - import networkx as nx - ds = {'name': 'monoterpenoides', - 'dataset': '../../datasets/monoterpenoides/dataset_10+.ds'} # node/edge symb - Gn, y = loadDataset(ds['dataset']) - y = [int(i) for i in y] - print(Gn[1].nodes(data=True)) - print(Gn[1].edges(data=True)) - print(y[1]) - # Convert a graph to the proper NetworkX format that can be recognized by library gedlib. 
- Gn_new = [] - for G in Gn: - G_new = nx.Graph() - for nd, attrs in G.nodes(data=True): - G_new.add_node(str(nd), chem=attrs['atom']) - for nd1, nd2, attrs in G.edges(data=True): - G_new.add_edge(str(nd1), str(nd2), valence=attrs['bond_type']) -# G_new.add_edge(str(nd1), str(nd2)) - Gn_new.append(G_new) - print(Gn_new[1].nodes(data=True)) - print(Gn_new[1].edges(data=True)) - print(Gn_new[1]) - filename = '/media/ljia/DATA/research-repo/codes/others/gedlib/tests_linlin/generated_datsets/monoterpenoides/gxl/monoterpenoides' - xparams = {'method': 'gedlib'} - saveDataset(Gn, y, gformat='gxl', group='xml', filename=filename, xparams=xparams) +# ### Convert graph from one format to another. +# # .gxl file. +# import networkx as nx +# ds = {'name': 'monoterpenoides', +# 'dataset': '../../datasets/monoterpenoides/dataset_10+.ds'} # node/edge symb +# Gn, y = loadDataset(ds['dataset']) +# y = [int(i) for i in y] +# print(Gn[1].nodes(data=True)) +# print(Gn[1].edges(data=True)) +# print(y[1]) +# # Convert a graph to the proper NetworkX format that can be recognized by library gedlib. +# Gn_new = [] +# for G in Gn: +# G_new = nx.Graph() +# for nd, attrs in G.nodes(data=True): +# G_new.add_node(str(nd), chem=attrs['atom']) +# for nd1, nd2, attrs in G.edges(data=True): +# G_new.add_edge(str(nd1), str(nd2), valence=attrs['bond_type']) +## G_new.add_edge(str(nd1), str(nd2)) +# Gn_new.append(G_new) +# print(Gn_new[1].nodes(data=True)) +# print(Gn_new[1].edges(data=True)) +# print(Gn_new[1]) +# filename = '/media/ljia/DATA/research-repo/codes/others/gedlib/tests_linlin/generated_datsets/monoterpenoides/gxl/monoterpenoides' +# xparams = {'method': 'gedlib'} +# saveDataset(Gn, y, gformat='gxl', group='xml', filename=filename, xparams=xparams) + # save dataset. 
# ds = {'name': 'MUTAG', 'dataset': '../../datasets/MUTAG/MUTAG.mat', # 'extra_params': {'am_sp_al_nl_el': [0, 0, 3, 1, 2]}} # node/edge symb # Gn, y = loadDataset(ds['dataset'], extra_params=ds['extra_params']) -# saveDataset(Gn, y, group='xml', filename='temp/temp') \ No newline at end of file +# saveDataset(Gn, y, group='xml', filename='temp/temp') + dataset = '/media/ljia/DATA/research-repo/codes/Linlin/graphkit-learn/datasets/SYNTHETICnew/SYNTHETICnew_A.txt' + Gn, y_all = loadDataset(dataset) + filename = '/media/ljia/DATA/research-repo/codes/others/gedlib/tests_linlin/generated_datsets/SYNTHETICnew/SYNTHETICnew' + saveDataset(Gn, y_all, gformat='gxl', group='xml', filename=filename) + + # test - new way to add labels and attributes. +# dataset = '/media/ljia/DATA/research-repo/codes/Linlin/graphkit-learn/datasets/SYNTHETICnew/SYNTHETICnew_A.txt' +# dataset = '/media/ljia/DATA/research-repo/codes/Linlin/graphkit-learn/datasets/Fingerprint/Fingerprint_A.txt' +# dataset = '/media/ljia/DATA/research-repo/codes/Linlin/graphkit-learn/datasets/Letter-med/Letter-med_A.txt' +# dataset = '/media/ljia/DATA/research-repo/codes/Linlin/graphkit-learn/datasets/AIDS/AIDS_A.txt' +# dataset = '/media/ljia/DATA/research-repo/codes/Linlin/graphkit-learn/datasets/ENZYMES_txt/ENZYMES_A_sparse.txt' +# Gn, y_all = loadDataset(dataset) \ No newline at end of file diff --git a/gklearn/utils/ipython_log.py b/gklearn/utils/ipython_log.py new file mode 100644 index 0000000..9574d84 --- /dev/null +++ b/gklearn/utils/ipython_log.py @@ -0,0 +1,7 @@ +# IPython log file + +runfile('/media/ljia/DATA/research-repo/codes/Linlin/py-graph/preimage/test_iam.py', wdir='/media/ljia/DATA/research-repo/codes/Linlin/py-graph/preimage') +runfile('/media/ljia/DATA/research-repo/codes/Linlin/py-graph/preimage/test_iam.py', wdir='/media/ljia/DATA/research-repo/codes/Linlin/py-graph/preimage') +runfile('/media/ljia/DATA/research-repo/codes/Linlin/py-graph/preimage/test_iam.py', 
wdir='/media/ljia/DATA/research-repo/codes/Linlin/py-graph/preimage') +runfile('/media/ljia/DATA/research-repo/codes/Linlin/py-graph/preimage/test_iam.py', wdir='/media/ljia/DATA/research-repo/codes/Linlin/py-graph/preimage') +runfile('/media/ljia/DATA/research-repo/codes/Linlin/py-graph/preimage/test_iam.py', wdir='/media/ljia/DATA/research-repo/codes/Linlin/py-graph/preimage') diff --git a/pygraph/utils/isNotebook.py b/gklearn/utils/isNotebook.py similarity index 100% rename from pygraph/utils/isNotebook.py rename to gklearn/utils/isNotebook.py diff --git a/pygraph/utils/kernels.py b/gklearn/utils/kernels.py similarity index 94% rename from pygraph/utils/kernels.py rename to gklearn/utils/kernels.py index 1e9e44d..fb8e271 100644 --- a/pygraph/utils/kernels.py +++ b/gklearn/utils/kernels.py @@ -31,7 +31,7 @@ def gaussiankernel(x, y, gamma=None): K(x, y) = exp(-gamma ||x-y||^2). - Read more in the :ref:`User Guide `. + Read more in the `User Guide of scikit-learn library `__. Parameters ---------- @@ -155,4 +155,4 @@ def kernelproduct(k1, k2, d11, d12, d21=None, d22=None, lamda=1): if __name__ == '__main__': - o = polynomialkernel([1, 2], [3, 4], 2, 3) \ No newline at end of file + o = polynomialkernel([1, 2], [3, 4], 2, 3) diff --git a/gklearn/utils/logger2file.py b/gklearn/utils/logger2file.py new file mode 100644 index 0000000..2c7fc0b --- /dev/null +++ b/gklearn/utils/logger2file.py @@ -0,0 +1,27 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Fri Nov 8 14:21:25 2019 + +@author: ljia +""" + +import sys +import time + +class Logger(object): + def __init__(self): + self.terminal = sys.stdout + self.log = open("log." + str(time.time()) + ".log", "a") + + def write(self, message): + self.terminal.write(message) + self.log.write(message) + + def flush(self): + #this flush method is needed for python 3 compatibility. + #this handles the flush command by doing nothing. + #you might want to specify some extra behavior here. 
+ pass + +sys.stdout = Logger() \ No newline at end of file diff --git a/pygraph/utils/model_selection_precomputed.py b/gklearn/utils/model_selection_precomputed.py similarity index 99% rename from pygraph/utils/model_selection_precomputed.py rename to gklearn/utils/model_selection_precomputed.py index 177fca1..87c9714 100644 --- a/pygraph/utils/model_selection_precomputed.py +++ b/gklearn/utils/model_selection_precomputed.py @@ -16,7 +16,7 @@ import os import time import datetime #from os.path import basename, splitext -from pygraph.utils.graphfiles import loadDataset +from gklearn.utils.graphfiles import loadDataset from tqdm import tqdm #from memory_profiler import profile @@ -62,7 +62,7 @@ def model_selection_for_precomputed_kernel(datafile, Path of file storing y data. This parameter is optional depending on the given dataset file. extra_params : dict - Extra parameters for loading dataset. See function pygraph.utils. + Extra parameters for loading dataset. See function gklearn.utils. graphfiles.loadDataset for detail. ds_name : string Name of the dataset. 
@@ -76,8 +76,8 @@ def model_selection_for_precomputed_kernel(datafile, >>> import numpy as np >>> import sys >>> sys.path.insert(0, "../") - >>> from pygraph.utils.model_selection_precomputed import model_selection_for_precomputed_kernel - >>> from pygraph.kernels.untilHPathKernel import untilhpathkernel + >>> from gklearn.utils.model_selection_precomputed import model_selection_for_precomputed_kernel + >>> from gklearn.kernels.untilHPathKernel import untilhpathkernel >>> >>> datafile = '../datasets/MUTAG/MUTAG_A.txt' >>> estimator = untilhpathkernel diff --git a/pygraph/utils/parallel.py b/gklearn/utils/parallel.py similarity index 100% rename from pygraph/utils/parallel.py rename to gklearn/utils/parallel.py diff --git a/pygraph/utils/trie.py b/gklearn/utils/trie.py similarity index 93% rename from pygraph/utils/trie.py rename to gklearn/utils/trie.py index 92f9c28..8f02e0f 100644 --- a/pygraph/utils/trie.py +++ b/gklearn/utils/trie.py @@ -6,7 +6,7 @@ Created on Wed Jan 30 10:48:49 2019 Trie (prefix tree) @author: ljia @references: - https://viblo.asia/p/nlp-build-a-trie-data-structure-from-scratch-with-python-3P0lPzroKox, 2019.1 +`NLP: Build a Trie Data structure from scratch with python `__, 2019.1 """ import pickle @@ -15,6 +15,8 @@ import json """ Trie class """ class Trie: + """ + """ # init Trie class def __init__(self): self.root = self.getNode() @@ -108,4 +110,4 @@ class Trie: def load_from_json(self, file_name): json_file = open(file_name + ".json", "r") self.root = json.load(json_file) - json_file.close() \ No newline at end of file + json_file.close() diff --git a/pygraph/utils/unfinished/openblassettings.py b/gklearn/utils/unfinished/openblassettings.py similarity index 95% rename from pygraph/utils/unfinished/openblassettings.py rename to gklearn/utils/unfinished/openblassettings.py index 8b6c85a..e93acec 100644 --- a/pygraph/utils/unfinished/openblassettings.py +++ b/gklearn/utils/unfinished/openblassettings.py @@ -12,7 +12,7 @@ can be avoided. 
with num_threads(8): np.dot(x, y) @author: ali_m -@Reference: ali_m, https://stackoverflow.com/a/29582987, 2018.12 +@Reference: `ali_m's answer `__, 2018.12 """ import contextlib diff --git a/pygraph/utils/unused/suffix_tree.py b/gklearn/utils/unused/suffix_tree.py similarity index 98% rename from pygraph/utils/unused/suffix_tree.py rename to gklearn/utils/unused/suffix_tree.py index cddae41..4b15aa8 100644 --- a/pygraph/utils/unused/suffix_tree.py +++ b/gklearn/utils/unused/suffix_tree.py @@ -1,7 +1,7 @@ """ @author: linlin @references: - [1] https://github.com/ptrus/suffix-trees/blob/master/suffix_trees/STree.py, 2018.6 + [1] `ptrus/suffix-trees `__, 2018.6 """ import sys diff --git a/pygraph/utils/utils.py b/gklearn/utils/utils.py similarity index 99% rename from pygraph/utils/utils.py rename to gklearn/utils/utils.py index b15ce94..9e5ac92 100644 --- a/pygraph/utils/utils.py +++ b/gklearn/utils/utils.py @@ -225,8 +225,9 @@ def graph_isIdentical(G1, G2): labels/attributes, edge labels/attributes. Notes - ---- + ----- 1. The type of graphs has to be the same. + 2. Global/Graph attributes are neglected as they may contain names for graphs. """ # check nodes. 
@@ -259,4 +260,4 @@ def get_edge_labels(Gn, edge_label): el = set() for G in Gn: el = el | set(nx.get_edge_attributes(G, edge_label).values()) - return el \ No newline at end of file + return el diff --git a/notebooks/else/compute_spkernel_for_syntheticnew.py b/notebooks/else/compute_spkernel_for_syntheticnew.py new file mode 100644 index 0000000..339cc63 --- /dev/null +++ b/notebooks/else/compute_spkernel_for_syntheticnew.py @@ -0,0 +1,52 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Sun Dec 23 16:40:52 2018 + +@author: ljia +""" +import sys +import numpy as np +import networkx as nx + +sys.path.insert(0, "../") +from gklearn.utils.graphfiles import loadDataset +from gklearn.utils.model_selection_precomputed import compute_gram_matrices +from gklearn.kernels.spKernel import spkernel +from sklearn.model_selection import ParameterGrid + +from libs import * +import multiprocessing +import functools +from gklearn.utils.kernels import deltakernel, gaussiankernel, kernelproduct + + +if __name__ == "__main__": + # load dataset. + print('getting dataset and computing kernel distance matrix first...') + ds_name = 'SYNTHETICnew' + gkernel = 'spkernel' + dataset = '../datasets/SYNTHETICnew/SYNTHETICnew_A.txt' + Gn, y_all = loadDataset(dataset) + + for G in Gn: + G.graph['filename'] = 'graph' + str(G.graph['name']) + '.gxl' + + # compute/read Gram matrix and pair distances. + mixkernel = functools.partial(kernelproduct, deltakernel, gaussiankernel) + Kmatrix = np.empty((len(Gn), len(Gn))) + Kmatrix, run_time, idx = spkernel(Gn, node_label=None, node_kernels= + {'symb': deltakernel, 'nsymb': gaussiankernel, 'mix': mixkernel}, + n_jobs=multiprocessing.cpu_count(), verbose=True) + + # normalization + Kmatrix_diag = Kmatrix.diagonal().copy() + for i in range(len(Kmatrix)): + for j in range(i, len(Kmatrix)): + Kmatrix[i][j] /= np.sqrt(Kmatrix_diag[i] * Kmatrix_diag[j]) + Kmatrix[j][i] = Kmatrix[i][j] + + np.savez('results/xp_fit_method/Kmatrix.' 
+ ds_name + '.' + gkernel + '.gm', + Kmatrix=Kmatrix, run_time=run_time) + + print('complete!') diff --git a/notebooks/else/compute_sspkernel_for_syntheticnew.py b/notebooks/else/compute_sspkernel_for_syntheticnew.py new file mode 100644 index 0000000..6c13ef3 --- /dev/null +++ b/notebooks/else/compute_sspkernel_for_syntheticnew.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Sun Dec 23 16:40:52 2018 + +@author: ljia +""" +import sys +import numpy as np +import networkx as nx + +sys.path.insert(0, "../") +from gklearn.utils.graphfiles import loadDataset +from gklearn.utils.model_selection_precomputed import compute_gram_matrices +from gklearn.kernels.structuralspKernel import structuralspkernel +from sklearn.model_selection import ParameterGrid + +from libs import * +import multiprocessing +import functools +from gklearn.utils.kernels import deltakernel, gaussiankernel, kernelproduct + + +if __name__ == "__main__": + # load dataset. + print('getting dataset and computing kernel distance matrix first...') + ds_name = 'SYNTHETICnew' + gkernel = 'structuralspkernel' + dataset = '../datasets/SYNTHETICnew/SYNTHETICnew_A.txt' + Gn, y_all = loadDataset(dataset) + + for G in Gn: + G.graph['filename'] = 'graph' + str(G.graph['name']) + '.gxl' + + # compute/read Gram matrix and pair distances. 
+ mixkernel = functools.partial(kernelproduct, deltakernel, gaussiankernel) + sub_kernels = {'symb': deltakernel, 'nsymb': gaussiankernel, 'mix': mixkernel} + Kmatrix, run_time = structuralspkernel(Gn, node_label=None, edge_label=None, + node_kernels=sub_kernels, edge_kernels=sub_kernels, + parallel=None, # parallel='imap_unordered', + n_jobs=multiprocessing.cpu_count(), + verbose=True) + + # normalization + Kmatrix_diag = Kmatrix.diagonal().copy() + for i in range(len(Kmatrix)): + for j in range(i, len(Kmatrix)): + Kmatrix[i][j] /= np.sqrt(Kmatrix_diag[i] * Kmatrix_diag[j]) + Kmatrix[j][i] = Kmatrix[i][j] + + np.savez('results/xp_fit_method/Kmatrix.' + ds_name + '.' + gkernel + '.gm', + Kmatrix=Kmatrix, run_time=run_time) + + print('complete!') \ No newline at end of file diff --git a/notebooks/else/job_graphkernels.sl b/notebooks/else/job_graphkernels.sl index a869a8e..6f982ed 100644 --- a/notebooks/else/job_graphkernels.sl +++ b/notebooks/else/job_graphkernels.sl @@ -15,5 +15,5 @@ #SBATCH --mem-per-cpu=4000 srun hostname -srun cd /home/2017018/ljia01/py-graph/notebooks +srun cd /home/2017018/ljia01/graphkit-learn/notebooks srun python3 run_spkernel.py diff --git a/notebooks/else/run_rwalk_symonly.py b/notebooks/else/run_rwalk_symonly.py index 0c874f0..8c929e6 100644 --- a/notebooks/else/run_rwalk_symonly.py +++ b/notebooks/else/run_rwalk_symonly.py @@ -10,8 +10,8 @@ import functools from libs import * import multiprocessing -from pygraph.kernels.rwalk_sym import randomwalkkernel -from pygraph.utils.kernels import deltakernel, gaussiankernel, kernelproduct +from gklearn.kernels.rwalk_sym import randomwalkkernel +from gklearn.utils.kernels import deltakernel, gaussiankernel, kernelproduct import numpy as np diff --git a/notebooks/else/run_sp_symonly.py b/notebooks/else/run_sp_symonly.py index eaa321e..427dd42 100644 --- a/notebooks/else/run_sp_symonly.py +++ b/notebooks/else/run_sp_symonly.py @@ -10,9 +10,9 @@ import functools from libs import * import 
multiprocessing -from pygraph.kernels.sp_sym import spkernel -from pygraph.utils.kernels import deltakernel, gaussiankernel, kernelproduct -#from pygraph.utils.model_selection_precomputed import trial_do +from gklearn.kernels.sp_sym import spkernel +from gklearn.utils.kernels import deltakernel, gaussiankernel, kernelproduct +#from gklearn.utils.model_selection_precomputed import trial_do dslist = [ {'name': 'Letter-med', 'dataset': '../datasets/Letter-med/Letter-med_A.txt'}, diff --git a/notebooks/else/run_ssp_symonly.py b/notebooks/else/run_ssp_symonly.py index bfd6f15..081d596 100644 --- a/notebooks/else/run_ssp_symonly.py +++ b/notebooks/else/run_ssp_symonly.py @@ -10,8 +10,8 @@ import functools from libs import * import multiprocessing -from pygraph.kernels.ssp_sym import structuralspkernel -from pygraph.utils.kernels import deltakernel, gaussiankernel, kernelproduct +from gklearn.kernels.ssp_sym import structuralspkernel +from gklearn.utils.kernels import deltakernel, gaussiankernel, kernelproduct dslist = [ {'name': 'Letter-med', 'dataset': '../datasets/Letter-med/Letter-med_A.txt'}, diff --git a/notebooks/libs.py b/notebooks/libs.py index e6d29f8..55d6d25 100644 --- a/notebooks/libs.py +++ b/notebooks/libs.py @@ -4,5 +4,5 @@ sys.path.insert(0, "../") import numpy as np -from pygraph.utils.model_selection_precomputed import model_selection_for_precomputed_kernel +from gklearn.utils.model_selection_precomputed import model_selection_for_precomputed_kernel from datasets.ds import dslist \ No newline at end of file diff --git a/notebooks/run_commonwalkkernel.ipynb b/notebooks/run_commonwalkkernel.ipynb index 470ae1c..c13109b 100644 --- a/notebooks/run_commonwalkkernel.ipynb +++ b/notebooks/run_commonwalkkernel.ipynb @@ -66,8 +66,8 @@ "import multiprocessing\n", "from sklearn.metrics.pairwise import rbf_kernel\n", "\n", - "from pygraph.kernels.commonWalkKernel import commonwalkkernel\n", - "from pygraph.utils.kernels import deltakernel, kernelproduct\n", + "from 
gklearn.kernels.commonWalkKernel import commonwalkkernel\n", + "from gklearn.utils.kernels import deltakernel, kernelproduct\n", "\n", "dslist = [\n", " {'name': 'Acyclic', 'dataset': '../datasets/acyclic/dataset_bps.ds',\n", diff --git a/notebooks/run_commonwalkkernel.py b/notebooks/run_commonwalkkernel.py index b537c04..1422a4d 100644 --- a/notebooks/run_commonwalkkernel.py +++ b/notebooks/run_commonwalkkernel.py @@ -9,25 +9,25 @@ Created on Fri Sep 28 17:01:13 2018 from libs import * import multiprocessing -from pygraph.kernels.commonWalkKernel import commonwalkkernel +from gklearn.kernels.commonWalkKernel import commonwalkkernel dslist = [ - {'name': 'Alkane', 'dataset': '../datasets/Alkane/dataset.ds', 'task': 'regression', - 'dataset_y': '../datasets/Alkane/dataset_boiling_point_names.txt'}, - # contains single node graph, node symb - {'name': 'Acyclic', 'dataset': '../datasets/acyclic/dataset_bps.ds', - 'task': 'regression'}, # node symb - {'name': 'MAO', 'dataset': '../datasets/MAO/dataset.ds'}, # node/edge symb - {'name': 'PAH', 'dataset': '../datasets/PAH/dataset.ds'}, # unlabeled - {'name': 'MUTAG', 'dataset': '../datasets/MUTAG/MUTAG_A.txt'}, # node/edge symb - {'name': 'Letter-med', 'dataset': '../datasets/Letter-med/Letter-med_A.txt'}, - # node nsymb - {'name': 'AIDS', 'dataset': '../datasets/AIDS/AIDS_A.txt'}, # node symb/nsymb, edge symb - {'name': 'ENZYMES', 'dataset': '../datasets/ENZYMES_txt/ENZYMES_A_sparse.txt'}, +# {'name': 'Alkane', 'dataset': '../datasets/Alkane/dataset.ds', 'task': 'regression', +# 'dataset_y': '../datasets/Alkane/dataset_boiling_point_names.txt'}, +# # contains single node graph, node symb +# {'name': 'Acyclic', 'dataset': '../datasets/acyclic/dataset_bps.ds', +# 'task': 'regression'}, # node symb +# {'name': 'MAO', 'dataset': '../datasets/MAO/dataset.ds'}, # node/edge symb +# {'name': 'PAH', 'dataset': '../datasets/PAH/dataset.ds'}, # unlabeled +# {'name': 'MUTAG', 'dataset': '../datasets/MUTAG/MUTAG_A.txt'}, # node/edge 
symb +# {'name': 'Letter-med', 'dataset': '../datasets/Letter-med/Letter-med_A.txt'}, +# # node nsymb +# {'name': 'AIDS', 'dataset': '../datasets/AIDS/AIDS_A.txt'}, # node symb/nsymb, edge symb +# {'name': 'ENZYMES', 'dataset': '../datasets/ENZYMES_txt/ENZYMES_A_sparse.txt'}, # node symb/nsymb -# {'name': 'NCI1', 'dataset': '../datasets/NCI1/NCI1_A.txt'}, # node symb -# {'name': 'NCI109', 'dataset': '../datasets/NCI109/NCI109_A.txt'}, # node symb -# {'name': 'D&D', 'dataset': '../datasets/DD/DD_A.txt'}, # node symb + {'name': 'NCI1', 'dataset': '../datasets/NCI1/NCI1_A.txt'}, # node symb + {'name': 'NCI109', 'dataset': '../datasets/NCI109/NCI109_A.txt'}, # node symb + {'name': 'D&D', 'dataset': '../datasets/DD/DD_A.txt'}, # node symb # # {'name': 'Mutagenicity', 'dataset': '../datasets/Mutagenicity/Mutagenicity_A.txt'}, # # node/edge symb diff --git a/notebooks/run_marginalizedkernel.ipynb b/notebooks/run_marginalizedkernel.ipynb index 724f18b..82b9d01 100644 --- a/notebooks/run_marginalizedkernel.ipynb +++ b/notebooks/run_marginalizedkernel.ipynb @@ -98,7 +98,7 @@ "from libs import *\n", "import multiprocessing\n", "\n", - "from pygraph.kernels.marginalizedKernel import marginalizedkernel\n", + "from gklearn.kernels.marginalizedKernel import marginalizedkernel\n", "\n", "dslist = [\n", " {'name': 'Acyclic', 'dataset': '../datasets/acyclic/dataset_bps.ds',\n", diff --git a/notebooks/run_marginalizedkernel.py b/notebooks/run_marginalizedkernel.py index cd7bf73..998fb6b 100644 --- a/notebooks/run_marginalizedkernel.py +++ b/notebooks/run_marginalizedkernel.py @@ -9,25 +9,25 @@ Created on Fri Sep 28 18:58:47 2018 from libs import * import multiprocessing -from pygraph.kernels.marginalizedKernel import marginalizedkernel +from gklearn.kernels.marginalizedKernel import marginalizedkernel dslist = [ - {'name': 'Alkane', 'dataset': '../datasets/Alkane/dataset.ds', 'task': 'regression', - 'dataset_y': '../datasets/Alkane/dataset_boiling_point_names.txt'}, - # contains 
single node graph, node symb - {'name': 'Acyclic', 'dataset': '../datasets/acyclic/dataset_bps.ds', - 'task': 'regression'}, # node symb - {'name': 'MAO', 'dataset': '../datasets/MAO/dataset.ds'}, # node/edge symb - {'name': 'PAH', 'dataset': '../datasets/PAH/dataset.ds'}, # unlabeled - {'name': 'MUTAG', 'dataset': '../datasets/MUTAG/MUTAG_A.txt'}, # node/edge symb - {'name': 'Letter-med', 'dataset': '../datasets/Letter-med/Letter-med_A.txt'}, - # node nsymb - {'name': 'ENZYMES', 'dataset': '../datasets/ENZYMES_txt/ENZYMES_A_sparse.txt'}, - # node symb/nsymb -# {'name': 'AIDS', 'dataset': '../datasets/AIDS/AIDS_A.txt'}, # node symb/nsymb, edge symb -# {'name': 'NCI1', 'dataset': '../datasets/NCI1/NCI1_A.txt'}, # node symb -# {'name': 'NCI109', 'dataset': '../datasets/NCI109/NCI109_A.txt'}, # node symb -# {'name': 'D&D', 'dataset': '../datasets/DD/DD_A.txt'}, # node symb +# {'name': 'Alkane', 'dataset': '../datasets/Alkane/dataset.ds', 'task': 'regression', +# 'dataset_y': '../datasets/Alkane/dataset_boiling_point_names.txt'}, +# # contains single node graph, node symb +# {'name': 'Acyclic', 'dataset': '../datasets/acyclic/dataset_bps.ds', +# 'task': 'regression'}, # node symb +# {'name': 'MAO', 'dataset': '../datasets/MAO/dataset.ds'}, # node/edge symb +# {'name': 'PAH', 'dataset': '../datasets/PAH/dataset.ds'}, # unlabeled +# {'name': 'MUTAG', 'dataset': '../datasets/MUTAG/MUTAG_A.txt'}, # node/edge symb +# {'name': 'Letter-med', 'dataset': '../datasets/Letter-med/Letter-med_A.txt'}, +# # node nsymb +# {'name': 'ENZYMES', 'dataset': '../datasets/ENZYMES_txt/ENZYMES_A_sparse.txt'}, +# # node symb/nsymb + {'name': 'AIDS', 'dataset': '../datasets/AIDS/AIDS_A.txt'}, # node symb/nsymb, edge symb + {'name': 'NCI1', 'dataset': '../datasets/NCI1/NCI1_A.txt'}, # node symb + {'name': 'NCI109', 'dataset': '../datasets/NCI109/NCI109_A.txt'}, # node symb + {'name': 'D&D', 'dataset': '../datasets/DD/DD_A.txt'}, # node symb # {'name': 'monoterpenoides', 'dataset': 
'../datasets/monoterpenoides/dataset_10+.ds'}, # node/edge symb # # {'name': 'Mutagenicity', 'dataset': '../datasets/Mutagenicity/Mutagenicity_A.txt'}, diff --git a/notebooks/run_randomwalkkernel.ipynb b/notebooks/run_randomwalkkernel.ipynb index c30c0ff..a5652da 100644 --- a/notebooks/run_randomwalkkernel.ipynb +++ b/notebooks/run_randomwalkkernel.ipynb @@ -30,7 +30,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "../pygraph/kernels/randomWalkKernel.py:108: UserWarning: All labels are ignored.\n", + "../gklearn/kernels/randomWalkKernel.py:108: UserWarning: All labels are ignored.\n", " warnings.warn('All labels are ignored.')\n" ] }, @@ -209,8 +209,8 @@ "from libs import *\n", "import multiprocessing\n", "\n", - "from pygraph.kernels.randomWalkKernel import randomwalkkernel\n", - "from pygraph.utils.kernels import deltakernel, gaussiankernel, kernelproduct\n", + "from gklearn.kernels.randomWalkKernel import randomwalkkernel\n", + "from gklearn.utils.kernels import deltakernel, gaussiankernel, kernelproduct\n", "\n", "import numpy as np\n", "\n", diff --git a/notebooks/run_randomwalkkernel.py b/notebooks/run_randomwalkkernel.py index a0feef7..f6e85f3 100644 --- a/notebooks/run_randomwalkkernel.py +++ b/notebooks/run_randomwalkkernel.py @@ -10,25 +10,25 @@ import functools from libs import * import multiprocessing -from pygraph.kernels.randomWalkKernel import randomwalkkernel -from pygraph.utils.kernels import deltakernel, gaussiankernel, kernelproduct +from gklearn.kernels.randomWalkKernel import randomwalkkernel +from gklearn.utils.kernels import deltakernel, gaussiankernel, kernelproduct import numpy as np dslist = [ - {'name': 'Alkane', 'dataset': '../datasets/Alkane/dataset.ds', 'task': 'regression', - 'dataset_y': '../datasets/Alkane/dataset_boiling_point_names.txt'}, - # contains single node graph, node symb - {'name': 'Acyclic', 'dataset': '../datasets/acyclic/dataset_bps.ds', - 'task': 'regression'}, # node symb - {'name': 'MAO', 'dataset': 
'../datasets/MAO/dataset.ds'}, # node/edge symb - {'name': 'PAH', 'dataset': '../datasets/PAH/dataset.ds'}, # unlabeled - {'name': 'MUTAG', 'dataset': '../datasets/MUTAG/MUTAG_A.txt'}, # node/edge symb - {'name': 'Letter-med', 'dataset': '../datasets/Letter-med/Letter-med_A.txt'}, - # node nsymb - {'name': 'ENZYMES', 'dataset': '../datasets/ENZYMES_txt/ENZYMES_A_sparse.txt'}, - # node symb/nsymb +# {'name': 'Alkane', 'dataset': '../datasets/Alkane/dataset.ds', 'task': 'regression', +# 'dataset_y': '../datasets/Alkane/dataset_boiling_point_names.txt'}, +# # contains single node graph, node symb +# {'name': 'Acyclic', 'dataset': '../datasets/acyclic/dataset_bps.ds', +# 'task': 'regression'}, # node symb +# {'name': 'MAO', 'dataset': '../datasets/MAO/dataset.ds'}, # node/edge symb +# {'name': 'PAH', 'dataset': '../datasets/PAH/dataset.ds'}, # unlabeled +# {'name': 'MUTAG', 'dataset': '../datasets/MUTAG/MUTAG_A.txt'}, # node/edge symb +# {'name': 'Letter-med', 'dataset': '../datasets/Letter-med/Letter-med_A.txt'}, +# # node nsymb +# {'name': 'ENZYMES', 'dataset': '../datasets/ENZYMES_txt/ENZYMES_A_sparse.txt'}, +# # node symb/nsymb {'name': 'AIDS', 'dataset': '../datasets/AIDS/AIDS_A.txt'}, # node symb/nsymb, edge symb {'name': 'NCI1', 'dataset': '../datasets/NCI1/NCI1_A.txt'}, # node symb {'name': 'NCI109', 'dataset': '../datasets/NCI109/NCI109_A.txt'}, # node symb @@ -79,8 +79,8 @@ sub_kernels = [{'symb': deltakernel, 'nsymb': gaussiankernel, 'mix': mixkernel}] for ds in dslist: print() print(ds['name']) -# for compute_method in ['sylvester', 'conjugate', 'fp', 'spectral']: - for compute_method in ['conjugate', 'fp']: + for compute_method in ['sylvester', 'conjugate', 'fp', 'spectral']: +# for compute_method in ['conjugate', 'fp']: if compute_method == 'sylvester': param_grid_precomputed = {'compute_method': ['sylvester'], # 'weight': np.linspace(0.01, 0.10, 10)} diff --git a/notebooks/run_spkernel.ipynb b/notebooks/run_spkernel.ipynb index b0d0eb2..b7fa0f4 100644 
--- a/notebooks/run_spkernel.ipynb +++ b/notebooks/run_spkernel.ipynb @@ -162,9 +162,9 @@ "from libs import *\n", "import multiprocessing\n", "\n", - "from pygraph.kernels.spKernel import spkernel\n", - "from pygraph.utils.kernels import deltakernel, gaussiankernel, kernelproduct\n", - "#from pygraph.utils.model_selection_precomputed import trial_do\n", + "from gklearn.kernels.spKernel import spkernel\n", + "from gklearn.utils.kernels import deltakernel, gaussiankernel, kernelproduct\n", + "#from gklearn.utils.model_selection_precomputed import trial_do\n", "\n", "# datasets\n", "dslist = [\n", diff --git a/notebooks/run_spkernel.py b/notebooks/run_spkernel.py index 0698d2a..1822a1a 100644 --- a/notebooks/run_spkernel.py +++ b/notebooks/run_spkernel.py @@ -2,28 +2,28 @@ import functools from libs import * import multiprocessing -from pygraph.kernels.spKernel import spkernel -from pygraph.utils.kernels import deltakernel, gaussiankernel, kernelproduct -#from pygraph.utils.model_selection_precomputed import trial_do +from gklearn.kernels.spKernel import spkernel +from gklearn.utils.kernels import deltakernel, gaussiankernel, kernelproduct +#from gklearn.utils.model_selection_precomputed import trial_do # datasets dslist = [ - {'name': 'Alkane', 'dataset': '../datasets/Alkane/dataset.ds', 'task': 'regression', - 'dataset_y': '../datasets/Alkane/dataset_boiling_point_names.txt'}, - # contains single node graph, node symb - {'name': 'Acyclic', 'dataset': '../datasets/acyclic/dataset_bps.ds', - 'task': 'regression'}, # node symb - {'name': 'MAO', 'dataset': '../datasets/MAO/dataset.ds'}, # node/edge symb - {'name': 'PAH', 'dataset': '../datasets/PAH/dataset.ds'}, # unlabeled - {'name': 'MUTAG', 'dataset': '../datasets/MUTAG/MUTAG_A.txt'}, # node/edge symb - {'name': 'Letter-med', 'dataset': '../datasets/Letter-med/Letter-med_A.txt'}, - # node nsymb - {'name': 'ENZYMES', 'dataset': '../datasets/ENZYMES_txt/ENZYMES_A_sparse.txt'}, - # node symb/nsymb - {'name': 'AIDS', 
'dataset': '../datasets/AIDS/AIDS_A.txt'}, # node symb/nsymb, edge symb -# {'name': 'NCI1', 'dataset': '../datasets/NCI1/NCI1_A.txt'}, # node symb -# {'name': 'NCI109', 'dataset': '../datasets/NCI109/NCI109_A.txt'}, # node symb -# {'name': 'D&D', 'dataset': '../datasets/DD/DD_A.txt'}, # node symb +# {'name': 'Alkane', 'dataset': '../datasets/Alkane/dataset.ds', 'task': 'regression', +# 'dataset_y': '../datasets/Alkane/dataset_boiling_point_names.txt'}, +# # contains single node graph, node symb +# {'name': 'Acyclic', 'dataset': '../datasets/acyclic/dataset_bps.ds', +# 'task': 'regression'}, # node symb +# {'name': 'MAO', 'dataset': '../datasets/MAO/dataset.ds'}, # node/edge symb +# {'name': 'PAH', 'dataset': '../datasets/PAH/dataset.ds'}, # unlabeled +# {'name': 'MUTAG', 'dataset': '../datasets/MUTAG/MUTAG_A.txt'}, # node/edge symb +# {'name': 'Letter-med', 'dataset': '../datasets/Letter-med/Letter-med_A.txt'}, +# # node nsymb +# {'name': 'ENZYMES', 'dataset': '../datasets/ENZYMES_txt/ENZYMES_A_sparse.txt'}, +# # node symb/nsymb +# {'name': 'AIDS', 'dataset': '../datasets/AIDS/AIDS_A.txt'}, # node symb/nsymb, edge symb + {'name': 'NCI1', 'dataset': '../datasets/NCI1/NCI1_A.txt'}, # node symb + {'name': 'NCI109', 'dataset': '../datasets/NCI109/NCI109_A.txt'}, # node symb + {'name': 'D&D', 'dataset': '../datasets/DD/DD_A.txt'}, # node symb # {'name': 'monoterpenoides', 'dataset': '../datasets/monoterpenoides/dataset_10+.ds'}, # node/edge # {'name': 'Letter-high', 'dataset': '../datasets/Letter-high/Letter-high_A.txt'}, # # node nsymb symb diff --git a/notebooks/run_structuralspkernel.ipynb b/notebooks/run_structuralspkernel.ipynb index c34fb1f..79dcd0d 100644 --- a/notebooks/run_structuralspkernel.ipynb +++ b/notebooks/run_structuralspkernel.ipynb @@ -117,8 +117,8 @@ "from libs import *\n", "import multiprocessing\n", "\n", - "from pygraph.kernels.structuralspKernel import structuralspkernel\n", - "from pygraph.utils.kernels import deltakernel, gaussiankernel, 
kernelproduct\n", + "from gklearn.kernels.structuralspKernel import structuralspkernel\n", + "from gklearn.utils.kernels import deltakernel, gaussiankernel, kernelproduct\n", "\n", "dslist = [\n", " {'name': 'Acyclic', 'dataset': '../datasets/acyclic/dataset_bps.ds',\n", diff --git a/notebooks/run_structuralspkernel.py b/notebooks/run_structuralspkernel.py index 223d832..612cfc4 100644 --- a/notebooks/run_structuralspkernel.py +++ b/notebooks/run_structuralspkernel.py @@ -10,26 +10,26 @@ import functools from libs import * import multiprocessing -from pygraph.kernels.structuralspKernel import structuralspkernel -from pygraph.utils.kernels import deltakernel, gaussiankernel, kernelproduct +from gklearn.kernels.structuralspKernel import structuralspkernel +from gklearn.utils.kernels import deltakernel, gaussiankernel, kernelproduct dslist = [ - {'name': 'Alkane', 'dataset': '../datasets/Alkane/dataset.ds', 'task': 'regression', - 'dataset_y': '../datasets/Alkane/dataset_boiling_point_names.txt'}, - # contains single node graph, node symb - {'name': 'Acyclic', 'dataset': '../datasets/acyclic/dataset_bps.ds', - 'task': 'regression'}, # node symb - {'name': 'MAO', 'dataset': '../datasets/MAO/dataset.ds'}, # node/edge symb - {'name': 'PAH', 'dataset': '../datasets/PAH/dataset.ds'}, # unlabeled - {'name': 'MUTAG', 'dataset': '../datasets/MUTAG/MUTAG_A.txt'}, # node/edge symb - {'name': 'Letter-med', 'dataset': '../datasets/Letter-med/Letter-med_A.txt'}, - # node nsymb - {'name': 'AIDS', 'dataset': '../datasets/AIDS/AIDS_A.txt'}, # node symb/nsymb, edge symb - {'name': 'NCI1', 'dataset': '../datasets/NCI1/NCI1_A.txt'}, # node symb - {'name': 'NCI109', 'dataset': '../datasets/NCI109/NCI109_A.txt'}, # node symb +# {'name': 'Alkane', 'dataset': '../datasets/Alkane/dataset.ds', 'task': 'regression', +# 'dataset_y': '../datasets/Alkane/dataset_boiling_point_names.txt'}, +# # contains single node graph, node symb +# {'name': 'Acyclic', 'dataset': 
'../datasets/acyclic/dataset_bps.ds', +# 'task': 'regression'}, # node symb +# {'name': 'MAO', 'dataset': '../datasets/MAO/dataset.ds'}, # node/edge symb +# {'name': 'PAH', 'dataset': '../datasets/PAH/dataset.ds'}, # unlabeled +# {'name': 'MUTAG', 'dataset': '../datasets/MUTAG/MUTAG_A.txt'}, # node/edge symb +# {'name': 'Letter-med', 'dataset': '../datasets/Letter-med/Letter-med_A.txt'}, +# # node nsymb +# {'name': 'AIDS', 'dataset': '../datasets/AIDS/AIDS_A.txt'}, # node symb/nsymb, edge symb +# {'name': 'NCI1', 'dataset': '../datasets/NCI1/NCI1_A.txt'}, # node symb +# {'name': 'NCI109', 'dataset': '../datasets/NCI109/NCI109_A.txt'}, # node symb # {'name': 'ENZYMES', 'dataset': '../datasets/ENZYMES_txt/ENZYMES_A_sparse.txt'}, # # node symb/nsymb -# {'name': 'D&D', 'dataset': '../datasets/DD/DD_A.txt'}, # node symb + {'name': 'D&D', 'dataset': '../datasets/DD/DD_A.txt'}, # node symb # {'name': 'Letter-high', 'dataset': '../datasets/Letter-high/Letter-high_A.txt'}, # # node nsymb symb # diff --git a/notebooks/run_treeletkernel.ipynb b/notebooks/run_treeletkernel.ipynb index 413a627..f9e3a5f 100644 --- a/notebooks/run_treeletkernel.ipynb +++ b/notebooks/run_treeletkernel.ipynb @@ -93,8 +93,8 @@ "from libs import *\n", "import multiprocessing\n", "\n", - "from pygraph.kernels.treeletKernel import treeletkernel\n", - "from pygraph.utils.kernels import gaussiankernel, polynomialkernel\n", + "from gklearn.kernels.treeletKernel import treeletkernel\n", + "from gklearn.utils.kernels import gaussiankernel, polynomialkernel\n", "\n", "dslist = [\n", " {'name': 'Acyclic', 'dataset': '../datasets/acyclic/dataset_bps.ds',\n", diff --git a/notebooks/run_treeletkernel.py b/notebooks/run_treeletkernel.py index b4631fc..544f28b 100644 --- a/notebooks/run_treeletkernel.py +++ b/notebooks/run_treeletkernel.py @@ -10,27 +10,27 @@ from libs import * import multiprocessing import functools -from pygraph.kernels.treeletKernel import treeletkernel -from pygraph.utils.kernels import 
gaussiankernel, polynomialkernel +from gklearn.kernels.treeletKernel import treeletkernel +from gklearn.utils.kernels import gaussiankernel, polynomialkernel dslist = [ - {'name': 'Alkane', 'dataset': '../datasets/Alkane/dataset.ds', 'task': 'regression', - 'dataset_y': '../datasets/Alkane/dataset_boiling_point_names.txt'}, - # contains single node graph, node symb - {'name': 'Acyclic', 'dataset': '../datasets/acyclic/dataset_bps.ds', - 'task': 'regression'}, # node symb - {'name': 'MAO', 'dataset': '../datasets/MAO/dataset.ds'}, # node/edge symb - {'name': 'PAH', 'dataset': '../datasets/PAH/dataset.ds'}, # unlabeled - {'name': 'MUTAG', 'dataset': '../datasets/MUTAG/MUTAG_A.txt'}, # node/edge symb - {'name': 'NCI1', 'dataset': '../datasets/NCI1/NCI1_A.txt'}, # node symb - {'name': 'NCI109', 'dataset': '../datasets/NCI109/NCI109_A.txt'}, # node symb - {'name': 'AIDS', 'dataset': '../datasets/AIDS/AIDS_A.txt'}, # node symb/nsymb, edge symb - {'name': 'ENZYMES', 'dataset': '../datasets/ENZYMES_txt/ENZYMES_A_sparse.txt'}, +# {'name': 'Alkane', 'dataset': '../datasets/Alkane/dataset.ds', 'task': 'regression', +# 'dataset_y': '../datasets/Alkane/dataset_boiling_point_names.txt'}, +# # contains single node graph, node symb +# {'name': 'Acyclic', 'dataset': '../datasets/acyclic/dataset_bps.ds', +# 'task': 'regression'}, # node symb +# {'name': 'MAO', 'dataset': '../datasets/MAO/dataset.ds'}, # node/edge symb +# {'name': 'PAH', 'dataset': '../datasets/PAH/dataset.ds'}, # unlabeled +# {'name': 'MUTAG', 'dataset': '../datasets/MUTAG/MUTAG_A.txt'}, # node/edge symb +# {'name': 'NCI1', 'dataset': '../datasets/NCI1/NCI1_A.txt'}, # node symb +# {'name': 'NCI109', 'dataset': '../datasets/NCI109/NCI109_A.txt'}, # node symb +# {'name': 'AIDS', 'dataset': '../datasets/AIDS/AIDS_A.txt'}, # node symb/nsymb, edge symb +# {'name': 'ENZYMES', 'dataset': '../datasets/ENZYMES_txt/ENZYMES_A_sparse.txt'}, # {'name': 'monoterpenoides', 'dataset': '../datasets/monoterpenoides/dataset_10+.ds'}, 
# node/edge symb # node symb/nsymb -# {'name': 'D&D', 'dataset': '../datasets/DD/DD_A.txt'}, # node symb -# {'name': 'Letter-med', 'dataset': '../datasets/Letter-med/Letter-med_A.txt'}, -# # node nsymb + {'name': 'D&D', 'dataset': '../datasets/DD/DD_A.txt'}, # node symb + {'name': 'Letter-med', 'dataset': '../datasets/Letter-med/Letter-med_A.txt'}, + # node nsymb # # {'name': 'Mutagenicity', 'dataset': '../datasets/Mutagenicity/Mutagenicity_A.txt'}, # # node/edge symb diff --git a/notebooks/run_untilhpathkernel.ipynb b/notebooks/run_untilhpathkernel.ipynb index 265c3de..1cce3fc 100644 --- a/notebooks/run_untilhpathkernel.ipynb +++ b/notebooks/run_untilhpathkernel.ipynb @@ -252,8 +252,8 @@ "from libs import *\n", "import multiprocessing\n", "\n", - "from pygraph.kernels.untilHPathKernel import untilhpathkernel\n", - "from pygraph.utils.kernels import deltakernel, kernelproduct\n", + "from gklearn.kernels.untilHPathKernel import untilhpathkernel\n", + "from gklearn.utils.kernels import deltakernel, kernelproduct\n", "\n", "dslist = [\n", " {'name': 'Acyclic', 'dataset': '../datasets/acyclic/dataset_bps.ds',\n", @@ -338,7 +338,7 @@ "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mModuleNotFoundError\u001b[0m Traceback (most recent call last)", - "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mget_ipython\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun_line_magic\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'load_ext'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'line_profiler'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0msys\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4\u001b[0m 
\u001b[0msys\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpath\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0minsert\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m\"../\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mpygraph\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mutils\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mutils\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mkernel_train_test\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mget_ipython\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun_line_magic\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'load_ext'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'line_profiler'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0msys\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[0msys\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpath\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0minsert\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m\"../\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mgklearn\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mutils\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mutils\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mkernel_train_test\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.6/dist-packages/IPython/core/interactiveshell.py\u001b[0m in \u001b[0;36mrun_line_magic\u001b[0;34m(self, magic_name, line, _stack_depth)\u001b[0m\n\u001b[1;32m 2283\u001b[0m 
\u001b[0mkwargs\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'local_ns'\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msys\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_getframe\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mstack_depth\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mf_locals\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2284\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbuiltin_trap\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 2285\u001b[0;31m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2286\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mresult\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2287\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m\u001b[0m in \u001b[0;36mload_ext\u001b[0;34m(self, module_str)\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.6/dist-packages/IPython/core/magic.py\u001b[0m in \u001b[0;36m\u001b[0;34m(f, *a, **k)\u001b[0m\n\u001b[1;32m 185\u001b[0m \u001b[0;31m# but it's overkill for just that one bit of state.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 186\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mmagic_deco\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0marg\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 187\u001b[0;31m \u001b[0mcall\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mlambda\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0ma\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mk\u001b[0m\u001b[0;34m:\u001b[0m 
\u001b[0mf\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0ma\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mk\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 188\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 189\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mcallable\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0marg\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", @@ -357,8 +357,8 @@ "\n", "import sys\n", "sys.path.insert(0, \"../\")\n", - "from pygraph.utils.utils import kernel_train_test\n", - "from pygraph.kernels.untildPathKernel import untildpathkernel\n", + "from gklearn.utils.utils import kernel_train_test\n", + "from gklearn.kernels.untildPathKernel import untildpathkernel\n", "\n", "import numpy as np\n", "\n", diff --git a/notebooks/run_untilhpathkernel.py b/notebooks/run_untilhpathkernel.py index 3127ea5..00a4a40 100644 --- a/notebooks/run_untilhpathkernel.py +++ b/notebooks/run_untilhpathkernel.py @@ -9,7 +9,7 @@ Created on Fri Oct 5 19:19:33 2018 from libs import * import multiprocessing -from pygraph.kernels.untilHPathKernel import untilhpathkernel +from gklearn.kernels.untilHPathKernel import untilhpathkernel dslist = [ {'name': 'Alkane', 'dataset': '../datasets/Alkane/dataset.ds', 'task': 'regression', diff --git a/notebooks/run_weisfeilerlehmankernel.ipynb b/notebooks/run_weisfeilerlehmankernel.ipynb index 6b3f515..785f163 100644 --- a/notebooks/run_weisfeilerlehmankernel.ipynb +++ b/notebooks/run_weisfeilerlehmankernel.ipynb @@ -45,8 +45,8 @@ "from libs import *\n", "import multiprocessing\n", "\n", - "from pygraph.kernels.weisfeilerLehmanKernel import weisfeilerlehmankernel\n", - "from pygraph.utils.kernels import gaussiankernel, polynomialkernel\n", + "from gklearn.kernels.weisfeilerLehmanKernel import weisfeilerlehmankernel\n", + "from gklearn.utils.kernels import gaussiankernel, polynomialkernel\n", "\n", 
"\n", "dslist = [\n", diff --git a/notebooks/run_weisfeilerlehmankernel.py b/notebooks/run_weisfeilerlehmankernel.py index ed03adc..d84fa15 100644 --- a/notebooks/run_weisfeilerlehmankernel.py +++ b/notebooks/run_weisfeilerlehmankernel.py @@ -9,27 +9,27 @@ Created on Mon Mar 21 11:19:33 2019 from libs import * import multiprocessing -from pygraph.kernels.weisfeilerLehmanKernel import weisfeilerlehmankernel +from gklearn.kernels.weisfeilerLehmanKernel import weisfeilerlehmankernel dslist = [ - {'name': 'Alkane', 'dataset': '../datasets/Alkane/dataset.ds', 'task': 'regression', - 'dataset_y': '../datasets/Alkane/dataset_boiling_point_names.txt'}, - # contains single node graph, node symb - {'name': 'Acyclic', 'dataset': '../datasets/acyclic/dataset_bps.ds', - 'task': 'regression'}, # node symb - {'name': 'MAO', 'dataset': '../datasets/MAO/dataset.ds'}, # node/edge symb - {'name': 'PAH', 'dataset': '../datasets/PAH/dataset.ds'}, # unlabeled - {'name': 'MUTAG', 'dataset': '../datasets/MUTAG/MUTAG_A.txt'}, # node/edge symb -# {'name': 'Letter-med', 'dataset': '../datasets/Letter-med/Letter-med_A.txt'}, -# # node nsymb - {'name': 'AIDS', 'dataset': '../datasets/AIDS/AIDS_A.txt'}, # node symb/nsymb, edge symb - {'name': 'ENZYMES', 'dataset': '../datasets/ENZYMES_txt/ENZYMES_A_sparse.txt'}, +# {'name': 'Alkane', 'dataset': '../datasets/Alkane/dataset.ds', 'task': 'regression', +# 'dataset_y': '../datasets/Alkane/dataset_boiling_point_names.txt'}, +# # contains single node graph, node symb +# {'name': 'Acyclic', 'dataset': '../datasets/acyclic/dataset_bps.ds', +# 'task': 'regression'}, # node symb +# {'name': 'MAO', 'dataset': '../datasets/MAO/dataset.ds'}, # node/edge symb +# {'name': 'PAH', 'dataset': '../datasets/PAH/dataset.ds'}, # unlabeled +# {'name': 'MUTAG', 'dataset': '../datasets/MUTAG/MUTAG_A.txt'}, # node/edge symb + {'name': 'Letter-med', 'dataset': '../datasets/Letter-med/Letter-med_A.txt'}, + # node nsymb +# {'name': 'AIDS', 'dataset': 
'../datasets/AIDS/AIDS_A.txt'}, # node symb/nsymb, edge symb +# {'name': 'ENZYMES', 'dataset': '../datasets/ENZYMES_txt/ENZYMES_A_sparse.txt'}, # node symb/nsymb - {'name': 'NCI1', 'dataset': '../datasets/NCI1/NCI1_A.txt'}, # node symb - {'name': 'NCI109', 'dataset': '../datasets/NCI109/NCI109_A.txt'}, # node symb - {'name': 'D&D', 'dataset': '../datasets/DD/DD_A.txt'}, # node symb +# {'name': 'NCI1', 'dataset': '../datasets/NCI1/NCI1_A.txt'}, # node symb +# {'name': 'NCI109', 'dataset': '../datasets/NCI109/NCI109_A.txt'}, # node symb +# {'name': 'D&D', 'dataset': '../datasets/DD/DD_A.txt'}, # node symb # {'name': 'monoterpenoides', 'dataset': '../datasets/monoterpenoides/dataset_10+.ds'}, # node/edge symb # diff --git a/notebooks/tests/memory_profile.ipynb b/notebooks/tests/memory_profile.ipynb index 1dd818f..cbf0c6f 100644 --- a/notebooks/tests/memory_profile.ipynb +++ b/notebooks/tests/memory_profile.ipynb @@ -68,7 +68,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "Filename: ../../pygraph/utils/model_selection_precomputed.py\n", + "Filename: ../../gklearn/utils/model_selection_precomputed.py\n", "\n", "Line # Mem usage Increment Line Contents\n", "================================================\n", @@ -110,8 +110,8 @@ " 59 >>> import numpy as np\n", " 60 >>> import sys\n", " 61 >>> sys.path.insert(0, \"../\")\n", - " 62 >>> from pygraph.utils.model_selection_precomputed import model_selection_for_precomputed_kernel\n", - " 63 >>> from pygraph.kernels.weisfeilerLehmanKernel import weisfeilerlehmankernel\n", + " 62 >>> from gklearn.utils.model_selection_precomputed import model_selection_for_precomputed_kernel\n", + " 63 >>> from gklearn.kernels.weisfeilerLehmanKernel import weisfeilerlehmankernel\n", " 64 >>>\n", " 65 >>> datafile = '../../../../datasets/acyclic/Acyclic/dataset_bps.ds'\n", " 66 >>> estimator = weisfeilerlehmankernel\n", @@ -720,9 +720,9 @@ "from libs import *\n", "import multiprocessing\n", "\n", - "from pygraph.kernels.spKernel 
import spkernel\n", - "from pygraph.utils.kernels import deltakernel, gaussiankernel, kernelproduct\n", - "#from pygraph.utils.model_selection_precomputed import trial_do\n", + "from gklearn.kernels.spKernel import spkernel\n", + "from gklearn.utils.kernels import deltakernel, gaussiankernel, kernelproduct\n", + "#from gklearn.utils.model_selection_precomputed import trial_do\n", "\n", "dslist = [\n", " {'name': 'Acyclic', 'dataset': '../../datasets/acyclic/dataset_bps.ds',\n", diff --git a/notebooks/tests/test_lib.ipynb b/notebooks/tests/test_lib.ipynb index 2a25a1d..2903812 100644 --- a/notebooks/tests/test_lib.ipynb +++ b/notebooks/tests/test_lib.ipynb @@ -17,7 +17,7 @@ "\n", "import sys\n", "sys.path.insert(0, \"../../\")\n", - "from pygraph.utils.graphfiles import loadDataset\n", + "from gklearn.utils.graphfiles import loadDataset\n", "# from ged.GED import ged\n", "# from ged.costfunctions import RiesenCostFunction, BasicCostFunction\n", "# from ged.bipartiteGED import computeBipartiteCostMatrix, getOptimalMapping" diff --git a/notebooks/tests/test_modelselection.ipynb b/notebooks/tests/test_modelselection.ipynb index fcc341d..a3e656a 100644 --- a/notebooks/tests/test_modelselection.ipynb +++ b/notebooks/tests/test_modelselection.ipynb @@ -66,9 +66,9 @@ "import sys\n", "import functools\n", "sys.path.insert(0, \"../../\")\n", - "from pygraph.utils.model_selection_precomputed import model_selection_for_precomputed_kernel\n", - "from pygraph.kernels.spKernel import spkernel\n", - "from pygraph.utils.kernels import deltakernel, gaussiankernel, kernelproduct\n", + "from gklearn.utils.model_selection_precomputed import model_selection_for_precomputed_kernel\n", + "from gklearn.kernels.spKernel import spkernel\n", + "from gklearn.utils.kernels import deltakernel, gaussiankernel, kernelproduct\n", "\n", "datafile = '../../datasets/acyclic/dataset_bps.ds'\n", "estimator = spkernel\n", diff --git a/notebooks/tests/test_parallel_chunksize.py 
b/notebooks/tests/test_parallel_chunksize.py index d7ef8a1..828d5c3 100644 --- a/notebooks/tests/test_parallel_chunksize.py +++ b/notebooks/tests/test_parallel_chunksize.py @@ -23,10 +23,10 @@ from sklearn.model_selection import ParameterGrid sys.path.insert(0, "../") sys.path.insert(0, "../../") from libs import * -from pygraph.utils.utils import getSPGraph, direct_product -from pygraph.utils.graphdataset import get_dataset_attributes -from pygraph.utils.graphfiles import loadDataset -from pygraph.utils.kernels import deltakernel, gaussiankernel, kernelproduct +from gklearn.utils.utils import getSPGraph, direct_product +from gklearn.utils.graphdataset import get_dataset_attributes +from gklearn.utils.graphfiles import loadDataset +from gklearn.utils.kernels import deltakernel, gaussiankernel, kernelproduct def spkernel(*args, diff --git a/notebooks/tests/test_parallel_chunksize_2.py b/notebooks/tests/test_parallel_chunksize_2.py new file mode 100644 index 0000000..6f790b7 --- /dev/null +++ b/notebooks/tests/test_parallel_chunksize_2.py @@ -0,0 +1,690 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Test of parallel, find the best parallel chunksize and iteration seperation scheme. 
+Created on Wed Sep 26 12:09:34 2018 + +@author: ljia +""" + +import sys +import time +from itertools import combinations_with_replacement, product, combinations +from functools import partial +from multiprocessing import Pool +from tqdm import tqdm +import networkx as nx +import numpy as np +import functools +#import multiprocessing +from matplotlib import pyplot as plt +from sklearn.model_selection import ParameterGrid + +sys.path.insert(0, "../") +sys.path.insert(0, "../../") +from libs import * +from gklearn.utils.utils import getSPGraph, direct_product +from gklearn.utils.graphdataset import get_dataset_attributes +from gklearn.utils.graphfiles import loadDataset +from gklearn.utils.kernels import deltakernel, gaussiankernel, kernelproduct + + +def spkernel(*args, + node_label='atom', + edge_weight=None, + node_kernels=None, + n_jobs=None, + chunksize=1): + """Calculate shortest-path kernels between graphs. + """ + # pre-process + Gn = args[0] if len(args) == 1 else [args[0], args[1]] + Gn = [g.copy() for g in Gn] + weight = None + if edge_weight is None: + print('\n None edge weight specified. Set all weight to 1.\n') + else: + try: + some_weight = list( + nx.get_edge_attributes(Gn[0], edge_weight).values())[0] + if isinstance(some_weight, (float, int)): + weight = edge_weight + else: + print( + '\n Edge weight with name %s is not float or integer. Set all weight to 1.\n' + % edge_weight) + except: + print( + '\n Edge weight with name "%s" is not found in the edge attributes. Set all weight to 1.\n' + % edge_weight) + ds_attrs = get_dataset_attributes( + Gn, + attr_names=['node_labeled', 'node_attr_dim', 'is_directed'], + node_label=node_label) + + # remove graphs with no edges, as no sp can be found in their structures, + # so the kernel between such a graph and itself will be zero. 
+ len_gn = len(Gn) + Gn = [(idx, G) for idx, G in enumerate(Gn) if nx.number_of_edges(G) != 0] + idx = [G[0] for G in Gn] + Gn = [G[1] for G in Gn] + if len(Gn) != len_gn: + print('\n %d graphs are removed as they don\'t contain edges.\n' % + (len_gn - len(Gn))) + + start_time = time.time() + + pool = Pool(n_jobs) + # get shortest path graphs of Gn + getsp_partial = partial(wrapper_getSPGraph, weight) + itr = zip(Gn, range(0, len(Gn))) + for i, g in tqdm( + pool.imap_unordered(getsp_partial, itr, chunksize), + desc='getting sp graphs', file=sys.stdout): + Gn[i] = g + pool.close() + pool.join() + + Kmatrix = np.zeros((len(Gn), len(Gn))) + + # ---- use pool.imap_unordered to parallel and track progress. ---- + def init_worker(gn_toshare): + global G_gn + G_gn = gn_toshare + do_partial = partial(wrapper_sp_do, ds_attrs, node_label, node_kernels) + itr = combinations_with_replacement(range(0, len(Gn)), 2) + with Pool(processes=n_jobs, initializer=init_worker, initargs=(Gn,)) as pool: + for i, j, kernel in tqdm(pool.imap_unordered(do_partial, itr, chunksize), + desc='calculating kernels', file=sys.stdout): + Kmatrix[i][j] = kernel + Kmatrix[j][i] = kernel + +# # ---- direct running, normally use single CPU core. ---- +# itr = combinations_with_replacement(range(0, len(Gn)), 2) +# for i, j in tqdm(itr, desc='calculating kernels', file=sys.stdout): +# kernel = spkernel_do(Gn[i], Gn[j], ds_attrs, node_label, node_kernels) +# Kmatrix[i][j] = kernel +# Kmatrix[j][i] = kernel + + run_time = time.time() - start_time + print( + "\n --- shortest path kernel matrix of size %d built in %s seconds ---" + % (len(Gn), run_time)) + + return Kmatrix, run_time, idx + + +def spkernel_do(g1, g2, ds_attrs, node_label, node_kernels): + + kernel = 0 + + # compute shortest path matrices first, method borrowed from FCSP. 
+ vk_dict = {} # shortest path matrices dict + if ds_attrs['node_labeled']: + # node symb and non-synb labeled + if ds_attrs['node_attr_dim'] > 0: + kn = node_kernels['mix'] + for n1, n2 in product( + g1.nodes(data=True), g2.nodes(data=True)): + vk_dict[(n1[0], n2[0])] = kn( + n1[1][node_label], n2[1][node_label], + n1[1]['attributes'], n2[1]['attributes']) + # node symb labeled + else: + kn = node_kernels['symb'] + for n1 in g1.nodes(data=True): + for n2 in g2.nodes(data=True): + vk_dict[(n1[0], n2[0])] = kn(n1[1][node_label], + n2[1][node_label]) + else: + # node non-synb labeled + if ds_attrs['node_attr_dim'] > 0: + kn = node_kernels['nsymb'] + for n1 in g1.nodes(data=True): + for n2 in g2.nodes(data=True): + vk_dict[(n1[0], n2[0])] = kn(n1[1]['attributes'], + n2[1]['attributes']) + # node unlabeled + else: + for e1, e2 in product( + g1.edges(data=True), g2.edges(data=True)): + if e1[2]['cost'] == e2[2]['cost']: + kernel += 1 + return kernel + + # compute graph kernels + if ds_attrs['is_directed']: + for e1, e2 in product(g1.edges(data=True), g2.edges(data=True)): + if e1[2]['cost'] == e2[2]['cost']: + nk11, nk22 = vk_dict[(e1[0], e2[0])], vk_dict[(e1[1], + e2[1])] + kn1 = nk11 * nk22 + kernel += kn1 + else: + for e1, e2 in product(g1.edges(data=True), g2.edges(data=True)): + if e1[2]['cost'] == e2[2]['cost']: + # each edge walk is counted twice, starting from both its extreme nodes. 
+ nk11, nk12, nk21, nk22 = vk_dict[(e1[0], e2[0])], vk_dict[( + e1[0], e2[1])], vk_dict[(e1[1], + e2[0])], vk_dict[(e1[1], + e2[1])] + kn1 = nk11 * nk22 + kn2 = nk12 * nk21 + kernel += kn1 + kn2 + + return kernel + + +def wrapper_sp_do(ds_attrs, node_label, node_kernels, itr): + i = itr[0] + j = itr[1] + return i, j, spkernel_do(G_gn[i], G_gn[j], ds_attrs, node_label, node_kernels) + + +def wrapper_getSPGraph(weight, itr_item): + g = itr_item[0] + i = itr_item[1] + return i, getSPGraph(g, edge_weight=weight) + + + +# +# +#def commonwalkkernel(*args, +# node_label='atom', +# edge_label='bond_type', +# n=None, +# weight=1, +# compute_method=None, +# n_jobs=None, +# chunksize=1): +# """Calculate common walk graph kernels between graphs. +# """ +# compute_method = compute_method.lower() +# # arrange all graphs in a list +# Gn = args[0] if len(args) == 1 else [args[0], args[1]] +# Kmatrix = np.zeros((len(Gn), len(Gn))) +# ds_attrs = get_dataset_attributes( +# Gn, +# attr_names=['node_labeled', 'edge_labeled', 'is_directed'], +# node_label=node_label, +# edge_label=edge_label) +# if not ds_attrs['node_labeled']: +# for G in Gn: +# nx.set_node_attributes(G, '0', 'atom') +# if not ds_attrs['edge_labeled']: +# for G in Gn: +# nx.set_edge_attributes(G, '0', 'bond_type') +# if not ds_attrs['is_directed']: # convert +# Gn = [G.to_directed() for G in Gn] +# +# start_time = time.time() +# +# # ---- use pool.imap_unordered to parallel and track progress. 
---- +# pool = Pool(n_jobs) +# itr = combinations_with_replacement(range(0, len(Gn)), 2) +## len_itr = int(len(Gn) * (len(Gn) + 1) / 2) +## if len_itr < 100: +## chunksize, extra = divmod(len_itr, n_jobs * 4) +## if extra: +## chunksize += 1 +## else: +## chunksize = 100 +# +# # direct product graph method - exponential +# if compute_method == 'exp': +# do_partial = partial(_commonwalkkernel_exp, Gn, node_label, edge_label, +# weight) +# # direct product graph method - geometric +# elif compute_method == 'geo': +# do_partial = partial(_commonwalkkernel_geo, Gn, node_label, edge_label, +# weight) +# +# for i, j, kernel in tqdm( +# pool.imap_unordered(do_partial, itr, chunksize), +# desc='calculating kernels', +# file=sys.stdout): +# Kmatrix[i][j] = kernel +# Kmatrix[j][i] = kernel +# pool.close() +# pool.join() +# +# run_time = time.time() - start_time +# print( +# "\n --- kernel matrix of common walk kernel of size %d built in %s seconds ---" +# % (len(Gn), run_time)) +# +# return Kmatrix, run_time +# +# +#def _commonwalkkernel_exp(Gn, node_label, edge_label, beta, ij): +# """Calculate walk graph kernels up to n between 2 graphs using exponential +# series. +# """ +# i = ij[0] +# j = ij[1] +# g1 = Gn[i] +# g2 = Gn[j] +# +# # get tensor product / direct product +# gp = direct_product(g1, g2, node_label, edge_label) +# A = nx.adjacency_matrix(gp).todense() +# +# ew, ev = np.linalg.eig(A) +# D = np.zeros((len(ew), len(ew))) +# for i in range(len(ew)): +# D[i][i] = np.exp(beta * ew[i]) +# exp_D = ev * D * ev.T +# +# return i, j, exp_D.sum() +# +# +#def _commonwalkkernel_geo(Gn, node_label, edge_label, gamma, ij): +# """Calculate common walk graph kernels up to n between 2 graphs using +# geometric series. 
+# """ +# i = ij[0] +# j = ij[1] +# g1 = Gn[i] +# g2 = Gn[j] +# +# # get tensor product / direct product +# gp = direct_product(g1, g2, node_label, edge_label) +# A = nx.adjacency_matrix(gp).todense() +# mat = np.identity(len(A)) - gamma * A +# try: +# return i, j, mat.I.sum() +# except np.linalg.LinAlgError: +# return i, j, np.nan + + +#def structuralspkernel(*args, +# node_label='atom', +# edge_weight=None, +# edge_label='bond_type', +# node_kernels=None, +# edge_kernels=None, +# n_jobs=None, +# chunksize=1): +# """Calculate mean average structural shortest path kernels between graphs. +# """ +# # pre-process +# Gn = args[0] if len(args) == 1 else [args[0], args[1]] +# +# weight = None +# if edge_weight is None: +# print('\n None edge weight specified. Set all weight to 1.\n') +# else: +# try: +# some_weight = list( +# nx.get_edge_attributes(Gn[0], edge_weight).values())[0] +# if isinstance(some_weight, (float, int)): +# weight = edge_weight +# else: +# print( +# '\n Edge weight with name %s is not float or integer. Set all weight to 1.\n' +# % edge_weight) +# except: +# print( +# '\n Edge weight with name "%s" is not found in the edge attributes. 
Set all weight to 1.\n' +# % edge_weight) +# ds_attrs = get_dataset_attributes( +# Gn, +# attr_names=['node_labeled', 'node_attr_dim', 'edge_labeled', +# 'edge_attr_dim', 'is_directed'], +# node_label=node_label, edge_label=edge_label) +# +# start_time = time.time() +# +# # get shortest paths of each graph in Gn +# splist = [[] for _ in range(len(Gn))] +# pool = Pool(n_jobs) +# # get shortest path graphs of Gn +# getsp_partial = partial(wrap_getSP, Gn, weight, ds_attrs['is_directed']) +## if len(Gn) < 100: +## # use default chunksize as pool.map when iterable is less than 100 +## chunksize, extra = divmod(len(Gn), n_jobs * 4) +## if extra: +## chunksize += 1 +## else: +## chunksize = 100 +# # chunksize = 300 # int(len(list(itr)) / n_jobs) +# for i, sp in tqdm( +# pool.imap_unordered(getsp_partial, range(0, len(Gn)), chunksize), +# desc='getting shortest paths', +# file=sys.stdout): +# splist[i] = sp +# +# Kmatrix = np.zeros((len(Gn), len(Gn))) +# +# # ---- use pool.imap_unordered to parallel and track progress. 
---- +# do_partial = partial(structuralspkernel_do, Gn, splist, ds_attrs, +# node_label, edge_label, node_kernels, edge_kernels) +# itr = combinations_with_replacement(range(0, len(Gn)), 2) +## len_itr = int(len(Gn) * (len(Gn) + 1) / 2) +## if len_itr < 100: +## chunksize, extra = divmod(len_itr, n_jobs * 4) +## if extra: +## chunksize += 1 +## else: +## chunksize = 100 +# for i, j, kernel in tqdm( +# pool.imap_unordered(do_partial, itr, chunksize), +# desc='calculating kernels', +# file=sys.stdout): +# Kmatrix[i][j] = kernel +# Kmatrix[j][i] = kernel +# pool.close() +# pool.join() +# +# run_time = time.time() - start_time +# print( +# "\n --- shortest path kernel matrix of size %d built in %s seconds ---" +# % (len(Gn), run_time)) +# +# return Kmatrix, run_time +# +# +#def structuralspkernel_do(Gn, splist, ds_attrs, node_label, edge_label, +# node_kernels, edge_kernels, ij): +# +# iglobal = ij[0] +# jglobal = ij[1] +# g1 = Gn[iglobal] +# g2 = Gn[jglobal] +# spl1 = splist[iglobal] +# spl2 = splist[jglobal] +# kernel = 0 +# +# try: +# # First, compute shortest path matrices, method borrowed from FCSP. 
+# if ds_attrs['node_labeled']: +# # node symb and non-synb labeled +# if ds_attrs['node_attr_dim'] > 0: +# kn = node_kernels['mix'] +# vk_dict = {} # shortest path matrices dict +# for n1, n2 in product( +# g1.nodes(data=True), g2.nodes(data=True)): +# vk_dict[(n1[0], n2[0])] = kn( +# n1[1][node_label], n2[1][node_label], +# [n1[1]['attributes']], [n2[1]['attributes']]) +# # node symb labeled +# else: +# kn = node_kernels['symb'] +# vk_dict = {} # shortest path matrices dict +# for n1 in g1.nodes(data=True): +# for n2 in g2.nodes(data=True): +# vk_dict[(n1[0], n2[0])] = kn(n1[1][node_label], +# n2[1][node_label]) +# else: +# # node non-synb labeled +# if ds_attrs['node_attr_dim'] > 0: +# kn = node_kernels['nsymb'] +# vk_dict = {} # shortest path matrices dict +# for n1 in g1.nodes(data=True): +# for n2 in g2.nodes(data=True): +# vk_dict[(n1[0], n2[0])] = kn([n1[1]['attributes']], +# [n2[1]['attributes']]) +# # node unlabeled +# else: +# vk_dict = {} +# +# # Then, compute kernels between all pairs of edges, which idea is an +# # extension of FCSP. It suits sparse graphs, which is the most case we +# # went though. For dense graphs, it would be slow. 
+# if ds_attrs['edge_labeled']: +# # edge symb and non-synb labeled +# if ds_attrs['edge_attr_dim'] > 0: +# ke = edge_kernels['mix'] +# ek_dict = {} # dict of edge kernels +# for e1, e2 in product( +# g1.edges(data=True), g2.edges(data=True)): +# ek_dict[((e1[0], e1[1]), (e2[0], e2[1]))] = ke( +# e1[2][edge_label], e2[2][edge_label], +# [e1[2]['attributes']], [e2[2]['attributes']]) +# # edge symb labeled +# else: +# ke = edge_kernels['symb'] +# ek_dict = {} +# for e1 in g1.edges(data=True): +# for e2 in g2.edges(data=True): +# ek_dict[((e1[0], e1[1]), (e2[0], e2[1]))] = ke( +# e1[2][edge_label], e2[2][edge_label]) +# else: +# # edge non-synb labeled +# if ds_attrs['edge_attr_dim'] > 0: +# ke = edge_kernels['nsymb'] +# ek_dict = {} +# for e1 in g1.edges(data=True): +# for e2 in g2.edges(data=True): +# ek_dict[((e1[0], e1[1]), (e2[0], e2[1]))] = kn( +# [e1[2]['attributes']], [e2[2]['attributes']]) +# # edge unlabeled +# else: +# ek_dict = {} +# +# # compute graph kernels +# if vk_dict: +# if ek_dict: +# for p1, p2 in product(spl1, spl2): +# if len(p1) == len(p2): +# kpath = vk_dict[(p1[0], p2[0])] +# if kpath: +# for idx in range(1, len(p1)): +# kpath *= vk_dict[(p1[idx], p2[idx])] * \ +# ek_dict[((p1[idx-1], p1[idx]), +# (p2[idx-1], p2[idx]))] +# if not kpath: +# break +# kernel += kpath # add up kernels of all paths +# else: +# for p1, p2 in product(spl1, spl2): +# if len(p1) == len(p2): +# kpath = vk_dict[(p1[0], p2[0])] +# if kpath: +# for idx in range(1, len(p1)): +# kpath *= vk_dict[(p1[idx], p2[idx])] +# if not kpath: +# break +# kernel += kpath # add up kernels of all paths +# else: +# if ek_dict: +# for p1, p2 in product(spl1, spl2): +# if len(p1) == len(p2): +# if len(p1) == 0: +# kernel += 1 +# else: +# kpath = 1 +# for idx in range(0, len(p1) - 1): +# kpath *= ek_dict[((p1[idx], p1[idx+1]), +# (p2[idx], p2[idx+1]))] +# if not kpath: +# break +# kernel += kpath # add up kernels of all paths +# else: +# for p1, p2 in product(spl1, spl2): +# if len(p1) == 
len(p2): +# kernel += 1 +# +# kernel = kernel / (len(spl1) * len(spl2)) # calculate mean average +# except KeyError: # missing labels or attributes +# pass +# +# return iglobal, jglobal, kernel +# +# +#def get_shortest_paths(G, weight, directed): +# """Get all shortest paths of a graph. +# """ +# sp = [] +# for n1, n2 in combinations(G.nodes(), 2): +# try: +# sptemp = nx.shortest_path(G, n1, n2, weight=weight) +# sp.append(sptemp) +# # each edge walk is counted twice, starting from both its extreme nodes. +# if not directed: +# sp.append(sptemp[::-1]) +# except nx.NetworkXNoPath: # nodes not connected +# # sp.append([]) +# pass +# # add single nodes as length 0 paths. +# sp += [[n] for n in G.nodes()] +# return sp +# +# +#def wrap_getSP(Gn, weight, directed, i): +# return i, get_shortest_paths(Gn[i], weight, directed) + + +def compute_gram_matrices(datafile, + estimator, + param_grid_precomputed, + datafile_y=None, + extra_params=None, + ds_name='ds-unknown', + n_jobs=1, + chunksize=1): + """ + + Parameters + ---------- + datafile : string + Path of dataset file. + estimator : function + kernel function used to estimate. This function needs to return a gram matrix. + param_grid_precomputed : dictionary + Dictionary with names (string) of parameters used to calculate gram matrices as keys and lists of parameter settings to try as values. This enables searching over any sequence of parameter settings. Params with length 1 will be omitted. + datafile_y : string + Path of file storing y data. This parameter is optional depending on the given dataset file. + """ + tqdm.monitor_interval = 0 + + # Load the dataset + dataset, y_all = loadDataset( + datafile, filename_y=datafile_y, extra_params=extra_params) + + # Grid of parameters with a discrete number of values for each. 
+ param_list_precomputed = list(ParameterGrid(param_grid_precomputed)) + + gram_matrix_time = [ + ] # a list to store time to calculate gram matrices + + # calculate all gram matrices + for idx, params_out in enumerate(param_list_precomputed): + y = y_all[:] + params_out['n_jobs'] = n_jobs + params_out['chunksize'] = chunksize + rtn_data = estimator(dataset[:], **params_out) + Kmatrix = rtn_data[0] + current_run_time = rtn_data[1] + # for some kernels, some graphs in datasets may not meet the + # kernels' requirements for graph structure. These graphs are trimmed. + if len(rtn_data) == 3: + idx_trim = rtn_data[2] # the index of trimmed graph list + y = [y[idx] for idx in idx_trim] # trim y accordingly + + Kmatrix_diag = Kmatrix.diagonal().copy() + # remove graphs whose kernels with themselves are zeros + nb_g_ignore = 0 + for idx, diag in enumerate(Kmatrix_diag): + if diag == 0: + Kmatrix = np.delete(Kmatrix, (idx - nb_g_ignore), axis=0) + Kmatrix = np.delete(Kmatrix, (idx - nb_g_ignore), axis=1) + nb_g_ignore += 1 + # normalization + Kmatrix_diag = Kmatrix.diagonal().copy() + for i in range(len(Kmatrix)): + for j in range(i, len(Kmatrix)): + Kmatrix[i][j] /= np.sqrt(Kmatrix_diag[i] * Kmatrix_diag[j]) + Kmatrix[j][i] = Kmatrix[i][j] + + gram_matrix_time.append(current_run_time) + + average_gram_matrix_time = np.mean(gram_matrix_time) + + return average_gram_matrix_time + + +dslist = [ + {'name': 'Alkane', 'dataset': '../../datasets/Alkane/dataset.ds', 'task': 'regression', + 'dataset_y': '../../datasets/Alkane/dataset_boiling_point_names.txt'}, + # contains single node graph, node symb + {'name': 'Acyclic', 'dataset': '../../datasets/acyclic/dataset_bps.ds', + 'task': 'regression'}, # node symb + {'name': 'MAO', 'dataset': '../../datasets/MAO/dataset.ds'}, # node/edge symb + {'name': 'PAH', 'dataset': '../../datasets/PAH/dataset.ds'}, # unlabeled + {'name': 'MUTAG', 'dataset': '../../datasets/MUTAG/MUTAG_A.txt'}, # node/edge symb + {'name': 'Letter-med', 'dataset': 
'../../datasets/Letter-med/Letter-med_A.txt'}, + # node nsymb + {'name': 'ENZYMES', 'dataset': '../../datasets/ENZYMES_txt/ENZYMES_A_sparse.txt'}, + # node symb/nsymb + {'name': 'AIDS', 'dataset': '../../datasets/AIDS/AIDS_A.txt'}, # node symb/nsymb, edge symb +# {'name': 'Mutagenicity', 'dataset': '../../datasets/Mutagenicity/Mutagenicity_A.txt'}, +# {'name': 'D&D', 'dataset': '../../datasets/D&D/DD.mat', +# 'extra_params': {'am_sp_al_nl_el': [0, 1, 2, 1, -1]}}, # node symb +] + +fig, ax = plt.subplots() +ax.set_xscale('log', nonposx='clip') +ax.set_yscale('log', nonposy='clip') +ax.set_xlabel('parallel chunksize') +ax.set_ylabel('runtime($s$)') +ax.set_title('28 cpus') +ax.grid(axis='both') + +estimator = spkernel +if estimator.__name__ == 'spkernel': + mixkernel = functools.partial(kernelproduct, deltakernel, gaussiankernel) + param_grid_precomputed = {'node_kernels': [ + {'symb': deltakernel, 'nsymb': gaussiankernel, 'mix': mixkernel}]} + +elif estimator.__name__ == 'commonwalkkernel': + mixkernel = functools.partial(kernelproduct, deltakernel, gaussiankernel) + param_grid_precomputed = {'compute_method': ['geo'], + 'weight': [1]} +elif estimator.__name__ == 'structuralspkernel': + mixkernel = functools.partial(kernelproduct, deltakernel, gaussiankernel) + param_grid_precomputed = {'node_kernels': + [{'symb': deltakernel, 'nsymb': gaussiankernel, 'mix': mixkernel}], + 'edge_kernels': + [{'symb': deltakernel, 'nsymb': gaussiankernel, 'mix': mixkernel}]} + +#list(range(10, 100, 20)) + +#chunklist = list(range(10, 100, 20)) + list(range(100, 1000, 200)) + \ +# list(range(1000, 10000, 2000)) + list(range(10000, 100000, 20000)) +# chunklist = list(range(300, 1000, 200)) + list(range(1000, 10000, 2000)) + list(range(10000, 100000, 20000)) +chunklist = list(range(10, 100, 10)) + list(range(100, 1000, 100)) + \ + list(range(1000, 10000, 1000)) + list(range(10000, 100000, 10000)) +#chunklist = list(range(1000, 10000, 1000)) +gmtmat = np.zeros((len(dslist), 
len(chunklist))) +cpus = 28 + +for idx1, ds in enumerate(dslist): + print() + print(ds['name']) + + for idx2, cs in enumerate(chunklist): + print(ds['name'], idx2, cs) + gmtmat[idx1][idx2] = compute_gram_matrices( + ds['dataset'], + estimator, + param_grid_precomputed, + + datafile_y=(ds['dataset_y'] if 'dataset_y' in ds else None), + extra_params=(ds['extra_params'] + if 'extra_params' in ds else None), + ds_name=ds['name'], + n_jobs=cpus, + chunksize=cs) + + print() + print(gmtmat[idx1, :]) + np.save('../test_parallel/' + estimator.__name__ + '.' + ds['name'] + '_' + + str(idx1), gmtmat[idx1, :]) + + p = ax.plot(chunklist, gmtmat[idx1, :], '.-', label=ds['name'], zorder=3) + ax.legend(loc='upper right', ncol=3, labelspacing=0.1, handletextpad=0.4, + columnspacing=0.6) + plt.savefig('../test_parallel/' + estimator.__name__ + str(idx1) + '_' + + str(cpus) + '.eps', format='eps', dpi=300) +# plt.show() \ No newline at end of file diff --git a/notebooks/tests/test_spkernel.ipynb b/notebooks/tests/test_spkernel.ipynb index 1675b90..554bc9e 100644 --- a/notebooks/tests/test_spkernel.ipynb +++ b/notebooks/tests/test_spkernel.ipynb @@ -605,7 +605,7 @@ "import numpy as np\n", "import time\n", "\n", - "from pygraph.utils.utils import getSPGraph\n", + "from gklearn.utils.utils import getSPGraph\n", "\n", "\n", "def spkernel(Gn):\n", diff --git a/notebooks/unfinished/run_cyclicpatternkernel.ipynb b/notebooks/unfinished/run_cyclicpatternkernel.ipynb index 3dc83b5..f0f02e0 100644 --- a/notebooks/unfinished/run_cyclicpatternkernel.ipynb +++ b/notebooks/unfinished/run_cyclicpatternkernel.ipynb @@ -31,7 +31,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "../pygraph/utils/model_selection_precomputed.py:100: RuntimeWarning: invalid value encountered in double_scalars\n", + "../gklearn/utils/model_selection_precomputed.py:100: RuntimeWarning: invalid value encountered in double_scalars\n", " Kmatrix[i][j] /= np.sqrt(Kmatrix_diag[i] * Kmatrix_diag[j])\n" ] }, @@ -543,8 
+543,8 @@ "import numpy as np\n", "import sys\n", "sys.path.insert(0, \"../\")\n", - "from pygraph.utils.model_selection_precomputed import model_selection_for_precomputed_kernel\n", - "from pygraph.kernels.cyclicPatternKernel import cyclicpatternkernel\n", + "from gklearn.utils.model_selection_precomputed import model_selection_for_precomputed_kernel\n", + "from gklearn.kernels.cyclicPatternKernel import cyclicpatternkernel\n", "\n", "datafile = '../../../../datasets/MAO/dataset.ds'\n", "estimator = cyclicpatternkernel\n", @@ -684,10 +684,10 @@ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)", "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 15\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 16\u001b[0m kernel_train_test(datafile, kernel_file_path, cyclicpatternkernel, kernel_para, hyper_name = 'cycle_bound', hyper_range = np.linspace(0, 500, 21), normalize = False,\n\u001b[0;32m---> 17\u001b[0;31m model_type = 'classification')\n\u001b[0m", - "\u001b[0;32m/media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/utils/utils.py\u001b[0m in \u001b[0;36mkernel_train_test\u001b[0;34m(datafile, kernel_file_path, kernel_func, kernel_para, trials, splits, alpha_grid, C_grid, hyper_name, hyper_range, normalize, datafile_y, model_type)\u001b[0m\n\u001b[1;32m 159\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mhyper_name\u001b[0m \u001b[0;34m!=\u001b[0m \u001b[0;34m''\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 160\u001b[0m \u001b[0mkernel_para\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mhyper_name\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mhyper_para\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 161\u001b[0;31m \u001b[0mKmatrix\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mrun_time\u001b[0m \u001b[0;34m=\u001b[0m 
\u001b[0mkernel_func\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdataset\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkernel_para\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 162\u001b[0m \u001b[0mkernel_time_list\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mappend\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrun_time\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 163\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mmatplotlib\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpyplot\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mplt\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/cyclicPatternKernel.py\u001b[0m in \u001b[0;36mcyclicpatternkernel\u001b[0;34m(node_label, edge_label, labeled, cycle_bound, *args)\u001b[0m\n\u001b[1;32m 48\u001b[0m \u001b[0;31m# get all cyclic and tree patterns of all graphs before calculating kernels to save time, but this may consume a lot of memory for large dataset.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 49\u001b[0m all_patterns = [ get_patterns(Gn[i], node_label = node_label, edge_label = edge_label, labeled = labeled, cycle_bound = cycle_bound)\n\u001b[0;32m---> 50\u001b[0;31m for i in tqdm(range(0, len(Gn)), desc = 'retrieve patterns', file=sys.stdout) ]\n\u001b[0m\u001b[1;32m 51\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 52\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mi\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mtqdm\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mGn\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdesc\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m'calculate kernels'\u001b[0m\u001b[0;34m,\u001b[0m 
\u001b[0mfile\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0msys\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstdout\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/cyclicPatternKernel.py\u001b[0m in \u001b[0;36m\u001b[0;34m(.0)\u001b[0m\n\u001b[1;32m 48\u001b[0m \u001b[0;31m# get all cyclic and tree patterns of all graphs before calculating kernels to save time, but this may consume a lot of memory for large dataset.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 49\u001b[0m all_patterns = [ get_patterns(Gn[i], node_label = node_label, edge_label = edge_label, labeled = labeled, cycle_bound = cycle_bound)\n\u001b[0;32m---> 50\u001b[0;31m for i in tqdm(range(0, len(Gn)), desc = 'retrieve patterns', file=sys.stdout) ]\n\u001b[0m\u001b[1;32m 51\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 52\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mi\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mtqdm\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mGn\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdesc\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m'calculate kernels'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfile\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0msys\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstdout\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/cyclicPatternKernel.py\u001b[0m in \u001b[0;36mget_patterns\u001b[0;34m(G, node_label, edge_label, labeled, cycle_bound)\u001b[0m\n\u001b[1;32m 111\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 112\u001b[0m \u001b[0mbicomponents\u001b[0m \u001b[0;34m=\u001b[0m 
\u001b[0mnx\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbiconnected_component_subgraphs\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mG\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# all biconnected components of G. this function use algorithm in reference [2], which (i guess) is slightly different from the one used in paper [1]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 113\u001b[0;31m \u001b[0;32mfor\u001b[0m \u001b[0msubgraph\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mbicomponents\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 114\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mnx\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnumber_of_edges\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msubgraph\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m>\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 115\u001b[0m \u001b[0msimple_cycles\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mlist\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnx\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msimple_cycles\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mG\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mto_directed\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# all simple cycles in biconnected components. this function use algorithm in reference [3], which has time complexity O((n+e)(N+1)) for n nodes, e edges and N simple cycles. 
Which might be slower than the algorithm applied in paper [1]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/media/ljia/DATA/research-repo/codes/Linlin/py-graph/gklearn/utils/utils.py\u001b[0m in \u001b[0;36mkernel_train_test\u001b[0;34m(datafile, kernel_file_path, kernel_func, kernel_para, trials, splits, alpha_grid, C_grid, hyper_name, hyper_range, normalize, datafile_y, model_type)\u001b[0m\n\u001b[1;32m 159\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mhyper_name\u001b[0m \u001b[0;34m!=\u001b[0m \u001b[0;34m''\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 160\u001b[0m \u001b[0mkernel_para\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mhyper_name\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mhyper_para\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 161\u001b[0;31m \u001b[0mKmatrix\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mrun_time\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mkernel_func\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdataset\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkernel_para\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 162\u001b[0m \u001b[0mkernel_time_list\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mappend\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrun_time\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 163\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mmatplotlib\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpyplot\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mplt\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/media/ljia/DATA/research-repo/codes/Linlin/py-graph/gklearn/kernels/cyclicPatternKernel.py\u001b[0m in \u001b[0;36mcyclicpatternkernel\u001b[0;34m(node_label, edge_label, labeled, cycle_bound, *args)\u001b[0m\n\u001b[1;32m 48\u001b[0m \u001b[0;31m# get all cyclic and tree patterns of all graphs before calculating kernels to save time, but this may consume a lot of memory for large 
dataset.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 49\u001b[0m all_patterns = [ get_patterns(Gn[i], node_label = node_label, edge_label = edge_label, labeled = labeled, cycle_bound = cycle_bound)\n\u001b[0;32m---> 50\u001b[0;31m for i in tqdm(range(0, len(Gn)), desc = 'retrieve patterns', file=sys.stdout) ]\n\u001b[0m\u001b[1;32m 51\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 52\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mi\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mtqdm\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mGn\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdesc\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m'calculate kernels'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfile\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0msys\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstdout\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/media/ljia/DATA/research-repo/codes/Linlin/py-graph/gklearn/kernels/cyclicPatternKernel.py\u001b[0m in \u001b[0;36m\u001b[0;34m(.0)\u001b[0m\n\u001b[1;32m 48\u001b[0m \u001b[0;31m# get all cyclic and tree patterns of all graphs before calculating kernels to save time, but this may consume a lot of memory for large dataset.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 49\u001b[0m all_patterns = [ get_patterns(Gn[i], node_label = node_label, edge_label = edge_label, labeled = labeled, cycle_bound = cycle_bound)\n\u001b[0;32m---> 50\u001b[0;31m for i in tqdm(range(0, len(Gn)), desc = 'retrieve patterns', file=sys.stdout) ]\n\u001b[0m\u001b[1;32m 51\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 52\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mi\u001b[0m \u001b[0;32min\u001b[0m 
\u001b[0mtqdm\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mGn\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdesc\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m'calculate kernels'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfile\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0msys\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstdout\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/media/ljia/DATA/research-repo/codes/Linlin/py-graph/gklearn/kernels/cyclicPatternKernel.py\u001b[0m in \u001b[0;36mget_patterns\u001b[0;34m(G, node_label, edge_label, labeled, cycle_bound)\u001b[0m\n\u001b[1;32m 111\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 112\u001b[0m \u001b[0mbicomponents\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnx\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbiconnected_component_subgraphs\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mG\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# all biconnected components of G. 
this function use algorithm in reference [2], which (i guess) is slightly different from the one used in paper [1]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 113\u001b[0;31m \u001b[0;32mfor\u001b[0m \u001b[0msubgraph\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mbicomponents\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 114\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mnx\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnumber_of_edges\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msubgraph\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m>\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 115\u001b[0m \u001b[0msimple_cycles\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mlist\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnx\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msimple_cycles\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mG\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mto_directed\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# all simple cycles in biconnected components. this function use algorithm in reference [3], which has time complexity O((n+e)(N+1)) for n nodes, e edges and N simple cycles. 
Which might be slower than the algorithm applied in paper [1]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.5/dist-packages/networkx/algorithms/components/biconnected.py\u001b[0m in \u001b[0;36mbiconnected_component_subgraphs\u001b[0;34m(G, copy)\u001b[0m\n\u001b[1;32m 275\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mc\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mbiconnected_components\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mG\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 276\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mcopy\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 277\u001b[0;31m \u001b[0;32myield\u001b[0m \u001b[0mG\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msubgraph\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mc\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcopy\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 278\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 279\u001b[0m \u001b[0;32myield\u001b[0m \u001b[0mG\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msubgraph\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mc\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.5/dist-packages/networkx/classes/graph.py\u001b[0m in \u001b[0;36mcopy\u001b[0;34m(self, as_view)\u001b[0m\n\u001b[1;32m 1420\u001b[0m \u001b[0mG\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0madd_nodes_from\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mn\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0md\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcopy\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mn\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0md\u001b[0m \u001b[0;32min\u001b[0m 
\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_node\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mitems\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1421\u001b[0m G.add_edges_from((u, v, datadict.copy())\n\u001b[0;32m-> 1422\u001b[0;31m \u001b[0;32mfor\u001b[0m \u001b[0mu\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnbrs\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_adj\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mitems\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1423\u001b[0m for v, datadict in nbrs.items())\n\u001b[1;32m 1424\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mG\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.5/dist-packages/networkx/classes/graph.py\u001b[0m in \u001b[0;36madd_edges_from\u001b[0;34m(self, ebunch_to_add, **attr)\u001b[0m\n\u001b[1;32m 926\u001b[0m \u001b[0mne\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 927\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mne\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;36m3\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 928\u001b[0;31m \u001b[0mu\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mv\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdd\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 929\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0mne\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;36m2\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 930\u001b[0m \u001b[0mu\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mv\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", @@ -701,8 +701,8 @@ "\n", "import sys\n", "sys.path.insert(0, \"../\")\n", - "from pygraph.utils.utils import 
kernel_train_test\n", - "from pygraph.kernels.cyclicPatternKernel import cyclicpatternkernel\n", + "from gklearn.utils.utils import kernel_train_test\n", + "from gklearn.kernels.cyclicPatternKernel import cyclicpatternkernel\n", "\n", "import numpy as np\n", "\n", @@ -1028,8 +1028,8 @@ "\n", "import sys\n", "sys.path.insert(0, \"../\")\n", - "from pygraph.utils.utils import kernel_train_test\n", - "from pygraph.kernels.cyclicPatternKernel import cyclicpatternkernel\n", + "from gklearn.utils.utils import kernel_train_test\n", + "from gklearn.kernels.cyclicPatternKernel import cyclicpatternkernel\n", "\n", "import numpy as np\n", "\n", @@ -1119,8 +1119,8 @@ "\n", "import sys\n", "sys.path.insert(0, \"../\")\n", - "from pygraph.utils.utils import kernel_train_test\n", - "from pygraph.kernels.cyclicPatternKernel import cyclicpatternkernel\n", + "from gklearn.utils.utils import kernel_train_test\n", + "from gklearn.kernels.cyclicPatternKernel import cyclicpatternkernel\n", "\n", "import numpy as np\n", "\n", @@ -1207,8 +1207,8 @@ "import matplotlib.pyplot as plt\n", "import sys\n", "sys.path.insert(0, \"../\")\n", - "from pygraph.utils.graphfiles import loadDataset\n", - "from pygraph.kernels.cyclicPatternKernel import cyclicpatternkernel\n", + "from gklearn.utils.graphfiles import loadDataset\n", + "from gklearn.kernels.cyclicPatternKernel import cyclicpatternkernel\n", "\n", "# datafile = '../../../../datasets/NCI-HIV/AIDO99SD.sdf'\n", "# datafile_y = '../../../../datasets/NCI-HIV/aids_conc_may04.txt'\n", @@ -1277,8 +1277,8 @@ "\n", "import sys\n", "sys.path.insert(0, \"../\")\n", - "from pygraph.utils.utils import kernel_train_test\n", - "from pygraph.kernels.cyclicPatternKernel import cyclicpatternkernel\n", + "from gklearn.utils.utils import kernel_train_test\n", + "from gklearn.kernels.cyclicPatternKernel import cyclicpatternkernel\n", "\n", "import numpy as np\n", "\n", diff --git a/notebooks/unfinished/run_treeletkernel_acyclic.ipynb 
b/notebooks/unfinished/run_treeletkernel_acyclic.ipynb index 662f3da..9c08a20 100644 --- a/notebooks/unfinished/run_treeletkernel_acyclic.ipynb +++ b/notebooks/unfinished/run_treeletkernel_acyclic.ipynb @@ -158,8 +158,8 @@ "import numpy as np\n", "import sys\n", "sys.path.insert(0, \"../\")\n", - "from pygraph.utils.model_selection_precomputed import model_selection_for_precomputed_kernel\n", - "from pygraph.kernels.treeletKernel import treeletkernel\n", + "from gklearn.utils.model_selection_precomputed import model_selection_for_precomputed_kernel\n", + "from gklearn.kernels.treeletKernel import treeletkernel\n", "\n", "datafile = '../../../../datasets/acyclic/Acyclic/dataset_bps.ds'\n", "estimator = treeletkernel\n", @@ -186,8 +186,8 @@ "\n", "import sys\n", "sys.path.insert(0, \"../\")\n", - "from pygraph.utils.utils import kernel_train_test\n", - "from pygraph.kernels.treeletKernel import treeletkernel\n", + "from gklearn.utils.utils import kernel_train_test\n", + "from gklearn.kernels.treeletKernel import treeletkernel\n", "\n", "datafile = '../../../../datasets/acyclic/Acyclic/dataset_bps.ds'\n", "kernel_file_path = 'kernelmatrices_path_acyclic/'\n", @@ -429,7 +429,7 @@ "from collections import Counter\n", "import networkx as nx\n", "sys.path.insert(0, \"../\")\n", - "from pygraph.utils.graphfiles import loadDataset\n", + "from gklearn.utils.graphfiles import loadDataset\n", "\n", "\n", "def main(): \n", diff --git a/notebooks/unfinished/run_treepatternkernel.ipynb b/notebooks/unfinished/run_treepatternkernel.ipynb index bc97773..911ebb7 100644 --- a/notebooks/unfinished/run_treepatternkernel.ipynb +++ b/notebooks/unfinished/run_treepatternkernel.ipynb @@ -2721,7 +2721,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "../pygraph/utils/model_selection_precomputed.py:101: RuntimeWarning: overflow encountered in double_scalars\n", + "../gklearn/utils/model_selection_precomputed.py:101: RuntimeWarning: overflow encountered in double_scalars\n", " 
Kmatrix[i][j] /= np.sqrt(Kmatrix_diag[i] * Kmatrix_diag[j])\n" ] }, @@ -2782,7 +2782,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "../pygraph/kernels/treePatternKernel.py:202: RuntimeWarning: overflow encountered in double_scalars\n", + "../gklearn/kernels/treePatternKernel.py:202: RuntimeWarning: overflow encountered in double_scalars\n", " + '.' + str(pair[1])]\n" ] }, @@ -2803,7 +2803,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "../pygraph/utils/model_selection_precomputed.py:101: RuntimeWarning: invalid value encountered in double_scalars\n", + "../gklearn/utils/model_selection_precomputed.py:101: RuntimeWarning: invalid value encountered in double_scalars\n", " Kmatrix[i][j] /= np.sqrt(Kmatrix_diag[i] * Kmatrix_diag[j])\n" ] }, @@ -3432,7 +3432,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "../pygraph/kernels/treePatternKernel.py:182: RuntimeWarning: overflow encountered in double_scalars\n", + "../gklearn/kernels/treePatternKernel.py:182: RuntimeWarning: overflow encountered in double_scalars\n", " + '.' 
+ str(pair[1])]\n" ] }, @@ -3453,7 +3453,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "../pygraph/kernels/treePatternKernel.py:185: RuntimeWarning: invalid value encountered in multiply\n", + "../gklearn/kernels/treePatternKernel.py:185: RuntimeWarning: invalid value encountered in multiply\n", " node_label]) * (1 + kh)\n" ] }, @@ -3670,7 +3670,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "../pygraph/kernels/treePatternKernel.py:205: RuntimeWarning: overflow encountered in double_scalars\n", + "../gklearn/kernels/treePatternKernel.py:205: RuntimeWarning: overflow encountered in double_scalars\n", " G1.node[n1][node_label] == G2.node[n2][node_label])\n" ] }, @@ -3685,7 +3685,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "../pygraph/kernels/treePatternKernel.py:202: RuntimeWarning: invalid value encountered in double_scalars\n", + "../gklearn/kernels/treePatternKernel.py:202: RuntimeWarning: invalid value encountered in double_scalars\n", " + '.' 
+ str(pair[1])]\n" ] }, @@ -3700,7 +3700,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "../pygraph/kernels/treePatternKernel.py:205: RuntimeWarning: invalid value encountered in double_scalars\n", + "../gklearn/kernels/treePatternKernel.py:205: RuntimeWarning: invalid value encountered in double_scalars\n", " G1.node[n1][node_label] == G2.node[n2][node_label])\n" ] }, @@ -3715,7 +3715,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "../pygraph/kernels/treePatternKernel.py:203: RuntimeWarning: overflow encountered in double_scalars\n", + "../gklearn/kernels/treePatternKernel.py:203: RuntimeWarning: overflow encountered in double_scalars\n", " kh += kh_tmp\n" ] }, @@ -3730,7 +3730,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "../pygraph/kernels/treePatternKernel.py:236: RuntimeWarning: overflow encountered in double_scalars\n", + "../gklearn/kernels/treePatternKernel.py:236: RuntimeWarning: overflow encountered in double_scalars\n", " kernel = sum(all_kh.values())\n" ] }, @@ -4025,7 +4025,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "../pygraph/kernels/treePatternKernel.py:222: RuntimeWarning: overflow encountered in double_scalars\n", + "../gklearn/kernels/treePatternKernel.py:222: RuntimeWarning: overflow encountered in double_scalars\n", " + '.' 
+ str(pair[1])]\n" ] }, @@ -4046,7 +4046,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "../pygraph/kernels/treePatternKernel.py:225: RuntimeWarning: invalid value encountered in multiply\n", + "../gklearn/kernels/treePatternKernel.py:225: RuntimeWarning: invalid value encountered in multiply\n", " G1.node[n1][node_label] == G2.node[n2][node_label])\n" ] }, @@ -4061,7 +4061,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "../pygraph/kernels/treePatternKernel.py:222: RuntimeWarning: invalid value encountered in double_scalars\n", + "../gklearn/kernels/treePatternKernel.py:222: RuntimeWarning: invalid value encountered in double_scalars\n", " + '.' + str(pair[1])]\n" ] }, @@ -4264,7 +4264,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "../pygraph/kernels/treePatternKernel.py:183: RuntimeWarning: overflow encountered in double_scalars\n", + "../gklearn/kernels/treePatternKernel.py:183: RuntimeWarning: overflow encountered in double_scalars\n", " kh += 1 / lmda * kh_tmp\n" ] }, @@ -4659,7 +4659,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "../pygraph/kernels/treePatternKernel.py:223: RuntimeWarning: overflow encountered in double_scalars\n", + "../gklearn/kernels/treePatternKernel.py:223: RuntimeWarning: overflow encountered in double_scalars\n", " kh += 1 / lmda * kh_tmp\n" ] }, @@ -4731,7 +4731,7 @@ "\nDuring handling of the above exception, another exception occurred:\n", "\u001b[0;31mLinAlgError\u001b[0m Traceback (most recent call last)", "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 31\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 32\u001b[0m model_selection_for_precomputed_kernel(datafile, estimator, param_grid_precomputed, param_grid, \n\u001b[0;32m---> 33\u001b[0;31m 'regression', NUM_TRIALS=30)\n\u001b[0m", - "\u001b[0;32m/media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/utils/model_selection_precomputed.py\u001b[0m in 
\u001b[0;36mmodel_selection_for_precomputed_kernel\u001b[0;34m(datafile, estimator, param_grid_precomputed, param_grid, model_type, NUM_TRIALS, datafile_y)\u001b[0m\n\u001b[1;32m 165\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mtrain_index\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mvalid_index\u001b[0m \u001b[0;32min\u001b[0m \u001b[0minner_cv\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msplit\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mX_app\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 166\u001b[0m KR.fit(X_app[train_index, :]\n\u001b[0;32m--> 167\u001b[0;31m [:, train_index], y_app[train_index])\n\u001b[0m\u001b[1;32m 168\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 169\u001b[0m \u001b[0;31m# predict on the train, validation and test set\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/media/ljia/DATA/research-repo/codes/Linlin/py-graph/gklearn/utils/model_selection_precomputed.py\u001b[0m in \u001b[0;36mmodel_selection_for_precomputed_kernel\u001b[0;34m(datafile, estimator, param_grid_precomputed, param_grid, model_type, NUM_TRIALS, datafile_y)\u001b[0m\n\u001b[1;32m 165\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mtrain_index\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mvalid_index\u001b[0m \u001b[0;32min\u001b[0m \u001b[0minner_cv\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msplit\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mX_app\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 166\u001b[0m KR.fit(X_app[train_index, :]\n\u001b[0;32m--> 167\u001b[0;31m [:, train_index], y_app[train_index])\n\u001b[0m\u001b[1;32m 168\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 169\u001b[0m \u001b[0;31m# predict on the train, validation and test set\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m~/.local/lib/python3.5/site-packages/sklearn/kernel_ridge.py\u001b[0m in \u001b[0;36mfit\u001b[0;34m(self, X, y, 
sample_weight)\u001b[0m\n\u001b[1;32m 160\u001b[0m self.dual_coef_ = _solve_cholesky_kernel(K, y, alpha,\n\u001b[1;32m 161\u001b[0m \u001b[0msample_weight\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 162\u001b[0;31m copy)\n\u001b[0m\u001b[1;32m 163\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mravel\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 164\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdual_coef_\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdual_coef_\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mravel\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m~/.local/lib/python3.5/site-packages/sklearn/linear_model/ridge.py\u001b[0m in \u001b[0;36m_solve_cholesky_kernel\u001b[0;34m(K, y, alpha, sample_weight, copy)\u001b[0m\n\u001b[1;32m 154\u001b[0m warnings.warn(\"Singular matrix in solving dual problem. Using \"\n\u001b[1;32m 155\u001b[0m \"least-squares solution instead.\")\n\u001b[0;32m--> 156\u001b[0;31m \u001b[0mdual_coef\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mlinalg\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mlstsq\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mK\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0my\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 157\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 158\u001b[0m \u001b[0;31m# K is expensive to compute and store in memory so change it back in\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m~/.local/lib/python3.5/site-packages/scipy/linalg/basic.py\u001b[0m in \u001b[0;36mlstsq\u001b[0;34m(a, b, cond, overwrite_a, overwrite_b, check_finite, lapack_driver)\u001b[0m\n\u001b[1;32m 1241\u001b[0m cond, False, False)\n\u001b[1;32m 1242\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0minfo\u001b[0m \u001b[0;34m>\u001b[0m 
\u001b[0;36m0\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1243\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mLinAlgError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"SVD did not converge in Linear Least Squares\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1244\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0minfo\u001b[0m \u001b[0;34m<\u001b[0m \u001b[0;36m0\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1245\u001b[0m raise ValueError('illegal value in %d-th argument of internal %s'\n", @@ -4745,8 +4745,8 @@ "import numpy as np\n", "import sys\n", "sys.path.insert(0, \"../\")\n", - "from pygraph.utils.model_selection_precomputed import model_selection_for_precomputed_kernel\n", - "from pygraph.kernels.treePatternKernel import treepatternkernel\n", + "from gklearn.utils.model_selection_precomputed import model_selection_for_precomputed_kernel\n", + "from gklearn.kernels.treePatternKernel import treepatternkernel\n", "\n", "datafile = '../../../../datasets/acyclic/Acyclic/dataset_bps.ds'\n", "estimator = treepatternkernel\n", @@ -7724,8 +7724,8 @@ "\n", "import sys\n", "sys.path.insert(0, \"../\")\n", - "from pygraph.utils.utils import kernel_train_test\n", - "from pygraph.kernels.treePatternKernel import treepatternkernel\n", + "from gklearn.utils.utils import kernel_train_test\n", + "from gklearn.kernels.treePatternKernel import treepatternkernel\n", "\n", "import numpy as np\n", "\n", @@ -7836,10 +7836,10 @@ "import matplotlib.pyplot as plt\n", "import sys\n", "sys.path.insert(0, \"../\")\n", - "from pygraph.utils.graphfiles import loadDataset\n", - "from pygraph.utils.utils import kernel_train_test\n", + "from gklearn.utils.graphfiles import loadDataset\n", + "from gklearn.utils.utils import kernel_train_test\n", "\n", - "from pygraph.kernels.treePatternKernel import treepatternkernel, _treepatternkernel_do\n", + "from gklearn.kernels.treePatternKernel import 
treepatternkernel, _treepatternkernel_do\n", "\n", "import numpy as np\n", "\n", @@ -7914,8 +7914,8 @@ "\n", "import sys\n", "sys.path.insert(0, \"../\")\n", - "from pygraph.utils.utils import kernel_train_test\n", - "from pygraph.kernels.cyclicPatternKernel import cyclicpatternkernel\n", + "from gklearn.utils.utils import kernel_train_test\n", + "from gklearn.kernels.cyclicPatternKernel import cyclicpatternkernel\n", "\n", "import numpy as np\n", "\n", diff --git a/notebooks/unfinished/run_weisfeilerLehmankernel.ipynb b/notebooks/unfinished/run_weisfeilerLehmankernel.ipynb index d1616c1..a9df23d 100644 --- a/notebooks/unfinished/run_weisfeilerLehmankernel.ipynb +++ b/notebooks/unfinished/run_weisfeilerLehmankernel.ipynb @@ -11,8 +11,8 @@ "import numpy as np\n", "import sys\n", "sys.path.insert(0, \"../\")\n", - "from pygraph.utils.model_selection_precomputed import model_selection_for_precomputed_kernel\n", - "from pygraph.kernels.weisfeilerLehmanKernel import weisfeilerlehmankernel\n", + "from gklearn.utils.model_selection_precomputed import model_selection_for_precomputed_kernel\n", + "from gklearn.kernels.weisfeilerLehmanKernel import weisfeilerlehmankernel\n", "\n", "dslist = [ \n", " {'name': 'Acyclic', 'dataset': '../datasets/acyclic/dataset_bps.ds', 'task': 'regression'}, # node_labeled\n", @@ -485,8 +485,8 @@ "import numpy as np\n", "import sys\n", "sys.path.insert(0, \"../\")\n", - "from pygraph.utils.model_selection_precomputed import model_selection_for_precomputed_kernel\n", - "from pygraph.kernels.weisfeilerLehmanKernel import weisfeilerlehmankernel\n", + "from gklearn.utils.model_selection_precomputed import model_selection_for_precomputed_kernel\n", + "from gklearn.kernels.weisfeilerLehmanKernel import weisfeilerlehmankernel\n", "\n", "datafile = '../../../../datasets/acyclic/Acyclic/dataset_bps.ds'\n", "estimator = weisfeilerlehmankernel\n", @@ -1313,8 +1313,8 @@ "import numpy as np\n", "import sys\n", "sys.path.insert(0, \"../\")\n", - "from 
pygraph.utils.model_selection_precomputed import model_selection_for_precomputed_kernel\n", - "from pygraph.kernels.weisfeilerLehmanKernel import weisfeilerlehmankernel\n", + "from gklearn.utils.model_selection_precomputed import model_selection_for_precomputed_kernel\n", + "from gklearn.kernels.weisfeilerLehmanKernel import weisfeilerlehmankernel\n", "\n", "datafile = '../../../../datasets/acyclic/Acyclic/dataset_bps.ds'\n", "estimator = weisfeilerlehmankernel\n", @@ -2203,8 +2203,8 @@ "import numpy as np\n", "import sys\n", "sys.path.insert(0, \"../\")\n", - "from pygraph.utils.model_selection_precomputed import model_selection_for_precomputed_kernel\n", - "from pygraph.kernels.weisfeilerLehmanKernel import weisfeilerlehmankernel\n", + "from gklearn.utils.model_selection_precomputed import model_selection_for_precomputed_kernel\n", + "from gklearn.kernels.weisfeilerLehmanKernel import weisfeilerlehmankernel\n", "\n", "datafile = '../../../../datasets/acyclic/Acyclic/dataset_bps.ds'\n", "estimator = weisfeilerlehmankernel\n", @@ -2532,8 +2532,8 @@ "import numpy as np\n", "import sys\n", "sys.path.insert(0, \"../\")\n", - "from pygraph.utils.utils import kernel_train_test\n", - "from pygraph.kernels.weisfeilerLehmanKernel import weisfeilerlehmankernel, _wl_subtreekernel_do\n", + "from gklearn.utils.utils import kernel_train_test\n", + "from gklearn.kernels.weisfeilerLehmanKernel import weisfeilerlehmankernel, _wl_subtreekernel_do\n", "\n", "datafile = '../../../../datasets/acyclic/Acyclic/dataset_bps.ds'\n", "kernel_file_path = 'kernelmatrices_weisfeilerlehman_subtree_acyclic/'\n", @@ -2684,9 +2684,9 @@ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)", "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 13\u001b[0m \u001b[0mkernel_para\u001b[0m \u001b[0;34m=\u001b[0m 
\u001b[0mdict\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnode_label\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m'atom'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0medge_label\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m'bond_type'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbase_kernel\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m'sp'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 14\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 15\u001b[0;31m \u001b[0mkernel_train_test\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdatafile\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mkernel_file_path\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mweisfeilerlehmankernel\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mkernel_para\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhyper_name\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m'height'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhyper_range\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mlinspace\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m10\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m11\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnormalize\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mFalse\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 16\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 17\u001b[0m \u001b[0;31m# %lprun -f _wl_subtreekernel_do \\\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/utils/utils.py\u001b[0m in \u001b[0;36mkernel_train_test\u001b[0;34m(datafile, kernel_file_path, kernel_func, kernel_para, trials, splits, alpha_grid, C_grid, hyper_name, hyper_range, normalize, datafile_y, model_type)\u001b[0m\n\u001b[1;32m 159\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mhyper_name\u001b[0m \u001b[0;34m!=\u001b[0m \u001b[0;34m''\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 
160\u001b[0m \u001b[0mkernel_para\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mhyper_name\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mhyper_para\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 161\u001b[0;31m \u001b[0mKmatrix\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mrun_time\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mkernel_func\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdataset\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkernel_para\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 162\u001b[0m \u001b[0mkernel_time_list\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mappend\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrun_time\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 163\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mmatplotlib\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpyplot\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mplt\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/weisfeilerLehmanKernel.py\u001b[0m in \u001b[0;36mweisfeilerlehmankernel\u001b[0;34m(node_label, edge_label, height, base_kernel, *args)\u001b[0m\n\u001b[1;32m 56\u001b[0m \u001b[0;31m# for WL shortest path kernel\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 57\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0mbase_kernel\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;34m'sp'\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 58\u001b[0;31m \u001b[0mKmatrix\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_wl_spkernel_do\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnode_label\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0medge_label\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mheight\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 59\u001b[0m 
\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 60\u001b[0m \u001b[0;31m# for WL edge kernel\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/weisfeilerLehmanKernel.py\u001b[0m in \u001b[0;36m_wl_spkernel_do\u001b[0;34m(Gn, node_label, edge_label, height)\u001b[0m\n\u001b[1;32m 254\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0me1\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mGn\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0medges\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mTrue\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 255\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0me2\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mGn\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mj\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0medges\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mTrue\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 256\u001b[0;31m \u001b[0;32mif\u001b[0m \u001b[0me1\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m2\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'cost'\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m!=\u001b[0m \u001b[0;36m0\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0me1\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m2\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'cost'\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0me2\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m2\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'cost'\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me1\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m==\u001b[0m 
\u001b[0me2\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0me1\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0me2\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0me1\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0me2\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0me1\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0me2\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 257\u001b[0m \u001b[0mKmatrix\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mj\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m+=\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 258\u001b[0m \u001b[0mKmatrix\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mj\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mKmatrix\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mj\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/media/ljia/DATA/research-repo/codes/Linlin/py-graph/gklearn/utils/utils.py\u001b[0m in \u001b[0;36mkernel_train_test\u001b[0;34m(datafile, kernel_file_path, kernel_func, kernel_para, trials, splits, alpha_grid, C_grid, hyper_name, hyper_range, normalize, datafile_y, model_type)\u001b[0m\n\u001b[1;32m 159\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mhyper_name\u001b[0m \u001b[0;34m!=\u001b[0m 
\u001b[0;34m''\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 160\u001b[0m \u001b[0mkernel_para\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mhyper_name\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mhyper_para\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 161\u001b[0;31m \u001b[0mKmatrix\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mrun_time\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mkernel_func\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdataset\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkernel_para\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 162\u001b[0m \u001b[0mkernel_time_list\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mappend\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrun_time\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 163\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mmatplotlib\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpyplot\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mplt\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/media/ljia/DATA/research-repo/codes/Linlin/py-graph/gklearn/kernels/weisfeilerLehmanKernel.py\u001b[0m in \u001b[0;36mweisfeilerlehmankernel\u001b[0;34m(node_label, edge_label, height, base_kernel, *args)\u001b[0m\n\u001b[1;32m 56\u001b[0m \u001b[0;31m# for WL shortest path kernel\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 57\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0mbase_kernel\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;34m'sp'\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 58\u001b[0;31m \u001b[0mKmatrix\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_wl_spkernel_do\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnode_label\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0medge_label\u001b[0m\u001b[0;34m,\u001b[0m 
\u001b[0mheight\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 59\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 60\u001b[0m \u001b[0;31m# for WL edge kernel\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/media/ljia/DATA/research-repo/codes/Linlin/py-graph/gklearn/kernels/weisfeilerLehmanKernel.py\u001b[0m in \u001b[0;36m_wl_spkernel_do\u001b[0;34m(Gn, node_label, edge_label, height)\u001b[0m\n\u001b[1;32m 254\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0me1\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mGn\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0medges\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mTrue\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 255\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0me2\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mGn\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mj\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0medges\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mTrue\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 256\u001b[0;31m \u001b[0;32mif\u001b[0m \u001b[0me1\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m2\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'cost'\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m!=\u001b[0m \u001b[0;36m0\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0me1\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m2\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'cost'\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0me2\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m2\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'cost'\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;32mand\u001b[0m 
\u001b[0;34m(\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me1\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0me2\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0me1\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0me2\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0me1\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0me2\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0me1\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0me2\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 257\u001b[0m \u001b[0mKmatrix\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mj\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m+=\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 258\u001b[0m \u001b[0mKmatrix\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mj\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mKmatrix\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mj\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;31mKeyboardInterrupt\u001b[0m: " ] } @@ -2698,8 +2698,8 @@ "import numpy as np\n", "import sys\n", "sys.path.insert(0, \"../\")\n", - "from pygraph.utils.utils import kernel_train_test\n", - "from pygraph.kernels.weisfeilerLehmanKernel import weisfeilerlehmankernel, 
_wl_subtreekernel_do\n", + "from gklearn.utils.utils import kernel_train_test\n", + "from gklearn.kernels.weisfeilerLehmanKernel import weisfeilerlehmankernel, _wl_subtreekernel_do\n", "\n", "datafile = '../../../../datasets/acyclic/Acyclic/dataset_bps.ds'\n", "kernel_file_path = 'kernelmatrices_weisfeilerlehman_subtree_acyclic/'\n", @@ -3033,8 +3033,8 @@ "import numpy as np\n", "import sys\n", "sys.path.insert(0, \"../\")\n", - "from pygraph.utils.utils import kernel_train_test\n", - "from pygraph.kernels.weisfeilerLehmanKernel import weisfeilerlehmankernel, _wl_subtreekernel_do\n", + "from gklearn.utils.utils import kernel_train_test\n", + "from gklearn.kernels.weisfeilerLehmanKernel import weisfeilerlehmankernel, _wl_subtreekernel_do\n", "\n", "datafile = '../../../../datasets/acyclic/Acyclic/dataset_bps.ds'\n", "kernel_file_path = 'kernelmatrices_weisfeilerlehman_subtree_acyclic/'\n", @@ -3150,10 +3150,10 @@ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mTypeError\u001b[0m Traceback (most recent call last)", "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 66\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlabelset1\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 67\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlabelset2\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 68\u001b[0;31m \u001b[0mkernel\u001b[0m \u001b[0;34m+=\u001b[0m \u001b[0mspkernel\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mG1\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mG2\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 69\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mkernel\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 70\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", - 
"\u001b[0;32m/media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/spKernel.py\u001b[0m in \u001b[0;36mspkernel\u001b[0;34m(edge_weight, *args)\u001b[0m\n\u001b[1;32m 39\u001b[0m \u001b[0mstart_time\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtime\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtime\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 40\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 41\u001b[0;31m \u001b[0mGn\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m \u001b[0mgetSPGraph\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mG\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0medge_weight\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0medge_weight\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mG\u001b[0m \u001b[0;32min\u001b[0m \u001b[0margs\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m]\u001b[0m \u001b[0;31m# get shortest path graphs of Gn\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 42\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 43\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mi\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mGn\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/kernels/spKernel.py\u001b[0m in \u001b[0;36m\u001b[0;34m(.0)\u001b[0m\n\u001b[1;32m 39\u001b[0m \u001b[0mstart_time\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtime\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtime\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 40\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 41\u001b[0;31m \u001b[0mGn\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m 
\u001b[0mgetSPGraph\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mG\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0medge_weight\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0medge_weight\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mG\u001b[0m \u001b[0;32min\u001b[0m \u001b[0margs\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m]\u001b[0m \u001b[0;31m# get shortest path graphs of Gn\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 42\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 43\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mi\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mGn\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/utils/utils.py\u001b[0m in \u001b[0;36mgetSPGraph\u001b[0;34m(G, edge_weight)\u001b[0m\n\u001b[1;32m 35\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0mBorgwardt\u001b[0m \u001b[0mKM\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mKriegel\u001b[0m \u001b[0mHP\u001b[0m\u001b[0;34m.\u001b[0m \u001b[0mShortest\u001b[0m\u001b[0;34m-\u001b[0m\u001b[0mpath\u001b[0m \u001b[0mkernels\u001b[0m \u001b[0mon\u001b[0m \u001b[0mgraphs\u001b[0m\u001b[0;34m.\u001b[0m \u001b[0mInData\u001b[0m \u001b[0mMining\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mFifth\u001b[0m \u001b[0mIEEE\u001b[0m \u001b[0mInternational\u001b[0m \u001b[0mConference\u001b[0m \u001b[0mon\u001b[0m \u001b[0;36m2005\u001b[0m \u001b[0mNov\u001b[0m \u001b[0;36m27\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mpp\u001b[0m\u001b[0;34m.\u001b[0m \u001b[0;36m8\u001b[0m\u001b[0;34m-\u001b[0m\u001b[0mpp\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m \u001b[0mIEEE\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 36\u001b[0m 
\"\"\"\n\u001b[0;32m---> 37\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfloydTransformation\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mG\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0medge_weight\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0medge_weight\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 38\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 39\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mfloydTransformation\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mG\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0medge_weight\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m'bond_type'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/media/ljia/DATA/research-repo/codes/Linlin/py-graph/pygraph/utils/utils.py\u001b[0m in \u001b[0;36mfloydTransformation\u001b[0;34m(G, edge_weight)\u001b[0m\n\u001b[1;32m 56\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0mBorgwardt\u001b[0m \u001b[0mKM\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mKriegel\u001b[0m \u001b[0mHP\u001b[0m\u001b[0;34m.\u001b[0m \u001b[0mShortest\u001b[0m\u001b[0;34m-\u001b[0m\u001b[0mpath\u001b[0m \u001b[0mkernels\u001b[0m \u001b[0mon\u001b[0m \u001b[0mgraphs\u001b[0m\u001b[0;34m.\u001b[0m \u001b[0mInData\u001b[0m \u001b[0mMining\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mFifth\u001b[0m \u001b[0mIEEE\u001b[0m \u001b[0mInternational\u001b[0m \u001b[0mConference\u001b[0m \u001b[0mon\u001b[0m \u001b[0;36m2005\u001b[0m \u001b[0mNov\u001b[0m \u001b[0;36m27\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mpp\u001b[0m\u001b[0;34m.\u001b[0m \u001b[0;36m8\u001b[0m\u001b[0;34m-\u001b[0m\u001b[0mpp\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m \u001b[0mIEEE\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 57\u001b[0m \"\"\"\n\u001b[0;32m---> 58\u001b[0;31m \u001b[0mspMatrix\u001b[0m \u001b[0;34m=\u001b[0m 
\u001b[0mnx\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfloyd_warshall_numpy\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mG\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mweight\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0medge_weight\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 59\u001b[0m \u001b[0mS\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnx\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mGraph\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 60\u001b[0m \u001b[0mS\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0madd_nodes_from\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mG\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnodes\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/media/ljia/DATA/research-repo/codes/Linlin/py-graph/gklearn/kernels/spKernel.py\u001b[0m in \u001b[0;36mspkernel\u001b[0;34m(edge_weight, *args)\u001b[0m\n\u001b[1;32m 39\u001b[0m \u001b[0mstart_time\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtime\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtime\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 40\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 41\u001b[0;31m \u001b[0mGn\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m \u001b[0mgetSPGraph\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mG\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0medge_weight\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0medge_weight\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mG\u001b[0m \u001b[0;32min\u001b[0m \u001b[0margs\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m]\u001b[0m \u001b[0;31m# get shortest path graphs of Gn\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 42\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 43\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mi\u001b[0m 
\u001b[0;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mGn\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/media/ljia/DATA/research-repo/codes/Linlin/py-graph/gklearn/kernels/spKernel.py\u001b[0m in \u001b[0;36m\u001b[0;34m(.0)\u001b[0m\n\u001b[1;32m 39\u001b[0m \u001b[0mstart_time\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtime\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtime\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 40\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 41\u001b[0;31m \u001b[0mGn\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m \u001b[0mgetSPGraph\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mG\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0medge_weight\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0medge_weight\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mG\u001b[0m \u001b[0;32min\u001b[0m \u001b[0margs\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m]\u001b[0m \u001b[0;31m# get shortest path graphs of Gn\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 42\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 43\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mi\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mGn\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/media/ljia/DATA/research-repo/codes/Linlin/py-graph/gklearn/utils/utils.py\u001b[0m in \u001b[0;36mgetSPGraph\u001b[0;34m(G, edge_weight)\u001b[0m\n\u001b[1;32m 35\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0mBorgwardt\u001b[0m 
\u001b[0mKM\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mKriegel\u001b[0m \u001b[0mHP\u001b[0m\u001b[0;34m.\u001b[0m \u001b[0mShortest\u001b[0m\u001b[0;34m-\u001b[0m\u001b[0mpath\u001b[0m \u001b[0mkernels\u001b[0m \u001b[0mon\u001b[0m \u001b[0mgraphs\u001b[0m\u001b[0;34m.\u001b[0m \u001b[0mInData\u001b[0m \u001b[0mMining\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mFifth\u001b[0m \u001b[0mIEEE\u001b[0m \u001b[0mInternational\u001b[0m \u001b[0mConference\u001b[0m \u001b[0mon\u001b[0m \u001b[0;36m2005\u001b[0m \u001b[0mNov\u001b[0m \u001b[0;36m27\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mpp\u001b[0m\u001b[0;34m.\u001b[0m \u001b[0;36m8\u001b[0m\u001b[0;34m-\u001b[0m\u001b[0mpp\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m \u001b[0mIEEE\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 36\u001b[0m \"\"\"\n\u001b[0;32m---> 37\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfloydTransformation\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mG\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0medge_weight\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0medge_weight\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 38\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 39\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mfloydTransformation\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mG\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0medge_weight\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m'bond_type'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/media/ljia/DATA/research-repo/codes/Linlin/py-graph/gklearn/utils/utils.py\u001b[0m in \u001b[0;36mfloydTransformation\u001b[0;34m(G, edge_weight)\u001b[0m\n\u001b[1;32m 56\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0mBorgwardt\u001b[0m \u001b[0mKM\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mKriegel\u001b[0m \u001b[0mHP\u001b[0m\u001b[0;34m.\u001b[0m \u001b[0mShortest\u001b[0m\u001b[0;34m-\u001b[0m\u001b[0mpath\u001b[0m 
\u001b[0mkernels\u001b[0m \u001b[0mon\u001b[0m \u001b[0mgraphs\u001b[0m\u001b[0;34m.\u001b[0m \u001b[0mInData\u001b[0m \u001b[0mMining\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mFifth\u001b[0m \u001b[0mIEEE\u001b[0m \u001b[0mInternational\u001b[0m \u001b[0mConference\u001b[0m \u001b[0mon\u001b[0m \u001b[0;36m2005\u001b[0m \u001b[0mNov\u001b[0m \u001b[0;36m27\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mpp\u001b[0m\u001b[0;34m.\u001b[0m \u001b[0;36m8\u001b[0m\u001b[0;34m-\u001b[0m\u001b[0mpp\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m \u001b[0mIEEE\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 57\u001b[0m \"\"\"\n\u001b[0;32m---> 58\u001b[0;31m \u001b[0mspMatrix\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnx\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfloyd_warshall_numpy\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mG\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mweight\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0medge_weight\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 59\u001b[0m \u001b[0mS\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnx\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mGraph\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 60\u001b[0m \u001b[0mS\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0madd_nodes_from\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mG\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnodes\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.5/dist-packages/networkx/algorithms/shortest_paths/dense.py\u001b[0m in \u001b[0;36mfloyd_warshall_numpy\u001b[0;34m(G, nodelist, weight)\u001b[0m\n\u001b[1;32m 52\u001b[0m \u001b[0;31m# nonedges are not given the value 0 as well.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 53\u001b[0m A = nx.to_numpy_matrix(G, nodelist=nodelist, 
multigraph_weight=min,\n\u001b[0;32m---> 54\u001b[0;31m weight=weight, nonedge=np.inf)\n\u001b[0m\u001b[1;32m 55\u001b[0m \u001b[0mn\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mm\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mA\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 56\u001b[0m \u001b[0mI\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0midentity\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mn\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.5/dist-packages/networkx/convert_matrix.py\u001b[0m in \u001b[0;36mto_numpy_matrix\u001b[0;34m(G, nodelist, dtype, order, multigraph_weight, weight, nonedge)\u001b[0m\n\u001b[1;32m 446\u001b[0m A = to_numpy_array(G, nodelist=nodelist, dtype=dtype, order=order,\n\u001b[1;32m 447\u001b[0m \u001b[0mmultigraph_weight\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mmultigraph_weight\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mweight\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mweight\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 448\u001b[0;31m nonedge=nonedge)\n\u001b[0m\u001b[1;32m 449\u001b[0m \u001b[0mM\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0masmatrix\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mA\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdtype\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mdtype\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 450\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mM\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.5/dist-packages/networkx/convert_matrix.py\u001b[0m in \u001b[0;36mto_numpy_array\u001b[0;34m(G, nodelist, dtype, order, multigraph_weight, weight, nonedge)\u001b[0m\n\u001b[1;32m 1061\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1062\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mnodelist\u001b[0m \u001b[0;32mis\u001b[0m 
\u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1063\u001b[0;31m \u001b[0mnodelist\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mlist\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mG\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1064\u001b[0m \u001b[0mnodeset\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mset\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnodelist\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1065\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnodelist\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m!=\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnodeset\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", @@ -3165,8 +3165,8 @@ "import sys\n", "import networkx as nx\n", "sys.path.insert(0, \"../\")\n", - "from pygraph.utils.graphfiles import loadDataset\n", - "from pygraph.kernels.spkernel import spkernel\n", + "from gklearn.utils.graphfiles import loadDataset\n", + "from gklearn.kernels.spkernel import spkernel\n", "\n", "import matplotlib.pyplot as plt\n", "\n", @@ -3435,8 +3435,8 @@ "import numpy as np\n", "import time\n", "\n", - "from pygraph.kernels.spkernel import spkernel\n", - "from pygraph.kernels.pathKernel import pathkernel\n", + "from gklearn.kernels.spkernel import spkernel\n", + "from gklearn.kernels.pathKernel import pathkernel\n", "\n", "def weisfeilerlehmankernel(*args, height = 0, base_kernel = 'subtree'):\n", " \"\"\"Calculate Weisfeiler-Lehman kernels between graphs.\n", @@ -3774,8 +3774,8 @@ "from collections import Counter\n", "import networkx as nx\n", "sys.path.insert(0, \"../\")\n", - "from pygraph.utils.graphfiles import loadDataset\n", - "from pygraph.kernels.spkernel import spkernel\n", + "from gklearn.utils.graphfiles import loadDataset\n", + "from gklearn.kernels.spkernel import spkernel\n", "\n", "dataset, y = 
loadDataset(\"../../../../datasets/acyclic/Acyclic/dataset_bps.ds\")\n", "G1 = dataset[15]\n", diff --git a/notebooks/utils/check_gm_gstsp.py b/notebooks/utils/check_gm_gstsp.py index 61a683e..c6f0389 100644 --- a/notebooks/utils/check_gm_gstsp.py +++ b/notebooks/utils/check_gm_gstsp.py @@ -19,7 +19,7 @@ import sys sys.path.insert(0, "../../") import numpy as np import networkx as nx -from pygraph.utils.graphfiles import loadDataset +from gklearn.utils.graphfiles import loadDataset import matplotlib.pyplot as plt from numpy.linalg import eig diff --git a/notebooks/utils/get_dataset_attributes.ipynb b/notebooks/utils/get_dataset_attributes.ipynb index 69430b4..7e0bdb0 100644 --- a/notebooks/utils/get_dataset_attributes.ipynb +++ b/notebooks/utils/get_dataset_attributes.ipynb @@ -575,8 +575,8 @@ "source": [ "import sys\n", "sys.path.insert(0, \"../../\")\n", - "from pygraph.utils.graphfiles import loadDataset\n", - "from pygraph.utils.graphdataset import get_dataset_attributes\n", + "from gklearn.utils.graphfiles import loadDataset\n", + "from gklearn.utils.graphdataset import get_dataset_attributes\n", "\n", "dslist = [\n", " {'name': 'Acyclic', 'dataset': '../../datasets/acyclic/dataset_bps.ds',},\n", diff --git a/notebooks/utils/get_dataset_attributes.py b/notebooks/utils/get_dataset_attributes.py index bcc85d1..911f573 100644 --- a/notebooks/utils/get_dataset_attributes.py +++ b/notebooks/utils/get_dataset_attributes.py @@ -8,8 +8,8 @@ Created on Wed Oct 17 16:07:38 2018 import sys sys.path.insert(0, "../../") -from pygraph.utils.graphfiles import loadDataset -from pygraph.utils.graphdataset import get_dataset_attributes +from gklearn.utils.graphfiles import loadDataset +from gklearn.utils.graphdataset import get_dataset_attributes dslist = [ {'name': 'Acyclic', 'dataset': '../../datasets/acyclic/dataset_bps.ds',}, diff --git a/notebooks/utils/plot_all_graphs.ipynb b/notebooks/utils/plot_all_graphs.ipynb index ef5f76b..4831626 100644 --- 
a/notebooks/utils/plot_all_graphs.ipynb +++ b/notebooks/utils/plot_all_graphs.ipynb @@ -3678,7 +3678,7 @@ "import matplotlib.pyplot as plt\n", "\n", "import networkx as nx\n", - "from pygraph.utils.graphfiles import loadDataset\n", + "from gklearn.utils.graphfiles import loadDataset\n", "\n", "\n", "def main(): \n", @@ -6711,7 +6711,7 @@ "import matplotlib.pyplot as plt\n", "\n", "import networkx as nx\n", - "from pygraph.utils.graphfiles import loadDataset\n", + "from gklearn.utils.graphfiles import loadDataset\n", "\n", "\n", "def main(): \n", @@ -8102,7 +8102,7 @@ "import matplotlib.pyplot as plt\n", "\n", "import networkx as nx\n", - "from pygraph.utils.graphfiles import loadDataset\n", + "from gklearn.utils.graphfiles import loadDataset\n", "\n", "\n", "def main(): \n", @@ -10015,7 +10015,7 @@ "import matplotlib.pyplot as plt\n", "\n", "import networkx as nx\n", - "from pygraph.utils.graphfiles import loadDataset\n", + "from gklearn.utils.graphfiles import loadDataset\n", "\n", "\n", "def main(): \n", @@ -13816,7 +13816,7 @@ "import matplotlib.pyplot as plt\n", "\n", "import networkx as nx\n", - "from pygraph.utils.graphfiles import loadDataset\n", + "from gklearn.utils.graphfiles import loadDataset\n", "\n", "\n", "def main(): \n", @@ -17241,7 +17241,7 @@ "import matplotlib.pyplot as plt\n", "\n", "import networkx as nx\n", - "from pygraph.utils.graphfiles import loadDataset\n", + "from gklearn.utils.graphfiles import loadDataset\n", "\n", "\n", "def main(): \n", @@ -27505,7 +27505,7 @@ "import numpy as np\n", "\n", "import networkx as nx\n", - "from pygraph.utils.graphfiles import loadDataset\n", + "from gklearn.utils.graphfiles import loadDataset\n", "\n", "def nb2letter(nb):\n", " if nb == 0:\n", @@ -30990,7 +30990,7 @@ "import numpy as np\n", "\n", "import networkx as nx\n", - "from pygraph.utils.graphfiles import loadDataset\n", + "from gklearn.utils.graphfiles import loadDataset\n", "\n", "def nb2letter(nb):\n", " if nb == 0:\n", @@ -43073,7 +43073,7 @@ 
"import matplotlib.pyplot as plt\n", "\n", "import networkx as nx\n", - "from pygraph.utils.graphfiles import loadDataset\n", + "from gklearn.utils.graphfiles import loadDataset\n", "\n", "\n", "def main(): \n", @@ -43223,7 +43223,7 @@ "import matplotlib.pyplot as plt\n", "\n", "import networkx as nx\n", - "from pygraph.utils.graphfiles import loadDataset\n", + "from gklearn.utils.graphfiles import loadDataset\n", "\n", "\n", "def main(): \n", @@ -44891,7 +44891,7 @@ "import matplotlib.pyplot as plt\n", "\n", "import networkx as nx\n", - "from pygraph.utils.graphfiles import loadDataset\n", + "from gklearn.utils.graphfiles import loadDataset\n", "\n", "\n", "def main(): \n", @@ -45490,7 +45490,7 @@ "import matplotlib.pyplot as plt\n", "\n", "import networkx as nx\n", - "from pygraph.utils.graphfiles import loadDataset\n", + "from gklearn.utils.graphfiles import loadDataset\n", "\n", "\n", "def main(): \n", @@ -46169,7 +46169,7 @@ "import matplotlib.pyplot as plt\n", "\n", "import networkx as nx\n", - "from pygraph.utils.graphfiles import loadDataset\n", + "from gklearn.utils.graphfiles import loadDataset\n", "\n", "\n", "def main(): \n", @@ -46415,7 +46415,7 @@ "import matplotlib.pyplot as plt\n", "\n", "import networkx as nx\n", - "from pygraph.utils.graphfiles import loadDataset\n", + "from gklearn.utils.graphfiles import loadDataset\n", "\n", "\n", "def main(): \n", diff --git a/notebooks/utils/plot_all_graphs.py b/notebooks/utils/plot_all_graphs.py new file mode 100644 index 0000000..1bb1f07 --- /dev/null +++ b/notebooks/utils/plot_all_graphs.py @@ -0,0 +1,161 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Tue Jan 7 15:25:36 2020 + +@author: ljia +""" + +# draw all the praphs +import sys +import pathlib +sys.path.insert(0, "../../") + +import matplotlib.pyplot as plt + +import networkx as nx +import numpy as np +from gklearn.utils.graphfiles import loadDataset, loadGXL + + +def main(): +# # monoterpenoides dataset. 
+# dataset, y = loadDataset("../../datasets/monoterpenoides/dataset_10+.ds") +# for idx in [12,22,29,74]: +# print(idx) +# print(nx.get_node_attributes(dataset[idx], 'atom')) +# edge_labels = nx.get_edge_attributes(dataset[idx], 'bond_type') +# print(edge_labels) +# pos=nx.spring_layout(dataset[idx]) +# nx.draw(dataset[idx], pos, labels=nx.get_node_attributes(dataset[idx], 'atom'), with_labels=True) +# edge_labels = nx.draw_networkx_edge_labels(dataset[idx], pos, +# edge_labels=edge_labels, +# font_color='pink') +# plt.show() + + +# # Fingerprint dataset. +# dataset = '/media/ljia/DATA/research-repo/codes/Linlin/gedlib/data/collections/Fingerprint.xml' +# graph_dir = '/media/ljia/DATA/research-repo/codes/Linlin/gedlib/data/datasets/Fingerprint/data/' +# Gn, y_all = loadDataset(dataset, extra_params=graph_dir) +## dataset = '/media/ljia/DATA/research-repo/codes/Linlin/graphkit-learn/datasets/Fingerprint/Fingerprint_A.txt' +## Gn, y_all = loadDataset(dataset) +# +# idx_no_node = [] +# idx_no_edge = [] +# idx_no_both = [] +# for idx, G in enumerate(Gn): +# if nx.number_of_nodes(G) == 0: +# idx_no_node.append(idx) +# if nx.number_of_edges(G) == 0: +# idx_no_both.append(idx) +# if nx.number_of_edges(G) == 0: +# idx_no_edge.append(idx) +## file_prefix = '../results/graph_images/Fingerprint/' + G.graph['name'] +## draw_Fingerprint_graph(Gn[idx], file_prefix=file_prefix, save=True) +# print('nb_no_node: ', len(idx_no_node)) +# print('nb_no_edge: ', len(idx_no_edge)) +# print('nb_no_both: ', len(idx_no_both)) +# print('idx_no_node: ', idx_no_node) +# print('idx_no_edge: ', idx_no_edge) +# print('idx_no_both: ', idx_no_both) +# +# for idx in [0, 10, 100, 1000]: +# print(idx) +# print(Gn[idx].nodes(data=True)) +# print(Gn[idx].edges(data=True)) +# draw_Fingerprint_graph(Gn[idx], file_prefix='') + + + # SYNTHETIC dataset. 
+ dataset = '/media/ljia/DATA/research-repo/codes/Linlin/graphkit-learn/datasets/SYNTHETICnew/SYNTHETICnew_A.txt' + Gn, y_all = loadDataset(dataset) + + idx_no_node = [] + idx_no_edge = [] + idx_no_both = [] + for idx, G in enumerate(Gn): + if nx.number_of_nodes(G) == 0: + idx_no_node.append(idx) + if nx.number_of_edges(G) == 0: + idx_no_both.append(idx) + if nx.number_of_edges(G) == 0: + idx_no_edge.append(idx) +# file_prefix = '../results/graph_images/SYNTHETIC/' + G.graph['name'] +# draw_SYNTHETIC_graph(Gn[idx], file_prefix=file_prefix, save=True) +# draw_SYNTHETIC_graph(Gn[idx]) + print('nb_no_node: ', len(idx_no_node)) + print('nb_no_edge: ', len(idx_no_edge)) + print('nb_no_both: ', len(idx_no_both)) + print('idx_no_node: ', idx_no_node) + print('idx_no_edge: ', idx_no_edge) + print('idx_no_both: ', idx_no_both) + + for idx in [0, 10, 100]: + print(idx) + print(Gn[idx].nodes(data=True)) + print(Gn[idx].edges(data=True)) + draw_SYNTHETIC_graph(Gn[idx], save=None) + + +def plot_a_graph(graph_filename): + graph = loadGXL(graph_filename) + print(nx.get_node_attributes(graph, 'atom')) + edge_labels = nx.get_edge_attributes(graph, 'bond_type') + print(edge_labels) + pos=nx.spring_layout(graph) + nx.draw(graph, pos, labels=nx.get_node_attributes(graph, 'atom'), with_labels=True) + edge_labels = nx.draw_networkx_edge_labels(graph, pos, + edge_labels=edge_labels, + font_color='pink') + plt.show() + + +#Dessin median courrant +def draw_Fingerprint_graph(graph, file_prefix=None, save=None): + plt.figure() + pos = {} + for n in graph.nodes: + pos[n] = np.array([float(graph.node[n]['x']), float(graph.node[n]['y'])]) + # set plot settings. 
+ max_x = np.max([p[0] for p in pos.values()]) if len(pos) > 0 else 10 + min_x = np.min([p[0] for p in pos.values()]) if len(pos) > 0 else 10 + max_y = np.max([p[1] for p in pos.values()]) if len(pos) > 0 else 10 + min_y = np.min([p[1] for p in pos.values()]) if len(pos) > 0 else 10 + padding_x = (max_x - min_x + 10) * 0.1 + padding_y = (max_y - min_y + 10) * 0.1 + range_x = max_x + padding_x - (min_x - padding_x) + range_y = max_y + padding_y - (min_y - padding_y) + if range_x > range_y: + plt.xlim(min_x - padding_x, max_x + padding_x) + plt.ylim(min_y - padding_y - (range_x - range_y) / 2, + max_y + padding_y + (range_x - range_y) / 2) + else: + plt.xlim(min_x - padding_x - (range_y - range_x) / 2, + max_x + padding_x + (range_y - range_x) / 2) + plt.ylim(min_y - padding_y, max_y + padding_y) + plt.gca().set_aspect('equal', adjustable='box') + nx.draw_networkx(graph, pos) + if save is not None: + plt.savefig(file_prefix + '.eps', format='eps', dpi=300) + else: + plt.show() + plt.clf() + + +def draw_SYNTHETIC_graph(graph, file_prefix=None, save=None): + plt.figure() + nx.draw_networkx(graph) + if save is not None: + plt.savefig(file_prefix + '.eps', format='eps', dpi=300) + else: + plt.show() + plt.clf() + + +if __name__ == '__main__': + main() +# gfn = '/media/ljia/DATA/research-repo/codes/others/gedlib/tests_linlin/output/tmp_ged/set_median.gxl' +# plot_a_graph(gfn) +# gfn = '/media/ljia/DATA/research-repo/codes/others/gedlib/tests_linlin/output/tmp_ged/gen_median.gxl' +# plot_a_graph(gfn) \ No newline at end of file diff --git a/notebooks/utils/run_degree_differs_cw.py b/notebooks/utils/run_degree_differs_cw.py index ca7ec4b..e229801 100644 --- a/notebooks/utils/run_degree_differs_cw.py +++ b/notebooks/utils/run_degree_differs_cw.py @@ -11,8 +11,8 @@ import numpy as np import networkx as nx sys.path.insert(0, "../../") -from pygraph.utils.graphfiles import loadDataset -from pygraph.utils.model_selection_precomputed import compute_gram_matrices +from 
gklearn.utils.graphfiles import loadDataset +from gklearn.utils.model_selection_precomputed import compute_gram_matrices from sklearn.model_selection import ParameterGrid from libs import * @@ -24,7 +24,7 @@ dslist = [ ] def run_ms(dataset, y, ds): - from pygraph.kernels.commonWalkKernel import commonwalkkernel + from gklearn.kernels.commonWalkKernel import commonwalkkernel estimator = commonwalkkernel param_grid_precomputed = [{'compute_method': ['geo'], 'weight': np.linspace(0.01, 0.15, 15)}, diff --git a/notebooks/utils/run_degree_differs_ma.py b/notebooks/utils/run_degree_differs_ma.py index c4e8a08..cc3ddf1 100644 --- a/notebooks/utils/run_degree_differs_ma.py +++ b/notebooks/utils/run_degree_differs_ma.py @@ -11,8 +11,8 @@ import numpy as np import networkx as nx sys.path.insert(0, "../../") -from pygraph.utils.graphfiles import loadDataset -from pygraph.utils.model_selection_precomputed import compute_gram_matrices +from gklearn.utils.graphfiles import loadDataset +from gklearn.utils.model_selection_precomputed import compute_gram_matrices from sklearn.model_selection import ParameterGrid from libs import * @@ -24,7 +24,7 @@ dslist = [ ] def run_ms(dataset, y, ds): - from pygraph.kernels.marginalizedKernel import marginalizedkernel + from gklearn.kernels.marginalizedKernel import marginalizedkernel estimator = marginalizedkernel #param_grid_precomputed = {'p_quit': np.linspace(0.1, 0.3, 3), # 'n_iteration': np.linspace(1, 1, 1), diff --git a/notebooks/utils/run_degree_differs_rw.py b/notebooks/utils/run_degree_differs_rw.py index 878fe56..dd20221 100644 --- a/notebooks/utils/run_degree_differs_rw.py +++ b/notebooks/utils/run_degree_differs_rw.py @@ -11,14 +11,14 @@ import numpy as np import networkx as nx sys.path.insert(0, "../../") -from pygraph.utils.graphfiles import loadDataset -from pygraph.utils.model_selection_precomputed import compute_gram_matrices +from gklearn.utils.graphfiles import loadDataset +from gklearn.utils.model_selection_precomputed 
import compute_gram_matrices from sklearn.model_selection import ParameterGrid from libs import * import multiprocessing import functools -from pygraph.utils.kernels import deltakernel, gaussiankernel, kernelproduct +from gklearn.utils.kernels import deltakernel, gaussiankernel, kernelproduct dslist = [ {'name': 'ENZYMES', 'dataset': '../../datasets/ENZYMES_txt/ENZYMES_A_sparse.txt'}, @@ -26,7 +26,7 @@ dslist = [ ] def run_ms(dataset, y, ds): - from pygraph.kernels.randomWalkKernel import randomwalkkernel + from gklearn.kernels.randomWalkKernel import randomwalkkernel estimator = randomwalkkernel param_grid = [{'C': np.logspace(-10, 10, num=41, base=10)}, {'alpha': np.logspace(-10, 10, num=41, base=10)}] diff --git a/notebooks/utils/run_degree_differs_sp.py b/notebooks/utils/run_degree_differs_sp.py index bec68c1..1e83db3 100644 --- a/notebooks/utils/run_degree_differs_sp.py +++ b/notebooks/utils/run_degree_differs_sp.py @@ -11,14 +11,14 @@ import numpy as np import networkx as nx sys.path.insert(0, "../../") -from pygraph.utils.graphfiles import loadDataset -from pygraph.utils.model_selection_precomputed import compute_gram_matrices +from gklearn.utils.graphfiles import loadDataset +from gklearn.utils.model_selection_precomputed import compute_gram_matrices from sklearn.model_selection import ParameterGrid from libs import * import functools import multiprocessing -from pygraph.utils.kernels import deltakernel, gaussiankernel, kernelproduct +from gklearn.utils.kernels import deltakernel, gaussiankernel, kernelproduct dslist = [ {'name': 'ENZYMES', 'dataset': '../../datasets/ENZYMES_txt/ENZYMES_A_sparse.txt'}, @@ -26,7 +26,7 @@ dslist = [ ] def run_ms(dataset, y, ds): - from pygraph.kernels.spKernel import spkernel + from gklearn.kernels.spKernel import spkernel estimator = spkernel mixkernel = functools.partial(kernelproduct, deltakernel, gaussiankernel) param_grid_precomputed = {'node_kernels': [ diff --git a/notebooks/utils/run_degree_differs_ssp.py 
b/notebooks/utils/run_degree_differs_ssp.py index e054874..a954cab 100644 --- a/notebooks/utils/run_degree_differs_ssp.py +++ b/notebooks/utils/run_degree_differs_ssp.py @@ -11,14 +11,14 @@ import numpy as np import networkx as nx sys.path.insert(0, "../../") -from pygraph.utils.graphfiles import loadDataset -from pygraph.utils.model_selection_precomputed import compute_gram_matrices +from gklearn.utils.graphfiles import loadDataset +from gklearn.utils.model_selection_precomputed import compute_gram_matrices from sklearn.model_selection import ParameterGrid from libs import * import functools import multiprocessing -from pygraph.utils.kernels import deltakernel, gaussiankernel, kernelproduct +from gklearn.utils.kernels import deltakernel, gaussiankernel, kernelproduct dslist = [ {'name': 'ENZYMES', 'dataset': '../../datasets/ENZYMES_txt/ENZYMES_A_sparse.txt'}, @@ -26,7 +26,7 @@ dslist = [ ] def run_ms(dataset, y, ds): - from pygraph.kernels.structuralspKernel import structuralspkernel + from gklearn.kernels.structuralspKernel import structuralspkernel estimator = structuralspkernel mixkernel = functools.partial(kernelproduct, deltakernel, gaussiankernel) param_grid_precomputed = {'node_kernels': diff --git a/notebooks/utils/run_degree_differs_uhp.py b/notebooks/utils/run_degree_differs_uhp.py index 06f93b5..b046d80 100644 --- a/notebooks/utils/run_degree_differs_uhp.py +++ b/notebooks/utils/run_degree_differs_uhp.py @@ -11,8 +11,8 @@ import numpy as np import networkx as nx sys.path.insert(0, "../../") -from pygraph.utils.graphfiles import loadDataset -from pygraph.utils.model_selection_precomputed import compute_gram_matrices +from gklearn.utils.graphfiles import loadDataset +from gklearn.utils.model_selection_precomputed import compute_gram_matrices from sklearn.model_selection import ParameterGrid from libs import * @@ -24,7 +24,7 @@ dslist = [ ] def run_ms(dataset, y, ds): - from pygraph.kernels.untilHPathKernel import untilhpathkernel + from 
gklearn.kernels.untilHPathKernel import untilhpathkernel estimator = untilhpathkernel param_grid_precomputed = {'depth': np.linspace(1, 10, 10), # [2], 'k_func': ['MinMax', 'tanimoto'], diff --git a/notebooks/utils/run_vertex_differs_cw.py b/notebooks/utils/run_vertex_differs_cw.py index dddf90d..b385b1b 100644 --- a/notebooks/utils/run_vertex_differs_cw.py +++ b/notebooks/utils/run_vertex_differs_cw.py @@ -10,8 +10,8 @@ import numpy as np import networkx as nx sys.path.insert(0, "../../") -from pygraph.utils.graphfiles import loadDataset -from pygraph.utils.model_selection_precomputed import compute_gram_matrices +from gklearn.utils.graphfiles import loadDataset +from gklearn.utils.model_selection_precomputed import compute_gram_matrices from sklearn.model_selection import ParameterGrid from libs import * @@ -34,7 +34,7 @@ dslist = [ ] def run_ms(dataset, y, ds): - from pygraph.kernels.commonWalkKernel import commonwalkkernel + from gklearn.kernels.commonWalkKernel import commonwalkkernel estimator = commonwalkkernel param_grid_precomputed = [{'compute_method': ['geo'], 'weight': np.linspace(0.01, 0.15, 15)}, diff --git a/notebooks/utils/run_vertex_differs_ma.py b/notebooks/utils/run_vertex_differs_ma.py index f5665c4..92279e4 100644 --- a/notebooks/utils/run_vertex_differs_ma.py +++ b/notebooks/utils/run_vertex_differs_ma.py @@ -11,8 +11,8 @@ import numpy as np import networkx as nx sys.path.insert(0, "../../") -from pygraph.utils.graphfiles import loadDataset -from pygraph.utils.model_selection_precomputed import compute_gram_matrices +from gklearn.utils.graphfiles import loadDataset +from gklearn.utils.model_selection_precomputed import compute_gram_matrices from sklearn.model_selection import ParameterGrid from libs import * @@ -35,7 +35,7 @@ dslist = [ ] def run_ms(dataset, y, ds): - from pygraph.kernels.marginalizedKernel import marginalizedkernel + from gklearn.kernels.marginalizedKernel import marginalizedkernel estimator = marginalizedkernel 
#param_grid_precomputed = {'p_quit': np.linspace(0.1, 0.3, 3), # 'n_iteration': np.linspace(1, 1, 1), diff --git a/notebooks/utils/run_vertex_differs_rw.py b/notebooks/utils/run_vertex_differs_rw.py index 8224e5f..065efb9 100644 --- a/notebooks/utils/run_vertex_differs_rw.py +++ b/notebooks/utils/run_vertex_differs_rw.py @@ -11,14 +11,14 @@ import numpy as np import networkx as nx sys.path.insert(0, "../../") -from pygraph.utils.graphfiles import loadDataset -from pygraph.utils.model_selection_precomputed import compute_gram_matrices +from gklearn.utils.graphfiles import loadDataset +from gklearn.utils.model_selection_precomputed import compute_gram_matrices from sklearn.model_selection import ParameterGrid from libs import * import multiprocessing import functools -from pygraph.utils.kernels import deltakernel, gaussiankernel, kernelproduct +from gklearn.utils.kernels import deltakernel, gaussiankernel, kernelproduct dslist = [ # {'name': 'Acyclic', 'dataset': '../datasets/acyclic/dataset_bps.ds', @@ -37,7 +37,7 @@ dslist = [ ] def run_ms(dataset, y, ds): - from pygraph.kernels.randomWalkKernel import randomwalkkernel + from gklearn.kernels.randomWalkKernel import randomwalkkernel estimator = randomwalkkernel param_grid = [{'C': np.logspace(-10, 10, num=41, base=10)}, {'alpha': np.logspace(-10, 10, num=41, base=10)}] diff --git a/notebooks/utils/run_vertex_differs_sp.py b/notebooks/utils/run_vertex_differs_sp.py index 684a33d..39dcf0f 100644 --- a/notebooks/utils/run_vertex_differs_sp.py +++ b/notebooks/utils/run_vertex_differs_sp.py @@ -11,14 +11,14 @@ import numpy as np import networkx as nx sys.path.insert(0, "../../") -from pygraph.utils.graphfiles import loadDataset -from pygraph.utils.model_selection_precomputed import compute_gram_matrices +from gklearn.utils.graphfiles import loadDataset +from gklearn.utils.model_selection_precomputed import compute_gram_matrices from sklearn.model_selection import ParameterGrid from libs import * import functools import 
multiprocessing -from pygraph.utils.kernels import deltakernel, gaussiankernel, kernelproduct +from gklearn.utils.kernels import deltakernel, gaussiankernel, kernelproduct dslist = [ # {'name': 'Acyclic', 'dataset': '../datasets/acyclic/dataset_bps.ds', @@ -37,7 +37,7 @@ dslist = [ ] def run_ms(dataset, y, ds): - from pygraph.kernels.spKernel import spkernel + from gklearn.kernels.spKernel import spkernel estimator = spkernel mixkernel = functools.partial(kernelproduct, deltakernel, gaussiankernel) param_grid_precomputed = {'node_kernels': [ diff --git a/notebooks/utils/run_vertex_differs_ssp.py b/notebooks/utils/run_vertex_differs_ssp.py index 03ecba1..fa341b2 100644 --- a/notebooks/utils/run_vertex_differs_ssp.py +++ b/notebooks/utils/run_vertex_differs_ssp.py @@ -11,14 +11,14 @@ import numpy as np import networkx as nx sys.path.insert(0, "../../") -from pygraph.utils.graphfiles import loadDataset -from pygraph.utils.model_selection_precomputed import compute_gram_matrices +from gklearn.utils.graphfiles import loadDataset +from gklearn.utils.model_selection_precomputed import compute_gram_matrices from sklearn.model_selection import ParameterGrid from libs import * import functools import multiprocessing -from pygraph.utils.kernels import deltakernel, gaussiankernel, kernelproduct +from gklearn.utils.kernels import deltakernel, gaussiankernel, kernelproduct dslist = [ # {'name': 'Acyclic', 'dataset': '../datasets/acyclic/dataset_bps.ds', @@ -37,7 +37,7 @@ dslist = [ ] def run_ms(dataset, y, ds): - from pygraph.kernels.structuralspKernel import structuralspkernel + from gklearn.kernels.structuralspKernel import structuralspkernel estimator = structuralspkernel mixkernel = functools.partial(kernelproduct, deltakernel, gaussiankernel) param_grid_precomputed = {'node_kernels': diff --git a/notebooks/utils/run_vertex_differs_uhp.py b/notebooks/utils/run_vertex_differs_uhp.py index e67063f..b2665a3 100644 --- a/notebooks/utils/run_vertex_differs_uhp.py +++ 
b/notebooks/utils/run_vertex_differs_uhp.py @@ -11,8 +11,8 @@ import numpy as np import networkx as nx sys.path.insert(0, "../../") -from pygraph.utils.graphfiles import loadDataset -from pygraph.utils.model_selection_precomputed import compute_gram_matrices +from gklearn.utils.graphfiles import loadDataset +from gklearn.utils.model_selection_precomputed import compute_gram_matrices from sklearn.model_selection import ParameterGrid from libs import * @@ -35,7 +35,7 @@ dslist = [ ] def run_ms(dataset, y, ds): - from pygraph.kernels.untilHPathKernel import untilhpathkernel + from gklearn.kernels.untilHPathKernel import untilhpathkernel estimator = untilhpathkernel param_grid_precomputed = {'depth': np.linspace(1, 10, 10), # [2], 'k_func': ['MinMax', 'tanimoto'], diff --git a/setup.py b/setup.py new file mode 100644 index 0000000..c24493b --- /dev/null +++ b/setup.py @@ -0,0 +1,21 @@ +import setuptools + +with open("README.md", "r") as fh: + long_description = fh.read() + +setuptools.setup( + name="graphkit-learn", + version="0.1b2", + author="Linlin Jia", + author_email="linlin.jia@insa-rouen.fr", + description="A Python library for graph kernels based on linear patterns", + long_description=long_description, + long_description_content_type="text/markdown", + url="https://github.com/jajupmochi/graphkit-learn", + packages=setuptools.find_packages(), + classifiers=[ + "Programming Language :: Python :: 3", + "License :: OSI Approved :: GNU General Public License v3 (GPLv3)", + "Operating System :: OS Independent", + ], +) diff --git a/tests/tests_tools.py b/tests/tests_tools.py index f358d5e..18778d4 100644 --- a/tests/tests_tools.py +++ b/tests/tests_tools.py @@ -1 +1 @@ -print('Hello py-graph!') +print('Hello graphkit-learn!')