diff --git a/docs/Makefile b/docs/Makefile
new file mode 100644
index 0000000..298ea9e
--- /dev/null
+++ b/docs/Makefile
@@ -0,0 +1,19 @@
+# Minimal makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS    =
+SPHINXBUILD   = sphinx-build
+SOURCEDIR     = .
+BUILDDIR      = _build
+
+# Put it first so that "make" without argument is like "make help".
+help:
+	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
+
+.PHONY: help Makefile
+
+# Catch-all target: route all unknown targets to Sphinx using the new
+# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
+%: Makefile
+	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
\ No newline at end of file
diff --git a/docs/_build/doctrees/environment.pickle b/docs/_build/doctrees/environment.pickle
new file mode 100644
index 0000000..56cced3
Binary files /dev/null and b/docs/_build/doctrees/environment.pickle differ
diff --git a/docs/_build/doctrees/index.doctree b/docs/_build/doctrees/index.doctree
new file mode 100644
index 0000000..1579d82
Binary files /dev/null and b/docs/_build/doctrees/index.doctree differ
diff --git a/docs/_build/doctrees/modules.doctree b/docs/_build/doctrees/modules.doctree
new file mode 100644
index 0000000..c46a2dc
Binary files /dev/null and b/docs/_build/doctrees/modules.doctree differ
diff --git a/docs/_build/doctrees/pygraph.doctree b/docs/_build/doctrees/pygraph.doctree
new file mode 100644
index 0000000..7c2a62b
Binary files /dev/null and b/docs/_build/doctrees/pygraph.doctree differ
diff --git a/docs/_build/doctrees/pygraph.utils.doctree b/docs/_build/doctrees/pygraph.utils.doctree
new file mode 100644
index 0000000..2c2141e
Binary files /dev/null and b/docs/_build/doctrees/pygraph.utils.doctree differ
diff --git a/docs/_build/html/.buildinfo b/docs/_build/html/.buildinfo
new file mode 100644
index 0000000..7f6bd34
--- /dev/null
+++ b/docs/_build/html/.buildinfo
@@ -0,0 +1,4 @@
+# Sphinx build info version 1
+# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
+config: b72b368ba10131aed8c3edbb863096bd
+tags: 645f666f9bcd5a90fca523b33c5a78b7
diff --git a/docs/_build/html/_modules/index.html b/docs/_build/html/_modules/index.html
new file mode 100644
index 0000000..a0e2ef3
--- /dev/null
+++ b/docs/_build/html/_modules/index.html
@@ -0,0 +1,196 @@
+""" Obtain all kinds of attributes of a graph dataset.
+"""
+
+
+def get_dataset_attributes(Gn,
+ target=None,
+ attr_names=[],
+ node_label=None,
+ edge_label=None):
+ """Returns the structure and property information of the graph dataset Gn.
+
+ Parameters
+ ----------
+ Gn : List of NetworkX graph
+ List of graphs whose information will be returned.
+ target : list
+ The list of classification targets corresponding to Gn. Only works for
+ classification problems.
+ attr_names : list
+ List of strings indicating which pieces of information will be returned.
+ The possible choices include:
+ 'substructures': sub-structures Gn contains, including 'linear', 'non
+ linear' and 'cyclic'.
+ 'node_labeled': whether vertices have symbolic labels.
+ 'edge_labeled': whether edges have symbolic labels.
+ 'is_directed': whether graphs in Gn are directed.
+ 'dataset_size': number of graphs in Gn.
+ 'ave_node_num': average number of vertices of graphs in Gn.
+ 'min_node_num': minimum number of vertices of graphs in Gn.
+ 'max_node_num': maximum number of vertices of graphs in Gn.
+ 'ave_edge_num': average number of edges of graphs in Gn.
+ 'min_edge_num': minimum number of edges of graphs in Gn.
+ 'max_edge_num': maximum number of edges of graphs in Gn.
+ 'ave_node_degree': average vertex degree of graphs in Gn.
+ 'min_node_degree': minimum vertex degree of graphs in Gn.
+ 'max_node_degree': maximum vertex degree of graphs in Gn.
+ 'ave_fill_factor': average fill factor (number_of_edges /
+ (number_of_nodes ** 2)) of graphs in Gn.
+ 'min_fill_factor': minimum fill factor of graphs in Gn.
+ 'max_fill_factor': maximum fill factor of graphs in Gn.
+ 'node_label_num': number of symbolic vertex labels.
+ 'edge_label_num': number of symbolic edge labels.
+ 'node_attr_dim': number of dimensions of non-symbolic vertex labels.
+ Extracted from the 'attributes' attribute of graph nodes.
+ 'edge_attr_dim': number of dimensions of non-symbolic edge labels.
+ Extracted from the 'attributes' attribute of graph edges.
+ 'class_number': number of classes. Only available for classification
+ problems.
+ node_label : string
+ Node attribute used as label. The default node label is atom. Mandatory
+ when 'node_labeled' or 'node_label_num' is required.
+ edge_label : string
+ Edge attribute used as label. The default edge label is bond_type.
+ Mandatory when 'edge_labeled' or 'edge_label_num' is required.
+
+ Return
+ ------
+ attrs : dict
+ Value for each property.
+ """
+ import networkx as nx
+ import numpy as np
+
+ attrs = {}
+
+ def get_dataset_size(Gn):
+ return len(Gn)
+
+ def get_all_node_num(Gn):
+ return [nx.number_of_nodes(G) for G in Gn]
+
+ def get_ave_node_num(all_node_num):
+ return np.mean(all_node_num)
+
+ def get_min_node_num(all_node_num):
+ return np.amin(all_node_num)
+
+ def get_max_node_num(all_node_num):
+ return np.amax(all_node_num)
+
+ def get_all_edge_num(Gn):
+ return [nx.number_of_edges(G) for G in Gn]
+
+ def get_ave_edge_num(all_edge_num):
+ return np.mean(all_edge_num)
+
+ def get_min_edge_num(all_edge_num):
+ return np.amin(all_edge_num)
+
+ def get_max_edge_num(all_edge_num):
+ return np.amax(all_edge_num)
+
+ def is_node_labeled(Gn):
+ return node_label is not None
+
+ def get_node_label_num(Gn):
+ nl = set()
+ for G in Gn:
+ nl = nl | set(nx.get_node_attributes(G, node_label).values())
+ return len(nl)
+
+ def is_edge_labeled(Gn):
+ return edge_label is not None
+
+ def get_edge_label_num(Gn):
+ el = set()
+ for G in Gn:
+ el = el | set(nx.get_edge_attributes(G, edge_label).values())
+ return len(el)
+
+ def is_directed(Gn):
+ return nx.is_directed(Gn[0])
+
+ def get_ave_node_degree(Gn):
+ return np.mean([np.mean(list(dict(G.degree()).values())) for G in Gn])
+
+ def get_max_node_degree(Gn):
+ return np.amax([np.mean(list(dict(G.degree()).values())) for G in Gn])
+
+ def get_min_node_degree(Gn):
+ return np.amin([np.mean(list(dict(G.degree()).values())) for G in Gn])
+
+ # get fill factor, the ratio of the number of edges to the squared number of nodes.
+ def get_ave_fill_factor(Gn):
+ return np.mean([nx.number_of_edges(G) / (nx.number_of_nodes(G)
+ * nx.number_of_nodes(G)) for G in Gn])
+
+ def get_max_fill_factor(Gn):
+ return np.amax([nx.number_of_edges(G) / (nx.number_of_nodes(G)
+ * nx.number_of_nodes(G)) for G in Gn])
+
+ def get_min_fill_factor(Gn):
+ return np.amin([nx.number_of_edges(G) / (nx.number_of_nodes(G)
+ * nx.number_of_nodes(G)) for G in Gn])
+
+ def get_substructures(Gn):
+ subs = set()
+ for G in Gn:
+ degrees = list(dict(G.degree()).values())
+ if any(i == 2 for i in degrees):
+ subs.add('linear')
+ if np.amax(degrees) >= 3:
+ subs.add('non linear')
+ if 'linear' in subs and 'non linear' in subs:
+ break
+
+ if is_directed(Gn):
+ for G in Gn:
+ try:
+ if len(list(nx.find_cycle(G))) > 0:
+ subs.add('cyclic')
+ break
+ except nx.NetworkXNoCycle:
+ # nx.find_cycle raises instead of returning an empty list
+ # when the graph is acyclic; keep looking.
+ pass
+ # else:
+ # # @todo: this method does not work for big graph with large amount of edges like D&D, try a better way.
+ # upper = np.amin([nx.number_of_edges(G) for G in Gn]) * 2 + 10
+ # for G in Gn:
+ # if (nx.number_of_edges(G) < upper):
+ # cyc = list(nx.simple_cycles(G.to_directed()))
+ # if any(len(i) > 2 for i in cyc):
+ # subs.add('cyclic')
+ # break
+ # if 'cyclic' not in subs:
+ # for G in Gn:
+ # cyc = list(nx.simple_cycles(G.to_directed()))
+ # if any(len(i) > 2 for i in cyc):
+ # subs.add('cyclic')
+ # break
+
+ return subs
+
+ def get_class_num(target):
+ return len(set(target))
+
+ def get_node_attr_dim(Gn):
+ for G in Gn:
+ for n in G.nodes(data=True):
+ if 'attributes' in n[1]:
+ return len(n[1]['attributes'])
+ return 0
+
+ def get_edge_attr_dim(Gn):
+ for G in Gn:
+ if nx.number_of_edges(G) > 0:
+ for e in G.edges(data=True):
+ if 'attributes' in e[2]:
+ return len(e[2]['attributes'])
+ return 0
+
+ if attr_names == []:
+ attr_names = [
+ 'substructures',
+ 'node_labeled',
+ 'edge_labeled',
+ 'is_directed',
+ 'dataset_size',
+ 'ave_node_num',
+ 'min_node_num',
+ 'max_node_num',
+ 'ave_edge_num',
+ 'min_edge_num',
+ 'max_edge_num',
+ 'ave_node_degree',
+ 'min_node_degree',
+ 'max_node_degree',
+ 'ave_fill_factor',
+ 'min_fill_factor',
+ 'max_fill_factor',
+ 'node_label_num',
+ 'edge_label_num',
+ 'node_attr_dim',
+ 'edge_attr_dim',
+ 'class_number',
+ ]
+
+ # dataset size
+ if 'dataset_size' in attr_names:
+
+ attrs.update({'dataset_size': get_dataset_size(Gn)})
+
+ # graph node number
+ if any(i in attr_names
+ for i in ['ave_node_num', 'min_node_num', 'max_node_num']):
+
+ all_node_num = get_all_node_num(Gn)
+
+ if 'ave_node_num' in attr_names:
+
+ attrs.update({'ave_node_num': get_ave_node_num(all_node_num)})
+
+ if 'min_node_num' in attr_names:
+
+ attrs.update({'min_node_num': get_min_node_num(all_node_num)})
+
+ if 'max_node_num' in attr_names:
+
+ attrs.update({'max_node_num': get_max_node_num(all_node_num)})
+
+ # graph edge number
+ if any(i in attr_names for i in
+ ['ave_edge_num', 'min_edge_num', 'max_edge_num']):
+
+ all_edge_num = get_all_edge_num(Gn)
+
+ if 'ave_edge_num' in attr_names:
+
+ attrs.update({'ave_edge_num': get_ave_edge_num(all_edge_num)})
+
+ if 'max_edge_num' in attr_names:
+
+ attrs.update({'max_edge_num': get_max_edge_num(all_edge_num)})
+
+ if 'min_edge_num' in attr_names:
+
+ attrs.update({'min_edge_num': get_min_edge_num(all_edge_num)})
+
+ # label number
+ if any(i in attr_names for i in ['node_labeled', 'node_label_num']):
+ is_nl = is_node_labeled(Gn)
+ node_label_num = get_node_label_num(Gn)
+
+ if 'node_labeled' in attr_names:
+ # graphs are considered node unlabeled if all nodes have the same label.
+ attrs.update({'node_labeled': is_nl if node_label_num > 1 else False})
+
+ if 'node_label_num' in attr_names:
+ attrs.update({'node_label_num': node_label_num})
+
+ if any(i in attr_names for i in ['edge_labeled', 'edge_label_num']):
+ is_el = is_edge_labeled(Gn)
+ edge_label_num = get_edge_label_num(Gn)
+
+ if 'edge_labeled' in attr_names:
+ # graphs are considered edge unlabeled if all edges have the same label.
+ attrs.update({'edge_labeled': is_el if edge_label_num > 1 else False})
+
+ if 'edge_label_num' in attr_names:
+ attrs.update({'edge_label_num': edge_label_num})
+
+ if 'is_directed' in attr_names:
+ attrs.update({'is_directed': is_directed(Gn)})
+
+ if 'ave_node_degree' in attr_names:
+ attrs.update({'ave_node_degree': get_ave_node_degree(Gn)})
+
+ if 'max_node_degree' in attr_names:
+ attrs.update({'max_node_degree': get_max_node_degree(Gn)})
+
+ if 'min_node_degree' in attr_names:
+ attrs.update({'min_node_degree': get_min_node_degree(Gn)})
+
+ if 'ave_fill_factor' in attr_names:
+ attrs.update({'ave_fill_factor': get_ave_fill_factor(Gn)})
+
+ if 'max_fill_factor' in attr_names:
+ attrs.update({'max_fill_factor': get_max_fill_factor(Gn)})
+
+ if 'min_fill_factor' in attr_names:
+ attrs.update({'min_fill_factor': get_min_fill_factor(Gn)})
+
+ if 'substructures' in attr_names:
+ attrs.update({'substructures': get_substructures(Gn)})
+
+ if 'class_number' in attr_names:
+ attrs.update({'class_number': get_class_num(target)})
+
+ if 'node_attr_dim' in attr_names:
+ attrs['node_attr_dim'] = get_node_attr_dim(Gn)
+
+ if 'edge_attr_dim' in attr_names:
+ attrs['edge_attr_dim'] = get_edge_attr_dim(Gn)
+
+ from collections import OrderedDict
+ return OrderedDict(
+ sorted(attrs.items(), key=lambda i: attr_names.index(i[0])))
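+
+# A minimal usage sketch (an illustration, not part of the original module):
+# build two tiny NetworkX graphs and query a few attributes. The 'atom' node
+# label matches the default label name used elsewhere in this package.
+# import networkx as nx
+# G1 = nx.Graph(); G1.add_node(0, atom='C'); G1.add_node(1, atom='O')
+# G1.add_edge(0, 1, bond_type='1')
+# G2 = nx.path_graph(3)
+# attrs = get_dataset_attributes(
+#     [G1, G2], attr_names=['dataset_size', 'ave_node_num', 'node_label_num'],
+#     node_label='atom')
+# print(attrs)
+# # OrderedDict([('dataset_size', 2), ('ave_node_num', 2.5), ('node_label_num', 2)])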
+
+""" Utilities function to manage graph files
+"""
+from os.path import dirname, splitext
+
+def loadCT(filename):
+ """Load data from a Chemical Table (.ct) file.
+
+ Notes
+ ------
+ A typical example of the data in a .ct file looks like this:
+
+ 3 2 <- number of nodes and edges
+ 0.0000 0.0000 0.0000 C <- each line describes a node (x,y,z + label)
+ 0.0000 0.0000 0.0000 C
+ 0.0000 0.0000 0.0000 O
+ 1 3 1 1 <- each line describes an edge : from, to, bond type, bond stereo
+ 2 3 1 1
+
+ Check https://www.google.com/url?sa=t&rct=j&q=&esrc=s&source=web&cd=10&ved=2ahUKEwivhaSdjsTlAhVhx4UKHczHA8gQFjAJegQIARAC&url=https%3A%2F%2Fwww.daylight.com%2Fmeetings%2Fmug05%2FKappler%2Fctfile.pdf&usg=AOvVaw1cDNrrmMClkFPqodlF2inS
+ for a detailed format description.
+ """
+ import networkx as nx
+ from os.path import basename
+ g = nx.Graph()
+ with open(filename) as f:
+ content = f.read().splitlines()
+ g = nx.Graph(
+ name = str(content[0]),
+ filename = basename(filename)) # set name of the graph
+ tmp = content[1].split(" ")
+ if tmp[0] == '':
+ nb_nodes = int(tmp[1]) # number of the nodes
+ nb_edges = int(tmp[2]) # number of the edges
+ else:
+ nb_nodes = int(tmp[0])
+ nb_edges = int(tmp[1])
+ # patch for compatibility : label will be removed later
+ for i in range(0, nb_nodes):
+ tmp = content[i + 2].split(" ")
+ tmp = [x for x in tmp if x != '']
+ g.add_node(i, atom=tmp[3].strip(),
+ label=[item.strip() for item in tmp[3:]],
+ attributes=[item.strip() for item in tmp[0:3]])
+ for i in range(0, nb_edges):
+ tmp = content[i + g.number_of_nodes() + 2].split(" ")
+ tmp = [x for x in tmp if x != '']
+ g.add_edge(int(tmp[0]) - 1, int(tmp[1]) - 1,
+ bond_type=tmp[2].strip(),
+ label=[item.strip() for item in tmp[2:]])
+ return g
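+
+# A hedged usage sketch (not from the original source): write the sample .ct
+# content from the docstring above to a temporary file and load it back.
+# import tempfile
+# ct = ('mol\n'
+#       ' 3 2\n'
+#       ' 0.0 0.0 0.0 C\n'
+#       ' 0.0 0.0 0.0 C\n'
+#       ' 0.0 0.0 0.0 O\n'
+#       ' 1 3 1 1\n'
+#       ' 2 3 1 1\n')
+# with tempfile.NamedTemporaryFile('w', suffix='.ct', delete=False) as f:
+#     f.write(ct)
+# g = loadCT(f.name)
+# print(g.number_of_nodes(), g.number_of_edges())    # 3 2
+# print(g.nodes[0]['atom'], g.edges[0, 2]['bond_type'])  # C 1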
+
+
+def loadGXL(filename):
+ from os.path import basename
+ import networkx as nx
+ import xml.etree.ElementTree as ET
+
+ tree = ET.parse(filename)
+ root = tree.getroot()
+ index = 0
+ g = nx.Graph(filename=basename(filename), name=root[0].attrib['id'])
+ dic = {} # used to retrieve incident nodes of edges
+ for node in root.iter('node'):
+ dic[node.attrib['id']] = index
+ labels = {}
+ for attr in node.iter('attr'):
+ labels[attr.attrib['name']] = attr[0].text
+ if 'chem' in labels:
+ labels['label'] = labels['chem']
+ labels['atom'] = labels['chem']
+ g.add_node(index, **labels)
+ index += 1
+
+ for edge in root.iter('edge'):
+ labels = {}
+ for attr in edge.iter('attr'):
+ labels[attr.attrib['name']] = attr[0].text
+ if 'valence' in labels:
+ labels['label'] = labels['valence']
+ labels['bond_type'] = labels['valence']
+ g.add_edge(dic[edge.attrib['from']], dic[edge.attrib['to']], **labels)
+ return g
+
+
+def saveGXL(graph, filename, method='benoit'):
+ if method == 'benoit':
+ import xml.etree.ElementTree as ET
+ root_node = ET.Element('gxl')
+ attr = dict()
+ attr['id'] = str(graph.graph['name'])
+ attr['edgeids'] = 'true'
+ attr['edgemode'] = 'undirected'
+ graph_node = ET.SubElement(root_node, 'graph', attrib=attr)
+
+ for v in graph:
+ current_node = ET.SubElement(graph_node, 'node', attrib={'id': str(v)})
+ for attr in graph.nodes[v].keys():
+ cur_attr = ET.SubElement(
+ current_node, 'attr', attrib={'name': attr})
+ cur_value = ET.SubElement(cur_attr,
+ graph.nodes[v][attr].__class__.__name__)
+ cur_value.text = graph.nodes[v][attr]
+
+ for v1 in graph:
+ for v2 in graph[v1]:
+ if (v1 < v2): # Non oriented graphs
+ cur_edge = ET.SubElement(
+ graph_node,
+ 'edge',
+ attrib={
+ 'from': str(v1),
+ 'to': str(v2)
+ })
+ for attr in graph[v1][v2].keys():
+ cur_attr = ET.SubElement(
+ cur_edge, 'attr', attrib={'name': attr})
+ cur_value = ET.SubElement(
+ cur_attr, graph[v1][v2][attr].__class__.__name__)
+ cur_value.text = str(graph[v1][v2][attr])
+
+ tree = ET.ElementTree(root_node)
+ tree.write(filename)
+ elif method == 'gedlib':
+ # reference: https://github.com/dbblumenthal/gedlib/blob/master/data/generate_molecules.py#L22
+# pass
+ gxl_file = open(filename, 'w')
+ gxl_file.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n")
+ gxl_file.write("<!DOCTYPE gxl SYSTEM \"http://www.gupro.de/GXL/gxl-1.0.dtd\">\n")
+ gxl_file.write("<gxl xmlns:xlink=\"http://www.w3.org/1999/xlink\">\n")
+ gxl_file.write("<graph id=\"" + str(graph.graph['name']) + "\" edgeids=\"true\" edgemode=\"undirected\">\n")
+ for v, attrs in graph.nodes(data=True):
+ gxl_file.write("<node id=\"_" + str(v) + "\">")
+ gxl_file.write("<attr name=\"" + "chem" + "\"><int>" + str(attrs['chem']) + "</int></attr>")
+ gxl_file.write("</node>\n")
+ for v1, v2, attrs in graph.edges(data=True):
+ gxl_file.write("<edge from=\"_" + str(v1) + "\" to=\"_" + str(v2) + "\">")
+ gxl_file.write("<attr name=\"valence\"><int>" + str(attrs['valence']) + "</int></attr>")
+# gxl_file.write("<attr name=\"valence\"><int>" + "1" + "</int></attr>")
+ gxl_file.write("</edge>\n")
+ gxl_file.write("</graph>\n")
+ gxl_file.write("</gxl>")
+ gxl_file.close()
+ elif method == 'gedlib-letter':
+ # reference: https://github.com/dbblumenthal/gedlib/blob/master/data/generate_molecules.py#L22
+ # and https://github.com/dbblumenthal/gedlib/blob/master/data/datasets/Letter/HIGH/AP1_0000.gxl
+ gxl_file = open(filename, 'w')
+ gxl_file.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n")
+ gxl_file.write("<!DOCTYPE gxl SYSTEM \"http://www.gupro.de/GXL/gxl-1.0.dtd\">\n")
+ gxl_file.write("<gxl xmlns:xlink=\"http://www.w3.org/1999/xlink\">\n")
+ gxl_file.write("<graph id=\"" + str(graph.graph['name']) + "\" edgeids=\"false\" edgemode=\"undirected\">\n")
+ for v, attrs in graph.nodes(data=True):
+ gxl_file.write("<node id=\"_" + str(v) + "\">")
+ gxl_file.write("<attr name=\"x\"><float>" + str(attrs['attributes'][0]) + "</float></attr>")
+ gxl_file.write("<attr name=\"y\"><float>" + str(attrs['attributes'][1]) + "</float></attr>")
+ gxl_file.write("</node>\n")
+ for v1, v2, attrs in graph.edges(data=True):
+ gxl_file.write("<edge from=\"_" + str(v1) + "\" to=\"_" + str(v2) + "\"/>\n")
+ gxl_file.write("</graph>\n")
+ gxl_file.write("</gxl>")
+ gxl_file.close()
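+
+# A round-trip sketch (hedged; uses only functions defined in this module):
+# save a graph carrying the 'chem'/'valence' attributes expected by the
+# 'gedlib' writer, then read it back with loadGXL. '/tmp/toy.gxl' is an
+# arbitrary example path.
+# import networkx as nx
+# g = nx.Graph(name='toy')
+# g.add_node(0, chem=6)
+# g.add_node(1, chem=8)
+# g.add_edge(0, 1, valence=1)
+# saveGXL(g, '/tmp/toy.gxl', method='gedlib')
+# g2 = loadGXL('/tmp/toy.gxl')
+# print(g2.nodes(data=True))  # 'chem' values come back as strings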
+
+
+def loadSDF(filename):
+ """Load data from a structured data file (.sdf file).
+
+ Notes
+ ------
+ An SDF file contains a group of molecules, each represented in a similar way to the MOL format.
+ Check http://www.nonlinear.com/progenesis/sdf-studio/v0.9/faq/sdf-file-format-guidance.aspx, 2018 for detailed structure.
+ """
+ import networkx as nx
+ from os.path import basename
+ from tqdm import tqdm
+ import sys
+ data = []
+ with open(filename) as f:
+ content = f.read().splitlines()
+ index = 0
+ pbar = tqdm(total=len(content) + 1, desc='load SDF', file=sys.stdout)
+ while index < len(content):
+ index_old = index
+
+ g = nx.Graph(name=content[index].strip()) # set name of the graph
+
+ tmp = content[index + 3]
+ nb_nodes = int(tmp[:3]) # number of the nodes
+ nb_edges = int(tmp[3:6]) # number of the edges
+
+ for i in range(0, nb_nodes):
+ tmp = content[i + index + 4]
+ g.add_node(i, atom=tmp[31:34].strip())
+
+ for i in range(0, nb_edges):
+ tmp = content[i + index + g.number_of_nodes() + 4]
+ tmp = [tmp[i:i + 3] for i in range(0, len(tmp), 3)]
+ g.add_edge(
+ int(tmp[0]) - 1, int(tmp[1]) - 1, bond_type=tmp[2].strip())
+
+ data.append(g)
+
+ index += 4 + g.number_of_nodes() + g.number_of_edges()
+ while content[index].strip() != '$$$$': # separator
+ index += 1
+ index += 1
+
+ pbar.update(index - index_old)
+ pbar.update(1)
+ pbar.close()
+
+ return data
+
+
+def loadMAT(filename, extra_params):
+ """Load graph data from a MATLAB (up to version 7.1) .mat file.
+
+ Notes
+ ------
+ A MAT file contains a struct array containing graphs, and a column vector lx containing a class label for each graph.
+ Check README in downloadable file in http://mlcb.is.tuebingen.mpg.de/Mitarbeiter/Nino/WL/, 2018 for detailed structure.
+ """
+ from scipy.io import loadmat
+ import numpy as np
+ import networkx as nx
+ data = []
+ content = loadmat(filename)
+ order = extra_params['am_sp_al_nl_el']
+ # print(content)
+ # print('----')
+ for key, value in content.items():
+ if key[0] == 'l': # class label
+ y = np.transpose(value)[0].tolist()
+ # print(y)
+ elif key[0] != '_':
+ # print(value[0][0][0])
+ # print()
+ # print(value[0][0][1])
+ # print()
+ # print(value[0][0][2])
+ # print()
+ # if len(value[0][0]) > 3:
+ # print(value[0][0][3])
+ # print('----')
+ # if adjacency matrix is not compressed / edge label exists
+ if order[1] == 0:
+ for i, item in enumerate(value[0]):
+ # print(item)
+ # print('------')
+ g = nx.Graph(name=i) # set name of the graph
+ nl = np.transpose(item[order[3]][0][0][0]) # node label
+ # print(item[order[3]])
+ # print()
+ for index, label in enumerate(nl[0]):
+ g.add_node(index, atom=str(label))
+ el = item[order[4]][0][0][0] # edge label
+ for edge in el:
+ g.add_edge(
+ edge[0] - 1, edge[1] - 1, bond_type=str(edge[2]))
+ data.append(g)
+ else:
+ from scipy.sparse import csc_matrix
+ for i, item in enumerate(value[0]):
+ # print(item)
+ # print('------')
+ g = nx.Graph(name=i) # set name of the graph
+ nl = np.transpose(item[order[3]][0][0][0]) # node label
+ # print(nl)
+ # print()
+ for index, label in enumerate(nl[0]):
+ g.add_node(index, atom=str(label))
+ sam = item[order[0]] # sparse adjacency matrix
+ index_no0 = sam.nonzero()
+ for col, row in zip(index_no0[0], index_no0[1]):
+ # print(col)
+ # print(row)
+ g.add_edge(col, row)
+ data.append(g)
+ # print(g.edges(data=True))
+ return data, y
+
+
+def loadTXT(dirname_dataset):
+ """Load graph data from a .txt file.
+
+ Notes
+ ------
+ The graph data is loaded from separate files.
+ Check README in downloadable file http://tiny.cc/PK_MLJ_data, 2018 for detailed structure.
+ """
+ import numpy as np
+ import networkx as nx
+ from os import listdir
+ from os.path import dirname
+
+ # load data file names
+ for name in listdir(dirname_dataset):
+ if '_A' in name:
+ fam = dirname_dataset + '/' + name
+ elif '_graph_indicator' in name:
+ fgi = dirname_dataset + '/' + name
+ elif '_graph_labels' in name:
+ fgl = dirname_dataset + '/' + name
+ elif '_node_labels' in name:
+ fnl = dirname_dataset + '/' + name
+ elif '_edge_labels' in name:
+ fel = dirname_dataset + '/' + name
+ elif '_edge_attributes' in name:
+ fea = dirname_dataset + '/' + name
+ elif '_node_attributes' in name:
+ fna = dirname_dataset + '/' + name
+ elif '_graph_attributes' in name:
+ fga = dirname_dataset + '/' + name
+ # this is assumed to be the node attributes; make sure to keep this as the last 'elif'
+ elif '_attributes' in name:
+ fna = dirname_dataset + '/' + name
+
+ content_gi = open(fgi).read().splitlines() # graph indicator
+ content_am = open(fam).read().splitlines() # adjacency matrix
+ content_gl = open(fgl).read().splitlines() # class labels
+
+ # create graphs and add nodes
+ data = [nx.Graph(name=i) for i in range(0, len(content_gl))]
+ if 'fnl' in locals():
+ content_nl = open(fnl).read().splitlines() # node labels
+ for i, line in enumerate(content_gi):
+ # transfer to int first in case of unexpected blanks
+ data[int(line) - 1].add_node(i, atom=str(int(content_nl[i])))
+ else:
+ for i, line in enumerate(content_gi):
+ data[int(line) - 1].add_node(i)
+
+ # add edges
+ for line in content_am:
+ tmp = line.split(',')
+ n1 = int(tmp[0]) - 1
+ n2 = int(tmp[1]) - 1
+ # ignore edge weight here.
+ g = int(content_gi[n1]) - 1
+ data[g].add_edge(n1, n2)
+
+ # add edge labels
+ if 'fel' in locals():
+ content_el = open(fel).read().splitlines()
+ for index, line in enumerate(content_el):
+ label = line.strip()
+ n = [int(i) - 1 for i in content_am[index].split(',')]
+ g = int(content_gi[n[0]]) - 1
+ data[g].edges[n[0], n[1]]['bond_type'] = label
+
+ # add node attributes
+ if 'fna' in locals():
+ content_na = open(fna).read().splitlines()
+ for i, line in enumerate(content_na):
+ attrs = [i.strip() for i in line.split(',')]
+ g = int(content_gi[i]) - 1
+ data[g].nodes[i]['attributes'] = attrs
+
+ # add edge attributes
+ if 'fea' in locals():
+ content_ea = open(fea).read().splitlines()
+ for index, line in enumerate(content_ea):
+ attrs = [i.strip() for i in line.split(',')]
+ n = [int(i) - 1 for i in content_am[index].split(',')]
+ g = int(content_gi[n[0]]) - 1
+ data[g].edges[n[0], n[1]]['attributes'] = attrs
+
+ # load y
+ y = [int(i) for i in content_gl]
+
+ return data, y
+
+
+def loadDataset(filename, filename_y=None, extra_params=None):
+ """Read graph data from filename and load them as NetworkX graphs.
+
+ Parameters
+ ----------
+ filename : string
+ The name of the file from where the dataset is read.
+ filename_y : string
+ The name of file of the targets corresponding to graphs.
+ extra_params : dict
+ Extra parameters only designated to '.mat' format.
+
+ Return
+ ------
+ data : List of NetworkX graph.
+ y : List
+ Targets corresponding to graphs.
+
+ Notes
+ -----
+ This function supports following graph dataset formats:
+ 'ds': load data from a .ds file. See the comments of function loadFromDS for an example.
+ 'cxl': load data from Graph eXchange Language file (.cxl file). See
+ http://www.gupro.de/GXL/Introduction/background.html, 2019 for detail.
+ 'sdf': load data from structured data file (.sdf file). See
+ http://www.nonlinear.com/progenesis/sdf-studio/v0.9/faq/sdf-file-format-guidance.aspx,
+ 2018 for details.
+ 'mat': Load graph data from a MATLAB (up to version 7.1) .mat file. See
+ README in downloadable file in http://mlcb.is.tuebingen.mpg.de/Mitarbeiter/Nino/WL/,
+ 2018 for details.
+ 'txt': Load graph data from a special .txt file. See
+ https://ls11-www.cs.tu-dortmund.de/staff/morris/graphkerneldatasets,
+ 2019 for details. Note that here filename can be the name of any .txt file in
+ the dataset directory.
+ """
+ extension = splitext(filename)[1][1:]
+ if extension == "ds":
+ data, y = loadFromDS(filename, filename_y)
+ elif extension == "cxl":
+ import xml.etree.ElementTree as ET
+
+ dirname_dataset = dirname(filename)
+ tree = ET.parse(filename)
+ root = tree.getroot()
+ data = []
+ y = []
+ for graph in root.iter('graph'):
+ mol_filename = graph.attrib['file']
+ mol_class = graph.attrib['class']
+ data.append(loadGXL(dirname_dataset + '/' + mol_filename))
+ y.append(mol_class)
+ elif extension == 'xml':
+ data, y = loadFromXML(filename, extra_params)
+ elif extension == "sdf":
+ import numpy as np
+ from tqdm import tqdm
+ import sys
+
+ data = loadSDF(filename)
+
+ y_raw = open(filename_y).read().splitlines()
+ y_raw.pop(0)
+ tmp0 = []
+ tmp1 = []
+ for i in range(0, len(y_raw)):
+ tmp = y_raw[i].split(',')
+ tmp0.append(tmp[0])
+ tmp1.append(tmp[1].strip())
+
+ y = []
+ for i in tqdm(range(0, len(data)), desc='adjust data', file=sys.stdout):
+ try:
+ y.append(tmp1[tmp0.index(data[i].name)].strip())
+ except ValueError: # if data[i].name not in tmp0
+ data[i] = []
+ data = list(filter(lambda a: a != [], data))
+ elif extension == "mat":
+ data, y = loadMAT(filename, extra_params)
+ elif extension == 'txt':
+ dirname_dataset = dirname(filename)
+ data, y = loadTXT(dirname_dataset)
+ # print(len(y))
+ # print(y)
+ # print(data[0].nodes(data=True))
+ # print('----')
+ # print(data[0].edges(data=True))
+ # for g in data:
+ # print(g.nodes(data=True))
+ # print('----')
+ # print(g.edges(data=True))
+
+ return data, y
+
+
+def loadFromXML(filename, extra_params):
+ import xml.etree.ElementTree as ET
+
+ if extra_params:
+ dirname_dataset = extra_params
+ else:
+ dirname_dataset = dirname(filename)
+ tree = ET.parse(filename)
+ root = tree.getroot()
+ data = []
+ y = []
+ for graph in root.iter('graph'):
+ mol_filename = graph.attrib['file']
+ mol_class = graph.attrib['class']
+ data.append(loadGXL(dirname_dataset + '/' + mol_filename))
+ y.append(mol_class)
+
+ return data, y
+
+
+def loadFromDS(filename, filename_y):
+ """Load data from .ds file.
+ Possible graph formats include:
+ '.ct': see function loadCT for detail.
+ '.gxl': see function loadGXL for detail.
+ Note these graph formats are checked automatically by the extensions of
+ graph files.
+ """
+ dirname_dataset = dirname(filename)
+ data = []
+ y = []
+ content = open(filename).read().splitlines()
+ extension = splitext(content[0].split(' ')[0])[1][1:]
+ if filename_y is None or filename_y == '':
+ if extension == 'ct':
+ for i in range(0, len(content)):
+ tmp = content[i].split(' ')
+ # remove the '#'s in file names
+ data.append(
+ loadCT(dirname_dataset + '/' + tmp[0].replace('#', '', 1)))
+ y.append(float(tmp[1]))
+ elif extension == 'gxl':
+ for i in range(0, len(content)):
+ tmp = content[i].split(' ')
+ # remove the '#'s in file names
+ data.append(
+ loadGXL(dirname_dataset + '/' + tmp[0].replace('#', '', 1)))
+ y.append(float(tmp[1]))
+ else: # y in a separate file
+ if extension == 'ct':
+ for i in range(0, len(content)):
+ tmp = content[i]
+ # remove the '#'s in file names
+ data.append(
+ loadCT(dirname_dataset + '/' + tmp.replace('#', '', 1)))
+ elif extension == 'gxl':
+ for i in range(0, len(content)):
+ tmp = content[i]
+ # remove the '#'s in file names
+ data.append(
+ loadGXL(dirname_dataset + '/' + tmp.replace('#', '', 1)))
+
+ content_y = open(filename_y).read().splitlines()
+ # assume entries in filename and filename_y have the same order.
+ for item in content_y:
+ tmp = item.split(' ')
+ # assume the 3rd entry in a line is y (for Alkane dataset)
+ y.append(float(tmp[2]))
+
+ return data, y
+
+
+def saveDataset(Gn, y, gformat='gxl', group=None, filename='gfile', xparams=None):
+ """Save list of graphs.
+ """
+ import os
+ dirname_ds = os.path.dirname(filename)
+ if dirname_ds != '':
+ dirname_ds += '/'
+ if not os.path.exists(dirname_ds) :
+ os.makedirs(dirname_ds)
+
+ if 'graph_dir' in xparams:
+ graph_dir = xparams['graph_dir'] + '/'
+ if not os.path.exists(graph_dir):
+ os.makedirs(graph_dir)
+ else:
+ graph_dir = dirname_ds
+
+ if group == 'xml' and gformat == 'gxl':
+ with open(filename + '.xml', 'w') as fgroup:
+ fgroup.write("<?xml version=\"1.0\"?>")
+ fgroup.write("\n<!DOCTYPE GraphCollection SYSTEM \"http://www.inf.unibz.it/~blumenthal/dtd/GraphCollection.dtd\">")
+ fgroup.write("\n<GraphCollection>")
+ for idx, g in enumerate(Gn):
+ fname_tmp = "graph" + str(idx) + ".gxl"
+ saveGXL(g, graph_dir + fname_tmp, method=xparams['method'])
+ fgroup.write("\n\t<graph file=\"" + fname_tmp + "\" class=\"" + str(y[idx]) + "\"/>")
+ fgroup.write("\n</GraphCollection>")
+ fgroup.close()
+
+
+if __name__ == '__main__':
+# ### Load dataset from .ds file.
+# # .ct files.
+# ds = {'name': 'Alkane', 'dataset': '../../datasets/Alkane/dataset.ds',
+# 'dataset_y': '../../datasets/Alkane/dataset_boiling_point_names.txt'}
+# Gn, y = loadDataset(ds['dataset'], filename_y=ds['dataset_y'])
+## ds = {'name': 'Acyclic', 'dataset': '../../datasets/acyclic/dataset_bps.ds'} # node symb
+## Gn, y = loadDataset(ds['dataset'])
+## ds = {'name': 'MAO', 'dataset': '../../datasets/MAO/dataset.ds'} # node/edge symb
+## Gn, y = loadDataset(ds['dataset'])
+## ds = {'name': 'PAH', 'dataset': '../../datasets/PAH/dataset.ds'} # unlabeled
+## Gn, y = loadDataset(ds['dataset'])
+# print(Gn[1].nodes(data=True))
+# print(Gn[1].edges(data=True))
+# print(y[1])
+
+# # .gxl file.
+# ds = {'name': 'monoterpenoides',
+# 'dataset': '../../datasets/monoterpenoides/dataset_10+.ds'} # node/edge symb
+# Gn, y = loadDataset(ds['dataset'])
+# print(Gn[1].nodes(data=True))
+# print(Gn[1].edges(data=True))
+# print(y[1])
+
+ ### Convert graph from one format to another.
+ # .gxl file.
+ import networkx as nx
+ ds = {'name': 'monoterpenoides',
+ 'dataset': '../../datasets/monoterpenoides/dataset_10+.ds'} # node/edge symb
+ Gn, y = loadDataset(ds['dataset'])
+ y = [int(i) for i in y]
+ print(Gn[1].nodes(data=True))
+ print(Gn[1].edges(data=True))
+ print(y[1])
+ # Convert a graph to the proper NetworkX format that can be recognized by library gedlib.
+ Gn_new = []
+ for G in Gn:
+ G_new = nx.Graph()
+ for nd, attrs in G.nodes(data=True):
+ G_new.add_node(str(nd), chem=attrs['atom'])
+ for nd1, nd2, attrs in G.edges(data=True):
+ G_new.add_edge(str(nd1), str(nd2), valence=attrs['bond_type'])
+# G_new.add_edge(str(nd1), str(nd2))
+ Gn_new.append(G_new)
+ print(Gn_new[1].nodes(data=True))
+ print(Gn_new[1].edges(data=True))
+ print(Gn_new[1])
+ filename = '/media/ljia/DATA/research-repo/codes/others/gedlib/tests_linlin/generated_datsets/monoterpenoides/gxl/monoterpenoides'
+ xparams = {'method': 'gedlib'}
+ saveDataset(Gn, y, gformat='gxl', group='xml', filename=filename, xparams=xparams)
+
+# ds = {'name': 'MUTAG', 'dataset': '../../datasets/MUTAG/MUTAG.mat',
+# 'extra_params': {'am_sp_al_nl_el': [0, 0, 3, 1, 2]}} # node/edge symb
+# Gn, y = loadDataset(ds['dataset'], extra_params=ds['extra_params'])
+# saveDataset(Gn, y, group='xml', filename='temp/temp')
+
+""" Functions for python system.
+"""
+
+def isNotebook():
+ """Check whether the code is being executed in an IPython/Jupyter notebook.
+ """
+ try:
+ shell = get_ipython().__class__.__name__
+ if shell == 'ZMQInteractiveShell':
+ return True # Jupyter notebook or qtconsole
+ elif shell == 'TerminalInteractiveShell':
+ return False # Terminal running IPython
+ else:
+ return False # Other type (?)
+ except NameError:
+ return False # Probably standard Python interpreter
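+
+# One possible use (a hedged sketch, not from the original source): pick a
+# progress-bar flavor depending on the execution environment.
+# from tqdm import tqdm, tqdm_notebook
+# progress = tqdm_notebook if isNotebook() else tqdm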
+
+"""Those who are not graph kernels. We can be kernels for nodes or edges!
+These kernels are defined between pairs of vectors.
+"""
+import numpy as np
+
+def deltakernel(x, y):
+ """Delta kernel. Return 1 if x == y, 0 otherwise.
+
+ Parameters
+ ----------
+ x, y : any
+ Two parts to compare.
+
+ Return
+ ------
+ kernel : integer
+ Delta kernel.
+
+ References
+ ----------
+ [1] H. Kashima, K. Tsuda, and A. Inokuchi. Marginalized kernels between
+ labeled graphs. In Proceedings of the 20th International Conference on
+ Machine Learning, Washington, DC, United States, 2003.
+ """
+ return x == y #(1 if condition else 0)
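+
+# Example (hedged): the boolean result behaves as 0/1 in arithmetic, e.g.
+# deltakernel('C', 'C') + deltakernel('C', 'O') evaluates to 1.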
+
+
+def gaussiankernel(x, y, gamma=None):
+ """Gaussian kernel.
+ Compute the rbf (gaussian) kernel between x and y:
+
+ K(x, y) = exp(-gamma ||x-y||^2).
+
+ Read more in the :ref:`User Guide <rbf_kernel>`.
+
+ Parameters
+ ----------
+ x, y : array
+
+ gamma : float, default None
+ If None, defaults to 1.0 / n_features
+
+ Returns
+ -------
+ kernel : float
+ """
+ if gamma is None:
+ gamma = 1.0 / len(x)
+
+ xt = np.array([float(itm) for itm in x])
+ yt = np.array([float(itm) for itm in y])
+ kernel = xt - yt
+ kernel = kernel ** 2
+ kernel = np.sum(kernel)
+ kernel *= -gamma
+ kernel = np.exp(kernel)
+ return kernel
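+
+# A quick numeric check (an illustration, not part of the original source):
+# with the default gamma = 1 / len(x) = 0.5 and ||x - y||^2 = 2, the kernel
+# equals exp(-0.5 * 2) = exp(-1).
+# gaussiankernel([0., 0.], [0., 0.])  # 1.0
+# gaussiankernel([0., 0.], [1., 1.])  # 0.36787944117144233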
+
+
+def polynomialkernel(x, y, d=1, c=0):
+ """Polynomial kernel.
+ Compute the polynomial kernel between x and y:
+
+ K(x, y) = <x, y> ^d + c.
+
+ Parameters
+ ----------
+ x, y : array
+
+ d : integer, default 1
+
+ c : float, default 0
+
+ Returns
+ -------
+ kernel : float
+ """
+ return np.dot(x, y) ** d + c
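+
+# Example: <[1, 2], [3, 4]> = 11, so with d=2 and c=3 the kernel is
+# 11 ** 2 + 3 = 124; this matches the call in __main__ at the bottom of
+# this module.
+# polynomialkernel([1, 2], [3, 4], d=2, c=3)  # 124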
+
+
+def linearkernel(x, y):
+ """Linear kernel.
+ Compute the linear kernel between x and y:
+
+ K(x, y) = <x, y>.
+
+ Parameters
+ ----------
+ x, y : array
+
+ Returns
+ -------
+ kernel : float
+ """
+ return np.dot(x, y)
+
+
+def kernelsum(k1, k2, d11, d12, d21=None, d22=None, lamda1=1, lamda2=1):
+ """Sum of a pair of kernels.
+
+ k = lamda1 * k1(d11, d12) + lamda2 * k2(d21, d22)
+
+ Parameters
+ ----------
+ k1, k2 : function
+ A pair of kernel functions.
+ d11, d12:
+ Inputs of k1. If d21 or d22 is None, apply d11, d12 to both k1 and k2.
+ d21, d22:
+ Inputs of k2.
+ lamda1, lamda2: float
+ Coefficients of the sum.
+
+ Return
+ ------
+ kernel : float
+
+ """
+ if d21 is None or d22 is None:
+ kernel = lamda1 * k1(d11, d12) + lamda2 * k2(d11, d12)
+ else:
+ kernel = lamda1 * k1(d11, d12) + lamda2 * k2(d21, d22)
+ return kernel
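+
+# A hedged combination sketch: sum a gaussian and a polynomial kernel
+# evaluated on the same pair of vectors (d21/d22 omitted), with the default
+# unit weights.
+# kernelsum(gaussiankernel, polynomialkernel, [1., 2.], [3., 4.])
+# # == gaussiankernel([1., 2.], [3., 4.]) + polynomialkernel([1., 2.], [3., 4.])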
+
+
+def kernelproduct(k1, k2, d11, d12, d21=None, d22=None, lamda=1):
+ """Product of a pair of kernels.
+
+ k = lamda * k1(d11, d12) * k2(d21, d22)
+
+ Parameters
+ ----------
+ k1, k2 : function
+ A pair of kernel functions.
+ d11, d12:
+ Inputs of k1. If d21 or d22 is None, apply d11, d12 to both k1 and k2.
+ d21, d22:
+ Inputs of k2.
+ lamda: float
+ Coefficient of the product.
+
+ Return
+ ------
+ kernel : float
+ """
+ if d21 is None or d22 is None:
+ kernel = lamda * k1(d11, d12) * k2(d11, d12)
+ else:
+ kernel = lamda * k1(d11, d12) * k2(d21, d22)
+ return kernel
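+
+# Example (hedged): multiply a delta kernel on symbolic labels by a gaussian
+# kernel on numeric attribute vectors, one way to combine both label types.
+# kernelproduct(deltakernel, gaussiankernel, 'C', 'C', [0., 0.], [1., 1.])
+# # == 1 * gaussiankernel([0., 0.], [1., 1.])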
+
+
+if __name__ == '__main__':
+ o = polynomialkernel([1, 2], [3, 4], 2, 3)
+
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Fri Nov 8 14:21:25 2019
+
+@author: ljia
+"""
+
+import sys
+import time
+
+class Logger(object):
+ def __init__(self):
+ self.terminal = sys.stdout
+ self.log = open("log." + str(time.time()) + ".log", "a")
+
+
+ def write(self, message):
+ # write to both the terminal and the log file; a standard "tee"
+ # pattern, assumed here because sys.stdout is replaced by a Logger
+ # instance below.
+ self.terminal.write(message)
+ self.log.write(message)
+
+ def flush(self):
+ #this flush method is needed for python 3 compatibility.
+ #this handles the flush command by doing nothing.
+ #you might want to specify some extra behavior here.
+ pass
+
+sys.stdout = Logger()
+
+import numpy as np
+import matplotlib
+matplotlib.use('Agg')
+from matplotlib import pyplot as plt
+from sklearn.kernel_ridge import KernelRidge
+from sklearn.svm import SVC
+from sklearn.metrics import accuracy_score, mean_squared_error
+from sklearn.model_selection import KFold, train_test_split, ParameterGrid
+
+#from joblib import Parallel, delayed
+from multiprocessing import Pool, Array
+from functools import partial
+import sys
+sys.path.insert(0, "../")
+import os
+import time
+import datetime
+#from os.path import basename, splitext
+from pygraph.utils.graphfiles import loadDataset
+from tqdm import tqdm
+
+#from memory_profiler import profile
+
+#@profile
+def model_selection_for_precomputed_kernel(datafile,
+ estimator,
+ param_grid_precomputed,
+ param_grid,
+ model_type,
+ NUM_TRIALS=30,
+ datafile_y=None,
+ extra_params=None,
+ ds_name='ds-unknown',
+ n_jobs=1,
+ read_gm_from_file=False,
+ verbose=True):
+ """Perform model selection, fitting and testing for precomputed kernels
+ using nested CV. Print out necessary data during the process, then finally
+ the results.
+
+ Parameters
+ ----------
+ datafile : string
+ Path of dataset file.
+ estimator : function
+ kernel function used to estimate. This function needs to return a gram matrix.
+ param_grid_precomputed : dictionary
+ Dictionary with names (string) of parameters used to calculate gram
+ matrices as keys and lists of parameter settings to try as values. This
+ enables searching over any sequence of parameter settings. Params with
+ length 1 will be omitted.
+ param_grid : dictionary
+ Dictionary with names (string) of parameters used as penalties as keys
+ and lists of parameter settings to try as values. This enables
+ searching over any sequence of parameter settings. Params with length 1
+ will be omitted.
+ model_type : string
+ Type of the problem, can be 'regression' or 'classification'.
+ NUM_TRIALS : integer
+ Number of random trials of outer cv loop. The default is 30.
+ datafile_y : string
+ Path of file storing y data. This parameter is optional depending on
+ the given dataset file.
+ extra_params : dict
+ Extra parameters for loading dataset. See function pygraph.utils.
+ graphfiles.loadDataset for detail.
+ ds_name : string
+ Name of the dataset.
+ n_jobs : int
+ Number of jobs for parallelization.
+ read_gm_from_file : boolean
+ Whether gram matrices are loaded from a file.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> import sys
+ >>> sys.path.insert(0, "../")
+ >>> from pygraph.utils.model_selection_precomputed import model_selection_for_precomputed_kernel
+ >>> from pygraph.kernels.untilHPathKernel import untilhpathkernel
+ >>>
+ >>> datafile = '../datasets/MUTAG/MUTAG_A.txt'
+ >>> estimator = untilhpathkernel
+ >>> param_grid_precomputed = {'depth': np.linspace(1, 10, 10), 'k_func':
+ ... ['MinMax', 'tanimoto'], 'compute_method': ['trie']}
+ >>> # 'C' for classification problems and 'alpha' for regression problems.
+ >>> param_grid = [{'C': np.logspace(-10, 10, num=41, base=10)}, {'alpha':
+ ... np.logspace(-10, 10, num=41, base=10)}]
+ >>>
+ >>> model_selection_for_precomputed_kernel(datafile, estimator,
+ ... param_grid_precomputed, param_grid[0], 'classification', ds_name='MUTAG')
+ """
+ tqdm.monitor_interval = 0
+
+ results_dir = '../notebooks/results/' + estimator.__name__
+ if not os.path.exists(results_dir):
+ os.makedirs(results_dir)
+ # a string to save all the results.
+ str_fw = '###################### log time: ' + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + '. ######################\n\n'
+ str_fw += '# This file contains results of ' + estimator.__name__ + ' on dataset ' + ds_name + ',\n# including gram matrices, serial numbers for gram matrix figures and performance.\n\n'
+
+ # setup the model type
+ model_type = model_type.lower()
+ if model_type != 'regression' and model_type != 'classification':
+ raise Exception(
+ 'The model type is incorrect! Please choose from regression or classification.'
+ )
+ if verbose:
+ print()
+ print('--- This is a %s problem ---' % model_type)
+ str_fw += 'This is a %s problem.\n' % model_type
+
+ # calculate gram matrices rather than read them from file.
+ if read_gm_from_file == False:
+ # Load the dataset
+ if verbose:
+ print()
+ print('\n1. Loading dataset from file...')
+ if isinstance(datafile, str):
+ dataset, y_all = loadDataset(
+ datafile, filename_y=datafile_y, extra_params=extra_params)
+ else: # load data directly from variable.
+ dataset = datafile
+ y_all = datafile_y
+
+ # import matplotlib.pyplot as plt
+ # import networkx as nx
+ # nx.draw_networkx(dataset[30])
+ # plt.show()
+
+ # Grid of parameters with a discrete number of values for each.
+ param_list_precomputed = list(ParameterGrid(param_grid_precomputed))
+ param_list = list(ParameterGrid(param_grid))
+
+ gram_matrices = [
+ ] # a list to store gram matrices for all param_grid_precomputed
+ gram_matrix_time = [
+ ] # a list to store time to calculate gram matrices
+ param_list_pre_revised = [
+ ] # list to store param grids precomputed ignoring the useless ones
+
+ # calculate all gram matrices
+ if verbose:
+ print()
+ print('2. Calculating gram matrices. This could take a while...')
+ str_fw += '\nII. Gram matrices.\n\n'
+ tts = time.time() # start training time
+ nb_gm_ignore = 0 # the number of gram matrices that should be ignored, as they may contain non-numeric elements (NaN)
+ for idx, params_out in enumerate(param_list_precomputed):
+ y = y_all[:]
+ params_out['n_jobs'] = n_jobs
+ params_out['verbose'] = verbose
+# print(dataset)
+# import networkx as nx
+# nx.draw_networkx(dataset[1])
+# plt.show()
+ rtn_data = estimator(dataset[:], **params_out)
+ Kmatrix = rtn_data[0]
+ current_run_time = rtn_data[1]
+ # for some kernels, some graphs in datasets may not meet the
+ # kernels' requirements for graph structure. These graphs are trimmed.
+ if len(rtn_data) == 3:
+ idx_trim = rtn_data[2] # the index of trimmed graph list
+ y = [y[idxt] for idxt in idx_trim] # trim y accordingly
+# Kmatrix = np.random.rand(2250, 2250)
+# current_run_time = 0.1
+
+ # remove graphs whose kernels with themselves are zeros
+ # @todo: y not changed accordingly?
+ Kmatrix_diag = Kmatrix.diagonal().copy()
+ nb_g_ignore = 0
+ for idxk, diag in enumerate(Kmatrix_diag):
+ if diag == 0:
+ Kmatrix = np.delete(Kmatrix, (idxk - nb_g_ignore), axis=0)
+ Kmatrix = np.delete(Kmatrix, (idxk - nb_g_ignore), axis=1)
+ nb_g_ignore += 1
+ # normalization
+ # @todo: works only for undirected graph?
+ Kmatrix_diag = Kmatrix.diagonal().copy()
+ for i in range(len(Kmatrix)):
+ for j in range(i, len(Kmatrix)):
+ Kmatrix[i][j] /= np.sqrt(Kmatrix_diag[i] * Kmatrix_diag[j])
+ Kmatrix[j][i] = Kmatrix[i][j]
+ if verbose:
+ print()
+ if params_out == {}:
+ if verbose:
+ print('the gram matrix is: ')
+ str_fw += 'the gram matrix is:\n\n'
+ else:
+ if verbose:
+ print('the gram matrix with parameters', params_out, 'is: \n\n')
+ str_fw += 'the gram matrix with parameters %s is:\n\n' % params_out
+ if len(Kmatrix) < 2:
+ nb_gm_ignore += 1
+ if verbose:
+ print('ignored, as at most one of its diagonal values is non-zero.')
+ str_fw += 'ignored, as at most one of its diagonal values is non-zero.\n\n'
+ else:
+ if np.isnan(Kmatrix).any(
+ ): # if the matrix contains elements that are not numbers
+ nb_gm_ignore += 1
+ if verbose:
+ print('ignored, as it contains elements that are not numbers.')
+ str_fw += 'ignored, as it contains elements that are not numbers.\n\n'
+ else:
+# print(Kmatrix)
+ str_fw += np.array2string(
+ Kmatrix,
+ separator=',') + '\n\n'
+# separator=',',
+# threshold=np.inf,
+# floatmode='unique') + '\n\n'
+
+ fig_file_name = results_dir + '/GM[ds]' + ds_name
+ if params_out != {}:
+ fig_file_name += '[params]' + str(idx)
+ plt.imshow(Kmatrix)
+ plt.colorbar()
+ plt.savefig(fig_file_name + '.eps', format='eps', dpi=300)
+# plt.show()
+ plt.clf()
+ gram_matrices.append(Kmatrix)
+ gram_matrix_time.append(current_run_time)
+ param_list_pre_revised.append(params_out)
+ if nb_g_ignore > 0:
+ if verbose:
+ print(', where %d graphs are ignored as their graph kernels with themselves are zeros.' % nb_g_ignore)
+ str_fw += ', where %d graphs are ignored as their graph kernels with themselves are zeros.' % nb_g_ignore
+ if verbose:
+ print()
+ print(
+ '{} gram matrices are calculated, {} of which are ignored.'.format(
+ len(param_list_precomputed), nb_gm_ignore))
+ str_fw += '{} gram matrices are calculated, {} of which are ignored.\n\n'.format(len(param_list_precomputed), nb_gm_ignore)
+ str_fw += 'serial numbers of gram matrix figures and their corresponding parameters settings:\n\n'
+ str_fw += ''.join([
+ '{}: {}\n'.format(idx, params_out)
+ for idx, params_out in enumerate(param_list_precomputed)
+ ])
+
+ if verbose:
+ print()
+ if len(gram_matrices) == 0:
+ if verbose:
+ print('all gram matrices are ignored, no results obtained.')
+ str_fw += '\nall gram matrices are ignored, no results obtained.\n\n'
+ else:
+ # save gram matrices to file.
+# np.savez(results_dir + '/' + ds_name + '.gm',
+# gms=gram_matrices, params=param_list_pre_revised, y=y,
+# gmtime=gram_matrix_time)
+ if verbose:
+ print(
+ '3. Fitting and predicting using nested cross validation. This could really take a while...'
+ )
+
+ # ---- use pool.imap_unordered to parallel and track progress. ----
+# train_pref = []
+# val_pref = []
+# test_pref = []
+# def func_assign(result, var_to_assign):
+# for idx, itm in enumerate(var_to_assign):
+# itm.append(result[idx])
+# trial_do_partial = partial(trial_do, param_list_pre_revised, param_list, y, model_type)
+#
+# parallel_me(trial_do_partial, range(NUM_TRIALS), func_assign,
+# [train_pref, val_pref, test_pref], glbv=gram_matrices,
+# method='imap_unordered', n_jobs=n_jobs, chunksize=1,
+# itr_desc='cross validation')
+
+ def init_worker(gms_toshare):
+ global G_gms
+ G_gms = gms_toshare
+
+# gram_matrices = np.array(gram_matrices)
+# gms_shape = gram_matrices.shape
+# gms_array = Array('d', np.reshape(gram_matrices.copy(), -1, order='C'))
+# pool = Pool(processes=n_jobs, initializer=init_worker, initargs=(gms_array, gms_shape))
+ pool = Pool(processes=n_jobs, initializer=init_worker, initargs=(gram_matrices,))
+ trial_do_partial = partial(parallel_trial_do, param_list_pre_revised, param_list, y, model_type)
+ train_pref = []
+ val_pref = []
+ test_pref = []
+# if NUM_TRIALS < 1000 * n_jobs:
+# chunksize = int(NUM_TRIALS / n_jobs) + 1
+# else:
+# chunksize = 1000
+ chunksize = 1
+ if verbose:
+ iterator = tqdm(pool.imap_unordered(trial_do_partial,
+ range(NUM_TRIALS), chunksize), desc='cross validation', file=sys.stdout)
+ else:
+ iterator = pool.imap_unordered(trial_do_partial, range(NUM_TRIALS), chunksize)
+ for o1, o2, o3 in iterator:
+ train_pref.append(o1)
+ val_pref.append(o2)
+ test_pref.append(o3)
+ pool.close()
+ pool.join()
+
+# # ---- use pool.map to parallel. ----
+# pool = Pool(n_jobs)
+# trial_do_partial = partial(trial_do, param_list_pre_revised, param_list, gram_matrices, y[0:250], model_type)
+# result_perf = pool.map(trial_do_partial, range(NUM_TRIALS))
+# train_pref = [item[0] for item in result_perf]
+# val_pref = [item[1] for item in result_perf]
+# test_pref = [item[2] for item in result_perf]
+
+# # ---- direct running, normally use a single CPU core. ----
+# train_pref = []
+# val_pref = []
+# test_pref = []
+# for i in tqdm(range(NUM_TRIALS), desc='cross validation', file=sys.stdout):
+# o1, o2, o3 = trial_do(param_list_pre_revised, param_list, gram_matrices, y, model_type, i)
+# train_pref.append(o1)
+# val_pref.append(o2)
+# test_pref.append(o3)
+# print()
+
+ if verbose:
+ print()
+ print('4. Getting final performance...')
+ str_fw += '\nIII. Performance.\n\n'
+ # averages and confidences of performances on outer trials for each combination of parameters
+ average_train_scores = np.mean(train_pref, axis=0)
+# print('val_pref: ', val_pref[0][0])
+ average_val_scores = np.mean(val_pref, axis=0)
+# print('test_pref: ', test_pref[0][0])
+ average_perf_scores = np.mean(test_pref, axis=0)
+ # sample std is used here
+ std_train_scores = np.std(train_pref, axis=0, ddof=1)
+ std_val_scores = np.std(val_pref, axis=0, ddof=1)
+ std_perf_scores = np.std(test_pref, axis=0, ddof=1)
+
+ if model_type == 'regression':
+ best_val_perf = np.amin(average_val_scores)
+ else:
+ best_val_perf = np.amax(average_val_scores)
+# print('average_val_scores: ', average_val_scores)
+# print('best_val_perf: ', best_val_perf)
+# print()
+ best_params_index = np.where(average_val_scores == best_val_perf)
+ # find smallest val std with best val perf.
+ best_val_stds = [
+ std_val_scores[value][best_params_index[1][idx]]
+ for idx, value in enumerate(best_params_index[0])
+ ]
+ min_val_std = np.amin(best_val_stds)
+ best_params_index = np.where(std_val_scores == min_val_std)
+ best_params_out = [
+ param_list_pre_revised[i] for i in best_params_index[0]
+ ]
+ best_params_in = [param_list[i] for i in best_params_index[1]]
+ if verbose:
+ print('best_params_out: ', best_params_out)
+ print('best_params_in: ', best_params_in)
+ print()
+ print('best_val_perf: ', best_val_perf)
+ print('best_val_std: ', min_val_std)
+ str_fw += 'best settings of hyper-params to build gram matrix: %s\n' % best_params_out
+ str_fw += 'best settings of other hyper-params: %s\n\n' % best_params_in
+ str_fw += 'best_val_perf: %s\n' % best_val_perf
+ str_fw += 'best_val_std: %s\n' % min_val_std
+
+# print(best_params_index)
+# print(best_params_index[0])
+# print(average_perf_scores)
+ final_performance = [
+ average_perf_scores[value][best_params_index[1][idx]]
+ for idx, value in enumerate(best_params_index[0])
+ ]
+ final_confidence = [
+ std_perf_scores[value][best_params_index[1][idx]]
+ for idx, value in enumerate(best_params_index[0])
+ ]
+ if verbose:
+ print('final_performance: ', final_performance)
+ print('final_confidence: ', final_confidence)
+ str_fw += 'final_performance: %s\n' % final_performance
+ str_fw += 'final_confidence: %s\n' % final_confidence
+ train_performance = [
+ average_train_scores[value][best_params_index[1][idx]]
+ for idx, value in enumerate(best_params_index[0])
+ ]
+ train_std = [
+ std_train_scores[value][best_params_index[1][idx]]
+ for idx, value in enumerate(best_params_index[0])
+ ]
+ if verbose:
+ print('train_performance: %s' % train_performance)
+ print('train_std: ', train_std)
+ str_fw += 'train_performance: %s\n' % train_performance
+ str_fw += 'train_std: %s\n\n' % train_std
+
+ if verbose:
+ print()
+ tt_total = time.time() - tts # training time for all hyper-parameters
+ average_gram_matrix_time = np.mean(gram_matrix_time)
+ std_gram_matrix_time = np.std(gram_matrix_time, ddof=1) if len(gram_matrix_time) > 1 else 0
+ best_gram_matrix_time = [
+ gram_matrix_time[i] for i in best_params_index[0]
+ ]
+ ave_bgmt = np.mean(best_gram_matrix_time)
+ std_bgmt = np.std(best_gram_matrix_time, ddof=1) if len(best_gram_matrix_time) > 1 else 0
+ if verbose:
+ print('time to calculate gram matrix with different hyper-params: {:.2f}±{:.2f}s'
+ .format(average_gram_matrix_time, std_gram_matrix_time))
+ print('time to calculate best gram matrix: {:.2f}±{:.2f}s'.format(
+ ave_bgmt, std_bgmt))
+ print('total training time with all hyper-param choices: {:.2f}s'.format(
+ tt_total))
+ str_fw += 'time to calculate gram matrix with different hyper-params: {:.2f}±{:.2f}s\n'.format(average_gram_matrix_time, std_gram_matrix_time)
+ str_fw += 'time to calculate best gram matrix: {:.2f}±{:.2f}s\n'.format(ave_bgmt, std_bgmt)
+ str_fw += 'total training time with all hyper-param choices: {:.2f}s\n\n'.format(tt_total)
+
+ # # save results to file
+ # np.savetxt(results_name_pre + 'average_train_scores.dt',
+ # average_train_scores)
+ # np.savetxt(results_name_pre + 'average_val_scores', average_val_scores)
+ # np.savetxt(results_name_pre + 'average_perf_scores.dt',
+ # average_perf_scores)
+ # np.savetxt(results_name_pre + 'std_train_scores.dt', std_train_scores)
+ # np.savetxt(results_name_pre + 'std_val_scores.dt', std_val_scores)
+ # np.savetxt(results_name_pre + 'std_perf_scores.dt', std_perf_scores)
+
+ # np.save(results_name_pre + 'best_params_index', best_params_index)
+ # np.save(results_name_pre + 'best_params_pre.dt', best_params_out)
+ # np.save(results_name_pre + 'best_params_in.dt', best_params_in)
+ # np.save(results_name_pre + 'best_val_perf.dt', best_val_perf)
+ # np.save(results_name_pre + 'best_val_std.dt', best_val_std)
+ # np.save(results_name_pre + 'final_performance.dt', final_performance)
+ # np.save(results_name_pre + 'final_confidence.dt', final_confidence)
+ # np.save(results_name_pre + 'train_performance.dt', train_performance)
+ # np.save(results_name_pre + 'train_std.dt', train_std)
+
+ # np.save(results_name_pre + 'gram_matrix_time.dt', gram_matrix_time)
+ # np.save(results_name_pre + 'average_gram_matrix_time.dt',
+ # average_gram_matrix_time)
+ # np.save(results_name_pre + 'std_gram_matrix_time.dt',
+ # std_gram_matrix_time)
+ # np.save(results_name_pre + 'best_gram_matrix_time.dt',
+ # best_gram_matrix_time)
+
+ # read gram matrices from file.
+ else:
+ # Grid of parameters with a discrete number of values for each.
+# param_list_precomputed = list(ParameterGrid(param_grid_precomputed))
+ param_list = list(ParameterGrid(param_grid))
+
+ # read gram matrices from file.
+ if verbose:
+ print()
+ print('2. Reading gram matrices from file...')
+ str_fw += '\nII. Gram matrices.\n\nGram matrices are read from file, see last log for detail.\n'
+ gmfile = np.load(results_dir + '/' + ds_name + '.gm.npz')
+ gram_matrices = gmfile['gms'] # a list to store gram matrices for all param_grid_precomputed
+ gram_matrix_time = gmfile['gmtime'] # time used to compute the gram matrices
+ param_list_pre_revised = gmfile['params'] # list to store param grids precomputed ignoring the useless ones
+ y = gmfile['y'].tolist()
+
+ tts = time.time() # start training time
+# nb_gm_ignore = 0 # the number of gram matrices those should not be considered, as they may contain elements that are not numbers (NaN)
+ if verbose:
+ print(
+ '3. Fitting and predicting using nested cross validation. This could really take a while...'
+ )
+
+ # ---- use pool.imap_unordered to parallel and track progress. ----
+ def init_worker(gms_toshare):
+ global G_gms
+ G_gms = gms_toshare
+
+ pool = Pool(processes=n_jobs, initializer=init_worker, initargs=(gram_matrices,))
+ trial_do_partial = partial(parallel_trial_do, param_list_pre_revised, param_list, y, model_type)
+ train_pref = []
+ val_pref = []
+ test_pref = []
+ chunksize = 1
+ if verbose:
+ iterator = tqdm(pool.imap_unordered(trial_do_partial,
+ range(NUM_TRIALS), chunksize), desc='cross validation', file=sys.stdout)
+ else:
+ iterator = pool.imap_unordered(trial_do_partial, range(NUM_TRIALS), chunksize)
+ for o1, o2, o3 in iterator:
+ train_pref.append(o1)
+ val_pref.append(o2)
+ test_pref.append(o3)
+ pool.close()
+ pool.join()
+
+ # # ---- use pool.map to parallel. ----
+ # result_perf = pool.map(trial_do_partial, range(NUM_TRIALS))
+ # train_pref = [item[0] for item in result_perf]
+ # val_pref = [item[1] for item in result_perf]
+ # test_pref = [item[2] for item in result_perf]
+
+ # # ---- use joblib.Parallel to parallel and track progress. ----
+ # trial_do_partial = partial(trial_do, param_list_pre_revised, param_list, gram_matrices, y, model_type)
+ # result_perf = Parallel(n_jobs=n_jobs, verbose=10)(delayed(trial_do_partial)(trial) for trial in range(NUM_TRIALS))
+ # train_pref = [item[0] for item in result_perf]
+ # val_pref = [item[1] for item in result_perf]
+ # test_pref = [item[2] for item in result_perf]
+
+# # ---- direct running, normally use a single CPU core. ----
+# train_pref = []
+# val_pref = []
+# test_pref = []
+# for i in tqdm(range(NUM_TRIALS), desc='cross validation', file=sys.stdout):
+# o1, o2, o3 = trial_do(param_list_pre_revised, param_list, gram_matrices, y, model_type, i)
+# train_pref.append(o1)
+# val_pref.append(o2)
+# test_pref.append(o3)
+
+ if verbose:
+ print()
+ print('4. Getting final performance...')
+ str_fw += '\nIII. Performance.\n\n'
+ # averages and confidences of performances on outer trials for each combination of parameters
+ average_train_scores = np.mean(train_pref, axis=0)
+ average_val_scores = np.mean(val_pref, axis=0)
+ average_perf_scores = np.mean(test_pref, axis=0)
+ # sample std is used here
+ std_train_scores = np.std(train_pref, axis=0, ddof=1)
+ std_val_scores = np.std(val_pref, axis=0, ddof=1)
+ std_perf_scores = np.std(test_pref, axis=0, ddof=1)
+
+ if model_type == 'regression':
+ best_val_perf = np.amin(average_val_scores)
+ else:
+ best_val_perf = np.amax(average_val_scores)
+ best_params_index = np.where(average_val_scores == best_val_perf)
+ # find smallest val std with best val perf.
+ best_val_stds = [
+ std_val_scores[value][best_params_index[1][idx]]
+ for idx, value in enumerate(best_params_index[0])
+ ]
+ min_val_std = np.amin(best_val_stds)
+ best_params_index = np.where(std_val_scores == min_val_std)
+ best_params_out = [
+ param_list_pre_revised[i] for i in best_params_index[0]
+ ]
+ best_params_in = [param_list[i] for i in best_params_index[1]]
+ if verbose:
+ print('best_params_out: ', best_params_out)
+ print('best_params_in: ', best_params_in)
+ print()
+ print('best_val_perf: ', best_val_perf)
+ print('best_val_std: ', min_val_std)
+ str_fw += 'best settings of hyper-params to build gram matrix: %s\n' % best_params_out
+ str_fw += 'best settings of other hyper-params: %s\n\n' % best_params_in
+ str_fw += 'best_val_perf: %s\n' % best_val_perf
+ str_fw += 'best_val_std: %s\n' % min_val_std
+
+ final_performance = [
+ average_perf_scores[value][best_params_index[1][idx]]
+ for idx, value in enumerate(best_params_index[0])
+ ]
+ final_confidence = [
+ std_perf_scores[value][best_params_index[1][idx]]
+ for idx, value in enumerate(best_params_index[0])
+ ]
+ if verbose:
+ print('final_performance: ', final_performance)
+ print('final_confidence: ', final_confidence)
+ str_fw += 'final_performance: %s\n' % final_performance
+ str_fw += 'final_confidence: %s\n' % final_confidence
+ train_performance = [
+ average_train_scores[value][best_params_index[1][idx]]
+ for idx, value in enumerate(best_params_index[0])
+ ]
+ train_std = [
+ std_train_scores[value][best_params_index[1][idx]]
+ for idx, value in enumerate(best_params_index[0])
+ ]
+ if verbose:
+ print('train_performance: %s' % train_performance)
+ print('train_std: ', train_std)
+ str_fw += 'train_performance: %s\n' % train_performance
+ str_fw += 'train_std: %s\n\n' % train_std
+
+ if verbose:
+ print()
+ average_gram_matrix_time = np.mean(gram_matrix_time)
+ std_gram_matrix_time = np.std(gram_matrix_time, ddof=1) if len(gram_matrix_time) > 1 else 0
+ best_gram_matrix_time = [
+ gram_matrix_time[i] for i in best_params_index[0]
+ ]
+ ave_bgmt = np.mean(best_gram_matrix_time)
+ std_bgmt = np.std(best_gram_matrix_time, ddof=1) if len(best_gram_matrix_time) > 1 else 0
+ if verbose:
+ print(
+ 'time to calculate gram matrix with different hyper-params: {:.2f}±{:.2f}s'
+ .format(average_gram_matrix_time, std_gram_matrix_time))
+ print('time to calculate best gram matrix: {:.2f}±{:.2f}s'.format(
+ ave_bgmt, std_bgmt))
+ tt_poster = time.time() - tts # training time over the hyper-param choices that did not take part in computing the gram matrices
+ if verbose:
+ print(
+ 'training time over hyper-param choices that did not take part in computing the gram matrices: {:.2f}s'.format(
+ tt_poster))
+ print('total training time over all hyper-param choices: {:.2f}s'.format(
+ tt_poster + np.sum(gram_matrix_time)))
+# str_fw += 'time to calculate gram matrix with different hyper-params: {:.2f}±{:.2f}s\n'.format(average_gram_matrix_time, std_gram_matrix_time)
+# str_fw += 'time to calculate best gram matrix: {:.2f}±{:.2f}s\n'.format(ave_bgmt, std_bgmt)
+ str_fw += 'training time over hyper-param choices that did not take part in computing the gram matrices: {:.2f}s\n\n'.format(tt_poster)
+
+ # create the results directory if it does not exist yet.
+ if not os.path.exists(results_dir):
+ os.makedirs(results_dir)
+
+ # print out results as table.
+ str_fw += printResultsInTable(param_list, param_list_pre_revised, average_val_scores,
+ std_val_scores, average_perf_scores, std_perf_scores,
+ average_train_scores, std_train_scores, gram_matrix_time,
+ model_type, verbose)
+
+ # open file to save all results for this dataset.
+ if not os.path.exists(results_dir + '/' + ds_name + '.output.txt'):
+ with open(results_dir + '/' + ds_name + '.output.txt', 'w') as f:
+ f.write(str_fw)
+ else:
+ with open(results_dir + '/' + ds_name + '.output.txt', 'r+') as f:
+ content = f.read()
+ f.seek(0, 0)
+ f.write(str_fw + '\n\n\n' + content)
+
+
+[docs]def trial_do(param_list_pre_revised, param_list, gram_matrices, y, model_type, trial): # Test set level
+
+# # get gram matrices from global variables.
+# gram_matrices = np.reshape(G_gms.copy(), G_gms_shape, order='C')
+
+ # Arrays to store scores
+ train_pref = np.zeros((len(param_list_pre_revised), len(param_list)))
+ val_pref = np.zeros((len(param_list_pre_revised), len(param_list)))
+ test_pref = np.zeros((len(param_list_pre_revised), len(param_list)))
+
+ # Randomness is added to the seeds of the split function below. "high"
+ # is "size" times 10 so that at least 10 different random outputs can
+ # be yielded. Remove these lines if identical outputs are required.
+ rdm_out = np.random.RandomState(seed=None)
+ rdm_seed_out_l = rdm_out.uniform(high=len(param_list_pre_revised) * 10,
+ size=len(param_list_pre_revised))
+# print(trial, rdm_seed_out_l)
+# print()
+ # loop for each outer param tuple
+ for index_out, params_out in enumerate(param_list_pre_revised):
+ # get gram matrices from global variables.
+# gm_now = G_gms[index_out * G_gms_shape[1] * G_gms_shape[2]:(index_out + 1) * G_gms_shape[1] * G_gms_shape[2]]
+# gm_now = np.reshape(gm_now.copy(), (G_gms_shape[1], G_gms_shape[2]), order='C')
+ gm_now = gram_matrices[index_out].copy()
+
+ # split gram matrix and y to app and test sets.
+ indices = range(len(y))
+ # The argument "random_state" of "train_test_split" cannot be set to
+ # None: it would then fall back to the global RandomState instance used
+ # by np.random, and subprocesses forked at the same time could inherit
+ # the same seed, producing identical random variates across
+ # subprocesses. Instead, the "trial" and "index_out" parameters are
+ # combined to generate a distinct seed for each trial/subprocess and
+ # outer loop. "rdm_seed_out_l" adds extra randomness to the seeds so
+ # that every run of the program yields a different output; to get
+ # identical outputs every time, remove the second line below. The same
+ # method is applied to the "KFold" function in the inner loop.
+ rdm_seed_out = (trial + 1) * (index_out + 1)
+ rdm_seed_out = (rdm_seed_out + int(rdm_seed_out_l[index_out])) % (2 ** 32 - 1)
+# print(trial, rdm_seed_out)
+ X_app, X_test, y_app, y_test, idx_app, idx_test = train_test_split(
+ gm_now, y, indices, test_size=0.1,
+ random_state=rdm_seed_out, shuffle=True)
+# print(trial, idx_app, idx_test)
+# print()
+ X_app = X_app[:, idx_app]
+ X_test = X_test[:, idx_app]
+ y_app = np.array(y_app)
+ y_test = np.array(y_test)
+
+ rdm_seed_in_l = rdm_out.uniform(high=len(param_list) * 10,
+ size=len(param_list))
+ # loop for each inner param tuple
+ for index_in, params_in in enumerate(param_list):
+# if trial == 0:
+# print(index_out, index_in)
+# print('params_in: ', params_in)
+# st = time.time()
+ rdm_seed_in = (trial + 1) * (index_out + 1) * (index_in + 1)
+# print("rdm_seed_in1: ", trial, index_in, rdm_seed_in)
+ rdm_seed_in = (rdm_seed_in + int(rdm_seed_in_l[index_in])) % (2 ** 32 - 1)
+# print("rdm_seed_in2: ", trial, index_in, rdm_seed_in)
+ inner_cv = KFold(n_splits=10, shuffle=True, random_state=rdm_seed_in)
+ current_train_perf = []
+ current_valid_perf = []
+ current_test_perf = []
+
+ # For regression use the Kernel Ridge method
+# try:
+ if model_type == 'regression':
+ kr = KernelRidge(kernel='precomputed', **params_in)
+ # loop over the splits at the validation-set level
+ for train_index, valid_index in inner_cv.split(X_app):
+# print("train_index, valid_index: ", trial, index_in, train_index, valid_index)
+# if trial == 0:
+# print('train_index: ', train_index)
+# print('valid_index: ', valid_index)
+# print('idx_test: ', idx_test)
+# print('y_app[train_index]: ', y_app[train_index])
+# print('X_app[train_index, :][:, train_index]: ', X_app[train_index, :][:, train_index])
+# print('X_app[valid_index, :][:, train_index]: ', X_app[valid_index, :][:, train_index])
+ kr.fit(X_app[train_index, :][:, train_index],
+ y_app[train_index])
+
+ # predict on the train, validation and test set
+ y_pred_train = kr.predict(
+ X_app[train_index, :][:, train_index])
+ y_pred_valid = kr.predict(
+ X_app[valid_index, :][:, train_index])
+# if trial == 0:
+# print('y_pred_valid: ', y_pred_valid)
+# print()
+ y_pred_test = kr.predict(
+ X_test[:, train_index])
+
+ # root mean squared errors
+ current_train_perf.append(
+ np.sqrt(
+ mean_squared_error(
+ y_app[train_index], y_pred_train)))
+ current_valid_perf.append(
+ np.sqrt(
+ mean_squared_error(
+ y_app[valid_index], y_pred_valid)))
+# if trial == 0:
+# print(mean_squared_error(
+# y_app[valid_index], y_pred_valid))
+ current_test_perf.append(
+ np.sqrt(
+ mean_squared_error(
+ y_test, y_pred_test)))
+ # For classification use SVM
+ else:
+ svc = SVC(kernel='precomputed', cache_size=200,
+ verbose=False, **params_in)
+ # loop over the splits at the validation-set level
+ for train_index, valid_index in inner_cv.split(X_app):
+# np.savez("bug.npy",X_app[train_index, :][:, train_index],y_app[train_index])
+# if trial == 0:
+# print('train_index: ', train_index)
+# print('valid_index: ', valid_index)
+# print('idx_test: ', idx_test)
+# print('y_app[train_index]: ', y_app[train_index])
+# print('X_app[train_index, :][:, train_index]: ', X_app[train_index, :][:, train_index])
+# print('X_app[valid_index, :][:, train_index]: ', X_app[valid_index, :][:, train_index])
+ svc.fit(X_app[train_index, :][:, train_index],
+ y_app[train_index])
+
+ # predict on the train, validation and test set
+ y_pred_train = svc.predict(
+ X_app[train_index, :][:, train_index])
+ y_pred_valid = svc.predict(
+ X_app[valid_index, :][:, train_index])
+ y_pred_test = svc.predict(
+ X_test[:, train_index])
+
+ # classification accuracies
+ current_train_perf.append(
+ accuracy_score(y_app[train_index],
+ y_pred_train))
+ current_valid_perf.append(
+ accuracy_score(y_app[valid_index],
+ y_pred_valid))
+ current_test_perf.append(
+ accuracy_score(y_test, y_pred_test))
+# except ValueError:
+# print(sys.exc_info()[0])
+# print(params_out, params_in)
+
+ # average performance on inner splits
+ train_pref[index_out][index_in] = np.mean(
+ current_train_perf)
+ val_pref[index_out][index_in] = np.mean(
+ current_valid_perf)
+ test_pref[index_out][index_in] = np.mean(
+ current_test_perf)
+# print(time.time() - st)
+# if trial == 0:
+# print('val_pref: ', val_pref)
+# print('test_pref: ', test_pref)
+
+ return train_pref, val_pref, test_pref
+
+[docs]def parallel_trial_do(param_list_pre_revised, param_list, y, model_type, trial):
+ train_pref, val_pref, test_pref = trial_do(param_list_pre_revised,
+ param_list, G_gms, y,
+ model_type, trial)
+ return train_pref, val_pref, test_pref
+
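+# A minimal sketch (illustrative names, not part of the library) of the
+# "share-by-initializer" pattern used in the parallel section above: a
+# large read-only list of gram matrices is handed to each worker once
+# via the Pool initializer instead of being pickled with every task.
+# It assumes np and Pool are available, as in the rest of this module.
+def _init_worker_gms_example(gms_toshare):
+ global G_gms_example
+ G_gms_example = gms_toshare
+
+def _trace_of_example(idx):
+ return np.trace(G_gms_example[idx])
+
+if __name__ == '__main__':
+ example_gms = [np.eye(3) * k for k in range(1, 4)]
+ pool = Pool(processes=2, initializer=_init_worker_gms_example,
+ initargs=(example_gms,))
+ # imap_unordered yields results in completion order; sort for display.
+ print(sorted(pool.imap_unordered(_trace_of_example, range(3)))) # [3.0, 6.0, 9.0]
+ pool.close()
+ pool.join()
+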
+
+[docs]def compute_gram_matrices(dataset, y, estimator, param_list_precomputed,
+ results_dir, ds_name,
+ n_jobs=1, str_fw='', verbose=True):
+ gram_matrices = [
+ ] # one gram matrix per precomputed-param setting
+ gram_matrix_time = [
+ ] # time used to compute each gram matrix
+ param_list_pre_revised = [
+ ] # the precomputed param grids, with the useless ones removed
+
+ nb_gm_ignore = 0 # the number of gram matrices that should not be considered, as they may contain non-numeric elements (NaN)
+ for idx, params_out in enumerate(param_list_precomputed):
+ params_out['n_jobs'] = n_jobs
+# print(dataset)
+# import networkx as nx
+# nx.draw_networkx(dataset[1])
+# plt.show()
+ rtn_data = estimator(dataset[:], **params_out)
+ Kmatrix = rtn_data[0]
+ current_run_time = rtn_data[1]
+ # For some kernels, some graphs in the dataset may not meet the
+ # kernel's requirements for graph structure; these graphs are trimmed.
+ if len(rtn_data) == 3:
+ idx_trim = rtn_data[2] # the index of trimmed graph list
+ y = [y[idxt] for idxt in idx_trim] # trim y accordingly
+
+ Kmatrix_diag = Kmatrix.diagonal().copy()
+ # remove graphs whose kernels with themselves are zero
+ nb_g_ignore = 0
+ for idxk, diag in enumerate(Kmatrix_diag):
+ if diag == 0:
+ Kmatrix = np.delete(Kmatrix, (idxk - nb_g_ignore), axis=0)
+ Kmatrix = np.delete(Kmatrix, (idxk - nb_g_ignore), axis=1)
+ nb_g_ignore += 1
+ # normalization; re-extract the diagonal first, so that the indices
+ # still match the matrix if rows/columns were deleted above.
+ Kmatrix_diag = Kmatrix.diagonal().copy()
+ for i in range(len(Kmatrix)):
+ for j in range(i, len(Kmatrix)):
+ Kmatrix[i][j] /= np.sqrt(Kmatrix_diag[i] * Kmatrix_diag[j])
+ Kmatrix[j][i] = Kmatrix[i][j]
+
+ if verbose:
+ print()
+ if len(params_out) == 1: # only the 'n_jobs' key injected above; no real hyper-params were given
+ if verbose:
+ print('the gram matrix is: ')
+ str_fw += 'the gram matrix is:\n\n'
+ else:
+ if verbose:
+ print('the gram matrix with parameters', params_out, 'is: ')
+ str_fw += 'the gram matrix with parameters %s is:\n\n' % params_out
+ if len(Kmatrix) < 2:
+ nb_gm_ignore += 1
+ if verbose:
+ print('ignored, as at most one of its diagonal values is non-zero.')
+ str_fw += 'ignored, as at most one of its diagonal values is non-zero.\n\n'
+ else:
+ if np.isnan(Kmatrix).any(
+ ): # if the matrix contains elements that are not numbers
+ nb_gm_ignore += 1
+ if verbose:
+ print('ignored, as it contains elements that are not numbers.')
+ str_fw += 'ignored, as it contains elements that are not numbers.\n\n'
+ else:
+# print(Kmatrix)
+ str_fw += np.array2string(
+ Kmatrix,
+ separator=',') + '\n\n'
+# separator=',',
+# threshold=np.inf,
+# floatmode='unique') + '\n\n'
+
+ fig_file_name = results_dir + '/GM[ds]' + ds_name
+ if len(params_out) != 1:
+ fig_file_name += '[params]' + str(idx)
+ plt.imshow(Kmatrix)
+ plt.colorbar()
+ plt.savefig(fig_file_name + '.eps', format='eps', dpi=300)
+# plt.show()
+ plt.clf()
+ gram_matrices.append(Kmatrix)
+ gram_matrix_time.append(current_run_time)
+ param_list_pre_revised.append(params_out)
+ if nb_g_ignore > 0:
+ if verbose:
+ print(', where %d graphs are ignored because their kernels with themselves are zero.' % nb_g_ignore)
+ str_fw += ', where %d graphs are ignored because their kernels with themselves are zero.' % nb_g_ignore
+ if verbose:
+ print()
+ print(
+ '{} gram matrices are calculated, {} of which are ignored.'.format(
+ len(param_list_precomputed), nb_gm_ignore))
+ str_fw += '{} gram matrices are calculated, {} of which are ignored.\n\n'.format(len(param_list_precomputed), nb_gm_ignore)
+ str_fw += 'serial numbers of gram matrix figures and their corresponding parameters settings:\n\n'
+ str_fw += ''.join([
+ '{}: {}\n'.format(idx, params_out)
+ for idx, params_out in enumerate(param_list_precomputed)
+ ])
+
+ return gram_matrices, gram_matrix_time, param_list_pre_revised, y, str_fw
+
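+# A hypothetical usage sketch of compute_gram_matrices with a stand-in
+# estimator; "_toy_estimator" and the toy dataset are illustrative, not
+# a real graph kernel. Note the function also saves a gram-matrix
+# figure into results_dir, so a writable directory is required.
+def _toy_estimator(Gn, n_jobs=1):
+ # constant kernel plus identity: positive diagonal, no NaN entries
+ K = np.ones((len(Gn), len(Gn))) + np.eye(len(Gn))
+ return K, 0.0
+
+if __name__ == '__main__':
+ import tempfile
+ out_dir = tempfile.mkdtemp()
+ gms, gm_times, params, y_out, log = compute_gram_matrices(
+ list(range(5)), [0, 1, 0, 1, 0], _toy_estimator, [{}],
+ out_dir, 'Toy', verbose=False)
+ print(len(gms), gm_times) # 1 [0.0]
+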
+
+[docs]def read_gram_matrices_from_file(results_dir, ds_name):
+ gmfile = np.load(results_dir + '/' + ds_name + '.gm.npz')
+ gram_matrices = gmfile['gms'] # one gram matrix per precomputed-param setting
+ param_list_pre_revised = gmfile['params'] # the precomputed param grids, with the useless ones removed
+ y = gmfile['y'].tolist()
+ return gram_matrices, param_list_pre_revised, y
+
+
+[docs]def printResultsInTable(param_list, param_list_pre_revised, average_val_scores,
+ std_val_scores, average_perf_scores, std_perf_scores,
+ average_train_scores, std_train_scores, gram_matrix_time,
+ model_type, verbose):
+ from collections import OrderedDict
+ from tabulate import tabulate
+ table_dict = {}
+ if model_type == 'regression':
+ for param_in in param_list:
+ param_in['alpha'] = '{:.2e}'.format(param_in['alpha'])
+ else:
+ for param_in in param_list:
+ param_in['C'] = '{:.2e}'.format(param_in['C'])
+ table_dict['params'] = [{**param_out, **param_in}
+ for param_in in param_list for param_out in param_list_pre_revised]
+ table_dict['gram_matrix_time'] = [
+ '{:.2f}'.format(gram_matrix_time[index_out])
+ for param_in in param_list
+ for index_out, _ in enumerate(param_list_pre_revised)
+ ]
+ table_dict['valid_perf'] = [
+ '{:.2f}±{:.2f}'.format(average_val_scores[index_out][index_in],
+ std_val_scores[index_out][index_in])
+ for index_in, _ in enumerate(param_list)
+ for index_out, _ in enumerate(param_list_pre_revised)
+ ]
+ table_dict['test_perf'] = [
+ '{:.2f}±{:.2f}'.format(average_perf_scores[index_out][index_in],
+ std_perf_scores[index_out][index_in])
+ for index_in, _ in enumerate(param_list)
+ for index_out, _ in enumerate(param_list_pre_revised)
+ ]
+ table_dict['train_perf'] = [
+ '{:.2f}±{:.2f}'.format(average_train_scores[index_out][index_in],
+ std_train_scores[index_out][index_in])
+ for index_in, _ in enumerate(param_list)
+ for index_out, _ in enumerate(param_list_pre_revised)
+ ]
+
+ keyorder = [
+ 'params', 'train_perf', 'valid_perf', 'test_perf',
+ 'gram_matrix_time'
+ ]
+ if verbose:
+ print()
+ tb_print = tabulate(OrderedDict(sorted(table_dict.items(),
+ key=lambda i: keyorder.index(i[0]))), headers='keys')
+# print(tb_print)
+ return 'table of performance vs. hyper-params:\n\n%s\n\n' % tb_print
+
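+# For reference, a tiny self-contained sketch (illustrative values
+# only) of how tabulate renders the column dict built above:
+if __name__ == '__main__':
+ from collections import OrderedDict
+ from tabulate import tabulate
+ demo = OrderedDict([('params', ["{'C': '1.00e+01'}"]),
+ ('train_perf', ['0.99±0.01']),
+ ('valid_perf', ['0.95±0.02']),
+ ('test_perf', ['0.94±0.03']),
+ ('gram_matrix_time', ['1.23'])])
+ print(tabulate(demo, headers='keys'))
+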
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Tue Dec 11 11:39:46 2018
+Parallel aid functions.
+@author: ljia
+"""
+import multiprocessing
+from multiprocessing import Pool
+from tqdm import tqdm
+import sys
+
+[docs]def parallel_me(func, func_assign, var_to_assign, itr, len_itr=None, init_worker=None,
+ glbv=None, method=None, n_jobs=None, chunksize=None, itr_desc='',
+ verbose=True):
+ '''Parallelize the computation of func over the iterable itr with a
+ multiprocessing Pool, assigning each result to var_to_assign via
+ func_assign. If glbv is given, it is passed to init_worker so that
+ (typically large, read-only) global variables are shared with the
+ worker processes instead of being pickled with every task.
+ '''
+ if method == 'imap_unordered':
+ if glbv: # global variables required.
+# def init_worker(v_share):
+# global G_var
+# G_var = v_share
+ if n_jobs is None:
+ n_jobs = multiprocessing.cpu_count()
+ with Pool(processes=n_jobs, initializer=init_worker,
+ initargs=glbv) as pool:
+ if chunksize is None:
+ if len_itr < 100 * n_jobs:
+ chunksize = int(len_itr / n_jobs) + 1
+ else:
+ chunksize = 100
+ for result in (tqdm(pool.imap_unordered(func, itr, chunksize),
+ desc=itr_desc, file=sys.stdout) if verbose else
+ pool.imap_unordered(func, itr, chunksize)):
+ func_assign(result, var_to_assign)
+ else:
+ if n_jobs is None:
+ n_jobs = multiprocessing.cpu_count()
+ with Pool(processes=n_jobs) as pool:
+ if chunksize is None:
+ if len_itr < 100 * n_jobs:
+ chunksize = int(len_itr / n_jobs) + 1
+ else:
+ chunksize = 100
+ for result in (tqdm(pool.imap_unordered(func, itr, chunksize),
+ desc=itr_desc, file=sys.stdout) if verbose else
+ pool.imap_unordered(func, itr, chunksize)):
+ func_assign(result, var_to_assign)
+
+
+
+[docs]def parallel_gm(func, Kmatrix, Gn, init_worker=None, glbv=None,
+ method='imap_unordered', n_jobs=None, chunksize=None,
+ verbose=True):
+ from itertools import combinations_with_replacement
+ def func_assign(result, var_to_assign):
+ var_to_assign[result[0]][result[1]] = result[2]
+ var_to_assign[result[1]][result[0]] = result[2]
+ itr = combinations_with_replacement(range(0, len(Gn)), 2)
+ len_itr = int(len(Gn) * (len(Gn) + 1) / 2)
+ parallel_me(func, func_assign, Kmatrix, itr, len_itr=len_itr,
+ init_worker=init_worker, glbv=glbv, method=method, n_jobs=n_jobs,
+ chunksize=chunksize, itr_desc='calculating kernels', verbose=verbose)
+
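+# A small usage sketch of parallel_gm with a toy pairwise "kernel"
+# (illustrative, not a real graph kernel): the graph list is shared
+# with the workers through the initializer, and each task returns one
+# entry (i, j, value) of the kernel matrix.
+import networkx as nx
+import numpy as np
+
+def _init_worker_gn_example(gn_toshare):
+ global G_gn_example
+ G_gn_example = gn_toshare
+
+def _toy_kernel_do(ij):
+ i, j = ij
+ # stand-in kernel value: the product of the two node counts
+ return i, j, G_gn_example[i].number_of_nodes() * G_gn_example[j].number_of_nodes()
+
+if __name__ == '__main__':
+ Gn = [nx.path_graph(n) for n in range(2, 6)]
+ Kmatrix = np.zeros((len(Gn), len(Gn)))
+ parallel_gm(_toy_kernel_do, Kmatrix, Gn,
+ init_worker=_init_worker_gn_example, glbv=(Gn,),
+ n_jobs=2, verbose=False)
+ print(Kmatrix[0]) # [ 4. 6. 8. 10.]
+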
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Wed Jan 30 10:48:49 2019
+
+Trie (prefix tree)
+@author: ljia
+@references:
+ https://viblo.asia/p/nlp-build-a-trie-data-structure-from-scratch-with-python-3P0lPzroKox, 2019.1
+"""
+
+import pickle
+import json
+
+""" Trie class
+"""
+[docs]class Trie:
+ # init Trie class
+ def __init__(self):
+ self.root = self.getNode()
+
+ def getNode(self):
+ # a fresh trie node: a children map plus an end-of-word flag
+ # (reconstructed here, as the method is referenced in __init__
+ # and its shape is fixed by insertWord/searchWord below).
+ return {"isEndOfWord": False, "children": {}}
+
+[docs] def insertWord(self, word):
+ current = self.root
+ for ch in word:
+
+ if ch in current["children"]:
+ node = current["children"][ch]
+ else:
+ node = self.getNode()
+ current["children"][ch] = node
+
+ current = node
+ current["isEndOfWord"] = True
+ if 'count' in current:
+ current['count'] += 1
+ else:
+ current['count'] = 1
+
+[docs] def searchWord(self, word):
+ current = self.root
+ for ch in word:
+ if ch not in current["children"]:
+ return 0
+ node = current["children"][ch]
+
+ current = node
+ if 'count' in current:
+ return current["count"]
+ else:
+ return 0
+
+[docs] def searchWordPrefix(self, word):
+ current = self.root
+ for ch in word:
+ if ch not in current["children"]: # dict.has_key() was removed in Python 3
+ return False
+ node = current["children"][ch]
+
+ current = node
+ # return True if children contain keys and values
+ return bool(current["children"])
+
+
+
+ def _delete(self, current, word, index):
+ if index == len(word):
+ if not current["isEndOfWord"]:
+ return False
+ current["isEndOfWord"] = False
+ return len(current["children"].keys()) == 0
+
+ ch = word[index]
+ if ch not in current["children"]:
+ return False
+ node = current["children"][ch]
+
+ should_delete_current_node = self._delete(node, word, index + 1)
+
+ if should_delete_current_node:
+ current["children"].pop(ch)
+ return len(current["children"].keys()) == 0
+
+ return False
+
+[docs] def save_to_pickle(self, file_name):
+ with open(file_name + ".pkl", "wb") as f:
+ pickle.dump(self.root, f)
+
+[docs] def load_from_pickle(self, file_name):
+ with open(file_name + ".pkl", "rb") as f:
+ self.root = pickle.load(f)
+
+
+
+[docs] def save_to_json(self, file_name):
+ with open(file_name + ".json", "w") as f:
+ f.write(json.dumps(self.root))
+
+[docs] def load_from_json(self, file_name):
+ with open(file_name + ".json", "r") as f:
+ self.root = json.load(f)
+
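+# A brief usage sketch of the Trie class (the words are illustrative):
+if __name__ == '__main__':
+ t = Trie()
+ for w in ['ab', 'abc', 'ab']:
+ t.insertWord(w)
+ print(t.searchWord('ab')) # 2: inserted twice
+ print(t.searchWord('abc')) # 1
+ print(t.searchWordPrefix('a')) # True: 'a' prefixes stored words
+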
+import networkx as nx
+import numpy as np
+from copy import deepcopy
+#from itertools import product
+
+# from tqdm import tqdm
+
+
+[docs]def getSPLengths(G1):
+ sp = nx.shortest_path(G1)
+ distances = np.zeros((G1.number_of_nodes(), G1.number_of_nodes()))
+ for i in sp.keys():
+ for j in sp[i].keys():
+ distances[i, j] = len(sp[i][j]) - 1
+ return distances
+
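+# Quick sketch: the hop-count matrix of a 3-node path graph. Note that
+# the function indexes the distance matrix by node id, so it assumes
+# the nodes of G1 are the integers 0..n-1.
+if __name__ == '__main__':
+ print(getSPLengths(nx.path_graph(3)))
+ # [[0. 1. 2.]
+ # [1. 0. 1.]
+ # [2. 1. 0.]]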
+
+[docs]def getSPGraph(G, edge_weight=None):
+ """Transform graph G to its corresponding shortest-paths graph.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+ The graph to be transformed.
+ edge_weight : string
+ edge attribute corresponding to the edge weight.
+
+ Return
+ ------
+ S : NetworkX graph
+ The shortest-paths graph corresponding to G.
+
+ Notes
+ ------
+ For an input graph G, its shortest-paths graph S contains the same set of nodes as G, and there is an edge in S between every pair of nodes that are connected by a walk in G. Each edge in S is labeled by the shortest distance between its two endpoints.
+
+ References
+ ----------
+ [1] Borgwardt KM, Kriegel HP. Shortest-path kernels on graphs. In: Data Mining, Fifth IEEE International Conference on, 2005 Nov 27 (pp. 8-pp). IEEE.
+ """
+ return floydTransformation(G, edge_weight=edge_weight)
+
+
+[docs]def floydTransformation(G, edge_weight=None):
+ """Transform graph G to its corresponding shortest-paths graph using Floyd-transformation.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+ The graph to be transformed.
+ edge_weight : string
+ edge attribute corresponding to the edge weight. The default edge weight is bond_type.
+
+ Return
+ ------
+ S : NetworkX graph
+ The shortest-paths graph corresponding to G.
+
+ References
+ ----------
+ [1] Borgwardt KM, Kriegel HP. Shortest-path kernels on graphs. In: Data Mining, Fifth IEEE International Conference on, 2005 Nov 27 (pp. 8-pp). IEEE.
+ """
+ spMatrix = nx.floyd_warshall_numpy(G, weight=edge_weight)
+ S = nx.Graph()
+ S.add_nodes_from(G.nodes(data=True))
+ ns = list(G.nodes())
+ for i in range(0, G.number_of_nodes()):
+ for j in range(i + 1, G.number_of_nodes()):
+ if spMatrix[i, j] != np.inf:
+ S.add_edge(ns[i], ns[j], cost=spMatrix[i, j])
+ return S
+
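+# A minimal sketch of the shortest-paths transformation on a toy path
+# graph; with no edge_weight given, every edge counts as 1, so the
+# costs are hop counts.
+if __name__ == '__main__':
+ S = floydTransformation(nx.path_graph(4)) # path 0-1-2-3
+ print(sorted(S.edges(data='cost')))
+ # [(0, 1, 1.0), (0, 2, 2.0), (0, 3, 3.0), (1, 2, 1.0), (1, 3, 2.0), (2, 3, 1.0)]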
+
+[docs]def untotterTransformation(G, node_label, edge_label):
+ """Transform graph G according to Mahé et al.'s method to filter out tottering patterns of the marginalized kernel and the tree pattern kernel.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+ The graph to be tramsformed.
+ node_label : string
+ node attribute used as label. The default node label is 'atom'.
+ edge_label : string
+ edge attribute used as label. The default edge label is 'bond_type'.
+
+ Return
+ ------
+ gt : NetworkX graph
+ The transformed graph corresponding to G.
+
+ References
+ ----------
+ [1] Pierre Mahé, Nobuhisa Ueda, Tatsuya Akutsu, Jean-Luc Perret, and Jean-Philippe Vert. Extensions of marginalized graph kernels. In Proceedings of the twenty-first international conference on Machine learning, page 70. ACM, 2004.
+ """
+ # work on a directed copy of G.
+ G = G.to_directed()
+ gt = nx.Graph()
+ gt.graph = G.graph
+ gt.add_nodes_from(G.nodes(data=True))
+ for edge in G.edges():
+ gt.add_node(edge)
+ gt.nodes[edge].update({node_label: G.nodes[edge[1]][node_label]}) # Graph.node was removed in NetworkX 2.4; use .nodes
+ gt.add_edge(edge[0], edge)
+ gt.edges[edge[0], edge].update({
+ edge_label:
+ G[edge[0]][edge[1]][edge_label]
+ })
+ for neighbor in G[edge[1]]:
+ if neighbor != edge[0]:
+ gt.add_edge(edge, (edge[1], neighbor))
+ gt.edges[edge, (edge[1], neighbor)].update({
+ edge_label:
+ G[edge[1]][neighbor][edge_label]
+ })
+ # nx.draw_networkx(gt)
+ # plt.show()
+
+ # relabel nodes using consecutive integers for convenience of kernel calculation.
+ gt = nx.convert_node_labels_to_integers(
+ gt, first_label=0, label_attribute='label_orignal')
+ return gt
+
+
+[docs]def direct_product(G1, G2, node_label, edge_label):
+ """Return the direct/tensor product of directed graphs G1 and G2.
+
+ Parameters
+ ----------
+ G1, G2 : NetworkX graph
+ The original graphs.
+ node_label : string
+ node attribute used as label. The default node label is 'atom'.
+ edge_label : string
+ edge attribute used as label. The default edge label is 'bond_type'.
+
+ Return
+ ------
+ gt : NetworkX graph
+ The direct product graph of G1 and G2.
+
+ Notes
+ -----
+ This method differs from networkx.tensor_product in that it only adds to the direct product those pairs of nodes and edges from G1 and G2 whose labels match.
+
+ References
+ ----------
+ [1] Thomas Gärtner, Peter Flach, and Stefan Wrobel. On graph kernels: Hardness results and efficient alternatives. Learning Theory and Kernel Machines, pages 129–143, 2003.
+ """
+ from itertools import product
+ # G = G.to_directed()
+ gt = nx.DiGraph()
+ # add nodes
+ for u, v in product(G1, G2):
+ if G1.nodes[u][node_label] == G2.nodes[v][node_label]:
+ gt.add_node((u, v))
+ gt.nodes[(u, v)].update({node_label: G1.nodes[u][node_label]})
+ # add edges; faster for sparse graphs (not many edges), which is the most common case for now.
+ for (u1, v1), (u2, v2) in product(G1.edges, G2.edges):
+ if (u1, u2) in gt and (
+ v1, v2
+ ) in gt and G1.edges[u1, v1][edge_label] == G2.edges[u2,
+ v2][edge_label]:
+ gt.add_edge((u1, u2), (v1, v2))
+ gt.edges[(u1, u2), (v1, v2)].update({
+ edge_label:
+ G1.edges[u1, v1][edge_label]
+ })
+
+ # # add edges; faster for dense graphs (many edges; a complete graph is the extreme case).
+ # for u, v in product(gt, gt):
+ # if (u[0], v[0]) in G1.edges and (
+ # u[1], v[1]
+ # ) in G2.edges and G1.edges[u[0],
+ # v[0]][edge_label] == G2.edges[u[1],
+ # v[1]][edge_label]:
+ # gt.add_edge((u[0], u[1]), (v[0], v[1]))
+ # gt.edges[(u[0], u[1]), (v[0], v[1])].update({
+ # edge_label:
+ # G1.edges[u[0], v[0]][edge_label]
+ # })
+
+ # relabel nodes using consecutive integers for convenience of kernel calculation.
+ # gt = nx.convert_node_labels_to_integers(
+ # gt, first_label=0, label_attribute='label_orignal')
+ return gt
+
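+# A small sketch of the label-preserving direct product on two toy
+# labeled graphs; the label names 'atom' and 'bond_type' follow the
+# defaults named in the docstring.
+if __name__ == '__main__':
+ G1 = nx.Graph()
+ G1.add_node(0, atom='C'); G1.add_node(1, atom='O')
+ G1.add_edge(0, 1, bond_type='1')
+ G2 = nx.Graph()
+ G2.add_node('a', atom='C'); G2.add_node('b', atom='O')
+ G2.add_edge('a', 'b', bond_type='1')
+ gp = direct_product(G1, G2, 'atom', 'bond_type')
+ print(list(gp.nodes(data=True))) # [((0, 'a'), {'atom': 'C'}), ((1, 'b'), {'atom': 'O'})]
+ print(list(gp.edges(data=True))) # [((0, 'a'), (1, 'b'), {'bond_type': '1'})]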
+
+[docs]def graph_deepcopy(G):
+ """Deep copy a graph, including deep copies of all nodes and edges and
+ of the attributes of the graph, its nodes and its edges.
+
+ Note
+ ----
+ It is the same as the NetworkX function graph.copy(), as far as I know.
+ """
+ # add graph attributes.
+ labels = {}
+ for k, v in G.graph.items():
+ labels[k] = deepcopy(v)
+ if G.is_directed():
+ G_copy = nx.DiGraph(**labels)
+ else:
+ G_copy = nx.Graph(**labels)
+
+ # add nodes
+ for nd, attrs in G.nodes(data=True):
+ labels = {}
+ for k, v in attrs.items():
+ labels[k] = deepcopy(v)
+ G_copy.add_node(nd, **labels)
+
+ # add edges.
+ for nd1, nd2, attrs in G.edges(data=True):
+ labels = {}
+ for k, v in attrs.items():
+ labels[k] = deepcopy(v)
+ G_copy.add_edge(nd1, nd2, **labels)
+
+ return G_copy
+
+
+[docs]def graph_isIdentical(G1, G2):
+ """Check if two graphs are identical, including: same nodes, edges, node
+ labels/attributes, edge labels/attributes.
+
+ Notes
+ ----
+ 1. The two graphs must be of the same type.
+ 2. Global/graph attributes are ignored, as they may contain graph names.
+ """
+ # check nodes.
+ nlist1 = [n for n in G1.nodes(data=True)]
+ nlist2 = [n for n in G2.nodes(data=True)]
+ if not nlist1 == nlist2:
+ return False
+ # check edges.
+ elist1 = [n for n in G1.edges(data=True)]
+ elist2 = [n for n in G2.edges(data=True)]
+ if not elist1 == elist2:
+ return False
+ # graph attributes are deliberately not checked (see Notes above).
+
+ return True
+
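+# Quick sketch: two attribute-free path graphs are identical; adding a
+# node attribute to one of them breaks the identity.
+if __name__ == '__main__':
+ Ga, Gb = nx.path_graph(3), nx.path_graph(3)
+ print(graph_isIdentical(Ga, Gb)) # True
+ Gb.nodes[0]['atom'] = 'C'
+ print(graph_isIdentical(Ga, Gb)) # False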
+
+[docs]def get_node_labels(Gn, node_label):
+ """Get node labels of dataset Gn.
+ """
+ nl = set()
+ for G in Gn:
+ nl = nl | set(nx.get_node_attributes(G, node_label).values())
+ return nl
+
+
+[docs]def get_edge_labels(Gn, edge_label):
+ """Get edge labels of dataset Gn.
+ """
+ el = set()
+ for G in Gn:
+ el = el | set(nx.get_edge_attributes(G, edge_label).values())
+ return el
+
' + _('Hide Search Matches') + '
') + .appendTo($('#searchbox')); + } + }, + + /** + * init the domain index toggle buttons + */ + initIndexTable : function() { + var togglers = $('img.toggler').click(function() { + var src = $(this).attr('src'); + var idnum = $(this).attr('id').substr(7); + $('tr.cg-' + idnum).toggle(); + if (src.substr(-9) === 'minus.png') + $(this).attr('src', src.substr(0, src.length-9) + 'plus.png'); + else + $(this).attr('src', src.substr(0, src.length-8) + 'minus.png'); + }).css('display', ''); + if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) { + togglers.click(); + } + }, + + /** + * helper function to hide the search marks again + */ + hideSearchWords : function() { + $('#searchbox .highlight-link').fadeOut(300); + $('span.highlighted').removeClass('highlighted'); + }, + + /** + * make the url absolute + */ + makeURL : function(relativeURL) { + return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL; + }, + + /** + * get the current relative url + */ + getCurrentURL : function() { + var path = document.location.pathname; + var parts = path.split(/\//); + $.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() { + if (this === '..') + parts.pop(); + }); + var url = parts.join('/'); + return path.substring(url.lastIndexOf('/') + 1, path.length - 1); + }, + + initOnKeyListeners: function() { + $(document).keyup(function(event) { + var activeElementType = document.activeElement.tagName; + // don't navigate when in search box or textarea + if (activeElementType !== 'TEXTAREA' && activeElementType !== 'INPUT' && activeElementType !== 'SELECT') { + switch (event.keyCode) { + case 37: // left + var prevHref = $('link[rel="prev"]').prop('href'); + if (prevHref) { + window.location.href = prevHref; + return false; + } + case 39: // right + var nextHref = $('link[rel="next"]').prop('href'); + if (nextHref) { + window.location.href = nextHref; + return false; + } + } + } + }); + } +}; + +// quick alias for translations +_ = Documentation.gettext; + +$(document).ready(function() { + Documentation.init(); +}); diff --git a/docs/_build/html/_static/documentation_options.js b/docs/_build/html/_static/documentation_options.js new file mode 100644 index 0000000..168d437 --- /dev/null +++ b/docs/_build/html/_static/documentation_options.js @@ -0,0 +1,296 @@ +var DOCUMENTATION_OPTIONS = { + URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'), + VERSION: '', + LANGUAGE: 'None', + COLLAPSE_INDEX: false, + FILE_SUFFIX: '.html', + HAS_SOURCE: true, + SOURCELINK_SUFFIX: '.txt', + NAVIGATION_WITH_KEYS: false, + SEARCH_LANGUAGE_STOP_WORDS: ["a","and","are","as","at","be","but","by","for","if","in","into","is","it","near","no","not","of","on","or","such","that","the","their","then","there","these","they","this","to","was","will","with"] +}; + + + +/* Non-minified version JS is _stemmer.js if file is provided */ +/** + * Porter Stemmer + */ +var Stemmer = function() { + + var step2list = { + ational: 'ate', + tional: 'tion', + enci: 'ence', + anci: 'ance', + izer: 'ize', + bli: 'ble', + alli: 'al', + entli: 'ent', + eli: 'e', + ousli: 'ous', + ization: 'ize', + ation: 'ate', + ator: 'ate', + alism: 'al', + iveness: 'ive', + fulness: 'ful', + ousness: 'ous', + aliti: 'al', + iviti: 'ive', + biliti: 'ble', + logi: 'log' + }; + + var step3list = { + icate: 'ic', + ative: '', + alize: 'al', + iciti: 'ic', + ical: 'ic', + ful: '', + ness: '' + }; + + var c = "[^aeiou]"; // consonant + var v = "[aeiouy]"; // vowel + var C = c + "[^aeiouy]*"; // consonant sequence + var V = v + "[aeiou]*"; // 
vowel sequence + + var mgr0 = "^(" + C + ")?" + V + C; // [C]VC... is m>0 + var meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$"; // [C]VC[V] is m=1 + var mgr1 = "^(" + C + ")?" + V + C + V + C; // [C]VCVC... is m>1 + var s_v = "^(" + C + ")?" + v; // vowel in stem + + this.stemWord = function (w) { + var stem; + var suffix; + var firstch; + var origword = w; + + if (w.length < 3) + return w; + + var re; + var re2; + var re3; + var re4; + + firstch = w.substr(0,1); + if (firstch == "y") + w = firstch.toUpperCase() + w.substr(1); + + // Step 1a + re = /^(.+?)(ss|i)es$/; + re2 = /^(.+?)([^s])s$/; + + if (re.test(w)) + w = w.replace(re,"$1$2"); + else if (re2.test(w)) + w = w.replace(re2,"$1$2"); + + // Step 1b + re = /^(.+?)eed$/; + re2 = /^(.+?)(ed|ing)$/; + if (re.test(w)) { + var fp = re.exec(w); + re = new RegExp(mgr0); + if (re.test(fp[1])) { + re = /.$/; + w = w.replace(re,""); + } + } + else if (re2.test(w)) { + var fp = re2.exec(w); + stem = fp[1]; + re2 = new RegExp(s_v); + if (re2.test(stem)) { + w = stem; + re2 = /(at|bl|iz)$/; + re3 = new RegExp("([^aeiouylsz])\\1$"); + re4 = new RegExp("^" + C + v + "[^aeiouwxy]$"); + if (re2.test(w)) + w = w + "e"; + else if (re3.test(w)) { + re = /.$/; + w = w.replace(re,""); + } + else if (re4.test(w)) + w = w + "e"; + } + } + + // Step 1c + re = /^(.+?)y$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(s_v); + if (re.test(stem)) + w = stem + "i"; + } + + // Step 2 + re = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + suffix = fp[2]; + re = new RegExp(mgr0); + if (re.test(stem)) + w = stem + step2list[suffix]; + } + + // Step 3 + re = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + suffix = fp[2]; + re = new RegExp(mgr0); + if (re.test(stem)) + w = stem + step3list[suffix]; + } + + // Step 4 + re = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/; + re2 = /^(.+?)(s|t)(ion)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(mgr1); + if (re.test(stem)) + w = stem; + } + else if (re2.test(w)) { + var fp = re2.exec(w); + stem = fp[1] + fp[2]; + re2 = new RegExp(mgr1); + if (re2.test(stem)) + w = stem; + } + + // Step 5 + re = /^(.+?)e$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(mgr1); + re2 = new RegExp(meq1); + re3 = new RegExp("^" + C + v + "[^aeiouwxy]$"); + if (re.test(stem) || (re2.test(stem) && !(re3.test(stem)))) + w = stem; + } + re = /ll$/; + re2 = new RegExp(mgr1); + if (re.test(w) && re2.test(w)) { + re = /.$/; + w = w.replace(re,""); + } + + // and turn initial Y back to y + if (firstch == "y") + w = firstch.toLowerCase() + w.substr(1); + return w; + } +} + + + + + +var splitChars = (function() { + var result = {}; + var singles = [96, 180, 187, 191, 215, 247, 749, 885, 903, 907, 909, 930, 1014, 1648, + 1748, 1809, 2416, 2473, 2481, 2526, 2601, 2609, 2612, 2615, 2653, 2702, + 2706, 2729, 2737, 2740, 2857, 2865, 2868, 2910, 2928, 2948, 2961, 2971, + 2973, 3085, 3089, 3113, 3124, 3213, 3217, 3241, 3252, 3295, 3341, 3345, + 3369, 3506, 3516, 3633, 3715, 3721, 3736, 3744, 3748, 3750, 3756, 3761, + 3781, 3912, 4239, 4347, 4681, 4695, 4697, 4745, 4785, 4799, 4801, 4823, + 4881, 5760, 5901, 5997, 6313, 7405, 8024, 8026, 8028, 8030, 8117, 8125, + 8133, 8181, 8468, 8485, 8487, 8489, 8494, 8527, 
11311, 11359, 11687, 11695, + 11703, 11711, 11719, 11727, 11735, 12448, 12539, 43010, 43014, 43019, 43587, + 43696, 43713, 64286, 64297, 64311, 64317, 64319, 64322, 64325, 65141]; + var i, j, start, end; + for (i = 0; i < singles.length; i++) { + result[singles[i]] = true; + } + var ranges = [[0, 47], [58, 64], [91, 94], [123, 169], [171, 177], [182, 184], [706, 709], + [722, 735], [741, 747], [751, 879], [888, 889], [894, 901], [1154, 1161], + [1318, 1328], [1367, 1368], [1370, 1376], [1416, 1487], [1515, 1519], [1523, 1568], + [1611, 1631], [1642, 1645], [1750, 1764], [1767, 1773], [1789, 1790], [1792, 1807], + [1840, 1868], [1958, 1968], [1970, 1983], [2027, 2035], [2038, 2041], [2043, 2047], + [2070, 2073], [2075, 2083], [2085, 2087], [2089, 2307], [2362, 2364], [2366, 2383], + [2385, 2391], [2402, 2405], [2419, 2424], [2432, 2436], [2445, 2446], [2449, 2450], + [2483, 2485], [2490, 2492], [2494, 2509], [2511, 2523], [2530, 2533], [2546, 2547], + [2554, 2564], [2571, 2574], [2577, 2578], [2618, 2648], [2655, 2661], [2672, 2673], + [2677, 2692], [2746, 2748], [2750, 2767], [2769, 2783], [2786, 2789], [2800, 2820], + [2829, 2830], [2833, 2834], [2874, 2876], [2878, 2907], [2914, 2917], [2930, 2946], + [2955, 2957], [2966, 2968], [2976, 2978], [2981, 2983], [2987, 2989], [3002, 3023], + [3025, 3045], [3059, 3076], [3130, 3132], [3134, 3159], [3162, 3167], [3170, 3173], + [3184, 3191], [3199, 3204], [3258, 3260], [3262, 3293], [3298, 3301], [3312, 3332], + [3386, 3388], [3390, 3423], [3426, 3429], [3446, 3449], [3456, 3460], [3479, 3481], + [3518, 3519], [3527, 3584], [3636, 3647], [3655, 3663], [3674, 3712], [3717, 3718], + [3723, 3724], [3726, 3731], [3752, 3753], [3764, 3772], [3774, 3775], [3783, 3791], + [3802, 3803], [3806, 3839], [3841, 3871], [3892, 3903], [3949, 3975], [3980, 4095], + [4139, 4158], [4170, 4175], [4182, 4185], [4190, 4192], [4194, 4196], [4199, 4205], + [4209, 4212], [4226, 4237], [4250, 4255], [4294, 4303], [4349, 4351], [4686, 4687], + [4702, 4703], [4750, 4751], [4790, 4791], [4806, 4807], [4886, 4887], [4955, 4968], + [4989, 4991], [5008, 5023], [5109, 5120], [5741, 5742], [5787, 5791], [5867, 5869], + [5873, 5887], [5906, 5919], [5938, 5951], [5970, 5983], [6001, 6015], [6068, 6102], + [6104, 6107], [6109, 6111], [6122, 6127], [6138, 6159], [6170, 6175], [6264, 6271], + [6315, 6319], [6390, 6399], [6429, 6469], [6510, 6511], [6517, 6527], [6572, 6592], + [6600, 6607], [6619, 6655], [6679, 6687], [6741, 6783], [6794, 6799], [6810, 6822], + [6824, 6916], [6964, 6980], [6988, 6991], [7002, 7042], [7073, 7085], [7098, 7167], + [7204, 7231], [7242, 7244], [7294, 7400], [7410, 7423], [7616, 7679], [7958, 7959], + [7966, 7967], [8006, 8007], [8014, 8015], [8062, 8063], [8127, 8129], [8141, 8143], + [8148, 8149], [8156, 8159], [8173, 8177], [8189, 8303], [8306, 8307], [8314, 8318], + [8330, 8335], [8341, 8449], [8451, 8454], [8456, 8457], [8470, 8472], [8478, 8483], + [8506, 8507], [8512, 8516], [8522, 8525], [8586, 9311], [9372, 9449], [9472, 10101], + [10132, 11263], [11493, 11498], [11503, 11516], [11518, 11519], [11558, 11567], + [11622, 11630], [11632, 11647], [11671, 11679], [11743, 11822], [11824, 12292], + [12296, 12320], [12330, 12336], [12342, 12343], [12349, 12352], [12439, 12444], + [12544, 12548], [12590, 12592], [12687, 12689], [12694, 12703], [12728, 12783], + [12800, 12831], [12842, 12880], [12896, 12927], [12938, 12976], [12992, 13311], + [19894, 19967], [40908, 40959], [42125, 42191], [42238, 42239], [42509, 42511], + [42540, 42559], [42592, 42593], 
[42607, 42622], [42648, 42655], [42736, 42774], + [42784, 42785], [42889, 42890], [42893, 43002], [43043, 43055], [43062, 43071], + [43124, 43137], [43188, 43215], [43226, 43249], [43256, 43258], [43260, 43263], + [43302, 43311], [43335, 43359], [43389, 43395], [43443, 43470], [43482, 43519], + [43561, 43583], [43596, 43599], [43610, 43615], [43639, 43641], [43643, 43647], + [43698, 43700], [43703, 43704], [43710, 43711], [43715, 43738], [43742, 43967], + [44003, 44015], [44026, 44031], [55204, 55215], [55239, 55242], [55292, 55295], + [57344, 63743], [64046, 64047], [64110, 64111], [64218, 64255], [64263, 64274], + [64280, 64284], [64434, 64466], [64830, 64847], [64912, 64913], [64968, 65007], + [65020, 65135], [65277, 65295], [65306, 65312], [65339, 65344], [65371, 65381], + [65471, 65473], [65480, 65481], [65488, 65489], [65496, 65497]]; + for (i = 0; i < ranges.length; i++) { + start = ranges[i][0]; + end = ranges[i][1]; + for (j = start; j <= end; j++) { + result[j] = true; + } + } + return result; +})(); + +function splitQuery(query) { + var result = []; + var start = -1; + for (var i = 0; i < query.length; i++) { + if (splitChars[query.charCodeAt(i)]) { + if (start !== -1) { + result.push(query.slice(start, i)); + start = -1; + } + } else if (start === -1) { + start = i; + } + } + if (start !== -1) { + result.push(query.slice(start)); + } + return result; +} + + diff --git a/docs/_build/html/_static/down-pressed.png b/docs/_build/html/_static/down-pressed.png new file mode 100644 index 0000000..5756c8c Binary files /dev/null and b/docs/_build/html/_static/down-pressed.png differ diff --git a/docs/_build/html/_static/down.png b/docs/_build/html/_static/down.png new file mode 100644 index 0000000..1b3bdad Binary files /dev/null and b/docs/_build/html/_static/down.png differ diff --git a/docs/_build/html/_static/file.png b/docs/_build/html/_static/file.png new file mode 100644 index 0000000..a858a41 Binary files /dev/null and b/docs/_build/html/_static/file.png differ diff --git a/docs/_build/html/_static/fonts/Inconsolata-Bold.ttf b/docs/_build/html/_static/fonts/Inconsolata-Bold.ttf new file mode 100644 index 0000000..809c1f5 Binary files /dev/null and b/docs/_build/html/_static/fonts/Inconsolata-Bold.ttf differ diff --git a/docs/_build/html/_static/fonts/Inconsolata-Regular.ttf b/docs/_build/html/_static/fonts/Inconsolata-Regular.ttf new file mode 100644 index 0000000..fc981ce Binary files /dev/null and b/docs/_build/html/_static/fonts/Inconsolata-Regular.ttf differ diff --git a/docs/_build/html/_static/fonts/Inconsolata.ttf b/docs/_build/html/_static/fonts/Inconsolata.ttf new file mode 100644 index 0000000..4b8a36d Binary files /dev/null and b/docs/_build/html/_static/fonts/Inconsolata.ttf differ diff --git a/docs/_build/html/_static/fonts/Lato-Bold.ttf b/docs/_build/html/_static/fonts/Lato-Bold.ttf new file mode 100644 index 0000000..1d23c70 Binary files /dev/null and b/docs/_build/html/_static/fonts/Lato-Bold.ttf differ diff --git a/docs/_build/html/_static/fonts/Lato-Regular.ttf b/docs/_build/html/_static/fonts/Lato-Regular.ttf new file mode 100644 index 0000000..0f3d0f8 Binary files /dev/null and b/docs/_build/html/_static/fonts/Lato-Regular.ttf differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-bold.eot b/docs/_build/html/_static/fonts/Lato/lato-bold.eot new file mode 100644 index 0000000..3361183 Binary files /dev/null and b/docs/_build/html/_static/fonts/Lato/lato-bold.eot differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-bold.ttf 
b/docs/_build/html/_static/fonts/Lato/lato-bold.ttf new file mode 100644 index 0000000..29f691d Binary files /dev/null and b/docs/_build/html/_static/fonts/Lato/lato-bold.ttf differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-bold.woff b/docs/_build/html/_static/fonts/Lato/lato-bold.woff new file mode 100644 index 0000000..c6dff51 Binary files /dev/null and b/docs/_build/html/_static/fonts/Lato/lato-bold.woff differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-bold.woff2 b/docs/_build/html/_static/fonts/Lato/lato-bold.woff2 new file mode 100644 index 0000000..bb19504 Binary files /dev/null and b/docs/_build/html/_static/fonts/Lato/lato-bold.woff2 differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-bolditalic.eot b/docs/_build/html/_static/fonts/Lato/lato-bolditalic.eot new file mode 100644 index 0000000..3d41549 Binary files /dev/null and b/docs/_build/html/_static/fonts/Lato/lato-bolditalic.eot differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-bolditalic.ttf b/docs/_build/html/_static/fonts/Lato/lato-bolditalic.ttf new file mode 100644 index 0000000..f402040 Binary files /dev/null and b/docs/_build/html/_static/fonts/Lato/lato-bolditalic.ttf differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-bolditalic.woff b/docs/_build/html/_static/fonts/Lato/lato-bolditalic.woff new file mode 100644 index 0000000..88ad05b Binary files /dev/null and b/docs/_build/html/_static/fonts/Lato/lato-bolditalic.woff differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-bolditalic.woff2 b/docs/_build/html/_static/fonts/Lato/lato-bolditalic.woff2 new file mode 100644 index 0000000..c4e3d80 Binary files /dev/null and b/docs/_build/html/_static/fonts/Lato/lato-bolditalic.woff2 differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-italic.eot b/docs/_build/html/_static/fonts/Lato/lato-italic.eot new file mode 100644 index 0000000..3f82642 Binary files /dev/null and b/docs/_build/html/_static/fonts/Lato/lato-italic.eot differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-italic.ttf b/docs/_build/html/_static/fonts/Lato/lato-italic.ttf new file mode 100644 index 0000000..b4bfc9b Binary files /dev/null and b/docs/_build/html/_static/fonts/Lato/lato-italic.ttf differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-italic.woff b/docs/_build/html/_static/fonts/Lato/lato-italic.woff new file mode 100644 index 0000000..76114bc Binary files /dev/null and b/docs/_build/html/_static/fonts/Lato/lato-italic.woff differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-italic.woff2 b/docs/_build/html/_static/fonts/Lato/lato-italic.woff2 new file mode 100644 index 0000000..3404f37 Binary files /dev/null and b/docs/_build/html/_static/fonts/Lato/lato-italic.woff2 differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-regular.eot b/docs/_build/html/_static/fonts/Lato/lato-regular.eot new file mode 100644 index 0000000..11e3f2a Binary files /dev/null and b/docs/_build/html/_static/fonts/Lato/lato-regular.eot differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-regular.ttf b/docs/_build/html/_static/fonts/Lato/lato-regular.ttf new file mode 100644 index 0000000..74decd9 Binary files /dev/null and b/docs/_build/html/_static/fonts/Lato/lato-regular.ttf differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-regular.woff b/docs/_build/html/_static/fonts/Lato/lato-regular.woff new file mode 100644 index 0000000..ae1307f Binary files /dev/null and b/docs/_build/html/_static/fonts/Lato/lato-regular.woff differ diff --git 
a/docs/_build/html/_static/fonts/Lato/lato-regular.woff2 b/docs/_build/html/_static/fonts/Lato/lato-regular.woff2 new file mode 100644 index 0000000..3bf9843 Binary files /dev/null and b/docs/_build/html/_static/fonts/Lato/lato-regular.woff2 differ diff --git a/docs/_build/html/_static/fonts/RobotoSlab-Bold.ttf b/docs/_build/html/_static/fonts/RobotoSlab-Bold.ttf new file mode 100644 index 0000000..df5d1df Binary files /dev/null and b/docs/_build/html/_static/fonts/RobotoSlab-Bold.ttf differ diff --git a/docs/_build/html/_static/fonts/RobotoSlab-Regular.ttf b/docs/_build/html/_static/fonts/RobotoSlab-Regular.ttf new file mode 100644 index 0000000..eb52a79 Binary files /dev/null and b/docs/_build/html/_static/fonts/RobotoSlab-Regular.ttf differ diff --git a/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.eot b/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.eot new file mode 100644 index 0000000..79dc8ef Binary files /dev/null and b/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.eot differ diff --git a/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.ttf b/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.ttf new file mode 100644 index 0000000..df5d1df Binary files /dev/null and b/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.ttf differ diff --git a/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff b/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff new file mode 100644 index 0000000..6cb6000 Binary files /dev/null and b/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff differ diff --git a/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff2 b/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff2 new file mode 100644 index 0000000..7059e23 Binary files /dev/null and b/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff2 differ diff --git a/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.eot b/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.eot new file mode 100644 index 0000000..2f7ca78 Binary files /dev/null and b/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.eot differ diff --git a/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.ttf b/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.ttf new file mode 100644 index 0000000..eb52a79 Binary files /dev/null and b/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.ttf differ diff --git a/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff b/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff new file mode 100644 index 0000000..f815f63 Binary files /dev/null and b/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff differ diff --git a/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff2 b/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff2 new file mode 100644 index 0000000..f2c76e5 Binary files /dev/null and b/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff2 differ diff --git a/docs/_build/html/_static/fonts/fontawesome-webfont.eot b/docs/_build/html/_static/fonts/fontawesome-webfont.eot new file mode 100644 index 0000000..e9f60ca Binary files /dev/null and b/docs/_build/html/_static/fonts/fontawesome-webfont.eot differ diff --git a/docs/_build/html/_static/fonts/fontawesome-webfont.svg b/docs/_build/html/_static/fonts/fontawesome-webfont.svg new file mode 
100644 index 0000000..855c845 --- /dev/null +++ b/docs/_build/html/_static/fonts/fontawesome-webfont.svg @@ -0,0 +1,2671 @@ + + + diff --git a/docs/_build/html/_static/fonts/fontawesome-webfont.ttf b/docs/_build/html/_static/fonts/fontawesome-webfont.ttf new file mode 100644 index 0000000..35acda2 Binary files /dev/null and b/docs/_build/html/_static/fonts/fontawesome-webfont.ttf differ diff --git a/docs/_build/html/_static/fonts/fontawesome-webfont.woff b/docs/_build/html/_static/fonts/fontawesome-webfont.woff new file mode 100644 index 0000000..400014a Binary files /dev/null and b/docs/_build/html/_static/fonts/fontawesome-webfont.woff differ diff --git a/docs/_build/html/_static/fonts/fontawesome-webfont.woff2 b/docs/_build/html/_static/fonts/fontawesome-webfont.woff2 new file mode 100644 index 0000000..4d13fc6 Binary files /dev/null and b/docs/_build/html/_static/fonts/fontawesome-webfont.woff2 differ diff --git a/docs/_build/html/_static/jquery-3.2.1.js b/docs/_build/html/_static/jquery-3.2.1.js new file mode 100644 index 0000000..d2d8ca4 --- /dev/null +++ b/docs/_build/html/_static/jquery-3.2.1.js @@ -0,0 +1,10253 @@ +/*! + * jQuery JavaScript Library v3.2.1 + * https://jquery.com/ + * + * Includes Sizzle.js + * https://sizzlejs.com/ + * + * Copyright JS Foundation and other contributors + * Released under the MIT license + * https://jquery.org/license + * + * Date: 2017-03-20T18:59Z + */ +( function( global, factory ) { + + "use strict"; + + if ( typeof module === "object" && typeof module.exports === "object" ) { + + // For CommonJS and CommonJS-like environments where a proper `window` + // is present, execute the factory and get jQuery. + // For environments that do not have a `window` with a `document` + // (such as Node.js), expose a factory as module.exports. + // This accentuates the need for the creation of a real `window`. + // e.g. var jQuery = require("jquery")(window); + // See ticket #14549 for more info. + module.exports = global.document ? + factory( global, true ) : + function( w ) { + if ( !w.document ) { + throw new Error( "jQuery requires a window with a document" ); + } + return factory( w ); + }; + } else { + factory( global ); + } + +// Pass this if window is not defined yet +} )( typeof window !== "undefined" ? window : this, function( window, noGlobal ) { + +// Edge <= 12 - 13+, Firefox <=18 - 45+, IE 10 - 11, Safari 5.1 - 9+, iOS 6 - 9.1 +// throw exceptions when non-strict code (e.g., ASP.NET 4.5) accesses strict mode +// arguments.callee.caller (trac-13335). But as of jQuery 3.0 (2016), strict mode should be common +// enough that all such attempts are guarded in a try block. 
+"use strict"; + +var arr = []; + +var document = window.document; + +var getProto = Object.getPrototypeOf; + +var slice = arr.slice; + +var concat = arr.concat; + +var push = arr.push; + +var indexOf = arr.indexOf; + +var class2type = {}; + +var toString = class2type.toString; + +var hasOwn = class2type.hasOwnProperty; + +var fnToString = hasOwn.toString; + +var ObjectFunctionString = fnToString.call( Object ); + +var support = {}; + + + + function DOMEval( code, doc ) { + doc = doc || document; + + var script = doc.createElement( "script" ); + + script.text = code; + doc.head.appendChild( script ).parentNode.removeChild( script ); + } +/* global Symbol */ +// Defining this global in .eslintrc.json would create a danger of using the global +// unguarded in another place, it seems safer to define global only for this module + + + +var + version = "3.2.1", + + // Define a local copy of jQuery + jQuery = function( selector, context ) { + + // The jQuery object is actually just the init constructor 'enhanced' + // Need init if jQuery is called (just allow error to be thrown if not included) + return new jQuery.fn.init( selector, context ); + }, + + // Support: Android <=4.0 only + // Make sure we trim BOM and NBSP + rtrim = /^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g, + + // Matches dashed string for camelizing + rmsPrefix = /^-ms-/, + rdashAlpha = /-([a-z])/g, + + // Used by jQuery.camelCase as callback to replace() + fcamelCase = function( all, letter ) { + return letter.toUpperCase(); + }; + +jQuery.fn = jQuery.prototype = { + + // The current version of jQuery being used + jquery: version, + + constructor: jQuery, + + // The default length of a jQuery object is 0 + length: 0, + + toArray: function() { + return slice.call( this ); + }, + + // Get the Nth element in the matched element set OR + // Get the whole matched element set as a clean array + get: function( num ) { + + // Return all the elements in a clean array + if ( num == null ) { + return slice.call( this ); + } + + // Return just the one element from the set + return num < 0 ? this[ num + this.length ] : this[ num ]; + }, + + // Take an array of elements and push it onto the stack + // (returning the new matched element set) + pushStack: function( elems ) { + + // Build a new jQuery matched element set + var ret = jQuery.merge( this.constructor(), elems ); + + // Add the old object onto the stack (as a reference) + ret.prevObject = this; + + // Return the newly-formed element set + return ret; + }, + + // Execute a callback for every element in the matched set. + each: function( callback ) { + return jQuery.each( this, callback ); + }, + + map: function( callback ) { + return this.pushStack( jQuery.map( this, function( elem, i ) { + return callback.call( elem, i, elem ); + } ) ); + }, + + slice: function() { + return this.pushStack( slice.apply( this, arguments ) ); + }, + + first: function() { + return this.eq( 0 ); + }, + + last: function() { + return this.eq( -1 ); + }, + + eq: function( i ) { + var len = this.length, + j = +i + ( i < 0 ? len : 0 ); + return this.pushStack( j >= 0 && j < len ? [ this[ j ] ] : [] ); + }, + + end: function() { + return this.prevObject || this.constructor(); + }, + + // For internal use only. + // Behaves like an Array's method, not like a jQuery method. 
+ push: push, + sort: arr.sort, + splice: arr.splice +}; + +jQuery.extend = jQuery.fn.extend = function() { + var options, name, src, copy, copyIsArray, clone, + target = arguments[ 0 ] || {}, + i = 1, + length = arguments.length, + deep = false; + + // Handle a deep copy situation + if ( typeof target === "boolean" ) { + deep = target; + + // Skip the boolean and the target + target = arguments[ i ] || {}; + i++; + } + + // Handle case when target is a string or something (possible in deep copy) + if ( typeof target !== "object" && !jQuery.isFunction( target ) ) { + target = {}; + } + + // Extend jQuery itself if only one argument is passed + if ( i === length ) { + target = this; + i--; + } + + for ( ; i < length; i++ ) { + + // Only deal with non-null/undefined values + if ( ( options = arguments[ i ] ) != null ) { + + // Extend the base object + for ( name in options ) { + src = target[ name ]; + copy = options[ name ]; + + // Prevent never-ending loop + if ( target === copy ) { + continue; + } + + // Recurse if we're merging plain objects or arrays + if ( deep && copy && ( jQuery.isPlainObject( copy ) || + ( copyIsArray = Array.isArray( copy ) ) ) ) { + + if ( copyIsArray ) { + copyIsArray = false; + clone = src && Array.isArray( src ) ? src : []; + + } else { + clone = src && jQuery.isPlainObject( src ) ? src : {}; + } + + // Never move original objects, clone them + target[ name ] = jQuery.extend( deep, clone, copy ); + + // Don't bring in undefined values + } else if ( copy !== undefined ) { + target[ name ] = copy; + } + } + } + } + + // Return the modified object + return target; +}; + +jQuery.extend( { + + // Unique for each copy of jQuery on the page + expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ), + + // Assume jQuery is ready without the ready module + isReady: true, + + error: function( msg ) { + throw new Error( msg ); + }, + + noop: function() {}, + + isFunction: function( obj ) { + return jQuery.type( obj ) === "function"; + }, + + isWindow: function( obj ) { + return obj != null && obj === obj.window; + }, + + isNumeric: function( obj ) { + + // As of jQuery 3.0, isNumeric is limited to + // strings and numbers (primitives or objects) + // that can be coerced to finite numbers (gh-2662) + var type = jQuery.type( obj ); + return ( type === "number" || type === "string" ) && + + // parseFloat NaNs numeric-cast false positives ("") + // ...but misinterprets leading-number strings, particularly hex literals ("0x...") + // subtraction forces infinities to NaN + !isNaN( obj - parseFloat( obj ) ); + }, + + isPlainObject: function( obj ) { + var proto, Ctor; + + // Detect obvious negatives + // Use toString instead of jQuery.type to catch host objects + if ( !obj || toString.call( obj ) !== "[object Object]" ) { + return false; + } + + proto = getProto( obj ); + + // Objects with no prototype (e.g., `Object.create( null )`) are plain + if ( !proto ) { + return true; + } + + // Objects with prototype are plain iff they were constructed by a global Object function + Ctor = hasOwn.call( proto, "constructor" ) && proto.constructor; + return typeof Ctor === "function" && fnToString.call( Ctor ) === ObjectFunctionString; + }, + + isEmptyObject: function( obj ) { + + /* eslint-disable no-unused-vars */ + // See https://github.com/eslint/eslint/issues/6125 + var name; + + for ( name in obj ) { + return false; + } + return true; + }, + + type: function( obj ) { + if ( obj == null ) { + return obj + ""; + } + + // Support: Android <=2.3 only (functionish RegExp) + 
return typeof obj === "object" || typeof obj === "function" ? + class2type[ toString.call( obj ) ] || "object" : + typeof obj; + }, + + // Evaluates a script in a global context + globalEval: function( code ) { + DOMEval( code ); + }, + + // Convert dashed to camelCase; used by the css and data modules + // Support: IE <=9 - 11, Edge 12 - 13 + // Microsoft forgot to hump their vendor prefix (#9572) + camelCase: function( string ) { + return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); + }, + + each: function( obj, callback ) { + var length, i = 0; + + if ( isArrayLike( obj ) ) { + length = obj.length; + for ( ; i < length; i++ ) { + if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { + break; + } + } + } else { + for ( i in obj ) { + if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { + break; + } + } + } + + return obj; + }, + + // Support: Android <=4.0 only + trim: function( text ) { + return text == null ? + "" : + ( text + "" ).replace( rtrim, "" ); + }, + + // results is for internal usage only + makeArray: function( arr, results ) { + var ret = results || []; + + if ( arr != null ) { + if ( isArrayLike( Object( arr ) ) ) { + jQuery.merge( ret, + typeof arr === "string" ? + [ arr ] : arr + ); + } else { + push.call( ret, arr ); + } + } + + return ret; + }, + + inArray: function( elem, arr, i ) { + return arr == null ? -1 : indexOf.call( arr, elem, i ); + }, + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + merge: function( first, second ) { + var len = +second.length, + j = 0, + i = first.length; + + for ( ; j < len; j++ ) { + first[ i++ ] = second[ j ]; + } + + first.length = i; + + return first; + }, + + grep: function( elems, callback, invert ) { + var callbackInverse, + matches = [], + i = 0, + length = elems.length, + callbackExpect = !invert; + + // Go through the array, only saving the items + // that pass the validator function + for ( ; i < length; i++ ) { + callbackInverse = !callback( elems[ i ], i ); + if ( callbackInverse !== callbackExpect ) { + matches.push( elems[ i ] ); + } + } + + return matches; + }, + + // arg is for internal usage only + map: function( elems, callback, arg ) { + var length, value, + i = 0, + ret = []; + + // Go through the array, translating each of the items to their new values + if ( isArrayLike( elems ) ) { + length = elems.length; + for ( ; i < length; i++ ) { + value = callback( elems[ i ], i, arg ); + + if ( value != null ) { + ret.push( value ); + } + } + + // Go through every key on the object, + } else { + for ( i in elems ) { + value = callback( elems[ i ], i, arg ); + + if ( value != null ) { + ret.push( value ); + } + } + } + + // Flatten any nested arrays + return concat.apply( [], ret ); + }, + + // A global GUID counter for objects + guid: 1, + + // Bind a function to a context, optionally partially applying any + // arguments. + proxy: function( fn, context ) { + var tmp, args, proxy; + + if ( typeof context === "string" ) { + tmp = fn[ context ]; + context = fn; + fn = tmp; + } + + // Quick check to determine if target is callable, in the spec + // this throws a TypeError, but we will just return undefined. 
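+		// Hedged usage sketch: jQuery.proxy( fn, obj ) returns a wrapper bound
+		// to obj, and jQuery.proxy( obj, "method" ) is the string form handled
+		// above. Extra arguments are partially applied:
+		//   var add = function( a, b ) { return a + b; };
+		//   jQuery.proxy( add, null, 1 )( 2 )  // 3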
+ if ( !jQuery.isFunction( fn ) ) { + return undefined; + } + + // Simulated bind + args = slice.call( arguments, 2 ); + proxy = function() { + return fn.apply( context || this, args.concat( slice.call( arguments ) ) ); + }; + + // Set the guid of unique handler to the same of original handler, so it can be removed + proxy.guid = fn.guid = fn.guid || jQuery.guid++; + + return proxy; + }, + + now: Date.now, + + // jQuery.support is not used in Core but other projects attach their + // properties to it so it needs to exist. + support: support +} ); + +if ( typeof Symbol === "function" ) { + jQuery.fn[ Symbol.iterator ] = arr[ Symbol.iterator ]; +} + +// Populate the class2type map +jQuery.each( "Boolean Number String Function Array Date RegExp Object Error Symbol".split( " " ), +function( i, name ) { + class2type[ "[object " + name + "]" ] = name.toLowerCase(); +} ); + +function isArrayLike( obj ) { + + // Support: real iOS 8.2 only (not reproducible in simulator) + // `in` check used to prevent JIT error (gh-2145) + // hasOwn isn't used here due to false negatives + // regarding Nodelist length in IE + var length = !!obj && "length" in obj && obj.length, + type = jQuery.type( obj ); + + if ( type === "function" || jQuery.isWindow( obj ) ) { + return false; + } + + return type === "array" || length === 0 || + typeof length === "number" && length > 0 && ( length - 1 ) in obj; +} +var Sizzle = +/*! + * Sizzle CSS Selector Engine v2.3.3 + * https://sizzlejs.com/ + * + * Copyright jQuery Foundation and other contributors + * Released under the MIT license + * http://jquery.org/license + * + * Date: 2016-08-08 + */ +(function( window ) { + +var i, + support, + Expr, + getText, + isXML, + tokenize, + compile, + select, + outermostContext, + sortInput, + hasDuplicate, + + // Local document vars + setDocument, + document, + docElem, + documentIsHTML, + rbuggyQSA, + rbuggyMatches, + matches, + contains, + + // Instance-specific data + expando = "sizzle" + 1 * new Date(), + preferredDoc = window.document, + dirruns = 0, + done = 0, + classCache = createCache(), + tokenCache = createCache(), + compilerCache = createCache(), + sortOrder = function( a, b ) { + if ( a === b ) { + hasDuplicate = true; + } + return 0; + }, + + // Instance methods + hasOwn = ({}).hasOwnProperty, + arr = [], + pop = arr.pop, + push_native = arr.push, + push = arr.push, + slice = arr.slice, + // Use a stripped-down indexOf as it's faster than native + // https://jsperf.com/thor-indexof-vs-for/5 + indexOf = function( list, elem ) { + var i = 0, + len = list.length; + for ( ; i < len; i++ ) { + if ( list[i] === elem ) { + return i; + } + } + return -1; + }, + + booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped", + + // Regular expressions + + // http://www.w3.org/TR/css3-selectors/#whitespace + whitespace = "[\\x20\\t\\r\\n\\f]", + + // http://www.w3.org/TR/CSS21/syndata.html#value-def-identifier + identifier = "(?:\\\\.|[\\w-]|[^\0-\\xa0])+", + + // Attribute selectors: http://www.w3.org/TR/selectors/#attribute-selectors + attributes = "\\[" + whitespace + "*(" + identifier + ")(?:" + whitespace + + // Operator (capture 2) + "*([*^$|!~]?=)" + whitespace + + // "Attribute values must be CSS identifiers [capture 5] or strings [capture 3 or capture 4]" + "*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" + identifier + "))|)" + whitespace + + "*\\]", + + pseudos = ":(" + identifier + ")(?:\\((" + + // To reduce the number of selectors 
needing tokenize in the preFilter, prefer arguments: + // 1. quoted (capture 3; capture 4 or capture 5) + "('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" + + // 2. simple (capture 6) + "((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" + + // 3. anything else (capture 2) + ".*" + + ")\\)|)", + + // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter + rwhitespace = new RegExp( whitespace + "+", "g" ), + rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + whitespace + "+$", "g" ), + + rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ), + rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace + "*" ), + + rattributeQuotes = new RegExp( "=" + whitespace + "*([^\\]'\"]*?)" + whitespace + "*\\]", "g" ), + + rpseudo = new RegExp( pseudos ), + ridentifier = new RegExp( "^" + identifier + "$" ), + + matchExpr = { + "ID": new RegExp( "^#(" + identifier + ")" ), + "CLASS": new RegExp( "^\\.(" + identifier + ")" ), + "TAG": new RegExp( "^(" + identifier + "|[*])" ), + "ATTR": new RegExp( "^" + attributes ), + "PSEUDO": new RegExp( "^" + pseudos ), + "CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" + whitespace + + "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + whitespace + + "*(\\d+)|))" + whitespace + "*\\)|)", "i" ), + "bool": new RegExp( "^(?:" + booleans + ")$", "i" ), + // For use in libraries implementing .is() + // We use this for POS matching in `select` + "needsContext": new RegExp( "^" + whitespace + "*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + + whitespace + "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" ) + }, + + rinputs = /^(?:input|select|textarea|button)$/i, + rheader = /^h\d$/i, + + rnative = /^[^{]+\{\s*\[native \w/, + + // Easily-parseable/retrievable ID or TAG or CLASS selectors + rquickExpr = /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/, + + rsibling = /[+~]/, + + // CSS escapes + // http://www.w3.org/TR/CSS21/syndata.html#escaped-characters + runescape = new RegExp( "\\\\([\\da-f]{1,6}" + whitespace + "?|(" + whitespace + ")|.)", "ig" ), + funescape = function( _, escaped, escapedWhitespace ) { + var high = "0x" + escaped - 0x10000; + // NaN means non-codepoint + // Support: Firefox<24 + // Workaround erroneous numeric interpretation of +"0x" + return high !== high || escapedWhitespace ? + escaped : + high < 0 ? 
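+			// Worked sketch: for the BMP escape "\\2603" (snowman), high =
+			// 0x2603 - 0x10000 is negative, so the first branch below restores
+			// the code unit directly; for the supplemental escape "\\1D306",
+			// high = 0xD306 and the surrogate-pair branch yields
+			// ( 0xD306 >> 10 | 0xD800, 0xD306 & 0x3FF | 0xDC00 ) = 0xD834, 0xDF06.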
+ // BMP codepoint + String.fromCharCode( high + 0x10000 ) : + // Supplemental Plane codepoint (surrogate pair) + String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 ); + }, + + // CSS string/identifier serialization + // https://drafts.csswg.org/cssom/#common-serializing-idioms + rcssescape = /([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g, + fcssescape = function( ch, asCodePoint ) { + if ( asCodePoint ) { + + // U+0000 NULL becomes U+FFFD REPLACEMENT CHARACTER + if ( ch === "\0" ) { + return "\uFFFD"; + } + + // Control characters and (dependent upon position) numbers get escaped as code points + return ch.slice( 0, -1 ) + "\\" + ch.charCodeAt( ch.length - 1 ).toString( 16 ) + " "; + } + + // Other potentially-special ASCII characters get backslash-escaped + return "\\" + ch; + }, + + // Used for iframes + // See setDocument() + // Removing the function wrapper causes a "Permission Denied" + // error in IE + unloadHandler = function() { + setDocument(); + }, + + disabledAncestor = addCombinator( + function( elem ) { + return elem.disabled === true && ("form" in elem || "label" in elem); + }, + { dir: "parentNode", next: "legend" } + ); + +// Optimize for push.apply( _, NodeList ) +try { + push.apply( + (arr = slice.call( preferredDoc.childNodes )), + preferredDoc.childNodes + ); + // Support: Android<4.0 + // Detect silently failing push.apply + arr[ preferredDoc.childNodes.length ].nodeType; +} catch ( e ) { + push = { apply: arr.length ? + + // Leverage slice if possible + function( target, els ) { + push_native.apply( target, slice.call(els) ); + } : + + // Support: IE<9 + // Otherwise append directly + function( target, els ) { + var j = target.length, + i = 0; + // Can't trust NodeList.length + while ( (target[j++] = els[i++]) ) {} + target.length = j - 1; + } + }; +} + +function Sizzle( selector, context, results, seed ) { + var m, i, elem, nid, match, groups, newSelector, + newContext = context && context.ownerDocument, + + // nodeType defaults to 9, since context defaults to document + nodeType = context ? context.nodeType : 9; + + results = results || []; + + // Return early from calls with invalid selector or context + if ( typeof selector !== "string" || !selector || + nodeType !== 1 && nodeType !== 9 && nodeType !== 11 ) { + + return results; + } + + // Try to shortcut find operations (as opposed to filters) in HTML documents + if ( !seed ) { + + if ( ( context ? 
context.ownerDocument || context : preferredDoc ) !== document ) { + setDocument( context ); + } + context = context || document; + + if ( documentIsHTML ) { + + // If the selector is sufficiently simple, try using a "get*By*" DOM method + // (excepting DocumentFragment context, where the methods don't exist) + if ( nodeType !== 11 && (match = rquickExpr.exec( selector )) ) { + + // ID selector + if ( (m = match[1]) ) { + + // Document context + if ( nodeType === 9 ) { + if ( (elem = context.getElementById( m )) ) { + + // Support: IE, Opera, Webkit + // TODO: identify versions + // getElementById can match elements by name instead of ID + if ( elem.id === m ) { + results.push( elem ); + return results; + } + } else { + return results; + } + + // Element context + } else { + + // Support: IE, Opera, Webkit + // TODO: identify versions + // getElementById can match elements by name instead of ID + if ( newContext && (elem = newContext.getElementById( m )) && + contains( context, elem ) && + elem.id === m ) { + + results.push( elem ); + return results; + } + } + + // Type selector + } else if ( match[2] ) { + push.apply( results, context.getElementsByTagName( selector ) ); + return results; + + // Class selector + } else if ( (m = match[3]) && support.getElementsByClassName && + context.getElementsByClassName ) { + + push.apply( results, context.getElementsByClassName( m ) ); + return results; + } + } + + // Take advantage of querySelectorAll + if ( support.qsa && + !compilerCache[ selector + " " ] && + (!rbuggyQSA || !rbuggyQSA.test( selector )) ) { + + if ( nodeType !== 1 ) { + newContext = context; + newSelector = selector; + + // qSA looks outside Element context, which is not what we want + // Thanks to Andrew Dupont for this workaround technique + // Support: IE <=8 + // Exclude object elements + } else if ( context.nodeName.toLowerCase() !== "object" ) { + + // Capture the context ID, setting it first if necessary + if ( (nid = context.getAttribute( "id" )) ) { + nid = nid.replace( rcssescape, fcssescape ); + } else { + context.setAttribute( "id", (nid = expando) ); + } + + // Prefix every selector in the list + groups = tokenize( selector ); + i = groups.length; + while ( i-- ) { + groups[i] = "#" + nid + " " + toSelector( groups[i] ); + } + newSelector = groups.join( "," ); + + // Expand context for sibling selectors + newContext = rsibling.test( selector ) && testContext( context.parentNode ) || + context; + } + + if ( newSelector ) { + try { + push.apply( results, + newContext.querySelectorAll( newSelector ) + ); + return results; + } catch ( qsaError ) { + } finally { + if ( nid === expando ) { + context.removeAttribute( "id" ); + } + } + } + } + } + } + + // All others + return select( selector.replace( rtrim, "$1" ), context, results, seed ); +} + +/** + * Create key-value caches of limited size + * @returns {function(string, object)} Returns the Object data after storing it on itself with + * property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength) + * deleting the oldest entry + */ +function createCache() { + var keys = []; + + function cache( key, value ) { + // Use (key + " ") to avoid collision with native prototype properties (see Issue #157) + if ( keys.push( key + " " ) > Expr.cacheLength ) { + // Only keep the most recent entries + delete cache[ keys.shift() ]; + } + return (cache[ key + " " ] = value); + } + return cache; +} + +/** + * Mark a function for special use by Sizzle + * @param {Function} fn The function to mark + */ 
+function markFunction( fn ) { + fn[ expando ] = true; + return fn; +} + +/** + * Support testing using an element + * @param {Function} fn Passed the created element and returns a boolean result + */ +function assert( fn ) { + var el = document.createElement("fieldset"); + + try { + return !!fn( el ); + } catch (e) { + return false; + } finally { + // Remove from its parent by default + if ( el.parentNode ) { + el.parentNode.removeChild( el ); + } + // release memory in IE + el = null; + } +} + +/** + * Adds the same handler for all of the specified attrs + * @param {String} attrs Pipe-separated list of attributes + * @param {Function} handler The method that will be applied + */ +function addHandle( attrs, handler ) { + var arr = attrs.split("|"), + i = arr.length; + + while ( i-- ) { + Expr.attrHandle[ arr[i] ] = handler; + } +} + +/** + * Checks document order of two siblings + * @param {Element} a + * @param {Element} b + * @returns {Number} Returns less than 0 if a precedes b, greater than 0 if a follows b + */ +function siblingCheck( a, b ) { + var cur = b && a, + diff = cur && a.nodeType === 1 && b.nodeType === 1 && + a.sourceIndex - b.sourceIndex; + + // Use IE sourceIndex if available on both nodes + if ( diff ) { + return diff; + } + + // Check if b follows a + if ( cur ) { + while ( (cur = cur.nextSibling) ) { + if ( cur === b ) { + return -1; + } + } + } + + return a ? 1 : -1; +} + +/** + * Returns a function to use in pseudos for input types + * @param {String} type + */ +function createInputPseudo( type ) { + return function( elem ) { + var name = elem.nodeName.toLowerCase(); + return name === "input" && elem.type === type; + }; +} + +/** + * Returns a function to use in pseudos for buttons + * @param {String} type + */ +function createButtonPseudo( type ) { + return function( elem ) { + var name = elem.nodeName.toLowerCase(); + return (name === "input" || name === "button") && elem.type === type; + }; +} + +/** + * Returns a function to use in pseudos for :enabled/:disabled + * @param {Boolean} disabled true for :disabled; false for :enabled + */ +function createDisabledPseudo( disabled ) { + + // Known :disabled false positives: fieldset[disabled] > legend:nth-of-type(n+2) :can-disable + return function( elem ) { + + // Only certain elements can match :enabled or :disabled + // https://html.spec.whatwg.org/multipage/scripting.html#selector-enabled + // https://html.spec.whatwg.org/multipage/scripting.html#selector-disabled + if ( "form" in elem ) { + + // Check for inherited disabledness on relevant non-disabled elements: + // * listed form-associated elements in a disabled fieldset + // https://html.spec.whatwg.org/multipage/forms.html#category-listed + // https://html.spec.whatwg.org/multipage/forms.html#concept-fe-disabled + // * option elements in a disabled optgroup + // https://html.spec.whatwg.org/multipage/forms.html#concept-option-disabled + // All such elements have a "form" property. 
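+			// Illustrative sketch: given
+			//   <fieldset disabled>
+			//     <legend><input id="a"></legend>
+			//     <input id="b">
+			//   </fieldset>
+			// #a stays :enabled (controls in a disabled fieldset's first legend
+			// are exempt) while #b matches :disabled via the checks below.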
+ if ( elem.parentNode && elem.disabled === false ) { + + // Option elements defer to a parent optgroup if present + if ( "label" in elem ) { + if ( "label" in elem.parentNode ) { + return elem.parentNode.disabled === disabled; + } else { + return elem.disabled === disabled; + } + } + + // Support: IE 6 - 11 + // Use the isDisabled shortcut property to check for disabled fieldset ancestors + return elem.isDisabled === disabled || + + // Where there is no isDisabled, check manually + /* jshint -W018 */ + elem.isDisabled !== !disabled && + disabledAncestor( elem ) === disabled; + } + + return elem.disabled === disabled; + + // Try to winnow out elements that can't be disabled before trusting the disabled property. + // Some victims get caught in our net (label, legend, menu, track), but it shouldn't + // even exist on them, let alone have a boolean value. + } else if ( "label" in elem ) { + return elem.disabled === disabled; + } + + // Remaining elements are neither :enabled nor :disabled + return false; + }; +} + +/** + * Returns a function to use in pseudos for positionals + * @param {Function} fn + */ +function createPositionalPseudo( fn ) { + return markFunction(function( argument ) { + argument = +argument; + return markFunction(function( seed, matches ) { + var j, + matchIndexes = fn( [], seed.length, argument ), + i = matchIndexes.length; + + // Match elements found at the specified indexes + while ( i-- ) { + if ( seed[ (j = matchIndexes[i]) ] ) { + seed[j] = !(matches[j] = seed[j]); + } + } + }); + }); +} + +/** + * Checks a node for validity as a Sizzle context + * @param {Element|Object=} context + * @returns {Element|Object|Boolean} The input node if acceptable, otherwise a falsy value + */ +function testContext( context ) { + return context && typeof context.getElementsByTagName !== "undefined" && context; +} + +// Expose support vars for convenience +support = Sizzle.support = {}; + +/** + * Detects XML nodes + * @param {Element|Object} elem An element or a document + * @returns {Boolean} True iff elem is a non-HTML XML node + */ +isXML = Sizzle.isXML = function( elem ) { + // documentElement is verified for cases where it doesn't yet exist + // (such as loading iframes in IE - #4833) + var documentElement = elem && (elem.ownerDocument || elem).documentElement; + return documentElement ? documentElement.nodeName !== "HTML" : false; +}; + +/** + * Sets document-related variables once based on the current document + * @param {Element|Object} [doc] An element or document object to use to set the document + * @returns {Object} Returns the current document + */ +setDocument = Sizzle.setDocument = function( node ) { + var hasCompare, subWindow, + doc = node ? 
node.ownerDocument || node : preferredDoc; + + // Return early if doc is invalid or already selected + if ( doc === document || doc.nodeType !== 9 || !doc.documentElement ) { + return document; + } + + // Update global variables + document = doc; + docElem = document.documentElement; + documentIsHTML = !isXML( document ); + + // Support: IE 9-11, Edge + // Accessing iframe documents after unload throws "permission denied" errors (jQuery #13936) + if ( preferredDoc !== document && + (subWindow = document.defaultView) && subWindow.top !== subWindow ) { + + // Support: IE 11, Edge + if ( subWindow.addEventListener ) { + subWindow.addEventListener( "unload", unloadHandler, false ); + + // Support: IE 9 - 10 only + } else if ( subWindow.attachEvent ) { + subWindow.attachEvent( "onunload", unloadHandler ); + } + } + + /* Attributes + ---------------------------------------------------------------------- */ + + // Support: IE<8 + // Verify that getAttribute really returns attributes and not properties + // (excepting IE8 booleans) + support.attributes = assert(function( el ) { + el.className = "i"; + return !el.getAttribute("className"); + }); + + /* getElement(s)By* + ---------------------------------------------------------------------- */ + + // Check if getElementsByTagName("*") returns only elements + support.getElementsByTagName = assert(function( el ) { + el.appendChild( document.createComment("") ); + return !el.getElementsByTagName("*").length; + }); + + // Support: IE<9 + support.getElementsByClassName = rnative.test( document.getElementsByClassName ); + + // Support: IE<10 + // Check if getElementById returns elements by name + // The broken getElementById methods don't pick up programmatically-set names, + // so use a roundabout getElementsByName test + support.getById = assert(function( el ) { + docElem.appendChild( el ).id = expando; + return !document.getElementsByName || !document.getElementsByName( expando ).length; + }); + + // ID filter and find + if ( support.getById ) { + Expr.filter["ID"] = function( id ) { + var attrId = id.replace( runescape, funescape ); + return function( elem ) { + return elem.getAttribute("id") === attrId; + }; + }; + Expr.find["ID"] = function( id, context ) { + if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { + var elem = context.getElementById( id ); + return elem ? [ elem ] : []; + } + }; + } else { + Expr.filter["ID"] = function( id ) { + var attrId = id.replace( runescape, funescape ); + return function( elem ) { + var node = typeof elem.getAttributeNode !== "undefined" && + elem.getAttributeNode("id"); + return node && node.value === attrId; + }; + }; + + // Support: IE 6 - 7 only + // getElementById is not reliable as a find shortcut + Expr.find["ID"] = function( id, context ) { + if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { + var node, i, elems, + elem = context.getElementById( id ); + + if ( elem ) { + + // Verify the id attribute + node = elem.getAttributeNode("id"); + if ( node && node.value === id ) { + return [ elem ]; + } + + // Fall back on getElementsByName + elems = context.getElementsByName( id ); + i = 0; + while ( (elem = elems[i++]) ) { + node = elem.getAttributeNode("id"); + if ( node && node.value === id ) { + return [ elem ]; + } + } + } + + return []; + } + }; + } + + // Tag + Expr.find["TAG"] = support.getElementsByTagName ? 
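+	// When the gEBTN support test passed (no comment nodes leak into results),
+	// delegate to it directly; otherwise fall back to the filtering variant below.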
+		function( tag, context ) {
+			if ( typeof context.getElementsByTagName !== "undefined" ) {
+				return context.getElementsByTagName( tag );
+
+			// DocumentFragment nodes don't have gEBTN
+			} else if ( support.qsa ) {
+				return context.querySelectorAll( tag );
+			}
+		} :
+
+		function( tag, context ) {
+			var elem,
+				tmp = [],
+				i = 0,
+
+				// By happy coincidence, a (broken) gEBTN appears on DocumentFragment nodes too
+				results = context.getElementsByTagName( tag );
+
+			// Filter out possible comments
+			if ( tag === "*" ) {
+				while ( (elem = results[i++]) ) {
+					if ( elem.nodeType === 1 ) {
+						tmp.push( elem );
+					}
+				}
+
+				return tmp;
+			}
+			return results;
+		};
+
+	// Class
+	Expr.find["CLASS"] = support.getElementsByClassName && function( className, context ) {
+		if ( typeof context.getElementsByClassName !== "undefined" && documentIsHTML ) {
+			return context.getElementsByClassName( className );
+		}
+	};
+
+	/* QSA/matchesSelector
+	---------------------------------------------------------------------- */
+
+	// QSA and matchesSelector support
+
+	// matchesSelector(:active) reports false when true (IE9/Opera 11.5)
+	rbuggyMatches = [];
+
+	// qSa(:focus) reports false when true (Chrome 21)
+	// We allow this because of a bug in IE8/9 that throws an error
+	// whenever `document.activeElement` is accessed on an iframe
+	// So, we allow :focus to pass through QSA all the time to avoid the IE error
+	// See https://bugs.jquery.com/ticket/13378
+	rbuggyQSA = [];
+
+	if ( (support.qsa = rnative.test( document.querySelectorAll )) ) {
+		// Build QSA regex
+		// Regex strategy adopted from Diego Perini
+		assert(function( el ) {
+			// Select is set to empty string on purpose
+			// This is to test IE's treatment of not explicitly
+			// setting a boolean content attribute,
+			// since its presence should be enough
+			// https://bugs.jquery.com/ticket/12359
+			docElem.appendChild( el ).innerHTML = "<a id='" + expando + "'></a>" +
+				"<select id='" + expando + "-\r\\' msallowcapture=''>" +
+				"<option selected=''></option></select>";
+
+			// Support: IE8, Opera 11-12.16
+			// Nothing should be selected when empty strings follow ^= or $= or *=
+			// The test attribute must be unknown in Opera but "safe" for WinRT
+			// https://msdn.microsoft.com/en-us/library/ie/hh465388.aspx#attribute_section
+			if ( el.querySelectorAll("[msallowcapture^='']").length ) {
+				rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:''|\"\")" );
+			}
+
+			// Support: IE8
+			// Boolean attributes and "value" are not treated correctly
+			if ( !el.querySelectorAll("[selected]").length ) {
+				rbuggyQSA.push( "\\[" + whitespace + "*(?:value|" + booleans + ")" );
+			}
+
+			// Support: Chrome<29, Android<4.4, Safari<7.0+, iOS<7.0+, PhantomJS<1.9.8+
+			if ( !el.querySelectorAll( "[id~=" + expando + "-]" ).length ) {
+				rbuggyQSA.push("~=");
+			}
+
+			// Webkit/Opera - :checked should return selected option elements
+			// http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked
+			// IE8 throws error here and will not see later tests
+			if ( !el.querySelectorAll(":checked").length ) {
+				rbuggyQSA.push(":checked");
+			}
+
+			// Support: Safari 8+, iOS 8+
+			// https://bugs.webkit.org/show_bug.cgi?id=136851
+			// In-page `selector#id sibling-combinator selector` fails
+			if ( !el.querySelectorAll( "a#" + expando + "+*" ).length ) {
+				rbuggyQSA.push(".#.+[+~]");
+			}
+		});
+
+		assert(function( el ) {
+			el.innerHTML = "<a href='' disabled='disabled'></a>" +
+				"<select disabled='disabled'><option/></select>";
+
+			// Support: Windows 8 Native Apps
+			// The type and name attributes are restricted during .innerHTML assignment
+			var input = document.createElement("input");
+			input.setAttribute( "type", "hidden" );
+			el.appendChild( input ).setAttribute( "name", "D" );
+
+			// Support: IE8
+
// Enforce case-sensitivity of name attribute + if ( el.querySelectorAll("[name=d]").length ) { + rbuggyQSA.push( "name" + whitespace + "*[*^$|!~]?=" ); + } + + // FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled) + // IE8 throws error here and will not see later tests + if ( el.querySelectorAll(":enabled").length !== 2 ) { + rbuggyQSA.push( ":enabled", ":disabled" ); + } + + // Support: IE9-11+ + // IE's :disabled selector does not pick up the children of disabled fieldsets + docElem.appendChild( el ).disabled = true; + if ( el.querySelectorAll(":disabled").length !== 2 ) { + rbuggyQSA.push( ":enabled", ":disabled" ); + } + + // Opera 10-11 does not throw on post-comma invalid pseudos + el.querySelectorAll("*,:x"); + rbuggyQSA.push(",.*:"); + }); + } + + if ( (support.matchesSelector = rnative.test( (matches = docElem.matches || + docElem.webkitMatchesSelector || + docElem.mozMatchesSelector || + docElem.oMatchesSelector || + docElem.msMatchesSelector) )) ) { + + assert(function( el ) { + // Check to see if it's possible to do matchesSelector + // on a disconnected node (IE 9) + support.disconnectedMatch = matches.call( el, "*" ); + + // This should fail with an exception + // Gecko does not error, returns false instead + matches.call( el, "[s!='']:x" ); + rbuggyMatches.push( "!=", pseudos ); + }); + } + + rbuggyQSA = rbuggyQSA.length && new RegExp( rbuggyQSA.join("|") ); + rbuggyMatches = rbuggyMatches.length && new RegExp( rbuggyMatches.join("|") ); + + /* Contains + ---------------------------------------------------------------------- */ + hasCompare = rnative.test( docElem.compareDocumentPosition ); + + // Element contains another + // Purposefully self-exclusive + // As in, an element does not contain itself + contains = hasCompare || rnative.test( docElem.contains ) ? + function( a, b ) { + var adown = a.nodeType === 9 ? a.documentElement : a, + bup = b && b.parentNode; + return a === bup || !!( bup && bup.nodeType === 1 && ( + adown.contains ? + adown.contains( bup ) : + a.compareDocumentPosition && a.compareDocumentPosition( bup ) & 16 + )); + } : + function( a, b ) { + if ( b ) { + while ( (b = b.parentNode) ) { + if ( b === a ) { + return true; + } + } + } + return false; + }; + + /* Sorting + ---------------------------------------------------------------------- */ + + // Document order sorting + sortOrder = hasCompare ? + function( a, b ) { + + // Flag for duplicate removal + if ( a === b ) { + hasDuplicate = true; + return 0; + } + + // Sort on method existence if only one input has compareDocumentPosition + var compare = !a.compareDocumentPosition - !b.compareDocumentPosition; + if ( compare ) { + return compare; + } + + // Calculate position if both inputs belong to the same document + compare = ( a.ownerDocument || a ) === ( b.ownerDocument || b ) ? + a.compareDocumentPosition( b ) : + + // Otherwise we know they are disconnected + 1; + + // Disconnected nodes + if ( compare & 1 || + (!support.sortDetached && b.compareDocumentPosition( a ) === compare) ) { + + // Choose the first element that is related to our preferred document + if ( a === document || a.ownerDocument === preferredDoc && contains(preferredDoc, a) ) { + return -1; + } + if ( b === document || b.ownerDocument === preferredDoc && contains(preferredDoc, b) ) { + return 1; + } + + // Maintain original order + return sortInput ? + ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : + 0; + } + + return compare & 4 ? 
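+			// DOCUMENT_POSITION_FOLLOWING (bit 4) set on a.compareDocumentPosition( b )
+			// means b follows a in the document, so a sorts first: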
-1 : 1; + } : + function( a, b ) { + // Exit early if the nodes are identical + if ( a === b ) { + hasDuplicate = true; + return 0; + } + + var cur, + i = 0, + aup = a.parentNode, + bup = b.parentNode, + ap = [ a ], + bp = [ b ]; + + // Parentless nodes are either documents or disconnected + if ( !aup || !bup ) { + return a === document ? -1 : + b === document ? 1 : + aup ? -1 : + bup ? 1 : + sortInput ? + ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : + 0; + + // If the nodes are siblings, we can do a quick check + } else if ( aup === bup ) { + return siblingCheck( a, b ); + } + + // Otherwise we need full lists of their ancestors for comparison + cur = a; + while ( (cur = cur.parentNode) ) { + ap.unshift( cur ); + } + cur = b; + while ( (cur = cur.parentNode) ) { + bp.unshift( cur ); + } + + // Walk down the tree looking for a discrepancy + while ( ap[i] === bp[i] ) { + i++; + } + + return i ? + // Do a sibling check if the nodes have a common ancestor + siblingCheck( ap[i], bp[i] ) : + + // Otherwise nodes in our document sort first + ap[i] === preferredDoc ? -1 : + bp[i] === preferredDoc ? 1 : + 0; + }; + + return document; +}; + +Sizzle.matches = function( expr, elements ) { + return Sizzle( expr, null, null, elements ); +}; + +Sizzle.matchesSelector = function( elem, expr ) { + // Set document vars if needed + if ( ( elem.ownerDocument || elem ) !== document ) { + setDocument( elem ); + } + + // Make sure that attribute selectors are quoted + expr = expr.replace( rattributeQuotes, "='$1']" ); + + if ( support.matchesSelector && documentIsHTML && + !compilerCache[ expr + " " ] && + ( !rbuggyMatches || !rbuggyMatches.test( expr ) ) && + ( !rbuggyQSA || !rbuggyQSA.test( expr ) ) ) { + + try { + var ret = matches.call( elem, expr ); + + // IE 9's matchesSelector returns false on disconnected nodes + if ( ret || support.disconnectedMatch || + // As well, disconnected nodes are said to be in a document + // fragment in IE 9 + elem.document && elem.document.nodeType !== 11 ) { + return ret; + } + } catch (e) {} + } + + return Sizzle( expr, document, null, [ elem ] ).length > 0; +}; + +Sizzle.contains = function( context, elem ) { + // Set document vars if needed + if ( ( context.ownerDocument || context ) !== document ) { + setDocument( context ); + } + return contains( context, elem ); +}; + +Sizzle.attr = function( elem, name ) { + // Set document vars if needed + if ( ( elem.ownerDocument || elem ) !== document ) { + setDocument( elem ); + } + + var fn = Expr.attrHandle[ name.toLowerCase() ], + // Don't get fooled by Object.prototype properties (jQuery #13807) + val = fn && hasOwn.call( Expr.attrHandle, name.toLowerCase() ) ? + fn( elem, name, !documentIsHTML ) : + undefined; + + return val !== undefined ? + val : + support.attributes || !documentIsHTML ? + elem.getAttribute( name ) : + (val = elem.getAttributeNode(name)) && val.specified ? 
+ val.value : + null; +}; + +Sizzle.escape = function( sel ) { + return (sel + "").replace( rcssescape, fcssescape ); +}; + +Sizzle.error = function( msg ) { + throw new Error( "Syntax error, unrecognized expression: " + msg ); +}; + +/** + * Document sorting and removing duplicates + * @param {ArrayLike} results + */ +Sizzle.uniqueSort = function( results ) { + var elem, + duplicates = [], + j = 0, + i = 0; + + // Unless we *know* we can detect duplicates, assume their presence + hasDuplicate = !support.detectDuplicates; + sortInput = !support.sortStable && results.slice( 0 ); + results.sort( sortOrder ); + + if ( hasDuplicate ) { + while ( (elem = results[i++]) ) { + if ( elem === results[ i ] ) { + j = duplicates.push( i ); + } + } + while ( j-- ) { + results.splice( duplicates[ j ], 1 ); + } + } + + // Clear input after sorting to release objects + // See https://github.com/jquery/sizzle/pull/225 + sortInput = null; + + return results; +}; + +/** + * Utility function for retrieving the text value of an array of DOM nodes + * @param {Array|Element} elem + */ +getText = Sizzle.getText = function( elem ) { + var node, + ret = "", + i = 0, + nodeType = elem.nodeType; + + if ( !nodeType ) { + // If no nodeType, this is expected to be an array + while ( (node = elem[i++]) ) { + // Do not traverse comment nodes + ret += getText( node ); + } + } else if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) { + // Use textContent for elements + // innerText usage removed for consistency of new lines (jQuery #11153) + if ( typeof elem.textContent === "string" ) { + return elem.textContent; + } else { + // Traverse its children + for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { + ret += getText( elem ); + } + } + } else if ( nodeType === 3 || nodeType === 4 ) { + return elem.nodeValue; + } + // Do not include comment or processing instruction nodes + + return ret; +}; + +Expr = Sizzle.selectors = { + + // Can be adjusted by the user + cacheLength: 50, + + createPseudo: markFunction, + + match: matchExpr, + + attrHandle: {}, + + find: {}, + + relative: { + ">": { dir: "parentNode", first: true }, + " ": { dir: "parentNode" }, + "+": { dir: "previousSibling", first: true }, + "~": { dir: "previousSibling" } + }, + + preFilter: { + "ATTR": function( match ) { + match[1] = match[1].replace( runescape, funescape ); + + // Move the given value to match[3] whether quoted or unquoted + match[3] = ( match[3] || match[4] || match[5] || "" ).replace( runescape, funescape ); + + if ( match[2] === "~=" ) { + match[3] = " " + match[3] + " "; + } + + return match.slice( 0, 4 ); + }, + + "CHILD": function( match ) { + /* matches from matchExpr["CHILD"] + 1 type (only|nth|...) + 2 what (child|of-type) + 3 argument (even|odd|\d*|\d*n([+-]\d+)?|...) + 4 xn-component of xn+y argument ([+-]?\d*n|) + 5 sign of xn-component + 6 x of xn-component + 7 sign of y-component + 8 y of y-component + */ + match[1] = match[1].toLowerCase(); + + if ( match[1].slice( 0, 3 ) === "nth" ) { + // nth-* requires argument + if ( !match[3] ) { + Sizzle.error( match[0] ); + } + + // numeric x and y parameters for Expr.filter.CHILD + // remember that false/true cast respectively to 0/1 + match[4] = +( match[4] ? 
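+				// Worked sketch: for ":nth-child(2n+1)" the xn-component "2n" is
+				// present, so first becomes +( "" + "2" ) = 2 and last becomes
+				// +( "+" + "1" ) = 1; ":nth-child(even)" has no xn-component,
+				// giving first = 2 * true = 2 and last = 0.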
match[5] + (match[6] || 1) : 2 * ( match[3] === "even" || match[3] === "odd" ) ); + match[5] = +( ( match[7] + match[8] ) || match[3] === "odd" ); + + // other types prohibit arguments + } else if ( match[3] ) { + Sizzle.error( match[0] ); + } + + return match; + }, + + "PSEUDO": function( match ) { + var excess, + unquoted = !match[6] && match[2]; + + if ( matchExpr["CHILD"].test( match[0] ) ) { + return null; + } + + // Accept quoted arguments as-is + if ( match[3] ) { + match[2] = match[4] || match[5] || ""; + + // Strip excess characters from unquoted arguments + } else if ( unquoted && rpseudo.test( unquoted ) && + // Get excess from tokenize (recursively) + (excess = tokenize( unquoted, true )) && + // advance to the next closing parenthesis + (excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length) ) { + + // excess is a negative index + match[0] = match[0].slice( 0, excess ); + match[2] = unquoted.slice( 0, excess ); + } + + // Return only captures needed by the pseudo filter method (type and argument) + return match.slice( 0, 3 ); + } + }, + + filter: { + + "TAG": function( nodeNameSelector ) { + var nodeName = nodeNameSelector.replace( runescape, funescape ).toLowerCase(); + return nodeNameSelector === "*" ? + function() { return true; } : + function( elem ) { + return elem.nodeName && elem.nodeName.toLowerCase() === nodeName; + }; + }, + + "CLASS": function( className ) { + var pattern = classCache[ className + " " ]; + + return pattern || + (pattern = new RegExp( "(^|" + whitespace + ")" + className + "(" + whitespace + "|$)" )) && + classCache( className, function( elem ) { + return pattern.test( typeof elem.className === "string" && elem.className || typeof elem.getAttribute !== "undefined" && elem.getAttribute("class") || "" ); + }); + }, + + "ATTR": function( name, operator, check ) { + return function( elem ) { + var result = Sizzle.attr( elem, name ); + + if ( result == null ) { + return operator === "!="; + } + if ( !operator ) { + return true; + } + + result += ""; + + return operator === "=" ? result === check : + operator === "!=" ? result !== check : + operator === "^=" ? check && result.indexOf( check ) === 0 : + operator === "*=" ? check && result.indexOf( check ) > -1 : + operator === "$=" ? check && result.slice( -check.length ) === check : + operator === "~=" ? ( " " + result.replace( rwhitespace, " " ) + " " ).indexOf( check ) > -1 : + operator === "|=" ? result === check || result.slice( 0, check.length + 1 ) === check + "-" : + false; + }; + }, + + "CHILD": function( type, what, argument, first, last ) { + var simple = type.slice( 0, 3 ) !== "nth", + forward = type.slice( -4 ) !== "last", + ofType = what === "of-type"; + + return first === 1 && last === 0 ? + + // Shortcut for :nth-*(n) + function( elem ) { + return !!elem.parentNode; + } : + + function( elem, context, xml ) { + var cache, uniqueCache, outerCache, node, nodeIndex, start, + dir = simple !== forward ? "nextSibling" : "previousSibling", + parent = elem.parentNode, + name = ofType && elem.nodeName.toLowerCase(), + useCache = !xml && !ofType, + diff = false; + + if ( parent ) { + + // :(first|last|only)-(child|of-type) + if ( simple ) { + while ( dir ) { + node = elem; + while ( (node = node[ dir ]) ) { + if ( ofType ? 
+ node.nodeName.toLowerCase() === name : + node.nodeType === 1 ) { + + return false; + } + } + // Reverse direction for :only-* (if we haven't yet done so) + start = dir = type === "only" && !start && "nextSibling"; + } + return true; + } + + start = [ forward ? parent.firstChild : parent.lastChild ]; + + // non-xml :nth-child(...) stores cache data on `parent` + if ( forward && useCache ) { + + // Seek `elem` from a previously-cached index + + // ...in a gzip-friendly way + node = parent; + outerCache = node[ expando ] || (node[ expando ] = {}); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + (outerCache[ node.uniqueID ] = {}); + + cache = uniqueCache[ type ] || []; + nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; + diff = nodeIndex && cache[ 2 ]; + node = nodeIndex && parent.childNodes[ nodeIndex ]; + + while ( (node = ++nodeIndex && node && node[ dir ] || + + // Fallback to seeking `elem` from the start + (diff = nodeIndex = 0) || start.pop()) ) { + + // When found, cache indexes on `parent` and break + if ( node.nodeType === 1 && ++diff && node === elem ) { + uniqueCache[ type ] = [ dirruns, nodeIndex, diff ]; + break; + } + } + + } else { + // Use previously-cached element index if available + if ( useCache ) { + // ...in a gzip-friendly way + node = elem; + outerCache = node[ expando ] || (node[ expando ] = {}); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + (outerCache[ node.uniqueID ] = {}); + + cache = uniqueCache[ type ] || []; + nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; + diff = nodeIndex; + } + + // xml :nth-child(...) + // or :nth-last-child(...) or :nth(-last)?-of-type(...) + if ( diff === false ) { + // Use the same loop as above to seek `elem` from the start + while ( (node = ++nodeIndex && node && node[ dir ] || + (diff = nodeIndex = 0) || start.pop()) ) { + + if ( ( ofType ? + node.nodeName.toLowerCase() === name : + node.nodeType === 1 ) && + ++diff ) { + + // Cache the index of each encountered element + if ( useCache ) { + outerCache = node[ expando ] || (node[ expando ] = {}); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + (outerCache[ node.uniqueID ] = {}); + + uniqueCache[ type ] = [ dirruns, diff ]; + } + + if ( node === elem ) { + break; + } + } + } + } + } + + // Incorporate the offset, then check against cycle size + diff -= last; + return diff === first || ( diff % first === 0 && diff / first >= 0 ); + } + }; + }, + + "PSEUDO": function( pseudo, argument ) { + // pseudo-class names are case-insensitive + // http://www.w3.org/TR/selectors/#pseudo-classes + // Prioritize by case sensitivity in case custom pseudos are added with uppercase letters + // Remember that setFilters inherits from pseudos + var args, + fn = Expr.pseudos[ pseudo ] || Expr.setFilters[ pseudo.toLowerCase() ] || + Sizzle.error( "unsupported pseudo: " + pseudo ); + + // The user may use createPseudo to indicate that + // arguments are needed to create the filter function + // just as Sizzle does + if ( fn[ expando ] ) { + return fn( argument ); + } + + // But maintain support for old signatures + if ( fn.length > 1 ) { + args = [ pseudo, pseudo, "", argument ]; + return Expr.setFilters.hasOwnProperty( pseudo.toLowerCase() ) ? 
+ markFunction(function( seed, matches ) { + var idx, + matched = fn( seed, argument ), + i = matched.length; + while ( i-- ) { + idx = indexOf( seed, matched[i] ); + seed[ idx ] = !( matches[ idx ] = matched[i] ); + } + }) : + function( elem ) { + return fn( elem, 0, args ); + }; + } + + return fn; + } + }, + + pseudos: { + // Potentially complex pseudos + "not": markFunction(function( selector ) { + // Trim the selector passed to compile + // to avoid treating leading and trailing + // spaces as combinators + var input = [], + results = [], + matcher = compile( selector.replace( rtrim, "$1" ) ); + + return matcher[ expando ] ? + markFunction(function( seed, matches, context, xml ) { + var elem, + unmatched = matcher( seed, null, xml, [] ), + i = seed.length; + + // Match elements unmatched by `matcher` + while ( i-- ) { + if ( (elem = unmatched[i]) ) { + seed[i] = !(matches[i] = elem); + } + } + }) : + function( elem, context, xml ) { + input[0] = elem; + matcher( input, null, xml, results ); + // Don't keep the element (issue #299) + input[0] = null; + return !results.pop(); + }; + }), + + "has": markFunction(function( selector ) { + return function( elem ) { + return Sizzle( selector, elem ).length > 0; + }; + }), + + "contains": markFunction(function( text ) { + text = text.replace( runescape, funescape ); + return function( elem ) { + return ( elem.textContent || elem.innerText || getText( elem ) ).indexOf( text ) > -1; + }; + }), + + // "Whether an element is represented by a :lang() selector + // is based solely on the element's language value + // being equal to the identifier C, + // or beginning with the identifier C immediately followed by "-". + // The matching of C against the element's language value is performed case-insensitively. + // The identifier C does not have to be a valid language name." + // http://www.w3.org/TR/selectors/#lang-pseudo + "lang": markFunction( function( lang ) { + // lang value must be a valid identifier + if ( !ridentifier.test(lang || "") ) { + Sizzle.error( "unsupported lang: " + lang ); + } + lang = lang.replace( runescape, funescape ).toLowerCase(); + return function( elem ) { + var elemLang; + do { + if ( (elemLang = documentIsHTML ? 
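+				// Hedged sketch: ":lang(en)" matches elements whose own or inherited
+				// lang value is "en" or begins with "en-" (e.g. lang="en-US"),
+				// via the parentNode walk below.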
+ elem.lang : + elem.getAttribute("xml:lang") || elem.getAttribute("lang")) ) { + + elemLang = elemLang.toLowerCase(); + return elemLang === lang || elemLang.indexOf( lang + "-" ) === 0; + } + } while ( (elem = elem.parentNode) && elem.nodeType === 1 ); + return false; + }; + }), + + // Miscellaneous + "target": function( elem ) { + var hash = window.location && window.location.hash; + return hash && hash.slice( 1 ) === elem.id; + }, + + "root": function( elem ) { + return elem === docElem; + }, + + "focus": function( elem ) { + return elem === document.activeElement && (!document.hasFocus || document.hasFocus()) && !!(elem.type || elem.href || ~elem.tabIndex); + }, + + // Boolean properties + "enabled": createDisabledPseudo( false ), + "disabled": createDisabledPseudo( true ), + + "checked": function( elem ) { + // In CSS3, :checked should return both checked and selected elements + // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked + var nodeName = elem.nodeName.toLowerCase(); + return (nodeName === "input" && !!elem.checked) || (nodeName === "option" && !!elem.selected); + }, + + "selected": function( elem ) { + // Accessing this property makes selected-by-default + // options in Safari work properly + if ( elem.parentNode ) { + elem.parentNode.selectedIndex; + } + + return elem.selected === true; + }, + + // Contents + "empty": function( elem ) { + // http://www.w3.org/TR/selectors/#empty-pseudo + // :empty is negated by element (1) or content nodes (text: 3; cdata: 4; entity ref: 5), + // but not by others (comment: 8; processing instruction: 7; etc.) + // nodeType < 6 works because attributes (2) do not appear as children + for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { + if ( elem.nodeType < 6 ) { + return false; + } + } + return true; + }, + + "parent": function( elem ) { + return !Expr.pseudos["empty"]( elem ); + }, + + // Element/input types + "header": function( elem ) { + return rheader.test( elem.nodeName ); + }, + + "input": function( elem ) { + return rinputs.test( elem.nodeName ); + }, + + "button": function( elem ) { + var name = elem.nodeName.toLowerCase(); + return name === "input" && elem.type === "button" || name === "button"; + }, + + "text": function( elem ) { + var attr; + return elem.nodeName.toLowerCase() === "input" && + elem.type === "text" && + + // Support: IE<8 + // New HTML5 attribute values (e.g., "search") appear with elem.type === "text" + ( (attr = elem.getAttribute("type")) == null || attr.toLowerCase() === "text" ); + }, + + // Position-in-collection + "first": createPositionalPseudo(function() { + return [ 0 ]; + }), + + "last": createPositionalPseudo(function( matchIndexes, length ) { + return [ length - 1 ]; + }), + + "eq": createPositionalPseudo(function( matchIndexes, length, argument ) { + return [ argument < 0 ? argument + length : argument ]; + }), + + "even": createPositionalPseudo(function( matchIndexes, length ) { + var i = 0; + for ( ; i < length; i += 2 ) { + matchIndexes.push( i ); + } + return matchIndexes; + }), + + "odd": createPositionalPseudo(function( matchIndexes, length ) { + var i = 1; + for ( ; i < length; i += 2 ) { + matchIndexes.push( i ); + } + return matchIndexes; + }), + + "lt": createPositionalPseudo(function( matchIndexes, length, argument ) { + var i = argument < 0 ? argument + length : argument; + for ( ; --i >= 0; ) { + matchIndexes.push( i ); + } + return matchIndexes; + }), + + "gt": createPositionalPseudo(function( matchIndexes, length, argument ) { + var i = argument < 0 ? 
argument + length : argument; + for ( ; ++i < length; ) { + matchIndexes.push( i ); + } + return matchIndexes; + }) + } +}; + +Expr.pseudos["nth"] = Expr.pseudos["eq"]; + +// Add button/input type pseudos +for ( i in { radio: true, checkbox: true, file: true, password: true, image: true } ) { + Expr.pseudos[ i ] = createInputPseudo( i ); +} +for ( i in { submit: true, reset: true } ) { + Expr.pseudos[ i ] = createButtonPseudo( i ); +} + +// Easy API for creating new setFilters +function setFilters() {} +setFilters.prototype = Expr.filters = Expr.pseudos; +Expr.setFilters = new setFilters(); + +tokenize = Sizzle.tokenize = function( selector, parseOnly ) { + var matched, match, tokens, type, + soFar, groups, preFilters, + cached = tokenCache[ selector + " " ]; + + if ( cached ) { + return parseOnly ? 0 : cached.slice( 0 ); + } + + soFar = selector; + groups = []; + preFilters = Expr.preFilter; + + while ( soFar ) { + + // Comma and first run + if ( !matched || (match = rcomma.exec( soFar )) ) { + if ( match ) { + // Don't consume trailing commas as valid + soFar = soFar.slice( match[0].length ) || soFar; + } + groups.push( (tokens = []) ); + } + + matched = false; + + // Combinators + if ( (match = rcombinators.exec( soFar )) ) { + matched = match.shift(); + tokens.push({ + value: matched, + // Cast descendant combinators to space + type: match[0].replace( rtrim, " " ) + }); + soFar = soFar.slice( matched.length ); + } + + // Filters + for ( type in Expr.filter ) { + if ( (match = matchExpr[ type ].exec( soFar )) && (!preFilters[ type ] || + (match = preFilters[ type ]( match ))) ) { + matched = match.shift(); + tokens.push({ + value: matched, + type: type, + matches: match + }); + soFar = soFar.slice( matched.length ); + } + } + + if ( !matched ) { + break; + } + } + + // Return the length of the invalid excess + // if we're just parsing + // Otherwise, throw an error or return tokens + return parseOnly ? + soFar.length : + soFar ? + Sizzle.error( selector ) : + // Cache the tokens + tokenCache( selector, groups ).slice( 0 ); +}; + +function toSelector( tokens ) { + var i = 0, + len = tokens.length, + selector = ""; + for ( ; i < len; i++ ) { + selector += tokens[i].value; + } + return selector; +} + +function addCombinator( matcher, combinator, base ) { + var dir = combinator.dir, + skip = combinator.next, + key = skip || dir, + checkNonElements = base && key === "parentNode", + doneName = done++; + + return combinator.first ? 
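+	// ">" and "+" are registered in Expr.relative with first: true, so the
+	// branch below tests only the nearest element in the given direction;
+	// " " and "~" fall through to the caching walker that follows.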
+ // Check against closest ancestor/preceding element + function( elem, context, xml ) { + while ( (elem = elem[ dir ]) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + return matcher( elem, context, xml ); + } + } + return false; + } : + + // Check against all ancestor/preceding elements + function( elem, context, xml ) { + var oldCache, uniqueCache, outerCache, + newCache = [ dirruns, doneName ]; + + // We can't set arbitrary data on XML nodes, so they don't benefit from combinator caching + if ( xml ) { + while ( (elem = elem[ dir ]) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + if ( matcher( elem, context, xml ) ) { + return true; + } + } + } + } else { + while ( (elem = elem[ dir ]) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + outerCache = elem[ expando ] || (elem[ expando ] = {}); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ elem.uniqueID ] || (outerCache[ elem.uniqueID ] = {}); + + if ( skip && skip === elem.nodeName.toLowerCase() ) { + elem = elem[ dir ] || elem; + } else if ( (oldCache = uniqueCache[ key ]) && + oldCache[ 0 ] === dirruns && oldCache[ 1 ] === doneName ) { + + // Assign to newCache so results back-propagate to previous elements + return (newCache[ 2 ] = oldCache[ 2 ]); + } else { + // Reuse newcache so results back-propagate to previous elements + uniqueCache[ key ] = newCache; + + // A match means we're done; a fail means we have to keep checking + if ( (newCache[ 2 ] = matcher( elem, context, xml )) ) { + return true; + } + } + } + } + } + return false; + }; +} + +function elementMatcher( matchers ) { + return matchers.length > 1 ? + function( elem, context, xml ) { + var i = matchers.length; + while ( i-- ) { + if ( !matchers[i]( elem, context, xml ) ) { + return false; + } + } + return true; + } : + matchers[0]; +} + +function multipleContexts( selector, contexts, results ) { + var i = 0, + len = contexts.length; + for ( ; i < len; i++ ) { + Sizzle( selector, contexts[i], results ); + } + return results; +} + +function condense( unmatched, map, filter, context, xml ) { + var elem, + newUnmatched = [], + i = 0, + len = unmatched.length, + mapped = map != null; + + for ( ; i < len; i++ ) { + if ( (elem = unmatched[i]) ) { + if ( !filter || filter( elem, context, xml ) ) { + newUnmatched.push( elem ); + if ( mapped ) { + map.push( i ); + } + } + } + } + + return newUnmatched; +} + +function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postSelector ) { + if ( postFilter && !postFilter[ expando ] ) { + postFilter = setMatcher( postFilter ); + } + if ( postFinder && !postFinder[ expando ] ) { + postFinder = setMatcher( postFinder, postSelector ); + } + return markFunction(function( seed, results, context, xml ) { + var temp, i, elem, + preMap = [], + postMap = [], + preexisting = results.length, + + // Get initial elements from seed or context + elems = seed || multipleContexts( selector || "*", context.nodeType ? [ context ] : context, [] ), + + // Prefilter to get matcher input, preserving a map for seed-results synchronization + matcherIn = preFilter && ( seed || !selector ) ? + condense( elems, preMap, preFilter, context, xml ) : + elems, + + matcherOut = matcher ? + // If we have a postFinder, or filtered seed, or non-seed postFilter or preexisting results, + postFinder || ( seed ? preFilter : preexisting || postFilter ) ? 
+ + // ...intermediate processing is necessary + [] : + + // ...otherwise use results directly + results : + matcherIn; + + // Find primary matches + if ( matcher ) { + matcher( matcherIn, matcherOut, context, xml ); + } + + // Apply postFilter + if ( postFilter ) { + temp = condense( matcherOut, postMap ); + postFilter( temp, [], context, xml ); + + // Un-match failing elements by moving them back to matcherIn + i = temp.length; + while ( i-- ) { + if ( (elem = temp[i]) ) { + matcherOut[ postMap[i] ] = !(matcherIn[ postMap[i] ] = elem); + } + } + } + + if ( seed ) { + if ( postFinder || preFilter ) { + if ( postFinder ) { + // Get the final matcherOut by condensing this intermediate into postFinder contexts + temp = []; + i = matcherOut.length; + while ( i-- ) { + if ( (elem = matcherOut[i]) ) { + // Restore matcherIn since elem is not yet a final match + temp.push( (matcherIn[i] = elem) ); + } + } + postFinder( null, (matcherOut = []), temp, xml ); + } + + // Move matched elements from seed to results to keep them synchronized + i = matcherOut.length; + while ( i-- ) { + if ( (elem = matcherOut[i]) && + (temp = postFinder ? indexOf( seed, elem ) : preMap[i]) > -1 ) { + + seed[temp] = !(results[temp] = elem); + } + } + } + + // Add elements to results, through postFinder if defined + } else { + matcherOut = condense( + matcherOut === results ? + matcherOut.splice( preexisting, matcherOut.length ) : + matcherOut + ); + if ( postFinder ) { + postFinder( null, results, matcherOut, xml ); + } else { + push.apply( results, matcherOut ); + } + } + }); +} + +function matcherFromTokens( tokens ) { + var checkContext, matcher, j, + len = tokens.length, + leadingRelative = Expr.relative[ tokens[0].type ], + implicitRelative = leadingRelative || Expr.relative[" "], + i = leadingRelative ? 1 : 0, + + // The foundational matcher ensures that elements are reachable from top-level context(s) + matchContext = addCombinator( function( elem ) { + return elem === checkContext; + }, implicitRelative, true ), + matchAnyContext = addCombinator( function( elem ) { + return indexOf( checkContext, elem ) > -1; + }, implicitRelative, true ), + matchers = [ function( elem, context, xml ) { + var ret = ( !leadingRelative && ( xml || context !== outermostContext ) ) || ( + (checkContext = context).nodeType ? + matchContext( elem, context, xml ) : + matchAnyContext( elem, context, xml ) ); + // Avoid hanging onto element (issue #299) + checkContext = null; + return ret; + } ]; + + for ( ; i < len; i++ ) { + if ( (matcher = Expr.relative[ tokens[i].type ]) ) { + matchers = [ addCombinator(elementMatcher( matchers ), matcher) ]; + } else { + matcher = Expr.filter[ tokens[i].type ].apply( null, tokens[i].matches ); + + // Return special upon seeing a positional matcher + if ( matcher[ expando ] ) { + // Find the next relative operator (if any) for proper handling + j = ++i; + for ( ; j < len; j++ ) { + if ( Expr.relative[ tokens[j].type ] ) { + break; + } + } + return setMatcher( + i > 1 && elementMatcher( matchers ), + i > 1 && toSelector( + // If the preceding token was a descendant combinator, insert an implicit any-element `*` + tokens.slice( 0, i - 1 ).concat({ value: tokens[ i - 2 ].type === " " ? 
"*" : "" }) + ).replace( rtrim, "$1" ), + matcher, + i < j && matcherFromTokens( tokens.slice( i, j ) ), + j < len && matcherFromTokens( (tokens = tokens.slice( j )) ), + j < len && toSelector( tokens ) + ); + } + matchers.push( matcher ); + } + } + + return elementMatcher( matchers ); +} + +function matcherFromGroupMatchers( elementMatchers, setMatchers ) { + var bySet = setMatchers.length > 0, + byElement = elementMatchers.length > 0, + superMatcher = function( seed, context, xml, results, outermost ) { + var elem, j, matcher, + matchedCount = 0, + i = "0", + unmatched = seed && [], + setMatched = [], + contextBackup = outermostContext, + // We must always have either seed elements or outermost context + elems = seed || byElement && Expr.find["TAG"]( "*", outermost ), + // Use integer dirruns iff this is the outermost matcher + dirrunsUnique = (dirruns += contextBackup == null ? 1 : Math.random() || 0.1), + len = elems.length; + + if ( outermost ) { + outermostContext = context === document || context || outermost; + } + + // Add elements passing elementMatchers directly to results + // Support: IE<9, Safari + // Tolerate NodeList properties (IE: "length"; Safari: