@@ -1,70 +0,0 @@
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
step_0 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe'))
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser'))
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: extract_columns_by_semantic_types(attributes)
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types'))
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE,
                          data=['https://metadata.datadrivendiscovery.org/types/Attribute'])
pipeline_description.add_step(step_2)

# Step 3: extract_columns_by_semantic_types(targets)
step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types'))
step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_3.add_output('produce')
step_3.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE,
                          data=['https://metadata.datadrivendiscovery.org/types/TrueTarget'])
pipeline_description.add_step(step_3)

attributes = 'steps.2.produce'
targets = 'steps.3.produce'

# Step 4: imputer
step_4 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.impute_missing'))
step_4.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference=attributes)
step_4.add_output('produce')
pipeline_description.add_step(step_4)

# Step 5: ABOD (angle-based outlier detection)
step_5 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.detection_algorithm.pyod_abod'))
step_5.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.4.produce')
step_5.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)  # select columns via semantic-type metadata
step_5.add_hyperparameter(name='contamination', argument_type=ArgumentType.VALUE, data=0.1)  # expected fraction of outliers
step_5.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2, 4,))  # column indices to operate on
step_5.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='replace')  # substitute the output for the input columns
step_5.add_output('produce')
pipeline_description.add_step(step_5)

# Final Output
pipeline_description.add_output(name='output predictions', data_reference='steps.5.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)
print(yaml)

# Or output JSON instead:
# data = pipeline_description.to_json()
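
# One way to execute the saved pipeline is the D3M reference runtime CLI.
# This is a sketch, not a verified invocation: flag names and the required
# problem/dataset paths depend on your installed d3m version and data layout.
#   python3 -m d3m runtime fit-produce \
#       --pipeline pipeline.yml \
#       --problem problemDoc.json \
#       --input datasetDoc.json \
#       --test-input datasetDoc.json \
#       --output predictions.csv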
@@ -1,67 +0,0 @@
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# -> dataset_to_dataframe -> column_parser -> extract_columns_by_semantic_types(attributes) -> imputer -> pyod_ae
#    (targets are extracted in step 3 but not consumed downstream)

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
step_0 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe'))
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser'))
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: extract_columns_by_semantic_types(attributes)
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types'))
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE,
                          data=['https://metadata.datadrivendiscovery.org/types/Attribute'])
pipeline_description.add_step(step_2)

# Step 3: extract_columns_by_semantic_types(targets)
step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types'))
step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_3.add_output('produce')
step_3.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE,
                          data=['https://metadata.datadrivendiscovery.org/types/TrueTarget'])
pipeline_description.add_step(step_3)

attributes = 'steps.2.produce'
targets = 'steps.3.produce'

# Step 4: imputer
step_4 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.impute_missing'))
step_4.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference=attributes)
step_4.add_output('produce')
pipeline_description.add_step(step_4)

# Step 5: auto encoder (pyod_ae)
# Note: this step reads the extracted attributes directly; point data_reference
# at 'steps.4.produce' instead if it should consume the imputed data.
step_5 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.detection_algorithm.pyod_ae'))
step_5.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference=attributes)
step_5.add_output('produce')
pipeline_description.add_step(step_5)

# Final Output
pipeline_description.add_output(name='output predictions', data_reference='steps.5.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)
print(yaml)

# Or output JSON instead:
# data = pipeline_description.to_json()
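
# The serialized description can be loaded back for inspection or reuse; a
# minimal sketch assuming d3m's standard Pipeline deserialization API:
#   with open('pipeline.yml') as f:
#       restored = Pipeline.from_yaml(f)
#   print(restored.id)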
@@ -1,71 +0,0 @@
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep
from d3m.metadata import hyperparams
import numpy as np

# -> dataset_to_dataframe -> column_parser -> extract_columns_by_semantic_types(attributes) -> standard_scaler -> AutoRegODetector

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe')
step_0 = PrimitiveStep(primitive=primitive_0)
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser')
step_1 = PrimitiveStep(primitive=primitive_1)
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: extract_columns_by_semantic_types(attributes)
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types'))
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE, data=['https://metadata.datadrivendiscovery.org/types/Attribute'])
pipeline_description.add_step(step_2)

# Step 3: standardization (standard_scaler)
primitive_3 = index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.standard_scaler')
step_3 = PrimitiveStep(primitive=primitive_3)
step_3.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_3.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(1, 2, 3, 4, 5,))
step_3.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='new')
step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce')
step_3.add_output('produce')
pipeline_description.add_step(step_3)

# Step 4: AutoRegODetector
primitive_4 = index.get_primitive('d3m.primitives.tods.detection_algorithm.AutoRegODetector')
step_4 = PrimitiveStep(primitive=primitive_4)
step_4.add_hyperparameter(name='contamination', argument_type=ArgumentType.VALUE, data=0.1)  # expected fraction of outliers
step_4.add_hyperparameter(name='window_size', argument_type=ArgumentType.VALUE, data=10)  # sliding-window length
# step_4.add_hyperparameter(name='weights', argument_type=ArgumentType.VALUE, data=weights_ndarray)
step_4.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=False)
# step_4.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2, 3, 4, 5, 6))  # multi-column selection currently misbehaves (multi-dimensional input issue)
step_4.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_4.add_hyperparameter(name='return_subseq_inds', argument_type=ArgumentType.VALUE, data=True)
step_4.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.3.produce')
step_4.add_output('produce')
step_4.add_output('produce_score')
pipeline_description.add_step(step_4)

# Final Output
pipeline_description.add_output(name='output predictions', data_reference='steps.4.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)
print(yaml)

# Or output JSON instead:
# data = pipeline_description.to_json()
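
# Note on outputs (TODS convention; verify against your installed primitives):
# 'produce' yields binary anomaly labels while 'produce_score' yields raw
# outlier scores, and return_subseq_inds=True adds the subsequence indices.
# To expose the scores as a second pipeline output as well:
#   pipeline_description.add_output(name='output scores', data_reference='steps.4.produce_score')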
@@ -1,50 +0,0 @@
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep
from d3m.metadata import hyperparams
import copy

# -> dataset_to_dataframe -> column_parser -> axiswise_scaler

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe')
step_0 = PrimitiveStep(primitive=primitive_0)
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser')
step_1 = PrimitiveStep(primitive=primitive_1)
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: axiswise_scaler
primitive_2 = index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.axiswise_scaler')
step_2 = PrimitiveStep(primitive=primitive_2)
step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2,))
step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
pipeline_description.add_step(step_2)

# Final Output
pipeline_description.add_output(name='output predictions', data_reference='steps.2.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)
print(yaml)

# Or output JSON instead:
# data = pipeline_description.to_json()
@@ -1,44 +0,0 @@
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
step_0 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe'))
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser'))
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: BKFilter
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.feature_analysis.bk_filter'))
# step_2.add_hyperparameter(name='columns_using_method', argument_type=ArgumentType.VALUE, data='name')
step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2, 3))
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
pipeline_description.add_step(step_2)

# Final Output
pipeline_description.add_output(name='output predictions', data_reference='steps.2.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)
print(yaml)

# Or output JSON instead:
# data = pipeline_description.to_json()
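
# The Baxter-King (BK) filter is a band-pass filter that extracts the cyclical
# component of the selected series (columns 2 and 3 here). The pass band and
# lead-lag length are left at the primitive's defaults; the primitive is
# presumed to wrap statsmodels' bkfilter, whose corresponding parameters are
# 'low', 'high', and 'K'.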
@@ -1,51 +0,0 @@
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep
from d3m.metadata import hyperparams
import copy

# -> dataset_to_dataframe -> column_parser -> pyod_cblof

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe')
step_0 = PrimitiveStep(primitive=primitive_0)
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser')
step_1 = PrimitiveStep(primitive=primitive_1)
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: CBLOF detector (pyod_cblof)
primitive_2 = index.get_primitive('d3m.primitives.tods.detection_algorithm.pyod_cblof')
step_2 = PrimitiveStep(primitive=primitive_2)
step_2.add_hyperparameter(name='contamination', argument_type=ArgumentType.VALUE, data=0.1)
step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2,))  # multi-column selection currently misbehaves (multi-dimensional input issue)
step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
pipeline_description.add_step(step_2)

# Final Output
pipeline_description.add_output(name='output predictions', data_reference='steps.2.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)
print(yaml)

# Or output JSON instead:
# data = pipeline_description.to_json()
@@ -1,48 +0,0 @@
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# -> dataset_to_dataframe -> column_parser -> categorical_to_binary

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe')
step_0 = PrimitiveStep(primitive=primitive_0)
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: Column Parser
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser')
step_1 = PrimitiveStep(primitive=primitive_1)
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: Categorical to Binary
primitive_2 = index.get_primitive('d3m.primitives.tods.data_processing.categorical_to_binary')
step_2 = PrimitiveStep(primitive=primitive_2)
step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(3,))
step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
pipeline_description.add_step(step_2)

# Final Output
pipeline_description.add_output(name='output predictions', data_reference='steps.2.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)
print(yaml)

# Or output JSON instead:
# data = pipeline_description.to_json()
@@ -1,49 +0,0 @@
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# -> dataset_to_dataframe -> column_parser -> auto_correlation -> column_filter

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
primitive_0 = index.get_primitive('d3m.primitives.data_transformation.dataset_to_dataframe.Common')
step_0 = PrimitiveStep(primitive=primitive_0)
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser'))
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: auto_correlation
primitive_2 = index.get_primitive('d3m.primitives.tods.feature_analysis.auto_correlation')
step_2 = PrimitiveStep(primitive=primitive_2)
step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2, 3))
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
pipeline_description.add_step(step_2)

# Step 3: column_filter
primitive_3 = index.get_primitive('d3m.primitives.tods.data_processing.column_filter')
step_3 = PrimitiveStep(primitive=primitive_3)
step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce')
step_3.add_output('produce')
pipeline_description.add_step(step_3)

# Final Output
pipeline_description.add_output(name='output predictions', data_reference='steps.3.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)
print(yaml)

# Or output JSON instead:
# data = pipeline_description.to_json()
@@ -1,43 +0,0 @@
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
step_0 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe'))
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser'))
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: ContinuityValidation
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.continuity_validation'))
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
step_2.add_hyperparameter(name='continuity_option', argument_type=ArgumentType.VALUE, data='imputation')
step_2.add_hyperparameter(name='interval', argument_type=ArgumentType.VALUE, data=0.3)
# Or:
# step_2.add_hyperparameter(name='continuity_option', argument_type=ArgumentType.VALUE, data='ablation')
pipeline_description.add_step(step_2)

# Final Output
pipeline_description.add_output(name='output predictions', data_reference='steps.2.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)
print(yaml)

# Or output JSON instead:
# data = pipeline_description.to_json()
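
# Assumed semantics, based on the hyperparameter names (confirm with the
# primitive's documentation): 'imputation' fills in rows so that timestamps
# become evenly spaced at the given 'interval', whereas 'ablation' drops rows
# to enforce continuity instead.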
@@ -1,49 +0,0 @@
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep
from d3m.metadata import hyperparams

# -> dataset_to_dataframe -> column_parser -> deeplog

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
primitive_0 = index.get_primitive('d3m.primitives.data_transformation.dataset_to_dataframe.Common')
step_0 = PrimitiveStep(primitive=primitive_0)
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser')
step_1 = PrimitiveStep(primitive=primitive_1)
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: DeepLog detector
primitive_2 = index.get_primitive('d3m.primitives.tods.detection_algorithm.deeplog')
step_2 = PrimitiveStep(primitive=primitive_2)
# step_2.add_hyperparameter(name='contamination', argument_type=ArgumentType.VALUE, data=0.1)
step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2,))  # multi-column selection currently misbehaves (multi-dimensional input issue)
step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
pipeline_description.add_step(step_2)

# Final Output
pipeline_description.add_output(name='output predictions', data_reference='steps.2.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)
print(yaml)

# Or output JSON instead:
# data = pipeline_description.to_json()
@@ -1,50 +0,0 @@
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# -> dataset_to_dataframe -> column_parser -> discrete_cosine_transform

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe')
step_0 = PrimitiveStep(primitive=primitive_0)
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: Column Parser
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser')
step_1 = PrimitiveStep(primitive=primitive_1)
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: Discrete Cosine Transform
primitive_2 = index.get_primitive('d3m.primitives.tods.feature_analysis.discrete_cosine_transform')
step_2 = PrimitiveStep(primitive=primitive_2)
step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2, 3, 4))
step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
pipeline_description.add_step(step_2)

# Final Output
pipeline_description.add_output(name='output predictions', data_reference='steps.2.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)
print(yaml)

# Or output JSON instead:
# data = pipeline_description.to_json()
@@ -1,42 +0,0 @@
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
step_0 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe'))
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser'))
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: DuplicationValidation
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.duplication_validation'))
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
step_2.add_hyperparameter(name='keep_option', argument_type=ArgumentType.VALUE, data='average')  # Or: 'first'
pipeline_description.add_step(step_2)

# Final Output
pipeline_description.add_output(name='output predictions', data_reference='steps.2.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)
print(yaml)

# Or output JSON instead:
# data = pipeline_description.to_json()
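
# Assumed semantics, based on the hyperparameter names (confirm with the
# primitive's documentation): for rows that share a timestamp,
# keep_option='average' collapses the duplicates into their mean, while
# 'first' keeps only the first occurrence.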
@@ -1,48 +0,0 @@
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# -> dataset_to_dataframe -> column_parser -> fast_fourier_transform

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe')
step_0 = PrimitiveStep(primitive=primitive_0)
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: Column Parser
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser')
step_1 = PrimitiveStep(primitive=primitive_1)
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: Fast Fourier Transform
primitive_2 = index.get_primitive('d3m.primitives.tods.feature_analysis.fast_fourier_transform')
step_2 = PrimitiveStep(primitive=primitive_2)
step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2, 3, 4))
step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
pipeline_description.add_step(step_2)

# Final Output
pipeline_description.add_output(name='output predictions', data_reference='steps.2.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)
print(yaml)

# Or output JSON instead:
# data = pipeline_description.to_json()
@@ -1,68 +0,0 @@
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
step_0 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe'))
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser'))
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: extract_columns_by_semantic_types(attributes)
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types'))
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE,
                          data=['https://metadata.datadrivendiscovery.org/types/Attribute'])
pipeline_description.add_step(step_2)

# Step 3: extract_columns_by_semantic_types(targets)
step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types'))
step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_3.add_output('produce')
step_3.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE,
                          data=['https://metadata.datadrivendiscovery.org/types/TrueTarget'])
pipeline_description.add_step(step_3)

attributes = 'steps.2.produce'
targets = 'steps.3.produce'

# Step 4: imputer
step_4 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.impute_missing'))
step_4.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference=attributes)
step_4.add_output('produce')
pipeline_description.add_step(step_4)

# Step 5: HBOS (histogram-based outlier score)
step_5 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.detection_algorithm.pyod_hbos'))
step_5.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.4.produce')
step_5.add_hyperparameter(name='contamination', argument_type=ArgumentType.VALUE, data=0.1)
# step_5.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_5.add_output('produce')
pipeline_description.add_step(step_5)

# Final Output
pipeline_description.add_output(name='output predictions', data_reference='steps.5.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)
print(yaml)

# Or output JSON instead:
# data = pipeline_description.to_json()
@@ -1,71 +0,0 @@
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
step_0 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe'))
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser'))
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: extract_columns_by_semantic_types(attributes)
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types'))
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE,
                          data=['https://metadata.datadrivendiscovery.org/types/Attribute'])
pipeline_description.add_step(step_2)

# Step 3: extract_columns_by_semantic_types(targets)
step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types'))
step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_3.add_output('produce')
step_3.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE,
                          data=['https://metadata.datadrivendiscovery.org/types/TrueTarget'])
pipeline_description.add_step(step_3)

attributes = 'steps.2.produce'
targets = 'steps.3.produce'

# Step 4: imputer
step_4 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.impute_missing'))
step_4.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference=attributes)
step_4.add_output('produce')
pipeline_description.add_step(step_4)

# Step 5: HBOS (histogram-based outlier score)
step_5 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.detection_algorithm.pyod_hbos'))
step_5.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.4.produce')
step_5.add_hyperparameter(name='contamination', argument_type=ArgumentType.VALUE, data=0.1)
step_5.add_hyperparameter(name='return_subseq_inds', argument_type=ArgumentType.VALUE, data=True)
# step_5.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_5.add_output('produce_score')
step_5.add_output('produce')
pipeline_description.add_step(step_5)

# Final Output
pipeline_description.add_output(name='output predictions', data_reference='steps.5.produce')
# pipeline_description.add_output(name='output score', data_reference='steps.5.produce_score')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)
print(yaml)

# Or output JSON instead:
# data = pipeline_description.to_json()
@@ -1,46 +0,0 @@
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
step_0 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.dataset_to_dataframe.Common'))
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser'))
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: HPFilter
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.feature_analysis.hp_filter'))
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=[2, 3, 6])
step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
pipeline_description.add_step(step_2)

# Final Output
pipeline_description.add_output(name='output predictions', data_reference='steps.2.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)
print(yaml)

# Or output JSON instead:
# data = pipeline_description.to_json()
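
# The Hodrick-Prescott (HP) filter decomposes each selected series into trend
# and cyclical components; with return_result='append' the derived columns are
# added alongside the originals. The smoothing parameter is left at the
# primitive's default (the primitive is presumed to wrap statsmodels' hpfilter,
# where it is called 'lamb').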
@@ -1,76 +0,0 @@
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# -> dataset_to_dataframe -> column_parser -> extract_columns_by_semantic_types(attributes) -> imputer -> holt_smoothing
#    (targets are extracted in step 3 but not consumed downstream)

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe')
step_0 = PrimitiveStep(primitive=primitive_0)
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser'))
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: extract_columns_by_semantic_types(attributes)
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types'))
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE,
                          data=['https://metadata.datadrivendiscovery.org/types/Attribute'])
pipeline_description.add_step(step_2)

# Step 3: extract_columns_by_semantic_types(targets)
step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types'))
step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_3.add_output('produce')
step_3.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE,
                          data=['https://metadata.datadrivendiscovery.org/types/TrueTarget'])
pipeline_description.add_step(step_3)

attributes = 'steps.2.produce'
targets = 'steps.3.produce'

# Step 4: imputer
step_4 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.impute_missing'))
step_4.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference=attributes)
step_4.add_output('produce')
pipeline_description.add_step(step_4)

# Step 5: holt smoothing
# Note: this step reads the extracted attributes directly; point data_reference
# at 'steps.4.produce' instead if it should consume the imputed data.
step_5 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.holt_smoothing'))
step_5.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference=attributes)
step_5.add_hyperparameter(name='exclude_columns', argument_type=ArgumentType.VALUE, data=(2, 3))
step_5.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_5.add_output('produce')
pipeline_description.add_step(step_5)

# Step 6: isolation forest
#step_6 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.anomaly_detection.isolation_forest.Algorithm'))
#step_6.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.5.produce')
#step_6.add_argument(name='outputs', argument_type=ArgumentType.CONTAINER, data_reference=targets)
#step_6.add_output('produce')
#pipeline_description.add_step(step_6)

# Final Output
pipeline_description.add_output(name='output predictions', data_reference='steps.5.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)
print(yaml)

# Or output JSON instead:
# data = pipeline_description.to_json()
@@ -1,76 +0,0 @@
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# -> dataset_to_dataframe -> column_parser -> extract_columns_by_semantic_types(attributes) -> imputer -> holt_winters_exponential_smoothing
#    (targets are extracted in step 3 but not consumed downstream)

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe')
step_0 = PrimitiveStep(primitive=primitive_0)
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser'))
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: extract_columns_by_semantic_types(attributes)
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types'))
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE,
                          data=['https://metadata.datadrivendiscovery.org/types/Attribute'])
pipeline_description.add_step(step_2)

# Step 3: extract_columns_by_semantic_types(targets)
step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types'))
step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_3.add_output('produce')
step_3.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE,
                          data=['https://metadata.datadrivendiscovery.org/types/TrueTarget'])
pipeline_description.add_step(step_3)

attributes = 'steps.2.produce'
targets = 'steps.3.produce'

# Step 4: imputer
step_4 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.impute_missing'))
step_4.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference=attributes)
step_4.add_output('produce')
pipeline_description.add_step(step_4)

# Step 5: holt winters exponential smoothing
# Note: this step reads the extracted attributes directly; point data_reference
# at 'steps.4.produce' instead if it should consume the imputed data.
step_5 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.holt_winters_exponential_smoothing'))
step_5.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference=attributes)
step_5.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2, 3))
step_5.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_5.add_output('produce')
pipeline_description.add_step(step_5)

# Step 6: isolation forest
#step_6 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.anomaly_detection.isolation_forest.Algorithm'))
#step_6.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.5.produce')
#step_6.add_argument(name='outputs', argument_type=ArgumentType.CONTAINER, data_reference=targets)
#step_6.add_output('produce')
#pipeline_description.add_step(step_6)

# Final Output
pipeline_description.add_output(name='output predictions', data_reference='steps.5.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)
print(yaml)

# Or output JSON instead:
# data = pipeline_description.to_json()
@@ -1,59 +0,0 @@
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep
from d3m.metadata import hyperparams
import copy

# -> dataset_to_dataframe -> column_parser -> extract_columns_by_semantic_types(attributes) -> pyod_iforest

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe')
step_0 = PrimitiveStep(primitive=primitive_0)
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser')
step_1 = PrimitiveStep(primitive=primitive_1)
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: extract_columns_by_semantic_types(attributes)
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types'))
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE, data=['https://metadata.datadrivendiscovery.org/types/Attribute'])
pipeline_description.add_step(step_2)

# Step 3: isolation forest (pyod_iforest)
primitive_3 = index.get_primitive('d3m.primitives.tods.detection_algorithm.pyod_iforest')
step_3 = PrimitiveStep(primitive=primitive_3)
step_3.add_hyperparameter(name='contamination', argument_type=ArgumentType.VALUE, data=0.1)
# step_3.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
# step_3.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2,))  # multi-column selection currently misbehaves (multi-dimensional input issue)
step_3.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_3.add_hyperparameter(name='return_subseq_inds', argument_type=ArgumentType.VALUE, data=True)
step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce')
step_3.add_output('produce_score')
step_3.add_output('produce')
pipeline_description.add_step(step_3)

# Final Output
pipeline_description.add_output(name='output predictions', data_reference='steps.3.produce_score')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)
print(yaml)

# Or output JSON instead:
# data = pipeline_description.to_json()
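
# Unlike most of the other examples, this pipeline exposes
# 'steps.3.produce_score' as its final output, so running it yields raw
# isolation-forest anomaly scores rather than binary labels; point the
# data_reference at 'steps.3.produce' to get labels instead.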
@@ -1,71 +0,0 @@
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep
from d3m.metadata import hyperparams
import numpy as np

# -> dataset_to_dataframe -> column_parser -> extract_columns_by_semantic_types(attributes) -> standard_scaler -> KDiscordODetector

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
primitive_0 = index.get_primitive('d3m.primitives.data_transformation.dataset_to_dataframe.Common')
step_0 = PrimitiveStep(primitive=primitive_0)
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser')
step_1 = PrimitiveStep(primitive=primitive_1)
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: extract_columns_by_semantic_types(attributes)
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types'))
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE, data=['https://metadata.datadrivendiscovery.org/types/Attribute'])
pipeline_description.add_step(step_2)

# Step 3: standardization (standard_scaler)
primitive_3 = index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.standard_scaler')
step_3 = PrimitiveStep(primitive=primitive_3)
step_3.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_3.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(1, 2, 3, 4, 5,))
step_3.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='new')
step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce')
step_3.add_output('produce')
pipeline_description.add_step(step_3)

# Step 4: KDiscordODetector
primitive_4 = index.get_primitive('d3m.primitives.tods.detection_algorithm.KDiscordODetector')
step_4 = PrimitiveStep(primitive=primitive_4)
step_4.add_hyperparameter(name='contamination', argument_type=ArgumentType.VALUE, data=0.1)  # expected fraction of outliers
step_4.add_hyperparameter(name='window_size', argument_type=ArgumentType.VALUE, data=10)  # sliding-window length
# step_4.add_hyperparameter(name='weights', argument_type=ArgumentType.VALUE, data=weights_ndarray)
step_4.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=False)
# step_4.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2, 3, 4, 5, 6))  # multi-column selection currently misbehaves (multi-dimensional input issue)
step_4.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_4.add_hyperparameter(name='return_subseq_inds', argument_type=ArgumentType.VALUE, data=True)
step_4.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.3.produce')
step_4.add_output('produce')
step_4.add_output('produce_score')
pipeline_description.add_step(step_4)

# Final Output
pipeline_description.add_output(name='output predictions', data_reference='steps.4.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)
print(yaml)

# Or output JSON instead:
# data = pipeline_description.to_json()
@@ -1,51 +0,0 @@
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# Pipeline: dataset_to_dataframe -> column_parser -> pyod_knn

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe')
step_0 = PrimitiveStep(primitive=primitive_0)
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser')
step_1 = PrimitiveStep(primitive=primitive_1)
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: detection algorithm (pyod_knn)
primitive_2 = index.get_primitive('d3m.primitives.tods.detection_algorithm.pyod_knn')
step_2 = PrimitiveStep(primitive=primitive_2)
step_2.add_hyperparameter(name='contamination', argument_type=ArgumentType.VALUE, data=0.1)
step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2,))  # NOTE: multi-column input misbehaves here
step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
pipeline_description.add_step(step_2)

# Final output
pipeline_description.add_output(name='output predictions', data_reference='steps.2.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)
print(yaml)

# Or you can output JSON instead:
# data = pipeline_description.to_json()
@@ -1,51 +0,0 @@
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# Pipeline: dataset_to_dataframe -> column_parser -> pyod_loda

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe')
step_0 = PrimitiveStep(primitive=primitive_0)
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser')
step_1 = PrimitiveStep(primitive=primitive_1)
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: detection algorithm (pyod_loda)
primitive_2 = index.get_primitive('d3m.primitives.tods.detection_algorithm.pyod_loda')
step_2 = PrimitiveStep(primitive=primitive_2)
step_2.add_hyperparameter(name='contamination', argument_type=ArgumentType.VALUE, data=0.1)
step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2,))  # NOTE: multi-column input misbehaves here
step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
pipeline_description.add_step(step_2)

# Final output
pipeline_description.add_output(name='output predictions', data_reference='steps.2.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)
print(yaml)

# Or you can output JSON instead:
# data = pipeline_description.to_json()
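Each of these scripts ends with the same commented-out JSON alternative. A short sketch of that path, assuming to_json mirrors to_yaml on the same Pipeline class:

# Serialize the same description as JSON instead of YAML
data = pipeline_description.to_json()
with open('pipeline.json', 'w') as f:
    f.write(data)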
@@ -1,51 +0,0 @@
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# Pipeline: dataset_to_dataframe -> column_parser -> pyod_lof

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe')
step_0 = PrimitiveStep(primitive=primitive_0)
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser')
step_1 = PrimitiveStep(primitive=primitive_1)
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: detection algorithm (pyod_lof)
primitive_2 = index.get_primitive('d3m.primitives.tods.detection_algorithm.pyod_lof')
step_2 = PrimitiveStep(primitive=primitive_2)
step_2.add_hyperparameter(name='contamination', argument_type=ArgumentType.VALUE, data=0.1)
step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2,))  # NOTE: multi-column input misbehaves here
step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
pipeline_description.add_step(step_2)

# Final output
pipeline_description.add_output(name='output predictions', data_reference='steps.2.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)
print(yaml)

# Or you can output JSON instead:
# data = pipeline_description.to_json()
@@ -1,70 +0,0 @@
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# Pipeline: dataset_to_dataframe -> column_parser -> extract_columns_by_semantic_types(attributes)
#           -> standard_scaler -> LSTMODetector

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe')
step_0 = PrimitiveStep(primitive=primitive_0)
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser')
step_1 = PrimitiveStep(primitive=primitive_1)
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: extract_columns_by_semantic_types(attributes)
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types'))
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE, data=['https://metadata.datadrivendiscovery.org/types/Attribute'])
pipeline_description.add_step(step_2)

# Step 3: standardization
primitive_3 = index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.standard_scaler')
step_3 = PrimitiveStep(primitive=primitive_3)
step_3.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_3.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(1, 2, 3, 4, 5,))
step_3.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='new')
step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce')
step_3.add_output('produce')
pipeline_description.add_step(step_3)

# Step 4: detection algorithm (LSTMODetector)
primitive_4 = index.get_primitive('d3m.primitives.tods.detection_algorithm.LSTMODetector')
step_4 = PrimitiveStep(primitive=primitive_4)
step_4.add_hyperparameter(name='contamination', argument_type=ArgumentType.VALUE, data=0.1)
step_4.add_hyperparameter(name='diff_group_method', argument_type=ArgumentType.VALUE, data='average')
step_4.add_hyperparameter(name='feature_dim', argument_type=ArgumentType.VALUE, data=5)
step_4.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=False)
# step_4.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2, 3, 4, 5, 6))  # NOTE: multi-column input misbehaves here
step_4.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_4.add_hyperparameter(name='return_subseq_inds', argument_type=ArgumentType.VALUE, data=True)
step_4.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.3.produce')
step_4.add_output('produce')
pipeline_description.add_step(step_4)

# Final output
pipeline_description.add_output(name='output predictions', data_reference='steps.4.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)
print(yaml)

# Or you can output JSON instead:
# data = pipeline_description.to_json()
@@ -1,49 +0,0 @@
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# Pipeline: dataset_to_dataframe -> column_parser -> matrix_profile

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe')
step_0 = PrimitiveStep(primitive=primitive_0)
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser')
step_1 = PrimitiveStep(primitive=primitive_1)
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: detection algorithm (matrix_profile)
primitive_2 = index.get_primitive('d3m.primitives.tods.detection_algorithm.matrix_profile')
step_2 = PrimitiveStep(primitive=primitive_2)
step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2, 3, 4))  # NOTE: multi-column input misbehaves here
step_2.add_hyperparameter(name='window_size', argument_type=ArgumentType.VALUE, data=3)
step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
pipeline_description.add_step(step_2)

# Final output
pipeline_description.add_output(name='output predictions', data_reference='steps.2.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)
print(yaml)

# Or you can output JSON instead:
# data = pipeline_description.to_json()
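Hyper-parameter names such as 'window_size' above differ from primitive to primitive. A sketch (assuming the standard d3m metadata interface) for listing what a given primitive accepts before configuring a step:

primitive = index.get_primitive('d3m.primitives.tods.detection_algorithm.matrix_profile')
hyperparams_class = primitive.metadata.get_hyperparams()
print(sorted(hyperparams_class.configuration.keys()))  # all accepted hyper-parameter names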
@@ -1,77 +0,0 @@
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# Pipeline: dataset_to_dataframe -> column_parser -> extract_columns_by_semantic_types(attributes / targets)
#           -> impute_missing -> moving_average_transform

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe')
step_0 = PrimitiveStep(primitive=primitive_0)
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser'))
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: extract_columns_by_semantic_types(attributes)
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types'))
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE,
                          data=['https://metadata.datadrivendiscovery.org/types/Attribute'])
pipeline_description.add_step(step_2)

# Step 3: extract_columns_by_semantic_types(targets)
step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types'))
step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_3.add_output('produce')
step_3.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE,
                          data=['https://metadata.datadrivendiscovery.org/types/TrueTarget'])
pipeline_description.add_step(step_3)

attributes = 'steps.2.produce'
targets = 'steps.3.produce'

# Step 4: imputer
step_4 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.impute_missing'))
step_4.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference=attributes)
step_4.add_output('produce')
pipeline_description.add_step(step_4)

# Step 5: moving average transform
# NOTE: this step reads the extracted attributes directly, not the imputer output (steps.4.produce)
step_5 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.moving_average_transform'))
step_5.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference=attributes)
step_5.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2, 3))
step_5.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_5.add_output('produce')
pipeline_description.add_step(step_5)

# Step 6: isolation forest (kept for reference, currently disabled)
# step_6 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.anomaly_detection.isolation_forest.Algorithm'))
# step_6.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.5.produce')
# step_6.add_argument(name='outputs', argument_type=ArgumentType.CONTAINER, data_reference=targets)
# step_6.add_output('produce')
# pipeline_description.add_step(step_6)

# Final output
pipeline_description.add_output(name='output predictions', data_reference='steps.5.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)
print(yaml)

# Or you can output JSON instead:
# data = pipeline_description.to_json()
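The primitive paths hard-coded in these scripts must match what is installed in the environment. A short sketch using the d3m index to discover the registered TODS primitives:

from d3m import index

# Print every installed primitive under the TODS namespace
for python_path in index.search():
    if python_path.startswith('d3m.primitives.tods.'):
        print(python_path)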
@@ -1,50 +0,0 @@
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# Pipeline: dataset_to_dataframe -> column_parser -> non_negative_matrix_factorization

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe')
step_0 = PrimitiveStep(primitive=primitive_0)
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser')
step_1 = PrimitiveStep(primitive=primitive_1)
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: non-negative matrix factorization
primitive_2 = index.get_primitive('d3m.primitives.tods.feature_analysis.non_negative_matrix_factorization')
step_2 = PrimitiveStep(primitive=primitive_2)
step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2,))
step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_2.add_hyperparameter(name='rank', argument_type=ArgumentType.VALUE, data=5)
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
pipeline_description.add_step(step_2)

# Final output
pipeline_description.add_output(name='output predictions', data_reference='steps.2.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)
print(yaml)

# Or you can output JSON instead:
# data = pipeline_description.to_json()
@@ -1,51 +0,0 @@
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# Pipeline: dataset_to_dataframe -> column_parser -> pyod_ocsvm

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe')
step_0 = PrimitiveStep(primitive=primitive_0)
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser')
step_1 = PrimitiveStep(primitive=primitive_1)
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: detection algorithm (pyod_ocsvm)
primitive_2 = index.get_primitive('d3m.primitives.tods.detection_algorithm.pyod_ocsvm')
step_2 = PrimitiveStep(primitive=primitive_2)
step_2.add_hyperparameter(name='contamination', argument_type=ArgumentType.VALUE, data=0.1)
step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2,))  # NOTE: multi-column input misbehaves here
step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
pipeline_description.add_step(step_2)

# Final output
pipeline_description.add_output(name='output predictions', data_reference='steps.2.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)
print(yaml)

# Or you can output JSON instead:
# data = pipeline_description.to_json()
@@ -1,71 +0,0 @@
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# Pipeline: dataset_to_dataframe -> column_parser -> extract_columns_by_semantic_types(attributes)
#           -> standard_scaler -> PCAODetector

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
primitive_0 = index.get_primitive('d3m.primitives.data_transformation.dataset_to_dataframe.Common')
step_0 = PrimitiveStep(primitive=primitive_0)
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser')
step_1 = PrimitiveStep(primitive=primitive_1)
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: extract_columns_by_semantic_types(attributes)
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types'))
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE, data=['https://metadata.datadrivendiscovery.org/types/Attribute'])
pipeline_description.add_step(step_2)

# Step 3: standardization
primitive_3 = index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.standard_scaler')
step_3 = PrimitiveStep(primitive=primitive_3)
step_3.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_3.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(1, 2, 3, 4, 5,))
step_3.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='new')
step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce')
step_3.add_output('produce')
pipeline_description.add_step(step_3)

# Step 4: detection algorithm (PCAODetector)
primitive_4 = index.get_primitive('d3m.primitives.tods.detection_algorithm.PCAODetector')
step_4 = PrimitiveStep(primitive=primitive_4)
step_4.add_hyperparameter(name='contamination', argument_type=ArgumentType.VALUE, data=0.1)
step_4.add_hyperparameter(name='window_size', argument_type=ArgumentType.VALUE, data=10)
# step_4.add_hyperparameter(name='weights', argument_type=ArgumentType.VALUE, data=weights_ndarray)
step_4.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=False)
# step_4.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2, 3, 4, 5, 6))  # NOTE: multi-column input misbehaves here
step_4.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_4.add_hyperparameter(name='return_subseq_inds', argument_type=ArgumentType.VALUE, data=True)
step_4.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.3.produce')
step_4.add_output('produce')
step_4.add_output('produce_score')
pipeline_description.add_step(step_4)

# Final output
pipeline_description.add_output(name='output predictions', data_reference='steps.4.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)
print(yaml)

# Or you can output JSON instead:
# data = pipeline_description.to_json()
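The detector above declares both 'produce' and 'produce_score' outputs but only exposes the former. A sketch of additionally exposing the raw scores (the d3m pipeline schema allows multiple pipeline outputs; the output name here is arbitrary):

# Expose the detector's raw anomaly scores as a second pipeline output
pipeline_description.add_output(name='anomaly scores', data_reference='steps.4.produce_score')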
@@ -1,49 +0,0 @@
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# Pipeline: dataset_to_dataframe -> column_parser -> power_transformer

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe')
step_0 = PrimitiveStep(primitive=primitive_0)
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser')
step_1 = PrimitiveStep(primitive=primitive_1)
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: power_transformer
primitive_2 = index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.power_transformer')
step_2 = PrimitiveStep(primitive=primitive_2)
step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2,))
step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
pipeline_description.add_step(step_2)

# Final output
pipeline_description.add_output(name='output predictions', data_reference='steps.2.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)
print(yaml)

# Or you can output JSON instead:
# data = pipeline_description.to_json()
@@ -1,51 +0,0 @@
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# Pipeline: dataset_to_dataframe -> column_parser -> pyod_cof

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe')
step_0 = PrimitiveStep(primitive=primitive_0)
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser')
step_1 = PrimitiveStep(primitive=primitive_1)
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: detection algorithm (pyod_cof)
primitive_2 = index.get_primitive('d3m.primitives.tods.detection_algorithm.pyod_cof')
step_2 = PrimitiveStep(primitive=primitive_2)
step_2.add_hyperparameter(name='contamination', argument_type=ArgumentType.VALUE, data=0.1)
step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2, 3, 4))  # NOTE: multi-column input misbehaves here
step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
pipeline_description.add_step(step_2)

# Final output
pipeline_description.add_output(name='output predictions', data_reference='steps.2.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)
print(yaml)

# Or you can output JSON instead:
# data = pipeline_description.to_json()
@@ -1,49 +0,0 @@
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# Pipeline: dataset_to_dataframe -> column_parser -> quantile_transformer

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe')
step_0 = PrimitiveStep(primitive=primitive_0)
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser')
step_1 = PrimitiveStep(primitive=primitive_1)
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: quantile_transformer
primitive_2 = index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.quantile_transformer')
step_2 = PrimitiveStep(primitive=primitive_2)
step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2,))
step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
pipeline_description.add_step(step_2)

# Final output
pipeline_description.add_output(name='output predictions', data_reference='steps.2.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)
print(yaml)

# Or you can output JSON instead:
# data = pipeline_description.to_json()
@@ -1,54 +0,0 @@
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
step_0 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe'))
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser'))
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: extract_columns_by_semantic_types(attributes)
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types'))
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE, data=['https://metadata.datadrivendiscovery.org/types/Attribute'])
pipeline_description.add_step(step_2)

# Step 3: rule_filter
step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.reinforcement.rule_filter'))
step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce')
step_3.add_output('produce')
step_3.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2, 4,))
step_3.add_hyperparameter(name='rule', argument_type=ArgumentType.VALUE, data='#4# % 2 == 0 and #2# <= 0.3')
step_3.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
pipeline_description.add_step(step_3)

# Final output
pipeline_description.add_output(name='output predictions', data_reference='steps.3.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)
print(yaml)

# Or you can output JSON instead:
# data = pipeline_description.to_json()
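The rule string appears to reference column values by index, '#4#' for column 4 and '#2#' for column 2. A hedged plain-pandas illustration of the predicate it seems to encode (the frame and column labels below are hypothetical):

import pandas as pd

df = pd.DataFrame({'col2': [0.1, 0.2, 0.5], 'col4': [2, 3, 4]})
mask = (df['col4'] % 2 == 0) & (df['col2'] <= 0.3)
print(df[mask])  # rows where column 4 is even and column 2 is at most 0.3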
@@ -1,49 +0,0 @@
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# Pipeline: dataset_to_dataframe -> column_parser -> pyod_sod

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe')
step_0 = PrimitiveStep(primitive=primitive_0)
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser')
step_1 = PrimitiveStep(primitive=primitive_1)
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: detection algorithm (pyod_sod)
primitive_2 = index.get_primitive('d3m.primitives.tods.detection_algorithm.pyod_sod')
step_2 = PrimitiveStep(primitive=primitive_2)
step_2.add_hyperparameter(name='contamination', argument_type=ArgumentType.VALUE, data=0.1)
step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2, 3, 4))  # NOTE: multi-column input misbehaves here
step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
pipeline_description.add_step(step_2)

# Final output
pipeline_description.add_output(name='output predictions', data_reference='steps.2.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)
print(yaml)

# Or you can output JSON instead:
# data = pipeline_description.to_json()
@@ -1,76 +0,0 @@
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# Pipeline: dataset_to_dataframe -> column_parser -> extract_columns_by_semantic_types(attributes / targets)
#           -> impute_missing -> simple_exponential_smoothing

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe')
step_0 = PrimitiveStep(primitive=primitive_0)
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser'))
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: extract_columns_by_semantic_types(attributes)
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types'))
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE,
                          data=['https://metadata.datadrivendiscovery.org/types/Attribute'])
pipeline_description.add_step(step_2)

# Step 3: extract_columns_by_semantic_types(targets)
step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types'))
step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_3.add_output('produce')
step_3.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE,
                          data=['https://metadata.datadrivendiscovery.org/types/TrueTarget'])
pipeline_description.add_step(step_3)

attributes = 'steps.2.produce'
targets = 'steps.3.produce'

# Step 4: imputer
step_4 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.impute_missing'))
step_4.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference=attributes)
step_4.add_output('produce')
pipeline_description.add_step(step_4)

# Step 5: simple exponential smoothing
# NOTE: this step reads the extracted attributes directly, not the imputer output (steps.4.produce)
step_5 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.simple_exponential_smoothing'))
step_5.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference=attributes)
step_5.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(1,))
step_5.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_5.add_output('produce')
pipeline_description.add_step(step_5)

# Step 6: isolation forest (kept for reference, currently disabled)
# step_6 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.anomaly_detection.isolation_forest.Algorithm'))
# step_6.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.5.produce')
# step_6.add_argument(name='outputs', argument_type=ArgumentType.CONTAINER, data_reference=targets)
# step_6.add_output('produce')
# pipeline_description.add_step(step_6)

# Final output
pipeline_description.add_output(name='output predictions', data_reference='steps.5.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)
print(yaml)

# Or you can output JSON instead:
# data = pipeline_description.to_json()
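A written pipeline.yml is only a description; executing it requires a runtime. A sketch of the usual invocation, assuming the d3m reference runtime is installed (the dataset paths below are placeholders):

# python3 -m d3m runtime fit-produce -p pipeline.yml \
#     -i dataset_TRAIN/datasetDoc.json -t dataset_TEST/datasetDoc.json \
#     -o predictions.csv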
@@ -1,49 +0,0 @@
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# Pipeline: dataset_to_dataframe -> column_parser -> standard_scaler

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe')
step_0 = PrimitiveStep(primitive=primitive_0)
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser')
step_1 = PrimitiveStep(primitive=primitive_1)
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: standard_scaler
primitive_2 = index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.standard_scaler')
step_2 = PrimitiveStep(primitive=primitive_2)
step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2,))
step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
pipeline_description.add_step(step_2)

# Final output
pipeline_description.add_output(name='output predictions', data_reference='steps.2.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)
print(yaml)

# Or you can output JSON instead:
# data = pipeline_description.to_json()
@@ -1,44 +0,0 @@
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
step_0 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe'))
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser'))
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: TRMF (temporal regularized matrix factorization)
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.feature_analysis.trmf'))
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
step_2.add_hyperparameter(name='lags', argument_type=ArgumentType.VALUE, data=[1, 2, 10, 100])
# step_2.add_hyperparameter(name='K', argument_type=ArgumentType.VALUE, data=3)
# step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2, 3, 4, 5, 6))
pipeline_description.add_step(step_2)

# Final output
pipeline_description.add_output(name='output predictions', data_reference='steps.2.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)
print(yaml)

# Or you can output JSON instead:
# data = pipeline_description.to_json()
@@ -1,48 +0,0 @@
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# Pipeline: dataset_to_dataframe -> column_parser -> telemanom

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe')
step_0 = PrimitiveStep(primitive=primitive_0)
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser')
step_1 = PrimitiveStep(primitive=primitive_1)
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: detection algorithm (telemanom)
primitive_2 = index.get_primitive('d3m.primitives.tods.detection_algorithm.telemanom')
step_2 = PrimitiveStep(primitive=primitive_2)
step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2, 3, 4, 5, 6))
step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
pipeline_description.add_step(step_2)

# Final output
pipeline_description.add_output(name='output predictions', data_reference='steps.2.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)
print(yaml)

# Or you can output JSON instead:
# data = pipeline_description.to_json()
@@ -1,86 +0,0 @@
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# Pipeline: dataset_to_dataframe -> column_parser -> time_interval_transform

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe')
step_0 = PrimitiveStep(primitive=primitive_0)
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Alternative dataframe transformations (kept for reference, currently disabled):
# primitive_1 = index.get_primitive('d3m.primitives.data_transformation.SKPowerTransformer')
# primitive_1 = index.get_primitive('d3m.primitives.data_transformation.SKStandardization')
# primitive_1 = index.get_primitive('d3m.primitives.data_transformation.SKQuantileTransformer')

# Step 1: column_parser
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser'))
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: time_interval_transform ('5T' follows the pandas offset-alias convention, i.e. 5 minutes)
primitive_2 = index.get_primitive('d3m.primitives.tods.data_processing.time_interval_transform')
step_2 = PrimitiveStep(primitive=primitive_2)
step_2.add_hyperparameter(name='time_interval', argument_type=ArgumentType.VALUE, data='5T')
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
pipeline_description.add_step(step_2)

# The rest of the original supervised template is kept for reference, currently disabled:
# # Step 2: column_parser
# step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser'))
# step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
# step_2.add_output('produce')
# pipeline_description.add_step(step_2)
#
# # Step 3: extract_columns_by_semantic_types(attributes)
# step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types'))
# step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce')
# step_3.add_output('produce')
# step_3.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE,
#                           data=['https://metadata.datadrivendiscovery.org/types/Attribute'])
# pipeline_description.add_step(step_3)
#
# # Step 4: extract_columns_by_semantic_types(targets)
# step_4 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types'))
# step_4.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
# step_4.add_output('produce')
# step_4.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE,
#                           data=['https://metadata.datadrivendiscovery.org/types/TrueTarget'])
# pipeline_description.add_step(step_4)
#
# attributes = 'steps.3.produce'
# targets = 'steps.4.produce'
#
# # Step 5: imputer
# step_5 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_cleaning.imputer.SKlearn'))
# step_5.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference=attributes)
# step_5.add_output('produce')
# pipeline_description.add_step(step_5)
#
# # Step 6: random_forest
# step_6 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.regression.random_forest.SKlearn'))
# step_6.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.5.produce')
# step_6.add_argument(name='outputs', argument_type=ArgumentType.CONTAINER, data_reference=targets)
# step_6.add_output('produce')
# pipeline_description.add_step(step_6)

# Final output (the transformed dataframe from time_interval_transform)
pipeline_description.add_output(name='output predictions', data_reference='steps.2.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)
print(yaml)

# Or you can output JSON instead:
# data = pipeline_description.to_json()
@@ -1,44 +0,0 @@
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
step_0 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe'))
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser'))
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: TruncatedSVD, reducing columns 2..6 to 3 components and appending them
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.feature_analysis.truncated_svd'))
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
step_2.add_hyperparameter(name='n_components', argument_type=ArgumentType.VALUE, data=3)
step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2, 3, 4, 5, 6))
step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
pipeline_description.add_step(step_2)

# Final Output
pipeline_description.add_output(name='output predictions', data_reference='steps.2.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)
print(yaml)

# Or you can output JSON instead:
# data = pipeline_description.to_json()
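# For intuition, step 2 behaves roughly like the following (a sketch; assumes
# the primitive wraps an sklearn-style TruncatedSVD over the selected columns):
import numpy as np
from sklearn.decomposition import TruncatedSVD

X = np.random.rand(100, 5)          # stand-in for columns 2..6
svd = TruncatedSVD(n_components=3)
components = svd.fit_transform(X)   # 3 new columns, appended by return_result='append'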
@@ -1,67 +0,0 @@
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# dataset_to_dataframe -> column_parser -> extract_columns_by_semantic_types(attributes) -> impute_missing -> pyod_vae
#                      -> extract_columns_by_semantic_types(targets)

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
step_0 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe'))
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser'))
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: extract_columns_by_semantic_types(attributes)
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types'))
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE,
                          data=['https://metadata.datadrivendiscovery.org/types/Attribute'])
pipeline_description.add_step(step_2)

# Step 3: extract_columns_by_semantic_types(targets)
step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types'))
step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_3.add_output('produce')
step_3.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE,
                          data=['https://metadata.datadrivendiscovery.org/types/TrueTarget'])
pipeline_description.add_step(step_3)

attributes = 'steps.2.produce'
targets = 'steps.3.produce'

# Step 4: imputer
step_4 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.impute_missing'))
step_4.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference=attributes)
step_4.add_output('produce')
pipeline_description.add_step(step_4)

# Step 5: variational autoencoder detector, fed with the imputed attributes from step 4
step_5 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.detection_algorithm.pyod_vae'))
step_5.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.4.produce')
step_5.add_output('produce')
pipeline_description.add_step(step_5)

# Final Output
pipeline_description.add_output(name='output predictions', data_reference='steps.5.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)
print(yaml)

# Or you can output JSON instead:
# data = pipeline_description.to_json()
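# Assumption: like the pyod_mogaal / pyod_sogaal detectors later in this diff,
# pyod_vae exposes a `contamination` hyperparameter controlling the expected
# outlier fraction; if so, it would be set the same way:
# step_5.add_hyperparameter(name='contamination', argument_type=ArgumentType.VALUE, data=0.1)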
@@ -1,64 +0,0 @@
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# dataset_to_dataframe -> column_parser -> wavelet_transform -> inverse wavelet_transform

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe')
step_0 = PrimitiveStep(primitive=primitive_0)
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser')
step_1 = PrimitiveStep(primitive=primitive_1)
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: forward WaveletTransform
primitive_2 = index.get_primitive('d3m.primitives.tods.feature_analysis.wavelet_transform')
step_2 = PrimitiveStep(primitive=primitive_2)
step_2.add_hyperparameter(name='wavelet', argument_type=ArgumentType.VALUE, data='db8')
step_2.add_hyperparameter(name='level', argument_type=ArgumentType.VALUE, data=2)
step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2,))
step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='new')
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
pipeline_description.add_step(step_2)

# Step 3: inverse WaveletTransform, reconstructing the signal from step 2
primitive_3 = index.get_primitive('d3m.primitives.tods.feature_analysis.wavelet_transform')
step_3 = PrimitiveStep(primitive=primitive_3)
step_3.add_hyperparameter(name='wavelet', argument_type=ArgumentType.VALUE, data='db8')
step_3.add_hyperparameter(name='level', argument_type=ArgumentType.VALUE, data=2)
step_3.add_hyperparameter(name='inverse', argument_type=ArgumentType.VALUE, data=1)
step_3.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=False)
step_3.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='new')
step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce')
step_3.add_output('produce')
pipeline_description.add_step(step_3)

# Final output: the reconstruction produced by the inverse transform
pipeline_description.add_output(name='output predictions', data_reference='steps.3.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)
print(yaml)

# Or you can output JSON instead:
# data = pipeline_description.to_json()
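# For intuition (a sketch; assumes the primitive is backed by PyWavelets):
import numpy as np
import pywt

x = np.sin(np.linspace(0, 8 * np.pi, 256))
coeffs = pywt.wavedec(x, 'db8', level=2)  # forward transform, as in step 2
x_rec = pywt.waverec(coeffs, 'db8')       # inverse transform, as in step 3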
@@ -1,50 +0,0 @@
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# dataset_to_dataframe -> column_parser -> pyod_mogaal

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe')
step_0 = PrimitiveStep(primitive=primitive_0)
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser')
step_1 = PrimitiveStep(primitive=primitive_1)
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: MO-GAAL detector
primitive_2 = index.get_primitive('d3m.primitives.tods.detection_algorithm.pyod_mogaal')
step_2 = PrimitiveStep(primitive=primitive_2)
step_2.add_hyperparameter(name='contamination', argument_type=ArgumentType.VALUE, data=0.1)
step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2,))  # NOTE: there is a known issue with multi-dimensional input, so only one column is used
step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
pipeline_description.add_step(step_2)

# Final Output
pipeline_description.add_output(name='output predictions', data_reference='steps.2.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)
print(yaml)

# Or you can output JSON instead:
# data = pipeline_description.to_json()
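# Note (PyOD convention, which this detector follows): contamination=0.1 fixes
# the decision threshold so that roughly 10% of rows are flagged as anomalies
# in the appended prediction column.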
@@ -1,50 +0,0 @@
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# dataset_to_dataframe -> column_parser -> pyod_sogaal

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe')
step_0 = PrimitiveStep(primitive=primitive_0)
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser')
step_1 = PrimitiveStep(primitive=primitive_1)
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: SO-GAAL detector
primitive_2 = index.get_primitive('d3m.primitives.tods.detection_algorithm.pyod_sogaal')
step_2 = PrimitiveStep(primitive=primitive_2)
step_2.add_hyperparameter(name='contamination', argument_type=ArgumentType.VALUE, data=0.1)
step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2,))  # NOTE: there is a known issue with multi-dimensional input, so only one column is used
step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
pipeline_description.add_step(step_2)

# Final Output
pipeline_description.add_output(name='output predictions', data_reference='steps.2.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)
print(yaml)

# Or you can output JSON instead:
# data = pipeline_description.to_json()
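# Note: SO-GAAL trains a single generator against the discriminator, while
# MO-GAAL (previous script) uses multiple generators; both come from PyOD's
# GAAL family of detectors.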
@@ -1,61 +0,0 @@
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# dataset_to_dataframe -> column_parser -> standard_scaler -> spectral_residual_transform

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
primitive_0 = index.get_primitive('d3m.primitives.data_transformation.dataset_to_dataframe.Common')
step_0 = PrimitiveStep(primitive=primitive_0)
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser')
step_1 = PrimitiveStep(primitive=primitive_1)
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: standardization
primitive_2 = index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.standard_scaler')
step_2 = PrimitiveStep(primitive=primitive_2)
step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2, 3, 4, 5, 6))
step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
pipeline_description.add_step(step_2)

# Step 3: spectral residual transform over the scaled columns appended by step 2
primitive_3 = index.get_primitive('d3m.primitives.tods.feature_analysis.spectral_residual_transform')
step_3 = PrimitiveStep(primitive=primitive_3)
step_3.add_hyperparameter(name='avg_filter_dimension', argument_type=ArgumentType.VALUE, data=4)
step_3.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_3.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(8, 9, 10, 11, 12))  # NOTE: there is a known issue with multi-dimensional input
step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce')
step_3.add_output('produce')
pipeline_description.add_step(step_3)

# Final Output
pipeline_description.add_output(name='output', data_reference='steps.3.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)

# Or you can output JSON instead:
# data = pipeline_description.to_json()
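# For intuition, a sketch of the spectral-residual idea (assumes the primitive
# follows the saliency-map formulation used by SR anomaly detectors):
import numpy as np

x = np.sin(np.linspace(0, 8 * np.pi, 256)) + np.random.rand(256) * 0.1
fft = np.fft.fft(x)
log_amp = np.log(np.abs(fft))
avg = np.convolve(log_amp, np.ones(4) / 4, mode='same')  # avg_filter_dimension=4
residual = log_amp - avg
saliency = np.abs(np.fft.ifft(np.exp(residual + 1j * np.angle(fft))))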
@@ -1,62 +0,0 @@
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# dataset_to_dataframe -> column_parser -> standard_scaler -> statistical_abs_energy

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe')
step_0 = PrimitiveStep(primitive=primitive_0)
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser')
step_1 = PrimitiveStep(primitive=primitive_1)
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: standardization
primitive_2 = index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.standard_scaler')
step_2 = PrimitiveStep(primitive=primitive_2)
step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2, 3, 4, 5, 6))
step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
pipeline_description.add_step(step_2)

# Step 3: sliding-window absolute energy (sum of squares) of the scaled columns
primitive_3 = index.get_primitive('d3m.primitives.tods.feature_analysis.statistical_abs_energy')
step_3 = PrimitiveStep(primitive=primitive_3)
step_3.add_hyperparameter(name='window_size', argument_type=ArgumentType.VALUE, data=4)
step_3.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_3.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(8, 9, 10, 11, 12))  # NOTE: there is a known issue with multi-dimensional input
step_3.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce')
step_3.add_output('produce')
pipeline_description.add_step(step_3)

# Final Output
pipeline_description.add_output(name='output', data_reference='steps.3.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)

# Or you can output JSON instead:
# data = pipeline_description.to_json()
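# For intuition, each statistical_* step computes a sliding-window statistic
# per selected column (a sketch; assumes a simple rolling window of
# window_size=4, matching the hyperparameter above):
import numpy as np
import pandas as pd

series = pd.Series(np.random.rand(20))
abs_energy = series.rolling(window=4).apply(lambda w: float(np.sum(w ** 2)))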
@@ -1,62 +0,0 @@
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# dataset_to_dataframe -> column_parser -> standard_scaler -> statistical_abs_sum

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe')
step_0 = PrimitiveStep(primitive=primitive_0)
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser')
step_1 = PrimitiveStep(primitive=primitive_1)
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: standardization
primitive_2 = index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.standard_scaler')
step_2 = PrimitiveStep(primitive=primitive_2)
step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2, 3, 4, 5, 6))
step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
pipeline_description.add_step(step_2)

# Step 3: sliding-window sum of absolute values of the scaled columns
primitive_3 = index.get_primitive('d3m.primitives.tods.feature_analysis.statistical_abs_sum')
step_3 = PrimitiveStep(primitive=primitive_3)
step_3.add_hyperparameter(name='window_size', argument_type=ArgumentType.VALUE, data=4)
step_3.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_3.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(8, 9, 10, 11, 12))  # NOTE: there is a known issue with multi-dimensional input
step_3.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce')
step_3.add_output('produce')
pipeline_description.add_step(step_3)

# Final Output
pipeline_description.add_output(name='output', data_reference='steps.3.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)

# Or you can output JSON instead:
# data = pipeline_description.to_json()
@@ -1,62 +0,0 @@
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# dataset_to_dataframe -> column_parser -> standard_scaler -> statistical_g_mean

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe')
step_0 = PrimitiveStep(primitive=primitive_0)
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser')
step_1 = PrimitiveStep(primitive=primitive_1)
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: standardization
primitive_2 = index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.standard_scaler')
step_2 = PrimitiveStep(primitive=primitive_2)
step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2, 3, 4, 5, 6))
step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
pipeline_description.add_step(step_2)

# Step 3: sliding-window geometric mean of the selected columns
primitive_3 = index.get_primitive('d3m.primitives.tods.feature_analysis.statistical_g_mean')
step_3 = PrimitiveStep(primitive=primitive_3)
step_3.add_hyperparameter(name='window_size', argument_type=ArgumentType.VALUE, data=4)
step_3.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_3.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(5, 6))  # NOTE: there is a known issue with multi-dimensional input
step_3.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce')
step_3.add_output('produce')
pipeline_description.add_step(step_3)

# Final Output
pipeline_description.add_output(name='output', data_reference='steps.3.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)

# Or you can output JSON instead:
# data = pipeline_description.to_json()
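# Caveat (assumption about the implementation): a geometric mean is only
# defined for positive values, so applying it to standard-scaled columns,
# which are centered around zero, can yield NaNs. The computation itself is
# roughly equivalent to:
# from scipy.stats import gmean
# rolling_gmean = series.rolling(window=4).apply(gmean)  # `series` is a hypothetical pd.Series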
@@ -1,62 +0,0 @@
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# dataset_to_dataframe -> column_parser -> standard_scaler -> statistical_h_mean

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
primitive_0 = index.get_primitive('d3m.primitives.data_transformation.dataset_to_dataframe.Common')
step_0 = PrimitiveStep(primitive=primitive_0)
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser')
step_1 = PrimitiveStep(primitive=primitive_1)
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: standardization
primitive_2 = index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.standard_scaler')
step_2 = PrimitiveStep(primitive=primitive_2)
step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2, 3, 4, 5, 6))
step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
pipeline_description.add_step(step_2)

# Step 3: sliding-window harmonic mean of the selected columns
primitive_3 = index.get_primitive('d3m.primitives.tods.feature_analysis.statistical_h_mean')
step_3 = PrimitiveStep(primitive=primitive_3)
step_3.add_hyperparameter(name='window_size', argument_type=ArgumentType.VALUE, data=4)
step_3.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_3.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(5, 6))  # NOTE: there is a known issue with multi-dimensional input
step_3.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce')
step_3.add_output('produce')
pipeline_description.add_step(step_3)

# Final Output
pipeline_description.add_output(name='output', data_reference='steps.3.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)

# Or you can output JSON instead:
# data = pipeline_description.to_json()
@@ -1,62 +0,0 @@
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# dataset_to_dataframe -> column_parser -> standard_scaler -> statistical_kurtosis

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
primitive_0 = index.get_primitive('d3m.primitives.data_transformation.dataset_to_dataframe.Common')
step_0 = PrimitiveStep(primitive=primitive_0)
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser')
step_1 = PrimitiveStep(primitive=primitive_1)
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: standardization
primitive_2 = index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.standard_scaler')
step_2 = PrimitiveStep(primitive=primitive_2)
step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2, 3, 4, 5, 6))
step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
pipeline_description.add_step(step_2)

# Step 3: sliding-window kurtosis of the selected columns
primitive_3 = index.get_primitive('d3m.primitives.tods.feature_analysis.statistical_kurtosis')
step_3 = PrimitiveStep(primitive=primitive_3)
step_3.add_hyperparameter(name='window_size', argument_type=ArgumentType.VALUE, data=4)
step_3.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_3.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(5, 6))  # NOTE: there is a known issue with multi-dimensional input
step_3.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce')
step_3.add_output('produce')
pipeline_description.add_step(step_3)

# Final Output
pipeline_description.add_output(name='output', data_reference='steps.3.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)

# Or you can output JSON instead:
# data = pipeline_description.to_json()
@@ -1,62 +0,0 @@
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# dataset_to_dataframe -> column_parser -> standard_scaler -> statistical_maximum

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe')
step_0 = PrimitiveStep(primitive=primitive_0)
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser')
step_1 = PrimitiveStep(primitive=primitive_1)
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: standardization
primitive_2 = index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.standard_scaler')
step_2 = PrimitiveStep(primitive=primitive_2)
step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2, 3, 4, 5, 6))
step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
pipeline_description.add_step(step_2)

# Step 3: sliding-window maximum of the selected columns
primitive_3 = index.get_primitive('d3m.primitives.tods.feature_analysis.statistical_maximum')
step_3 = PrimitiveStep(primitive=primitive_3)
step_3.add_hyperparameter(name='window_size', argument_type=ArgumentType.VALUE, data=4)
step_3.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_3.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(5, 6))  # NOTE: there is a known issue with multi-dimensional input
step_3.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce')
step_3.add_output('produce')
pipeline_description.add_step(step_3)

# Final Output
pipeline_description.add_output(name='output', data_reference='steps.3.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)

# Or you can output JSON instead:
# data = pipeline_description.to_json()
@@ -1,62 +0,0 @@
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# dataset_to_dataframe -> column_parser -> standard_scaler -> statistical_mean

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe')
step_0 = PrimitiveStep(primitive=primitive_0)
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser')
step_1 = PrimitiveStep(primitive=primitive_1)
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: standardization
primitive_2 = index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.standard_scaler')
step_2 = PrimitiveStep(primitive=primitive_2)
step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2, 3, 4, 5, 6))
step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
pipeline_description.add_step(step_2)

# Step 3: sliding-window mean of the selected columns
primitive_3 = index.get_primitive('d3m.primitives.tods.feature_analysis.statistical_mean')
step_3 = PrimitiveStep(primitive=primitive_3)
step_3.add_hyperparameter(name='window_size', argument_type=ArgumentType.VALUE, data=4)
step_3.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_3.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(5, 6))  # NOTE: there is a known issue with multi-dimensional input
step_3.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce')
step_3.add_output('produce')
pipeline_description.add_step(step_3)

# Final Output
pipeline_description.add_output(name='output', data_reference='steps.3.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)

# Or you can output JSON instead:
# data = pipeline_description.to_json()
@@ -1,62 +0,0 @@
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# dataset_to_dataframe -> column_parser -> standard_scaler -> statistical_mean_abs

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe')
step_0 = PrimitiveStep(primitive=primitive_0)
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser')
step_1 = PrimitiveStep(primitive=primitive_1)
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: standardization
primitive_2 = index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.standard_scaler')
step_2 = PrimitiveStep(primitive=primitive_2)
step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2, 3, 4, 5, 6))
step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
pipeline_description.add_step(step_2)

# Step 3: sliding-window mean of absolute values of the selected columns
primitive_3 = index.get_primitive('d3m.primitives.tods.feature_analysis.statistical_mean_abs')
step_3 = PrimitiveStep(primitive=primitive_3)
step_3.add_hyperparameter(name='window_size', argument_type=ArgumentType.VALUE, data=4)
step_3.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_3.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(5, 6))  # NOTE: there is a known issue with multi-dimensional input
step_3.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce')
step_3.add_output('produce')
pipeline_description.add_step(step_3)

# Final Output
pipeline_description.add_output(name='output', data_reference='steps.3.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)

# Or you can output JSON instead:
# data = pipeline_description.to_json()
@@ -1,62 +0,0 @@
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# dataset_to_dataframe -> column_parser -> standard_scaler -> statistical_mean_abs_temporal_derivative

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe')
step_0 = PrimitiveStep(primitive=primitive_0)
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser')
step_1 = PrimitiveStep(primitive=primitive_1)
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: standardization
primitive_2 = index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.standard_scaler')
step_2 = PrimitiveStep(primitive=primitive_2)
step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2, 3, 4, 5, 6))
step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
pipeline_description.add_step(step_2)

# Step 3: sliding-window mean absolute temporal derivative of the selected columns
primitive_3 = index.get_primitive('d3m.primitives.tods.feature_analysis.statistical_mean_abs_temporal_derivative')
step_3 = PrimitiveStep(primitive=primitive_3)
step_3.add_hyperparameter(name='window_size', argument_type=ArgumentType.VALUE, data=4)
step_3.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_3.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(5, 6))  # NOTE: there is a known issue with multi-dimensional input
step_3.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce')
step_3.add_output('produce')
pipeline_description.add_step(step_3)

# Final Output
pipeline_description.add_output(name='output', data_reference='steps.3.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)

# Or you can output JSON instead:
# data = pipeline_description.to_json()
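# Sketch of the temporal-derivative statistics (assumption: "temporal
# derivative" here means the first difference of consecutive samples); with
# pandas this is roughly:
# deriv = series.diff()                                  # `series` is a hypothetical pd.Series
# stat = deriv.abs().rolling(window=4).mean()            # mean_abs_temporal_derivative
# stat = deriv.rolling(window=4).mean()                  # mean_temporal_derivative (next script)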
@@ -1,62 +0,0 @@
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# dataset_to_dataframe -> column_parser -> standard_scaler -> statistical_mean_temporal_derivative

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe')
step_0 = PrimitiveStep(primitive=primitive_0)
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser')
step_1 = PrimitiveStep(primitive=primitive_1)
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: standardization
primitive_2 = index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.standard_scaler')
step_2 = PrimitiveStep(primitive=primitive_2)
step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2, 3, 4, 5, 6))
step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
pipeline_description.add_step(step_2)

# Step 3: sliding-window mean temporal derivative of the selected columns
primitive_3 = index.get_primitive('d3m.primitives.tods.feature_analysis.statistical_mean_temporal_derivative')
step_3 = PrimitiveStep(primitive=primitive_3)
step_3.add_hyperparameter(name='window_size', argument_type=ArgumentType.VALUE, data=4)
step_3.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_3.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(5, 6))  # NOTE: there is a known issue with multi-dimensional input
step_3.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce')
step_3.add_output('produce')
pipeline_description.add_step(step_3)

# Final Output
pipeline_description.add_output(name='output', data_reference='steps.3.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)

# Or you can output JSON instead:
# data = pipeline_description.to_json()
@@ -1,62 +0,0 @@
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# dataset_to_dataframe -> column_parser -> standard_scaler -> statistical_median

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe')
step_0 = PrimitiveStep(primitive=primitive_0)
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser')
step_1 = PrimitiveStep(primitive=primitive_1)
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: standardization
primitive_2 = index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.standard_scaler')
step_2 = PrimitiveStep(primitive=primitive_2)
step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2, 3, 4, 5, 6))
step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
pipeline_description.add_step(step_2)

# Step 3: sliding-window median of the selected columns
primitive_3 = index.get_primitive('d3m.primitives.tods.feature_analysis.statistical_median')
step_3 = PrimitiveStep(primitive=primitive_3)
step_3.add_hyperparameter(name='window_size', argument_type=ArgumentType.VALUE, data=4)
step_3.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_3.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(5, 6))  # NOTE: there is a known issue with multi-dimensional input
step_3.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce')
step_3.add_output('produce')
pipeline_description.add_step(step_3)

# Final Output
pipeline_description.add_output(name='output', data_reference='steps.3.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)

# Or you can output JSON instead:
# data = pipeline_description.to_json()
@@ -1,63 +0,0 @@
from d3m import index
from d3m.metadata.base import ArgumentType
from d3m.metadata.pipeline import Pipeline, PrimitiveStep

# dataset_to_dataframe -> column_parser -> standard_scaler -> statistical_median_abs_deviation

# Creating pipeline
pipeline_description = Pipeline()
pipeline_description.add_input(name='inputs')

# Step 0: dataset_to_dataframe
primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe')
step_0 = PrimitiveStep(primitive=primitive_0)
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
step_0.add_output('produce')
pipeline_description.add_step(step_0)

# Step 1: column_parser
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser')
step_1 = PrimitiveStep(primitive=primitive_1)
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)

# Step 2: standardization
primitive_2 = index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.standard_scaler')
step_2 = PrimitiveStep(primitive=primitive_2)
step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2, 3, 4, 5, 6))
step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
step_2.add_output('produce')
pipeline_description.add_step(step_2)

# Step 3: sliding-window median absolute deviation of the selected columns
primitive_3 = index.get_primitive('d3m.primitives.tods.feature_analysis.statistical_median_abs_deviation')
step_3 = PrimitiveStep(primitive=primitive_3)
step_3.add_hyperparameter(name='window_size', argument_type=ArgumentType.VALUE, data=4)
step_3.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_3.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(5, 6))  # NOTE: there is a known issue with multi-dimensional input
step_3.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append')
step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce')
step_3.add_output('produce')
pipeline_description.add_step(step_3)

# Final Output
pipeline_description.add_output(name='output', data_reference='steps.3.produce')

# Output to YAML
yaml = pipeline_description.to_yaml()
with open('pipeline.yml', 'w') as f:
    f.write(yaml)

# Or you can output JSON instead:
# data = pipeline_description.to_json()
@@ -1,62 +0,0 @@ | |||
from d3m import index | |||
from d3m.metadata.base import ArgumentType | |||
from d3m.metadata.pipeline import Pipeline, PrimitiveStep | |||
from d3m.metadata import hyperparams | |||
# -> dataset_to_dataframe -> column_parser -> extract_columns_by_semantic_types(attributes) -> imputer -> random_forest | |||
# extract_columns_by_semantic_types(targets) -> ^ | |||
# Creating pipeline | |||
pipeline_description = Pipeline() | |||
pipeline_description.add_input(name='inputs') | |||
# Step 0: dataset_to_dataframe | |||
primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe') | |||
step_0 = PrimitiveStep(primitive=primitive_0) | |||
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0') | |||
step_0.add_output('produce') | |||
pipeline_description.add_step(step_0) | |||
# # Step 1: column_parser | |||
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
step_1 = PrimitiveStep(primitive=primitive_1) | |||
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
step_1.add_output('produce') | |||
pipeline_description.add_step(step_1) | |||
# # Step 2: Standardization | |||
primitive_2 = index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.standard_scaler') | |||
step_2 = PrimitiveStep(primitive=primitive_2) | |||
step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True) | |||
step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2,3,4,5,6)) | |||
step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
step_2.add_output('produce') | |||
pipeline_description.add_step(step_2) | |||
# Step 3: test primitive
# primitive_3 = index.get_primitive('d3m.primitives.anomaly_detection.KNNPrimitive')
primitive_3 = index.get_primitive('d3m.primitives.tods.feature_analysis.statistical_minimum')
step_3 = PrimitiveStep(primitive=primitive_3)
step_3.add_hyperparameter(name='window_size', argument_type=ArgumentType.VALUE, data=4)
step_3.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_3.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(5, 6))  # TODO: multi-dimensional (multi-column) input is not handled correctly yet
step_3.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce') | |||
step_3.add_output('produce') | |||
pipeline_description.add_step(step_3) | |||
# Final Output | |||
pipeline_description.add_output(name='output', data_reference='steps.3.produce') | |||
# Output to YAML | |||
yaml = pipeline_description.to_yaml() | |||
with open('pipeline.yml', 'w') as f: | |||
f.write(yaml) | |||
# Alternatively, output JSON:
# data = pipeline_description.to_json()
@@ -1,62 +0,0 @@ | |||
from d3m import index | |||
from d3m.metadata.base import ArgumentType | |||
from d3m.metadata.pipeline import Pipeline, PrimitiveStep | |||
from d3m.metadata import hyperparams | |||
# Pipeline overview:
# dataset_to_dataframe -> column_parser -> standard_scaler -> statistical_skew
# Creating pipeline | |||
pipeline_description = Pipeline() | |||
pipeline_description.add_input(name='inputs') | |||
# Step 0: dataset_to_dataframe | |||
primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe') | |||
step_0 = PrimitiveStep(primitive=primitive_0) | |||
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0') | |||
step_0.add_output('produce') | |||
pipeline_description.add_step(step_0) | |||
# Step 1: column_parser
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser')
step_1 = PrimitiveStep(primitive=primitive_1)
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)
# Step 2: Standardization
primitive_2 = index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.standard_scaler')
step_2 = PrimitiveStep(primitive=primitive_2)
step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2, 3, 4, 5, 6))
step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
step_2.add_output('produce') | |||
pipeline_description.add_step(step_2) | |||
# Step 3: test primitive
# primitive_3 = index.get_primitive('d3m.primitives.anomaly_detection.KNNPrimitive')
primitive_3 = index.get_primitive('d3m.primitives.tods.feature_analysis.statistical_skew')
step_3 = PrimitiveStep(primitive=primitive_3)
step_3.add_hyperparameter(name='window_size', argument_type=ArgumentType.VALUE, data=4)
step_3.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_3.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(5, 6))  # TODO: multi-dimensional (multi-column) input is not handled correctly yet
step_3.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce') | |||
step_3.add_output('produce') | |||
pipeline_description.add_step(step_3) | |||
# Final Output | |||
pipeline_description.add_output(name='output', data_reference='steps.3.produce') | |||
# Output to YAML | |||
yaml = pipeline_description.to_yaml() | |||
with open('pipeline.yml', 'w') as f: | |||
f.write(yaml) | |||
# Alternatively, output JSON:
# data = pipeline_description.to_json()
@@ -1,62 +0,0 @@ | |||
from d3m import index | |||
from d3m.metadata.base import ArgumentType | |||
from d3m.metadata.pipeline import Pipeline, PrimitiveStep | |||
from d3m.metadata import hyperparams | |||
# Pipeline overview:
# dataset_to_dataframe -> column_parser -> standard_scaler -> statistical_std
# Creating pipeline | |||
pipeline_description = Pipeline() | |||
pipeline_description.add_input(name='inputs') | |||
# Step 0: dataset_to_dataframe | |||
primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe') | |||
step_0 = PrimitiveStep(primitive=primitive_0) | |||
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0') | |||
step_0.add_output('produce') | |||
pipeline_description.add_step(step_0) | |||
# Step 1: column_parser
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser')
step_1 = PrimitiveStep(primitive=primitive_1)
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)
# Step 2: Standardization
primitive_2 = index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.standard_scaler')
step_2 = PrimitiveStep(primitive=primitive_2)
step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2, 3, 4, 5, 6))
step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
step_2.add_output('produce') | |||
pipeline_description.add_step(step_2) | |||
# Step 3: test primitive
# primitive_3 = index.get_primitive('d3m.primitives.anomaly_detection.KNNPrimitive')
primitive_3 = index.get_primitive('d3m.primitives.tods.feature_analysis.statistical_std')
step_3 = PrimitiveStep(primitive=primitive_3)
step_3.add_hyperparameter(name='window_size', argument_type=ArgumentType.VALUE, data=4)
step_3.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_3.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(5, 6))  # TODO: multi-dimensional (multi-column) input is not handled correctly yet
step_3.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce') | |||
step_3.add_output('produce') | |||
pipeline_description.add_step(step_3) | |||
# Final Output | |||
pipeline_description.add_output(name='output', data_reference='steps.3.produce') | |||
# Output to YAML | |||
yaml = pipeline_description.to_yaml() | |||
with open('pipeline.yml', 'w') as f: | |||
f.write(yaml) | |||
# Alternatively, output JSON:
# data = pipeline_description.to_json()
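# The scripts above differ only in the step-3 primitive. A sketch of factoring
# out the shared prefix (an addition; 'build_base_pipeline' is a hypothetical
# helper, the primitive paths are the ones already used above):
def build_base_pipeline():
    # dataset_to_dataframe -> column_parser, the prefix every script repeats.
    pipeline = Pipeline()
    pipeline.add_input(name='inputs')
    prefix = [
        ('d3m.primitives.tods.data_processing.dataset_to_dataframe', 'inputs.0'),
        ('d3m.primitives.tods.data_processing.column_parser', 'steps.0.produce'),
    ]
    for path, data_reference in prefix:
        step = PrimitiveStep(primitive=index.get_primitive(path))
        step.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference=data_reference)
        step.add_output('produce')
        pipeline.add_step(step)
    return pipeline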
@@ -1,62 +0,0 @@ | |||
from d3m import index | |||
from d3m.metadata.base import ArgumentType | |||
from d3m.metadata.pipeline import Pipeline, PrimitiveStep | |||
from d3m.metadata import hyperparams | |||
# Pipeline overview:
# dataset_to_dataframe -> column_parser -> standard_scaler -> statistical_var
# Creating pipeline | |||
pipeline_description = Pipeline() | |||
pipeline_description.add_input(name='inputs') | |||
# Step 0: dataset_to_dataframe | |||
primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe') | |||
step_0 = PrimitiveStep(primitive=primitive_0) | |||
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0') | |||
step_0.add_output('produce') | |||
pipeline_description.add_step(step_0) | |||
# Step 1: column_parser
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser')
step_1 = PrimitiveStep(primitive=primitive_1)
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)
# Step 2: Standardization
primitive_2 = index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.standard_scaler')
step_2 = PrimitiveStep(primitive=primitive_2)
step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2, 3, 4, 5, 6))
step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
step_2.add_output('produce') | |||
pipeline_description.add_step(step_2) | |||
# Step 3: test primitive
# primitive_3 = index.get_primitive('d3m.primitives.anomaly_detection.KNNPrimitive')
primitive_3 = index.get_primitive('d3m.primitives.tods.feature_analysis.statistical_var')
step_3 = PrimitiveStep(primitive=primitive_3)
step_3.add_hyperparameter(name='window_size', argument_type=ArgumentType.VALUE, data=4)
step_3.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_3.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(5, 6))  # TODO: multi-dimensional (multi-column) input is not handled correctly yet
step_3.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce') | |||
step_3.add_output('produce') | |||
pipeline_description.add_step(step_3) | |||
# Final Output | |||
pipeline_description.add_output(name='output', data_reference='steps.3.produce') | |||
# Output to YAML | |||
yaml = pipeline_description.to_yaml() | |||
with open('pipeline.yml', 'w') as f: | |||
f.write(yaml) | |||
# Alternatively, output JSON:
# data = pipeline_description.to_json()
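# What step 3 computes, roughly: a statistic over a sliding window of
# 'window_size' samples in each selected column. A plain-pandas sketch of a
# rolling variance (assumed behavior; the primitive's exact edge handling and
# output naming may differ):
import pandas as pd
series = pd.Series([1.0, 2.0, 4.0, 7.0, 11.0])
print(series.rolling(window=4).var())  # first window_size - 1 entries are NaN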
@@ -1,62 +0,0 @@ | |||
from d3m import index | |||
from d3m.metadata.base import ArgumentType | |||
from d3m.metadata.pipeline import Pipeline, PrimitiveStep | |||
from d3m.metadata import hyperparams | |||
# Pipeline overview:
# dataset_to_dataframe -> column_parser -> standard_scaler -> statistical_variation
# Creating pipeline | |||
pipeline_description = Pipeline() | |||
pipeline_description.add_input(name='inputs') | |||
# Step 0: dataset_to_dataframe | |||
primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe') | |||
step_0 = PrimitiveStep(primitive=primitive_0) | |||
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0') | |||
step_0.add_output('produce') | |||
pipeline_description.add_step(step_0) | |||
# Step 1: column_parser
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser')
step_1 = PrimitiveStep(primitive=primitive_1)
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)
# Step 2: Standardization
primitive_2 = index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.standard_scaler')
step_2 = PrimitiveStep(primitive=primitive_2)
step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2, 3, 4, 5, 6))
step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
step_2.add_output('produce') | |||
pipeline_description.add_step(step_2) | |||
# Step 3: test primitive
# primitive_3 = index.get_primitive('d3m.primitives.anomaly_detection.KNNPrimitive')
primitive_3 = index.get_primitive('d3m.primitives.tods.feature_analysis.statistical_variation')
step_3 = PrimitiveStep(primitive=primitive_3)
step_3.add_hyperparameter(name='window_size', argument_type=ArgumentType.VALUE, data=4)
step_3.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_3.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(5, 6))  # TODO: multi-dimensional (multi-column) input is not handled correctly yet
step_3.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce') | |||
step_3.add_output('produce') | |||
pipeline_description.add_step(step_3) | |||
# Final Output | |||
pipeline_description.add_output(name='output', data_reference='steps.3.produce') | |||
# Output to YAML | |||
yaml = pipeline_description.to_yaml() | |||
with open('pipeline.yml', 'w') as f: | |||
f.write(yaml) | |||
# Alternatively, output JSON:
# data = pipeline_description.to_json()
@@ -1,62 +0,0 @@ | |||
from d3m import index | |||
from d3m.metadata.base import ArgumentType | |||
from d3m.metadata.pipeline import Pipeline, PrimitiveStep | |||
from d3m.metadata import hyperparams | |||
# Pipeline overview:
# dataset_to_dataframe -> column_parser -> standard_scaler -> statistical_vec_sum
# Creating pipeline | |||
pipeline_description = Pipeline() | |||
pipeline_description.add_input(name='inputs') | |||
# Step 0: dataset_to_dataframe | |||
primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe') | |||
step_0 = PrimitiveStep(primitive=primitive_0) | |||
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0') | |||
step_0.add_output('produce') | |||
pipeline_description.add_step(step_0) | |||
# Step 1: column_parser
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser')
step_1 = PrimitiveStep(primitive=primitive_1)
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)
# Step 2: Standardization
primitive_2 = index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.standard_scaler')
step_2 = PrimitiveStep(primitive=primitive_2)
step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2, 3, 4, 5, 6))
step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
step_2.add_output('produce') | |||
pipeline_description.add_step(step_2) | |||
# Step 3: test primitive
# primitive_3 = index.get_primitive('d3m.primitives.anomaly_detection.KNNPrimitive')
primitive_3 = index.get_primitive('d3m.primitives.tods.feature_analysis.statistical_vec_sum')
step_3 = PrimitiveStep(primitive=primitive_3)
step_3.add_hyperparameter(name='window_size', argument_type=ArgumentType.VALUE, data=4)
step_3.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_3.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(5, 6))  # TODO: multi-dimensional (multi-column) input is not handled correctly yet
step_3.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce') | |||
step_3.add_output('produce') | |||
pipeline_description.add_step(step_3) | |||
# Final Output | |||
pipeline_description.add_output(name='output', data_reference='steps.3.produce') | |||
# Output to YAML | |||
yaml = pipeline_description.to_yaml() | |||
with open('pipeline.yml', 'w') as f: | |||
f.write(yaml) | |||
# Alternatively, output JSON:
# data = pipeline_description.to_json()
@@ -1,62 +0,0 @@ | |||
from d3m import index | |||
from d3m.metadata.base import ArgumentType | |||
from d3m.metadata.pipeline import Pipeline, PrimitiveStep | |||
from d3m.metadata import hyperparams | |||
# Pipeline overview:
# dataset_to_dataframe -> column_parser -> standard_scaler -> statistical_willison_amplitude
# Creating pipeline | |||
pipeline_description = Pipeline() | |||
pipeline_description.add_input(name='inputs') | |||
# Step 0: dataset_to_dataframe | |||
primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe') | |||
step_0 = PrimitiveStep(primitive=primitive_0) | |||
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0') | |||
step_0.add_output('produce') | |||
pipeline_description.add_step(step_0) | |||
# Step 1: column_parser
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser')
step_1 = PrimitiveStep(primitive=primitive_1)
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)
# Step 2: Standardization
primitive_2 = index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.standard_scaler')
step_2 = PrimitiveStep(primitive=primitive_2)
step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2, 3, 4, 5, 6))
step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
step_2.add_output('produce') | |||
pipeline_description.add_step(step_2) | |||
# Step 3: test primitive
# primitive_3 = index.get_primitive('d3m.primitives.anomaly_detection.KNNPrimitive')
primitive_3 = index.get_primitive('d3m.primitives.tods.feature_analysis.statistical_willison_amplitude')
step_3 = PrimitiveStep(primitive=primitive_3)
step_3.add_hyperparameter(name='window_size', argument_type=ArgumentType.VALUE, data=4)
step_3.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_3.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(5, 6))  # TODO: multi-dimensional (multi-column) input is not handled correctly yet
step_3.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce') | |||
step_3.add_output('produce') | |||
pipeline_description.add_step(step_3) | |||
# Final Output | |||
pipeline_description.add_output(name='output', data_reference='steps.3.produce') | |||
# Output to YAML | |||
yaml = pipeline_description.to_yaml() | |||
with open('pipeline.yml', 'w') as f: | |||
f.write(yaml) | |||
# Alternatively, output JSON:
# data = pipeline_description.to_json()
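# For reference, the Willison amplitude under its usual definition: the number
# of successive-sample differences whose magnitude exceeds a threshold. A
# plain-NumPy sketch (an addition; the TODS primitive's threshold handling and
# windowing may differ):
import numpy as np

def willison_amplitude(x, threshold=0.0):
    # Count |x[i+1] - x[i]| > threshold over the sequence.
    x = np.asarray(x, dtype=float)
    return int(np.sum(np.abs(np.diff(x)) > threshold))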
@@ -1,62 +0,0 @@ | |||
from d3m import index | |||
from d3m.metadata.base import ArgumentType | |||
from d3m.metadata.pipeline import Pipeline, PrimitiveStep | |||
from d3m.metadata import hyperparams | |||
# Pipeline overview:
# dataset_to_dataframe -> column_parser -> standard_scaler -> statistical_zero_crossing
# Creating pipeline | |||
pipeline_description = Pipeline() | |||
pipeline_description.add_input(name='inputs') | |||
# Step 0: dataset_to_dataframe | |||
primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe') | |||
step_0 = PrimitiveStep(primitive=primitive_0) | |||
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0') | |||
step_0.add_output('produce') | |||
pipeline_description.add_step(step_0) | |||
# Step 1: column_parser
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser')
step_1 = PrimitiveStep(primitive=primitive_1)
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)
# Step 2: Standardization
primitive_2 = index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.standard_scaler')
step_2 = PrimitiveStep(primitive=primitive_2)
step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2, 3, 4, 5, 6))
step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
step_2.add_output('produce') | |||
pipeline_description.add_step(step_2) | |||
# Step 3: test primitive
# primitive_3 = index.get_primitive('d3m.primitives.anomaly_detection.KNNPrimitive')
primitive_3 = index.get_primitive('d3m.primitives.tods.feature_analysis.statistical_zero_crossing')
step_3 = PrimitiveStep(primitive=primitive_3)
step_3.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_3.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(9, 10))  # TODO: multi-dimensional (multi-column) input is not handled correctly yet
step_3.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce') | |||
step_3.add_output('produce') | |||
pipeline_description.add_step(step_3) | |||
# Final Output | |||
pipeline_description.add_output(name='output', data_reference='steps.3.produce') | |||
# Output to YAML | |||
yaml = pipeline_description.to_yaml() | |||
with open('pipeline.yml', 'w') as f: | |||
f.write(yaml) | |||
# Alternatively, output JSON:
# data = pipeline_description.to_json()
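# Unlike the statistical_* steps above, statistical_zero_crossing takes no
# 'window_size' hyperparameter. A sketch for checking what a primitive accepts
# before configuring it (assumes the d3m metadata API's get_hyperparams() and
# the Hyperparams class's 'configuration' mapping):
# hp_class = primitive_3.metadata.get_hyperparams()
# print(sorted(hp_class.configuration.keys()))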
@@ -1,61 +0,0 @@ | |||
from d3m import index | |||
from d3m.metadata.base import ArgumentType | |||
from d3m.metadata.pipeline import Pipeline, PrimitiveStep | |||
from d3m.metadata import hyperparams | |||
import copy | |||
# Pipeline overview:
# dataset_to_dataframe -> column_parser -> standard_scaler -> time_series_seasonality_trend_decomposition
# Creating pipeline | |||
pipeline_description = Pipeline() | |||
pipeline_description.add_input(name='inputs') | |||
# Step 0: dataset_to_dataframe | |||
primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe') | |||
step_0 = PrimitiveStep(primitive=primitive_0) | |||
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0') | |||
step_0.add_output('produce') | |||
pipeline_description.add_step(step_0) | |||
# Step 1: column_parser
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser')
step_1 = PrimitiveStep(primitive=primitive_1)
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
step_1.add_output('produce')
pipeline_description.add_step(step_1)
# Step 2: Standardization
primitive_2 = index.get_primitive('d3m.primitives.tods.timeseries_processing.transformation.standard_scaler')
step_2 = PrimitiveStep(primitive=primitive_2)
step_2.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_2.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(2, 3, 4, 5, 6))
step_2.add_hyperparameter(name='return_result', argument_type=ArgumentType.VALUE, data='append') | |||
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
step_2.add_output('produce') | |||
pipeline_description.add_step(step_2) | |||
# Step 3: test primitive
# primitive_3 = index.get_primitive('d3m.primitives.anomaly_detection.KNNPrimitive')
primitive_3 = index.get_primitive('d3m.primitives.tods.timeseries_processing.decomposition.time_series_seasonality_trend_decomposition')
step_3 = PrimitiveStep(primitive=primitive_3)
step_3.add_hyperparameter(name='period', argument_type=ArgumentType.VALUE, data=5)
step_3.add_hyperparameter(name='use_semantic_types', argument_type=ArgumentType.VALUE, data=True)
step_3.add_hyperparameter(name='use_columns', argument_type=ArgumentType.VALUE, data=(8, 9, 10, 11, 12))  # TODO: multi-dimensional (multi-column) input is not handled correctly yet
step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.2.produce') | |||
step_3.add_output('produce') | |||
pipeline_description.add_step(step_3) | |||
# Final Output | |||
pipeline_description.add_output(name='output', data_reference='steps.3.produce') | |||
# Output to YAML | |||
yaml = pipeline_description.to_yaml() | |||
with open('pipeline.yml', 'w') as f: | |||
f.write(yaml) | |||
# Alternatively, output JSON:
# data = pipeline_description.to_json()
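# Round-trip check (an added sketch): the saved YAML can be loaded back with
# Pipeline.from_yaml, assuming the d3m resolver can locate the TODS primitives
# installed in the environment:
# with open('pipeline.yml', 'r') as f:
#     loaded = Pipeline.from_yaml(f)
# print(loaded.id, len(loaded.steps))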
@@ -14,13 +14,13 @@ step_0.add_output('produce') | |||
pipeline_description.add_step(step_0) | |||
# Step 1: column_parser | |||
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.column_parser.Common')) | |||
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser')) | |||
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
step_1.add_output('produce') | |||
pipeline_description.add_step(step_1) | |||
# Step 2: extract_columns_by_semantic_types(attributes) | |||
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common')) | |||
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types')) | |||
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
step_2.add_output('produce') | |||
step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE, | |||
@@ -28,7 +28,7 @@ step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALU | |||
pipeline_description.add_step(step_2) | |||
# Step 3: extract_columns_by_semantic_types(targets) | |||
step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common')) | |||
step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types')) | |||
step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
step_3.add_output('produce') | |||
step_3.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE, | |||
@@ -16,13 +16,13 @@ step_0.add_output('produce') | |||
pipeline_description.add_step(step_0) | |||
# Step 1: column_parser | |||
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.column_parser.Common')) | |||
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser')) | |||
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
step_1.add_output('produce') | |||
pipeline_description.add_step(step_1) | |||
# Step 2: extract_columns_by_semantic_types(attributes) | |||
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common')) | |||
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types')) | |||
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
step_2.add_output('produce') | |||
step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE, | |||
@@ -30,7 +30,7 @@ step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALU | |||
pipeline_description.add_step(step_2) | |||
# Step 3: extract_columns_by_semantic_types(targets) | |||
step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common')) | |||
step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types')) | |||
step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
step_3.add_output('produce') | |||
step_3.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE, | |||
@@ -19,14 +19,14 @@ step_0.add_output('produce') | |||
pipeline_description.add_step(step_0) | |||
# # Step 1: column_parser | |||
primitive_1 = index.get_primitive('d3m.primitives.data_transformation.column_parser.Common') | |||
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
step_1 = PrimitiveStep(primitive=primitive_1) | |||
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
step_1.add_output('produce') | |||
pipeline_description.add_step(step_1) | |||
# Step 2: extract_columns_by_semantic_types(attributes) | |||
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common')) | |||
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types')) | |||
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
step_2.add_output('produce') | |||
step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE, data=['https://metadata.datadrivendiscovery.org/types/Attribute']) | |||
@@ -19,7 +19,7 @@ step_0.add_output('produce') | |||
pipeline_description.add_step(step_0) | |||
# # Step 1: column_parser | |||
primitive_1 = index.get_primitive('d3m.primitives.data_transformation.column_parser.Common') | |||
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
step_1 = PrimitiveStep(primitive=primitive_1) | |||
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
step_1.add_output('produce') | |||
@@ -15,7 +15,7 @@ pipeline_description.add_step(step_0) | |||
# Step 1: column_parser | |||
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.column_parser.Common')) | |||
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser')) | |||
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
step_1.add_output('produce') | |||
pipeline_description.add_step(step_1) | |||
@@ -19,7 +19,7 @@ step_0.add_output('produce') | |||
pipeline_description.add_step(step_0) | |||
# # Step 1: column_parser | |||
primitive_1 = index.get_primitive('d3m.primitives.data_transformation.column_parser.Common') | |||
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
step_1 = PrimitiveStep(primitive=primitive_1) | |||
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
step_1.add_output('produce') | |||
@@ -18,7 +18,7 @@ step_0.add_output('produce') | |||
pipeline_description.add_step(step_0) | |||
# Step 1: Column Parser | |||
primitive_1 = index.get_primitive('d3m.primitives.data_transformation.column_parser.Common') | |||
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
step_1 = PrimitiveStep(primitive=primitive_1) | |||
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
step_1.add_output('produce') | |||
@@ -10,14 +10,14 @@ pipeline_description = Pipeline() | |||
pipeline_description.add_input(name='inputs') | |||
# Step 0: dataset_to_dataframe | |||
primitive_0 = index.get_primitive('d3m.primitives.data_transformation.dataset_to_dataframe.Common') | |||
primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe') | |||
step_0 = PrimitiveStep(primitive=primitive_0) | |||
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0') | |||
step_0.add_output('produce') | |||
pipeline_description.add_step(step_0) | |||
#Step 1: column_parser | |||
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.column_parser.Common')) | |||
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser')) | |||
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
step_1.add_output('produce') | |||
pipeline_description.add_step(step_1) | |||
@@ -13,7 +13,7 @@ step_0.add_output('produce') | |||
pipeline_description.add_step(step_0) | |||
# Step 1: column_parser | |||
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.column_parser.Common')) | |||
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser')) | |||
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
step_1.add_output('produce') | |||
pipeline_description.add_step(step_1) | |||
@@ -11,14 +11,14 @@ pipeline_description = Pipeline() | |||
pipeline_description.add_input(name='inputs') | |||
# Step 0: dataset_to_dataframe | |||
primitive_0 = index.get_primitive('d3m.primitives.data_transformation.dataset_to_dataframe.Common') | |||
primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe') | |||
step_0 = PrimitiveStep(primitive=primitive_0) | |||
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0') | |||
step_0.add_output('produce') | |||
pipeline_description.add_step(step_0) | |||
# # Step 1: column_parser | |||
primitive_1 = index.get_primitive('d3m.primitives.data_transformation.column_parser.Common') | |||
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
step_1 = PrimitiveStep(primitive=primitive_1) | |||
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
step_1.add_output('produce') | |||
@@ -18,7 +18,7 @@ step_0.add_output('produce') | |||
pipeline_description.add_step(step_0) | |||
# Step 1: Column Parser | |||
primitive_1 = index.get_primitive('d3m.primitives.data_transformation.column_parser.Common') | |||
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
step_1 = PrimitiveStep(primitive=primitive_1) | |||
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
step_1.add_output('produce') | |||
@@ -15,7 +15,7 @@ pipeline_description.add_step(step_0) | |||
# Step 1: column_parser | |||
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.column_parser.Common')) | |||
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser')) | |||
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
step_1.add_output('produce') | |||
pipeline_description.add_step(step_1) | |||
@@ -18,7 +18,7 @@ step_0.add_output('produce') | |||
pipeline_description.add_step(step_0) | |||
# Step 1: Column Parser | |||
primitive_1 = index.get_primitive('d3m.primitives.data_transformation.column_parser.Common') | |||
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
step_1 = PrimitiveStep(primitive=primitive_1) | |||
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
step_1.add_output('produce') | |||
@@ -14,13 +14,13 @@ step_0.add_output('produce') | |||
pipeline_description.add_step(step_0) | |||
# Step 1: column_parser | |||
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.column_parser.Common')) | |||
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser')) | |||
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
step_1.add_output('produce') | |||
pipeline_description.add_step(step_1) | |||
# Step 2: extract_columns_by_semantic_types(attributes) | |||
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common')) | |||
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types')) | |||
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
step_2.add_output('produce') | |||
step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE, | |||
@@ -28,7 +28,7 @@ step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALU | |||
pipeline_description.add_step(step_2) | |||
# Step 3: extract_columns_by_semantic_types(targets) | |||
step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common')) | |||
step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types')) | |||
step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
step_3.add_output('produce') | |||
step_3.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE, | |||
@@ -14,13 +14,13 @@ step_0.add_output('produce') | |||
pipeline_description.add_step(step_0) | |||
# Step 1: column_parser | |||
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.column_parser.Common')) | |||
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser')) | |||
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
step_1.add_output('produce') | |||
pipeline_description.add_step(step_1) | |||
# Step 2: extract_columns_by_semantic_types(attributes) | |||
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common')) | |||
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types')) | |||
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
step_2.add_output('produce') | |||
step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE, | |||
@@ -28,7 +28,7 @@ step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALU | |||
pipeline_description.add_step(step_2) | |||
# Step 3: extract_columns_by_semantic_types(targets) | |||
step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common')) | |||
step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types')) | |||
step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
step_3.add_output('produce') | |||
step_3.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE, | |||
@@ -8,14 +8,14 @@ pipeline_description = Pipeline() | |||
pipeline_description.add_input(name='inputs') | |||
# Step 0: dataset_to_dataframe | |||
step_0 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.dataset_to_dataframe.Common')) | |||
step_0 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe')) | |||
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0') | |||
step_0.add_output('produce') | |||
pipeline_description.add_step(step_0) | |||
# Step 1: column_parser | |||
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.column_parser.Common')) | |||
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser')) | |||
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
step_1.add_output('produce') | |||
pipeline_description.add_step(step_1) | |||
@@ -17,13 +17,13 @@ step_0.add_output('produce') | |||
pipeline_description.add_step(step_0) | |||
# Step 1: column_parser | |||
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.column_parser.Common')) | |||
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser')) | |||
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
step_1.add_output('produce') | |||
pipeline_description.add_step(step_1) | |||
# Step 2: extract_columns_by_semantic_types(attributes) | |||
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common')) | |||
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types')) | |||
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
step_2.add_output('produce') | |||
step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE, | |||
@@ -31,7 +31,7 @@ step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALU | |||
pipeline_description.add_step(step_2) | |||
# Step 3: extract_columns_by_semantic_types(targets) | |||
step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common')) | |||
step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types')) | |||
step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
step_3.add_output('produce') | |||
step_3.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE, | |||
@@ -17,13 +17,13 @@ step_0.add_output('produce') | |||
pipeline_description.add_step(step_0) | |||
# Step 1: column_parser | |||
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.column_parser.Common')) | |||
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser')) | |||
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
step_1.add_output('produce') | |||
pipeline_description.add_step(step_1) | |||
# Step 2: extract_columns_by_semantic_types(attributes) | |||
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common')) | |||
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types')) | |||
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
step_2.add_output('produce') | |||
step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE, | |||
@@ -31,7 +31,7 @@ step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALU | |||
pipeline_description.add_step(step_2) | |||
# Step 3: extract_columns_by_semantic_types(targets) | |||
step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common')) | |||
step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types')) | |||
step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
step_3.add_output('produce') | |||
step_3.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE, | |||
@@ -19,14 +19,14 @@ step_0.add_output('produce') | |||
pipeline_description.add_step(step_0) | |||
# # Step 1: column_parser | |||
primitive_1 = index.get_primitive('d3m.primitives.data_transformation.column_parser.Common') | |||
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
step_1 = PrimitiveStep(primitive=primitive_1) | |||
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
step_1.add_output('produce') | |||
pipeline_description.add_step(step_1) | |||
# Step 2: extract_columns_by_semantic_types(attributes) | |||
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common')) | |||
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types')) | |||
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
step_2.add_output('produce') | |||
step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE, data=['https://metadata.datadrivendiscovery.org/types/Attribute']) | |||
@@ -12,21 +12,21 @@ pipeline_description = Pipeline() | |||
pipeline_description.add_input(name='inputs') | |||
# Step 0: dataset_to_dataframe | |||
primitive_0 = index.get_primitive('d3m.primitives.data_transformation.dataset_to_dataframe.Common') | |||
primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe') | |||
step_0 = PrimitiveStep(primitive=primitive_0) | |||
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0') | |||
step_0.add_output('produce') | |||
pipeline_description.add_step(step_0) | |||
# # Step 1: column_parser | |||
primitive_1 = index.get_primitive('d3m.primitives.data_transformation.column_parser.Common') | |||
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
step_1 = PrimitiveStep(primitive=primitive_1) | |||
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
step_1.add_output('produce') | |||
pipeline_description.add_step(step_1) | |||
# Step 2: extract_columns_by_semantic_types(attributes) | |||
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common')) | |||
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types')) | |||
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
step_2.add_output('produce') | |||
step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE, data=['https://metadata.datadrivendiscovery.org/types/Attribute']) | |||
@@ -19,7 +19,7 @@ step_0.add_output('produce') | |||
pipeline_description.add_step(step_0) | |||
# # Step 1: column_parser | |||
primitive_1 = index.get_primitive('d3m.primitives.data_transformation.column_parser.Common') | |||
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
step_1 = PrimitiveStep(primitive=primitive_1) | |||
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
step_1.add_output('produce') | |||
@@ -19,7 +19,7 @@ step_0.add_output('produce') | |||
pipeline_description.add_step(step_0) | |||
# # Step 1: column_parser | |||
primitive_1 = index.get_primitive('d3m.primitives.data_transformation.column_parser.Common') | |||
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
step_1 = PrimitiveStep(primitive=primitive_1) | |||
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
step_1.add_output('produce') | |||
@@ -19,7 +19,7 @@ step_0.add_output('produce') | |||
pipeline_description.add_step(step_0) | |||
# # Step 1: column_parser | |||
primitive_1 = index.get_primitive('d3m.primitives.data_transformation.column_parser.Common') | |||
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
step_1 = PrimitiveStep(primitive=primitive_1) | |||
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
step_1.add_output('produce') | |||
@@ -19,14 +19,14 @@ step_0.add_output('produce') | |||
pipeline_description.add_step(step_0) | |||
# # Step 1: column_parser | |||
primitive_1 = index.get_primitive('d3m.primitives.data_transformation.column_parser.Common') | |||
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
step_1 = PrimitiveStep(primitive=primitive_1) | |||
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
step_1.add_output('produce') | |||
pipeline_description.add_step(step_1) | |||
# Step 2: extract_columns_by_semantic_types(attributes) | |||
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common')) | |||
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types')) | |||
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
step_2.add_output('produce') | |||
step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE, data=['https://metadata.datadrivendiscovery.org/types/Attribute']) | |||
@@ -18,7 +18,7 @@ step_0.add_output('produce') | |||
pipeline_description.add_step(step_0) | |||
# # Step 1: column_parser | |||
primitive_1 = index.get_primitive('d3m.primitives.data_transformation.column_parser.Common') | |||
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
step_1 = PrimitiveStep(primitive=primitive_1) | |||
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
step_1.add_output('produce') | |||
@@ -17,14 +17,14 @@ step_0.add_output('produce') | |||
pipeline_description.add_step(step_0) | |||
# Step 1: column_parser | |||
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.column_parser.Common')) | |||
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser')) | |||
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
step_1.add_output('produce') | |||
pipeline_description.add_step(step_1) | |||
# Step 2: extract_columns_by_semantic_types(attributes) | |||
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common')) | |||
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types')) | |||
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
step_2.add_output('produce') | |||
step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE, | |||
@@ -32,7 +32,7 @@ step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALU | |||
pipeline_description.add_step(step_2) | |||
# Step 3: extract_columns_by_semantic_types(targets) | |||
step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common')) | |||
step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types')) | |||
step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
step_3.add_output('produce') | |||
step_3.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE, | |||
@@ -18,7 +18,7 @@ step_0.add_output('produce') | |||
pipeline_description.add_step(step_0) | |||
# Step 1: Column Parser | |||
primitive_1 = index.get_primitive('d3m.primitives.data_transformation.column_parser.Common') | |||
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
step_1 = PrimitiveStep(primitive=primitive_1) | |||
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
step_1.add_output('produce') | |||
@@ -19,7 +19,7 @@ step_0.add_output('produce') | |||
pipeline_description.add_step(step_0) | |||
# # Step 1: column_parser | |||
primitive_1 = index.get_primitive('d3m.primitives.data_transformation.column_parser.Common') | |||
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
step_1 = PrimitiveStep(primitive=primitive_1) | |||
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
step_1.add_output('produce') | |||
@@ -12,21 +12,21 @@ pipeline_description = Pipeline() | |||
pipeline_description.add_input(name='inputs') | |||
# Step 0: dataset_to_dataframe | |||
primitive_0 = index.get_primitive('d3m.primitives.data_transformation.dataset_to_dataframe.Common') | |||
primitive_0 = index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe') | |||
step_0 = PrimitiveStep(primitive=primitive_0) | |||
step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0') | |||
step_0.add_output('produce') | |||
pipeline_description.add_step(step_0) | |||
# # Step 1: column_parser | |||
primitive_1 = index.get_primitive('d3m.primitives.data_transformation.column_parser.Common') | |||
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
step_1 = PrimitiveStep(primitive=primitive_1) | |||
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
step_1.add_output('produce') | |||
pipeline_description.add_step(step_1) | |||
# Step 2: extract_columns_by_semantic_types(attributes) | |||
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common')) | |||
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types')) | |||
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
step_2.add_output('produce') | |||
step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE, data=['https://metadata.datadrivendiscovery.org/types/Attribute']) | |||
@@ -19,7 +19,7 @@ step_0.add_output('produce') | |||
pipeline_description.add_step(step_0) | |||
# # Step 1: column_parser | |||
primitive_1 = index.get_primitive('d3m.primitives.data_transformation.column_parser.Common') | |||
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
step_1 = PrimitiveStep(primitive=primitive_1) | |||
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
step_1.add_output('produce') | |||
@@ -19,7 +19,7 @@ step_0.add_output('produce') | |||
pipeline_description.add_step(step_0) | |||
# # Step 1: column_parser | |||
primitive_1 = index.get_primitive('d3m.primitives.data_transformation.column_parser.Common') | |||
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
step_1 = PrimitiveStep(primitive=primitive_1) | |||
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
step_1.add_output('produce') | |||
@@ -19,7 +19,7 @@ step_0.add_output('produce') | |||
pipeline_description.add_step(step_0) | |||
# # Step 1: column_parser | |||
primitive_1 = index.get_primitive('d3m.primitives.data_transformation.column_parser.Common') | |||
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
step_1 = PrimitiveStep(primitive=primitive_1) | |||
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
step_1.add_output('produce') | |||
@@ -14,13 +14,13 @@ step_0.add_output('produce') | |||
pipeline_description.add_step(step_0) | |||
# Step 1: column_parser | |||
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.column_parser.Common')) | |||
step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser')) | |||
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
step_1.add_output('produce') | |||
pipeline_description.add_step(step_1) | |||
# Step 2: extract_columns_by_semantic_types(attributes) | |||
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common')) | |||
step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types')) | |||
step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') | |||
step_2.add_output('produce') | |||
step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE, data=['https://metadata.datadrivendiscovery.org/types/Attribute']) | |||
@@ -18,7 +18,7 @@ step_0.add_output('produce') | |||
pipeline_description.add_step(step_0) | |||
# # Step 1: column_parser | |||
primitive_1 = index.get_primitive('d3m.primitives.data_transformation.column_parser.Common') | |||
primitive_1 = index.get_primitive('d3m.primitives.tods.data_processing.column_parser') | |||
step_1 = PrimitiveStep(primitive=primitive_1) | |||
step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') | |||
step_1.add_output('produce') | |||
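# The hunks above all make the same kind of change: common d3m data-processing
# primitive paths are replaced by their TODS counterparts. A consolidated
# sketch of the renames (an addition; 'migrate_path' is a hypothetical helper,
# the paths are exactly those shown in the diffs):
PATH_MIGRATIONS = {
    'd3m.primitives.data_transformation.dataset_to_dataframe.Common':
        'd3m.primitives.tods.data_processing.dataset_to_dataframe',
    'd3m.primitives.data_transformation.column_parser.Common':
        'd3m.primitives.tods.data_processing.column_parser',
    'd3m.primitives.data_transformation.extract_columns_by_semantic_types.Common':
        'd3m.primitives.tods.data_processing.extract_columns_by_semantic_types',
}

def migrate_path(path):
    # Return the TODS path when a rename applies; otherwise leave it unchanged.
    return PATH_MIGRATIONS.get(path, path)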