From bc6f3b473a320f59074e515ffd8ae0a3f71e4d93 Mon Sep 17 00:00:00 2001 From: lhenry15 Date: Mon, 21 Jun 2021 23:15:46 -0500 Subject: [PATCH] fix searcher python_path --- examples/axolotl_interface/run_pipeline.py | 6 +++--- examples/axolotl_interface/run_search.py | 2 +- tods/detection_algorithm/PyodAE.py | 2 +- tods/searcher/brute_force_search.py | 12 ++++++------ 4 files changed, 11 insertions(+), 11 deletions(-) diff --git a/examples/axolotl_interface/run_pipeline.py b/examples/axolotl_interface/run_pipeline.py index 04c45ff..189bfeb 100644 --- a/examples/axolotl_interface/run_pipeline.py +++ b/examples/axolotl_interface/run_pipeline.py @@ -13,8 +13,8 @@ parser.add_argument('--table_path', type=str, default=default_data_path, help='Input the path of the input data table') parser.add_argument('--target_index', type=int, default=6, help='Index of the ground truth (for evaluation)') -parser.add_argument('--metric',type=str, default='F1_MACRO', - help='Evaluation Metric (F1, F1_MACRO)') +parser.add_argument('--metric',type=str, default='ALL', + help='Evaluation Metric (F1, F1_MACRO, RECALL, PRECISION, ALL)') parser.add_argument('--pipeline_path', default=os.path.join(this_path, './example_pipelines/autoencoder_pipeline.json'), help='Input the path of the pre-built pipeline description') @@ -35,6 +35,6 @@ pipeline = load_pipeline(pipeline_path) # Run the pipeline pipeline_result = evaluate_pipeline(dataset, pipeline, metric) -print(pipeline_result) +print(pipeline_result.scores) #raise pipeline_result.error[0] diff --git a/examples/axolotl_interface/run_search.py b/examples/axolotl_interface/run_search.py index 2bc0c01..7f68314 100644 --- a/examples/axolotl_interface/run_search.py +++ b/examples/axolotl_interface/run_search.py @@ -9,7 +9,7 @@ from tods.searcher import BruteForceSearch #table_path = 'datasets/NAB/realTweets/labeled_Twitter_volume_GOOG.csv' # The path of the dataset #target_index = 2 # what column is the target -table_path = 'datasets/yahoo_sub_5.csv' 
+table_path = '../../datasets/anomaly/raw_data/yahoo_sub_5.csv' target_index = 6 # what column is the target #table_path = 'datasets/NAB/realTweets/labeled_Twitter_volume_IBM.csv' # The path of the dataset time_limit = 30 # How many seconds you wanna search diff --git a/tods/detection_algorithm/PyodAE.py b/tods/detection_algorithm/PyodAE.py index 1e53a7f..abfe72b 100644 --- a/tods/detection_algorithm/PyodAE.py +++ b/tods/detection_algorithm/PyodAE.py @@ -160,7 +160,7 @@ class Hyperparams(Hyperparams_ODBase): contamination = hyperparams.Uniform( lower=0., upper=0.5, - default=0.1, + default=0.01, description='The amount of contamination of the data set, i.e. the proportion of outliers in the data set. ', semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter'] ) diff --git a/tods/searcher/brute_force_search.py b/tods/searcher/brute_force_search.py index 611fddd..18b87c1 100644 --- a/tods/searcher/brute_force_search.py +++ b/tods/searcher/brute_force_search.py @@ -191,19 +191,19 @@ def _generate_pipline(combinations): # pragma: no cover # The first three steps are fixed # Step 0: dataset_to_dataframe - step_0 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.dataset_to_dataframe.Common')) + step_0 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe')) step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0') step_0.add_output('produce') pipeline_description.add_step(step_0)  - # Step 1: column_parser - step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.column_parser.Common')) + # Step 1: column_parser + step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser')) step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') step_1.add_output('produce') pipeline_description.add_step(step_1)  # 
Step 2: extract_columns_by_semantic_types(attributes) - step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common')) + step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types')) step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') step_2.add_output('produce') step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE, @@ -211,7 +211,7 @@ def _generate_pipline(combinations): # pragma: no cover pipeline_description.add_step(step_2) # Step 3: extract_columns_by_semantic_types(targets) - step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common')) + step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types')) step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce') step_3.add_output('produce') step_3.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE, @@ -243,7 +243,7 @@ def _generate_pipline(combinations): # pragma: no cover #pipeline_description.add_step(tods_step_7) # Finalize the pipeline - final_step = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.construct_predictions.Common')) + final_step = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.construct_predictions')) final_step.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.6.produce') final_step.add_argument(name='reference', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce') final_step.add_output('produce')