Browse Source

fix searcher python_path

master
lhenry15 4 years ago
parent
commit
bc6f3b473a
4 changed files with 11 additions and 11 deletions
  1. +3
    -3
      examples/axolotl_interface/run_pipeline.py
  2. +1
    -1
      examples/axolotl_interface/run_search.py
  3. +1
    -1
      tods/detection_algorithm/PyodAE.py
  4. +6
    -6
      tods/searcher/brute_force_search.py

+ 3
- 3
examples/axolotl_interface/run_pipeline.py View File

@@ -13,8 +13,8 @@ parser.add_argument('--table_path', type=str, default=default_data_path,
 parser.add_argument('--table_path', type=str, default=default_data_path,
                     help='Input the path of the input data table')
 parser.add_argument('--target_index', type=int, default=6,
                     help='Index of the ground truth (for evaluation)')
-parser.add_argument('--metric',type=str, default='F1_MACRO',
-                    help='Evaluation Metric (F1, F1_MACRO)')
+parser.add_argument('--metric',type=str, default='ALL',
+                    help='Evaluation Metric (F1, F1_MACRO, RECALL, PRECISION, ALL)')
 parser.add_argument('--pipeline_path',
                     default=os.path.join(this_path, './example_pipelines/autoencoder_pipeline.json'),
                     help='Input the path of the pre-built pipeline description')
@@ -35,6 +35,6 @@ pipeline = load_pipeline(pipeline_path)

 # Run the pipeline
 pipeline_result = evaluate_pipeline(dataset, pipeline, metric)
-print(pipeline_result)
+print(pipeline_result.scores)
 #raise pipeline_result.error[0]



+ 1
- 1
examples/axolotl_interface/run_search.py View File

@@ -9,7 +9,7 @@ from tods.searcher import BruteForceSearch
 #table_path = 'datasets/NAB/realTweets/labeled_Twitter_volume_GOOG.csv' # The path of the dataset
 #target_index = 2 # what column is the target

-table_path = 'datasets/yahoo_sub_5.csv'
+table_path = '../../datasets/anomaly/raw_data/yahoo_sub_5.csv'
 target_index = 6 # what column is the target
 #table_path = 'datasets/NAB/realTweets/labeled_Twitter_volume_IBM.csv' # The path of the dataset
 time_limit = 30 # How many seconds you wanna search


+ 1
- 1
tods/detection_algorithm/PyodAE.py View File

@@ -160,7 +160,7 @@ class Hyperparams(Hyperparams_ODBase):
     contamination = hyperparams.Uniform(
         lower=0.,
         upper=0.5,
-        default=0.1,
+        default=0.01,
         description='The amount of contamination of the data set, i.e. the proportion of outliers in the data set. ',
         semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter']
     )


+ 6
- 6
tods/searcher/brute_force_search.py View File

@@ -191,19 +191,19 @@ def _generate_pipline(combinations): # pragma: no cover
     # The first three steps are fixed
     # Step 0: dataset_to_dataframe
-    step_0 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.dataset_to_dataframe.Common'))
+    step_0 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.dataset_to_dataframe'))
     step_0.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='inputs.0')
     step_0.add_output('produce')
     pipeline_description.add_step(step_0)

-    # Step 1: column_parser
-    step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.column_parser.Common'))
+    # Step 1: column_parsr
+    step_1 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.column_parser'))
     step_1.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
     step_1.add_output('produce')
     pipeline_description.add_step(step_1)

     # Step 2: extract_columns_by_semantic_types(attributes)
-    step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common'))
+    step_2 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types'))
     step_2.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
     step_2.add_output('produce')
     step_2.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE,
@@ -211,7 +211,7 @@ def _generate_pipline(combinations): # pragma: no cover
     pipeline_description.add_step(step_2)

     # Step 3: extract_columns_by_semantic_types(targets)
-    step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common'))
+    step_3 = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.extract_columns_by_semantic_types'))
     step_3.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.0.produce')
     step_3.add_output('produce')
     step_3.add_hyperparameter(name='semantic_types', argument_type=ArgumentType.VALUE,
@@ -243,7 +243,7 @@ def _generate_pipline(combinations): # pragma: no cover
     #pipeline_description.add_step(tods_step_7)

     # Finalize the pipeline
-    final_step = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.data_transformation.construct_predictions.Common'))
+    final_step = PrimitiveStep(primitive=index.get_primitive('d3m.primitives.tods.data_processing.construct_predictions'))
     final_step.add_argument(name='inputs', argument_type=ArgumentType.CONTAINER, data_reference='steps.6.produce')
     final_step.add_argument(name='reference', argument_type=ArgumentType.CONTAINER, data_reference='steps.1.produce')
     final_step.add_output('produce')


Loading…
Cancel
Save