Mamokari/observability#307
base: master
Changes from all commits
`.gitignore` (file name inferred from the ignored patterns):

```diff
@@ -109,3 +109,6 @@ condaenv.*
 .mypy_cache/
 .DS_Store
+#pycharm
+.idea
```
The model evaluation script (the file path is not shown in the extraction):

```diff
@@ -26,7 +26,11 @@
 from azureml.core import Run
 import argparse
 import traceback
-from util.model_helper import get_model
+from diabetes_regression.util.model_helper import get_model
+from utils.logger.logger_interface import Severity
+from utils.logger.observability import Observability
+
+observability = Observability()

 run = Run.get_context()
```

Review comment on the changed `model_helper` import:

> As @j-so mentioned, all the references to …
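The `utils.logger` package itself is not part of this diff. As orientation only, here is a minimal sketch of the surface the script calls into; the class and method names come from the diff, but the severity levels, the default severity, and the body of `log` are assumptions:

```python
# Hypothetical sketch of utils/logger/logger_interface.py and
# utils/logger/observability.py -- only the call surface used by
# this diff, not the repository's actual implementation.
from enum import IntEnum


class Severity(IntEnum):
    # Levels assumed; the diff only uses Severity.ERROR.
    DEBUG = 0
    INFO = 1
    WARNING = 2
    ERROR = 3
    CRITICAL = 4


class Observability:
    def log(self, description="", severity=Severity.INFO):
        # The diff calls both log("message") and
        # log(description=..., severity=Severity.ERROR), so `log`
        # must accept a positional message and an optional severity.
        # A real implementation would fan out to the AML run and/or
        # Application Insights; printing is a stand-in here.
        print("[{}] {}".format(severity.name, description))
```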
```diff
@@ -42,7 +46,7 @@
 # load_dotenv()
 # sources_dir = os.environ.get("SOURCES_DIR_TRAIN")
 # if (sources_dir is None):
-#     sources_dir = 'diabetes_regression'
+#     sources_dir = '.'
 # path_to_util = os.path.join(".", sources_dir, "util")
 # sys.path.append(os.path.abspath(path_to_util))  # NOQA: E402
 # from model_helper import get_model
```
| @@ -89,7 +93,8 @@ | ||
| parser.add_argument( | ||
| "--allow_run_cancel", | ||
| type=str, | ||
| help="Set this to false to avoid evaluation step from cancelling run after an unsuccessful evaluation", # NOQA: E501 | ||
| help="Set this to false to avoid evaluation step from cancelling " | ||
| "run after an unsuccessful evaluation", | ||
| default="true", | ||
| ) | ||
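The help-string change works because Python joins adjacent string literals at compile time, so the long line can be split to satisfy flake8's E501 line-length limit (making the `# NOQA: E501` suppression unnecessary) without changing the resulting string. A quick illustration:

```python
# Adjacent string literals are concatenated at compile time;
# no newline or extra character is inserted at the split point.
help_text = ("Set this to false to avoid evaluation step from cancelling "
             "run after an unsuccessful evaluation")
assert "cancelling run" in help_text
```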
The evaluation logic switches from `print` to `observability.log`. The `get_model` arguments appear twice in the extraction, which is consistent with an indentation-only change; the exact whitespace below is a guess:

```diff
@@ -109,42 +114,47 @@
     tag_name = 'experiment_name'

     model = get_model(
-        model_name=model_name,
-        tag_name=tag_name,
-        tag_value=exp.name,
-        aml_workspace=ws)
+            model_name=model_name,
+            tag_name=tag_name,
+            tag_value=exp.name,
+            aml_workspace=ws)

     if (model is not None):
         production_model_mse = 10000
         if (metric_eval in model.tags):
             production_model_mse = float(model.tags[metric_eval])
         new_model_mse = float(run.parent.get_metrics().get(metric_eval))
         if (production_model_mse is None or new_model_mse is None):
-            print("Unable to find", metric_eval, "metrics, "
-                  "exiting evaluation")
-            if ((allow_run_cancel).lower() == 'true'):
+            observability.log("Unable to find " +
+                              metric_eval + " metrics, exiting evaluation")
+            if ((allow_run_cancel).lower() == 'true'):
                 run.parent.cancel()
         else:
-            print(
+            observability.log(
                 "Current Production model mse: {}, "
                 "New trained model mse: {}".format(
                     production_model_mse, new_model_mse
                 )
             )
             if (new_model_mse < production_model_mse):
-                print("New trained model performs better, "
-                      "thus it should be registered")
+                observability.log("New trained model performs better, "
+                                  "thus it should be registered")
             else:
-                print("New trained model metric is worse than or equal to "
-                      "production model so skipping model registration.")
-                if ((allow_run_cancel).lower() == 'true'):
+                observability.log("New trained model metric is worse "
+                                  "than or equal to "
+                                  "production model so skipping "
+                                  "model registration.")
+                if ((allow_run_cancel).lower() == 'true'):
                     run.parent.cancel()
     else:
-        print("This is the first model, "
-              "thus it should be registered")
+        observability.log("This is the first model, "
+                          "thus it should be registered")
 except Exception:
     traceback.print_exc(limit=None, file=None, chain=True)
-    print("Something went wrong trying to evaluate. Exiting.")
+    observability.log(
+        description="Something went wrong trying to evaluate. Exiting.",
+        severity=Severity.ERROR)
     raise
```
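`model_helper.get_model` is not included in the diff either. A hypothetical sketch against the `azureml.core` `Model` API — only the signature (`model_name`, `tag_name`, `tag_value`, `aml_workspace`) comes from the diff; the body and defaults are assumptions:

```python
# Hypothetical sketch of diabetes_regression/util/model_helper.py.
from azureml.core.model import Model


def get_model(model_name, tag_name=None, tag_value=None, aml_workspace=None):
    """Return the latest registered model matching the name and an
    optional tag, or None if nothing has been registered yet."""
    tags = [[tag_name, tag_value]] if tag_name and tag_value else None
    models = Model.list(
        aml_workspace, name=model_name, tags=tags, latest=True)
    return models[0] if models else None
```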
Review comment on the hard-coded `sources_dir` change:

> Why is this needed? This environment variable is used to keep the name of the folder, since the name will likely change during bootstrap. For this reason, we should avoid hardcoding this name in the pipeline and code.
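The reviewer's point can be illustrated with a short sketch: read the folder name from `SOURCES_DIR_TRAIN` with a fallback instead of hardcoding it. The fallback value here is an assumption for illustration:

```python
import os

# Resolve the sources folder from the environment so the folder can be
# renamed during bootstrap without touching pipeline code.
sources_dir = os.environ.get("SOURCES_DIR_TRAIN") or "diabetes_regression"
path_to_util = os.path.join(".", sources_dir, "util")
```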