diff --git a/aviary/docs/getting_started/onboarding_level1.ipynb b/aviary/docs/getting_started/onboarding_level1.ipynb index 030014f7c..28ed8d8d5 100644 --- a/aviary/docs/getting_started/onboarding_level1.ipynb +++ b/aviary/docs/getting_started/onboarding_level1.ipynb @@ -1020,7 +1020,7 @@ "source": [ "### SQLite database file\n", "\n", - "There is a `.db` file after run. By default, it is {glue:md}`record_filename_default` in the current directory. This is an SQLite database file. In level 2 and level 3, we will be able to choose a different name. Our run is recorded into this file. You generally shouldn't need to parse through this file on your own, but it is available if you're seeking additional problem information." + "There is a `.db` file called `problem_history.db` created in the output directory after a run. This is an SQLite database file. Our run is recorded into this file. You generally shouldn't need to parse through this file on your own, but it is available if you're seeking additional problem information." ] }, { @@ -1152,7 +1152,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3 (ipykernel)", + "display_name": "aviary", "language": "python", "name": "python3" }, @@ -1166,7 +1166,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.12.3" + "version": "3.12.9" } }, "nbformat": 4, diff --git a/aviary/docs/getting_started/onboarding_level2.ipynb b/aviary/docs/getting_started/onboarding_level2.ipynb index 0cbabe800..c081c0cdd 100644 --- a/aviary/docs/getting_started/onboarding_level2.ipynb +++ b/aviary/docs/getting_started/onboarding_level2.ipynb @@ -116,13 +116,11 @@ "- {glue:md}`phase_info`: not provided (and will be loaded from {glue:md}`phase_info_path`)\n", "- {glue:md}`optimizer`: {glue:md}`optimizer_default`\n", "- {glue:md}`objective_type`: {glue:md}`objective_type_default`\n", - "- {glue:md}`record_filename`: {glue:md}`record_filename_default`\n", - "- {glue:md}`restart_filename`: {glue:md}`restart_filename_default`)\n", + "- {glue:md}`restart_filename`: {glue:md}`restart_filename_default`\n", "- {glue:md}`max_iter`: {glue:md}`max_iter_default`\n", "- {glue:md}`run_driver`: {glue:md}`run_driver_default`\n", "- {glue:md}`make_plots`: {glue:md}`make_plots_default`\n", "- {glue:md}`phase_info_parameterization`: {glue:md}`phase_info_parameterization_default`\n", - "- {glue:md}`optimization_history_filename`: {glue:md}`optimization_history_filename_default`\n", "- {glue:md}`verbosity`: {glue:md}`verbosity_default`\n", "\n", "All the above arguments are straightforward except {glue:md}`objective_type`. Even though {glue:md}`objective_type` is `None`, it is not treated as `None`. In this scenario, the objective is set based on {glue:md}`problem_type` when using the {glue:md}`2DOF` mission method. There are three options for {glue:md}`problem_type` which is set to {glue:md}`SIZING` as default when aircraft is created. 
Aviary has the following mapping when user does not set {glue:md}`objective_type` but set `mission_method` to {glue:md}`2DOF` (in .csv file):\n", @@ -228,7 +226,6 @@ "aircraft_data = 'models/aircraft/test_aircraft/aircraft_for_bench_GwGm.csv'\n", "optimizer = 'IPOPT'\n", "objective_type = None\n", - "record_filename = 'aviary_history.db'\n", "restart_filename = None\n", "max_iter = 0\n", "phase_info = deepcopy(av.default_2DOF_phase_info)\n", @@ -263,7 +260,7 @@ "prob.setup()\n", "\n", "# run the problem we just set up\n", - "prob.run_aviary_problem(record_filename, restart_filename=restart_filename)" + "prob.run_aviary_problem(restart_filename=restart_filename)" ] }, { @@ -313,7 +310,6 @@ "aircraft_data = 'models/aircraft/test_aircraft/aircraft_for_bench_GwGm.csv'\n", "optimizer = 'IPOPT'\n", "objective_type = None\n", - "record_filename = 'aviary_history.db'\n", "restart_filename = None\n", "max_iter = 1\n", "\n", @@ -822,7 +818,7 @@ "id": "107f7407", "metadata": {}, "source": [ - "This is a simple wrapper of Dymos' [run_problem()](https://openmdao.github.io/dymos/api/run_problem_function.html) function. It allows the users to provide `record_filename`, `restart_filename`, `suppress_solver_print`, and `run_driver`. In our case, `record_filename` is changed to `aviary_history.db` and `restart_filename` is set to `None`. The rest of the arguments take default values. If a restart file name is provided, aviary (or dymos) will load the states, controls, and parameters as given in the provided case as the initial guess for the next run. We have discussed the `.db` file in [level 1 onboarding doc](onboarding_level1) and will discuss how to use it to generate useful output in [level 3 onboarding doc](onboarding_level3).\n", + "This is a simple wrapper of Dymos' [run_problem()](https://openmdao.github.io/dymos/api/run_problem_function.html) function. It allows users to provide `restart_filename`, `suppress_solver_print`, and `run_driver`. In our case, `restart_filename` is set to `None`. The rest of the arguments take default values. If a restart file name is provided, Aviary (or Dymos) will load the states, controls, and parameters as given in the provided case as the initial guess for the next run. 
We have discussed the `.db` file in [level 1 onboarding doc](onboarding_level1) and will discuss how to use it to generate useful output in [level 3 onboarding doc](onboarding_level3).\n", "\n", "Finally, we can add a few print statements for the variables that we are interested:\n" ] @@ -951,7 +947,6 @@ "mass_method = 'FLOPS'\n", "optimizer = 'SLSQP'\n", "objective_type = None\n", - "record_filename = 'history.db'\n", "restart_filename = None\n", "\n", "# Build problem\n", @@ -975,7 +970,7 @@ "\n", "prob.setup()\n", "\n", - "prob.run_aviary_problem(record_filename)\n", + "prob.run_aviary_problem()\n", "\n", "print('done')" ] @@ -1018,7 +1013,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3 (ipykernel)", + "display_name": "aviary", "language": "python", "name": "python3" }, @@ -1032,7 +1027,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.12.3" + "version": "3.12.9" } }, "nbformat": 4, diff --git a/aviary/docs/user_guide/aviary_commands.ipynb b/aviary/docs/user_guide/aviary_commands.ipynb index 661c37baf..62d56db6c 100644 --- a/aviary/docs/user_guide/aviary_commands.ipynb +++ b/aviary/docs/user_guide/aviary_commands.ipynb @@ -787,8 +787,6 @@ "source": [ "To use this utility, either a problem has been run or a run script is provided.\n", "\n", - "{glue:md}`--problem_recorder` is an input. Default is {glue:md}`problem_recorder_default`.\n", - "{glue:md}`--driver_recorder` is an optional input.\n", "{glue:md}`--port` is the dashboard server port ID. The default is {glue:md}`port_default` meaning any free port.\n", "{glue:md}`-b` or {glue:md}`--background` indicates to run in background. Default is `False`.\n", "{glue:md}`-d` or {glue:md}`--debug` indicates to show debugging output. Default is `False`.\n", @@ -845,7 +843,7 @@ ], "metadata": { "kernelspec": { - "display_name": "base", + "display_name": "aviary", "language": "python", "name": "python3" }, diff --git a/aviary/docs/user_guide/examples_of_the_same_mission_at_different_UI_levels.ipynb b/aviary/docs/user_guide/examples_of_the_same_mission_at_different_UI_levels.ipynb index 530b32745..fdec20b2b 100644 --- a/aviary/docs/user_guide/examples_of_the_same_mission_at_different_UI_levels.ipynb +++ b/aviary/docs/user_guide/examples_of_the_same_mission_at_different_UI_levels.ipynb @@ -174,9 +174,7 @@ "\n", "prob.setup()\n", "\n", - "prob.run_aviary_problem(\n", - " record_filename='level2_example.db', suppress_solver_print=True, make_plots=False\n", - ")" + "prob.run_aviary_problem(suppress_solver_print=True, make_plots=False)" ] }, { diff --git a/aviary/docs/user_guide/postprocessing_and_visualizing_results.ipynb b/aviary/docs/user_guide/postprocessing_and_visualizing_results.ipynb index 4a162a196..5bd5c18fc 100644 --- a/aviary/docs/user_guide/postprocessing_and_visualizing_results.ipynb +++ b/aviary/docs/user_guide/postprocessing_and_visualizing_results.ipynb @@ -51,32 +51,31 @@ "\n", "| **Section** | **File** | **Location** |\n", "|--------------|-----------------------------------------------|--------------------------------------------------------------------------------|\n", - "| Model | Input Checks | ./reports/*name_of_run_script*/input_checkss.md |\n", - "| Model | Inputs | ./reports/*name_of_run_script*/inputs.html |\n", - "| Model | Debug Input List | ./input_list.txt |\n", - "| Model | Debug Input List | ./output_list.txt |\n", - "| Model | N2 | ./reports/*name_of_run_script*/n2.html |\n", - "| Model | Trajectory Linkage Report | 
./reports/*name_of_run_script*/traj_linkage_report.html |\n", - "| Optimization | Driver Scaling Report | ./reports/*name_of_run_script*/driver_scaling_report.html |\n", - "| Optimization | Total Coloring Report | ./reports/*name_of_run_script*/total_coloring.html |\n", - "| Optimization | Optimization Report | ./reports/*name_of_run_script*/opt_report.html |\n", - "| Optimization | SNOPT Output (similarly for other optimizers) | ./reports/*name_of_run_script*/SNOPT_print.out |\n", - "| Optimization | Driver recording | Case Recorder file specified by `driver_recorder` command option |\n", - "| Results | Trajectory Results Report | ./reports/*name_of_run_script*/traj_results_report.html |\n", - "| Results | Subsystem Results | ./reports/subsystems/*name_of_subsystem.md (or .html)* |\n", - "| Results | Mission Results | ./reports/subsystems/mission_summary.md |\n", - "| Results | Problem final case recording | Case Recorder file specified by `problem_recorder` command option, default is {glue:md}`problem_recorder_default` |\n", - "\n", - "As an example of the workflow for the dashboard, assume that the user has run an Aviary script, {glue:md}`run_level2_example.py`, which records both the `Problem` final case and also all the cases of the optimization done by the [`Driver`](https://openmdao.org/newdocs/versions/latest/features/building_blocks/drivers/). The sample code can be found in {glue:md}`aviary/examples` folder. (To record both the Problem final case and also the Driver optimization iterations, the user must make use of the {glue:md}`optimization_history_filename` option in the call to {glue:md}`run_aviary_problem()`.)\n", + "| Model | Input Checks | ./*name_of_run_script*_out/reports/input_checks.md |\n", + "| Model | Inputs | ./*name_of_run_script*_out/reports/inputs.html |\n", + "| Model | Debug Input List | ./*name_of_run_script*_out/reports/input_list.txt |\n", + "| Model | Debug Output List | ./*name_of_run_script*_out/reports/output_list.txt |\n", + "| Model | N2 | ./*name_of_run_script*_out/reports/n2.html |\n", + "| Model | Trajectory Linkage Report | ./*name_of_run_script*_out/reports/traj_linkage_report.html |\n", + "| Optimization | Driver Scaling Report | ./*name_of_run_script*_out/reports/driver_scaling_report.html |\n", + "| Optimization | Total Coloring Report | ./*name_of_run_script*_out/reports/total_coloring.html |\n", + "| Optimization | Optimization Report | ./*name_of_run_script*_out/reports/opt_report.html |\n", + "| Optimization | SNOPT Output (similarly for other optimizers) | ./*name_of_run_script*_out/reports/SNOPT_print.out |\n", + "| Results | Trajectory Results Report | ./*name_of_run_script*_out/reports/traj_results_report.html |\n", + "| Results | Subsystem Results | ./*name_of_run_script*_out/reports/subsystems/*name_of_subsystem.md (or .html)* |\n", + "| Results | Mission Results | ./*name_of_run_script*_out/reports/subsystems/mission_summary.md |\n", + "| Results | Problem final case recording | ./*name_of_run_script*_out/problem_history.db |\n", + "\n", + "As an example of the workflow for the dashboard, assume that the user has run an Aviary script, {glue:md}`run_level2_example.py`, which records both the `Problem` final case and also all the cases of the optimization done by the [`Driver`](https://openmdao.org/newdocs/versions/latest/features/building_blocks/drivers/). The sample code can be found in the {glue:md}`aviary/examples` folder. 
(Both the Problem final case and the Driver optimization iterations are now recorded automatically by {glue:md}`run_aviary_problem()`; no extra arguments are needed.)\n", "\n", "```bash\n", "python level2_example.py\n", "```\n", "\n", - "In this example, the case recorder files are named `problem_final_case.db` and `driver_cases.db`, respectively. So after the run is completed, the user could run the dashboard using:\n", + "After the run is completed, the user can run the dashboard using:\n", "\n", "```bash\n", - "aviary dashboard level2_example --problem_recorder=problem_final_case.db --driver_recorder=driver_cases.db\n", + "aviary dashboard level2_example\n", "```\n", "\n", "```{note}\n", @@ -122,9 +121,7 @@ "file_name = 'run_level2_example'\n", "commands = [\n", " 'python ' + file_name + '.py',\n", - " 'aviary dashboard '\n", - " + file_name\n", - " + ' --problem_recorder=problem_final_case.db --driver_recorder=driver_cases.db --background',\n", + " 'aviary dashboard ' + file_name + ' --background',\n", "]\n", "with tempfile.TemporaryDirectory() as tempdir:\n", " os.chdir(tempdir)\n", @@ -150,14 +147,12 @@ "The Problem recorder file is required for the Aircraft 3d model tab to be displayed in the dashboard.\n", "```\n", "\n", - "The {glue:md}`--problem_recorder` and {glue:md}`--driver_recorder` options to the dashboard command are used to indicate the file names for those recorder files, if they are not the standard values of {glue:md}`problem_recorder_default` and {glue:md}`driver_recorder_default`, respectively. If {glue:md}`--driver_recorder` is set to the string `\"None\"`, then the driver case recorder file is ignored. This is useful if the user is not interested in seeing dashboard tabs related to driver history. If that file is large, it could unnecessarily be read and slow down the generation of the dashboard significantly.\n", - "\n", "### Saving and Sharing Dashboards\n", "\n", "The user can also save a dashboard and share it with other users to view. The dashboard is saved as a zip file. To save a dashboard to a file, use the {glue:md}`--save` option. For example, \n", "\n", "```bash\n", - "aviary dashboard --save --problem_recorder=problem_final_case.db --driver_recorder=driver_cases.db\n", + "aviary dashboard --save\n", "```\n", "\n", "By default, the zip file is named based on the name of the problem. So in this example, the saved zip file will be named {glue:md}`run_level2_example.zip`.\n", "\n", "If the user wants to save to a different file, they can provide that file name as an argument to the {glue:md}`--save` option as in this example:\n", "\n", "```bash\n", - "aviary dashboard --save saved_dashboard.zip level2_example --problem_recorder=problem_final_case.db --driver_recorder=driver_cases.db\n", + "aviary dashboard --save saved_dashboard.zip level2_example\n", "```\n", "\n", "In this case, the zip file will be named `saved_dashboard.zip`. \n", @@ -225,24 +220,7 @@ "\n", "### Database Output Files\n", "\n", - "There is an SQLite database output. By default, it is {glue:md}`problem_recorder_default`. It can be used to rerun your case though we do not detail that here. Users can write separate Python script to create user customized outputs and graphs. We will show how to use the this database to create user's customized graph in [the onboarding docs](../getting_started/onboarding)."
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "tags": [ - "remove-cell" - ] - }, - "outputs": [], - "source": [ - "# Testing Cell\n", - "from aviary.interface.methods_for_level2 import AviaryProblem\n", - "from aviary.utils.doctape import check_args\n", - "\n", - "check_args(AviaryProblem.run_aviary_problem, {'record_filename': 'problem_history.db'}, exact=False)" + "Aviary creates an SQLite database output called `problem_history.db`. It can be used to rerun your case, though we do not detail that here. Users can write a separate Python script to create user-customized outputs and graphs. We will show how to use this database to create a customized graph in [the onboarding docs](../getting_started/onboarding)." ] }, { @@ -306,7 +284,9 @@ " verbosity=verbosity,\n", " )\n", " sys.stdout = old_stdout\n", - " folder_contents = [f.name for f in os.scandir(tempdir)]\n", + " folder_contents = [\n", + " f.name for f in os.scandir(tempdir + '/aircraft_for_bench_FwFm_out/reports/')\n", + " ]\n", " all_files = []\n", " for p, d, f in os.walk(tempdir):\n", " all_files += f\n", @@ -409,7 +389,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3 (ipykernel)", + "display_name": "aviary", "language": "python", "name": "python3" }, @@ -423,7 +403,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.12.3" + "version": "3.12.9" } }, "nbformat": 4, diff --git a/aviary/examples/run_level2_with_detailed_landing.py b/aviary/examples/run_level2_with_detailed_landing.py index 6ae393ab9..251311473 100644 --- a/aviary/examples/run_level2_with_detailed_landing.py +++ b/aviary/examples/run_level2_with_detailed_landing.py @@ -167,13 +167,13 @@ prob.setup() -prob.run_aviary_problem(record_filename='detailed_landing.db') +prob.run_aviary_problem() try: loc = prob.get_outputs_dir() - cr = om.CaseReader(f'{loc}/detailed_landing.db') + cr = om.CaseReader(f'{loc}/problem_history.db') except: - cr = om.CaseReader('detailed_landing.db') + cr = om.CaseReader('problem_history.db') cases = cr.get_cases('problem') case = cases[0] diff --git a/aviary/examples/run_level2_with_detailed_takeoff.py b/aviary/examples/run_level2_with_detailed_takeoff.py index a3a497a0a..679f6beb0 100644 --- a/aviary/examples/run_level2_with_detailed_takeoff.py +++ b/aviary/examples/run_level2_with_detailed_takeoff.py @@ -327,13 +327,13 @@ prob.setup() - prob.run_aviary_problem(record_filename='detailed_takeoff.db', suppress_solver_print=True) + prob.run_aviary_problem(suppress_solver_print=True) try: loc = prob.get_outputs_dir() - cr = om.CaseReader(f'{loc}/detailed_takeoff.db') + cr = om.CaseReader(f'{loc}/problem_history.db') except: - cr = om.CaseReader('detailed_takeoff.db') + cr = om.CaseReader('problem_history.db') cases = cr.get_cases('problem') case = cases[0] diff --git a/aviary/examples/run_level3_example.py b/aviary/examples/run_level3_example.py index 4f153a468..e9b6faaec 100644 --- a/aviary/examples/run_level3_example.py +++ b/aviary/examples/run_level3_example.py @@ -1,16 +1,17 @@ +import warnings + +import dymos as dm +import openmdao.api as om + import aviary.api as av from aviary.core.pre_mission_group import PreMissionGroup -import openmdao.api as om -from aviary.models.missions.height_energy_default import phase_info -from aviary.variable_info.variables import Aircraft, Mission, Dynamic from aviary.mission.flops_based.phases.energy_phase import EnergyPhase -from aviary.variable_info.variable_meta_data import _MetaData as BaseMetaData 
+from aviary.models.missions.height_energy_default import phase_info from aviary.utils.aviary_values import AviaryValues -import dymos as dm from aviary.variable_info.enums import Verbosity -from aviary.variable_info.functions import setup_trajectory_params -import warnings -from aviary.variable_info.functions import setup_model_options +from aviary.variable_info.functions import setup_model_options, setup_trajectory_params +from aviary.variable_info.variable_meta_data import _MetaData as BaseMetaData +from aviary.variable_info.variables import Aircraft, Dynamic, Mission class L3SubsystemsGroup(om.Group): @@ -513,40 +514,9 @@ def initialize(self): prob.set_val(Mission.Design.GROSS_MASS, 175400, units='lbm') prob.set_val(Mission.Summary.GROSS_MASS, 175400, units='lbm') -prob.verbosity = Verbosity.VERBOSE +prob.verbosity = Verbosity.BRIEF prob.run_aviary_problem() -prob.model.list_vars(units=True, print_arrays=True) -prob.list_driver_vars( - print_arrays=True, - desvar_opts=[ - 'lower', - 'upper', - 'ref', - 'ref0', - 'indices', - 'adder', - 'scaler', - 'parallel_deriv_color', - 'cache_linear_solution', - 'units', - 'min', - 'max', - ], - cons_opts=[ - 'lower', - 'upper', - 'equals', - 'ref', - 'ref0', - 'indices', - 'adder', - 'scaler', - 'linear', - 'parallel_deriv_color', - 'cache_linear_solution', - 'units', - 'min', - 'max', - ], -) + +# prob.model.list_vars(units=True, print_arrays=True) +# prob.list_driver_vars(print_arrays=True) diff --git a/aviary/interface/methods_for_level1.py b/aviary/interface/methods_for_level1.py index 0f8324486..6ffc72903 100644 --- a/aviary/interface/methods_for_level1.py +++ b/aviary/interface/methods_for_level1.py @@ -14,13 +14,11 @@ def run_aviary( phase_info, optimizer=None, objective_type=None, - record_filename='problem_history.db', restart_filename=None, max_iter=50, run_driver=True, make_plots=True, phase_info_parameterization=None, - optimization_history_filename=None, verbosity=None, ): """ @@ -42,8 +40,6 @@ def run_aviary( The optimizer to use. objective_type : str, optional Type of the optimization objective. - record_filename : str, optional - Filename for recording the solution, defaults to 'dymos_solution.db'. restart_filename : str, optional Filename to use for restarting the optimization, if applicable. max_iter : int, optional @@ -55,9 +51,6 @@ def run_aviary( phase_info_parameterization : function, optional Additional information to parameterize the phase_info object based on desired cruise altitude and Mach. - optimization_history_filename : str or Path - The name of the database file where the driver iterations are to be recorded. The default is - None. verbosity : Verbosity or int, optional Sets level of information outputted to the terminal during model execution. If provided, overrides verbosity specified in aircraft_data. 
@@ -109,11 +102,9 @@ def run_aviary( prob.setup(verbosity=verbosity) prob.run_aviary_problem( - record_filename, restart_filename=restart_filename, run_driver=run_driver, make_plots=make_plots, - optimization_history_filename=optimization_history_filename, verbosity=verbosity, ) diff --git a/aviary/interface/methods_for_level2.py b/aviary/interface/methods_for_level2.py index dd74a62c0..afcff7ffb 100644 --- a/aviary/interface/methods_for_level2.py +++ b/aviary/interface/methods_for_level2.py @@ -1144,8 +1144,6 @@ def set_initial_guesses(self, parent_prob=None, parent_prefix='', verbosity=None def run_aviary_problem( self, - record_filename='problem_history.db', - optimization_history_filename=None, restart_filename=None, suppress_solver_print=True, run_driver=True, @@ -1159,12 +1157,6 @@ def run_aviary_problem( Parameters ---------- - record_filename : str, optional - The name of the database file where the solutions are to be recorded. The - default is "problem_history.db". - optimization_history_filename : str, None - The name of the database file where the driver iterations are to be - recorded. The default is None. restart_filename : str, optional The name of the file that contains previously computed solutions which are to be used as starting points for this run. If it is None (default), no @@ -1182,6 +1174,8 @@ def run_aviary_problem( False. make_plots : bool, optional If True (default), Dymos html plots will be generated as part of the output. + verbosity : Verbosity or int, optional + Controls the level of printouts for this method. """ # `self.verbosity` is "true" verbosity for entire run. `verbosity` is verbosity # override for just this method @@ -1193,9 +1187,12 @@ def run_aviary_problem( if verbosity >= Verbosity.VERBOSE: # VERBOSE, DEBUG self.final_setup() - with open('input_list.txt', 'w') as outfile: + with open(self.get_reports_dir() / 'input_list.txt', 'w') as outfile: self.model.list_inputs(out_stream=outfile) + recorder = om.SqliteRecorder('optimization_history.db') + self.driver.add_recorder(recorder) + # Creates a flag to determine if the user would or would not like a payload/range diagram payload_range_bool = False if self.problem_type is not ProblemType.MULTI_MISSION: @@ -1205,10 +1202,6 @@ def run_aviary_problem( if suppress_solver_print: self.set_solver_print(level=0) - if optimization_history_filename: - recorder = om.SqliteRecorder(optimization_history_filename) - self.driver.add_recorder(recorder) - # and run mission, and dynamics if run_driver: self.result = dm.run_problem( @@ -1216,7 +1209,7 @@ def run_aviary_problem( run_driver=run_driver, simulate=simulate, make_plots=make_plots, - solution_record_file=record_filename, + solution_record_file='problem_history.db', restart=restart_filename, ) @@ -1241,7 +1234,7 @@ def run_aviary_problem( ) if verbosity >= Verbosity.VERBOSE: # VERBOSE, DEBUG - with open('output_list.txt', 'w') as outfile: + with open(Path(self.get_reports_dir()) / 'output_list.txt', 'w') as outfile: self.model.list_outputs(out_stream=outfile) # Checks if the payload/range toggle in the aviary inputs csv file has been set and that the diff --git a/aviary/interface/test/test_height_energy_mission.py b/aviary/interface/test/test_height_energy_mission.py index 184a9d499..d8a254e2b 100644 --- a/aviary/interface/test/test_height_energy_mission.py +++ b/aviary/interface/test/test_height_energy_mission.py @@ -122,7 +122,6 @@ def run_mission(self, phase_info, optimizer): make_plots=self.make_plots, max_iter=self.max_iter, optimizer=optimizer, - 
optimization_history_filename='driver_test.db', verbosity=0, ) @@ -141,10 +140,7 @@ def test_mission_basic_and_dashboard(self): self.assertIsNotNone(prob) self.assertTrue(prob.result.success) - cmd = ( - 'aviary dashboard --problem_recorder dymos_solution.db --driver_recorder ' - f'driver_test.db {prob.driver._problem()._name}' - ) + cmd = f'aviary dashboard {prob.driver._problem()._name}' # this only tests that a given command line tool returns a 0 return code. It doesn't # check the expected output at all. The underlying functions that implement the # commands should be tested separately. diff --git a/aviary/validation_cases/benchmark_tests/test_0_iters.py b/aviary/validation_cases/benchmark_tests/test_0_iters.py index ae149f9e8..76a2fa218 100644 --- a/aviary/validation_cases/benchmark_tests/test_0_iters.py +++ b/aviary/validation_cases/benchmark_tests/test_0_iters.py @@ -3,12 +3,10 @@ from openmdao.utils.testing_utils import require_pyoptsparse, use_tempdirs -from aviary.models.missions.height_energy_default import ( - phase_info as height_energy_phase_info, -) -from aviary.models.missions.two_dof_default import phase_info as two_dof_phase_info from aviary.interface.methods_for_level2 import AviaryProblem from aviary.models.aircraft.advanced_single_aisle.advanced_single_aisle_data import inputs +from aviary.models.missions.height_energy_default import phase_info as height_energy_phase_info +from aviary.models.missions.two_dof_default import phase_info as two_dof_phase_info class BaseProblemPhaseTestCase(unittest.TestCase): @@ -27,7 +25,7 @@ def build_and_run_problem(self, input_filename, phase_info, objective_type=None) prob.add_design_variables() prob.add_objective(objective_type if objective_type else None) prob.setup() - prob.run_aviary_problem('dymos_solution.db', make_plots=False) + prob.run_aviary_problem(make_plots=False) @use_tempdirs diff --git a/aviary/visualization/dashboard.py b/aviary/visualization/dashboard.py index d584f94b6..f18cefc0c 100644 --- a/aviary/visualization/dashboard.py +++ b/aviary/visualization/dashboard.py @@ -1,10 +1,8 @@ import argparse -from collections import defaultdict import functools import importlib.util import json import os -from pathlib import Path import re import shutil import traceback @@ -104,21 +102,6 @@ def _dashboard_setup_parser(parser): nargs='*', help='Name of aviary script that was run (not including .py).', ) - - parser.add_argument( - '--problem_recorder', - type=str, - help='Problem case recorder file name', - dest='problem_recorder', - default='problem_history.db', - ) - parser.add_argument( - '--driver_recorder', - type=_none_or_str, - help='Driver case recorder file name. Set to None if file is ignored', - dest='driver_recorder', - default='driver_history.db', - ) parser.add_argument( '--port', dest='port', @@ -189,7 +172,7 @@ def _dashboard_cmd(options, user_args): if not options.force and report_dir_path.is_dir(): raise RuntimeError( f'The reports directory {report_dir_path} already exists. 
If you wish ' - 'to overrite the existing directory, use the --force option' + 'to overwrite the existing directory, use the --force option' ) if ( report_dir_path.is_dir() @@ -199,8 +182,6 @@ shutil.unpack_archive(options.script_name, report_dir_path) dashboard( report_dir_name, - options.problem_recorder, - options.driver_recorder, options.port, options.run_in_background, ) @@ -218,8 +201,6 @@ dashboard( options.script_name, - options.problem_recorder, - options.driver_recorder, options.port, options.run_in_background, ) @@ -461,7 +444,7 @@ return report_pane -def create_aviary_variables_table_data_nested(script_name, recorder_file): +def create_aviary_variables_table_data_nested(output_dir, recorder_file): """ Create a JSON file with information about Aviary variables. @@ -475,6 +458,8 @@ Parameters ---------- + output_dir : Path or str + Output directory of the run that contains the case recorder file. recorder_file : str Name of the recorder file containing the Problem cases. @@ -564,7 +549,7 @@ ) aviary_variables_file_path = ( - f'{script_name}_out/reports/aviary_vars/{aviary_variables_json_file_name}' + f'{output_dir}/reports/aviary_vars/{aviary_variables_json_file_name}' ) with open(aviary_variables_file_path, 'w') as fp: json.dump(table_data_nested, fp) @@ -652,7 +637,7 @@ ---------- recorder_file : str Name of the case recorder file. - reports_dir : str + reports_dir : Path Path of the directory containing the reports from the run. outfilepath : str The path to the location where the file should be created. @@ -667,7 +652,7 @@ # next to the HTML file shutil.copy( aviary_dir.joinpath('visualization/assets/aviary_airlines.png'), - Path(reports_dir) / 'aviary_airlines.png', + reports_dir / 'aviary_airlines.png', ) aircraft_3d_model = Aircraft3DModel(recorder_file) @@ -1138,7 +1123,7 @@ create_payload_range_frame(title, results_tabs_list, documentation, csv_file # The main script that generates all the tabs in the dashboard -def dashboard(script_name, problem_recorder, driver_recorder, port, run_in_background=False): +def dashboard(script_name, port=0, run_in_background=False): """ Generate the dashboard app display. Parameters ---------- script_name : str Name of the script file whose results will be displayed by this dashboard. - problem_recorder : str - Name of the recorder file containing the Problem cases. - driver_recorder : str or None - Name of the recorder file containing the Driver cases. If None, the driver tab will not be added port : int HTTP port used for the dashboard webapp. 
If 0, use any free port """ - reports_dir = f'{script_name}_out/reports/' - out_dir = f'{script_name}_out/' + out_dir = Path(f'{script_name}') + if not out_dir.exists(): + out_dir = Path(f'{script_name}_out') + if not out_dir.exists(): + raise FileNotFoundError(f"Output directory for '{script_name}' could not be found.") - if not Path(reports_dir).is_dir(): - raise ValueError( - f"The script name, '{script_name}', does not have a reports folder " - f"associated with it. The directory '{reports_dir}' does not exist." - ) + reports_dir = out_dir / 'reports' - problem_recorder_path = Path(out_dir) / problem_recorder + if not reports_dir.is_dir(): + raise FileNotFoundError(f"Reports directory could not be found in '{out_dir}'.") + + problem_recorder_path = Path(out_dir) / 'problem_history.db' if not os.path.isfile(problem_recorder_path): - issue_warning(f'Given Problem case recorder file {problem_recorder_path} does not exist.') + issue_warning(f'Problem case recorder file {problem_recorder_path} does not exist.') # TODO - use lists and functions to do this with a lot less code ####### Model Tab ####### @@ -1177,7 +1160,7 @@ def dashboard(script_name, problem_recorder, driver_recorder, port, run_in_backg model_tabs_list, 'Detailed checks on the model inputs.', 'markdown', - Path(reports_dir) / 'input_checks.md', + reports_dir / 'input_checks.md', ) # Debug Input List @@ -1185,15 +1168,18 @@ def dashboard(script_name, problem_recorder, driver_recorder, port, run_in_backg 'Debug Input List', model_tabs_list, """ - A plain text display of the model inputs. Recommended for beginners. Only created if Settings.VERBOSITY is set to at least 2 in the input deck. - The variables are listed in a tree structure. There are three columns. The left column is a list of variable names, - the middle column is the value, and the right column is the - promoted variable name. The hierarchy is phase, subgroups, components, and variables. An input variable can appear under - different phases and within different components. Its values can be different because its value has - been updated during the computation. On the top-left corner is the total number of inputs. - That number counts the duplicates because one variable can appear in different phases.""", + A plain text display of the model inputs. Recommended for beginners. Only created if + Settings.VERBOSITY is set to at least 2 (VERBOSE or higher) in the input deck. The variables + are listed in a tree structure. There are three columns. The left column is a list of + variable names, the middle column is the value, and the right column is the promoted + variable name. The hierarchy is phase, subgroups, components, and variables. An input + variable can appear under different phases and within different components. Its values can + be different because its value has been updated during the computation. On the top-left + corner is the total number of inputs. That number counts the duplicates because one variable + can appear in different phases. + """, 'text', - Path(reports_dir) / 'input_list.txt', + reports_dir / 'input_list.txt', ) # Debug Output List @@ -1201,15 +1187,18 @@ def dashboard(script_name, problem_recorder, driver_recorder, port, run_in_backg 'Debug Output List', model_tabs_list, """ - A plain text display of the model outputs. Recommended for beginners. Only created if Settings.VERBOSITY is set to at least 2 in the input deck. - The variables are listed in a tree structure. There are three columns. 
The left column is a list of variable names, - the middle column is the value, and the right column is the - promoted variable name. The hierarchy is phase, subgroups, components, and variables. An output variable can appear under - different phases and within different components. Its values can be different because its value has - been updated during the computation. On the top-left corner is the total number of outputs. - That number counts the duplicates because one variable can appear in different phases.""", + A plain text display of the model outputs. Recommended for beginners. Only created if + Settings.VERBOSITY is set to at least 2 (VERBOSE or higher) in the input deck. The variables + are listed in a tree structure. There are three columns. The left column is a list of + variable names, the middle column is the value, and the right column is the promoted + variable name. The hierarchy is phase, subgroups, components, and variables. An output + variable can appear under different phases and within different components. Its values can + be different because its value has been updated during the computation. On the top-left + corner is the total number of outputs. That number counts the duplicates because one + variable can appear in different phases. + """, 'text', - Path(reports_dir) / 'output_list.txt', + reports_dir / 'output_list.txt', ) # Inputs @@ -1218,7 +1207,7 @@ def dashboard(script_name, problem_recorder, driver_recorder, port, run_in_backg model_tabs_list, 'Detailed report on the model inputs.', 'html', - Path(reports_dir) / 'inputs.html', + reports_dir / 'inputs.html', ) # N2 @@ -1232,7 +1221,7 @@ def dashboard(script_name, problem_recorder, driver_recorder, port, run_in_backg It can be used to systematically identify, define, tabulate, design, and analyze functional and physical interfaces.""", 'html', - Path(reports_dir) / 'n2.html', + reports_dir / 'n2.html', ) # Trajectory Linkage @@ -1245,7 +1234,7 @@ def dashboard(script_name, problem_recorder, driver_recorder, port, run_in_backg It can be used to identify errant linkages between fixed quantities. """, 'html', - Path(reports_dir) / 'traj_linkage_report.html', + reports_dir / 'traj_linkage_report.html', ) # Driver scaling @@ -1259,22 +1248,22 @@ def dashboard(script_name, problem_recorder, driver_recorder, port, run_in_backg design variables (DV). """, 'html', - Path(reports_dir) / 'driver_scaling_report.html', + reports_dir / 'driver_scaling_report.html', ) ####### Optimization Tab ####### optimization_tabs_list = [] # Optimization History Plot - if driver_recorder: - if os.path.isfile(driver_recorder): - df = convert_driver_case_recorder_file_to_df(f'{driver_recorder}') - cr = om.CaseReader(f'{driver_recorder}') - opt_history_pane = create_optimization_history_plot(cr, df) - optimization_tabs_list.append(('Optimization History', opt_history_pane)) + opt_history_path = out_dir / 'optimization_history.db' + if opt_history_path.exists(): + df = convert_driver_case_recorder_file_to_df(opt_history_path) + cr = om.CaseReader(opt_history_path) + opt_history_pane = create_optimization_history_plot(cr, df) + optimization_tabs_list.append(('Optimization History', opt_history_pane)) # IPOPT report - if os.path.isfile(Path(reports_dir) / 'IPOPT.out'): + if os.path.isfile(reports_dir / 'IPOPT.out'): ipopt_pane = create_report_frame( 'IPOPT Output', optimization_tabs_list, @@ -1282,7 +1271,7 @@ def dashboard(script_name, problem_recorder, driver_recorder, port, run_in_backg This report is generated by the IPOPT optimizer. 
""", 'text', - Path(reports_dir) / 'IPOPT.out', + reports_dir / 'IPOPT.out', ) # Optimization report @@ -1294,11 +1283,11 @@ def dashboard(script_name, problem_recorder, driver_recorder, port, run_in_backg On the top is a summary of the optimization, followed by the objective, design variables, constraints, and optimizer settings. This report is important when dissecting optimal results produced by Aviary.""", 'html', - Path(reports_dir) / 'opt_report.html', + reports_dir / 'opt_report.html', ) # PyOpt report - if os.path.isfile(Path(reports_dir) / 'pyopt_solution.out'): + if os.path.isfile(reports_dir / 'pyopt_solution.out'): create_report_frame( 'PyOpt Solution', optimization_tabs_list, @@ -1306,11 +1295,11 @@ def dashboard(script_name, problem_recorder, driver_recorder, port, run_in_backg This report is generated by the pyOptSparse optimizer. """, 'text', - Path(reports_dir) / 'pyopt_solution.txt', + reports_dir / 'pyopt_solution.txt', ) # SNOPT report - if os.path.isfile(Path(reports_dir) / 'SNOPT_print.out'): + if os.path.isfile(reports_dir / 'SNOPT_print.out'): create_report_frame( 'SNOPT Output', optimization_tabs_list, @@ -1318,18 +1307,18 @@ def dashboard(script_name, problem_recorder, driver_recorder, port, run_in_backg This report is generated by the SNOPT optimizer. """, 'text', - Path(reports_dir) / 'SNOPT_print.out', + reports_dir / 'SNOPT_print.out', ) # SNOPT summary - if os.path.isfile(Path(reports_dir) / 'SNOPT_summary.out'): + if os.path.isfile(reports_dir / 'SNOPT_summary.out'): create_report_frame( 'SNOPT Summary', optimization_tabs_list, """ This is a report generated by the SNOPT optimizer that summarizes the optimization results.""", 'text', - Path(reports_dir) / 'SNOPT_summary.out', + reports_dir / 'SNOPT_summary.out', ) # Coloring report @@ -1338,7 +1327,7 @@ def dashboard(script_name, problem_recorder, driver_recorder, port, run_in_backg optimization_tabs_list, 'The report shows metadata associated with the creation of the coloring.', 'html', - Path(reports_dir) / 'total_coloring.html', + reports_dir / 'total_coloring.html', ) ####### Results Tab ####### @@ -1348,7 +1337,7 @@ def dashboard(script_name, problem_recorder, driver_recorder, port, run_in_backg if problem_recorder_path: if os.path.isfile(problem_recorder_path): try: - aircraft_3d_file = Path(reports_dir) / 'aircraft_3d.html' + aircraft_3d_file = reports_dir / 'aircraft_3d.html' create_aircraft_3d_file(problem_recorder_path, reports_dir, aircraft_3d_file) create_report_frame( 'Aircraft 3d model', @@ -1369,11 +1358,11 @@ def dashboard(script_name, problem_recorder, driver_recorder, port, run_in_backg # Make the Aviary variables table pane if os.path.isfile(problem_recorder_path): try: - # Make dir reports/script_name/aviary_vars if needed - aviary_vars_dir = Path(reports_dir) / 'aviary_vars' + # Make dir reports/out_dir/aviary_vars if needed + aviary_vars_dir = reports_dir / 'aviary_vars' aviary_vars_dir.mkdir(parents=True, exist_ok=True) - # copy index.html file to reports/script_name/aviary_vars/index.html + # copy index.html file to reports/out_dir/aviary_vars/index.html aviary_dir = Path(importlib.util.find_spec('aviary').origin).parent shutil.copy( @@ -1384,12 +1373,12 @@ def dashboard(script_name, problem_recorder, driver_recorder, port, run_in_backg aviary_dir.joinpath('visualization/assets/aviary_vars/script.js'), aviary_vars_dir.joinpath('script.js'), ) - # copy script.js file to reports/script_name/aviary_vars/index.html. + # copy script.js file to reports/out_dir/aviary_vars/index.html. 
# mod the script.js file to point at the json file # create the json file and put it in - # reports/script_name/aviary_vars/aviary_vars.json + # <out_dir>/reports/aviary_vars/aviary_vars.json create_aviary_variables_table_data_nested( - script_name, problem_recorder_path + out_dir, problem_recorder_path ) # create the json file except Exception as e: pane = _create_message_pane( @@ -1403,7 +1392,7 @@ results_tabs_list, 'Table showing Aviary variables.', 'html', - Path(reports_dir) / 'aviary_vars/index.html', + reports_dir / 'aviary_vars/index.html', ) # Mission Summary @@ -1412,7 +1401,7 @@ results_tabs_list, 'A report of mission results from an Aviary problem.', 'markdown', - Path(reports_dir) / 'mission_summary.md', + reports_dir / 'mission_summary.md', ) # Run status pane @@ -1420,7 +1409,7 @@ 'Run status pane', results_tabs_list, 'A high level overview of the status of the run.', - Path(reports_dir) / 'status.json', + reports_dir / 'status.json', ) run_status_pane_tab_number = len(results_tabs_list) - 1 @@ -1433,7 +1422,7 @@ Any value that is included in the timeseries data is included in this report. This data is useful for post-processing, especially those used for acoustic analysis. """, - Path(reports_dir) / 'mission_timeseries_data.csv', + reports_dir / 'mission_timeseries_data.csv', ) # Paylaod Range Output Pane @@ -1443,7 +1432,7 @@ """ Defines key operating points on the aircraft's payload-range envelope from Design and Fallout missions. """, - Path(reports_dir) / 'payload_range_data.csv', + reports_dir / 'payload_range_data.csv', ) # Trajectory results @@ -1460,7 +1449,7 @@ zooming into a particular region for details, etc. """, 'html', - Path(reports_dir) / 'traj_results_report.html', + reports_dir / 'traj_results_report.html', ) # Interactive XY plot of mission variables @@ -1531,7 +1520,7 @@ tabs.active = 0 # make the Results tab active initially # get status of run for display in the header of each page - status_string_for_header = get_run_status(Path(reports_dir) / 'status.json') + status_string_for_header = get_run_status(reports_dir / 'status.json') template = pn.template.FastListTemplate( title=f'Aviary Dashboard for {script_name}: {status_string_for_header}',
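For readers updating their own scripts to this change, here is a minimal sketch of the new recording workflow, assembled only from calls shown in this diff. The build sequence mirrors the onboarding examples; the specific input deck, the IPOPT optimizer, and `max_iter=0` are illustrative choices, not requirements.

```python
from copy import deepcopy

import openmdao.api as om

import aviary.api as av
from aviary.interface.methods_for_level2 import AviaryProblem

# Standard level 2 build sequence, unchanged by this PR.
phase_info = deepcopy(av.default_2DOF_phase_info)
prob = AviaryProblem()
prob.load_inputs('models/aircraft/test_aircraft/aircraft_for_bench_GwGm.csv', phase_info)
prob.check_and_preprocess_inputs()
prob.add_pre_mission_systems()
prob.add_phases()
prob.add_post_mission_systems()
prob.link_phases()
prob.add_driver('IPOPT', max_iter=0)
prob.add_design_variables()
prob.add_objective()
prob.setup()
prob.set_initial_guesses()

# No record_filename or optimization_history_filename arguments anymore:
# the solution is always recorded to problem_history.db, and the driver
# iterations to optimization_history.db, inside this run's *_out directory.
prob.run_aviary_problem(suppress_solver_print=True, make_plots=False)

# Read the recorded cases back from the output directory.
loc = prob.get_outputs_dir()
cr = om.CaseReader(f'{loc}/problem_history.db')
case = cr.get_cases('problem')[0]
```

Because the recorder files now have fixed names and locations, the dashboard needs only the script name: `aviary dashboard <name_of_run_script>`, with no `--problem_recorder` or `--driver_recorder` options.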