diff --git a/.ratexcludes b/.ratexcludes
index 8acb1c02ea..964a183a24 100644
--- a/.ratexcludes
+++ b/.ratexcludes
@@ -17,3 +17,4 @@
**/PACMAN/**
**/DataSpecification/**
**/spalloc/**
+**/unittests/**/*.rpt
diff --git a/spinn_front_end_common/data/fec_data_view.py b/spinn_front_end_common/data/fec_data_view.py
index 17a8d871d5..1dc40ae0c6 100644
--- a/spinn_front_end_common/data/fec_data_view.py
+++ b/spinn_front_end_common/data/fec_data_view.py
@@ -74,6 +74,7 @@ class _FecDataModel(object):
"_notification_protocol",
"_max_run_time_steps",
"_monitor_map",
+ "_reset_number",
"_run_number",
"_run_step",
"_simulation_time_step_ms",
@@ -111,6 +112,7 @@ def _clear(self):
self._n_boards_required = None
self._n_chips_required = None
self._none_labelled_edge_count = 0
+ self._reset_number = 0
self._run_number = None
self._simulation_time_step_ms = None
self._simulation_time_step_per_ms = None
@@ -414,6 +416,50 @@ def has_time_scale_factor(cls):
"""
return cls.__fec_data._time_scale_factor is not None
+ # reset number
+
+ @classmethod
+ def get_reset_number(cls):
+ """
+        Get the number of times a reset has happened.
+
+        Only the first reset after each run is counted, so a soft reset
+        followed by a hard reset counts once. Repeated reset calls without
+        an intervening run, and resets before the first run, are ignored.
+
+        Reset numbers start at zero.
+
+        :return: The reset number
+        :rtype: int
+        :raises ~spinn_utilities.exceptions.SpiNNUtilsException:
+            If the reset_number is currently unavailable
+ """
+ if cls.__fec_data._reset_number is None:
+            raise cls._exception("reset_number")
+ return cls.__fec_data._reset_number
+
+ @classmethod
+ def get_reset_str(cls):
+ """
+        Get the number of times a reset has happened, as a string.
+        A reset number of zero is returned as the empty string.
+
+        Only the first reset after each run is counted, so a soft reset
+        followed by a hard reset counts once. Repeated reset calls without
+        an intervening run, and resets before the first run, are ignored.
+
+        Reset numbers start at zero.
+
+        :raises ~spinn_utilities.exceptions.SpiNNUtilsException:
+            If the reset_number is currently unavailable
+ """
+ if cls.__fec_data._reset_number is None:
+            raise cls._exception("reset_number")
+ if cls.__fec_data._reset_number:
+ return str(cls.__fec_data._reset_number)
+ else:
+ return ""
+
# run number
@classmethod
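
A hedged sketch of one way the new reset string might be used, e.g. to give
per-reset file names; the base name below is made up for illustration, and
get_reset_str() returns "" before any counted reset so the first file keeps
its plain name.

```python
from spinn_front_end_common.data import FecDataView

def example_report_name(base="example_report", ext=".rpt"):
    # "example_report.rpt" before any counted reset, "example_report1.rpt" after one
    return f"{base}{FecDataView.get_reset_str()}{ext}"
```
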
diff --git a/spinn_front_end_common/data/fec_data_writer.py b/spinn_front_end_common/data/fec_data_writer.py
index 49d4fceb8a..b4451a6ca9 100644
--- a/spinn_front_end_common/data/fec_data_writer.py
+++ b/spinn_front_end_common/data/fec_data_writer.py
@@ -85,6 +85,8 @@ def finish_run(self):
@overrides(PacmanDataWriter._hard_reset)
def _hard_reset(self):
+ if self.is_ran_last():
+ self.__fec_data._reset_number += 1
PacmanDataWriter._hard_reset(self)
SpiNNManDataWriter._local_hard_reset(self)
self.__fec_data._hard_reset()
@@ -92,6 +94,8 @@ def _hard_reset(self):
@overrides(PacmanDataWriter._soft_reset)
def _soft_reset(self):
+ if self.is_ran_last():
+ self.__fec_data._reset_number += 1
PacmanDataWriter._soft_reset(self)
SpiNNManDataWriter._local_soft_reset(self)
self.__fec_data._soft_reset()
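
The is_ran_last() guard above is what gives the "only the first reset after
each run counts" rule described in the new docstrings. A self-contained toy
model of that rule (illustration only, not the real writer classes):

```python
# Standalone model of the reset-counting rule added in _hard_reset/_soft_reset.
class ResetCounterModel:
    def __init__(self):
        self.reset_number = 0
        self.ran_last = False

    def run(self):
        self.ran_last = True

    def reset(self):
        # Only the first reset after a run is counted.
        if self.ran_last:
            self.reset_number += 1
        self.ran_last = False

m = ResetCounterModel()
m.reset()                      # reset before any run: not counted
m.run(); m.reset()             # first reset after a run: counted
m.reset()                      # repeated reset without a run: not counted
m.run(); m.reset(); m.reset()  # counted once more
assert m.reset_number == 2
```
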
diff --git a/spinn_front_end_common/interface/abstract_spinnaker_base.py b/spinn_front_end_common/interface/abstract_spinnaker_base.py
index 1336d02de7..6077fe2522 100644
--- a/spinn_front_end_common/interface/abstract_spinnaker_base.py
+++ b/spinn_front_end_common/interface/abstract_spinnaker_base.py
@@ -68,6 +68,8 @@
AbstractVertexWithEdgeToDependentVertices,
AbstractCanReset)
from spinn_front_end_common.interface.buffer_management import BufferManager
+from spinn_front_end_common.interface.buffer_management.storage_objects \
+ import BufferDatabase
from spinn_front_end_common.interface.config_handler import ConfigHandler
from spinn_front_end_common.interface.interface_functions import (
application_finisher, application_runner,
@@ -98,7 +100,7 @@
host_no_bitfield_router_compression import (
ordered_covering_compression, pair_compression)
from spinn_front_end_common.interface.provenance import (
- FecTimer, ProvenanceWriter, TimerCategory, TimerWork)
+ FecTimer, GlobalProvenance, TimerCategory, TimerWork)
from spinn_front_end_common.interface.splitter_selectors import (
splitter_selector)
from spinn_front_end_common.interface.java_caller import JavaCaller
@@ -109,12 +111,12 @@
memory_map_on_host_chip_report, network_specification,
router_collision_potential_report,
routing_table_from_machine_report, tags_from_machine_report,
- write_json_machine, write_json_placements,
+ write_chip_active_report, write_json_machine, write_json_placements,
write_json_routing_tables, drift_report)
from spinn_front_end_common.utilities.iobuf_extractor import IOBufExtractor
from spinn_front_end_common.utilities.utility_objs import ExecutableType
from spinn_front_end_common.utility_models import (
- DataSpeedUpPacketGatherMachineVertex)
+ DataSpeedUpPacketGatherMachineVertex, ChipPowerMonitorMachineVertex)
from spinn_front_end_common.utilities.report_functions.reports import (
generate_comparison_router_report, partitioner_report,
placer_reports_with_application_graph,
@@ -439,6 +441,9 @@ def __run(self, run_time, sync_time):
self._do_mapping(total_run_time)
+ if not self._data_writer.is_ran_last():
+ self._execute_record_core_names()
+
# Check if anything has per-timestep SDRAM usage
is_per_timestep_sdram = self._is_per_timestep_sdram()
@@ -721,7 +726,7 @@ def _get_machine(self):
def _create_version_provenance(self):
""" Add the version information to the provenance data at the start.
"""
- with ProvenanceWriter() as db:
+ with GlobalProvenance() as db:
db.insert_version("spinn_utilities_version", spinn_utils_version)
db.insert_version("spinn_machine_version", spinn_machine_version)
db.insert_version("spalloc_version", spalloc_version)
@@ -888,6 +893,12 @@ def _do_placer(self, system_placements):
raise ConfigurationException(
f"Unexpected cfg setting placer: {name}")
+ def _execute_record_core_names(self):
+ with FecTimer(
+ "Record core names to databse", TimerWork.REPORT):
+ with BufferDatabase() as db:
+ db.store_vertex_labels()
+
def _execute_system_multicast_routing_generator(self):
"""
Runs, times and logs the SystemMulticastRoutingGenerator is required
@@ -2271,10 +2282,33 @@ def _print_iobuf(errors, warnings):
for error in errors:
logger.error(error)
+ def _execute_prepare_chip_power(self):
+ with FecTimer("Prepare Chip Power", TimerWork.REPORT) as timer:
+ if timer.skip_if_cfg_false("Reports", "write_energy_report"):
+ return
+ if timer.skip_if_virtual_board():
+ return
+            with BufferDatabase() as db:
+                db.store_chip_power_monitors(ChipPowerMonitorMachineVertex)
+
+ def _report_chip_active(self):
+ with FecTimer("Prepare Chip Power", TimerWork.REPORT) as timer:
+ if timer.skip_if_cfg_false("Reports", "write_energy_report"):
+ return
+ if timer.skip_if_virtual_board():
+ return
+ write_chip_active_report()
+
+ def _do_end_of_run(self):
+ if not self._data_writer.is_ran_last():
+ return
+ self._execute_prepare_chip_power()
+ self._report_chip_active()
+
def reset(self):
""" Code that puts the simulation back at time zero
"""
- FecTimer.start_category(TimerCategory.RESETTING)
if not self._data_writer.is_ran_last():
if not self._data_writer.is_ran_ever():
logger.error("Ignoring the reset before the run")
@@ -2282,12 +2316,10 @@ def reset(self):
logger.error("Ignoring the repeated reset call")
return
+ FecTimer.start_category(TimerCategory.RESETTING)
logger.info("Resetting")
- # rewind the buffers from the buffer manager, to start at the beginning
- # of the simulation again and clear buffered out
- if self._data_writer.has_buffer_manager():
- self._data_writer.get_buffer_manager().reset()
+ self._do_end_of_run()
if self._data_writer.get_user_accessed_machine():
logger.warning(
@@ -2297,6 +2329,11 @@ def reset(self):
else:
self._data_writer.soft_reset()
+ # rewind the buffers from the buffer manager, to start at the beginning
+ # of the simulation again and clear buffered out
+ if self._data_writer.has_buffer_manager():
+ self._data_writer.get_buffer_manager().reset()
+
# Reset the graph off the machine, to set things to time 0
self.__reset_graph_elements()
FecTimer.end_category(TimerCategory.RESETTING)
@@ -2368,6 +2405,8 @@ def stop(self):
set_config("Reports", "read_provenance_data", "True")
self._do_read_provenance()
+ self._do_end_of_run()
+
except Exception as e:
self._recover_from_error(e)
self.write_errored_file()
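
A hedged sketch of the guarded-step pattern the new end-of-run reporting steps
follow; the step name and config option below are placeholders, not part of
this change.

```python
from spinn_front_end_common.interface.buffer_management.storage_objects \
    import BufferDatabase
from spinn_front_end_common.interface.provenance import FecTimer, TimerWork

def _execute_example_end_of_run_step(self):
    # Skeleton only: mirrors _execute_prepare_chip_power above.
    with FecTimer("Example end of run step", TimerWork.REPORT) as timer:
        if timer.skip_if_cfg_false("Reports", "write_example_report"):
            return
        if timer.skip_if_virtual_board():
            return
        with BufferDatabase() as db:
            pass  # store or read whatever the step needs
```
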
diff --git a/spinn_front_end_common/interface/buffer_management/buffer_manager.py b/spinn_front_end_common/interface/buffer_management/buffer_manager.py
index d43517b740..7d2e7d9c23 100644
--- a/spinn_front_end_common/interface/buffer_management/buffer_manager.py
+++ b/spinn_front_end_common/interface/buffer_management/buffer_manager.py
@@ -91,9 +91,6 @@ class BufferManager(object):
# Dictionary of sender vertex -> buffers sent
"_sent_messages",
- # storage area for received data from cores
- "_db",
-
# Lock to avoid multiple messages being processed at the same time
"_thread_lock_buffer_out",
@@ -121,9 +118,6 @@ def __init__(self):
# Dictionary of sender vertex -> buffers sent
self._sent_messages = dict()
- # storage area for received data from cores
- self._db = BufferDatabase()
-
# Lock to avoid multiple messages being processed at the same time
self._thread_lock_buffer_out = threading.RLock()
self._thread_lock_buffer_in = threading.RLock()
@@ -303,9 +297,6 @@ def reset(self):
beginning of its expected regions and clears the buffered out\
data files.
"""
- #
- self._db.reset()
-
# rewind buffered in
for vertex in self._sender_vertices:
for region in vertex.get_regions():
@@ -328,7 +319,8 @@ def clear_recorded_data(self, x, y, p, recording_region_id):
:param int p: placement p coordinate
:param int recording_region_id: the recording region ID
"""
- self._db.clear_region(x, y, p, recording_region_id)
+ with BufferDatabase() as db:
+ db.clear_region(x, y, p, recording_region_id)
def _create_message_to_send(self, size, vertex, region):
""" Creates a single message to send with the given boundaries.
@@ -575,14 +567,15 @@ def __python_get_data_for_placements(self, recording_placements):
"""
:param ~pacman.model.placements.Placements recording_placements:
Where to get the data from.
- """
+ """
# get data
progress = ProgressBar(
len(recording_placements),
"Extracting buffers from the last run")
- for placement in progress.over(recording_placements):
- self._retreive_by_placement(placement)
+ with BufferDatabase() as db:
+ for placement in progress.over(recording_placements):
+ self._retreive_by_placement(db, placement)
def get_data_by_placement(self, placement, recording_region_id):
""" Get the data container for all the data retrieved\
@@ -602,12 +595,14 @@ def get_data_by_placement(self, placement, recording_region_id):
"so no data read".format(placement.vertex))
with self._thread_lock_buffer_out:
# data flush has been completed - return appropriate data
- return self._db.get_region_data(
- placement.x, placement.y, placement.p, recording_region_id)
+ with BufferDatabase() as db:
+ return db.get_region_data(
+ placement.x, placement.y, placement.p, recording_region_id)
- def _retreive_by_placement(self, placement):
+ def _retreive_by_placement(self, db, placement):
""" Retrieve the data for a vertex; must be locked first.
+        :param BufferDatabase db: database to store into
:param ~pacman.model.placements.Placement placement:
the placement to get the data from
:param int recording_region_id: desired recording data region
@@ -623,7 +618,7 @@ def _retreive_by_placement(self, placement):
size, addr, missing = sizes_and_addresses[region]
data = self._request_data(
placement.x, placement.y, addr, size)
- self._db.store_data_in_region_buffer(
+ db.store_data_in_region_buffer(
placement.x, placement.y, placement.p, region, missing, data)
def _get_region_information(self, addr, x, y, p):
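
With the long-lived `_db` slot gone, each operation now opens a short-lived
`BufferDatabase`. A minimal sketch of that pattern, with placeholder
coordinates:

```python
from spinn_front_end_common.interface.buffer_management.storage_objects \
    import BufferDatabase

def read_recorded_region(x=0, y=0, p=1, region=0):
    # BufferDatabase is a context manager; the connection closes on exit.
    with BufferDatabase() as db:
        data, missing = db.get_region_data(x, y, p, region)
    return data, missing
```
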
diff --git a/spinn_front_end_common/interface/buffer_management/storage_objects/__init__.py b/spinn_front_end_common/interface/buffer_management/storage_objects/__init__.py
index 8fe96807f3..e3f4d8211a 100644
--- a/spinn_front_end_common/interface/buffer_management/storage_objects/__init__.py
+++ b/spinn_front_end_common/interface/buffer_management/storage_objects/__init__.py
@@ -15,6 +15,6 @@
from .buffered_sending_region import BufferedSendingRegion
from .buffers_sent_deque import BuffersSentDeque
-from .sqllite_database import BufferDatabase
+from .buffer_database import BufferDatabase
__all__ = ["BufferedSendingRegion", "BuffersSentDeque", "BufferDatabase"]
diff --git a/spinn_front_end_common/interface/buffer_management/storage_objects/sqllite_database.py b/spinn_front_end_common/interface/buffer_management/storage_objects/buffer_database.py
similarity index 68%
rename from spinn_front_end_common/interface/buffer_management/storage_objects/sqllite_database.py
rename to spinn_front_end_common/interface/buffer_management/storage_objects/buffer_database.py
index 4bf8db2943..497944d893 100644
--- a/spinn_front_end_common/interface/buffer_management/storage_objects/sqllite_database.py
+++ b/spinn_front_end_common/interface/buffer_management/storage_objects/buffer_database.py
@@ -13,24 +13,20 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
-import os
import sqlite3
import time
-from spinn_utilities.abstract_context_manager import AbstractContextManager
from spinn_front_end_common.data import FecDataView
-from spinn_front_end_common.utilities.sqlite_db import SQLiteDB
+from spinn_front_end_common.utilities.base_database import BaseDatabase
-_DDL_FILE = os.path.join(os.path.dirname(__file__), "db.sql")
_SECONDS_TO_MICRO_SECONDS_CONVERSION = 1000
#: Name of the database in the data folder
-DB_FILE_NAME = "buffer.sqlite3"
def _timestamp():
return int(time.time() * _SECONDS_TO_MICRO_SECONDS_CONVERSION)
-class BufferDatabase(SQLiteDB, AbstractContextManager):
+class BufferDatabase(BaseDatabase):
""" Specific implementation of the Database for SQLite 3.
There should only ever be a single Database Object in use at any time.
@@ -48,57 +44,6 @@ class BufferDatabase(SQLiteDB, AbstractContextManager):
__slots__ = []
- def __init__(self, database_file=None):
- """
- :param str database_file:
- The name of a file that contains (or will contain) an SQLite
- database holding the data.
- If omitted the default location will be used.
- """
- if database_file is None:
- database_file = self.default_database_file()
-
- super().__init__(database_file, ddl_file=_DDL_FILE)
-
- @classmethod
- def default_database_file(cls):
- return os.path.join(
- FecDataView.get_run_dir_path(), DB_FILE_NAME)
-
- def reset(self):
- """
- UGLY SHOULD NOT NEVER DELETE THE FILE!
-
- .. note::
- This method will be removed when the database moves to
- keeping data after reset.
-
- :rtype: None
- """
- database_file = self.default_database_file()
- self.close()
- if os.path.exists(database_file):
- os.remove(database_file)
- super().__init__(database_file, ddl_file=_DDL_FILE)
-
- def clear(self):
- """ Clears the data for all regions.
-
- .. note::
- This method will be removed when the database moves to
- keeping data after reset.
-
- :rtype: None
- """
- with self.transaction() as cursor:
- cursor.execute(
- """
- UPDATE region SET
- content = CAST('' AS BLOB), content_len = 0,
- fetches = 0, append_time = NULL
- """)
- cursor.execute("DELETE FROM region_extra")
-
def clear_region(self, x, y, p, region):
""" Clears the data for a single region.
@@ -182,28 +127,6 @@ def __read_contents(self, cursor, x, y, p, region):
data = c_buffer
return memoryview(data)
- @staticmethod
- def __get_core_id(cursor, x, y, p):
- """
- :param ~sqlite3.Cursor cursor:
- :param int x:
- :param int y:
- :param int p:
- :rtype: int
- """
- for row in cursor.execute(
- """
- SELECT core_id FROM region_view
- WHERE x = ? AND y = ? AND processor = ?
- LIMIT 1
- """, (x, y, p)):
- return row["core_id"]
- cursor.execute(
- """
- INSERT INTO core(x, y, processor) VALUES(?, ?, ?)
- """, (x, y, p))
- return cursor.lastrowid
-
def __get_region_id(self, cursor, x, y, p, region):
"""
:param ~sqlite3.Cursor cursor:
@@ -220,7 +143,7 @@ def __get_region_id(self, cursor, x, y, p, region):
LIMIT 1
""", (x, y, p, region)):
return row["region_id"]
- core_id = self.__get_core_id(cursor, x, y, p)
+ core_id = self._get_core_id(cursor, x, y, p)
cursor.execute(
"""
INSERT INTO region(
@@ -274,7 +197,7 @@ def store_data_in_region_buffer(self, x, y, p, region, missing, data):
region_id, content, content_len)
VALUES (?, CAST(? AS BLOB), ?)
""", (region_id, datablob, len(data)))
- assert cursor.rowcount == 1
+ assert cursor.rowcount == 1
def __use_main_table(self, cursor, region_id):
"""
@@ -314,3 +237,115 @@ def get_region_data(self, x, y, p, region):
return data, False
except LookupError:
return memoryview(b''), True
+
+ def _set_core_name(self, cursor, x, y, p, core_name):
+ """
+ :param ~sqlite3.Cursor cursor:
+ :param int x:
+ :param int y:
+ :param int p:
+ :param str core_name:
+ """
+ try:
+ cursor.execute(
+ """
+ INSERT INTO core (x, y, processor, core_name)
+ VALUES (?, ?, ? ,?)
+ """, (x, y, p, core_name))
+ except sqlite3.IntegrityError:
+ cursor.execute(
+ """
+ UPDATE core SET core_name = ?
+ WHERE x = ? AND y = ? and processor = ?
+ """, (core_name, x, y, p))
+
+ def store_vertex_labels(self):
+ """
+        Stores the names of all cores, including the monitor cores.
+
+ """
+ with self.transaction() as cursor:
+ for placement in FecDataView.iterate_placemements():
+ self._set_core_name(cursor, placement.x, placement.y,
+ placement.p, placement.vertex.label)
+ for chip in FecDataView.get_machine().chips:
+ for processor in chip.processors:
+ if processor.is_monitor:
+ self._set_core_name(
+ cursor, chip.x, chip.y, processor.processor_id,
+ f"SCAMP(OS)_{chip.x}:{chip.y}")
+
+ def get_core_name(self, x, y, p):
+ """
+        Returns the name of the vertex or monitor running on the core
+
+ :param int x:
+ :param int y:
+ :param int p:
+        :return: The vertex label, a monitor name, or None if nothing is
+            known to be running there
+ :rtype: str or None
+ """
+ with self.transaction() as cursor:
+ for row in cursor.execute(
+ """
+ SELECT core_name
+ FROM core
+ WHERE x = ? AND y = ? and processor = ?
+ """, (x, y, p)):
+ if row["core_name"]:
+ return str(row["core_name"], 'utf8')
+ else:
+ return None
+
+ def store_chip_power_monitors(self, monitor_class):
+ """
+ Store the existence and sampling frequency of all chip power monitors
+
+        :param type monitor_class: The class of chip power monitor vertices,
+            passed in as a parameter to avoid a circular import here
+ """
+ with self.transaction() as cursor:
+ cursor.execute(
+ """
+ CREATE TABLE IF NOT EXISTS chip_power_monitors(
+ cpm_id INTEGER PRIMARY KEY autoincrement,
+ core_id INTEGER NOT NULL
+ REFERENCES core(core_id) ON DELETE RESTRICT,
+ sampling_frequency FLOAT NOT NULL)
+ """)
+
+ cursor.execute(
+ """
+ CREATE VIEW IF NOT EXISTS chip_power_monitors_view AS
+ SELECT core_id, x, y, processor, sampling_frequency
+ FROM core NATURAL JOIN chip_power_monitors
+ """)
+
+ for placement in FecDataView.iterate_placements_by_vertex_type(
+ monitor_class):
+ core_id = self._get_core_id(
+ cursor, placement.x, placement.y, placement.p)
+ cursor.execute(
+ """
+ REPLACE INTO chip_power_monitors(
+ core_id, sampling_frequency)
+ VALUES (?, ?)
+ """, (core_id, placement.vertex.sampling_frequency))
+ assert cursor.rowcount == 1
+
+ def iterate_chip_power_monitor_cores(self):
+ """
+        Iterates over the chip power monitor cores.
+
+        :return: yields dict-like rows containing "x", "y", "processor"
+            and "sampling_frequency" fields
+        :rtype: iterable(~sqlite3.Row)
+ """
+ with self.transaction() as cursor:
+ for row in cursor.execute(
+ """
+ SELECT x, y, processor, sampling_frequency
+ FROM chip_power_monitors_view
+ ORDER BY core_id
+ """):
+ yield row
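
A hedged usage sketch of the new queries, assuming store_vertex_labels() and
store_chip_power_monitors() have already been called during the run; the
coordinates are placeholders.

```python
from spinn_front_end_common.interface.buffer_management.storage_objects \
    import BufferDatabase

with BufferDatabase() as db:
    # Vertex label, "SCAMP(OS)_x:y" for a monitor core, or None if unknown
    print(db.get_core_name(0, 0, 1))
    for row in db.iterate_chip_power_monitor_cores():
        print(row["x"], row["y"], row["processor"], row["sampling_frequency"])
```
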
diff --git a/spinn_front_end_common/interface/buffer_management/storage_objects/db.sql b/spinn_front_end_common/interface/buffer_management/storage_objects/db.sql
deleted file mode 100644
index b75bd93cb6..0000000000
--- a/spinn_front_end_common/interface/buffer_management/storage_objects/db.sql
+++ /dev/null
@@ -1,68 +0,0 @@
--- Copyright (c) 2018-2019 The University of Manchester
---
--- This program is free software: you can redistribute it and/or modify
--- it under the terms of the GNU General Public License as published by
--- the Free Software Foundation, either version 3 of the License, or
--- (at your option) any later version.
---
--- This program is distributed in the hope that it will be useful,
--- but WITHOUT ANY WARRANTY; without even the implied warranty of
--- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
--- GNU General Public License for more details.
---
--- You should have received a copy of the GNU General Public License
--- along with this program. If not, see .
-
--- https://www.sqlite.org/pragma.html#pragma_synchronous
-PRAGMA main.synchronous = OFF;
-
--- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
--- A table describing the cores.
-CREATE TABLE IF NOT EXISTS core(
- core_id INTEGER PRIMARY KEY AUTOINCREMENT,
- x INTEGER NOT NULL,
- y INTEGER NOT NULL,
- processor INTEGER NOT NULL);
--- Every processor has a unique ID
-CREATE UNIQUE INDEX IF NOT EXISTS coreSanity ON core(
- x ASC, y ASC, processor ASC);
-
-
--- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
--- A table describing recording regions.
-CREATE TABLE IF NOT EXISTS region(
- region_id INTEGER PRIMARY KEY AUTOINCREMENT,
- core_id INTEGER NOT NULL
- REFERENCES core(core_id) ON DELETE RESTRICT,
- local_region_index INTEGER NOT NULL,
- address INTEGER,
- content BLOB NOT NULL DEFAULT '',
- content_len INTEGER DEFAULT 0,
- fetches INTEGER NOT NULL DEFAULT 0,
- append_time INTEGER);
--- Every recording region has a unique vertex and index
-CREATE UNIQUE INDEX IF NOT EXISTS regionSanity ON region(
- core_id ASC, local_region_index ASC);
-
--- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
--- A table containing the data which doesn't fit in the content column of the
--- region table; care must be taken with this to not exceed 1GB! We actually
--- store one per auto-pause-resume cycle as that is more efficient.
-CREATE TABLE IF NOT EXISTS region_extra(
- extra_id INTEGER PRIMARY KEY ASC AUTOINCREMENT,
- region_id INTEGER NOT NULL
- REFERENCES region(region_id) ON DELETE RESTRICT,
- content BLOB NOT NULL DEFAULT '',
- content_len INTEGER DEFAULT 0);
-
-CREATE VIEW IF NOT EXISTS region_view AS
- SELECT core_id, region_id, x, y, processor, local_region_index, address,
- content, content_len, fetches, append_time,
- (fetches > 1) AS have_extra
-FROM core NATURAL JOIN region;
-
-CREATE VIEW IF NOT EXISTS extra_view AS
- SELECT core_id, region_id, extra_id, x, y, processor, local_region_index,
- address, append_time, region_extra.content AS content,
- region_extra.content_len AS content_len
-FROM core NATURAL JOIN region NATURAL JOIN region_extra;
diff --git a/spinn_front_end_common/interface/interface_functions/compute_energy_used.py b/spinn_front_end_common/interface/interface_functions/compute_energy_used.py
index 70ad2aec28..15456051d0 100644
--- a/spinn_front_end_common/interface/interface_functions/compute_energy_used.py
+++ b/spinn_front_end_common/interface/interface_functions/compute_energy_used.py
@@ -17,7 +17,7 @@
from spinn_utilities.config_holder import (get_config_int, get_config_str)
from spinn_front_end_common.data import FecDataView
from spinn_front_end_common.interface.provenance import (
- ProvenanceReader, TimerCategory, TimerWork)
+ GlobalProvenance, ProvenanceReader, TimerCategory, TimerWork)
from spinn_front_end_common.utilities.utility_objs import PowerUsed
from spinn_front_end_common.utility_models import (
ChipPowerMonitorMachineVertex)
@@ -71,14 +71,14 @@ def compute_energy_used(machine_allocation_controller=None):
FecDataView.get_current_run_timesteps() *
FecDataView.get_time_scale_factor())
machine = FecDataView.get_machine()
- db = ProvenanceReader()
- dsg_time = db.get_category_timer_sum(TimerCategory.DATA_GENERATION)
- execute_time = db.get_category_timer_sum(TimerCategory.RUN_LOOP)
- # NOTE: this extraction time is part of the execution time; it does not
- # refer to the time taken in e.g. pop.get_data() or projection.get()
- extraction_time = db.get_timer_sum_by_work(TimerWork.EXTRACT_DATA)
- load_time = db.get_category_timer_sum(TimerCategory.LOADING)
- mapping_time = db.get_category_timer_sum(TimerCategory.MAPPING)
+ with GlobalProvenance() as db:
+ dsg_time = db.get_category_timer_sum(TimerCategory.DATA_GENERATION)
+ execute_time = db.get_category_timer_sum(TimerCategory.RUN_LOOP)
+ # NOTE: this extraction time is part of the execution time; it does not
+ # refer to the time taken in e.g. pop.get_data() or projection.get()
+ extraction_time = db.get_timer_sum_by_work(TimerWork.EXTRACT_DATA)
+ load_time = db.get_category_timer_sum(TimerCategory.LOADING)
+ mapping_time = db.get_category_timer_sum(TimerCategory.MAPPING)
# TODO get_machine not include here
power_used = PowerUsed()
@@ -187,13 +187,14 @@ def _router_packet_energy(power_used):
:param PowerUsed power_used:
"""
energy_cost = 0.0
- for name, cost in _COST_PER_TYPE.items():
- data = ProvenanceReader().get_router_by_chip(name)
- for (x, y, value) in data:
- this_cost = value * cost
- energy_cost += this_cost
- if this_cost:
- power_used.add_router_active_energy(x, y, this_cost)
+ with ProvenanceReader() as db:
+ for name, cost in _COST_PER_TYPE.items():
+ data = db.get_router_by_chip(name)
+ for (x, y, value) in data:
+ this_cost = value * cost
+ energy_cost += this_cost
+ if this_cost:
+ power_used.add_router_active_energy(x, y, this_cost)
power_used.packet_joules = energy_cost
@@ -359,8 +360,8 @@ def _calculate_loading_energy(machine, load_time_ms, n_monitors, n_frames):
# pylint: disable=too-many-arguments
# find time in milliseconds
- reader = ProvenanceReader()
- total_time_ms = reader.get_timer_sum_by_category(TimerCategory.LOADING)
+ with GlobalProvenance() as db:
+ total_time_ms = db.get_timer_sum_by_category(TimerCategory.LOADING)
# handle monitor core active cost
@@ -405,8 +406,8 @@ def _calculate_data_extraction_energy(machine, n_monitors, n_frames):
# find time
# TODO is this what was desired
total_time_ms = 0
- buffer_time_ms = ProvenanceReader().get_timer_sum_by_work(
- TimerWork.EXTRACT_DATA)
+ with GlobalProvenance() as db:
+ buffer_time_ms = db.get_timer_sum_by_work(TimerWork.EXTRACT_DATA)
energy_cost = 0
# NOTE: Buffer time could be None if nothing was set to record
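
A short hedged sketch of the same category and work-type reads done through
the new `GlobalProvenance` context manager, mirroring the calls added above:

```python
from spinn_front_end_common.interface.provenance import (
    GlobalProvenance, TimerCategory, TimerWork)

with GlobalProvenance() as db:
    mapping_time = db.get_category_timer_sum(TimerCategory.MAPPING)
    on_time, off_time = db.get_category_timer_sums(TimerCategory.RUN_LOOP)
    extraction_time = db.get_timer_sum_by_work(TimerWork.EXTRACT_DATA)
```
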
diff --git a/spinn_front_end_common/interface/interface_functions/energy_provenance_reporter.py b/spinn_front_end_common/interface/interface_functions/energy_provenance_reporter.py
index 90726dfc63..0f0cdc56ff 100644
--- a/spinn_front_end_common/interface/interface_functions/energy_provenance_reporter.py
+++ b/spinn_front_end_common/interface/interface_functions/energy_provenance_reporter.py
@@ -47,8 +47,6 @@ def energy_provenance_reporter(power_used):
db.insert_core(
x, y, p, "Energy (Joules)",
power_used.get_core_active_energy_joules(x, y, p))
- if p == 0:
- db.add_core_name(x, y, p, "SCAMP(OS)")
for x, y in power_used.active_routers:
db.insert_router(
x, y, "Energy (Joules)",
diff --git a/spinn_front_end_common/interface/interface_functions/placements_provenance_gatherer.py b/spinn_front_end_common/interface/interface_functions/placements_provenance_gatherer.py
index 40fa932abb..41ef888afc 100644
--- a/spinn_front_end_common/interface/interface_functions/placements_provenance_gatherer.py
+++ b/spinn_front_end_common/interface/interface_functions/placements_provenance_gatherer.py
@@ -18,7 +18,7 @@
from spinn_utilities.log import FormatAdapter
from spinn_utilities.progress_bar import ProgressBar
from spinn_front_end_common.interface.provenance import (
- AbstractProvidesProvenanceDataFromMachine, ProvenanceWriter)
+ AbstractProvidesProvenanceDataFromMachine)
logger = FormatAdapter(logging.getLogger(__name__))
@@ -56,9 +56,6 @@ def _add_placement_provenance(placement, errors):
# get data
try:
placement.vertex.get_provenance_data_from_machine(placement)
- with ProvenanceWriter() as db:
- db.add_core_name(placement.x, placement.y, placement.p,
- placement.vertex.label)
except Exception: # pylint: disable=broad-except
errors.append(traceback.format_exc())
diff --git a/spinn_front_end_common/interface/interface_functions/spalloc_allocator.py b/spinn_front_end_common/interface/interface_functions/spalloc_allocator.py
index 05b3c15e7b..32de40d6dc 100644
--- a/spinn_front_end_common/interface/interface_functions/spalloc_allocator.py
+++ b/spinn_front_end_common/interface/interface_functions/spalloc_allocator.py
@@ -166,7 +166,8 @@ def _launch_checked_job(n_boards, spalloc_kw_args):
connections = job.connections
info = str(connections).replace("{", "[").replace("}", "]")
logger.info("boards: " + info)
- ProvenanceWriter().insert_board_provenance(connections)
+ with ProvenanceWriter() as db:
+ db.insert_board_provenance(connections)
if hostname in avoid_boards:
avoid_jobs.append(job)
logger.warning(
diff --git a/spinn_front_end_common/interface/java_caller.py b/spinn_front_end_common/interface/java_caller.py
index fc8a6f4f48..1f8c765ea8 100644
--- a/spinn_front_end_common/interface/java_caller.py
+++ b/spinn_front_end_common/interface/java_caller.py
@@ -28,7 +28,8 @@
from spinn_front_end_common.utilities.exceptions import ConfigurationException
from spinn_front_end_common.interface.buffer_management.buffer_models import (
AbstractReceiveBuffersToHost)
-
+from spinn_front_end_common.interface.buffer_management.storage_objects \
+ import BufferDatabase
logger = FormatAdapter(logging.getLogger(__name__))
@@ -44,8 +45,6 @@ class JavaCaller(object):
__slots__ = [
"_chipxy_by_ethernet",
- # The folder holding sqlite databases etc.
- "_report_folder",
# The call to get java to work. Including the path if required.
"_java_call",
# The location of the java jar file
@@ -72,7 +71,6 @@ def __init__(self):
:raise ConfigurationException: if simple parameter checking fails.
"""
self._recording = None
- self._report_folder = FecDataView.get_run_dir_path()
self._java_call = get_config_str("Java", "java_call")
result = subprocess.call([self._java_call, '-version'])
if result != 0:
@@ -175,15 +173,6 @@ def _machine_json(self):
self._machine_json_path = write_json_machine(progress_bar=False)
return self._machine_json_path
- def set_report_folder(self, report_folder):
- """ Passes the database file in.
-
- :param str report_folder:
- Path to directory with SQLite databases and into which java will
- write.
- """
- self._report_folder = report_folder
-
def set_placements(self, used_placements):
""" Passes in the placements leaving this class to decide pass it to
Java.
@@ -356,13 +345,16 @@ def get_all_data(self):
if self._gatherer_iptags is None:
result = self._run_java(
'download', self._placement_json, self._machine_json(),
- self._report_folder)
+ BufferDatabase.default_database_file(),
+ FecDataView.get_run_dir_path())
else:
result = self._run_java(
'gather', self._placement_json, self._machine_json(),
- self._report_folder)
+ BufferDatabase.default_database_file(),
+ FecDataView.get_run_dir_path())
if result != 0:
- log_file = os.path.join(self._report_folder, "jspin.log")
+ log_file = os.path.join(
+ FecDataView.get_run_dir_path(), "jspin.log")
raise PacmanExternalAlgorithmFailedToCompleteException(
"Java call exited with value " + str(result) + " see "
+ str(log_file) + " for logged info")
@@ -374,9 +366,10 @@ def execute_data_specification(self):
On failure of the Java code.
"""
result = self._run_java(
- 'dse', self._machine_json(), self._report_folder)
+ 'dse', self._machine_json(), FecDataView.get_run_dir_path())
if result != 0:
- log_file = os.path.join(self._report_folder, "jspin.log")
+ log_file = os.path.join(
+ FecDataView.get_run_dir_path(), "jspin.log")
raise PacmanExternalAlgorithmFailedToCompleteException(
"Java call exited with value " + str(result) + " see "
+ str(log_file) + " for logged info")
@@ -389,9 +382,10 @@ def execute_system_data_specification(self):
On failure of the Java code.
"""
result = self._run_java(
- 'dse_sys', self._machine_json(), self._report_folder)
+ 'dse_sys', self._machine_json(), FecDataView.get_run_dir_path())
if result != 0:
- log_file = os.path.join(self._report_folder, "jspin.log")
+ log_file = os.path.join(
+ FecDataView.get_run_dir_path(), "jspin.log")
raise PacmanExternalAlgorithmFailedToCompleteException(
"Java call exited with value " + str(result) + " see "
+ str(log_file) + " for logged info")
@@ -411,12 +405,14 @@ def execute_app_data_specification(self, use_monitors):
if use_monitors:
result = self._run_java(
'dse_app_mon', self._placement_json, self._machine_json(),
- self._report_folder, self._report_folder)
+ FecDataView.get_run_dir_path(), FecDataView.get_run_dir_path())
else:
result = self._run_java(
- 'dse_app', self._machine_json(), self._report_folder)
+ 'dse_app', self._machine_json(),
+ FecDataView.get_run_dir_path())
if result != 0:
- log_file = os.path.join(self._report_folder, "jspin.log")
+ log_file = os.path.join(
+ FecDataView.get_run_dir_path(), "jspin.log")
raise PacmanExternalAlgorithmFailedToCompleteException(
"Java call exited with value " + str(result) + " see "
+ str(log_file) + " for logged info")
diff --git a/spinn_front_end_common/interface/provenance/__init__.py b/spinn_front_end_common/interface/provenance/__init__.py
index 7139b535a6..96a307f0ec 100644
--- a/spinn_front_end_common/interface/provenance/__init__.py
+++ b/spinn_front_end_common/interface/provenance/__init__.py
@@ -18,6 +18,7 @@
from .abstract_provides_provenance_data_from_machine import (
AbstractProvidesProvenanceDataFromMachine)
from .fec_timer import FecTimer
+from .global_provenance import GlobalProvenance
from .log_store_db import LogStoreDB
from .provenance_reader import ProvenanceReader
from .provides_provenance_data_from_machine_impl import (
@@ -27,6 +28,7 @@
from .timer_work import TimerWork
__all__ = ["AbstractProvidesLocalProvenanceData", "FecTimer",
+ "GlobalProvenance",
"AbstractProvidesProvenanceDataFromMachine", "LogStoreDB",
"ProvenanceReader", "ProvenanceWriter",
"ProvidesProvenanceDataFromMachineImpl",
diff --git a/spinn_front_end_common/interface/provenance/fec_timer.py b/spinn_front_end_common/interface/provenance/fec_timer.py
index 7056bc4095..3c20ce08ab 100644
--- a/spinn_front_end_common/interface/provenance/fec_timer.py
+++ b/spinn_front_end_common/interface/provenance/fec_timer.py
@@ -20,8 +20,7 @@
from spinn_utilities.config_holder import (get_config_bool)
from spinn_utilities.log import FormatAdapter
from spinn_front_end_common.data import FecDataView
-from spinn_front_end_common.interface.provenance.provenance_writer import (
- ProvenanceWriter)
+from .global_provenance import GlobalProvenance
logger = FormatAdapter(logging.getLogger(__name__))
@@ -108,7 +107,7 @@ def _report(self, message):
def skip(self, reason):
message = f"{self._algorithm} skipped as {reason}"
timedelta = self._stop_timer()
- with ProvenanceWriter() as db:
+ with GlobalProvenance() as db:
db.insert_timing(self._category_id, self._algorithm, self._work,
timedelta, reason)
self._report(message)
@@ -157,7 +156,7 @@ def skip_if_cfgs_false(self, section, option1, option2):
def error(self, reason):
timedelta = self._stop_timer()
message = f"{self._algorithm} failed after {timedelta} as {reason}"
- with ProvenanceWriter() as db:
+ with GlobalProvenance() as db:
db.insert_timing(self._category_id, self._algorithm,
self._work, timedelta, reason)
self._report(message)
@@ -190,7 +189,7 @@ def __exit__(self, exc_type, exc_value, traceback):
f"after {timedelta}"
skip = f"Exception {ex}"
- with ProvenanceWriter() as db:
+ with GlobalProvenance() as db:
db.insert_timing(self._category_id, self._algorithm, self._work,
timedelta, skip)
self._report(message)
@@ -205,7 +204,7 @@ def __stop_category(cls):
"""
time_now = _now()
if cls._category_id:
- with ProvenanceWriter() as db:
+ with GlobalProvenance() as db:
diff = _convert_to_timedelta(time_now - cls._category_time)
db.insert_category_timing(cls._category_id, diff)
return time_now
@@ -218,7 +217,7 @@ def _change_category(cls, category):
:param TimerCategory category: Category to switch to
"""
time_now = cls.__stop_category()
- with ProvenanceWriter() as db:
+ with GlobalProvenance() as db:
cls._category_id = db.insert_category(category, cls._machine_on)
cls._category = category
cls._category_time = time_now
diff --git a/spinn_front_end_common/interface/provenance/global.sql b/spinn_front_end_common/interface/provenance/global.sql
new file mode 100644
index 0000000000..63b1471df1
--- /dev/null
+++ b/spinn_front_end_common/interface/provenance/global.sql
@@ -0,0 +1,86 @@
+-- Copyright (c) 2018-2022 The University of Manchester
+--
+-- This program is free software: you can redistribute it and/or modify
+-- it under the terms of the GNU General Public License as published by
+-- the Free Software Foundation, either version 3 of the License, or
+-- (at your option) any later version.
+--
+-- This program is distributed in the hope that it will be useful,
+-- but WITHOUT ANY WARRANTY; without even the implied warranty of
+-- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+-- GNU General Public License for more details.
+--
+-- You should have received a copy of the GNU General Public License
+-- along with this program. If not, see .
+
+-- https://www.sqlite.org/pragma.html#pragma_synchronous
+PRAGMA main.synchronous = OFF;
+
+-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+-- A table holding the values for versions
+CREATE TABLE IF NOT EXISTS version_provenance(
+ version_id INTEGER PRIMARY KEY AUTOINCREMENT,
+ description STRING NOT NULL,
+ the_value STRING NOT NULL);
+
+-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+-- A table holding the values for algorithm timings
+CREATE TABLE IF NOT EXISTS timer_provenance(
+ timer_id INTEGER PRIMARY KEY AUTOINCREMENT,
+ category_id INTEGER NOT NULL,
+ algorithm STRING NOT NULL,
+ work STRING NOT NULL,
+ time_taken INTEGER NOT NULL,
+ skip_reason STRING);
+
+CREATE VIEW IF NOT EXISTS full_timer_view AS
+    SELECT timer_id, category, algorithm, work, machine_on,
+           timer_provenance.time_taken, n_run, n_loop, skip_reason
+    FROM timer_provenance, category_timer_provenance
+ WHERE timer_provenance.category_id = category_timer_provenance.category_id
+ ORDER BY timer_id;
+
+CREATE VIEW IF NOT EXISTS timer_view AS
+ SELECT category, algorithm, work, machine_on, time_taken, n_run, n_loop
+ FROM full_timer_view
+ WHERE skip_reason is NULL
+ ORDER BY timer_id;
+
+-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+-- A table holding the values for category timings
+CREATE TABLE IF NOT EXISTS category_timer_provenance(
+ category_id INTEGER PRIMARY KEY AUTOINCREMENT,
+ category STRING NOT NULL,
+ time_taken INTEGER,
+ machine_on BOOL NOT NULL,
+ n_run INTEGER NOT NULL,
+ n_loop INTEGER);
+
+---------------------------------------------------------------------
+-- A table to store log.info
+CREATE TABLE IF NOT EXISTS p_log_provenance(
+ log_id INTEGER PRIMARY KEY AUTOINCREMENT,
+ timestamp TIMESTAMP NOT NULL,
+ level INTEGER NOT NULL,
+ message STRING NOT NULL);
+
+CREATE TABLE IF NOT EXISTS log_level_names(
+ level INTEGER PRIMARY KEY NOT NULL,
+ name STRING NOT NULL);
+
+INSERT OR IGNORE INTO log_level_names
+ (level, name)
+VALUES
+ (50, "CRITICAL"),
+ (40, "ERROR"),
+ (30, "WARNING"),
+ (20, "INFO"),
+ (10, "DEBUG");
+
+CREATE VIEW IF NOT EXISTS p_log_view AS
+ SELECT
+ timestamp,
+ name,
+ message
+ FROM p_log_provenance left join log_level_names
+ ON p_log_provenance.level = log_level_names.level
+ ORDER BY p_log_provenance.log_id;
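
A hedged sketch of inspecting the new views with plain `sqlite3`; the column
names come from the view definitions above, and the path helper used here is
added in `global_provenance.py` later in this diff.

```python
import sqlite3
from spinn_front_end_common.interface.provenance import GlobalProvenance

conn = sqlite3.connect(GlobalProvenance.get_global_provenace_path())
conn.row_factory = sqlite3.Row
for row in conn.execute(
        "SELECT category, algorithm, work, time_taken FROM timer_view"):
    print(row["category"], row["algorithm"], row["work"], row["time_taken"])
for row in conn.execute("SELECT timestamp, name, message FROM p_log_view"):
    print(row["timestamp"], row["name"], row["message"])
conn.close()
```
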
diff --git a/spinn_front_end_common/interface/provenance/global_provenance.py b/spinn_front_end_common/interface/provenance/global_provenance.py
new file mode 100644
index 0000000000..7c1f97af5b
--- /dev/null
+++ b/spinn_front_end_common/interface/provenance/global_provenance.py
@@ -0,0 +1,416 @@
+# Copyright (c) 2017-2022 The University of Manchester
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+
+from datetime import datetime
+import logging
+import os
+import re
+from spinn_utilities.log import FormatAdapter
+from spinn_front_end_common.data import FecDataView
+from spinn_front_end_common.utilities.constants import (
+ MICRO_TO_MILLISECOND_CONVERSION)
+from spinn_front_end_common.utilities.sqlite_db import SQLiteDB
+
+logger = FormatAdapter(logging.getLogger(__name__))
+
+_DDL_FILE = os.path.join(os.path.dirname(__file__), "global.sql")
+_RE = re.compile(r"(\d+)([_,:])(\d+)(?:\2(\d+))?")
+
+
+class GlobalProvenance(SQLiteDB):
+ """ Specific implementation of the Database for SQLite 3.
+
+ .. note::
+ *Not thread safe on the same database file.*
+ Threads can access different DBs just fine.
+
+ .. note::
+ This totally relies on the way SQLite's type affinities function.
+ You can't port to a different database engine without a lot of work.
+ """
+
+ __slots__ = [
+ "_database_file"
+ ]
+
+ @classmethod
+ def get_global_provenace_path(cls):
+ """ Get the path of the current provenance database of the last run
+
+ .. warning::
+ Calling this method between start/reset and run may result in a
+ path to a database not yet created.
+
+ :raises ValueError:
+ if the system is in a state where path can't be retrieved,
+ for example before run is called
+ """
+ return os.path.join(
+ FecDataView.get_timestamp_dir_path(),
+ "global_provenance.sqlite3")
+
+ def __init__(self, database_file=None, memory=False):
+ """
+ :param database_file:
+ The name of a file that contains (or will contain) an SQLite
+ database holding the data.
+ If omitted, either the default file path or an unshared in-memory
+ database will be used (suitable only for testing).
+ :type database_file: str or None
+ :param bool memory:
+            Flag to say an unshared in-memory database may be used;
+            otherwise a None database_file means the default path is used.
+
+ """
+ if database_file is None and not memory:
+ database_file = self.get_global_provenace_path()
+ self._database_file = database_file
+ SQLiteDB.__init__(self, database_file, ddl_file=_DDL_FILE,
+ row_factory=None, text_factory=None)
+
+ def insert_version(self, description, the_value):
+ """
+ Inserts data into the version_provenance table
+
+ :param str description: The package for which the version applies
+ :param str the_value: The version to be recorded
+ """
+ with self.transaction() as cur:
+ cur.execute(
+ """
+ INSERT INTO version_provenance(
+ description, the_value)
+ VALUES(?, ?)
+ """, [description, the_value])
+
+ def insert_category(self, category, machine_on):
+ """
+ Inserts category into the category_timer_provenance returning id
+
+ :param TimerCategory category: Name of Category starting
+        :param bool machine_on: Whether the machine was on during all
+            or some of this time
+ """
+ with self.transaction() as cur:
+ cur.execute(
+ """
+ INSERT INTO category_timer_provenance(
+ category, machine_on, n_run, n_loop)
+ VALUES(?, ?, ?, ?)
+ """,
+ [category.category_name, machine_on,
+ FecDataView.get_run_number(),
+ FecDataView.get_run_step()])
+ return cur.lastrowid
+
+ def insert_category_timing(self, category_id, timedelta):
+ """
+ Inserts run time into the category
+
+ :param int category_id: id of the Category finished
+ :param ~datetime.timedelta timedelta: Time to be recorded
+ """
+ time_taken = (
+ (timedelta.seconds * MICRO_TO_MILLISECOND_CONVERSION) +
+ (timedelta.microseconds / MICRO_TO_MILLISECOND_CONVERSION))
+
+ with self.transaction() as cur:
+ cur.execute(
+ """
+ UPDATE category_timer_provenance
+ SET
+ time_taken = ?
+ WHERE category_id = ?
+ """, (time_taken, category_id))
+
+ def insert_timing(
+ self, category, algorithm, work, timedelta, skip_reason):
+ """
+ Inserts algorithms run times into the timer_provenance table
+
+ :param int category: Category Id of the Algorithm
+ :param str algorithm: Algorithm name
+ :param TimerWork work: Type of work being done
+ :param ~datetime.timedelta timedelta: Time to be recorded
+        :param skip_reason: The reason the algorithm was skipped, or None if
+            it was not skipped
+        :type skip_reason: str or None
+ """
+ time_taken = (
+ (timedelta.seconds * MICRO_TO_MILLISECOND_CONVERSION) +
+ (timedelta.microseconds / MICRO_TO_MILLISECOND_CONVERSION))
+ with self.transaction() as cur:
+ cur.execute(
+ """
+ INSERT INTO timer_provenance(
+ category_id, algorithm, work, time_taken, skip_reason)
+ VALUES(?, ?, ?, ?, ?)
+ """,
+ [category, algorithm, work.work_name, time_taken, skip_reason])
+
+ def store_log(self, level, message, timestamp=None):
+ """
+ Stores log messages into the database
+
+ :param int level:
+ :param str message:
+ """
+ if timestamp is None:
+ timestamp = datetime.now()
+ with self.transaction() as cur:
+ cur.execute(
+ """
+ INSERT INTO p_log_provenance(
+ timestamp, level, message)
+ VALUES(?, ?, ?)
+ """,
+ [timestamp, level, message])
+
+ def _test_log_locked(self, text):
+ """
+ THIS IS A TESTING METHOD.
+
+ This will lock the database and then try to do a log
+ """
+ with self.transaction() as cur:
+ # lock the database
+ cur.execute(
+ """
+ INSERT INTO version_provenance(
+ description, the_value)
+ VALUES("foo", "bar")
+ """)
+ cur.lastrowid
+ # try logging and storing while locked.
+ logger.warning(text)
+
+ def run_query(self, query, params=()):
+ """
+ Opens a connection to the database, runs a query, extracts the results
+ and closes the connection
+
+        Results are returned as plain tuples (lookup by index), as this
+        database is opened without a row factory.
+
+ .. note::
+ This method is mainly provided as a support method for the later
+            methods that return specific data. For new IntegrationTests
+ please add a specific method rather than call this directly.
+
+ :param str query: The SQL query to be run. May include ``?`` wildcards
+ :param ~collections.abc.Iterable(str or int) params:
+ The values to replace the ``?`` wildcards with.
+ The number and types must match what the query expects
+        :return: A possibly empty list of tuples, one per row returned by
+            the query, where the number and type of the values correspond
+            to the columns selected
+        :rtype: list(tuple)
+ """
+ results = []
+ with self.transaction() as cur:
+ for row in cur.execute(query, params):
+ results.append(row)
+ return results
+
+ def get_timer_provenance(self, algorithm):
+ """
+ Gets the timer provenance item(s) from the last run
+
+ :param str algorithm:
+ The value to LIKE search for in the algorithm column.
+ Can be the full name, or have ``%`` and ``_`` wildcards.
+ :return:
+            A possibly multiline string, with one ``algorithm: value`` line
+            for each row that matches the LIKE pattern
+ :rtype: str
+ """
+ query = """
+ SELECT algorithm, time_taken
+ FROM timer_provenance
+ WHERE algorithm LIKE ?
+ """
+ return "\n".join(
+ f"{row[0]}: {row[1]}"
+ for row in self.run_query(query, [algorithm]))
+
+ def get_run_times(self):
+ """
+ Gets the algorithm running times from the last run. If an algorithm is
+ invoked multiple times in the run, its times are summed.
+
+ :return:
+            A possibly multiline string, with one ``description_name: time``
+            line per algorithm. The times are in seconds.
+ :rtype: str
+ """
+ # We know the database actually stores microseconds for durations
+ query = """
+ SELECT description, SUM(time_taken) / 1000000.0
+ FROM timer_provenance
+ GROUP BY description
+ ORDER BY the_value
+ """
+ return "\n".join(
+ f"{row[0].replace('_', ' ')}: {row[1]} s"
+ for row in self.run_query(query))
+
+ def get_run_time_of_BufferExtractor(self):
+ """
+ Gets the BufferExtractor provenance item(s) from the last run
+
+ :return:
+            A possibly multiline string, with one ``algorithm: value`` line
+            for each row whose algorithm matches ``%BufferExtractor``
+ :rtype: str
+ """
+ return self.get_timer_provenance("%BufferExtractor")
+
+ def get_category_timer_sum(self, category):
+ """
+ Get the total runtime for one category of algorithms
+
+ :param TimerCategory category:
+        :return: total of all runtimes with this category
+ :rtype: int
+ """
+ query = """
+ SELECT sum(time_taken)
+ FROM category_timer_provenance
+ WHERE category = ?
+ """
+ data = self.run_query(query, [category.category_name])
+ try:
+ info = data[0][0]
+ if info is None:
+ return 0
+ return info
+ except IndexError:
+ return 0
+
+ def get_category_timer_sums(self, category):
+ """
+ Get the runtime for one category of algorithms
+ split machine on, machine off
+
+ :param TimerCategory category:
+ :return: total on and off time of instances with this category
+        :rtype: tuple(int, int)
+ """
+ on = 0
+ off = 0
+ query = """
+ SELECT sum(time_taken), machine_on
+ FROM category_timer_provenance
+ WHERE category = ?
+ GROUP BY machine_on
+ """
+ try:
+ for data in self.run_query(query, [category.category_name]):
+ if data[1]:
+ on = data[0]
+ else:
+ off = data[0]
+ except IndexError:
+ pass
+ return on, off
+
+ def get_timer_sum_by_category(self, category):
+ """
+ Get the total runtime for one category of algorithms
+
+ :param TimerCategory category:
+        :return: total of all runtimes with this category
+ :rtype: int
+ """
+ query = """
+ SELECT sum(time_taken)
+ FROM full_timer_view
+ WHERE category = ?
+ """
+ data = self.run_query(query, [category.category_name])
+ try:
+ info = data[0][0]
+ if info is None:
+ return 0
+ return info
+ except IndexError:
+ return 0
+
+ def get_timer_sum_by_work(self, work):
+ """
+ Get the total runtime for one work type of algorithms
+
+ :param TimerWork work:
+        :return: total of all runtimes with this work type
+ :rtype: int
+ """
+ query = """
+ SELECT sum(time_taken)
+ FROM full_timer_view
+ WHERE work = ?
+ """
+ data = self.run_query(query, [work.work_name])
+ try:
+ info = data[0][0]
+ if info is None:
+ return 0
+ return info
+ except IndexError:
+ return 0
+
+ def get_timer_sum_by_algorithm(self, algorithm):
+ """
+ Get the total runtime for one algorithm
+
+ :param str algorithm:
+        :return: total of all runtimes with this algorithm
+ :rtype: int
+ """
+ query = """
+ SELECT sum(time_taken)
+ FROM timer_provenance
+ WHERE algorithm = ?
+ """
+ data = self.run_query(query, [algorithm])
+ try:
+ info = data[0][0]
+ if info is None:
+ return 0
+ return info
+ except IndexError:
+ return 0
+
+ def retreive_log_messages(self, min_level=0):
+ """
+ Retrieves all log messages at or above the min_level
+
+ :param int min_level:
+        :rtype: list(str)
+ """
+ query = """
+ SELECT message
+ FROM p_log_provenance
+ WHERE level >= ?
+ """
+ messages = self.run_query(query, [min_level])
+ return list(map(lambda x: x[0], messages))
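
A minimal sketch of the in-memory mode intended for tests, with made-up
values; `memory=True` skips the default on-disk path, so no simulator set-up
is assumed here.

```python
from spinn_front_end_common.interface.provenance import GlobalProvenance

with GlobalProvenance(memory=True) as db:
    db.insert_version("example_package_version", "0.0.0")  # made-up values
    db.store_log(20, "simulation started")
    assert db.retreive_log_messages(min_level=20) == ["simulation started"]
```
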
diff --git a/spinn_front_end_common/interface/provenance/log_store_db.py b/spinn_front_end_common/interface/provenance/log_store_db.py
index a4b1e55e79..8b330c0f1a 100644
--- a/spinn_front_end_common/interface/provenance/log_store_db.py
+++ b/spinn_front_end_common/interface/provenance/log_store_db.py
@@ -16,8 +16,7 @@
import sqlite3
from spinn_utilities.log_store import LogStore
from spinn_utilities.overrides import overrides
-from .provenance_writer import ProvenanceWriter
-from .provenance_reader import ProvenanceReader
+from .global_provenance import GlobalProvenance
class LogStoreDB(LogStore):
@@ -25,7 +24,7 @@ class LogStoreDB(LogStore):
@overrides(LogStore.store_log)
def store_log(self, level, message, timestamp=None):
try:
- with ProvenanceWriter() as db:
+ with GlobalProvenance() as db:
db.store_log(level, message, timestamp)
except sqlite3.OperationalError as ex:
if "database is locked" in ex.args:
@@ -37,8 +36,9 @@ def store_log(self, level, message, timestamp=None):
@overrides(LogStore.retreive_log_messages)
def retreive_log_messages(self, min_level=0):
- return ProvenanceReader().retreive_log_messages(min_level)
+ with GlobalProvenance() as db:
+ return db.retreive_log_messages(min_level)
@overrides(LogStore.get_location)
def get_location(self):
- return ProvenanceReader.get_last_run_database_path()
+ return GlobalProvenance.get_global_provenace_path()
diff --git a/spinn_front_end_common/interface/provenance/provenance_reader.py b/spinn_front_end_common/interface/provenance/provenance_reader.py
index cb12669d80..7b34ca953c 100644
--- a/spinn_front_end_common/interface/provenance/provenance_reader.py
+++ b/spinn_front_end_common/interface/provenance/provenance_reader.py
@@ -14,13 +14,12 @@
# along with this program. If not, see .
import os
-import sqlite3
from spinn_front_end_common.data import FecDataView
from spinn_front_end_common.utilities.constants import PROVENANCE_DB
-from spinn_front_end_common.utilities.sqlite_db import SQLiteDB
+from spinn_front_end_common.utilities.base_database import BaseDatabase
-class ProvenanceReader(object):
+class ProvenanceReader(BaseDatabase):
"""
Provides a connection to a database containing provenance for the current
run and some convenience methods for extracting provenance data from it.
@@ -66,50 +65,10 @@ def __init__(self, provenance_data_path=None):
:param provenance_data_path: Path to the provenance database to wrap
:type provenance_data_path: None or str
"""
- if provenance_data_path:
- self._provenance_data_path = provenance_data_path
- else:
- self._provenance_data_path = self.get_last_run_database_path()
+ super().__init__(provenance_data_path, read_only=True,
+ row_factory=None, text_factory=None)
- def get_database_handle(self, read_only=True, use_sqlite_rows=False):
- """
- Gets a handle to the open database.
-
- You *should* use this as a Python context handler. A typical usage
- pattern is this::
-
- with reader.get_database_handler() as db:
- with db.transaction() as cursor:
- for row in cursor.execute(...):
- # process row
-
- .. note::
- This method is mainly provided as a support method for the later
- methods that return specific data. For new IntergationTests
- please add a specific method rather than call this directly.
-
- .. warning::
- It is the callers responsibility to close the database.
- The recommended usage is therefore a ``with`` statement
-
- :param bool read_only: If true will return a readonly database
- :param bool use_sqlite_rows:
- If ``True`` the results of :py:meth:`run_query` will be
- :py:class:`~sqlite3.Row`\\ s.
- If ``False`` the results of :py:meth:`run_query` will be
- :py:class:`tuple`\\ s.
- :return: an open sqlite3 connection
- :rtype: SQLiteDB
- """
- if not os.path.exists(self._provenance_data_path):
- raise Exception(f"no such DB: {self._provenance_data_path}")
- db = SQLiteDB(self._provenance_data_path, read_only=read_only,
- row_factory=(sqlite3.Row if use_sqlite_rows else None),
- text_factory=None)
- return db
-
- def run_query(
- self, query, params=(), read_only=True, use_sqlite_rows=False):
+ def run_query(self, query, params=()):
"""
Opens a connection to the database, runs a query, extracts the results
and closes the connection
@@ -139,13 +98,10 @@ def run_query(
statement
:rtype: list(tuple or ~sqlite3.Row)
"""
- if not os.path.exists(self._provenance_data_path):
- raise Exception("no such DB: " + self._provenance_data_path)
results = []
- with self.get_database_handle(read_only, use_sqlite_rows) as db:
- with db.transaction() as cur:
- for row in cur.execute(query, params):
- results.append(row)
+ with self.transaction() as cur:
+ for row in cur.execute(query, params):
+ results.append(row)
return results
def cores_with_late_spikes(self):
@@ -166,59 +122,6 @@ def cores_with_late_spikes(self):
"""
return self.run_query(query)
- def get_timer_provenance(self, algorithm):
- """
- Gets the timer provenance item(s) from the last run
-
- :param str algorithm:
- The value to LIKE search for in the algorithm column.
- Can be the full name, or have ``%`` and ``_`` wildcards.
- :return:
- A possibly multiline string with for each row which matches the
- like a line ``algorithm: value``
- :rtype: str
- """
- query = """
- SELECT algorithm, time_taken
- FROM timer_provenance
- WHERE algorithm LIKE ?
- """
- return "\n".join(
- f"{row[0]}: {row[1]}"
- for row in self.run_query(query, [algorithm]))
-
- def get_run_times(self):
- """
- Gets the algorithm running times from the last run. If an algorithm is
- invoked multiple times in the run, its times are summed.
-
- :return:
- A possibly multiline string with for each row which matches the
- like a line ``description_name: time``. The times are in seconds.
- :rtype: str
- """
- # We know the database actually stores microseconds for durations
- query = """
- SELECT description, SUM(time_taken) / 1000000.0
- FROM timer_provenance
- GROUP BY description
- ORDER BY the_value
- """
- return "\n".join(
- f"{row[0].replace('_', ' ')}: {row[1]} s"
- for row in self.run_query(query))
-
- def get_run_time_of_BufferExtractor(self):
- """
- Gets the BufferExtractor provenance item(s) from the last run
-
- :return:
- A possibly multiline string with for each row which matches the
- like %BufferExtractor description_name: value
- :rtype: str
- """
- return self.get_timer_provenance("%BufferExtractor")
-
def get_provenance_for_router(self, x, y):
"""
Gets the provenance item(s) from the last run relating to a chip
@@ -242,9 +145,8 @@ def get_provenance_for_router(self, x, y):
ORDER BY description
"""
return "\n".join(
- f"{ row['description'] }: { row['value'] }"
- for row in self.run_query(query, [int(x), int(y)],
- use_sqlite_rows=True))
+ f"{ row[0] }: { row[1] }"
+ for row in self.run_query(query, [int(x), int(y)]))
def get_cores_with_provenace(self):
"""
@@ -298,121 +200,6 @@ def get_monitor_by_chip(self, description):
except IndexError:
return None
- def get_category_timer_sum(self, category):
- """
- Get the total runtime for one category of algorithms
-
- :param TimerCategory category:
- :return: total off all runtimes with this category
- :rtype: int
- """
- query = """
- SELECT sum(time_taken)
- FROM category_timer_provenance
- WHERE category = ?
- """
- data = self.run_query(query, [category.category_name])
- try:
- info = data[0][0]
- if info is None:
- return 0
- return info
- except IndexError:
- return 0
-
- def get_category_timer_sums(self, category):
- """
- Get the runtime for one category of algorithms
- split machine on, machine off
-
- :param TimerCategory category:
- :return: total on and off time of instances with this category
- :rtype: int
- """
- on = 0
- off = 0
- query = """
- SELECT sum(time_taken), machine_on
- FROM category_timer_provenance
- WHERE category = ?
- GROUP BY machine_on
- """
- try:
- for data in self.run_query(query, [category.category_name]):
- if data[1]:
- on = data[0]
- else:
- off = data[0]
- except IndexError:
- pass
- return on, off
-
- def get_timer_sum_by_category(self, category):
- """
- Get the total runtime for one category of algorithms
-
- :param TimerCategory category:
- :return: total off all runtimes with this category
- :rtype: int
- """
- query = """
- SELECT sum(time_taken)
- FROM full_timer_view
- WHERE category = ?
- """
- data = self.run_query(query, [category.category_name])
- try:
- info = data[0][0]
- if info is None:
- return 0
- return info
- except IndexError:
- return 0
-
- def get_timer_sum_by_work(self, work):
- """
- Get the total runtime for one work type of algorithms
-
- :param TimerWork work:
- :return: total off all runtimes with this category
- :rtype: int
- """
- query = """
- SELECT sum(time_taken)
- FROM full_timer_view
- WHERE work = ?
- """
- data = self.run_query(query, [work.work_name])
- try:
- info = data[0][0]
- if info is None:
- return 0
- return info
- except IndexError:
- return 0
-
- def get_timer_sum_by_algorithm(self, algorithm):
- """
- Get the total runtime for one algorithm
-
- :param str algorithm:
- :return: total off all runtimes with this algorithm
- :rtype: int
- """
- query = """
- SELECT sum(time_taken)
- FROM timer_provenance
- WHERE algorithm = ?
- """
- data = self.run_query(query, [algorithm])
- try:
- info = data[0][0]
- if info is None:
- return 0
- return info
- except IndexError:
- return 0
-
def messages(self):
"""
List all the provenance messages
@@ -426,21 +213,6 @@ def messages(self):
"""
return self.run_query(query, [])
- def retreive_log_messages(self, min_level=0):
- """
- Retrieves all log messages at or above the min_level
-
- :param int min_level:
- :rtype: list(tuple(int, str))
- """
- query = """
- SELECT message
- FROM p_log_provenance
- WHERE level >= ?
- """
- messages = self.run_query(query, [min_level])
- return list(map(lambda x: x[0], messages))
-
@staticmethod
def demo():
""" A demonstration of how to use this class.
@@ -448,25 +220,23 @@ def demo():
See also unittests/interface/provenance/test_provenance_database.py
"""
# This uses the example file in the same directory as this script
- pr = ProvenanceReader(os.path.join(
- os.path.dirname(__file__), "provenance.sqlite3"))
- print("DIRECT QUERY:")
- query = """
- SELECT x, y, the_value
- FROM router_provenance
- WHERE description = 'Local_P2P_Packets'
- """
- results = pr.run_query(query)
- for row in results:
- print(row)
- print("\nCORES WITH LATE SPIKES:")
- print(pr.cores_with_late_spikes())
- print("\nRUN TIME OF BUFFER EXTRACTOR:")
- print(pr.get_run_time_of_BufferExtractor())
- print("\nROUETER (0,0) PROVENANCE:")
- print(pr.get_provenance_for_router(0, 0))
- print("\nCORES WITH PROVENACE")
- print(pr.get_cores_with_provenace())
+ with ProvenanceReader(os.path.join(
+ os.path.dirname(__file__), "provenance.sqlite3")) as pr:
+ print("DIRECT QUERY:")
+ query = """
+ SELECT x, y, the_value
+ FROM router_provenance
+ WHERE description = 'Local_P2P_Packets'
+ """
+ results = pr.run_query(query)
+ for row in results:
+ print(row)
+ print("\nCORES WITH LATE SPIKES:")
+ print(pr.cores_with_late_spikes())
+ print("\nROUTER (0,0) PROVENANCE:")
+ print(pr.get_provenance_for_router(0, 0))
+ print("\nCORES WITH PROVENANCE")
+ print(pr.get_cores_with_provenace())
if __name__ == '__main__':
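For reviewers, a minimal usage sketch of the refactored reader (it assumes a provenance database already exists at the default location and the query text is illustrative): ProvenanceReader is now a read-only BaseDatabase context manager and run_query always returns plain tuples.

from spinn_front_end_common.interface.provenance import ProvenanceReader

# Sketch only: run_query now reuses the inherited read-only connection
# instead of opening a fresh SQLiteDB handle per call.
with ProvenanceReader() as reader:
    for x, y, value in reader.run_query(
            "SELECT x, y, the_value FROM router_provenance LIMIT 5"):
        print(x, y, value)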
diff --git a/spinn_front_end_common/interface/provenance/provenance_writer.py b/spinn_front_end_common/interface/provenance/provenance_writer.py
index a2796b60cc..d5d15cb083 100644
--- a/spinn_front_end_common/interface/provenance/provenance_writer.py
+++ b/spinn_front_end_common/interface/provenance/provenance_writer.py
@@ -13,24 +13,15 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-from datetime import datetime
import logging
-import os
-import re
from spinn_utilities.config_holder import get_config_int
from spinn_utilities.log import FormatAdapter
-from spinn_front_end_common.data import FecDataView
-from spinn_front_end_common.utilities.constants import (
- MICRO_TO_MILLISECOND_CONVERSION, PROVENANCE_DB)
-from spinn_front_end_common.utilities.sqlite_db import SQLiteDB
+from spinn_front_end_common.utilities.base_database import BaseDatabase
logger = FormatAdapter(logging.getLogger(__name__))
-_DDL_FILE = os.path.join(os.path.dirname(__file__), "db.sql")
-_RE = re.compile(r"(\d+)([_,:])(\d+)(?:\2(\d+))?")
-
-class ProvenanceWriter(SQLiteDB):
+class ProvenanceWriter(BaseDatabase):
""" Specific implementation of the Database for SQLite 3.
.. note::
@@ -42,9 +33,7 @@ class ProvenanceWriter(SQLiteDB):
You can't port to a different database engine without a lot of work.
"""
- __slots__ = [
- "_database_file"
- ]
+ __slots__ = []
def __init__(self, database_file=None, memory=False):
"""
@@ -59,26 +48,7 @@ def __init__(self, database_file=None, memory=False):
Otherwise a None file will mean the default should be used
"""
- if database_file is None and not memory:
- database_file = os.path.join(
- FecDataView.get_provenance_dir_path(), PROVENANCE_DB)
- self._database_file = database_file
- SQLiteDB.__init__(self, database_file, ddl_file=_DDL_FILE)
-
- def insert_version(self, description, the_value):
- """
- Inserts data into the version_provenance table
-
- :param str description: The package for which the version applies
- :param str the_value: The version to be recorded
- """
- with self.transaction() as cur:
- cur.execute(
- """
- INSERT INTO version_provenance(
- description, the_value)
- VALUES(?, ?)
- """, [description, the_value])
+ super().__init__(database_file)
def insert_power(self, description, the_value):
"""
@@ -95,90 +65,6 @@ def insert_power(self, description, the_value):
VALUES(?, ?)
""", [description, the_value])
- def insert_category(self, category, machine_on):
- """
- Inserts category into the category_timer_provenance returning id
-
- :param TimerCategory category: Name of Category starting
- :param bool machine_on: If the machine was done during all
- or some of the time
- """
- with self.transaction() as cur:
- cur.execute(
- """
- INSERT INTO category_timer_provenance(
- category, machine_on, n_run, n_loop)
- VALUES(?, ?, ?, ?)
- """,
- [category.category_name, machine_on,
- FecDataView.get_run_number(),
- FecDataView.get_run_step()])
- return cur.lastrowid
-
- def insert_category_timing(self, category_id, timedelta):
- """
- Inserts run time into the category
-
- :param int category_id: id of the Category finished
- :param ~datetime.timedelta timedelta: Time to be recorded
- """
- time_taken = (
- (timedelta.seconds * MICRO_TO_MILLISECOND_CONVERSION) +
- (timedelta.microseconds / MICRO_TO_MILLISECOND_CONVERSION))
-
- with self.transaction() as cur:
- cur.execute(
- """
- UPDATE category_timer_provenance
- SET
- time_taken = ?
- WHERE category_id = ?
- """, (time_taken, category_id))
-
- def insert_timing(
- self, category, algorithm, work, timedelta, skip_reason):
- """
- Inserts algorithms run times into the timer_provenance table
-
- :param int category: Category Id of the Algorithm
- :param str algorithm: Algorithm name
- :param TimerWork work: Type of work being done
- :param ~datetime.timedelta timedelta: Time to be recorded
- :param skip_reason: The reason the algorthm was skipped or None if
- it was not skipped
- :tpye skip_reason: str or None
- """
- time_taken = (
- (timedelta.seconds * MICRO_TO_MILLISECOND_CONVERSION) +
- (timedelta.microseconds / MICRO_TO_MILLISECOND_CONVERSION))
- with self.transaction() as cur:
- cur.execute(
- """
- INSERT INTO timer_provenance(
- category_id, algorithm, work, time_taken, skip_reason)
- VALUES(?, ?, ?, ?, ?)
- """,
- [category, algorithm, work.work_name, time_taken, skip_reason])
-
- def insert_other(self, category, description, the_value):
- """
- Insert unforeseen provenance into the other_provenace_table
-
- This allows to add provenance that does not readily fit into any of
- the other categerogies
-
- :param str category: grouping from this provenance
- :param str description: Specific provenance being saved
- :param ste the_value: Data
- """
- with self.transaction() as cur:
- cur.execute(
- """
- INSERT INTO other_provenance(
- category, description, the_value)
- VALUES(?, ?, ?)
- """, [category, description, the_value])
-
def insert_gatherer(self, x, y, address, bytes_read, run, description,
the_value):
"""
@@ -247,32 +133,13 @@ def insert_core(self, x, y, p, description, the_value):
:param int the_value: data
"""
with self.transaction() as cur:
+ core_id = self._get_core_id(cur, x, y, p)
cur.execute(
"""
INSERT INTO core_provenance(
- x, y, p, description, the_value)
- VALUES(?, ?, ?, ?, ?)
- """, [x, y, p, description, the_value])
-
- def add_core_name(self, x, y, p, core_name):
- """
- Adds a vertex or similar name for the core to the core_mapping table
-
- A second call to the same core is silently ignored even if the name
- if different.
-
- :param int x: X coordinate of the chip
- :param int y: Y coordinate of the chip
- :param int p: id of the core
- :param str core_name: Name to assign
- """
- with self.transaction() as cur:
- cur.execute(
- """
- INSERT OR IGNORE INTO core_mapping(
- x, y, p, core_name)
- VALUES(?, ?, ?, ?)
- """, [x, y, p, core_name])
+ core_id, description, the_value)
+ VALUES(?, ?, ?)
+ """, [core_id, description, the_value])
def insert_report(self, message):
"""
@@ -338,24 +205,6 @@ def insert_board_provenance(self, connections):
""", ((x, y, ipaddress)
for ((x, y), ipaddress) in connections.items()))
- def store_log(self, level, message, timestamp=None):
- """
- Stores log messages into the database
-
- :param int level:
- :param str message:
- """
- if timestamp is None:
- timestamp = datetime.now()
- with self.transaction() as cur:
- cur.execute(
- """
- INSERT INTO p_log_provenance(
- timestamp, level, message)
- VALUES(?, ?, ?)
- """,
- [timestamp, level, message])
-
def _test_log_locked(self, text):
"""
THIS IS A TESTING METHOD.
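A minimal sketch of the insert_core change above, assuming the FEC data view has been set up so the default database path resolves (coordinates and values are invented):

from spinn_front_end_common.interface.provenance import ProvenanceWriter

# Sketch only: the call site is unchanged, but insert_core now resolves
# (x, y, p) to a row in the shared core table via _get_core_id rather than
# storing x, y, p columns directly.
with ProvenanceWriter() as db:
    db.insert_core(0, 0, 1, "Sent_SDP_Packets", 42)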
diff --git a/spinn_front_end_common/utilities/base_database.py b/spinn_front_end_common/utilities/base_database.py
new file mode 100644
index 0000000000..2a6beccb17
--- /dev/null
+++ b/spinn_front_end_common/utilities/base_database.py
@@ -0,0 +1,91 @@
+# Copyright (c) 2017-2019 The University of Manchester
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import sqlite3
+import time
+from spinn_utilities.abstract_context_manager import AbstractContextManager
+from spinn_front_end_common.data import FecDataView
+from spinn_front_end_common.utilities.sqlite_db import SQLiteDB
+
+_DDL_FILE = os.path.join(os.path.dirname(__file__),
+ "db.sql")
+_SECONDS_TO_MICRO_SECONDS_CONVERSION = 1000
+#: Name of the database in the data folder
+
+
+def _timestamp():
+ return int(time.time() * _SECONDS_TO_MICRO_SECONDS_CONVERSION)
+
+
+class BaseDatabase(SQLiteDB, AbstractContextManager):
+ """ Specific implementation of the Database for SQLite 3.
+
+ There should only ever be a single Database Object in use at any time.
+ In the case of application_graph_changed the first should be closed and
+ a new one created.
+
+ If two database objects are opened with the same database_file they hold
+ the same data, unless someone else deletes that file.
+
+
+ .. note::
+ *Not thread safe on the same database file!*
+ Threads can access different DBs just fine.
+ """
+
+ __slots__ = ["_database_file"]
+
+ def __init__(self, database_file=None, *, read_only=False,
+ row_factory=sqlite3.Row, text_factory=memoryview):
+ """
+ :param str database_file:
+ The name of a file that contains (or will contain) an SQLite
+ database holding the data.
+ If omitted the default location will be used.
+ """
+ if database_file:
+ self._database_file = database_file
+ else:
+ self._database_file = self.default_database_file()
+ super().__init__(
+ self._database_file, read_only=read_only, row_factory=row_factory,
+ text_factory=text_factory, ddl_file=_DDL_FILE)
+
+ @classmethod
+ def default_database_file(cls):
+ return os.path.join(FecDataView.get_run_dir_path(),
+ f"data{FecDataView.get_reset_str()}.sqlite3")
+
+ def _get_core_id(self, cursor, x, y, p):
+ """
+ :param ~sqlite3.Cursor cursor:
+ :param int x:
+ :param int y:
+ :param int p:
+ :rtype: int
+ """
+ for row in cursor.execute(
+ """
+ SELECT core_id FROM core
+ WHERE x = ? AND y = ? AND processor = ?
+ LIMIT 1
+ """, (x, y, p)):
+ return row["core_id"]
+ cursor.execute(
+ """
+ INSERT INTO core(x, y, processor) VALUES(?, ?, ?)
+ """, (x, y, p))
+ return cursor.lastrowid
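Since BufferDatabase appears to share this base class (it exposes the same default_database_file() classmethod in the tests below), a short sketch of the default-file behaviour, assuming FecDataView has been set up as in those tests:

from spinn_front_end_common.interface.buffer_management.storage_objects import (
    BufferDatabase)

# Sketch only: the default file name combines the run directory with the
# reset count, e.g. data.sqlite3, then data1.sqlite3 after the first reset
# that follows a run.
print(BufferDatabase.default_database_file())
with BufferDatabase() as db:
    pass  # opening creates the tables defined in db.sql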
diff --git a/spinn_front_end_common/interface/provenance/db.sql b/spinn_front_end_common/utilities/db.sql
similarity index 66%
rename from spinn_front_end_common/interface/provenance/db.sql
rename to spinn_front_end_common/utilities/db.sql
index 1a83071739..76ca567c6d 100644
--- a/spinn_front_end_common/interface/provenance/db.sql
+++ b/spinn_front_end_common/utilities/db.sql
@@ -1,4 +1,4 @@
--- Copyright (c) 2018-2022 The University of Manchester
+-- Copyright (c) 2018-2019 The University of Manchester
--
-- This program is free software: you can redistribute it and/or modify
-- it under the terms of the GNU General Public License as published by
@@ -17,11 +17,56 @@
PRAGMA main.synchronous = OFF;
-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
--- A table holding the values for versions
-CREATE TABLE IF NOT EXISTS version_provenance(
- version_id INTEGER PRIMARY KEY AUTOINCREMENT,
- description STRING NOT NULL,
- the_value STRING NOT NULL);
+-- A table describing the cores.
+CREATE TABLE IF NOT EXISTS core(
+ core_id INTEGER PRIMARY KEY AUTOINCREMENT,
+ x INTEGER NOT NULL,
+ y INTEGER NOT NULL,
+ processor INTEGER NOT NULL,
+ core_name STRING);
+-- Every processor has a unique ID
+CREATE UNIQUE INDEX IF NOT EXISTS coreSanity ON core(
+ x ASC, y ASC, processor ASC);
+
+
+-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+-- A table describing recording regions.
+CREATE TABLE IF NOT EXISTS region(
+ region_id INTEGER PRIMARY KEY AUTOINCREMENT,
+ core_id INTEGER NOT NULL
+ REFERENCES core(core_id) ON DELETE RESTRICT,
+ local_region_index INTEGER NOT NULL,
+ address INTEGER,
+ content BLOB NOT NULL DEFAULT '',
+ content_len INTEGER DEFAULT 0,
+ fetches INTEGER NOT NULL DEFAULT 0,
+ append_time INTEGER);
+-- Every recording region has a unique vertex and index
+CREATE UNIQUE INDEX IF NOT EXISTS regionSanity ON region(
+ core_id ASC, local_region_index ASC);
+
+-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+-- A table containing the data which doesn't fit in the content column of the
+-- region table; care must be taken with this to not exceed 1GB! We actually
+-- store one per auto-pause-resume cycle as that is more efficient.
+CREATE TABLE IF NOT EXISTS region_extra(
+ extra_id INTEGER PRIMARY KEY ASC AUTOINCREMENT,
+ region_id INTEGER NOT NULL
+ REFERENCES region(region_id) ON DELETE RESTRICT,
+ content BLOB NOT NULL DEFAULT '',
+ content_len INTEGER DEFAULT 0);
+
+CREATE VIEW IF NOT EXISTS region_view AS
+ SELECT core_id, region_id, x, y, processor, local_region_index, address,
+ content, content_len, fetches, append_time,
+ (fetches > 1) AS have_extra
+FROM core NATURAL JOIN region;
+
+CREATE VIEW IF NOT EXISTS extra_view AS
+ SELECT core_id, region_id, extra_id, x, y, processor, local_region_index,
+ address, append_time, region_extra.content AS content,
+ region_extra.content_len AS content_len
+FROM core NATURAL JOIN region NATURAL JOIN region_extra;
-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-- A table holding the values for power provenance
@@ -31,45 +76,6 @@ CREATE TABLE IF NOT EXISTS power_provenance(
description STRING NOT NULL,
the_value FLOAT NOT NULL);
--- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
--- A table holding the values for algorithm timings
-CREATE TABLE IF NOT EXISTS timer_provenance(
- timer_id INTEGER PRIMARY KEY AUTOINCREMENT,
- category_id INTEGER NOT NULL,
- algorithm STRING NOT NULL,
- work STRING NOT NULL,
- time_taken INTEGER NOT NULL,
- skip_reason STRING);
-
-CREATE VIEW IF NOT EXISTS full_timer_view AS
- SELECT timer_id, category, algorithm, work, machine_on, timer_provenance.time_taken, n_run, n_loop
- FROM timer_provenance ,category_timer_provenance
- WHERE timer_provenance.category_id = category_timer_provenance.category_id
- ORDER BY timer_id;
-
-CREATE VIEW IF NOT EXISTS timer_view AS
- SELECT category, algorithm, work, machine_on, time_taken, n_run, n_loop
- FROM full_timer_view
- WHERE skip_reason is NULL
- ORDER BY timer_id;
-
--- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
--- A table holding the values for category timings
-CREATE TABLE IF NOT EXISTS category_timer_provenance(
- category_id INTEGER PRIMARY KEY AUTOINCREMENT,
- category STRING NOT NULL,
- time_taken INTEGER,
- machine_on BOOL NOT NULL,
- n_run INTEGER NOT NULL,
- n_loop INTEGER);
-
--- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
--- A table holding the values for uncategorised general provenance
-CREATE TABLE IF NOT EXISTS other_provenance(
- other_id INTEGER PRIMARY KEY AUTOINCREMENT,
- category STRING NOT NULL,
- description STRING NOT NULL,
- the_value STRING NOT NULL);
-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-- A table holding the values for data speed up packet gathers
@@ -155,28 +161,17 @@ CREATE VIEW IF NOT EXISTS router_summary_view AS
-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-- A table holding the values for each core
CREATE TABLE IF NOT EXISTS core_provenance(
- core_id INTEGER PRIMARY KEY AUTOINCREMENT,
- x INTEGER NOT NULL,
- y INTEGER NOT NULL,
- p INTEGER NOT NULL,
+ cp_id INTEGER PRIMARY KEY AUTOINCREMENT,
+ core_id INTEGER NOT NULL
+ REFERENCES core(core_id) ON DELETE RESTRICT,
description STRING NOT NULL,
the_value INTEGER NOT NULL);
--- A table holding the mapping from vertex name to core x, y, p
-CREATE TABLE IF NOT EXISTS core_mapping(
- core_name STRING NOT NULL,
- x INTEGER,
- y INTEGER,
- p INTEGER);
-
--- Every core has a unique x,y,p location.
-CREATE UNIQUE INDEX IF NOT EXISTS core_sanity ON core_mapping(
- x ASC, y ASC, p ASC);
-- Create a view combining core name and data
CREATE VIEW IF NOT EXISTS core_provenance_view AS
- SELECT core_name, x, y, p, description, the_value
- FROM core_provenance NATURAL JOIN core_mapping;
+ SELECT core_name, x, y, processor as p, description, the_value
+ FROM core_provenance NATURAL JOIN core;
-- Compute some basic statistics per core over the provenance
CREATE VIEW IF NOT EXISTS core_stats_view AS
@@ -224,33 +219,3 @@ CREATE TABLE IF NOT EXISTS boards_provenance(
ip_addres STRING NOT NULL,
ethernet_x INTEGER NOT NULL,
ethernet_y INTEGER NOT NULL);
-
----------------------------------------------------------------------
--- A table to store log.info
-CREATE TABLE IF NOT EXISTS p_log_provenance(
- log_id INTEGER PRIMARY KEY AUTOINCREMENT,
- timestamp TIMESTAMP NOT NULL,
- level INTEGER NOT NULL,
- message STRING NOT NULL);
-
-CREATE TABLE IF NOT EXISTS log_level_names(
- level INTEGER PRIMARY KEY NOT NULL,
- name STRING NOT NULL);
-
-INSERT OR IGNORE INTO log_level_names
- (level, name)
-VALUES
- (50, "CRITICAL"),
- (40, "ERROR"),
- (30, "WARNING"),
- (20, "INFO"),
- (10, "DEBUG");
-
-CREATE VIEW IF NOT EXISTS p_log_view AS
- SELECT
- timestamp,
- name,
- message
- FROM p_log_provenance left join log_level_names
- ON p_log_provenance.level = log_level_names.level
- ORDER BY p_log_provenance.log_id;
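For orientation, the reshaped schema can be inspected directly with the standard sqlite3 module; a sketch only, where the file name is an assumption standing in for the real per-run database:

import sqlite3

# Sketch only: "data.sqlite3" is illustrative; region_view joins the new
# core and region tables defined above.
conn = sqlite3.connect("data.sqlite3")
conn.row_factory = sqlite3.Row
for row in conn.execute(
        "SELECT x, y, processor, local_region_index, content_len, have_extra"
        " FROM region_view"):
    print(dict(row))
conn.close()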
diff --git a/spinn_front_end_common/utilities/exceptions.py b/spinn_front_end_common/utilities/exceptions.py
index b7e5edf158..29b6ba4a42 100644
--- a/spinn_front_end_common/utilities/exceptions.py
+++ b/spinn_front_end_common/utilities/exceptions.py
@@ -60,3 +60,9 @@ class BufferedRegionNotPresent(SpinnFrontEndException):
class CantFindSDRAMToUseException(SpinnFrontEndException):
""" Raised when malloc and sdram stealing cannot occur.
"""
+
+
+class NoProvenanceDatabaseException(SpinnFrontEndException):
+ """
+ Raised when the Provenance database has not yet been created.
+ """
diff --git a/spinn_front_end_common/utilities/report_functions/__init__.py b/spinn_front_end_common/utilities/report_functions/__init__.py
index 2a28071ecc..c66d0aa9e2 100644
--- a/spinn_front_end_common/utilities/report_functions/__init__.py
+++ b/spinn_front_end_common/utilities/report_functions/__init__.py
@@ -16,6 +16,7 @@
from .bit_field_compressor_report import bitfield_compressor_report
from .bit_field_summary import BitFieldSummary
from .board_chip_report import board_chip_report
+from .chip_active_report import write_chip_active_report
from .energy_report import EnergyReport
from .fixed_route_from_machine_report import fixed_route_from_machine_report
from .memory_map_on_host_chip_report import memory_map_on_host_chip_report
@@ -44,6 +45,7 @@
"router_collision_potential_report",
"routing_table_from_machine_report",
"tags_from_machine_report",
+ "write_chip_active_report",
"write_json_machine",
"write_json_placements",
"write_json_routing_tables",
diff --git a/spinn_front_end_common/utilities/report_functions/bit_field_compressor_report.py b/spinn_front_end_common/utilities/report_functions/bit_field_compressor_report.py
index c5ea95ba8f..2b4ee03988 100644
--- a/spinn_front_end_common/utilities/report_functions/bit_field_compressor_report.py
+++ b/spinn_front_end_common/utilities/report_functions/bit_field_compressor_report.py
@@ -20,10 +20,12 @@
from spinn_utilities.log import FormatAdapter
from spinn_front_end_common.abstract_models import AbstractHasAssociatedBinary
from spinn_front_end_common.data import FecDataView
-from spinn_front_end_common.interface.provenance import ProvenanceWriter
+from spinn_front_end_common.interface.provenance import (
+ ProvenanceReader, ProvenanceWriter)
from .bit_field_summary import BitFieldSummary
+from spinn_front_end_common.utilities.exceptions import (
+ NoProvenanceDatabaseException)
from spinn_front_end_common.utilities.utility_objs import ExecutableType
-from spinn_front_end_common.interface.provenance import ProvenanceReader
logger = FormatAdapter(logging.getLogger(__name__))
_FILE_NAME = "bit_field_compressed_summary.rpt"
@@ -56,11 +58,15 @@ def bitfield_compressor_report():
file_name = os.path.join(FecDataView.get_run_dir_path(), _FILE_NAME)
try:
with open(file_name, "w", encoding="utf-8") as f:
- return _write_report(f)
+ _write_report(f)
except IOError:
logger.exception("Generate_placement_reports: Can't open file"
" {} for writing.", _FILE_NAME)
- return None
+ return
+ except NoProvenanceDatabaseException:
+ logger.exception(
+ "No provenance found to write bitfield_compressor_report")
+ return
def _merged_component(to_merge_per_chip, writer):
@@ -84,24 +90,25 @@ def _merged_component(to_merge_per_chip, writer):
to_merge_chips = set(to_merge_per_chip.keys())
found = False
- for (x, y, merged) in ProvenanceReader().get_router_by_chip(
- MERGED_NAME):
- if (x, y) not in to_merge_per_chip:
- continue
- to_merge = to_merge_per_chip[x, y]
- to_merge_chips.discard((x, y))
- found = True
- writer.write(
- "Chip {}:{} has {} bitfields out of {} merged into it."
- " Which is {:.2%}\n".format(
- x, y, merged, to_merge, merged / to_merge))
- total_bit_fields_merged += int(merged)
- if merged > top_bit_field:
- top_bit_field = merged
- if merged < min_bit_field:
- min_bit_field = merged
- average_per_chip_merged += merged
- n_chips += 1
+ with ProvenanceReader() as db:
+ for (x, y, merged) in db.get_router_by_chip(
+ MERGED_NAME):
+ if (x, y) not in to_merge_per_chip:
+ continue
+ to_merge = to_merge_per_chip[x, y]
+ to_merge_chips.discard((x, y))
+ found = True
+ writer.write(
+ "Chip {}:{} has {} bitfields out of {} merged into it."
+ " Which is {:.2%}\n".format(
+ x, y, merged, to_merge, merged / to_merge))
+ total_bit_fields_merged += int(merged)
+ if merged > top_bit_field:
+ top_bit_field = merged
+ if merged < min_bit_field:
+ min_bit_field = merged
+ average_per_chip_merged += merged
+ n_chips += 1
if found:
average_per_chip_merged = (
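The new exception lets the report above skip cleanly when nothing has been written yet; a minimal sketch of the same pattern, assuming the reader raises NoProvenanceDatabaseException when the read-only database file does not yet exist, and using an illustrative description string:

from spinn_front_end_common.interface.provenance import ProvenanceReader
from spinn_front_end_common.utilities.exceptions import (
    NoProvenanceDatabaseException)

# Sketch only: tolerate a missing provenance database in the same way
# bitfield_compressor_report now does.
try:
    with ProvenanceReader() as db:
        rows = db.get_router_by_chip("Merged_bitfields")
except NoProvenanceDatabaseException:
    rows = []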
diff --git a/spinn_front_end_common/utilities/report_functions/chip_active_report.py b/spinn_front_end_common/utilities/report_functions/chip_active_report.py
new file mode 100644
index 0000000000..e67e342067
--- /dev/null
+++ b/spinn_front_end_common/utilities/report_functions/chip_active_report.py
@@ -0,0 +1,98 @@
+# Copyright (c) 2017-2019 The University of Manchester
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import logging
+import numpy
+import os
+from spinn_utilities.config_holder import get_config_int
+from spinn_utilities.exceptions import SpiNNUtilsException
+from spinn_utilities.log import FormatAdapter
+from spinn_front_end_common.data import FecDataView
+from spinn_front_end_common.interface.buffer_management.storage_objects \
+ import (BufferDatabase)
+from spinn_front_end_common.interface.interface_functions.compute_energy_used\
+ import (MILLIWATTS_PER_CHIP_ACTIVE_OVERHEAD)
+
+logger = FormatAdapter(logging.getLogger(__name__))
+
+#: conversion factor from joules to kilowatt-hours
+JOULES_TO_KILOWATT_HOURS = 3600000
+
+
+def write_chip_active_report(report_path=None, buffer_path=None):
+ """ Writes the report.
+
+ :param report_path: Where to write the report if not using the default
+ :type report_path: None or str
+ :param buffer_path: Where the buffer database (sqlite3 file) is located
+ if not using the default.
+ :type buffer_path: None or str
+ :rtype: None
+ """
+ if report_path is None:
+ try:
+ report_path = os.path.join(
+ FecDataView.get_run_dir_path(),
+ f"chip_active_report{FecDataView.get_reset_str()}.rpt")
+ except SpiNNUtilsException:
+ report_path = os.path.join(
+ os.path.curdir, "chip_active_report.rpt")
+ logger.warning(f"no report_path so writing to {report_path}")
+
+ # create detailed report
+ with open(report_path, "w", encoding="utf-8") as f:
+ __write_report(f, buffer_path)
+
+
+def __write_report(f, buffer_path):
+ db = BufferDatabase(buffer_path)
+ n_samples_per_recording = get_config_int(
+ "EnergyMonitor", "n_samples_per_recording_entry")
+
+ milliwatts = MILLIWATTS_PER_CHIP_ACTIVE_OVERHEAD / 18
+ activity_total = 0
+ energy_total = 0
+
+ for row in db.iterate_chip_power_monitor_cores():
+ record_raw, data_missing = db.get_region_data(
+ row["x"], row["y"], row["processor"], 0)
+ results = (
+ numpy.frombuffer(record_raw, dtype="uint32").reshape(-1, 18) /
+ n_samples_per_recording)
+ active_sums = numpy.sum(results, axis=0)
+ activity_count = numpy.sum(results)
+ time_for_recorded_sample =\
+ (row["sampling_frequency"] * n_samples_per_recording) / 1000
+ energy_factor = time_for_recorded_sample * milliwatts
+
+ for core in range(0, 18):
+ label = db.get_core_name(row["x"], row["y"], core)
+ if (active_sums[core] > 0) or label:
+ f.write(
+ f"processor {row['x']}:{row['y']}:{core}({label})"
+ f" was active for {active_sums[core]}ms "
+ f" using { active_sums[core] * energy_factor} Joules\n")
+
+ energy = activity_count * energy_factor
+ activity_total += activity_count
+ energy_total += energy
+ f.write(
+ f"Total for chip {row['x']}:{row['y']} "
+ f" was {activity_count}ms of activity "
+ f" using {energy} Joules\n\n")
+ f.write(
+ f"Total "
+ f" was {activity_total}ms of activity "
+ f" using {energy_total} Joules\n\n")
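A minimal call sketch for the new report; both paths are illustrative, and omitting them falls back on the run-directory defaults shown above (see also unittests/utilities/test_chip_active.py later in this diff):

from spinn_front_end_common.utilities.report_functions import (
    write_chip_active_report)

# Sketch only: point the report at an existing buffer database.
write_chip_active_report(
    report_path="my_active.rpt", buffer_path="data.sqlite3")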
diff --git a/spinn_front_end_common/utilities/report_functions/energy_report.py b/spinn_front_end_common/utilities/report_functions/energy_report.py
index 43af0ddb65..f42beedc3b 100644
--- a/spinn_front_end_common/utilities/report_functions/energy_report.py
+++ b/spinn_front_end_common/utilities/report_functions/energy_report.py
@@ -20,7 +20,7 @@
from spinn_utilities.log import FormatAdapter
from spinn_front_end_common.data import FecDataView
from spinn_front_end_common.interface.provenance import (
- FecTimer, ProvenanceReader, TimerCategory)
+ FecTimer, GlobalProvenance, TimerCategory)
from spinn_front_end_common.utility_models import ChipPowerMonitorMachineVertex
from spinn_front_end_common.utilities.exceptions import ConfigurationException
from spinn_front_end_common.interface.interface_functions.compute_energy_used\
@@ -309,8 +309,8 @@ def _write_load_time_cost(power_used, f):
"""
# find time in milliseconds
- reader = ProvenanceReader()
- total_time_ms = reader.get_timer_sum_by_category(TimerCategory.LOADING)
+ with GlobalProvenance() as db:
+ total_time_ms = db.get_timer_sum_by_category(TimerCategory.LOADING)
# handle active routers etc
active_router_cost = (
@@ -336,9 +336,9 @@ def _write_data_extraction_time_cost(power_used, f):
"""
# find time
- reader = ProvenanceReader()
- total_time_ms = reader.get_timer_sum_by_algorithm(
- FecTimer.APPLICATION_RUNNER)
+ with GlobalProvenance() as db:
+ total_time_ms = db.get_timer_sum_by_algorithm(
+ FecTimer.APPLICATION_RUNNER)
# handle active routers etc
energy_cost_of_active_router = (
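The timer queries have moved from ProvenanceReader to GlobalProvenance; a short sketch of the calls used in energy_report.py above (the category and algorithm chosen are examples):

from spinn_front_end_common.interface.provenance import (
    FecTimer, GlobalProvenance, TimerCategory)

# Sketch only: both sums are in milliseconds, matching the comment in
# energy_report.py above.
with GlobalProvenance() as db:
    loading_ms = db.get_timer_sum_by_category(TimerCategory.LOADING)
    runner_ms = db.get_timer_sum_by_algorithm(FecTimer.APPLICATION_RUNNER)
    print(loading_ms, runner_ms)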
diff --git a/spinn_front_end_common/utilities/sqlite_db.py b/spinn_front_end_common/utilities/sqlite_db.py
index 89259517b9..010d24a87f 100644
--- a/spinn_front_end_common/utilities/sqlite_db.py
+++ b/spinn_front_end_common/utilities/sqlite_db.py
@@ -112,7 +112,7 @@ def __init__(self, database_file=None, *, read_only=False, ddl_file=None,
if text_factory:
self.__db.text_factory = text_factory
- if ddl_file:
+ if not read_only and ddl_file:
with open(ddl_file, encoding="utf-8") as f:
sql = f.read()
self.__db.executescript(sql)
diff --git a/spinn_front_end_common/utility_models/data_speed_up_packet_gatherer_machine_vertex.py b/spinn_front_end_common/utility_models/data_speed_up_packet_gatherer_machine_vertex.py
index 5c9dc89457..375e1e8dda 100644
--- a/spinn_front_end_common/utility_models/data_speed_up_packet_gatherer_machine_vertex.py
+++ b/spinn_front_end_common/utility_models/data_speed_up_packet_gatherer_machine_vertex.py
@@ -1479,8 +1479,6 @@ def get_provenance_data_from_machine(self, placement):
n_sdp_sent, n_sdp_recvd, n_in_streams, n_out_streams = (
_FOUR_WORDS.unpack_from(data))
with ProvenanceWriter() as db:
- db.add_core_name(
- placement.x, placement.y, placement.p, placement.vertex.label)
db.insert_core(
placement.x, placement.y, placement.p,
"Sent_SDP_Packets", n_sdp_sent)
diff --git a/unittests/data/test_simulator_data.py b/unittests/data/test_simulator_data.py
index 9e10fe8cc8..8e5a1fb342 100644
--- a/unittests/data/test_simulator_data.py
+++ b/unittests/data/test_simulator_data.py
@@ -247,6 +247,8 @@ def test_directories_reset(self):
writer = FecDataWriter.setup()
run_dir = FecDataView.get_run_dir_path()
self.assertIn("run_1", run_dir)
+ self.assertEqual(0, writer.get_reset_number())
+ self.assertEqual("", writer.get_reset_str())
writer.start_run()
run_dir = FecDataView.get_run_dir_path()
self.assertIn("run_1", run_dir)
@@ -259,7 +261,10 @@ def test_directories_reset(self):
writer.finish_run()
run_dir = FecDataView.get_run_dir_path()
self.assertIn("run_1", run_dir)
+ self.assertEqual(0, writer.get_reset_number())
writer.hard_reset()
+ self.assertEqual(1, writer.get_reset_number())
+ self.assertEqual("1", writer.get_reset_str())
run_dir = FecDataView.get_run_dir_path()
self.assertIn("run_3", run_dir)
writer.start_run()
@@ -321,11 +326,14 @@ def test_run_number(self):
self.assertEqual(3, FecDataView.get_run_number())
# run_dir_path only changed on hard reset
self.assertIn("run_1", FecDataView.get_run_dir_path())
+ self.assertEqual(0, writer.get_reset_number())
writer.soft_reset()
+ self.assertEqual(1, writer.get_reset_number())
self.assertEqual(3, FecDataView.get_run_number())
# run_dir_path only changed on hard reset
self.assertIn("run_1", FecDataView.get_run_dir_path())
writer.hard_reset()
+ self.assertEqual(1, writer.get_reset_number())
self.assertEqual(3, FecDataView.get_run_number())
# run_dir_path changed by hard reset
self.assertIn("run_3", FecDataView.get_run_dir_path())
diff --git a/unittests/interface/buffer_management/test_buffered_database.py b/unittests/interface/buffer_management/test_buffered_database.py
index d6e0cb0cac..3733b89b29 100644
--- a/unittests/interface/buffer_management/test_buffered_database.py
+++ b/unittests/interface/buffer_management/test_buffered_database.py
@@ -15,6 +15,9 @@
import unittest
import os
+from pacman.model.graphs.machine import SimpleMachineVertex
+from pacman.model.placements import Placement, Placements
+from spinn_front_end_common.data.fec_data_writer import FecDataWriter
from spinn_front_end_common.interface.buffer_management.storage_objects \
import BufferDatabase
from spinn_front_end_common.interface.config_setup import unittest_setup
@@ -29,19 +32,39 @@ def test_use_database(self):
f = BufferDatabase.default_database_file()
self.assertFalse(os.path.isfile(f), "no existing DB at first")
- brd = BufferDatabase()
- self.assertTrue(os.path.isfile(f), "DB now exists")
+ with BufferDatabase() as brd:
+ self.assertTrue(os.path.isfile(f), "DB now exists")
- # TODO missing
- # data, missing = brd.get_region_data(0, 0, 0, 0)
- # self.assertTrue(missing, "data should be 'missing'")
- # self.assertEqual(data, b"")
+ # TODO missing
+ # data, missing = brd.get_region_data(0, 0, 0, 0)
+ # self.assertTrue(missing, "data should be 'missing'")
+ # self.assertEqual(data, b"")
- brd.store_data_in_region_buffer(0, 0, 0, 0, False, b"abc")
- brd.store_data_in_region_buffer(0, 0, 0, 0, False, b"def")
- data, missing = brd.get_region_data(0, 0, 0, 0)
+ brd.store_data_in_region_buffer(0, 0, 0, 0, False, b"abc")
+ brd.store_data_in_region_buffer(0, 0, 0, 0, False, b"def")
+ data, missing = brd.get_region_data(0, 0, 0, 0)
- self.assertFalse(missing, "data shouldn't be 'missing'")
- self.assertEqual(bytes(data), b"abcdef")
+ self.assertFalse(missing, "data shouldn't be 'missing'")
+ self.assertEqual(bytes(data), b"abcdef")
- self.assertTrue(os.path.isfile(f), "DB still exists")
+ self.assertTrue(os.path.isfile(f), "DB still exists")
+
+ def test_placements(self):
+ writer = FecDataWriter.mock()
+ info = Placements([])
+ p1 = Placement(SimpleMachineVertex(None, label="V1"), 1, 2, 3)
+ info.add_placement(p1)
+ v2 = SimpleMachineVertex(None, label="V2")
+ p2 = Placement(v2, 1, 2, 5)
+ info.add_placement(p2)
+ info.add_placement(Placement(SimpleMachineVertex(None), 2, 2, 3))
+ writer.set_placements(info)
+ with BufferDatabase() as db:
+ db.store_data_in_region_buffer(1, 2, 3, 0, False, b"abc")
+ db.store_vertex_labels()
+ label = db.get_core_name(1, 2, 3)
+ self.assertEqual("V1", label)
+ label = db.get_core_name(1, 2, 5)
+ self.assertEqual("V2", label)
+ label = db.get_core_name(4, 3, 0)
+ self.assertEqual("SCAMP(OS)_4:3", label)
diff --git a/unittests/interface/provenance/test_provenance_database.py b/unittests/interface/provenance/test_provenance_database.py
index 6a4e8ead03..c17c67de45 100644
--- a/unittests/interface/provenance/test_provenance_database.py
+++ b/unittests/interface/provenance/test_provenance_database.py
@@ -22,7 +22,8 @@
from spinn_utilities.config_holder import set_config
from spinn_front_end_common.interface.config_setup import unittest_setup
from spinn_front_end_common.interface.provenance import (
- LogStoreDB, ProvenanceWriter, ProvenanceReader, TimerCategory, TimerWork)
+ LogStoreDB, GlobalProvenance, ProvenanceWriter, ProvenanceReader,
+ TimerCategory, TimerWork)
logger = FormatAdapter(logging.getLogger(__name__))
@@ -44,25 +45,26 @@ def as_set(self, items):
return results
def test_version(self):
- with ProvenanceWriter() as db:
+ with GlobalProvenance() as db:
db.insert_version("spinn_utilities_version", "1!6.0.1")
db.insert_version("numpy_version", "1.17.4")
- data = ProvenanceReader().run_query("select * from version_provenance")
- versions = [
- (1, 'spinn_utilities_version', '1!6.0.1'),
- (2, 'numpy_version', '1.17.4')]
- self.assertListEqual(data, versions)
+ data = db.run_query("select * from version_provenance")
+ versions = [
+ (1, 'spinn_utilities_version', '1!6.0.1'),
+ (2, 'numpy_version', '1.17.4')]
+ self.assertListEqual(data, versions)
def test_power(self):
with ProvenanceWriter() as db:
db.insert_power("num_cores", 34)
db.insert_power("total time (seconds)", 6.81)
- data = ProvenanceReader().run_query("select * from power_provenance")
- power = [(1, 'num_cores', 34.0), (2, 'total time (seconds)', 6.81)]
- self.assertListEqual(data, power)
+ with ProvenanceReader() as db:
+ data = db.run_query("select * from power_provenance")
+ power = [(1, 'num_cores', 34.0), (2, 'total time (seconds)', 6.81)]
+ self.assertListEqual(data, power)
def test_timings(self):
- with ProvenanceWriter() as db:
+ with GlobalProvenance() as db:
mapping_id = db.insert_category(TimerCategory.MAPPING, False)
db.insert_timing(
mapping_id, "compressor", TimerWork.OTHER,
@@ -80,22 +82,21 @@ def test_timings(self):
db.insert_timing(
execute_id, "clear", TimerWork.OTHER,
timedelta(milliseconds=4), None)
- reader = ProvenanceReader()
- data = reader.get_timer_sum_by_category(TimerCategory.MAPPING)
- self.assertEqual(12 + 123, data)
- data = reader.get_timer_sum_by_category(TimerCategory.RUN_LOOP)
- self.assertEqual(134 + 344 + 4, data)
- data = reader.get_timer_sum_by_category(TimerCategory.SHUTTING_DOWN)
- self.assertEqual(0, data)
- data = reader.get_timer_sum_by_algorithm("router_report")
- self.assertEqual(123, data)
- data = reader.get_timer_sum_by_algorithm("clear")
- self.assertEqual(4, data)
- data = reader.get_timer_sum_by_algorithm("junk")
- self.assertEqual(0, data)
+ data = db.get_timer_sum_by_category(TimerCategory.MAPPING)
+ self.assertEqual(12 + 123, data)
+ data = db.get_timer_sum_by_category(TimerCategory.RUN_LOOP)
+ self.assertEqual(134 + 344 + 4, data)
+ data = db.get_timer_sum_by_category(TimerCategory.SHUTTING_DOWN)
+ self.assertEqual(0, data)
+ data = db.get_timer_sum_by_algorithm("router_report")
+ self.assertEqual(123, data)
+ data = db.get_timer_sum_by_algorithm("clear")
+ self.assertEqual(4, data)
+ data = db.get_timer_sum_by_algorithm("junk")
+ self.assertEqual(0, data)
def test_category_timings(self):
- with ProvenanceWriter() as db:
+ with GlobalProvenance() as db:
id = db.insert_category(TimerCategory.MAPPING, False)
db.insert_category_timing(id, timedelta(milliseconds=12))
@@ -108,22 +109,17 @@ def test_category_timings(self):
id = db.insert_category(TimerCategory.RUN_LOOP, False)
db.insert_category_timing(id, timedelta(milliseconds=344))
- reader = ProvenanceReader()
- data = reader.get_category_timer_sum(TimerCategory.MAPPING)
+ data = db.get_category_timer_sum(TimerCategory.MAPPING)
self.assertEqual(12 + 123, data)
- def test_other(self):
- with ProvenanceWriter() as db:
- db.insert_other("foo", "bar", 12)
-
def test_gatherer(self):
with ProvenanceWriter() as db:
db.insert_gatherer(
1, 3, 1715886360, 80, 1, "Extraction_time", 00.234)
db.insert_gatherer(
1, 3, 1715886360, 80, 1, "Lost Packets", 12)
- reader = ProvenanceReader()
- data = reader.run_query("Select * from gatherer_provenance")
+ with ProvenanceReader() as db:
+ data = db.run_query("Select * from gatherer_provenance")
expected = [(1, 1, 3, 1715886360, 80, 1, 'Extraction_time', 0.234),
(2, 1, 3, 1715886360, 80, 1, 'Lost Packets', 12.0)]
self.assertListEqual(expected, data)
@@ -135,12 +131,12 @@ def test_router(self):
db.insert_router(1, 3, "des2", 67)
db.insert_router(1, 3, "des1", 48)
db.insert_router(5, 5, "des1", 48, False)
- reader = ProvenanceReader()
- data = set(reader.get_router_by_chip("des1"))
- chip_set = {(1, 3, 34), (1, 2, 45), (1, 3, 48), (5, 5, 48)}
- self.assertSetEqual(data, chip_set)
- data = reader.get_router_by_chip("junk")
- self.assertEqual(0, len(data))
+ with ProvenanceReader() as db:
+ data = set(db.get_router_by_chip("des1"))
+ chip_set = {(1, 3, 34), (1, 2, 45), (1, 3, 48), (5, 5, 48)}
+ self.assertSetEqual(data, chip_set)
+ data = db.get_router_by_chip("junk")
+ self.assertEqual(0, len(data))
def test_monitor(self):
with ProvenanceWriter() as db:
@@ -148,12 +144,12 @@ def test_monitor(self):
db.insert_monitor(1, 2, "des1", 45)
db.insert_monitor(1, 3, "des2", 67)
db.insert_monitor(1, 3, "des1", 48)
- reader = ProvenanceReader()
- data = set(reader.get_monitor_by_chip("des1"))
- chip_set = {(1, 3, 34), (1, 2, 45), (1, 3, 48)}
- self.assertSetEqual(data, chip_set)
- data = reader.get_monitor_by_chip("junk")
- self.assertEqual(0, len(data))
+ with ProvenanceReader() as db:
+ data = set(db.get_monitor_by_chip("des1"))
+ chip_set = {(1, 3, 34), (1, 2, 45), (1, 3, 48)}
+ self.assertSetEqual(data, chip_set)
+ data = db.get_monitor_by_chip("junk")
+ self.assertEqual(0, len(data))
def test_cores(self):
with ProvenanceWriter() as db:
@@ -162,16 +158,6 @@ def test_cores(self):
db.insert_core(1, 3, 2, "des2", 67)
db.insert_core(1, 3, 1, "des1", 48)
- def test_core_name(self):
- with ProvenanceWriter() as db:
- db.add_core_name(1, 3, 2, "first_core")
- db.add_core_name(1, 3, 3, "second_core")
- db.add_core_name(1, 3, 2, "first_core")
- db.add_core_name(1, 3, 2, "new_name is ignored")
- reader = ProvenanceReader()
- data = reader.run_query("Select * from core_mapping")
- self.assertEqual(2, len(data))
-
def test_messages(self):
set_config("Reports", "provenance_report_cutoff", 3)
with LogCapture() as lc:
@@ -182,15 +168,15 @@ def test_messages(self):
db.insert_report("vier")
self.assertEqual(3, len(lc.records))
- reader = ProvenanceReader()
- data = reader.messages()
+ with ProvenanceReader() as db:
+ data = db.messages()
self.assertEqual(4, len(data))
def test_connector(self):
with ProvenanceWriter() as db:
db.insert_connector("the pre", "A post", "OneToOne", "foo", 12)
- reader = ProvenanceReader()
- data = reader.run_query("Select * from connector_provenance")
+ with ProvenanceReader() as db:
+ data = db.run_query("Select * from connector_provenance")
expected = [(1, 'the pre', 'A post', 'OneToOne', 'foo', 12)]
self.assertListEqual(expected, data)
@@ -217,7 +203,7 @@ def test_database_locked(self):
ls = LogStoreDB()
logger.set_log_store(ls)
logger.warning("this works")
- with ProvenanceWriter() as db:
+ with GlobalProvenance() as db:
db._test_log_locked("locked")
logger.warning("not locked")
logger.warning("this wis fine")
diff --git a/unittests/utilities/.gitignore b/unittests/utilities/.gitignore
new file mode 100644
index 0000000000..b911337edd
--- /dev/null
+++ b/unittests/utilities/.gitignore
@@ -0,0 +1 @@
+*.rpt
\ No newline at end of file
diff --git a/unittests/utilities/data.sqlite3 b/unittests/utilities/data.sqlite3
new file mode 100644
index 0000000000..3d85d3ef85
Binary files /dev/null and b/unittests/utilities/data.sqlite3 differ
diff --git a/unittests/utilities/test_chip_active.py b/unittests/utilities/test_chip_active.py
new file mode 100644
index 0000000000..04dc86546e
--- /dev/null
+++ b/unittests/utilities/test_chip_active.py
@@ -0,0 +1,62 @@
+# Copyright (c) 2017-2022 The University of Manchester
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import unittest
+from spinn_utilities.exceptions import InvalidDirectory
+from spinn_front_end_common.data.fec_data_writer import FecDataWriter
+from spinn_front_end_common.interface.config_setup import unittest_setup
+from spinn_front_end_common.utilities.report_functions import (
+ write_chip_active_report)
+
+
+class TestChipActive(unittest.TestCase):
+
+ def setUp(cls):
+ unittest_setup()
+
+ def test_no_params(self):
+ try:
+ write_chip_active_report()
+ failed = False
+ except Exception as ex:
+ self.assertIn("no such table", str(ex))
+ failed = True
+ self.assertTrue(failed)
+
+ def test_db_only(self):
+ # make sure there is no run_dir_path so it falls back on the default
+ writer = FecDataWriter.setup()
+ try:
+ writer.set_run_dir_path("THIS DIRECTORY DOES NOT EXIST")
+ except InvalidDirectory:
+ pass
+ db_path = os.path.join(os.path.dirname(__file__), "data.sqlite3")
+ write_chip_active_report(buffer_path=db_path)
+
+ def test_all_params(self):
+ # make sure there is no run_dir_path so it falls back on the default
+ writer = FecDataWriter.setup()
+ try:
+ writer.set_run_dir_path("THIS DIRECTORY DOES NOT EXIST")
+ except InvalidDirectory:
+ pass
+ db_path = os.path.join(os.path.dirname(__file__), "data.sqlite3")
+ report = os.path.join(os.path.dirname(__file__), "my_active.rpt")
+ write_chip_active_report(report_path=report, buffer_path=db_path)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/unittests/utilities/test_fec_timer.py b/unittests/utilities/test_fec_timer.py
index 4874d1be2e..20a32eb320 100644
--- a/unittests/utilities/test_fec_timer.py
+++ b/unittests/utilities/test_fec_timer.py
@@ -17,7 +17,7 @@
import unittest
from testfixtures import LogCapture
from spinn_front_end_common.interface.provenance import (
- FecTimer, ProvenanceReader, TimerCategory, TimerWork)
+ FecTimer, GlobalProvenance, TimerCategory, TimerWork)
from spinn_front_end_common.interface.config_setup import unittest_setup
@@ -74,17 +74,15 @@ def test_nested(self):
FecTimer.end_category(TimerCategory.GET_MACHINE)
FecTimer.end_category(TimerCategory.MAPPING)
FecTimer.end_category(TimerCategory.RUN_OTHER)
- on, off = ProvenanceReader().get_category_timer_sums(
- TimerCategory.RUN_OTHER)
- total = ProvenanceReader().get_category_timer_sum(
- TimerCategory.RUN_OTHER)
- self.assertGreater(on, 0)
- self.assertGreater(off, 0)
- self.assertEqual(total, on + off)
- on, off = ProvenanceReader().get_category_timer_sums(
- TimerCategory.MAPPING)
- self.assertGreater(on, 0)
- self.assertGreater(off, 0)
+ with GlobalProvenance() as db:
+ on, off = db.get_category_timer_sums(TimerCategory.RUN_OTHER)
+ total = db.get_category_timer_sum(TimerCategory.RUN_OTHER)
+ self.assertGreater(on, 0)
+ self.assertGreater(off, 0)
+ self.assertEqual(total, on + off)
+ on, off = db.get_category_timer_sums(TimerCategory.MAPPING)
+ self.assertGreater(on, 0)
+ self.assertGreater(off, 0)
def test_repeat_middle(self):
FecTimer.start_category(TimerCategory.WAITING)
@@ -107,13 +105,14 @@ def test_repeat_stopped(self):
FecTimer.start_category(TimerCategory.WAITING)
FecTimer.start_category(TimerCategory.SHUTTING_DOWN)
FecTimer.start_category(TimerCategory.SHUTTING_DOWN)
- total = ProvenanceReader().get_category_timer_sum(
- TimerCategory.SHUTTING_DOWN)
- self.assertEqual(total, 0)
- FecTimer.stop_category_timing()
- total = ProvenanceReader().get_category_timer_sum(
- TimerCategory.SHUTTING_DOWN)
- self.assertGreater(total, 0)
+ with GlobalProvenance() as db:
+ total = db.get_category_timer_sum(
+ TimerCategory.SHUTTING_DOWN)
+ self.assertEqual(total, 0)
+ FecTimer.stop_category_timing()
+ total = db.get_category_timer_sum(
+ TimerCategory.SHUTTING_DOWN)
+ self.assertGreater(total, 0)
def test_repeat_mess(self):
FecTimer.start_category(TimerCategory.WAITING)
@@ -136,38 +135,33 @@ def test_mess(self):
def test_stop_category_timing_clean(self):
FecTimer.start_category(TimerCategory.WAITING)
FecTimer.start_category(TimerCategory.RUN_OTHER)
- before = ProvenanceReader().get_category_timer_sum(
- TimerCategory.WAITING)
- FecTimer.start_category(TimerCategory.MAPPING)
- FecTimer.end_category(TimerCategory.MAPPING)
- FecTimer.end_category(TimerCategory.RUN_OTHER)
- FecTimer.stop_category_timing()
- total = ProvenanceReader().get_category_timer_sum(
- TimerCategory.WAITING)
- self.assertGreater(total, before)
- other = ProvenanceReader().get_category_timer_sum(
- TimerCategory.RUN_OTHER)
- self.assertGreater(other, 0)
+ with GlobalProvenance() as db:
+ before = db.get_category_timer_sum(TimerCategory.WAITING)
+ FecTimer.start_category(TimerCategory.MAPPING)
+ FecTimer.end_category(TimerCategory.MAPPING)
+ FecTimer.end_category(TimerCategory.RUN_OTHER)
+ FecTimer.stop_category_timing()
+ total = db.get_category_timer_sum(TimerCategory.WAITING)
+ self.assertGreater(total, before)
+ other = db.get_category_timer_sum(TimerCategory.RUN_OTHER)
+ self.assertGreater(other, 0)
def test_stop_category_timing_messy(self):
FecTimer.start_category(TimerCategory.WAITING)
FecTimer.start_category(TimerCategory.RUN_OTHER)
- before = ProvenanceReader().get_category_timer_sum(
- TimerCategory.WAITING)
- FecTimer.start_category(TimerCategory.MAPPING)
- FecTimer.start_category(TimerCategory.SHUTTING_DOWN)
- FecTimer.end_category(TimerCategory.SHUTTING_DOWN)
- FecTimer.stop_category_timing()
- mapping = ProvenanceReader().get_category_timer_sum(
- TimerCategory.MAPPING)
- self.assertGreater(mapping, 0)
- total = ProvenanceReader().get_category_timer_sum(
- TimerCategory.WAITING)
- # As we never ended RUN_OTHER we never got back to WAITING
- self.assertEqual(total, before)
- other = ProvenanceReader().get_category_timer_sum(
- TimerCategory.RUN_OTHER)
- self.assertGreater(other, 0)
+ with GlobalProvenance() as db:
+ before = db.get_category_timer_sum(TimerCategory.WAITING)
+ FecTimer.start_category(TimerCategory.MAPPING)
+ FecTimer.start_category(TimerCategory.SHUTTING_DOWN)
+ FecTimer.end_category(TimerCategory.SHUTTING_DOWN)
+ FecTimer.stop_category_timing()
+ mapping = db.get_category_timer_sum(TimerCategory.MAPPING)
+ self.assertGreater(mapping, 0)
+ total = db.get_category_timer_sum(TimerCategory.WAITING)
+ # As we never ended RUN_OTHER we never got back to WAITING
+ self.assertEqual(total, before)
+ other = db.get_category_timer_sum(TimerCategory.RUN_OTHER)
+ self.assertGreater(other, 0)
def test_stop_last_category_blocked(self):
FecTimer.start_category(TimerCategory.WAITING)