diff --git a/.gitignore b/.gitignore index 005625ff..b8d71b11 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,8 @@ -*.py[cod] +# Python cruft +__pycache__ + +# MacOS Cruft +.DS_Store # Packages *.egg @@ -10,23 +14,27 @@ parts sdist develop-eggs tmp + +# Caches .mypy_cache +# Testing +.tox +.coverage +htmlcov + # Installer logs pip-log.txt -# Other +# Tools (editors, IDEs, local config) *.iml .idea *.swp -doc/_build/ .vim +.venv + +# Other +doc/_build/ # Local sandbox/playground /sandbox - -.DS_Store - -# Coverage output -.coverage -htmlcov diff --git a/.travis.yml b/.travis.yml index cf4cce8c..a9b75193 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,8 +1,15 @@ +dist: xenial + language: python + python: - - 3.6-dev -install: - - pip install -r requirements.txt - - pip install -r requirements-optional.txt -script: python setup.py develop && python setup.py test + - "3.6" + - "3.7" + +install: + - pip install -U pip setuptools wheel + - pip install -U -e .[dev] + - pip install -U -r requirements-optional.txt +script: + - pytest diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..9bbb31ae --- /dev/null +++ b/Makefile @@ -0,0 +1,18 @@ +.PHONY: +test: + pytest tests + +.PHONY: +lint: + # for now, extend to other directories later + flake8 cubes + +.PHONY: +clean: + find . 
\( -path '*/__pycache__/*' -o -name __pycache__ \) -delete + + +.PHONY: +format: + isort -rc cubes + black cubes diff --git a/cubes/__init__.py b/cubes/__init__.py index 7ed08fbd..b820c73d 100644 --- a/cubes/__init__.py +++ b/cubes/__init__.py @@ -1,17 +1,15 @@ -"""OLAP Cubes""" +"""OLAP Cubes.""" __version__ = "1.1" +from .auth import * +from .calendar import * from .common import * -from .query import * -from .metadata import * -from .workspace import * from .errors import * from .formatters import * -from .mapper import * -from .calendar import * -from .auth import * from .logging import * +from .mapper import * +from .metadata import * from .namespace import * - - +from .query import * +from .workspace import * diff --git a/cubes/auth.py b/cubes/auth.py index 87a3ceec..68c06ee9 100644 --- a/cubes/auth.py +++ b/cubes/auth.py @@ -2,11 +2,14 @@ import os.path from collections import defaultdict -from .query import Cell, cut_from_string, cut_from_dict, PointCut -from .metadata import string_to_dimension_level -from .errors import UserError, ConfigurationError, NoSuchDimensionError +from typing import Dict, List, Optional + from .common import read_json_file, sorted_dependencies +from .errors import ConfigurationError, NoSuchDimensionError, UserError from .ext import Extensible +from .metadata import string_to_dimension_level +from .metadata.cube import Cube +from .query import Cell, PointCut, cut_from_dict, cut_from_string from .settings import Setting, SettingType __all__ = ( @@ -14,21 +17,28 @@ "SimpleAuthorizer", "AuthorizationError", "NotAuthorized", - "right_from_dict" + "right_from_dict", ) -ALL_CUBES_WILDCARD = '*' +ALL_CUBES_WILDCARD = "*" + class AuthorizationError(UserError): - """Raised when there is any authorization-related error. Use - more specific `NotAuthorized` when access right is denied.""" + """Raised when there is any authorization-related error. + + Use more specific `NotAuthorized` when access right is denied. 
+ """ + pass + class NotAuthorized(AuthorizationError): """Raised when user is not authorized for the request.""" + # Note: This is not called NotAuthorizedError as it is not in fact an # error, it is just type of signal. + class Authorizer(Extensible, abstract=True): __extension_type__ = "authorizer" @@ -42,9 +52,11 @@ def authorize(self, token, cubes): def restricted_cell(self, token, cube, cell=None): """Restricts the `cell` for `cube` according to authorization by - `token`. If no cell is provided or the cell is empty then returns - the restriction cell. If there is no restriction, returns the original - `cell` if provided or `None`. + `token`. + + If no cell is provided or the cell is empty then returns the + restriction cell. If there is no restriction, returns the + original `cell` if provided or `None`. """ return cell @@ -56,13 +68,19 @@ def hierarchy_limits(self, token, cube): class NoopAuthorizer(Authorizer, name="noop"): def __init__(self): - super(NoopAuthorizer, self).__init__() - - -class _SimpleAccessRight(object): - def __init__(self, roles, allowed_cubes, denied_cubes, cell_restrictions, - hierarchy_limits): - self.roles = set(roles) if roles else set([]) + super().__init__() + + +class _SimpleAccessRight: + def __init__( + self, + roles: Optional[List[str]], + allowed_cubes: Optional[List[str]], + denied_cubes: Optional[List[str]], + cell_restrictions, + hierarchy_limits, + ) -> None: + self.roles = set(roles) if roles else set() self.cell_restrictions = cell_restrictions or {} self.hierarchy_limits = defaultdict(list) @@ -76,11 +94,11 @@ def __init__(self, roles, allowed_cubes, denied_cubes, cell_restrictions, self.hierarchy_limits = dict(self.hierarchy_limits) - self.allowed_cubes = set(allowed_cubes) if allowed_cubes else set([]) - self.denied_cubes = set(denied_cubes) if denied_cubes else set([]) + self.allowed_cubes = set(allowed_cubes) if allowed_cubes else set() + self.denied_cubes = set(denied_cubes) if denied_cubes else set() 
self._get_patterns() - def _get_patterns(self): + def _get_patterns(self) -> None: self.allowed_cube_suffix = [] self.allowed_cube_prefix = [] self.denied_cube_suffix = [] @@ -98,13 +116,14 @@ def _get_patterns(self): if cube.endswith("*"): self.denied_cube_prefix.append(cube[:-1]) - def merge(self, other): + def merge(self, other: "_SimpleAccessRight") -> None: """Merge `right` with the receiver: * `allowed_cubes` are merged (union) * `denied_cubes` are merged (union) * `cell_restrictions` from `other` with same cube replace restrictions - from the receiver""" + from the receiver + """ self.roles |= other.roles self.allowed_cubes |= other.allowed_cubes @@ -116,7 +135,7 @@ def merge(self, other): else: self.cell_restrictions[cube] += restrictions - for cube, limits in other.hierarchy_limits.items(): + for cube, limits in other.hierarchy_limits.items(): if not cube in self.hierarchy_limits: self.hierarchy_limits[cube] = limits else: @@ -124,12 +143,13 @@ def merge(self, other): self._get_patterns() - def is_allowed(self, name, allow_after_denied=True): + def is_allowed(self, name: str, allow_after_denied: bool = True) -> bool: allow = False if self.allowed_cubes: - if (name in self.allowed_cubes) or \ - (ALL_CUBES_WILDCARD in self.allowed_cubes): + if (name in self.allowed_cubes) or ( + ALL_CUBES_WILDCARD in self.allowed_cubes + ): allow = True if not allow and self.allowed_cube_prefix: @@ -139,8 +159,7 @@ def is_allowed(self, name, allow_after_denied=True): deny = False if self.denied_cubes: - if (name in self.denied_cubes) or \ - (ALL_CUBES_WILDCARD in self.denied_cubes): + if (name in self.denied_cubes) or (ALL_CUBES_WILDCARD in self.denied_cubes): deny = True if not deny and self.denied_cube_prefix: @@ -177,61 +196,66 @@ def to_dict(self): "allowed_cubes": list(self.allowed_cubes), "denied_cubes": list(self.denied_cubes), "cell_restrictions": self.cell_restrictions, - "hierarchy_limits": self.hierarchy_limits + "hierarchy_limits": self.hierarchy_limits, } return 
as_dict -def right_from_dict(info): +def right_from_dict(info: Dict[str, List[str]]) -> _SimpleAccessRight: return _SimpleAccessRight( - roles=info.get('roles'), - allowed_cubes=info.get('allowed_cubes'), - denied_cubes=info.get('denied_cubes'), - cell_restrictions=info.get('cell_restrictions'), - hierarchy_limits=info.get('hierarchy_limits') + roles=info.get("roles"), + allowed_cubes=info.get("allowed_cubes"), + denied_cubes=info.get("denied_cubes"), + cell_restrictions=info.get("cell_restrictions"), + hierarchy_limits=info.get("hierarchy_limits"), ) + class SimpleAuthorizer(Authorizer, name="simple"): extension_settings = [ Setting( - name= "rights_file", - desc= "JSON file with access rights", - type= SettingType.str, + name="rights_file", + desc="JSON file with access rights", + type=SettingType.str, ), Setting( - name= "roles_file", - desc= "JSON file with access right roles", - type= SettingType.str, + name="roles_file", + desc="JSON file with access right roles", + type=SettingType.str, ), Setting( - name= "order", - desc= "Order of allow/deny", - type= SettingType.str, - values= ["allow_deny", "deny_allow"] + name="order", + desc="Order of allow/deny", + type=SettingType.str, + values=["allow_deny", "deny_allow"], ), + Setting(name="guest", desc="Name of the 'guest' role", type=SettingType.str), Setting( - name= "guest", - desc= "Name of the 'guest' role", - type= SettingType.str, + name="identity_dimension", + desc="Name of dimension which key is equivalent to the identity token", + type=SettingType.str, ), - Setting( - name= "identity_dimension", - desc= "Name of dimension which key is equivalent to the identity " - "token", - type= SettingType.str, - ), - ] - def __init__(self, rights_file=None, roles_file=None, roles=None, - rights=None, identity_dimension=None, order=None, - guest=None, **options): - """Creates a simple JSON-file based authorizer. 
Reads data from - `rights_file` and `roles_file` and merge them with `roles` and - `rights` dictionaries respectively.""" + def __init__( + self, + rights_file=None, + roles_file=None, + roles: Optional[Dict[str, Dict[str, List[str]]]] = None, + rights: Optional[Dict[str, Dict[str, List[str]]]] = None, + identity_dimension=None, + order: Optional[str] = None, + guest=None, + **options + ) -> None: + """Creates a simple JSON-file based authorizer. + + Reads data from `rights_file` and `roles_file` and merge them + with `roles` and `rights` dictionaries respectively. + """ - super(SimpleAuthorizer, self).__init__() + super().__init__() roles = roles or {} rights = rights or {} @@ -268,7 +292,7 @@ def __init__(self, rights_file=None, roles_file=None, roles=None, role = right_from_dict(info) self.roles[key] = role - deps = dict((name, role.roles) for name, role in self.roles.items()) + deps = {name: role.roles for name, role in self.roles.items()} order = sorted_dependencies(deps) for name in order: @@ -298,7 +322,9 @@ def __init__(self, rights_file=None, roles_file=None, roles=None, self.identity_hierarchy = None def expand_roles(self, info): - """Merge `right` with its roles. `right` has to be a dictionary. + """Merge `right` with its roles. + + `right` has to be a dictionary. 
""" right = right_from_dict(info) for role_name in list(right.roles): @@ -307,7 +333,7 @@ def expand_roles(self, info): return right - def right(self, token): + def right(self, token: str) -> _SimpleAccessRight: try: right = self.rights[token] except KeyError: @@ -319,7 +345,7 @@ def right(self, token): return right - def authorize(self, token, cubes): + def authorize(self, token: str, cubes: List[Cube]) -> List[Cube]: try: right = self.right(token) except NotAuthorized: @@ -372,9 +398,10 @@ def restricted_cell(self, identity, cube, cell): hier = ident_dim.hierarchy(self.identity_hierarchy) if len(hier) != 1: - raise ConfigurationError("Identity hierarchy has to be flat " - "(%s in dimension %s is not)" - % (str(hier), str(ident_dim))) + raise ConfigurationError( + "Identity hierarchy has to be flat " + "(%s in dimension %s is not)" % (str(hier), str(ident_dim)) + ) # TODO: set as hidden cut = PointCut(ident_dim, [identity], hierarchy=hier, hidden=True) @@ -389,5 +416,3 @@ def hierarchy_limits(self, token, cube): right = self.right(token) return right.hierarchy_limits.get(str(cube), []) - - diff --git a/cubes/calendar.py b/cubes/calendar.py index 6c0f797e..a75e687d 100644 --- a/cubes/calendar.py +++ b/cubes/calendar.py @@ -2,33 +2,19 @@ """Date and time utilities.""" import re +from datetime import datetime, tzinfo +from typing import Dict, List, Optional, Union -from typing import ( - Dict, - List, - Optional, - Union, -) - -from dateutil.relativedelta import ( - relativedelta, - MO, TU, WE, TH, FR, SA, SU, - ) +from dateutil.relativedelta import FR, MO, SA, SU, TH, TU, WE, relativedelta from dateutil.tz import gettz, tzlocal, tzstr -from datetime import datetime, tzinfo -from time import gmtime -from .metadata import Hierarchy, HierarchyPath, Dimension from .errors import ArgumentError, ConfigurationError +from .metadata import Dimension, Hierarchy, HierarchyPath -__all__ = ( - "Calendar", - "calendar_hierarchy_units" -) +__all__ = ("Calendar", 
"calendar_hierarchy_units") -_CALENDAR_UNITS = ["year", "quarter", "month", "day", "hour", "minute", - "weekday"] +_CALENDAR_UNITS = ["year", "quarter", "month", "day", "hour", "minute", "weekday"] # FIXME: [typing] Change to enum @@ -50,10 +36,10 @@ "day": UNIT_DAY, "hour": UNIT_HOUR, "minute": UNIT_MINUTE, - "second": UNIT_SECOND + "second": UNIT_SECOND, } -_DATEUTIL_WEEKDAYS = { 0: MO, 1: TU, 2: WE, 3: TH, 4: FR, 5: SA, 6: SU } +_DATEUTIL_WEEKDAYS = {0: MO, 1: TU, 2: WE, 3: TH, 4: FR, 5: SA, 6: SU} _WEEKDAY_NUMBERS = { "monday": 0, @@ -62,31 +48,32 @@ "thursday": 3, "friday": 4, "saturday": 5, - "sunday": 6 + "sunday": 6, } -RELATIVE_FINE_TIME_RX = re.compile(r"(?P\d+)?" - "(?P\w+)" - "(?P(ago|forward))") +RELATIVE_FINE_TIME_RX = re.compile( + r"(?P\d+)?" r"(?P\w+)(?P(ago|forward))" +) -RELATIVE_TRUNCATED_TIME_RX = re.compile(r"(?P(last|next))" - "(?P\d+)?" - "(?P\w+)") +RELATIVE_TRUNCATED_TIME_RX = re.compile( + r"(?P(last|next))" r"(?P\d+)?" r"(?P\w+)" +) month_to_quarter = lambda month: ((month - 1) // 3) + 1 def calendar_hierarchy_units(hierarchy: Hierarchy) -> List[str]: - """Return time units for levels in the hierarchy. The hierarchy is - expected to be a date/time hierarchy and every level should have a `role` - property specified. If the role is not specified, then the role is - determined from the level name. + """Return time units for levels in the hierarchy. The hierarchy is expected + to be a date/time hierarchy and every level should have a `role` property + specified. If the role is not specified, then the role is determined from + the level name. Roles/units: `year`, `quarter`, `month`, `day`, `hour`, `minute`, `weekday` - If unknown role is encountered an exception is raised.""" + If unknown role is encountered an exception is raised. 
+ """ units: List[str] units = [] @@ -97,8 +84,9 @@ def calendar_hierarchy_units(hierarchy: Hierarchy) -> List[str]: if role in _CALENDAR_UNITS: units.append(role) else: - raise ArgumentError("Unknown time role '%s' for level '%s'" - % (role, str(level))) + raise ArgumentError( + "Unknown time role '{}' for level '{}'".format(role, str(level)) + ) return units @@ -111,39 +99,37 @@ def add_time_units(time: datetime, unit: str, amount: int) -> datetime: months: int = 0 years: int = 0 - if unit == 'hour': + if unit == "hour": hours = amount - elif unit == 'day': + elif unit == "day": days = amount - elif unit == 'week': + elif unit == "week": days = amount * 7 - elif unit == 'month': + elif unit == "month": months = amount - elif unit == 'quarter': + elif unit == "quarter": months = amount * 3 - elif unit == 'year': + elif unit == "year": years = amount else: raise ArgumentError(f"Unknown unit {unit} for subtraction.") - return time + relativedelta(hours=hours, - days=days, - months=months, - years=years) + return time + relativedelta(hours=hours, days=days, months=months, years=years) -class Calendar(object): +class Calendar: timezone_name: Optional[str] timezone: tzinfo - def __init__(self, - first_weekday: Union[str,int]=0, - timezone: str=None) -> None: + def __init__( + self, first_weekday: Union[str, int] = 0, timezone: str = None + ) -> None: """Creates a Calendar object for providing date/time paths and for relative date/time manipulation. Values for `first_weekday` are 0 for Monday, 6 for Sunday. Default is - 0.""" + 0. + """ if isinstance(first_weekday, str): try: @@ -168,8 +154,10 @@ def now(self) -> datetime: return datetime.now(self.timezone) def path(self, time: datetime, units: List[str]) -> HierarchyPath: - """Returns a path from `time` containing date/time `units`. `units` - can be a list of strings or a `Hierarchy` object.""" + """Returns a path from `time` containing date/time `units`. + + `units` can be a list of strings or a `Hierarchy` object. 
+ """ if not units: return [] @@ -187,7 +175,7 @@ def path(self, time: datetime, units: List[str]) -> HierarchyPath: elif unit == "weekday": value = (time.weekday() - self.first_weekday) % 7 else: - raise ArgumentError("Unknown calendar unit '%s'" % (unit, )) + raise ArgumentError(f"Unknown calendar unit '{unit}'") path.append(value) return path @@ -199,8 +187,10 @@ def now_path(self, units: List[str]) -> HierarchyPath: return self.path(self.now(), units) def truncate_time(self, time: datetime, unit: str) -> datetime: - """Truncates the `time` to calendar unit `unit`. Consider week start - day from the calendar.""" + """Truncates the `time` to calendar unit `unit`. + + Consider week start day from the calendar. + """ unit_order = _UNIT_ORDER[unit] @@ -212,26 +202,26 @@ def truncate_time(self, time: datetime, unit: str) -> datetime: elif unit_order > UNIT_SECOND: time = time.replace(second=0) - if unit == 'hour': + if unit == "hour": pass - elif unit == 'day': + elif unit == "day": time = time.replace(hour=0) - elif unit == 'week': + elif unit == "week": time = time.replace(hour=0) weekday = _DATEUTIL_WEEKDAYS[self.first_weekday] time = time + relativedelta(days=-6, weekday=weekday) - elif unit == 'month': + elif unit == "month": time = time.replace(day=1, hour=0) - elif unit == 'quarter': + elif unit == "quarter": month = (month_to_quarter(time.month) - 1) * 3 + 1 time = time.replace(month=month, day=1, hour=0) - elif unit == 'year': + elif unit == "year": time = time.replace(month=1, day=1, hour=0) else: @@ -239,13 +229,12 @@ def truncate_time(self, time: datetime, unit: str) -> datetime: return time - def since_period_start(self, - period: str, - unit: str, - time: datetime=None) -> int: + def since_period_start(self, period: str, unit: str, time: datetime = None) -> int: """Returns distance between `time` and the nearest `period` start - relative to `time` in `unit` units. 
For example: distance between - today and start of this year.""" + relative to `time` in `unit` units. + + For example: distance between today and start of this year. + """ if time is None: time = self.now() @@ -264,10 +253,9 @@ def since_period_start(self, else: raise ValueError("Unrecognized period unit: %s" % unit) - def named_relative_path(self, - reference: str, - units: List[str], - date: datetime=None) -> HierarchyPath: + def named_relative_path( + self, reference: str, units: List[str], date: datetime = None + ) -> HierarchyPath: """""" offset: int @@ -296,8 +284,7 @@ def named_relative_path(self, try: offset = int(offset_str) except ValueError: - raise ArgumentError("Relative time offset should be a " - "number") + raise ArgumentError("Relative time offset should be a number") else: offset = 1 @@ -322,16 +309,15 @@ def named_relative_path(self, return self.path(date, units) -class CalendarMemberConverter(object): +class CalendarMemberConverter: calendar: Calendar def __init__(self, calendar: Calendar) -> None: self.calendar = calendar - def __call__(self, - dimension: Dimension, - hierarchy: Hierarchy, - path: HierarchyPath) -> HierarchyPath: + def __call__( + self, dimension: Dimension, hierarchy: Hierarchy, path: HierarchyPath + ) -> HierarchyPath: if len(path) != 1: return path @@ -344,4 +330,3 @@ def __call__(self, return [value] return path - diff --git a/cubes/common.py b/cubes/common.py index 9f5983c4..aa385a38 100644 --- a/cubes/common.py +++ b/cubes/common.py @@ -1,27 +1,16 @@ # -*- encoding: utf-8 -*- """Utility functions for computing combinations of dimensions and hierarchy -levels""" - -from typing import ( - Any, - Collection, - Dict, - Hashable, - List, - Optional, - TypeVar, - Union, -) +levels.""" -import re -import os.path import json - +import os.path +import re from collections import OrderedDict +from typing import Any, Collection, Dict, Hashable, List, Optional, Union +from .errors import ArgumentError, ConfigurationError, 
ModelInconsistencyError from .types import JSONType -from .errors import ModelInconsistencyError, ArgumentError, ConfigurationError __all__ = [ "IgnoringDictionary", @@ -48,18 +37,19 @@ def to_str(b: bytes) -> str: class IgnoringDictionary(OrderedDict): """Simple dictionary extension that will ignore any keys of which values are empty (None/False)""" + def __setitem__(self, key: str, value: Any) -> None: if value is not None: - super(IgnoringDictionary, self).__setitem__(key, value) + super().__setitem__(key, value) def set(self, key: str, value: Any) -> None: """Sets `value` for `key` even if value is null.""" - super(IgnoringDictionary, self).__setitem__(key, value) + super().__setitem__(key, value) def __repr__(self) -> str: items = [] for key, value in self.items(): - item = '%s: %s' % (repr(key), repr(value)) + item = "{}: {}".format(repr(key), repr(value)) items.append(item) return "{%s}" % ", ".join(items) @@ -68,14 +58,13 @@ def __repr__(self) -> str: def assert_instance(obj: Any, class_: Any, label: str) -> None: """Raises ArgumentError when `obj` is not instance of `cls`""" if not isinstance(obj, class_): - raise ModelInconsistencyError("%s should be sublcass of %s, " - "provided: %s" % (label, - class_.__name__, - type(obj).__name__)) + raise ModelInconsistencyError( + "%s should be sublcass of %s, " + "provided: %s" % (label, class_.__name__, type(obj).__name__) + ) -def assert_all_instances(list_: List[Any], class_: Any, - label: str="object") -> None: +def assert_all_instances(list_: List[Any], class_: Any, label: str = "object") -> None: """Raises ArgumentError when objects in `list_` are not instances of `cls`""" for obj in list_ or []: @@ -84,6 +73,7 @@ def assert_all_instances(list_: List[Any], class_: Any, class MissingPackageError(Exception): """Exception raised when encountered a missing package.""" + pass @@ -96,11 +86,13 @@ class MissingPackage: source: Optional[str] comment: Optional[str] - def __init__(self, - package: str, - 
feature:Optional[str]=None, - source:Optional[str]=None, - comment:Optional[str]=None) -> None: + def __init__( + self, + package: str, + feature: Optional[str] = None, + source: Optional[str] = None, + comment: Optional[str] = None, + ) -> None: self.package = package self.feature = feature self.source = source @@ -128,20 +120,29 @@ def _fail(self) -> None: else: comment = "" - raise MissingPackageError(f"Optional package '{self.package}' " - f"is not installed. " - f"Please install the package" - f"{source}{use}{comment}") + raise MissingPackageError( + f"Optional package '{self.package}' " + f"is not installed. " + f"Please install the package" + f"{source}{use}{comment}" + ) + # ... -def optional_import(name: str, - feature:Optional[str]=None, - source:Optional[str]=None, - comment:Optional[str]=None) -> MissingPackage: - """Optionally import package `name`. If package does not exist, import a - placeholder object, that raises an exception with more detailed - description about the missing package.""" + +def optional_import( + name: str, + feature: Optional[str] = None, + source: Optional[str] = None, + comment: Optional[str] = None, +) -> MissingPackage: + """Optionally import package `name`. + + If package does not exist, import a placeholder object, that raises + an exception with more detailed description about the missing + package. 
+ """ try: return __import__(name) @@ -149,10 +150,9 @@ def optional_import(name: str, return MissingPackage(name, feature, source, comment) -def expand_dictionary(record: Dict[str, Any], - separator:str='.') -> Dict[str,Any]: +def expand_dictionary(record: Dict[str, Any], separator: str = ".") -> Dict[str, Any]: """Return expanded dictionary: treat keys are paths separated by - `separator`, create sub-dictionaries as necessary""" + `separator`, create sub-dictionaries as necessary.""" result: Dict[str, Any] = {} @@ -170,7 +170,7 @@ def expand_dictionary(record: Dict[str, Any], # TODO: py3: Typecheck `obj` with some protocol # TODO: Make this a pure function def localize_common(obj: Any, trans: JSONType) -> None: - """Localize common attributes: label and description""" + """Localize common attributes: label and description.""" if "label" in trans: obj.label = trans["label"] @@ -179,11 +179,15 @@ def localize_common(obj: Any, trans: JSONType) -> None: # TODO: Make this a pure function -def localize_attributes(attribs: Dict[str, Any], - translations:Dict[str, JSONType]) -> None: - """Localize list of attributes. `translations` should be a dictionary with - keys as attribute names, values are dictionaries with localizable - attribute metadata, such as ``label`` or ``description``.""" +def localize_attributes( + attribs: Dict[str, Any], translations: Dict[str, JSONType] +) -> None: + """Localize list of attributes. + + `translations` should be a dictionary with keys as attribute names, + values are dictionaries with localizable attribute metadata, such as + ``label`` or ``description``. 
+ """ for (name, atrans) in translations.items(): attrib = attribs[name] @@ -212,18 +216,20 @@ def get_localizable_attributes(obj: Any) -> Dict[str, str]: def decamelize(name: str) -> str: - s1 = re.sub('(.)([A-Z][a-z]+)', r'\1 \2', name) - return re.sub('([a-z0-9])([A-Z])', r'\1 \2', s1) + s1 = re.sub("(.)([A-Z][a-z]+)", r"\1 \2", name) + return re.sub("([a-z0-9])([A-Z])", r"\1 \2", s1) def to_identifier(name: str) -> str: - return re.sub(r' ', r'_', name).lower() + return re.sub(r" ", r"_", name).lower() + +def to_label(name: str, capitalize: bool = True) -> str: + """Converts `name` into label by replacing underscores by spaces. -def to_label(name: str, capitalize: bool=True) -> str: - """Converts `name` into label by replacing underscores by spaces. If - `capitalize` is ``True`` (default) then the first letter of the label is - capitalized.""" + If `capitalize` is ``True`` (default) then the first letter of the + label is capitalized. + """ label = name.replace("_", " ") if capitalize: @@ -233,18 +239,19 @@ def to_label(name: str, capitalize: bool=True) -> str: # FIXME: type: Fix the type -def coalesce_option_value(value: Any, value_type: str, label: str=None) -> Any: - """Convert string into an object value of `value_type`. The type might be: - `string` (no conversion), `integer`, `float`, `list` – comma separated - list of strings. +def coalesce_option_value(value: Any, value_type: str, label: str = None) -> Any: + """Convert string into an object value of `value_type`. + + The type might be: `string` (no conversion), `integer`, `float`, + `list` – comma separated list of strings. 
""" return_value: Union[str, List[str], float, int, bool] value_type = value_type.lower() try: - if value_type in ('string', 'str'): + if value_type in ("string", "str"): return_value = str(value) - elif value_type == 'list': + elif value_type == "list": if isinstance(value, str): return_value = value.split(",") else: @@ -269,16 +276,19 @@ def coalesce_option_value(value: Any, value_type: str, label: str=None) -> Any: else: label = "" - raise ArgumentError(f"Unable to convert {label}value '{value}' " - f"into type {value_type}") + raise ArgumentError( + f"Unable to convert {label}value '{value}' " f"into type {value_type}" + ) return return_value # FIXME: type: Fix the types def coalesce_options(options: Any, types: Any) -> Any: - """Coalesce `options` dictionary according to types dictionary. Keys in - `types` refer to keys in `options`, values of `types` are value types: - string, list, float, integer or bool.""" + """Coalesce `options` dictionary according to types dictionary. + + Keys in `types` refer to keys in `options`, values of `types` are + value types: string, list, float, integer or bool. + """ out = {} @@ -291,27 +301,27 @@ def coalesce_options(options: Any, types: Any) -> Any: return out -def read_json_file(path:str, kind:str=None) -> JSONType: - """Read a JSON from `path`. This is convenience function that provides - more descriptive exception handling.""" +def read_json_file(path: str, kind: str = None) -> JSONType: + """Read a JSON from `path`. + + This is convenience function that provides more descriptive + exception handling. 
+ """ kind = "%s " % str(kind) if kind else "" if not os.path.exists(path): - raise ConfigurationError("Can not find %sfile '%s'" - % (kind, path)) + raise ConfigurationError(f"Can not find {kind}file '{path}'") try: f = open(path, encoding="utf-8") - except IOError: - raise ConfigurationError("Can not open %sfile '%s'" - % (kind, path)) + except OSError: + raise ConfigurationError(f"Can not open {kind}file '{path}'") try: content = json.load(f) except ValueError as e: - raise SyntaxError("Syntax error in %sfile %s: %s" - % (kind, path, str(e))) + raise SyntaxError("Syntax error in {}file {}: {}".format(kind, path, str(e))) finally: f.close() @@ -320,9 +330,9 @@ def read_json_file(path:str, kind:str=None) -> JSONType: # FIXME: type: fix the type def sorted_dependencies(graph: Any) -> Any: - """Return keys from `deps` ordered by dependency (topological sort). - `deps` is a dictionary where keys are strings and values are list of - strings where keys is assumed to be dependant on values. + """Return keys from `deps` ordered by dependency (topological sort). `deps` + is a dictionary where keys are strings and values are list of strings where + keys is assumed to be dependant on values. 
Example:: @@ -333,13 +343,13 @@ def sorted_dependencies(graph: Any) -> Any: Will be: ``{"A": ["B"], "B": ["C", "D"], "D": ["E"],"E": []}`` """ - graph = dict((key, set(value)) for key, value in graph.items()) + graph = {key: set(value) for key, value in graph.items()} # L ← Empty list that will contain the sorted elements L = [] # S ← Set of all nodes with no dependencies (incoming edges) - S = set(parent for parent, req in graph.items() if not req) + S = {parent for parent, req in graph.items() if not req} while S: # remove a node n from S @@ -362,14 +372,14 @@ def sorted_dependencies(graph: Any) -> Any: nonempty = [k for k, v in graph.items() if v] if nonempty: - raise ArgumentError("Cyclic dependency of: %s" - % ", ".join(nonempty)) + raise ArgumentError("Cyclic dependency of: %s" % ", ".join(nonempty)) return L + def list_hash(values: Collection[Hashable]) -> int: """Return a hash value of a sequence of hashable items.""" hash_value = 0 - + for value in values: hash_value = hash_value ^ hash(value) diff --git a/cubes/datastructures.py b/cubes/datastructures.py index abeabd69..a5a2bf65 100644 --- a/cubes/datastructures.py +++ b/cubes/datastructures.py @@ -7,19 +7,14 @@ """ -from typing import ( - Generic, - Mapping, - TypeVar -) +from typing import Generic, TypeVar -__all__ = [ - "AttributeDict", -] +__all__ = ["AttributeDict"] T = TypeVar("T") + # # Credits: # Originally from the Celery project: http://www.celeryproject.org @@ -28,7 +23,6 @@ class AttributeDict(dict, Generic[T]): """Augment classes with a Mapping interface by adding attribute access. I.e. `d.key -> d[key]`. 
- """ def __getattr__(self, key: str) -> T: @@ -37,10 +31,9 @@ def __getattr__(self, key: str) -> T: return self[key] except KeyError: raise AttributeError( - '{0!r} object has no attribute {1!r}'.format( - type(self).__name__, key)) + "{!r} object has no attribute {!r}".format(type(self).__name__, key) + ) def __setattr__(self, key: str, value: T) -> None: """`d[key] = value -> d.key = value`""" self[key] = value - diff --git a/cubes/errors.py b/cubes/errors.py index 77f62edc..069502db 100644 --- a/cubes/errors.py +++ b/cubes/errors.py @@ -1,78 +1,99 @@ # -*- coding: utf-8 -*- """Exceptions used in Cubes. -The base exception calss is :class:`.CubesError`.""" +The base exception calss is :class:`.CubesError`. +""" -from typing import Optional from collections import OrderedDict +from typing import Optional + from .types import JSONType + class CubesError(Exception): """Generic error class.""" + class InconsistencyError(CubesError): - """Raised when something bad happened in cubes – very likely an edge - case that is not handled properly. + """Raised when something bad happened in cubes – very likely an edge case + that is not handled properly. - It is very unlikely that the user might fix this error by changing his/her - input. + It is very unlikely that the user might fix this error by changing + his/her input. """ + class UserError(CubesError): """Superclass for all errors caused by the cubes and slicer users. Error messages from this error might be safely passed to the front-end. Do not include any information that you would not like to be public. - Users can fix the error.""" + Users can fix the error. + """ + error_type = "unknown_user_error" + class InternalError(CubesError): """Superclass for all errors that happened on the server side: configuration issues, connection problems, model inconsistencies... 
- If you handle this exception, don't display content of this error to the - clients (such as over the web), as it might contain information about the - server configuration, database or other internals. + If you handle this exception, don't display content of this error to + the clients (such as over the web), as it might contain information + about the server configuration, database or other internals. """ + error_type = "internal_error" + class ConfigurationError(InternalError): """Raised when there is a problem with workspace configuration assumed.""" + class BackendError(InternalError): - """Raised by a backend. Should be handled separately, for example: should - not be passed to the client from the server due to possible internal - schema exposure. + """Raised by a backend. + + Should be handled separately, for example: should not be passed to + the client from the server due to possible internal schema exposure. """ + class WorkspaceError(InternalError): """Backend Workspace related exception.""" + class BrowserError(InternalError): """AggregationBrowser related exception.""" + pass + class StoreError(InternalError): """AggregationBrowser related exception.""" + pass + class ModelError(InternalError): """Model related exception.""" + class ExpressionError(ModelError): """Expression related exception such as unknown attribute or cirular - attribute reference""" + attribute reference.""" + # TODO: necessary? or rename to PhysicalModelError class MappingError(ModelError): """Raised when there are issues by mapping from logical model to physical - database schema. 
""" + database schema.""" # TODO: change all instances to ModelError class ModelInconsistencyError(ModelError): """Raised when there is incosistency in model structure.""" + class MissingObjectError(UserError): error_type: str = "missing_object" object_type: Optional[str] = None @@ -80,7 +101,7 @@ class MissingObjectError(UserError): message: Optional[str] name: Optional[str] - def __init__(self, message:str=None, name:str=None) -> None: + def __init__(self, message: str = None, name: str = None) -> None: self.message = message self.name = name @@ -97,25 +118,33 @@ def to_dict(self) -> JSONType: return d + class NoSuchDimensionError(MissingObjectError): """Raised when an unknown dimension is requested.""" + object_type = "dimension" + class NoSuchCubeError(MissingObjectError): """Raised when an unknown cube is requested.""" + object_type = "cube" + class NoSuchAttributeError(UserError): """Raised when an unknown attribute, measure or detail requested.""" + object_type = "attribute" + class ArgumentError(UserError): - """Raised when an invalid or conflicting function argument is supplied. - """ + """Raised when an invalid or conflicting function argument is supplied.""" + class HierarchyError(UserError): """Raised when attemt to get level deeper than deepest level in a - hierarchy""" + hierarchy.""" + error_type = "hierarchy" @@ -125,9 +154,12 @@ class HierarchyError(UserError): # Not quite errors, but used for signalling # class TemplateRequired(ModelError): - """Raised by a model provider which can provide a dimension, but requires - a template. Signals to the caller that the creation of a dimension should - be retried when the template is available.""" + """Raised by a model provider which can provide a dimension, but requires a + template. + + Signals to the caller that the creation of a dimension should be + retried when the template is available. 
+ """ template: str @@ -136,4 +168,3 @@ def __init__(self, template: str) -> None: def __str__(self) -> str: return self.template - diff --git a/cubes/ext.py b/cubes/ext.py index 843b5769..2f7ab297 100644 --- a/cubes/ext.py +++ b/cubes/ext.py @@ -1,35 +1,31 @@ # -*- coding: utf-8 -*- -from typing import ( - Any, - cast, - Collection, - Dict, - List, - Mapping, - NamedTuple, - Optional, - Type, - TypeVar, - Union, - ) - from collections import OrderedDict +from importlib import import_module from textwrap import dedent +from typing import ( + Any, + Collection, + Dict, + List, + Mapping, + NamedTuple, + Optional, + Type, + TypeVar, + Union, + cast, +) + from pkg_resources import iter_entry_points -from .common import decamelize, coalesce_options -from .errors import ArgumentError, InternalError, ConfigurationError -# TODO: Reconsider need of SettingsDict -from .settings import Setting, SettingsDict, distill_settings, SettingValue +from .common import coalesce_options, decamelize +from .errors import ArgumentError, ConfigurationError, InternalError -from importlib import import_module +# TODO: Reconsider need of SettingsDict +from .settings import Setting, SettingsDict, SettingValue, distill_settings -__all__ = [ - "Extensible", - "ExtensionRegistry", - "get_registry", -] +__all__ = ["Extensible", "ExtensionRegistry", "get_registry"] # Known extension types. 
# Keys: @@ -46,33 +42,23 @@ "pass_parameter": "cubes.server.auth", "http_basic_proxy": "cubes.server.auth", }, - "authorizer": { - "simple": "cubes.auth", - }, - "browser": { - "sql":"cubes.sql.browser", - "slicer":"cubes.server.browser", - }, + "authorizer": {"simple": "cubes.auth"}, + "browser": {"sql": "cubes.sql.browser", "slicer": "cubes.server.browser"}, "formatter": { "cross_table": "cubes.formatters", "csv": "cubes.formatters", - 'xlsx': 'cubes.formatters', + "xlsx": "cubes.formatters", "html_cross_table": "cubes.formatters", }, - "model_provider": { - "slicer":"cubes.server.store", - }, + "model_provider": {"slicer": "cubes.server.store"}, "request_log_handler": { "default": "cubes.server.logging", "csv": "cubes.server.logging", - 'xlsx': 'cubes.server.logging', + "xlsx": "cubes.server.logging", "json": "cubes.server.logging", "sql": "cubes.sql.logging", }, - "store": { - "sql":"cubes.sql.store", - "slicer":"cubes.server.store", - }, + "store": {"sql": "cubes.sql.store", "slicer": "cubes.server.store"}, } EXTENSION_TYPES: Dict[str, str] = { @@ -94,6 +80,7 @@ # authenticator: () # request_log_handler: (store?) + class ExtensionDescription(NamedTuple): type: str name: str @@ -112,11 +99,10 @@ def __init__(self, name: str) -> None: self.classes = {} self.modules = {} - def register_extension(self, name: str, extension: Type["Extensible"]) \ - -> None: + def register_extension(self, name: str, extension: Type["Extensible"]) -> None: # Sanity assertion. Should not happen, but still... 
- assert(issubclass(extension, Extensible)) + assert issubclass(extension, Extensible) self.classes[name] = extension @@ -134,13 +120,12 @@ def extension(self, name: str) -> Type["Extensible"]: try: extension = self.classes[name] except KeyError: - raise InternalError(f"Unknown extension '{name}' " - f"of type '{self.name}'") + raise InternalError(f"Unknown extension '{name}' " f"of type '{self.name}'") return extension def names(self) -> Collection[str]: - """Return extension `type_` names""" + """Return extension `type_` names.""" names: List[str] names = list(set(self.classes.keys()) | set(self.modules.keys())) return sorted(names) @@ -151,20 +136,21 @@ def describe(self, name: str) -> ExtensionDescription: doc = ext.extension_desc or ext.__doc__ or "(No documentation)" desc = ExtensionDescription( - type= self.name, - name= name, - label= ext.extension_label or name, - doc=doc, - settings = ext.extension_settings or []) + type=self.name, + name=name, + label=ext.extension_label or name, + doc=doc, + settings=ext.extension_settings or [], + ) return desc _registries: Dict[str, ExtensionRegistry] = {} + def _initialize_registry(name: str) -> None: - assert name not in _registries, \ - f"Extension registry '{name}' already initialized" + assert name not in _registries, f"Extension registry '{name}' already initialized" registry = ExtensionRegistry(name) @@ -189,7 +175,8 @@ def get_registry(name: str) -> ExtensionRegistry: return _registries[name] -T = TypeVar('T', bound="Extensible") +T = TypeVar("T", bound="Extensible") + class Extensible: __extension_type__ = "undefined" @@ -198,16 +185,20 @@ class Extensible: extension_desc: Optional[str] = None extension_label: Optional[str] = None - def __init_subclass__(cls, name: Optional[str]=None, abstract: bool=False) -> None: - assert cls.__extension_type__ in EXTENSION_TYPES, \ - f"Invalid extension type '{cls.__extension_type__}' " \ - f"for extension '{cls}'" + def __init_subclass__( + cls, name: Optional[str] = 
None, abstract: bool = False + ) -> None: + assert cls.__extension_type__ in EXTENSION_TYPES, ( + f"Invalid extension type '{cls.__extension_type__}' " + f"for extension '{cls}'" + ) # Note: We reqire either name or a flag explicitly to prevent potential # hidden errors by accidentally omitting the extension name. - assert (name is not None) ^ abstract, \ - f"Extension class {cls} should have either name " \ - f"or abstract flag specified." + assert (name is not None) ^ abstract, ( + f"Extension class {cls} should have either name " + f"or abstract flag specified." + ) if name is not None: cls.extension_name = name @@ -216,9 +207,11 @@ def __init_subclass__(cls, name: Optional[str]=None, abstract: bool=False) -> No registry.register_extension(name, cls) else: if cls.__extension_type__ == "undefined": - raise InternalError(f"Abstract extension '{cls}' has no " - f"concrete __extension_type__ " - f"assigned") + raise InternalError( + f"Abstract extension '{cls}' has no " + f"concrete __extension_type__ " + f"assigned" + ) else: # We do nothing for abstract subclasses pass @@ -243,8 +236,9 @@ def create_with_settings(cls: Type[T], settings: SettingsDict) -> T: return cast(T, cls(**settings)) # type: ignore @classmethod - def distill_settings(cls: Type[T], mapping: Mapping[str, Any]) \ - -> Dict[str, Optional[SettingValue]]: + def distill_settings( + cls: Type[T], mapping: Mapping[str, Any] + ) -> Dict[str, Optional[SettingValue]]: return distill_settings(mapping, cls.extension_settings) diff --git a/cubes/formatters.py b/cubes/formatters.py index edf207a6..7c42e791 100644 --- a/cubes/formatters.py +++ b/cubes/formatters.py @@ -1,16 +1,19 @@ # -*- coding: utf-8 -*- -import json -import csv import codecs import csv import datetime import decimal +import json import os import tempfile +from collections import namedtuple from io import StringIO -from collections import namedtuple +from . 
import ext +from .errors import ArgumentError +from .query.constants import SPLIT_DIMENSION_NAME +from .settings import Setting, SettingType try: import jinja2 @@ -24,13 +27,8 @@ except ImportError: from .common import MissingPackage - openpyxl = MissingPackage('openpyxl', 'pyexcel or other xlsx/xlsm reader/writer') + openpyxl = MissingPackage("openpyxl", "pyexcel or other xlsx/xlsm reader/writer") -from .errors import ArgumentError -from . import ext -from .settings import Setting, SettingType - -from .query.constants import SPLIT_DIMENSION_NAME __all__ = [ "create_formatter", @@ -38,26 +36,28 @@ "HTMLCrossTableFormatter", "SlicerJSONEncoder", "csv_generator", - 'xlsx_generator', + "xlsx_generator", "JSONLinesGenerator", ] def create_formatter(type_, *args, **kwargs): - """Creates a formatter of type `type`. Passes rest of the arguments to the - formatters initialization method.""" + """Creates a formatter of type `type`. + + Passes rest of the arguments to the formatters initialization + method. 
+ """ return ext.formatter(type_, *args, **kwargs) def _jinja_env(): - """Create and return cubes jinja2 environment""" - loader = jinja2.PackageLoader('cubes', 'templates') + """Create and return cubes jinja2 environment.""" + loader = jinja2.PackageLoader("cubes", "templates") env = jinja2.Environment(loader=loader) return env -def csv_generator(records, fields, include_header=True, header=None, - dialect=csv.excel): +def csv_generator(records, fields, include_header=True, header=None, dialect=csv.excel): def _row_string(row): writer.writerow(row) data = queue.getvalue() @@ -93,8 +93,8 @@ def xlsx_generator(records, fields, include_header=True, header=None): return fn -class JSONLinesGenerator(object): - def __init__(self, iterable, separator='\n'): +class JSONLinesGenerator: + def __init__(self, iterable, separator="\n"): """Creates a generator that yields one JSON record per record from `iterable` separated by a newline character..""" self.iterable = iterable @@ -105,20 +105,20 @@ def __init__(self, iterable, separator='\n'): def __iter__(self): for obj in self.iterable: string = self.encoder.encode(obj) - yield u"{}{}".format(string, self.separator) + yield f"{string}{self.separator}" class SlicerJSONEncoder(json.JSONEncoder): - def __init__(self, *args, **kwargs): - """Creates a JSON encoder that will convert some data values and also allows - iterables to be used in the object graph. + def __init__(self, *args, **kwargs) -> None: + """Creates a JSON encoder that will convert some data values and also + allows iterables to be used in the object graph. :Attributes: * `iterator_limit` - limits number of objects to be fetched from iterator. Default: 1000. """ - super(SlicerJSONEncoder, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.iterator_limit = 1000 @@ -151,8 +151,10 @@ def default(self, o): class Formatter(ext.Extensible, abstract=True): - """Empty class for the time being. 
Currently used only for finding all - built-in subclasses""" + """Empty class for the time being. + + Currently used only for finding all built-in subclasses + """ __extension_type__ = "formatter" @@ -160,8 +162,9 @@ def __call__(self, *args, **kwargs): return self.format(*args, **kwargs) def format(self, *args, **kwargs): - raise NotImplementedError("Subclasses are expected to implement " - "the format() method") + raise NotImplementedError( + "Subclasses are expected to implement the format() method" + ) # Main pre-formatting @@ -171,8 +174,7 @@ def format(self, *args, **kwargs): def make_cross_table(result, onrows=None, oncolumns=None, aggregates_on=None): - """ - Creates a cross table from a drilldown (might be any list of records). + """Creates a cross table from a drilldown (might be any list of records). `onrows` contains list of attribute names to be placed at rows and `oncolumns` contains list of attribute names to be placet at columns. `aggregates_on` specifies where the aggregates will be incuded: @@ -192,7 +194,6 @@ def make_cross_table(result, onrows=None, oncolumns=None, aggregates_on=None): values of attributes in `onrows`. * `data` - list of aggregate data per row. Each row is a list of aggregate tuples. - """ if not result.drilldown: @@ -254,9 +255,11 @@ def make_cross_table(result, onrows=None, oncolumns=None, aggregates_on=None): def coalesce_table_labels(attributes, onrows, oncolumns): - """Returns a tuple 9`onrows`, `oncolumns`) containing `attributes`. If - both are empty, all attributes will be put on rows. If one of the two is - empty, the rest of attributes is put on that axis.""" + """Returns a tuple 9`onrows`, `oncolumns`) containing `attributes`. + + If both are empty, all attributes will be put on rows. If one of the + two is empty, the rest of attributes is put on that axis. 
+ """ if not onrows or not oncolumns: onrows = onrows or [] oncolumns = oncolumns or [] @@ -272,11 +275,7 @@ def coalesce_table_labels(attributes, onrows, oncolumns): class CrossTableFormatter(Formatter, name="cross_table"): extension_settings = [ - Setting( - name= "indent", - type= SettingType.int, - label= "Output indent", - ), + Setting(name="indent", type=SettingType.int, label="Output indent") ] mime_type = "application/json" @@ -300,61 +299,68 @@ def __init__(self, indent=None): self.indent = indent or 4 self.encoder = SlicerJSONEncoder(indent=indent) - def format(self, cube, result, onrows=None, oncolumns=None, aggregates=None, - aggregates_on=None): - onrows, oncolumns = coalesce_table_labels(result.attributes, - onrows, - oncolumns) - table = make_cross_table(result, - onrows=onrows, - oncolumns=oncolumns, - aggregates_on=aggregates_on) - - d = { - "columns": table.columns, - "rows": table.rows, - "data": table.data - } + def format( + self, + cube, + result, + onrows=None, + oncolumns=None, + aggregates=None, + aggregates_on=None, + ): + onrows, oncolumns = coalesce_table_labels(result.attributes, onrows, oncolumns) + table = make_cross_table( + result, onrows=onrows, oncolumns=oncolumns, aggregates_on=aggregates_on + ) + + d = {"columns": table.columns, "rows": table.rows, "data": table.data} output = self.encoder.encode(d) return output class HTMLCrossTableFormatter(CrossTableFormatter, name="html_cross_table"): - extension_settings = [ - Setting( - name= "table_style", - desc= "CSS style for the table" - ) - ] + extension_settings = [Setting(name="table_style", desc="CSS style for the table")] mime_type = "text/html" def __init__(self, table_style=None): - """Create a simple HTML table formatter. See `CrossTableFormatter` for - information about arguments.""" + """Create a simple HTML table formatter. + + See `CrossTableFormatter` for information about arguments. 
+ """ self.env = _jinja_env() self.template = self.env.get_template("cross_table.html") self.table_style = table_style - def format(self, cube, result, onrows=None, oncolumns=None, aggregates=None, - aggregates_on=None): - onrows, oncolumns = coalesce_table_labels(result.attributes, - onrows, - oncolumns) - table = make_cross_table(result, - onrows=onrows, - oncolumns=oncolumns, - aggregates_on=aggregates_on) - - output = self.template.render(table=table, - table_style=self.table_style) + def format( + self, + cube, + result, + onrows=None, + oncolumns=None, + aggregates=None, + aggregates_on=None, + ): + onrows, oncolumns = coalesce_table_labels(result.attributes, onrows, oncolumns) + table = make_cross_table( + result, onrows=onrows, oncolumns=oncolumns, aggregates_on=aggregates_on + ) + + output = self.template.render(table=table, table_style=self.table_style) return output class CSVFormatter(Formatter, name="csv"): - def format(self, cube, result, onrows=None, oncolumns=None, aggregates=None, - aggregates_on=None): + def format( + self, + cube, + result, + onrows=None, + oncolumns=None, + aggregates=None, + aggregates_on=None, + ): if any([onrows, oncolumns]): raise ArgumentError("Column/row layout options are not supported") @@ -363,16 +369,17 @@ def format(self, cube, result, onrows=None, oncolumns=None, aggregates=None, for l in result.labels: # TODO: add a little bit of polish to this if l == SPLIT_DIMENSION_NAME: - header.append('Matches Filters') + header.append("Matches Filters") else: - header += [attr.label or attr.name - for attr in cube.get_attributes([l], aggregated=True)] + header += [ + attr.label or attr.name + for attr in cube.get_attributes([l], aggregated=True) + ] fields = result.labels - generator = csv_generator(result, - fields, - include_header=bool(header), - header=header) + generator = csv_generator( + result, fields, include_header=bool(header), header=header + ) rows = [row.decode("utf-8") for row in generator] output = "".join(rows) 
@@ -381,24 +388,32 @@ def format(self, cube, result, onrows=None, oncolumns=None, aggregates=None, class XLSXFormatter(Formatter, name="xlsx"): # TODO(serbernar): write formatter - def format(self, cube, result, onrows=None, oncolumns=None, aggregates=None, - aggregates_on=None): + def format( + self, + cube, + result, + onrows=None, + oncolumns=None, + aggregates=None, + aggregates_on=None, + ): if any([onrows, oncolumns]): raise ArgumentError("Column/row layout options are not supported") header = [] for l in result.labels: if l == SPLIT_DIMENSION_NAME: - header.append('Matches Filters') + header.append("Matches Filters") else: - header += [attr.label or attr.name - for attr in cube.get_attributes([l], aggregated=True)] + header += [ + attr.label or attr.name + for attr in cube.get_attributes([l], aggregated=True) + ] fields = result.labels - generator = csv_generator(result, - fields, - include_header=bool(header), - header=header) + generator = csv_generator( + result, fields, include_header=bool(header), header=header + ) rows = [str(row) for row in generator] output = "".join(rows) return output diff --git a/cubes/logging.py b/cubes/logging.py index ec116b03..b12192e0 100644 --- a/cubes/logging.py +++ b/cubes/logging.py @@ -1,20 +1,17 @@ # -*- coding: utf-8 -*- +from logging import FileHandler, Formatter, Logger, StreamHandler, getLogger from typing import Optional, Union -from logging import getLogger, Formatter, StreamHandler, FileHandler, Logger -__all__ = [ - "get_logger", - "create_logger", -] +__all__ = ["get_logger", "create_logger"] DEFAULT_LOGGER_NAME = "cubes" DEFAULT_FORMAT = "%(asctime)s %(levelname)s %(message)s" logger: Optional[Logger] = None # TODO: make name first -def get_logger(path:str=None, format_:str=None, name:str=None) -> Logger: - """Get brewery default logger""" +def get_logger(path: str = None, format_: str = None, name: str = None) -> Logger: + """Get brewery default logger.""" global logger if logger: @@ -22,8 +19,9 @@ def 
get_logger(path:str=None, format_:str=None, name:str=None) -> Logger: else: return create_logger(path, format_, name) -def create_logger(path:str=None, format_:str=None, name:str=None) -> Logger: - """Create a default logger""" + +def create_logger(path: str = None, format_: str = None, name: str = None) -> Logger: + """Create a default logger.""" global logger logger = getLogger(name or DEFAULT_LOGGER_NAME) logger.propagate = False @@ -44,4 +42,3 @@ def create_logger(path:str=None, format_:str=None, name:str=None) -> Logger: logger.addHandler(handler) return logger - diff --git a/cubes/mapper.py b/cubes/mapper.py index 95facb7a..b0b1070e 100644 --- a/cubes/mapper.py +++ b/cubes/mapper.py @@ -1,26 +1,24 @@ # -*- coding: utf-8 -*- -"""Logical to Physical Mappers""" +"""Logical to Physical Mappers.""" # TODO: This should be moved under query sub-module -from typing import Optional, Any, Collection, Tuple, Dict -from logging import Logger import collections +from logging import Logger +from typing import Any, Collection, Dict, Optional, Tuple from .logging import get_logger -from .types import JSONType -from .metadata.cube import Cube from .metadata.attributes import AttributeBase +from .metadata.cube import Cube +from .types import JSONType +__all__ = ("Mapper",) -__all__ = ( - "Mapper", -) class Mapper: """Mapper is core class for translating logical model to physical database - schema. - """ + schema.""" + # WARNING: do not put any SQL/engine/connection related stuff into this # class yet. It might be moved to the cubes as one of top-level modules # and subclassed here. @@ -31,19 +29,15 @@ class Mapper: locale: Optional[str] attributes: Dict[str, AttributeBase] - def __init__(self, - cube: Cube, - locale: str=None, - **naming: Any) -> None: - """Abstract class for mappers which maps logical references to - physical references (tables and columns). 
+ def __init__(self, cube: Cube, locale: str = None, **naming: Any) -> None: + """Abstract class for mappers which maps logical references to physical + references (tables and columns). Attributes: * `cube` - mapped cube * `fact_name` – fact name, if not specified then `cube.name` is used * `schema` – default database schema - """ self.logger = get_logger() @@ -60,6 +54,7 @@ def __init__(self, def _collect_attributes(self) -> None: """Collect all cube attributes and create a dictionary where keys are logical references and values are `cubes.model.Attribute` objects. + This method should be used after each cube or mappings change. """ @@ -70,11 +65,11 @@ def _collect_attributes(self) -> None: # FIXME: This is mutating (see #416) def set_locale(self, locale: str) -> None: - """Change the mapper's locale""" + """Change the mapper's locale.""" self.locale = locale self._collect_attributes() - def logical(self, attribute: AttributeBase, locale:str=None) -> str: + def logical(self, attribute: AttributeBase, locale: str = None) -> str: """Returns logical reference as string for `attribute` in `dimension`. If `dimension` is ``Null`` then fact table is assumed. The logical reference might have following forms: @@ -91,9 +86,12 @@ def logical(self, attribute: AttributeBase, locale:str=None) -> str: return reference - def split_logical(self, reference: str) -> Tuple[Optional[str],str]: - """Returns tuple (`dimension`, `attribute`) from `logical_reference` string. Syntax - of the string is: ``dimensions.attribute``.""" + def split_logical(self, reference: str) -> Tuple[Optional[str], str]: + """Returns tuple (`dimension`, `attribute`) from `logical_reference` + string. + + Syntax of the string is: ``dimensions.attribute``. 
+ """ split = reference.split(".") @@ -104,7 +102,7 @@ def split_logical(self, reference: str) -> Tuple[Optional[str],str]: else: return (None, reference) - def physical(self, attribute: AttributeBase, locale:str=None) -> str: + def physical(self, attribute: AttributeBase, locale: str = None) -> str: """Returns physical reference for attribute. Returned value is backend specific. Default implementation returns a value from the mapping dictionary. @@ -113,4 +111,3 @@ def physical(self, attribute: AttributeBase, locale:str=None) -> str: """ return self.mappings.get(attribute.localized_ref(locale)) - diff --git a/cubes/metadata/__init__.py b/cubes/metadata/__init__.py index a3719c65..2aebe2f9 100644 --- a/cubes/metadata/__init__.py +++ b/cubes/metadata/__init__.py @@ -1,9 +1,8 @@ # -*- encoding: utf-8 -*- -from .base import * from .attributes import * -from .dimension import * +from .base import * from .cube import * -from .providers import * +from .dimension import * from .localization import * - +from .providers import * diff --git a/cubes/metadata/attributes.py b/cubes/metadata/attributes.py index 0369ce24..3d9a968c 100644 --- a/cubes/metadata/attributes.py +++ b/cubes/metadata/attributes.py @@ -1,33 +1,20 @@ # -*- encoding: utf-8 -*- import copy +from typing import Any, Collection, Dict, List, Optional, Set, Type, TypeVar, cast -from typing import ( - Any, - Collection, - Dict, - Iterable, - List, - Optional, - Set, - Type, - TypeVar, - cast, - ) from expressions import inspect_variables # type: ignore +from ..common import JSONType, get_localizable_attributes +from ..errors import ArgumentError, ExpressionError, ModelError from .base import ModelObject -from ..errors import ModelError, ArgumentError, ExpressionError -from ..common import get_localizable_attributes, JSONType __all__ = [ "AttributeBase", "Attribute", "Measure", "MeasureAggregate", - "create_list_of", - "collect_attributes", "depsort_attributes", "collect_dependencies", @@ -39,8 +26,11 @@ def 
expand_attribute_metadata(metadata: JSONType) -> JSONType: - """Fixes metadata of an attribute. If `metadata` is a string it will be - converted into a dictionary with key `"name"` set to the string value.""" + """Fixes metadata of an attribute. + + If `metadata` is a string it will be converted into a dictionary + with key `"name"` set to the string value. + """ if isinstance(metadata, str): metadata = {"name": metadata} @@ -48,8 +38,7 @@ def expand_attribute_metadata(metadata: JSONType) -> JSONType: class AttributeBase(ModelObject): - """Base class for dimension attributes, measures and measure - aggregates. + """Base class for dimension attributes, measures and measure aggregates. Attributes: @@ -76,8 +65,8 @@ class AttributeBase(ModelObject): specified. """ - ASC = 'asc' - DESC = 'desc' + ASC = "asc" + DESC = "desc" localizable_attributes = ["label", "description", "format"] @@ -97,8 +86,7 @@ class AttributeBase(ModelObject): @classmethod def from_metadata(cls, metadata: JSONType) -> "AttributeBase": """Create an attribute from `metadata` which can be a dictionary or a - string representing the attribute name. - """ + string representing the attribute name.""" attribute: AttributeBase @@ -108,23 +96,26 @@ def from_metadata(cls, metadata: JSONType) -> "AttributeBase": attribute = copy.copy(metadata) elif isinstance(metadata, dict): if "name" not in metadata: - raise ModelError("Model objects metadata require at least " - "name to be present.") + raise ModelError( + "Model objects metadata require at least name to be present." 
+ ) attribute = cls(**metadata) return attribute - def __init__(self, - name: str, - label: Optional[str]=None, - description: Optional[str]=None, - order: Optional[str]=None, - info: Optional[JSONType]=None, - format: Optional[str]=None, - missing_value: Optional[str]=None, - expression: Optional[str]=None, - **kwargs: Any) -> None: - super(AttributeBase, self).__init__(name, label, description, info) + def __init__( + self, + name: str, + label: Optional[str] = None, + description: Optional[str] = None, + order: Optional[str] = None, + info: Optional[JSONType] = None, + format: Optional[str] = None, + missing_value: Optional[str] = None, + expression: Optional[str] = None, + **kwargs: Any, + ) -> None: + super().__init__(name, label, description, info) self.format = format self.missing_value = missing_value @@ -142,8 +133,9 @@ def __init__(self, elif self.order.startswith("desc"): self.order = Attribute.DESC else: - raise ArgumentError("Unknown ordering '%s' for attributes" - " '%s'" % (order, self.ref)) + raise ArgumentError( + f"Unknown ordering '{order}' for attributes '{self.ref}'" + ) else: self.order = None @@ -158,13 +150,15 @@ def __eq__(self, other: Any) -> bool: return False # TODO: should we be this strict? 
- return self.name == other.name \ - and self.label == other.label \ - and self.info == other.info \ - and self.description == other.description \ - and self.format == other.format \ - and self.expression == other.expression \ + return ( + self.name == other.name + and self.label == other.label + and self.info == other.info + and self.description == other.description + and self.format == other.format + and self.expression == other.expression and self.missing_value == other.missing_value + ) def __ne__(self, other: Any) -> bool: return not self.__eq__(other) @@ -173,7 +167,7 @@ def __hash__(self) -> int: return hash(self.ref) def to_dict(self, **options: Any) -> JSONType: - d = super(AttributeBase, self).to_dict(**options) + d = super().to_dict(**options) d["format"] = self.format d["order"] = self.order @@ -184,7 +178,7 @@ def to_dict(self, **options: Any) -> JSONType: return d - def localizable_dictionary(self) -> Dict[str,str]: + def localizable_dictionary(self) -> Dict[str, str]: locale = {} locale.update(get_localizable_attributes(self)) @@ -195,7 +189,7 @@ def is_localizable(self) -> bool: def localize(self, trans: JSONType) -> None: """Localize the attribute, allow localization of the format.""" - super(AttributeBase, self).localized(trans) + super().localized(trans) self.format = trans.get("format", self.format) @property @@ -203,17 +197,18 @@ def is_base(self) -> bool: return not self.expression def localized_ref(self, locale: Optional[str]) -> str: - """Returns localized attribute reference for locale `locale`. 
- """ + """Returns localized attribute reference for locale `locale`.""" if locale is not None: if not self.locales: - raise ArgumentError("Attribute '{}' is not loalizable " - "(localization {} requested)" - .format(self.name, locale)) + raise ArgumentError( + "Attribute '{}' is not loalizable " + "(localization {} requested)".format(self.name, locale) + ) elif locale not in self.locales: - raise ArgumentError("Attribute '{}' has no localization {} " - "(has: {})" - .format(self.name, locale, self.locales)) + raise ArgumentError( + "Attribute '{}' has no localization {} " + "(has: {})".format(self.name, locale, self.locales) + ) else: locale_suffix = "." + locale else: @@ -223,13 +218,14 @@ def localized_ref(self, locale: Optional[str]) -> str: @property def dependencies(self) -> Set[str]: - """Set of attributes that the `attribute` depends on. If the - `attribute` is an expresion, then returns the direct dependencies from - the expression. If the attribute is an aggregate with an unary - function operating on a measure, then the measure is considered as a - dependency. Attribute can't have both expression and measure - specified, since you can have only expression or an function, not - both. + """Set of attributes that the `attribute` depends on. + + If the `attribute` is an expresion, then returns the direct + dependencies from the expression. If the attribute is an + aggregate with an unary function operating on a measure, then + the measure is considered as a dependency. Attribute can't have + both expression and measure specified, since you can have only + expression or an function, not both. 
""" if not self.expression: return set() @@ -242,18 +238,20 @@ class Attribute(AttributeBase): # FIXME: Reconsider necessity of this attribute _dimension: Any - def __init__(self, - name: str, - label: str=None, - description: Optional[str]=None, - order: Optional[str]=None, - info: Optional[JSONType]=None, - format: Optional[str]=None, - dimension: Optional[Any]=None, - locales: Optional[List[str]]=None, - missing_value: Optional[str]=None, - expression: Optional[str]=None, - **kwargs: Any) -> None: + def __init__( + self, + name: str, + label: str = None, + description: Optional[str] = None, + order: Optional[str] = None, + info: Optional[JSONType] = None, + format: Optional[str] = None, + dimension: Optional[Any] = None, + locales: Optional[List[str]] = None, + missing_value: Optional[str] = None, + expression: Optional[str] = None, + **kwargs: Any, + ) -> None: """Dimension attribute object. Also used as fact detail. Attributes: @@ -280,14 +278,16 @@ def __init__(self, dimension has to be assigned after copying. """ - super(Attribute, self).__init__(name=name, - label=label, - description=description, - order=order, - info=info, - format=format, - missing_value=missing_value, - expression=expression) + super().__init__( + name=name, + label=label, + description=description, + order=order, + info=info, + format=format, + missing_value=missing_value, + expression=expression, + ) self._dimension = None self.dimension = dimension @@ -304,37 +304,39 @@ def dimension(self, dimension: Any) -> None: if dimension.is_flat and not dimension.has_details: self.ref = dimension.name else: - self.ref = dimension.name + '.' + str(self.name) + self.ref = dimension.name + "." 
+ str(self.name) else: self.ref = str(self.name) self._dimension = dimension def __deepcopy__(self, memo: Any) -> "Attribute": # Note: copied attribute is disowned - return Attribute(self.name, - self.label, - dimension=None, - locales=copy.deepcopy(self.locales, memo), - order=copy.deepcopy(self.order, memo), - description=self.description, - info=copy.deepcopy(self.info, memo), - format=self.format, - missing_value=self.missing_value, - expression=self.expression) + return Attribute( + self.name, + self.label, + dimension=None, + locales=copy.deepcopy(self.locales, memo), + order=copy.deepcopy(self.order, memo), + description=self.description, + info=copy.deepcopy(self.info, memo), + format=self.format, + missing_value=self.missing_value, + expression=self.expression, + ) def __eq__(self, other: Any) -> bool: - if not super(Attribute, self).__eq__(other): + if not super().__eq__(other): return False # TODO: we are not comparing dimension (owner) here return self.locales == other.locales - def __hash__(self) -> int: + def __hash__(self) -> int: return hash(self.ref) def to_dict(self, **options: Any) -> JSONType: # FIXME: Depreciated key "full_name" in favour of "ref" - d = super(Attribute, self).to_dict(**options) + d = super().to_dict(**options) d["locales"] = self.locales @@ -353,22 +355,25 @@ class Measure(AttributeBase): window_size: Optional[int] nonadditive: Optional[str] - def __init__(self, name: str, - label: str=None, - description: Optional[str]=None, - order: Optional[str]=None, - info: Optional[JSONType]=None, - format: Optional[str]=None, - dimension: Optional[Any]=None, - locales: Optional[List[str]]=None, - missing_value: Optional[str]=None, - expression: Optional[str]=None, - # FIXME: Remove this argument - aggregates: Optional[List[Any]]=None, - formula: Optional[str]=None, - nonadditive: Optional[str]=None, - window_size: Optional[int]=None, - **kwargs: Any) -> None: + def __init__( + self, + name: str, + label: str = None, + description: 
Optional[str] = None, + order: Optional[str] = None, + info: Optional[JSONType] = None, + format: Optional[str] = None, + dimension: Optional[Any] = None, + locales: Optional[List[str]] = None, + missing_value: Optional[str] = None, + expression: Optional[str] = None, + # FIXME: Remove this argument + aggregates: Optional[List[Any]] = None, + formula: Optional[str] = None, + nonadditive: Optional[str] = None, + window_size: Optional[int] = None, + **kwargs: Any, + ) -> None: """Create a measure attribute. Properties in addition to the attribute base properties: @@ -388,11 +393,16 @@ def __init__(self, name: str, String representation of a `Measure` returns its full reference. """ - super(Measure, self).__init__(name=name, label=label, - description=description, order=order, - info=info, format=format, - missing_value=None, - expression=expression) + super().__init__( + name=name, + label=label, + description=description, + order=order, + info=info, + format=format, + missing_value=None, + expression=expression, + ) self.formula = formula self.aggregates = aggregates @@ -406,35 +416,39 @@ def __init__(self, name: str, elif nonadditive == "time": self.nonadditive = "time" else: - raise ModelError("Unknown non-additive measure type '%s'" - % nonadditive) + raise ModelError("Unknown non-additive measure type '%s'" % nonadditive) def __deepcopy__(self, memo: Any) -> "Measure": - return Measure(self.name, self.label, - order=copy.deepcopy(self.order, memo), - description=self.description, - info=copy.deepcopy(self.info, memo), - format=self.format, - missing_value=self.missing_value, - aggregates=self.aggregates, - expression=self.expression, - formula=self.formula, - nonadditive=self.nonadditive, - window_size=self.window_size) + return Measure( + self.name, + self.label, + order=copy.deepcopy(self.order, memo), + description=self.description, + info=copy.deepcopy(self.info, memo), + format=self.format, + missing_value=self.missing_value, + aggregates=self.aggregates, + 
expression=self.expression, + formula=self.formula, + nonadditive=self.nonadditive, + window_size=self.window_size, + ) def __eq__(self, other: Any) -> bool: - if not super(Measure, self).__eq__(other): + if not super().__eq__(other): return False - return self.aggregates == other.aggregates \ - and self.formula == other.formula \ - and self.window_size == other.window_size + return ( + self.aggregates == other.aggregates + and self.formula == other.formula + and self.window_size == other.window_size + ) def __hash__(self) -> int: return hash(self.ref) def to_dict(self, **options: Any) -> JSONType: - d = super(Measure, self).to_dict(**options) + d = super().to_dict(**options) d["formula"] = self.formula d["aggregates"] = self.aggregates d["window_size"] = self.window_size @@ -443,32 +457,36 @@ def to_dict(self, **options: Any) -> JSONType: def default_aggregates(self) -> List["MeasureAggregate"]: """Creates default measure aggregates from a list of receiver's - measures. This is just a convenience function, correct models should - contain explicit list of aggregates. If no aggregates are specified, - then the only aggregate `sum` is assumed. + measures. + + This is just a convenience function, correct models should + contain explicit list of aggregates. If no aggregates are + specified, then the only aggregate `sum` is assumed. 
""" aggregates = [] for agg in self.aggregates or ["sum"]: if agg == "identity": - name = u"%s" % self.name + name = "%s" % self.name measure = None function = None else: - name = u"%s_%s" % (self.name, agg) + name = f"{self.name}_{agg}" measure = self.name function = agg - aggregate = MeasureAggregate(name=name, - label=None, - description=self.description, - order=self.order, - info=self.info, - format=self.format, - measure=measure, - function=function, - window_size=self.window_size) + aggregate = MeasureAggregate( + name=name, + label=None, + description=self.description, + order=self.order, + info=self.info, + format=self.format, + measure=measure, + function=function, + window_size=self.window_size, + ) aggregates.append(aggregate) @@ -483,22 +501,25 @@ class MeasureAggregate(AttributeBase): nonadditive: Optional[str] window_size: Optional[int] - def __init__(self, name: str, - label: str=None, - description: Optional[str]=None, - order: Optional[str]=None, - info: Optional[JSONType]=None, - format: Optional[str]=None, - locales: Optional[List[str]]=None, - missing_value: Optional[str]=None, - expression: Optional[str]=None, - measure: Optional[str]=None, - function: Optional[str]=None, - formula: Optional[str]=None, - nonadditive: Optional[str]=None, - window_size: Optional[int]=None, - **kwargs:Any) -> None: - """Masure aggregate + def __init__( + self, + name: str, + label: str = None, + description: Optional[str] = None, + order: Optional[str] = None, + info: Optional[JSONType] = None, + format: Optional[str] = None, + locales: Optional[List[str]] = None, + missing_value: Optional[str] = None, + expression: Optional[str] = None, + measure: Optional[str] = None, + function: Optional[str] = None, + formula: Optional[str] = None, + nonadditive: Optional[str] = None, + window_size: Optional[int] = None, + **kwargs: Any, + ) -> None: + """Masure aggregate. 
Attributes: @@ -511,12 +532,16 @@ def __init__(self, name: str, the measure in most of the times) """ - super(MeasureAggregate, self).__init__(name=name, label=label, - description=description, - order=order, info=info, - format=format, - missing_value=missing_value, - expression=expression) + super().__init__( + name=name, + label=label, + description=description, + order=order, + info=info, + format=format, + missing_value=missing_value, + expression=expression, + ) self.function = function self.formula = formula @@ -525,29 +550,33 @@ def __init__(self, name: str, self.window_size = window_size def __deepcopy__(self, memo: Any) -> "MeasureAggregate": - return MeasureAggregate(self.name, - self.label, - order=copy.deepcopy(self.order, memo), - description=self.description, - info=copy.deepcopy(self.info, memo), - format=self.format, - missing_value=self.missing_value, - measure=self.measure, - function=self.function, - formula=self.formula, - expression=self.expression, - nonadditive=self.nonadditive, - window_size=self.window_size) + return MeasureAggregate( + self.name, + self.label, + order=copy.deepcopy(self.order, memo), + description=self.description, + info=copy.deepcopy(self.info, memo), + format=self.format, + missing_value=self.missing_value, + measure=self.measure, + function=self.function, + formula=self.formula, + expression=self.expression, + nonadditive=self.nonadditive, + window_size=self.window_size, + ) def __eq__(self, other: Any) -> bool: - if not super(MeasureAggregate, self).__eq__(other): + if not super().__eq__(other): return False - return str(self.function) == str(other.function) \ - and self.measure == other.measure \ - and self.formula == other.formula \ - and self.nonadditive == other.nonadditive \ + return ( + str(self.function) == str(other.function) + and self.measure == other.measure + and self.formula == other.formula + and self.nonadditive == other.nonadditive and self.window_size == other.window_size + ) def __hash__(self) -> 
int: return hash(self.ref) @@ -557,7 +586,7 @@ def is_base(self) -> bool: return not self.expression and not self.function def to_dict(self, **options: Any) -> JSONType: - d = super(MeasureAggregate, self).to_dict(**options) + d = super().to_dict(**options) d["function"] = self.function d["formula"] = self.formula d["measure"] = self.measure @@ -568,19 +597,22 @@ def to_dict(self, **options: Any) -> JSONType: @property def dependencies(self) -> Set[str]: - """Set of attributes that the `attribute` depends on. If the - `attribute` is an expresion, then returns the direct dependencies from - the expression. If the attribute is an aggregate with an unary - function operating on a measure, then the measure is considered as a - dependency. Attribute can't have both expression and measure - specified, since you can have only expression or an function, not - both. + """Set of attributes that the `attribute` depends on. + + If the `attribute` is an expresion, then returns the direct + dependencies from the expression. If the attribute is an + aggregate with an unary function operating on a measure, then + the measure is considered as a dependency. Attribute can't have + both expression and measure specified, since you can have only + expression or an function, not both. """ if self.measure: if self.expression: - raise ModelError("Aggregate '{}' has both measure and " - "expression set".format(self.ref)) - return set([self.measure]) + raise ModelError( + "Aggregate '{}' has both measure and " + "expression set".format(self.ref) + ) + return {self.measure} if not self.expression: return set() @@ -595,12 +627,14 @@ def create_list_of(class_: Type[T], objects: Collection[JSONType]) -> List[T]: # FIXME: [typing] Reconsider this from type perspective -def collect_attributes(attributes: Collection[T], - *containers: Any) -> List[T]: - """Collect attributes from arguments. `containers` are objects with - method `all_attributes` or might be `Nulls`. Returns a list of attributes. 
- Note that the function does not check whether the attribute is an actual - attribute object or a string.""" +def collect_attributes(attributes: Collection[T], *containers: Any) -> List[T]: + """Collect attributes from arguments. + + `containers` are objects with method `all_attributes` or might be + `Nulls`. Returns a list of attributes. Note that the function does + not check whether the attribute is an actual attribute object or a + string. + """ # Method for decreasing noise/boilerplate collected: List[T] = [] @@ -615,11 +649,13 @@ def collect_attributes(attributes: Collection[T], return collected -def collect_dependencies(attributes: Collection[T], - all_attributes: Collection[T]) -> List[str]: - """Collect all original and dependant cube attributes for - `attributes`, sorted by their dependency: starting with attributes - that don't depend on anything. For exapmle, if the `attributes` is [a, +def collect_dependencies( + attributes: Collection[T], all_attributes: Collection[T] +) -> List[str]: + """Collect all original and dependant cube attributes for `attributes`, + sorted by their dependency: starting with attributes that don't depend on + anything. For exapmle, if the `attributes` is [a, + b] and a = c * 2, then the result list would be [b, c, a] or [c, b, a]. @@ -631,25 +667,28 @@ def collect_dependencies(attributes: Collection[T], Returns a list of sorted attribute references. 
""" - dependencies = {attr.ref:attr.dependencies for attr in all_attributes} + dependencies = {attr.ref: attr.dependencies for attr in all_attributes} # depsorted contains attribute names in order of dependencies starting # with base attributes (those that don't depend on anything, directly # represented by columns) and ending with derived attributes - depsorted = depsort_attributes([attr.ref for attr in attributes], - cast(Dict[str, List[str]], dependencies)) + depsorted = depsort_attributes( + [attr.ref for attr in attributes], cast(Dict[str, List[str]], dependencies) + ) return depsorted -def depsort_attributes(attributes: List[str], - all_dependencies: Dict[str, List[str]] - ) -> List[str]: + +def depsort_attributes( + attributes: List[str], all_dependencies: Dict[str, List[str]] +) -> List[str]: """Returns a sorted list of attributes by their dependencies. `attributes` is a list of attribute names, `all_dependencies` is a dictionary where keys are attribute names and values are direct attribute dependencies (that is attributes in attribute's expression, for example). `all_dependencies` should contain all known attributes, variables and constants. - Raises an exception when a circular dependecy is detected.""" + Raises an exception when a circular dependecy is detected. 
+ """ bases: Set[str] = set() @@ -666,7 +705,7 @@ def depsort_attributes(attributes: List[str], try: attr_deps = all_dependencies[attr] except KeyError as e: - raise ExpressionError("Unknown attribute '{}'".format(e)) + raise ExpressionError(f"Unknown attribute '{e}'") if not attr_deps: bases.add(attr) @@ -674,8 +713,9 @@ def depsort_attributes(attributes: List[str], required |= set(attr_deps) - seen # Remaining dependencies to be processed (not base attributes) - remaining: Dict[str,List[str]] = {attr:all_dependencies[attr] for attr in seen - if attr not in bases} + remaining: Dict[str, List[str]] = { + attr: all_dependencies[attr] for attr in seen if attr not in bases + } sorted_deps = [] @@ -683,8 +723,7 @@ def depsort_attributes(attributes: List[str], base = bases.pop() sorted_deps.append(base) - dependants = [attr for attr, deps in remaining.items() - if base in deps] + dependants = [attr for attr, deps in remaining.items() if base in deps] for attr in dependants: # Remove the current dependency @@ -697,8 +736,8 @@ def depsort_attributes(attributes: List[str], if remaining: remaining_str = ", ".join(sorted(remaining)) - raise ExpressionError("Circular attribute reference (remaining: {})" - .format(remaining_str)) + raise ExpressionError( + f"Circular attribute reference (remaining: {remaining_str})" + ) return sorted_deps - diff --git a/cubes/metadata/base.py b/cubes/metadata/base.py index 7923f281..4a5a4e39 100644 --- a/cubes/metadata/base.py +++ b/cubes/metadata/base.py @@ -1,29 +1,17 @@ - # -*- encoding: utf-8 -*- -"""Cube logical model""" +"""Cube logical model.""" import json import os import re import shutil - -from typing import ( - Any, - Collection, - Dict, - IO, - List, - Optional, - TypeVar, - cast, - ) - +from collections import OrderedDict +from typing import IO, Any, Collection, Dict, List, Optional, TypeVar, cast from urllib.parse import urlparse from urllib.request import urlopen -from collections import OrderedDict -from ..common import 
IgnoringDictionary, to_label, JSONType -from ..errors import ModelError, ArgumentError, CubesError +from ..common import IgnoringDictionary, JSONType, to_label +from ..errors import ArgumentError, CubesError, ModelError __all__ = ( "ModelObject", @@ -34,7 +22,7 @@ ) -class ModelObject(object): +class ModelObject: """Base classs for all model objects.""" localizable_attributes: List[str] = [] @@ -47,13 +35,17 @@ class ModelObject(object): ref: str - def __init__(self, - name: str, - label: Optional[str]=None, - description: Optional[str]=None, - info: Optional[JSONType]=None) -> None: - """Initializes model object basics. Assures that the `info` is a - dictionary.""" + def __init__( + self, + name: str, + label: Optional[str] = None, + description: Optional[str] = None, + info: Optional[JSONType] = None, + ) -> None: + """Initializes model object basics. + + Assures that the `info` is a dictionary. + """ self.name = name self.label = label @@ -62,10 +54,12 @@ def __init__(self, # FIXME: Consolidate the options def to_dict(self, **options: Any) -> JSONType: - """Convert to a dictionary. If `with_mappings` is ``True`` (which is - default) then `joins`, `mappings`, `fact` and `options` are included. - Should be set to ``False`` when returning a dictionary that will be - provided in an user interface or through server API. + """Convert to a dictionary. + + If `with_mappings` is ``True`` (which is default) then `joins`, + `mappings`, `fact` and `options` are included. Should be set to + ``False`` when returning a dictionary that will be provided in + an user interface or through server API. 
""" out = IgnoringDictionary() @@ -86,7 +80,7 @@ def localized(self, context: Any) -> "ModelObject": """Returns a copy of the cube translated with `translation`""" acopy: Any - acopy = self.__class__.__new__(self.__class__) # type: ignore + acopy = self.__class__.__new__(self.__class__) acopy.__dict__ = self.__dict__.copy() d = acopy.__dict__ @@ -106,27 +100,32 @@ def localized(self, context: Any) -> "ModelObject": return acopy - _T = TypeVar("_T", bound=ModelObject) + # TODO: [typing] Make `objects` collection of Protocol of `Named` objects # See PEP 544. # -def object_dict(objects:Collection[_T], - by_ref:bool=False, - error_message:Optional[str]=None, - error_dict:Dict[str,_T]=None) -> Dict[str,_T]: +def object_dict( + objects: Collection[_T], + by_ref: bool = False, + error_message: Optional[str] = None, + error_dict: Dict[str, _T] = None, +) -> Dict[str, _T]: """Make an ordered dictionary from model objects `objects` where keys are - object names. If `for_ref` is `True` then object's `ref` (reference) is - used instead of object name. Keys are supposed to be unique in the list, - otherwise an exception is raised.""" + object names. + + If `for_ref` is `True` then object's `ref` (reference) is used + instead of object name. Keys are supposed to be unique in the list, + otherwise an exception is raised. + """ if by_ref: items = ((obj.ref, obj) for obj in objects) else: items = ((obj.name, obj) for obj in objects) - ordered: Dict[str,Any] = OrderedDict() + ordered: Dict[str, Any] = OrderedDict() for key, value in items: if key in ordered: @@ -146,14 +145,18 @@ def object_dict(objects:Collection[_T], # strip_mappings(cube) -> remove mappings from cube # strip_mappings + def _json_from_url(url: str) -> JSONType: """Opens `resource` either as a file with `open()`or as URL with - `urlopen()`. Returns opened handle. """ + `urlopen()`. + + Returns opened handle. 
+ """ parts = urlparse(url) handle: IO[Any] - if parts.scheme in ('', 'file'): + if parts.scheme in ("", "file"): handle = open(parts.path, encoding="utf-8") elif len(parts.scheme) == 1: # TODO: This is temporary hack for MS Windows which can be replaced by @@ -165,7 +168,7 @@ def _json_from_url(url: str) -> JSONType: try: desc = json.load(handle) except ValueError as e: - raise SyntaxError("Syntax error in %s: %s" % (url, str(e))) + raise SyntaxError("Syntax error in {}: {}".format(url, str(e))) finally: handle.close() @@ -174,12 +177,14 @@ def _json_from_url(url: str) -> JSONType: def read_model_metadata(source: str) -> JSONType: """Reads a model description from `source` which can be a filename, URL, - file-like object or a path to a directory. Returns a model description - dictionary.""" + file-like object or a path to a directory. + + Returns a model description dictionary. + """ if isinstance(source, str): parts = urlparse(source) - if parts.scheme in ('', 'file') and os.path.isdir(parts.path): + if parts.scheme in ("", "file") and os.path.isdir(parts.path): source = parts.path return read_model_metadata_bundle(source) elif len(parts.scheme) == 1 and os.path.isdir(source): @@ -207,10 +212,10 @@ def read_model_metadata_bundle(path: str) -> JSONType: if not os.path.isdir(path): raise ArgumentError("Path '%s' is not a directory.") - info_path = os.path.join(path, 'model.json') + info_path = os.path.join(path, "model.json") if not os.path.exists(info_path): - raise ModelError('main model info %s does not exist' % info_path) + raise ModelError("main model info %s does not exist" % info_path) model = _json_from_url(info_path) @@ -224,58 +229,61 @@ def read_model_metadata_bundle(path: str) -> JSONType: for dirname, dirnames, filenames in os.walk(path): for filename in filenames: - if os.path.splitext(filename)[1] != '.json': + if os.path.splitext(filename)[1] != ".json": continue - split = re.split('_', filename) + split = re.split("_", filename) prefix = split[0] 
obj_path = os.path.join(dirname, filename) - if prefix in ('dim', 'dimension'): + if prefix in ("dim", "dimension"): desc = _json_from_url(obj_path) try: name = desc["name"] except KeyError: - raise ModelError("Dimension file '%s' has no name key" % - obj_path) + raise ModelError("Dimension file '%s' has no name key" % obj_path) if name in model["dimensions"]: - raise ModelError("Dimension '%s' defined multiple times " % - "(in '%s')" % (name, obj_path) ) + raise ModelError( + "Dimension '%s' defined multiple times " + % "(in '%s')" + % (name, obj_path) + ) model["dimensions"].append(desc) - elif prefix == 'cube': + elif prefix == "cube": desc = _json_from_url(obj_path) try: name = desc["name"] except KeyError: - raise ModelError("Cube file '%s' has no name key" % - obj_path) + raise ModelError("Cube file '%s' has no name key" % obj_path) if name in model["cubes"]: - raise ModelError("Cube '%s' defined multiple times " - "(in '%s')" % (name, obj_path) ) + raise ModelError( + "Cube '%s' defined multiple times " + "(in '%s')" % (name, obj_path) + ) model["cubes"].append(desc) return model -def write_model_metadata_bundle(path: str, - metadata: JSONType, - replace:bool =False) -> None: +def write_model_metadata_bundle( + path: str, metadata: JSONType, replace: bool = False +) -> None: """Writes a model metadata bundle into new directory `target` from - `metadata`. Directory should not exist.""" + `metadata`. + + Directory should not exist. + """ if os.path.exists(path): if not os.path.isdir(path): - raise CubesError("Target exists and is a file, " - "can not replace") + raise CubesError("Target exists and is a file, can not replace") elif not os.path.exists(os.path.join(path, "model.json")): - raise CubesError("Target is not a model directory, " - "can not replace.") + raise CubesError("Target is not a model directory, can not replace.") if replace: shutil.rmtree(path) else: - raise CubesError("Target already exists. 
" - "Remove it or force replacement.") + raise CubesError("Target already exists. Remove it or force replacement.") os.makedirs(path) @@ -299,4 +307,3 @@ def write_model_metadata_bundle(path: str, filename = os.path.join(path, "model.json") with open(filename, "w") as f: json.dump(metadata, f, indent=4) - diff --git a/cubes/metadata/cube.py b/cubes/metadata/cube.py index a6f85e25..22daf098 100644 --- a/cubes/metadata/cube.py +++ b/cubes/metadata/cube.py @@ -1,44 +1,49 @@ # -*- encoding: utf-8 -*- -"""Cube logical model""" +"""Cube logical model.""" from collections import OrderedDict, defaultdict +from typing import Any, Collection, Dict, List, Optional, Set, Tuple, Union -from typing import Collection, Optional, List, Dict, Any, Union, Set, Tuple - -from ..types import JSONType, OptionsType from ..common import assert_all_instances, get_localizable_attributes -from ..errors import ModelError, ArgumentError, NoSuchAttributeError, \ - NoSuchDimensionError +from ..errors import ( + ArgumentError, + ModelError, + NoSuchAttributeError, + NoSuchDimensionError, +) +from ..types import JSONType, OptionsType +from .attributes import ( + Attribute, + AttributeBase, + Measure, + MeasureAggregate, + collect_dependencies, + create_list_of, + expand_attribute_metadata, +) from .base import ModelObject, object_dict - -from .attributes import Attribute, Measure, MeasureAggregate, create_list_of, \ - collect_dependencies, expand_attribute_metadata, \ - AttributeBase - from .dimension import Dimension # TODO: This should belong here # from ..query.statutils import aggregate_calculator_labels -__all__ = [ - "Cube", -] +__all__ = ["Cube"] DEFAULT_FACT_COUNT_AGGREGATE = { "name": "fact_count", "label": "Count", - "function": "count" + "function": "count", } # TODO: make this configurable IMPLICIT_AGGREGATE_LABELS = { - "sum": u"Sum of {measure}", - "count": u"Record Count", - "count_nonempty": u"Non-empty count of {measure}", - "min": u"{measure} Minimum", - "max": u"{measure} 
Maximum", - "avg": u"Average of {measure}", + "sum": "Sum of {measure}", + "count": "Record Count", + "count_nonempty": "Non-empty count of {measure}", + "min": "{measure} Minimum", + "max": "{measure} Maximum", + "avg": "Average of {measure}", } @@ -101,7 +106,6 @@ class Cube(ModelObject): context (overrides the dimension's value) * `default_hierarchy_name` – which hierarchy will be used as default in the linked dimension - """ localizable_attributes = ["label", "description"] @@ -130,32 +134,35 @@ class Cube(ModelObject): mappings: Optional[JSONType] - def __init__(self, - name: str, - dimensions: Optional[Collection[Dimension]]=None, - measures: Optional[Collection[Measure]]=None, - aggregates: Optional[Collection[MeasureAggregate]]=None, - label: Optional[str]=None, - details: Optional[Collection[Attribute]]=None, - mappings: Optional[Collection[JSONType]]=None, - joins: Optional[JSONType]=None, - fact: Optional[str]=None, - key: Optional[str]=None, - description: Optional[str]=None, - browser_options: Optional[OptionsType]=None, - info: Optional[JSONType]=None, - dimension_links: Optional[JSONType]=None, - locale: Optional[str]=None, - category: Optional[str]=None, - store: Optional[str]=None, - **options: Any) -> None: - - super(Cube, self).__init__(name, label, description, info) + def __init__( + self, + name: str, + dimensions: Optional[Collection[Dimension]] = None, + measures: Optional[Collection[Measure]] = None, + aggregates: Optional[Collection[MeasureAggregate]] = None, + label: Optional[str] = None, + details: Optional[Collection[Attribute]] = None, + mappings: Optional[Collection[JSONType]] = None, + joins: Optional[JSONType] = None, + fact: Optional[str] = None, + key: Optional[str] = None, + description: Optional[str] = None, + browser_options: Optional[OptionsType] = None, + info: Optional[JSONType] = None, + dimension_links: Optional[JSONType] = None, + locale: Optional[str] = None, + category: Optional[str] = None, + store: Optional[str] = 
None, + **options: Any, + ) -> None: + + super().__init__(name, label, description, info) # FIXME: Only one should be passed to the cube - links if dimensions is not None and dimension_links is not None: - raise ModelError("Both dimensions and dimension_links provided, " - "use only one.") + raise ModelError( + "Both dimensions and dimension_links provided, use only one." + ) self.locale = locale @@ -194,8 +201,10 @@ def __init__(self, if dimensions is not None: if not all([isinstance(dim, Dimension) for dim in dimensions]): - raise ModelError("Dimensions for cube initialization should be " - "a list of Dimension instances.") + raise ModelError( + "Dimensions for cube initialization should be " + "a list of Dimension instances." + ) for dim in dimensions: self._add_dimension(dim) # @@ -206,20 +215,22 @@ def __init__(self, measures = measures or [] assert_all_instances(measures, Measure, "measure") - self._measures = object_dict(measures, - error_message="Duplicate measure {key} " - "in cube {cube}", - error_dict={"cube": self.name}) + self._measures = object_dict( + measures, + error_message="Duplicate measure {key} in cube {cube}", + error_dict={"cube": self.name}, + ) # Aggregates # aggregates = aggregates or [] assert_all_instances(aggregates, MeasureAggregate, "aggregate") - self._aggregates = object_dict(aggregates, - error_message="Duplicate aggregate " - "{key} in cube {cube}", - error_dict={"cube": self.name}) + self._aggregates = object_dict( + aggregates, + error_message="Duplicate aggregate {key} in cube {cube}", + error_dict={"cube": self.name}, + ) # We don't need to access details by name details = details or [] @@ -228,10 +239,12 @@ def __init__(self, @classmethod def from_metadata(cls, metadata: JSONType) -> "Cube": - """Create a cube object from `metadata` dictionary. The cube has no - dimensions attached after creation. 
You should link the dimensions to the - cube according to the `Cube.dimension_links` property using - `Cube._add_dimension()`""" + """Create a cube object from `metadata` dictionary. + + The cube has no dimensions attached after creation. You should + link the dimensions to the cube according to the + `Cube.dimension_links` property using `Cube._add_dimension()` + """ measures: List[Measure] details: List[Attribute] @@ -259,8 +272,8 @@ def from_metadata(cls, metadata: JSONType) -> "Cube": aggregates = metadata.pop("aggregates", []) aggregates = create_list_of(MeasureAggregate, aggregates) - aggregate_dict = dict((a.name, a) for a in aggregates) - measure_dict = dict((m.name, m) for m in measures) + aggregate_dict = {a.name: a for a in aggregates} + measure_dict = {m.name: m for m in measures} # TODO: Depreciate? if metadata.get("implicit_aggregates", False): @@ -273,16 +286,20 @@ def from_metadata(cls, metadata: JSONType) -> "Cube": existing = aggregate_dict.get(aggregate.name) if existing: if existing.function != aggregate.function: - raise ModelError("Aggregate '%s' function mismatch. " - "Implicit function %s, explicit function:" - " %s." % (aggregate.name, - aggregate.function, - existing.function)) + raise ModelError( + "Aggregate '%s' function mismatch. " + "Implicit function %s, explicit function:" + " %s." 
+ % (aggregate.name, aggregate.function, existing.function) + ) continue # or the same function and measure - existing = [agg for agg in aggregates - if agg.function == aggregate.function - and agg.measure == measure.name] + existing = [ + agg + for agg in aggregates + if agg.function == aggregate.function + and agg.measure == measure.name + ] if existing: continue @@ -306,12 +323,13 @@ def from_metadata(cls, metadata: JSONType) -> "Cube": if measure and aggregate.nonadditive is None: aggregate.nonadditive = measure.nonadditive - return cls(measures=measures, - aggregates=aggregates, - dimension_links=dimension_links, - details=details, - **metadata) - + return cls( + measures=measures, + aggregates=aggregates, + dimension_links=dimension_links, + details=details, + **metadata, + ) @property def measures(self) -> List[Measure]: @@ -320,24 +338,25 @@ def measures(self) -> List[Measure]: # TODO: Either str or Measure, not an union def measure(self, name: Union[str, Measure]) -> Measure: """Get measure object. If `obj` is a string, then measure with given - name is returned, otherwise measure object is returned if it belongs - to the cube. Returned object is of `Measure` type. + name is returned, otherwise measure object is returned if it belongs to + the cube. Returned object is of `Measure` type. - Raises `NoSuchAttributeError` when there is no such measure or when - there are multiple measures with the same name (which also means that - the model is not valid). + Raises `NoSuchAttributeError` when there is no such measure or + when there are multiple measures with the same name (which also + means that the model is not valid). """ name = str(name) try: return self._measures[name] except KeyError: - raise NoSuchAttributeError("Cube '%s' has no measure '%s'" % - (self.name, name)) + raise NoSuchAttributeError(f"Cube '{self.name}' has no measure '{name}'") def get_measures(self, measures: List[str]) -> List[Measure]: - """Get a list of measures as `Attribute` objects. 
If `measures` is - `None` then all cube's measures are returned.""" + """Get a list of measures as `Attribute` objects. + + If `measures` is `None` then all cube's measures are returned. + """ array: List[Measure] = [] @@ -353,27 +372,29 @@ def aggregates(self) -> List[MeasureAggregate]: return list(self._aggregates.values()) # FIXME: String on name - def aggregate(self, name:Union[str,MeasureAggregate]) -> MeasureAggregate: + def aggregate(self, name: Union[str, MeasureAggregate]) -> MeasureAggregate: """Get aggregate object. If `obj` is a string, then aggregate with given name is returned, otherwise aggregate object is returned if it belongs to the cube. Returned object is of `MeasureAggregate` type. - Raises `NoSuchAttributeError` when there is no such aggregate or when - there are multiple aggregates with the same name (which also means - that the model is not valid). + Raises `NoSuchAttributeError` when there is no such aggregate or + when there are multiple aggregates with the same name (which + also means that the model is not valid). """ name = str(name) try: return self._aggregates[name] except KeyError: - raise NoSuchAttributeError("Cube '%s' has no measure aggregate " - "'%s'" % (self.name, name)) + raise NoSuchAttributeError( + f"Cube '{self.name}' has no measure aggregate '{name}'" + ) # TODO: We should probably don't return all on None # Recommended replacement: just use plain aggregate() and check for list # in the caller. - def get_aggregates(self, names: Optional[List[str]]=None) \ - -> List[MeasureAggregate]: + def get_aggregates( + self, names: Optional[List[str]] = None + ) -> List[MeasureAggregate]: """Get a list of aggregates with `names`.""" if not names: return self.aggregates @@ -382,17 +403,19 @@ def get_aggregates(self, names: Optional[List[str]]=None) \ # TODO: Reconsider necessity of this one def aggregates_for_measure(self, name: str) -> List[MeasureAggregate]: - """Returns aggregtates for measure with `name`. 
Only direct function - aggregates are returned. If the measure is specified in an expression, - the aggregate is not included in the returned list""" + """Returns aggregtates for measure with `name`. + + Only direct function aggregates are returned. If the measure is + specified in an expression, the aggregate is not included in the + returned list + """ return [agg for agg in self.aggregates if agg.measure == name] @property def all_dimension_keys(self) -> List[Attribute]: """Returns all attributes that represent keys of dimensions and their - levels.. - """ + levels..""" attributes: List[Attribute] = [] for dim in self.dimensions: @@ -406,14 +429,14 @@ def all_attributes(self) -> List[AttributeBase]: """All cube's attributes: attributes of dimensions, details, measures and aggregates. Use this method if you need to prepare structures for any kind of query. For attributes for more specific types of queries - refer to :meth:`Cube.all_fact_attributes` and + refer to :meth:`Cube.all_fact_attributes` and. + :meth:`Cube.all_aggregate_attributes`. .. versionchanged:: 1.1 Returns all attributes, including aggregates. Original functionality is available as `all_fact_attributes()` - """ attributes: List[AttributeBase] = [] @@ -458,23 +481,23 @@ def all_fact_attributes(self) -> List[Attribute]: return attributes @property - def attribute_dependencies(self) -> Dict[str,Set[str]]: - """Dictionary of dependencies between attributes. Values are - references of attributes that the key attribute depends on. For - example for attribute `a` which has expression `b + c` the dictionary - would be: `{"a": ["b", "c"]}`. The result dictionary includes all - cubes' attributes and aggregates. + def attribute_dependencies(self) -> Dict[str, Set[str]]: + """Dictionary of dependencies between attributes. Values are references + of attributes that the key attribute depends on. For example for + attribute `a` which has expression `b + c` the dictionary would be: + `{"a": ["b", "c"]}`. 
The result dictionary includes all cubes' + attributes and aggregates. .. versionadded:: 1.1 """ attributes = self.all_attributes + self.all_aggregate_attributes - return {attr.ref:attr.dependencies for attr in attributes} + return {attr.ref: attr.dependencies for attr in attributes} @property def all_aggregate_attributes(self) -> List[AttributeBase]: """All cube's attributes for aggregation: attributes of dimensions and - aggregates. """ + aggregates.""" attributes: List[AttributeBase] = [] for dim in self.dimensions: @@ -508,13 +531,14 @@ def attribute(self, attribute: Union[str, AttributeBase]) -> AttributeBase: if measure.name == name: return measure - raise NoSuchAttributeError("Cube '%s' has no attribute '%s'" - % (self.name, attribute)) + raise NoSuchAttributeError(f"Cube '{self.name}' has no attribute '{attribute}'") # TODO: Rename to collect_attributes - def get_attributes(self, - attributes:Collection[Union[str,AttributeBase]]=None, - aggregated:bool=False) -> Collection[AttributeBase]: + def get_attributes( + self, + attributes: Collection[Union[str, AttributeBase]] = None, + aggregated: bool = False, + ) -> Collection[AttributeBase]: """Returns a list of cube's attributes. If `aggregated` is `True` then attributes after aggregation are returned, otherwise attributes for a fact are considered. @@ -527,7 +551,8 @@ def get_attributes(self, If `simplified_references` is `True` then dimension attribute references in `attrubutes` are considered simplified, otherwise they - are considered as full (dim.attribute).""" + are considered as full (dim.attribute). 
+ """ # TODO: this should be a dictionary created in __init__ once this # class becomes immutable @@ -548,17 +573,20 @@ def get_attributes(self, try: attr = lookup[name] except KeyError: - raise NoSuchAttributeError("Unknown attribute '{}' in cube " - "'{}'".format(name, self.name)) + raise NoSuchAttributeError( + f"Unknown attribute '{name}' in cube '{self.name}'" + ) result.append(attr) return result - def collect_dependencies(self, attributes: Collection[AttributeBase])\ - -> Collection[AttributeBase]: - """Collect all original and dependant cube attributes for - `attributes`, sorted by their dependency: starting with attributes - that don't depend on anything. For exapmle, if the `attributes` is [a, + def collect_dependencies( + self, attributes: Collection[AttributeBase] + ) -> Collection[AttributeBase]: + """Collect all original and dependant cube attributes for `attributes`, + sorted by their dependency: starting with attributes that don't depend + on anything. For exapmle, if the `attributes` is [a, + b] and a = c * 2, then the result list would be [b, c, a] or [c, b, a]. @@ -578,9 +606,11 @@ def collect_dependencies(self, attributes: Collection[AttributeBase])\ # TODO: This is mutable method def link_dimension(self, dimension: Dimension) -> None: """Links `dimension` object or a clone of it to the cube according to - the specification of cube's dimension link. See + the specification of cube's dimension link. See. + :meth:`Dimension.clone` for more information about cloning a - dimension.""" + dimension. + """ link = self.dimension_links.get(dimension.name) @@ -591,17 +621,22 @@ def link_dimension(self, dimension: Dimension) -> None: # TODO: this method should be used only during object initialization def _add_dimension(self, dimension: Dimension) -> None: - """Add dimension to cube. Replace dimension with same name. Raises - `ModelInconsistencyError` when dimension with same name already exists - in the receiver. """ + """Add dimension to cube. 
+ + Replace dimension with same name. Raises + `ModelInconsistencyError` when dimension with same name already + exists in the receiver. + """ if not dimension: - raise ArgumentError("Trying to add None dimension to cube '%s'." - % self.name) + raise ArgumentError( + "Trying to add None dimension to cube '%s'." % self.name + ) elif not isinstance(dimension, Dimension): - raise ArgumentError("Dimension added to cube '%s' is not a " - "Dimension instance. It is '%s'" - % (self.name, type(dimension))) + raise ArgumentError( + "Dimension added to cube '%s' is not a " + "Dimension instance. It is '%s'" % (self.name, type(dimension)) + ) self._dimensions[dimension.name] = dimension @@ -622,28 +657,30 @@ def dimension(self, obj: Union[str, Dimension]) -> Dimension: # the list of required dimensions if not obj: - raise NoSuchDimensionError("Requested dimension should not be " - "none (cube '{}')".format(self.name)) + raise NoSuchDimensionError( + "Requested dimension should not be " + "none (cube '{}')".format(self.name) + ) name = str(obj) try: return self._dimensions[str(name)] except KeyError: - raise NoSuchDimensionError("cube '{}' has no dimension '{}'" - .format(self.name, name)) + raise NoSuchDimensionError(f"cube '{self.name}' has no dimension '{name}'") # TODO Rename. The name does not match description. # FIXME: Very complicted return type. Unnecessary. @property - def distilled_hierarchies(self) -> Dict[Tuple[str,Optional[str]],List[str]]: + def distilled_hierarchies(self) -> Dict[Tuple[str, Optional[str]], List[str]]: """Returns a dictionary of hierarchies. Keys are hierarchy references and values are hierarchy level key attribute references. .. warning:: - This method might change in the future. Consider experimental.""" + This method might change in the future. Consider experimental. 
+ """ - hierarchies: Dict[Tuple[str,Optional[str]],List[str]] = {} + hierarchies: Dict[Tuple[str, Optional[str]], List[str]] = {} for dim in self.dimensions: for hier in dim.hierarchies: key = (dim.name, hier.name) @@ -657,13 +694,15 @@ def distilled_hierarchies(self) -> Dict[Tuple[str,Optional[str]],List[str]]: return hierarchies def to_dict(self, **options: Any) -> JSONType: - """Convert to a dictionary. If `with_mappings` is ``True`` (which is - default) then `joins`, `mappings`, `fact` and `options` are included. - Should be set to ``False`` when returning a dictionary that will be - provided in an user interface or through server API. + """Convert to a dictionary. + + If `with_mappings` is ``True`` (which is default) then `joins`, + `mappings`, `fact` and `options` are included. Should be set to + ``False`` when returning a dictionary that will be provided in + an user interface or through server API. """ - out = super(Cube, self).to_dict(**options) + out = super().to_dict(**options) out["locale"] = self.locale out["category"] = self.category @@ -712,23 +751,31 @@ def __eq__(self, other: Any) -> bool: if other is None or type(other) != type(self): return False - if self.name != other.name or self.label != other.label \ - or self.description != other.description: + if ( + self.name != other.name + or self.label != other.label + or self.description != other.description + ): return False - elif self.dimensions != other.dimensions \ - or self.measures != other.measures \ - or self.aggregates != other.aggregates \ - or self.details != other.details \ - or self.mappings != other.mappings \ - or self.joins != other.joins \ - or self.browser_options != other.browser_options \ - or self.info != other.info: + elif ( + self.dimensions != other.dimensions + or self.measures != other.measures + or self.aggregates != other.aggregates + or self.details != other.details + or self.mappings != other.mappings + or self.joins != other.joins + or self.browser_options != 
other.browser_options + or self.info != other.info + ): return False return True # TODO: Validation result as its own types def validate(self) -> List[Any]: - """Validate cube. See Model.validate() for more information. """ + """Validate cube. + + See Model.validate() for more information. + """ results = [] # Check whether all attributes, measures and keys are Attribute objects @@ -738,9 +785,13 @@ def validate(self) -> List[Any]: for measure in self.measures: if not isinstance(measure, Attribute): - results.append(('error', - "Measure '%s' in cube '%s' is not instance" - "of Attribute" % (measure, self.name))) + results.append( + ( + "error", + "Measure '%s' in cube '%s' is not instance" + "of Attribute" % (measure, self.name), + ) + ) else: measures.add(str(measure)) @@ -748,16 +799,25 @@ def validate(self) -> List[Any]: for detail in self.details: if not isinstance(detail, Attribute): - results.append(('error', "Detail '%s' in cube '%s' is not " - "instance of Attribute" - % (detail, self.name))) + results.append( + ( + "error", + "Detail '%s' in cube '%s' is not " + "instance of Attribute" % (detail, self.name), + ) + ) if str(detail) in details: - results.append(('error', "Duplicate detail '%s' in cube '%s'"\ - % (detail, self.name))) + results.append( + ("error", f"Duplicate detail '{detail}' in cube '{self.name}'") + ) elif str(detail) in measures: - results.append(('error', "Duplicate detail '%s' in cube '%s'" - " - specified also as measure" \ - % (detail, self.name))) + results.append( + ( + "error", + "Duplicate detail '%s' in cube '%s'" + " - specified also as measure" % (detail, self.name), + ) + ) else: details.add(str(detail)) @@ -766,7 +826,7 @@ def validate(self) -> List[Any]: return results def localize(self, trans: JSONType) -> None: - super(Cube, self).localized(trans) + super().localized(trans) self.category = trans.get("category", self.category) @@ -807,8 +867,7 @@ def __str__(self) -> str: return self.name -def 
_measure_aggregate_label(aggregate: MeasureAggregate, - measure: Measure) -> str: +def _measure_aggregate_label(aggregate: MeasureAggregate, measure: Measure) -> str: function = aggregate.function if function: template = IMPLICIT_AGGREGATE_LABELS.get(function, "{measure}") @@ -832,10 +891,13 @@ def _measure_aggregate_label(aggregate: MeasureAggregate, # TODO: Link should be it's own type def expand_dimension_links(metadata: List[JSONType]) -> List[JSONType]: - """Expands links to dimensions. `metadata` should be a list of strings or - dictionaries (might be mixed). Returns a list of dictionaries with at - least one key `name`. Other keys are: `hierarchies`, - `default_hierarchy_name`, `nonadditive`, `cardinality`, `template`""" + """Expands links to dimensions. + + `metadata` should be a list of strings or dictionaries (might be + mixed). Returns a list of dictionaries with at least one key `name`. + Other keys are: `hierarchies`, `default_hierarchy_name`, + `nonadditive`, `cardinality`, `template` + """ links: List[JSONType] = [] @@ -852,7 +914,9 @@ def expand_dimension_links(metadata: List[JSONType]) -> List[JSONType]: def expand_cube_metadata(metadata: JSONType) -> JSONType: """Expands `metadata` to be as complete as possible cube metadata. - `metadata` should be a dictionary.""" + + `metadata` should be a dictionary. + """ metadata = dict(metadata) @@ -877,8 +941,10 @@ def expand_cube_metadata(metadata: JSONType) -> JSONType: link["hierarchies"] = hiers if dim_hiers: - raise ModelError("There are hierarchies specified for non-linked " - "dimensions: %s." % (dim_hiers.keys())) + raise ModelError( + "There are hierarchies specified for non-linked " + "dimensions: %s." 
% (dim_hiers.keys()) + ) nonadditive = metadata.pop("nonadditive", None) if "measures" in metadata: @@ -896,5 +962,3 @@ def expand_cube_metadata(metadata: JSONType) -> JSONType: metadata["dimensions"] = links return metadata - - diff --git a/cubes/metadata/defaults.py b/cubes/metadata/defaults.py index 24319f39..573ad685 100644 --- a/cubes/metadata/defaults.py +++ b/cubes/metadata/defaults.py @@ -1,26 +1,25 @@ # -*- encoding: utf-8 -*- -"""Metadata validation -""" +"""Metadata validation.""" -import pkgutil import json -from ..common import to_str - +import pkgutil from collections import namedtuple +from ..common import to_str + try: import jsonschema except ImportError: from ..common import MissingPackage + jsonschema = MissingPackage("jsonschema", "Model validation") -__all__ = ( - "validate_model", -) +__all__ = ("validate_model",) -ValidationError = namedtuple("ValidationError", - ["severity", "scope", "object", "property", "message"]) +ValidationError = namedtuple( + "ValidationError", ["severity", "scope", "object", "property", "message"] +) def validate_model(metadata): @@ -30,7 +29,7 @@ def validate_model(metadata): return validator.validate() -class ModelMetadataValidator(object): +class ModelMetadataValidator: def __init__(self, metadata): self.metadata = metadata @@ -81,11 +80,15 @@ def validate_model(self): if dims and isinstance(dims, list): for dim in dims: if isinstance(dim, str): - err = ValidationError("default", "model", None, - "dimensions", - "Dimension '%s' is not described, " - "creating flat single-attribute " - "dimension" % dim) + err = ValidationError( + "default", + "model", + None, + "dimensions", + "Dimension '%s' is not described, " + "creating flat single-attribute " + "dimension" % dim, + ) errors.append(err) return errors @@ -103,20 +106,30 @@ def validate_dimension(self, dim): errors = self._collect_errors("dimension", name, validator, dim) if "default_hierarchy_name" not in dim: - error = ValidationError("default", "dimension", 
name, None, - "No default hierarchy name specified, " - "using first one") + error = ValidationError( + "default", + "dimension", + name, + None, + "No default hierarchy name specified, using first one", + ) errors.append(error) if "levels" not in dim and "attributes" not in dim: - error = ValidationError("default", "dimension", name, None, - "Neither levels nor attributes specified, " - "creating flat dimension without details") + error = ValidationError( + "default", + "dimension", + name, + None, + "Neither levels nor attributes specified, " + "creating flat dimension without details", + ) errors.append(error) elif "levels" in dim and "attributes" in dim: - error = ValidationError("error", "dimension", name, None, - "Both levels and attributes specified") + error = ValidationError( + "error", "dimension", name, None, "Both levels and attributes specified" + ) errors.append(error) return errors diff --git a/cubes/metadata/dimension.py b/cubes/metadata/dimension.py index 39cb14a5..998d5f46 100644 --- a/cubes/metadata/dimension.py +++ b/cubes/metadata/dimension.py @@ -2,50 +2,53 @@ import copy import re - from collections import OrderedDict -from typing import ( - Any, - Collection, - Dict, - List, - Optional, - Set, - Sized, - Tuple, - Union, - cast, - ) +from typing import Any, Collection, Dict, List, Optional, Set, Sized, Tuple, Union, cast from ..common import get_localizable_attributes +from ..errors import ( + ArgumentError, + HierarchyError, + ModelError, + ModelInconsistencyError, + NoSuchAttributeError, + TemplateRequired, +) from ..types import JSONType -from ..errors import ModelError, ArgumentError, HierarchyError, \ - NoSuchAttributeError, ModelInconsistencyError, TemplateRequired - -from .base import ModelObject, object_dict from .attributes import Attribute, expand_attribute_metadata - - +from .base import ModelObject, object_dict __all__ = [ "Dimension", "Hierarchy", "Level", "HierarchyPath", - "string_to_dimension_level", ] # FIXME: See #354 
_DEFAULT_LEVEL_ROLES = { - "time": ["year", "quarter", "month", "day", "hour", "minute", "second", - "week", "weeknum", "dow", - "isoyear", "isoweek", "isoweekday"] + "time": [ + "year", + "quarter", + "month", + "day", + "hour", + "minute", + "second", + "week", + "weeknum", + "dow", + "isoyear", + "isoweek", + "isoweekday", + ] } HierarchyPath = List[str] + class Level(ModelObject): """Object representing a hierarchy level. Holds all level attributes. @@ -90,27 +93,28 @@ class Level(ModelObject): pagination or cut through this level. Note: the `attributes` are going to be owned by the `dimension`. - """ localizable_attributes = ["label", "description"] localizable_lists = ["attributes"] - def __init__(self, - name: str, - attributes: List[Attribute], - key:Optional[str]=None, - order_attribute:Optional[str]=None, - order:Optional[str]=None, - label_attribute:Optional[str]=None, - label:Optional[str]=None, - info:Optional[JSONType]=None, - cardinality:Optional[str]=None, - role:Optional[str]=None, - nonadditive:Optional[str]=None, - description:Optional[str]=None) -> None: - - super(Level, self).__init__(name, label, description, info) + def __init__( + self, + name: str, + attributes: List[Attribute], + key: Optional[str] = None, + order_attribute: Optional[str] = None, + order: Optional[str] = None, + label_attribute: Optional[str] = None, + label: Optional[str] = None, + info: Optional[JSONType] = None, + cardinality: Optional[str] = None, + role: Optional[str] = None, + nonadditive: Optional[str] = None, + description: Optional[str] = None, + ) -> None: + + super().__init__(name, label, description, info) self.cardinality = cardinality self.role = role @@ -126,8 +130,7 @@ def __init__(self, elif nonadditive in ["all", "any"]: self.nonadditive = "all" elif nonadditive != "time": - raise ModelError("Unknown non-additive diension type '%s'" - % nonadditive) + raise ModelError("Unknown non-additive diension type '%s'" % nonadditive) self.nonadditive = nonadditive 
if key: @@ -156,9 +159,10 @@ def __init__(self, try: self.order_attribute = self.attribute(order_attribute) except NoSuchAttributeError: - raise NoSuchAttributeError("Unknown order attribute {} in " - "level {}" - .format(order_attribute, self.name)) + raise NoSuchAttributeError( + "Unknown order attribute {} in " + "level {}".format(order_attribute, self.name) + ) else: self.order_attribute = self.attributes[0] @@ -167,12 +171,13 @@ def __init__(self, self.cardinality = cardinality @classmethod - def from_metadata(cls, - metadata: JSONType, - name: str=None, - dimension: "Dimension"=None) -> "Level": - """Create a level object from metadata. `name` can override level name in - the metadata.""" + def from_metadata( + cls, metadata: JSONType, name: str = None, dimension: "Dimension" = None + ) -> "Level": + """Create a level object from metadata. + + `name` can override level name in the metadata. + """ level_name: str @@ -189,32 +194,34 @@ def from_metadata(cls, attributes.append(attr) return cls( - name=level_name, - attributes=attributes, - key=metadata.get("key"), - order_attribute=metadata.get("order_attribute"), - order=metadata.get("order"), - label_attribute=metadata.get("label_attribute"), - label=metadata.get("label"), - info=metadata.get("info"), - cardinality=metadata.get("cardinality"), - role=metadata.get("role"), - nonadditive=metadata.get("nonadditive"), - description=metadata.get("description"), - ) + name=level_name, + attributes=attributes, + key=metadata.get("key"), + order_attribute=metadata.get("order_attribute"), + order=metadata.get("order"), + label_attribute=metadata.get("label_attribute"), + label=metadata.get("label"), + info=metadata.get("info"), + cardinality=metadata.get("cardinality"), + role=metadata.get("role"), + nonadditive=metadata.get("nonadditive"), + description=metadata.get("description"), + ) def __eq__(self, other: Any) -> bool: if not other or type(other) != type(self): return False - elif self.name != other.name \ - or 
self.label != other.label \ - or self.key != other.key \ - or self.cardinality != other.cardinality \ - or self.role != other.role \ - or self.label_attribute != other.label_attribute \ - or self.order_attribute != other.order_attribute \ - or self.nonadditive != other.nonadditive \ - or self.attributes != other.attributes: + elif ( + self.name != other.name + or self.label != other.label + or self.key != other.key + or self.cardinality != other.cardinality + or self.role != other.role + or self.label_attribute != other.label_attribute + or self.order_attribute != other.order_attribute + or self.nonadditive != other.nonadditive + or self.attributes != other.attributes + ): return False return True @@ -239,25 +246,26 @@ def __deepcopy__(self, memo: Any) -> "Level": else: order_attribute = None - return Level(self.name, - attributes=copy.deepcopy(self.attributes, memo), - key=self.key.name, - order_attribute=order_attribute, - order=self.order, - label_attribute=self.label_attribute.name, - info=copy.copy(self.info), - label=copy.copy(self.label), - cardinality=self.cardinality, - nonadditive=self.nonadditive, - role=self.role - ) + return Level( + self.name, + attributes=copy.deepcopy(self.attributes, memo), + key=self.key.name, + order_attribute=order_attribute, + order=self.order, + label_attribute=self.label_attribute.name, + info=copy.copy(self.info), + label=copy.copy(self.label), + cardinality=self.cardinality, + nonadditive=self.nonadditive, + role=self.role, + ) def to_dict(self, **options: Any) -> JSONType: - """Convert to dictionary""" + """Convert to dictionary.""" full_attribute_names = cast(bool, options.get("full_attribute_names")) - out = super(Level, self).to_dict(**options) + out = super().to_dict(**options) out["role"] = self.role @@ -274,8 +282,7 @@ def to_dict(self, **options: Any) -> JSONType: out["cardinality"] = self.cardinality out["nonadditive"] = self.nonadditive - out["attributes"] = [attr.to_dict(**options) for attr in - self.attributes] 
+ out["attributes"] = [attr.to_dict(**options) for attr in self.attributes] return out def attribute(self, name: str) -> Attribute: @@ -308,29 +315,32 @@ def localizable_dictionary(self) -> JSONType: return locale -def string_to_dimension_level(astring: str) -> Tuple[str,str,str]: +def string_to_dimension_level(astring: str) -> Tuple[str, str, str]: """Converts `astring` into a dimension level tuple (`dimension`, `hierarchy`, `level`). The string should have a format: ``dimension@hierarchy:level``. Hierarchy and level are optional. - Raises `ArgumentError` when `astring` does not match expected pattern. + Raises `ArgumentError` when `astring` does not match expected + pattern. """ if not astring: raise ArgumentError("Drilldown string should not be empty") ident = r"[\w\d_]" - pattern = r"(?P%s+)(@(?P%s+))?(:(?P%s+))?" % (ident, - ident, - ident) + pattern = r"(?P{}+)(@(?P{}+))?(:(?P{}+))?".format( + ident, ident, ident + ) match = re.match(pattern, astring) if match: d = match.groupdict() return (d["dim"], d["hier"], d["level"]) else: - raise ArgumentError("String '%s' does not match drilldown level " - "pattern 'dimension@hierarchy:level'" % astring) + raise ArgumentError( + "String '%s' does not match drilldown level " + "pattern 'dimension@hierarchy:level'" % astring + ) class Hierarchy(ModelObject, Sized): @@ -340,12 +350,14 @@ class Hierarchy(ModelObject, Sized): levels: List[Level] _levels: Dict[str, Level] - def __init__(self, - name: str, - levels: List[Level], - label: Optional[str]=None, - info: Optional[JSONType]=None, - description: Optional[str]=None) -> None: + def __init__( + self, + name: str, + levels: List[Level], + label: Optional[str] = None, + info: Optional[JSONType] = None, + description: Optional[str] = None, + ) -> None: """Dimension hierarchy - specifies order of dimension levels. Attributes: @@ -362,24 +374,28 @@ def __init__(self, dimension. 
""" - super(Hierarchy, self).__init__(name, label, description, info) + super().__init__(name, label, description, info) if not levels: - raise ModelInconsistencyError("Hierarchy level list should " - "not be empty (in %s)" % self.name) + raise ModelInconsistencyError( + "Hierarchy level list should not be empty (in %s)" % self.name + ) if any(isinstance(level, str) for level in levels): - raise ModelInconsistencyError("Levels should not be provided as " - "strings to Hierarchy.") + raise ModelInconsistencyError( + "Levels should not be provided as strings to Hierarchy." + ) self._levels = object_dict(levels) def __deepcopy__(self, memo: Any) -> "Hierarchy": - return Hierarchy(self.name, - label=self.label, - description=self.description, - info=copy.deepcopy(self.info, memo), - levels=copy.deepcopy(self.levels, memo)) + return Hierarchy( + self.name, + label=self.label, + description=self.description, + info=copy.deepcopy(self.info, memo), + levels=copy.deepcopy(self.levels, memo), + ) @property def levels(self) -> List[Level]: @@ -396,9 +412,11 @@ def levels(self, levels: List[Level]) -> None: def level_names(self) -> List[str]: return list(self._levels) - def keys(self, depth:Optional[int]=None) -> List[Attribute]: - """Return names of keys for all levels in the hierarchy to `depth`. If - `depth` is `None` then all levels are returned.""" + def keys(self, depth: Optional[int] = None) -> List[Attribute]: + """Return names of keys for all levels in the hierarchy to `depth`. + + If `depth` is `None` then all levels are returned. 
+ """ levels: Collection[Level] if depth is not None: @@ -412,8 +430,11 @@ def __eq__(self, other: Any) -> bool: if not other or type(other) != type(self): return False - return self.name == other.name and self.label == other.label \ - and self.levels == other.levels + return ( + self.name == other.name + and self.label == other.label + and self.levels == other.levels + ) def __ne__(self, other: Any) -> bool: return not self.__eq__(other) @@ -431,33 +452,39 @@ def __getitem__(self, item: str) -> Level: try: return self._levels[item] except IndexError: - raise HierarchyError("Hierarchy '%s' has only %d levels, " - "asking for deeper level" - % (self.name, len(self._levels))) + raise HierarchyError( + "Hierarchy '%s' has only %d levels, " + "asking for deeper level" % (self.name, len(self._levels)) + ) def __contains__(self, item: Level) -> bool: if item in self.levels: return True return item in [level.name for level in self.levels] - def levels_for_depth(self, depth:int, - drilldown:bool=False) -> List[Level]: - """Returns levels for given `depth`. If `path` is longer than - hierarchy levels, `cubes.ArgumentError` exception is raised""" + def levels_for_depth(self, depth: int, drilldown: bool = False) -> List[Level]: + """Returns levels for given `depth`. + + If `path` is longer than hierarchy levels, `cubes.ArgumentError` + exception is raised + """ extend = 1 if drilldown else 0 if depth + extend > len(self.levels): - raise HierarchyError("Depth %d is longer than hierarchy " - "levels %s (drilldown: %s)" % - (depth, self._levels, drilldown)) + raise HierarchyError( + "Depth %d is longer than hierarchy " + "levels %s (drilldown: %s)" % (depth, self._levels, drilldown) + ) - return self.levels[0:depth + extend] + return self.levels[0 : depth + extend] def next_level(self, level: Optional[Level]) -> Optional[Level]: - """Returns next level in hierarchy after `level`. If `level` is last - level, returns ``None``. 
If `level` is ``None``, then the first level - is returned.""" + """Returns next level in hierarchy after `level`. + + If `level` is last level, returns ``None``. If `level` is + ``None``, then the first level is returned. + """ if not level: return self.levels[0] @@ -469,8 +496,10 @@ def next_level(self, level: Optional[Level]) -> Optional[Level]: return self.levels[index + 1] def previous_level(self, level: Optional[Level]) -> Optional[Level]: - """Returns previous level in hierarchy after `level`. If `level` is - first level or ``None``, returns ``None``""" + """Returns previous level in hierarchy after `level`. + + If `level` is first level or ``None``, returns ``None`` + """ if level is None: return None @@ -482,36 +511,40 @@ def previous_level(self, level: Optional[Level]) -> Optional[Level]: return self.levels[index - 1] def level_index(self, name: str) -> int: - """Get order index of level `name`. Can be used for ordering and - comparing levels within hierarchy.""" + """Get order index of level `name`. + + Can be used for ordering and comparing levels within hierarchy. + """ try: return list(self._levels).index(name) except ValueError: - raise HierarchyError(f"Level {name} is not part of hierarchy " - f"{self.name}") + raise HierarchyError( + f"Level {name} is not part of hierarchy " f"{self.name}" + ) def is_last(self, level: Level) -> bool: """Returns `True` if `level` is last level of the hierarchy.""" return level == self.levels[-1] - def rollup(self, path: HierarchyPath, - level:Optional[Level]=None) -> List[str]: + def rollup(self, path: HierarchyPath, level: Optional[Level] = None) -> List[str]: """Rolls-up the path to the `level`. If `level` is ``None`` then path is rolled-up only one level. If `level` is deeper than last level of `path` the - `cubes.HierarchyError` exception is raised. If `level` is the same as - `path` level, nothing happens.""" + `cubes.HierarchyError` exception is raised. 
If `level` is the + same as `path` level, nothing happens. + """ last: Optional[int] if level is not None: last = self.level_index(level.name) + 1 if last > len(path): - raise HierarchyError("Can not roll-up: level '%s' – it is " - "deeper than deepest element of path %s" % - (str(level), path)) + raise HierarchyError( + "Can not roll-up: level '%s' – it is " + "deeper than deepest element of path %s" % (str(level), path) + ) else: if len(path) > 0: last = len(path) - 1 @@ -524,9 +557,11 @@ def rollup(self, path: HierarchyPath, return path[0:last] def path_is_base(self, path: HierarchyPath) -> bool: - """Returns True if path is base path for the hierarchy. Base path is a + """Returns True if path is base path for the hierarchy. Base path is a. + path where there are no more levels to be added - no drill down - possible.""" + possible. + """ return path is not None and len(path) == len(self.levels) @@ -551,11 +586,10 @@ def to_dict(self, **options: Any) -> JSONType: * `name`: hierarchy name * `label`: human readable label (localizable) * `levels`: level names - """ depth = cast(int, options.get("depth")) - out = super(Hierarchy, self).to_dict(**options) + out = super().to_dict(**options) levels = [str(l) for l in self.levels] @@ -574,15 +608,17 @@ def localizable_dictionary(self) -> JSONType: return locale -def expand_dimension_metadata(metadata: JSONType, - expand_levels: bool=False) -> JSONType: - """ - Expands `metadata` to be as complete as possible dimension metadata. If - `expand_levels` is `True` then levels metadata are expanded as well. +def expand_dimension_metadata( + metadata: JSONType, expand_levels: bool = False +) -> JSONType: + """Expands `metadata` to be as complete as possible dimension metadata. + + If `expand_levels` is `True` then levels metadata are expanded as + well. 
""" if isinstance(metadata, str): - metadata = {"name":metadata, "levels": [metadata]} + metadata = {"name": metadata, "levels": [metadata]} else: metadata = dict(metadata) @@ -594,8 +630,13 @@ def expand_dimension_metadata(metadata: JSONType, # Fix levels levels = metadata.get("levels", []) if not levels and expand_levels: - attributes = ["attributes", "key", "order_attribute", "order", - "label_attribute"] + attributes = [ + "attributes", + "key", + "order_attribute", + "order", + "label_attribute", + ] level = {} for attr in attributes: if attr in metadata: @@ -616,8 +657,9 @@ def expand_dimension_metadata(metadata: JSONType, # Fix hierarchies if "hierarchy" in metadata and "hierarchies" in metadata: - raise ModelInconsistencyError("Both 'hierarchy' and 'hierarchies'" - " specified. Use only one") + raise ModelInconsistencyError( + "Both 'hierarchy' and 'hierarchies' specified. Use only one" + ) hierarchy = metadata.get("hierarchy") if hierarchy: @@ -632,8 +674,11 @@ def expand_dimension_metadata(metadata: JSONType, def expand_hierarchy_metadata(metadata: JSONType) -> JSONType: - """Returns a hierarchy metadata as a dictionary. Makes sure that required - properties are present. Raises exception on missing values.""" + """Returns a hierarchy metadata as a dictionary. + + Makes sure that required properties are present. Raises exception on + missing values. + """ try: name = metadata["name"] @@ -645,13 +690,17 @@ def expand_hierarchy_metadata(metadata: JSONType) -> JSONType: return metadata + def expand_level_metadata(metadata: JSONType) -> JSONType: - """Returns a level description as a dictionary. If provided as string, - then it is going to be used as level name and as its only attribute. If a - dictionary is provided and has no attributes, then level will contain only - attribute with the same name as the level name.""" + """Returns a level description as a dictionary. 
+ + If provided as string, then it is going to be used as level name and + as its only attribute. If a dictionary is provided and has no + attributes, then level will contain only attribute with the same + name as the level name. + """ if isinstance(metadata, str): - metadata = {"name":metadata, "attributes": [metadata]} + metadata = {"name": metadata, "attributes": [metadata]} else: metadata = dict(metadata) @@ -663,10 +712,7 @@ def expand_level_metadata(metadata: JSONType) -> JSONType: attributes = metadata.get("attributes") if not attributes: - attribute = { - "name": name, - "label": metadata.get("label") - } + attribute = {"name": name, "label": metadata.get("label")} attributes = [attribute] @@ -683,10 +729,7 @@ def expand_level_metadata(metadata: JSONType) -> JSONType: class Dimension(ModelObject): - """ - Cube dimension. - - """ + """Cube dimension.""" localizable_attributes = ["label", "description"] localizable_lists = ["levels", "hierarchies"] @@ -708,23 +751,25 @@ class Dimension(ModelObject): nonadditive: Optional[str] # TODO: new signature: __init__(self, name, *attributes, **kwargs): - def __init__(self, - name: str, - levels: Optional[List[Level]]=None, - hierarchies: Optional[List[Hierarchy]]=None, - default_hierarchy_name: Optional[str]=None, - label: Optional[str]=None, - description: Optional[str]=None, - info: Optional[JSONType]=None, - role: Optional[str]=None, - cardinality: Optional[str]=None, - category: Optional[str]=None, - master: Optional["Dimension"]=None, - nonadditive: Optional[str]=None, - attributes: Optional[List[Attribute]]=None, - **desc: Any) -> None: - - """Create a new dimension + def __init__( + self, + name: str, + levels: Optional[List[Level]] = None, + hierarchies: Optional[List[Hierarchy]] = None, + default_hierarchy_name: Optional[str] = None, + label: Optional[str] = None, + description: Optional[str] = None, + info: Optional[JSONType] = None, + role: Optional[str] = None, + cardinality: Optional[str] = None, + 
category: Optional[str] = None, + master: Optional["Dimension"] = None, + nonadditive: Optional[str] = None, + attributes: Optional[List[Attribute]] = None, + **desc: Any, + ) -> None: + + """Create a new dimension. Attributes: @@ -763,7 +808,7 @@ def __init__(self, Note: The hierarchy will be owned by the dimension. """ - super(Dimension, self).__init__(name, label, description, info) + super().__init__(name, label, description, info) self.role = role self.cardinality = cardinality @@ -782,8 +827,7 @@ def __init__(self, elif nonadditive in ["all", "any"]: self.nonadditive = "all" elif nonadditive != "time": - raise ModelError("Unknown non-additive diension type '%s'" - % nonadditive) + raise ModelError("Unknown non-additive diension type '%s'" % nonadditive) self.nonadditive = nonadditive @@ -799,7 +843,7 @@ def __init__(self, # Own the levels and their attributes self._levels = object_dict(levels or []) - + # FIXME: See #354 default_roles: List[str] if self.role is not None: @@ -820,10 +864,11 @@ def __init__(self, for a in level.attributes: # Own the attribute if a.dimension is not None and a.dimension is not self: - raise ModelError("Dimension '%s' can not claim attribute " - "'%s' because it is owned by another " - "dimension '%s'." - % (self.name, a.name, a.dimension.name)) + raise ModelError( + "Dimension '%s' can not claim attribute " + "'%s' because it is owned by another " + "dimension '%s'." 
% (self.name, a.name, a.dimension.name) + ) a.dimension = self self._attributes[a.name] = a self._attributes_by_ref[a.ref] = a @@ -832,9 +877,9 @@ def __init__(self, if hierarchies: error_message = "Duplicate hierarchy '{key}' in cube '{cube}'" error_dict = {"cube": self.name} - self._hierarchies = object_dict(hierarchies, - error_message=error_message, - error_dict=error_dict) + self._hierarchies = object_dict( + hierarchies, error_message=error_message, error_dict=error_dict + ) else: default = Hierarchy("default", self.levels) self._hierarchies = object_dict([default]) @@ -844,16 +889,17 @@ def __init__(self, # the first hierarchy in the hierarchy list. default_name = default_hierarchy_name or "default" - hierarchy = self._hierarchies.get(default_name, - list(self._hierarchies.values())[0]) + hierarchy = self._hierarchies.get( + default_name, list(self._hierarchies.values())[0] + ) self._default_hierarchy = hierarchy self.default_hierarchy_name = hierarchy.name @classmethod - def from_metadata(cls, - metadata: JSONType, - templates:Dict[str,"Dimension"]=None) -> "Dimension": + def from_metadata( + cls, metadata: JSONType, templates: Dict[str, "Dimension"] = None + ) -> "Dimension": """Create a dimension from a `metadata` dictionary. 
Some rules: * ``levels`` might contain level names as strings – names of levels to @@ -862,7 +908,6 @@ def from_metadata(cls, hierarchies to inherit from the template * all levels that are not covered by hierarchies are not included in the final dimension - """ template: Optional[Dimension] @@ -891,14 +936,16 @@ def from_metadata(cls, # Create copy of template's hierarchies, but reference newly # created copies of level objects hierarchies = [] - level_dict = dict((level.name, level) for level in levels) + level_dict = {level.name: level for level in levels} for hier in template._hierarchies.values(): hier_levels = [level_dict[level.name] for level in hier.levels] - hier_copy = Hierarchy(hier.name, - hier_levels, - label=hier.label, - info=copy.deepcopy(hier.info)) + hier_copy = Hierarchy( + hier.name, + hier_levels, + label=hier.label, + info=copy.deepcopy(hier.info), + ) hierarchies.append(hier_copy) default_hierarchy_name = template.default_hierarchy_name @@ -924,8 +971,7 @@ def from_metadata(cls, # Fix the metadata, but don't create default level if the template # provides levels. 
- metadata = expand_dimension_metadata(metadata, - expand_levels=not bool(levels)) + metadata = expand_dimension_metadata(metadata, expand_levels=not bool(levels)) name = metadata.get("name") @@ -943,7 +989,7 @@ def from_metadata(cls, if not cardinality: info = metadata.get("info", {}) if "high_cardinality" in info: - cardinality = "high" + cardinality = "high" # Levels # ------ @@ -956,9 +1002,11 @@ def from_metadata(cls, for level_md in metadata["levels"]: if isinstance(level_md, str): if not template: - raise ModelError("Can not specify just a level name " - "(%s) if there is no template for " - "dimension %s" % (level_md, name)) + raise ModelError( + "Can not specify just a level name " + "(%s) if there is no template for " + "dimension %s" % (level_md, name) + ) level = template.level(level_md) else: level = Level.from_metadata(level_md) @@ -980,12 +1028,10 @@ def from_metadata(cls, # Hierarchies # ----------- if "hierarchies" in metadata: - hierarchies = _create_hierarchies(metadata["hierarchies"], - levels, - template) + hierarchies = _create_hierarchies(metadata["hierarchies"], levels, template) else: # Keep only hierarchies which include existing levels - level_names = set([level.name for level in levels]) + level_names = {level.name for level in levels} keep = [] for hier in hierarchies: if any(level.name not in level_names for level in hier.levels): @@ -994,9 +1040,9 @@ def from_metadata(cls, keep.append(hier) hierarchies = keep - - default_hierarchy_name = metadata.get("default_hierarchy_name", - default_hierarchy_name) + default_hierarchy_name = metadata.get( + "default_hierarchy_name", default_hierarchy_name + ) if not hierarchies: # Create single default hierarchy @@ -1006,36 +1052,39 @@ def from_metadata(cls, # hierarchies. 
Retain the original level order used_level_names: Set[str] = set() for hier in hierarchies: - used_level_names |= set(level.name for level in hier.levels) + used_level_names |= {level.name for level in hier.levels} levels = [level for level in levels if level.name in used_level_names] - return cls(name=name, - levels=levels, - hierarchies=hierarchies, - default_hierarchy_name=default_hierarchy_name, - label=label, - description=description, - info=info, - cardinality=cardinality, - role=role, - category=category, - nonadditive=nonadditive - ) + return cls( + name=name, + levels=levels, + hierarchies=hierarchies, + default_hierarchy_name=default_hierarchy_name, + label=label, + description=description, + info=info, + cardinality=cardinality, + role=role, + category=category, + nonadditive=nonadditive, + ) def __eq__(self, other: Any) -> bool: if other is None or type(other) != type(self): return False - cond = self.name == other.name \ - and self.role == other.role \ - and self.label == other.label \ - and self.description == other.description \ - and self.cardinality == other.cardinality \ - and self.category == other.category \ - and self.default_hierarchy_name == other.default_hierarchy_name \ - and self._levels == other._levels \ - and self._hierarchies == other._hierarchies + cond = ( + self.name == other.name + and self.role == other.role + and self.label == other.label + and self.description == other.description + and self.cardinality == other.cardinality + and self.category == other.category + and self.default_hierarchy_name == other.default_hierarchy_name + and self._levels == other._levels + and self._hierarchies == other._hierarchies + ) return cond @@ -1054,8 +1103,10 @@ def has_details(self) -> bool: @property def levels(self) -> List[Level]: - """Get list of all dimension levels. Order is not guaranteed, use a - hierarchy to have known order.""" + """Get list of all dimension levels. + + Order is not guaranteed, use a hierarchy to have known order. 
+ """ return list(self._levels.values()) @levels.setter @@ -1077,44 +1128,54 @@ def hierarchies(self, hierarchies: List[Hierarchy]) -> None: @property def level_names(self) -> List[str]: - """Get list of level names. Order is not guaranteed, use a hierarchy - to have known order.""" + """Get list of level names. + + Order is not guaranteed, use a hierarchy to have known order. + """ return list(self._levels.keys()) def level(self, obj: Union[Level, str]) -> Level: - """Get level by name or as Level object. This method is used for - coalescing value""" + """Get level by name or as Level object. + + This method is used for coalescing value + """ if isinstance(obj, str): if obj not in self._levels: - raise KeyError("No level %s in dimension %s" % - (obj, self.name)) + raise KeyError(f"No level {obj} in dimension {self.name}") return self._levels[obj] elif isinstance(obj, Level): return obj else: - raise ValueError("Unknown level object %s (should be a string " - "or Level)" % obj) + raise ValueError( + "Unknown level object %s (should be a string or Level)" % obj + ) - def hierarchy(self, obj: Optional[Union[Hierarchy, str]]=None) -> Hierarchy: - """Get hierarchy object either by name or as `Hierarchy`. If `obj` is - ``None`` then default hierarchy is returned.""" + def hierarchy(self, obj: Optional[Union[Hierarchy, str]] = None) -> Hierarchy: + """Get hierarchy object either by name or as `Hierarchy`. + + If `obj` is ``None`` then default hierarchy is returned. 
+ """ if obj is None: return self._default_hierarchy elif isinstance(obj, str): if obj not in self._hierarchies: - raise ModelError("No hierarchy %s in dimension %s" % - (obj, self.name)) + raise ModelError(f"No hierarchy {obj} in dimension {self.name}") return self._hierarchies[obj] elif isinstance(obj, Hierarchy): return obj else: - raise ValueError("Unknown hierarchy object %s (should be a " - "string or Hierarchy instance)" % obj) + raise ValueError( + "Unknown hierarchy object %s (should be a " + "string or Hierarchy instance)" % obj + ) - def attribute(self, name: str, by_ref: bool=False) -> Attribute: - """Get dimension attribute. `name` is an attribute name (default) or - attribute reference if `by_ref` is `True`.`.""" + def attribute(self, name: str, by_ref: bool = False) -> Attribute: + """Get dimension attribute. + + `name` is an attribute name (default) or attribute reference if + `by_ref` is `True`.`. + """ if by_ref: return self._attributes_by_ref[name] @@ -1122,15 +1183,15 @@ def attribute(self, name: str, by_ref: bool=False) -> Attribute: try: return self._attributes[name] except KeyError: - raise NoSuchAttributeError("Unknown attribute '{}' " - "in dimension '{}'" - .format(name, self.name), - name) - + raise NoSuchAttributeError( + "Unknown attribute '{}' " + "in dimension '{}'".format(name, self.name), + name, + ) @property def is_flat(self) -> bool: - """Is true if dimension has only one level""" + """Is true if dimension has only one level.""" if self.master: return self.master.is_flat @@ -1138,29 +1199,36 @@ def is_flat(self) -> bool: @property def key_attributes(self) -> List[Attribute]: - """Return all dimension key attributes, regardless of hierarchy. Order - is not guaranteed, use a hierarchy to have known order.""" + """Return all dimension key attributes, regardless of hierarchy. + + Order is not guaranteed, use a hierarchy to have known order. 
+ """ return [level.key for level in self._levels.values()] @property def attributes(self) -> List[Attribute]: - """Return all dimension attributes regardless of hierarchy. Order is - not guaranteed, use :meth:`cubes.Hierarchy.all_attributes` to get - known order. Order of attributes within level is preserved.""" + """Return all dimension attributes regardless of hierarchy. + + Order is not guaranteed, use + :meth:`cubes.Hierarchy.all_attributes` to get known order. Order + of attributes within level is preserved. + """ return list(self._attributes.values()) - def clone(self, - hierarchies: Optional[List[str]]=None, - exclude_hierarchies:Optional[List[str]]=None, - nonadditive: Optional[str]=None, - default_hierarchy_name: Optional[str]=None, - cardinality: Optional[str]=None, - alias: Optional[str]=None, - **extra: Any) -> "Dimension": - """Returns a clone of the receiver with some modifications. `master` - of the clone is set to the receiver. + def clone( + self, + hierarchies: Optional[List[str]] = None, + exclude_hierarchies: Optional[List[str]] = None, + nonadditive: Optional[str] = None, + default_hierarchy_name: Optional[str] = None, + cardinality: Optional[str] = None, + alias: Optional[str] = None, + **extra: Any, + ) -> "Dimension": + """Returns a clone of the receiver with some modifications. `master` of + the clone is set to the receiver. * `hierarchies` – limit hierarchies only to those specified in `hierarchies`. If default hierarchy name is not in the new hierarchy @@ -1174,9 +1242,9 @@ def clone(self, name: str if hierarchies == []: - raise ModelInconsistencyError("Can not remove all hierarchies" - "from a dimension (%s)." - % self.name) + raise ModelInconsistencyError( + "Can not remove all hierarchies from a dimension (%s)." % self.name + ) linked: List[Hierarchy] = [] @@ -1223,8 +1291,9 @@ def clone(self, # is invalid. 
if not default_hierarchy_name: - if any(hier.name == self.default_hierarchy_name - for hier in cloned_hierarchies): + if any( + hier.name == self.default_hierarchy_name for hier in cloned_hierarchies + ): default_hierarchy_name = self.default_hierarchy_name else: default_hierarchy_name = cloned_hierarchies[0].name @@ -1232,23 +1301,25 @@ def clone(self, # TODO: should we do deppcopy on info? name = alias or self.name - return Dimension(name=name, - levels=levels, - hierarchies=cloned_hierarchies, - default_hierarchy_name=default_hierarchy_name, - label=self.label, - description=self.description, - info=self.info, - role=self.role, - cardinality=cardinality, - master=self, - nonadditive=nonadditive, - **extra) + return Dimension( + name=name, + levels=levels, + hierarchies=cloned_hierarchies, + default_hierarchy_name=default_hierarchy_name, + label=self.label, + description=self.description, + info=self.info, + role=self.role, + cardinality=cardinality, + master=self, + nonadditive=nonadditive, + **extra, + ) - def to_dict(self, **options:Any) -> JSONType: - """Return dictionary representation of the dimension""" + def to_dict(self, **options: Any) -> JSONType: + """Return dictionary representation of the dimension.""" - out = super(Dimension, self).to_dict(**options) + out = super().to_dict(**options) hierarchy_limits = options.get("hierarchy_limits") @@ -1288,93 +1359,125 @@ def to_dict(self, **options:Any) -> JSONType: # TODO: Change to List[ValidationResult] def validate(self) -> List[Any]: - """Validate dimension. See Model.validate() for more information. """ + """Validate dimension. + + See Model.validate() for more information. 
+ """ results = [] if not self.levels: - results.append(('error', "No levels in dimension '%s'" - % (self.name))) + results.append(("error", "No levels in dimension '%s'" % (self.name))) return results if not self._hierarchies: msg = "No hierarchies in dimension '%s'" % (self.name) if self.is_flat: level = self.levels[0] - results.append(('default', - msg + ", flat level '%s' will be used" % - (level.name))) + results.append( + ("default", msg + ", flat level '%s' will be used" % (level.name)) + ) elif len(self.levels) > 1: - results.append(('error', - msg + ", more than one levels exist (%d)" % - len(self.levels))) + results.append( + ( + "error", + msg + ", more than one levels exist (%d)" % len(self.levels), + ) + ) else: - results.append(('error', msg)) + results.append(("error", msg)) else: # if self._hierarchies if not self.default_hierarchy_name: - if len(self._hierarchies) > 1 and \ - "default" not in self._hierarchies: - results.append(('error', - "No defaut hierarchy specified, there is " - "more than one hierarchy in dimension " - "'%s'" % self.name)) - - if self.default_hierarchy_name \ - and not self._hierarchies.get(self.default_hierarchy_name): - results.append(('error', - "Default hierarchy '%s' does not exist in " - "dimension '%s'" % - (self.default_hierarchy_name, self.name))) + if len(self._hierarchies) > 1 and "default" not in self._hierarchies: + results.append( + ( + "error", + "No defaut hierarchy specified, there is " + "more than one hierarchy in dimension " + "'%s'" % self.name, + ) + ) + + if self.default_hierarchy_name and not self._hierarchies.get( + self.default_hierarchy_name + ): + results.append( + ( + "error", + "Default hierarchy '%s' does not exist in " + "dimension '%s'" % (self.default_hierarchy_name, self.name), + ) + ) attributes: Set[str] = set() first_occurence: Dict[str, str] = {} for level_name, level in self._levels.items(): if not level.attributes: - results.append(('error', - "Level '%s' in dimension '%s' has no " - 
"attributes" % (level.name, self.name))) + results.append( + ( + "error", + "Level '%s' in dimension '%s' has no " + "attributes" % (level.name, self.name), + ) + ) continue if not level.key: attr = level.attributes[0] - results.append(('default', - "Level '%s' in dimension '%s' has no key " - "attribute specified, first attribute will " - "be used: '%s'" - % (level.name, self.name, attr))) + results.append( + ( + "default", + "Level '%s' in dimension '%s' has no key " + "attribute specified, first attribute will " + "be used: '%s'" % (level.name, self.name, attr), + ) + ) if level.attributes and level.key: if level.key.name not in [a.name for a in level.attributes]: - results.append(('error', - "Key '%s' in level '%s' in dimension " - "'%s' is not in level's attribute list" - % (level.key, level.name, self.name))) + results.append( + ( + "error", + "Key '%s' in level '%s' in dimension " + "'%s' is not in level's attribute list" + % (level.key, level.name, self.name), + ) + ) for attribute in level.attributes: attr_name = attribute.ref if attr_name in attributes: first = first_occurence[attr_name] - results.append(('error', - "Duplicate attribute '%s' in dimension " - "'%s' level '%s' (also defined in level " - "'%s')" % (attribute, self.name, - level_name, first))) + results.append( + ( + "error", + "Duplicate attribute '%s' in dimension " + "'%s' level '%s' (also defined in level " + "'%s')" % (attribute, self.name, level_name, first), + ) + ) else: attributes.add(attr_name) first_occurence[attr_name] = level_name if not isinstance(attribute, Attribute): - results.append(('error', - "Attribute '%s' in dimension '%s' is " - "not instance of Attribute" - % (attribute, self.name))) + results.append( + ( + "error", + "Attribute '%s' in dimension '%s' is " + "not instance of Attribute" % (attribute, self.name), + ) + ) if attribute.dimension is not self: - results.append(('error', - "Dimension (%s) of attribute '%s' does " - "not match with owning dimension %s" - % 
(attribute.dimension, attribute, - self.name))) + results.append( + ( + "error", + "Dimension (%s) of attribute '%s' does " + "not match with owning dimension %s" + % (attribute.dimension, attribute, self.name), + ) + ) return results @@ -1382,8 +1485,7 @@ def __str__(self) -> str: return self.name def __repr__(self) -> str: - return "" % (self.name, - self._levels) + return f"" def localizable_dictionary(self) -> Dict[str, Any]: locale: Dict[str, Any] = {} @@ -1405,9 +1507,8 @@ def localizable_dictionary(self) -> Dict[str, Any]: def _create_hierarchies( - metadata: JSONType, - levels: List[Level], - template: Optional[Dimension]) -> List[Hierarchy]: + metadata: JSONType, levels: List[Level], template: Optional[Dimension] +) -> List[Hierarchy]: """Create dimension hierarchies from `metadata` (a list of dictionaries or strings) and possibly inherit from `template` dimension.""" @@ -1422,8 +1523,10 @@ def _create_hierarchies( if template is not None: hier = template.hierarchy(md) else: - raise ModelError("Can not specify just a hierarchy name " - "({}) if there is no template".format(md)) + raise ModelError( + "Can not specify just a hierarchy name " + "({}) if there is no template".format(md) + ) else: md = dict(md) level_names = md.pop("levels") @@ -1433,5 +1536,3 @@ def _create_hierarchies( hierarchies.append(hier) return hierarchies - - diff --git a/cubes/metadata/localization.py b/cubes/metadata/localization.py index 9e59bf0e..46b96192 100644 --- a/cubes/metadata/localization.py +++ b/cubes/metadata/localization.py @@ -5,21 +5,32 @@ # TODO: Should go away with new approach to model object description in #398 -class ModelObjectLocalizationContext(object): - def __init__(self, translation, context, object_type, object_name): + +from typing import Any, Dict, Optional + + +class ModelObjectLocalizationContext: + def __init__( + self, + translation: Dict[str, Any], + context: "LocalizationContext", + object_type: str, + object_name: str, + ) -> None: self.translation 
= translation self.object_type = object_type self.object_name = object_name self.context = context - def get(self, key, default=None): + def get(self, key: str, default: Optional[str] = None) -> Optional[str]: try: return self.translation[key] except KeyError: - return self.context.get(self.object_type, self.object_name, key, - default) + return self.context.get(self.object_type, self.object_name, key, default) - def object_localization(self, object_type, name): + def object_localization( + self, object_type: str, name: str + ) -> "ModelObjectLocalizationContext": try: objects = self.translation[object_type] except KeyError: @@ -28,22 +39,23 @@ def object_localization(self, object_type, name): try: trans = objects[name] except KeyError: - return ModelObjectLocalizationContext({}, self.context, - object_type, name) + return ModelObjectLocalizationContext({}, self.context, object_type, name) # Make string-only translations as translations of labels if isinstance(trans, str): trans = {"label": trans} - return ModelObjectLocalizationContext(trans, self.context, - object_type, name) + return ModelObjectLocalizationContext(trans, self.context, object_type, name) -class LocalizationContext(object): - def __init__(self, translation, parent=None): + +class LocalizationContext: + def __init__(self, translation: Dict[str, Dict], parent=None) -> None: self.translation = translation self.parent = parent - def object_localization(self, object_type, name): + def object_localization( + self, object_type: str, name: str + ) -> ModelObjectLocalizationContext: try: objects = self.translation[object_type] except KeyError: @@ -60,7 +72,13 @@ def object_localization(self, object_type, name): return ModelObjectLocalizationContext(trans, self, object_type, name) - def get(self, object_type, object_name, key, default=None): + def get( + self, + object_type: str, + object_name: str, + key: str, + default: Optional[str] = None, + ) -> Optional[str]: try: objects = 
self.translation[object_type] except KeyError: @@ -83,8 +101,11 @@ def get(self, object_type, object_name, key, default=None): def _get_translation(self, obj, type_): """Returns translation in language `lang` for model object `obj` of - type `type_`. The type can be: ``cube`` or ``dimension``. Looks in - parent if current namespace does not have the translation.""" + type `type_`. + + The type can be: ``cube`` or ``dimension``. Looks in parent if + current namespace does not have the translation. + """ lookup = [] visited = set() @@ -100,7 +121,7 @@ def _get_translation(self, obj, type_): lookup_map = { "cube": "cubes", "dimension": "dimensions", - "defaults": "defaults" + "defaults": "defaults", } objkey = lookup_map[type_] @@ -110,4 +131,3 @@ def _get_translation(self, obj, type_): return trans[objkey][obj] return None - diff --git a/cubes/metadata/model.py b/cubes/metadata/model.py index 791c1a38..dc0e310e 100644 --- a/cubes/metadata/model.py +++ b/cubes/metadata/model.py @@ -1,41 +1,52 @@ -# -*- encoding: utf-8 -*- -"""Logical model.""" - -import re -import copy - -from collections import OrderedDict, defaultdict - -from expressions import inspect_variables - -from .common import IgnoringDictionary, to_label -from .common import assert_all_instances -from .common import get_localizable_attributes -from .errors import ModelError, ArgumentError, ExpressionError, HierarchyError -from .errors import NoSuchAttributeError, NoSuchDimensionError -from .errors import ModelInconsistencyError, TemplateRequired -from .metadata import expand_cube_metadata, expand_dimension_links -from .metadata import expand_dimension_metadata, expand_level_metadata - - -__all__ = [ - "ModelObject", - "Cube", - "Dimension", - "Hierarchy", - "Level", - "AttributeBase", - "Attribute", - "Measure", - "MeasureAggregate", - - "create_list_of", - "object_dict", - - "collect_attributes", - "depsort_attributes", - "collect_dependencies", - "string_to_dimension_level", -] - - +# Not used, right ? 
+# TODO: remove. + +# # -*- encoding: utf-8 -*- +# """Logical model.""" +# +# import copy +# import re +# from collections import OrderedDict, defaultdict +# +# from expressions import inspect_variables +# +# from .common import ( +# IgnoringDictionary, +# assert_all_instances, +# get_localizable_attributes, +# to_label, +# ) +# from .errors import ( +# ArgumentError, +# ExpressionError, +# HierarchyError, +# ModelError, +# ModelInconsistencyError, +# NoSuchAttributeError, +# NoSuchDimensionError, +# TemplateRequired, +# ) +# from .metadata import ( +# expand_cube_metadata, +# expand_dimension_links, +# expand_dimension_metadata, +# expand_level_metadata, +# ) +# +# __all__ = [ +# "ModelObject", +# "Cube", +# "Dimension", +# "Hierarchy", +# "Level", +# "AttributeBase", +# "Attribute", +# "Measure", +# "MeasureAggregate", +# "create_list_of", +# "object_dict", +# "collect_attributes", +# "depsort_attributes", +# "collect_dependencies", +# "string_to_dimension_level", +# ] diff --git a/cubes/metadata/physical.py b/cubes/metadata/physical.py index e0da09bb..d402edfb 100644 --- a/cubes/metadata/physical.py +++ b/cubes/metadata/physical.py @@ -1,11 +1,11 @@ -"""Physical Metadata""" +"""Physical Metadata.""" -from typing import Any, List, Optional, Hashable, NamedTuple from enum import Enum +from typing import Any, Hashable, List, Optional -from ..types import JSONType from ..common import list_hash from ..errors import ArgumentError +from ..types import JSONType # TODO: [typing] Make JoinMethod enum @@ -21,11 +21,12 @@ class JoinKey(Hashable): table: Optional[str] columns: List[str] - def __init__(self, - columns: List[str], - table: Optional[str]=None, - schema: Optional[str]=None, - ) -> None: + def __init__( + self, + columns: List[str], + table: Optional[str] = None, + schema: Optional[str] = None, + ) -> None: self.columns = columns self.table = table self.schema = schema @@ -50,8 +51,10 @@ def from_dict(cls, obj: JSONType) -> "JoinKey": # TODO: Legacy - deprecated 
if isinstance(obj, (list, tuple)): - raise ValueError(f"Join key specified as a list/tuple. " - f"should be a dictionary: '{obj}'") + raise ValueError( + f"Join key specified as a list/tuple. " + f"should be a dictionary: '{obj}'" + ) if isinstance(obj, str): split: List[Optional[str]] @@ -73,11 +76,10 @@ def from_dict(cls, obj: JSONType) -> "JoinKey": else: columns = [] - return JoinKey(columns= columns, table= table, schema= schema) + return JoinKey(columns=columns, table=table, schema=schema) def __hash__(self) -> int: - column_hash: int - column_hash = list_hash(self.columns) # type: ignore + column_hash: int = list_hash(self.columns) return hash(self.schema) ^ hash(self.table) ^ column_hash @@ -85,9 +87,11 @@ def __eq__(self, other: Any) -> bool: if not isinstance(other, JoinKey): return False else: - return self.columns == other.columns \ - and self.table == other.table \ - and self.schema == other.schema + return ( + self.columns == other.columns + and self.table == other.table + and self.schema == other.schema + ) def __str__(self) -> str: schema = f"{self.schema}." if self.schema is not None else "" @@ -97,6 +101,7 @@ def __str__(self) -> str: def __repr__(self) -> str: return f"JoinKey({self})" + # FIXME: Put this string into the named tuple below (requires python/mypy#3043) """Table join specification. `master` and `detail` are TableColumnReference @@ -105,6 +110,7 @@ def __repr__(self) -> str: detail members (right outer join) and *match* – members must match (inner join).""" + class Join(Hashable): # TODO: [typingI nvestigate optional keys - where, how? 
@@ -117,29 +123,33 @@ class Join(Hashable): # Method how the table is joined method: JoinMethod - def __init__(self, - master: JoinKey, - detail: JoinKey, - alias: Optional[str]=None, - method: Optional[JoinMethod]=None) -> None: + def __init__( + self, + master: JoinKey, + detail: JoinKey, + alias: Optional[str] = None, + method: Optional[JoinMethod] = None, + ) -> None: self.master = master self.detail = detail self.alias = alias self.method = method or JoinMethod.match - def __hash__(self) -> int: - return hash(self.master) ^ hash(self.detail) \ - ^ hash(self.alias) ^ hash(self.method) + return ( + hash(self.master) ^ hash(self.detail) ^ hash(self.alias) ^ hash(self.method) + ) def __eq__(self, other: Any) -> bool: if not isinstance(other, Join): return False else: - return self.master == other.master \ - and self.detail == other.detail \ - and self.alias == other.alias \ - and self.method == other.method + return ( + self.master == other.master + and self.detail == other.detail + and self.alias == other.alias + and self.method == other.method + ) @classmethod def from_dict(cls, obj: JSONType) -> "Join": @@ -161,12 +171,13 @@ def from_dict(cls, obj: JSONType) -> "Join": # TODO: Deprecated, remove if isinstance(obj, list): - alias = None + alias = None method = None if len(obj) < 2 or len(obj) > 4: - raise ArgumentError(f"Join object can have 1 to 4 items" - f" has {len(obj)}: {obj}") + raise ArgumentError( + f"Join object can have 1 to 4 items" f" has {len(obj)}: {obj}" + ) padded: List[str] padded = obj + [None] * (4 - len(obj)) @@ -179,7 +190,7 @@ def from_dict(cls, obj: JSONType) -> "Join": elif isinstance(obj, dict): if "master" not in obj: raise ArgumentError(f"Join '{obj}' has no master.") - else: + else: master = JoinKey.from_dict(obj["master"]) if "detail" not in obj: @@ -209,6 +220,7 @@ def from_dict(cls, obj: JSONType) -> "Join": Note that either `extract` or `function` can be used, not both.""" + class ColumnReference(Hashable): column: str table: 
Optional[str] @@ -216,13 +228,14 @@ class ColumnReference(Hashable): extract: Optional[str] function: Optional[str] - def __init__(self, - column: str, - table: Optional[str]=None, - schema: Optional[str]=None, - extract: Optional[str]=None, - function: Optional[str]=None, - ) -> None: + def __init__( + self, + column: str, + table: Optional[str] = None, + schema: Optional[str] = None, + extract: Optional[str] = None, + function: Optional[str] = None, + ) -> None: self.column = column self.table = table self.schema = schema @@ -230,18 +243,25 @@ def __init__(self, self.function = function def __hash__(self) -> int: - return hash(self.column) ^ hash(self.table) ^ hash(self.schema) \ - ^ hash(self.extract) ^ hash(self.function) + return ( + hash(self.column) + ^ hash(self.table) + ^ hash(self.schema) + ^ hash(self.extract) + ^ hash(self.function) + ) def __eq__(self, other: Any) -> bool: if not isinstance(other, ColumnReference): return False else: - return self.column == other.column \ - and self.table == other.table \ - and self.schema == other.schema \ - and self.extract == other.extract \ - and self.function == other.function + return ( + self.column == other.column + and self.table == other.table + and self.schema == other.schema + and self.extract == other.extract + and self.function == other.function + ) @classmethod def from_dict(cls, obj: JSONType) -> "ColumnReference": @@ -269,6 +289,7 @@ def from_dict(cls, obj: JSONType) -> "ColumnReference": table = split[1] column = split[2] # TODO: Deprecated + # FIXME: won't work ('split' is not defined) elif isinstance(obj, list): split = [None] * (3 - len(split)) + split @@ -283,10 +304,9 @@ def from_dict(cls, obj: JSONType) -> "ColumnReference": function = obj.get("function") return ColumnReference( - schema=schema, - table=table, - column=column, - extract=extract, - function=function) - - + schema=schema, + table=table, + column=column, + extract=extract, + function=function, + ) diff --git 
a/cubes/metadata/providers.py b/cubes/metadata/providers.py index 0300b490..3d876835 100644 --- a/cubes/metadata/providers.py +++ b/cubes/metadata/providers.py @@ -1,18 +1,7 @@ # -*- coding: utf-8 -*- """Logical model model providers.""" import copy - -from typing import ( - cast, - Any, - Dict, - List, - Optional, - Collection, - Set, -) - -from ..types import JSONType +from typing import Any, Dict, List, Optional, Set, cast from ..errors import ( BackendError, @@ -22,21 +11,15 @@ NoSuchDimensionError, TemplateRequired, ) - -from .localization import LocalizationContext +from ..ext import Extensible +from ..namespace import Namespace +from ..stores import Store +from ..types import JSONType from .cube import Cube from .dimension import Dimension -from ..stores import Store - -from ..namespace import Namespace -from ..ext import Extensible +from .localization import LocalizationContext -__all__ = [ - "ModelProvider", - "StaticModelProvider", - "link_cube", - "find_dimension", -] +__all__ = ["ModelProvider", "StaticModelProvider", "link_cube", "find_dimension"] # Proposed Provider API: # Provider.cube() – in abstract class @@ -48,9 +31,13 @@ # # Provider is bound to namespace + class ModelProvider(Extensible, abstract=True): - """Abstract class – factory for model object. Currently empty and used - only to find other model providers.""" + """Abstract class – factory for model object. + + Currently empty and used only to find other model providers. + """ + __extension_type__ = "model_provider" store: Optional[Store] @@ -59,7 +46,7 @@ class ModelProvider(Extensible, abstract=True): cubes_metadata: Dict[str, JSONType] # TODO: Don't get metadata, but arbitrary arguments. - def __init__(self, metadata: JSONType=None) -> None: + def __init__(self, metadata: JSONType = None) -> None: """Base class for model providers. Initializes a model provider and sets `metadata` – a model metadata dictionary. 
@@ -96,7 +83,7 @@ def __init__(self, metadata: JSONType=None) -> None: self.options = self.metadata.get("options", {}) self.options.update(self.metadata.get("browser_options", {})) - def default_metadata(self, metadata: JSONType=None) -> JSONType: + def default_metadata(self, metadata: JSONType = None) -> JSONType: """Returns metadata that are prepended to the provided model metadata. `metadata` is user-provided metadata and might be used to decide what kind of default metadata are returned. @@ -115,29 +102,36 @@ def default_metadata(self, metadata: JSONType=None) -> JSONType: # TODO: remove this in favor of provider configuration: store= def requires_store(self) -> bool: - """Return `True` if the provider requires a store. Subclasses might - override this method. Default implementation returns `False`""" + """Return `True` if the provider requires a store. + + Subclasses might override this method. Default implementation + returns `False` + """ return False # TODO: bind this automatically on provider configuration: store (see # requires_store() function) def bind(self, store: Store) -> None: - """Set's the provider's `store`. """ + """Sets the provider's `store`.""" self.store = store self.initialize_from_store() def initialize_from_store(self) -> None: """This method is called after the provider's `store` was set. - Override this method if you would like to perform post-initialization - from the store.""" + + Override this method if you would like to perform post- + initialization from the store. + """ pass - def cube_options(self, cube_name:str) -> JSONType: - """Returns an options dictionary for cube `name`. The options - dictoinary is merged model `options` metadata with cube's `options` - metadata if exists. Cube overrides model's global (default) - options.""" + def cube_options(self, cube_name: str) -> JSONType: + """Returns an options dictionary for cube `name`. 
+ + The options dictionary is merged model `options` metadata with + cube's `options` metadata if exists. Cube overrides model's + global (default) options. + """ options = dict(self.options) if cube_name in self.cubes_metadata: @@ -148,19 +142,20 @@ def cube_options(self, cube_name:str) -> JSONType: return options - def dimension_metadata(self, name: str, locale: str=None) -> JSONType: + def dimension_metadata(self, name: str, locale: str = None) -> JSONType: """Returns a metadata dictionary for dimension `name` and optional `locale`. - Subclasses should override this method and call the super if they - would like to merge metadata provided in a model file. + Subclasses should override this method and call the super if + they would like to merge metadata provided in a model file. + """ try: return self.dimensions_metadata[name] except KeyError: raise NoSuchDimensionError("No such dimension '%s'" % name, name) - def cube_metadata(self, name:str , locale: str=None) -> JSONType: + def cube_metadata(self, name: str, locale: str = None) -> JSONType: """Returns a cube metadata by combining model's global metadata and cube's metadata. Merged metadata dictionaries: `browser_options`, `mappings`, `joins`. 
@@ -180,10 +175,10 @@ def cube_metadata(self, name:str , locale: str=None) -> JSONType: raise NoSuchCubeError("No such cube '%s'" % name, name) # merge browser_options - browser_options = self.metadata.get('browser_options', {}) - if metadata.get('browser_options'): - browser_options.update(metadata.get('browser_options')) - metadata['browser_options'] = browser_options + browser_options = self.metadata.get("browser_options", {}) + if metadata.get("browser_options"): + browser_options.update(metadata.get("browser_options")) + metadata["browser_options"] = browser_options # Merge model and cube mappings # @@ -211,14 +206,14 @@ def cube_metadata(self, name:str , locale: str=None) -> JSONType: model_join_map = {} for join in model_joins: try: - jname = join['name'] + jname = join["name"] except KeyError: - raise ModelError("Missing required 'name' key in " - "model-level joins.") + raise ModelError( + "Missing required 'name' key in model-level joins." + ) if jname in model_join_map: - raise ModelError("Duplicate model-level join 'name': %s" % - jname) + raise ModelError("Duplicate model-level join 'name': %s" % jname) model_join_map[jname] = copy.deepcopy(join) @@ -226,7 +221,7 @@ def cube_metadata(self, name:str , locale: str=None) -> JSONType: merged_joins = [] for join in cube_joins: - name = join.get('name') + name = join.get("name") if name and name in model_join_map: model_join = dict(model_join_map[name]) else: @@ -240,11 +235,15 @@ def cube_metadata(self, name:str , locale: str=None) -> JSONType: # Validate joins: for join in merged_joins: if "master" not in join: - raise ModelError("No master in join for cube '%s' " - "(join name: %s)" % (name, join.get("name"))) + raise ModelError( + "No master in join for cube '%s' " + "(join name: %s)" % (name, join.get("name")) + ) if "detail" not in join: - raise ModelError("No detail in join for cube '%s' " - "(join name: %s)" % (name, join.get("name"))) + raise ModelError( + "No detail in join for cube '%s' " + "(join 
name: %s)" % (name, join.get("name")) + ) metadata["joins"] = merged_joins @@ -262,13 +261,14 @@ def list_cubes(self) -> List[JSONType]: raise NotImplementedError("Subclasses should implement list_cubes()") def has_cube(self, name: str) -> bool: - """Returns `True` if the provider has cube `name`. Otherwise returns - `False`.""" + """Returns `True` if the provider has cube `name`. + + Otherwise returns `False`. + """ return name in self.cubes_metadata - def cube(self, name: str, locale: str=None, namespace: Namespace=None) \ - -> Cube: + def cube(self, name: str, locale: str = None, namespace: Namespace = None) -> Cube: """Returns a cube with `name` provided by the receiver. If receiver does not have the cube `NoSuchCube` exception is raised. @@ -291,16 +291,14 @@ def cube(self, name: str, locale: str=None, namespace: Namespace=None) \ return cube - def dimension(self, - name: str, - templates: Dict[str, Dimension]=None, - locale: str=None) -> Dimension: + def dimension( + self, name: str, templates: Dict[str, Dimension] = None, locale: str = None + ) -> Dimension: """Returns a dimension with `name` provided by the receiver. `dimensions` is a dictionary of dimension objects where the receiver can look for templates. If the dimension requires a template and the template is missing, the subclasses should raise - `TemplateRequired(template)` error with a template name as an - argument. + `TemplateRequired(template)` error with a template name as an argument. If the receiver does not provide the dimension `NoSuchDimension` exception is raised. 
@@ -323,7 +321,7 @@ def _merge_metadata(metadata: JSONType, other: JSONType) -> JSONType: if dims: metadata["dimensions"] = dims - joins = metadata.pop("joins", []) + other.pop("joins",[]) + joins = metadata.pop("joins", []) + other.pop("joins", []) if joins: metadata["joins"] = joins @@ -336,11 +334,11 @@ def _merge_metadata(metadata: JSONType, other: JSONType) -> JSONType: return metadata + # TODO: make this FileModelProvider class StaticModelProvider(ModelProvider, name="static"): - def __init__(self, *args: Any, **kwargs: Any) -> None: - super(StaticModelProvider, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) # Initialization code goes here... def list_cubes(self) -> List[JSONType]: @@ -351,24 +349,30 @@ def list_cubes(self) -> List[JSONType]: info = { "name": cube["name"], "label": cube.get("label", cube["name"]), - "category": (cube.get("category") or cube.get("info", {}).get("category")), - "info": cube.get("info", {}) + "category": ( + cube.get("category") or cube.get("info", {}).get("category") + ), + "info": cube.get("info", {}), } cubes.append(info) return cubes + # TODO: add tests # TODO: needs to be reviewed def link_cube( - cube: Cube, - locale: Optional[str], - provider: ModelProvider=None, - namespace: Namespace=None, - ignore_missing: bool=False) -> Cube: - """Links dimensions to the `cube` in the `context` object. The `context` - object should implement a function `dimension(name, locale, namespace, - provider)`. Modifies cube in place, returns the cube. + cube: Cube, + locale: Optional[str], + provider: ModelProvider = None, + namespace: Namespace = None, + ignore_missing: bool = False, +) -> Cube: + """Links dimensions to the `cube` in the `context` object. + + The `context` object should implement a function `dimension(name, + locale, namespace, provider)`. Modifies cube in place, returns the + cube. 
""" # TODO: change this to: link_cube(cube, locale, namespace, provider) @@ -378,18 +382,17 @@ def link_cube( for dim_name in cube.dimension_links.keys(): if dim_name in linked: - raise ModelError("Dimension '{}' linked twice" - .format(dim_name)) + raise ModelError(f"Dimension '{dim_name}' linked twice") try: - dim = find_dimension(dim_name, locale, - provider=provider, - namespace=namespace) + dim = find_dimension( + dim_name, locale, provider=provider, namespace=namespace + ) except TemplateRequired as e: raise ModelError("Dimension template '%s' missing" % dim_name) if not dim and not ignore_missing: - raise CubesError("Dimension '{}' not found.".format(dim_name)) + raise CubesError(f"Dimension '{dim_name}' not found.") cube.link_dimension(dim) linked.add(dim_name) @@ -399,15 +402,15 @@ def link_cube( # TODO: add tests def find_dimension( - name: str, - locale: str=None, - provider: ModelProvider=None, - namespace: Namespace=None) -> Dimension: - """Returns a localized dimension with `name`. Raises - `NoSuchDimensionError` when no model published the dimension. Raises - `RequiresTemplate` error when model provider requires a template to be - able to provide the dimension, but such template is not a public - dimension. + name: str, + locale: Optional[str] = None, + provider: Optional[ModelProvider] = None, + namespace: Optional[Namespace] = None, +) -> Dimension: + """Returns a localized dimension with `name`. Raises `NoSuchDimensionError` + when no model published the dimension. Raises `RequiresTemplate` error when + model provider requires a template to be able to provide the dimension, but + such template is not a public dimension. 
The standard lookup when linking a cube is: @@ -434,23 +437,24 @@ def find_dimension( required_template = None try: - dimension = _lookup_dimension(name, templates, - namespace, provider) + dimension = _lookup_dimension(name, templates, namespace, provider) except TemplateRequired as e: required_template = e.template else: templates[name] = dimension if required_template in templates: - raise BackendError("Some model provider didn't make use of " - "dimension template '%s' for '%s'" - % (required_template, name)) + raise BackendError( + "Some model provider didn't make use of " + "dimension template '%s' for '%s'" % (required_template, name) + ) if required_template: missing.append(name) if required_template in missing: - raise ModelError("Dimension templates cycle in '%s'" % - required_template) + raise ModelError( + "Dimension templates cycle in '%s'" % required_template + ) missing.append(required_template) if namespace is not None and locale is not None: @@ -467,14 +471,15 @@ def find_dimension( # TODO: add tests def _lookup_dimension( - name: str, - templates: Dict[str, Dimension], - namespace: Optional[Namespace], - provider: Optional[ModelProvider]) -> Dimension: + name: str, + templates: Dict[str, Dimension], + namespace: Optional[Namespace], + provider: Optional[ModelProvider], +) -> Dimension: """Look-up a dimension `name` in `provider` and then in `namespace`. - `templates` is a dictionary with already instantiated dimensions that - can be used as templates. + `templates` is a dictionary with already instantiated dimensions + that can be used as templates. 
""" dimension = None @@ -492,7 +497,4 @@ def _lookup_dimension( if namespace: return namespace.dimension(name, templates=templates) - raise NoSuchDimensionError("Dimension '%s' not found" % name, - name=name) - - + raise NoSuchDimensionError("Dimension '%s' not found" % name, name=name) diff --git a/cubes/namespace.py b/cubes/namespace.py index 1f44688b..fd1f5475 100644 --- a/cubes/namespace.py +++ b/cubes/namespace.py @@ -2,22 +2,23 @@ # TODO: This module requires redesign if not removal. Namespaces are not good # idea if one of the objectives is to preserve model quality. -from typing import List, Dict, Optional, Set, Tuple, Union, Any -from .types import JSONType +from typing import Any, Dict, List, Optional, Set, Tuple, Union + +from .common import read_json_file +from .errors import ModelError, NoSuchCubeError, NoSuchDimensionError from .metadata.dimension import Dimension +from .types import JSONType + # from .metadata.providers import ModelProvider # FIXME: [Tech-debt] This needs to go away with redesign of namespaces/providers # FIXME: [typing] Workaround for circular dependency ModelProvider = Any -from .errors import NoSuchCubeError, NoSuchDimensionError, ModelError -from .common import read_json_file -__all__ = [ - "Namespace", -] +__all__ = ["Namespace"] -class Namespace(object): + +class Namespace: parent: Optional["Namespace"] name: Optional[str] @@ -25,9 +26,7 @@ class Namespace(object): providers: List[ModelProvider] translations: Dict[str, JSONType] - def __init__(self, - name: Optional[str]=None, - parent:"Namespace"=None) -> None: + def __init__(self, name: Optional[str] = None, parent: "Namespace" = None) -> None: """Creates a cubes namespace – an object that provides model objects from the providers.""" # TODO: Assign this on __init__, namespaces should not be freely @@ -38,12 +37,13 @@ def __init__(self, self.providers = [] self.translations = {} - def namespace(self, path: Union[str, List[str]], create: bool=False) \ - -> Tuple["Namespace", 
Optional[str]]: - """Returns a tuple (`namespace`, `remainder`) where `namespace` is - the deepest namespace in the namespace hierarchy and `remainder` is - the remaining part of the path that has no namespace (is an object - name or contains part of external namespace). + def namespace( + self, path: Union[str, List[str]], create: bool = False + ) -> Tuple["Namespace", Optional[str]]: + """Returns a tuple (`namespace`, `remainder`) where `namespace` is the + deepest namespace in the namespace hierarchy and `remainder` is the + remaining part of the path that has no namespace (is an object name or + contains part of external namespace). If path is empty or not provided then returns self. @@ -61,7 +61,7 @@ def namespace(self, path: Union[str, List[str]], create: bool=False) \ namespace = self for i, element in enumerate(path_elements): - remainder = path_elements[i+1:] + remainder = path_elements[i + 1 :] if element in namespace.namespaces: namespace = namespace.namespaces[element] found = True @@ -89,8 +89,7 @@ def create_namespace(self, name: str) -> "Namespace": return namespace - def find_cube(self, cube_ref: str) \ - -> Tuple["Namespace", ModelProvider, str]: + def find_cube(self, cube_ref: str) -> Tuple["Namespace", ModelProvider, str]: """Returns a tuple (`namespace`, `provider`, `basename`) where `namespace` is a namespace conaining `cube`, `provider` providers the model for the cube and `basename` is a name of the `cube` within the @@ -98,7 +97,8 @@ def find_cube(self, cube_ref: str) \ is namespace ``slicer`` then that namespace is returned and the `basename` will be ``nested.cube``. - Raises `NoSuchCubeError` when there is no cube with given reference. + Raises `NoSuchCubeError` when there is no cube with given + reference. 
""" path: List[str] @@ -118,7 +118,7 @@ def find_cube(self, cube_ref: str) \ (namespace, remainder) = self.namespace(path) if remainder: - basename = "{}.{}".format(remainder, cube_ref) + basename = f"{remainder}.{cube_ref}" else: basename = cube_ref @@ -132,15 +132,13 @@ def find_cube(self, cube_ref: str) \ provider = None if not provider: - raise NoSuchCubeError("Unknown cube '{}'".format(cube_ref), - cube_ref) + raise NoSuchCubeError(f"Unknown cube '{cube_ref}'", cube_ref) return (namespace, provider, basename) - - def list_cubes(self, recursive: bool=False) -> List[JSONType]: - """Retursn a list of cube info dictionaries with keys: `name`, - `label`, `description`, `category` and `info`.""" + def list_cubes(self, recursive: bool = False) -> List[JSONType]: + """Retursn a list of cube info dictionaries with keys: `name`, `label`, + `description`, `category` and `info`.""" all_cubes: List[JSONType] all_cubes = [] @@ -162,26 +160,27 @@ def list_cubes(self, recursive: bool=False) -> List[JSONType]: for name, ns in self.namespaces.items(): cubes = ns.list_cubes(recursive=True) for cube in cubes: - cube["name"] = "%s.%s" % (name, cube["name"]) + cube["name"] = "{}.{}".format(name, cube["name"]) all_cubes += cubes return all_cubes # TODO: change to find_dimension() analogous to the find_cube(). 
Let the # caller to perform actual dimension creation using the provider - def dimension(self, - name: str, - locale: str=None, - templates: Dict[str, Dimension]=None, - local_only: bool=False) -> Dimension: + def dimension( + self, + name: str, + locale: str = None, + templates: Dict[str, Dimension] = None, + local_only: bool = False, + ) -> Dimension: dim: Dimension for provider in self.providers: # TODO: use locale try: - dim = provider.dimension(name, locale=locale, - templates=templates) + dim = provider.dimension(name, locale=locale, templates=templates) except NoSuchDimensionError: pass else: @@ -212,9 +211,12 @@ def add_translation(self, lang: str, translation: JSONType) -> None: trans.update(translation) def translation_lookup(self, lang: str) -> List[JSONType]: - """Returns translation in language `lang` for model object `obj` - within `context` (cubes, dimensions, attributes, ...). Looks in - parent if current namespace does not have the translation.""" + """Returns translation in language `lang` for model object `obj` within + `context` (cubes, dimensions, attributes, ...). + + Looks in parent if current namespace does not have the + translation. 
+ """ lookup: List[JSONType] lookup = [] @@ -235,4 +237,3 @@ def translation_lookup(self, lang: str) -> List[JSONType]: ns = ns.parent return lookup - diff --git a/cubes/query/browser.py b/cubes/query/browser.py index e370e224..1c125eac 100644 --- a/cubes/query/browser.py +++ b/cubes/query/browser.py @@ -2,79 +2,60 @@ from collections import namedtuple from enum import Enum - from typing import ( - Any, - Collection, - Dict, - Iterable, - Iterator, - List, - Mapping, - Optional, - Set, - Sized, - Tuple, - Union, - cast, - NamedTuple, - ) - -from ..types import JSONType, _RecordType, ValueType - -from ..calendar import CalendarMemberConverter, Calendar -from ..logging import get_logger + Any, + Collection, + Dict, + Iterable, + Iterator, + List, + Mapping, + NamedTuple, + Optional, + Set, + Sized, + Tuple, + Union, + cast, +) + +from ..calendar import Calendar, CalendarMemberConverter from ..common import IgnoringDictionary -from ..errors import ArgumentError, NoSuchAttributeError, HierarchyError, InternalError -from ..stores import Store - +from ..errors import ArgumentError, HierarchyError, InternalError, NoSuchAttributeError +from ..ext import Extensible +from ..logging import get_logger from ..metadata import ( - Attribute, - AttributeBase, - Cube, - Dimension, - Hierarchy, - HierarchyPath, - Level, - Measure, - MeasureAggregate, - string_to_dimension_level, - ) - -from .cells import ( - Cell, - Cut, - PointCut, - RangeCut, - SetCut, - cuts_from_string, - ) - -from .statutils import ( - _CalculatorFunction, - available_calculators, - calculators_for_aggregates, - ) - + Attribute, + AttributeBase, + Cube, + Dimension, + Hierarchy, + HierarchyPath, + Level, + Measure, + MeasureAggregate, + string_to_dimension_level, +) from ..settings import SettingsDict - -from .constants import SPLIT_DIMENSION_NAME, NULL_PATH_VALUE - -from .result import AggregationResult, Facts +from ..stores import Store +from ..types import JSONType, ValueType, _RecordType +from .cells import 
Cell, Cut, PointCut, RangeCut, SetCut, cuts_from_string +from .constants import NULL_PATH_VALUE, SPLIT_DIMENSION_NAME from .drilldown import Drilldown, DrilldownItem, _DrilldownType +from .result import AggregationResult, Facts +from .statutils import ( + _CalculatorFunction, + available_calculators, + calculators_for_aggregates, +) -from ..ext import Extensible - - -__all__ = [ - "AggregationBrowser", -] +__all__ = ["AggregationBrowser"] # Order can be: `name` or (`name`, `direction`) -_OrderType = Tuple[AttributeBase,str] -_OrderArgType = Union[str, Union[_OrderType, Tuple[str,str]]] -_ReportResult = Union[AggregationResult, Facts, JSONType, List[JSONType]] +_OrderType = Tuple[AttributeBase, str] +_OrderArgType = Union[str, Union[_OrderType, Tuple[str, str]]] +_ReportResult = Union[AggregationResult, Facts, JSONType, List[JSONType]] class BrowserFeatureAction(Enum): @@ -85,56 +66,55 @@ class BrowserFeatureAction(Enum): cell = 5 -class BrowserFeatures(object): +class BrowserFeatures: actions: Collection[BrowserFeatureAction] aggregate_functions: Collection[str] post_aggregate_functions: Collection[str] - def __init__(self, - actions: Optional[Collection[BrowserFeatureAction]]=None, - aggregate_functions: Optional[Collection[str]]=None, - post_aggregate_functions: Optional[Collection[str]]=None) \ - -> None: + def __init__( + self, + actions: Optional[Collection[BrowserFeatureAction]] = None, + aggregate_functions: Optional[Collection[str]] = None, + post_aggregate_functions: Optional[Collection[str]] = None, + ) -> None: self.actions = actions or [] self.aggregate_functions = aggregate_functions or [] self.post_aggregate_functions = post_aggregate_functions or [] @classmethod - def from_dict(cls, data: JSONType) -> 'BrowserFeatures': - actions_names: List[str] = data.get('actions') - aggregate_functions: List[str] = data.get('aggregate_functions') - post_aggregate_functions: List[str] = data.get('post_aggregate_functions') + def from_dict(cls, data: JSONType) -> 
"BrowserFeatures": + actions_names: List[str] = data.get("actions") + aggregate_functions: List[str] = data.get("aggregate_functions") + post_aggregate_functions: List[str] = data.get("post_aggregate_functions") try: actions = [BrowserFeatureAction[action] for action in actions_names] except KeyError: - raise InternalError('Some actions are not valid.') + raise InternalError("Some actions are not valid.") return BrowserFeatures( actions=actions, aggregate_functions=aggregate_functions, - post_aggregate_functions=post_aggregate_functions + post_aggregate_functions=post_aggregate_functions, ) - def to_dict(self) -> JSONType: result: JSONType = {} if self.actions: - result['actions'] = [action.name for action in self.actions] + result["actions"] = [action.name for action in self.actions] if self.aggregate_functions: - result['aggregate_functions'] = self.aggregate_functions + result["aggregate_functions"] = self.aggregate_functions if self.post_aggregate_functions: - result['post_aggregate_functions'] = self.post_aggregate_functions + result["post_aggregate_functions"] = self.post_aggregate_functions return result class AggregationBrowser(Extensible, abstract=True): - """Class for browsing data cube aggregations + """Class for browsing data cube aggregations. :Attributes: * `cube` - cube for browsing - """ __extension_type__ = "browser" @@ -149,18 +129,20 @@ class AggregationBrowser(Extensible, abstract=True): calendar: Optional[Calendar] locale: Optional[str] - def __init__(self, - cube: Cube, - store: Optional[Store]=None, - locale: Optional[str]=None, - calendar: Optional[Calendar]=None, - ) -> None: - """Creates and initializes the aggregation browser. Subclasses should - override this method. """ - super(AggregationBrowser, self).__init__() + def __init__( + self, + cube: Cube, + store: Optional[Store] = None, + locale: Optional[str] = None, + calendar: Optional[Calendar] = None, + ) -> None: + """Creates and initializes the aggregation browser. 
+ + Subclasses should override this method. + """ + super().__init__() - assert cube is not None, \ - "No cube given for aggregation browser" + assert cube is not None, "No cube given for aggregation browser" self.cube = cube self.store = store @@ -184,15 +166,17 @@ def features(self) -> BrowserFeatures: return BrowserFeatures() # TODO: No *options - def aggregate(self, - cell: Cell=None, - aggregates: List[str]=None, - drilldown: _DrilldownType=None, - split: Cell=None, - order: Optional[Collection[_OrderArgType]]=None, - page: int=None, - page_size: int=None, - **options: Any) -> AggregationResult: + def aggregate( + self, + cell: Cell = None, + aggregates: List[str] = None, + drilldown: _DrilldownType = None, + split: Cell = None, + order: Optional[Collection[_OrderArgType]] = None, + page: int = None, + page_size: int = None, + **options: Any, + ) -> AggregationResult: """Return aggregate of a cell. @@ -244,43 +228,42 @@ def aggregate(self, prepared_order: Collection[_OrderType] prepared_order = self.prepare_order(order, is_aggregate=True) - converters = { - "time": CalendarMemberConverter(self.calendar) - } + converters = {"time": CalendarMemberConverter(self.calendar)} if cell is None: cell = Cell() elif isinstance(cell, str): - cuts = cuts_from_string(self.cube, cell, - role_member_converters=converters) + cuts = cuts_from_string(self.cube, cell, role_member_converters=converters) cell = Cell(cuts) if isinstance(split, str): - cuts = cuts_from_string(self.cube, split, - role_member_converters=converters) + cuts = cuts_from_string(self.cube, split, role_member_converters=converters) split = Cell(cuts) drilldown = Drilldown(self.cube, items=drilldown) - result = self.provide_aggregate(cell, - aggregates=prepared_aggregates, - drilldown=drilldown, - split=split, - order=prepared_order, - page=page, - page_size=page_size) + result = self.provide_aggregate( + cell, + aggregates=prepared_aggregates, + drilldown=drilldown, + split=split, + order=prepared_order, + 
page=page, + page_size=page_size, + ) # # Find post-aggregation calculations and decorate the result # - calculated_aggs = [agg for agg in prepared_aggregates - if agg.function - and not self.is_builtin_function(agg.function)] - - result.calculators = calculators_for_aggregates(self.cube, - calculated_aggs, - drilldown, - split) + calculated_aggs = [ + agg + for agg in prepared_aggregates + if agg.function and not self.is_builtin_function(agg.function) + ] + + result.calculators = calculators_for_aggregates( + self.cube, calculated_aggs, drilldown, split + ) # Do calculated measures on summary if no drilldown or split if result.summary: @@ -289,14 +272,16 @@ def aggregate(self, return result - def provide_aggregate(self, - cell: Cell, - aggregates: Collection[MeasureAggregate], - drilldown: Drilldown, - split: Optional[Cell]=None, - order: Optional[Collection[_OrderType]]=None, - page: Optional[int]=None, - page_size: Optional[int]=None) -> AggregationResult: + def provide_aggregate( + self, + cell: Cell, + aggregates: Collection[MeasureAggregate], + drilldown: Drilldown, + split: Optional[Cell] = None, + order: Optional[Collection[_OrderType]] = None, + page: Optional[int] = None, + page_size: Optional[int] = None, + ) -> AggregationResult: """Method to be implemented by subclasses. The arguments are prepared by the superclass. Arguments: @@ -307,13 +292,14 @@ def provide_aggregate(self, * `drilldown` – `Drilldown` instance * `split` – `Cell` instance * `order` – list of tuples: (`attribute`, `order`) - """ - raise NotImplementedError("{} does not provide aggregate functionality." \ - .format(str(type(self)))) + raise NotImplementedError( + "{} does not provide aggregate functionality.".format(str(type(self))) + ) - def prepare_aggregates(self, aggregates: List[Any]=None) \ - -> List[MeasureAggregate]: + def prepare_aggregates( + self, aggregates: List[Any] = None + ) -> List[MeasureAggregate]: """Prepares the aggregate list for aggregatios. 
`aggregates` might be a list of aggregate names or `MeasureAggregate` objects. @@ -337,7 +323,7 @@ def prepare_aggregates(self, aggregates: List[Any]=None) \ prepared = self.cube.aggregates seen: Set[str] - seen = set(a.name for a in prepared) + seen = {a.name for a in prepared} dependencies: List[MeasureAggregate] = [] @@ -350,10 +336,12 @@ def prepare_aggregates(self, aggregates: List[Any]=None) \ # seen the measure yet, then the measure is considered to be # another aggregate measure and therefore we need to include it in # the queried aggregates. - if agg.measure \ - and agg.function is not None \ - and not self.is_builtin_function(agg.function) \ - and agg.measure not in seen: + if ( + agg.measure + and agg.function is not None + and not self.is_builtin_function(agg.function) + and agg.measure not in seen + ): seen.add(agg.measure) aggregate = self.cube.aggregate(agg.measure) @@ -361,12 +349,14 @@ def prepare_aggregates(self, aggregates: List[Any]=None) \ return prepared + dependencies - def prepare_order(self, - order: Optional[Collection[_OrderArgType]], - is_aggregate: bool=False) -> Collection[_OrderType]: - """Prepares an order list. Returns list of tuples (`attribute`, - `order_direction`). `attribute` is cube's attribute object.""" + def prepare_order( + self, order: Optional[Collection[_OrderArgType]], is_aggregate: bool = False + ) -> Collection[_OrderType]: + """Prepares an order list. + Returns list of tuples (`attribute`, `order_direction`). + `attribute` is cube's attribute object. 
+ """ order = order or [] new_order: List[_OrderType] = [] @@ -417,62 +407,72 @@ def assert_low_cardinality(self, cell: Cell, drilldown: "Drilldown") -> None: if hc_levels: names = [str(level) for level in hc_levels] names_str = ", ".join(names) - raise ArgumentError(f"Can not drilldown on high-cardinality levels" - f"({names_str}) without including both " - f"page_size and page arguments, or else a " - f"point/set cut on the level" - ) - + raise ArgumentError( + f"Can not drilldown on high-cardinality levels" + f"({names_str}) without including both " + f"page_size and page arguments, or else a " + f"point/set cut on the level" + ) def is_builtin_function(self, function_name: str) -> bool: """Returns `True` if function `function_name` is bult-in. Returns - `False` if the browser can not compute the function and - post-aggregation calculation should be used. + `False` if the browser can not compute the function and post- + aggregation calculation should be used. Default implementation returns `True` for all unctions except those in :func:`available_calculators`. Subclasses are reommended to override this method if they have their own built-in version of the aggregate - functions.""" + functions. + """ return function_name in available_calculators() - def facts(self, - cell: Cell=None, - fields: Collection[AttributeBase]=None, - order: List[_OrderArgType]=None, - page: int=None, - page_size: int=None, - fact_list: List[ValueType]=None) -> Facts: - """Return an iterable object with of all facts within cell. - `fields` is list of fields to be considered in the output. - - Subclasses overriding this method sould return a :class:`Facts` object - and set it's `attributes` to the list of selected attributes.""" - raise NotImplementedError("{} does not provide facts functionality." 
\ - .format(str(type(self)))) - - def fact(self, - key: ValueType, - fields: Collection[AttributeBase]=None) \ - -> Optional[_RecordType]: + def facts( + self, + cell: Cell = None, + fields: Collection[AttributeBase] = None, + order: List[_OrderArgType] = None, + page: int = None, + page_size: int = None, + fact_list: List[ValueType] = None, + ) -> Facts: + """Return an iterable object with of all facts within cell. `fields` is + list of fields to be considered in the output. + + Subclasses overriding this method sould return a :class:`Facts` + object and set it's `attributes` to the list of selected + attributes. + """ + raise NotImplementedError( + "{} does not provide facts functionality.".format(str(type(self))) + ) + + def fact( + self, key: ValueType, fields: Collection[AttributeBase] = None + ) -> Optional[_RecordType]: """Returns a single fact from cube specified by fact key `key`""" - raise NotImplementedError("{} does not provide fact functionality." \ - .format(str(type(self)))) - - def members(self, - cell: Cell, - dimension: Dimension, - depth: int=None, - level: Level=None, - hierarchy: Hierarchy=None, - attributes: Collection[str]=None, - order: Optional[Collection[_OrderArgType]]=None, - page: int=None, - page_size: int=None, - **options: Any) -> Iterable[_RecordType]: - """Return members of `dimension` with level depth `depth`. If `depth` - is ``None``, all levels are returned. If no `hierarchy` is specified, - then default dimension hierarchy is used. + raise NotImplementedError( + "{} does not provide fact functionality.".format(str(type(self))) + ) + + def members( + self, + cell: Cell, + dimension: Dimension, + depth: int = None, + level: Level = None, + hierarchy: Hierarchy = None, + attributes: Collection[str] = None, + order: Optional[Collection[_OrderArgType]] = None, + page: int = None, + page_size: int = None, + **options: Any, + ) -> Iterable[_RecordType]: + """Return members of `dimension` with level depth `depth`. 
+ + If `depth` is ``None``, all levels are returned. If no + `hierarchy` is specified, then default dimension hierarchy is + used. """ prepared_order = self.prepare_order(order, is_aggregate=False) @@ -493,7 +493,7 @@ def members(self, levels = hierarchy.levels_for_depth(depth) elif level: index = hierarchy.level_index(level.name) - levels = hierarchy.levels_for_depth(index+1) + levels = hierarchy.levels_for_depth(index + 1) attribute_objs: Collection[AttributeBase] if attributes is not None: @@ -501,37 +501,45 @@ def members(self, else: attribute_objs = self.cube.get_attributes(attributes) - result = self.provide_members(cell, - dimension=dimension, - hierarchy=hierarchy, - levels=levels, - attributes=attribute_objs, - order=prepared_order, - page=page, - page_size=page_size, - **options) + result = self.provide_members( + cell, + dimension=dimension, + hierarchy=hierarchy, + levels=levels, + attributes=attribute_objs, + order=prepared_order, + page=page, + page_size=page_size, + **options, + ) return result - def provide_members(self, - cell: Cell, - dimension: Dimension, - depth: int=None, - hierarchy: Hierarchy=None, - levels: Collection[Level]=None, - attributes: Collection[AttributeBase]=None, - page: Optional[int]=None, - page_size: Optional[int]=None, - order: Optional[Collection[_OrderType]]=None, - ) -> Iterable[_RecordType]: - raise NotImplementedError("{} does not provide members functionality." 
\ - .format(str(type(self)))) + def provide_members( + self, + cell: Cell, + dimension: Dimension, + depth: int = None, + hierarchy: Hierarchy = None, + levels: Collection[Level] = None, + attributes: Collection[AttributeBase] = None, + page: Optional[int] = None, + page_size: Optional[int] = None, + order: Optional[Collection[_OrderType]] = None, + ) -> Iterable[_RecordType]: + raise NotImplementedError( + "{} does not provide members functionality.".format(str(type(self))) + ) # FIXME: [important] Properly annotate this one - def test(self, aggregate: bool=False) -> None: - """Tests whether the cube can be used. Refer to the backend's - documentation for more information about what is being tested.""" - raise NotImplementedError("{} does not provide test functionality." \ - .format(str(type(self)))) + def test(self, aggregate: bool = False) -> None: + """Tests whether the cube can be used. + + Refer to the backend's documentation for more information about + what is being tested. + """ + raise NotImplementedError( + "{} does not provide test functionality.".format(str(type(self))) + ) # FIXME: Create a special "report query" object def report(self, cell: Cell, queries: JSONType) -> Dict[str, _ReportResult]: @@ -627,8 +635,8 @@ def report(self, cell: Cell, queries: JSONType) -> Dict[str, _ReportResult]: args = dict(query) del args["query"] - # Note: we do not just convert name into function from symbol for possible future - # more fine-tuning of queries as strings + # Note: we do not just convert name into function from symbol for possible + # future more fine-tuning of queries as strings # FIXME: [2.0] dimension was removed from cell, the following code # does not work any more. 
@@ -669,16 +677,17 @@ def report(self, cell: Cell, queries: JSONType) -> Dict[str, _ReportResult]: result = cell_dict else: - raise ArgumentError("Unknown report query '%s' for '%s'" % - (query_type, result_name)) + raise ArgumentError( + f"Unknown report query '{query_type}' for '{result_name}'" + ) report_result[result_name] = result return report_result - def cell_details(self, - cell: Cell=None, - dimension: Union[str, Dimension]=None) -> List[JSONType]: + def cell_details( + self, cell: Cell = None, dimension: Union[str, Dimension] = None + ) -> List[JSONType]: """Returns details for the `cell`. Returned object is a list with one element for each cell cut. If `dimension` is specified, then details only for cuts that use the dimension are returned. @@ -701,8 +710,7 @@ def cell_details(self, return [] if dimension: - cuts = [cut for cut in cell.cuts - if cut.dimension == str(dimension)] + cuts = [cut for cut in cell.cuts if cut.dimension == str(dimension)] else: cuts = cell.cuts @@ -718,7 +726,6 @@ def cut_details(self, cut: Cut) -> Any: * `SetCut` - list of `PointCut` results, one per path in the set * `RangeCut` - `PointCut`-like results for lower range (from) and upper range (to) - """ details: Any @@ -729,17 +736,20 @@ def cut_details(self, cut: Cut) -> Any: details = self._path_details(dimension, cut.path, cut.hierarchy) elif isinstance(cut, SetCut): - details = [self._path_details(dimension, path, cut.hierarchy) - for path in cut.paths] + details = [ + self._path_details(dimension, path, cut.hierarchy) for path in cut.paths + ] elif isinstance(cut, RangeCut): details = { - "from": self._path_details(dimension=dimension, - path=cut.from_path or [], - hierarchy=cut.hierarchy), - "to": self._path_details(dimension=dimension, - path=cut.to_path or [], - hierarchy=cut.hierarchy) + "from": self._path_details( + dimension=dimension, + path=cut.from_path or [], + hierarchy=cut.hierarchy, + ), + "to": self._path_details( + dimension=dimension, path=cut.to_path or 
[], hierarchy=cut.hierarchy + ), } else: @@ -748,11 +758,12 @@ def cut_details(self, cut: Cut) -> Any: return details # FIXME: [typing] fix the return type to RecordType, see #410 - def _path_details(self, - dimension: Dimension, - path: List[str], - hierarchy:Union[str, Hierarchy]=None) \ - -> Optional[List[Dict[str,Optional[str]]]]: + def _path_details( + self, + dimension: Dimension, + path: List[str], + hierarchy: Union[str, Hierarchy] = None, + ) -> Optional[List[Dict[str, Optional[str]]]]: """Returns a list of details for a path. Each element of the list corresponds to one level of the path and is represented by a dictionary. The keys are dimension level attributes. Returns ``None`` @@ -775,8 +786,7 @@ def _path_details(self, result = [] for level in hierarchy.levels_for_depth(len(path)): - item = {a.ref: details.get(a.ref) for a in - level.attributes} + item = {a.ref: details.get(a.ref) for a in level.attributes} item["_key"] = details.get(level.key.ref) item["_label"] = details.get(level.label_attribute.ref) result.append(item) @@ -784,15 +794,17 @@ def _path_details(self, return result # TODO: [typing] Improve the return type - def path_details(self, - dimension: Dimension, - path: HierarchyPath, - hierarchy: Hierarchy) -> Optional[_RecordType]: - """Returns empty path details. Default fall-back for backends that do - not support the path details. The level key and label are the same - derived from the key.""" - - detail: Dict[str,Optional[str]] = {} + def path_details( + self, dimension: Dimension, path: HierarchyPath, hierarchy: Hierarchy + ) -> Optional[_RecordType]: + """Returns empty path details. + + Default fall-back for backends that do not support the path + details. The level key and label are the same derived from the + key. 
+ """ + + detail: Dict[str, Optional[str]] = {} for level, key in zip(hierarchy.levels, path): for attr in level.attributes: if attr == level.key or attr == level.label_attribute: diff --git a/cubes/query/cells.py b/cubes/query/cells.py index 7d7360db..e76894ae 100644 --- a/cubes/query/cells.py +++ b/cubes/query/cells.py @@ -2,28 +2,26 @@ import copy import re - from collections import OrderedDict from typing import ( - Any, - Callable, - Collection, - Dict, - List, - Optional, - Set, - Tuple, - Union, - cast, - ) + Any, + Callable, + Collection, + Dict, + List, + Optional, + Set, + Tuple, + Union, + cast, +) -from ..types import JSONType from ..errors import ArgumentError, CubesError -from ..metadata.dimension import Dimension, Hierarchy, Level, HierarchyPath +from ..logging import get_logger from ..metadata.attributes import Attribute from ..metadata.cube import Cube -from ..logging import get_logger - +from ..metadata.dimension import Dimension, Hierarchy, HierarchyPath, Level +from ..types import JSONType __all__ = [ "Cell", @@ -31,7 +29,6 @@ "PointCut", "RangeCut", "SetCut", - "cuts_from_string", "string_from_cuts", "string_from_path", @@ -42,7 +39,7 @@ ] -NULL_PATH_VALUE = '__null__' +NULL_PATH_VALUE = "__null__" # Function that takes dimension hierarchy path and returns a member value @@ -55,18 +52,18 @@ RANGE_CUT_SEPARATOR_CHAR = "-" SET_CUT_SEPARATOR_CHAR = ";" -CUT_STRING_SEPARATOR = re.compile(r'(? None: + def __init__( + self, + dimension: str, + hierarchy: str = None, + invert: bool = False, + hidden: bool = False, + ) -> None: """Abstract class for a cell cut.""" self.dimension = dimension self.hierarchy = hierarchy @@ -94,12 +94,14 @@ def __init__(self, self.hidden = hidden def to_dict(self) -> JSONType: - """Returns dictionary representation fo the receiver. The keys are: - `dimension`.""" + """Returns dictionary representation fo the receiver. + + The keys are: `dimension`. 
+ """ d: JSONType = OrderedDict() # Placeholder for 'type' to be at the beginning of the list - d['type'] = None + d["type"] = None d["dimension"] = self.dimension d["hierarchy"] = self.hierarchy if self.hierarchy else None @@ -110,8 +112,10 @@ def to_dict(self) -> JSONType: return d def level_depth(self) -> int: - """Returns deepest level number. Subclasses should implement this - method""" + """Returns deepest level number. + + Subclasses should implement this method + """ raise NotImplementedError def __repr__(self) -> str: @@ -120,23 +124,27 @@ def __repr__(self) -> str: class PointCut(Cut): """Object describing way of slicing a cube (cell) through point in a - dimension""" + dimension.""" path: HierarchyPath - def __init__(self, - dimension: str, - path: HierarchyPath, - hierarchy: str=None, - invert: bool=False, - hidden: bool=False) -> None: - super(PointCut, self).__init__(dimension, hierarchy, invert, hidden) + def __init__( + self, + dimension: str, + path: HierarchyPath, + hierarchy: str = None, + invert: bool = False, + hidden: bool = False, + ) -> None: + super().__init__(dimension, hierarchy, invert, hidden) self.path = path def to_dict(self) -> JSONType: - """Returns dictionary representation of the receiver. The keys are: - `dimension`, `type`=``point`` and `path`.""" - d = super(PointCut, self).to_dict() + """Returns dictionary representation of the receiver. + + The keys are: `dimension`, `type`=``point`` and `path`. + """ + d = super().to_dict() d["type"] = "point" d["path"] = self.path return d @@ -147,10 +155,15 @@ def level_depth(self) -> int: def __str__(self) -> str: """Return string representation of point cut, you can use it in - URLs""" + URLs.""" path_str = string_from_path(self.path) dim_str = string_from_hierarchy(self.dimension, self.hierarchy) - string = ("!" if self.invert else "") + dim_str + DIMENSION_STRING_SEPARATOR_CHAR + path_str + string = ( + ("!" 
if self.invert else "") + + dim_str + + DIMENSION_STRING_SEPARATOR_CHAR + + path_str + ) return string @@ -171,27 +184,34 @@ def __ne__(self, other: Any) -> bool: class RangeCut(Cut): """Object describing way of slicing a cube (cell) between two points of a - dimension that has ordered points. For dimensions with unordered points - behaviour is unknown.""" + dimension that has ordered points. + + For dimensions with unordered points behaviour is unknown. + """ from_path: Optional[HierarchyPath] to_path: Optional[HierarchyPath] - def __init__(self, - dimension: str, - from_path: Optional[HierarchyPath], - to_path: Optional[HierarchyPath], - hierarchy: str=None, - invert: bool=False, - hidden: bool=False) -> None: - super(RangeCut, self).__init__(dimension, hierarchy, invert, hidden) + def __init__( + self, + dimension: str, + from_path: Optional[HierarchyPath], + to_path: Optional[HierarchyPath], + hierarchy: str = None, + invert: bool = False, + hidden: bool = False, + ) -> None: + super().__init__(dimension, hierarchy, invert, hidden) self.from_path = from_path self.to_path = to_path def to_dict(self) -> JSONType: - """Returns dictionary representation of the receiver. The keys are: - `dimension`, `type`=``range``, `from` and `to` paths.""" - d = super(RangeCut, self).to_dict() + """Returns dictionary representation of the receiver. + + The keys are: `dimension`, `type`=``range``, `from` and `to` + paths. + """ + d = super().to_dict() d["type"] = "range" d["from"] = self.from_path d["to"] = self.to_path @@ -213,7 +233,7 @@ def level_depth(self) -> int: def __str__(self) -> str: """Return string representation of point cut, you can use it in - URLs""" + URLs.""" if self.from_path: from_path_str = string_from_path(self.from_path) else: @@ -226,8 +246,12 @@ def __str__(self) -> str: range_str = from_path_str + RANGE_CUT_SEPARATOR_CHAR + to_path_str dim_str = string_from_hierarchy(self.dimension, self.hierarchy) - string = ("!" 
if self.invert else "") + dim_str \ - + DIMENSION_STRING_SEPARATOR_CHAR + range_str + string = ( + ("!" if self.invert else "") + + dim_str + + DIMENSION_STRING_SEPARATOR_CHAR + + range_str + ) return string @@ -250,25 +274,32 @@ def __ne__(self, other: Any) -> bool: class SetCut(Cut): """Object describing way of slicing a cube (cell) between two points of a - dimension that has ordered points. For dimensions with unordered points - behaviour is unknown.""" + dimension that has ordered points. + + For dimensions with unordered points behaviour is unknown. + """ paths: List[HierarchyPath] - def __init__(self, - dimension: str, - paths: List[HierarchyPath], - hierarchy:str=None, - invert:bool=False, - hidden:bool=False) -> None: + def __init__( + self, + dimension: str, + paths: List[HierarchyPath], + hierarchy: str = None, + invert: bool = False, + hidden: bool = False, + ) -> None: - super(SetCut, self).__init__(dimension, hierarchy, invert, hidden) + super().__init__(dimension, hierarchy, invert, hidden) self.paths = paths def to_dict(self) -> JSONType: - """Returns dictionary representation of the receiver. The keys are: - `dimension`, `type`=``range`` and `set` as a list of paths.""" - d = super(SetCut, self).to_dict() + """Returns dictionary representation of the receiver. + + The keys are: `dimension`, `type`=``range`` and `set` as a list + of paths. + """ + d = super().to_dict() d["type"] = "set" d["paths"] = self.paths return d @@ -279,15 +310,19 @@ def level_depth(self) -> int: return max([len(path) for path in self.paths]) def __str__(self) -> str: - """Return string representation of set cut, you can use it in URLs""" + """Return string representation of set cut, you can use it in URLs.""" path_strings = [] for path in self.paths: path_strings.append(string_from_path(path)) set_string = SET_CUT_SEPARATOR_CHAR.join(path_strings) dim_str = string_from_hierarchy(self.dimension, self.hierarchy) - string = ("!" 
if self.invert else "") + dim_str \ - + DIMENSION_STRING_SEPARATOR_CHAR + set_string + string = ( + ("!" if self.invert else "") + + dim_str + + DIMENSION_STRING_SEPARATOR_CHAR + + set_string + ) return string @@ -306,26 +341,29 @@ def __ne__(self, other: Any) -> bool: return not self.__eq__(other) -class Cell(object): - """Part of a cube determined by slicing dimensions. Immutable object.""" +class Cell: + """Part of a cube determined by slicing dimensions. + + Immutable object. + """ cuts: List[Cut] - def __init__(self, cuts: Collection[Cut]=None) -> None: + def __init__(self, cuts: Collection[Cut] = None) -> None: self.cuts = list(cuts) if cuts is not None else [] def __and__(self, other: "Cell") -> "Cell": - """Returns a new cell that is a conjunction of the two provided - cells. The cube has to match.""" + """Returns a new cell that is a conjunction of the two provided cells. + + The cube has to match. + """ cuts = self.cuts + other.cuts return Cell(cuts=cuts) def to_dict(self) -> JSONType: - """Returns a dictionary representation of the cell""" + """Returns a dictionary representation of the cell.""" - result = { - "cuts": [cut.to_dict() for cut in self.cuts] - } + result = {"cuts": [cut.to_dict() for cut in self.cuts]} return result @@ -345,16 +383,16 @@ def collect_key_attributes(self, cube: Cube) -> List[Attribute]: dim = cube.dimension(cut.dimension) hier = dim.hierarchy(cut.hierarchy) - keys = [dim.attribute(level.key.name) - for level in hier.levels[0:depth]] + keys = [dim.attribute(level.key.name) for level in hier.levels[0:depth]] attributes |= set(keys) return list(attributes) def slice(self, cut: Cut) -> "Cell": - """Returns new cell by slicing receiving cell with `cut`. Cut with - same dimension as `cut` will be replaced, if there is no cut with the - same dimension, then the `cut` will be appended. + """Returns new cell by slicing receiving cell with `cut`. 
+ + Cut with same dimension as `cut` will be replaced, if there is + no cut with the same dimension, then the `cut` will be appended. """ cuts: List[Cut] = self.cuts[:] @@ -366,10 +404,11 @@ def slice(self, cut: Cut) -> "Cell": return Cell(cuts=cuts) - def _dimension_cut_index(self, dimension: str) \ - -> Optional[int]: - """Returns index of first occurence of cut for `dimension`. Returns - ``None`` if no cut with `dimension` is found.""" + def _dimension_cut_index(self, dimension: str) -> Optional[int]: + """Returns index of first occurence of cut for `dimension`. + + Returns ``None`` if no cut with `dimension` is found. + """ names = [cut.dimension for cut in self.cuts] try: @@ -378,14 +417,11 @@ def _dimension_cut_index(self, dimension: str) \ except ValueError: return None - def point_slice(self, - dimension: str, - path: HierarchyPath) -> "Cell": - """ - Create another cell by slicing receiving cell through `dimension` - at `path`. Receiving object is not modified. If cut with dimension - exists it is replaced with new one. If path is empty list or is none, - then cut for given dimension is removed. + def point_slice(self, dimension: str, path: HierarchyPath) -> "Cell": + """Create another cell by slicing receiving cell through `dimension` at + `path`. Receiving object is not modified. If cut with dimension exists + it is replaced with new one. If path is empty list or is none, then cut + for given dimension is removed. Example:: @@ -398,7 +434,6 @@ def point_slice(self, Depreiated. Use :meth:`cell.slice` instead with argument `PointCut(dimension, path)` - """ cuts = self.cuts_for_dimension(dimension, exclude=True) @@ -407,9 +442,7 @@ def point_slice(self, cuts.append(cut) return Cell(cuts=cuts) - def drilldown(self, dimension: str, - value: str, - hierarchy: str=None) -> "Cell": + def drilldown(self, dimension: str, value: str, hierarchy: str = None) -> "Cell": """Create another cell by drilling down `dimension` next level on current level's key `value`. 
@@ -445,11 +478,16 @@ def drilldown(self, dimension: str, return Cell(cuts=cuts) def multi_slice(self, cuts: List[Cut]) -> "Cell": - """Create another cell by slicing through multiple slices. `cuts` is a - list of `Cut` object instances. See also :meth:`Cell.slice`.""" + """Create another cell by slicing through multiple slices. + + `cuts` is a list of `Cut` object instances. See also + :meth:`Cell.slice`. + """ if isinstance(cuts, dict): - raise CubesError("dict type is not supported any more, use list of Cut instances") + raise CubesError( + "dict type is not supported any more, use list of Cut instances" + ) cell = self for cut in cuts: @@ -458,8 +496,7 @@ def multi_slice(self, cuts: List[Cut]) -> "Cell": return cell # FIXME: Use dimension_cut_index - def cut_for_dimension(self, dimension: Dimension) \ - -> Optional[Cut]: + def cut_for_dimension(self, dimension: Dimension) -> Optional[Cut]: """Return first found cut for given `dimension`""" cut_dimension = None @@ -471,8 +508,7 @@ def cut_for_dimension(self, dimension: Dimension) \ return None - def point_cut_for_dimension(self, dimension: str) \ - -> Optional[PointCut]: + def point_cut_for_dimension(self, dimension: str) -> Optional[PointCut]: """Return first point cut for given `dimension`""" cutdim = None @@ -488,7 +524,7 @@ def level_depths(self) -> Dict[str, int]: (index of deepest level).""" depth: int - depths: Dict[str,int] = {} + depths: Dict[str, int] = {} for cut in self.cuts: depth = cut.level_depth() @@ -498,23 +534,28 @@ def level_depths(self) -> Dict[str, int]: return depths - def cuts_for_dimension(self, - dimension: str, - exclude: bool=False) -> List[Cut]: - """Returns cuts for `dimension`. If `exclude` is `True` then the - effect is reversed: return all cuts except those with `dimension`.""" + def cuts_for_dimension(self, dimension: str, exclude: bool = False) -> List[Cut]: + """Returns cuts for `dimension`. 
+ + If `exclude` is `True` then the effect is reversed: return all + cuts except those with `dimension`. + """ cuts = [] for cut in self.cuts: cut_dimension = cut.dimension - if (exclude and cut_dimension != dimension) \ - or (not exclude and cut_dimension == dimension): + if (exclude and cut_dimension != dimension) or ( + not exclude and cut_dimension == dimension + ): cuts.append(cut) return cuts def public_cell(self) -> "Cell": - """Returns a cell that contains only non-hidden cuts. Hidden cuts are - mostly generated cuts by a backend or an extension. Public cell is a - cell to be presented to the front-end.""" + """Returns a cell that contains only non-hidden cuts. + + Hidden cuts are mostly generated cuts by a backend or an + extension. Public cell is a cell to be presented to the front- + end. + """ cuts = [cut for cut in self.cuts if not cut.hidden] @@ -522,8 +563,9 @@ def public_cell(self) -> "Cell": def __eq__(self, other: Any) -> bool: """cells are considered equal if: - * they refer to the same cube - * they have same set of cuts (regardless of their order) + + * they refer to the same cube + * they have same set of cuts (regardless of their order) """ if len(self.cuts) != len(other.cuts): @@ -539,28 +581,29 @@ def __ne__(self, other: Any) -> bool: return not self.__eq__(other) def to_str(self) -> str: - """Return string representation of the cell by using standard - cuts-to-string conversion.""" + """Return string representation of the cell by using standard cuts-to- + string conversion.""" return string_from_cuts(self.cuts) def __str__(self) -> str: - """Return string representation of the cell by using standard - cuts-to-string conversion.""" + """Return string representation of the cell by using standard cuts-to- + string conversion.""" return string_from_cuts(self.cuts) def __repr__(self) -> str: - return 'Cell(%s)' % (self.to_str() or 'All') + return "Cell(%s)" % (self.to_str() or "All") def __nonzero__(self) -> bool: """Returns `True` if the cell 
contains cuts.""" return bool(self.cuts) -def cuts_from_string(cube: Cube, - string: str, - member_converters: Dict[str, MemberConverter]=None, - role_member_converters: Dict[str, MemberConverter]=None) \ - -> List[Cut]: +def cuts_from_string( + cube: Cube, + string: str, + member_converters: Dict[str, MemberConverter] = None, + role_member_converters: Dict[str, MemberConverter] = None, +) -> List[Cut]: """Return list of cuts specified in `string`. You can use this function to parse cuts encoded in a URL. @@ -612,19 +655,18 @@ def cuts_from_string(cube: Cube, dim_cuts = CUT_STRING_SEPARATOR.split(string) for dim_cut in dim_cuts: - cut = cut_from_string(dim_cut, cube, member_converters, - role_member_converters) + cut = cut_from_string(dim_cut, cube, member_converters, role_member_converters) cuts.append(cut) return cuts - -def cut_from_string(string: str, - cube:Cube=None, - member_converters:Dict[str, MemberConverter]=None, - role_member_converters:Dict[str, MemberConverter]=None) \ - -> Cut: +def cut_from_string( + string: str, + cube: Cube = None, + member_converters: Dict[str, MemberConverter] = None, + role_member_converters: Dict[str, MemberConverter] = None, +) -> Cut: """Returns a cut from `string` with dimension `dimension and assumed hierarchy `hierarchy`. The string should match one of the following patterns: @@ -647,8 +689,7 @@ def cut_from_string(string: str, member_converters = member_converters or {} role_member_converters = role_member_converters or {} - dim_hier_pattern = re.compile(r"(?P!)?" - "(?P\w+)(@(?P\w+))?") + dim_hier_pattern = re.compile(r"(?P!)?" 
r"(?P\w+)(@(?P\w+))?") try: (dimspec, string) = DIMENSION_STRING_SEPARATOR.split(string) @@ -659,12 +700,14 @@ def cut_from_string(string: str, if match: d = match.groupdict() - invert = (not not d["invert"]) + invert = not not d["invert"] dim_name = d["dim"] hier_name = d["hier"] else: - raise ArgumentError("Dimension spec '%s' does not match " - "pattern 'dimension@hierarchy'" % dimspec) + raise ArgumentError( + "Dimension spec '%s' does not match " + "pattern 'dimension@hierarchy'" % dimspec + ) converter = member_converters.get(dim_name) if cube: @@ -676,8 +719,8 @@ def cut_from_string(string: str, # special case: completely empty string means single path element of '' # FIXME: why? - if string == '': - return PointCut(dim_name, [''], hier_name, invert) + if string == "": + return PointCut(dim_name, [""], hier_name, invert) elif RE_POINT.match(string): path = path_from_string(string) @@ -698,8 +741,9 @@ def cut_from_string(string: str, cut = SetCut(dim_name, paths, hier_name, invert) elif RE_RANGE.match(string): - (from_path, to_path) = list(map(path_from_string, - RANGE_CUT_SEPARATOR.split(string))) + (from_path, to_path) = list( + map(path_from_string, RANGE_CUT_SEPARATOR.split(string)) + ) if converter: from_path = converter(dimension, hierarchy, from_path) @@ -708,16 +752,21 @@ def cut_from_string(string: str, cut = RangeCut(dim_name, from_path, to_path, hier_name, invert) else: - raise ArgumentError("Unknown cut format (check that keys " - "consist only of alphanumeric characters and " - "underscore): %s" % string) + raise ArgumentError( + "Unknown cut format (check that keys " + "consist only of alphanumeric characters and " + "underscore): %s" % string + ) return cut -def cut_from_dict(desc: JSONType, cube: Cube=None) -> Cut: - """Returns a cut from `desc` dictionary. 
If `cube` is specified, then the - dimension is looked up in the cube and set as `Dimension` instances, if - specified as strings.""" + +def cut_from_dict(desc: JSONType, cube: Cube = None) -> Cut: + """Returns a cut from `desc` dictionary. + + If `cube` is specified, then the dimension is looked up in the cube + and set as `Dimension` instances, if specified as strings. + """ cut_type = desc["type"].lower() @@ -727,12 +776,21 @@ def cut_from_dict(desc: JSONType, cube: Cube=None) -> Cut: dim = cube.dimension(dim) if cut_type == "point": - return PointCut(dim, desc.get("path"), desc.get("hierarchy"), desc.get('invert', False)) + return PointCut( + dim, desc.get("path"), desc.get("hierarchy"), desc.get("invert", False) + ) elif cut_type == "set": - return SetCut(dim, desc.get("paths"), desc.get("hierarchy"), desc.get('invert', False)) + return SetCut( + dim, desc.get("paths"), desc.get("hierarchy"), desc.get("invert", False) + ) elif cut_type == "range": - return RangeCut(dim, desc.get("from"), desc.get("to"), - desc.get("hierarchy"), desc.get('invert', False)) + return RangeCut( + dim, + desc.get("from"), + desc.get("to"), + desc.get("hierarchy"), + desc.get("invert", False), + ) else: raise ArgumentError("Unknown cut type %s" % cut_type) @@ -756,19 +814,23 @@ def _path_part_unescape(path_part: str) -> Optional[str]: def string_from_cuts(cuts: Collection[Cut]) -> str: - """Returns a string represeting `cuts`. String can be used in URLs""" + """Returns a string represeting `cuts`. + + String can be used in URLs + """ strings = [str(cut) for cut in cuts] string = CUT_STRING_SEPARATOR_CHAR.join(strings) return string def string_from_path(path: HierarchyPath) -> str: - """Returns a string representing dimension `path`. If `path` is ``None`` - or empty, then returns empty string. The ptah elements are comma ``,`` + """Returns a string representing dimension `path`. If `path` is ``None`` or + empty, then returns empty string. The ptah elements are comma ``,`` spearated. 
- Raises `ValueError` when path elements contain characters that are not - allowed in path element (alphanumeric and underscore ``_``).""" + Raises `ValueError` when path elements contain characters that are + not allowed in path element (alphanumeric and underscore ``_``). + """ if not path: return "" @@ -776,21 +838,23 @@ def string_from_path(path: HierarchyPath) -> str: path = [_path_part_escape(str(s)) for s in path] if not all(map(RE_ELEMENT.match, path)): - get_logger().warn("Can not convert path to string: " - "keys contain invalid characters " - "(should be alpha-numeric or underscore) '%s'" % - path) + get_logger().warn( + "Can not convert path to string: " + "keys contain invalid characters " + "(should be alpha-numeric or underscore) '%s'" % path + ) string = PATH_STRING_SEPARATOR_CHAR.join(path) return string -def string_from_hierarchy(dimension: str, - hierarchy: Optional[str]) -> str: +def string_from_hierarchy(dimension: str, hierarchy: Optional[str]) -> str: """Returns a string in form ``dimension@hierarchy`` or ``dimension`` if `hierarchy` is ``None``""" if hierarchy: - return "%s@%s" % (_path_part_escape(dimension), _path_part_escape(hierarchy)) + return "{}@{}".format( + _path_part_escape(dimension), _path_part_escape(hierarchy) + ) else: return _path_part_escape(dimension) @@ -811,5 +875,3 @@ def path_from_string(string: str) -> HierarchyPath: path = [_path_part_unescape(v) for v in path] return path - - diff --git a/cubes/query/computation.py b/cubes/query/computation.py index 5eb67f60..f1ce9f26 100644 --- a/cubes/query/computation.py +++ b/cubes/query/computation.py @@ -4,32 +4,31 @@ from ..errors import ArgumentError -__all__ = [ - "combined_cuboids", - "combined_levels", - "hierarchical_cuboids" -] +__all__ = ["combined_cuboids", "combined_levels", "hierarchical_cuboids"] + def combined_cuboids(dimensions, required=None): """Returns a list of all combinations of `dimensions` as tuples. 
For example, if `dimensions` is: ``['date', 'product']`` then it returns: - ``[['date', 'cpv'], ['date'], ['cpv']]`` + ``[['date', 'cpv'], ['date'], ['cpv']]`` """ required = tuple(required) if required else () for dim in required: if dim not in dimensions: - raise ArgumentError("Required dimension '%s' is not in list of " - "dimensions to be combined." % str(dim)) + raise ArgumentError( + "Required dimension '%s' is not in list of " + "dimensions to be combined." % str(dim) + ) cuboids = [] to_combine = [dim for dim in dimensions if not dim in required] for i in range(len(to_combine), 0, -1): combos = itertools.combinations(to_combine, i) - combos = [required+combo for combo in combos] + combos = [required + combo for combo in combos] cuboids += tuple(combos) @@ -38,11 +37,13 @@ def combined_cuboids(dimensions, required=None): return cuboids + def combined_levels(dimensions, default_only=False): - """Create a cartesian product of levels from all `dimensions`. For - example, if dimensions are _date_, _product_ then result will be: - levels of _date_ X levels of _product_. Each element of the returned list - is a list of tuples (`dimension`, `level`) + """Create a cartesian product of levels from all `dimensions`. + + For example, if dimensions are _date_, _product_ then result will + be: levels of _date_ X levels of _product_. 
Each element of the + returned list is a list of tuples (`dimension`, `level`) """ groups = [] for dim in dimensions: @@ -66,4 +67,3 @@ def hierarchical_cuboids(dimensions, required=None, default_only=False): result += list(combined_levels(cuboid, default_only)) return result - diff --git a/cubes/query/constants.py b/cubes/query/constants.py index 4e7c11d7..8e9ad345 100644 --- a/cubes/query/constants.py +++ b/cubes/query/constants.py @@ -1,9 +1,8 @@ # -*- coding: utf-8 -*- -SPLIT_DIMENSION_NAME = '__within_split__' +SPLIT_DIMENSION_NAME = "__within_split__" """Name of a virtual dimension added when a split cell is used""" -NULL_PATH_VALUE = '__null__' +NULL_PATH_VALUE = "__null__" """String representation of a path value which should be represented by NULL""" - diff --git a/cubes/query/drilldown.py b/cubes/query/drilldown.py index 9c5032b5..ec1bf793 100644 --- a/cubes/query/drilldown.py +++ b/cubes/query/drilldown.py @@ -1,50 +1,36 @@ # -*- coding: utf-8 -*- +from typing import Mapping # Collection, from typing import ( - # Collection, - Dict, - Iterable, - Iterator, - List, - Mapping, - NamedTuple, - Optional, - Set, - Sized, - Tuple, - Union, - cast, - ) + Dict, + Iterable, + Iterator, + List, + NamedTuple, + Optional, + Set, + Sized, + Tuple, + Union, + cast, +) +from ..errors import ArgumentError, HierarchyError from ..metadata import ( - Attribute, - Cube, - Dimension, - Hierarchy, - Level, - string_to_dimension_level, - ) - -from .cells import ( - Cell, - Cut, - PointCut, - SetCut, - cuts_from_string, - ) - + Attribute, + Cube, + Dimension, + Hierarchy, + Level, + string_to_dimension_level, +) +from .cells import Cell, Cut, PointCut, SetCut, cuts_from_string from .constants import SPLIT_DIMENSION_NAME -from ..errors import ArgumentError, HierarchyError - # FIXME: Update afetr Python 3.6.1 Collection = List -__all__ = [ - "Drilldown", - "DrilldownItem", - "levels_from_drilldown", -] +__all__ = ["Drilldown", "DrilldownItem", "levels_from_drilldown"] class 
DrilldownItem(NamedTuple): @@ -62,17 +48,12 @@ class DrilldownItem(NamedTuple): str, Dimension, DrilldownItem, - Tuple[ - Union[Dimension, str], - Union[Hierarchy, str], - Union[Level,str] - ] + Tuple[Union[Dimension, str], Union[Hierarchy, str], Union[Level, str]], ] - ] + ], ] - class Drilldown(Iterable, Sized): drilldown: List[DrilldownItem] @@ -80,9 +61,7 @@ class Drilldown(Iterable, Sized): dimensions: List[Dimension] _contained_dimensions: Set[str] - def __init__(self, - cube: Cube, - items: _DrilldownType=None) -> None: + def __init__(self, cube: Cube, items: _DrilldownType = None) -> None: """Creates a drilldown object for `drilldown` specifictation of `cell`. The drilldown object can be used by browsers for convenient access to various drilldown properties. @@ -112,8 +91,10 @@ def __str__(self) -> str: def items_as_strings(self) -> List[str]: """Returns drilldown items as strings: ``dimension@hierarchy:level``. - If hierarchy is dimension's default hierarchy, then it is not included - in the string: ``dimension:level``""" + + If hierarchy is dimension's default hierarchy, then it is not + included in the string: ``dimension:level`` + """ strings = [] @@ -123,15 +104,14 @@ def items_as_strings(self) -> List[str]: else: hierstr = "" - ddstr = "%s%s:%s" % (item.dimension.name, - hierstr, - item.levels[-1].name) + ddstr = "{}{}:{}".format(item.dimension.name, hierstr, item.levels[-1].name) strings.append(ddstr) return strings - def drilldown_for_dimension(self, dim: Union[str, Dimension]) \ - -> List[DrilldownItem]: + def drilldown_for_dimension( + self, dim: Union[str, Dimension] + ) -> List[DrilldownItem]: """Returns drilldown items for dimension `dim`.""" items = [] dimname = str(dim) @@ -161,14 +141,19 @@ def deepest_levels(self) -> List[Tuple[Dimension, Hierarchy, Level]]: return levels # This is resurrected from Cell in which cube was removed - def _cell_contains_level(self, - cell: Cell, - dimension: Union[Dimension, str], - level: str, - hierarchy: 
str=None) -> bool: + def _cell_contains_level( + self, + cell: Cell, + dimension: Union[Dimension, str], + level: str, + hierarchy: str = None, + ) -> bool: """Returns `True` if one of the cuts contains `level` of dimension - `dim`. If `hierarchy` is not specified, then dimension's default - hierarchy is used.""" + `dim`. + + If `hierarchy` is not specified, then dimension's default + hierarchy is used. + """ dim = self.cube.dimension(dimension) hierarchy_obj = dim.hierarchy(hierarchy) @@ -186,8 +171,8 @@ def _cell_contains_level(self, return False def high_cardinality_levels(self, cell: Cell) -> List[Level]: - """Returns list of levels in the drilldown that are of high - cardinality and there is no cut for that level in the `cell`.""" + """Returns list of levels in the drilldown that are of high cardinality + and there is no cut for that level in the `cell`.""" not_contained: List[Level] = [] @@ -196,24 +181,24 @@ def high_cardinality_levels(self, cell: Cell) -> List[Level]: # TODO: Replace with enums for level in item.levels: - contains_level = self._cell_contains_level(cell, - dim.name, - level.name, - hier.name) + contains_level = self._cell_contains_level( + cell, dim.name, level.name, hier.name + ) - if (level.cardinality == "high" or dim.cardinality == "high") \ - and contains_level: + if ( + level.cardinality == "high" or dim.cardinality == "high" + ) and contains_level: not_contained.append(level) return not_contained - def result_levels(self, include_split: bool=False) \ - -> Mapping[str, List[str]]: + def result_levels(self, include_split: bool = False) -> Mapping[str, List[str]]: """Returns a dictionary where keys are dimension names and values are list of level names for the drilldown. Use this method to populate the result levels attribute. - If `include_split` is `True` then split dimension is included.""" + If `include_split` is `True` then split dimension is included. 
+ """ result = {} for item in self.drilldown: @@ -233,9 +218,9 @@ def result_levels(self, include_split: bool=False) \ @property def key_attributes(self) -> List[Attribute]: - """Returns only key attributes of all levels in the drilldown. Order - is by the drilldown item, then by the levels and finally by the - attribute in the level. + """Returns only key attributes of all levels in the drilldown. Order is + by the drilldown item, then by the levels and finally by the attribute + in the level. .. versionadded:: 1.1 """ @@ -247,9 +232,11 @@ def key_attributes(self) -> List[Attribute]: @property def all_attributes(self) -> Collection[Attribute]: - """Returns attributes of all levels in the drilldown. Order is by the - drilldown item, then by the levels and finally by the attribute in the - level.""" + """Returns attributes of all levels in the drilldown. + + Order is by the drilldown item, then by the levels and finally + by the attribute in the level. + """ attributes: List[Attribute] = [] for item in self.drilldown: for level in item.levels: @@ -260,16 +247,18 @@ def all_attributes(self) -> Collection[Attribute]: # FIXME: [typing] See #395 @property def natural_order(self) -> List[Tuple[Attribute, str]]: - """Return a natural order for the drill-down. This order can be merged - with user-specified order. Returns a list of tuples: - (`attribute_name`, `order`).""" + """Return a natural order for the drill-down. + + This order can be merged with user-specified order. Returns a + list of tuples: (`attribute_name`, `order`). 
+ """ order = [] for item in self.drilldown: for level in item.levels: lvl_attr = level.order_attribute or level.key - lvl_order = level.order or 'asc' + lvl_order = level.order or "asc" order.append((lvl_attr, lvl_order)) return order @@ -284,11 +273,10 @@ def __nonzero__(self) -> bool: return len(self.drilldown) > 0 - # TODO: move this to Drilldown def levels_from_drilldown( - cube: Cube, - drilldown: Optional[_DrilldownType]) -> List[DrilldownItem]: + cube: Cube, drilldown: Optional[_DrilldownType] +) -> List[DrilldownItem]: """Converts `drilldown` into a list of levels to be used to drill down. `drilldown` can be: @@ -322,11 +310,13 @@ def levels_from_drilldown( elif isinstance(obj, Dimension): obj = (obj, obj.hierarchy(), obj.hierarchy().levels[-1]) elif len(obj) != 3: - raise ArgumentError(f"Drilldown item should be either a string " - f"or a tuple of three elements. Is: {obj}") + raise ArgumentError( + f"Drilldown item should be either a string " + f"or a tuple of three elements. 
Is: {obj}" + ) dim_any, hier_any, level_any = obj - + dim: Dimension = cube.dimension(dim_any) hier: Hierarchy = dim.hierarchy(hier_any) @@ -334,7 +324,7 @@ def levels_from_drilldown( if level_any: index = hier.level_index(str(level_any)) - levels = hier.levels[:index + 1] + levels = hier.levels[: index + 1] else: levels = hier.levels[:1] diff --git a/cubes/query/result.py b/cubes/query/result.py index 54e1bcc7..db9e8dfe 100644 --- a/cubes/query/result.py +++ b/cubes/query/result.py @@ -1,40 +1,32 @@ # -*- coding: utf-8 -*- from typing import ( - Any, - Collection, - Iterable, - Iterator, - List, - Mapping, - NamedTuple, - Optional, - cast, - ) - - -from ..types import JSONType, _RecordType + Any, + Collection, + Iterable, + Iterator, + List, + Mapping, + NamedTuple, + Optional, + cast, +) +from ..common import IgnoringDictionary from ..metadata import ( - Cube, - Dimension, - Hierarchy, - HierarchyPath, - Level, - MeasureAggregate, - ) - + Cube, + Dimension, + Hierarchy, + HierarchyPath, + Level, + MeasureAggregate, +) from ..query.cells import Cell, PointCut from ..query.drilldown import Drilldown - +from ..types import JSONType, _RecordType from .statutils import _CalculatorFunction -from ..common import IgnoringDictionary -__all__ = [ - "AggregationResult", - "Facts", - "TableRow", -] +__all__ = ["AggregationResult", "Facts", "TableRow"] class TableRow(NamedTuple): @@ -49,9 +41,7 @@ class Facts(Iterable): facts: Iterable[_RecordType] attributes: List[str] - def __init__(self, - facts: Iterable[_RecordType], - attributes: List[str]) -> None: + def __init__(self, facts: Iterable[_RecordType], attributes: List[str]) -> None: """A facts iterator object returned by the browser's `facts()` method.""" @@ -63,15 +53,16 @@ def __iter__(self) -> Iterator[_RecordType]: class CalculatedResultIterator(Iterable): - """ - Iterator that decorates data items - """ + """Iterator that decorates data items.""" + calculators: Collection[_CalculatorFunction] iterator: 
Iterator[_RecordType] - def __init__(self, - calculators: Collection[_CalculatorFunction], - iterator: Iterator[_RecordType]) -> None: + def __init__( + self, + calculators: Collection[_CalculatorFunction], + iterator: Iterator[_RecordType], + ) -> None: self.calculators = calculators self.iterator = iterator @@ -108,7 +99,6 @@ class AggregationResult(Iterable): Implementors of aggregation browsers should populate `cell`, `measures` and `levels` from the aggregate query. - """ # TODO: This should be List[Cube] for drill-across @@ -126,24 +116,28 @@ class AggregationResult(Iterable): # FIXME: [typing] Fix the type _cells: Iterable[_RecordType] - def __init__(self, - cube: Cube, - cell: Cell, - cells: Iterable[_RecordType], - labels: Optional[Collection[str]]=None, - summary: Optional[_RecordType]=None, - aggregates: Collection[MeasureAggregate]=None, - drilldown: Drilldown=None, - levels: Optional[Mapping[str, List[str]]]=None, - total_cell_count: Optional[int]=None, - remainder: Optional[JSONType]=None, - has_split: bool=False) -> None: + def __init__( + self, + cube: Cube, + cell: Cell, + cells: Iterable[_RecordType], + labels: Optional[Collection[str]] = None, + summary: Optional[_RecordType] = None, + aggregates: Collection[MeasureAggregate] = None, + drilldown: Drilldown = None, + levels: Optional[Mapping[str, List[str]]] = None, + total_cell_count: Optional[int] = None, + remainder: Optional[JSONType] = None, + has_split: bool = False, + ) -> None: """Create an aggergation result object. `cell` – a :class:`cubes.Cell` object used for this aggregation, `aggregates` – list of aggregate - objects selected for this a aggregation, `drilldown` – a + objects selected for this a aggregation, `drilldown` – a. + :class:`cubes.Drilldown` object representing list of dimensions and hierarchies the result is drilled-down by, `has_split` – flag whether - the result has a split dimension.""" + the result has a split dimension. 
+ """ self.cube = cube self.cell = cell @@ -179,7 +173,6 @@ def __init__(self, self._cells = [] self.cells = cells - @property def cells(self) -> Iterable[_RecordType]: return self._cells @@ -192,8 +185,10 @@ def cells(self, val: Iterable[_RecordType]) -> None: self._cells = val def to_dict(self) -> JSONType: - """Return dictionary representation of the aggregation result. Can be - used for JSON serialisation.""" + """Return dictionary representation of the aggregation result. + + Can be used for JSON serialisation. + """ d = IgnoringDictionary() @@ -213,13 +208,11 @@ def to_dict(self) -> JSONType: d.set("attributes", self.attributes) d["has_split"] = self.has_split - return d - def table_rows(self, - dimension_name: str, - depth: int=None, - hierarchy: Hierarchy=None) -> Iterator[TableRow]: + def table_rows( + self, dimension_name: str, depth: int = None, hierarchy: Hierarchy = None + ) -> Iterator[TableRow]: """Returns iterator of drilled-down rows which yields a named tuple with named attributes: (key, label, path, record). `depth` is last level of interest. 
If not specified (set to ``None``) then deepest @@ -276,15 +269,13 @@ def table_rows(self, for record in self.cells: drill_path = path[:] + [record[level_key]] - row = TableRow(record[level_key], - record[level_label], - drill_path, - is_base, - record) + row = TableRow( + record[level_key], record[level_label], drill_path, is_base, record + ) yield row def __iter__(self) -> Iterator[_RecordType]: - """Return cells as iterator""" + """Return cells as iterator.""" return iter(self.cells) def cached(self) -> "AggregationResult": @@ -307,7 +298,7 @@ def cached(self) -> "AggregationResult": has_split=self.has_split, levels=self.levels, # Cache cells from an iterator - cells=list(self.cells) + cells=list(self.cells), ) return result diff --git a/cubes/query/statutils.py b/cubes/query/statutils.py index df164741..f4614d93 100644 --- a/cubes/query/statutils.py +++ b/cubes/query/statutils.py @@ -1,34 +1,29 @@ # -*- coding: utf-8 -*- from collections import deque -from functools import partial +from functools import partial, reduce from math import sqrt +from statistics import mean, stdev, variance +from typing import Any, Callable, List, Optional, Sequence, Union -from functools import reduce - -from typing import List, Callable, List, Union, Optional, Sequence, Any - -from ..types import _UnknownType, _RecordType from ..errors import ArgumentError, InternalError, ModelError -from ..metadata import MeasureAggregate, HierarchyPath, Level -from ..metadata.cube import Cube +from ..metadata import HierarchyPath, Level, MeasureAggregate from ..metadata.attributes import Measure +from ..metadata.cube import Cube from ..query.cells import Cell +from ..types import _RecordType, _UnknownType +from .constants import SPLIT_DIMENSION_NAME + # FIXME: Circular dependency. 
We need to fix the type # from ..query.browser import Drilldown Drilldown = Any -from .constants import SPLIT_DIMENSION_NAME - - -from statistics import variance, stdev, mean - __all__ = [ "CALCULATED_AGGREGATIONS", "calculators_for_aggregates", "available_calculators", - "aggregate_calculator_labels" + "aggregate_calculator_labels", ] @@ -41,10 +36,11 @@ def calculators_for_aggregates( - cube: Cube, - aggregates: List[MeasureAggregate], - drilldown: Optional[Drilldown]=None, - split: Cell=None) -> _UnknownType: + cube: Cube, + aggregates: List[MeasureAggregate], + drilldown: Optional[Drilldown] = None, + split: Cell = None, +) -> _UnknownType: """Returns a list of calculator function objects that implements aggregations by calculating on retrieved results, given a particular drilldown. Only post-aggregation calculators are returned. @@ -63,32 +59,36 @@ def calculators_for_aggregates( # Pre-requisites # if not aggregate.measure: - raise InternalError("No measure specified for aggregate '%s' in " - "cube '%s'" % (aggregate.name, cube.name)) + raise InternalError( + "No measure specified for aggregate '%s' in " + "cube '%s'" % (aggregate.name, cube.name) + ) if aggregate.function: function: str = aggregate.function else: # This should not happen. 
- raise ArgumentError(f"No post-calculation function for aggregate " - f" {aggregate.name}") + raise ArgumentError( + f"No post-calculation function for aggregate " f" {aggregate.name}" + ) try: factory = CALCULATED_AGGREGATIONS[function] except KeyError: - raise ArgumentError("Unknown post-calculation function '%s' for " - "aggregate '%s'" % (aggregate.function, - aggregate.name)) + raise ArgumentError( + "Unknown post-calculation function '%s' for " + "aggregate '%s'" % (aggregate.function, aggregate.name) + ) source = cube.measure(aggregate.measure) - func = factory(aggregate, - source=source.ref, - drilldown=drilldown, - split_cell=split) + func = factory( + aggregate, source=source.ref, drilldown=drilldown, split_cell=split + ) functions.append(func) return functions + def weighted_moving_average(values: Sequence[_ValueType]) -> _ValueType: n = len(values) denom = n * (n + 1) / 2 @@ -104,29 +104,38 @@ def simple_moving_average(values: Sequence[_ValueType]) -> _ValueType: # use all the values return round(reduce(lambda i, c: float(c) + i, values, 0.0) / len(values), 2) + def simple_moving_sum(values: Sequence[_ValueType]) -> _ValueType: return reduce(lambda i, c: i + c, values, 0) + def simple_relative_stdev(values: Sequence[_ValueType]) -> _ValueType: m: float = mean(values) - var:float = variance(values) - return round(((sqrt(var)/m) if m > 0 else 0), 4) + var: float = variance(values) + return round(((sqrt(var) / m) if m > 0 else 0), 4) + def simple_variance(values: Sequence[_ValueType]) -> _ValueType: return round(variance(values), 2) + def simple_stdev(values: Sequence[_ValueType]) -> _ValueType: return round(stdev(values), 2) + def _window_function_factory( - window_function: WindowFunctionType, - label: str, - aggregate: MeasureAggregate, - source: Measure, - drilldown: Optional[Drilldown], - split_cell: Cell) -> _UnknownType: - """Returns a moving average window function. `aggregate` is the target - aggergate. 
`window_function` is concrete window function.""" + window_function: WindowFunctionType, + label: str, + aggregate: MeasureAggregate, + source: Measure, + drilldown: Optional[Drilldown], + split_cell: Cell, +) -> _UnknownType: + """Returns a moving average window function. + + `aggregate` is the target aggergate. `window_function` is concrete + window function. + """ # If the level we're drilling to doesn't have aggregation_units configured, # we're not doing any calculations @@ -143,7 +152,7 @@ def _window_function_factory( these_num_units = None if relevant_level.info: - these_num_units = relevant_level.info.get('aggregation_units', None) + these_num_units = relevant_level.info.get("aggregation_units", None) if these_num_units is None: key_drilldown_paths.append(item) else: @@ -153,8 +162,10 @@ def _window_function_factory( window_size = 1 elif not isinstance(window_size, int) or window_size < 1: - raise ModelError("window size for aggregate '%s' sohuld be an integer " - "greater than or equeal 1" % aggregate.name) + raise ModelError( + "window size for aggregate '%s' sohuld be an integer " + "greater than or equeal 1" % aggregate.name + ) # Create a composite key for grouping: # * split dimension, if used @@ -173,19 +184,24 @@ def _window_function_factory( # consider the measure reference to be aggregated measure reference. 
# TODO: this does not work for implicit post-aggregate calculations - function = WindowFunction(window_function, window_key, - target_attribute=aggregate.name, - source_attribute=source, - window_size=window_size, - label=label) + function = WindowFunction( + window_function, + window_key, + target_attribute=aggregate.name, + source_attribute=source, + window_size=window_size, + label=label, + ) return function + def get_key(record, composite_key): """Extracts a tuple of values from the `record` by `composite_key`""" return tuple(record.get(key) for key in composite_key) + # FIXME : [typing] Fix the data types -class WindowFunction(object): +class WindowFunction: function: Any window_key: Any @@ -194,13 +210,15 @@ class WindowFunction(object): window_size: Any label: str - def __init__(self, - function: Any, - window_key: Any, - target_attribute: Any, - source_attribute: Any, - window_size: Any, - label: Any) -> Any: + def __init__( + self, + function: Any, + window_key: Any, + target_attribute: Any, + source_attribute: Any, + window_size: Any, + label: Any, + ): """Creates a window function.""" if not function: @@ -222,9 +240,12 @@ def __init__(self, # TODO: This modifies object in place. It should return modified copy def __call__(self, record: Any) -> None: - """Collects the source value. If the window for the `window_key` is - filled, then apply the window function and store the value in the - `record` to key `target_attribute`.""" + """Collects the source value. + + If the window for the `window_key` is filled, then apply the + window function and store the value in the `record` to key + `target_attribute`. + """ key = get_key(record, self.window_key) @@ -249,29 +270,43 @@ def __call__(self, record: Any) -> None: # TODO: make CALCULATED_AGGREGATIONS a namespace (see extensions.py) CALCULATED_AGGREGATIONS = { - "wma": partial(_window_function_factory, - window_function=weighted_moving_average, - label='Weighted Moving Avg. 
of {measure}'), - "sma": partial(_window_function_factory, - window_function=simple_moving_average, - label='Simple Moving Avg. of {measure}'), - "sms": partial(_window_function_factory, - window_function=simple_moving_sum, - label='Simple Moving Sum of {measure}'), - "smstd": partial(_window_function_factory, - window_function=simple_stdev, - label='Moving Std. Deviation of {measure}'), - "smrsd": partial(_window_function_factory, - window_function=simple_relative_stdev, - label='Moving Relative St. Dev. of {measure}'), - "smvar": partial(_window_function_factory, - window_function=simple_variance, - label='Moving Variance of {measure}') + "wma": partial( + _window_function_factory, + window_function=weighted_moving_average, + label="Weighted Moving Avg. of {measure}", + ), + "sma": partial( + _window_function_factory, + window_function=simple_moving_average, + label="Simple Moving Avg. of {measure}", + ), + "sms": partial( + _window_function_factory, + window_function=simple_moving_sum, + label="Simple Moving Sum of {measure}", + ), + "smstd": partial( + _window_function_factory, + window_function=simple_stdev, + label="Moving Std. Deviation of {measure}", + ), + "smrsd": partial( + _window_function_factory, + window_function=simple_relative_stdev, + label="Moving Relative St. Dev. 
of {measure}", + ), + "smvar": partial( + _window_function_factory, + window_function=simple_variance, + label="Moving Variance of {measure}", + ), } + def available_calculators(): """Returns a list of available calculators.""" return CALCULATED_AGGREGATIONS.keys() + def aggregate_calculator_labels(): - return dict([(k, v.keywords['label']) for k, v in CALCULATED_AGGREGATIONS.items()]) + return {k: v.keywords["label"] for k, v in CALCULATED_AGGREGATIONS.items()} diff --git a/cubes/server/__init__.py b/cubes/server/__init__.py index 1cfd772d..88f65843 100644 --- a/cubes/server/__init__.py +++ b/cubes/server/__init__.py @@ -1,4 +1,4 @@ -from .blueprint import slicer, API_VERSION -from .base import run_server, create_server, read_slicer_config from .auth import Authenticator, NotAuthenticated +from .base import create_server, read_slicer_config, run_server +from .blueprint import API_VERSION, slicer from .local import workspace diff --git a/cubes/server/app.py b/cubes/server/app.py index 35742a8f..9ea44ed3 100644 --- a/cubes/server/app.py +++ b/cubes/server/app.py @@ -1,8 +1,8 @@ # -*- coding: utf-8 -*- import os -from .base import create_server -from .base import read_slicer_config + +from .base import create_server, read_slicer_config from .utils import str_to_bool # Set the configuration file diff --git a/cubes/server/auth.py b/cubes/server/auth.py index 5099a8c9..6eb0e1ec 100644 --- a/cubes/server/auth.py +++ b/cubes/server/auth.py @@ -1,17 +1,17 @@ # -*- coding: utf-8 -*- -from ..errors import * -from flask import Response, redirect import re + +from flask import Response, redirect + +from ..errors import * from ..ext import Extensible -__all__ = ( - "Authenticator", - "NotAuthenticated" -) +__all__ = ("Authenticator", "NotAuthenticated") # IMPORTANT: This is provisional code. Might be changed or removed. 
# + class NotAuthenticated(Exception): pass @@ -23,7 +23,7 @@ def authenticate(self, request): raise NotImplementedError def info_dict(self, request): - return { 'username' : self.authenticate(request) } + return {"username": self.authenticate(request)} def logout(self, request, identity): return "logged out" @@ -36,7 +36,7 @@ def __init__(self, realm=None): def logout(self, request, identity): headers = {"WWW-Authenticate": 'Basic realm="%s"' % self.realm} - url_root = request.args.get('url', request.url_root) + url_root = request.args.get("url", request.url_root) m = self.pattern.search(url_root) if m: url_root = m.group(1) + "__logout__@" + m.group(2) @@ -44,12 +44,16 @@ def logout(self, request, identity): else: return Response("logged out", status=401, headers=headers) + class AdminAdminAuthenticator(AbstractBasicAuthenticator, name="admin_admin"): - """Simple HTTP Basic authenticator for testing purposes. User name and - password have to be the same. User name is passed as the authenticated - identity.""" + """Simple HTTP Basic authenticator for testing purposes. + + User name and password have to be the same. User name is passed as + the authenticated identity. 
+ """ + def __init__(self, realm=None, **options): - super(AdminAdminAuthenticator, self).__init__(realm=realm) + super().__init__(realm=realm) def authenticate(self, request): auth = request.authorization @@ -64,8 +68,9 @@ def authenticate(self, request): class PassParameterAuthenticator(Authenticator, name="pass_parameter"): """Permissive authenticator that passes an URL parameter (default ``api_key``) as idenity.""" + def __init__(self, parameter=None, **options): - super(PassParameterAuthenticator, self).__init__(**options) + super().__init__(**options) self.parameter_name = parameter or "api_key" def authenticate(self, request): @@ -74,18 +79,21 @@ def authenticate(self, request): class HTTPBasicProxyAuthenticator(AbstractBasicAuthenticator, name="http_basic_proxy"): def __init__(self, realm=None, **options): - super(HTTPBasicProxyAuthenticator, self).__init__(realm=realm) + super().__init__(realm=realm) self.realm = realm or "Default" self.pattern = re.compile(r"^(http(?:s?)://)([^/]+.*)$", re.IGNORECASE) def authenticate(self, request): """Permissive authenticator using HTTP Basic authentication that - assumes the server to be behind a proxy, and that the proxy authenticated the user. - Does not check for a password, just passes the `username` as identity""" + assumes the server to be behind a proxy, and that the proxy + authenticated the user. 
+ + Does not check for a password, just passes the `username` as + identity + """ auth = request.authorization if auth: return auth.username raise NotAuthenticated(realm=self.realm) - diff --git a/cubes/server/base.py b/cubes/server/base.py index 14c484fd..c0c72808 100644 --- a/cubes/server/base.py +++ b/cubes/server/base.py @@ -1,24 +1,21 @@ # -*- encoding: utf-8 -*- -import shlex import os +import shlex from flask import Flask -from configparser import ConfigParser +from ..logging import get_logger from .blueprint import slicer from .utils import * -from ..logging import get_logger -__all__ = ( - "create_server", - "run_server" -) +__all__ = ("create_server", "run_server") # Server Instantiation and Running # ================================ -def read_slicer_config(config): + +def read_slicer_config(config: str) -> ConfigParser: if not config: return ConfigParser() elif isinstance(config, str): @@ -30,9 +27,13 @@ def read_slicer_config(config): raise Exception("Unable to load configuration: %s" % e) return config -def create_server(config=None, **_options): - """Returns a Flask server application. `config` is a path to a - ``slicer.ini`` file with Cubes workspace and server configuration.""" + +def create_server(config: Optional[ConfigParser] = None, **_options) -> Flask: + """Returns a Flask server application. + + `config` is a path to a ``slicer.ini`` file with Cubes workspace and + server configuration. + """ # Load extensions if config.has_option("server", "modules"): @@ -40,13 +41,14 @@ def create_server(config=None, **_options): for module in modules: e = __import__(module) - app = Flask(__name__.rsplit('.', 1)[0]) + app = Flask(__name__.rsplit(".", 1)[0]) # FIXME: read note about _options in Workspace. Only for internal use as a # temporary quick fix. 
app.register_blueprint(slicer, config=config, **_options) return app + def run_server(config, debug=False, app=None): """Run OLAP server with configuration specified in `config`""" @@ -59,8 +61,9 @@ def run_server(config, debug=False, app=None): debug = True if debug: - logger.warning('Server running under DEBUG, so logging level set to DEBUG.') + logger.warning("Server running under DEBUG, so logging level set to DEBUG.") import logging + logger.setLevel(logging.DEBUG) if app is None: @@ -81,8 +84,8 @@ def run_server(config, debug=False, app=None): else: use_reloader = False - if config.has_option('server', 'processes'): - processes = config.getint('server', 'processes') + if config.has_option("server", "processes"): + processes = config.getint("server", "processes") else: processes = 1 @@ -91,11 +94,11 @@ def run_server(config, debug=False, app=None): try: with open(path, "w") as f: f.write(str(os.getpid())) - except IOError as e: - logger.error("Unable to write PID file '%s'. Check the " - "directory existence or permissions." % path) + except OSError as e: + logger.error( + "Unable to write PID file '%s'. Check the " + "directory existence or permissions." 
% path + ) raise - app.run(host, port, debug=debug, processes=processes, - use_reloader=use_reloader) - + app.run(host, port, debug=debug, processes=processes, use_reloader=use_reloader) diff --git a/cubes/server/blueprint.py b/cubes/server/blueprint.py index 0466e5fe..4c340788 100644 --- a/cubes/server/blueprint.py +++ b/cubes/server/blueprint.py @@ -3,26 +3,36 @@ import sys import traceback from collections import OrderedDict +from configparser import ConfigParser +from typing import Optional, Union + +from flask import ( + Blueprint, + Response, + current_app, + g, + make_response, + redirect, + render_template, + request, + safe_join, +) +from flask.wrappers import Response -from flask import Blueprint, Response, request, g, current_app, safe_join, make_response -from flask import render_template, redirect +from cubes import __version__ -from ..workspace import Workspace, SLICER_INFO_KEYS -from ..query.constants import SPLIT_DIMENSION_NAME -from ..query import Cell, cut_from_dict +from .. import ext from ..errors import * from ..formatters import JSONLinesGenerator, csv_generator -from .. 
import ext from ..logging import get_logger -from .logging import configured_request_log_handlers, RequestLogger, \ - AsyncRequestLogger -from .errors import * +from ..query import Cell, cut_from_dict +from ..query.constants import SPLIT_DIMENSION_NAME +from ..workspace import SLICER_INFO_KEYS, Workspace +from .auth import NotAuthenticated from .decorators import * +from .errors import * from .local import * -from .auth import NotAuthenticated - - -from cubes import __version__ +from .logging import AsyncRequestLogger, RequestLogger, configured_request_log_handlers # TODO: missing features from the original Werkzeug Slicer: # * /locales and localization @@ -31,15 +41,13 @@ # * root / index # * response.headers.add("Access-Control-Allow-Origin", "*") + try: import cubes_search except ImportError: cubes_search = None -__all__ = ( - "slicer", - "API_VERSION" -) +__all__ = ("slicer", "API_VERSION") API_VERSION = 2 @@ -51,13 +59,22 @@ # Before # ------ -def _store_option(config, option, default, type_=None, allowed=None, - section="server"): - """Copies the `option` into the application config dictionary. `default` - is a default value, if there is no such option in `config`. `type_` can be - `bool`, `int` or `string` (default). If `allowed` is specified, then the - option should be only from the list of allowed options, otherwise a - `ConfigurationError` exception is raised. + +def _store_option( + config: ConfigParser, + option: str, + default: Optional[Union[int, str]], + type_: Optional[str] = None, + allowed=None, + section: str = "server", +) -> None: + """Copies the `option` into the application config dictionary. + + `default` is a default value, if there is no such option in + `config`. `type_` can be `bool`, `int` or `string` (default). If + `allowed` is specified, then the option should be only from the list + of allowed options, otherwise a `ConfigurationError` exception is + raised. 
""" if config.has_option(section, option): @@ -71,8 +88,7 @@ def _store_option(config, option, default, type_=None, allowed=None, value = default if allowed and value not in allowed: - raise ConfigurationError("Invalued value '%s' for option '%s'" - % (value, option)) + raise ConfigurationError(f"Invalued value '{value}' for option '{option}'") setattr(current_app.slicer, option, value) @@ -98,7 +114,7 @@ def initialize_slicer(state): else: _options = {} - if not hasattr(current_app, 'cubes_workspace'): + if not hasattr(current_app, "cubes_workspace"): current_app.cubes_workspace = Workspace(config, **_options) # Configure the application @@ -120,18 +136,19 @@ def initialize_slicer(state): else: options = {} - current_app.slicer.authenticator = ext.authenticator(method, - **options) + current_app.slicer.authenticator = ext.authenticator(method, **options) logger.debug("Server authentication method: %s" % (method or "none")) if not current_app.slicer.authenticator and workspace.authorizer: - logger.warn("No authenticator specified, but workspace seems to " - "be using an authorizer") + logger.warn( + "No authenticator specified, but workspace seems to " + "be using an authorizer" + ) # Collect query loggers handlers = configured_request_log_handlers(config) - if config.has_option('server', 'asynchronous_logging'): + if config.has_option("server", "asynchronous_logging"): async_logging = config.getboolean("server", "asynchronous_logging") else: async_logging = False @@ -141,11 +158,13 @@ def initialize_slicer(state): else: current_app.slicer.request_logger = RequestLogger(handlers) + # Before and After # ================ + @slicer.before_request -def process_common_parameters(): +def process_common_parameters() -> None: # TODO: setup language # Copy from the application context @@ -158,7 +177,7 @@ def process_common_parameters(): @slicer.before_request -def prepare_authorization(): +def prepare_authorization() -> None: if current_app.slicer.authenticator: try: 
identity = current_app.slicer.authenticator.authenticate(request) @@ -175,6 +194,7 @@ def prepare_authorization(): # Error Handler # ============= + @slicer.errorhandler(UserError) def user_error_handler(e): error_type = e.__class__.error_type @@ -192,56 +212,57 @@ def user_error_handler(e): return jsonify(error), code + @slicer.errorhandler(404) def page_not_found(e): error = { "error": "not_found", "message": "The requested URL was not found on the server.", "hint": "If you entered the URL manually please check your " - "spelling and try again." + "spelling and try again.", } return jsonify(error), 404 + @slicer.errorhandler(InternalError) def server_error(e): (exc_type, exc_value, exc_traceback) = sys.exc_info() exc_name = exc_type.__name__ - logger.error("Internal Cubes error ({}): {}".format(exc_name, exc_value)) + logger.error(f"Internal Cubes error ({exc_name}): {exc_value}") - tb = traceback.format_exception(exc_type, exc_value, - exc_traceback) + tb = traceback.format_exception(exc_type, exc_value, exc_traceback) logger.debug("Exception stack trace:\n{}".format("".join(tb))) error = { "error": "internal_server_error", "message": "Internal server error", "hint": "Server administrators can learn more about the error from " - "the error logs (even more if they have 'debug' level)" + "the error logs (even more if they have 'debug' level)", } return jsonify(error), 500 + # Endpoints # ========= + @slicer.route("/") def show_index(): info = get_info() has_about = any(key in info for key in SLICER_INFO_KEYS) - return render_template("index.html", - has_about=has_about, - **info) + return render_template("index.html", has_about=has_about, **info) @slicer.route("/version") -def show_version(): +def show_version() -> Response: info = { "version": __version__, # Backward compatibility key "server_version": __version__, - "api_version": API_VERSION + "api_version": API_VERSION, } return jsonify(info) @@ -261,19 +282,20 @@ def get_info(): # authentication authinfo = {} 
- authinfo["type"] = (current_app.slicer.authentication or "none") + authinfo["type"] = current_app.slicer.authentication or "none" if g.auth_identity: - authinfo['identity'] = g.auth_identity + authinfo["identity"] = g.auth_identity if current_app.slicer.authenticator: ainfo = current_app.slicer.authenticator.info_dict(request) authinfo.update(ainfo) - info['authentication'] = authinfo + info["authentication"] = authinfo return info + @slicer.route("/info") def show_info(): return jsonify(get_info()) @@ -290,16 +312,17 @@ def list_cubes(): @requires_cube def cube_model(cube_name): if workspace.authorizer: - hier_limits = workspace.authorizer.hierarchy_limits(g.auth_identity, - cube_name) + hier_limits = workspace.authorizer.hierarchy_limits(g.auth_identity, cube_name) else: hier_limits = None - response = g.cube.to_dict(expand_dimensions=True, - with_mappings=False, - full_attribute_names=True, - create_label=True, - hierarchy_limits=hier_limits) + response = g.cube.to_dict( + expand_dimensions=True, + with_mappings=False, + full_attribute_names=True, + create_label=True, + hierarchy_limits=hier_limits, + ) response["features"] = workspace.cube_features(g.cube).to_dict() @@ -312,17 +335,17 @@ def cube_model(cube_name): def aggregate(cube_name): cube = g.cube - output_format = validated_parameter(request.args, "format", - values=["json", "csv", 'xlsx'], - default="json") + output_format = validated_parameter( + request.args, "format", values=["json", "csv", "xlsx"], default="json" + ) - header_type = validated_parameter(request.args, "header", - values=["names", "labels", "none"], - default="labels") + header_type = validated_parameter( + request.args, "header", values=["names", "labels", "none"], default="labels" + ) fields_str = request.args.get("fields") if fields_str: - fields = fields_str.lower().split(',') + fields = fields_str.lower().split(",") else: fields = None @@ -342,13 +365,15 @@ def aggregate(cube_name): prepare_cell("split", "split") - result = 
g.browser.aggregate(g.cell, - aggregates=aggregates, - drilldown=drilldown, - split=g.split, - page=g.page, - page_size=g.page_size, - order=g.order) + result = g.browser.aggregate( + g.cell, + aggregates=aggregates, + drilldown=drilldown, + split=g.split, + page=g.page, + page_size=g.page_size, + order=g.order, + ) # Hide cuts that were generated internally (default: don't) if current_app.slicer.hide_private_cuts: @@ -367,22 +392,22 @@ def aggregate(cube_name): for l in result.labels: # TODO: add a little bit of polish to this if l == SPLIT_DIMENSION_NAME: - header.append('Matches Filters') + header.append("Matches Filters") else: - header += [ attr.label or attr.name for attr in cube.get_attributes([l], aggregated=True) ] + header += [ + attr.label or attr.name + for attr in cube.get_attributes([l], aggregated=True) + ] else: header = None fields = result.labels - generator = csv_generator(result, - fields, - include_header=bool(header), - header=header) + generator = csv_generator( + result, fields, include_header=bool(header), header=header + ) headers = {"Content-Disposition": 'attachment; filename="aggregate.csv"'} - return Response(generator, - mimetype='text/csv', - headers=headers) + return Response(generator, mimetype="text/csv", headers=headers) @slicer.route("/cube//facts") @@ -392,7 +417,7 @@ def cube_facts(cube_name): # Request parameters fields_str = request.args.get("fields") if fields_str: - fields = fields_str.split(',') + fields = fields_str.split(",") else: fields = None @@ -406,11 +431,9 @@ def cube_facts(cube_name): fields = [attr.ref for attr in attributes] # Get the result - facts = g.browser.facts(g.cell, - fields=fields, - order=g.order, - page=g.page, - page_size=g.page_size) + facts = g.browser.facts( + g.cell, fields=fields, order=g.order, page=g.page, page_size=g.page_size + ) # Add cube key to the fields (it is returned in the result) fields.insert(0, g.cube.key or "__fact_key__") @@ -421,6 +444,7 @@ def cube_facts(cube_name): return 
formatted_response(facts, fields, labels) + @slicer.route("/cube//fact/") @requires_browser def cube_fact(cube_name, fact_id): @@ -429,8 +453,7 @@ def cube_fact(cube_name, fact_id): if fact: return jsonify(fact) else: - raise NotFoundError(fact_id, "fact", - message="No fact with id '%s'" % fact_id) + raise NotFoundError(fact_id, "fact", message="No fact with id '%s'" % fact_id) @slicer.route("/cube//members/") @@ -442,8 +465,9 @@ def cube_members(cube_name, dimension_name): level = request.args.get("level") if depth and level: - raise RequestError("Both depth and level provided, use only one " - "(preferably level)") + raise RequestError( + "Both depth and level provided, use only one (preferably level)" + ) if depth: try: @@ -454,8 +478,11 @@ def cube_members(cube_name, dimension_name): try: dimension = g.cube.dimension(dimension_name) except KeyError: - raise NotFoundError(dimension_name, "dimension", - message="Dimension '%s' was not found" % dimension_name) + raise NotFoundError( + dimension_name, + "dimension", + message="Dimension '%s' was not found" % dimension_name, + ) hier_name = request.args.get("hierarchy") hierarchy = dimension.hierarchy(hier_name) @@ -465,18 +492,20 @@ def cube_members(cube_name, dimension_name): elif level: depth = hierarchy.level_index(level) + 1 - values = g.browser.members(g.cell, - dimension, - depth=depth, - hierarchy=hierarchy, - page=g.page, - page_size=g.page_size) + values = g.browser.members( + g.cell, + dimension, + depth=depth, + hierarchy=hierarchy, + page=g.page, + page_size=g.page_size, + ) result = { "dimension": dimension.name, "hierarchy": hierarchy.name, "depth": len(hierarchy) if depth is None else depth, - "data": values + "data": values, } # Collect fields and labels @@ -521,13 +550,12 @@ def cube_report(cube_name): # Override URL cut with the one in report cuts = [cut_from_dict(cut) for cut in cell_cuts] cell = Cell(cuts) - logger.info("using cell from report specification (URL parameters " - "are ignored)") + 
logger.info("using cell from report specification (URL parameters are ignored)") if workspace.authorizer: - cell = workspace.authorizer.restricted_cell(g.auth_identity, - cube=g.cube, - cell=cell) + cell = workspace.authorizer.restricted_cell( + g.auth_identity, cube=g.cube, cell=cell + ) else: if not g.cell: cell = Cell() @@ -552,10 +580,9 @@ def cube_search(cube_name): logger.debug("using search engine: %s" % engine_name) - search_engine = cubes_search.create_searcher(engine_name, - browser=g.browser, - locales=g.locales, - **options) + search_engine = cubes_search.create_searcher( + engine_name, browser=g.browser, locales=g.locales, **options + ) dimension = request.args.get("dimension") if not dimension: raise RequestError("No search dimension provided") @@ -567,8 +594,7 @@ def cube_search(cube_name): locale = g.locale or g.locales[0] - logger.debug("searching for '%s' in %s, locale %s" - % (query, dimension, locale)) + logger.debug(f"searching for '{query}' in {dimension}, locale {locale}") search_result = search_engine.search(query, dimension, locale=locale) @@ -576,7 +602,7 @@ def cube_search(cube_name): "matches": search_result.dimension_matches(dimension), "dimension": dimension, "total_found": search_result.total_found, - "locale": locale + "locale": locale, } if search_result.error: @@ -606,19 +632,22 @@ def get_visualizer(): else: raise PageNotFoundError("Visualizer not configured") + @slicer.after_request -def add_cors_headers(response): +def add_cors_headers(response: Response) -> Response: """Add Cross-origin resource sharing headers.""" origin = current_app.slicer.allow_cors_origin if origin and len(origin): - if request.method == 'OPTIONS': - response.headers['Access-Control-Allow-Headers'] = 'X-Requested-With' + if request.method == "OPTIONS": + response.headers["Access-Control-Allow-Headers"] = "X-Requested-With" # OPTIONS preflight requests need to receive origin back instead of wildcard - if origin == '*': - 
response.headers['Access-Control-Allow-Origin'] = request.headers.get('Origin', origin) + if origin == "*": + response.headers["Access-Control-Allow-Origin"] = request.headers.get( + "Origin", origin + ) else: - response.headers['Access-Control-Allow-Origin'] = origin - response.headers['Access-Control-Allow-Credentials'] = 'true' - response.headers['Access-Control-Allow-Methods'] = 'GET, POST, OPTIONS' - response.headers['Access-Control-Max-Age'] = CORS_MAX_AGE + response.headers["Access-Control-Allow-Origin"] = origin + response.headers["Access-Control-Allow-Credentials"] = "true" + response.headers["Access-Control-Allow-Methods"] = "GET, POST, OPTIONS" + response.headers["Access-Control-Max-Age"] = CORS_MAX_AGE return response diff --git a/cubes/server/browser.py b/cubes/server/browser.py index e516e08c..214d1d01 100644 --- a/cubes/server/browser.py +++ b/cubes/server/browser.py @@ -3,9 +3,9 @@ import json import logging -from ..query.browser import BrowserFeatures, BrowserFeatureAction from ..logging import get_logger from ..query import * +from ..query.browser import BrowserFeatureAction, BrowserFeatures from ..query.result import AggregationResult, Facts @@ -13,9 +13,8 @@ class SlicerBrowser(AggregationBrowser, name="slicer"): """Aggregation browser for Cubes Slicer OLAP server.""" def __init__(self, cube, store, locale=None, **options): - """Browser for another Slicer server. 
- """ - super(SlicerBrowser, self).__init__(cube, store, locale) + """Browser for another Slicer server.""" + super().__init__(cube, store, locale) self.logger = get_logger() self.cube = cube @@ -38,14 +37,15 @@ def features(self) -> BrowserFeatures: BrowserFeatureAction.cell, BrowserFeatureAction.members, ], - aggregate_functions=cube_features.get('aggregate_functions'), - post_aggregate_functions=cube_features.get('post_aggregate_functions') + aggregate_functions=cube_features.get("aggregate_functions"), + post_aggregate_functions=cube_features.get("post_aggregate_functions"), ) return features - def provide_aggregate(self, cell, aggregates, drilldown, split, order, - page, page_size, **options): + def provide_aggregate( + self, cell, aggregates, drilldown, split, order, page, page_size, **options + ): params = {} @@ -71,26 +71,23 @@ def provide_aggregate(self, cell, aggregates, drilldown, split, order, if page_size is not None: params["page_size"] = str(page_size) - - response = self.store.cube_request("aggregate", - self.cube.basename, params) + response = self.store.cube_request("aggregate", self.cube.basename, params) result = AggregationResult() - result.cells = response.get('cells', []) + result.cells = response.get("cells", []) if "summary" in response: - result.summary = response.get('summary') + result.summary = response.get("summary") - result.levels = response.get('levels', {}) - result.labels = response.get('labels', []) + result.levels = response.get("levels", {}) + result.labels = response.get("labels", []) result.cell = cell - result.aggregates = response.get('aggregates', []) + result.aggregates = response.get("aggregates", []) return result - def facts(self, cell=None, fields=None, order=None, page=None, - page_size=None): + def facts(self, cell=None, fields=None, order=None, page=None, page_size=None): cell = cell or Cell() if fields: @@ -119,14 +116,24 @@ def facts(self, cell=None, fields=None, order=None, page=None, params["format"] = 
"json_lines" - response = self.store.cube_request("facts", self.cube.basename, params, - is_lines=True) + response = self.store.cube_request( + "facts", self.cube.basename, params, is_lines=True + ) return Facts(response, attributes) - def provide_members(self, cell=None, dimension=None, levels=None, - hierarchy=None, attributes=None, page=None, - page_size=None, order=None, **options): + def provide_members( + self, + cell=None, + dimension=None, + levels=None, + hierarchy=None, + attributes=None, + page=None, + page_size=None, + order=None, + **options + ): params = {} @@ -153,7 +160,7 @@ def provide_members(self, cell=None, dimension=None, levels=None, params["format"] = "json_lines" - action = "/cube/%s/members/%s" % (self.cube.basename, str(dimension)) + action = "/cube/{}/members/{}".format(self.cube.basename, str(dimension)) response = self.store.request(action, params, is_lines=True) return response @@ -168,12 +175,12 @@ def cell_details(self, cell, dimension=None): if dimension: params["dimension"] = str(dimension) - response = self.store.cube_request("cell", self.cube.basename, params) + response = self.store.cube_request("cell", self.cube.basename, params) return response def fact(self, fact_id): - action = "/cube/%s/fact/%s" % (self.cube.basename, str(fact_id)) + action = "/cube/{}/fact/{}".format(self.cube.basename, str(fact_id)) response = self.store.request(action) return response @@ -182,6 +189,5 @@ def is_builtin_function(self, name, aggregate): def _order_param(self, order): """Prepare an order string in form: ``attribute:direction``""" - string = ",".join("%s:%s" % (o[0], o[1]) for o in order) + string = ",".join("{}:{}".format(o[0], o[1]) for o in order) return string - diff --git a/cubes/server/caching.py b/cubes/server/caching.py index c61d7598..d0bb92d8 100644 --- a/cubes/server/caching.py +++ b/cubes/server/caching.py @@ -1,11 +1,11 @@ # -*- coding: utf-8 -*- +import cPickle as pickle import json import logging -from functools import 
update_wrapper, wraps +import types from datetime import datetime, timedelta from exceptions import BaseException -import cPickle as pickle -import types +from functools import update_wrapper, wraps from werkzeug.routing import Rule from werkzeug.wrappers import Response @@ -15,9 +15,14 @@ def _make_key_str(name, *args, **kwargs): key_str = name if args: - key_str += '::' + '::'.join([str(a) for a in args]) + key_str += "::" + "::".join([str(a) for a in args]) if kwargs: - key_str += '::' + '::'.join(['%s=%s' % (str(k), str(v)) for k, v in sorted(kwargs.items(), key=lambda x: x[0])]) + key_str += "::" + "::".join( + [ + "{}={}".format(str(k), str(v)) + for k, v in sorted(kwargs.items(), key=lambda x: x[0]) + ] + ) return key_str @@ -29,12 +34,12 @@ def query_ttl_strategy(data): import chat2query import measures - if 'q' in data: - query = chat2query.parse(data['q']) + if "q" in data: + query = chat2query.parse(data["q"]) config = measures.get_measure_manifest().get(query.measure, {}) - ttl = config.get('ttl', None) + ttl = config.get("ttl", None) if ttl: - logging.getLogger().debug('Using configured ttl: %s', ttl) + logging.getLogger().debug("Using configured ttl: %s", ttl) return ttl return None @@ -45,44 +50,44 @@ def _default_strategy(data): def response_dumps(response): - return { - 'data': response.data, - 'mimetype': response.content_type - } + return {"data": response.data, "mimetype": response.content_type} def response_loads(data): - return Response(data['data'], mimetype=data['mimetype']) - + return Response(data["data"], mimetype=data["mimetype"]) def cacheable(fn): @wraps(fn) def _cache(self, *args, **kwargs): - if not hasattr(self, 'cache'): - logging.getLogger().warn('Object is not configured with cache for @cacheable function: %s', self) + if not hasattr(self, "cache"): + logging.getLogger().warn( + "Object is not configured with cache for @cacheable function: %s", self + ) return fn(self, *args, **kwargs) - additional_args = getattr(self, 'args', 
{}) + additional_args = getattr(self, "args", {}) cache_impl = self.cache - name = '%s.%s' % (self.__class__.__name__, fn.__name__) - key = _make_key_str(name, *args, **dict(additional_args.items() + kwargs.items())) + name = f"{self.__class__.__name__}.{fn.__name__}" + key = _make_key_str( + name, *args, **dict(additional_args.items() + kwargs.items()) + ) try: v = cache_impl.get(key) if not v: - self.logger.debug('CACHE MISS') + self.logger.debug("CACHE MISS") v = fn(self, *args, **kwargs) cache_impl.set(key, v) else: - self.logger.debug('CACHE HIT') + self.logger.debug("CACHE HIT") return v except Exception as e: - self.logger.warn('ERROR, skipping cache') + self.logger.warn("ERROR, skipping cache") self.logger.exception(e) v = fn(self, *args, **kwargs) try: @@ -93,8 +98,7 @@ def _cache(self, *args, **kwargs): return update_wrapper(_cache, fn) - -class Cache(object): +class Cache: def __setitem__(self, key, value): return self.set(key, value) @@ -110,33 +114,39 @@ def _trap(*args, **kwargs): try: return fn(*args, **kwargs) except BaseException as e: - logging.getLogger().error('%s: %s, %s', fn.__name__, args, kwargs) + logging.getLogger().error("%s: %s, %s", fn.__name__, args, kwargs) logging.getLogger().exception(e) + return _trap class MongoCache(Cache): - - def __init__(self, name, ds, ttl=60, ttl_strategy=_default_strategy, dumps=_NOOP, loads=_NOOP, logger=logging.getLogger(), **kwargs): + def __init__( + self, + name, + ds, + ttl=60, + ttl_strategy=_default_strategy, + dumps=_NOOP, + loads=_NOOP, + logger=logging.getLogger(), + **kwargs, + ): self.ttl = ttl self.store = ds.Caches[name] self.dumps = dumps self.loads = loads self.ttl_strategy = ttl_strategy - self.logger=logger + self.logger = logger @trap def set(self, key, val, ttl=None): t = ttl or self.ttl_strategy(val) or self.ttl n = datetime.utcnow() + timedelta(seconds=t) - p = { - '_id': key, - 't': n, - 'd': self.dumps(val) - } + p = {"_id": key, "t": n, "d": self.dumps(val)} - 
self.logger.debug('Set: %s, ttl: %s', key, t) + self.logger.debug("Set: %s, ttl: %s", key, t) item = self.store.save(p) return item is not None @@ -144,31 +154,31 @@ def set(self, key, val, ttl=None): @trap def get(self, key): n = datetime.utcnow() - item = self.store.find_one({'_id':key}) + item = self.store.find_one({"_id": key}) if item: - item['d'] = self.loads(item['d']) - exp = item['t'] + item["d"] = self.loads(item["d"]) + exp = item["t"] if exp >= n: - self.logger.debug('Hit: %s', key) - return item['d'] + self.logger.debug("Hit: %s", key) + return item["d"] else: - self.logger.debug('Stale: %s', key) - self.store.remove({'_id': key}) + self.logger.debug("Stale: %s", key) + self.store.remove({"_id": key}) return None else: - self.logger.debug('Miss: %s', key) + self.logger.debug("Miss: %s", key) return None def rem(self, key): n = datetime.utcnow() - item = self.store.find_one({'_id':key}) + item = self.store.find_one({"_id": key}) if item: - self.logger.debug('Remove: %s', key) + self.logger.debug("Remove: %s", key) self.store.remove(item) return True else: - self.logger.debug('Miss: %s', key) + self.logger.debug("Miss: %s", key) return False diff --git a/cubes/server/decorators.py b/cubes/server/decorators.py index ae1ca05a..8c76856d 100644 --- a/cubes/server/decorators.py +++ b/cubes/server/decorators.py @@ -1,36 +1,34 @@ # -*- coding: utf-8 -*- -from flask import Blueprint, Flask, Response, request, g, current_app +from contextlib import contextmanager from functools import wraps +from typing import Callable + +from flask import Blueprint, Flask, Response, current_app, g, request -from ..workspace import Workspace from ..auth import NotAuthorized +from ..calendar import CalendarMemberConverter +from ..errors import * from ..query.cells import Cell, cut_from_dict, cuts_from_string from ..query.constants import SPLIT_DIMENSION_NAME -from ..errors import * -from .utils import * +from ..workspace import Workspace from .errors import * from .local import * 
-from ..calendar import CalendarMemberConverter - -from contextlib import contextmanager +from .utils import * # Utils # ----- + def prepare_cell(argname="cut", target="cell", restrict=False): """Sets `g.cell` with a `Cell` object from argument with name `argname`""" # Used by prepare_browser_request and in /aggregate for the split cell - # TODO: experimental code, for now only for dims with time role - converters = { - "time": CalendarMemberConverter(workspace.calendar) - } + converters = {"time": CalendarMemberConverter(workspace.calendar)} cuts = [] for cut_string in request.args.getlist(argname): - cuts += cuts_from_string(g.cube, cut_string, - role_member_converters=converters) + cuts += cuts_from_string(g.cube, cut_string, role_member_converters=converters) if cuts: cell = Cell(cuts) @@ -39,13 +37,13 @@ def prepare_cell(argname="cut", target="cell", restrict=False): if restrict: if workspace.authorizer: - cell = workspace.authorizer.restricted_cell(g.auth_identity, - cube=g.cube, - cell=cell) + cell = workspace.authorizer.restricted_cell( + g.auth_identity, cube=g.cube, cell=cell + ) setattr(g, target, cell) -def requires_cube(f): +def requires_cube(f: Callable) -> Callable: @wraps(f) def wrapper(*args, **kwargs): if "lang" in request.args: @@ -57,16 +55,18 @@ def wrapper(*args, **kwargs): try: g.cube = authorized_cube(cube_name, locale=g.locale) except NoSuchCubeError: - raise NotFoundError(cube_name, "cube", - "Unknown cube '%s'" % cube_name) + raise NotFoundError(cube_name, "cube", "Unknown cube '%s'" % cube_name) return f(*args, **kwargs) return wrapper -def requires_browser(f): + +def requires_browser(f: Callable) -> Callable: """Prepares three global variables: `g.cube`, `g.browser` and `g.cell`. - Also athorizes the cube using `authorize()`.""" + + Also athorizes the cube using `authorize()`. 
+ """ @wraps(f) def wrapper(*args, **kwargs): @@ -110,9 +110,9 @@ def wrapper(*args, **kwargs): for order in orders.split(","): split = order.split(":") if len(split) == 1: - g.order.append( (order, None) ) + g.order.append((order, None)) else: - g.order.append( (split[0], split[1]) ) + g.order.append((split[0], split[1])) return f(*args, **kwargs) @@ -122,23 +122,28 @@ def wrapper(*args, **kwargs): # Get authorized cube # =================== + def authorized_cube(cube_name, locale): - """Returns a cube `cube_name`. Handle cube authorization if required.""" + """Returns a cube `cube_name`. + + Handle cube authorization if required. + """ try: cube = workspace.cube(cube_name, g.auth_identity, locale=locale) except NotAuthorized: - ident = "'%s'" % g.auth_identity if g.auth_identity \ - else "unspecified identity" - raise NotAuthorizedError("Authorization of cube '%s' failed for " - "%s" % (cube_name, ident)) + ident = "'%s'" % g.auth_identity if g.auth_identity else "unspecified identity" + raise NotAuthorizedError( + f"Authorization of cube '{cube_name}' failed for {ident}" + ) return cube # Query Logging # ============= -def log_request(action, attrib_field="attributes"): + +def log_request(action: str, attrib_field: str = "attributes") -> Callable: def decorator(f): @wraps(f) def wrapper(*args, **kwargs): @@ -158,11 +163,10 @@ def wrapper(*args, **kwargs): "page_size": g.page_size, "format": request.args.get("format"), "header": request.args.get("header"), - "attributes": request.args.get(attrib_field) + "attributes": request.args.get(attrib_field), } - with rlogger.log_time(action, g.browser, g.cell, g.auth_identity, - **other): + with rlogger.log_time(action, g.browser, g.cell, g.auth_identity, **other): retval = f(*args, **kwargs) return retval @@ -170,4 +174,3 @@ def wrapper(*args, **kwargs): return wrapper return decorator - diff --git a/cubes/server/errors.py b/cubes/server/errors.py index 8b6d1bc0..7c2b86d3 100644 --- a/cubes/server/errors.py +++ 
b/cubes/server/errors.py @@ -2,10 +2,7 @@ import json -server_error_codes = { - "unknown": 400, - "missing_object": 404 -} +server_error_codes = {"unknown": 400, "missing_object": 404} try: from werkzeug.exceptions import HTTPException @@ -19,18 +16,16 @@ class ServerError(HTTPException): code = 500 error_type = "default" + def __init__(self, message=None, exception=None, **details): - super(ServerError, self).__init__() + super().__init__() self.message = message self.exception = exception self.details = details self.help = None def get_body(self, environ): - error = { - "message": self.message, - "type": self.__class__.error_type - } + error = {"message": self.message, "type": self.__class__.error_type} if self.exception: error["reason"] = str(self.exception) @@ -43,7 +38,7 @@ def get_body(self, environ): def get_headers(self, environ): """Get a list of headers.""" - return [('Content-Type', 'application/json')] + return [("Content-Type", "application/json")] class RequestError(ServerError): @@ -61,9 +56,7 @@ class NotAuthenticatedError(ServerError): error_type = "not_authenticated" def __init__(self, message=None, exception=None, realm=None, **details): - super(NotAuthenticatedError, self).__init__(message, - exception, - **details) + super().__init__(message, exception, **details) self.message = message self.exception = exception self.details = details @@ -72,8 +65,8 @@ def __init__(self, message=None, exception=None, realm=None, **details): def get_headers(self, environ): """Get a list of headers.""" - headers = super(NotAuthenticatedError, self).get_headers(environ) - headers.append(('WWW-Authenticate', 'Basic realm="%s"' % self.realm)) + headers = super().get_headers(environ) + headers.append(("WWW-Authenticate", 'Basic realm="%s"' % self.realm)) return headers @@ -81,24 +74,24 @@ def get_headers(self, environ): class PageNotFoundError(ServerError): code = 404 error_type = "not_found" + def __init__(self, message=None): - super(PageNotFoundError, 
self).__init__(message) + super().__init__(message) # TODO: Rename this to ObjectNotFoundError class NotFoundError(ServerError): code = 404 error_type = "object_not_found" + def __init__(self, obj, objtype=None, message=None): - super(NotFoundError, self).__init__(message) - self.details = { "object": obj } + super().__init__(message) + self.details = {"object": obj} if objtype: self.details["object_type"] = objtype if not message: - self.message = "Object '%s' of type '%s' was not found" % (obj, objtype) + self.message = f"Object '{obj}' of type '{objtype}' was not found" else: self.message = message - - diff --git a/cubes/server/local.py b/cubes/server/local.py index 434f005e..447c90b2 100644 --- a/cubes/server/local.py +++ b/cubes/server/local.py @@ -2,17 +2,21 @@ from flask import current_app from werkzeug.local import LocalProxy +from cubes.workspace import Workspace + # Application Context # =================== # # Readability proxies -def _get_workspace(): + +def _get_workspace() -> Workspace: return current_app.cubes_workspace + def _get_logger(): return current_app.cubes_workspace.logger + workspace = LocalProxy(_get_workspace) logger = LocalProxy(_get_logger) - diff --git a/cubes/server/logging.py b/cubes/server/logging.py index 321162e4..f99600d3 100644 --- a/cubes/server/logging.py +++ b/cubes/server/logging.py @@ -1,31 +1,31 @@ # -*- coding: utf-8 -*- -import datetime -import time import csv +import datetime import io import json - -from contextlib import contextmanager +import time from collections import namedtuple -from threading import Thread +from configparser import ConfigParser +from contextlib import contextmanager from queue import Queue +from threading import Thread +from typing import Any, List from .. 
import ext -from ..logging import get_logger from ..errors import * +from ..logging import get_logger from ..query.drilldown import Drilldown __all__ = [ "create_request_log_handler", "configured_request_log_handlers", - "RequestLogger", "AsyncRequestLogger", "RequestLogHandler", "DefaultRequestLogHandler", "CSVFileRequestLogHandler", - 'XLSXFileRequestLogHandler', - "QUERY_LOG_ITEMS" + "XLSXFileRequestLogHandler", + "QUERY_LOG_ITEMS", ] @@ -42,12 +42,13 @@ "page", "page_size", "format", - "headers" + "headers", ] -def configured_request_log_handlers(config, prefix="query_log", - default_logger=None): +def configured_request_log_handlers( + config: ConfigParser, prefix: str = "query_log", default_logger=None +) -> List[Any]: """Returns configured query loggers as defined in the `config`.""" handlers = [] @@ -67,8 +68,8 @@ def configured_request_log_handlers(config, prefix="query_log", return handlers -class RequestLogger(object): - def __init__(self, handlers=None): +class RequestLogger: + def __init__(self, handlers: Optional[List[Any]] = None) -> None: if handlers: self.handlers = list(handlers) else: @@ -91,7 +92,7 @@ def log(self, method, browser, cell, identity=None, elapsed=None, **other): "cube": browser.cube, "identity": identity, "elapsed_time": elapsed or 0, - "cell": cell + "cell": cell, } record.update(other) @@ -101,12 +102,14 @@ def log(self, method, browser, cell, identity=None, elapsed=None, **other): try: handler.write_record(browser.cube, cell, record) except Exception as e: - self.logger.error("Server log handler error (%s): %s" - % (type(handler).__name__, str(e))) - + self.logger.error( + "Server log handler error (%s): %s" + % (type(handler).__name__, str(e)) + ) def _stringify_record(self, record): - """Return a log rectord with object attributes converted to unicode strings""" + """Return a log rectord with object attributes converted to unicode + strings.""" record = dict(record) record["cube"] = str(record["cube"]) @@ -122,20 +125,20 @@ 
def _stringify_record(self, record): class AsyncRequestLogger(RequestLogger): def __init__(self, handlers=None): - super(AsyncRequestLogger, self).__init__(handlers) + super().__init__(handlers) self.queue = Queue() - self.thread = Thread(target=self.log_consumer, - name="slicer_logging") + self.thread = Thread(target=self.log_consumer, name="slicer_logging") self.thread.daemon = True self.thread.start() def log(self, *args, **kwargs): - self.queue.put( (args, kwargs) ) + self.queue.put((args, kwargs)) def log_consumer(self): while True: (args, kwargs) = self.queue.get() - super(AsyncRequestLogger, self).log(*args, **kwargs) + super().log(*args, **kwargs) + class RequestLogHandler(ext.Extensible, abstract=True): __extension_type__ = "request_log_handler" @@ -159,9 +162,16 @@ def write_record(self, cube, cell, record, **options): else: identity_str = "none" - self.logger.info("method:%s cube:%s cell:%s identity:%s time:%s" - % (record["method"], record["cube"], cell_str, - identity_str, record["elapsed_time"])) + self.logger.info( + "method:%s cube:%s cell:%s identity:%s time:%s" + % ( + record["method"], + record["cube"], + cell_str, + identity_str, + record["elapsed_time"], + ) + ) class CSVFileRequestLogHandler(RequestLogHandler, name="csv"): @@ -177,7 +187,7 @@ def write_record(self, cube, cell, record): item = str(item) out.append(item) - with io.open(self.path, 'ab') as f: + with io.open(self.path, "ab") as f: writer = csv.writer(f) writer.writerow(out) @@ -195,15 +205,18 @@ def write_record(self, cube, cell, record): item = str(item) out.append(item) - with io.open(self.path, 'ab') as f: + with io.open(self.path, "ab") as f: writer = csv.writer(f) writer.writerow(out) class JSONRequestLogHandler(RequestLogHandler, name="json"): def __init__(self, path=None, **options): - """Creates a JSON logger which logs requests in a JSON lines. 
It - includes two lists: `cell_dimensions` and `drilldown_dimensions`.""" + """Creates a JSON logger which logs requests in a JSON lines. + + It includes two lists: `cell_dimensions` and + `drilldown_dimensions`. + """ self.path = path def write_record(self, cube, cell, record): @@ -229,7 +242,7 @@ def write_record(self, cube, cell, record): dim = cube.dimension(cut.dimension) depth = cut.level_depth() if depth: - level = dim.hierarchy(cut.hierarchy)[depth-1] + level = dim.hierarchy(cut.hierarchy)[depth - 1] level_name = str(level) else: level_name = None @@ -238,7 +251,7 @@ def write_record(self, cube, cell, record): "dimension": str(dim), "hierarchy": str(cut.hierarchy), "level": str(level_name), - "value": str(cut) + "value": str(cut), } uses.append(use) @@ -257,14 +270,13 @@ def write_record(self, cube, cell, record): "dimension": str(dim), "hierarchy": str(hier), "level": str(level), - "value": None + "value": None, } uses.append(use) record["drilldown_dimensions"] = uses line = json.dumps(record) - with io.open(self.path, 'ab') as f: + with io.open(self.path, "ab") as f: json.dump(record, f) f.write("\n") - diff --git a/cubes/server/store.py b/cubes/server/store.py index d47dd7e5..fe0970ce 100644 --- a/cubes/server/store.py +++ b/cubes/server/store.py @@ -1,19 +1,23 @@ # -*- coding=utf -*- -from ..metadata import * -from ..query import * -from ..stores import Store +import json +from urllib.parse import urlencode +from urllib.request import ( + HTTPBasicAuthHandler, + HTTPPasswordMgrWithDefaultRealm, + build_opener, + urlopen, +) + from ..errors import * from ..logging import get_logger +from ..metadata import * +from ..query import * from ..settings import Setting, SettingType -import json - -from urllib.request import urlopen, build_opener -from urllib.request import HTTPPasswordMgrWithDefaultRealm -from urllib.request import HTTPBasicAuthHandler -from urllib.parse import urlencode +from ..stores import Store DEFAULT_SLICER_URL = "http://localhost:5000" + 
class _default_opener: def __init__(self): pass @@ -21,6 +25,7 @@ def __init__(self): def open(self, url, *args, **kwargs): return urlopen(url, *args, **kwargs) + class SlicerStore(Store, name="slicer"): related_model_provider = "slicer" @@ -30,43 +35,41 @@ class SlicerStore(Store, name="slicer"): """ extension_settings = [ Setting( - name= "url", - desc= "URL of slicer server to connect to", - type= SettingType.str, + name="url", desc="URL of slicer server to connect to", type=SettingType.str ), Setting( - name= "authentication", - desc= "Authentication method (pass_parameter or none)", - type= SettingType.str, + name="authentication", + desc="Authentication method (pass_parameter or none)", + type=SettingType.str, ), Setting( - name= "auth_identity", - desc= "Authenticated identity (user name, key, ...)", - type= SettingType.str, + name="auth_identity", + desc="Authenticated identity (user name, key, ...)", + type=SettingType.str, ), Setting( - name= "auth_parameter", - desc= "Name of authentication URL parameter " \ - "(default: api_key", - type= SettingType.str, + name="auth_parameter", + desc="Name of authentication URL parameter (default: api_key)", + type=SettingType.str, ), Setting( - name= "username", - desc= "HTTP authentication username", - type= SettingType.str, + name="username", desc="HTTP authentication username", type=SettingType.str ), Setting( - name= "password", - desc= "HTTP authentication password", - type= SettingType.str, + name="password", desc="HTTP authentication password", type=SettingType.str ), ] - def __init__(self, url=None, authentication=None, - auth_identity=None, auth_parameter=None, - **options): + def __init__( + self, + url=None, + authentication=None, + auth_identity=None, + auth_parameter=None, + **options, + ): - super(SlicerStore, self).__init__(**options) + super().__init__(**options) url = url or DEFAULT_SLICER_URL @@ -74,8 +77,9 @@ def __init__(self, url=None, authentication=None, self.logger = get_logger() if 
authentication and authentication not in ["pass_parameter", "none"]: - raise ConfigurationError("Unsupported authentication method '%s'" - % authentication) + raise ConfigurationError( + "Unsupported authentication method '%s'" % authentication + ) self.authentication = authentication self.auth_identity = auth_identity @@ -84,9 +88,12 @@ def __init__(self, url=None, authentication=None, if "username" in options and "password" in options: # make a basic auth-enabled opener _pmgr = HTTPPasswordMgrWithDefaultRealm() - _pmgr.add_password(None, self.url, options['username'], options['password']) + _pmgr.add_password(None, self.url, options["username"], options["password"]) self.opener = build_opener(HTTPBasicAuthHandler(_pmgr)) - self.logger.info("Created slicer opener using basic auth credentials with username %s", options['username']) + self.logger.info( + "Created slicer opener using basic auth credentials with username %s", + options["username"], + ) else: self.opener = _default_opener() @@ -105,19 +112,22 @@ def request(self, action, params=None, is_lines=False): params[self.auth_parameter] = self.auth_identity params_str = urlencode(params) - request_url = '%s/%s' % (self.url, action) + request_url = f"{self.url}/{action}" if params_str: - request_url += '?' + params_str + request_url += "?" 
+ params_str - self.logger.debug("slicer request: %s" % (request_url, )) + self.logger.debug(f"slicer request: {request_url}") response = self.opener.open(request_url) if response.getcode() == 404: raise MissingObjectError elif response.getcode() != 200: - raise BackendError("Slicer request error (%s): %s" - % (response.getcode(), response.read())) + raise BackendError( + "Slicer request error ({}): {}".format( + response.getcode(), response.read() + ) + ) if is_lines: return _JSONLinesIterator(response) @@ -130,11 +140,11 @@ def request(self, action, params=None, is_lines=False): return result def cube_request(self, action, cube, params=None, is_lines=False): - action = "cube/%s/%s" % (cube, action) + action = f"cube/{cube}/{action}" return self.request(action, params, is_lines) -class _JSONLinesIterator(object): +class _JSONLinesIterator: def __init__(self, stream): self.stream = stream @@ -144,15 +154,13 @@ def __iter__(self): class SlicerModelProvider(ModelProvider, name="slicer"): - """ - Uses external Slicer server as a model provider. 
- """ + """Uses external Slicer server as a model provider.""" def requires_store(self): return True def list_cubes(self): - return self.store.request('cubes') + return self.store.request("cubes") def cube(self, name, locale=None): params = {} diff --git a/cubes/server/utils.py b/cubes/server/utils.py index 510da13d..e6ee2b86 100644 --- a/cubes/server/utils.py +++ b/cubes/server/utils.py @@ -1,32 +1,43 @@ # -*- encoding: utf-8 -*- -from flask import Request, Response, request, g - import codecs -import json import csv +import json +from configparser import ConfigParser +from typing import Dict, Optional, Union + +from flask import Request, Response, g, request +from flask.wrappers import Response +from cubes.server.logging import RequestLogger + +from ..formatters import ( + JSONLinesGenerator, + SlicerJSONEncoder, + csv_generator, + xlsx_generator, +) from .errors import * -from ..formatters import csv_generator, JSONLinesGenerator, SlicerJSONEncoder,\ - xlsx_generator def str_to_bool(string): - """Convert a `string` to bool value. Returns ``True`` if `string` is - one of ``["true", "yes", "1", "on"]``, returns ``False`` if `string` is - one of ``["false", "no", "0", "off"]``, otherwise returns ``None``.""" + """Convert a `string` to bool value. + + Returns ``True`` if `string` is one of ``["true", "yes", "1", + "on"]``, returns ``False`` if `string` is one of ``["false", "no", + "0", "off"]``, otherwise returns ``None``. 
+ """ if string is not None: if string.lower() in ["true", "yes", "1", "on"]: return True - elif string.lower() in["false", "no", "0", "off"]: + elif string.lower() in ["false", "no", "0", "off"]: return False return None -def validated_parameter(args, name, values=None, default=None, - case_sensitive=False): +def validated_parameter(args, name, values=None, default=None, case_sensitive=False): """Return validated parameter `param` that has to be from the list of `values` if provided.""" @@ -40,28 +51,30 @@ def validated_parameter(args, name, values=None, default=None, else: if values and param not in values: list_str = ", ".join(values) - raise RequestError("Parameter '%s' should be one of: %s" - % (name, list_str)) + raise RequestError(f"Parameter '{name}' should be one of: {list_str}") return param class CustomDict(dict): - def __getattr__(self, attr): + def __getattr__(self, attr: str) -> Optional[Union[int, str]]: try: - return super(CustomDict, self).__getitem__(attr) + return super().__getitem__(attr) except KeyError: - return super(CustomDict, self).__getattribute__(attr) + return super().__getattribute__(attr) - def __setattr__(self, attr, value): + def __setattr__( + self, attr: str, value: Optional[Union[int, RequestLogger, str, ConfigParser]] + ) -> None: self.__setitem__(attr, value) # Utils # ===== -def jsonify(obj): - """Returns a ``application/json`` `Response` object with `obj` converted - to JSON.""" + +def jsonify(obj: Dict[str, Union[str, int]]) -> Response: + """Returns a ``application/json`` `Response` object with `obj` converted to + JSON.""" if g.prettyprint: indent = 4 @@ -72,21 +85,26 @@ def jsonify(obj): encoder.iterator_limit = g.json_record_limit data = encoder.iterencode(obj) - return Response(data, mimetype='application/json') + return Response(data, mimetype="application/json") def formatted_response(response, fields, labels, iterable=None): - """Wraps request which returns response that can be formatted. 
The - `data_attribute` is name of data attribute or key in the response that - contains formateable data.""" + """Wraps request which returns response that can be formatted. - output_format = validated_parameter(request.args, "format", - values=["xlsx", "json", "json_lines", "csv"], - default="json") + The `data_attribute` is name of data attribute or key in the + response that contains formateable data. + """ - header_type = validated_parameter(request.args, "header", - values=["names", "labels", "none"], - default="labels") + output_format = validated_parameter( + request.args, + "format", + values=["xlsx", "json", "json_lines", "csv"], + default="json", + ) + + header_type = validated_parameter( + request.args, "header", values=["names", "labels", "none"], default="labels" + ) # Construct the header if header_type == "names": @@ -96,39 +114,29 @@ def formatted_response(response, fields, labels, iterable=None): else: header = None - # If no iterable is provided, we assume the response to be iterable iterable = iterable or response if output_format == "json": return jsonify(response) elif output_format == "json_lines": - return Response(JSONLinesGenerator(iterable), - mimetype='application/x-json-lines') + return Response( + JSONLinesGenerator(iterable), mimetype="application/x-json-lines" + ) elif output_format == "csv": - generator = csv_generator(iterable, - fields, - include_header=bool(header), - header=header) + generator = csv_generator( + iterable, fields, include_header=bool(header), header=header + ) headers = {"Content-Disposition": 'attachment; filename="facts.csv"'} - return Response(generator, - mimetype='text/csv', - headers=headers) - elif output_format == 'xlsx': + return Response(generator, mimetype="text/csv", headers=headers) + elif output_format == "xlsx": generator = xlsx_generator( - iterable, - fields, - include_header=bool(header), - header=header + iterable, fields, include_header=bool(header), header=header ) - fh = open(generator, 'rb') 
+ fh = open(generator, "rb") resp = fh.read() fh.close() headers = {"Content-Disposition": 'attachment; filename="facts.xlsx"'} - return Response(resp, - content_type='application/ms-excel', - headers=headers) - - + return Response(resp, content_type="application/ms-excel", headers=headers) diff --git a/cubes/settings.py b/cubes/settings.py index 8721a9f0..8f039eb2 100644 --- a/cubes/settings.py +++ b/cubes/settings.py @@ -1,25 +1,12 @@ -from typing import ( - Any, - Collection, - Dict, - Mapping, - NamedTuple, - Iterator, - Optional, - Union, - Tuple, - Set, - cast, - ) - -import collections.abc as abc from collections import OrderedDict -from .errors import InternalError, ConfigurationError from enum import Enum +from typing import Any, Collection, Dict, Iterator, Mapping, Optional, Set, Union +from .errors import ConfigurationError, InternalError SettingValue = Union[str, float, bool, int] + class SettingType(Enum): str = 0 int = 1 @@ -27,8 +14,10 @@ class SettingType(Enum): float = 3 store = 4 + STRING_SETTING_TYPES = [SettingType.str, SettingType.store] + class Setting: name: str default: SettingValue @@ -38,14 +27,16 @@ class Setting: is_required: bool values: Collection[str] - def __init__(self, - name: str, - type: Optional[SettingType]=None, - default: Optional[Any]=None, - desc: Optional[str]=None, - label: Optional[str]=None, - is_required: bool=False, - values: Optional[Collection[str]]=None) -> None: + def __init__( + self, + name: str, + type: Optional[SettingType] = None, + default: Optional[Any] = None, + desc: Optional[str] = None, + label: Optional[str] = None, + is_required: bool = False, + values: Optional[Collection[str]] = None, + ) -> None: self.name = name self.default = default self.type = type or SettingType.str @@ -58,6 +49,7 @@ def __init__(self, TRUE_VALUES = ["1", "true", "yes", "on"] FALSE_VALUES = ["0", "false", "no", "off"] + def _to_bool(value: Optional[SettingValue]) -> Optional[bool]: retval: Optional[bool] @@ -98,6 +90,7 @@ def 
_to_int(value: Optional[SettingValue]) -> Optional[int]: return retval + def _to_float(value: Optional[SettingValue]) -> Optional[float]: retval: Optional[float] @@ -117,6 +110,7 @@ def _to_float(value: Optional[SettingValue]) -> Optional[float]: return retval + def _to_string(value: Optional[SettingValue]) -> Optional[str]: retval: Optional[str] @@ -132,6 +126,7 @@ def _to_string(value: Optional[SettingValue]) -> Optional[str]: return retval + def _cast_value(value: Any, setting: Setting) -> Optional[SettingValue]: retval: Optional[SettingValue] @@ -148,16 +143,19 @@ def _cast_value(value: Any, setting: Setting) -> Optional[SettingValue]: return retval -def distill_settings(mapping: Mapping[str, Any], - settings: Collection[Setting], - owner: Optional[str]=None) -> Dict[str, Optional[SettingValue]]: + +def distill_settings( + mapping: Mapping[str, Any], + settings: Collection[Setting], + owner: Optional[str] = None, +) -> Dict[str, Optional[SettingValue]]: """Coalesce values of `mapping` to match type in `settings`. If the mapping contains key that don't have corresponding settings or when the mapping does not contain key for a required setting an `ConfigurationError` exeption is raised. - - The returned dictionary can be safely used to be passed into an extension's - `__init__()` method as key-word arguments. + + The returned dictionary can be safely used to be passed into an + extension's `__init__()` method as key-word arguments. 
""" value: Optional[SettingValue] @@ -176,22 +174,21 @@ def distill_settings(mapping: Mapping[str, Any], if name in lower_map: result[setting.name] = _cast_value(lower_map[name], setting) elif setting.is_required: - raise ConfigurationError(f"Setting '{name}'{ownerstr}" - f" is required") + raise ConfigurationError(f"Setting '{name}'{ownerstr}" f" is required") elif setting.default is not None: # We assume that extension developers provide values in correct # type result[name] = setting.default - keys: Set[str] - keys = set(mapping.keys()) - set(s.name for s in settings) + keys = set(mapping.keys()) - {s.name for s in settings} if keys: alist: str = ", ".join(sorted(keys)) raise ConfigurationError(f"Unknown settings{ownerstr}: {alist}") return result + # Note: This is a little similar to the ConfigParser section mapping, but # richer information # @@ -201,15 +198,16 @@ class SettingsDict(Mapping[str, Optional[SettingValue]]): _dict: Dict[str, Optional[SettingValue]] _settings: Dict[str, Setting] - def __init__(self, - mapping: Mapping[str, SettingValue], - settings: Collection[Setting], - ) -> None: - """Create a dictionary of settings from `mapping`. Only items specified - in the `settings` are going to be included in the new settings - dictionary.""" + def __init__( + self, mapping: Mapping[str, SettingValue], settings: Collection[Setting] + ) -> None: + """Create a dictionary of settings from `mapping`. + + Only items specified in the `settings` are going to be included + in the new settings dictionary. 
+ """ - self._dict = distill_settings(mapping ,settings) + self._dict = distill_settings(mapping, settings) self._settings = OrderedDict((s.name, s) for s in settings) def __getitem__(self, key: str) -> Any: diff --git a/cubes/slicer/commands.py b/cubes/slicer/commands.py index a2065daa..593cdaa4 100644 --- a/cubes/slicer/commands.py +++ b/cubes/slicer/commands.py @@ -1,5 +1,5 @@ # -*- encoding: utf-8 -*- -"""Slicer – Cubes command-line tool +"""Slicer – Cubes command-line tool. For more information run: slicer --help @@ -7,40 +7,52 @@ environment variable. """ -from typing import TYPE_CHECKING, Optional - import json import os import sys +from typing import List, Optional + import click -from ..datastructures import AttributeDict -from ..errors import InconsistencyError, ArgumentError, InternalError, UserError -from ..formatters import csv_generator, SlicerJSONEncoder, JSONLinesGenerator, xlsx_generator -from ..metadata import read_model_metadata, write_model_metadata_bundle -from ..workspace import Workspace -from ..errors import CubesError -from ..server import run_server -from ..server.base import read_slicer_config +import cubes from .. 
import ext - -from ..query import cuts_from_string, Cell -from ..metadata import string_to_dimension_level - - +from ..datastructures import AttributeDict +from ..errors import ( + ArgumentError, + CubesError, + InconsistencyError, + InternalError, + UserError, +) from ..ext import ExtensionRegistry - +from ..formatters import ( + JSONLinesGenerator, + SlicerJSONEncoder, + csv_generator, + xlsx_generator, +) +from ..metadata import ( + read_model_metadata, + string_to_dimension_level, + write_model_metadata_bundle, +) +from ..query import Cell, cuts_from_string +from ..server import run_server +from ..server.base import read_slicer_config +from ..workspace import Workspace DEFAULT_CONFIG = "slicer.ini" @click.group() @click.pass_context -@click.option('--debug/--no-debug', - envvar='CUBES_DEBUG', - default=False, - help="Enable/disable debugging output") +@click.option( + "--debug/--no-debug", + envvar="CUBES_DEBUG", + default=False, + help="Enable/disable debugging output", +) def cli(ctx, debug): ctx.obj = AttributeDict() ctx.obj.debug = debug @@ -49,10 +61,10 @@ def cli(ctx, debug): ################################################################################ # Command: serve + @cli.command() -@click.argument('config', type=click.Path(exists=True), default=DEFAULT_CONFIG) -@click.option('--visualizer', - help="Visualizer URL for /visualizer path") +@click.argument("config", type=click.Path(exists=True), default=DEFAULT_CONFIG) +@click.option("--visualizer", help="Visualizer URL for /visualizer path") @click.pass_context def serve(ctx, config, visualizer): """Run Slicer HTTP server.""" @@ -64,20 +76,26 @@ def serve(ctx, config, visualizer): run_server(config, debug=ctx.obj.debug) + ################################################################################ # Command: extension + @cli.command("extension") -@click.argument('extension_type', metavar='TYPE', required=False, default='all') -@click.argument('extension_name', metavar='NAME', required=False) 
-@click.option("--try-import", is_flag=True, default=False, - help="Try whether the module can be imported") +@click.argument("extension_type", metavar="TYPE", required=False, default="all") +@click.argument("extension_name", metavar="NAME", required=False) +@click.option( + "--try-import", + is_flag=True, + default=False, + help="Try whether the module can be imported", +) @click.pass_context def extension_info(ctx, extension_type, extension_name, try_import): - """Show info about Cubes extensions""" + """Show info about Cubes extensions.""" types: List[str] - if extension_type == 'all': + if extension_type == "all": types = ext.EXTENSION_TYPES.keys() else: types = [extension_type] @@ -87,8 +105,7 @@ def extension_info(ctx, extension_type, extension_name, try_import): registry = ext.get_registry(extension_type) desc = registry.describe(extension_name) - click.echo(f"{desc.name} - {desc.label}\n\n" - f"{desc.doc}\n") + click.echo(f"{desc.name} - {desc.label}\n\n" f"{desc.doc}\n") if desc.settings: click.echo("Settings:\n") @@ -105,7 +122,7 @@ def extension_info(ctx, extension_type, extension_name, try_import): click.echo("Available Cubes extensions:\n") for ext_type in types: registry = ext.get_registry(ext_type) - + click.echo(ext_type) for name in registry.names(): if try_import: @@ -118,6 +135,7 @@ def extension_info(ctx, extension_type, extension_name, try_import): click.echo() + def _try_import(registry: ExtensionRegistry, name: str) -> Optional[str]: result: str = "OK" try: @@ -128,18 +146,20 @@ def _try_import(registry: ExtensionRegistry, name: str) -> Optional[str]: return result - ################################################################################ # Command: list + @cli.command() -@click.option('--verbose/--terse', 'verbose', default=False, - help='Display also cube description') -@click.argument('config', required=False, - default=DEFAULT_CONFIG, type=click.Path(exists=True)) +@click.option( + "--verbose/--terse", "verbose", default=False, 
help="Display also cube description" +) +@click.argument( + "config", required=False, default=DEFAULT_CONFIG, type=click.Path(exists=True) +) @click.pass_context def list(ctx, config, verbose): - """List cubes""" + """List cubes.""" ws = Workspace(config) for cube in ws.list_cubes(): @@ -147,28 +167,33 @@ def list(ctx, config, verbose): label = cube.get("label", name) desc = cube.get("description", "(no description)") if verbose: - print("{} - {}\n {}\n".format(name, label, desc)) + print(f"{name} - {label}\n {desc}\n") else: - print("{} - {}".format(name, label)) + print(f"{name} - {label}") ################################################################################ # Command: valdate_model + @cli.group() @click.pass_context def model(ctx): """Model metadata tools.""" pass + @model.command() -@click.option('--defaults', '-d', 'show_defaults', default=False, - help='show defaults') -@click.option('--warnings/--no-warnings', 'show_warnings', default=True, - help='enable/disable warnings') -@click.argument('model_path', metavar='MODEL') +@click.option("--defaults", "-d", "show_defaults", default=False, help="show defaults") +@click.option( + "--warnings/--no-warnings", + "show_warnings", + default=True, + help="enable/disable warnings", +) +@click.argument("model_path", metavar="MODEL") def validate(show_defaults, show_warnings, model_path): - """Validate model metadata""" + """Validate model metadata.""" click.echo("Reading model %s" % model_path) model = cubes.read_model_metadata(model_path) @@ -185,7 +210,7 @@ def validate(show_defaults, show_warnings, model_path): scope = "model" else: if error.object: - scope = "%s '%s'" % (error.scope, error.object) + scope = f"{error.scope} '{error.object}'" else: scope = "unknown %s" % error.scope @@ -203,19 +228,18 @@ def validate(show_defaults, show_warnings, model_path): default_count += 1 if show: - print("%s in %s: %s" - % (error.severity.upper(), scope, error.message)) + print("{} in {}: 
{}".format(error.severity.upper(), scope, error.message)) if error_count == 0: if warning_count == 0: if default_count == 0: verdict = "model can be used" else: - verdict = "model can be used, " \ - "make sure that the defaults reflect reality" + verdict = ( + "model can be used, make sure that the defaults reflect reality" + ) else: - verdict = "not recommended to use the model, " \ - "some issues might emerge" + verdict = "not recommended to use the model, some issues might emerge" else: verdict = "model can not be used" @@ -228,18 +252,21 @@ def validate(show_defaults, show_warnings, model_path): if error_count > 0: exit(1) + ################################################################################ # Command: test + @cli.command() -@click.option('--aggregate', is_flag=True, default=False, - help="Test aggregate of whole cube") -@click.option('--exclude-store', '-E', 'exclude_stores', multiple=True) -@click.option('--store', 'include_stores', multiple=True) -@click.argument('config', default=DEFAULT_CONFIG) -@click.argument('cube', nargs=-1) +@click.option( + "--aggregate", is_flag=True, default=False, help="Test aggregate of whole cube" +) +@click.option("--exclude-store", "-E", "exclude_stores", multiple=True) +@click.option("--store", "include_stores", multiple=True) +@click.argument("config", default=DEFAULT_CONFIG) +@click.argument("cube", nargs=-1) def test(aggregate, exclude_stores, include_stores, config, cube): - """Test every cube in the model""" + """Test every cube in the model.""" workspace = cubes.Workspace(config) errors = [] @@ -257,10 +284,9 @@ def test(aggregate, exclude_stores, include_stores, config, cube): for name in cube_list: cube = workspace.cube(name) - click.echo("testing {}: ".format(name), nl=False) + click.echo(f"testing {name}: ", nl=False) - if cube.store_name in exclude \ - or (include and cube.store_name not in include): + if cube.store_name in exclude or (include and cube.store_name not in include): click.echo("pass") 
continue @@ -293,20 +319,24 @@ def test(aggregate, exclude_stores, include_stores, config, cube): else: etype = str(type(e)) - click.echo("%s: %s - %s" % (cube, etype, str(e))) + click.echo("{}: {} - {}".format(cube, etype, str(e))) else: click.echo("test passed") @model.command() -@click.option('--format', 'model_format', type=click.Choice(["json", "bundle"]), - default='json', - help='output model format') -@click.option('--force', is_flag=True, - default=False, - help='replace existing model bundle') -@click.argument('model_path', metavar='MODEL') -@click.argument('target', required=False) +@click.option( + "--format", + "model_format", + type=click.Choice(["json", "bundle"]), + default="json", + help="output model format", +) +@click.option( + "--force", is_flag=True, default=False, help="replace existing model bundle" +) +@click.argument("model_path", metavar="MODEL") +@click.argument("target", required=False) @click.pass_context def convert(ctx, model_format, force, model_path, target): """Convert model between model formats.""" @@ -321,38 +351,54 @@ def convert(ctx, model_format, force, model_path, target): elif model_format == "bundle": write_model_metadata_bundle(target, metadata, replace=force) + def read_config(cfg): """Read the configuration file.""" return read_slicer_config(cfg) + ################################################################################ # Group: sql + @cli.group() @click.pass_context -@click.option('--store', nargs=1, - help="Name of the store to use other than default. Must be SQL.") -@click.option('--config', nargs=1, default=DEFAULT_CONFIG, - help="Name of slicer.ini configuration file") +@click.option( + "--store", nargs=1, help="Name of the store to use other than default. Must be SQL." 
+) +@click.option( + "--config", + nargs=1, + default=DEFAULT_CONFIG, + help="Name of slicer.ini configuration file", +) def sql(ctx, store, config): - """SQL store commands""" + """SQL store commands.""" ctx.obj.workspace = cubes.Workspace(config) ctx.obj.store = ctx.obj.workspace.get_store(store) + ################################################################################ # Command: sql denormalize + @sql.command() -@click.option('--force', is_flag=True, default=False, - help='replace existing views') -@click.option('--materialize', '-m', is_flag=True, default=False, - help='create materialized view (table)') -@click.option('--index/--no-index', default=True, - help='create index for key attributes') -@click.option('--schema', '-s', - help='target view schema (overrides default fact schema') -@click.argument('cube', required=False) -@click.argument('target', required=False) +@click.option("--force", is_flag=True, default=False, help="replace existing views") +@click.option( + "--materialize", + "-m", + is_flag=True, + default=False, + help="create materialized view (table)", +) +@click.option( + "--index/--no-index", default=True, help="create index for key attributes" +) +@click.option( + "--schema", "-s", help="target view schema (overrides default fact schema" +) +@click.argument("cube", required=False) +@click.argument("target", required=False) @click.pass_context def denormalize(ctx, force, materialize, index, schema, cube, target): """Create denormalized view(s) from cube(s).""" @@ -374,15 +420,17 @@ def denormalize(ctx, force, materialize, index, schema, cube, target): cube = workspace.cube(cube_name) store = workspace.get_store(cube.store_name or "default") - print("denormalizing cube '%s' into '%s'" % (cube_name, - target)) + print(f"denormalizing cube '{cube_name}' into '{target}'") - store.create_denormalized_view(cube, target, - materialize=materialize, - replace=force, - create_index=index, - keys_only=False, - schema=schema) + 
store.create_denormalized_view(
+            cube,
+            target,
+            materialize=materialize,
+            replace=force,
+            create_index=index,
+            keys_only=False,
+            schema=schema,
+        )
 
 
 # TODO: Nice to have it back
@@ -405,62 +453,88 @@ def denormalize(ctx, force, materialize, index, schema, cube, target):
 ################################################################################
 # Command: sql aggregate
 
+
 @sql.command("aggregate")
-@click.option('--force', is_flag=True, default=False,
-              help='replace existing views')
-@click.option('--index/--no-index', default=True,
-              help='create index for key attributes')
-@click.option('--schema', '-s',
-              help='target view schema (overrides default fact schema')
-@click.option('--dimension', '-d', "dimensions", multiple=True,
-              help='dimension to be used for aggregation')
-@click.argument('cube')
-@click.argument('target', required=False)
+@click.option("--force", is_flag=True, default=False, help="replace existing views")
+@click.option(
+    "--index/--no-index", default=True, help="create index for key attributes"
+)
+@click.option(
+    "--schema", "-s", help="target view schema (overrides default fact schema)"
+)
+@click.option(
+    "--dimension",
+    "-d",
+    "dimensions",
+    multiple=True,
+    help="dimension to be used for aggregation",
+)
+@click.argument("cube")
+@click.argument("target", required=False)
 @click.pass_context
 def sql_aggregate(ctx, force, index, schema, cube, target, dimensions):
     """Create pre-aggregated table from cube(s)."""
     workspace = ctx.obj.workspace
     store = ctx.obj.store
 
-    print("denormalizing cube '%s' into '%s'" % (cube_name,
-                                                 target))
+    print(f"aggregating cube '{cube}' into '{target}'")
 
-    store.create_cube_aggregate(cube, target,
-                                replace=force,
-                                create_index=index,
-                                schema=schema,
-                                dimensions=dimensions)
+    store.create_cube_aggregate(
+        cube,
+        target,
+        replace=force,
+        create_index=index,
+        schema=schema,
+        dimensions=dimensions,
+    )
 
 
 ################################################################################
 # 
Command: aggregate + @cli.command() -@click.option('--config', type=click.Path(exists=True), required=False, - default=DEFAULT_CONFIG) - -@click.option('--aggregate', '-a', 'aggregates', multiple=True, - help="List of aggregates to get") -@click.option('--cut', '-c', 'cuts', multiple=True, - help="Cell cut") -@click.option('--split', 'split_str', multiple=False, - help="Split cell") -@click.option('--drilldown', '-d', 'drilldown', multiple=True, - help="Drilldown dimensions") - -@click.option('--on-row', 'on_rows', multiple=True, - help="Attribute to put on row (default is all)") -@click.option('--on-column', 'on_columns', multiple=True, - help="Attribute to put on column (default is none)") - -@click.option('--format', "-f", "formatter_name", default="cross_table", - help="Output format") - -@click.argument('cube_name', metavar='CUBE') +@click.option( + "--config", type=click.Path(exists=True), required=False, default=DEFAULT_CONFIG +) +@click.option( + "--aggregate", "-a", "aggregates", multiple=True, help="List of aggregates to get" +) +@click.option("--cut", "-c", "cuts", multiple=True, help="Cell cut") +@click.option("--split", "split_str", multiple=False, help="Split cell") +@click.option( + "--drilldown", "-d", "drilldown", multiple=True, help="Drilldown dimensions" +) +@click.option( + "--on-row", + "on_rows", + multiple=True, + help="Attribute to put on row (default is all)", +) +@click.option( + "--on-column", + "on_columns", + multiple=True, + help="Attribute to put on column (default is none)", +) +@click.option( + "--format", "-f", "formatter_name", default="cross_table", help="Output format" +) +@click.argument("cube_name", metavar="CUBE") @click.pass_context -def aggregate(ctx, config, cube_name, aggregates, cuts, drilldown, formatter_name, - split_str, on_rows, on_columns): - """Aggregate a cube""" +def aggregate( + ctx, + config, + cube_name, + aggregates, + cuts, + drilldown, + formatter_name, + split_str, + on_rows, + on_columns, +): + 
"""Aggregate a cube.""" config = read_config(config) workspace = Workspace(config) browser = workspace.browser(cube_name) @@ -481,22 +555,26 @@ def aggregate(ctx, config, cube_name, aggregates, cuts, drilldown, formatter_nam aggregates = [agg.name for agg in browser.cube.aggregates] # TODO: paging and ordering - result = browser.aggregate(cell, - aggregates=aggregates, - drilldown=drilldown, - split=split, - page=None, - page_size=None, - order=None) + result = browser.aggregate( + cell, + aggregates=aggregates, + drilldown=drilldown, + split=split, + page=None, + page_size=None, + order=None, + ) if formatter_name: formatter = ext.formatter(formatter_name) - output = formatter.format(browser.cube, - result, - onrows=on_rows, - oncolumns=on_columns, - aggregates=aggregates, - aggregates_on="columns") + output = formatter.format( + browser.cube, + result, + onrows=on_rows, + oncolumns=on_columns, + aggregates=aggregates, + aggregates_on="columns", + ) else: output = result.to_dict() @@ -506,22 +584,25 @@ def aggregate(ctx, config, cube_name, aggregates, cuts, drilldown, formatter_nam ################################################################################ # Command: members -@cli.command() -@click.option('--config', type=click.Path(exists=True), required=False, - default=DEFAULT_CONFIG) - -@click.option('--cut', '-c', 'cuts', multiple=True, - help="Cell cut") -@click.option('--format', "-f", "output_format", default="json", - type=click.Choice(["json", "csv", "json_lines", 'xlsx']), - help="Output format") - -@click.argument('cube_name', metavar='CUBE') -@click.argument('dim_name', metavar='DIMENSION') +@cli.command() +@click.option( + "--config", type=click.Path(exists=True), required=False, default=DEFAULT_CONFIG +) +@click.option("--cut", "-c", "cuts", multiple=True, help="Cell cut") +@click.option( + "--format", + "-f", + "output_format", + default="json", + type=click.Choice(["json", "csv", "json_lines", "xlsx"]), + help="Output format", +) 
+@click.argument("cube_name", metavar="CUBE") +@click.argument("dim_name", metavar="DIMENSION") @click.pass_context def members(ctx, config, cube_name, cuts, dim_name, output_format): - """Aggregate a cube""" + """Aggregate a cube.""" config = read_config(config) workspace = Workspace(config) browser = workspace.browser(cube_name) @@ -543,12 +624,9 @@ def members(ctx, config, cube_name, cuts, dim_name, output_format): depth = len(hierarchy) # TODO: pagination - values = browser.members(cell, - dimension, - depth=depth, - hierarchy=hierarchy, - page=None, - page_size=None) + values = browser.members( + cell, dimension, depth=depth, hierarchy=hierarchy, page=None, page_size=None + ) attributes = [] for level in hierarchy.levels_for_depth(depth): @@ -563,19 +641,13 @@ def members(ctx, config, cube_name, cuts, dim_name, output_format): elif output_format == "json_lines": result = JSONLinesGenerator(values) elif output_format == "csv": - result = csv_generator(values, - fields, - include_header=True, - header=labels) - elif output_format == 'xlsx': - result = xlsx_generator( - values, - fields, - include_header=True, - header=labels - ) + result = csv_generator(values, fields, include_header=True, header=labels) + elif output_format == "xlsx": + result = xlsx_generator(values, fields, include_header=True, header=labels) + else: + raise ValueError(f"Illegal output format: {output_format}") - out = click.get_text_stream('stdout') + out = click.get_text_stream("stdout") for row in result: out.write(row) @@ -595,12 +667,14 @@ def main(*args, **kwargs): if os.environ.get("CUBES_ERROR_DEBUG"): raise else: - click.echo("\n" \ - "Error: Internal error occured.\n" - "Reason: {}\n\n" \ - "Please report the error and information about what you " \ - "were doing to the Cubes development team.\n" - .format(e), err=True) + click.echo( + "\n" + "Error: Internal error occured.\n" + "Reason: {}\n\n" + "Please report the error and information about what you " + "were doing to the Cubes 
development team.\n".format(e), + err=True, + ) sys.exit(1) except (InternalError, UserError) as e: @@ -611,6 +685,5 @@ def main(*args, **kwargs): if os.environ.get("CUBES_ERROR_DEBUG"): raise else: - click.echo("\nError: {}".format(e), err=True) + click.echo(f"\nError: {e}", err=True) sys.exit(1) - diff --git a/cubes/sql/__init__.py b/cubes/sql/__init__.py index cc3568c9..be797c36 100644 --- a/cubes/sql/__init__.py +++ b/cubes/sql/__init__.py @@ -5,4 +5,3 @@ __all__ += browser.__all__ __all__ += store.__all__ - diff --git a/cubes/sql/browser.py b/cubes/sql/browser.py index d97d857a..5a788cb1 100644 --- a/cubes/sql/browser.py +++ b/cubes/sql/browser.py @@ -1,8 +1,10 @@ # -*- encoding=utf -*- -"""SQL Browser""" +"""SQL Browser.""" +import collections from typing import ( + TYPE_CHECKING, Any, Collection, Dict, @@ -11,72 +13,48 @@ List, Mapping, Optional, - TYPE_CHECKING, - Type, Tuple, + Type, Union, ) -import collections - -from . import sqlalchemy as sa -from ..types import _RecordType, ValueType - -from ..metadata.attributes import ( - Attribute, - AttributeBase, - Measure, - MeasureAggregate, - ) -from ..metadata.dimension import Hierarchy, HierarchyPath, Dimension, Level +from ..errors import ArgumentError, InternalError +from ..logging import get_logger +from ..metadata.attributes import Attribute, AttributeBase, Measure, MeasureAggregate from ..metadata.cube import Cube +from ..metadata.dimension import Dimension, Hierarchy, HierarchyPath, Level from ..metadata.physical import Join - -from ..types import _RecordType, ValueType - from ..query import available_calculators -from ..query.browser import ( - AggregationBrowser, - BrowserFeatures, - BrowserFeatureAction, - _OrderType, - # FIXME: We should not be getting this here - _OrderArgType, - ) -from ..query.result import AggregationResult, Facts -from ..query.drilldown import Drilldown +from ..query.browser import ( # FIXME: We should not be getting this here + AggregationBrowser, + BrowserFeatureAction, + 
BrowserFeatures, + _OrderArgType, + _OrderType, +) from ..query.cells import Cell, PointCut -from ..logging import get_logger -from ..errors import ArgumentError, InternalError - -from ..stores import Store +from ..query.drilldown import Drilldown +from ..query.result import AggregationResult, Facts from ..settings import Setting, SettingType - +from ..stores import Store +from ..types import ValueType, _RecordType +from . import sqlalchemy as sa from .functions import available_aggregate_functions -from .mapper import ( - Mapper, - NamingDict, - DenormalizedMapper, - StarSchemaMapper, - ) -from .query import StarSchema, QueryContext, FACT_KEY_LABEL -from .utils import paginate_query, order_query +from .mapper import DenormalizedMapper, Mapper, NamingDict, StarSchemaMapper +from .query import FACT_KEY_LABEL, QueryContext, StarSchema +from .utils import order_query, paginate_query if TYPE_CHECKING: from .store import SQLStore -from ..types import _RecordType - -__all__ = [ - "SQLBrowser", -] +__all__ = ["SQLBrowser"] class SQLBrowser(AggregationBrowser, name="sql"): """SnowflakeBrowser is a SQL-based AggregationBrowser implementation that - can aggregate star and snowflake schemas without need of having - explicit view or physical denormalized table. + can aggregate star and snowflake schemas without need of having explicit + view or physical denormalized table. 
 
     Attributes:
 
@@ -106,7 +84,6 @@ class SQLBrowser(AggregationBrowser, name="sql"):
 
     * only one locale can be used for browsing at a time
    * locale is implemented as denormalized: one column for each language
-
     """
 
     extension_desc = """
@@ -116,34 +93,27 @@ class SQLBrowser(AggregationBrowser, name="sql"):
 
     extension_settings = [
         Setting(
-            name= "include_summary",
-            desc= "Include aggregation summary "\
-                  "(requires extra statement)",
-            type= SettingType.bool
-        ),
-        Setting(
-            name= "include_cell_count",
-            type= SettingType.bool
-        ),
-        Setting(
-            name= "use_denormalization",
-            type= SettingType.bool
+            name="include_summary",
+            desc="Include aggregation summary (requires extra statement)",
+            type=SettingType.bool,
         ),
+        Setting(name="include_cell_count", type=SettingType.bool),
+        Setting(name="use_denormalization", type=SettingType.bool),
         Setting(
-            name= "safe_labels",
-            desc= "Use internally SQL statement column labels " \
-                  "without special characters",
-            type= SettingType.bool
+            name="safe_labels",
+            desc="Use internally SQL statement column labels "
+            "without special characters",
+            type=SettingType.bool,
         ),
         Setting(
-            name= "is_denormalized",
-            desc= "The data is in a denormalzied table",
-            type= SettingType.bool
+            name="is_denormalized",
+            desc="The data is in a denormalized table",
+            type=SettingType.bool,
         ),
         Setting(
-            name= "exclude_null_aggregates",
-            desc= "Exclude aggregates which value is NULL",
-            type= SettingType.bool
+            name="exclude_null_aggregates",
+            desc="Exclude aggregates which value is NULL",
+            type=SettingType.bool,
         ),
         Setting(
             name="naming",
@@ -156,21 +126,23 @@ class SQLBrowser(AggregationBrowser, name="sql"):
 
     connectable: sa.Connectable
     star: StarSchema
     # FIXME: [Typing] see Cube.distilled_hierarchies
-    hierarchies: Dict[Tuple[str,Optional[str]],List[str]]
+    hierarchies: Dict[Tuple[str, Optional[str]], List[str]]
 
     include_summary: bool
     include_cell_count: bool
     safe_labels: bool
     exclude_null_aggregates: bool
 
-    def __init__(self,
-                 cube: Cube,
-                 
store: "SQLStore", - locale: str=None, - debug: bool=False, - tables: Optional[Mapping[str, sa.FromClause]] = None, - naming: Optional[NamingDict]=None, - **kwargs: Any) -> None: + def __init__( + self, + cube: Cube, + store: "SQLStore", + locale: str = None, + debug: bool = False, + tables: Optional[Mapping[str, sa.FromClause]] = None, + naming: Optional[NamingDict] = None, + **kwargs: Any, + ) -> None: """Create a SQL Browser.""" super().__init__(cube, store=store, locale=locale or cube.locale) @@ -188,8 +160,7 @@ def __init__(self, else: self.connectable = store - metadata = kwargs.get("metadata", - sa.MetaData(bind=self.connectable)) + metadata = kwargs.get("metadata", sa.MetaData(bind=self.connectable)) # Options # ------- @@ -201,18 +172,18 @@ def __init__(self, # FIXME: [typing] Remove the `type: ignore` once Options are # implemented - self.include_summary = options.get("include_summary", True) # type: ignore - self.include_cell_count = options.get("include_cell_count", True) # type: ignore + self.include_summary = options.get("include_summary", True) # type: ignore + self.include_cell_count = options.get( + "include_cell_count", True + ) # type: ignore self.safe_labels = options.get("safe_labels", False) # type: ignore if self.safe_labels: - self.logger.debug("using safe labels for cube {}" - .format(cube.name)) + self.logger.debug(f"using safe labels for cube {cube.name}") # Whether to ignore cells where at least one aggregate is NULL # TODO: this is undocumented - self.exclude_null_agregates = options.get("exclude_null_agregates", - False) + self.exclude_null_agregates = options.get("exclude_null_agregates", False) # Mapper # ------ @@ -223,14 +194,18 @@ def __init__(self, mapper: Mapper if options.get("is_denormalized", options.get("use_denormalization")): - mapper = DenormalizedMapper(cube=self.cube, naming=naming or {}, - locale=locale) + mapper = DenormalizedMapper( + cube=self.cube, naming=naming or {}, locale=locale + ) else: - mapper = 
StarSchemaMapper(cube=self.cube, naming=naming or {}, - locale=locale) + mapper = StarSchemaMapper( + cube=self.cube, naming=naming or {}, locale=locale + ) - self.logger.debug("using mapper %s for cube '%s' (locale: %s)" % - (str(mapper.__class__.__name__), cube.name, locale)) + self.logger.debug( + "using mapper %s for cube '%s' (locale: %s)" + % (str(mapper.__class__.__name__), cube.name, locale) + ) # Prepare the mappings of base attributes # @@ -244,13 +219,15 @@ def __init__(self, joins = [] # FIXME: [2.0] Pass mapper instead of prepared mappings - self.star = StarSchema(self.cube.name, - metadata, - mappings=mappings, - fact_name=mapper.fact_name, - joins=joins, - schema=mapper.schema, - tables=tables) + self.star = StarSchema( + self.cube.name, + metadata, + mappings=mappings, + fact_name=mapper.fact_name, + joins=joins, + schema=mapper.schema, + tables=tables, + ) # Extract hierarchies # ------------------- @@ -258,9 +235,11 @@ def __init__(self, self.hierarchies = self.cube.distilled_hierarchies def features(self) -> BrowserFeatures: - """Return SQL features. Currently they are all the same for every - cube, however in the future they might depend on the SQL engine or - other factors.""" + """Return SQL features. + + Currently they are all the same for every cube, however in the + future they might depend on the SQL engine or other factors. 
+ """ features = BrowserFeatures( actions=[ @@ -271,7 +250,7 @@ def features(self) -> BrowserFeatures: BrowserFeatureAction.members, ], aggregate_functions=available_aggregate_functions(), - post_aggregate_functions=available_calculators() + post_aggregate_functions=available_calculators(), ) return features @@ -282,22 +261,22 @@ def is_builtin_function(self, funcname: str) -> bool: return funcname in available_aggregate_functions() - def fact(self, - key_value: ValueType, - fields: Collection[AttributeBase]=None) \ - -> Optional[_RecordType]: + def fact( + self, key_value: ValueType, fields: Collection[AttributeBase] = None + ) -> Optional[_RecordType]: """Get a single fact with key `key_value` from cube. - Number of SQL queries: 1.""" + Number of SQL queries: 1. + """ attributes: Collection[AttributeBase] attributes = fields or self.cube.all_fact_attributes statement: sa.Select labels: List[str] - (statement, labels) = self.denormalized_statement(attributes=attributes, - cell=Cell(), - include_fact_key=True) + (statement, labels) = self.denormalized_statement( + attributes=attributes, cell=Cell(), include_fact_key=True + ) condition = self.star.fact_key_column == key_value statement = statement.where(condition) @@ -316,13 +295,15 @@ def fact(self, return record - def facts(self, - cell: Cell=None, - fields: Collection[AttributeBase]=None, - order: List[_OrderArgType]=None, - page: int=None, - page_size: int=None, - fact_list: List[ValueType]=None) -> Facts: + def facts( + self, + cell: Cell = None, + fields: Collection[AttributeBase] = None, + order: List[_OrderArgType] = None, + page: int = None, + page_size: int = None, + fact_list: List[ValueType] = None, + ) -> Facts: """Return all facts from `cell`, might be ordered and paginated. `fact_list` is a list of fact keys to be selected. 
Might be used to @@ -336,9 +317,9 @@ def facts(self, statement: sa.Select labels: List[str] - (statement, labels) = self.denormalized_statement(cell=cell, - attributes=attrs, - include_fact_key=True) + (statement, labels) = self.denormalized_statement( + cell=cell, attributes=attrs, include_fact_key=True + ) if fact_list is not None: in_condition = self.star.fact_key_column.in_(fact_list) @@ -347,26 +328,25 @@ def facts(self, statement = paginate_query(statement, page, page_size) # TODO: use natural order - statement = order_query(statement, - order, - natural_order={}, - labels=labels) + statement = order_query(statement, order, natural_order={}, labels=labels) cursor = self.execute(statement, "facts") return Facts(ResultIterator(cursor, labels), attributes=labels) - def test(self, aggregate: bool=False) -> None: - """Tests whether the statement can be constructed and executed. Does - not return anything, but raises an exception if there are issues with - the generated statements. By default it tests only denormalized - statement by fetching one row. If `aggregate` is `True` then test also - aggregation.""" + def test(self, aggregate: bool = False) -> None: + """Tests whether the statement can be constructed and executed. + + Does not return anything, but raises an exception if there are + issues with the generated statements. By default it tests only + denormalized statement by fetching one row. If `aggregate` is + `True` then test also aggregation. 
+ """ statement: sa.Select (statement, _) = self.denormalized_statement( - attributes=self.cube.all_fact_attributes, - cell=Cell()) + attributes=self.cube.all_fact_attributes, cell=Cell() + ) statement = statement.limit(1) result = self.connectable.execute(statement) @@ -376,24 +356,24 @@ def test(self, aggregate: bool=False) -> None: aggs = self.cube.all_aggregate_attributes dd = Drilldown(cube=self.cube) - (statement, labels) = self.aggregation_statement(aggregates=aggs, - cell=Cell(), - drilldown=dd, - for_summary=True) + (statement, labels) = self.aggregation_statement( + aggregates=aggs, cell=Cell(), drilldown=dd, for_summary=True + ) result = self.connectable.execute(statement) result.close() - def provide_members(self, - cell: Cell, - dimension: Dimension, - depth: int=None, - hierarchy: Hierarchy=None, - levels: Collection[Level]=None, - attributes: Collection[AttributeBase]=None, - page: int=None, - page_size: int=None, - order: Optional[Collection[_OrderType]]=None) \ - -> Iterable[_RecordType]: + def provide_members( + self, + cell: Cell, + dimension: Dimension, + depth: int = None, + hierarchy: Hierarchy = None, + levels: Collection[Level] = None, + attributes: Collection[AttributeBase] = None, + page: int = None, + page_size: int = None, + order: Optional[Collection[_OrderType]] = None, + ) -> Iterable[_RecordType]: """Return values for `dimension` with level depth `depth`. If `depth` is ``None``, all levels are returned. 
@@ -415,21 +395,18 @@ def provide_members(self, # Order and paginate # statement = statement.group_by(*statement.columns) - statement = order_query(statement, - order, - labels=labels) + statement = order_query(statement, order, labels=labels) statement = paginate_query(statement, page, page_size) result = self.execute(statement, "members") return ResultIterator(result, labels) - def path_details(self, - dimension: Dimension, - path: HierarchyPath, - hierarchy: Hierarchy=None) -> Optional[_RecordType]: - """Returns details for `path` in `dimension`. Can be used for - multi-dimensional "breadcrumbs" in a used interface. + def path_details( + self, dimension: Dimension, path: HierarchyPath, hierarchy: Hierarchy = None + ) -> Optional[_RecordType]: + """Returns details for `path` in `dimension`. Can be used for multi- + dimensional "breadcrumbs" in a used interface. Number of SQL queries: 1. """ @@ -443,15 +420,15 @@ def path_details(self, attributes: List[AttributeBase] attributes = [] - for level in hierarchy.levels[0:len(path)]: + for level in hierarchy.levels[0 : len(path)]: attributes += level.attributes statement: sa.Select labels: List[str] - (statement, labels) = self.denormalized_statement(attributes, - cell, - include_fact_key=True) + (statement, labels) = self.denormalized_statement( + attributes, cell, include_fact_key=True + ) statement = statement.limit(1) cursor: sa.ResultProxy cursor = self.execute(statement, "path details") @@ -466,21 +443,24 @@ def path_details(self, return member - def execute(self, statement: sa.Select, label: str=None) \ - -> sa.ResultProxy: - """Execute the `statement`, optionally log it. Returns the result - cursor.""" + def execute(self, statement: sa.Select, label: str = None) -> sa.ResultProxy: + """Execute the `statement`, optionally log it. + + Returns the result cursor. 
+ """ self._log_statement(statement, label) return self.connectable.execute(statement) - def provide_aggregate(self, - cell: Cell, - aggregates: Collection[MeasureAggregate], - drilldown: Drilldown, - split: Cell=None, - order: Collection[_OrderType]=None, - page: int=None, - page_size: int=None) -> AggregationResult: + def provide_aggregate( + self, + cell: Cell, + aggregates: Collection[MeasureAggregate], + drilldown: Drilldown, + split: Cell = None, + order: Collection[_OrderType] = None, + page: int = None, + page_size: int = None, + ) -> AggregationResult: """Return aggregated result. Arguments: @@ -515,7 +495,6 @@ def provide_aggregate(self, Notes: * measures can be only in the fact table - """ cells: Optional[ResultIterator] = None @@ -528,11 +507,9 @@ def provide_aggregate(self, # ------- if self.include_summary or not (drilldown or split): - (statement, labels) = \ - self.aggregation_statement(cell, - aggregates=aggregates, - drilldown=drilldown, - for_summary=True) + (statement, labels) = self.aggregation_statement( + cell, aggregates=aggregates, drilldown=drilldown, for_summary=True + ) cursor = self.execute(statement, "aggregation summary") row = cursor.first() @@ -557,10 +534,9 @@ def provide_aggregate(self, self.logger.debug("preparing drilldown statement") - (statement, labels) = self.aggregation_statement(cell, - aggregates=aggregates, - drilldown=drilldown, - split=split) + (statement, labels) = self.aggregation_statement( + cell, aggregates=aggregates, drilldown=drilldown, split=split + ) # Get the total cell count before the pagination # if self.include_cell_count: @@ -570,10 +546,7 @@ def provide_aggregate(self, # Order and paginate # - statement = order_query(statement, - order, - natural_order, - labels=labels) + statement = order_query(statement, order, natural_order, labels=labels) statement = paginate_query(statement, page, page_size) cursor = self.execute(statement, "aggregation drilldown") @@ -581,53 +554,64 @@ def provide_aggregate(self, 
cells = ResultIterator(cursor, labels) labels = labels - # If exclude_null_aggregates is True then don't include cells where # at least one of the bult-in aggregates is NULL if cells is not None and self.exclude_null_agregates: - native_aggs = [agg.ref for agg in aggregates - if agg.function and self.is_builtin_function(agg.function)] + native_aggs = [ + agg.ref + for agg in aggregates + if agg.function and self.is_builtin_function(agg.function) + ] cells.exclude_if_null = native_aggs result = AggregationResult( - cube=self.cube, - cell=cell, - cells=cells or [], - labels=labels, - levels=levels, - aggregates=aggregates, - drilldown=drilldown, - summary=summary, - total_cell_count=total_cell_count, - has_split=split is not None) + cube=self.cube, + cell=cell, + cells=cells or [], + labels=labels, + levels=levels, + aggregates=aggregates, + drilldown=drilldown, + summary=summary, + total_cell_count=total_cell_count, + has_split=split is not None, + ) return result - def _create_context(self, attributes: Collection[AttributeBase]) \ - -> QueryContext: - """Create a query context for `attributes`. The `attributes` should - contain all attributes that will be somehow involved in the query.""" + def _create_context(self, attributes: Collection[AttributeBase]) -> QueryContext: + """Create a query context for `attributes`. + + The `attributes` should contain all attributes that will be + somehow involved in the query. 
+ """ collected = self.cube.collect_dependencies(attributes) - return QueryContext(self.star, - attributes=collected, - hierarchies=self.hierarchies, - safe_labels=self.safe_labels) - - def denormalized_statement(self, - attributes: Collection[AttributeBase], - cell: Cell, - include_fact_key: bool=False) -> Tuple[sa.Select, List[str]]: + return QueryContext( + self.star, + attributes=collected, + hierarchies=self.hierarchies, + safe_labels=self.safe_labels, + ) + + def denormalized_statement( + self, + attributes: Collection[AttributeBase], + cell: Cell, + include_fact_key: bool = False, + ) -> Tuple[sa.Select, List[str]]: """Returns a tuple (`statement`, `labels`) representing denormalized - star statement restricted by `cell`. If `attributes` is not specified, - then all cube's attributes are selected. The returned `labels` are - correct labels to be applied to the iterated result in case of - `safe_labels`.""" + star statement restricted by `cell`. + + If `attributes` is not specified, then all cube's attributes are + selected. The returned `labels` are correct labels to be applied + to the iterated result in case of `safe_labels`. + """ selection: List[sa.ColumnElement] cell_keys: Collection[AttributeBase] - cell_keys = cell.collect_key_attributes(self.cube) + cell_keys = cell.collect_key_attributes(self.cube) refs: List[str] refs = [attr.ref for attr in attributes] @@ -646,9 +630,9 @@ def denormalized_statement(self, cell_condition = context.condition_for_cell(cell) - statement = sa.select(selection, - from_obj=context.star, - whereclause=cell_condition) + statement = sa.select( + selection, from_obj=context.star, whereclause=cell_condition + ) return (statement, context.get_labels(statement.columns)) @@ -657,12 +641,14 @@ def denormalized_statement(self, # # This is the reason of our whole existence. 
# - def aggregation_statement(self, - cell: Cell, - aggregates: Collection[AttributeBase], - drilldown:Drilldown=None, - split:Cell=None, - for_summary:bool=False) -> Tuple[sa.Select, List[str]]: + def aggregation_statement( + self, + cell: Cell, + aggregates: Collection[AttributeBase], + drilldown: Drilldown = None, + split: Cell = None, + for_summary: bool = False, + ) -> Tuple[sa.Select, List[str]]: """Builds a statement to aggregate the `cell` and reutrns a tuple (`statement`, `labels`). `statement` is a SQLAlchemy statement object, `labels` is a list of attribute names selected in the statement. The @@ -687,16 +673,19 @@ def aggregation_statement(self, raise ArgumentError("List of aggregates should not be empty") if not isinstance(drilldown, Drilldown): - raise InternalError("Drilldown should be a Drilldown object. " - "Is '{}'".format(type(drilldown))) + raise InternalError( + "Drilldown should be a Drilldown object. " + "Is '{}'".format(type(drilldown)) + ) # 1. Gather attributes # - self.logger.debug("prepare aggregation statement. cell: '%s' " - "drilldown: '%s' for summary: %s" % - (",".join([str(cut) for cut in cell.cuts]), - drilldown, for_summary)) + self.logger.debug( + "prepare aggregation statement. 
cell: '%s' " + "drilldown: '%s' for summary: %s" + % (",".join([str(cut) for cut in cell.cuts]), drilldown, for_summary) + ) # TODO: it is verylikely that the _create_context is not getting all # attributes, for example those that aggregate depends on @@ -719,8 +708,7 @@ def aggregation_statement(self, # SELECT – Prepare the master selection # * master drilldown items - selection = context.get_columns([attr.ref for attr in - drilldown.all_attributes]) + selection = context.get_columns([attr.ref for attr in drilldown.all_attributes]) # SPLIT # ----- @@ -744,26 +732,25 @@ def aggregation_statement(self, else: selection += aggregate_cols - statement = sa.select(selection, - from_obj=context.star, - use_labels=True, - whereclause=condition, - group_by=group_by) + statement = sa.select( + selection, + from_obj=context.star, + use_labels=True, + whereclause=condition, + group_by=group_by, + ) return (statement, context.get_labels(statement.columns)) - def _log_statement(self, - statement: sa.Select, - label: str=None) -> None: + def _log_statement(self, statement: sa.Select, label: str = None) -> None: label = "SQL(%s):" % label if label else "SQL:" - self.logger.debug("%s\n%s\n" % (label, str(statement))) + self.logger.debug("{}\n{}\n".format(label, str(statement))) # TODO: Rename to batch result iterator class ResultIterator(Iterable[_RecordType]): - """ - Iterator that returns SQLAlchemy ResultProxy rows as dictionaries - """ + """Iterator that returns SQLAlchemy ResultProxy rows as dictionaries.""" + # FIXME: [typing] this is SA row proxy result: sa.ResultProxy # FIXME: [typing] this should be typing.Deque, but that does not seem to @@ -772,10 +759,12 @@ class ResultIterator(Iterable[_RecordType]): labels: List[str] exclude_if_null: List[str] - def __init__(self, - result: sa.ResultProxy, - labels: List[str], - exclude_if_null: List[str]=None) -> None: + def __init__( + self, + result: sa.ResultProxy, + labels: List[str], + exclude_if_null: List[str] = None, + ) -> 
None: self.result = result self.batch = collections.deque() self.labels = labels diff --git a/cubes/sql/expressions.py b/cubes/sql/expressions.py index 00f88eaa..1f97823a 100644 --- a/cubes/sql/expressions.py +++ b/cubes/sql/expressions.py @@ -1,69 +1,81 @@ # -*- coding=utf -*- -"""SQL Expression compiler""" +"""SQL Expression compiler.""" # The compiler is meant to be maintained in a similar way as the star schema # generator is – is to remain as much Cubes-independent as possible, just be a # low level module somewhere between SQLAlchemy and Cubes. -import sqlalchemy.sql as sql +from typing import Dict, List, Optional, Union +import sqlalchemy.sql as sql from expressions import Compiler -from .functions import get_aggregate_function +from expressions.compiler import Variable +from sqlalchemy.sql.elements import BinaryExpression, BindParameter +from sqlalchemy.sql.functions import _FunctionGenerator, min +from sqlalchemy.sql.schema import Column from ..errors import ExpressionError +from .functions import get_aggregate_function - -__all__ = [ - "SQLExpressionContext", - "compile_attributes", - "SQLExpressionCompiler", -] +__all__ = ["SQLExpressionContext", "compile_attributes", "SQLExpressionCompiler"] SQL_FUNCTIONS = [ # String - "lower", "upper", "left", "right", "substr", - "lpad", "rpad", "replace", - "concat", "repeat", "position", - + "lower", + "upper", + "left", + "right", + "substr", + "lpad", + "rpad", + "replace", + "concat", + "repeat", + "position", # Math - "round", "trunc", "floor", "ceil", - "mod", "remainder", + "round", + "trunc", + "floor", + "ceil", + "mod", + "remainder", "sign", - - "min", "max", - - "pow", "exp", "log", "log10", + "min", + "max", + "pow", + "exp", + "log", + "log10", "sqrt", - "cos", "sin", "tan", - + "cos", + "sin", + "tan", # Date/time "extract", - # Conditionals - "coalesce", "nullif", "case", - + "coalesce", + "nullif", + "case", ] # TODO: Add: lstrip, rstrip, strip -> trim # TODO: Add: like -SQL_AGGREGATE_FUNCTIONS = [ 
- "sum", "min", "max", "avg", "stddev", "variance", "count" -] +SQL_AGGREGATE_FUNCTIONS = ["sum", "min", "max", "avg", "stddev", "variance", "count"] -SQL_ALL_FUNCTIONS = SQL_FUNCTIONS + SQL_AGGREGATE_FUNCTIONS; +SQL_ALL_FUNCTIONS = SQL_FUNCTIONS + SQL_AGGREGATE_FUNCTIONS -SQL_VARIABLES = [ - "current_date", "current_time", "local_date", "local_time" -] +SQL_VARIABLES = ["current_date", "current_time", "local_date", "local_time"] -class SQLExpressionContext(object): +class SQLExpressionContext: """Context used for building a list of all columns to be used within a single SQL query.""" - def __init__(self, columns=None, parameters=None, label=None): + def __init__( + self, columns: Optional[Dict[str, Column]] = None, parameters=None, label=None + ) -> None: """Creates a SQL expression compiler context. * `bases` is a dictionary of base columns or column expressions @@ -85,7 +97,7 @@ def __init__(self, columns=None, parameters=None, label=None): def columns(self): return self._columns - def resolve(self, variable): + def resolve(self, variable: str) -> Union[Column, BinaryExpression]: """Resolve `variable` – return either a column, variable from a dictionary or a SQL constant (in that order).""" @@ -99,30 +111,33 @@ def resolve(self, variable): result = getattr(sql.func, variable)() else: - label = " in {}".format(self.label) if self.label else "" - raise ExpressionError("Unknown attribute, variable or parameter " - "'{}'{}" .format(variable, label)) + label = f" in {self.label}" if self.label else "" + raise ExpressionError( + "Unknown attribute, variable or parameter " + "'{}'{}".format(variable, label) + ) return result def __getitem__(self, item): return self.resolve(item) - def function(self, name): - """Return a SQL function""" + def function(self, name: str) -> _FunctionGenerator: + """Return a SQL function.""" if name not in SQL_ALL_FUNCTIONS: - raise ExpressionError("Unknown function '{}'" - .format(name)) + raise ExpressionError(f"Unknown function 
'{name}'") return getattr(sql.func, name) - def add_column(self, name, column): + def add_column(self, name: str, column: BinaryExpression) -> None: self._columns[name] = column -def compile_attributes(bases, dependants, parameters, coalesce=None, - label=None): - """Compile dependant attributes in `dependants`. `bases` is a dictionary - of base attributes and their column expressions.""" +def compile_attributes(bases, dependants, parameters, coalesce=None, label=None): + """Compile dependant attributes in `dependants`. + + `bases` is a dictionary of base attributes and their column + expressions. + """ context = SQLExpressionContext(bases, parameters, label=label) compiler = SQLExpressionCompiler() @@ -150,15 +165,21 @@ def compile_attributes(bases, dependants, parameters, coalesce=None, class SQLExpressionCompiler(Compiler): - def __init__(self, context=None): - super(SQLExpressionCompiler, self).__init__(context) - - def compile_literal(self, context, literal): - return sql.expression.bindparam("literal", - literal, - unique=True) - - def compile_binary(self, context, operator, op1, op2): + def __init__(self, context=None) -> None: + super().__init__(context) + + def compile_literal( + self, context: SQLExpressionContext, literal: Union[str, int] + ) -> BindParameter: + return sql.expression.bindparam("literal", literal, unique=True) + + def compile_binary( + self, + context: SQLExpressionContext, + operator: str, + op1: Union[Column, BinaryExpression, BindParameter], + op2: Union[BindParameter, Column], + ) -> BinaryExpression: if operator == "*": result = op1 * op2 elif operator == "/": @@ -194,18 +215,20 @@ def compile_binary(self, context, operator, op1, op2): return result - def compile_variable(self, context, variable): + def compile_variable( + self, context: SQLExpressionContext, variable: Variable + ) -> Union[Column, BinaryExpression]: name = variable.name result = context.resolve(name) return result def compile_unary(self, context, operator, operand): 
if operator == "-": - result = (- operand) + result = -operand elif operator == "+": - result = (+ operand) + result = +operand elif operator == "~": - result = (~ operand) + result = ~operand elif operator == "not": result = sql.expression.not_(operand) else: @@ -213,7 +236,11 @@ def compile_unary(self, context, operator, operand): return result - def compile_function(self, context, func, args): + def compile_function( + self, + context: SQLExpressionContext, + func: Variable, + args: List[Union[Column, BindParameter]], + ) -> min: func = context.function(func.name) return func(*args) - diff --git a/cubes/sql/functions.py b/cubes/sql/functions.py index e8549b17..f037e9ac 100644 --- a/cubes/sql/functions.py +++ b/cubes/sql/functions.py @@ -4,31 +4,17 @@ # called `formulas`) once implemented. There is no need for complexity of # this type. -from typing import ( - Any, - Callable, - Collection, - Dict, - List, - Optional, - Sequence, - ) - -from . import sqlalchemy as sa -from ..types import ValueType +from typing import Any, Callable, Collection, Dict, List, Optional, Sequence from ..errors import ModelError - from ..metadata.attributes import MeasureAggregate +from ..types import ValueType +from . 
import sqlalchemy as sa - -__all__ = ( - "get_aggregate_function", - "available_aggregate_functions" -) +__all__ = ("get_aggregate_function", "available_aggregate_functions") -class AggregateFunction(object): +class AggregateFunction: requires_measure = True # if `True` then on `coalesce` the values are coalesced to 0 before the @@ -39,34 +25,45 @@ class AggregateFunction(object): name: str function: Callable[[sa.ColumnElement], sa.ColumnElement] - def __init__(self, name_: str, - function_: Optional[Callable]=None) -> None: + def __init__(self, name_: str, function_: Optional[Callable] = None) -> None: self.name = name_ self.function = function_ # type: ignore - def __call__(self, aggregate: MeasureAggregate, context: Optional[Any], - coalesce: bool=False) -> sa.ColumnElement: + def __call__( + self, + aggregate: MeasureAggregate, + context: Optional[Any], + coalesce: bool = False, + ) -> sa.ColumnElement: """Applied the function on the aggregate and returns labelled - expression. SQL expression label is the aggregate's name. This method - calls `apply()` method which can be overriden by subclasses. + expression. + + SQL expression label is the aggregate's name. This method calls + `apply()` method which can be overriden by subclasses. """ expression = self.apply(aggregate, context, coalesce) expression = expression.label(aggregate.name) return expression - def coalesce_value(self, aggregate: MeasureAggregate, - value: sa.ColumnElement) -> sa.ColumnElement: - """Coalesce the value before aggregation of `aggregate`. `value` is a - SQLAlchemy expression. Default implementation does nothing, just - returns the `value`.""" + def coalesce_value( + self, aggregate: MeasureAggregate, value: sa.ColumnElement + ) -> sa.ColumnElement: + """Coalesce the value before aggregation of `aggregate`. + + `value` is a SQLAlchemy expression. Default implementation does + nothing, just returns the `value`. 
+ """ return value - def coalesce_aggregate(self, aggregate: MeasureAggregate, - value: sa.ColumnElement) -> sa.ColumnElement: - """Coalesce the aggregated value of `aggregate`. `value` is a - SQLAlchemy expression. Default implementation does nothing, just - returns the `value`.""" + def coalesce_aggregate( + self, aggregate: MeasureAggregate, value: sa.ColumnElement + ) -> sa.ColumnElement: + """Coalesce the aggregated value of `aggregate`. + + `value` is a SQLAlchemy expression. Default implementation does + nothing, just returns the `value`. + """ return value # FIXME: [2.0] Investigate necessity of this function and impact of tis @@ -81,20 +78,26 @@ def required_measures(self, aggregate: MeasureAggregate) -> Collection[str]: return [] # TODO: use dict of name:measure from required_measures instead of context - def apply(self, aggregate: MeasureAggregate, context: Optional[Any]=None, - coalesce:bool=False) -> sa.ColumnElement: + def apply( + self, + aggregate: MeasureAggregate, + context: Optional[Any] = None, + coalesce: bool = False, + ) -> sa.ColumnElement: """Apply the function on the aggregate. Subclasses might override this method and use other `aggregates` and browser context. If `missing_value` is not `None`, then the aggregate's source value should be wrapped in ``COALESCE(column, missing_value)``. - Returns a SQLAlchemy expression.""" + Returns a SQLAlchemy expression. 
+ """ if not aggregate.measure: - raise ModelError("No measure specified for aggregate %s, " - "required for aggregate function %s" - % (str(aggregate), self.name)) + raise ModelError( + "No measure specified for aggregate %s, " + "required for aggregate function %s" % (str(aggregate), self.name) + ) column = context[aggregate.measure] @@ -112,43 +115,64 @@ def apply(self, aggregate: MeasureAggregate, context: Optional[Any]=None, def __str__(self) -> str: return self.name + class ValueCoalescingFunction(AggregateFunction): - def coalesce_value(self, aggregate: MeasureAggregate, - value: sa.ColumnElement) -> sa.ColumnElement: - """Coalesce the value before aggregation of `aggregate`. `value` is a - SQLAlchemy expression. Default implementation coalesces to zero 0.""" + def coalesce_value( + self, aggregate: MeasureAggregate, value: sa.ColumnElement + ) -> sa.ColumnElement: + """Coalesce the value before aggregation of `aggregate`. + + `value` is a SQLAlchemy expression. Default implementation + coalesces to zero 0. + """ # TODO: use measure's missing value (we need to get the measure object # somehow) return sa.coalesce(value, 0) class SummaryCoalescingFunction(AggregateFunction): - def coalesce_aggregate(self, aggregate: MeasureAggregate, - value: sa.ColumnElement) -> sa.ColumnElement: - """Coalesce the aggregated value of `aggregate`. `value` is a - SQLAlchemy expression. Default implementation does nothing.""" + def coalesce_aggregate( + self, aggregate: MeasureAggregate, value: sa.ColumnElement + ) -> sa.ColumnElement: + """Coalesce the aggregated value of `aggregate`. + + `value` is a SQLAlchemy expression. Default implementation does + nothing. 
+ """ # TODO: use aggregates's missing value return sa.coalesce(value, 0) class GenerativeFunction(AggregateFunction): - def __init__(self, name: str, - function: Callable[[], sa.ColumnElement]=None) -> None: + def __init__( + self, name: str, function: Callable[[], sa.ColumnElement] = None + ) -> None: """Creates a function that generates a value without using any of the measures.""" - super(GenerativeFunction, self).__init__(name, function) - - def apply(self, aggregate: MeasureAggregate, context: Optional[Any]=None, - coalesce: bool=False) -> sa.ColumnElement: + super().__init__(name, function) + + def apply( + self, + aggregate: MeasureAggregate, + context: Optional[Any] = None, + coalesce: bool = False, + ) -> sa.ColumnElement: return self.function() # type: ignore class FactCountFunction(AggregateFunction): - """Creates a function that provides fact (record) counts. """ - def apply(self, aggregate: MeasureAggregate, - context: Optional[Any]=None, coalesce: bool=False) \ - -> sa.ColumnElement: - """Count only existing facts. Assumption: every facts has an ID""" + """Creates a function that provides fact (record) counts.""" + + def apply( + self, + aggregate: MeasureAggregate, + context: Optional[Any] = None, + coalesce: bool = False, + ) -> sa.ColumnElement: + """Count only existing facts. 
+ + Assumption: every facts has an ID + """ if coalesce: # FIXME: pass the fact column somehow more nicely, maybe in a map: @@ -163,7 +187,7 @@ class FactCountDistinctFunction(AggregateFunction): def __init__(self, name: str) -> None: """Creates a function that provides distinct fact (record) counts.""" function = lambda x: sa.count(sa.distinct(x)) - super(FactCountDistinctFunction, self).__init__(name, function) + super().__init__(name, function) class avg(sa.ReturnTypeFromArgs): @@ -189,7 +213,7 @@ class variance(sa.ReturnTypeFromArgs): ValueCoalescingFunction("max", sa.max), ValueCoalescingFunction("avg", avg), ValueCoalescingFunction("stddev", stddev), - ValueCoalescingFunction("variance", variance) + ValueCoalescingFunction("variance", variance), ] _function_dict: Dict[str, AggregateFunction] @@ -203,9 +227,12 @@ def _create_function_dict() -> None: def get_aggregate_function(name: str) -> AggregateFunction: - """Returns an aggregate function `name`. The returned function takes two + """Returns an aggregate function `name`. + + The returned function takes two arguments: `aggregate` and `context`. When called returns a labelled - SQL expression.""" + SQL expression. 
+ """ _create_function_dict() return _function_dict[name] @@ -215,4 +242,3 @@ def available_aggregate_functions() -> Collection[str]: """Returns a list of available aggregate function names.""" _create_function_dict() return _function_dict.keys() - diff --git a/cubes/sql/logging.py b/cubes/sql/logging.py index 42daf2ae..abc8804d 100644 --- a/cubes/sql/logging.py +++ b/cubes/sql/logging.py @@ -1,13 +1,24 @@ # -*- coding=utf -*- -from ..server.logging import RequestLogHandler, REQUEST_LOG_ITEMS -from sqlalchemy import create_engine, Table, MetaData, Column -from sqlalchemy import Integer, Sequence, DateTime, String, Float +import logging + +from sqlalchemy import ( + Column, + DateTime, + Float, + Integer, + MetaData, + Sequence, + String, + Table, + create_engine, +) from sqlalchemy.exc import NoSuchTableError + from ..query.drilldown import Drilldown +from ..server.logging import REQUEST_LOG_ITEMS, RequestLogHandler from .store import create_sqlalchemy_engine -import logging class SQLRequestLogHandler(RequestLogHandler, name="sql"): def __init__(self, url=None, table=None, dimensions_table=None, **options): @@ -17,29 +28,28 @@ def __init__(self, url=None, table=None, dimensions_table=None, **options): metadata = MetaData(bind=self.engine) - logging.getLogger('sqlalchemy.engine').setLevel("DEBUG") - logging.getLogger('sqlalchemy.pool').setLevel("DEBUG") + logging.getLogger("sqlalchemy.engine").setLevel("DEBUG") + logging.getLogger("sqlalchemy.pool").setLevel("DEBUG") try: self.table = Table(table, metadata, autoload=True) except NoSuchTableError: columns = [ - Column('id', Integer, Sequence(table+"_seq"), - primary_key=True), - Column('timestamp', DateTime), - Column('method', String(50)), - Column('cube', String(250)), - Column('cell', String(2000)), - Column('identity', String(250)), - Column('elapsed_time', Float), - Column('attributes', String(2000)), - Column('split', String(2000)), - Column('drilldown', String(2000)), - Column('page', Integer), - 
Column('page_size', Integer), - Column('format', String(50)), - Column('header', String(50)), + Column("id", Integer, Sequence(table + "_seq"), primary_key=True), + Column("timestamp", DateTime), + Column("method", String(50)), + Column("cube", String(250)), + Column("cell", String(2000)), + Column("identity", String(250)), + Column("elapsed_time", Float), + Column("attributes", String(2000)), + Column("split", String(2000)), + Column("drilldown", String(2000)), + Column("page", Integer), + Column("page_size", Integer), + Column("format", String(50)), + Column("header", String(50)), ] self.table = Table(table, metadata, extend_existing=True, *columns) @@ -57,17 +67,18 @@ def __init__(self, url=None, table=None, dimensions_table=None, **options): except NoSuchTableError: columns = [ - Column('id', Integer, Sequence(table+"_seq"), - primary_key=True), - Column('query_id', Integer), - Column('dimension', String(250)), - Column('hierarchy', String(250)), - Column('level', String(250)), - Column('used_as', String(50)), - Column('value', String(2000)), + Column("id", Integer, Sequence(table + "_seq"), primary_key=True), + Column("query_id", Integer), + Column("dimension", String(250)), + Column("hierarchy", String(250)), + Column("level", String(250)), + Column("used_as", String(50)), + Column("value", String(2000)), ] - self.dims_table = Table(dimensions_table, metadata, extend_existing=True, *columns) + self.dims_table = Table( + dimensions_table, metadata, extend_existing=True, *columns + ) self.dims_table.create() else: self.dims_table = None @@ -100,7 +111,7 @@ def write_record(self, cube, cell, record): dim = cube.dimension(cut.dimension) depth = cut.level_depth() if depth: - level = dim.hierarchy(cut.hierarchy)[depth-1] + level = dim.hierarchy(cut.hierarchy)[depth - 1] level_name = str(level) else: level_name = None @@ -111,7 +122,7 @@ def write_record(self, cube, cell, record): "hierarchy": str(cut.hierarchy), "level": str(level_name), "used_as": "cell", - 
"value": str(cut) + "value": str(cut), } uses.append(use) @@ -129,15 +140,13 @@ def write_record(self, cube, cell, record): "hierarchy": str(hier), "level": str(level), "used_as": "drilldown", - "value": None + "value": None, } uses.append(use) - if uses: insert = self.dims_table.insert().values(uses) connection.execute(insert) trans.commit() connection.close() - diff --git a/cubes/sql/mapper.py b/cubes/sql/mapper.py index 1c1dfa5a..2709c304 100644 --- a/cubes/sql/mapper.py +++ b/cubes/sql/mapper.py @@ -1,35 +1,26 @@ # -*- encoding: utf-8 -*- -"""Logical to Physical Mappers""" +"""Logical to Physical Mappers.""" import re - from typing import ( - Collection, - Dict, - List, - Mapping, - Optional, - Pattern, - Tuple, - Type, - TypeVar, - Union, - ) - -from collections import defaultdict - -from ..types import JSONType + Collection, + Dict, + List, + Mapping, + Optional, + Pattern, + Tuple, + Type, + TypeVar, + Union, +) from ..errors import ModelError -from ..datastructures import AttributeDict - -from ..metadata.physical import ColumnReference - -from ..metadata.cube import Cube from ..metadata.attributes import AttributeBase +from ..metadata.cube import Cube from ..metadata.dimension import Dimension - -from ..settings import Setting, SettingType +from ..metadata.physical import ColumnReference +from ..types import JSONType # Note about the future of this module: # @@ -41,18 +32,16 @@ __all__ = ( "distill_naming", "DEFAULT_KEY_FIELD", - "Mapper", "StarSchemaMapper", "DenormalizedMapper", - "map_base_attributes", ) DEFAULT_KEY_FIELD = "id" -DEFAULT_FACT_KEY = 'id' -DEFAULT_DIMENSION_KEY = 'id' +DEFAULT_FACT_KEY = "id" +DEFAULT_DIMENSION_KEY = "id" # Note: Only keys in this dictionary are allowed in the `naming` dictionary. # All other keys are ignored. 
@@ -64,17 +53,13 @@ "dimension_suffix": None, "dimension_key_prefix": None, "dimension_key_suffix": None, - "denormalized_prefix": None, "denormalized_suffix": None, - "aggregated_prefix": None, "aggregated_suffix": None, - "fact_key": DEFAULT_FACT_KEY, "dimension_key": DEFAULT_DIMENSION_KEY, "explicit_dimension_primary": False, - "schema": None, "fact_schema": None, "dimension_schema": None, @@ -86,22 +71,23 @@ # settings. NamingDict = Dict[str, Union[str, bool, None]] + # TODO: [typing][2.0] analyse whether this is still needed, looks lie Store is # using it -def distill_naming(dictionary: Dict[str,str]) -> NamingDict: +def distill_naming(dictionary: Dict[str, str]) -> NamingDict: """Distill only keys and values related to the naming conventions.""" - d = {key: value for key, value in dictionary.items() - if key in NAMING_DEFAULTS} + d = {key: value for key, value in dictionary.items() if key in NAMING_DEFAULTS} return d -def _match_names(pattern: Pattern, names: Collection[str]) \ - -> Collection[Tuple[str,str]]: +def _match_names( + pattern: Pattern, names: Collection[str] +) -> Collection[Tuple[str, str]]: """Match names to patterns and return a tuple of matching name with extracted value (stripped of suffix/prefix).""" - result: List[Tuple[str,str]] = [] + result: List[Tuple[str, str]] = [] for name in names: match = pattern.match(name) @@ -110,8 +96,10 @@ def _match_names(pattern: Pattern, names: Collection[str]) \ return result + T = TypeVar("T", Optional[str], Optional[bool]) + def _naming_default(naming: NamingDict, key: str) -> T: return naming.get(key, NAMING_DEFAULTS.get(key)) @@ -149,11 +137,12 @@ class Mapper: fact_name_pattern: Pattern dim_key_pattern: Pattern - - def __init__(self, cube: Cube, naming: NamingDict, - locale: str=None) -> None: + def __init__(self, cube: Cube, naming: NamingDict, locale: str = None) -> None: """Creates a mapping for `cube` using `naming` conventions within - optional `locale`. 
`naming` is a dictionary of naming conventions. """ + optional `locale`. + + `naming` is a dictionary of naming conventions. + """ self.cube = cube @@ -178,61 +167,68 @@ def __init__(self, cube: Cube, naming: NamingDict, self.dimension_schema = _naming_default(naming, "dimension_schema") self.aggregate_schema = _naming_default(naming, "aggregate_schema") - self.dim_name_pattern = re.compile("^{}(?P.*){}$" - .format(self.dimension_prefix or "", - self.dimension_suffix or "")) + self.dim_name_pattern = re.compile( + "^{}(?P.*){}$".format( + self.dimension_prefix or "", self.dimension_suffix or "" + ) + ) - self.fact_name_pattern = re.compile("^{}(?P.*){}$" - .format(self.fact_prefix or "", - self.fact_suffix or "")) + self.fact_name_pattern = re.compile( + "^{}(?P.*){}$".format(self.fact_prefix or "", self.fact_suffix or "") + ) - self.dim_key_pattern = re.compile("^{}(?P.*){}$" - .format(self.dimension_key_prefix or "", - self.dimension_key_suffix or "")) + self.dim_key_pattern = re.compile( + "^{}(?P.*){}$".format( + self.dimension_key_prefix or "", self.dimension_key_suffix or "" + ) + ) self.fact_name = cube.fact or self.fact_table_name(cube.name) def dimension_table_name(self, name: str) -> str: """Constructs a physical dimension table name for dimension `name`""" - table_name = "{}{}{}".format(self.dimension_prefix or "", - name, - self.dimension_suffix or "") + table_name = "{}{}{}".format( + self.dimension_prefix or "", name, self.dimension_suffix or "" + ) return table_name def fact_table_name(self, name: str) -> str: """Constructs a physical fact table name for fact/cube `name`""" - table_name = "{}{}{}".format(self.fact_prefix or "", - name, - self.fact_suffix or "") + table_name = "{}{}{}".format( + self.fact_prefix or "", name, self.fact_suffix or "" + ) return table_name # TODO: require list of dimensions here def aggregated_table_name(self, name: str) -> str: """Constructs a physical fact table name for fact/cube `name`""" - table_name = 
"{}{}{}".format(self.aggregated_prefix or "", - name, - self.aggregated_suffix or "") + table_name = "{}{}{}".format( + self.aggregated_prefix or "", name, self.aggregated_suffix or "" + ) return table_name - - def __getitem__(self, attribute: AttributeBase) -> ColumnReference: """Returns implicit physical column reference for `attribute`, which - should be an instance of :class:`cubes.model.Attribute`. If there is - no dimension specified in attribute, then fact table is assumed. The - returned reference has attributes `schema`, `table`, `column`, - `extract`. """ + should be an instance of :class:`cubes.model.Attribute`. + + If there is no dimension specified in attribute, then fact table + is assumed. The returned reference has attributes `schema`, + `table`, `column`, `extract`. + """ column_name = attribute.name if attribute.is_localizable(): - locale = self.locale if self.locale in attribute.locales \ - else attribute.locales[0] + locale = ( + self.locale + if self.locale in attribute.locales + else attribute.locales[0] + ) - column_name = "{}_{}".format(column_name, locale) + column_name = f"{column_name}_{locale}" schema, table = self.attribute_table(attribute) @@ -261,11 +257,14 @@ def attribute_table(self, attribute: AttributeBase) -> Tuple[Optional[str], str] def map_base_attributes(self) -> Mapping[str, ColumnReference]: """Map all base attributes of `cube` using mapping function `mapper`. + `naming` is a naming convention object. Returns a dictionary of - attribute references and their physical column references.""" + attribute references and their physical column references. 
+ """ - mapped = {attr.ref:self[attr] for attr in self.cube.all_attributes - if attr.is_base} + mapped = { + attr.ref: self[attr] for attr in self.cube.all_attributes if attr.is_base + } return mapped @@ -273,9 +272,10 @@ def map_base_attributes(self) -> Mapping[str, ColumnReference]: class DenormalizedMapper(Mapper): def __getitem__(self, attribute: AttributeBase) -> ColumnReference: if attribute.expression is not None: - raise ModelError("Attribute '{}' has an expression, it can not " - "have a direct physical representation" - .format(attribute.name)) + raise ModelError( + "Attribute '{}' has an expression, it can not " + "have a direct physical representation".format(attribute.name) + ) return super().__getitem__(attribute) @@ -299,9 +299,10 @@ def __getitem__(self, attribute: AttributeBase) -> ColumnReference: """ if attribute.expression is not None: - raise ModelError("Attribute '{}' has an expression, it can not " - "have a direct physical representation" - .format(attribute.name)) + raise ModelError( + "Attribute '{}' has an expression, it can not " + "have a direct physical representation".format(attribute.name) + ) # Fix locale: if attribute is not localized, use none, if it is # localized, then use specified if exists otherwise use default @@ -309,8 +310,11 @@ def __getitem__(self, attribute: AttributeBase) -> ColumnReference: locale: Optional[str] if attribute.is_localizable(): - locale = self.locale if self.locale in attribute.locales \ - else attribute.locales[0] + locale = ( + self.locale + if self.locale in attribute.locales + else attribute.locales[0] + ) else: locale = None @@ -324,6 +328,4 @@ def __getitem__(self, attribute: AttributeBase) -> ColumnReference: else: # No mappings exist or no mapping was found - we are going to # create default physical reference - return super(StarSchemaMapper, self).__getitem__(attribute) - - + return super().__getitem__(attribute) diff --git a/cubes/sql/query.py b/cubes/sql/query.py index 397da5aa..5ea4e5a5 
100644 --- a/cubes/sql/query.py +++ b/cubes/sql/query.py @@ -7,48 +7,39 @@ """ -from typing import ( - Any, - cast, - Callable, - Collection, - Dict, - List, - Mapping, - NamedTuple, - Optional, - Set, - Tuple, - Union, - ) - -from ..types import JSONType - -from . import sqlalchemy as sa - from logging import Logger, getLogger -from collections import namedtuple - +from typing import ( + Any, + Callable, + Collection, + Dict, + List, + Mapping, + NamedTuple, + Optional, + Set, + Tuple, +) + +from ..errors import ArgumentError, HierarchyError, InternalError, ModelError from ..metadata import object_dict -from ..metadata.dimension import HierarchyPath -from ..metadata.physical import ColumnReference, JoinKey, Join, JoinMethod from ..metadata.attributes import AttributeBase -from ..errors import InternalError, ModelError, ArgumentError, HierarchyError +from ..metadata.dimension import HierarchyPath +from ..metadata.physical import ColumnReference, Join, JoinKey, JoinMethod +from ..query.cells import Cell, Cut, PointCut, RangeCut, SetCut from ..query.constants import SPLIT_DIMENSION_NAME -from ..query.cells import Cell, Cut, PointCut, SetCut, RangeCut - +from . 
import sqlalchemy as sa from .expressions import compile_attributes - # Default label for all fact keys -FACT_KEY_LABEL = '__fact_key__' -DEFAULT_FACT_KEY = 'id' +FACT_KEY_LABEL = "__fact_key__" +DEFAULT_FACT_KEY = "id" # Attribute -> Column # IF attribute has no 'expression' then mapping is used # IF attribute has expression, the expression is used and underlying mappings -# +# # END OF FREE TYPES # -------------------------------------------------------------------------- @@ -57,6 +48,7 @@ class _TableKey(NamedTuple): schema: Optional[str] table: str + # FIXME: [typing] Move this to _TableKey in Python 3.6.1 as __str__ def _format_key(key: _TableKey) -> str: """Format table key `key` to a string.""" @@ -67,6 +59,7 @@ def _format_key(key: _TableKey) -> str: else: return table + # Internal table reference class _TableRef(NamedTuple): # Database schema @@ -82,18 +75,22 @@ class _TableRef(NamedTuple): # join which joins this table as a detail join: Optional[Join] + class SchemaError(InternalError): """Error related to the physical star schema.""" + pass class NoSuchTableError(SchemaError): """Error related to the physical star schema.""" + pass class NoSuchAttributeError(SchemaError): """Error related to the physical star schema.""" + pass @@ -166,7 +163,7 @@ class StarSchema: # FIXME: [typing] this should be ColumnExpression or some superclass of Col _columns: Dict[str, sa.ColumnElement] _tables: Dict[_TableKey, _TableRef] - + logger: Logger fact_name: str fact_table: sa.FromClause @@ -174,16 +171,18 @@ class StarSchema: # FIXME: [typing] change to SA expression (same as above) fact_key_column: sa.Column - def __init__(self, - label: str, - metadata: sa.MetaData, - # FIXME: [typing] This should be already prepared - mappings: Mapping[str, ColumnReference], - fact_name: str, - fact_key: Optional[str]=None, - joins: Optional[Collection[Join]]=None, - tables: Optional[Mapping[str, sa.FromClause]]=None, - schema: Optional[str]=None) -> None: + def __init__( + self, + 
label: str, + metadata: sa.MetaData, + # FIXME: [typing] This should be already prepared + mappings: Mapping[str, ColumnReference], + fact_name: str, + fact_key: Optional[str] = None, + joins: Optional[Collection[Join]] = None, + tables: Optional[Mapping[str, sa.FromClause]] = None, + schema: Optional[str] = None, + ) -> None: # TODO: expectation is, that the snowlfake is already localized, the # owner of the snowflake should generate one snowflake per locale. @@ -218,8 +217,10 @@ def __init__(self, try: self.fact_key_column = self.fact_table.columns[self.fact_key] except KeyError: - raise ModelError(f"Unknown column '{fact_key}' " - f"in fact table '{fact_name}' for '{label}'.") + raise ModelError( + f"Unknown column '{fact_key}' " + f"in fact table '{fact_name}' for '{label}'." + ) elif DEFAULT_FACT_KEY in self.fact_table.columns: self.fact_key_column = self.fact_table.columns[DEFAULT_FACT_KEY] else: @@ -249,13 +250,13 @@ def _collect_tables(self) -> None: # Collect the fact table as the root master table # fact_table = _TableRef( - schema=self.schema, - name=self.fact_name, - alias=self.fact_name, - key=_TableKey(self.schema, self.fact_name), - table=self.fact_table, - join=None - ) + schema=self.schema, + name=self.fact_name, + alias=self.fact_name, + key=_TableKey(self.schema, self.fact_name), + table=self.fact_table, + join=None, + ) self._tables[fact_table.key] = fact_table @@ -273,13 +274,14 @@ def _collect_tables(self) -> None: # just ask for the table if not join.detail.table: - raise ModelError("No detail table specified for a join in " - "schema '{}'. Master of the join is '{}'" - .format(self.label, - _format_key(self._master_key(join)))) + raise ModelError( + "No detail table specified for a join in " + "schema '{}'. 
Master of the join is '{}'".format( + self.label, _format_key(self._master_key(join)) + ) + ) - table = self.physical_table(join.detail.table, - join.detail.schema) + table = self.physical_table(join.detail.table, join.detail.schema) if join.alias: table = table.alias(join.alias) @@ -290,22 +292,24 @@ def _collect_tables(self) -> None: key = _TableKey(join.detail.schema or self.schema, alias) if key in seen: - raise ModelError("Detail table '{}' joined twice in star" - " schema {}. Join alias is required." - .format(_format_key(key), self.label)) + raise ModelError( + "Detail table '{}' joined twice in star" + " schema {}. Join alias is required.".format( + _format_key(key), self.label + ) + ) seen.add(key) self._tables[key] = _TableRef( - table=table, - schema=join.detail.schema, - name=join.detail.table, - alias=alias, - key=key, - join=join, - ) - + table=table, + schema=join.detail.schema, + name=join.detail.table, + alias=alias, + key=key, + join=join, + ) - def table(self, key: _TableKey, role: str=None) -> _TableRef: + def table(self, key: _TableKey, role: str = None) -> _TableRef: """Return a table reference for `key`. `schema` should be ``None`` for named table expressions, which take precedence before the physical tables in the default schema. If there is no named table expression @@ -325,7 +329,7 @@ def table(self, key: _TableKey, role: str=None) -> _TableRef: table, which role of the table was expected, such as master or detail. """ - assert(key is not None, "Table key should not be None") + assert key is not None, "Table key should not be None" key = _TableKey(key[0] or self.schema, key[1] or self.fact_name) @@ -333,17 +337,18 @@ def table(self, key: _TableKey, role: str=None) -> _TableRef: return self._tables[key] except KeyError: if role is not None: - role_str = " (as {})".format(role) + role_str = f" (as {role})" else: role_str = "" schema_str = f'"{key[0]}".' 
if key[0] is not None else "" - raise SchemaError(f"Unknown star table {schema_str}" - f"\"{key[1]}\"{role_str}. Missing join?") + raise SchemaError( + f"Unknown star table {schema_str}" + f'"{key[1]}"{role_str}. Missing join?' + ) - def physical_table(self, name: str, schema: Optional[str]=None) \ - -> sa.FromClause: + def physical_table(self, name: str, schema: Optional[str] = None) -> sa.FromClause: """Return a physical table or table expression by name, regardless whether it exists or not in the star.""" @@ -357,10 +362,9 @@ def physical_table(self, name: str, schema: Optional[str]=None) \ table: sa.Table try: - table = sa.Table(name, - self.metadata, - autoload=True, - schema=coalesced_schema) + table = sa.Table( + name, self.metadata, autoload=True, schema=coalesced_schema + ) except sa.NoSuchTableError: schema_str: str @@ -374,8 +378,9 @@ def physical_table(self, name: str, schema: Optional[str]=None) \ return table def column(self, logical: str) -> sa.ColumnElement: - """Return a column for `logical` reference. The returned column will - have a label same as the `logical`. + """Return a column for `logical` reference. + + The returned column will have a label same as the `logical`. 
""" # IMPORTANT # @@ -405,8 +410,9 @@ def column(self, logical: str) -> sa.ColumnElement: else: raise NoSuchAttributeError(logical) - table_key = _TableKey(column_ref.schema or self.schema, - column_ref.table or self.fact_name) + table_key = _TableKey( + column_ref.schema or self.schema, column_ref.table or self.fact_name + ) table = self.table(table_key).table @@ -415,9 +421,11 @@ def column(self, logical: str) -> sa.ColumnElement: except KeyError: avail: str avail = ", ".join(str(c) for c in table.columns) - raise SchemaError(f"Unknown column '{column_ref.column}' " - f"in table '{column_ref.table}' " - f"possible: {avail}") + raise SchemaError( + f"Unknown column '{column_ref.column}' " + f"in table '{column_ref.table}' " + f"possible: {avail}" + ) # Apply the `extract` operator/function on date field # @@ -436,12 +444,13 @@ def column(self, logical: str) -> sa.ColumnElement: return column def _master_key(self, join: Join) -> _TableKey: - """Generate join master key, use schema defaults""" - return _TableKey(join.master.schema or self.schema, - join.master.table or self.fact_name) + """Generate join master key, use schema defaults.""" + return _TableKey( + join.master.schema or self.schema, join.master.table or self.fact_name + ) def _detail_key(self, join: Join) -> _TableKey: - """Generate join detail key, use schema defaults""" + """Generate join detail key, use schema defaults.""" # Note: we don't include fact as detail table by default. Fact can not # be detail (at least for now, we don't have a case where it could be) detail_table: str @@ -454,8 +463,9 @@ def _detail_key(self, join: Join) -> _TableKey: def required_tables(self, attributes: Collection[str]) -> List[_TableRef]: """Get all tables that are required to be joined to get `attributes`. - `attributes` is a list of `StarSchema` attributes (or objects with - same kind of attributes). + + `attributes` is a list of `StarSchema` attributes (or objects + with same kind of attributes). 
""" # Attribute: (schema, table, column) @@ -472,8 +482,7 @@ def required_tables(self, attributes: Collection[str]) -> List[_TableRef]: # FIXME: [typing] We need to resolve this non-optional # ColumnReference.table. See also: column() method of this class. relevant: Set[_TableRef] - relevant = set(self.table(_TableKey(ref.schema, ref.table)) - for ref in column_refs) + relevant = {self.table(_TableKey(ref.schema, ref.table)) for ref in column_refs} # Dependencies # ------------ @@ -517,9 +526,11 @@ def required_tables(self, attributes: Collection[str]) -> List[_TableRef]: sorted_tables = [fact] while required: - details = [table for table in required.values() - if table.join is not None - and self._master_key(table.join) in masters] + details = [ + table + for table in required.values() + if table.join is not None and self._master_key(table.join) in masters + ] if not details: break @@ -533,9 +544,11 @@ def required_tables(self, attributes: Collection[str]) -> List[_TableRef]: # We should end up with only one table in the list, all of them should # be consumed by joins. if len(required) > 1: - keys = [_format_key(table.key) - for table in required.values() - if table.key != fact_key] + keys = [ + _format_key(table.key) + for table in required.values() + if table.key != fact_key + ] joined_str = ", ".join(keys) raise ModelError(f"Some tables are not joined: {joined_str}") @@ -546,10 +559,9 @@ def required_tables(self, attributes: Collection[str]) -> List[_TableRef]: # ========================== def get_star(self, attributes: Collection[str]) -> sa.FromClause: - """The main method for generating underlying star schema joins. - Returns a denormalized JOIN expression that includes all relevant - tables containing base `attributes` (attributes representing actual - columns). + """The main method for generating underlying star schema joins. 
Returns + a denormalized JOIN expression that includes all relevant tables + containing base `attributes` (attributes representing actual columns). Example use: @@ -594,8 +606,9 @@ def get_star(self, attributes: Collection[str]) -> sa.FromClause: join: Join if table.join is None: - raise ModelError("Missing join for table '{}'" - .format(_format_key(table.key))) + raise ModelError( + "Missing join for table '{}'".format(_format_key(table.key)) + ) else: join = table.join @@ -617,41 +630,44 @@ def get_star(self, attributes: Collection[str]) -> sa.FromClause: master_table = self.table(master_key).table try: - master_columns = [master_table.columns[name] - for name in master.columns] + master_columns = [master_table.columns[name] for name in master.columns] except KeyError as e: - raise ModelError('Unable to find master key column "{key}" ' - 'in table "{table}" for star {schema} ' - .format(schema=self.label, - key=e, - table=_format_key(master_key))) + raise ModelError( + 'Unable to find master key column "{key}" ' + 'in table "{table}" for star {schema} '.format( + schema=self.label, key=e, table=_format_key(master_key) + ) + ) # Detail table.column # ------------------- try: - detail_columns = [detail_table.columns[name] - for name in join.detail.columns] + detail_columns = [ + detail_table.columns[name] for name in join.detail.columns + ] except KeyError as e: - raise ModelError('Unable to find detail key column "{key}" ' - 'in table "{table}" for star {schema} ' - .format(schema=self.label, - key=e, - table=_format_key(detail_key))) + raise ModelError( + 'Unable to find detail key column "{key}" ' + 'in table "{table}" for star {schema} '.format( + schema=self.label, key=e, table=_format_key(detail_key) + ) + ) if len(master_columns) != len(detail_columns): - raise ModelError("Compound keys for master '{}' and detail " - "'{}' table in star {} have different number" - " of columns" - .format(_format_key(master_key), - _format_key(detail_key), - self.label)) + 
raise ModelError( + "Compound keys for master '{}' and detail " + "'{}' table in star {} have different number" + " of columns".format( + _format_key(master_key), _format_key(detail_key), self.label + ) + ) # The JOIN ON condition # --------------------- key_conditions: List[sa.ColumnElement] - key_conditions = [left == right - for left, right - in zip(master_columns, detail_columns)] + key_conditions = [ + left == right for left, right in zip(master_columns, detail_columns) + ] onclause: sa.ColumnElement onclause = sa.and_(*key_conditions) @@ -676,8 +692,11 @@ def get_star(self, attributes: Collection[str]) -> sa.FromClause: # Consume the detail if detail_key not in star_tables: - raise ModelError("Detail table '{}' not in star. Missing join?" - .format(_format_key(detail_key))) + raise ModelError( + "Detail table '{}' not in star. Missing join?".format( + _format_key(detail_key) + ) + ) # The table is consumed by the join product, becomes the join # product itself. @@ -691,6 +710,7 @@ def get_star(self, attributes: Collection[str]) -> sa.FromClause: _WildHierarchyKeyType = Tuple[str, Optional[str]] _WildHierarchyDictType = Dict[_WildHierarchyKeyType, List[str]] + class QueryContext: """Context for execution of a query with given set of attributes and underlying star schema. The context is used for providing columns for @@ -714,13 +734,15 @@ class QueryContext: _columns: Dict[str, sa.ColumnElement] # FIXME: Rename to label_to_attribute or label_attr_map label_attributes: Dict[str, str] - + # TODO: Pass parameters here - def __init__(self, - star_schema: StarSchema, - attributes: Collection[AttributeBase] , - hierarchies: Optional[_WildHierarchyDictType]=None, - safe_labels: Optional[bool]=False) -> None: + def __init__( + self, + star_schema: StarSchema, + attributes: Collection[AttributeBase], + hierarchies: Optional[_WildHierarchyDictType] = None, + safe_labels: Optional[bool] = False, + ) -> None: """Creates a query context for `cube`. 
* `attributes` – list of all attributes that are relevant to the @@ -742,7 +764,6 @@ def __init__(self, Note: in the future the `hierarchies` dictionary might change just to a hierarchy name (a string), since hierarchies and dimensions will be both top-level objects. - """ # Note on why attributes have to be sorted: We don'd have enough @@ -772,10 +793,9 @@ def __init__(self, bases[FACT_KEY_LABEL] = self.star_schema.fact_key_column # FIXME: [typing] correct the type once sql.expressions are annotated - self._columns = compile_attributes(bases=bases, - dependants=dependants, - parameters=None, - label=star_schema.label) # type: ignore + self._columns = compile_attributes( + bases=bases, dependants=dependants, parameters=None, label=star_schema.label + ) # type: ignore self.label_attributes = {} if self.safe_labels: @@ -795,11 +815,13 @@ def __init__(self, self.label_attributes[attr.ref] = attr.ref def column(self, ref: str) -> sa.ColumnElement: - """Get a column expression for attribute with reference `ref`. Column - has the same label as the attribute reference, unless `safe_labels` is - provided to the query context. If `safe_labels` translation is - provided, then the column has label according to the translation - dictionary.""" + """Get a column expression for attribute with reference `ref`. + + Column has the same label as the attribute reference, unless + `safe_labels` is provided to the query context. If `safe_labels` + translation is provided, then the column has label according to + the translation dictionary. + """ try: return self._columns[ref] @@ -807,37 +829,46 @@ def column(self, ref: str) -> sa.ColumnElement: # This should not happen under normal circumstances. If this # exception is raised, it very likely means that the owner of the # query contexts forgot to do something. - raise InternalError("Missing column '{}'. 
Query context not " - "properly initialized or dependencies were " - "not correctly ordered?".format(ref)) + raise InternalError( + "Missing column '{}'. Query context not " + "properly initialized or dependencies were " + "not correctly ordered?".format(ref) + ) def get_labels(self, columns: Collection[sa.ColumnElement]) -> List[str]: - """Returns real attribute labels for columns `columns`. It is highly - recommended that the owner of the context uses this method before - iterating over statement result.""" + """Returns real attribute labels for columns `columns`. + + It is highly recommended that the owner of the context uses this + method before iterating over statement result. + """ if self.safe_labels: - return [self.label_attributes.get(column.name, column.name) - for column in columns] + return [ + self.label_attributes.get(column.name, column.name) + for column in columns + ] else: return [col.name for col in columns] def get_columns(self, refs: Collection[str]) -> List[sa.ColumnElement]: - """Get columns for attribute references `refs`. """ + """Get columns for attribute references `refs`.""" return [self._columns[ref] for ref in refs] def condition_for_cell(self, cell: Cell) -> sa.ColumnElement: - """Returns a condition for cell `cell`. If cell is empty or cell is - `None` then returns `None`.""" + """Returns a condition for cell `cell`. + + If cell is empty or cell is `None` then returns `None`. + """ condition = sa.and_(*self.conditions_for_cuts(cell.cuts)) return condition def conditions_for_cuts(self, cuts: List[Cut]) -> List[sa.ColumnElement]: - """Constructs conditions for all cuts in the `cell`. Returns a list of - SQL conditional expressions. + """Constructs conditions for all cuts in the `cell`. + + Returns a list of SQL conditional expressions. 
""" conditions: List[sa.ColumnElement] @@ -847,19 +878,17 @@ def conditions_for_cuts(self, cuts: List[Cut]) -> List[sa.ColumnElement]: for cut in cuts: if isinstance(cut, PointCut): path = cut.path - condition = self.condition_for_point(cut.dimension, - path, - cut.hierarchy, - cut.invert) + condition = self.condition_for_point( + cut.dimension, path, cut.hierarchy, cut.invert + ) elif isinstance(cut, SetCut): set_conds: List[sa.ColumnElement] = [] for path in cut.paths: - condition = self.condition_for_point(cut.dimension, - path, - cut.hierarchy, - invert=False) + condition = self.condition_for_point( + cut.dimension, path, cut.hierarchy, invert=False + ) set_conds.append(condition) condition = sa.or_(*set_conds) @@ -868,10 +897,9 @@ def conditions_for_cuts(self, cuts: List[Cut]) -> List[sa.ColumnElement]: condition = sa.not_(condition) elif isinstance(cut, RangeCut): - condition = self.range_condition(cut.dimension, - cut.hierarchy, - cut.from_path, - cut.to_path, cut.invert) + condition = self.range_condition( + cut.dimension, cut.hierarchy, cut.from_path, cut.to_path, cut.invert + ) else: raise ArgumentError("Unknown cut type %s" % type(cut)) @@ -880,15 +908,19 @@ def conditions_for_cuts(self, cuts: List[Cut]) -> List[sa.ColumnElement]: return conditions - def condition_for_point(self, - dim: str, - path: HierarchyPath, - hierarchy: Optional[str]=None, - invert: bool=False) -> sa.ColumnElement: - """Returns a `Condition` tuple (`attributes`, `conditions`, - `group_by`) dimension `dim` point at `path`. It is a compound + def condition_for_point( + self, + dim: str, + path: HierarchyPath, + hierarchy: Optional[str] = None, + invert: bool = False, + ) -> sa.ColumnElement: + """Returns a `Condition` tuple (`attributes`, `conditions`, `group_by`) + dimension `dim` point at `path`. It is a compound. 
+ condition - one equality condition for each path element in form: - ``level[i].key = path[i]``""" + ``level[i].key = path[i]`` + """ conditions: List[sa.ColumnElement] conditions = [] @@ -908,17 +940,23 @@ def condition_for_point(self, return condition - def range_condition(self, - dim: str, - hierarchy: Optional[str], - from_path: Optional[HierarchyPath], - to_path: Optional[HierarchyPath], - invert: bool=False) -> sa.ColumnElement: + def range_condition( + self, + dim: str, + hierarchy: Optional[str], + from_path: Optional[HierarchyPath], + to_path: Optional[HierarchyPath], + invert: bool = False, + ) -> sa.ColumnElement: """Return a condition for a hierarchical range (`from_path`, - `to_path`). Return value is a `Condition` tuple.""" + `to_path`). - assert(from_path is not None or to_path is not None, - "Range cut must have at least one boundary") + Return value is a `Condition` tuple. + """ + + assert ( + from_path is not None or to_path is not None + ), "Range cut must have at least one boundary" conditions: List[sa.ColumnElement] conditions = [] @@ -944,16 +982,20 @@ def range_condition(self, return condition - def _boundary_condition(self, - dim: str, - hierarchy: Optional[str], - path: Optional[HierarchyPath], - bound: int, - first: bool=True) -> Optional[sa.ColumnElement]: - """Return a `Condition` tuple for a boundary condition. If `bound` is - 1 then path is considered to be upper bound (operators < and <= are - used), otherwise path is considered as lower bound (operators > and >= - are used )""" + def _boundary_condition( + self, + dim: str, + hierarchy: Optional[str], + path: Optional[HierarchyPath], + bound: int, + first: bool = True, + ) -> Optional[sa.ColumnElement]: + """Return a `Condition` tuple for a boundary condition. 
+ + If `bound` is 1 then path is considered to be upper bound + (operators < and <= are used), otherwise path is considered as + lower bound (operators > and >= are used ) + """ # TODO: make this non-recursive column: sa.ColumnElement @@ -961,8 +1003,7 @@ def _boundary_condition(self, if not path: return None - last = self._boundary_condition(dim, hierarchy, path[:-1], bound, - first=False) + last = self._boundary_condition(dim, hierarchy, path[:-1], bound, first=False) levels = self.level_keys(dim, hierarchy, path) @@ -992,12 +1033,11 @@ def _boundary_condition(self, return condition - def level_keys(self, - dimension: str, - hierarchy: Optional[str], - path: HierarchyPath) -> List[str]: - """Return list of key attributes of levels for `path` in `hierarchy` - of `dimension`.""" + def level_keys( + self, dimension: str, hierarchy: Optional[str], path: HierarchyPath + ) -> List[str]: + """Return list of key attributes of levels for `path` in `hierarchy` of + `dimension`.""" # Note: If something does not work here, make sure that hierarchies # contains "default hierarchy", that is (dimension, None) tuple. @@ -1006,27 +1046,28 @@ def level_keys(self, try: levels = self.hierarchies[(str(dimension), hierarchy)] except KeyError as e: - raise InternalError("Unknown hierarchy '{}'. Hierarchies are " - "not properly initialized (maybe missing " - "default?)".format(e)) + raise InternalError( + "Unknown hierarchy '{}'. Hierarchies are " + "not properly initialized (maybe missing " + "default?)".format(e) + ) depth = 0 if not path else len(path) if depth > len(levels): levels_str = ", ".join(levels) - raise HierarchyError("Path '{}' is longer than hierarchy. " - "Levels: {}".format(path, levels)) + raise HierarchyError( + f"Path '{path}' is longer than hierarchy. 
Levels: {levels}" + ) return levels[0:depth] - def column_for_split(self, split_cell: Cell, label: str=None) \ - -> sa.ColumnElement: + def column_for_split(self, split_cell: Cell, label: str = None) -> sa.ColumnElement: """Create a column for a cell split from list of `cust`.""" condition: sa.ColumnElement condition = self.condition_for_cell(split_cell) - split_column = sa.case([(condition, True)], - else_=False) + split_column = sa.case([(condition, True)], else_=False) label = label or SPLIT_DIMENSION_NAME diff --git a/cubes/sql/sqlalchemy.py b/cubes/sql/sqlalchemy.py index 1fb873f5..8722f51f 100644 --- a/cubes/sql/sqlalchemy.py +++ b/cubes/sql/sqlalchemy.py @@ -1,18 +1,9 @@ -"""Aliases for SQL/SQLAlchemy objects that are assured to be correctly -type-checked.""" - -from typing import ( - Any, - Iterable, - List, - Mapping, - TYPE_CHECKING, - Tuple, - Union, - ) +"""Aliases for SQL/SQLAlchemy objects that are assured to be correctly type- +checked.""" -import sqlalchemy +from typing import TYPE_CHECKING, Any, Iterable, List, Mapping, Tuple, Union +import sqlalchemy # Engine # ====== diff --git a/cubes/sql/store.py b/cubes/sql/store.py index ab0153bb..ba2462d4 100644 --- a/cubes/sql/store.py +++ b/cubes/sql/store.py @@ -1,30 +1,22 @@ # -*- encoding=utf -*- -from . 
import sqlalchemy as sa - from typing import Any, Optional -from ..types import OptionsType, OptionValue, JSONType -from typing import Any -from ..types import OptionsType, OptionValue, JSONType - -from .browser import SQLBrowser -from .mapper import distill_naming, NamingDict -from ..logging import get_logger from ..common import coalesce_options -from ..stores import Store -from ..errors import ArgumentError, StoreError, ConfigurationError -from ..query.drilldown import Drilldown -from ..query.cells import Cell -from .utils import CreateTableAsSelect, CreateOrReplaceView +from ..errors import ArgumentError, ConfigurationError, StoreError +from ..logging import get_logger from ..metadata import string_to_dimension_level +from ..query.cells import Cell +from ..query.drilldown import Drilldown from ..settings import Setting, SettingType +from ..stores import Store +from ..types import OptionsType, OptionValue +from . import sqlalchemy as sa +from .browser import SQLBrowser +from .mapper import distill_naming +from .utils import CreateOrReplaceView, CreateTableAsSelect - -__all__ = [ - "sqlalchemy_options", - "SQLStore" -] +__all__ = ["sqlalchemy_options", "SQLStore"] # Data types of options passed to sqlalchemy.create_engine @@ -41,7 +33,7 @@ "pool_size": "int", "pool_recycle": "int", "pool_timeout": "int", - "supports_unicode_binds": "bool" + "supports_unicode_binds": "bool", } # Data types of options passed to the workspace, browser and mapper @@ -50,14 +42,17 @@ "include_summary": "bool", "include_cell_count": "bool", "use_denormalization": "bool", - "safe_labels": "bool" + "safe_labels": "bool", } def sqlalchemy_options(options, prefix="sqlalchemy_"): - """Return converted `options` to match SQLAlchemy create_engine options - and their types. The `options` are expected to have prefix - ``sqlalchemy_``, which will be removed.""" + """Return converted `options` to match SQLAlchemy create_engine options and + their types. 
+ + The `options` are expected to have prefix ``sqlalchemy_``, which + will be removed. + """ sa_keys = [key for key in options.keys() if key.startswith(prefix)] sa_options = {} @@ -73,7 +68,7 @@ class SQLStore(Store, name="sql"): default_browser_name = "sql" extension_label = "SQL Store" - extension_desc =""" + extension_desc = """ Relational database store. Supported database engines: firebird, mssql, mysql, oracle, postgresql, @@ -110,13 +105,14 @@ class SQLStore(Store, name="sql"): options: OptionsType schema: Optional[str] - def __init__(self, - url: str=None, - engine: sa.Engine=None, - metadata: sa.MetaData=None, - **options: OptionValue) -> None: - """ - The options are: + def __init__( + self, + url: str = None, + engine: sa.Engine = None, + metadata: sa.MetaData = None, + **options: OptionValue, + ) -> None: + """The options are: Required (one of the two, `engine` takes precedence): @@ -151,11 +147,12 @@ def __init__(self, located (use this if the views are in different schema than fact tables, otherwise default schema is going to be used) """ - super(SQLStore, self).__init__(**options) + super().__init__(**options) if not engine and not url: - raise ConfigurationError("No URL or engine specified in options, " - "provide at least one") + raise ConfigurationError( + "No URL or engine specified in options, provide at least one" + ) if engine and url: raise ConfigurationError("Both engine and URL specified. Use only one.") @@ -182,21 +179,23 @@ def __init__(self, if metadata: self.metadata = metadata else: - self.metadata = sa.MetaData(bind=self.connectable, - schema=self.schema) + self.metadata = sa.MetaData(bind=self.connectable, schema=self.schema) # TODO: make a separate SQL utils function def _drop_table(self, table, schema, force=False): - """Drops `table` in `schema`. If table exists, exception is raised - unless `force` is ``True``""" + """Drops `table` in `schema`. 
+ + If table exists, exception is raised unless `force` is ``True`` + """ view_name = str(table) preparer = self.connectable.dialect.preparer(self.connectable.dialect) full_name = preparer.format_table(table) if table.exists() and not force: - raise StoreError("View or table %s (schema: %s) already exists." % \ - (view_name, schema)) + raise StoreError( + f"View or table {view_name} (schema: {schema}) already exists." + ) inspector = sa.engine.reflection.Inspector.from_engine(self.connectable) view_names = inspector.get_view_names(schema=schema) @@ -225,7 +224,6 @@ def validate(self, cube): * ``no_table`` - there is no table for attribute * ``no_column`` - there is no column for attribute * ``duplicity`` - attribute is found more than once - """ issues = [] @@ -236,7 +234,7 @@ def validate(self, cube): alias_map = {} # for join in cube.joins: - self.logger.debug("join: %s" % (join, )) + self.logger.debug(f"join: {join}") if not join.master.column: issues.append(("join", "master column not specified", join)) @@ -252,15 +250,22 @@ def validate(self, cube): detail_alias = (join.detail.schema, join.alias or join.detail.table) if detail_alias in aliases: - issues.append(("join", "duplicate detail table %s" % detail_table, join)) + issues.append( + ("join", "duplicate detail table %s" % detail_table, join) + ) else: aliases.add(detail_alias) alias_map[detail_alias] = detail_table if detail_table in tables and not join.alias: - issues.append(("join", "duplicate detail table %s (no alias specified)" - % detail_table, join)) + issues.append( + ( + "join", + "duplicate detail table %s (no alias specified)" % detail_table, + join, + ) + ) else: tables.add(detail_table) @@ -268,20 +273,27 @@ def validate(self, cube): physical_tables = {} # Add fact table to support simple attributes - physical_tables[(self.fact_table.schema, self.fact_table.name)] = self.fact_table + physical_tables[ + (self.fact_table.schema, self.fact_table.name) + ] = self.fact_table for table in tables: 
try: - physical_table = sa.Table(table[1], self.metadata, - autoload=True, - schema=table[0] or self.mapper.schema) - physical_tables[(table[0] or self.mapper.schema, table[1])] = physical_table + physical_table = sa.Table( + table[1], + self.metadata, + autoload=True, + schema=table[0] or self.mapper.schema, + ) + physical_tables[ + (table[0] or self.mapper.schema, table[1]) + ] = physical_table except sa.exc.NoSuchTableError: issues.append(("join", "table %s.%s does not exist" % table, join)) # check attributes base = base_attributes(cube.all_fact_attributes) - mappings = {attr.name:mapper.physical(attr) for attr in base} + mappings = {attr.name: mapper.physical(attr) for attr in base} for attr, ref in mappings.items: alias_ref = (ref.schema, ref.table) @@ -290,30 +302,41 @@ def validate(self, cube): if table is None: logical = attr.localized_ref() - issues.append(( - "attribute", - "table {}.{} does not exist for attribute {}" - .format(table_ref[0], table_ref[1], logical, attr))) + issues.append( + ( + "attribute", + "table {}.{} does not exist for attribute {}".format( + table_ref[0], table_ref[1], logical, attr + ), + ) + ) else: try: c = table.c[ref.column] except KeyError: logical = attr.localized_ref() - issues.append(( - "attribute", - "column {}.{}.{} does not exist for attribute {}" - .format(table_ref[0], - table_ref[1], - ref.column, - logical, - attr))) + issues.append( + ( + "attribute", + "column {}.{}.{} does not exist for attribute {}".format( + table_ref[0], table_ref[1], ref.column, logical, attr + ), + ) + ) return issues - # FIXME: This should be broken between - def create_denormalized_view(self, cube, view_name=None, materialize=False, - replace=False, create_index=False, - keys_only=False, schema=None): + # FIXME: This should be broken between + def create_denormalized_view( + self, + cube, + view_name=None, + materialize=False, + replace=False, + create_index=False, + keys_only=False, + schema=None, + ): """Creates a denormalized view 
named `view_name` of a `cube`. If `view_name` is ``None`` then view name is constructed by pre-pending value of `denormalized_view_prefix` from workspace options to the cube @@ -341,14 +364,16 @@ def create_denormalized_view(self, cube, view_name=None, materialize=False, browser = SQLBrowser(cube, self, schema=schema) if browser.safe_labels: - raise ConfigurationError("Denormalization does not work with " - "safe_labels turned on") + raise ConfigurationError( + "Denormalization does not work with safe_labels turned on" + ) # Note: this does not work with safe labels – since they are "safe" # they can not conform to the cubes implicit naming schema dim.attr - (statement, _) = browser.denormalized_statement(attributes, - include_fact_key=True) + (statement, _) = browser.denormalized_statement( + attributes, include_fact_key=True + ) mapper = browser.mapper schema = schema or self.naming.get("schema") @@ -357,10 +382,11 @@ def create_denormalized_view(self, cube, view_name=None, materialize=False, fact_name = cube.fact or mapper.fact_table_name(cube.name) if fact_name == view_name and schema == mapper.schema: - raise StoreError("target denormalized view is the same as source fact table") + raise StoreError( + "target denormalized view is the same as source fact table" + ) - table = sa.Table(view_name, self.metadata, - autoload=False, schema=schema) + table = sa.Table(view_name, self.metadata, autoload=False, schema=schema) if table.exists(): self._drop_table(table, schema, force=replace) @@ -371,13 +397,14 @@ def create_denormalized_view(self, cube, view_name=None, materialize=False, else: create_view = CreateOrReplaceView(table, statement) - self.logger.info("creating denormalized view %s (materialized: %s)" \ - % (str(table), materialize)) + self.logger.info( + "creating denormalized view %s (materialized: %s)" + % (str(table), materialize) + ) # print("SQL statement:\n%s" % statement) self.execute(create_view) if create_index: - table = sa.Table(view_name, 
self.metadata, - autoload=True, schema=schema) + table = sa.Table(view_name, self.metadata, autoload=True, schema=schema) insp = reflection.Inspector.from_engine(engine) insp.reflecttable(table, None) @@ -386,7 +413,7 @@ def create_denormalized_view(self, cube, view_name=None, materialize=False, label = attribute.ref self.logger.info("creating index for %s" % label) column = table.c[label] - name = "idx_%s_%s" % (view_name, label) + name = f"idx_{view_name}_{label}" index = sa.schema.Index(name, column) index.create(self.connectable) @@ -410,7 +437,6 @@ def validate_model(self) -> None: * ``no_table`` - there is no table for attribute * ``no_column`` - there is no column for attribute * ``duplicity`` - attribute is found more than once - """ issues = [] @@ -440,8 +466,9 @@ def validate_model(self) -> None: * UNIQUE level key: join might be based on level key """ - def create_conformed_rollup(self, cube, dimension, level=None, hierarchy=None, - replace=False, **options): + def create_conformed_rollup( + self, cube, dimension, level=None, hierarchy=None, replace=False, **options + ): """Extracts dimension values at certain level into a separate table. The new table name will be composed of `dimension_prefix`, dimension name and suffixed by dimension level. 
For example a product dimension @@ -482,24 +509,36 @@ def create_conformed_rollup(self, cube, dimension, level=None, hierarchy=None, for level in levels: attributes.extend(level.attributes) - statement = context.denormalized_statement(attributes=attributes, - include_fact_key=False) + statement = context.denormalized_statement( + attributes=attributes, include_fact_key=False + ) group_by = [context.column(attr) for attr in attributes] statement = statement.group_by(*group_by) - table_name = "%s%s%s_%s" % (dimension_prefix or "", dimension_suffix or "", - str(dimension), str(level)) - self.create_table_from_statement(table_name, statement, schema, - replace, insert=True) + table_name = "{}{}{}_{}".format( + dimension_prefix or "", dimension_suffix or "", str(dimension), str(level) + ) + self.create_table_from_statement( + table_name, statement, schema, replace, insert=True + ) - def create_conformed_rollups(self, cube, dimensions, grain=None, schema=None, - dimension_prefix=None, dimension_suffix=None, - replace=False): - """Extract multiple dimensions from a snowflake. See - `extract_dimension()` for more information. `grain` is a dictionary - where keys are dimension names and values are levels, if level is - ``None`` then all levels are considered.""" + def create_conformed_rollups( + self, + cube, + dimensions, + grain=None, + schema=None, + dimension_prefix=None, + dimension_suffix=None, + replace=False, + ): + """Extract multiple dimensions from a snowflake. + + See `extract_dimension()` for more information. `grain` is a + dictionary where keys are dimension names and values are levels, + if level is ``None`` then all levels are considered. 
+ """ grain = grain or {} @@ -514,15 +553,20 @@ def create_conformed_rollups(self, cube, dimensions, grain=None, schema=None, for depth in range(0, level_index): level = hierarchy.levels[depth] - self.create_conformed_rollup(cube, dim, level=level, - schema=schema, - dimension_prefix=dimension_prefix or "", - dimension_suffix=dimension_suffix or "", - replace=replace) + self.create_conformed_rollup( + cube, + dim, + level=level, + schema=schema, + dimension_prefix=dimension_prefix or "", + dimension_suffix=dimension_suffix or "", + replace=replace, + ) # TODO: make this a separate SQL utility function - def create_table_from_statement(self, table_name, statement, schema, - replace=False, insert=False): + def create_table_from_statement( + self, table_name, statement, schema, replace=False, insert=False + ): """Creates or replaces a table from statement. Arguments: @@ -539,17 +583,18 @@ def create_table_from_statement(self, table_name, statement, schema, # # Create table # - table = sa.Table(table_name, self.metadata, - autoload=False, schema=schema) + table = sa.Table(table_name, self.metadata, autoload=False, schema=schema) if table.exists(): self._drop_table(table, schema, force=replace) for col in statement.columns: # mysql backend requires default string length - if self.connectable.name == "mysql" \ - and isinstance(col.type, sa.String) \ - and not col.type.length: + if ( + self.connectable.name == "mysql" + and isinstance(col.type, sa.String) + and not col.type.length + ): col_type = sa.String(255) else: col_type = col.type @@ -567,9 +612,15 @@ def create_table_from_statement(self, table_name, statement, schema, return table - def create_cube_aggregate(self, cube, table_name=None, dimensions=None, - replace=False, create_index=False, - schema=None): + def create_cube_aggregate( + self, + cube, + table_name=None, + dimensions=None, + replace=False, + create_index=False, + schema=None, + ): """Creates an aggregate table. 
If dimensions is `None` then all cube's dimensions are considered. @@ -583,11 +634,11 @@ def create_cube_aggregate(self, cube, table_name=None, dimensions=None, mapper = browser.mapper if browser.safe_labels: - raise ConfigurationError("Aggregation does not work with " - "safe_labels turned on") + raise ConfigurationError( + "Aggregation does not work with safe_labels turned on" + ) - schema = schema or mapper.aggregate_schema \ - or mapper.schema + schema = schema or mapper.aggregate_schema or mapper.schema # TODO: this is very similar to the denormalization prep. table_name = table_name or mapper.aggregate_table_name(cube.name) @@ -614,18 +665,12 @@ def create_cube_aggregate(self, cube, table_name=None, dimensions=None, # Create statement of all dimension level keys for # getting structure for table creation (statement, _) = browser.aggregation_statement( - cell, - drilldown=drilldown, - aggregates=cube.aggregates + cell, drilldown=drilldown, aggregates=cube.aggregates ) # Create table table = self.create_table_from_statement( - table_name, - statement, - schema=schema, - replace=replace, - insert=False + table_name, statement, schema=schema, replace=replace, insert=False ) self.logger.info("Inserting...") @@ -642,7 +687,7 @@ def create_cube_aggregate(self, cube, table_name=None, dimensions=None, if column.name in aggregated_columns: continue - name = "%s_%s_idx" % (table_name, column) + name = f"{table_name}_{column}_idx" self.logger.info("creating index: %s" % name) index = Index(name, column) index.create(self.connectable) diff --git a/cubes/sql/utils.py b/cubes/sql/utils.py index 0e710625..d6b4a84a 100644 --- a/cubes/sql/utils.py +++ b/cubes/sql/utils.py @@ -1,12 +1,12 @@ # -*- encoding: utf-8 -*- """Cubes SQL backend utilities, mostly to be used by the slicer command.""" -from sqlalchemy.sql.expression import Executable, ClauseElement -from sqlalchemy.ext.compiler import compiles -import sqlalchemy.sql as sql - from collections import OrderedDict +import 
sqlalchemy.sql as sql +from sqlalchemy.ext.compiler import compiles +from sqlalchemy.sql.expression import ClauseElement, Executable + from ..errors import ArgumentError from ..query.constants import SPLIT_DIMENSION_NAME @@ -17,72 +17,73 @@ "condition_conjunction", "order_column", "order_query", - "paginate_query" + "paginate_query", ] + class CreateTableAsSelect(Executable, ClauseElement): def __init__(self, table, select): self.table = table self.select = select + @compiles(CreateTableAsSelect) def visit_create_table_as_select(element, compiler, **kw): preparer = compiler.dialect.preparer(compiler.dialect) full_name = preparer.format_table(element.table) - return "CREATE TABLE %s AS (%s)" % ( - element.table, - compiler.process(element.select) + return "CREATE TABLE {} AS ({})".format( + element.table, compiler.process(element.select) ) + + @compiles(CreateTableAsSelect, "sqlite") def visit_create_table_as_select(element, compiler, **kw): preparer = compiler.dialect.preparer(compiler.dialect) full_name = preparer.format_table(element.table) - return "CREATE TABLE %s AS %s" % ( - element.table, - compiler.process(element.select) + return "CREATE TABLE {} AS {}".format( + element.table, compiler.process(element.select) ) + class CreateOrReplaceView(Executable, ClauseElement): def __init__(self, view, select): self.view = view self.select = select + @compiles(CreateOrReplaceView) def visit_create_or_replace_view(element, compiler, **kw): preparer = compiler.dialect.preparer(compiler.dialect) full_name = preparer.format_table(element.view) - return "CREATE OR REPLACE VIEW %s AS (%s)" % ( - full_name, - compiler.process(element.select) + return "CREATE OR REPLACE VIEW {} AS ({})".format( + full_name, compiler.process(element.select) ) + @compiles(CreateOrReplaceView, "sqlite") def visit_create_or_replace_view(element, compiler, **kw): preparer = compiler.dialect.preparer(compiler.dialect) full_name = preparer.format_table(element.view) - return "CREATE VIEW %s AS %s" 
% (
-        full_name,
-        compiler.process(element.select)
-    )
+    return "CREATE VIEW {} AS {}".format(full_name, compiler.process(element.select))
+

 @compiles(CreateOrReplaceView, "mysql")
 def visit_create_or_replace_view(element, compiler, **kw):
     preparer = compiler.dialect.preparer(compiler.dialect)
     full_name = preparer.format_table(element.view)

-    return "CREATE OR REPLACE VIEW %s AS %s" % (
-        full_name,
-        compiler.process(element.select)
+    return "CREATE OR REPLACE VIEW {} AS {}".format(
+        full_name, compiler.process(element.select)
     )


 def paginate_query(statement, page, page_size):
-    """Returns paginated statement if page is provided, otherwise returns
-    the same statement."""
+    """Returns paginated statement if page is provided, otherwise returns the
+    same statement."""

     if page is not None and page_size is not None:
         statement = statement.offset(page * page_size).limit(page_size)
@@ -91,8 +92,10 @@ def paginate_query(statement, page, page_size):


 def order_column(column, order):
-    """Orders a `column` according to `order` specified as string. Returns a
-    `Column` expression"""
+    """Orders a `column` according to `order` specified as string.
+
+    Returns a `Column` expression
+    """

     if not order:
         return column
@@ -161,4 +164,3 @@ def order_query(statement, order, natural_order=None, labels=None):
         statement = statement.order_by(*final_order.values())

     return statement
-
diff --git a/cubes/stores.py b/cubes/stores.py
index 4c42ab87..e955d649 100644
--- a/cubes/stores.py
+++ b/cubes/stores.py
@@ -1,12 +1,11 @@
 # -*- coding: utf-8 -*-

-from typing import Optional, Any
-from .types import JSONType
+from typing import Any, Optional
+
 from .ext import Extensible
+from .types import JSONType

-__all__ = (
-    "Store"
-)
+__all__ = ("Store",)

 # Note: this class does not have much use right now besides being discoverable
 # by custom plugin system in cubes.
diff --git a/cubes/tutorial/sql.py b/cubes/tutorial/sql.py index 03c91cfd..66bf5361 100644 --- a/cubes/tutorial/sql.py +++ b/cubes/tutorial/sql.py @@ -1,18 +1,20 @@ # -*- coding: utf-8 -*- -import sqlalchemy -import csv import codecs +import csv + +import sqlalchemy class UTF8Recoder: - """ - Iterator that reads an encoded stream and reencodes the input to UTF-8 - """ + """Iterator that reads an encoded stream and reencodes the input to + UTF-8.""" + def __init__(self, f, encoding): - assert 'b' in f.mode, "in py3k, codec's StreamReader needs a bytestream" + assert "b" in f.mode, "in py3k, codec's StreamReader needs a bytestream" self.reader = codecs.getreader(encoding)(f) self.next = self.__next__ + def __iter__(self): return self @@ -21,10 +23,8 @@ def __next__(self): class UnicodeReader: - """ - A CSV reader which will iterate over lines in the CSV file "f", - which is encoded in the given encoding. - """ + """A CSV reader which will iterate over lines in the CSV file "f", which is + encoded in the given encoding.""" def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds): f = UTF8Recoder(f, encoding) @@ -39,18 +39,21 @@ def __iter__(self): return self -def create_table_from_csv(connectable, file_name, table_name, fields, - create_id=False, schema=None): - """Create a table with name `table_name` from a CSV file `file_name` with columns corresponding - to `fields`. The `fields` is a list of two string tuples: (name, type) where type might be: - ``integer``, ``float`` or ``string``. +def create_table_from_csv( + connectable, file_name, table_name, fields, create_id=False, schema=None +): + """Create a table with name `table_name` from a CSV file `file_name` with + columns corresponding to `fields`. The `fields` is a list of two string + tuples: (name, type) where type might be: ``integer``, ``float`` or + ``string``. - If `create_id` is ``True`` then a column with name ``id`` is created and will contain generated - sequential record id. 
+ If `create_id` is ``True`` then a column with name ``id`` is created and + will contain generated sequential record id. - This is just small utility function for sandbox, play-around and testing purposes. It is not - recommended to be used for serious CSV-to-table loadings. For more advanced CSV loadings use another - framework, such as Brewery (http://databrewery.org). + This is just small utility function for sandbox, play-around and testing + purposes. It is not recommended to be used for serious CSV-to-table loadings. + For more advanced CSV loadings use another framework, such as Brewery + (http://databrewery.org). """ metadata = sqlalchemy.MetaData(bind=connectable) @@ -59,15 +62,17 @@ def create_table_from_csv(connectable, file_name, table_name, fields, if table.exists(): table.drop(checkfirst=False) - type_map = {"integer": sqlalchemy.Integer, - "float": sqlalchemy.Numeric, - "string": sqlalchemy.String(256), - "text": sqlalchemy.Text, - "date": sqlalchemy.Text, - "boolean": sqlalchemy.Integer} + type_map = { + "integer": sqlalchemy.Integer, + "float": sqlalchemy.Numeric, + "string": sqlalchemy.String(256), + "text": sqlalchemy.Text, + "date": sqlalchemy.Text, + "boolean": sqlalchemy.Integer, + } if create_id: - col = sqlalchemy.schema.Column('id', sqlalchemy.Integer, primary_key=True) + col = sqlalchemy.schema.Column("id", sqlalchemy.Integer, primary_key=True) table.append_column(col) field_names = [] @@ -78,7 +83,7 @@ def create_table_from_csv(connectable, file_name, table_name, fields, table.create() - reader = UnicodeReader(open(file_name, 'rb')) + reader = UnicodeReader(open(file_name, "rb")) # Skip header next(reader) diff --git a/cubes/types.py b/cubes/types.py index 571b8a8a..b84cf501 100644 --- a/cubes/types.py +++ b/cubes/types.py @@ -1,10 +1,4 @@ -from typing import ( - Any, - Dict, - Mapping, - Optional, - Tuple, - ) +from typing import Any, Dict, Mapping, Optional, Tuple # Type used as a placeholder during type annotation process. 
There should be no # values of this type in the future. Used to mark: diff --git a/cubes/workspace.py b/cubes/workspace.py index 7cf08ba9..e9e40a41 100644 --- a/cubes/workspace.py +++ b/cubes/workspace.py @@ -1,91 +1,88 @@ # -*- coding: utf-8 -*- import os.path - -from typing import List, Dict, Any, Optional, Tuple, Union, Type -from logging import Logger - from collections import OrderedDict, defaultdict from configparser import ConfigParser +from logging import Logger +from typing import Any, Dict, List, Optional, Tuple, Type, Union -from .metadata import read_model_metadata, find_dimension, LocalizationContext,\ - Cube, Dimension -from .metadata.providers import ModelProvider -from .auth import NotAuthorized, Authorizer +from . import ext +from .auth import Authorizer, NotAuthorized +from .calendar import Calendar from .common import read_json_file -from .errors import ConfigurationError, ArgumentError, CubesError +from .errors import ConfigurationError from .logging import get_logger -from .calendar import Calendar +from .metadata import ( + Cube, + Dimension, + LocalizationContext, + find_dimension, + read_model_metadata, +) +from .metadata.providers import ModelProvider from .namespace import Namespace -from .stores import Store from .query.browser import AggregationBrowser, BrowserFeatures -from .types import _CubeKey, JSONType -from . 
import ext from .settings import Setting, SettingType, distill_settings # FIXME: [typing] Remove direct reference to SQL, move to shared place from .sql.mapper import NamingDict, distill_naming +from .stores import Store +from .types import JSONType, _CubeKey -__all__ = [ - "Workspace", -] +__all__ = ["Workspace"] SLICER_INFO_KEYS = ( "name", "label", "description", # Workspace model description - "copyright", # Copyright for the data - "license", # Data license - "maintainer", # Name (and maybe contact) of data maintainer - "contributors", # List of contributors + "copyright", # Copyright for the data + "license", # Data license + "maintainer", # Name (and maybe contact) of data maintainer + "contributors", # List of contributors "visualizers", # List of dicts with url and label of server's visualizers - "keywords", # List of keywords describing server's cubes - "related" # List of dicts with related servers + "keywords", # List of keywords describing server's cubes + "related", # List of dicts with related servers ) WORKSPACE_SETTINGS = [ - Setting( - "log", SettingType.str, - desc="File name where the logs are written" - ), - Setting( - "log_level", SettingType.str, - desc="Log level details", - values=["info", "error", "warn", "debug"], - ), - Setting( - "root_directory", SettingType.str, - desc="Directory for all relative paths" - ), - Setting( - "models_directory", SettingType.str, - desc="Place where file-based models are searched for", - ), - Setting( - "info_file", SettingType.str, - desc="A JSON file where server info is stored", - ), - Setting( - "stores_file", SettingType.str, - desc="Configuration file with configuration of stores", - ), - Setting( - "timezone", SettingType.str, - desc="Default timezone for time and date functions", - ), - Setting( - "first_weekday", SettingType.str, - desc="Name or a number of a first day of the week", - ), - ] + Setting("log", SettingType.str, desc="File name where the logs are written"), + Setting( + "log_level", + 
SettingType.str, + desc="Log level details", + values=["info", "error", "warn", "debug"], + ), + Setting("root_directory", SettingType.str, desc="Directory for all relative paths"), + Setting( + "models_directory", + SettingType.str, + desc="Place where file-based models are searched for", + ), + Setting( + "info_file", SettingType.str, desc="A JSON file where server info is stored" + ), + Setting( + "stores_file", + SettingType.str, + desc="Configuration file with configuration of stores", + ), + Setting( + "timezone", SettingType.str, desc="Default timezone for time and date functions" + ), + Setting( + "first_weekday", + SettingType.str, + desc="Name or a number of a first day of the week", + ), +] class Workspace: # TODO: Make this first-class object - store_infos: Dict[str,Tuple[str, JSONType]] - stores: Dict[str,Store] + store_infos: Dict[str, Tuple[str, JSONType]] + stores: Dict[str, Store] logger: Logger root_dir: str models_dir: str @@ -104,13 +101,15 @@ class Workspace: _cubes: Dict[_CubeKey, Cube] - def __init__(self, - config: ConfigParser=None, - stores: str=None, - load_base_model: bool=True, - **_options: Any) -> None: - """Creates a workspace. `config` should be a `ConfigParser` or a - path to a config file. `stores` should be a dictionary of store + def __init__( + self, + config: ConfigParser = None, + stores: str = None, + load_base_model: bool = True, + **_options: Any, + ) -> None: + """Creates a workspace. `config` should be a `ConfigParser` or a path + to a config file. `stores` should be a dictionary of store configurations, a `ConfigParser` or a path to a ``stores.ini`` file. 
 Properties:
@@ -147,9 +146,9 @@ def __init__(self,
         # =======
         # Log to file or console
         if "workspace" in config:
-            workspace_config = distill_settings(config["workspace"],
-                                                WORKSPACE_SETTINGS,
-                                                owner="workspace")
+            workspace_config = distill_settings(
+                config["workspace"], WORKSPACE_SETTINGS, owner="workspace"
+            )
         else:
             workspace_config = {}

@@ -215,10 +214,12 @@ def __init__(self,
             info = dict(config["info"])

             if "visualizer" in info:
-                info["visualizers"] = [{
-                    "label": info.get("label", info.get("name", "Default")),
-                    "url": info["visualizer"]
-                }]
+                info["visualizers"] = [
+                    {
+                        "label": info.get("label", info.get("name", "Default")),
+                        "url": info["visualizer"],
+                    }
+                ]

             for key in SLICER_INFO_KEYS:
                 self.info[key] = info.get(key)
@@ -237,20 +238,21 @@ def __init__(self,
             try:
                 store_config.read(stores)
             except Exception as e:
-                raise ConfigurationError(f"Unable to read stores from {stores}."
-                                         " Reason: {e}")
+                raise ConfigurationError(
+                    f"Unable to read stores from {stores}. Reason: {e}"
+                )

             for store in store_config.sections():
-                self._register_store_dict(store,
-                                          dict(store_config.items(store)))
+                self._register_store_dict(store, dict(store_config.items(store)))

         elif isinstance(stores, dict):
             for name, store in stores.items():
                 self._register_store_dict(name, store)

         elif stores is not None:
-            raise ConfigurationError("Unknown stores description object: %s" %
-                                     (type(stores)))
+            raise ConfigurationError(
+                "Unknown stores description object: %s" % (type(stores))
+            )

         # Calendar
         # ========
@@ -258,11 +260,12 @@ def __init__(self,
         timezone = workspace_config.get("timezone")
         first_weekday = workspace_config.get("first_weekday", 0)

-        self.logger.debug(f"Workspace calendar timezone: {timezone} "
-                          "first week day: {first_weekday}")
+        self.logger.debug(
+            f"Workspace calendar timezone: {timezone} "
+            f"first week day: {first_weekday}"
+        )

-        self.calendar = Calendar(timezone=timezone,
-                                 first_weekday=first_weekday)
+        self.calendar = Calendar(timezone=timezone,
first_weekday=first_weekday) # Register Naming # @@ -345,7 +348,7 @@ def __init__(self, # root/model.json # root/main.cubesmodel # models/*.cubesmodel - models: List[Tuple[str,str]] + models: List[Tuple[str, str]] models = [] # Undepreciated if "model" in config: @@ -374,12 +377,14 @@ def _get_namespace(self, ref: str) -> Namespace: return self.namespace return self.namespace.namespace(ref)[0] - def add_translation(self, - locale: str, - trans: JSONType, - ns: str="default") -> None: - """Add translation `trans` for `locale`. `ns` is a namespace. If no - namespace is specified, then default (global) is used.""" + def add_translation( + self, locale: str, trans: JSONType, ns: str = "default" + ) -> None: + """Add translation `trans` for `locale`. + + `ns` is a namespace. If no namespace is specified, then default + (global) is used. + """ namespace = self._get_namespace(ns) namespace.add_translation(locale, trans) @@ -395,23 +400,25 @@ def _register_store_dict(self, name: str, info: JSONType) -> None: except KeyError: raise ConfigurationError("Store '%s' has no type specified" % name) else: - self.logger.warn("'backend' is depreciated, use 'type' for " - "store (in %s)." % str(name)) + self.logger.warn( + "'backend' is depreciated, use 'type' for " + "store (in %s)." % str(name) + ) self.register_store(name, type_, **info) # TODO: Make `config` use Options def register_default_store(self, type_: str, **config: Any) -> None: - """Convenience function for registering the default store. For more - information see `register_store()`""" + """Convenience function for registering the default store. 
+ + For more information see `register_store()` + """ self.register_store("default", type_, **config) # TODO: Make `config` use Options - def register_store(self, - name: str, - type_: str, - include_model: bool=True, - **_config: Any) -> None: + def register_store( + self, name: str, type_: str, include_model: bool = True, **_config: Any + ) -> None: """Adds a store configuration.""" config = dict(_config) @@ -443,18 +450,16 @@ def register_store(self, nsname = config.pop("namespace", None) if model: - self.import_model(model, store=name, namespace=nsname, - provider=provider) + self.import_model(model, store=name, namespace=nsname, provider=provider) elif provider: # Import empty model and register the provider - self.import_model({}, store=name, namespace=nsname, - provider=provider) + self.import_model({}, store=name, namespace=nsname, provider=provider) self.logger.debug("Registered store '%s'" % name) # TODO: Rename to _model_store_name def _store_for_model(self, metadata: JSONType) -> str: - """Returns a store for model specified in `metadata`. """ + """Returns a store for model specified in `metadata`.""" store_name = metadata.get("store") if not store_name and "info" in metadata: store_name = metadata["info"].get("store") @@ -465,15 +470,16 @@ def _store_for_model(self, metadata: JSONType) -> str: # TODO: this is very confusing process, needs simplification # TODO: change this to: add_model_provider(provider, info, store, languages, ns) - def import_model(self, - model: Union[JSONType, str]=None, - provider: Union[str, ModelProvider] = None, - store: str=None, - translations: JSONType=None, - namespace: str=None) -> None: - """Registers the `model` in the workspace. `model` can be a - metadata dictionary, filename, path to a model bundle directory or a - URL. 
+ def import_model( + self, + model: Union[JSONType, str] = None, + provider: Union[str, ModelProvider] = None, + store: str = None, + translations: JSONType = None, + namespace: str = None, + ) -> None: + """Registers the `model` in the workspace. `model` can be a metadata + dictionary, filename, path to a model bundle directory or a URL. If `namespace` is specified, then the model's objects are stored in the namespace of that name. @@ -493,7 +499,7 @@ def import_model(self, # 1. Metadata # ----------- # Make sure that the metadata is a dictionary - # + # # TODO: Use "InlineModelProvider" and "FileBasedModelProvider" # 1. Model Metadata @@ -503,23 +509,28 @@ def import_model(self, # TODO: Use "InlineModelProvider" and "FileBasedModelProvider" if isinstance(model, str): - self.logger.debug(f"Importing model from {model}. " - f"Provider: {provider} Store: {store} " - f"NS: {namespace}") + self.logger.debug( + f"Importing model from {model}. " + f"Provider: {provider} Store: {store} " + f"NS: {namespace}" + ) path = model if self.models_dir and not os.path.isabs(path): path = os.path.join(self.models_dir, path) model = read_model_metadata(path) elif isinstance(model, dict): - self.logger.debug(f"Importing model from dictionary. " - f"Provider: {provider} Store: {store} " - f"NS: {namespace}") + self.logger.debug( + f"Importing model from dictionary. " + f"Provider: {provider} Store: {store} " + f"NS: {namespace}" + ) elif model is None: model = {} else: - raise ConfigurationError(f"Unknown model '{model}' " - f"(should be a filename or a dictionary)") + raise ConfigurationError( + f"Unknown model '{model}' " f"(should be a filename or a dictionary)" + ) # 2. 
Model provider # ----------------- @@ -544,8 +555,9 @@ def import_model(self, # Link the model with store store = store or model.get("store") - if store or (hasattr(provider_obj, "requires_store") \ - and provider_obj.requires_store()): + if store or ( + hasattr(provider_obj, "requires_store") and provider_obj.requires_store() + ): provider_obj.bind(self.get_store(store)) # 4. Namespace @@ -572,13 +584,13 @@ def add_slicer(self, name: str, url: str, **options: Any) -> None: self.register_store(name, "slicer", url=url, **options) self.import_model({}, provider="slicer", store=name) - def cube_names(self, identity: Any=None) -> List[str]: + def cube_names(self, identity: Any = None) -> List[str]: """Return names all available cubes.""" return [cube["name"] for cube in self.list_cubes()] # TODO: this is not loclized!!! # TODO: Convert this to CubeDescriptions - def list_cubes(self, identity: Any=None) -> List[Dict[str,str]]: + def list_cubes(self, identity: Any = None) -> List[Dict[str, str]]: """Get a list of metadata for cubes in the workspace. Result is a list of dictionaries with keys: `name`, `label`, `category`, `info`. 
@@ -592,7 +604,7 @@ def list_cubes(self, identity: Any=None) -> List[Dict[str,str]]: all_cubes = self.namespace.list_cubes(recursive=True) if self.authorizer: - by_name = dict((cube["name"], cube) for cube in all_cubes) + by_name = {cube["name"]: cube for cube in all_cubes} names = [cube["name"] for cube in all_cubes] authorized = self.authorizer.authorize(identity, names) @@ -600,7 +612,7 @@ def list_cubes(self, identity: Any=None) -> List[Dict[str,str]]: return all_cubes - def cube(self, ref: str, identity: Any=None, locale: str=None) -> Cube: + def cube(self, ref: str, identity: Any = None, locale: str = None) -> Cube: """Returns a cube with full cube namespace reference `ref` for user `identity` and translated to `locale`.""" @@ -643,15 +655,13 @@ def cube(self, ref: str, identity: Any=None, locale: str=None) -> Cube: return cube - def dimension(self, - name: str, - locale: str=None, - namespace: str=None, - provider: str=None) -> Dimension: + def dimension( + self, name: str, locale: str = None, namespace: str = None, provider: str = None + ) -> Dimension: """Returns a dimension with `name`. Raises `NoSuchDimensionError` when no model published the dimension. Raises `RequiresTemplate` error when - model provider requires a template to be able to provide the - dimension, but such template is not a public dimension. + model provider requires a template to be able to provide the dimension, + but such template is not a public dimension. The standard lookup when linking a cube is: @@ -660,14 +670,14 @@ def dimension(self, 3. look in the default (global) namespace """ - return find_dimension(name, locale, - namespace or self.namespace, - provider) + return find_dimension(name, locale, namespace or self.namespace, provider) def _browser_options(self, cube: Cube) -> JSONType: - """Returns browser configuration options for `cube`. 
The options are - taken from the configuration file and then overriden by cube's - `browser_options` attribute.""" + """Returns browser configuration options for `cube`. + + The options are taken from the configuration file and then + overriden by cube's `browser_options` attribute. + """ options = dict(self.browser_options) if cube.browser_options: @@ -675,10 +685,9 @@ def _browser_options(self, cube: Cube) -> JSONType: return options - def browser(self, - cube: Cube, - locale: str=None, - identity: Any=None) -> AggregationBrowser: + def browser( + self, cube: Cube, locale: str = None, identity: Any = None + ) -> AggregationBrowser: """Returns a browser for `cube`.""" naming: NamingDict @@ -691,8 +700,9 @@ def browser(self, # We don't allow cube store to be an actual store. Cube is a logical # object. - assert isinstance(cube.store, str) or cube.store is None, \ - f"Store of a cube ({cube}) must be a string or None" + assert ( + isinstance(cube.store, str) or cube.store is None + ), f"Store of a cube ({cube}) must be a string or None" locale = locale or cube.locale @@ -703,8 +713,7 @@ def browser(self, # TODO: Review necessity of this store_type = store.extension_name - assert store_type is not None, \ - f"Store type should not be None ({store})" + assert store_type is not None, f"Store type should not be None ({store})" cube_options = self._browser_options(cube) @@ -738,24 +747,32 @@ def browser(self, settings = cls.distill_settings(options) # FIXME: [typing] Not correct type-wise - browser = cls(cube=cube, store=store, locale=locale, - calendar=self.calendar, naming=naming, **settings) + browser = cls( + cube=cube, + store=store, + locale=locale, + calendar=self.calendar, + naming=naming, + **settings, + ) # TODO: remove this once calendar is used in all backends browser.calendar = self.calendar return browser - def cube_features(self, cube: Cube, identity: Any=None) -> BrowserFeatures: + def cube_features(self, cube: Cube, identity: Any = None) -> 
BrowserFeatures: """Returns browser features for `cube`""" # TODO: this might be expensive, make it a bit cheaper # recycle the feature-providing browser or something. Maybe use class # method for that return self.browser(cube, identity).features() - def get_store(self, name: str=None) -> Store: - """Opens a store `name`. If the store is already open, returns the - existing store.""" + def get_store(self, name: str = None) -> Store: + """Opens a store `name`. + + If the store is already open, returns the existing store. + """ name = name or "default" @@ -765,7 +782,7 @@ def get_store(self, name: str=None) -> Store: try: type_, options = self.store_infos[name] except KeyError: - raise ConfigurationError("Unknown store '{}'".format(name)) + raise ConfigurationError(f"Unknown store '{name}'") # TODO: temporary hack to pass store name and store type ext: Store diff --git a/doc/conf.py b/doc/conf.py index f971448e..44e60855 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -43,8 +43,8 @@ master_doc = 'index' # General information about the project. -project = u'Cubes' -copyright = u'2010-2015, Stefan Urbanek' +project = 'Cubes' +copyright = '2010-2015, Stefan Urbanek' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -184,8 +184,8 @@ # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ - ('index', 'Cubes.tex', u'Cubes Documentation', - u'Stefan Urbanek', 'manual'), + ('index', 'Cubes.tex', 'Cubes Documentation', + 'Stefan Urbanek', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of @@ -217,6 +217,6 @@ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). 
man_pages = [ - ('index', 'cubes', u'Cubes Documentation', - [u'Stefan Urbanek'], 1) + ('index', 'cubes', 'Cubes Documentation', + ['Stefan Urbanek'], 1) ] diff --git a/examples/dimension_browser/application.py b/examples/dimension_browser/application.py index 14706003..8d07a5b2 100644 --- a/examples/dimension_browser/application.py +++ b/examples/dimension_browser/application.py @@ -1,4 +1,4 @@ -"""Dimension Browser example +"""Dimension Browser example. A Flask application for browsing cube's dimensions. @@ -24,12 +24,14 @@ # Cube we are going to browse (only one for this example) # -CUBE_NAME="irbd_balance" +CUBE_NAME = "irbd_balance" + @app.route("/favicon.ico") def favicon(): return make_response("") + @app.route("/") @app.route("/") def report(dim_name=None): @@ -37,7 +39,7 @@ def report(dim_name=None): cube = browser.cube if not dim_name: - return render_template('report.html', dimensions=cube.dimensions) + return render_template("report.html", dimensions=cube.dimensions) # First we need to get the hierarchy to know the order of levels. Cubes # supports multiple hierarchies internally. @@ -45,8 +47,8 @@ def report(dim_name=None): dimension = cube.dimension(dim_name) hierarchy = dimension.hierarchy() - # Parse the`cut` request parameter and convert it to a list of - # actual cube cuts. Think of this as of multi-dimensional path, even that + # Parse the`cut` request parameter and convert it to a list of + # actual cube cuts. Think of this as of multi-dimensional path, even that # for this simple example, we are goint to use only one dimension for # browsing. 
@@ -88,15 +90,17 @@ def report(dim_name=None): is_last = hierarchy.is_last(next_level) # Finally, we render it - return render_template('report.html', - dimensions=cube.dimensions, - dimension=dimension, - levels=levels, - next_level=next_level, - result=result, - cell=cell, - is_last=is_last, - details=details) + return render_template( + "report.html", + dimensions=cube.dimensions, + dimension=dimension, + levels=levels, + next_level=next_level, + result=result, + cell=cell, + is_last=is_last, + details=details, + ) if __name__ == "__main__": @@ -104,5 +108,3 @@ def report(dim_name=None): # Create a Slicer and register it at http://localhost:5000/slicer app.register_blueprint(slicer, url_prefix="/slicer", config="slicer.ini") app.run(debug=True) - - diff --git a/examples/formatters/table.py b/examples/formatters/table.py index 7de58523..a3ab1363 100644 --- a/examples/formatters/table.py +++ b/examples/formatters/table.py @@ -52,9 +52,9 @@ # result = browser.aggregate(drilldown=["item", "year"]) with open("cross_table.html", "w") as f: - data = html_cross_formatter(result, - onrows=["year"], - oncolumns=["item.category_label"]) + data = html_cross_formatter( + result, onrows=["year"], oncolumns=["item.category_label"] + ) f.write(data) print("Check also table.html and cross_table.html files") diff --git a/examples/hello_world/aggregate.py b/examples/hello_world/aggregate.py index a39b0fb3..7226fb23 100644 --- a/examples/hello_world/aggregate.py +++ b/examples/hello_world/aggregate.py @@ -12,8 +12,7 @@ # 3. Play with aggregates result = browser.aggregate() -print("Total\n" - "----------------------") +print("Total\n" "----------------------") print("Record count : %8d" % result.summary["record_count"]) print("Total amount : %8d" % result.summary["amount_sum"]) @@ -23,35 +22,47 @@ # 4. 
Drill-down through a dimension # -print("\n" - "Drill Down by Category (top-level Item hierarchy)\n" - "==================================================") +print( + "\n" + "Drill Down by Category (top-level Item hierarchy)\n" + "==================================================" +) # result = browser.aggregate(drilldown=["item"]) # -print(("%-20s%10s%10s%10s\n"+"-"*50) % ("Category", "Count", "Total", "Double")) +print(("%-20s%10s%10s%10s\n" + "-" * 50) % ("Category", "Count", "Total", "Double")) # for row in result.table_rows("item"): - print("%-20s%10d%10d%10d" % ( row.label, - row.record["record_count"], - row.record["amount_sum"], - row.record["double_amount_sum"]) - ) + print( + "%-20s%10d%10d%10d" + % ( + row.label, + row.record["record_count"], + row.record["amount_sum"], + row.record["double_amount_sum"], + ) + ) -print("\n" - "Slice where Category = Equity\n" - "==================================================") +print( + "\n" + "Slice where Category = Equity\n" + "==================================================" +) cut = PointCut("item", ["e"]) -cell = Cell(cuts = [cut]) +cell = Cell(cuts=[cut]) result = browser.aggregate(cell, drilldown=["item:subcategory"]) -print(("%-20s%10s%10s%10s\n"+"-"*50) % ("Sub-category", "Count", "Total", "Double")) +print(("%-20s%10s%10s%10s\n" + "-" * 50) % ("Sub-category", "Count", "Total", "Double")) for row in result.table_rows("item"): - print("%-20s%10d%10d%10d" % ( row.label, - row.record["record_count"], - row.record["amount_sum"], - row.record["double_amount_sum"], - )) + print( + "%-20s%10d%10d%10d" + % ( + row.label, + row.record["record_count"], + row.record["amount_sum"], + row.record["double_amount_sum"], + ) + ) diff --git a/examples/hello_world/prepare_data.py b/examples/hello_world/prepare_data.py index 4737bd3f..604dc46f 100644 --- a/examples/hello_world/prepare_data.py +++ b/examples/hello_world/prepare_data.py @@ -11,20 +11,22 @@ print("preparing data...") -engine = 
create_engine('sqlite:///data.sqlite') +engine = create_engine("sqlite:///data.sqlite") -create_table_from_csv(engine, - "data.csv", - table_name=FACT_TABLE, - fields=[ - ("category", "string"), - ("category_label", "string"), - ("subcategory", "string"), - ("subcategory_label", "string"), - ("line_item", "string"), - ("year", "integer"), - ("amount", "integer")], - create_id=True - ) +create_table_from_csv( + engine, + "data.csv", + table_name=FACT_TABLE, + fields=[ + ("category", "string"), + ("category_label", "string"), + ("subcategory", "string"), + ("subcategory_label", "string"), + ("line_item", "string"), + ("year", "integer"), + ("amount", "integer"), + ], + create_id=True, +) print("done. file data.sqlite created") diff --git a/examples/model_browser/application.py b/examples/model_browser/application.py index 1355255b..53cac771 100644 --- a/examples/model_browser/application.py +++ b/examples/model_browser/application.py @@ -3,7 +3,6 @@ Use: python application.py [slicer.ini] - """ import argparse @@ -42,10 +41,9 @@ def report(dim_name=None): dimension = None physical = None - return render_template('index.html', - dimensions=cube.dimensions, - dimension=dimension, - mapping=physical) + return render_template( + "index.html", dimensions=cube.dimensions, dimension=dimension, mapping=physical + ) def get_browser(): @@ -57,11 +55,12 @@ def get_browser(): return workspace.browser(cube_name) + if __name__ == "__main__": - parser = argparse.ArgumentParser(description='Cubes model browser.') - parser.add_argument('config', help='server configuration .ini file') - parser.add_argument('cube', nargs='?', default=None, help='cube name') + parser = argparse.ArgumentParser(description="Cubes model browser.") + parser.add_argument("config", help="server configuration .ini file") + parser.add_argument("cube", nargs="?", default=None, help="cube name") args = parser.parse_args() config = ConfigParser.SafeConfigParser() diff --git a/mypy.ini b/mypy.ini index 
cb4d59bf..24a82678 100644 --- a/mypy.ini +++ b/mypy.ini @@ -1,10 +1,17 @@ [mypy] -fast_parser = True python_version = 3.6 -disallow_untyped_defs = True warn_no_return = True strict_optional = True incremental = True warn_redundant_casts = True warn_unused_ignores = True -custom_typeshed_dir = ./typeshed + +# TODO: uncomment when ready +# disallow_untyped_defs = True + +# TODO: remove when ready +ignore_missing_imports = True + +# Obsolete: +# fast_parser = True +# custom_typeshed_dir = ./typeshed diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 00000000..e03adec9 --- /dev/null +++ b/setup.cfg @@ -0,0 +1,10 @@ +[flake8] +# F401: unused import (actually used by mypy) +# E711,E712: things that are actually correct for SQLAlchemy +# E203,W503: dubious pep8 warnings (blacks doesn't "fix" those) +# F405,F403: star imports (fix this!) +# E713 test for membership should be 'not in' (easy to fix!) +ignore = E203,F401,E711,E712,W503,F405,F403,E713 +max-line-length = 90 +exclude=.tox,docs + diff --git a/setup.py b/setup.py index 6b3218eb..8493e495 100644 --- a/setup.py +++ b/setup.py @@ -7,6 +7,7 @@ # Always prefer setuptools over distutils from setuptools import setup, find_packages + # To use a consistent encoding from codecs import open from os import path @@ -14,7 +15,7 @@ here = path.abspath(path.dirname(__file__)) # Get the long description from the README file -with open(path.join(here, 'README.md'), encoding='utf-8') as f: +with open(path.join(here, "README.md"), encoding="utf-8") as f: long_description = f.read() @@ -27,64 +28,50 @@ ] extras = { - 'slicer': 'werkzeug', - 'html': 'jinja', - 'all': ['cubes[%s]' % extra for extra in ['slicer', 'html']], - 'dev': ['cubes[all]', 'sphinx'], + "slicer": "werkzeug", + "html": "jinja", + "all": ["cubes[%s]" % extra for extra in ["slicer", "html"]], + # TODO: add pytest-cov without breaking travis + "dev": ["cubes[all]", "sphinx", "pytest", "flake8", "mccabe"], } setup( - name = "cubes", - + name="cubes", # 
Versions should comply with PEP440. For a discussion on single-sourcing # the version across setup.py and the project code, see # https://packaging.python.org/en/latest/single_source_version.html - version = '2.0', - - description = "Lightweight framework for Online Analytical Processing (OLAP) and multidimensional analysis", + version="2.0", + description="Lightweight framework for Online Analytical Processing (OLAP) and multidimensional analysis", long_description=long_description, - url = "http://cubes.databrewery.org", - + url="http://cubes.databrewery.org", # Author details - author = "Stefan Urbanek", - author_email = "stefan.urbanek@gmail.com", - license = "MIT", - - install_requires = requirements, - extras_require = extras, - + author="Stefan Urbanek", + author_email="stefan.urbanek@gmail.com", + license="MIT", + install_requires=requirements, + extras_require=extras, packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]), - package_data={ # If any package contains *.txt or *.rst files, include them: - '': ['*.txt', '*.rst'], - 'cubes': ['templates/*.html', 'templates/*.js', 'schemas/*.json'], - 'cubes.server': ['templates/*.html'], + "": ["*.txt", "*.rst"], + "cubes": ["templates/*.html", "templates/*.js", "schemas/*.json"], + "cubes.server": ["templates/*.html"], }, - classifiers=[ - 'Development Status :: 5 - Production/Stable', - 'Intended Audience :: Developers', - 'Intended Audience :: Science/Research', - 'License :: OSI Approved :: MIT License', - - 'Topic :: Database', - 'Topic :: Scientific/Engineering', - 'Topic :: Utilities' - + "Development Status :: 5 - Production/Stable", + "Intended Audience :: Developers", + "Intended Audience :: Science/Research", + "License :: OSI Approved :: MIT License", + "Topic :: Database", + "Topic :: Scientific/Engineering", + "Topic :: Utilities" # Specify the Python versions you support here. In particular, ensure # that you indicate whether you support Python 2, Python 3 or both. 
- 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.6', + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.6", ], - - entry_points={ - 'console_scripts': [ 'slicer = cubes.slicer.commands:main' ], - }, - + entry_points={"console_scripts": ["slicer = cubes.slicer.commands:main"]}, test_suite="tests", - - keywords = "olap multidimensional data analysis", - + keywords="olap multidimensional data analysis", # could also include long_description, download_url, classifiers, etc. ) diff --git a/tests/common.py b/tests/common.py index 988aed35..39c3d407 100644 --- a/tests/common.py +++ b/tests/common.py @@ -7,22 +7,24 @@ from cubes.metadata import StaticModelProvider TESTS_PATH = os.path.dirname(os.path.abspath(__file__)) -RESOURCES_PATH = os.path.join(TESTS_PATH, 'resources') +RESOURCES_PATH = os.path.join(TESTS_PATH, "resources") + def resource_path(resource: str) -> str: """Return full path to `resource`""" return os.path.join(RESOURCES_PATH, resource) + def model_path(model: str) -> str: """Return full path to `resource`""" return os.path.join(RESOURCES_PATH, "models", model) - # FIXME: Legacy code below this line. 
Remove # ==================================================================== -DATA_PATH = os.path.join(TESTS_PATH, 'data') +DATA_PATH = os.path.join(TESTS_PATH, "data") + def create_provider(name): # TODO: this should be rather: @@ -30,12 +32,13 @@ def create_provider(name): metadata = read_model_metadata(model_path(name)) return StaticModelProvider(metadata) + class CubesTestCaseBase(unittest.TestCase): sql_engine = None def setUp(self): - self._models_path = os.path.join(RESOURCES_PATH, 'models') - self._data_path = os.path.join(RESOURCES_PATH, 'data') + self._models_path = os.path.join(RESOURCES_PATH, "models") + self._data_path = os.path.join(RESOURCES_PATH, "data") if self.sql_engine: self.engine = create_engine(self.sql_engine) @@ -44,7 +47,6 @@ def setUp(self): self.engine = None self.metadata = None - def model_path(self, model): return os.path.join(self._models_path, model) @@ -58,11 +60,14 @@ def data_path(self, file): return os.path.join(self._data_path, file) def create_workspace(self, store=None, model=None): - """Create shared workspace. Add default store specified in `store` as - a dictionary and `model` which can be a filename relative to - ``tests/models`` or a moel dictionary. If no store is provided but - class has an engine or `sql_engine` set, then the existing engine will - be used as the default SQL store.""" + """Create shared workspace. + + Add default store specified in `store` as a dictionary and + `model` which can be a filename relative to ``tests/models`` or + a moel dictionary. If no store is provided but class has an + engine or `sql_engine` set, then the existing engine will be + used as the default SQL store. 
+ """ raise NotImplementedError("Depreciated in this context") workspace = Workspace() @@ -86,4 +91,3 @@ def load_data(self, table, data): for row in data: insert = table.insert().values(row) self.engine.execute(insert) - diff --git a/tests/metadata/test_localization.py b/tests/metadata/test_localization.py index 7e302b22..44e42243 100644 --- a/tests/metadata/test_localization.py +++ b/tests/metadata/test_localization.py @@ -2,16 +2,21 @@ from cubes import Namespace from cubes import StaticModelProvider from cubes import read_json_file -from cubes.metadata.localization import LocalizationContext, ModelObjectLocalizationContext +from cubes.metadata.localization import ( + LocalizationContext, + ModelObjectLocalizationContext, +) from ..common import CubesTestCaseBase + class LocalizationTestCase(CubesTestCaseBase): def setUp(self): - super(LocalizationTestCase, self).setUp() + super().setUp() self.translation = read_json_file(self.model_path("translation.json")) self.model = read_json_file(self.model_path("localizable.json")) self.provider = StaticModelProvider(self.model) self.context = LocalizationContext(self.translation) + def test_basic(self): trans = self.context.object_localization("cubes", "inner") self.assertEqual(trans.get("label"), "inner_LAB") @@ -57,4 +62,3 @@ def test_translate_cube(self): # TODO: test non existent top object # TODO: test non existend child object # TODO: test plain label - diff --git a/tests/metadata/test_model.py b/tests/metadata/test_model.py index c73ee306..77d9ad2c 100644 --- a/tests/metadata/test_model.py +++ b/tests/metadata/test_model.py @@ -18,12 +18,12 @@ "levels": [ {"name": "year"}, {"name": "month", "attributes": ["month", "month_name"]}, - {"name": "day"} + {"name": "day"}, ], "hierarchies": [ {"name": "ymd", "levels": ["year", "month", "day"]}, {"name": "ym", "levels": ["year", "month"]}, - ] + ], } DIM_FLAG_DESC = {"name": "flag"} @@ -33,30 +33,31 @@ "levels": [ {"name": "category", "attributes": ["key", "name"]}, 
{"name": "subcategory", "attributes": ["key", "name"]}, - {"name": "product", "attributes": ["key", "name", "description"]} - ] + {"name": "product", "attributes": ["key", "name", "description"]}, + ], } class ModelTestCaseBase(unittest.TestCase): def setUp(self): - self.models_path = os.path.join(RESOURCES_PATH, 'models') + self.models_path = os.path.join(RESOURCES_PATH, "models") def model_path(self, model): return os.path.join(self.models_path, model) class AttributeTestCase(unittest.TestCase): - """docstring for AttributeTestCase""" + """docstring for AttributeTestCase.""" + def test_basics(self): - """Attribute creation and attribute references""" + """Attribute creation and attribute references.""" attr = Attribute("foo") self.assertEqual("foo", attr.name) self.assertEqual("foo", str(attr)) self.assertEqual("foo", attr.ref) def test_locale(self): - """References to localizable attributes""" + """References to localizable attributes.""" attr = Attribute("foo") self.assertRaises(ArgumentError, attr.localized_ref, locale="xx") @@ -113,7 +114,7 @@ def test_create_attribute(self): class MeasuresTestsCase(CubesTestCaseBase): def setUp(self): - super(MeasuresTestsCase, self).setUp() + super().setUp() self.metadata = self.model_metadata("measures.json") self.cubes_md = {} @@ -188,7 +189,7 @@ def test_fact_count(self): def test_empty2(self): """No measures in metadata should yield count measure with record - count""" + count.""" cube = self.cube("empty") self.assertIsInstance(cube, Cube) self.assertEqual(0, len(cube.measures)) @@ -200,7 +201,7 @@ def test_empty2(self): self.assertIsNone(aggregate.measure) def test_amount_default(self): - """Plain measure definition should yield measure_sum aggregate""" + """Plain measure definition should yield measure_sum aggregate.""" cube = self.cube("amount_default") measures = cube.measures self.assertEqual(1, len(measures)) @@ -262,9 +263,7 @@ def test_explicit_implicit_combined(self): aggregates = cube.aggregates 
self.assertEqual(3, len(aggregates)) names = [a.name for a in aggregates] - self.assertSequenceEqual(["total", - "amount_min", - "amount_max"], names) + self.assertSequenceEqual(["total", "amount_min", "amount_max"], names) def test_backend_provided(self): cube = self.cube("backend_provided_aggregate") @@ -306,20 +305,12 @@ def test_implicit(self): # TODO: this should be in model.py tests cube = self.cube("default_aggregates") aggregates = [a.name for a in cube.aggregates] - self.assertSequenceEqual(["amount_sum", - "amount_min", - "amount_max" - ], - aggregates) + self.assertSequenceEqual(["amount_sum", "amount_min", "amount_max"], aggregates) def test_explicit(self): cube = self.cube("explicit_aggregates") aggregates = [a.name for a in cube.aggregates] - self.assertSequenceEqual(["amount_sum", - "amount_wma", - "count", - ], - aggregates) + self.assertSequenceEqual(["amount_sum", "amount_wma", "count"], aggregates) def test_explicit_conflict(self): with self.assertRaisesRegex(ModelError, "function mismatch"): @@ -327,13 +318,14 @@ def test_explicit_conflict(self): class LevelTestCase(unittest.TestCase): - """docstring for LevelTestCase""" + """docstring for LevelTestCase.""" + def test_initialization(self): - """Empty attribute list for new level should raise an exception """ + """Empty attribute list for new level should raise an exception.""" self.assertRaises(ModelError, Level, "month", []) def test_has_details(self): - """Level "has_details" flag""" + """Level "has_details" flag.""" attrs = [Attribute("year")] level = Level("year", attrs) self.assertFalse(level.has_details) @@ -343,12 +335,12 @@ def test_has_details(self): self.assertTrue(level.has_details) def test_operators(self): - """Level to string conversion""" + """Level to string conversion.""" attrs = [Attribute("foo")] self.assertEqual("date", str(Level("date", attrs))) def test_create(self): - """Create level from a dictionary""" + """Create level from a dictionary.""" desc = "year" level = 
Level.from_metadata(desc) self.assertIsInstance(level, Level) @@ -381,8 +373,8 @@ def test_create(self): "attributes": [ {"name": "month"}, {"name": "month_name", "locales": ["en", "sk"]}, - {"name": "month_sname", "locales": ["en", "sk"]} - ] + {"name": "month_sname", "locales": ["en", "sk"]}, + ], } level = Level.from_metadata(desc) @@ -409,8 +401,7 @@ def test_key_label_attributes(self): self.assertEqual("name", str(level.label_attribute)) attrs = [Attribute("info"), Attribute("code"), Attribute("name")] - level = Level("product", attrs, key="code", - label_attribute="name") + level = Level("product", attrs, key="code", label_attribute="name") self.assertIsInstance(level.key, Attribute) self.assertEqual("code", str(level.key)) self.assertIsInstance(level.label_attribute, Attribute) @@ -421,7 +412,7 @@ def test_key_label_attributes(self): "name": "product", "attributes": ["info", "code", "name"], "label_attribute": "name", - "key": "code" + "key": "code", } level = Level.from_metadata(desc) @@ -431,10 +422,7 @@ def test_key_label_attributes(self): self.assertEqual("name", str(level.label_attribute)) def test_level_inherit(self): - desc = { - "name": "product_type", - "label": "Product Type" - } + desc = {"name": "product_type", "label": "Product Type"} level = Level.from_metadata(desc) self.assertEqual(1, len(level.attributes)) @@ -443,15 +431,12 @@ def test_level_inherit(self): self.assertEqual("product_type", attr.name) self.assertEqual("Product Type", attr.label) - def test_comparison(self): - """Comparison of level instances""" + """Comparison of level instances.""" attrs = [Attribute("info"), Attribute("code"), Attribute("name")] - level1 = Level("product", attrs, key="code", - label_attribute="name") - level2 = Level("product", attrs, key="code", - label_attribute="name") + level1 = Level("product", attrs, key="code", label_attribute="name") + level2 = Level("product", attrs, key="code", label_attribute="name") level3 = Level("product", attrs) attrs = 
[Attribute("month"), Attribute("month_name")] level4 = Level("product", attrs) @@ -465,20 +450,21 @@ class HierarchyTestCase(unittest.TestCase): def setUp(self): self.levels = [ Level("year", attributes=[Attribute("year")]), - Level("month", - attributes=[ + Level( + "month", + attributes=[ Attribute("month"), Attribute("month_name"), - Attribute("month_sname") - ]), + Attribute("month_sname"), + ], + ), Level("day", attributes=[Attribute("day")]), - Level("week", attributes=[Attribute("week")]) + Level("week", attributes=[Attribute("week")]), ] self.level_names = [level.name for level in self.levels] self.dimension = Dimension("date", levels=self.levels) levels = [self.levels[0], self.levels[1], self.levels[2]] - self.hierarchy = Hierarchy("default", - levels) + self.hierarchy = Hierarchy("default", levels) def test_initialization(self): """No dimension on initialization should raise an exception.""" @@ -490,7 +476,7 @@ def test_initialization(self): @unittest.skip("fix this") def test_operators(self): - """Hierarchy operators len(), hier[] and level in hier""" + """Hierarchy operators len(), hier[] and level in hier.""" # __len__ self.assertEqual(3, len(self.hierarchy)) @@ -503,7 +489,7 @@ def test_operators(self): self.assertFalse("flower" in self.hierarchy) def test_levels_for(self): - """Levels for depth""" + """Levels for depth.""" levels = self.hierarchy.levels_for_depth(0) self.assertEqual([], levels) @@ -516,44 +502,38 @@ def test_levels_for(self): def test_level_ordering(self): """Ordering of levels (next, previous)""" self.assertEqual(self.levels[0], self.hierarchy.next_level(None)) - self.assertEqual(self.levels[1], - self.hierarchy.next_level(self.levels[0])) - self.assertEqual(self.levels[2], - self.hierarchy.next_level(self.levels[1])) + self.assertEqual(self.levels[1], self.hierarchy.next_level(self.levels[0])) + self.assertEqual(self.levels[2], self.hierarchy.next_level(self.levels[1])) self.assertEqual(None, 
self.hierarchy.next_level(self.levels[2])) self.assertEqual(None, self.hierarchy.previous_level(None)) self.assertEqual(None, self.hierarchy.previous_level(self.levels[0])) - self.assertEqual(self.levels[0], - self.hierarchy.previous_level(self.levels[1])) - self.assertEqual(self.levels[1], - self.hierarchy.previous_level(self.levels[2])) + self.assertEqual(self.levels[0], self.hierarchy.previous_level(self.levels[1])) + self.assertEqual(self.levels[1], self.hierarchy.previous_level(self.levels[2])) self.assertEqual(0, self.hierarchy.level_index(self.levels[0])) self.assertEqual(1, self.hierarchy.level_index(self.levels[1])) self.assertEqual(2, self.hierarchy.level_index(self.levels[2])) - self.assertRaises(HierarchyError, self.hierarchy.level_index, - self.levels[3]) + self.assertRaises(HierarchyError, self.hierarchy.level_index, self.levels[3]) def test_base_path(self): - """Test base paths""" + """Test base paths.""" self.assertTrue(self.hierarchy.path_is_base([2012, 1, 5])) self.assertFalse(self.hierarchy.path_is_base([2012, 1])) self.assertFalse(self.hierarchy.path_is_base([2012])) self.assertFalse(self.hierarchy.path_is_base([])) def test_attributes(self): - """Collecting attributes and keys""" + """Collecting attributes and keys.""" keys = [a.name for a in self.hierarchy.key_attributes()] self.assertEqual(["year", "month", "day"], keys) attrs = [a.name for a in self.hierarchy.all_attributes] - self.assertEqual(["year", "month", "month_name", "month_sname", "day"], - attrs) + self.assertEqual(["year", "month", "month_name", "month_sname", "day"], attrs) def test_copy(self): - class DummyDimension(object): + class DummyDimension: def __init__(self): self.name = "dummy" self.is_flat = False @@ -576,10 +556,14 @@ class DimensionTestCase(unittest.TestCase): def setUp(self): self.levels = [ Level("year", attributes=create_list_of(Attribute, ["year"])), - Level("month", attributes=create_list_of(Attribute, ["month", "month_name", - "month_sname"])), + Level( + 
"month", + attributes=create_list_of( + Attribute, ["month", "month_name", "month_sname"] + ), + ), Level("day", attributes=create_list_of(Attribute, ["day"])), - Level("week", attributes=create_list_of(Attribute, ["week"])) + Level("week", attributes=create_list_of(Attribute, ["week"])), ] self.level_names = [level.name for level in self.levels] self.dimension = Dimension("date", levels=self.levels) @@ -588,7 +572,7 @@ def setUp(self): self.hierarchy = Hierarchy("default", levels) def test_create(self): - """Dimension from a dictionary""" + """Dimension from a dictionary.""" dim = Dimension.from_metadata("year") self.assertIsInstance(dim, Dimension) self.assertEqual("year", dim.name) @@ -629,7 +613,7 @@ def test_create(self): self.assertEqual(1, len(dim.hierarchies)) def test_flat_dimension(self): - """Flat dimension and 'has details' flags""" + """Flat dimension and 'has details' flags.""" dim = Dimension.from_metadata("foo") self.assertTrue(dim.is_flat) self.assertFalse(dim.has_details) @@ -646,7 +630,7 @@ def test_flat_dimension(self): self.assertEqual("foo", attr.name) def test_comparisons(self): - """Comparison of dimension instances""" + """Comparison of dimension instances.""" dim1 = Dimension.from_metadata(DIM_DATE_DESC) dim2 = Dimension.from_metadata(DIM_DATE_DESC) @@ -682,17 +666,16 @@ def test_template(self): template = self.dimension.to_dict() template["hierarchies"] = [ {"name": "ym", "levels": ["year", "month"]}, - {"name": "ymd", "levels": ["year", "month", "day"]} + {"name": "ymd", "levels": ["year", "month", "day"]}, ] template["default_hierarchy_name"] = "ym" template = Dimension.from_metadata(template) dims = {"date": template} - desc = {"template": "date", "name":"another_date"} + desc = {"template": "date", "name": "another_date"} dim = Dimension.from_metadata(desc, dims) self.assertEqual(2, len(dim.hierarchies)) - self.assertEqual(["ym", "ymd"], - [hier.name for hier in dim.hierarchies]) + self.assertEqual(["ym", "ymd"], [hier.name for hier 
in dim.hierarchies]) def test_template_hierarchies(self): md = { @@ -703,17 +686,11 @@ def test_template_hierarchies(self): {"name": "ymd", "levels": ["year", "month", "day"]}, {"name": "ym", "levels": ["year", "month"]}, {"name": "y", "levels": ["year"]}, - ] + ], } dim_time = Dimension.from_metadata(md) templates = {"time": dim_time} - md = { - "name": "date", - "template": "time", - "hierarchies": [ - "ymd", "ym", "y" - ] - } + md = {"name": "date", "template": "time", "hierarchies": ["ymd", "ym", "y"]} dim_date = Dimension.from_metadata(md, templates) @@ -725,18 +702,14 @@ def test_template_hierarchies(self): def test_template_info(self): md = { "name": "template", - "levels": [ - { "name": "one", "info": {"units":"$", "format": "foo"}} - ] + "levels": [{"name": "one", "info": {"units": "$", "format": "foo"}}], } tempdim = Dimension.from_metadata(md) md = { "name": "dim", - "levels": [ - { "name": "one", "info": {"units":"USD"}} - ], - "template": "template" + "levels": [{"name": "one", "info": {"units": "USD"}}], + "template": "template", } templates = {"template": tempdim} @@ -748,23 +721,26 @@ def test_template_info(self): self.assertEqual(level.info["units"], "USD") self.assertEqual(level.info["format"], "foo") + class CubeTestCase(unittest.TestCase): def setUp(self): a = [DIM_DATE_DESC, DIM_PRODUCT_DESC, DIM_FLAG_DESC] self.measures = create_list_of(Measure, ["amount", "discount"]) self.details = create_list_of(Attribute, ["detail"]) self.dimensions = [Dimension.from_metadata(desc) for desc in a] - self.cube = Cube("contracts", - dimensions=self.dimensions, - measures=self.measures, - details=self.details) + self.cube = Cube( + "contracts", + dimensions=self.dimensions, + measures=self.measures, + details=self.details, + ) def test_create_cube(self): cube = { - "name": "cube", - "dimensions": ["date"], - "aggregates": ["record_count"], - "details": ["some_detail", "another_detail"] + "name": "cube", + "dimensions": ["date"], + "aggregates": 
["record_count"], + "details": ["some_detail", "another_detail"], } cube = Cube.from_metadata(cube) @@ -792,22 +768,23 @@ def test_attributes(self): refs = [a.ref for a in all_attributes] expected = [ - 'date.year', - 'date.month', - 'date.month_name', - 'date.day', - 'product.key', - 'product.name', - 'product.description', - 'flag', - 'detail', - 'amount', - 'discount'] + "date.year", + "date.month", + "date.month_name", + "date.day", + "product.key", + "product.name", + "product.description", + "flag", + "detail", + "amount", + "discount", + ] self.assertSequenceEqual(expected, refs) attributes = self.cube.get_attributes(["date.year", "product.name"]) refs = [a.ref for a in attributes] - expected = ['date.year', 'product.name'] + expected = ["date.year", "product.name"] self.assertSequenceEqual(expected, refs) attributes = self.cube.get_attributes(["amount"]) @@ -820,7 +797,7 @@ def test_attributes(self): @unittest.skip("deferred (needs workspace)") def test_to_dict(self): desc = self.cube.to_dict() - dims = dict((dim.name, dim) for dim in self.dimensions) + dims = {dim.name: dim for dim in self.dimensions} cube = Cube.from_metadata(desc, dims) self.assertEqual(self.cube.dimensions, cube.dimensions) self.assertEqual(self.cube.measures, cube.measures) @@ -829,51 +806,41 @@ def test_to_dict(self): @unittest.skip("requires revision") def test_links(self): # TODO: test link alias! 
- dims = dict((d.name, d) for d in self.dimensions) + dims = {d.name: d for d in self.dimensions} links = [{"name": "date"}] - cube = Cube("contracts", - dimension_links=links, - measures=self.measures) + cube = Cube("contracts", dimension_links=links, measures=self.measures) cube.link_dimensions(dims) self.assertEqual(len(cube.dimensions), 1) dim = cube.dimension("date") self.assertEqual(len(dim.hierarchies), 2) links = [{"name": "date"}, "product", "flag"] - cube = Cube("contracts", - dimension_links=links, - measures=self.measures) + cube = Cube("contracts", dimension_links=links, measures=self.measures) cube.link_dimensions(dims) self.assertEqual(len(cube.dimensions), 3) self.assertIsInstance(cube.dimension("flag"), Dimension) @unittest.skip("requires revision") def test_link_hierarchies(self): - dims = dict((d.name, d) for d in self.dimensions) + dims = {d.name: d for d in self.dimensions} links = [{"name": "date"}] - cube = Cube("contracts", - dimension_links=links, - measures=self.measures) + cube = Cube("contracts", dimension_links=links, measures=self.measures) cube.link_dimensions(dims) dim = cube.dimension("date") self.assertEqual(len(dim.hierarchies), 2) self.assertEqual(dim.hierarchy().name, "ymd") - links = [{"name": "date", "nonadditive":None}] - cube = Cube("contracts", - dimension_links=links, - measures=self.measures) + links = [{"name": "date", "nonadditive": None}] + cube = Cube("contracts", dimension_links=links, measures=self.measures) cube.link_dimensions(dims) dim = cube.dimension("date") self.assertEqual(len(dim.hierarchies), 2) self.assertEqual(dim.hierarchy().name, "ymd") links = [{"name": "date", "hierarchies": ["ym"]}] - cube = Cube("contracts", - dimension_links=links, - measures=self.measures) + cube = Cube("contracts", dimension_links=links, measures=self.measures) cube.link_dimensions(dims) dim = cube.dimension("date") self.assertEqual(len(dim.hierarchies), 1) @@ -886,11 +853,11 @@ def test_inherit_nonadditive(self): "name": 
"contracts", "dimensions": ["date", "product"], "nonadditive": "time", - "measures": ["amount", "discount"] + "measures": ["amount", "discount"], } dims = [Dimension.from_metadata(md) for md in dims] - dims = dict((dim.name, dim) for dim in dims) + dims = {dim.name: dim for dim in dims} cube = Cube.from_metadata(cube) @@ -900,7 +867,7 @@ def test_inherit_nonadditive(self): class ReadModelDescriptionTestCase(ModelTestCaseBase): def setUp(self): - super(ReadModelDescriptionTestCase, self).setUp() + super().setUp() def test_from_file(self): path = self.model_path("model.json") @@ -926,16 +893,17 @@ def test_from_bundle(self): path = self.model_path("model.json") desc = read_model_metadata_bundle(path) -def test_suite(): - suite = unittest.TestSuite() - - suite.addTest(unittest.makeSuite(AttributeTestCase)) - suite.addTest(unittest.makeSuite(LevelTestCase)) - suite.addTest(unittest.makeSuite(HierarchyTestCase)) - suite.addTest(unittest.makeSuite(DimensionTestCase)) - suite.addTest(unittest.makeSuite(CubeTestCase)) - suite.addTest(unittest.makeSuite(ModelTestCase)) - - suite.addTest(unittest.makeSuite(OldModelValidatorTestCase)) - return suite +# def test_suite(): +# suite = unittest.TestSuite() +# +# suite.addTest(unittest.makeSuite(AttributeTestCase)) +# suite.addTest(unittest.makeSuite(LevelTestCase)) +# suite.addTest(unittest.makeSuite(HierarchyTestCase)) +# suite.addTest(unittest.makeSuite(DimensionTestCase)) +# suite.addTest(unittest.makeSuite(CubeTestCase)) +# suite.addTest(unittest.makeSuite(ModelTestCase)) +# +# suite.addTest(unittest.makeSuite(OldModelValidatorTestCase)) +# +# return suite diff --git a/tests/metadata/test_physical.py b/tests/metadata/test_physical.py index 4d2e9b3e..d8ef32b4 100644 --- a/tests/metadata/test_physical.py +++ b/tests/metadata/test_physical.py @@ -3,6 +3,7 @@ from cubes.metadata.physical import Join, JoinKey from cubes.errors import ArgumentError + class SchemaUtilitiesTestCase(unittest.TestCase): """Test independent utility 
functions and structures.""" @@ -21,12 +22,10 @@ def test_to_join_key(self): key = JoinKey.from_dict({"column": "col"}) self.assertEqual(JoinKey(columns=["col"], table=None, schema=None), key) - key = JoinKey.from_dict({"table":"table", "column": "col"}) + key = JoinKey.from_dict({"table": "table", "column": "col"}) self.assertEqual(JoinKey(columns=["col"], table="table", schema=None), key) - key = JoinKey.from_dict({"schema":"schema", - "table":"table", - "column": "col"}) + key = JoinKey.from_dict({"schema": "schema", "table": "table", "column": "col"}) self.assertEqual(JoinKey(columns=["col"], table="table", schema="schema"), key) @@ -48,48 +47,50 @@ def test_to_join_key(self): @unittest.skip("Should be Join.from_dict()") def test_to_join(self): join = ("left", "right") - self.assertEqual(to_join(join), Join(to_join_key("left"), - to_join_key("right"), - None, - None)) + self.assertEqual( + to_join(join), Join(to_join_key("left"), to_join_key("right"), None, None) + ) join = ("left", "right", "alias") - self.assertEqual(to_join(join), Join(to_join_key("left"), - to_join_key("right"), - "alias", - None)) + self.assertEqual( + to_join(join), + Join(to_join_key("left"), to_join_key("right"), "alias", None), + ) join = ("left", "right", "alias", "match") - self.assertEqual(to_join(join), Join(to_join_key("left"), - to_join_key("right"), - "alias", - "match")) + self.assertEqual( + to_join(join), + Join(to_join_key("left"), to_join_key("right"), "alias", "match"), + ) # Dict join = {"master": "left", "detail": "right"} - self.assertEqual(to_join(join), Join(to_join_key("left"), - to_join_key("right"), - None, - None)) + self.assertEqual( + to_join(join), Join(to_join_key("left"), to_join_key("right"), None, None) + ) join = {"master": "left", "detail": "right", "alias": "alias"} - self.assertEqual(to_join(join), Join(to_join_key("left"), - to_join_key("right"), - "alias", - None)) + self.assertEqual( + to_join(join), + Join(to_join_key("left"), to_join_key("right"), 
"alias", None), + ) join = {"master": "left", "detail": "right", "method": "match"} - self.assertEqual(to_join(join), Join(to_join_key("left"), - to_join_key("right"), - None, - "match")) - - join = {"master": "left", "detail": "right", "alias": "alias", - "method": "match"} - self.assertEqual(to_join(join), Join(to_join_key("left"), - to_join_key("right"), - "alias", - "match")) + self.assertEqual( + to_join(join), + Join(to_join_key("left"), to_join_key("right"), None, "match"), + ) + + join = { + "master": "left", + "detail": "right", + "alias": "alias", + "method": "match", + } + self.assertEqual( + to_join(join), + Join(to_join_key("left"), to_join_key("right"), "alias", "match"), + ) # Error with self.assertRaises(ArgumentError): @@ -98,5 +99,3 @@ def test_to_join(self): # Error with self.assertRaises(ArgumentError): to_join(["onlyone"]) - - diff --git a/tests/metadata/test_providers.py b/tests/metadata/test_providers.py index 919da490..1de923a2 100644 --- a/tests/metadata/test_providers.py +++ b/tests/metadata/test_providers.py @@ -10,5 +10,3 @@ class MetadataProviderTestCase(unittest.TestCase): def test_nothing(self) -> None: pass - - diff --git a/tests/query/test_cells.py b/tests/query/test_cells.py index 59650285..5592f013 100644 --- a/tests/query/test_cells.py +++ b/tests/query/test_cells.py @@ -11,7 +11,7 @@ class CutsTestCase(CubesTestCaseBase): def setUp(self): - super(CutsTestCase, self).setUp() + super().setUp() self.provider = create_provider("browser_test.json") self.cube = self.provider.cube("transactions") @@ -30,9 +30,15 @@ def test_cut_from_dict(self): # d = {"type":"point", "path":[2010]} # self.assertRaises(Exception, cubes.cut_from_dict, d) - d = {"type": "point", "path": [2010], "dimension": "date", - "level_depth": 1, "hierarchy": None, "invert": False, - "hidden": False} + d = { + "type": "point", + "path": [2010], + "dimension": "date", + "level_depth": 1, + "hierarchy": None, + "invert": False, + "hidden": False, + } cut = 
cut_from_dict(d) tcut = PointCut("date", [2010]) @@ -40,18 +46,31 @@ def test_cut_from_dict(self): self.assertEqual(dict(d), tcut.to_dict()) self._assert_invert(d, cut, tcut) - d = {"type": "range", "from": [2010], "to": [2012, 10], "dimension": - "date", "level_depth": 2, "hierarchy": None, "invert": False, - "hidden": False} + d = { + "type": "range", + "from": [2010], + "to": [2012, 10], + "dimension": "date", + "level_depth": 2, + "hierarchy": None, + "invert": False, + "hidden": False, + } cut = cut_from_dict(d) tcut = RangeCut("date", [2010], [2012, 10]) self.assertEqual(tcut, cut) self.assertEqual(dict(d), tcut.to_dict()) self._assert_invert(d, cut, tcut) - d = {"type": "set", "paths": [[2010], [2012, 10]], "dimension": "date", - "level_depth": 2, "hierarchy": None, "invert": False, - "hidden": False} + d = { + "type": "set", + "paths": [[2010], [2012, 10]], + "dimension": "date", + "level_depth": 2, + "hierarchy": None, + "invert": False, + "hidden": False, + } cut = cut_from_dict(d) tcut = SetCut("date", [[2010], [2012, 10]]) self.assertEqual(tcut, cut) @@ -83,27 +102,26 @@ def test_cut_string_conversions(self): self.assertEqual(cut, cut_from_string("foo:123_ abc_")) cut = PointCut("foo", ["a-b"]) - self.assertEqual("foo:a\-b", str(cut)) - self.assertEqual(cut, cut_from_string("foo:a\-b")) + self.assertEqual(r"foo:a\-b", str(cut)) + self.assertEqual(cut, cut_from_string(r"foo:a\-b")) cut = PointCut("foo", ["a+b"]) self.assertEqual("foo:a+b", str(cut)) self.assertEqual(cut, cut_from_string("foo:a+b")) def test_special_characters(self): - self.assertEqual('\\:q\\-we,a\\\\sd\\;,100', - string_from_path([":q-we", "a\\sd;", 100])) + self.assertEqual( + "\\:q\\-we,a\\\\sd\\;,100", string_from_path([":q-we", "a\\sd;", 100]) + ) def test_string_from_path(self): - self.assertEqual('qwe,asd,100', - string_from_path(["qwe", "asd", 100])) - self.assertEqual('', string_from_path([])) - self.assertEqual('', string_from_path(None)) + self.assertEqual("qwe,asd,100", 
string_from_path(["qwe", "asd", 100])) + self.assertEqual("", string_from_path([])) + self.assertEqual("", string_from_path(None)) def test_path_from_string(self): - self.assertEqual(["qwe", "asd", "100"], - path_from_string('qwe,asd,100')) - self.assertEqual([], path_from_string('')) + self.assertEqual(["qwe", "asd", "100"], path_from_string("qwe,asd,100")) + self.assertEqual([], path_from_string("")) self.assertEqual([], path_from_string(None)) def test_set_cut_string(self): @@ -118,8 +136,8 @@ def test_set_cut_string(self): self.assertEqual(PointCut("foo", ["a+b"]), cut_from_string("foo:a+b")) cut = SetCut("foo", [["a-b"]]) - self.assertEqual("foo:a\-b", str(cut)) - self.assertEqual(PointCut("foo", ["a-b"]), cut_from_string("foo:a\-b")) + self.assertEqual(r"foo:a\-b", str(cut)) + self.assertEqual(PointCut("foo", ["a-b"]), cut_from_string(r"foo:a\-b")) def test_range_cut_string(self): cut = RangeCut("date", ["2010"], ["2011"]) @@ -130,13 +148,13 @@ def test_range_cut_string(self): self.assertEqual("date:2010-", str(cut)) cut = cut_from_string("date:2010-") if cut.to_path: - self.fail('there should be no to path, is: %s' % (cut.to_path, )) + self.fail(f"there should be no to path, is: {cut.to_path}") cut = RangeCut("date", None, ["2010"]) self.assertEqual("date:-2010", str(cut)) cut = cut_from_string("date:-2010") if cut.from_path: - self.fail('there should be no from path is: %s' % (cut.from_path, )) + self.fail(f"there should be no from path is: {cut.from_path}") cut = RangeCut("date", ["2010", "11", "12"], ["2011", "2", "3"]) self.assertEqual("date:2010,11,12-2011,2,3", str(cut)) @@ -158,7 +176,7 @@ def test_hierarchy_cut(self): class CellInteractiveSlicingTestCase(CubesTestCaseBase): def setUp(self): - super(CellInteractiveSlicingTestCase, self).setUp() + super().setUp() self.provider = create_provider("model.json") self.cube = self.provider.cube("contracts") @@ -184,7 +202,8 @@ def test_multi_slice(self): cuts_list = ( PointCut("date", [2010]), 
PointCut("cpv", [50, 20]), - PointCut("supplier", [1234])) + PointCut("supplier", [1234]), + ) cell_list = full_cube.multi_slice(cuts_list) self.assertEqual(3, len(cell_list.cuts)) @@ -218,10 +237,11 @@ def test_hierarchy_path(self): levels = hier.levels_for_path([1, 2, 3, 4]) self.assertEqual(len(levels), 4) names = [level.name for level in levels] - self.assertEqual(names, ['division', 'group', 'class', 'category']) + self.assertEqual(names, ["division", "group", "class", "category"]) - self.assertRaises(HierarchyError, hier.levels_for_path, - [1, 2, 3, 4, 5, 6, 7, 8]) + self.assertRaises( + HierarchyError, hier.levels_for_path, [1, 2, 3, 4, 5, 6, 7, 8] + ) @unittest.skip("Fix this") def test_hierarchy_drilldown_levels(self): @@ -230,10 +250,10 @@ def test_hierarchy_drilldown_levels(self): levels = hier.levels_for_path([], drilldown=True) self.assertEqual(len(levels), 1) - self.assertEqual(levels[0].name, 'division') + self.assertEqual(levels[0].name, "division") levels = hier.levels_for_path(None, drilldown=True) self.assertEqual(len(levels), 1) - self.assertEqual(levels[0].name, 'division') + self.assertEqual(levels[0].name, "division") def test_slice_drilldown(self): cut = PointCut("date", []) @@ -249,10 +269,10 @@ def test_slice_drilldown(self): self.assertEqual([2010, 1, 2], cell.cut_for_dimension("date").path) -def test_suite(): - suite = unittest.TestSuite() - - suite.addTest(unittest.makeSuite(AggregationBrowserTestCase)) - suite.addTest(unittest.makeSuite(CellsAndCutsTestCase)) - - return suite +# def test_suite(): +# suite = unittest.TestSuite() +# +# suite.addTest(unittest.makeSuite(AggregationBrowserTestCase)) +# suite.addTest(unittest.makeSuite(CellsAndCutsTestCase)) +# +# return suite diff --git a/tests/server/test_server.py b/tests/server/test_server.py index ed81c282..af4d023f 100644 --- a/tests/server/test_server.py +++ b/tests/server/test_server.py @@ -1,9 +1,9 @@ # -*- coding=utf -*- import unittest -from cubes import __version__ +from cubes 
import __version__, to_str import json from ..common import CubesTestCaseBase -from sqlalchemy import MetaData, Table, Column, Integer, String +from sqlalchemy import Table, Column, Integer, String from werkzeug.test import Client from werkzeug.wrappers import BaseResponse @@ -18,10 +18,9 @@ TEST_DB_URL = "sqlite:///" -@unittest.skip("Fix this") class SlicerTestCaseBase(CubesTestCaseBase): def setUp(self): - super(SlicerTestCaseBase, self).setUp() + super().setUp() self.config = ConfigParser() self.slicer = create_server(self.config) @@ -37,7 +36,7 @@ def get(self, path, *args, **kwargs): response = self.server.get(path, *args, **kwargs) try: - result = json.loads(str(response.data)) + result = json.loads(response.data) except ValueError: result = response.data @@ -60,11 +59,11 @@ def test_unknown(self): response, status = self.get("this_is_unknown") self.assertEqual(404, status) + @unittest.skip("We need to fix the model") class SlicerModelTestCase(SlicerTestCaseBase): - def setUp(self): - super(SlicerModelTestCase, self).setUp() + super().setUp() ws = Workspace() ws.register_default_store("sql", url=TEST_DB_URL) @@ -103,7 +102,9 @@ def test_no_cube(self): def test_get_cube(self): response, status = self.get("cube/sales/model") - import pdb; pdb.set_trace() + import pdb + + pdb.set_trace() self.assertEqual(200, status) self.assertIsInstance(response, dict) self.assertNotIn("error", response) @@ -125,8 +126,9 @@ def test_get_cube(self): self.assertIsInstance(aggregates, list) self.assertEqual(4, len(aggregates)) names = [a["name"] for a in aggregates] - self.assertCountEqual(["amount_sum", "amount_min", "discount_sum", - "record_count"], names) + self.assertCountEqual( + ["amount_sum", "amount_min", "discount_sum", "record_count"], names + ) def test_cube_dimensions(self): response, status = self.get("cube/sales/model") @@ -154,72 +156,72 @@ def test_cube_dimensions(self): self.assertEqual(True, dims[0]["has_details"]) +@unittest.skip("TODO") class 
SlicerAggregateTestCase(SlicerTestCaseBase): sql_engine = "sqlite:///" + def setUp(self): - super(SlicerAggregateTestCase, self).setUp() + super().setUp() + # This raises: NotImplementedError: Depreciated in this context self.workspace = self.create_workspace(model="server.json") self.cube = self.workspace.cube("aggregate_test") self.slicer.cubes_workspace = self.workspace - self.facts = Table("facts", self.metadata, - Column("id", Integer), - Column("id_date", Integer), - Column("id_item", Integer), - Column("amount", Integer) - ) - - self.dim_date = Table("date", self.metadata, - Column("id", Integer), - Column("year", Integer), - Column("month", Integer), - Column("day", Integer) - ) - - self.dim_item = Table("item", self.metadata, - Column("id", Integer), - Column("name", String) - ) + self.facts = Table( + "facts", + self.metadata, + Column("id", Integer), + Column("id_date", Integer), + Column("id_item", Integer), + Column("amount", Integer), + ) + + self.dim_date = Table( + "date", + self.metadata, + Column("id", Integer), + Column("year", Integer), + Column("month", Integer), + Column("day", Integer), + ) + + self.dim_item = Table( + "item", self.metadata, Column("id", Integer), Column("name", String) + ) self.metadata.create_all() data = [ - # Master-detail Match - ( 1, 20130901, 1, 20), - ( 2, 20130902, 1, 20), - ( 3, 20130903, 1, 20), - ( 4, 20130910, 1, 20), - ( 5, 20130915, 1, 20), - # -------- - # ∑ 100 - # No city dimension - ( 6, 20131001, 2, 200), - ( 7, 20131002, 2, 200), - ( 8, 20131004, 2, 200), - ( 9, 20131101, 3, 200), - (10, 20131201, 3, 200), - # -------- - # ∑ 1000 - # ======== - # ∑ 1100 - - ] + # Master-detail Match + (1, 20130901, 1, 20), + (2, 20130902, 1, 20), + (3, 20130903, 1, 20), + (4, 20130910, 1, 20), + (5, 20130915, 1, 20), + # -------- + # ∑ 100 + # No city dimension + (6, 20131001, 2, 200), + (7, 20131002, 2, 200), + (8, 20131004, 2, 200), + (9, 20131101, 3, 200), + (10, 20131201, 3, 200), + # -------- + # ∑ 1000 + # ======== 
+ # ∑ 1100 + ] self.load_data(self.facts, data) - data = [ - (1, "apple"), - (2, "pear"), - (3, "garlic"), - (4, "carrod") - ] + data = [(1, "apple"), (2, "pear"), (3, "garlic"), (4, "carrod")] self.load_data(self.dim_item, data) data = [] for day in range(1, 31): - row = (20130900+day, 2013, 9, day) + row = (20130900 + day, 2013, 9, day) data.append(row) self.load_data(self.dim_date, data) @@ -229,36 +231,32 @@ def test_aggregate_csv_headers(self): url = "cube/aggregate_test/aggregate?drilldown=date&format=csv" response, status = self.get(url) - response = compat.to_str(response) + response = to_str(response) reader = csv.reader(response.splitlines()) header = next(reader) - self.assertSequenceEqual(["Year", "Total Amount", "Item Count"], - header) + self.assertSequenceEqual(["Year", "Total Amount", "Item Count"], header) # Labels - explicit url = "cube/aggregate_test/aggregate?drilldown=date&format=csv&header=labels" response, status = self.get(url) - response = compat.to_str(response) + response = to_str(response) reader = csv.reader(response.splitlines()) header = next(reader) - self.assertSequenceEqual(["Year", "Total Amount", "Item Count"], - header) + self.assertSequenceEqual(["Year", "Total Amount", "Item Count"], header) # Names url = "cube/aggregate_test/aggregate?drilldown=date&format=csv&header=names" response, status = self.get(url) - response = compat.to_str(response) + response = to_str(response) reader = csv.reader(response.splitlines()) header = next(reader) - self.assertSequenceEqual(["date.year", "amount_sum", "count"], - header) + self.assertSequenceEqual(["date.year", "amount_sum", "count"], header) # None url = "cube/aggregate_test/aggregate?drilldown=date&format=csv&header=none" response, status = self.get(url) - response = compat.to_str(response) + response = to_str(response) reader = csv.reader(response.splitlines()) header = next(reader) - self.assertSequenceEqual(["2013", "100", "5"], - header) + self.assertSequenceEqual(["2013", "100", 
"5"], header) diff --git a/tests/sql/common.py b/tests/sql/common.py index 36f8b157..156d1f5e 100644 --- a/tests/sql/common.py +++ b/tests/sql/common.py @@ -9,8 +9,9 @@ # TODO: use the data.py version def create_table(engine, md, desc): - """Create a table according to description `desc`. The description - contains keys: + """Create a table according to description `desc`. The description contains + keys: + * `name` – table name * `columns` – list of column names * `types` – list of column types. If not specified, then `string` is @@ -21,13 +22,12 @@ def create_table(engine, md, desc): """ TYPES = { - "integer": sa.Integer, - "string": sa.String, - "date": sa.Date, - "id": sa.Integer, + "integer": sa.Integer, + "string": sa.String, + "date": sa.Date, + "id": sa.Integer, } - table = sa.Table(desc["name"], md, - sa.Column("id", sa.Integer, primary_key=True)) + table = sa.Table(desc["name"], md, sa.Column("id", sa.Integer, primary_key=True)) types = desc.get("types") if not types: @@ -36,7 +36,7 @@ def create_table(engine, md, desc): col_types = dict(zip(desc["columns"], desc["types"])) for name, type_ in col_types.items(): real_type = TYPES[type_] - if type_ == 'id': + if type_ == "id": col = sa.Column(name, real_type, primary_key=True) else: col = sa.Column(name, real_type) @@ -78,4 +78,3 @@ def assertColumnEqual(self, left, right): def table(self, name): """Return fully reflected table `name`""" return self.metadata.table(name, autoload=True) - diff --git a/tests/sql/dw/demo.py b/tests/sql/dw/demo.py index 06e9ae45..840b1f53 100644 --- a/tests/sql/dw/demo.py +++ b/tests/sql/dw/demo.py @@ -1,7 +1,6 @@ # -*- coding: utf-8 -*- """Database for testing the SQL browser – schema and data. - Contains: * star schema @@ -12,7 +11,6 @@ * measure fields that can be used for expressions * dimension with more than one attribute * folows a naming convention (ft_, dim_, _key, ...) 
- """ # # See: https://github.com/DataBrewery/cubes/issues/255 @@ -32,12 +30,12 @@ SRC_SALES = { "name": "src_sales", "columns": ( - ("id", "id"), - ("date", "date"), + ("id", "id"), + ("date", "date"), ("location", "string"), - ("item", "string"), + ("item", "string"), ("quantity", "integer"), - ("price", "integer"), + ("price", "integer"), ("discount", "integer"), ), # @@ -45,114 +43,109 @@ # * only one entry for date 2015-01-01 # "data": [ - ( 1, "2015-01-01", "here", "apricot", 1, 3, 0), - ( 2, "2015-01-02", "here", "plum", 2, 1, 0), - ( 3, "2015-01-03", "here", "goat", 1, 1, 0), - ( 4, "2015-01-04", "here", "apricot", 2, 6, 0), - ( 5, "2015-01-05", "there", "shirt", 2, 20, 10), - ( 6, "2015-02-01", "there", "jacket", 1, 50, 10), - ( 7, "2015-02-01", "there", "apricot", 2, 6, 0), - ( 8, "2015-03-01", "there", "apricot", 2, 6, 50), - ( 9, "2015-04-01", "unknown","apricot", 2, 6, 50), - ] + (1, "2015-01-01", "here", "apricot", 1, 3, 0), + (2, "2015-01-02", "here", "plum", 2, 1, 0), + (3, "2015-01-03", "here", "goat", 1, 1, 0), + (4, "2015-01-04", "here", "apricot", 2, 6, 0), + (5, "2015-01-05", "there", "shirt", 2, 20, 10), + (6, "2015-02-01", "there", "jacket", 1, 50, 10), + (7, "2015-02-01", "there", "apricot", 2, 6, 0), + (8, "2015-03-01", "there", "apricot", 2, 6, 50), + (9, "2015-04-01", "unknown", "apricot", 2, 6, 50), + ], } FACT_SALES = { "name": "fact_sales", "columns": ( - ("id", "id"), - ("date_key", "integer"), - ("item_key", "integer"), - ("category_key", "integer"), - ("department_key","integer"), - ("quantity", "integer"), - ("price", "integer"), - ("discount", "integer"), - ) + ("id", "id"), + ("date_key", "integer"), + ("item_key", "integer"), + ("category_key", "integer"), + ("department_key", "integer"), + ("quantity", "integer"), + ("price", "integer"), + ("discount", "integer"), + ), } FACT_SALES_DENORM = { "name": "fact_sales_denorm", "columns": ( - ("id", "id"), - ("date_key", "integer"), - ("date", "date"), - ("item_key", "integer"), - 
("item_name", "string"), + ("id", "id"), + ("date_key", "integer"), + ("date", "date"), + ("item_key", "integer"), + ("item_name", "string"), ("item_unit_price", "integer"), - ("category_key", "integer"), - ("category_name", "string"), - ("department_key", "integer"), + ("category_key", "integer"), + ("category_name", "string"), + ("department_key", "integer"), ("department_name", "string"), - ("quantity", "integer"), - ("price", "integer"), - ("discount", "integer"), - ) + ("quantity", "integer"), + ("price", "integer"), + ("discount", "integer"), + ), } DIM_ITEMS = { "name": "dim_item", "columns": [ - ("item_key", "id"), - ("name", "string"), - ("category_key", "integer"), - ("category", "string"), - ("unit_price", "integer") + ("item_key", "id"), + ("name", "string"), + ("category_key", "integer"), + ("category", "string"), + ("unit_price", "integer"), ], "data": [ - ( 1, "apricot", 1, "produce", 3), - ( 2, "plum", 1, "produce", 2), - ( 3, "carrot", 1, "produce", 1), - ( 4, "celery", 1, "produce", 2), - ( 5, "milk", 2, "dairy", 2), - ( 6, "cheese", 2, "dairy", 5), - ( 7, "bread", 3, "bakery", 3), - ( 8, "rolls", 3, "bakery", 1), - ( 9, "chicken", 4, "meat", 4), - (10, "beef", 4, "meat", 8), - (11, "goat", 4, "meat", 7), - - (12, "soap", 5, "hygiene", 1), - (13, "lotion", 5, "hygiene", 5), - (14, "shirt", 6, "formal", 20), - (15, "pants", 6, "formal", 30), - (16, "jacket", 7, "casual", 50), - (17, "shorts", 7, "casual", 25), - ] + (1, "apricot", 1, "produce", 3), + (2, "plum", 1, "produce", 2), + (3, "carrot", 1, "produce", 1), + (4, "celery", 1, "produce", 2), + (5, "milk", 2, "dairy", 2), + (6, "cheese", 2, "dairy", 5), + (7, "bread", 3, "bakery", 3), + (8, "rolls", 3, "bakery", 1), + (9, "chicken", 4, "meat", 4), + (10, "beef", 4, "meat", 8), + (11, "goat", 4, "meat", 7), + (12, "soap", 5, "hygiene", 1), + (13, "lotion", 5, "hygiene", 5), + (14, "shirt", 6, "formal", 20), + (15, "pants", 6, "formal", 30), + (16, "jacket", 7, "casual", 50), + (17, "shorts", 7, 
"casual", 25), + ], } DIM_CATEGORIES = { "name": "dim_category", "columns": [ - ("category_key", "id"), - ("name", "string"), + ("category_key", "id"), + ("name", "string"), ("department_key", "integer"), - ("department", "string") + ("department", "string"), ], "data": [ (1, "produce", 1, "grocery"), - (2, "dairy", 1, "grocery"), - (3, "bakery", 1, "grocery"), - (4, "meat", 1, "grocery"), + (2, "dairy", 1, "grocery"), + (3, "bakery", 1, "grocery"), + (4, "meat", 1, "grocery"), (5, "hygiene", 2, "body"), - (6, "formal", 3, "fashion"), - (7, "casual", 3, "fashion"), - ] + (6, "formal", 3, "fashion"), + (7, "casual", 3, "fashion"), + ], } DIM_DEPARTMENTS = { "name": "dim_department", - "columns": [ - ("department_key", "id"), - ("name", "string"), - ("manager", "string"), - ], + "columns": [("department_key", "id"), ("name", "string"), ("manager", "string")], "data": [ (1, "grocery", "Michael"), - (2, "body", "Marek"), + (2, "body", "Marek"), (3, "fashion", "Sebastian"), - ] + ], } @@ -162,7 +155,7 @@ month_to_quarter = lambda month: ((month - 1) // 3) + 1 -class TinyDemoDataWarehouse(object): +class TinyDemoDataWarehouse: def __init__(self, url=None, schema=None, recreate=False): if "CUBES_TEST_DB" in os.environ: url = os.environ["CUBES_TEST_DB"] @@ -172,8 +165,8 @@ def __init__(self, url=None, schema=None, recreate=False): self.engine = sa.create_engine(url) if recreate: - self.engine.execute("DROP SCHEMA IF EXISTS {} CASCADE".format(schema)) - self.engine.execute("CREATE SCHEMA {}".format(schema)) + self.engine.execute(f"DROP SCHEMA IF EXISTS {schema} CASCADE") + self.engine.execute(f"CREATE SCHEMA {schema}") self.md = sa.MetaData(self.engine, schema=schema) self.schema = schema @@ -181,6 +174,7 @@ def __init__(self, url=None, schema=None, recreate=False): def create_table(self, desc, name=None): """Create a table according to description `desc`. 
The description contains keys: + * `name` – table name * `columns` – list of column names * `types` – list of column types. If not specified, then `string` is @@ -191,10 +185,10 @@ def create_table(self, desc, name=None): """ TYPES = { - "integer": sa.Integer, - "string": sa.String, - "date": sa.DateTime, - "id": sa.Integer, + "integer": sa.Integer, + "string": sa.String, + "date": sa.DateTime, + "id": sa.Integer, } name = name or desc["name"] @@ -210,7 +204,7 @@ def create_table(self, desc, name=None): real_type = TYPES[type_] - if type_ == 'id': + if type_ == "id": col = sa.Column(name, real_type, primary_key=True) else: col = sa.Column(name, real_type) @@ -241,23 +235,26 @@ def create_table(self, desc, name=None): return table def create_date_dimension(self): - """Creates and populates the date dimension""" - - table = sa.Table("dim_date", self.md, - # sa.Column("date_key", sa.Integer, primary_key=True), - sa.Column("date_key", sa.Integer), - sa.Column("date", sa.DateTime), - sa.Column("year", sa.Integer), - sa.Column("quarter", sa.Integer), - sa.Column("month", sa.Integer), - sa.Column("month_name", sa.String), - sa.Column("month_sname", sa.String), - sa.Column("day", sa.Integer)) + """Creates and populates the date dimension.""" + + table = sa.Table( + "dim_date", + self.md, + # sa.Column("date_key", sa.Integer, primary_key=True), + sa.Column("date_key", sa.Integer), + sa.Column("date", sa.DateTime), + sa.Column("year", sa.Integer), + sa.Column("quarter", sa.Integer), + sa.Column("month", sa.Integer), + sa.Column("month_name", sa.String), + sa.Column("month_sname", sa.String), + sa.Column("day", sa.Integer), + ) self.md.create_all() - start = date(2014,1,1) - end = date(2016,12,31) + start = date(2014, 1, 1) + end = date(2016, 12, 31) current = start values = [] @@ -273,7 +270,7 @@ def create_date_dimension(self): "month": current.month, "month_name": current.strftime("%B"), "month_sname": current.strftime("%b"), - "day": current.day + "day": current.day, } 
values.append(record) if len(values) > 100: @@ -288,18 +285,19 @@ def table(self, name): return sa.Table(name, self.md, autoload=True) def mapping_from_table(self, table_name, key_name, values): - """Returns a dictionary constructed from table `table_name` where - `key` is name of the key column (presumably unique) and `value` is - name of a mapping values. + """Returns a dictionary constructed from table `table_name` where `key` + is name of the key column (presumably unique) and `value` is name of a + mapping values. - Keys are ordered for nicer debugging.""" + Keys are ordered for nicer debugging. + """ mapping = OrderedDict() table = self.table(table_name) if not isinstance(values, (tuple, list)): - values = (values, ) + values = (values,) multi = False else: multi = True @@ -322,8 +320,10 @@ def mapping_from_table(self, table_name, key_name, values): return mapping def rows(self, table_name, columns=None): - """Return an interable of rows from table `table_name`. If `columns` - is specified then yield only those columns.""" + """Return an interable of rows from table `table_name`. + + If `columns` is specified then yield only those columns. + """ table = self.table(table_name) if columns: @@ -354,7 +354,7 @@ def __init__(self, *args, **kwargs): with open(path) as f: metadata = json.load(f) - super(TinyDemoModelProvider, self).__init__(metadata) + super().__init__(metadata) # TODO: improve this in the Provider class itself # def cube(self, name): @@ -362,7 +362,8 @@ def __init__(self, *args, **kwargs): # return cube # self.link -class TinyDimension(object): + +class TinyDimension: def __init__(self, table, rows): """Create a tiny dimension. First column of the table is assumed to be surrogate key and second column natural key. 
@@ -400,8 +401,7 @@ def date_to_key(date): def create_demo_dw(url, schema, recreate): dw = TinyDemoDataWarehouse(url, schema, recreate=recreate) - if "CUBES_TEST_DB" in os.environ \ - and "CUBES_TEST_DB_REUSE" in os.environ: + if "CUBES_TEST_DB" in os.environ and "CUBES_TEST_DB_REUSE" in os.environ: return dw dw.create_table(SRC_SALES) @@ -439,7 +439,7 @@ def create_demo_dw(url, schema, recreate): "department_key": dept_key, "quantity": row["quantity"], "price": row["price"], - "discount": row["discount"] + "discount": row["discount"], } ft_values.append(record) @@ -462,8 +462,4 @@ def create_demo_dw(url, schema, recreate): if __name__ == "__main__": - dw = create_demo_dw( - "postgres://localhost/cubes_test", - schema="test", - recreate=True) - + dw = create_demo_dw("postgres://localhost/cubes_test", schema="test", recreate=True) diff --git a/tests/sql/test_aggregates.py b/tests/sql/test_aggregates.py index a1cfe25d..549fd666 100644 --- a/tests/sql/test_aggregates.py +++ b/tests/sql/test_aggregates.py @@ -6,42 +6,46 @@ from json import dumps + def printable(obj): return dumps(obj, indent=4) + # TODO: This has to be dispersed to other modules @unittest.skip("Fix this") class AggregatesTestCase(CubesTestCaseBase): sql_engine = "sqlite:///" def setUp(self): - super(AggregatesTestCase, self).setUp() + super().setUp() - self.facts = Table("facts", self.metadata, - Column("id", Integer), - Column("year", Integer), - Column("amount", Integer), - Column("price", Integer), - Column("discount", Integer) - ) + self.facts = Table( + "facts", + self.metadata, + Column("id", Integer), + Column("year", Integer), + Column("amount", Integer), + Column("price", Integer), + Column("discount", Integer), + ) self.metadata.create_all() data = [ - ( 1, 2010, 1, 100, 0), - ( 2, 2010, 2, 200, 10), - ( 3, 2010, 4, 300, 0), - ( 4, 2010, 8, 400, 20), - ( 5, 2011, 1, 500, 0), - ( 6, 2011, 2, 600, 40), - ( 7, 2011, 4, 700, 0), - ( 8, 2011, 8, 800, 80), - ( 9, 2012, 1, 100, 0), - (10, 2012, 2, 
200, 0), - (11, 2012, 4, 300, 0), + (1, 2010, 1, 100, 0), + (2, 2010, 2, 200, 10), + (3, 2010, 4, 300, 0), + (4, 2010, 8, 400, 20), + (5, 2011, 1, 500, 0), + (6, 2011, 2, 600, 40), + (7, 2011, 4, 700, 0), + (8, 2011, 8, 800, 80), + (9, 2012, 1, 100, 0), + (10, 2012, 2, 200, 0), + (11, 2012, 4, 300, 0), (12, 2012, 8, 400, 10), - (13, 2013, 1, 500, 0), - (14, 2013, 2, 600, 0), - (15, 2013, 4, 700, 0), + (13, 2013, 1, 500, 0), + (14, 2013, 2, 600, 0), + (15, 2013, 4, 700, 0), (16, 2013, 8, 800, 20), ] @@ -67,5 +71,6 @@ def test_post_calculation(self): result = browser.aggregate(drilldown=["year"]) cells = list(result.cells) aggregates = sorted(cells[0].keys()) - self.assertSequenceEqual(['amount_sma', 'amount_sum', 'count', 'year'], - aggregates) + self.assertSequenceEqual( + ["amount_sma", "amount_sum", "count", "year"], aggregates + ) diff --git a/tests/sql/test_browser.py b/tests/sql/test_browser.py index 772d0064..0f0ba182 100644 --- a/tests/sql/test_browser.py +++ b/tests/sql/test_browser.py @@ -19,20 +19,17 @@ CONNECTION = "sqlite://" + @unittest.skip("fix this") class SQLQueryContextTestCase(SQLTestCase): @classmethod def setUpClass(self): self.dw = create_demo_dw(CONNECTION, None, False) - self.store = SQLStore(engine=self.dw.engine, - metadata=self.dw.md) + self.store = SQLStore(engine=self.dw.engine, metadata=self.dw.md) self.provider = TinyDemoModelProvider() - naming = { - "fact_prefix": "fact_", - "dimension_prefix": "dim_" - } + naming = {"fact_prefix": "fact_", "dimension_prefix": "dim_"} naming = distill_naming(naming) self.cube = self.provider.cube("sales") @@ -42,18 +39,20 @@ def setUpClass(self): mappings = mapper.map_base_attributes() joins = [to_join(join) for join in self.cube.joins] - self.star = StarSchema(self.cube.name, - self.dw.md, - mappings=mappings, - fact=mapper.fact_name, - joins=joins) + self.star = StarSchema( + self.cube.name, + self.dw.md, + mappings=mappings, + fact=mapper.fact_name, + joins=joins, + ) # Helper methods def 
create_context(self, attributes): collected = self.cube.collect_dependencies(attributes) - context = QueryContext(self.star, - attributes=collected, - hierarchies=self.cube.distilled_hierarchies) + context = QueryContext( + self.star, attributes=collected, hierarchies=self.cube.distilled_hierarchies + ) return context def dimension(self, name): @@ -65,10 +64,12 @@ def table(self, name): def execute(self, *args, **kwargs): return self.dw.engine.execute(*args, **kwargs) + class SQLStatementsTestCase(SQLQueryContextTestCase): """"Test basic SQL statement generation in the browser.""" + def setUp(self): - super(SQLStatementsTestCase, self).setUp() + super().setUp() attrs = self.dimension("item").attributes attrs += self.dimension("category").attributes @@ -77,12 +78,10 @@ def setUp(self): self.context = self.create_context(attrs) def select(self, attrs, whereclause=None): - """Returns a select statement from the star view""" + """Returns a select statement from the star view.""" columns = [self.star.column(attr) for attr in attrs] - return sa.select(columns, - from_obj=self.context.star, - whereclause=whereclause) + return sa.select(columns, from_obj=self.context.star, whereclause=whereclause) def test_attribute_column(self): """Test proper selection of attribute column.""" @@ -90,16 +89,18 @@ def test_attribute_column(self): dim_item = self.table("dim_item") dim_category = self.table("dim_category") - self.assertColumnEqual(self.context.column("item.name"), - dim_item.columns["name"]) + self.assertColumnEqual( + self.context.column("item.name"), dim_item.columns["name"] + ) - self.assertColumnEqual(self.context.column("category.name"), - dim_category.columns["name"]) + self.assertColumnEqual( + self.context.column("category.name"), dim_category.columns["name"] + ) # TODO: Test derived column + def test_condition_for_point(self): - condition = self.context.condition_for_point(self.dimension("item"), - ["1"]) + condition = 
self.context.condition_for_point(self.dimension("item"), ["1"]) select = self.select([FACT_KEY_LABEL], condition) keys = [row[FACT_KEY_LABEL] for row in self.execute(select)] @@ -117,8 +118,9 @@ def test_condition_for_hierarchy_point(self): # Note: # This test requires that there is only one item for 2015-01-01 # See data in DW demo - condition = self.context.condition_for_point(self.dimension("date"), - [2015,1,1]) + condition = self.context.condition_for_point( + self.dimension("date"), [2015, 1, 1] + ) select = self.select([FACT_KEY_LABEL], condition) keys = [row[FACT_KEY_LABEL] for row in self.execute(select)] @@ -133,45 +135,44 @@ def test_condition_for_hierarchy_point(self): @unittest.skip("Test missing") def test_range_condition(self): - """"Test Browser.range_condition""" + """"Test Browser.range_condition.""" # Test single level paths # Test multi-level paths # Test uneven paths # Test lower bound only # Test upper bound only + @unittest.skip("Tests missing") class SQLAggregateTestCase(SQLQueryContextTestCase): def setUp(self): super(self, SQLAggregateTestCase).setUp(self) def test_aggregate_base(self): - """Aggregate all aggregates without any cell and no drilldown""" + """Aggregate all aggregates without any cell and no drilldown.""" def test_aggregate_point(self): - """Aggregate with point cut""" + """Aggregate with point cut.""" def test_aggregate_set(self): - """Aggregate with set cut""" + """Aggregate with set cut.""" def test_aggregate_range(self): - """Aggregate with range cut""" + """Aggregate with range cut.""" def test_aggregate_multiple(self): - """Aggregate with multiple cuts""" + """Aggregate with multiple cuts.""" def test_aggregate_negative(self): """Aggregate with negative cut (point, set, range)""" def test_drilldown(self): - """Test basic drilldown""" + """Test basic drilldown.""" # Test 1 dimension, no cell # Test 2-3 dimensions def test_drilldown_implicit(self): - """Test implicit level from drilldown and cell""" + """Test implicit 
level from drilldown and cell.""" def test_drilldown_explicit(self): - """Test drilldown with explicit hierarchy level""" - - + """Test drilldown with explicit hierarchy level.""" diff --git a/tests/sql/test_expression.py b/tests/sql/test_expression.py index 03faa943..0f741807 100644 --- a/tests/sql/test_expression.py +++ b/tests/sql/test_expression.py @@ -20,25 +20,23 @@ class SQLExpressionTestCase(SQLTestCase): def setUpClass(self): self.engine = sa.create_engine(CONNECTION) metadata = sa.MetaData(self.engine) - self.table = sa.Table("data", metadata, - sa.Column("id", sa.Integer), - sa.Column("price", sa.Integer), - sa.Column("quantity", sa.Integer) - ) + self.table = sa.Table( + "data", + metadata, + sa.Column("id", sa.Integer), + sa.Column("price", sa.Integer), + sa.Column("quantity", sa.Integer), + ) metadata.create_all() insert = self.table.insert() - data = [[1, 10, 1], - [2, 20, 1], - [3, 40, 2], - [4, 80, 3]] + data = [[1, 10, 1], [2, 20, 1], [3, 40, 2], [4, 80, 3]] for row in data: self.engine.execute(insert.values(row)) self.bases = ["id", "price", "quantity"] - self.columns = {attr:self.table.columns[attr] - for attr in self.bases} + self.columns = {attr: self.table.columns[attr] for attr in self.bases} def setUp(self): self.context = SQLExpressionContext(self.columns) @@ -52,36 +50,35 @@ def execute(self, *args, **kwargs): def assertExpressionEqual(self, left, right): """Asserts that the `left` and `right` statement expressions are equal - by pulling out the data from the table and testing whether the - returned sequences are equal.""" + by pulling out the data from the table and testing whether the returned + sequences are equal.""" stmt = sa.select([left.label("value")], from_obj=self.table) result = self.engine.execute(stmt) left_result = [row["value"] for row in result] - stmt = sa.select([right.label("value")], from_obj=self.table) result = self.engine.execute(stmt) right_result = [row["value"] for row in result] 
self.assertCountEqual(left_result, right_result) - def test_instance(self): - self.assertIsInstance(self.compiler.compile("id", self.context), - ColumnElement) + self.assertIsInstance(self.compiler.compile("id", self.context), ColumnElement) - self.assertIsInstance(self.compiler.compile("1", self.context), - ColumnElement) + self.assertIsInstance(self.compiler.compile("1", self.context), ColumnElement) - self.assertIsInstance(self.compiler.compile("'text'", self.context), - ColumnElement) + self.assertIsInstance( + self.compiler.compile("'text'", self.context), ColumnElement + ) - self.assertIsInstance(self.compiler.compile("1 + 1", self.context), - ColumnElement) + self.assertIsInstance( + self.compiler.compile("1 + 1", self.context), ColumnElement + ) - self.assertIsInstance(self.compiler.compile("'text' + 1", self.context), - ColumnElement) + self.assertIsInstance( + self.compiler.compile("'text' + 1", self.context), ColumnElement + ) def test_simple(self): column = self.compiler.compile("id", self.context) @@ -89,18 +86,17 @@ def test_simple(self): def test_with_constant(self): column = self.compiler.compile("price + 1", self.context) - self.assertExpressionEqual(self.table.columns["price"] + 1, - column) + self.assertExpressionEqual(self.table.columns["price"] + 1, column) column = self.compiler.compile("price * 10 + 1", self.context) - self.assertExpressionEqual((self.table.columns["price"] * 10) + 1, - column) + self.assertExpressionEqual((self.table.columns["price"] * 10) + 1, column) def test_multiple_columns(self): column = self.compiler.compile("price * quantity", self.context) - self.assertExpressionEqual(self.table.columns["price"] - * self.table.columns["quantity"], - column) + self.assertExpressionEqual( + self.table.columns["price"] * self.table.columns["quantity"], column + ) + def test_unknown(self): with self.assertRaisesRegex(ExpressionError, "unknown"): column = self.compiler.compile("unknown", self.context) @@ -113,11 +109,10 @@ def 
test_incremental_context(self): self.context.add_column("total", column) column = self.compiler.compile("total", self.context) - self.assertExpressionEqual(self.table.columns["price"] - * self.table.columns["quantity"], - column) + self.assertExpressionEqual( + self.table.columns["price"] * self.table.columns["quantity"], column + ) def test_function(self): column = self.compiler.compile("min(price, 0)", self.context) - self.assertExpressionEqual(sa.func.min(self.table.columns["price"], 0), - column) + self.assertExpressionEqual(sa.func.min(self.table.columns["price"], 0), column) diff --git a/tests/sql/test_mapper.py b/tests/sql/test_mapper.py index 492b6bfe..409d038f 100644 --- a/tests/sql/test_mapper.py +++ b/tests/sql/test_mapper.py @@ -5,17 +5,15 @@ from ..common import CubesTestCaseBase, create_provider + class MapperTestCase(CubesTestCaseBase): def setUp(self): - super(MapperTestCase, self).setUp() + super().setUp() self.provider = create_provider("mapper_test.json") self.cube = self.provider.cube("sales") - naming = { - "dimension_prefix": "dim_", - "dimension_suffix": "_dim" - } + naming = {"dimension_prefix": "dim_", "dimension_suffix": "_dim"} self.naming = distill_naming(naming) self.mapper = StarSchemaMapper(self.cube, self.naming) @@ -23,7 +21,7 @@ def setUp(self): "product.name": "product.product_name", "product.category": "product.category_id", "subcategory.name.en": "subcategory.subcategory_name_en", - "subcategory.name.sk": "subcategory.subcategory_name_sk" + "subcategory.name.sk": "subcategory.subcategory_name_sk", } def test_logical_reference(self): @@ -45,7 +43,9 @@ def test_logical_reference(self): def assertMapping(self, expected, logical_ref, mapper=None): """Create string reference by concatentanig table and column name. - No schema is expected (is ignored).""" + + No schema is expected (is ignored). 
+ """ attr = self.cube.attribute(logical_ref) mapper = mapper or self.mapper @@ -77,7 +77,8 @@ def test_physical_refs_flat_dims(self): self.assertMapping("sales.flag", "flag") def test_physical_refs_facts(self): - """Testing correct mappings of fact attributes in physical references""" + """Testing correct mappings of fact attributes in physical + references.""" fact = self.cube.fact self.cube.fact = None @@ -87,7 +88,7 @@ def test_physical_refs_facts(self): def test_physical_refs_with_mappings_and_locales(self): """Testing mappings of mapped attributes and localized attributes in - physical references""" + physical references.""" self.mapper.mappings = self.cube.mappings # Test defaults @@ -102,16 +103,17 @@ def test_physical_refs_with_mappings_and_locales(self): self.assertMapping("dim_date_dim.month_name", "date.month_name") - self.assertMapping("dim_category_dim.category_name_en", - "product.category_name") + self.assertMapping("dim_category_dim.category_name_en", "product.category_name") - self.assertMapping("dim_category_dim.category_name_sk", - "product.category_name", sk_mapper) + self.assertMapping( + "dim_category_dim.category_name_sk", "product.category_name", sk_mapper + ) # This should default to 'en' since we don't have 'de' locale and the # 'en' locale is the default one - self.assertMapping("dim_category_dim.category_name_en", - "product.category_name", de_mapper) + self.assertMapping( + "dim_category_dim.category_name_en", "product.category_name", de_mapper + ) # Test with mapping self.assertMapping("dim_product_dim.product_name", "product.name") @@ -119,17 +121,19 @@ def test_physical_refs_with_mappings_and_locales(self): # The product name is not localized, we should get the same for any # mapper - self.assertMapping("dim_product_dim.product_name", "product.name", - sk_mapper) - self.assertMapping("dim_product_dim.product_name", "product.name", - de_mapper) - - self.assertMapping("dim_category_dim.subcategory_name_en", - 
"product.subcategory_name") - self.assertMapping("dim_category_dim.subcategory_name_sk", - "product.subcategory_name", - sk_mapper) - self.assertMapping("dim_category_dim.subcategory_name_en", - "product.subcategory_name", - de_mapper) - + self.assertMapping("dim_product_dim.product_name", "product.name", sk_mapper) + self.assertMapping("dim_product_dim.product_name", "product.name", de_mapper) + + self.assertMapping( + "dim_category_dim.subcategory_name_en", "product.subcategory_name" + ) + self.assertMapping( + "dim_category_dim.subcategory_name_sk", + "product.subcategory_name", + sk_mapper, + ) + self.assertMapping( + "dim_category_dim.subcategory_name_en", + "product.subcategory_name", + de_mapper, + ) diff --git a/tests/sql/test_query.py b/tests/sql/test_query.py index 0e8b3ec2..3859c220 100644 --- a/tests/sql/test_query.py +++ b/tests/sql/test_query.py @@ -24,40 +24,40 @@ BASE_FACT = { "name": "test", - "columns": ["date", "category", "amount"], - "types": ["date", "string", "integer"], + "columns": ["date", "category", "amount"], + "types": ["date", "string", "integer"], "data": [ - ["2014-01-01", "A", 1], - ["2014-02-01", "B", 2], - ["2014-03-01", "C", 4], - ["2014-04-01", "D", 8], - ] + ["2014-01-01", "A", 1], + ["2014-02-01", "B", 2], + ["2014-03-01", "C", 4], + ["2014-04-01", "D", 8], + ], } DIM_CATEGORY = { "name": "dim_category", - "columns": ["category", "label", "size"], - "types": ["string", "string", "integer"], + "columns": ["category", "label", "size"], + "types": ["string", "string", "integer"], "data": [ - ["A", "apple", 2], - ["B", "blueberry", 1], - ["C", "cantaloupe", 4], - ["D", "date", 1], - ["E", "e-fruit", 0], - ] + ["A", "apple", 2], + ["B", "blueberry", 1], + ["C", "cantaloupe", 4], + ["D", "date", 1], + ["E", "e-fruit", 0], + ], } DIM_SIZE = { "name": "dim_size", - "columns": ["size", "label"], - "types": ["integer", "string"], + "columns": ["size", "label"], + "types": ["integer", "string"], "data": [ - [0, "invisible"], - [1, 
"small"], - [2, "medium"], - [4, "large"], - [8, "very large"], - ] + [0, "invisible"], + [1, "small"], + [2, "medium"], + [4, "large"], + [8, "very large"], + ], } @@ -70,7 +70,7 @@ def setUp(self): # TODO: do the same for a joined table and aliased joined table def test_physical_table(self): - """Test denormalized table selection of few columns""" + """Test denormalized table selection of few columns.""" # Test passing fact by table object star = StarSchema("star", self.md, {}, self.test_fact) self.assertIs(star.physical_table("test"), self.test_fact) @@ -81,8 +81,7 @@ def test_physical_table(self): # Test passing fact by name and in a list of tables - star = StarSchema("star", self.md, {}, "test", - tables = {"test": self.test_fact}) + star = StarSchema("star", self.md, {}, "test", tables={"test": self.test_fact}) self.assertIs(star.physical_table("test"), self.test_fact) @@ -91,7 +90,7 @@ def test_physical_table(self): star.physical_table("imaginary") def test_collected_tables_fact_only(self): - """Test single table references""" + """Test single table references.""" key = (None, "test") star = StarSchema("star", self.md, {}, self.test_fact) @@ -109,8 +108,7 @@ def test_collected_tables_fact_only(self): self.assertIs(ref.table, self.test_fact) # Test passing fact by name and in a list of tables - star = StarSchema("star", self.md, {}, "test", - tables = {"test": self.test_fact}) + star = StarSchema("star", self.md, {}, "test", tables={"test": self.test_fact}) ref = star.table(key) self.assertIs(ref.table, self.test_fact) @@ -123,7 +121,7 @@ def test_fact_columns(self): """Test fetching fact columns.""" mappings = { "category": Column(None, "test", "category", None, None), - "total": Column(None, "test", "amount", None, None), + "total": Column(None, "test", "amount", None, None), } star = StarSchema("star", self.md, mappings, self.test_fact) @@ -146,9 +144,7 @@ def test_fact_columns(self): def test_unknown_column(self): """Test fetching fact columns.""" - 
mappings = { - "category": Column(None, "test", "__unknown__", None, None), - } + mappings = {"category": Column(None, "test", "__unknown__", None, None)} star = StarSchema("star", self.md, mappings, self.test_fact) @@ -156,10 +152,8 @@ def test_unknown_column(self): column = star.column("category") def test_mapping_extract(self): - """Test that mapping.extract works""" - mappings = { - "year": Column(None, "test", "date", "year", None), - } + """Test that mapping.extract works.""" + mappings = {"year": Column(None, "test", "date", "year", None)} star = StarSchema("star", self.md, mappings, self.test_fact) @@ -173,8 +167,8 @@ def test_mapping_extract(self): def test_required_tables_with_no_joins(self): mappings = { "category": Column(None, "test", "category", None, None), - "amount": Column(None, "test", "amount", None, None), - "year": Column(None, "test", "date", "year", None), + "amount": Column(None, "test", "amount", None, None), + "year": Column(None, "test", "date", "year", None), } schema = StarSchema("star", self.md, mappings, self.test_fact) @@ -190,11 +184,11 @@ def test_required_tables_with_no_joins(self): def test_star_basic(self): """Test selection from the very basic star – no joins, just one - table""" + table.""" mappings = { "category": Column(None, "test", "category", None, None), - "total": Column(None, "test", "amount", None, None), - "year": Column(None, "test", "date", "year", None), + "total": Column(None, "test", "amount", None, None), + "year": Column(None, "test", "date", "year", None), } schema = StarSchema("star", self.md, mappings, self.test_fact) @@ -202,8 +196,7 @@ def test_star_basic(self): selection = [schema.column("category"), schema.column("total")] - statement = sql.expression.select(selection, - from_obj=star) + statement = sql.expression.select(selection, from_obj=star) result = self.engine.execute(statement) amounts = [] @@ -217,6 +210,7 @@ def test_star_basic(self): def test_no_table_in_mapping(self): pass + 
@unittest.skip("Fix this (important!)") class SchemaJoinsTestCase(SQLTestCase): def setUp(self): @@ -227,18 +221,18 @@ def setUp(self): self.dim_size = create_table(self.engine, self.md, DIM_SIZE) def test_required_tables(self): - """Test master-detail-detail snowflake chain joins""" + """Test master-detail-detail snowflake chain joins.""" joins = [ Join.from_dict(("test.category", "dim_category.category")), Join.from_dict(("dim_category.size", "dim_size.size")), ] mappings = { - "amount": Column(None, "test", "amount", None, None), - "category": Column(None, "test", "category", None, None), + "amount": Column(None, "test", "amount", None, None), + "category": Column(None, "test", "category", None, None), "category_label": Column(None, "dim_category", "label", None, None), - "size": Column(None, "dim_category", "size", None, None), - "size_label": Column(None, "dim_size", "label", None, None), + "size": Column(None, "dim_category", "size", None, None), + "size_label": Column(None, "dim_size", "label", None, None), } schema = StarSchema("star", self.md, mappings, self.fact, joins=joins) @@ -271,23 +265,19 @@ def test_detail_twice(self): StarSchema("star", self.md, {}, self.fact, joins=joins) def test_no_join_detail_table(self): - joins = [ - Join.from_dict(("test.category", "category")), - ] + joins = [Join.from_dict(("test.category", "category"))] with self.assertRaisesRegex(ModelError, r"^No detail table"): StarSchema("star", self.md, {}, self.fact, joins=joins) def test_join(self): - """Test single join, two joins""" - joins = [ - Join.from_dict(("test.category", "dim_category.category")) - ] + """Test single join, two joins.""" + joins = [Join.from_dict(("test.category", "dim_category.category"))] mappings = { - "category": Column(None, "test", "category", None, None), - "amount": Column(None, "test", "amount", None, None), + "category": Column(None, "test", "category", None, None), + "amount": Column(None, "test", "amount", None, None), "category_label": 
Column(None, "dim_category", "label", None, None), - "size": Column(None, "dim_category", "size", None, None), + "size": Column(None, "dim_category", "size", None, None), } schema = StarSchema("star", self.md, mappings, self.fact, joins=joins) @@ -303,32 +293,28 @@ def test_join(self): self.assertEqual(len(tables), 1) # Check columns - self.assertColumnEqual(schema.column("category"), - self.fact.columns["category"]) - self.assertColumnEqual(schema.column("category_label"), - self.dim_category.columns["label"]) - self.assertColumnEqual(schema.column("size"), - self.dim_category.columns["size"]) + self.assertColumnEqual(schema.column("category"), self.fact.columns["category"]) + self.assertColumnEqual( + schema.column("category_label"), self.dim_category.columns["label"] + ) + self.assertColumnEqual(schema.column("size"), self.dim_category.columns["size"]) def test_compound_join_key(self): - """Test compound (multi-column) join key""" + """Test compound (multi-column) join key.""" joins = [ - Join.from_dict(( - { - "table": "test", - "column": ["category", "category"] - }, - { - "table":"dim_category", - "column": ["category", "category"] - })) + Join.from_dict( + ( + {"table": "test", "column": ["category", "category"]}, + {"table": "dim_category", "column": ["category", "category"]}, + ) + ) ] mappings = { - "category": Column(None, "test", "category", None, None), - "amount": Column(None, "test", "amount", None, None), + "category": Column(None, "test", "category", None, None), + "amount": Column(None, "test", "amount", None, None), "category_label": Column(None, "dim_category", "label", None, None), - "size": Column(None, "dim_category", "size", None, None), + "size": Column(None, "dim_category", "size", None, None), } schema = StarSchema("star", self.md, mappings, self.fact, joins=joins) @@ -344,34 +330,30 @@ def test_compound_join_key(self): self.assertEqual(len(tables), 1) # Check columns - self.assertColumnEqual(schema.column("category"), - 
self.fact.columns["category"]) - self.assertColumnEqual(schema.column("category_label"), - self.dim_category.columns["label"]) - self.assertColumnEqual(schema.column("size"), - self.dim_category.columns["size"]) + self.assertColumnEqual(schema.column("category"), self.fact.columns["category"]) + self.assertColumnEqual( + schema.column("category_label"), self.dim_category.columns["label"] + ) + self.assertColumnEqual(schema.column("size"), self.dim_category.columns["size"]) schema.get_star(["category_label"]) def test_compound_join_different_length(self): - """Test compound (multi-column) join key""" + """Test compound (multi-column) join key.""" joins = [ - Join.from_dict(( - { - "table": "test", - "column": ["category", "category"] - }, - { - "table":"dim_category", - "column": ["category"] - })) + Join.from_dict( + ( + {"table": "test", "column": ["category", "category"]}, + {"table": "dim_category", "column": ["category"]}, + ) + ) ] mappings = { - "category": Column(None, "test", "category", None, None), - "amount": Column(None, "test", "amount", None, None), + "category": Column(None, "test", "category", None, None), + "amount": Column(None, "test", "amount", None, None), "category_label": Column(None, "dim_category", "label", None, None), - "size": Column(None, "dim_category", "size", None, None), + "size": Column(None, "dim_category", "size", None, None), } schema = StarSchema("star", self.md, mappings, self.fact, joins=joins) @@ -381,16 +363,16 @@ def test_compound_join_different_length(self): schema.get_star(["category_label"]) def test_join_alias(self): - """Test single aliased join, test two joins on same table, one aliased - """ + """Test single aliased join, test two joins on same table, one + aliased.""" joins = [ Join.from_dict(("test.category", "dim_category.category", "dim_fruit")) ] mappings = { - "code": Column(None, "test", "category", None, None), + "code": Column(None, "test", "category", None, None), "fruit": Column(None, "dim_fruit", 
"label", None, None), - "size": Column(None, "dim_fruit", "size", None, None), + "size": Column(None, "dim_fruit", "size", None, None), } schema = StarSchema("star", self.md, mappings, self.fact, joins=joins) @@ -403,58 +385,55 @@ def test_join_alias(self): self.assertEqual(len(tables), 2) # Check columns - self.assertColumnEqual(schema.column("code"), - self.fact.columns["category"]) - self.assertColumnEqual(schema.column("fruit"), - self.dim_category.columns["label"]) - self.assertColumnEqual(schema.column("size"), - self.dim_category.columns["size"]) + self.assertColumnEqual(schema.column("code"), self.fact.columns["category"]) + self.assertColumnEqual( + schema.column("fruit"), self.dim_category.columns["label"] + ) + self.assertColumnEqual(schema.column("size"), self.dim_category.columns["size"]) # Check selectable statement star = schema.get_star(["code", "size"]) selection = [schema.column("code"), schema.column("size")] - select = sql.expression.select(selection, - from_obj=star) + select = sql.expression.select(selection, from_obj=star) result = self.engine.execute(select) sizes = [r["size"] for r in result] self.assertCountEqual(sizes, [2, 1, 4, 1]) def test_fact_is_included(self): - """Test whether the fact will be included in the star schema - """ + """Test whether the fact will be included in the star schema.""" joins = [ Join.from_dict(("test.category", "dim_category.category", "dim_fruit")) ] mappings = { - "code": Column(None, "test", "category", None, None), + "code": Column(None, "test", "category", None, None), "fruit": Column(None, "dim_fruit", "label", None, None), - "size": Column(None, "dim_fruit", "size", None, None), + "size": Column(None, "dim_fruit", "size", None, None), } schema = StarSchema("star", self.md, mappings, self.fact, joins=joins) star = schema.get_star(["size"]) selection = [schema.column("size")] - select = sql.expression.select(selection, - from_obj=star) + select = sql.expression.select(selection, from_obj=star) result = 
self.engine.execute(select) sizes = [r["size"] for r in result] self.assertCountEqual(sizes, [2, 1, 4, 1]) def test_snowflake_joins(self): - """Test master-detail-detail snowflake chain joins""" + """Test master-detail-detail snowflake chain joins.""" joins = [ - Join.from_dict({"master": "test.category", - "detail":"dim_category.category"}), - Join.from_dict({"master": "dim_category.size", "detail": "dim_size.size"}), + Join.from_dict( + {"master": "test.category", "detail": "dim_category.category"} + ), + Join.from_dict({"master": "dim_category.size", "detail": "dim_size.size"}), ] mappings = { - "category": Column(None, "test", "category", None, None), + "category": Column(None, "test", "category", None, None), "category_label": Column(None, "dim_category", "label", None, None), - "size": Column(None, "dim_category", "size", None, None), - "size_label": Column(None, "dim_size", "label", None, None), + "size": Column(None, "dim_category", "size", None, None), + "size_label": Column(None, "dim_size", "label", None, None), } schema = StarSchema("star", self.md, mappings, self.fact, joins=joins) @@ -463,25 +442,29 @@ def test_snowflake_joins(self): # arm # star = schema.star(["category_label", "size_label"]) star = schema.get_star(["size_label", "category_label"]) - select = sql.expression.select([schema.column("size_label")], - from_obj=star) + select = sql.expression.select([schema.column("size_label")], from_obj=star) result = self.engine.execute(select) sizes = [r["size_label"] for r in result] self.assertCountEqual(sizes, ["medium", "small", "large", "small"]) def test_snowflake_aliased_joins(self): - """Test master-detail-detail snowflake chain joins""" + """Test master-detail-detail snowflake chain joins.""" joins = [ - Join.from_dict({"master":"test.category", - "detail":"dim_category.category", "alias":"dim_fruit"}), - Join.from_dict({"master":"dim_fruit.size", "detail":"dim_size.size"}) + Join.from_dict( + { + "master": "test.category", + "detail": 
"dim_category.category", + "alias": "dim_fruit", + } + ), + Join.from_dict({"master": "dim_fruit.size", "detail": "dim_size.size"}), ] mappings = { - "category": Column(None, "test", "category", None, None), + "category": Column(None, "test", "category", None, None), "category_label": Column(None, "dim_fruit", "label", None, None), - "size": Column(None, "dim_fruit", "size", None, None), - "size_label": Column(None, "dim_size", "label", None, None), + "size": Column(None, "dim_fruit", "size", None, None), + "size_label": Column(None, "dim_size", "label", None, None), } schema = StarSchema("star", self.md, mappings, self.fact, joins=joins) @@ -493,73 +476,80 @@ def test_snowflake_aliased_joins(self): self.assertTrue(table.table.is_derived_from(self.dim_size)) # Check columns - self.assertColumnEqual(schema.column("size_label"), - self.dim_size.columns["label"]) + self.assertColumnEqual( + schema.column("size_label"), self.dim_size.columns["label"] + ) # Construct the select for the very last attribute in the snowflake # arm star = schema.get_star(["size_label"]) - select = sql.expression.select([schema.column("size_label")], - from_obj=star) + select = sql.expression.select([schema.column("size_label")], from_obj=star) result = self.engine.execute(select) sizes = [r["size_label"] for r in result] self.assertCountEqual(sizes, ["medium", "small", "large", "small"]) def test_join_method_detail(self): - """Test 'detail' join method""" + """Test 'detail' join method.""" def test_join_method_master(self): - """Test 'detail' join master""" + """Test 'detail' join master.""" def test_unary(self): - """Test that mapping.unary works""" + """Test that mapping.unary works.""" def test_statement_table(self): - """Test using a statement as a table""" - joins = [ - Join.from_dict(("test.category", "dim_category.category")) - ] + """Test using a statement as a table.""" + joins = [Join.from_dict(("test.category", "dim_category.category"))] mappings = { - "code": Column(None, 
"test", "category", None, None), + "code": Column(None, "test", "category", None, None), "fruit": Column(None, "dim_category", "label", None, None), - "size": Column(None, "dim_category", "size", None, None), + "size": Column(None, "dim_category", "size", None, None), } - fact_statement = sa.select(self.fact.columns, from_obj=self.fact, - whereclause=self.fact.c.category == 'A') - cat_statement = sa.select(self.dim_category.columns, - from_obj=self.dim_category, - whereclause=self.dim_category.c.category == 'A') + fact_statement = sa.select( + self.fact.columns, + from_obj=self.fact, + whereclause=self.fact.c.category == "A", + ) + cat_statement = sa.select( + self.dim_category.columns, + from_obj=self.dim_category, + whereclause=self.dim_category.c.category == "A", + ) - tables = { - "dim_category": cat_statement - } + tables = {"dim_category": cat_statement} with self.assertRaisesRegex(ArgumentError, "requires alias"): - StarSchema("star", self.md, mappings, - fact=fact_statement, - tables=tables, - joins=joins) - - tables = { - "dim_category": cat_statement.alias("dim_category") - } - - schema = StarSchema("star", self.md, mappings, - fact=fact_statement.alias("test"), - tables=tables, - joins=joins) + StarSchema( + "star", + self.md, + mappings, + fact=fact_statement, + tables=tables, + joins=joins, + ) + + tables = {"dim_category": cat_statement.alias("dim_category")} + + schema = StarSchema( + "star", + self.md, + mappings, + fact=fact_statement.alias("test"), + tables=tables, + joins=joins, + ) star = schema.get_star(["size"]) selection = [schema.column("size")] - select = sql.expression.select(selection, - from_obj=star) + select = sql.expression.select(selection, from_obj=star) result = self.engine.execute(select) sizes = [r["size"] for r in result] self.assertCountEqual(sizes, [2]) + @unittest.skip("Fix this (important!)") class QueryTestCase(SQLTestCase): def setUp(self): @@ -568,25 +558,20 @@ def setUp(self): self.fact = create_table(self.engine, 
self.md, BASE_FACT) mappings = { - "date": Column(None, "test", "date", None, None), - "amount": Column(None, "test", "category", None, None), - "category": Column(None, "test", "amount", None, None), - } - self.deps = { - "date": None, - "amount": None, - "category": None, + "date": Column(None, "test", "date", None, None), + "amount": Column(None, "test", "category", None, None), + "category": Column(None, "test", "amount", None, None), } + self.deps = {"date": None, "amount": None, "category": None} self.schema = StarSchema("star", self.md, mappings, self.fact) self.base_attributes = create_list_of(Attribute, mappings.keys()) # self.base_attributes = list(mappings.keys()) - self.base_deps = {attr:[] for attr in self.base_attributes} - + self.base_deps = {attr: [] for attr in self.base_attributes} def test_basic(self): - context = QueryContext(self.schema, self.base_attributes, - self.base_deps) + context = QueryContext(self.schema, self.base_attributes, self.base_deps) + if __name__ == "__main__": unittest.main() diff --git a/tests/test_auth.py b/tests/test_auth.py index 3b283e51..9cc927f2 100644 --- a/tests/test_auth.py +++ b/tests/test_auth.py @@ -5,9 +5,11 @@ from json import dumps + def printable(obj): return dumps(obj, indent=4) + class AuthTestCase(unittest.TestCase): def setUp(self): self.sales_cube = Cube("sales") @@ -18,98 +20,74 @@ def test_empty(self): self.assertEqual([], self.auth.authorize("john", [self.sales_cube])) def test_authorize(self): - rights = { - "john": {"allowed_cubes": ["sales"]} - } + rights = {"john": {"allowed_cubes": ["sales"]}} self.auth = SimpleAuthorizer(rights=rights) - self.assertEqual([self.sales_cube], - self.auth.authorize("john", [self.sales_cube])) + self.assertEqual( + [self.sales_cube], self.auth.authorize("john", [self.sales_cube]) + ) self.assertEqual([], self.auth.authorize("ivana", [self.churn_cube])) def test_deny(self): - rights = { - "john": {"denied_cubes": ["sales"]} - } + rights = {"john": {"denied_cubes": 
["sales"]}} self.auth = SimpleAuthorizer(rights=rights) - self.assertEqual([self.churn_cube], self.auth.authorize("john", [self.churn_cube])) + self.assertEqual( + [self.churn_cube], self.auth.authorize("john", [self.churn_cube]) + ) - self.assertEqual([], - self.auth.authorize("john", [self.sales_cube])) + self.assertEqual([], self.auth.authorize("john", [self.sales_cube])) self.assertEqual([], self.auth.authorize("ivana", [self.churn_cube])) def test_allow(self): - rights = { - "john": {"denied_cubes": ["sales"]}, - "ivana": {} - } + rights = {"john": {"denied_cubes": ["sales"]}, "ivana": {}} self.auth = SimpleAuthorizer(rights=rights) - self.assertEqual([self.churn_cube], - self.auth.authorize("ivana", [self.churn_cube])) + self.assertEqual( + [self.churn_cube], self.auth.authorize("ivana", [self.churn_cube]) + ) def test_order(self): rights = { - "john": { - "denied_cubes": ["sales"], - "allowed_cubes": ["sales"] - }, - "ivana": { - "denied_cubes": ["sales"], - "allowed_cubes": ["*"] - }, - "fero": { - "denied_cubes": ["*"], - "allowed_cubes": ["sales"] - }, - "magda": { - "denied_cubes": ["*"], - "allowed_cubes": ["*"] - }, + "john": {"denied_cubes": ["sales"], "allowed_cubes": ["sales"]}, + "ivana": {"denied_cubes": ["sales"], "allowed_cubes": ["*"]}, + "fero": {"denied_cubes": ["*"], "allowed_cubes": ["sales"]}, + "magda": {"denied_cubes": ["*"], "allowed_cubes": ["*"]}, } self.auth = SimpleAuthorizer(rights=rights) - self.assertEqual([self.sales_cube], - self.auth.authorize("john", [self.sales_cube])) - self.assertEqual([self.sales_cube], - self.auth.authorize("ivana", [self.sales_cube])) - self.assertEqual([self.sales_cube], - self.auth.authorize("fero", [self.sales_cube])) - self.assertEqual([self.sales_cube], - self.auth.authorize("magda", [self.sales_cube])) + self.assertEqual( + [self.sales_cube], self.auth.authorize("john", [self.sales_cube]) + ) + self.assertEqual( + [self.sales_cube], self.auth.authorize("ivana", [self.sales_cube]) + ) + 
self.assertEqual( + [self.sales_cube], self.auth.authorize("fero", [self.sales_cube]) + ) + self.assertEqual( + [self.sales_cube], self.auth.authorize("magda", [self.sales_cube]) + ) self.auth = SimpleAuthorizer(rights=rights, order="allow_deny") - self.assertEqual([], - self.auth.authorize("john", [self.sales_cube])) - self.assertEqual([], - self.auth.authorize("ivana", [self.sales_cube])) - self.assertEqual([], - self.auth.authorize("fero", [self.sales_cube])) - self.assertEqual([], - self.auth.authorize("magda", [self.sales_cube])) + self.assertEqual([], self.auth.authorize("john", [self.sales_cube])) + self.assertEqual([], self.auth.authorize("ivana", [self.sales_cube])) + self.assertEqual([], self.auth.authorize("fero", [self.sales_cube])) + self.assertEqual([], self.auth.authorize("magda", [self.sales_cube])) def test_role(self): - roles = { - "marketing": {"allowed_cubes": ["sales"]} - } - rights = { - "john": {"roles": ["marketing"]} - } + roles = {"marketing": {"allowed_cubes": ["sales"]}} + rights = {"john": {"roles": ["marketing"]}} self.auth = SimpleAuthorizer(rights=rights, roles=roles) - self.assertEqual([self.sales_cube], - self.auth.authorize("john", [self.sales_cube])) + self.assertEqual( + [self.sales_cube], self.auth.authorize("john", [self.sales_cube]) + ) def test_role_inheritance(self): - roles = { - "top": {"allowed_cubes": ["sales"]}, - "marketing": {"roles": ["top"]} - } - rights = { - "john": {"roles": ["marketing"]} - } + roles = {"top": {"allowed_cubes": ["sales"]}, "marketing": {"roles": ["top"]}} + rights = {"john": {"roles": ["marketing"]}} self.auth = SimpleAuthorizer(rights=rights, roles=roles) - self.assertEqual([self.sales_cube], - self.auth.authorize("john", [self.sales_cube])) - + self.assertEqual( + [self.sales_cube], self.auth.authorize("john", [self.sales_cube]) + ) diff --git a/tests/test_calendar.py b/tests/test_calendar.py index 9400db7c..122bd4de 100644 --- a/tests/test_calendar.py +++ b/tests/test_calendar.py @@ -8,7 
+8,7 @@ class DateTimeTestCase(unittest.TestCase): def setUp(self): - super(DateTimeTestCase,self).setUp() + super().setUp() self.provider = create_provider("datetime.json") self.cal = Calendar() @@ -60,7 +60,7 @@ def test_path_weekday(self): # Reference for the named relative test # 2012 - # + # # Január Február Marec Apríl # po 2 9 16 23 30 6 13 20 27 5*12 19 26 2 9 16 23 30 # ut 3 10 17 24 31 7 14 21 28 6 13 20 27 3 10 17 24 @@ -123,13 +123,13 @@ def test_named_relative_truncated(self): self.assertEqual([2012, 4, 1, 0], path) path = self.cal.named_relative_path("next12months", units, date) - self.assertEqual([2013, 3, 1,0 ], path) + self.assertEqual([2013, 3, 1, 0], path) path = self.cal.named_relative_path("lastquarter", units, date) - self.assertEqual([2011,10, 1, 0], path) + self.assertEqual([2011, 10, 1, 0], path) path = self.cal.named_relative_path("lastyear", units, date) - self.assertEqual([2011, 1, 1,0 ], path) + self.assertEqual([2011, 1, 1, 0], path) def test_distance(self): # Meniny (SK): Anna/Hana diff --git a/tests/test_common.py b/tests/test_common.py index eb2c7add..dbc2a417 100644 --- a/tests/test_common.py +++ b/tests/test_common.py @@ -5,12 +5,11 @@ # TODO: Moved from `test_combinations`. Requires review. 
@unittest.skip class CombinationsTestCase(unittest.TestCase): - def setUp(self): - self.nodea = ('a', (1,2,3)) - self.nodeb = ('b', (99,88)) - self.nodec = ('c',('x','y')) - self.noded = ('d', ('m')) + self.nodea = ("a", (1, 2, 3)) + self.nodeb = ("b", (99, 88)) + self.nodec = ("c", ("x", "y")) + self.noded = ("d", ("m")) def test_levels(self): combos = cubes.common.combine_nodes([self.nodea]) @@ -55,10 +54,11 @@ def test_required_more(self): break self.assertTrue(flag, "All combinations should contain both required nodes") + @unittest.skip class CuboidsTestCase(unittest.TestCase): def setUp(self): - self.model_path = os.path.join(DATA_PATH, 'model.json') + self.model_path = os.path.join(DATA_PATH, "model.json") self.model = cubes.model_from_path(self.model_path) self.cube = self.model.cubes.get("contracts") @@ -77,9 +77,9 @@ def test_combine_dimensions(self): self.assertEqual(len(results), 648) def test_should_not_accept_unknown_dimension(self): - foo_desc = { "name": "foo", "levels": {"level": {"key": "boo"}}} + foo_desc = {"name": "foo", "levels": {"level": {"key": "boo"}}} foo_dim = cubes.create_dimension(foo_desc) - self.assertRaises(AttributeError, cubes.common.all_cuboids, - self.cube.dimensions, [foo_dim]) - + self.assertRaises( + AttributeError, cubes.common.all_cuboids, self.cube.dimensions, [foo_dim] + ) diff --git a/tests/test_expressions.py b/tests/test_expressions.py index dc9126fb..aa2cd44a 100644 --- a/tests/test_expressions.py +++ b/tests/test_expressions.py @@ -23,9 +23,8 @@ def setUp(self): {"name": "indirect_loop2", "expression": "indirect_loop1"}, ] - self.attrs = {attr["name"]:Attribute.from_metadata(attr) for attr in attrs} - self.deps = {name:attr.dependencies - for name, attr in self.attrs.items()} + self.attrs = {attr["name"]: Attribute.from_metadata(attr) for attr in attrs} + self.deps = {name: attr.dependencies for name, attr in self.attrs.items()} def attributes(self, *attrs): return [self.attrs[attr] for attr in attrs] @@ -51,5 +50,6 
@@ def test_sorted_circular(self): depsort_attributes(["loop1", "loop2"], self.deps) with self.assertRaisesRegex(ExpressionError, "Circular"): - depsort_attributes(["indirect_loop1", "intermediate", - "indirect_loop2"], self.deps) + depsort_attributes( + ["indirect_loop1", "intermediate", "indirect_loop2"], self.deps + ) diff --git a/tests/test_ext.py b/tests/test_ext.py index c6c37cd8..b48334e5 100644 --- a/tests/test_ext.py +++ b/tests/test_ext.py @@ -9,15 +9,14 @@ class StoreBase(Extensible, abstract=True): __extension_type__ = "store" + def value(self) -> int: raise NotImplementedError class MyStore(StoreBase, name="my"): - extension_settings = [ - Setting("number", SettingType.int) - ] + extension_settings = [Setting("number", SettingType.int)] number: int diff --git a/tests/test_namespace.py b/tests/test_namespace.py index 7a45211e..90afe40f 100644 --- a/tests/test_namespace.py +++ b/tests/test_namespace.py @@ -1,7 +1,9 @@ import unittest from cubes.namespace import Namespace + # from .common import CubesTestCaseBase + class NamespaceTestCase(unittest.TestCase): def test_create(self): ns = Namespace() @@ -74,7 +76,7 @@ def test_find_cube(self): (ns, nsname, basename) = base.find_cube("extern.deeper.cube") self.assertEqual(ns, extern) - self.assertEqual(nsname, 'extern') + self.assertEqual(nsname, "extern") self.assertEqual(basename, "deeper.cube") (deep, remainder) = base.namespace("even.deeper.extern", create=True) @@ -82,4 +84,3 @@ def test_find_cube(self): self.assertEqual(ns, deep) self.assertEqual(nsname, "") self.assertEqual(basename, "cube") - diff --git a/tests/test_workspace.py b/tests/test_workspace.py index 736df15d..819902b7 100644 --- a/tests/test_workspace.py +++ b/tests/test_workspace.py @@ -2,7 +2,7 @@ import os import json import re -from cubes.errors import NoSuchCubeError, NoSuchDimensionError +from cubes.errors import NoSuchCubeError, NoSuchDimensionError, ModelError from cubes.errors import NoSuchAttributeError from cubes.workspace import 
Workspace from cubes.stores import Store @@ -10,8 +10,10 @@ from cubes.server.base import read_slicer_config from .common import CubesTestCaseBase + # FIXME: remove this once satisfied + class WorkspaceTestCaseBase(CubesTestCaseBase): def default_workspace(self, model_name=None): model_name = model_name or "model.json" @@ -20,6 +22,7 @@ def default_workspace(self, model_name=None): ws.import_model(self.model_path("model.json")) return ws + class WorkspaceModelTestCase(WorkspaceTestCaseBase): def test_get_cube(self): ws = self.default_workspace() @@ -84,8 +87,7 @@ def test_external_template(self): self.assertEqual("another_date", dim.name) self.assertEqual(3, len(dim.levels)) - @unittest.skip("We are lazy now, we don't want to ping the provider for " - "nothing") + @unittest.skip("We are lazy now, we don't want to ping the provider for nothing") def test_duplicate_dimension(self): ws = Workspace() ws.import_model(self.model_path("templated_dimension.json")) @@ -112,4 +114,3 @@ def test_local_dimension(self): cube = ws.cube("lonely_yearly_events") dim = cube.dimension("date") self.assertEqual(["lonely_year"], dim.level_names) - diff --git a/tox.ini b/tox.ini new file mode 100644 index 00000000..25916547 --- /dev/null +++ b/tox.ini @@ -0,0 +1,14 @@ +[tox] +envlist = py36,py37,py38 +skip_missing_interpreters = True + +[testenv] +extras = dev + +commands= + # This souldn't be needed. FIXME later. + pip install -r requirements-optional.txt + + pytest + # flake8 +