10 changes: 8 additions & 2 deletions backend/infrahub/core/attribute.py
@@ -578,7 +578,12 @@ def _filter_sensitive(self, value: str, filter_sensitive: bool) -> str:

return value

async def from_graphql(self, data: dict, db: InfrahubDatabase) -> bool:
async def from_graphql(
self,
data: dict,
db: InfrahubDatabase,
process_pools: bool = True,
) -> bool:
"""Update attr from GraphQL payload"""
Comment on lines +581 to 587

🛠️ Refactor suggestion | 🟠 Major

Update docstring to document parameters.

The from_graphql method signature now includes process_pools: bool = True, but the docstring (line 587) is brief and does not document any parameters. Please update the docstring to include an Args section documenting the data, db, and process_pools parameters, as well as a Returns section.

As per coding guidelines

Apply this diff to update the docstring:

     async def from_graphql(
         self,
         data: dict,
         db: InfrahubDatabase,
         process_pools: bool = True,
     ) -> bool:
-        """Update attr from GraphQL payload"""
+        """Update attribute from a GraphQL payload.
+
+        Args:
+            data: GraphQL payload to apply.
+            db: Database connection used for related lookups.
+            process_pools: Whether resource pool allocations should be performed; when False, pool allocation is skipped.
+
+        Returns:
+            True if the attribute changed, otherwise False.
+        """
🤖 Prompt for AI Agents
In backend/infrahub/core/attribute.py around lines 581 to 587, the from_graphql
method docstring is too brief and missing parameter and return documentation;
update the docstring to include an Args section documenting: data (dict) —
GraphQL payload used to update the attribute; db (InfrahubDatabase) — database
instance used for lookups/updates; process_pools (bool, default True) — whether
to process pool-related data; and add a Returns section describing the boolean
return (True on successful update, False otherwise). Keep formatting consistent
with the project's docstring style and place the Args and Returns under the
existing one-line summary.
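
For context, a minimal sketch of how a caller might use the new flag to stage a pool request without allocating it. The helper name, payload, and pool id below are hypothetical illustrations, not code from this PR:

from infrahub.core.attribute import BaseAttribute
from infrahub.database import InfrahubDatabase


async def stage_pool_request(attribute: BaseAttribute, db: InfrahubDatabase) -> bool:
    # Hypothetical payload; "pool-123" is a made-up pool id.
    payload = {"from_pool": {"id": "pool-123"}}

    # With process_pools=False the from_pool assignment is recorded but
    # node.handle_pool() is not called, so nothing is allocated yet.
    changed = await attribute.from_graphql(data=payload, db=db, process_pools=False)

    # Allocation can then be triggered explicitly, e.g. once locks are held.
    if changed and attribute.from_pool:
        await attribute.node.handle_pool(db=db, attribute=attribute, errors=[])
    return changed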


changed = False
@@ -592,7 +597,8 @@ async def from_graphql(self, data: dict, db: InfrahubDatabase) -> bool:
changed = True
elif "from_pool" in data:
self.from_pool = data["from_pool"]
await self.node.handle_pool(db=db, attribute=self, errors=[])
if process_pools:
await self.node.handle_pool(db=db, attribute=self, errors=[])
changed = True

if changed and self.is_from_profile:
66 changes: 54 additions & 12 deletions backend/infrahub/core/node/__init__.py
@@ -257,7 +257,13 @@ async def init(

return cls(**attrs)

async def handle_pool(self, db: InfrahubDatabase, attribute: BaseAttribute, errors: list) -> None:
async def handle_pool(
self,
db: InfrahubDatabase,
attribute: BaseAttribute,
errors: list,
allocate_resources: bool = True,
) -> None:
Comment on lines +260 to +266

🛠️ Refactor suggestion | 🟠 Major

Update docstring to document allocate_resources parameter.

The method signature now includes allocate_resources: bool = True, but the docstring (lines 267-271) does not document this new parameter. Please update the docstring to include an Args section with the allocate_resources parameter.

As per coding guidelines

Apply this diff to update the docstring:

     async def handle_pool(
         self,
         db: InfrahubDatabase,
         attribute: BaseAttribute,
         errors: list,
         allocate_resources: bool = True,
     ) -> None:
-        """Evaluate if a resource has been requested from a pool and apply the resource
+        """Evaluate if a resource has been requested from a pool and apply the resource.
+
+        Args:
+            db: Database connection.
+            attribute: Attribute to evaluate for pool allocation.
+            errors: List to append validation errors.
+            allocate_resources: Whether to perform pool allocation; when False, pool allocation is skipped.
 
         This method only works on number pools, currently Integer is the only type that has the from_pool
         within the create code.
         """
🤖 Prompt for AI Agents
In backend/infrahub/core/node/__init__.py around lines 260 to 266, the method
handle_pool now accepts an allocate_resources: bool = True parameter but the
docstring was not updated; add or update the docstring's Args section to include
allocate_resources with a brief description (type bool, default True) explaining
that when True resources will be allocated during handling and when False
allocation is skipped, and ensure the Args list includes the existing parameters
(db, attribute, errors) in the same style and formatting as the project's
docstring conventions.
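
As a quick illustration of the new guard, a hedged sketch (the node, attribute, and db objects are assumed stand-ins, not fixtures from this PR):

from infrahub.core.attribute import BaseAttribute
from infrahub.core.node import Node
from infrahub.database import InfrahubDatabase


async def preview_pool_request(node: Node, attribute: BaseAttribute, db: InfrahubDatabase) -> None:
    errors: list = []
    # With allocate_resources=False the method still flags number-pool
    # attributes declared in the schema (from_pool is set, is_default cleared)
    # but returns before anything is actually taken from the pool.
    await node.handle_pool(db=db, attribute=attribute, errors=errors, allocate_resources=False)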

"""Evaluate if a resource has been requested from a pool and apply the resource

This method only works on number pools, currently Integer is the only type that has the from_pool
@@ -268,7 +274,7 @@ async def handle_pool(self, db: InfrahubDatabase, attribute: BaseAttribute, erro
attribute.from_pool = {"id": attribute.schema.parameters.number_pool_id}
attribute.is_default = False

if not attribute.from_pool:
if not attribute.from_pool or not allocate_resources:
return

try:
@@ -426,7 +432,12 @@ async def handle_object_template(self, fields: dict, db: InfrahubDatabase, error
elif relationship_peers := await relationship.get_peers(db=db):
fields[relationship_name] = [{"id": peer_id} for peer_id in relationship_peers]

async def _process_fields(self, fields: dict, db: InfrahubDatabase) -> None:
async def _process_fields(
self,
fields: dict,
db: InfrahubDatabase,
process_pools: bool = True,
) -> None:
errors = []

if "_source" in fields.keys():
@@ -480,7 +491,7 @@ async def _process_fields(self, fields: dict, db: InfrahubDatabase) -> None:
# Generate Attribute and Relationship and assign them
# -------------------------------------------
errors.extend(await self._process_fields_relationships(fields=fields, db=db))
errors.extend(await self._process_fields_attributes(fields=fields, db=db))
errors.extend(await self._process_fields_attributes(fields=fields, db=db, process_pools=process_pools))

if errors:
raise ValidationError(errors)
@@ -517,7 +528,12 @@ async def _process_fields_relationships(self, fields: dict, db: InfrahubDatabase

return errors

async def _process_fields_attributes(self, fields: dict, db: InfrahubDatabase) -> list[ValidationError]:
async def _process_fields_attributes(
self,
fields: dict,
db: InfrahubDatabase,
process_pools: bool,
) -> list[ValidationError]:
errors: list[ValidationError] = []

for attr_schema in self._schema.attributes:
@@ -542,9 +558,15 @@ async def _process_fields_attributes(self, fields: dict, db: InfrahubDatabase) -
)
if not self._existing:
attribute: BaseAttribute = getattr(self, attr_schema.name)
await self.handle_pool(db=db, attribute=attribute, errors=errors)
await self.handle_pool(
db=db,
attribute=attribute,
errors=errors,
allocate_resources=process_pools,
)

attribute.validate(value=attribute.value, name=attribute.name, schema=attribute.schema)
if process_pools or attribute.from_pool is None:
attribute.validate(value=attribute.value, name=attribute.name, schema=attribute.schema)
except ValidationError as exc:
errors.append(exc)

@@ -672,7 +694,13 @@ async def process_label(self, db: InfrahubDatabase | None = None) -> None:  # no
self.label.value = " ".join([word.title() for word in self.name.value.split("_")])
self.label.is_default = False

async def new(self, db: InfrahubDatabase, id: str | None = None, **kwargs: Any) -> Self:
async def new(
self,
db: InfrahubDatabase,
id: str | None = None,
process_pools: bool = True,
**kwargs: Any,
) -> Self:
Comment on lines +697 to +703

🛠️ Refactor suggestion | 🟠 Major

Update docstring to document process_pools parameter.

The new method signature now includes process_pools: bool = True, but there is no docstring present for this method. Please add a docstring that includes an Args section documenting the process_pools parameter.

As per coding guidelines

Apply this diff to add a docstring:

     async def new(
         self,
         db: InfrahubDatabase,
         id: str | None = None,
         process_pools: bool = True,
         **kwargs: Any,
     ) -> Self:
+        """Create a new node instance with the provided data.
+
+        Args:
+            db: Database connection.
+            id: Optional UUID for the node; generated if not provided.
+            process_pools: Whether to perform resource pool allocations; when False, pool allocation is skipped.
+            **kwargs: Field values for the node.
+
+        Returns:
+            The initialized node instance.
+        """
         if id and not is_valid_uuid(id):
🤖 Prompt for AI Agents
In backend/infrahub/core/node/__init__.py around lines 697 to 703, the async
new(...) method lacks a docstring and must document the new process_pools: bool
= True parameter; add a concise docstring above the method including a short
description of the factory behavior and an Args section that documents db, id,
process_pools (explain that process_pools controls whether pool-related
processing is performed, default True), and **kwargs, and mention the return
type in a Returns section (Self or instance). Ensure formatting follows existing
docstring style in the file.
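
The create_node refactor below relies on exactly this flag: a throwaway preview instance is built with process_pools=False purely to compute lock names. A hedged sketch of that usage (the registry lookup is an assumption, not code from this PR):

from infrahub.core import registry
from infrahub.core.branch import Branch
from infrahub.core.node import Node
from infrahub.database import InfrahubDatabase


async def build_preview(db: InfrahubDatabase, branch: Branch, kind: str, data: dict) -> Node:
    schema = registry.schema.get(name=kind, branch=branch)  # assumed lookup API
    obj = await Node.init(db=db, schema=schema, branch=branch)
    # process_pools=False skips handle_pool() and defers validation of
    # pool-sourced attributes, so building the preview has no side effects.
    await obj.new(db=db, process_pools=False, **data)
    return obj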

if id and not is_valid_uuid(id):
raise ValidationError({"id": f"{id} is not a valid UUID"})
if id:
@@ -682,7 +710,7 @@ async def new(self, db: InfrahubDatabase, id: str | None = None, **kwargs: Any)

self.id = id or str(UUIDT())

await self._process_fields(db=db, fields=kwargs)
await self._process_fields(db=db, fields=kwargs, process_pools=process_pools)
await self._process_macros(db=db)

return self
@@ -935,15 +963,29 @@ async def to_graphql(

return response

async def from_graphql(self, data: dict, db: InfrahubDatabase) -> bool:
"""Update object from a GraphQL payload."""
async def from_graphql(
self,
data: dict,
db: InfrahubDatabase,
process_pools: bool = True,
) -> bool:
"""Update object from a GraphQL payload.

Args:
data: GraphQL payload to apply.
db: Database connection used for related lookups.
process_pools: Whether resource pool allocations should be performed.

Returns:
True if any field changed, otherwise False.
"""

changed = False

for key, value in data.items():
if key in self._attributes and isinstance(value, dict):
attribute = getattr(self, key)
changed |= await attribute.from_graphql(data=value, db=db)
changed |= await attribute.from_graphql(data=value, db=db, process_pools=process_pools)

if key in self._relationships:
rel: RelationshipManager = getattr(self, key)
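Taken together with the attribute-level change, the flag now propagates through node updates as well. A hedged node-level sketch (the field name and pool id are made up):

from infrahub.core.node import Node
from infrahub.database import InfrahubDatabase


async def stage_update(node: Node, db: InfrahubDatabase) -> bool:
    payload = {"ip_address": {"from_pool": {"id": "pool-123"}}}  # hypothetical field/pool
    # The flag is forwarded to each attribute's from_graphql, so pool-sourced
    # values are recorded without triggering allocation.
    return await node.from_graphql(data=payload, db=db, process_pools=False)
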
74 changes: 44 additions & 30 deletions backend/infrahub/core/node/create.py
@@ -1,14 +1,18 @@
from __future__ import annotations

from typing import TYPE_CHECKING, Any, Mapping
from typing import TYPE_CHECKING, Any, Mapping, Sequence

from infrahub import lock
from infrahub.core import registry
from infrahub.core.constants import RelationshipCardinality, RelationshipKind
from infrahub.core.constraint.node.runner import NodeConstraintRunner
from infrahub.core.manager import NodeManager
from infrahub.core.node import Node
from infrahub.core.node.save import run_constraints_and_save
from infrahub.core.protocols import CoreObjectTemplate
from infrahub.dependencies.registry import get_component_registry
from infrahub.lock import InfrahubMultiLock
from infrahub.lock_getter import get_lock_names_on_object_mutation

if TYPE_CHECKING:
from infrahub.core.branch import Branch
@@ -141,18 +145,23 @@ async def refresh_for_profile_update(


async def _do_create_node(
node_class: type[Node],
obj: Node,
db: InfrahubDatabase,
data: dict,
schema: NonGenericSchemaTypes,
fields_to_validate: list,
branch: Branch,
fields_to_validate: list[str],
node_constraint_runner: NodeConstraintRunner,
lock_names: Sequence[str],
) -> Node:
obj = await node_class.init(db=db, schema=schema, branch=branch)
await obj.new(db=db, **data)
await node_constraint_runner.check(node=obj, field_filters=fields_to_validate)
await obj.save(db=db)
await run_constraints_and_save(
node=obj,
node_constraint_runner=node_constraint_runner,
fields_to_validate=fields_to_validate,
fields_to_save=[],
db=db,
branch=branch,
lock_names=lock_names,
manage_lock=False,
)

object_template = await obj.get_object_template(db=db)
if object_template:
@@ -162,6 +171,7 @@ async def _do_create_node(
template=object_template,
obj=obj,
fields=fields_to_validate,
constraint_runner=node_constraint_runner,
)
return obj

@@ -175,35 +185,39 @@ async def create_node(
"""Create a node in the database if constraint checks succeed."""

component_registry = get_component_registry()
node_constraint_runner = await component_registry.get_component(
NodeConstraintRunner, db=db.start_session() if not db.is_transaction else db, branch=branch
)
node_class = Node
if schema.kind in registry.node:
node_class = registry.node[schema.kind]

fields_to_validate = list(data)
if db.is_transaction:
obj = await _do_create_node(
node_class=node_class,
node_constraint_runner=node_constraint_runner,
db=db,
schema=schema,
preview_obj = await node_class.init(db=db, schema=schema, branch=branch)
await preview_obj.new(db=db, process_pools=False, **data)
schema_branch = db.schema.get_schema_branch(name=branch.name)
lock_names = get_lock_names_on_object_mutation(node=preview_obj, schema_branch=schema_branch)

async def _persist(current_db: InfrahubDatabase) -> Node:
node_constraint_runner = await component_registry.get_component(
NodeConstraintRunner, db=current_db, branch=branch
)
obj = await node_class.init(db=current_db, schema=schema, branch=branch)
await obj.new(db=current_db, **data)

return await _do_create_node(
obj=obj,
db=current_db,
branch=branch,
fields_to_validate=fields_to_validate,
data=data,
node_constraint_runner=node_constraint_runner,
lock_names=lock_names,
)
else:
async with db.start_transaction() as dbt:
obj = await _do_create_node(
node_class=node_class,
node_constraint_runner=node_constraint_runner,
db=dbt,
schema=schema,
branch=branch,
fields_to_validate=fields_to_validate,
data=data,
)

obj: Node
async with InfrahubMultiLock(lock_registry=lock.registry, locks=lock_names):
if db.is_transaction:
obj = await _persist(db)
else:
async with db.start_transaction() as dbt:
obj = await _persist(dbt)

if await get_profile_ids(db=db, obj=obj):
obj = await refresh_for_profile_update(db=db, branch=branch, schema=schema, obj=obj)
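In short, create_node now computes lock names from a pool-free preview object and performs the real create (allocation included) inside the lock context. A condensed, hypothetical sketch of the pattern, omitting the constraint runner and the profile refresh shown above:

from infrahub import lock
from infrahub.lock import InfrahubMultiLock
from infrahub.lock_getter import get_lock_names_on_object_mutation


async def locked_create(db, branch, schema, node_class, data):
    # Phase 1: preview with no side effects, only to derive lock names.
    preview = await node_class.init(db=db, schema=schema, branch=branch)
    await preview.new(db=db, process_pools=False, **data)
    schema_branch = db.schema.get_schema_branch(name=branch.name)
    lock_names = get_lock_names_on_object_mutation(node=preview, schema_branch=schema_branch)

    # Phase 2: real create, pool allocation included, under the locks.
    async with InfrahubMultiLock(lock_registry=lock.registry, locks=lock_names):
        obj = await node_class.init(db=db, schema=schema, branch=branch)
        await obj.new(db=db, **data)  # process_pools defaults to True
        await obj.save(db=db)
        return obj
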
57 changes: 57 additions & 0 deletions backend/infrahub/core/node/save.py
@@ -0,0 +1,57 @@
from collections.abc import Sequence

from infrahub import lock
from infrahub.core.branch import Branch
from infrahub.core.constraint.node.runner import NodeConstraintRunner
from infrahub.core.node import Node
from infrahub.database import InfrahubDatabase
from infrahub.lock import InfrahubMultiLock
from infrahub.lock_getter import get_lock_names_on_object_mutation


async def run_constraints_and_save(
node: Node,
node_constraint_runner: NodeConstraintRunner,
fields_to_validate: list[str],
fields_to_save: list[str],
db: InfrahubDatabase,
branch: Branch,
skip_uniqueness_check: bool = False,
lock_names: Sequence[str] | None = None,
manage_lock: bool = True,
) -> None:
"""Validate a node and persist it, optionally reusing an existing lock context.

Args:
node: The node instance to validate and persist.
node_constraint_runner: Runner executing node-level constraints.
fields_to_validate: Field names that must be validated.
fields_to_save: Field names that must be persisted.
db: Database connection or transaction to use for persistence.
branch: Branch associated with the mutation.
skip_uniqueness_check: Whether to skip uniqueness constraints.
lock_names: Precomputed lock identifiers to reuse when ``manage_lock`` is False.
manage_lock: Whether this helper should acquire and release locks itself.
"""
Comment on lines +12 to +35

🛠️ Refactor suggestion | 🟠 Major

Add Raises section to docstring.

The docstring is missing a Raises section to document the ValueError raised on line 38 when manage_lock is False but lock_names is None.

Apply this diff:

     Args:
         node: The node instance to validate and persist.
         node_constraint_runner: Runner executing node-level constraints.
         fields_to_validate: Field names that must be validated.
         fields_to_save: Field names that must be persisted.
         db: Database connection or transaction to use for persistence.
         branch: Branch associated with the mutation.
         skip_uniqueness_check: Whether to skip uniqueness constraints.
         lock_names: Precomputed lock identifiers to reuse when ``manage_lock`` is False.
         manage_lock: Whether this helper should acquire and release locks itself.
+
+    Raises:
+        ValueError: When manage_lock is False but lock_names is not provided.
     """
🤖 Prompt for AI Agents
In backend/infrahub/core/node/save.py around lines 12 to 35, the function
docstring is missing a Raises section documenting that a ValueError is raised
when manage_lock is False but lock_names is None; add a "Raises" section to the
docstring that specifies ValueError is raised in that exact condition (include
the parameter names and a brief message such as "manage_lock is False but
lock_names is None"), so callers understand the precondition and error behavior.


if not manage_lock and lock_names is None:
raise ValueError("lock_names must be provided when manage_lock is False")

schema_branch = db.schema.get_schema_branch(name=branch.name)
locks = (
list(lock_names)
if lock_names is not None
else get_lock_names_on_object_mutation(node=node, schema_branch=schema_branch)
)

async def _persist() -> None:
await node_constraint_runner.check(
node=node, field_filters=fields_to_validate, skip_uniqueness_check=skip_uniqueness_check
)
await node.save(db=db, fields=fields_to_save)

if manage_lock:
async with InfrahubMultiLock(lock_registry=lock.registry, locks=locks):
await _persist()
else:
await _persist()
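
For reference, a hedged example of the two calling modes; node, runner, db, branch, and lock_names are assumed to exist in the calling scope, and "name" is a placeholder field:

async def save_examples(node, runner, db, branch, lock_names):
    # Standalone: the helper derives lock names itself and manages the lock.
    await run_constraints_and_save(
        node=node,
        node_constraint_runner=runner,
        fields_to_validate=["name"],
        fields_to_save=[],
        db=db,
        branch=branch,
    )

    # Inside an outer lock context (as in create_node): pass the precomputed
    # names and set manage_lock=False so the same locks are not re-acquired.
    await run_constraints_and_save(
        node=node,
        node_constraint_runner=runner,
        fields_to_validate=["name"],
        fields_to_save=[],
        db=db,
        branch=branch,
        lock_names=lock_names,
        manage_lock=False,
    )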