src/uproot/behaviors/RNTuple.py: 20 changes (14 additions, 6 deletions)
@@ -23,6 +23,7 @@
 import uproot.language.python
 import uproot.source.chunk
 from uproot._util import no_filter, unset
+from uproot.behaviors.TBranch import _regularize_array_cache


 def iterate(
@@ -615,7 +616,7 @@ def arrays(
         entry_start=None,
         entry_stop=None,
         decompression_executor=None,  # TODO: Not implemented yet
-        array_cache="inherit",  # TODO: Not implemented yet
+        array_cache="inherit",
         library="ak",  # TODO: Not implemented yet
         backend="cpu",
         interpreter="cpu",
@@ -659,7 +660,7 @@
                 is used. (Not implemented yet.)
             array_cache ("inherit", None, MutableMapping, or memory size): Cache of arrays;
                 if "inherit", use the file's cache; if None, do not use a cache;
-                if a memory size, create a new cache of this size. (Not implemented yet.)
+                if a memory size, create a new cache of this size.
             library (str or :doc:`uproot.interpretation.library.Library`): The library
                 that is used to represent arrays. Options are ``"np"`` for NumPy,
                 ``"ak"`` for Awkward Array, and ``"pd"`` for Pandas. (Not implemented yet.)
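With the "(Not implemented yet.)" note removed, callers can now control caching per call through this parameter. A minimal usage sketch, assuming an RNTuple object opened from a file; the file name, object name, and cache size below are hypothetical:

    import uproot

    with uproot.open("events.root") as f:   # hypothetical file
        ntuple = f["Events"]                 # hypothetical RNTuple object

        # Default "inherit": reuse the cache attached to the open file.
        a1 = ntuple.arrays()

        # None: skip array caching entirely for this call.
        a2 = ntuple.arrays(array_cache=None)

        # A memory size: create a fresh cache of this size for this call.
        a3 = ntuple.arrays(array_cache="100 MB")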
@@ -725,6 +726,8 @@ def arrays(
             [c.num_entries for c in clusters[start_cluster_idx:stop_cluster_idx]]
         )

+        array_cache = _regularize_array_cache(array_cache, self.ntuple._file)
+
         form, field_path = self.to_akform(
             filter_name=filter_name,
             filter_typename=filter_typename,
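For context, `_regularize_array_cache` (reused here from the TBranch behavior) turns the user-facing `array_cache` argument into an actual cache object. A rough sketch of the expected behavior, inferred from the docstring above; the real implementation lives in `uproot.behaviors.TBranch` and may differ in details:

    from collections.abc import MutableMapping

    import uproot

    def regularize_array_cache_sketch(array_cache, file):
        # Already a mapping: use it directly as the cache.
        if isinstance(array_cache, MutableMapping):
            return array_cache
        # "inherit": fall back to the cache owned by the open file.
        if array_cache == "inherit":
            return file.array_cache
        # None: disable caching.
        if array_cache is None:
            return None
        # Otherwise treat the value as a memory size and build a new LRU cache.
        return uproot.LRUArrayCache(array_cache)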
@@ -747,17 +750,22 @@
                 clusters_datas,
                 start_cluster_idx,
                 stop_cluster_idx,
-                pad_missing_element=True,
             )

         for key in target_cols:
             if "column" in key:
                 key_nr = int(key.split("-")[1])
+                # Find how many elements should be padded at the beginning
+                n_padding = self.ntuple.column_records[key_nr].first_element_index
+                n_padding -= cluster_starts[start_cluster_idx]
+                n_padding = max(n_padding, 0)
                 if interpreter == "cpu":
-                    content = self.ntuple.read_col_pages(
+                    content = self.ntuple.read_cluster_range(
                         key_nr,
-                        range(start_cluster_idx, stop_cluster_idx),
-                        pad_missing_element=True,
+                        start_cluster_idx,
+                        stop_cluster_idx,
+                        missing_element_padding=n_padding,
+                        array_cache=array_cache,
                     )
                 elif interpreter == "gpu" and backend == "cuda":
                     content = content_dict[key_nr]
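The padding computation handles columns whose data only begins at a later entry than the first cluster being read (for example, fields added after earlier clusters were written). A small worked example with hypothetical numbers:

    # Hypothetical numbers to illustrate the n_padding computation.
    first_element_index = 1000   # first entry for which the column has data
    cluster_start = 800          # first entry of the first cluster in the read range

    n_padding = max(first_element_index - cluster_start, 0)
    assert n_padding == 200      # 200 leading elements must be padded

    # A column that starts before the requested range needs no padding.
    assert max(500 - 800, 0) == 0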