@@ -43,7 +43,7 @@
 from .exceptions import MigrationError
 from .helpers import table_of_model
 from .misc import chunks, log_progress, version_between, version_gte
-from .pg import column_exists, get_columns
+from .pg import column_exists, format_query, get_columns, named_cursor

 # python3 shims
 try:
@@ -281,35 +281,47 @@ def recompute_fields(cr, model, fields, ids=None, logger=_logger, chunk_size=256
     assert strategy in {"flush", "commit", "auto"}
     Model = env(cr)[model] if isinstance(model, basestring) else model
     model = Model._name
-    if ids is None:
-        cr.execute('SELECT id FROM "%s"' % table_of_model(cr, model))
-        ids = tuple(map(itemgetter(0), cr.fetchall()))
-
-    if strategy == "auto":
-        big_table = len(ids) > BIG_TABLE_THRESHOLD
-        any_tracked_field = any(getattr(Model._fields[f], _TRACKING_ATTR, False) for f in fields)
-        strategy = "commit" if big_table and any_tracked_field else "flush"
-
-    size = (len(ids) + chunk_size - 1) / chunk_size
-    qual = "%s %d-bucket" % (model, chunk_size) if chunk_size != 1 else model
-    for subids in log_progress(chunks(ids, chunk_size, list), logger, qualifier=qual, size=size):
-        records = Model.browse(subids)
-        for field_name in fields:
-            field = records._fields[field_name]
-            if hasattr(records, "_recompute_todo"):
-                # < 13.0
-                records._recompute_todo(field)
-            else:
-                Model.env.add_to_compute(field, records)

-        recompute(records)
-        # trigger dependent fields recomputation
-        records.modified(fields)
-        if strategy == "commit":
-            cr.commit()
-        else:
-            flush(records)
-            invalidate(records)
+    def get_record_ids():
+        if ids:
+            yield ids
+            return
+        MAX_SIZE = 1000000
+        ncr = named_cursor(cr, MAX_SIZE)
+        ncr.execute(format_query(cr, "SELECT id FROM {t}", t=table_of_model(cr, model)))
+        res = ncr.fetchmany(MAX_SIZE)
+        while res:
+            yield tuple(map(itemgetter(0), res))
+            res = ncr.fetchmany(MAX_SIZE)
+        ncr.close()
+
+    for _ids in get_record_ids():
+        # will be checked with the first batch of ids only
+        if strategy == "auto":
+            big_table = len(_ids) > BIG_TABLE_THRESHOLD
+            any_tracked_field = any(getattr(Model._fields[f], _TRACKING_ATTR, False) for f in fields)
+            strategy = "commit" if big_table and any_tracked_field else "flush"
+
+        size = (len(_ids) + chunk_size - 1) / chunk_size
+        qual = "%s %d-bucket" % (model, chunk_size) if chunk_size != 1 else model
+        for subids in log_progress(chunks(_ids, chunk_size, list), logger, qualifier=qual, size=size):
+            records = Model.browse(subids)
+            for field_name in fields:
+                field = records._fields[field_name]
+                if hasattr(records, "_recompute_todo"):
+                    # < 13.0
+                    records._recompute_todo(field)
+                else:
+                    Model.env.add_to_compute(field, records)
+
+            recompute(records)
+            # trigger dependent fields recomputation
+            records.modified(fields)
+            if strategy == "commit":
+                cr.commit()
+            else:
+                flush(records)
+                invalidate(records)


 class iter_browse(object):
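For context, the heart of this change is a classic PostgreSQL streaming pattern: instead of fetchall()-ing every id into client memory, a named (server-side) cursor keeps the result set on the server and hands it out in fixed-size batches. Below is a minimal standalone sketch of that pattern using psycopg2 directly; the DSN, table name, cursor name, and batch size are illustrative assumptions, not part of the commit:

import psycopg2
from psycopg2 import sql

BATCH_SIZE = 100000  # rows pulled per server round-trip; illustrative value

def iter_id_batches(conn, table):
    """Yield tuples of ids from `table` without materializing the full result set."""
    # sql.Identifier quotes the table name safely, playing the same role
    # as the format_query() helper used in the diff above.
    query = sql.SQL("SELECT id FROM {}").format(sql.Identifier(table))
    # Passing name= makes this a *named* (server-side) cursor, so rows
    # stay on the PostgreSQL server until fetchmany() pulls a batch.
    with conn.cursor(name="id_stream") as ncr:
        ncr.execute(query)
        while True:
            rows = ncr.fetchmany(BATCH_SIZE)
            if not rows:
                break
            yield tuple(row[0] for row in rows)

if __name__ == "__main__":
    conn = psycopg2.connect("dbname=example")  # placeholder DSN
    for batch in iter_id_batches(conn, "res_partner"):  # placeholder table
        print(len(batch))

The diff's get_record_ids() does essentially this through the module's own named_cursor/format_query helpers, with the extra shortcut of yielding caller-supplied ids untouched when they are given.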
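And a hypothetical migration snippet showing how the updated helper would be invoked; the import path follows upgrade-util's documented convention, but the model and field names are illustrative only:

from odoo.upgrade import util

def migrate(cr, version):
    # With ids=None the helper now streams the table's ids in batches
    # instead of fetching them all up front. strategy="commit" commits
    # after each chunk, which bounds transaction size on huge tables.
    util.recompute_fields(cr, "res.partner", ["display_name"], strategy="commit")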