Skip to content

feat(recap): Enhances appellate docket purchase to support ACMS cases #5960

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 13 commits into from
Jul 17, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
14 changes: 14 additions & 0 deletions cl/corpus_importer/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -150,6 +150,20 @@ async def ais_appellate_court(court_id: str) -> bool:
return await appellate_court_ids.filter(pk=court_id).aexists()


def should_check_acms_court(court_id: str) -> bool:
    """
    Determine whether ACMS-specific lookup logic applies to a court.

    Only a fixed, known set of appellate courts currently run on ACMS, so
    membership in that set decides the answer.

    :param court_id: The unique identifier of the court.

    :return: True if the court_id is one that uses ACMS; False otherwise.
    """
    acms_court_ids = {"ca2", "ca9"}
    return court_id in acms_court_ids


def get_start_of_quarter(d: date | None = None) -> date:
"""Get the start date of the calendar quarter requested

Expand Down
17 changes: 15 additions & 2 deletions cl/recap/mergers.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
# Code for merging PACER content into the DB
import json
import logging
import re
from copy import deepcopy
Expand Down Expand Up @@ -1584,9 +1585,21 @@ def merge_pacer_docket_into_cl_docket(
UPLOAD_TYPE.APPELLATE_DOCKET if appellate else UPLOAD_TYPE.DOCKET
)
pacer_file = PacerHtmlFiles(content_object=d, upload_type=upload_type)

# Determine how to store the report data.
# Most PACER reports include a raw HTML response and set the `response`
# attribute. However, ACMS reports typically construct the data from a
# series of API calls, and do not include a single HTML response. In those
# cases, we store the data as JSON instead.
pacer_file_name = "docket.html" if report.response else "docket.json"
pacer_file_content = (
report.response.text.encode()
if report.response
else json.dumps(report.data, default=str).encode()
)
pacer_file.filepath.save(
"docket.html", # We only care about the ext w/S3PrivateUUIDStorageTest
ContentFile(report.response.text.encode()),
pacer_file_name, # We only care about the ext w/S3PrivateUUIDStorageTest
ContentFile(pacer_file_content),
)

# Merge parties before adding docket entries, so they can access parties'
Expand Down
43 changes: 29 additions & 14 deletions cl/recap/tasks.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@
from juriscraper.lib.string_utils import CaseNameTweaker, harmonize
from juriscraper.pacer import (
ACMSAttachmentPage,
AcmsCaseSearch,
ACMSDocketReport,
AppellateDocketReport,
CaseQuery,
Expand Down Expand Up @@ -65,6 +66,7 @@
is_bankruptcy_court,
is_long_appellate_document_number,
mark_ia_upload_needed,
should_check_acms_court,
)
from cl.custom_filters.templatetags.text_filters import oxford_join
from cl.lib.filesizes import convert_size_to_bytes
Expand Down Expand Up @@ -110,6 +112,7 @@
find_subdocket_pdf_rds_from_data,
get_court_id_from_fetch_queue,
get_main_rds,
sort_acms_docket_entries,
)
from cl.scrapers.tasks import (
extract_recap_pdf,
Expand Down Expand Up @@ -1493,17 +1496,7 @@ async def process_recap_acms_docket(pk):
await sync_to_async(add_parties_and_attorneys)(d, data["parties"])

# Sort docket entries to ensure consistent ordering
# The primary sort is by 'date_filed', followed by 'document_number' (nulls
# last for a given date). This approach aligns the order with how docket
# reports are typically displayed.
data["docket_entries"] = sorted(
data["docket_entries"],
key=lambda d: (
d["date_filed"],
d["document_number"] is None,
d["document_number"],
),
)
data["docket_entries"] = sort_acms_docket_entries(data["docket_entries"])
des_returned, rds_created, content_updated = await add_docket_entries(
d, data["docket_entries"]
)
Expand Down Expand Up @@ -2348,7 +2341,7 @@ def create_or_update_docket_data_from_fetch(
fq: PacerFetchQueue,
court_id: str,
pacer_case_id: str | None,
report: DocketReport | AppellateDocketReport,
report: DocketReport | AppellateDocketReport | ACMSDocketReport,
docket_data: dict[str, Any],
) -> dict[str, str | bool]:
"""Creates or updates docket data in the database from fetched data.
Expand Down Expand Up @@ -2420,12 +2413,34 @@ def purchase_appellate_docket_by_docket_number(
:param fq: The PacerFetchQueue object
:return: a dict with information about the docket and the new data
"""
report = AppellateDocketReport(map_cl_to_pacer_id(court_id), session)
report.query(docket_number, **kwargs)
acms_case_id = None

if should_check_acms_court(court_id):
acms_search = AcmsCaseSearch(court_id=court_id, pacer_session=session)
acms_search.query(docket_number)
acms_case_id = (
acms_search.data["pcx_caseid"] if acms_search.data else None
)

pacer_court_id = map_cl_to_pacer_id(court_id)
report_class = ACMSDocketReport if acms_case_id else AppellateDocketReport
report = report_class(pacer_court_id, session)

if acms_case_id:
# ACMSDocketReport only accepts the case ID; filters are not currently
# supported for ACMS docket reports.
report.query(acms_case_id)
else:
report.query(docket_number, **kwargs)

docket_data = report.data
if not docket_data:
raise ParsingException("No data found in docket report.")

if acms_case_id:
docket_data["docket_entries"] = sort_acms_docket_entries(
docket_data["docket_entries"]
)
return create_or_update_docket_data_from_fetch(
fq, court_id, None, report, docket_data
)
Expand Down
Loading