Skip to content

Commit f24d136

Browse files
nathan-weinberg authored and Elbehery committed
feat: add auto-generated CI documentation pre-commit hook (#2890)
Our CI is entirely undocumented, this commit adds a README.md file with a table of the current CI and what it does --------- Signed-off-by: Nathan Weinberg <[email protected]>
1 parent e565b91 commit f24d136

File tree

57 files changed

+209
-190
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

57 files changed

+209
-190
lines changed

.pre-commit-config.yaml

Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -145,6 +145,24 @@ repos:
145145
pass_filenames: false
146146
require_serial: true
147147
files: ^.github/workflows/.*$
148+
- id: check-logger-usage
149+
name: Check for proper logger usage (use llama_stack.log instead)
150+
entry: bash
151+
language: system
152+
types: [python]
153+
pass_filenames: true
154+
args:
155+
- -c
156+
- |
157+
matches=$(grep -EnH '^[^#]*\b(import logging|from logging\b)' "$@" | grep -v '# allow-direct-logging' || true)
158+
if [ -n "$matches" ]; then
159+
# GitHub Actions annotation format
160+
while IFS=: read -r file line_num rest; do
161+
echo "::error file=$file,line=$line_num::Do not use 'import logging' or 'from logging import' in $file. Use the custom logger instead: from llama_stack.log import get_logger; logger = get_logger(name=__name__, category=\"core\"). If direct logging is truly needed, add: # allow-direct-logging"
162+
done <<< "$matches"
163+
exit 1
164+
fi
165+
exit 0
148166
149167
ci:
150168
autofix_commit_msg: 🎨 [pre-commit.ci] Auto format from pre-commit.com hooks

llama_stack/core/build.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,6 @@
55
# the root directory of this source tree.
66

77
import importlib.resources
8-
import logging
98
import sys
109

1110
from pydantic import BaseModel
@@ -17,10 +16,9 @@
1716
from llama_stack.core.utils.exec import run_command
1817
from llama_stack.core.utils.image_types import LlamaStackImageType
1918
from llama_stack.distributions.template import DistributionTemplate
19+
from llama_stack.log import get_logger
2020
from llama_stack.providers.datatypes import Api
2121

22-
log = logging.getLogger(__name__)
23-
2422
# These are the dependencies needed by the distribution server.
2523
# `llama-stack` is automatically installed by the installation script.
2624
SERVER_DEPENDENCIES = [
@@ -33,6 +31,8 @@
3331
"opentelemetry-exporter-otlp-proto-http",
3432
]
3533

34+
logger = get_logger(name=__name__, category="core")
35+
3636

3737
class ApiInput(BaseModel):
3838
api: Api
@@ -157,7 +157,7 @@ def build_image(
157157
return_code = run_command(args)
158158

159159
if return_code != 0:
160-
log.error(
160+
logger.error(
161161
f"Failed to build target {image_name} with return code {return_code}",
162162
)
163163

llama_stack/core/configure.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,6 @@
33
#
44
# This source code is licensed under the terms described in the LICENSE file in
55
# the root directory of this source tree.
6-
import logging
76
import textwrap
87
from typing import Any
98

@@ -21,9 +20,10 @@
2120
from llama_stack.core.utils.config_dirs import EXTERNAL_PROVIDERS_DIR
2221
from llama_stack.core.utils.dynamic import instantiate_class_type
2322
from llama_stack.core.utils.prompt_for_config import prompt_for_config
23+
from llama_stack.log import get_logger
2424
from llama_stack.providers.datatypes import Api, ProviderSpec
2525

26-
logger = logging.getLogger(__name__)
26+
logger = get_logger(name=__name__, category="core")
2727

2828

2929
def configure_single_provider(registry: dict[str, ProviderSpec], provider: Provider) -> Provider:

llama_stack/core/library_client.py

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,6 @@
77
import asyncio
88
import inspect
99
import json
10-
import logging
1110
import os
1211
import sys
1312
from concurrent.futures import ThreadPoolExecutor
@@ -48,14 +47,15 @@
4847
from llama_stack.core.utils.config import redact_sensitive_fields
4948
from llama_stack.core.utils.context import preserve_contexts_async_generator
5049
from llama_stack.core.utils.exec import in_notebook
50+
from llama_stack.log import get_logger
5151
from llama_stack.providers.utils.telemetry.tracing import (
5252
CURRENT_TRACE_CONTEXT,
5353
end_trace,
5454
setup_logger,
5555
start_trace,
5656
)
5757

58-
logger = logging.getLogger(__name__)
58+
logger = get_logger(name=__name__, category="core")
5959

6060
T = TypeVar("T")
6161

@@ -173,6 +173,8 @@ def _remove_root_logger_handlers(self):
173173
"""
174174
Remove all handlers from the root logger. Needed to avoid polluting the console with logs.
175175
"""
176+
import logging # allow-direct-logging
177+
176178
root_logger = logging.getLogger()
177179

178180
for handler in root_logger.handlers[:]:

llama_stack/core/request_headers.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -6,19 +6,19 @@
66

77
import contextvars
88
import json
9-
import logging
109
from contextlib import AbstractContextManager
1110
from typing import Any
1211

1312
from llama_stack.core.datatypes import User
13+
from llama_stack.log import get_logger
1414

1515
from .utils.dynamic import instantiate_class_type
1616

17-
log = logging.getLogger(__name__)
18-
1917
# Context variable for request provider data and auth attributes
2018
PROVIDER_DATA_VAR = contextvars.ContextVar("provider_data", default=None)
2119

20+
logger = get_logger(name=__name__, category="core")
21+
2222

2323
class RequestProviderDataContext(AbstractContextManager):
2424
"""Context manager for request provider data"""
@@ -61,7 +61,7 @@ def get_request_provider_data(self) -> Any:
6161
provider_data = validator(**val)
6262
return provider_data
6363
except Exception as e:
64-
log.error(f"Error parsing provider data: {e}")
64+
logger.error(f"Error parsing provider data: {e}")
6565
return None
6666

6767

@@ -83,7 +83,7 @@ def parse_request_provider_data(headers: dict[str, str]) -> dict[str, Any] | Non
8383
try:
8484
return json.loads(val)
8585
except json.JSONDecodeError:
86-
log.error("Provider data not encoded as a JSON object!")
86+
logger.error("Provider data not encoded as a JSON object!")
8787
return None
8888

8989

llama_stack/core/server/server.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@
99
import functools
1010
import inspect
1111
import json
12-
import logging
12+
import logging # allow-direct-logging
1313
import os
1414
import ssl
1515
import sys

llama_stack/core/utils/exec.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -4,17 +4,17 @@
44
# This source code is licensed under the terms described in the LICENSE file in
55
# the root directory of this source tree.
66

7-
import logging
7+
import importlib
88
import os
99
import signal
1010
import subprocess
1111
import sys
1212

1313
from termcolor import cprint
1414

15-
log = logging.getLogger(__name__)
15+
from llama_stack.log import get_logger
1616

17-
import importlib
17+
logger = get_logger(name=__name__, category="core")
1818

1919

2020
def formulate_run_args(image_type: str, image_name: str) -> list:
@@ -72,7 +72,7 @@ def run_command(command: list[str]) -> int:
7272
def sigint_handler(signum, frame):
7373
nonlocal ctrl_c_pressed
7474
ctrl_c_pressed = True
75-
log.info("\nCtrl-C detected. Aborting...")
75+
logger.info("\nCtrl-C detected. Aborting...")
7676

7777
try:
7878
# Set up the signal handler
@@ -86,10 +86,10 @@ def sigint_handler(signum, frame):
8686
)
8787
return result.returncode
8888
except subprocess.SubprocessError as e:
89-
log.error(f"Subprocess error: {e}")
89+
logger.error(f"Subprocess error: {e}")
9090
return 1
9191
except Exception as e:
92-
log.exception(f"Unexpected error: {e}")
92+
logger.exception(f"Unexpected error: {e}")
9393
return 1
9494
finally:
9595
# Restore the original signal handler

llama_stack/core/utils/prompt_for_config.py

Lines changed: 16 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -6,15 +6,16 @@
66

77
import inspect
88
import json
9-
import logging
109
from enum import Enum
1110
from typing import Annotated, Any, Literal, Union, get_args, get_origin
1211

1312
from pydantic import BaseModel
1413
from pydantic.fields import FieldInfo
1514
from pydantic_core import PydanticUndefinedType
1615

17-
log = logging.getLogger(__name__)
16+
from llama_stack.log import get_logger
17+
18+
logger = get_logger(name=__name__, category="core")
1819

1920

2021
def is_list_of_primitives(field_type):
@@ -107,7 +108,7 @@ def prompt_for_discriminated_union(
107108

108109
if discriminator_value in type_map:
109110
chosen_type = type_map[discriminator_value]
110-
log.info(f"\nConfiguring {chosen_type.__name__}:")
111+
logger.info(f"\nConfiguring {chosen_type.__name__}:")
111112

112113
if existing_value and (getattr(existing_value, discriminator) != discriminator_value):
113114
existing_value = None
@@ -117,7 +118,7 @@ def prompt_for_discriminated_union(
117118
setattr(sub_config, discriminator, discriminator_value)
118119
return sub_config
119120
else:
120-
log.error(f"Invalid {discriminator}. Please try again.")
121+
logger.error(f"Invalid {discriminator}. Please try again.")
121122

122123

123124
# This is somewhat elaborate, but does not purport to be comprehensive in any way.
@@ -166,7 +167,7 @@ def prompt_for_config(config_type: type[BaseModel], existing_config: BaseModel |
166167
config_data[field_name] = validated_value
167168
break
168169
except KeyError:
169-
log.error(f"Invalid choice. Please choose from: {', '.join(e.name for e in field_type)}")
170+
logger.error(f"Invalid choice. Please choose from: {', '.join(e.name for e in field_type)}")
170171
continue
171172

172173
if is_discriminated_union(field):
@@ -179,7 +180,7 @@ def prompt_for_config(config_type: type[BaseModel], existing_config: BaseModel |
179180
config_data[field_name] = None
180181
continue
181182
nested_type = get_non_none_type(field_type)
182-
log.info(f"Entering sub-configuration for {field_name}:")
183+
logger.info(f"Entering sub-configuration for {field_name}:")
183184
config_data[field_name] = prompt_for_config(nested_type, existing_value)
184185
elif is_optional(field_type) and is_discriminated_union(get_non_none_type(field_type)):
185186
prompt = f"Do you want to configure {field_name}? (y/n): "
@@ -193,7 +194,7 @@ def prompt_for_config(config_type: type[BaseModel], existing_config: BaseModel |
193194
existing_value,
194195
)
195196
elif can_recurse(field_type):
196-
log.info(f"\nEntering sub-configuration for {field_name}:")
197+
logger.info(f"\nEntering sub-configuration for {field_name}:")
197198
config_data[field_name] = prompt_for_config(
198199
field_type,
199200
existing_value,
@@ -220,7 +221,7 @@ def prompt_for_config(config_type: type[BaseModel], existing_config: BaseModel |
220221
config_data[field_name] = None
221222
break
222223
else:
223-
log.error("This field is required. Please provide a value.")
224+
logger.error("This field is required. Please provide a value.")
224225
continue
225226
else:
226227
try:
@@ -242,10 +243,10 @@ def prompt_for_config(config_type: type[BaseModel], existing_config: BaseModel |
242243
value = [element_type(item) for item in value]
243244

244245
except json.JSONDecodeError:
245-
log.error('Invalid JSON. Please enter a valid JSON-encoded list e.g., ["foo","bar"]')
246+
logger.error('Invalid JSON. Please enter a valid JSON-encoded list e.g., ["foo","bar"]')
246247
continue
247248
except ValueError as e:
248-
log.error(f"{str(e)}")
249+
logger.error(f"{str(e)}")
249250
continue
250251

251252
elif get_origin(field_type) is dict:
@@ -255,7 +256,7 @@ def prompt_for_config(config_type: type[BaseModel], existing_config: BaseModel |
255256
raise ValueError("Input must be a JSON-encoded dictionary")
256257

257258
except json.JSONDecodeError:
258-
log.error("Invalid JSON. Please enter a valid JSON-encoded dict.")
259+
logger.error("Invalid JSON. Please enter a valid JSON-encoded dict.")
259260
continue
260261

261262
# Convert the input to the correct type
@@ -268,7 +269,9 @@ def prompt_for_config(config_type: type[BaseModel], existing_config: BaseModel |
268269
value = field_type(user_input)
269270

270271
except ValueError:
271-
log.error(f"Invalid input. Expected type: {getattr(field_type, '__name__', str(field_type))}")
272+
logger.error(
273+
f"Invalid input. Expected type: {getattr(field_type, '__name__', str(field_type))}"
274+
)
272275
continue
273276

274277
try:
@@ -277,6 +280,6 @@ def prompt_for_config(config_type: type[BaseModel], existing_config: BaseModel |
277280
config_data[field_name] = validated_value
278281
break
279282
except ValueError as e:
280-
log.error(f"Validation error: {str(e)}")
283+
logger.error(f"Validation error: {str(e)}")
281284

282285
return config_type(**config_data)

llama_stack/log.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -4,11 +4,11 @@
44
# This source code is licensed under the terms described in the LICENSE file in
55
# the root directory of this source tree.
66

7-
import logging
7+
import logging # allow-direct-logging
88
import os
99
import re
1010
import sys
11-
from logging.config import dictConfig
11+
from logging.config import dictConfig # allow-direct-logging
1212

1313
from rich.console import Console
1414
from rich.errors import MarkupError

llama_stack/models/llama/llama3/multimodal/encoder_utils.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@
1313

1414
# Copyright (c) Meta Platforms, Inc. and its affiliates.
1515
import math
16-
from logging import getLogger
16+
from logging import getLogger # allow-direct-logging
1717

1818
import torch
1919
import torch.nn.functional as F

0 commit comments

Comments (0)