Skip to content

YASL API Reference

yasl

YASL - YAML Advanced Schema Language

YASL is an advanced schema language & validation tool for YAML data. YASL supports definition and validation of data structures with primitives, enumerations, and composition of defined types. YASL also supports references between types and properties, enabling complex data models.

check_paths(paths, model_name=None, disable_log=False, quiet_log=False, verbose_log=False, output='text', log_stream=sys.stdout)

Check mixed YASL schemas and YAML data from a list of paths.

This function recursively scans the provided paths for YASL schema files (.yasl) and YAML data files (.yaml, .yml). It employs a heuristic to distinguish between schema and data files regardless of extension: if a file parses strictly as a valid YASL schema (YaslRoot), it is treated as such; otherwise, it is treated as data to be validated.

Process: 1. Scan paths for all candidate files. 2. Classify each file as Schema or Data. 3. Compile all identified Schemas into the YaslRegistry. 4. Validate all identified Data files against the registered schemas. - If model_name is provided, validate against that specific model. - Otherwise, auto-detect the schema based on root keys.

Parameters:

Name Type Description Default
paths list[str]

List of file or directory paths to scan.

required
model_name str | None

Optional specific schema type name to enforce for validation.

None
disable_log bool

If True, disables all logging output.

False
quiet_log bool

If True, suppresses all output except for errors.

False
verbose_log bool

If True, enables verbose logging output.

False
output str

Output format for logs ('text', 'json', 'yaml'). Default 'text'.

'text'
log_stream StringIO | TextIO

Stream to write logs to. Default stdout.

stdout

Returns:

Name Type Description
bool bool

True if all schemas are valid AND all data files validate successfully. False if any schema fails to compile or any data file fails validation.

Source code in src/yasl/core.py
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
def check_paths(
    paths: list[str],
    model_name: str | None = None,
    disable_log: bool = False,
    quiet_log: bool = False,
    verbose_log: bool = False,
    output: str = "text",
    log_stream: StringIO | TextIO = sys.stdout,
) -> bool:
    """
    Check mixed YASL schemas and YAML data from a list of paths.

    This function recursively scans the provided paths for YASL schema files (.yasl)
    and YAML data files (.yaml, .yml). It employs a heuristic to distinguish between
    schema and data files regardless of extension: if a file parses strictly as a
    valid YASL schema (YaslRoot), it is treated as such; otherwise, it is treated
    as data to be validated.

    Process:
    1.  Scan paths for all candidate files.
    2.  Classify each file as Schema or Data.
    3.  Compile all identified Schemas into the YaslRegistry.
    4.  Validate all identified Data files against the registered schemas.
        - If `model_name` is provided, validate against that specific model.
        - Otherwise, auto-detect the schema based on root keys.

    Args:
        paths (list[str]): List of file or directory paths to scan.
        model_name (str | None): Optional specific schema type name to enforce for validation.
        disable_log (bool): If True, disables all logging output.
        quiet_log (bool): If True, suppresses all output except for errors.
        verbose_log (bool): If True, enables verbose logging output.
        output (str): Output format for logs ('text', 'json', 'yaml'). Default 'text'.
        log_stream (StringIO | TextIO): Stream to write logs to. Default stdout.

    Returns:
        bool: True if all schemas are valid AND all data files validate successfully.
              False if any schema fails to compile or any data file fails validation.
    """
    _setup_logging(
        disable=disable_log,
        verbose=verbose_log,
        quiet=quiet_log,
        output=output,
        stream=log_stream,
    )
    log = logging.getLogger("yasl")
    log.debug(f"YASL Version - {yasl_version()}")

    # Start from a clean registry so stale types from an earlier run cannot
    # influence classification or validation in this run.
    registry = YaslRegistry()
    registry.clear_caches()

    # 1. Collect all files
    files_to_process = set()
    for p_str in paths:
        p = Path(p_str)
        if p.is_dir():
            # Recursively find .yaml, .yml, .yasl
            files_to_process.update(p.rglob("*.yaml"))
            files_to_process.update(p.rglob("*.yml"))
            files_to_process.update(p.rglob("*.yasl"))
            # NOTE: the count logged here is cumulative across all scanned
            # directories, not per-directory ("so far" is intentional).
            log.debug(
                f"Scanned directory '{p}' found {len(files_to_process)} files so far."
            )
        elif p.exists():
            files_to_process.add(p)
        else:
            # Any missing input path is a hard failure, even if other paths exist.
            log.error(f"❌ Path not found: '{p}'")
            return False

    if not files_to_process:
        log.error("❌ No files found to process.")
        return False

    schemas: list[YaslRoot] = []
    # Store data as (dict_data, file_path_str)
    data_items: list[tuple[Any, str]] = []

    # Round-trip loader: preserves line/column metadata (.lc) on parsed nodes,
    # which is used below to report source line numbers in errors.
    yaml_loader = YAML(typ="rt")

    # 2. Parse and Classify
    # A queue (rather than a simple loop) is used because schema imports can
    # enqueue additional files mid-iteration.
    queue = list(files_to_process)
    processed_paths = set()

    while queue:
        file_path = queue.pop(0)
        # Use resolve() to handle symlinks and absolute paths correctly
        try:
            abs_path = file_path.resolve()
        except OSError:
            # Handle cases where path might not exist or be accessible
            continue

        path_str = str(abs_path)
        # Dedupe on the resolved path so the same file reached via different
        # spellings (relative path, symlink, import) is processed only once.
        if path_str in processed_paths:
            continue
        processed_paths.add(path_str)

        log.debug(f"Processing '{path_str}'")
        try:
            with open(file_path) as f:
                # load_all returns a generator
                docs = list(yaml_loader.load_all(f))
        except Exception as e:
            # An unreadable/unparseable file aborts the whole run.
            log.error(f"❌ Failed to read file '{path_str}': {e}")
            return False

        for doc in docs:
            if doc is None:
                continue

            # Heuristic: Try to parse as YaslRoot (Schema)
            try:
                # YaslRoot structure is strict.
                # However, an empty dict or minimal dict might pass if optional fields are None.
                # But YaslRoot requires at least one of imports, metadata, or definitions to be useful.
                # Let's trust Pydantic validation.

                # Check 1: If it's a list, it's definitely not a YaslRoot (which is a dict)
                # NOTE(review): ValidationError is raised here purely as a
                # control-flow sentinel so the except branch below classifies
                # this document as data.
                if not isinstance(doc, dict):
                    raise ValidationError.from_exception_data("Not a dict", [])

                schema_candidate = YaslRoot(**doc)

                # Check 2: If it's effectively empty, treat as data.
                if not any(
                    [
                        schema_candidate.imports,
                        schema_candidate.metadata,
                        schema_candidate.definitions,
                    ]
                ):
                    raise ValidationError.from_exception_data("Empty schema", [])

                # Inject metadata
                _inject_line_numbers(doc, schema_candidate, path_str)
                schemas.append(schema_candidate)
                log.debug(f"Found schema in '{path_str}'")

                # Handle imports if any
                # Imports are resolved relative to the importing file's directory.
                if schema_candidate.imports:
                    for imp in schema_candidate.imports:
                        imp_path = file_path.parent / imp
                        if imp_path.exists():
                            queue.append(imp_path)
                            log.debug(f"Queued imported schema: {imp_path}")
                        else:
                            # Missing import is a warning here; compilation
                            # below will surface any resulting failures.
                            log.warning(f"Imported file not found: {imp_path}")

            except ValidationError:
                # It's not a valid Schema, assume it's Data
                data_items.append((doc, path_str))
                log.debug(f"Found data in '{path_str}'")

    # 3. Compile Schemas
    log.debug(f"Compiling {len(schemas)} schemas...")
    if not compile_yasl_roots(schemas):
        log.error("❌ Schema compilation failed.")
        return False

    # 4. Validate Data
    log.debug(f"Validating {len(data_items)} data documents...")
    all_valid = True

    # We can reuse logic from load_data_files but adapted for in-memory data
    # We need to auto-detect model for each data item

    for data, path_str in data_items:
        # Auto-detect or use model_name
        candidate_model_names: list[tuple[str, str | None]] = []

        if model_name is None:
            root_keys = list(data.keys())
            # registry is singleton, populated by compile_yasl_roots
            yasl_types = registry.get_types()
            for type_id, type_def in yasl_types.items() or []:
                t_name, t_ns = type_id
                t_def_keys = list(type_def.model_fields.keys())
                # subset check: all keys in data must be in type definition?
                # No, standard auto-detect logic in load_data_files was:
                # if all(k in type_def_root_keys for k in root_keys):
                if all(k in t_def_keys for k in root_keys):
                    candidate_model_names.append((t_name, t_ns))
        else:
            # Look up specific model
            # Check if it exists
            if registry.get_type(model_name):
                # We don't know namespace from CLI arg easily unless provided.
                # registry.get_type handles ambiguous check if ns is None.
                # We can just try to find it.
                # But we need (name, ns) tuple for the loop below.
                # Let's find the matching entries.
                found_type = registry.get_type(model_name)
                if found_type:
                    # NOTE(review): uses the model class's __module__ as the
                    # namespace — assumes generated models set __module__ to
                    # the YASL namespace; confirm against the generator.
                    candidate_model_names.append((model_name, found_type.__module__))

        validated = False
        if not candidate_model_names:
            log.error(
                f"❌ No matching schema found for data in '{path_str}' (Keys: {list(data.keys())})"
            )
            if hasattr(data, "lc"):
                # ruamel round-trip nodes carry 0-based line info; report 1-based.
                log.error(f"   Line: {data.lc.line + 1}")
            all_valid = False
            continue

        for s_name, s_ns in candidate_model_names:
            model_cls = registry.get_type(s_name, s_ns)
            if not model_cls:
                continue

            # Inject info for validation
            # NOTE: mutates the parsed document in place; the target model
            # must accept (or ignore) 'yaml_line' and 'yaml_file' keys.
            if hasattr(data, "lc"):
                data["yaml_line"] = data.lc.line + 1
            data["yaml_file"] = path_str

            try:
                cast(type[BaseModel], model_cls)(**data)
                validated = True
                log.info(
                    f"✅ Data in '{path_str}' (Line {data.get('yaml_line', '?')}) validated as '{s_name}'"
                )
                break
            except ValidationError:
                # If we have multiple candidates, one failing is expected.
                # If ALL fail, we report errors.
                # We defer reporting until we try all candidates.
                pass

        if not validated:
            log.error(f"❌ Validation failed for data in '{path_str}'")
            # We should probably print the errors from the *best* candidate or all of them.
            # Rerunning validation to print errors for the first candidate if any, or just generic error.
            # Let's try to validate against the first candidate again to show errors,
            # or if multiple, maybe just say "Ambiguous or invalid".
            # Re-running simply to log errors:
            if candidate_model_names:
                target_name, target_ns = candidate_model_names[0]
                model_cls = registry.get_type(target_name, target_ns)
                try:
                    cast(type[BaseModel], model_cls)(**data)
                except ValidationError as e:
                    for error in e.errors():
                        line = _get_line_for_error(data, error["loc"])
                        path_loc = " -> ".join(map(str, error["loc"]))
                        file_info = f"{path_str}:{line}" if line else path_str
                        log.error(f"  - {file_info} -> {error['msg']} (at {path_loc})")

            all_valid = False

    if all_valid and data_items:
        log.info("✅ data validation successful")

    # Leave the singleton registry clean for subsequent callers.
    registry.clear_caches()
    return all_valid

check_schema(yasl_schema, disable_log=False, quiet_log=False, verbose_log=False, output='text', log_stream=sys.stdout)

Check the validity of a YASL schema file or directory.

Parameters:

Name Type Description Default
yasl_schema str

Path to the YASL schema file or directory.

required
disable_log bool

If True, disables all logging output.

False
quiet_log bool

If True, suppresses all output except for errors.

False
verbose_log bool

If True, enables verbose logging output.

False
output str

Output format for logs. Options are 'text', 'json', or 'yaml'. Default is 'text'.

'text'
log_stream StringIO

Stream to which logs will be written. Default is sys.stdout.

stdout

Returns:

Name Type Description
bool bool

True if the schema is valid, False otherwise.

Source code in src/yasl/core.py
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
def check_schema(
    yasl_schema: str,
    disable_log: bool = False,
    quiet_log: bool = False,
    verbose_log: bool = False,
    output: str = "text",
    log_stream: StringIO | TextIO = sys.stdout,
) -> bool:
    """
    Validate a YASL schema file, or every ``.yasl`` file under a directory.

    Each discovered schema file is loaded (imports resolved) and compiled
    into the registry via ``load_schema_files``; the registry caches are
    cleared before and after the run.

    Args:
        yasl_schema (str): Path to a YASL schema file or a directory to scan.
        disable_log (bool): If True, disables all logging output.
        quiet_log (bool): If True, suppresses all output except for errors.
        verbose_log (bool): If True, enables verbose logging output.
        output (str): Log format: 'text', 'json', or 'yaml'. Default 'text'.
        log_stream (StringIO): Destination stream for logs. Default sys.stdout.

    Returns:
        bool: True if every discovered schema is valid, False otherwise.
    """
    _setup_logging(
        disable=disable_log,
        verbose=verbose_log,
        quiet=quiet_log,
        output=output,
        stream=log_stream,
    )
    log = logging.getLogger("yasl")
    log.debug(f"YASL Version - {yasl_version()}")
    log.debug(f"Checking YASL Schema - {yasl_schema}")

    registry = YaslRegistry()
    registry.clear_caches()

    # Resolve the input into the list of schema files to check.
    schema_path = Path(yasl_schema)
    if schema_path.is_dir():
        yasl_files = list(schema_path.rglob("*.yasl"))
        if not yasl_files:
            log.error(f"❌ No .yasl files found in directory '{yasl_schema}'")
            return False
        log.debug(f"Found {len(yasl_files)} .yasl files in directory '{yasl_schema}'")
    elif not schema_path.exists():
        log.error(f"❌ YASL schema file '{yasl_schema}' not found")
        return False
    else:
        yasl_files = [schema_path]

    # Validate each file independently so one failure doesn't hide others.
    all_valid = True
    for yasl_file in yasl_files:
        if load_schema_files(str(yasl_file)) is None:
            log.error(f"❌ YASL schema validation failed for '{yasl_file}'.")
            all_valid = False
        else:
            log.info(f"✅ YASL schema '{yasl_file}' is valid.")

    registry.clear_caches()
    return all_valid

get_yasl_registry()

Get the singleton YaslRegistry instance.

Source code in src/yasl/cache.py
342
343
344
def get_yasl_registry() -> YaslRegistry:
    """Return the process-wide singleton ``YaslRegistry`` instance."""
    return yasl_registry

load_data(yaml_data, schema_name, schema_namespace=None)

Validate a dictionary of data against a specific registered YASL schema.

This function retrieves the Pydantic model corresponding to the given schema name and namespace from the YaslRegistry, and then attempts to validate the provided data against it.

Parameters:

Name Type Description Default
yaml_data dict[str, Any]

The raw dictionary containing the YAML data to validate.

required
schema_name str

The name of the schema type to validate against.

required
schema_namespace str | None

The namespace where the schema is defined.

None

Returns:

Name Type Description
Any Any

An instance of the validated Pydantic model if successful,

Any

or None if validation fails or the schema cannot be found.

Note

The function catches ValidationError and SyntaxError, logs the details, and returns None.

Source code in src/yasl/core.py
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
def load_data(
    yaml_data: dict[str, Any], schema_name: str, schema_namespace: str | None = None
) -> Any:
    """
    Validate a dictionary of data against a specific registered YASL schema.

    This function retrieves the Pydantic model corresponding to the given schema name
    and namespace from the YaslRegistry, and then attempts to validate the provided
    data against it.

    Args:
        yaml_data (dict[str, Any]): The raw dictionary containing the YAML data to validate.
        schema_name (str): The name of the schema type to validate against.
        schema_namespace (str | None): The namespace where the schema is defined.

    Returns:
        Any: An instance of the validated Pydantic model if successful,
        or None if validation fails or the schema cannot be found.

    Note:
        The function catches ValidationError and SyntaxError, logs the details,
        and returns None.
    """
    log = logging.getLogger("yasl")
    try:
        registry = YaslRegistry()

        model = registry.get_type(schema_name, schema_namespace)

        if model is None:
            # Fixed typo in user-facing message: "namespece" -> "namespace".
            log.error(
                f"❌ No schema found with name '{schema_name}' and namespace '{schema_namespace}'."
            )
            return None

        # Pydantic constructors raise ValidationError on failure rather than
        # returning None, so a successful call here means validation passed.
        # (Removed an unreachable `result is None` branch.)
        result = cast(type[BaseModel], model)(**yaml_data)  # type: ignore

        log.info("✅ YAML data validation successful!")
        return result
    except ValidationError as e:
        log.error(f"❌ Validation failed with {len(e.errors())} error(s):")
        for error in e.errors():
            log.error(f"  - {error['msg']}")
        return None
    except SyntaxError as e:
        # SyntaxError can surface from dynamically generated model code.
        log.error(f"❌ SyntaxError in file YAML data - {getattr(e, 'msg', str(e))}")
        if hasattr(e, "text") and e.text:
            log.error(f"  > {e.text.strip()}")
        return None

load_data_files(path, model_name=None)

Load and validate YAML data from a file against YASL schemas.

This function reads a YAML file (which may contain multiple documents) and attempts to validate each document against a registered YASL schema.

If model_name is provided, validation is attempted against that specific schema. If model_name is None, the function attempts to auto-detect the appropriate schema by matching the root keys of the YAML data against the fields of registered types.

Parameters:

Name Type Description Default
path str

The file path to the YAML data file.

required
model_name str | None

The name of the schema to validate against. If None, schema auto-detection is performed.

None

Returns:

Name Type Description
Any Any

A list of validated Pydantic models (one for each document in the YAML file)

Any

if successful, or None if validation fails or the file cannot be read.

Note

The function catches exceptions like FileNotFoundError, SyntaxError, YAMLError, and ValidationError, logging them as errors and returning None.

Source code in src/yasl/core.py
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
def load_data_files(path: str, model_name: str | None = None) -> Any:
    """
    Load and validate YAML data from a file against YASL schemas.

    This function reads a YAML file (which may contain multiple documents) and attempts
    to validate each document against a registered YASL schema.

    If `model_name` is provided, validation is attempted against that specific schema.
    If `model_name` is None, the function attempts to auto-detect the appropriate schema
    by matching the root keys of the YAML data against the fields of registered types.

    Args:
        path (str): The file path to the YAML data file.
        model_name (str | None): The name of the schema to validate against.
            If None, schema auto-detection is performed.

    Returns:
        Any: A list of validated Pydantic models (one for each document in the YAML file)
        if successful, or None if validation fails or the file cannot be read.

    Note:
        The function catches exceptions like FileNotFoundError, SyntaxError, YAMLError,
        and ValidationError, logging them as errors and returning None.
    """
    log = logging.getLogger("yasl")
    log.debug(f"--- Attempting to validate data '{path}' ---")
    docs = []
    data = None
    try:
        # Round-trip loader preserves line info (.lc) for error reporting.
        yaml_loader = YAML(typ="rt")
        with open(path) as f:
            docs.extend(yaml_loader.load_all(f))

    except FileNotFoundError:
        log.error(f"❌ Error - File not found at '{path}'")
        return None
    except SyntaxError as e:
        log.error(f"❌ Error - Syntax error in data file '{path}'\n  - {e}")
        return None
    except YAMLError as e:
        log.error(f"❌ Error - YAML error while parsing data '{path}'\n  - {e}")
        return None
    except ValueError as e:
        log.error(f"❌ Error - value error while parsing data '{path}'\n  - {e}")
        return None
    except Exception as e:
        log.error(f"❌ An unexpected error occurred - {type(e)} - {e}")
        traceback.print_exc()
        return None
    try:
        results = []
        registry = YaslRegistry()
        for data in docs:
            # Determine which schema(s) this document could validate against.
            candidate_model_names: list[tuple[str, str | None]] = []
            if model_name is None:
                root_keys: list[str] = list(data.keys())
                log.debug(f"Auto-detecting schema for YAML root keys in '{path}'")
                yasl_result = registry.get_types()
                for type_id, type_def in yasl_result.items() or []:
                    type_name, type_namespace = type_id
                    type_def_root_keys: list[str] = list(type_def.model_fields.keys())
                    # Candidate if every root key in the data exists as a field
                    # on the type (data keys must be a subset of model fields).
                    if all(k in type_def_root_keys for k in root_keys):
                        log.debug(
                            f"Auto-detected root model '{type_name}' for YAML file '{path}'"
                        )
                        candidate_model_names.append((type_name, type_namespace))
            else:
                registry_item = registry.get_type(model_name)
                if registry_item:
                    candidate_model_names.append(
                        (model_name, registry_item.__module__ or None)
                    )
                else:
                    # Keep the unknown name as a candidate; the loop below
                    # skips it, producing the "no valid schema" error.
                    candidate_model_names.append((model_name, None))

            log.debug(
                f"Identified candidate model names for '{path}' - {candidate_model_names}"
            )

            for schema_name, schema_namespace in candidate_model_names:
                # Single registry lookup (previously looked up twice per candidate).
                model = registry.get_type(schema_name, schema_namespace)
                if model is None:
                    continue
                log.debug(
                    f"Using schema '{schema_name}' for data validation of {path}."
                )

                # Inject line number if available
                if hasattr(data, "lc") and hasattr(data.lc, "line"):
                    data["yaml_line"] = data.lc.line + 1

                # Pydantic constructors raise on failure rather than returning
                # None, so reaching the next line means validation succeeded.
                result = cast(type[BaseModel], model)(**data)  # type: ignore
                results.append(result)
                break

        if not results:
            log.error(f"❌ No valid schema found to validate data in '{path}'")
            return None
        log.info(f"✅ YAML '{path}' data validation successful!")
        return results
    except ValidationError as e:
        log.error(f"❌ Validation failed with {len(e.errors())} error(s):")
        for error in e.errors():
            line = _get_line_for_error(data, error["loc"])
            path_str = " -> ".join(map(str, error["loc"]))
            if line:
                log.error(f"  - Line {line} - '{path_str}' -> {error['msg']}")
            else:
                log.error(f"  - Location '{path_str}' -> {error['msg']}")
        return None
    except SyntaxError as e:
        log.error(
            f"❌ SyntaxError in file '{path}' "
            f"at line {getattr(e, 'lineno', '?')}, offset {getattr(e, 'offset', '?')} - {getattr(e, 'msg', str(e))}"
        )
        if hasattr(e, "text") and e.text:
            log.error(f"  > {e.text.strip()}")
        return None
    except Exception as e:
        log.error(f"❌ An unexpected error occurred - {type(e)} - {e}")
        traceback.print_exc()
        return None

load_schema(data)

Load and validate a YASL schema from a dictionary and add the generated types to the registry.

This function parses a raw dictionary into a YaslRoot object, generating any defined enumerations and Pydantic models in the process. Note that schema imports are NOT supported when loading directly from a dictionary; use load_schema_files if import resolution is required.

Parameters:

Name Type Description Default
data dict[str, Any]

The raw dictionary containing the YASL schema definition.

required

Returns:

Name Type Description
YaslRoot YaslRoot

The validated and parsed YASL root object.

Raises:

Type Description
ValueError

If the schema defines imports (which are not supported in this mode), or if type generation fails (e.g. duplicate definitions, invalid references).

ValidationError

If the input data does not match the expected YASL structure.

Source code in src/yasl/core.py
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
def load_schema(data: dict[str, Any]) -> YaslRoot:
    """
    Load and validate a YASL schema from a dictionary and add the generated types to the registry.

    This function parses a raw dictionary into a YaslRoot object, generating
    any defined enumerations and Pydantic models in the process. Note that
    schema imports are NOT supported when loading directly from a dictionary;
    use `load_schema_files` if import resolution is required.

    Args:
        data (dict[str, Any]): The raw dictionary containing the YASL schema definition.

    Returns:
        YaslRoot: The validated and parsed YASL root object.

    Raises:
        ValueError: If the schema defines imports (which are not supported in this mode),
            or if type generation fails (e.g. duplicate definitions, invalid references).
        ValidationError: If the input data does not match the expected YASL structure.
    """
    log = logging.getLogger("yasl")
    # YaslRoot raises ValidationError on bad input rather than returning None,
    # so no None-check is needed here. (Removed an unreachable branch whose
    # error message was also missing its f-string prefix.)
    yasl = YaslRoot(**data)
    if yasl.imports is not None:
        log.error(
            "Imports are not supported by the 'load_schema' function.  Use 'load_schema_files' instead."
        )
        raise ValueError(
            "YASL import not supported when processing from data dictionary."
        )
    if yasl.metadata is not None:
        log.debug(f"YASL Metadata: {yasl.metadata}")

    # Phase 1: Enums — must exist before types that reference them.
    if yasl.definitions is not None:
        for namespace, yasl_item in yasl.definitions.items():
            if yasl_item.enums is not None:
                gen_enum_from_enumerations(namespace, yasl_item.enums)

    # Phase 2: Collect Types across all namespaces so cross-namespace
    # references can be resolved in a single generation pass.
    all_types: dict[tuple[str, str], TypeDef] = {}
    if yasl.definitions is not None:
        for namespace, yasl_item in yasl.definitions.items():
            if yasl_item.types is not None:
                for name, type_def in yasl_item.types.items():
                    all_types[(namespace, name)] = type_def

    # Phase 3: Generate Types
    if all_types:
        gen_pydantic_type_models(all_types)

    return yasl

load_schema_files(path)

Load and validate YASL schema(s) from a file.

This function reads a YAML file containing one or more YASL schema definitions. It recursively resolves any imports specified in the schemas. For each valid schema, it generates the corresponding Python Enums and Pydantic models and registers them in the YaslRegistry.

Parameters:

Name Type Description Default
path str

The file path to the YASL schema file.

required

Returns:

Type Description
list[YaslRoot] | None

list[YaslRoot] | None: A list of validated YaslRoot objects if successful,

list[YaslRoot] | None

or None if validation fails or the file cannot be read.

Note

The function catches most exceptions (FileNotFoundError, YAMLError, ValidationError) and logs them as errors, returning None.

Source code in src/yasl/core.py
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
def load_schema_files(path: str) -> list[YaslRoot] | None:
    """
    Load and validate YASL schema(s) from a file.

    Reads a YAML file that contains one or more YASL schema definitions,
    recursively resolving any imports those schemas declare. Every valid
    schema has its Python Enums and Pydantic models generated and
    registered in the YaslRegistry.

    Args:
        path (str): The file path to the YASL schema file.

    Returns:
        list[YaslRoot] | None: The validated YaslRoot objects on success;
        None when parsing, import resolution, or compilation fails.

    Note:
        Most exceptions (FileNotFoundError, YAMLError, ValidationError)
        are caught and logged as errors, yielding a None return.
    """
    logger = logging.getLogger("yasl")

    # Step 1: parse the file plus everything it imports into YaslRoot objects.
    parsed_roots = _parse_schema_files_recursive(path, logger)

    # Step 2: compile the roots into the registry; bail out on any failure.
    if parsed_roots is None or not compile_yasl_roots(parsed_roots):
        return None

    logger.debug("✅ YASL schema validation successful!")
    return parsed_roots

yasl_eval(yasl_schema, yaml_data, model_name=None, disable_log=False, quiet_log=False, verbose_log=False, output='text', log_stream=sys.stdout)

Evaluate YAML data against a YASL schema.

Parameters:

Name Type Description Default
yasl_schema str

Path to the YASL schema file or directory.

required
yaml_data str

Path to the YAML data file or directory.

required
model_name str

Specific model name to use for validation. If not provided, the model will be auto-detected.

None
disable_log bool

If True, disables all logging output.

False
quiet_log bool

If True, suppresses all output except for errors.

False
verbose_log bool

If True, enables verbose logging output.

False
output str

Output format for logs. Options are 'text', 'json', or 'yaml'. Default is 'text'.

'text'
log_stream StringIO | TextIO

Stream to which logs will be written. Default is sys.stdout.

stdout

Returns:

Type Description
list[BaseModel] | None

Optional[List[BaseModel]]: List of validated Pydantic models if validation is successful, None otherwise.

Source code in src/yasl/core.py
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
def yasl_eval(
    yasl_schema: str,
    yaml_data: str,
    model_name: str | None = None,
    disable_log: bool = False,
    quiet_log: bool = False,
    verbose_log: bool = False,
    output: str = "text",
    log_stream: StringIO | TextIO = sys.stdout,
) -> list[BaseModel] | None:
    """
    Evaluate YAML data against a YASL schema.

    Args:
        yasl_schema (str): Path to the YASL schema file or directory.
        yaml_data (str): Path to the YAML data file or directory.
        model_name (str, optional): Specific model name to use for validation.
            If not provided, the model will be auto-detected.
        disable_log (bool): If True, disables all logging output.
        quiet_log (bool): If True, suppresses all output except for errors.
        verbose_log (bool): If True, enables verbose logging output.
        output (str): Output format for logs. Options are 'text', 'json', or 'yaml'.
            Default is 'text'.
        log_stream (StringIO | TextIO): Stream to which logs will be written.
            Default is sys.stdout.

    Returns:
        list[BaseModel] | None: Validated Pydantic models from ALL processed
        YAML files if every schema compiles and every data file validates,
        None otherwise.
    """
    _setup_logging(
        disable=disable_log,
        verbose=verbose_log,
        quiet=quiet_log,
        output=output,
        stream=log_stream,
    )
    log = logging.getLogger("yasl")
    log.debug(f"YASL Version - {yasl_version()}")
    log.debug(f"YASL Schema - {yasl_schema}")
    log.debug(f"YAML Data - {yaml_data}")

    registry = YaslRegistry()

    # Collect schema files: every .yasl under a directory, or the single
    # file that was given. Path objects are built once and reused.
    schema_path = Path(yasl_schema)
    if schema_path.is_dir():
        yasl_files = list(schema_path.rglob("*.yasl"))
        if not yasl_files:
            log.error(f"❌ No .yasl files found in directory '{yasl_schema}'")
            registry.clear_caches()
            return None
        log.debug(f"Found {len(yasl_files)} .yasl files in directory '{yasl_schema}'")
    else:
        if not schema_path.exists():
            log.error(f"❌ YASL schema file '{yasl_schema}' not found")
            registry.clear_caches()
            return None
        yasl_files = [schema_path]

    # Collect data files the same way (.yaml only, matching prior behavior).
    data_path = Path(yaml_data)
    if data_path.is_dir():
        yaml_files = list(data_path.rglob("*.yaml"))
        if not yaml_files:
            log.error(f"❌ No .yaml files found in directory '{yaml_data}'")
            registry.clear_caches()
            return None
        log.debug(f"Found {len(yaml_files)} .yaml files in directory '{yaml_data}'")
    else:
        if not data_path.exists():
            log.error(f"❌ YAML data file '{yaml_data}' not found")
            registry.clear_caches()
            return None
        yaml_files = [data_path]

    # Compile every schema before any data validation begins.
    for yasl_file in yasl_files:
        if load_schema_files(str(yasl_file)) is None:
            log.error("❌ YASL schema validation failed. Exiting.")
            registry.clear_caches()
            return None

    # Validate each data file and ACCUMULATE the results. Previously the
    # per-file result list overwrote `results` on every iteration, so when
    # yaml_data was a directory only the last file's models were returned.
    results: list[BaseModel] = []
    for yaml_file in yaml_files:
        file_results = load_data_files(str(yaml_file), model_name)
        if not file_results:
            log.error(
                f"❌ Validation failed. Unable to validate data in YAML file {yaml_file}."
            )
            registry.clear_caches()
            return None
        results.extend(file_results)

    registry.clear_caches()
    return results

yasl_version()

Get the version of the YASL package.

Returns:

Name Type Description
str str

The version string defined in pyproject.toml, or an error message if the file cannot be read.

Source code in src/yasl/core.py
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
def yasl_version() -> str:
    """
    Get the version of the YASL package.

    Returns:
        str: The version string defined in pyproject.toml, or an error message if the file cannot be read.
    """
    try:
        pyproject_path = os.path.join(os.path.dirname(__file__), "../../pyproject.toml")
        with open(pyproject_path, "rb") as f:
            pyproject = tomllib.load(f)
        return pyproject["project"]["version"]
    except Exception:
        # fallback to old version if pyproject.toml is missing or malformed
        return "Unknown due to internal error reading pyproject.toml"