diff --git a/BUILD b/BUILD index 6dd357c8..35918709 100644 --- a/BUILD +++ b/BUILD @@ -41,20 +41,6 @@ bzl_library( "lib.bzl will go away in the future, please directly depend on the" + " module(s) needed as it is more efficient." ), - deps = [ - "//lib:collections", - "//lib:dicts", - "//lib:new_sets", - "//lib:partial", - "//lib:paths", - "//lib:selects", - "//lib:sets", - "//lib:shell", - "//lib:structs", - "//lib:types", - "//lib:unittest", - "//lib:versions", - ], ) bzl_library( diff --git a/docs/BUILD b/docs/BUILD index 73ace3da..b20f8214 100644 --- a/docs/BUILD +++ b/docs/BUILD @@ -160,12 +160,6 @@ stardoc_with_diff_test( out_label = "//docs:types_doc.md", ) -stardoc_with_diff_test( - name = "unittest", - bzl_library_target = "//lib:unittest", - out_label = "//docs:unittest_doc.md", -) - stardoc_with_diff_test( name = "versions", bzl_library_target = "//lib:versions", diff --git a/docs/unittest_doc.md b/docs/unittest_doc.md deleted file mode 100755 index d059e2cd..00000000 --- a/docs/unittest_doc.md +++ /dev/null @@ -1,606 +0,0 @@ - - -Unit testing support. - -Unlike most Skylib files, this exports four modules: -* `unittest` contains functions to declare and define unit tests for ordinary - Starlark functions; -* `analysistest` contains functions to declare and define tests for analysis - phase behavior of a rule, such as a given target's providers or registered - actions; -* `loadingtest` contains functions to declare and define tests for loading - phase behavior, such as macros and `native.*`; -* `asserts` contains the assertions used within tests. - -See https://bazel.build/extending/concepts for background about macros, rules, -and the different phases of a build. - - - -## unittest_toolchain - -
-unittest_toolchain(name, escape_chars_with, escape_other_chars_with, failure_templ, file_ext,
-                   join_on, success_templ)
-
- - - -**ATTRIBUTES** - - -| Name | Description | Type | Mandatory | Default | -| :------------- | :------------- | :------------- | :------------- | :------------- | -| name | A unique name for this target. | Name | required | | -| escape_chars_with | Dictionary of characters that need escaping in test failure message to prefix appended to escape those characters. For example, `{"%": "%", ">": "^"}` would replace `%` with `%%` and `>` with `^>` in the failure message before that is included in `success_templ`. | Dictionary: String -> String | optional | `{}` | -| escape_other_chars_with | String to prefix every character in test failure message which is not a key in `escape_chars_with` before including that in `success_templ`. For example, `""` would prefix every character in the failure message (except those in the keys of `escape_chars_with`) with `\`. | String | optional | `""` | -| failure_templ | Test script template with a single `%s`. That placeholder is replaced with the lines in the failure message joined with the string specified in `join_with`. The resulting script should print the failure message and exit with non-zero status. | String | required | | -| file_ext | File extension for test script, including leading dot. | String | required | | -| join_on | String used to join the lines in the failure message before including the resulting string in the script specified in `failure_templ`. | String | required | | -| success_templ | Test script generated when the test passes. Should exit with status 0. | String | required | | - - - - -## analysistest.begin - -
-analysistest.begin(ctx)
-
- -Begins an analysis test. - -This should be the first function called in an analysis test implementation -function. It initializes a "test environment" that is used to collect -assertion failures so that they can be reported and logged at the end of the -test. - - -**PARAMETERS** - - -| Name | Description | Default Value | -| :------------- | :------------- | :------------- | -| ctx | The Starlark context. Pass the implementation function's `ctx` argument in verbatim. | none | - -**RETURNS** - -A test environment struct that must be passed to assertions and finally to -`analysistest.end`. Do not rely on internal details about the fields in this -struct as it may change. - - - - -## analysistest.end - -
-analysistest.end(env)
-
- -Ends an analysis test and logs the results. - -This must be called and returned at the end of an analysis test implementation function so -that the results are reported. - - -**PARAMETERS** - - -| Name | Description | Default Value | -| :------------- | :------------- | :------------- | -| env | The test environment returned by `analysistest.begin`. | none | - -**RETURNS** - -A list of providers needed to automatically register the analysis test result. - - - - -## analysistest.fail - -
-analysistest.fail(env, msg)
-
- -Unconditionally causes the current test to fail. - -**PARAMETERS** - - -| Name | Description | Default Value | -| :------------- | :------------- | :------------- | -| env | The test environment returned by `unittest.begin`. | none | -| msg | The message to log describing the failure. | none | - - - - -## analysistest.make - -
-analysistest.make(impl, expect_failure, attrs, fragments, config_settings,
-                  extra_target_under_test_aspects, doc)
-
- -Creates an analysis test rule from its implementation function. - -An analysis test verifies the behavior of a "real" rule target by examining -and asserting on the providers given by the real target. - -Each analysis test is defined in an implementation function that must then be -associated with a rule so that a target can be built. This function handles -the boilerplate to create and return a test rule and captures the -implementation function's name so that it can be printed in test feedback. - -An example of an analysis test: - -``` -def _your_test(ctx): - env = analysistest.begin(ctx) - - # Assert statements go here - - return analysistest.end(env) - -your_test = analysistest.make(_your_test) -``` - -Recall that names of test rules must end in `_test`. - - -**PARAMETERS** - - -| Name | Description | Default Value | -| :------------- | :------------- | :------------- | -| impl | The implementation function of the unit test. | none | -| expect_failure | If true, the analysis test will expect the target_under_test to fail. Assertions can be made on the underlying failure using asserts.expect_failure | `False` | -| attrs | An optional dictionary to supplement the attrs passed to the unit test's `rule()` constructor. | `{}` | -| fragments | An optional list of fragment names that can be used to give rules access to language-specific parts of configuration. | `[]` | -| config_settings | A dictionary of configuration settings to change for the target under test and its dependencies. This may be used to essentially change 'build flags' for the target under test, and may thus be utilized to test multiple targets with different flags in a single build | `{}` | -| extra_target_under_test_aspects | An optional list of aspects to apply to the target_under_test in addition to those set up by default for the test harness itself. | `[]` | -| doc | A description of the rule that can be extracted by documentation generating tools. | `""` | - -**RETURNS** - -A rule definition that should be stored in a global whose name ends in -`_test`. - - - - -## analysistest.target_actions - -
-analysistest.target_actions(env)
-
- -Returns a list of actions registered by the target under test. - -**PARAMETERS** - - -| Name | Description | Default Value | -| :------------- | :------------- | :------------- | -| env | The test environment returned by `analysistest.begin`. | none | - -**RETURNS** - -A list of actions registered by the target under test - - - - -## analysistest.target_bin_dir_path - -
-analysistest.target_bin_dir_path(env)
-
- -Returns ctx.bin_dir.path for the target under test. - -**PARAMETERS** - - -| Name | Description | Default Value | -| :------------- | :------------- | :------------- | -| env | The test environment returned by `analysistest.begin`. | none | - -**RETURNS** - -Output bin dir path string. - - - - -## analysistest.target_under_test - -
-analysistest.target_under_test(env)
-
- -Returns the target under test. - -**PARAMETERS** - - -| Name | Description | Default Value | -| :------------- | :------------- | :------------- | -| env | The test environment returned by `analysistest.begin`. | none | - -**RETURNS** - -The target under test. - - - - -## asserts.equals - -
-asserts.equals(env, expected, actual, msg)
-
- -Asserts that the given `expected` and `actual` values are equal. - -**PARAMETERS** - - -| Name | Description | Default Value | -| :------------- | :------------- | :------------- | -| env | The test environment returned by `unittest.begin`. | none | -| expected | The expected value of some computation. | none | -| actual | The actual value returned by some computation. | none | -| msg | An optional message that will be printed that describes the failure. If omitted, a default will be used. | `None` | - - - - -## asserts.expect_failure - -
-asserts.expect_failure(env, expected_failure_msg)
-
- -Asserts that the target under test has failed with a given error message. - -This requires that the analysis test is created with `analysistest.make()` and -`expect_failures = True` is specified. - - -**PARAMETERS** - - -| Name | Description | Default Value | -| :------------- | :------------- | :------------- | -| env | The test environment returned by `analysistest.begin`. | none | -| expected_failure_msg | The error message to expect as a result of analysis failures. | `""` | - - - - -## asserts.false - -
-asserts.false(env, condition, msg)
-
- -Asserts that the given `condition` is false. - -**PARAMETERS** - - -| Name | Description | Default Value | -| :------------- | :------------- | :------------- | -| env | The test environment returned by `unittest.begin`. | none | -| condition | A value that will be evaluated in a Boolean context. | none | -| msg | An optional message that will be printed that describes the failure. If omitted, a default will be used. | `"Expected condition to be false, but was true."` | - - - - -## asserts.new_set_equals - -
-asserts.new_set_equals(env, expected, actual, msg)
-
- -Asserts that the given `expected` and `actual` sets are equal. - -**PARAMETERS** - - -| Name | Description | Default Value | -| :------------- | :------------- | :------------- | -| env | The test environment returned by `unittest.begin`. | none | -| expected | The expected set resulting from some computation. | none | -| actual | The actual set returned by some computation. | none | -| msg | An optional message that will be printed that describes the failure. If omitted, a default will be used. | `None` | - - - - -## asserts.set_equals - -
-asserts.set_equals(env, expected, actual, msg)
-
- -Asserts that the given `expected` and `actual` sets are equal. - -**PARAMETERS** - - -| Name | Description | Default Value | -| :------------- | :------------- | :------------- | -| env | The test environment returned by `unittest.begin`. | none | -| expected | The expected set resulting from some computation. | none | -| actual | The actual set returned by some computation. | none | -| msg | An optional message that will be printed that describes the failure. If omitted, a default will be used. | `None` | - - - - -## asserts.true - -
-asserts.true(env, condition, msg)
-
- -Asserts that the given `condition` is true. - -**PARAMETERS** - - -| Name | Description | Default Value | -| :------------- | :------------- | :------------- | -| env | The test environment returned by `unittest.begin`. | none | -| condition | A value that will be evaluated in a Boolean context. | none | -| msg | An optional message that will be printed that describes the failure. If omitted, a default will be used. | `"Expected condition to be true, but was false."` | - - - - -## loadingtest.equals - -
-loadingtest.equals(env, test_case, expected, actual)
-
- -Creates a test case for asserting state at LOADING phase. - -**PARAMETERS** - - -| Name | Description | Default Value | -| :------------- | :------------- | :------------- | -| env | Loading test env created from loadingtest.make | none | -| test_case | Name of the test case | none | -| expected | Expected value to test | none | -| actual | Actual value received. | none | - -**RETURNS** - -None, creates test case - - - - -## loadingtest.make - -
-loadingtest.make(name)
-
- -Creates a loading phase test environment and test_suite. - -**PARAMETERS** - - -| Name | Description | Default Value | -| :------------- | :------------- | :------------- | -| name | name of the suite of tests to create | none | - -**RETURNS** - -loading phase environment passed to other loadingtest functions - - - - -## register_unittest_toolchains - -
-register_unittest_toolchains()
-
- -Registers the toolchains for unittest users. - - - - - -## unittest.begin - -
-unittest.begin(ctx)
-
- -Begins a unit test. - -This should be the first function called in a unit test implementation -function. It initializes a "test environment" that is used to collect -assertion failures so that they can be reported and logged at the end of the -test. - - -**PARAMETERS** - - -| Name | Description | Default Value | -| :------------- | :------------- | :------------- | -| ctx | The Starlark context. Pass the implementation function's `ctx` argument in verbatim. | none | - -**RETURNS** - -A test environment struct that must be passed to assertions and finally to -`unittest.end`. Do not rely on internal details about the fields in this -struct as it may change. - - - - -## unittest.end - -
-unittest.end(env)
-
- -Ends a unit test and logs the results. - -This must be called and returned at the end of a unit test implementation function so -that the results are reported. - - -**PARAMETERS** - - -| Name | Description | Default Value | -| :------------- | :------------- | :------------- | -| env | The test environment returned by `unittest.begin`. | none | - -**RETURNS** - -A list of providers needed to automatically register the test result. - - - - -## unittest.fail - -
-unittest.fail(env, msg)
-
- -Unconditionally causes the current test to fail. - -**PARAMETERS** - - -| Name | Description | Default Value | -| :------------- | :------------- | :------------- | -| env | The test environment returned by `unittest.begin`. | none | -| msg | The message to log describing the failure. | none | - - - - -## unittest.make - -
-unittest.make(impl, attrs, doc, toolchains)
-
- -Creates a unit test rule from its implementation function. - -Each unit test is defined in an implementation function that must then be -associated with a rule so that a target can be built. This function handles -the boilerplate to create and return a test rule and captures the -implementation function's name so that it can be printed in test feedback. - -The optional `attrs` argument can be used to define dependencies for this -test, in order to form unit tests of rules. - -The optional `toolchains` argument can be used to define toolchain -dependencies for this test. - -An example of a unit test: - -``` -def _your_test(ctx): - env = unittest.begin(ctx) - - # Assert statements go here - - return unittest.end(env) - -your_test = unittest.make(_your_test) -``` - -Recall that names of test rules must end in `_test`. - - -**PARAMETERS** - - -| Name | Description | Default Value | -| :------------- | :------------- | :------------- | -| impl | The implementation function of the unit test. | none | -| attrs | An optional dictionary to supplement the attrs passed to the unit test's `rule()` constructor. | `{}` | -| doc | A description of the rule that can be extracted by documentation generating tools. | `""` | -| toolchains | An optional list to supplement the toolchains passed to the unit test's `rule()` constructor. | `[]` | - -**RETURNS** - -A rule definition that should be stored in a global whose name ends in -`_test`. - - - - -## unittest.suite - -
-unittest.suite(name, test_rules)
-
- -Defines a `test_suite` target that contains multiple tests. - -After defining your test rules in a `.bzl` file, you need to create targets -from those rules so that `blaze test` can execute them. Doing this manually -in a BUILD file would consist of listing each test in your `load` statement -and then creating each target one by one. To reduce duplication, we recommend -writing a macro in your `.bzl` file to instantiate all targets, and calling -that macro from your BUILD file so you only have to load one symbol. - -You can use this function to create the targets and wrap them in a single -test_suite target. If a test rule requires no arguments, you can simply list -it as an argument. If you wish to supply attributes explicitly, you can do so -using `partial.make()`. For instance, in your `.bzl` file, you could write: - -``` -def your_test_suite(): - unittest.suite( - "your_test_suite", - your_test, - your_other_test, - partial.make(yet_another_test, timeout = "short"), - ) -``` - -Then, in your `BUILD` file, simply load the macro and invoke it to have all -of the targets created: - -``` -load("//path/to/your/package:tests.bzl", "your_test_suite") -your_test_suite() -``` - -If you pass _N_ unit test rules to `unittest.suite`, _N_ + 1 targets will be -created: a `test_suite` target named `${name}` (where `${name}` is the name -argument passed in here) and targets named `${name}_test_${i}`, where `${i}` -is the index of the test in the `test_rules` list, which is used to uniquely -name each target. - - -**PARAMETERS** - - -| Name | Description | Default Value | -| :------------- | :------------- | :------------- | -| name | The name of the `test_suite` target, and the prefix of all the test target names. | none | -| test_rules | A list of test rules defines by `unittest.test`. | none | - - diff --git a/lib/unittest.bzl b/lib/unittest.bzl index 3860ad19..e630e11f 100644 --- a/lib/unittest.bzl +++ b/lib/unittest.bzl @@ -1,696 +1,22 @@ -# Copyright 2017 The Bazel Authors. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -"""Unit testing support. - -Unlike most Skylib files, this exports four modules: -* `unittest` contains functions to declare and define unit tests for ordinary - Starlark functions; -* `analysistest` contains functions to declare and define tests for analysis - phase behavior of a rule, such as a given target's providers or registered - actions; -* `loadingtest` contains functions to declare and define tests for loading - phase behavior, such as macros and `native.*`; -* `asserts` contains the assertions used within tests. - -See https://bazel.build/extending/concepts for background about macros, rules, -and the different phases of a build. +"""Unit testing support now lives in @rules_testing//lib:unittest.bzl. This version + is deprecated and will be removed in the future. 
+ Please replace loads of @bazel_skylib//lib:unittest.bzl to @rules_testing//lib:unittest.bzl + to reflect the new location of unittest.bzl" """ -load(":new_sets.bzl", new_sets = "sets") -load(":partial.bzl", "partial") -load(":types.bzl", "types") - -# The following function should only be called from WORKSPACE files and workspace macros. -# buildifier: disable=unnamed-macro -def register_unittest_toolchains(): - """Registers the toolchains for unittest users.""" - native.register_toolchains( - "@bazel_skylib//toolchains/unittest:cmd_toolchain", - "@bazel_skylib//toolchains/unittest:bash_toolchain", - ) - -TOOLCHAIN_TYPE = "@bazel_skylib//toolchains/unittest:toolchain_type" - -_UnittestToolchainInfo = provider( - doc = "Execution platform information for rules in the bazel_skylib repository.", - fields = [ - "file_ext", - "success_templ", - "failure_templ", - "join_on", - "escape_chars_with", - "escape_other_chars_with", - ], -) - -def _unittest_toolchain_impl(ctx): - return [ - platform_common.ToolchainInfo( - unittest_toolchain_info = _UnittestToolchainInfo( - file_ext = ctx.attr.file_ext, - success_templ = ctx.attr.success_templ, - failure_templ = ctx.attr.failure_templ, - join_on = ctx.attr.join_on, - escape_chars_with = ctx.attr.escape_chars_with, - escape_other_chars_with = ctx.attr.escape_other_chars_with, - ), - ), - ] - -unittest_toolchain = rule( - implementation = _unittest_toolchain_impl, - attrs = { - "failure_templ": attr.string( - mandatory = True, - doc = ( - "Test script template with a single `%s`. That " + - "placeholder is replaced with the lines in the " + - "failure message joined with the string " + - "specified in `join_with`. The resulting script " + - "should print the failure message and exit with " + - "non-zero status." - ), - ), - "file_ext": attr.string( - mandatory = True, - doc = ( - "File extension for test script, including leading dot." - ), - ), - "join_on": attr.string( - mandatory = True, - doc = ( - "String used to join the lines in the failure " + - "message before including the resulting string " + - "in the script specified in `failure_templ`." - ), - ), - "success_templ": attr.string( - mandatory = True, - doc = ( - "Test script generated when the test passes. " + - "Should exit with status 0." - ), - ), - "escape_chars_with": attr.string_dict( - doc = ( - "Dictionary of characters that need escaping in " + - "test failure message to prefix appended to escape " + - "those characters. For example, " + - '`{"%": "%", ">": "^"}` would replace `%` with ' + - "`%%` and `>` with `^>` in the failure message " + - "before that is included in `success_templ`." - ), - ), - "escape_other_chars_with": attr.string( - default = "", - doc = ( - "String to prefix every character in test failure " + - "message which is not a key in `escape_chars_with` " + - "before including that in `success_templ`. For " + - 'example, `"\"` would prefix every character in ' + - "the failure message (except those in the keys of " + - "`escape_chars_with`) with `\\`." - ), - ), - }, -) - -def _impl_function_name(impl): - """Derives the name of the given rule implementation function. - - This can be used for better test feedback. - - Args: - impl: the rule implementation function - - Returns: - The name of the given function - """ - - # Starlark currently stringifies a function as "", so we use - # that knowledge to parse the "NAME" portion out. If this behavior ever - # changes, we'll need to update this. - # TODO(bazel-team): Expose a ._name field on functions to avoid this. 
- impl_name = str(impl) - impl_name = impl_name.partition("")[0] - -def _make(impl, attrs = {}, doc = "", toolchains = []): - """Creates a unit test rule from its implementation function. - - Each unit test is defined in an implementation function that must then be - associated with a rule so that a target can be built. This function handles - the boilerplate to create and return a test rule and captures the - implementation function's name so that it can be printed in test feedback. - - The optional `attrs` argument can be used to define dependencies for this - test, in order to form unit tests of rules. - - The optional `toolchains` argument can be used to define toolchain - dependencies for this test. - - An example of a unit test: - - ``` - def _your_test(ctx): - env = unittest.begin(ctx) - - # Assert statements go here - - return unittest.end(env) - - your_test = unittest.make(_your_test) - ``` - - Recall that names of test rules must end in `_test`. - - Args: - impl: The implementation function of the unit test. - attrs: An optional dictionary to supplement the attrs passed to the - unit test's `rule()` constructor. - doc: A description of the rule that can be extracted by documentation generating tools. - toolchains: An optional list to supplement the toolchains passed to - the unit test's `rule()` constructor. - - Returns: - A rule definition that should be stored in a global whose name ends in - `_test`. - """ - attrs = dict(attrs) - attrs["_impl_name"] = attr.string(default = _impl_function_name(impl)) - - return rule( - impl, - doc = doc, - attrs = attrs, - _skylark_testable = True, - test = True, - toolchains = toolchains + [TOOLCHAIN_TYPE], - ) - -_ActionInfo = provider( - doc = "Information relating to the target under test.", - fields = ["actions", "bin_path"], +load( + "@rules_testing//lib:unittest.bzl", + _analysistest = "analysistest", + _asserts = "asserts", + _loadingtest = "loadingtest", + _unittest = "unittest", ) -def _action_retrieving_aspect_impl(target, ctx): - return [ - _ActionInfo( - actions = target.actions, - bin_path = ctx.bin_dir.path, - ), - ] - -_action_retrieving_aspect = aspect( - attr_aspects = [], - implementation = _action_retrieving_aspect_impl, -) - -# TODO(cparsons): Provide more full documentation on analysis testing in README. -def _make_analysis_test( - impl, - expect_failure = False, - attrs = {}, - fragments = [], - config_settings = {}, - extra_target_under_test_aspects = [], - doc = ""): - """Creates an analysis test rule from its implementation function. - - An analysis test verifies the behavior of a "real" rule target by examining - and asserting on the providers given by the real target. - - Each analysis test is defined in an implementation function that must then be - associated with a rule so that a target can be built. This function handles - the boilerplate to create and return a test rule and captures the - implementation function's name so that it can be printed in test feedback. - - An example of an analysis test: - - ``` - def _your_test(ctx): - env = analysistest.begin(ctx) - - # Assert statements go here - - return analysistest.end(env) - - your_test = analysistest.make(_your_test) - ``` - - Recall that names of test rules must end in `_test`. - - Args: - impl: The implementation function of the unit test. - expect_failure: If true, the analysis test will expect the target_under_test - to fail. 
Assertions can be made on the underlying failure using asserts.expect_failure - attrs: An optional dictionary to supplement the attrs passed to the - unit test's `rule()` constructor. - fragments: An optional list of fragment names that can be used to give rules access to - language-specific parts of configuration. - config_settings: A dictionary of configuration settings to change for the target under - test and its dependencies. This may be used to essentially change 'build flags' for - the target under test, and may thus be utilized to test multiple targets with different - flags in a single build - extra_target_under_test_aspects: An optional list of aspects to apply to the target_under_test - in addition to those set up by default for the test harness itself. - doc: A description of the rule that can be extracted by documentation generating tools. - - Returns: - A rule definition that should be stored in a global whose name ends in - `_test`. - """ - attrs = dict(attrs) - attrs["_impl_name"] = attr.string(default = _impl_function_name(impl)) - - changed_settings = dict(config_settings) - if expect_failure: - changed_settings["//command_line_option:allow_analysis_failures"] = "True" - - target_attr_kwargs = {} - if changed_settings: - test_transition = analysis_test_transition( - settings = changed_settings, - ) - target_attr_kwargs["cfg"] = test_transition - - attrs["target_under_test"] = attr.label( - aspects = [_action_retrieving_aspect] + extra_target_under_test_aspects, - mandatory = True, - **target_attr_kwargs - ) - - return rule( - impl, - doc = doc, - attrs = attrs, - fragments = fragments, - test = True, - toolchains = [TOOLCHAIN_TYPE], - analysis_test = True, - ) - -def _suite(name, *test_rules): - """Defines a `test_suite` target that contains multiple tests. - - After defining your test rules in a `.bzl` file, you need to create targets - from those rules so that `blaze test` can execute them. Doing this manually - in a BUILD file would consist of listing each test in your `load` statement - and then creating each target one by one. To reduce duplication, we recommend - writing a macro in your `.bzl` file to instantiate all targets, and calling - that macro from your BUILD file so you only have to load one symbol. - - You can use this function to create the targets and wrap them in a single - test_suite target. If a test rule requires no arguments, you can simply list - it as an argument. If you wish to supply attributes explicitly, you can do so - using `partial.make()`. For instance, in your `.bzl` file, you could write: - - ``` - def your_test_suite(): - unittest.suite( - "your_test_suite", - your_test, - your_other_test, - partial.make(yet_another_test, timeout = "short"), - ) - ``` - - Then, in your `BUILD` file, simply load the macro and invoke it to have all - of the targets created: - - ``` - load("//path/to/your/package:tests.bzl", "your_test_suite") - your_test_suite() - ``` - - If you pass _N_ unit test rules to `unittest.suite`, _N_ + 1 targets will be - created: a `test_suite` target named `${name}` (where `${name}` is the name - argument passed in here) and targets named `${name}_test_${i}`, where `${i}` - is the index of the test in the `test_rules` list, which is used to uniquely - name each target. - - Args: - name: The name of the `test_suite` target, and the prefix of all the test - target names. - *test_rules: A list of test rules defines by `unittest.test`. 
- """ - test_names = [] - for index, test_rule in enumerate(test_rules): - test_name = "%s_test_%d" % (name, index) - if partial.is_instance(test_rule): - partial.call(test_rule, name = test_name) - else: - test_rule(name = test_name) - test_names.append(test_name) - - native.test_suite( - name = name, - tests = [":%s" % t for t in test_names], - ) - -def _begin(ctx): - """Begins a unit test. - - This should be the first function called in a unit test implementation - function. It initializes a "test environment" that is used to collect - assertion failures so that they can be reported and logged at the end of the - test. - - Args: - ctx: The Starlark context. Pass the implementation function's `ctx` argument - in verbatim. - - Returns: - A test environment struct that must be passed to assertions and finally to - `unittest.end`. Do not rely on internal details about the fields in this - struct as it may change. - """ - return struct(ctx = ctx, failures = []) - -def _begin_analysis_test(ctx): - """Begins an analysis test. - - This should be the first function called in an analysis test implementation - function. It initializes a "test environment" that is used to collect - assertion failures so that they can be reported and logged at the end of the - test. - - Args: - ctx: The Starlark context. Pass the implementation function's `ctx` argument - in verbatim. - - Returns: - A test environment struct that must be passed to assertions and finally to - `analysistest.end`. Do not rely on internal details about the fields in this - struct as it may change. - """ - return struct(ctx = ctx, failures = []) - -def _end_analysis_test(env): - """Ends an analysis test and logs the results. - - This must be called and returned at the end of an analysis test implementation function so - that the results are reported. - - Args: - env: The test environment returned by `analysistest.begin`. - - Returns: - A list of providers needed to automatically register the analysis test result. - """ - return [AnalysisTestResultInfo( - success = (len(env.failures) == 0), - message = "\n".join(env.failures), - )] - -def _end(env): - """Ends a unit test and logs the results. - - This must be called and returned at the end of a unit test implementation function so - that the results are reported. - - Args: - env: The test environment returned by `unittest.begin`. - - Returns: - A list of providers needed to automatically register the test result. - """ - - tc = env.ctx.toolchains[TOOLCHAIN_TYPE].unittest_toolchain_info - testbin = env.ctx.actions.declare_file(env.ctx.label.name + tc.file_ext) - if env.failures: - failure_message_lines = "\n".join(env.failures).split("\n") - escaped_failure_message_lines = [ - "".join([ - tc.escape_chars_with.get(c, tc.escape_other_chars_with) + c - for c in line.elems() - ]) - for line in failure_message_lines - ] - cmd = tc.failure_templ % tc.join_on.join(escaped_failure_message_lines) - else: - cmd = tc.success_templ - - env.ctx.actions.write( - output = testbin, - content = cmd, - is_executable = True, - ) - return [DefaultInfo(executable = testbin)] - -def _fail(env, msg): - """Unconditionally causes the current test to fail. - - Args: - env: The test environment returned by `unittest.begin`. - msg: The message to log describing the failure. - """ - full_msg = "In test %s: %s" % (env.ctx.attr._impl_name, msg) - - # There isn't a better way to output the message in Starlark, so use print. 
- # buildifier: disable=print - print(full_msg) - env.failures.append(full_msg) - -def _assert_true( - env, - condition, - msg = "Expected condition to be true, but was false."): - """Asserts that the given `condition` is true. - - Args: - env: The test environment returned by `unittest.begin`. - condition: A value that will be evaluated in a Boolean context. - msg: An optional message that will be printed that describes the failure. - If omitted, a default will be used. - """ - if not condition: - _fail(env, msg) - -def _assert_false( - env, - condition, - msg = "Expected condition to be false, but was true."): - """Asserts that the given `condition` is false. - - Args: - env: The test environment returned by `unittest.begin`. - condition: A value that will be evaluated in a Boolean context. - msg: An optional message that will be printed that describes the failure. - If omitted, a default will be used. - """ - if condition: - _fail(env, msg) - -def _assert_equals(env, expected, actual, msg = None): - """Asserts that the given `expected` and `actual` values are equal. - - Args: - env: The test environment returned by `unittest.begin`. - expected: The expected value of some computation. - actual: The actual value returned by some computation. - msg: An optional message that will be printed that describes the failure. - If omitted, a default will be used. - """ - if expected != actual: - expectation_msg = 'Expected "%s", but got "%s"' % (expected, actual) - if msg: - full_msg = "%s (%s)" % (msg, expectation_msg) - else: - full_msg = expectation_msg - _fail(env, full_msg) - -def _assert_set_equals(env, expected, actual, msg = None): - """Asserts that the given `expected` and `actual` sets are equal. - - Args: - env: The test environment returned by `unittest.begin`. - expected: The expected set resulting from some computation. - actual: The actual set returned by some computation. - msg: An optional message that will be printed that describes the failure. - If omitted, a default will be used. - """ - if not new_sets.is_equal(expected, actual): - missing = new_sets.difference(expected, actual) - unexpected = new_sets.difference(actual, expected) - expectation_msg = "Expected %s, but got %s" % (new_sets.str(expected), new_sets.str(actual)) - if new_sets.length(missing) > 0: - expectation_msg += ", missing are %s" % (new_sets.str(missing)) - if new_sets.length(unexpected) > 0: - expectation_msg += ", unexpected are %s" % (new_sets.str(unexpected)) - if msg: - full_msg = "%s (%s)" % (msg, expectation_msg) - else: - full_msg = expectation_msg - _fail(env, full_msg) - -_assert_new_set_equals = _assert_set_equals - -def _expect_failure(env, expected_failure_msg = ""): - """Asserts that the target under test has failed with a given error message. - - This requires that the analysis test is created with `analysistest.make()` and - `expect_failures = True` is specified. - - Args: - env: The test environment returned by `analysistest.begin`. - expected_failure_msg: The error message to expect as a result of analysis failures. - """ - dep = _target_under_test(env) - if AnalysisFailureInfo in dep: - actual_errors = "" - for cause in dep[AnalysisFailureInfo].causes.to_list(): - actual_errors += cause.message + "\n" - if actual_errors.find(expected_failure_msg) < 0: - expectation_msg = "Expected errors to contain '%s' but did not. 
" % expected_failure_msg - expectation_msg += "Actual errors:%s" % actual_errors - _fail(env, expectation_msg) - else: - _fail(env, "Expected failure of target_under_test, but found success") - -def _target_actions(env): - """Returns a list of actions registered by the target under test. - - Args: - env: The test environment returned by `analysistest.begin`. - - Returns: - A list of actions registered by the target under test - """ - - # Validate? - return _target_under_test(env)[_ActionInfo].actions - -def _target_bin_dir_path(env): - """Returns ctx.bin_dir.path for the target under test. - - Args: - env: The test environment returned by `analysistest.begin`. - - Returns: - Output bin dir path string. - """ - return _target_under_test(env)[_ActionInfo].bin_path - -def _target_under_test(env): - """Returns the target under test. - - Args: - env: The test environment returned by `analysistest.begin`. - - Returns: - The target under test. - """ - result = getattr(env.ctx.attr, "target_under_test") - if types.is_list(result): - if result: - return result[0] - else: - fail("test rule does not have a target_under_test") - return result - -def _loading_test_impl(ctx): - tc = ctx.toolchains[TOOLCHAIN_TYPE].unittest_toolchain_info - content = tc.success_templ - if ctx.attr.failure_message: - content = tc.failure_templ % ctx.attr.failure_message - - testbin = ctx.actions.declare_file("loading_test_" + ctx.label.name + tc.file_ext) - ctx.actions.write( - output = testbin, - content = content, - is_executable = True, - ) - return [DefaultInfo(executable = testbin)] - -_loading_test = rule( - implementation = _loading_test_impl, - attrs = { - "failure_message": attr.string(), - }, - toolchains = [TOOLCHAIN_TYPE], - test = True, -) - -def _loading_make(name): - """Creates a loading phase test environment and test_suite. - - Args: - name: name of the suite of tests to create - - Returns: - loading phase environment passed to other loadingtest functions - """ - native.test_suite( - name = name + "_tests", - tags = [name + "_test_case"], - ) - return struct(name = name) - -def _loading_assert_equals(env, test_case, expected, actual): - """Creates a test case for asserting state at LOADING phase. - - Args: - env: Loading test env created from loadingtest.make - test_case: Name of the test case - expected: Expected value to test - actual: Actual value received. 
- - Returns: - None, creates test case - """ - - msg = None - if expected != actual: - msg = 'Expected "%s", but got "%s"' % (expected, actual) - - _loading_test( - name = "%s_%s" % (env.name, test_case), - failure_message = msg, - tags = [env.name + "_test_case"], - ) - -asserts = struct( - expect_failure = _expect_failure, - equals = _assert_equals, - false = _assert_false, - set_equals = _assert_set_equals, - new_set_equals = _assert_new_set_equals, - true = _assert_true, -) - -unittest = struct( - make = _make, - suite = _suite, - begin = _begin, - end = _end, - fail = _fail, -) - -analysistest = struct( - make = _make_analysis_test, - begin = _begin_analysis_test, - end = _end_analysis_test, - fail = _fail, - target_actions = _target_actions, - target_bin_dir_path = _target_bin_dir_path, - target_under_test = _target_under_test, -) - -loadingtest = struct( - make = _loading_make, - equals = _loading_assert_equals, -) +unittest = _unittest +analysistest = _analysistest +loadingtest = _loadingtest +asserts = _asserts diff --git a/tests/BUILD b/tests/BUILD index 7d978cb2..cf989326 100644 --- a/tests/BUILD +++ b/tests/BUILD @@ -12,7 +12,6 @@ load(":shell_tests.bzl", "shell_args_test_gen", "shell_test_suite") load(":structs_tests.bzl", "structs_test_suite") load(":subpackages_tests.bzl", "subpackages_test_suite") load(":types_tests.bzl", "types_test_suite") -load(":unittest_tests.bzl", "unittest_passing_tests_suite") load(":versions_tests.bzl", "versions_test_suite") package( @@ -53,34 +52,8 @@ subpackages_test_suite() types_test_suite() -unittest_passing_tests_suite() - versions_test_suite() -bzl_library( - name = "unittest_tests_bzl", - srcs = ["unittest_tests.bzl"], - visibility = ["//visibility:private"], - deps = ["//lib:unittest"], -) - -sh_test( - name = "unittest_e2e_test", - srcs = ["unittest_test.sh"], - data = [ - ":unittest.bash", - ":unittest_tests_bzl", - "//lib:dicts", - "//lib:new_sets", - "//lib:sets", - "//lib:types", - "//lib:unittest", - "//toolchains/unittest:test_deps", - "@bazel_tools//tools/bash/runfiles", - ], - tags = ["local"], -) - sh_test( name = "analysis_test_e2e_test", srcs = ["analysis_test_test.sh"], diff --git a/tests/collections_tests.bzl b/tests/collections_tests.bzl index 9e866721..40f7b1f2 100644 --- a/tests/collections_tests.bzl +++ b/tests/collections_tests.bzl @@ -14,8 +14,8 @@ """Unit tests for collections.bzl.""" +load("@rules_testing//lib:unittest.bzl", "asserts", "unittest") load("//lib:collections.bzl", "collections") -load("//lib:unittest.bzl", "asserts", "unittest") def _after_each_test(ctx): """Unit tests for collections.after_each.""" diff --git a/tests/common_settings_tests.bzl b/tests/common_settings_tests.bzl index efbe0e0b..4b4f4a9f 100644 --- a/tests/common_settings_tests.bzl +++ b/tests/common_settings_tests.bzl @@ -14,7 +14,7 @@ """Analysis tests for common_settings.bzl.""" -load("//lib:unittest.bzl", "analysistest", "asserts") +load("@rules_testing//lib:unittest.bzl", "analysistest", "asserts") load("//rules:common_settings.bzl", "int_flag", "int_setting", "string_flag", "string_setting") def _template_variable_info_contents_test_impl(ctx): diff --git a/tests/dicts_tests.bzl b/tests/dicts_tests.bzl index 816fcea1..c4fcb9ce 100644 --- a/tests/dicts_tests.bzl +++ b/tests/dicts_tests.bzl @@ -14,8 +14,8 @@ """Unit tests for dicts.bzl.""" +load("@rules_testing//lib:unittest.bzl", "asserts", "unittest") load("//lib:dicts.bzl", "dicts") -load("//lib:unittest.bzl", "asserts", "unittest") def _add_test(ctx): """Unit tests for dicts.add.""" 
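The test files below are all migrated the same way: the `load` of the unittest symbols moves from `//lib:unittest.bzl` to `@rules_testing//lib:unittest.bzl`, while the assertion API itself is unchanged because the skylib file above now only re-exports the rules_testing symbols. A minimal sketch of what a fully migrated downstream test file could look like; the file name, test body, and suite macro are illustrative and not part of this change:

```
# example_tests.bzl -- illustrative only; mirrors the load migration applied
# to the skylib test files in this change.
load("@rules_testing//lib:unittest.bzl", "asserts", "unittest")

def _addition_test(ctx):
    # Same begin/assert/end flow documented for the old location.
    env = unittest.begin(ctx)
    asserts.equals(env, 4, 2 + 2)
    return unittest.end(env)

addition_test = unittest.make(_addition_test)

def example_test_suite(name):
    # unittest.suite still creates the individual test targets plus a test_suite.
    unittest.suite(
        name,
        addition_test,
    )
```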
diff --git a/tests/new_sets_tests.bzl b/tests/new_sets_tests.bzl index e73b7d46..8fa0b1b4 100644 --- a/tests/new_sets_tests.bzl +++ b/tests/new_sets_tests.bzl @@ -14,8 +14,8 @@ """Unit tests for new_sets.bzl.""" +load("@rules_testing//lib:unittest.bzl", "asserts", "unittest") load("//lib:new_sets.bzl", "sets") -load("//lib:unittest.bzl", "asserts", "unittest") def _is_equal_test(ctx): """Unit tests for sets.is_equal.""" diff --git a/tests/partial_tests.bzl b/tests/partial_tests.bzl index 73a579bb..40f1bb09 100644 --- a/tests/partial_tests.bzl +++ b/tests/partial_tests.bzl @@ -14,8 +14,8 @@ """Unit tests for partial.bzl.""" +load("@rules_testing//lib:unittest.bzl", "asserts", "unittest") load("//lib:partial.bzl", "partial") -load("//lib:unittest.bzl", "asserts", "unittest") def _make_noargs_nokwargs(): """Test utility for no args no kwargs case""" diff --git a/tests/paths_tests.bzl b/tests/paths_tests.bzl index d0d3cc65..e026f410 100644 --- a/tests/paths_tests.bzl +++ b/tests/paths_tests.bzl @@ -14,8 +14,8 @@ """Unit tests for paths.bzl.""" +load("@rules_testing//lib:unittest.bzl", "asserts", "unittest") load("//lib:paths.bzl", "paths") -load("//lib:unittest.bzl", "asserts", "unittest") def _basename_test(ctx): """Unit tests for paths.basename.""" diff --git a/tests/selects_tests.bzl b/tests/selects_tests.bzl index 8227bf87..30f1d84c 100644 --- a/tests/selects_tests.bzl +++ b/tests/selects_tests.bzl @@ -14,8 +14,8 @@ """Unit tests for selects.bzl.""" +load("@rules_testing//lib:unittest.bzl", "analysistest", "asserts", "unittest") load("//lib:selects.bzl", "selects") -load("//lib:unittest.bzl", "analysistest", "asserts", "unittest") ################################################### # with_or_test diff --git a/tests/shell_tests.bzl b/tests/shell_tests.bzl index 5b83f9f2..b64c8c66 100644 --- a/tests/shell_tests.bzl +++ b/tests/shell_tests.bzl @@ -14,8 +14,8 @@ """Unit tests for shell.bzl.""" +load("@rules_testing//lib:unittest.bzl", "asserts", "unittest") load("//lib:shell.bzl", "shell") -load("//lib:unittest.bzl", "asserts", "unittest") def _shell_array_literal_test(ctx): """Unit tests for shell.array_literal.""" diff --git a/tests/structs_tests.bzl b/tests/structs_tests.bzl index 79de7ad1..c8444b67 100644 --- a/tests/structs_tests.bzl +++ b/tests/structs_tests.bzl @@ -14,8 +14,8 @@ """Unit tests for structs.bzl.""" +load("@rules_testing//lib:unittest.bzl", "asserts", "unittest") load("//lib:structs.bzl", "structs") -load("//lib:unittest.bzl", "asserts", "unittest") def _add_test(ctx): """Unit tests for dicts.add.""" diff --git a/tests/subpackages_tests.bzl b/tests/subpackages_tests.bzl index 3336b936..d0a4776a 100644 --- a/tests/subpackages_tests.bzl +++ b/tests/subpackages_tests.bzl @@ -14,8 +14,8 @@ """Unit tests for subpackages.bzl.""" +load("@rules_testing//lib:unittest.bzl", "loadingtest") load("//lib:subpackages.bzl", "subpackages") -load("//lib:unittest.bzl", "loadingtest") def _all_test(env): """Unit tests for subpackages.all.""" diff --git a/tests/types_tests.bzl b/tests/types_tests.bzl index a3c654bf..37ef2719 100644 --- a/tests/types_tests.bzl +++ b/tests/types_tests.bzl @@ -13,9 +13,9 @@ # limitations under the License. 
"""Unit tests for types.bzl.""" +load("@rules_testing//lib:unittest.bzl", "asserts", "unittest") load("//lib:new_sets.bzl", "sets") load("//lib:types.bzl", "types") -load("//lib:unittest.bzl", "asserts", "unittest") def _a_function(): """A dummy function for testing.""" diff --git a/tests/unittest_test.sh b/tests/unittest_test.sh deleted file mode 100755 index 271f1563..00000000 --- a/tests/unittest_test.sh +++ /dev/null @@ -1,187 +0,0 @@ -#!/usr/bin/env bash - -# Copyright 2019 The Bazel Authors. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# End to end tests for unittest.bzl. -# -# Specifically, end to end tests of unittest.bzl cover verification that -# analysis-phase tests written with unittest.bzl appropriately -# cause test failures in cases where violated assertions are made. - -# --- begin runfiles.bash initialization --- -set -euo pipefail -if [[ ! -d "${RUNFILES_DIR:-/dev/null}" && ! -f "${RUNFILES_MANIFEST_FILE:-/dev/null}" ]]; then - if [[ -f "$0.runfiles_manifest" ]]; then - export RUNFILES_MANIFEST_FILE="$0.runfiles_manifest" - elif [[ -f "$0.runfiles/MANIFEST" ]]; then - export RUNFILES_MANIFEST_FILE="$0.runfiles/MANIFEST" - elif [[ -f "$0.runfiles/bazel_tools/tools/bash/runfiles/runfiles.bash" ]]; then - export RUNFILES_DIR="$0.runfiles" - fi -fi -if [[ -f "${RUNFILES_DIR:-/dev/null}/bazel_tools/tools/bash/runfiles/runfiles.bash" ]]; then - source "${RUNFILES_DIR}/bazel_tools/tools/bash/runfiles/runfiles.bash" -elif [[ -f "${RUNFILES_MANIFEST_FILE:-/dev/null}" ]]; then - source "$(grep -m1 "^bazel_tools/tools/bash/runfiles/runfiles.bash " \ - "$RUNFILES_MANIFEST_FILE" | cut -d ' ' -f 2-)" -else - echo >&2 "ERROR: cannot find @bazel_tools//tools/bash/runfiles:runfiles.bash" - exit 1 -fi -# --- end runfiles.bash initialization --- - -source "$(rlocation $TEST_WORKSPACE/tests/unittest.bash)" \ - || { echo "Could not source bazel_skylib/tests/unittest.bash" >&2; exit 1; } - -function create_pkg() { - local -r pkg="$1" - mkdir -p "$pkg" - cd "$pkg" - - cat > WORKSPACE < tests/BUILD < lib/BUILD < toolchains/unittest/BUILD - - # Create test files. 
- mkdir -p testdir - cat > testdir/BUILD <<'EOF' -load("//tests:unittest_tests.bzl", - "basic_passing_test", - "basic_failing_test", - "failure_message_test", - "fail_unexpected_passing_test", - "fail_unexpected_passing_fake_rule") - -basic_passing_test(name = "basic_passing_test") - -basic_failing_test(name = "basic_failing_test") - -failure_message_test( - name = "shell_escape_failure_message_test", - message = "Contains $FOO", -) - -failure_message_test( - name = "cmd_escape_failure_message_test", - message = "Contains %FOO%", -) - -failure_message_test( - name = "eof_failure_message_test", - message = "\nEOF\n more after EOF", -) - -fail_unexpected_passing_test( - name = "fail_unexpected_passing_test", - target_under_test = ":fail_unexpected_passing_fake_target", -) - -fail_unexpected_passing_fake_rule( - name = "fail_unexpected_passing_fake_target", - tags = ["manual"]) -EOF -} - -function test_basic_passing_test() { - local -r pkg="${FUNCNAME[0]}" - create_pkg "$pkg" - - bazel test testdir:basic_passing_test >"$TEST_log" 2>&1 || fail "Expected test to pass" - - expect_log "PASSED" -} - -function test_basic_failing_test() { - local -r pkg="${FUNCNAME[0]}" - create_pkg "$pkg" - - bazel test testdir:basic_failing_test --test_output=all --verbose_failures \ - >"$TEST_log" 2>&1 && fail "Expected test to fail" || true - - expect_log "In test _basic_failing_test from //tests:unittest_tests.bzl: Expected \"1\", but got \"2\"" -} - -function test_shell_escape_failure_message_test() { - local -r pkg="${FUNCNAME[0]}" - create_pkg "$pkg" - - bazel test testdir:shell_escape_failure_message_test --test_output=all --verbose_failures \ - >"$TEST_log" 2>&1 && fail "Expected test to fail" || true - - expect_log 'In test _failure_message_test from //tests:unittest_tests.bzl: Expected "", but got "Contains $FOO"' -} - -function test_cmd_escape_failure_message_test() { - local -r pkg="${FUNCNAME[0]}" - create_pkg "$pkg" - - bazel test testdir:cmd_escape_failure_message_test --test_output=all --verbose_failures \ - >"$TEST_log" 2>&1 && fail "Expected test to fail" || true - - expect_log 'In test _failure_message_test from //tests:unittest_tests.bzl: Expected "", but got "Contains %FOO%"' -} - -function test_eof_failure_message_test() { - local -r pkg="${FUNCNAME[0]}" - create_pkg "$pkg" - - bazel test testdir:eof_failure_message_test --test_output=all --verbose_failures \ - >"$TEST_log" 2>&1 && fail "Expected test to fail" || true - - expect_log '^ more after EOF' -} - -function test_fail_unexpected_passing_test() { - local -r pkg="${FUNCNAME[0]}" - create_pkg "$pkg" - - bazel test testdir:fail_unexpected_passing_test --test_output=all --verbose_failures \ - >"$TEST_log" 2>&1 && fail "Expected test to fail" || true - - expect_log "Expected failure of target_under_test, but found success" -} - -cd "$TEST_TMPDIR" -run_suite "unittest test suite" diff --git a/tests/unittest_tests.bzl b/tests/unittest_tests.bzl deleted file mode 100644 index 2f326a01..00000000 --- a/tests/unittest_tests.bzl +++ /dev/null @@ -1,389 +0,0 @@ -# Copyright 2019 The Bazel Authors. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Unit tests for unittest.bzl.""" - -load("//lib:partial.bzl", "partial") -load("//lib:unittest.bzl", "analysistest", "asserts", "loadingtest", "unittest") - -################################### -####### basic_failing_test ######## -################################### - -def _basic_failing_test(ctx): - """Unit tests for a basic library verification test that fails.""" - env = unittest.begin(ctx) - - asserts.equals(env, 1, 2) - - return unittest.end(env) - -basic_failing_test = unittest.make(_basic_failing_test) - -################################### -####### failure_message_test ###### -################################### - -def _failure_message_test(ctx): - """Failing unit test with arbitrary content in the message.""" - env = unittest.begin(ctx) - - if not ctx.attr.message: - unittest.fail(env, "Message must be non-empty.") - asserts.equals(env, "", ctx.attr.message) - - return unittest.end(env) - -failure_message_test = unittest.make( - _failure_message_test, - attrs = { - "message": attr.string(), - }, -) - -################################### -####### basic_passing_test ######## -################################### -def _basic_passing_test(ctx): - """Unit tests for a basic library verification test.""" - env = unittest.begin(ctx) - - asserts.equals(env, 1, 1) - - return unittest.end(env) - -basic_passing_test = unittest.make(_basic_passing_test) - -################################################# -####### basic_passing_short_timeout_test ######## -################################################# -def _basic_passing_short_timeout_test(ctx): - """Unit tests for a basic library verification test.""" - env = unittest.begin(ctx) - - asserts.equals(env, ctx.attr.timeout, "short") - - return unittest.end(env) - -basic_passing_short_timeout_test = unittest.make(_basic_passing_short_timeout_test) - -################################### -####### change_setting_test ####### -################################### -def _change_setting_test(ctx): - """Test to verify that an analysis test may change configuration.""" - env = analysistest.begin(ctx) - - dep_min_os_version = analysistest.target_under_test(env)[_ChangeSettingInfo].min_os_version - asserts.equals(env, "1234.5678", dep_min_os_version) - - return analysistest.end(env) - -_ChangeSettingInfo = provider( - doc = "min_os_version for change_setting_test", - fields = ["min_os_version"], -) - -def _change_setting_fake_rule(ctx): - return [_ChangeSettingInfo(min_os_version = ctx.fragments.cpp.minimum_os_version())] - -change_setting_fake_rule = rule( - implementation = _change_setting_fake_rule, - fragments = ["cpp"], -) - -change_setting_test = analysistest.make( - _change_setting_test, - config_settings = { - "//command_line_option:minimum_os_version": "1234.5678", - }, -) - -#################################### -####### failure_testing_test ####### -#################################### -def _failure_testing_test(ctx): - """Test to verify that an analysis test may verify a rule fails with fail().""" - env = analysistest.begin(ctx) - - asserts.expect_failure(env, "This rule should never work") - - return analysistest.end(env) - -def 
_failure_testing_fake_rule(ctx): - _ignore = [ctx] # @unused - fail("This rule should never work") - -failure_testing_fake_rule = rule( - implementation = _failure_testing_fake_rule, -) - -failure_testing_test = analysistest.make( - _failure_testing_test, - expect_failure = True, -) - -############################################ -####### fail_unexpected_passing_test ####### -############################################ -def _fail_unexpected_passing_test(ctx): - """Test that fails by expecting an error that never occurs.""" - env = analysistest.begin(ctx) - - asserts.expect_failure(env, "Oh no, going to fail") - - return analysistest.end(env) - -def _fail_unexpected_passing_fake_rule(ctx): - _ignore = [ctx] # @unused - return [] - -fail_unexpected_passing_fake_rule = rule( - implementation = _fail_unexpected_passing_fake_rule, -) - -fail_unexpected_passing_test = analysistest.make( - _fail_unexpected_passing_test, - expect_failure = True, -) - -################################################ -####### change_setting_with_failure_test ####### -################################################ -def _change_setting_with_failure_test(ctx): - """Test verifying failure while changing configuration.""" - env = analysistest.begin(ctx) - - asserts.expect_failure(env, "unexpected minimum_os_version!!!") - - return analysistest.end(env) - -def _change_setting_with_failure_fake_rule(ctx): - if ctx.fragments.cpp.minimum_os_version() == "error_error": - fail("unexpected minimum_os_version!!!") - return [] - -change_setting_with_failure_fake_rule = rule( - implementation = _change_setting_with_failure_fake_rule, - fragments = ["cpp"], -) - -change_setting_with_failure_test = analysistest.make( - _change_setting_with_failure_test, - expect_failure = True, - config_settings = { - "//command_line_option:minimum_os_version": "error_error", - }, -) - -#################################### -####### inspect_actions_test ####### -#################################### -def _inspect_actions_test(ctx): - """Test verifying actions registered by a target.""" - env = analysistest.begin(ctx) - - actions = analysistest.target_actions(env) - asserts.equals(env, 1, len(actions)) - action_output = actions[0].outputs.to_list()[0] - asserts.equals(env, "out.txt", action_output.basename) - return analysistest.end(env) - -def _inspect_actions_fake_rule(ctx): - out_file = ctx.actions.declare_file("out.txt") - ctx.actions.run_shell( - command = "echo 'hello' > %s" % out_file.basename, - outputs = [out_file], - ) - return [DefaultInfo(files = depset([out_file]))] - -inspect_actions_fake_rule = rule( - implementation = _inspect_actions_fake_rule, -) - -inspect_actions_test = analysistest.make( - _inspect_actions_test, -) - -#################################### -####### inspect_aspect_test ####### -#################################### -_AddedByAspectInfo = provider( - doc = "Example provider added by example aspect", - fields = { - "value": "(str)", - }, -) - -def _example_aspect_impl(target, ctx): - _ignore = [target, ctx] # @unused - return [ - _AddedByAspectInfo(value = "attached by aspect"), - ] - -example_aspect = aspect( - implementation = _example_aspect_impl, -) - -def _inspect_aspect_test(ctx): - """Test verifying aspect run on a target.""" - env = analysistest.begin(ctx) - - tut = env.ctx.attr.target_under_test - asserts.equals(env, "attached by aspect", tut[_AddedByAspectInfo].value) - return analysistest.end(env) - -def _inspect_aspect_fake_rule(ctx): - out_file = ctx.actions.declare_file("out.txt") - 
ctx.actions.run_shell( - command = "echo 'hello' > %s" % out_file.basename, - outputs = [out_file], - ) - return [DefaultInfo(files = depset([out_file]))] - -inspect_aspect_fake_rule = rule( - implementation = _inspect_aspect_fake_rule, -) - -inspect_aspect_test = analysistest.make( - _inspect_aspect_test, - extra_target_under_test_aspects = [example_aspect], -) - -######################################## -####### inspect_output_dirs_test ####### -######################################## -_OutputDirInfo = provider( - doc = "bin_path for inspect_output_dirs_test", - fields = ["bin_path"], -) - -def _inspect_output_dirs_test(ctx): - """Test verifying output directories used by a test.""" - env = analysistest.begin(ctx) - - # Assert that the output bin dir observed by the aspect added by analysistest - # is the same as those observed by the rule directly, even when that's - # under a config transition and therefore not the same as the bin dir - # used by the test rule. - bin_path = analysistest.target_bin_dir_path(env) - target_under_test = analysistest.target_under_test(env) - asserts.false(env, not bin_path, "bin dir path not found.") - asserts.false( - env, - bin_path == ctx.bin_dir.path, - "test bin dir (%s) expected to differ with target_under_test bin dir (%s)." % (bin_path, ctx.bin_dir.path), - ) - asserts.equals(env, bin_path, target_under_test[_OutputDirInfo].bin_path) - return analysistest.end(env) - -def _inspect_output_dirs_fake_rule(ctx): - return [ - _OutputDirInfo( - bin_path = ctx.bin_dir.path, - ), - ] - -inspect_output_dirs_fake_rule = rule( - implementation = _inspect_output_dirs_fake_rule, -) - -inspect_output_dirs_test = analysistest.make( - _inspect_output_dirs_test, - # The output directories differ between the test and target under test when - # the target under test is under a config transition. - config_settings = { - "//command_line_option:minimum_os_version": "1234.5678", - }, -) - -def _loading_phase_test(env): - loadingtest.equals(env, "self_glob", ["unittest_tests.bzl"], native.glob(["unittest_tests.bzl"])) - - # now use our own calls to assert we created a test case rule and test_suite for it. - loadingtest.equals(env, "test_exists", True, native.existing_rule(env.name + "_self_glob") != None) - loadingtest.equals(env, "suite_exists", True, native.existing_rule(env.name + "_tests") != None) - -######################################### - -# buildifier: disable=unnamed-macro -def unittest_passing_tests_suite(): - """Creates the test targets and test suite for passing unittest.bzl tests. - - Not all tests are included. Some unittest.bzl tests verify a test fails - when assertions are not met. Such tests must be run in an e2e shell test. - This suite only includes tests which verify success tests. 
- """ - unittest.suite( - "unittest_tests", - basic_passing_test, - partial.make(basic_passing_short_timeout_test, timeout = "short"), - ) - - change_setting_test( - name = "change_setting_test", - target_under_test = ":change_setting_fake_target", - ) - change_setting_fake_rule( - name = "change_setting_fake_target", - tags = ["manual"], - ) - - failure_testing_test( - name = "failure_testing_test", - target_under_test = ":failure_testing_fake_target", - ) - failure_testing_fake_rule( - name = "failure_testing_fake_target", - tags = ["manual"], - ) - - change_setting_with_failure_test( - name = "change_setting_with_failure_test", - target_under_test = ":change_setting_with_failure_fake_target", - ) - change_setting_with_failure_fake_rule( - name = "change_setting_with_failure_fake_target", - tags = ["manual"], - ) - - inspect_actions_test( - name = "inspect_actions_test", - target_under_test = ":inspect_actions_fake_target", - ) - inspect_actions_fake_rule( - name = "inspect_actions_fake_target", - tags = ["manual"], - ) - - inspect_aspect_test( - name = "inspect_aspect_test", - target_under_test = ":inspect_aspect_fake_target", - ) - inspect_aspect_fake_rule( - name = "inspect_aspect_fake_target", - tags = ["manual"], - ) - - inspect_output_dirs_test( - name = "inspect_output_dirs_test", - target_under_test = ":inspect_output_dirs_fake_target", - ) - inspect_output_dirs_fake_rule( - name = "inspect_output_dirs_fake_target", - tags = ["manual"], - ) - - loading_env = loadingtest.make("selftest") - _loading_phase_test(loading_env) diff --git a/tests/versions_tests.bzl b/tests/versions_tests.bzl index a2e2308a..29a488a2 100644 --- a/tests/versions_tests.bzl +++ b/tests/versions_tests.bzl @@ -14,7 +14,7 @@ """Unit tests for versions.bzl.""" -load("//lib:unittest.bzl", "asserts", "unittest") +load("@rules_testing//lib:unittest.bzl", "asserts", "unittest") load("//lib:versions.bzl", "versions") def _parse_test(ctx):
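Since the new label points at the separate rules_testing module, a consumer has to depend on that module before switching its `load` statements. A sketch of the bzlmod setup this implies; the version numbers are illustrative only, and `dev_dependency` applies only when unittest.bzl is used solely by the consumer's own tests:

```
# MODULE.bazel of a consumer project (illustrative versions).
bazel_dep(name = "bazel_skylib", version = "1.5.0")
bazel_dep(name = "rules_testing", version = "0.6.0", dev_dependency = True)
```

Until the deprecated shim is removed, existing loads of `@bazel_skylib//lib:unittest.bzl` keep resolving through it, so the dependency change and the load change can land separately.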