# cockpit/pyproject.toml
[build-system]
# In-tree PEP 517 backend (build_backend module under src/); no external
# build requirements are needed — hence the empty `requires` list.
requires = []
backend-path = ['src']
build-backend = 'build_backend'
[tool.mypy]
follow_imports = 'silent' # https://github.com/python-lsp/pylsp-mypy/issues/81
scripts_are_modules = true # allow checking all scripts in one invocation
explicit_package_bases = true
# Module search path: bridge sources, shared test helpers, and the bots checkout.
mypy_path = 'src:test/common:bots'
exclude = [
'tmp/',
'tools/vulture-suppressions/',
'vendor/',
]
[[tool.mypy.overrides]]
# Modules that may legitimately be absent in a given checkout or host
# environment: silence "module not found" rather than failing the run.
ignore_missing_imports = true
module = [
# run without submodules checked out
"cockpit._vendor.*",
# run without bots checked out
"lib.*",
"machine.*",
"task.*",
"testvm",
"PIL",
# run with bots checked out but its dependencies missing
"libvirt",
"libvirt_qemu",
"pika",
# run without gobject-introspection (used from cockpit-client for Gtk)
"gi.*",
# these are used from various scripts meant to run on the host
"dbus",
"tracer.query",
"vdo.*",
]
[[tool.mypy.overrides]]
# https://github.com/python/mypy/issues/11401 prevents us from enabling strict
# mode for a given set of files, so instead, we enable the corresponding set of
# individual checks for the files which are strictly typed.
check_untyped_defs = true
disallow_any_generics = true
disallow_incomplete_defs = true
disallow_subclassing_any = true
disallow_untyped_calls = true
disallow_untyped_decorators = true
disallow_untyped_defs = true
no_implicit_reexport = true
strict_concatenate = true
strict_equality = true
warn_unused_ignores = true
module = [
# src
'cockpit',
'cockpit._version',
'cockpit.jsonutil',
'cockpit.protocol',
'cockpit.transports',
# test/common
'cdp',
'testlib',
]
[tool.pylint]
max-line-length = 118
# Checks we deliberately opt out of; each entry carries the pylint message id
# and its official description.
disable = [
"C0114", # Missing module docstring
"C0115", # Missing class docstring
"C0116", # Missing function or method docstring
"R0902", # Too many instance attributes
"R0903", # Too few public methods
"R0913", # Too many arguments
"R1705", # Unnecessary "else" after "return"
"W0120", # Else clause on loop without a break statement
"W1113", # Keyword argument before variable positional arguments (PEP-570 is Python 3.8)
]
[tool.ruff]
exclude = [
".git/",
"modules/",
"node_modules/",
]
line-length = 118
preview = true
# NOTE(review): empty `src` presumably disables ruff's directory-based
# first-party detection; first-party packages are declared under
# [tool.ruff.lint.isort] instead — confirm against ruff docs.
src = []
[tool.ruff.lint]
select = [
"A", # flake8-builtins
"B", # flake8-bugbear
"C4", # flake8-comprehensions
"D300", # pydocstyle: Forbid ''' in docstrings
"DTZ", # flake8-datetimez
"E", # pycodestyle
"EXE", # flake8-executable
"F", # pyflakes
"FBT", # flake8-boolean-trap
"G", # flake8-logging-format
"I", # isort
"ICN", # flake8-import-conventions
"ISC", # flake8-implicit-str-concat
"PIE", # flake8-pie
"PLE", # pylint errors
"PGH", # pygrep-hooks
"PT", # flake8-pytest-style
"RSE", # flake8-raise
"RUF", # ruff rules
"T10", # flake8-debugger
"TCH", # flake8-type-checking
"UP032", # f-string
"W", # warnings (mostly whitespace)
"YTT", # flake8-2020
]
ignore = [
"A003", # Class attribute is shadowing a python builtin
"B011", # Do not `assert False` (`python -O` removes these calls), raise `AssertionError()`
"E731", # Do not assign a `lambda` expression, use a `def`
"PT011", # `pytest.raises(OSError)` is too broad
"RUF012", # Mutable class attributes should be annotated with `typing.ClassVar`
"TCH001", # Move application import into a type-checking block
"TCH002", # Move third-party import `..packages.Packages` into a type-checking block
]
[tool.ruff.lint.flake8-pytest-style]
# Prefer `@pytest.fixture` / `@pytest.mark.x` without empty parentheses.
fixture-parentheses = false
mark-parentheses = false
[tool.ruff.lint.isort]
known-first-party = ["cockpit"]
[tool.pytest.ini_options]
addopts = ['--strict-markers'] # cf. https://github.com/cockpit-project/cockpit/pull/18584#issuecomment-1490243994
pythonpath = ["src"]
testpaths = ["test/pytest"]
log_cli = true
required_plugins = ["pytest-asyncio"]
[tool.vulture]
paths = [
"src",
"test/pytest",
"tools/vulture_suppressions",
]
# Name patterns vulture should never report as unused.
ignore_names = [
"do_*",
"test[A-Z0-9]*",
]
# Decorated definitions are reachable via their framework, not direct calls.
ignore_decorators = [
"@*.getter",
"@*.register_function",
"@bus.Interface.Method",
"@pytest.hookimpl",
]
[tool.coverage.paths]
# Map installed-package paths back to the source tree when combining data.
source = ["src", "*/site-packages"]
[tool.coverage.run]
concurrency = ["multiprocessing"]
source_pkgs = ["cockpit"]
branch = true
[tool.coverage.report]
show_missing = true
skip_covered = true
exclude_lines = [
"pragma: no cover", # default
"raise NotImplementedError",
]
# tox (3.x) does not read pyproject.toml natively; the ini-format
# configuration is embedded verbatim as a string.
[tool.tox]
legacy_tox_ini = """
[tox]
envlist = lint,pytest
isolated_build = True
labels =
venv = py3{6,8,9,10,11,12,13}-pytest
# The default test environments use system packages and never PyPI.
[testenv:{lint,pytest}]
sitepackages = True
install_command = python3 -m pip install --no-index --no-build-isolation {opts} {packages}
wheel_build_env = pkg
# All other environments (names like py311-lint, py36-pytest, etc) are isolated
# from the system and get their packages from PyPI, according to the specific
# test environment being requested. We build the wheel in a common environment.
[testenv]
package = wheel
wheel_build_env = venv-pkg
skip_install = lint: True
deps =
lint: mypy
lint: ruff
lint: vulture
pytest
pytest-asyncio
pytest: pytest-cov
pytest: pytest-timeout
pytest: pytest-xdist
allowlist_externals = test/static-code
commands =
pytest: python3 -m pytest -opythonpath= {posargs}
lint: test/static-code --tap
"""