Drop Python bridge

We only support the C bridge on RHEL 8. Drop the `--enable-old-bridge`
option and make the C bridge the default.
Martin Pitt 2024-02-10 17:02:25 +01:00 committed by Martin Pitt
parent ea4cc7297a
commit eb2e4be2f9
63 changed files with 1 addition and 9900 deletions

View File

@@ -1,21 +0,0 @@
name: tox
on:
pull_request:
jobs:
venv:
permissions: {}
runs-on: ubuntu-latest
container: ghcr.io/allisonkarlitskaya/toxbox
timeout-minutes: 20
steps:
- name: Clone repository
uses: actions/checkout@v3
- name: Checkout submodules
run: vendor/checkout
- name: Run venv tests
run: runuser -u tox -- tox --colored=yes -m venv -- --color=yes

View File

@@ -9,14 +9,10 @@ jobs:
startarg:
# avoid check-memory on i386; it has literally thousands of uninteresting/wrong errors
- { make: 'check-memory', cc: 'gcc', tag: 'latest' }
# with default Python bridge
- { make: 'distcheck', cc: 'clang', tag: 'latest' }
- { make: 'distcheck', cc: 'gcc', tag: 'i386' }
# with old C bridge
- { make: 'distcheck', cc: 'gcc', distcheck_flags: '--enable-old-bridge', tag: 'latest' }
# this runs static code checks, unlike distcheck
- { make: 'check', cc: 'gcc', tag: 'latest' }
- { make: 'pytest-cov', cc: 'gcc', tag: 'latest' }
fail-fast: false
timeout-minutes: 60
steps:

View File

@@ -179,51 +179,6 @@ git submodules:
Refer to the [testing README](test/README.md) for details on running the Cockpit
integration tests locally.
## Python bridge
Most distro releases now ship a Python replacement for the C bridge.
It resides in `src/cockpit` with most of its rules in `src/Makefile.am`. This
directory was chosen because it matches the standard so-called "src layout"
convention for Python packages, where each package (`cockpit`) is a
subdirectory of the `src` directory.
### Running the bridge
The Python bridge can be used interactively on a local machine:
PYTHONPATH=src python3 -m cockpit.bridge
To make it easy to test out channels without having to write out messages
manually, `cockpit.misc.print` can be used:
PYTHONPATH=src python3 -m cockpit.misc.print open fslist1 path=/etc watch=False | PYTHONPATH=src python3 -m cockpit.bridge
These shell aliases might be useful when experimenting with the protocol:
alias cpy='PYTHONPATH=src python3 -m cockpit.bridge'
alias cpf='PYTHONPATH=src python3 -m cockpit.misc.print'
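For example, to open an `echo` channel (which sends back whatever payload it receives) and watch the control and data messages flow by, you could run something like:

cpf open echo | cpy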
When working with the Python bridge on test images, note that `RHEL/CentOS 8`,
`debian-stable`, and `ubuntu-2204` still use the C bridge. If you want to
explicitly use the Python bridge on those images, run:
./test/image-prepare --python
To enable debug logging in the journal on a test image, you can pass `--debug`
to `image-prepare`. This will set `COCKPIT_DEBUG=all` in `/etc/environment`;
if you are only interested in channel debug messages, change `all` to
`cockpit.channel`.
### Testing the Python bridge
There are a growing number of Python `unittest` tests being written to test
parts of the new bridge code. You can run these with `make pytest` or
`make pytest-cov`. Those are both just rules to make sure that the
`systemd_ctypes` submodule is checked out before running `pytest` from the
source directory.
The tests require `pytest` 7.0.0 or higher to run.
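If the submodules are checked out, `pytest` can also be invoked directly from the source directory to select individual tests, for example (the `-k` filter shown here is just an illustration):

pytest -k version

Some tests additionally rely on artifacts built by the `make` rules (such as the test server), so the `make` targets above are the more reliable entry point.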
## Running eslint
Cockpit uses [ESLint](https://eslint.org/) to automatically check JavaScript

View File

@@ -36,21 +36,12 @@ distdir: $(DISTFILES)
$(MAKE) $(AM_MAKEFLAGS) distdir-am EXTRA_FILES="$$(tr '\n' ' ' < $(srcdir)/.extra_dist) .extra_dist"
sed -i "s/[@]VERSION@/$(VERSION)/" "$(distdir)/src/client/org.cockpit_project.CockpitClient.metainfo.xml"
$(srcdir)/tools/fix-spec $(distdir)/tools/cockpit.spec $(VERSION)
test -z '$(HACK_SPEC_FOR_PYTHON)' || \
sed -i 's/\(define enable_old_bridge\) 1/\1 0/' $(distdir)/tools/cockpit.spec
sed -i "/^pkgver=/ s/0/$(VERSION)/" "$(distdir)/tools/arch/PKGBUILD"
sed -i "1 s/0/$(VERSION)/" "$(distdir)/tools/debian/changelog"
cp -r "$(srcdir)/dist" "$(distdir)"
$(srcdir)/tools/adjust-distdir-timestamps "$(distdir)"
@echo ' DIST $(DIST_ARCHIVES)'
# Needed to ensure the tarball is correct for $(VERSION) override
dist-hook: $(distdir)/src/cockpit/_version.py
$(distdir)/src/cockpit/_version.py: FORCE
python3 '$(srcdir)'/src/build_backend.py --copy '$(srcdir)' '$(distdir)'
@rm -f $(distdir)/src/cockpit/_version.py
$(AM_V_GEN) echo "__version__ = '$(VERSION)'" > $@
$(distdir)/version.m4: FORCE
@rm -f $(distdir)/version.m4
$(AM_V_GEN) echo 'm4_define(VERSION_NUMBER, [$(VERSION)])' > $@

View File

@@ -76,12 +76,6 @@ AC_ARG_ENABLE(ssh, AS_HELP_STRING([--disable-ssh], [Disable cockpit-ssh build an
AM_CONDITIONAL(WITH_COCKPIT_SSH, test "$enable_ssh" != "no")
AC_MSG_RESULT(${enable_ssh:=yes})
# --enable-old-bridge
AC_MSG_CHECKING([whether to install the old C cockpit-bridge])
AC_ARG_ENABLE(old_bridge, AS_HELP_STRING([--enable-old-bridge], [Install old C cockpit-bridge]))
AM_CONDITIONAL(WITH_OLD_BRIDGE, test "$enable_old_bridge" = "yes")
AC_MSG_RESULT(${enable_old_bridge:=no})
AC_SEARCH_LIBS([argp_parse], [argp])
case "$ac_cv_search_argp_parse" in
no) AC_MSG_FAILURE([failed to find argp_parse]) ;;
@@ -369,10 +363,6 @@ AC_ARG_ENABLE([cockpit-client],
AC_MSG_RESULT($enable_cockpit_client)
AM_CONDITIONAL([ENABLE_COCKPIT_CLIENT], [test "$enable_cockpit_client" = "yes"])
if test "$enable_cockpit_client" = "yes" && test "$enable_old_bridge" = "yes"; then
AC_MSG_ERROR([--enable-cockpit-client conflicts with --enable-old-bridge])
fi
# Debug
AC_MSG_CHECKING([for debug mode])

View File

@@ -42,10 +42,7 @@ EXTRA_DIST += build.js files.js package.json package-lock.json
# This is how the qunit tests get included. We need to prevent automake from
# seeing them during ./autogen.sh, but need make to find them at compile time.
# We don't run them in the pybridge case since they're part of `pytest`.
if WITH_OLD_BRIDGE
-include $(wildcard pkg/Makefile.qunit*)
endif
INSTALL_DATA_LOCAL_TARGETS += install-bundles
install-bundles:

View File

@@ -1,8 +1,3 @@
[build-system]
requires = []
backend-path = ['src']
build-backend = 'build_backend'
[tool.mypy]
mypy_path = 'src:test/common'
exclude = '_vendor'
@@ -117,40 +112,3 @@ exclude_lines = [
"pragma: no cover", # default
"raise NotImplementedError",
]
[tool.tox]
legacy_tox_ini = """
[tox]
envlist = lint,pytest
isolated_build = True
labels =
venv = py311-lint, py3{6,7,8,9,10,11,12}-pytest
# The default test environments use system packages and never PyPI.
[testenv:{lint,pytest}]
sitepackages = True
install_command = python3 -m pip install --no-index --no-build-isolation {opts} {packages}
wheel_build_env = pkg
# All other environments (names like py311-lint, py36-pytest, etc) are isolated
# from the system and get their packages from PyPI, according to the specific
# test environment being requested. We build the wheel in a common environment.
[testenv]
package = wheel
wheel_build_env = venv-pkg
skip_install = lint: True
deps =
lint: mypy
lint: flake8
lint: ruff
lint: vulture
pytest
pytest-asyncio
pytest: pytest-cov
pytest: pytest-timeout
pytest: pytest-xdist
allowlist_externals = test/static-code
commands =
pytest: python3 -m pytest -opythonpath= {posargs}
lint: test/static-code --tap
"""

View File

@@ -4,44 +4,6 @@ libexec_PROGRAMS =
libexec_SCRIPTS =
sbin_PROGRAMS =
# -----------------------------------------------------------------------------
# Python
# Will only be honoured if the pytest-timeout plugin is installed
export PYTEST_TIMEOUT = 120
.PHONY: pytest
pytest: $(BUILT_SOURCES) $(DIST_STAMP) $(MANIFESTS)
$(MAKE) test-server
cd '$(srcdir)' && abs_builddir='$(abs_builddir)' pytest
.PHONY: pytest-cov
pytest-cov: $(BUILT_SOURCES) $(DIST_STAMP) $(MANIFESTS)
$(MAKE) test-server
cd '$(srcdir)' && abs_builddir='$(abs_builddir)' pytest --cov
if !WITH_OLD_BRIDGE
INSTALL_DATA_LOCAL_TARGETS += install-python
install-python:
@# wheel-based installation with .dist-info.
@# This needs to work on RHEL8 up through modern Fedora, offline, with
@# system packages available to the build.
python3 -m pip install --no-index --force-reinstall --root='$(DESTDIR)/' --prefix='$(prefix)' \
"$$(python3 '$(srcdir)'/src/build_backend.py --wheel '$(srcdir)' tmp/wheel)"
mkdir -p $(DESTDIR)$(libexecdir)
mv -t $(DESTDIR)$(libexecdir) $(DESTDIR)$(bindir)/cockpit-askpass
UNINSTALL_LOCAL_TARGETS += uninstall-python
uninstall-python:
rm -rf tmp/wheel
rm -f $(DESTDIR)$(libexecdir)/cockpit-askpass
rm -f $(DESTDIR)$(bindir)/cockpit-bridge
@# HACK: pip uninstall does not know about --root and --prefix
rm -r $(DESTDIR)$(prefix)/lib/python*/*-packages/cockpit \
$(DESTDIR)$(prefix)/lib/python*/*-packages/cockpit-*.dist-info
endif
# -----------------------------------------------------------------------------
# C

View File

@@ -39,8 +39,6 @@ libcockpit_metrics_a_SOURCES = \
src/bridge/cockpitsamples.h \
$(NULL)
if WITH_OLD_BRIDGE
# -----------------------------------------------------------------------------
# libcockpit-bridge.a: code used in cockpit-bridge and its tests
@@ -230,8 +228,6 @@ dist_check_DATA += \
src/bridge/mock-server.key \
$(NULL)
endif
# -----------------------------------------------------------------------------
# polkit

View File

@@ -1,140 +0,0 @@
import argparse
import base64
import gzip
import hashlib
import lzma
import os
import shutil
import subprocess
import tarfile
import zipfile
from typing import AnyStr, Dict, Iterable, Optional
from cockpit import __version__
VERSION = __version__ or '0'
PACKAGE = f'cockpit-{VERSION}'
TAG = 'py3-none-any'
def find_sources(*, srcpkg: bool) -> Iterable[str]:
try:
subprocess.check_call(['vendor/checkout'], stdout=2) # Needed for git builds...
except FileNotFoundError: # ...but not present in tarball...
pass # ...and not needed either, because...
assert os.path.exists('src/cockpit/_vendor/ferny/__init__.py') # ...the code should exist there already.
if srcpkg:
yield from (
'pyproject.toml',
'src/build_backend.py',
)
for path, _dirs, files in os.walk('src', followlinks=True):
if '__init__.py' in files:
yield from [os.path.join(path, file) for file in files]
def copy_sources(distdir: str) -> None:
for source in find_sources(srcpkg=True):
destination = os.path.join(distdir, source)
os.makedirs(os.path.dirname(destination), exist_ok=True)
shutil.copy(source, destination)
def build_sdist(sdist_directory: str,
config_settings: Optional[Dict[str, object]] = None) -> str:
del config_settings
sdist_filename = f'{PACKAGE}.tar.gz'
# We do this manually to avoid adding timestamps. See https://bugs.python.org/issue31526
with gzip.GzipFile(f'{sdist_directory}/{sdist_filename}', mode='w', mtime=0) as gz:
with tarfile.open(fileobj=gz, mode='w|', dereference=True) as sdist:
for filename in find_sources(srcpkg=True):
sdist.add(filename, arcname=f'{PACKAGE}/{filename}')
return sdist_filename
def build_wheel(wheel_directory: str,
config_settings: Optional[Dict[str, object]] = None,
metadata_directory: Optional[str] = None) -> str:
del config_settings, metadata_directory
wheel_filename = f'{PACKAGE}-{TAG}.whl'
distinfo = {
'WHEEL': [
'Wheel-Version: 1.0',
'Generator: cockpit build_backend',
'Root-Is-Purelib: true',
f'Tag: {TAG}',
],
'METADATA': [
'Metadata-Version: 2.1',
'Name: cockpit',
f'Version: {VERSION}',
],
'entry_points.txt': [
'[console_scripts]',
'cockpit-askpass = cockpit._vendor.ferny.interaction_client:main',
'cockpit-bridge = cockpit.bridge:main',
],
}
with zipfile.ZipFile(f'{wheel_directory}/{wheel_filename}', 'w') as wheel:
def write(filename: str, data: AnyStr) -> None:
# we do this manually to avoid adding timestamps
wheel.writestr(zipfile.ZipInfo(filename), data)
def beipack_self(main: str, args: str = '') -> bytes:
from cockpit._vendor.bei import beipack
contents = {name: wheel.read(name) for name in wheel.namelist()}
pack = beipack.pack(contents, main, args=args).encode('utf-8')
return lzma.compress(pack, preset=lzma.PRESET_EXTREME)
def write_distinfo(filename: str, lines: Iterable[str]) -> None:
write(f'{PACKAGE}.dist-info/{filename}', ''.join(f'{line}\n' for line in lines))
def record_lines() -> Iterable[str]:
for info in wheel.infolist():
digest = hashlib.sha256(wheel.read(info.filename)).digest()
b64_digest = base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii')
yield f'{info.filename},sha256={b64_digest},{info.file_size}'
yield f'{PACKAGE}.dist-info/RECORD,,'
for filename in find_sources(srcpkg=False):
with open(filename, 'rb') as file:
write(os.path.relpath(filename, start='src'), file.read())
write('cockpit/data/cockpit-bridge.beipack.xz', beipack_self('cockpit.bridge:main', 'beipack=True'))
for filename, lines in distinfo.items():
write_distinfo(filename, lines)
write_distinfo('RECORD', record_lines())
return wheel_filename
def main() -> None:
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--copy', action='store_true')
group.add_argument('--sdist', action='store_true')
group.add_argument('--wheel', action='store_true')
parser.add_argument('srcdir')
parser.add_argument('destdir')
args = parser.parse_args()
# We have to chdir() for PEP 517, so make sure dest is absolute
destdir = os.path.abspath(args.destdir)
os.chdir(args.srcdir)
os.makedirs(destdir, exist_ok=True)
if args.copy:
copy_sources(destdir)
elif args.sdist:
print(os.path.join(destdir, build_sdist(destdir)))
else:
print(os.path.join(destdir, build_wheel(destdir)))
if __name__ == '__main__':
main()

View File

@@ -1 +0,0 @@
from ._version import __version__ # noqa: F401

View File

@@ -1 +0,0 @@
../../../vendor/beipack/src/bei

View File

@@ -1 +0,0 @@
../../../vendor/ferny/src/ferny

View File

@@ -1 +0,0 @@
../../../vendor/systemd_ctypes/src/systemd_ctypes

View File

@@ -1,5 +0,0 @@
# This file is only in git. It gets replaced by `make dist`.
from typing import Optional
__version__: Optional[str] = None

View File

@@ -1,340 +0,0 @@
# This file is part of Cockpit.
#
# Copyright (C) 2022 Red Hat, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import argparse
import asyncio
import base64
import importlib.resources
import logging
import os
import shlex
import sys
from pathlib import Path
from typing import Dict, Iterable, Optional, Sequence
from cockpit import polyfills
from cockpit._vendor import ferny
from cockpit._vendor.bei import bootloader
from cockpit.beipack import BridgeBeibootHelper
from cockpit.bridge import setup_logging
from cockpit.channel import ChannelRoutingRule
from cockpit.channels import PackagesChannel
from cockpit.jsonutil import JsonObject
from cockpit.packages import Packages, PackagesLoader, patch_libexecdir
from cockpit.peer import Peer
from cockpit.protocol import CockpitProblem
from cockpit.router import Router, RoutingRule
from cockpit.transports import StdioTransport
logger = logging.getLogger('cockpit.beiboot')
def ensure_ferny_askpass() -> Path:
"""Create askpass executable
We need this for the flatpak: ssh and thus the askpass program run on the host (via flatpak-spawn),
not the flatpak. Thus we cannot use the shipped cockpit-askpass program.
"""
src_path = importlib.resources.files(ferny.__name__) / 'interaction_client.py'
src_data = src_path.read_bytes()
# Create the file in $XDG_CACHE_HOME, one of the few locations that a flatpak can write to
xdg_cache_home = os.environ.get('XDG_CACHE_HOME')
if xdg_cache_home is None:
xdg_cache_home = os.path.expanduser('~/.cache')
os.makedirs(xdg_cache_home, exist_ok=True)
dest_path = Path(xdg_cache_home, 'cockpit-client-askpass')
logger.debug("Checking if %s exists...", dest_path)
# Check first to see if we already wrote the current version
try:
if dest_path.read_bytes() != src_data:
logger.debug(" ... it exists but is not the same version...")
raise ValueError
if not dest_path.stat().st_mode & 0o100:
logger.debug(" ... it has the correct contents, but is not executable...")
raise ValueError
except (FileNotFoundError, ValueError):
logger.debug(" ... writing contents.")
dest_path.write_bytes(src_data)
dest_path.chmod(0o700)
return dest_path
def get_interesting_files() -> Iterable[str]:
for manifest in PackagesLoader.load_manifests():
for condition in manifest.conditions:
if condition.name in ('path-exists', 'path-not-exists') and isinstance(condition.value, str):
yield condition.value
class ProxyPackagesLoader(PackagesLoader):
file_status: Dict[str, bool]
def check_condition(self, condition: str, value: object) -> bool:
assert isinstance(value, str)
assert value in self.file_status
if condition == 'path-exists':
return self.file_status[value]
elif condition == 'path-not-exists':
return not self.file_status[value]
else:
raise KeyError
def __init__(self, file_status: Dict[str, bool]):
self.file_status = file_status
BEIBOOT_GADGETS = {
"report_exists": r"""
import os
def report_exists(files):
command('cockpit.report-exists', {name: os.path.exists(name) for name in files})
""",
**ferny.BEIBOOT_GADGETS
}
class DefaultRoutingRule(RoutingRule):
peer: 'Peer | None'
def __init__(self, router: Router):
super().__init__(router)
def apply_rule(self, options: JsonObject) -> 'Peer | None':
return self.peer
def shutdown(self) -> None:
if self.peer is not None:
self.peer.close()
class AuthorizeResponder(ferny.AskpassHandler):
commands = ('ferny.askpass', 'cockpit.report-exists')
router: Router
def __init__(self, router: Router):
self.router = router
async def do_askpass(self, messages: str, prompt: str, hint: str) -> Optional[str]:
if hint == 'none':
# We have three problems here:
#
# - we have no way to present a message on the login
# screen without presenting a prompt and a button
# - the login screen will not try to repost the login
# request because it doesn't understand that we are not
# waiting on input, which means that it won't notice
# that we've logged in successfully
# - cockpit-ws has an issue where if we retry the request
# again after login succeeded then it won't forward the
# init message to the client, stalling the login. This
# is a race and can't be fixed without -ws changes.
#
# Let's avoid all of that by just showing nothing.
return None
challenge = 'X-Conversation - ' + base64.b64encode(prompt.encode()).decode()
response = await self.router.request_authorization(challenge,
messages=messages,
prompt=prompt,
hint=hint,
echo=False)
b64 = response.removeprefix('X-Conversation -').strip()
passwd = base64.b64decode(b64.encode()).decode()
logger.debug('Returning a %d chars password', len(passwd))
return passwd
async def do_custom_command(self, command: str, args: tuple, fds: list[int], stderr: str) -> None:
logger.debug('Got ferny command %s %s %s', command, args, stderr)
if command == 'cockpit.report-exists':
file_status, = args
# FIXME: evil duck typing here -- this is a half-way Bridge
self.router.packages = Packages(loader=ProxyPackagesLoader(file_status)) # type: ignore[attr-defined]
self.router.routing_rules.insert(0, ChannelRoutingRule(self.router, [PackagesChannel]))
class SshPeer(Peer):
always: bool
def __init__(self, router: Router, destination: str, args: argparse.Namespace):
self.destination = destination
self.always = args.always
super().__init__(router)
async def do_connect_transport(self) -> None:
beiboot_helper = BridgeBeibootHelper(self)
agent = ferny.InteractionAgent([AuthorizeResponder(self.router), beiboot_helper])
# We want to run a python interpreter somewhere...
cmd: Sequence[str] = ('python3', '-ic', '# cockpit-bridge')
env: Sequence[str] = ()
in_flatpak = os.path.exists('/.flatpak-info')
# Remote host? Wrap command with SSH
if self.destination != 'localhost':
if in_flatpak:
# we run ssh and thus the helper on the host, always use the xdg-cache helper
ssh_askpass = ensure_ferny_askpass()
else:
# outside of the flatpak we expect cockpit-ws and thus an installed helper
askpass = patch_libexecdir('${libexecdir}/cockpit-askpass')
assert isinstance(askpass, str)
ssh_askpass = Path(askpass)
if not ssh_askpass.exists():
logger.error("Could not find cockpit-askpass helper at %r", askpass)
env = (
f'SSH_ASKPASS={ssh_askpass!s}',
'DISPLAY=x',
'SSH_ASKPASS_REQUIRE=force',
)
host, _, port = self.destination.rpartition(':')
# catch cases like `host:123` but not cases like `[2001:abcd::1]`
if port.isdigit():
host_args = ['-p', port, host]
else:
host_args = [self.destination]
cmd = ('ssh', *host_args, shlex.join(cmd))
# Running in flatpak? Wrap command with flatpak-spawn --host
if in_flatpak:
cmd = ('flatpak-spawn', '--host',
*(f'--env={kv}' for kv in env),
*cmd)
env = ()
logger.debug("Launching command: cmd=%s env=%s", cmd, env)
transport = await self.spawn(cmd, env, stderr=agent, start_new_session=True)
if not self.always:
exec_cockpit_bridge_steps = [('try_exec', (['cockpit-bridge'],))]
else:
exec_cockpit_bridge_steps = []
# Send the first-stage bootloader
stage1 = bootloader.make_bootloader([
*exec_cockpit_bridge_steps,
('report_exists', [list(get_interesting_files())]),
*beiboot_helper.steps,
], gadgets=BEIBOOT_GADGETS)
transport.write(stage1.encode())
# Wait for "init" or error, handling auth and beiboot requests
await agent.communicate()
def transport_control_received(self, command: str, message: JsonObject) -> None:
if command == 'authorize':
# We've disabled this for explicit-superuser bridges, but older
# bridges don't support that and will ask us anyway.
return
super().transport_control_received(command, message)
class SshBridge(Router):
packages: Optional[Packages] = None
ssh_peer: SshPeer
def __init__(self, args: argparse.Namespace):
# By default, we route everything to the other host. We add an extra
# routing rule for the packages webserver only if we're running the
# beipack.
rule = DefaultRoutingRule(self)
super().__init__([rule])
# This needs to be created after Router.__init__ is called.
self.ssh_peer = SshPeer(self, args.destination, args)
rule.peer = self.ssh_peer
def do_send_init(self):
pass # wait for the peer to do it first
def do_init(self, message):
# https://github.com/cockpit-project/cockpit/issues/18927
#
# We tell cockpit-ws that we have the explicit-superuser capability and
# handle it ourselves (just below) by sending `superuser-init-done` and
# passing {'superuser': False} on to the actual bridge (Python or C).
if isinstance(message.get('superuser'), dict):
self.write_control(command='superuser-init-done')
message['superuser'] = False
self.ssh_peer.write_control(message)
async def run(args) -> None:
logger.debug("Hi. How are you today?")
bridge = SshBridge(args)
StdioTransport(asyncio.get_running_loop(), bridge)
try:
message = dict(await bridge.ssh_peer.start())
# See comment in do_init() above: we tell cockpit-ws that we support
# this and then handle it ourselves when we get the init message.
capabilities = message.setdefault('capabilities', {})
if not isinstance(capabilities, dict):
bridge.write_control(command='init', problem='protocol-error', message='capabilities must be a dict')
return
assert isinstance(capabilities, dict) # convince mypy
capabilities['explicit-superuser'] = True
# only patch the packages line if we are in beiboot mode
if bridge.packages:
message['packages'] = {p: None for p in bridge.packages.packages}
bridge.write_control(message)
bridge.ssh_peer.thaw_endpoint()
except ferny.InteractionError as exc:
sys.exit(str(exc))
except CockpitProblem as exc:
bridge.write_control(exc.attrs, command='init')
return
logger.debug('Startup done. Looping until connection closes.')
try:
await bridge.communicate()
except BrokenPipeError:
# expected if the peer doesn't hang up cleanly
pass
def main() -> None:
polyfills.install()
parser = argparse.ArgumentParser(description='cockpit-bridge is run automatically inside of a Cockpit session.')
parser.add_argument('--always', action='store_true', help="Never try to run cockpit-bridge from the system")
parser.add_argument('--debug', action='store_true')
parser.add_argument('destination', help="Name of the remote host to connect to, or 'localhost'")
args = parser.parse_args()
setup_logging(debug=args.debug)
asyncio.run(run(args), debug=args.debug)
if __name__ == '__main__':
main()

View File

@@ -1,76 +0,0 @@
# This file is part of Cockpit.
#
# Copyright (C) 2023 Red Hat, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import logging
import lzma
from typing import List, Sequence, Tuple
from cockpit._vendor import ferny
from cockpit._vendor.bei import beipack
from .data import read_cockpit_data_file
from .peer import Peer, PeerError
logger = logging.getLogger(__name__)
def get_bridge_beipack_xz() -> Tuple[str, bytes]:
try:
bridge_beipack_xz = read_cockpit_data_file('cockpit-bridge.beipack.xz')
logger.debug('Got pre-built cockpit-bridge.beipack.xz')
except FileNotFoundError:
logger.debug('Pre-built cockpit-bridge.beipack.xz not found; building our own.')
# beipack ourselves
cockpit_contents = beipack.collect_module('cockpit', recursive=True)
bridge_beipack = beipack.pack(cockpit_contents, entrypoint='cockpit.bridge:main', args='beipack=True')
bridge_beipack_xz = lzma.compress(bridge_beipack.encode())
logger.debug(' ... done!')
return 'cockpit/data/cockpit-bridge.beipack.xz', bridge_beipack_xz
class BridgeBeibootHelper(ferny.InteractionHandler):
# ferny.InteractionHandler ClassVar
commands = ['beiboot.provide', 'beiboot.exc']
peer: Peer
payload: bytes
steps: Sequence[Tuple[str, Sequence[object]]]
def __init__(self, peer: Peer, args: Sequence[str] = ()) -> None:
filename, payload = get_bridge_beipack_xz()
self.peer = peer
self.payload = payload
self.steps = (('boot_xz', (filename, len(payload), tuple(args))),)
async def run_command(self, command: str, args: Tuple, fds: List[int], stderr: str) -> None:
logger.debug('Got ferny request %s %s %s %s', command, args, fds, stderr)
if command == 'beiboot.provide':
try:
size, = args
assert size == len(self.payload)
except (AssertionError, ValueError) as exc:
raise PeerError('internal-error', message=f'ferny interaction error {exc!s}') from exc
assert self.peer.transport is not None
logger.debug('Writing %d bytes of payload', len(self.payload))
self.peer.transport.write(self.payload)
elif command == 'beiboot.exc':
raise PeerError('internal-error', message=f'Remote exception: {args[0]}')
else:
raise PeerError('internal-error', message=f'Unexpected ferny interaction command {command}')

View File

@@ -1,315 +0,0 @@
# This file is part of Cockpit.
#
# Copyright (C) 2022 Red Hat, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import argparse
import asyncio
import contextlib
import json
import logging
import os
import pwd
import shlex
import socket
import stat
import subprocess
from typing import Iterable, List, Optional, Sequence, Tuple, Type
from cockpit._vendor.ferny import interaction_client
from cockpit._vendor.systemd_ctypes import bus, run_async
from . import polyfills
from ._version import __version__
from .channel import ChannelRoutingRule
from .channels import CHANNEL_TYPES
from .config import Config, Environment
from .internal_endpoints import EXPORTS
from .jsonutil import JsonError, JsonObject, get_dict
from .packages import BridgeConfig, Packages, PackagesListener
from .peer import PeersRoutingRule
from .remote import HostRoutingRule
from .router import Router
from .superuser import SuperuserRoutingRule
from .transports import StdioTransport
logger = logging.getLogger(__name__)
class InternalBus:
exportees: List[bus.Slot]
def __init__(self, exports: Iterable[Tuple[str, Type[bus.BaseObject]]]):
client_socket, server_socket = socket.socketpair()
self.client = bus.Bus.new(fd=client_socket.detach())
self.server = bus.Bus.new(fd=server_socket.detach(), server=True)
self.exportees = [self.server.add_object(path, cls()) for path, cls in exports]
def export(self, path: str, obj: bus.BaseObject) -> None:
self.exportees.append(self.server.add_object(path, obj))
class Bridge(Router, PackagesListener):
internal_bus: InternalBus
packages: Optional[Packages]
bridge_configs: Sequence[BridgeConfig]
args: argparse.Namespace
def __init__(self, args: argparse.Namespace):
self.internal_bus = InternalBus(EXPORTS)
self.bridge_configs = []
self.args = args
self.superuser_rule = SuperuserRoutingRule(self, privileged=args.privileged)
self.internal_bus.export('/superuser', self.superuser_rule)
self.internal_bus.export('/config', Config())
self.internal_bus.export('/environment', Environment())
self.peers_rule = PeersRoutingRule(self)
if args.beipack:
# Some special stuff for beipack
self.superuser_rule.set_configs((
BridgeConfig({
"privileged": True,
"spawn": ["sudo", "-k", "-A", "python3", "-ic", "# cockpit-bridge", "--privileged"],
"environ": ["SUDO_ASKPASS=ferny-askpass"],
}),
))
self.packages = None
elif args.privileged:
self.packages = None
else:
self.packages = Packages(self)
self.internal_bus.export('/packages', self.packages)
self.packages_loaded()
super().__init__([
HostRoutingRule(self),
self.superuser_rule,
ChannelRoutingRule(self, CHANNEL_TYPES),
self.peers_rule,
])
@staticmethod
def get_os_release():
try:
file = open('/etc/os-release', encoding='utf-8')
except FileNotFoundError:
try:
file = open('/usr/lib/os-release', encoding='utf-8')
except FileNotFoundError:
logger.warning("Neither /etc/os-release nor /usr/lib/os-release exists")
return {}
os_release = {}
for line in file.readlines():
line = line.strip()
if not line or line.startswith('#'):
continue
try:
k, v = line.split('=')
(v_parsed, ) = shlex.split(v) # expect exactly one token
except ValueError:
logger.warning('Ignoring invalid line in os-release: %r', line)
continue
os_release[k] = v_parsed
return os_release
def do_init(self, message: JsonObject) -> None:
# we're only interested in the case where this is a dict, but
# 'superuser' may well be `False` and that's not an error
with contextlib.suppress(JsonError):
superuser = get_dict(message, 'superuser')
self.superuser_rule.init(superuser)
def do_send_init(self) -> None:
init_args = {
'capabilities': {'explicit-superuser': True},
'command': 'init',
'os-release': self.get_os_release(),
'version': 1,
}
if self.packages is not None:
init_args['packages'] = {p: None for p in self.packages.packages}
self.write_control(init_args)
# PackagesListener interface
def packages_loaded(self) -> None:
assert self.packages
bridge_configs = self.packages.get_bridge_configs()
if self.bridge_configs != bridge_configs:
self.superuser_rule.set_configs(bridge_configs)
self.peers_rule.set_configs(bridge_configs)
self.bridge_configs = bridge_configs
async def run(args) -> None:
logger.debug("Hi. How are you today?")
# Unit tests require this
me = pwd.getpwuid(os.getuid())
os.environ['HOME'] = me.pw_dir
os.environ['SHELL'] = me.pw_shell
os.environ['USER'] = me.pw_name
logger.debug('Starting the router.')
router = Bridge(args)
StdioTransport(asyncio.get_running_loop(), router)
logger.debug('Startup done. Looping until connection closes.')
try:
await router.communicate()
except (BrokenPipeError, ConnectionResetError):
# not unexpected if the peer doesn't hang up cleanly
pass
def try_to_receive_stderr():
try:
ours, theirs = socket.socketpair()
with ours:
with theirs:
interaction_client.command(2, 'cockpit.send-stderr', fds=[theirs.fileno()])
_msg, fds, _flags, _addr = socket.recv_fds(ours, 1, 1)
except OSError:
return
try:
stderr_fd, = fds
# We're about to abruptly drop our end of the stderr socketpair that we
# share with the ferny agent. ferny would normally treat that as an
# unexpected error. Instruct it to do a clean exit, instead.
interaction_client.command(2, 'ferny.end')
os.dup2(stderr_fd, 2)
finally:
for fd in fds:
os.close(fd)
def setup_journald() -> bool:
# If stderr is a socket, prefer systemd-journal logging. This covers the
# case we're already connected to the journal but also the case where we're
# talking to the ferny agent, while leaving logging to file or terminal
# unaffected.
if not stat.S_ISSOCK(os.fstat(2).st_mode):
# not a socket? Don't redirect.
return False
try:
import systemd.journal # type: ignore[import]
except ImportError:
# No python3-systemd? Don't redirect.
return False
logging.root.addHandler(systemd.journal.JournalHandler())
return True
def setup_logging(*, debug: bool) -> None:
"""Setup our logger with optional filtering of modules if COCKPIT_DEBUG env is set"""
modules = os.getenv('COCKPIT_DEBUG', '')
# Either set up logging via the journal or via formatted messages to stderr
if not setup_journald():
logging.basicConfig(format='%(name)s-%(levelname)s: %(message)s')
if debug or modules == 'all':
logging.getLogger().setLevel(level=logging.DEBUG)
elif modules:
for module in modules.split(','):
module = module.strip()
if not module:
continue
logging.getLogger(module).setLevel(logging.DEBUG)
def start_ssh_agent() -> None:
# Launch the agent so that it goes down with us on EOF; PDEATHSIG would be more robust,
# but it gets cleared on setgid ssh-agent, which some distros still do
try:
proc = subprocess.Popen(['ssh-agent', 'sh', '-ec', 'echo SSH_AUTH_SOCK=$SSH_AUTH_SOCK; read a'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, universal_newlines=True)
assert proc.stdout is not None
# Wait for the agent to write at least one line and look for the
# listener socket. If we fail to find it, kill the agent — something
# went wrong.
for token in shlex.shlex(proc.stdout.readline(), punctuation_chars=True):
if token.startswith('SSH_AUTH_SOCK='):
os.environ['SSH_AUTH_SOCK'] = token.replace('SSH_AUTH_SOCK=', '', 1)
break
else:
proc.terminate()
proc.wait()
except FileNotFoundError:
logger.debug("Couldn't start ssh-agent (FileNotFoundError)")
except OSError as exc:
logger.warning("Could not start ssh-agent: %s", exc)
def main(*, beipack: bool = False) -> None:
polyfills.install()
parser = argparse.ArgumentParser(description='cockpit-bridge is run automatically inside of a Cockpit session.')
parser.add_argument('--privileged', action='store_true', help='Privileged copy of the bridge')
parser.add_argument('--packages', action='store_true', help='Show Cockpit package information')
parser.add_argument('--bridges', action='store_true', help='Show Cockpit bridges information')
parser.add_argument('--debug', action='store_true', help='Enable debug output (very verbose)')
parser.add_argument('--version', action='store_true', help='Show Cockpit version information')
args = parser.parse_args()
# This is determined by who calls us
args.beipack = beipack
# If we were run with --privileged then our stderr is currently being
# consumed by the main bridge looking for startup-related error messages.
# Let's switch back to the original stderr stream, which has a side-effect
# of indicating that our startup is more or less complete. Any errors
# after this point will land in the journal.
if args.privileged:
try_to_receive_stderr()
setup_logging(debug=args.debug)
# Special modes
if args.packages:
Packages().show()
return
elif args.version:
print(f'Version: {__version__}\nProtocol: 1')
return
elif args.bridges:
print(json.dumps([config.__dict__ for config in Packages().get_bridge_configs()], indent=2))
return
# The privileged bridge doesn't need ssh-agent, but the main one does
if 'SSH_AUTH_SOCK' not in os.environ and not args.privileged:
start_ssh_agent()
# asyncio.run() shim for Python 3.6 support
run_async(run(args), debug=args.debug)
if __name__ == '__main__':
main()

View File

@@ -1,527 +0,0 @@
# This file is part of Cockpit.
#
# Copyright (C) 2022 Red Hat, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import asyncio
import json
import logging
from typing import BinaryIO, ClassVar, Dict, Generator, List, Optional, Sequence, Set, Tuple, Type
from .jsonutil import JsonError, JsonObject, JsonValue, create_object, get_bool, get_str
from .protocol import CockpitProblem
from .router import Endpoint, Router, RoutingRule
logger = logging.getLogger(__name__)
class ChannelRoutingRule(RoutingRule):
table: Dict[str, List[Type['Channel']]]
def __init__(self, router: Router, channel_types: List[Type['Channel']]):
super().__init__(router)
self.table = {}
# Sort the channels into buckets by payload type
for cls in channel_types:
entry = self.table.setdefault(cls.payload, [])
entry.append(cls)
# Within each bucket, sort the channels so those with more
# restrictions are considered first.
for entry in self.table.values():
entry.sort(key=lambda cls: len(cls.restrictions), reverse=True)
def check_restrictions(self, restrictions: Sequence[Tuple[str, object]], options: JsonObject) -> bool:
for key, expected_value in restrictions:
our_value = options.get(key)
# If the match rule specifies that a value must be present and
# we don't have it, then fail.
if our_value is None:
return False
# If the match rule specified a specific expected value, and
# our value doesn't match it, then fail.
if expected_value is not None and our_value != expected_value:
return False
# Everything checked out
return True
def apply_rule(self, options: JsonObject) -> Optional['Channel']:
assert self.router is not None
payload = options.get('payload')
if not isinstance(payload, str):
return None
for cls in self.table.get(payload, []):
if self.check_restrictions(cls.restrictions, options):
return cls(self.router)
else:
return None
def shutdown(self):
pass # we don't hold any state
class ChannelError(CockpitProblem):
pass
class Channel(Endpoint):
# Values borrowed from C implementation
BLOCK_SIZE = 16 * 1024
SEND_WINDOW = 2 * 1024 * 1024
# Flow control book-keeping
_send_pings: bool = False
_out_sequence: int = 0
_out_window: int = SEND_WINDOW
# Task management
_tasks: Set[asyncio.Task]
_close_args: Optional[JsonObject] = None
# Must be filled in by the channel implementation
payload: ClassVar[str]
restrictions: ClassVar[Sequence[Tuple[str, object]]] = ()
# These get filled in from .do_open()
channel = ''
group = ''
# input
def do_control(self, command, message):
# Break the various different kinds of control messages out into the
# things that our subclass may be interested in handling. We drop the
# 'message' field for handlers that don't need it.
if command == 'open':
self._tasks = set()
self.channel = message['channel']
if get_bool(message, 'flow-control', default=False):
self._send_pings = True
self.group = get_str(message, 'group', 'default')
self.freeze_endpoint()
self.do_open(message)
elif command == 'ready':
self.do_ready()
elif command == 'done':
self.do_done()
elif command == 'close':
self.do_close()
elif command == 'ping':
self.do_ping(message)
elif command == 'pong':
self.do_pong(message)
elif command == 'options':
self.do_options(message)
def do_channel_control(self, channel: str, command: str, message: JsonObject) -> None:
# Already closing? Ignore.
if self._close_args is not None:
return
# Catch errors and turn them into close messages
try:
try:
self.do_control(command, message)
except JsonError as exc:
raise ChannelError('protocol-error', message=str(exc)) from exc
except ChannelError as exc:
self.close(exc.attrs)
def do_kill(self, host: 'str | None', group: 'str | None', _message: JsonObject) -> None:
# Already closing? Ignore.
if self._close_args is not None:
return
if host is not None:
return
if group is not None and self.group != group:
return
self.do_close()
# At least this one really ought to be implemented...
def do_open(self, options: JsonObject) -> None:
raise NotImplementedError
# ... but many subclasses may reasonably want to ignore some of these.
def do_ready(self) -> None:
pass
def do_done(self) -> None:
pass
def do_close(self) -> None:
self.close()
def do_options(self, message: JsonObject) -> None:
raise ChannelError('not-supported', message='This channel does not implement "options"')
# 'reasonable' default, overridden in other channels for receive-side flow control
def do_ping(self, message: JsonObject) -> None:
self.send_pong(message)
def do_channel_data(self, channel: str, data: bytes) -> None:
# Already closing? Ignore.
if self._close_args is not None:
return
# Catch errors and turn them into close messages
try:
self.do_data(data)
except ChannelError as exc:
self.close(exc.attrs)
def do_data(self, _data: bytes) -> None:
# By default, channels can't receive data.
self.close()
# output
def ready(self, **kwargs: JsonValue) -> None:
self.thaw_endpoint()
self.send_control(command='ready', **kwargs)
def done(self) -> None:
self.send_control(command='done')
# tasks and close management
def is_closing(self) -> bool:
return self._close_args is not None
def _close_now(self) -> None:
self.shutdown_endpoint(self._close_args)
def _task_done(self, task):
# Strictly speaking, we should read the result and check for exceptions but:
# - exceptions bubbling out of the task are programming errors
# - the only thing we'd do with it anyway, is to show it
# - Python already does that with its "Task exception was never retrieved" messages
self._tasks.remove(task)
if self._close_args is not None and not self._tasks:
self._close_now()
def create_task(self, coroutine, name=None):
"""Create a task associated with the channel.
All tasks must exit before the channel can close. You may not create
new tasks after calling .close().
"""
assert self._close_args is None
task = asyncio.create_task(coroutine)
self._tasks.add(task)
task.add_done_callback(self._task_done)
return task
def close(self, close_args: 'JsonObject | None' = None) -> None:
"""Requests the channel to be closed.
After you call this method, you won't get any more `.do_*()` calls.
This will wait for any running tasks to complete before sending the
close message.
"""
if self._close_args is not None:
# close already requested
return
self._close_args = close_args or {}
if not self._tasks:
self._close_now()
def send_data(self, data: bytes) -> bool:
"""Send data and handle book-keeping for flow control.
The flow control is "advisory". The data is sent immediately, even if
it's larger than the window. In general you should try to send packets
which are approximately Channel.BLOCK_SIZE in size.
Returns True if there is still room in the window, or False if you
should stop writing for now. In that case, `.do_resume_send()` will be
called later when there is more room.
"""
self.send_channel_data(self.channel, data)
if self._send_pings:
out_sequence = self._out_sequence + len(data)
if self._out_sequence // Channel.BLOCK_SIZE != out_sequence // Channel.BLOCK_SIZE:
self.send_control(command='ping', sequence=out_sequence)
self._out_sequence = out_sequence
return self._out_sequence < self._out_window
def do_pong(self, message):
if not self._send_pings: # huh?
logger.warning("Got wild pong on channel %s", self.channel)
return
self._out_window = message['sequence'] + Channel.SEND_WINDOW
if self._out_sequence < self._out_window:
self.do_resume_send()
def do_resume_send(self) -> None:
"""Called to indicate that the channel may start sending again."""
# change to `raise NotImplementedError` after everyone implements it
json_encoder: ClassVar[json.JSONEncoder] = json.JSONEncoder(indent=2)
def send_json(self, _msg: 'JsonObject | None' = None, **kwargs: JsonValue) -> bool:
pretty = self.json_encoder.encode(create_object(_msg, kwargs)) + '\n'
return self.send_data(pretty.encode())
def send_control(self, command: str, **kwargs: JsonValue) -> None:
self.send_channel_control(self.channel, command, None, **kwargs)
def send_pong(self, message: JsonObject) -> None:
self.send_channel_control(self.channel, 'pong', message)
class ProtocolChannel(Channel, asyncio.Protocol):
"""A channel subclass that implements the asyncio Protocol interface.
In effect, data sent to this channel will be written to the connected
transport, and vice-versa. Flow control is supported.
The default implementation of the .do_open() method calls the
.create_transport() abstract method. This method should return a transport
which will be used for communication on the channel.
Otherwise, if the subclass implements .do_open() itself, it is responsible
for setting up the connection and ensuring that .connection_made() is called.
"""
_transport: Optional[asyncio.Transport]
_loop: Optional[asyncio.AbstractEventLoop]
_send_pongs: bool = True
_last_ping: Optional[JsonObject] = None
_create_transport_task = None
# read-side EOF handling
_close_on_eof: bool = False
_eof: bool = False
async def create_transport(self, loop: asyncio.AbstractEventLoop, options: JsonObject) -> asyncio.Transport:
"""Creates the transport for this channel, according to options.
The event loop for the transport is passed to the function. The
protocol for the transport is the channel object, itself (self).
This needs to be implemented by the subclass.
"""
raise NotImplementedError
def do_open(self, options: JsonObject) -> None:
loop = asyncio.get_running_loop()
self._create_transport_task = asyncio.create_task(self.create_transport(loop, options))
self._create_transport_task.add_done_callback(self.create_transport_done)
def create_transport_done(self, task: 'asyncio.Task[asyncio.Transport]') -> None:
assert task is self._create_transport_task
self._create_transport_task = None
try:
transport = task.result()
except ChannelError as exc:
self.close(exc.attrs)
return
self.connection_made(transport)
self.ready()
def connection_made(self, transport: asyncio.BaseTransport) -> None:
assert isinstance(transport, asyncio.Transport)
self._transport = transport
def _get_close_args(self) -> JsonObject:
return {}
def connection_lost(self, exc: Optional[Exception]) -> None:
self.close(self._get_close_args())
def do_data(self, data: bytes) -> None:
assert self._transport is not None
self._transport.write(data)
def do_done(self) -> None:
assert self._transport is not None
if self._transport.can_write_eof():
self._transport.write_eof()
def do_close(self) -> None:
if self._transport is not None:
self._transport.close()
def data_received(self, data: bytes) -> None:
assert self._transport is not None
if not self.send_data(data):
self._transport.pause_reading()
def do_resume_send(self) -> None:
assert self._transport is not None
self._transport.resume_reading()
def close_on_eof(self) -> None:
"""Mark the channel to be closed on EOF.
Normally, ProtocolChannel tries to keep the channel half-open after
receiving EOF from the transport. This instructs that the channel
should be closed on EOF.
If EOF was already received, then calling this function will close the
channel immediately.
If you don't call this function, you are responsible for closing the
channel yourself.
"""
self._close_on_eof = True
if self._eof:
assert self._transport is not None
self._transport.close()
def eof_received(self) -> bool:
self._eof = True
self.done()
return not self._close_on_eof
# Channel receive-side flow control
def do_ping(self, message):
if self._send_pongs:
self.send_pong(message)
else:
# we'll have to pong later
self._last_ping = message
def pause_writing(self) -> None:
# We can't actually stop writing, but we can stop replying to pings
self._send_pongs = False
def resume_writing(self) -> None:
self._send_pongs = True
if self._last_ping is not None:
self.send_pong(self._last_ping)
self._last_ping = None
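# A hypothetical sketch (not part of the original file) of the pattern the
# ProtocolChannel docstring describes: .create_transport() connects an asyncio
# transport whose protocol is the channel object itself. The 'unix-sketch'
# payload and the 'path' option are invented for illustration.
class UnixSocketChannelSketch(ProtocolChannel):
    payload = 'unix-sketch'

    async def create_transport(self, loop: asyncio.AbstractEventLoop, options: JsonObject) -> asyncio.Transport:
        path = get_str(options, 'path')
        try:
            # the channel is the asyncio.Protocol for this transport
            transport, _protocol = await loop.create_unix_connection(lambda: self, path)
        except OSError as exc:
            raise ChannelError('not-found', message=str(exc)) from exc
        return transport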
class AsyncChannel(Channel):
"""A subclass for async/await-style implementation of channels, with flow control
This subclass provides asynchronous `read()` and `write()` calls for
subclasses, with familiar semantics. `write()` doesn't buffer, so the
`done()` method on the base channel class can be used in a way similar to
`shutdown()`. A high-level `sendfile()` method is available to send the
entire contents of a binary-mode file-like object.
The subclass must provide an async `run()` function, which will be spawned
as a task.
On the receiving side, the channel will respond to flow control pings to
indicate that it has received the data, but only after it has been consumed
by `read()`.
On the sending side, write() will block if the channel backs up.
"""
# Receive-side flow control: intermix pings and data in the queue and reply
# to pings as we dequeue them. This is a buffer: since we need to handle
# do_data() without blocking, we have no choice.
receive_queue = None
# Send-side flow control
write_waiter = None
async def run(self, options):
raise NotImplementedError
async def run_wrapper(self, options):
try:
await self.run(options)
self.close()
except ChannelError as exc:
self.close(exc.attrs)
async def read(self):
while True:
item = await self.receive_queue.get()
if isinstance(item, bytes):
return item
self.send_pong(item)
async def write(self, data):
if not self.send_data(data):
self.write_waiter = asyncio.get_running_loop().create_future()
await self.write_waiter
async def sendfile(self, stream: BinaryIO) -> None:
loop = asyncio.get_running_loop()
with stream:
while True:
data = await loop.run_in_executor(None, stream.read, Channel.BLOCK_SIZE)
if data == b'':
break
await self.write(data)
self.done()
def do_resume_send(self) -> None:
if self.write_waiter is not None:
self.write_waiter.set_result(None)
self.write_waiter = None
def do_open(self, options):
self.receive_queue = asyncio.Queue()
self.create_task(self.run_wrapper(options), name=f'{self.__class__.__name__}.run_wrapper({options})')
def do_done(self):
self.receive_queue.put_nowait(b'')
def do_close(self):
# we might have already sent EOF for done, but two EOFs won't hurt anyone
self.receive_queue.put_nowait(b'')
def do_ping(self, message):
self.receive_queue.put_nowait(message)
def do_data(self, data):
if not isinstance(data, bytes):
# this will persist past this callback, so make sure we take our
# own copy, in case this was a memoryview into a bytearray.
data = bytes(data)
self.receive_queue.put_nowait(data)
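# A hypothetical sketch (not part of the original file) of an AsyncChannel
# subclass built on the read()/write() API described above; the 'upper-echo'
# payload is invented for illustration.
class UpperEchoChannelSketch(AsyncChannel):
    payload = 'upper-echo'

    async def run(self, options: JsonObject) -> None:
        self.ready()
        while True:
            data = await self.read()
            if data == b'':  # do_done()/do_close() queue an empty block at EOF
                break
            await self.write(data.upper())  # waits when the send window is full
        self.done()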
class GeneratorChannel(Channel):
"""A trivial Channel subclass for sending data from a generator with flow control.
Calls the .do_yield_data() generator with the options from the open message
and sends the data which it yields. If the generator returns a value it
will be used for the close message.
"""
DataGenerator = Generator[bytes, None, Optional[JsonObject]]
__generator: DataGenerator
def do_yield_data(self, options: JsonObject) -> 'DataGenerator':
raise NotImplementedError
def do_open(self, options: JsonObject) -> None:
self.__generator = self.do_yield_data(options)
self.do_resume_send()
def do_resume_send(self) -> None:
try:
while self.send_data(next(self.__generator)):
pass
except StopIteration as stop:
self.done()
self.close(stop.value)
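# A hypothetical sketch (not part of the original file): the generator yields
# blocks of data, and its return value becomes the close message. The
# 'countdown' payload is invented for illustration.
class CountdownChannelSketch(GeneratorChannel):
    payload = 'countdown'

    def do_yield_data(self, options: JsonObject) -> 'GeneratorChannel.DataGenerator':
        for i in range(10, 0, -1):
            yield f'{i}\n'.encode()
        return {'message': 'lift-off'}  # included in the close message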

View File

@@ -1,40 +0,0 @@
# This file is part of Cockpit.
#
# Copyright (C) 2022 Red Hat, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from .dbus import DBusChannel
from .filesystem import FsInfoChannel, FsListChannel, FsReadChannel, FsReplaceChannel, FsWatchChannel
from .http import HttpChannel
from .metrics import InternalMetricsChannel
from .packages import PackagesChannel
from .stream import SocketStreamChannel, SubprocessStreamChannel
from .trivial import EchoChannel, NullChannel
CHANNEL_TYPES = [
DBusChannel,
EchoChannel,
FsInfoChannel,
FsListChannel,
FsReadChannel,
FsReplaceChannel,
FsWatchChannel,
HttpChannel,
InternalMetricsChannel,
NullChannel,
PackagesChannel,
SubprocessStreamChannel,
SocketStreamChannel,
]

View File

@@ -1,520 +0,0 @@
# This file is part of Cockpit.
#
# Copyright (C) 2022 Red Hat, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# Missing stuff compared to the C bridge that we should probably add:
#
# - removing matches
# - removing watches
# - emitting of signals
# - publishing of objects
# - failing more gracefully in some cases (during open, etc)
#
# Stuff we might or might not do:
#
# - using non-default service names
#
# Stuff we should probably not do:
#
# - emulation of ObjectManager via recursive introspection
# - automatic detection of ObjectManager below the given path_namespace
# - recursive scraping of properties for new object paths
# (for path_namespace watches that don't hit an ObjectManager)
import asyncio
import errno
import json
import logging
import traceback
import xml.etree.ElementTree as ET
from cockpit._vendor import systemd_ctypes
from cockpit._vendor.systemd_ctypes import Bus, BusError, introspection
from ..channel import Channel, ChannelError
logger = logging.getLogger(__name__)
# The dbusjson3 payload
#
# This channel payload type translates JSON encoded messages on a
# Cockpit channel to D-Bus messages, in a mostly straightforward way.
# See doc/protocol.md for a description of the basics.
#
# However, dbusjson3 offers some advanced features as well that are
# meant to support the "magic" DBusProxy objects implemented by
# cockpit.js. Those proxy objects "magically" expose all the methods
# and properties of a D-Bus interface without requiring any explicit
# binding code to be generated for a JavaScript client. A dbusjson3
# channel does this by doing automatic introspection and property
# retrieval without much direction from the JavaScript client.
#
# The details of what exactly is done are not specified very strictly,
# and the Python bridge will likely differ from the C bridge
# significantly. This will be informed by what existing code actually
# needs, and we might end up with a more concrete description of what
# a client can actually expect.
#
# Here is an example of a more complex scenario:
#
# - The client adds a "watch" for a path namespace. There is an
# ObjectManager at the given path and the bridge emits "meta" and
# "notify" messages to describe all interfaces and objects reported
# by that ObjectManager.
#
# - The client makes a method call that causes a new object with a new
# interface to appear at the ObjectManager. The bridge will send a
# "meta" and "notify" message to describe this new object.
#
# - Since the InterfacesAdded signal was emitted before the method
# reply, the bridge must send the "meta" and "notify" messages
# before the method reply message.
#
# - However, in order to construct the "meta" message, the bridge must
#   perform an Introspect call, and consequently must delay sending the
# method reply until that call has finished.
#
# The Python bridge implements this delaying of messages with
# coroutines and a fair mutex. Every message coming from D-Bus will
# wait on the mutex for its turn to send its message on the Cockpit
# channel, and will keep that mutex locked until it is done with
# sending. Since the mutex is fair, everyone will nicely wait in line
# without messages getting re-ordered.
#
# The scenario above will play out like this:
#
# - While adding the initial "watch", the lock is held until the
# "meta" and "notify" messages have been sent.
#
# - Later, when the InterfacesAdded signal comes in that has been
# triggered by the method call, the mutex will be locked while the
# necessary introspection is going on.
#
# - The method reply will likely come while the mutex is locked, and
# the task for sending that reply on the Cockpit channel will enter
# the wait queue of the mutex.
#
# - Once the introspection is done and the new "meta" and "notify"
# messages have been sent, the mutex is unlocked, the method reply
# task acquires it, and sends its message.
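#
# A minimal sketch of that ordering pattern (an illustration, not part of
# the implementation below; `send` stands in for Channel.send_json()):
#
#     lock = asyncio.Lock()  # asyncio.Lock is fair: FIFO wake-up order
#
#     async def send_in_order(message):
#         async with lock:   # wait our turn; later senders queue behind us
#             send(message)  # hold the lock until the message is fully sent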
class InterfaceCache:
def __init__(self):
self.cache = {}
self.old = set() # Interfaces already returned by get_interface_if_new
def inject(self, interfaces):
self.cache.update(interfaces)
async def introspect_path(self, bus, destination, object_path):
xml, = await bus.call_method_async(destination, object_path,
'org.freedesktop.DBus.Introspectable',
'Introspect')
et = ET.fromstring(xml)
interfaces = {tag.attrib['name']: introspection.parse_interface(tag) for tag in et.findall('interface')}
# Add all interfaces we found: we might use them later
self.inject(interfaces)
return interfaces
async def get_interface(self, interface_name, bus=None, destination=None, object_path=None):
try:
return self.cache[interface_name]
except KeyError:
pass
if bus and object_path:
try:
await self.introspect_path(bus, destination, object_path)
except BusError:
pass
return self.cache.get(interface_name)
async def get_interface_if_new(self, interface_name, bus, destination, object_path):
if interface_name in self.old:
return None
self.old.add(interface_name)
return await self.get_interface(interface_name, bus, destination, object_path)
async def get_signature(self, interface_name, method, bus=None, destination=None, object_path=None):
interface = await self.get_interface(interface_name, bus, destination, object_path)
if interface is None:
            raise KeyError(f'Interface {interface_name} not found')
return ''.join(interface['methods'][method]['in'])
def notify_update(notify, path, interface_name, props):
notify.setdefault(path, {})[interface_name] = {k: v.value for k, v in props.items()}
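# For example (an illustration; the property values are Variant-like
# objects exposing a `.value` attribute, as delivered by systemd_ctypes):
#
#     notify = {}
#     notify_update(notify, '/obj', 'org.Example.Iface', {'Prop': variant})
#     # notify == {'/obj': {'org.Example.Iface': {'Prop': variant.value}}}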
class DBusChannel(Channel):
json_encoder = systemd_ctypes.JSONEncoder(indent=2)
payload = 'dbus-json3'
matches = None
name = None
bus = None
owner = None
async def setup_name_owner_tracking(self):
def send_owner(owner):
# We must be careful not to send duplicate owner
# notifications. cockpit.js relies on that.
if self.owner != owner:
self.owner = owner
self.send_json(owner=owner)
def handler(message):
name, old, new = message.get_body()
send_owner(owner=new if new != "" else None)
self.add_signal_handler(handler,
sender='org.freedesktop.DBus',
path='/org/freedesktop/DBus',
interface='org.freedesktop.DBus',
member='NameOwnerChanged',
arg0=self.name)
try:
unique_name, = await self.bus.call_method_async("org.freedesktop.DBus",
"/org/freedesktop/DBus",
"org.freedesktop.DBus",
"GetNameOwner", "s", self.name)
except BusError as error:
if error.name == "org.freedesktop.DBus.Error.NameHasNoOwner":
# Try to start it. If it starts successfully, we will
# get a NameOwnerChanged signal (which will set
# self.owner) before StartServiceByName returns.
try:
await self.bus.call_method_async("org.freedesktop.DBus",
"/org/freedesktop/DBus",
"org.freedesktop.DBus",
"StartServiceByName", "su", self.name, 0)
except BusError as start_error:
logger.debug("Failed to start service '%s': %s", self.name, start_error.message)
self.send_json(owner=None)
else:
logger.debug("Failed to get owner of service '%s': %s", self.name, error.message)
else:
send_owner(unique_name)
def do_open(self, options):
self.cache = InterfaceCache()
self.name = options.get('name')
self.matches = []
bus = options.get('bus')
address = options.get('address')
try:
if address is not None:
if bus is not None and bus != 'none':
raise ChannelError('protocol-error', message='only one of "bus" and "address" can be specified')
logger.debug('get bus with address %s for %s', address, self.name)
self.bus = Bus.new(address=address, bus_client=self.name is not None)
elif bus == 'internal':
logger.debug('get internal bus for %s', self.name)
self.bus = self.router.internal_bus.client
else:
if bus == 'session':
logger.debug('get session bus for %s', self.name)
self.bus = Bus.default_user()
elif bus == 'system' or bus is None:
logger.debug('get system bus for %s', self.name)
self.bus = Bus.default_system()
else:
raise ChannelError('protocol-error', message=f'invalid bus "{bus}"')
except OSError as exc:
raise ChannelError('protocol-error', message=f'failed to connect to {bus} bus: {exc}') from exc
try:
self.bus.attach_event(None, 0)
except OSError as err:
if err.errno != errno.EBUSY:
raise
# This needs to be a fair mutex so that outgoing messages don't
# get re-ordered. asyncio.Lock is fair.
self.watch_processing_lock = asyncio.Lock()
if self.name is not None:
async def get_ready():
async with self.watch_processing_lock:
await self.setup_name_owner_tracking()
if self.owner:
self.ready(unique_name=self.owner)
else:
self.close({'problem': 'not-found'})
self.create_task(get_ready())
else:
self.ready()
def add_signal_handler(self, handler, **kwargs):
r = dict(**kwargs)
r['type'] = 'signal'
if 'sender' not in r and self.name is not None:
r['sender'] = self.name
# HACK - https://github.com/bus1/dbus-broker/issues/309
# path_namespace='/' in a rule does not work.
if r.get('path_namespace') == "/":
del r['path_namespace']
def filter_owner(message):
if self.owner is not None and self.owner == message.get_sender():
handler(message)
if self.name is not None and 'sender' in r and r['sender'] == self.name:
func = filter_owner
else:
func = handler
r_string = ','.join(f"{key}='{value}'" for key, value in r.items())
if not self.is_closing():
            # this gets an EINTR very often, especially on RHEL 8
while True:
try:
match = self.bus.add_match(r_string, func)
break
except InterruptedError:
pass
self.matches.append(match)
def add_async_signal_handler(self, handler, **kwargs):
def sync_handler(message):
self.create_task(handler(message))
self.add_signal_handler(sync_handler, **kwargs)
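    # An example "call" message as dispatched to do_call() below (a sketch
    # of the dbus-json3 shape; see doc/protocol.md):
    #
    #     {"call": ["/org/freedesktop/hostname1",
    #               "org.freedesktop.DBus.Properties", "Get",
    #               ["org.freedesktop.hostname1", "Hostname"]],
    #      "id": "cookie1", "type": "ss"}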
async def do_call(self, message):
path, iface, method, args = message['call']
cookie = message.get('id')
flags = message.get('flags')
timeout = message.get('timeout')
if timeout is not None:
# sd_bus timeout is µs, cockpit API timeout is ms
timeout *= 1000
else:
# sd_bus has no "indefinite" timeout, so use MAX_UINT64
timeout = 2 ** 64 - 1
# We have to figure out the signature of the call. Either we got told it:
signature = message.get('type')
# ... or there aren't any arguments
if signature is None and len(args) == 0:
signature = ''
# ... or we need to introspect
if signature is None:
try:
logger.debug('Doing introspection request for %s %s', iface, method)
signature = await self.cache.get_signature(iface, method, self.bus, self.name, path)
except BusError as error:
self.send_json(error=[error.name, [f'Introspection: {error.message}']], id=cookie)
return
except KeyError:
self.send_json(
error=[
"org.freedesktop.DBus.Error.UnknownMethod",
[f"Introspection data for method {iface} {method} not available"]],
id=cookie)
return
except Exception as exc:
self.send_json(error=['python.error', [f'Introspection: {exc!s}']], id=cookie)
return
try:
method_call = self.bus.message_new_method_call(self.name, path, iface, method, signature, *args)
reply = await self.bus.call_async(method_call, timeout=timeout)
# If the method call has kicked off any signals related to
# watch processing, wait for that to be done.
async with self.watch_processing_lock:
# TODO: stop hard-coding the endian flag here.
self.send_json(
reply=[reply.get_body()], id=cookie,
flags="<" if flags is not None else None,
type=reply.get_signature(True)) # noqa: FBT003
except BusError as error:
# actually, should send the fields from the message body
self.send_json(error=[error.name, [error.message]], id=cookie)
except Exception:
logger.exception("do_call(%s): generic exception", message)
self.send_json(error=['python.error', [traceback.format_exc()]], id=cookie)
async def do_add_match(self, message):
add_match = message['add-match']
logger.debug('adding match %s', add_match)
async def match_hit(message):
logger.debug('got match')
async with self.watch_processing_lock:
self.send_json(signal=[
message.get_path(),
message.get_interface(),
message.get_member(),
list(message.get_body())
])
self.add_async_signal_handler(match_hit, **add_match)
async def setup_objectmanager_watch(self, path, interface_name, meta, notify):
# Watch the objects managed by the ObjectManager at "path".
        # Properties are not watched; that is done by setup_path_watch
# below via recursive_props == True.
async def handler(message):
member = message.get_member()
if member == "InterfacesAdded":
(path, interface_props) = message.get_body()
logger.debug('interfaces added %s %s', path, interface_props)
meta = {}
notify = {}
async with self.watch_processing_lock:
for name, props in interface_props.items():
if interface_name is None or name == interface_name:
mm = await self.cache.get_interface_if_new(name, self.bus, self.name, path)
if mm:
meta.update({name: mm})
notify_update(notify, path, name, props)
self.send_json(meta=meta)
self.send_json(notify=notify)
elif member == "InterfacesRemoved":
(path, interfaces) = message.get_body()
logger.debug('interfaces removed %s %s', path, interfaces)
async with self.watch_processing_lock:
notify = {path: {name: None for name in interfaces}}
self.send_json(notify=notify)
self.add_async_signal_handler(handler,
path=path,
interface="org.freedesktop.DBus.ObjectManager")
objects, = await self.bus.call_method_async(self.name, path,
'org.freedesktop.DBus.ObjectManager',
'GetManagedObjects')
for p, ifaces in objects.items():
for iface, props in ifaces.items():
if interface_name is None or iface == interface_name:
mm = await self.cache.get_interface_if_new(iface, self.bus, self.name, p)
if mm:
meta.update({iface: mm})
notify_update(notify, p, iface, props)
async def setup_path_watch(self, path, interface_name, recursive_props, meta, notify):
# Watch a single object at "path", but maybe also watch for
# property changes for all objects below "path".
async def handler(message):
async with self.watch_processing_lock:
path = message.get_path()
name, props, invalids = message.get_body()
logger.debug('NOTIFY: %s %s %s %s', path, name, props, invalids)
for inv in invalids:
try:
reply, = await self.bus.call_method_async(self.name, path,
'org.freedesktop.DBus.Properties', 'Get',
'ss', name, inv)
except BusError as exc:
logger.debug('failed to fetch property %s.%s on %s %s: %s',
name, inv, self.name, path, str(exc))
continue
props[inv] = reply
notify = {}
notify_update(notify, path, name, props)
self.send_json(notify=notify)
this_meta = await self.cache.introspect_path(self.bus, self.name, path)
if interface_name is not None:
interface = this_meta.get(interface_name)
this_meta = {interface_name: interface}
meta.update(this_meta)
if recursive_props:
self.add_async_signal_handler(handler,
interface="org.freedesktop.DBus.Properties",
path_namespace=path)
else:
self.add_async_signal_handler(handler,
interface="org.freedesktop.DBus.Properties",
path=path)
for name in meta:
if name.startswith("org.freedesktop.DBus."):
continue
try:
props, = await self.bus.call_method_async(self.name, path,
'org.freedesktop.DBus.Properties',
'GetAll', 's', name)
notify_update(notify, path, name, props)
except BusError:
pass
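    # An example "watch" message as dispatched to do_watch() below (a
    # sketch; the service name is hypothetical):
    #
    #     {"watch": {"path_namespace": "/com/example/Manager"}, "id": "cookie2"}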
async def do_watch(self, message):
watch = message['watch']
path = watch.get('path')
path_namespace = watch.get('path_namespace')
interface_name = watch.get('interface')
cookie = message.get('id')
path = path or path_namespace
recursive = path == path_namespace
if path is None or cookie is None:
logger.debug('ignored incomplete watch request %s', message)
self.send_json(error=['x.y.z', ['Not Implemented']], id=cookie)
self.send_json(reply=[], id=cookie)
return
try:
async with self.watch_processing_lock:
meta = {}
notify = {}
await self.setup_path_watch(path, interface_name, recursive, meta, notify)
if recursive:
await self.setup_objectmanager_watch(path, interface_name, meta, notify)
self.send_json(meta=meta)
self.send_json(notify=notify)
                self.send_json(reply=[], id=cookie)
except BusError as error:
logger.debug("do_watch(%s) caught D-Bus error: %s", message, error.message)
self.send_json(error=[error.name, [error.message]], id=cookie)
async def do_meta(self, message):
self.cache.inject(message['meta'])
def do_data(self, data):
message = json.loads(data)
logger.debug('receive dbus request %s %s', self.name, message)
if 'call' in message:
self.create_task(self.do_call(message))
elif 'add-match' in message:
self.create_task(self.do_add_match(message))
elif 'watch' in message:
self.create_task(self.do_watch(message))
elif 'meta' in message:
self.create_task(self.do_meta(message))
else:
logger.debug('ignored dbus request %s', message)
return
def do_close(self):
for slot in self.matches:
slot.cancel()
self.matches = []
self.close()

View File

@ -1,540 +0,0 @@
# This file is part of Cockpit.
#
# Copyright (C) 2022 Red Hat, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import asyncio
import contextlib
import enum
import errno
import fnmatch
import functools
import grp
import logging
import os
import pwd
import random
import stat
from typing import Callable, Iterable
from cockpit._vendor.systemd_ctypes import Handle, PathWatch
from cockpit._vendor.systemd_ctypes.inotify import Event as InotifyEvent
from cockpit._vendor.systemd_ctypes.pathwatch import Listener as PathWatchListener
from ..channel import Channel, ChannelError, GeneratorChannel
from ..jsonutil import (
JsonDict,
JsonDocument,
JsonError,
JsonObject,
get_bool,
get_int,
get_str,
get_strv,
json_merge_and_filter_patch,
)
logger = logging.getLogger(__name__)
def tag_from_stat(buf):
return f'1:{buf.st_ino}-{buf.st_mtime}'
def tag_from_path(path):
try:
return tag_from_stat(os.stat(path))
except FileNotFoundError:
return '-'
except OSError:
return None
def tag_from_fd(fd):
try:
return tag_from_stat(os.fstat(fd))
except OSError:
return None
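# A hypothetical example: a file with st_ino=131 and st_mtime=1700000000.0
# yields the tag '1:131-1700000000.0'; '-' means "does not exist" and None
# means "could not be determined".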
class FsListChannel(Channel):
payload = 'fslist1'
def send_entry(self, event, entry):
if entry.is_symlink():
mode = 'link'
elif entry.is_file():
mode = 'file'
elif entry.is_dir():
mode = 'directory'
else:
mode = 'special'
self.send_json(event=event, path=entry.name, type=mode)
def do_open(self, options):
path = options.get('path')
watch = options.get('watch', True)
if watch:
raise ChannelError('not-supported', message='watching is not implemented, use fswatch1')
try:
scan_dir = os.scandir(path)
except OSError as error:
if isinstance(error, FileNotFoundError):
problem = 'not-found'
elif isinstance(error, PermissionError):
problem = 'access-denied'
else:
problem = 'internal-error'
raise ChannelError(problem, message=str(error)) from error
self.ready()
for entry in scan_dir:
self.send_entry("present", entry)
if not watch:
self.done()
self.close()
class FsReadChannel(GeneratorChannel):
payload = 'fsread1'
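    # Example "open" options (a sketch; the path is hypothetical):
    #
    #     {"payload": "fsread1", "path": "/etc/hostname", "max_read_size": 65536}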
def do_yield_data(self, options: JsonObject) -> GeneratorChannel.DataGenerator:
path = get_str(options, 'path')
binary = get_str(options, 'binary', None)
max_read_size = get_int(options, 'max_read_size', None)
logger.debug('Opening file "%s" for reading', path)
try:
with open(path, 'rb') as filep:
buf = os.stat(filep.fileno())
if max_read_size is not None and buf.st_size > max_read_size:
raise ChannelError('too-large')
if binary and stat.S_ISREG(buf.st_mode):
self.ready(size_hint=buf.st_size)
else:
self.ready()
while True:
data = filep.read1(Channel.BLOCK_SIZE)
if data == b'':
break
logger.debug(' ...sending %d bytes', len(data))
if not binary:
data = data.replace(b'\0', b'').decode('utf-8', errors='ignore').encode('utf-8')
yield data
return {'tag': tag_from_stat(buf)}
except FileNotFoundError:
return {'tag': '-'}
except PermissionError as exc:
raise ChannelError('access-denied') from exc
except OSError as exc:
raise ChannelError('internal-error', message=str(exc)) from exc
class FsReplaceChannel(Channel):
payload = 'fsreplace1'
_path = None
_tag = None
_tempfile = None
_temppath = None
def unlink_temppath(self):
try:
os.unlink(self._temppath)
except OSError:
pass # might have been removed from outside
def do_open(self, options):
self._path = options.get('path')
self._tag = options.get('tag')
self.ready()
def do_data(self, data):
if self._tempfile is None:
# keep this bounded, in case anything unexpected goes wrong
for _ in range(10):
suffix = ''.join(random.choices("abcdefghijklmnopqrstuvwxyz0123456789_", k=6))
self._temppath = f'{self._path}.cockpit-tmp.{suffix}'
try:
fd = os.open(self._temppath, os.O_CREAT | os.O_WRONLY | os.O_EXCL, 0o666)
break
except FileExistsError:
continue
except PermissionError as exc:
raise ChannelError('access-denied') from exc
except OSError as exc:
raise ChannelError('internal-error', message=str(exc)) from exc
else:
raise ChannelError('internal-error',
message=f"Could not find unique file name for replacing {self._path}")
try:
self._tempfile = os.fdopen(fd, 'wb')
except OSError:
# Should Not Happen™, but let's be safe and avoid fd leak
os.close(fd)
self.unlink_temppath()
raise
self._tempfile.write(data)
def do_done(self):
if self._tempfile is None:
try:
os.unlink(self._path)
# crash on other errors, as they are unexpected
except FileNotFoundError:
pass
else:
self._tempfile.flush()
if self._tag and self._tag != tag_from_path(self._path):
raise ChannelError('change-conflict')
try:
os.rename(self._temppath, self._path)
except OSError:
# ensure to not leave the temp file behind
self.unlink_temppath()
raise
self._tempfile.close()
self._tempfile = None
self.done()
self.close({'tag': tag_from_path(self._path)})
def do_close(self):
if self._tempfile is not None:
self._tempfile.close()
self.unlink_temppath()
self._tempfile = None
class FsWatchChannel(Channel):
payload = 'fswatch1'
_tag = None
_path = None
_watch = None
# The C bridge doesn't send the initial event, and the JS calls read()
# instead to figure out the initial state of the file. If we send the
# initial state then we cause the event to get delivered twice.
# Ideally we'll sort that out at some point, but for now, suppress it.
_active = False
@staticmethod
def mask_to_event_and_type(mask):
        if InotifyEvent.CREATE in mask or InotifyEvent.MOVED_TO in mask:
return 'created', 'directory' if InotifyEvent.ISDIR in mask else 'file'
elif InotifyEvent.MOVED_FROM in mask or InotifyEvent.DELETE in mask or InotifyEvent.DELETE_SELF in mask:
return 'deleted', None
elif InotifyEvent.ATTRIB in mask:
return 'attribute-changed', None
elif InotifyEvent.CLOSE_WRITE in mask:
return 'done-hint', None
else:
return 'changed', None
def do_inotify_event(self, mask, _cookie, name):
logger.debug("do_inotify_event(%s): mask %X name %s", self._path, mask, name)
event, type_ = self.mask_to_event_and_type(mask)
if name:
# file inside watched directory changed
path = os.path.join(self._path, name.decode())
tag = tag_from_path(path)
self.send_json(event=event, path=path, tag=tag, type=type_)
else:
# the watched path itself changed; filter out duplicate events
tag = tag_from_path(self._path)
if tag == self._tag:
return
self._tag = tag
self.send_json(event=event, path=self._path, tag=self._tag, type=type_)
def do_identity_changed(self, fd, err):
logger.debug("do_identity_changed(%s): fd %s, err %s", self._path, str(fd), err)
self._tag = tag_from_fd(fd) if fd else '-'
if self._active:
self.send_json(event='created' if fd else 'deleted', path=self._path, tag=self._tag)
def do_open(self, options):
self._path = options['path']
self._tag = None
self._active = False
self._watch = PathWatch(self._path, self)
self._active = True
self.ready()
def do_close(self):
self._watch.close()
self._watch = None
self.close()
class Follow(enum.Enum):
NO = False
YES = True
class FsInfoChannel(Channel, PathWatchListener):
payload = 'fsinfo'
# Options (all get set in `do_open()`)
path: str
attrs: 'set[str]'
fnmatch: str
targets: bool
follow: bool
watch: bool
# State
current_value: JsonDict
effective_fnmatch: str = ''
fd: 'Handle | None' = None
pending: 'set[str] | None' = None
path_watch: 'PathWatch | None' = None
getattrs: 'Callable[[int, str, Follow], JsonDocument]'
@staticmethod
def make_getattrs(attrs: Iterable[str]) -> 'Callable[[int, str, Follow], JsonDocument | None]':
# Cached for the duration of the closure we're creating
@functools.lru_cache()
def get_user(uid: int) -> 'str | int':
try:
return pwd.getpwuid(uid).pw_name
except KeyError:
return uid
@functools.lru_cache()
def get_group(gid: int) -> 'str | int':
try:
return grp.getgrgid(gid).gr_name
except KeyError:
return gid
stat_types = {stat.S_IFREG: 'reg', stat.S_IFDIR: 'dir', stat.S_IFLNK: 'lnk', stat.S_IFCHR: 'chr',
stat.S_IFBLK: 'blk', stat.S_IFIFO: 'fifo', stat.S_IFSOCK: 'sock'}
available_stat_getters = {
'type': lambda buf: stat_types.get(stat.S_IFMT(buf.st_mode)),
'tag': tag_from_stat,
'mode': lambda buf: stat.S_IMODE(buf.st_mode),
'size': lambda buf: buf.st_size,
'uid': lambda buf: buf.st_uid,
'gid': lambda buf: buf.st_gid,
'mtime': lambda buf: buf.st_mtime,
'user': lambda buf: get_user(buf.st_uid),
'group': lambda buf: get_group(buf.st_gid),
}
stat_getters = tuple((key, available_stat_getters.get(key, lambda _: None)) for key in attrs)
def get_attrs(fd: int, name: str, follow: Follow) -> 'JsonDict | None':
try:
buf = os.stat(name, follow_symlinks=follow.value, dir_fd=fd) if name else os.fstat(fd)
except FileNotFoundError:
return None
except OSError:
return {name: None for name, func in stat_getters}
result = {key: func(buf) for key, func in stat_getters}
if 'target' in result and stat.S_IFMT(buf.st_mode) == stat.S_IFLNK:
with contextlib.suppress(OSError):
result['target'] = os.readlink(name, dir_fd=fd)
return result
return get_attrs
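    # For example (a sketch): with attrs={'type', 'size'}, a regular file
    # might produce {'type': 'reg', 'size': 1024}; a missing name produces
    # None, and any other stat() failure maps every requested attribute to
    # None.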
def send_update(self, updates: JsonDict, *, reset: bool = False) -> None:
if reset:
if set(self.current_value) & set(updates):
# if we have an overlap, we need to do a proper reset
self.send_json({name: None for name in self.current_value}, partial=True)
self.current_value = {'partial': True}
updates.update(partial=None)
else:
# otherwise there's no overlap: we can just remove the old keys
updates.update({key: None for key in self.current_value})
json_merge_and_filter_patch(self.current_value, updates)
if updates:
self.send_json(updates)
def process_update(self, updates: 'set[str]', *, reset: bool = False) -> None:
assert self.fd is not None
entries: JsonDict = {name: self.getattrs(self.fd, name, Follow.NO) for name in updates}
info = entries.pop('', {})
assert isinstance(info, dict) # fstat() will never fail with FileNotFoundError
if self.effective_fnmatch:
info['entries'] = entries
if self.targets:
info['targets'] = targets = {}
for name in {e.get('target') for e in entries.values() if isinstance(e, dict)}:
if isinstance(name, str) and ('/' in name or not self.interesting(name)):
# if this target is a string that we wouldn't otherwise
# report, then report it via our "targets" attribute.
targets[name] = self.getattrs(self.fd, name, Follow.YES)
self.send_update({'info': info}, reset=reset)
def process_pending_updates(self) -> None:
assert self.pending is not None
if self.pending:
self.process_update(self.pending)
self.pending = None
def interesting(self, name: str) -> bool:
if name == '':
return True
else:
# only report updates on entry filenames if we match them
return fnmatch.fnmatch(name, self.effective_fnmatch)
def schedule_update(self, name: str) -> None:
if not self.interesting(name):
return
if self.pending is None:
asyncio.get_running_loop().call_later(0.1, self.process_pending_updates)
self.pending = set()
self.pending.add(name)
def report_error(self, err: int) -> None:
if err == errno.ENOENT:
problem = 'not-found'
elif err in (errno.EPERM, errno.EACCES):
problem = 'access-denied'
elif err == errno.ENOTDIR:
problem = 'not-directory'
else:
problem = 'internal-error'
self.send_update({'error': {
'problem': problem, 'message': os.strerror(err), 'errno': errno.errorcode[err]
}}, reset=True)
def flag_onlydir_error(self, fd: Handle) -> bool:
# If our requested path ended with '/' then make sure we got a
# directory, or else it's an error. open() will have already flagged
# that for us, but systemd_ctypes doesn't do that (yet).
if not self.watch or not self.path.endswith('/'):
return False
buf = os.fstat(fd) # this should never fail
if stat.S_IFMT(buf.st_mode) != stat.S_IFDIR:
self.report_error(errno.ENOTDIR)
return True
return False
def report_initial_state(self, fd: Handle) -> None:
if self.flag_onlydir_error(fd):
return
self.fd = fd
entries = {''}
if self.fnmatch:
try:
entries.update(os.listdir(f'/proc/self/fd/{self.fd}'))
self.effective_fnmatch = self.fnmatch
except OSError:
# If we failed to get an initial list, then report nothing from now on
self.effective_fnmatch = ''
self.process_update({e for e in entries if self.interesting(e)}, reset=True)
def do_inotify_event(self, mask: InotifyEvent, cookie: int, rawname: 'bytes | None') -> None:
logger.debug('do_inotify_event(%r, %r, %r)', mask, cookie, rawname)
name = (rawname or b'').decode(errors='surrogateescape')
self.schedule_update(name)
        if name and mask & (InotifyEvent.CREATE | InotifyEvent.DELETE |
                            InotifyEvent.MOVED_TO | InotifyEvent.MOVED_FROM):
# These events change the mtime of the directory
self.schedule_update('')
def do_identity_changed(self, fd: 'Handle | None', err: 'int | None') -> None:
logger.debug('do_identity_changed(%r, %r)', fd, err)
# If there were previously pending changes, they are now irrelevant.
if self.pending is not None:
# Note: don't set to None, since the handler is still pending
self.pending.clear()
if err is None:
assert fd is not None
self.report_initial_state(fd)
else:
self.report_error(err)
def do_close(self) -> None:
# non-watch channels close immediately — if we get this, we're watching
assert self.path_watch is not None
self.path_watch.close()
self.close()
def do_open(self, options: JsonObject) -> None:
self.path = get_str(options, 'path')
if not os.path.isabs(self.path):
raise JsonError(options, '"path" must be an absolute path')
attrs = set(get_strv(options, 'attrs'))
self.getattrs = self.make_getattrs(attrs - {'targets', 'entries'})
self.fnmatch = get_str(options, 'fnmatch', '*' if 'entries' in attrs else '')
self.targets = 'targets' in attrs
self.follow = get_bool(options, 'follow', default=True)
self.watch = get_bool(options, 'watch', default=False)
if self.watch and not self.follow:
raise JsonError(options, '"watch: true" and "follow: false" are (currently) incompatible')
if self.targets and not self.follow:
raise JsonError(options, '`targets: "stat"` and `follow: false` are (currently) incompatible')
self.current_value = {}
self.ready()
if not self.watch:
try:
fd = Handle.open(self.path, os.O_PATH if self.follow else os.O_PATH | os.O_NOFOLLOW)
except OSError as exc:
self.report_error(exc.errno)
else:
self.report_initial_state(fd)
fd.close()
self.done()
self.close()
else:
# PathWatch will call do_identity_changed(), which does the same as
# above: calls either report_initial_state() or report_error(),
# depending on if it was provided with an fd or an error code.
self.path_watch = PathWatch(self.path, self)

View File

@ -1,158 +0,0 @@
# This file is part of Cockpit.
#
# Copyright (C) 2022 Red Hat, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import asyncio
import http.client
import logging
import socket
import ssl
from ..channel import AsyncChannel, ChannelError
from ..jsonutil import JsonObject, get_dict, get_int, get_object, get_str, typechecked
logger = logging.getLogger(__name__)
class HttpChannel(AsyncChannel):
payload = 'http-stream2'
@staticmethod
def get_headers(response: http.client.HTTPResponse, binary: 'str | None') -> JsonObject:
# Never send these headers
remove = {'Connection', 'Transfer-Encoding'}
if binary != 'raw':
# Only send these headers for raw binary streams
remove.update({'Content-Length', 'Range'})
return {key: value for key, value in response.getheaders() if key not in remove}
@staticmethod
def create_client(options: JsonObject) -> http.client.HTTPConnection:
opt_address = get_str(options, 'address', 'localhost')
opt_tls = get_dict(options, 'tls', None)
opt_unix = get_str(options, 'unix', None)
opt_port = get_int(options, 'port', None)
if opt_tls is not None and opt_unix is not None:
raise ChannelError('protocol-error', message='TLS on Unix socket is not supported')
if opt_port is None and opt_unix is None:
raise ChannelError('protocol-error', message='no "port" or "unix" option for channel')
if opt_port is not None and opt_unix is not None:
raise ChannelError('protocol-error', message='cannot specify both "port" and "unix" options')
if opt_tls is not None:
authority = get_dict(opt_tls, 'authority', None)
if authority is not None:
data = get_str(authority, 'data', None)
if data is not None:
context = ssl.create_default_context(cadata=data)
else:
context = ssl.create_default_context(cafile=get_str(authority, 'file'))
else:
context = ssl.create_default_context()
if 'validate' in opt_tls and not opt_tls['validate']:
context.check_hostname = False
context.verify_mode = ssl.VerifyMode.CERT_NONE
# See https://github.com/python/typeshed/issues/11057
return http.client.HTTPSConnection(opt_address, port=opt_port, context=context) # type: ignore[arg-type]
else:
return http.client.HTTPConnection(opt_address, port=opt_port)
@staticmethod
def connect(connection: http.client.HTTPConnection, opt_unix: 'str | None') -> None:
# Blocks. Runs in a thread.
if opt_unix:
# create the connection's socket so that it won't call .connect() internally (which only supports TCP)
connection.sock = socket.socket(socket.AF_UNIX)
connection.sock.connect(opt_unix)
else:
# explicitly call connect(), so that we can do proper error handling
connection.connect()
@staticmethod
def request(
connection: http.client.HTTPConnection, method: str, path: str, headers: 'dict[str, str]', body: bytes
) -> http.client.HTTPResponse:
# Blocks. Runs in a thread.
connection.request(method, path, headers=headers or {}, body=body)
return connection.getresponse()
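    # Example "open" options accepted by run() below (a sketch; host and
    # path are hypothetical):
    #
    #     {"payload": "http-stream2", "method": "GET", "path": "/status",
    #      "port": 8080, "headers": {"Accept": "application/json"}}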
async def run(self, options: JsonObject) -> None:
logger.debug('open %s', options)
binary = get_str(options, 'binary', None)
method = get_str(options, 'method')
path = get_str(options, 'path')
headers = get_object(options, 'headers', lambda d: {k: typechecked(v, str) for k, v in d.items()}, None)
if 'connection' in options:
raise ChannelError('protocol-error', message='connection sharing is not implemented on this bridge')
loop = asyncio.get_running_loop()
connection = self.create_client(options)
self.ready()
body = b''
while True:
data = await self.read()
if data == b'':
break
body += data
# Connect in a thread and handle errors
try:
await loop.run_in_executor(None, self.connect, connection, get_str(options, 'unix', None))
except ssl.SSLCertVerificationError as exc:
raise ChannelError('unknown-hostkey', message=str(exc)) from exc
        except OSError as exc:  # IOError is an alias of OSError since Python 3.3
raise ChannelError('not-found', message=str(exc)) from exc
# Submit request in a thread and handle errors
try:
response = await loop.run_in_executor(None, self.request, connection, method, path, headers or {}, body)
except (http.client.HTTPException, OSError) as exc:
raise ChannelError('terminated', message=str(exc)) from exc
self.send_control(command='response',
status=response.status,
reason=response.reason,
headers=self.get_headers(response, binary))
# Receive the body and finish up
try:
while True:
block = await loop.run_in_executor(None, response.read1, self.BLOCK_SIZE)
if not block:
break
await self.write(block)
logger.debug('reading response done')
# this returns immediately and does not read anything more, but updates the http.client's
# internal state machine to "response done"
block = response.read()
assert block == b''
await loop.run_in_executor(None, connection.close)
except (http.client.HTTPException, OSError) as exc:
raise ChannelError('terminated', message=str(exc)) from exc
self.done()

View File

@ -1,185 +0,0 @@
# This file is part of Cockpit.
#
# Copyright (C) 2022 Red Hat, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import asyncio
import json
import logging
import sys
import time
from collections import defaultdict
from typing import Dict, List, NamedTuple, Optional, Set, Tuple, Union
from ..channel import AsyncChannel, ChannelError
from ..jsonutil import JsonList
from ..samples import SAMPLERS, SampleDescription, Sampler, Samples
logger = logging.getLogger(__name__)
class MetricInfo(NamedTuple):
derive: Optional[str]
desc: SampleDescription
class InternalMetricsChannel(AsyncChannel):
payload = 'metrics1'
restrictions = [('source', 'internal')]
metrics: List[MetricInfo]
samplers: Set
samplers_cache: Optional[Dict[str, Tuple[Sampler, SampleDescription]]] = None
interval: int = 1000
need_meta: bool = True
last_timestamp: float = 0
next_timestamp: float = 0
@classmethod
def ensure_samplers(cls):
if cls.samplers_cache is None:
cls.samplers_cache = {desc.name: (sampler, desc) for sampler in SAMPLERS for desc in sampler.descriptions}
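    # Example options as parsed below (a sketch; the metric name must match
    # one of the SAMPLERS descriptions):
    #
    #     {"payload": "metrics1", "source": "internal", "interval": 1000,
    #      "metrics": [{"name": "cpu.basic.user", "derive": "rate"}]}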
def parse_options(self, options):
logger.debug('metrics internal open: %s, channel: %s', options, self.channel)
interval = options.get('interval', self.interval)
if not isinstance(interval, int) or interval <= 0 or interval > sys.maxsize:
raise ChannelError('protocol-error', message=f'invalid "interval" value: {interval}')
self.interval = interval
metrics = options.get('metrics')
if not isinstance(metrics, list) or len(metrics) == 0:
logger.error('invalid "metrics" value: %s', metrics)
raise ChannelError('protocol-error', message='invalid "metrics" option was specified (not an array)')
sampler_classes = set()
for metric in metrics:
            # TODO: validate that each metric is an object
name = metric.get('name')
units = metric.get('units')
derive = metric.get('derive')
try:
sampler, desc = self.samplers_cache[name]
except KeyError as exc:
logger.error('unsupported metric: %s', name)
raise ChannelError('not-supported', message=f'unsupported metric: {name}') from exc
if units and units != desc.units:
raise ChannelError('not-supported', message=f'{name} has units {desc.units}, not {units}')
sampler_classes.add(sampler)
self.metrics.append(MetricInfo(derive=derive, desc=desc))
self.samplers = {cls() for cls in sampler_classes}
def send_meta(self, samples: Samples, timestamp: float):
metrics: JsonList = []
for metricinfo in self.metrics:
if metricinfo.desc.instanced:
metrics.append({
'name': metricinfo.desc.name,
'units': metricinfo.desc.units,
'instances': list(samples[metricinfo.desc.name].keys()),
'semantics': metricinfo.desc.semantics
})
else:
metrics.append({
'name': metricinfo.desc.name,
'derive': metricinfo.derive, # type: ignore[dict-item]
'units': metricinfo.desc.units,
'semantics': metricinfo.desc.semantics
})
self.send_json(source='internal', interval=self.interval, timestamp=timestamp * 1000, metrics=metrics)
self.need_meta = False
def sample(self):
samples = defaultdict(dict)
for sampler in self.samplers:
sampler.sample(samples)
return samples
def calculate_sample_rate(self, value: float, old_value: Optional[float]) -> Union[float, bool]:
if old_value is not None and self.last_timestamp:
return (value - old_value) / (self.next_timestamp - self.last_timestamp)
else:
return False
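    # A worked example: a counter sampled as 100.0 at t=10.0s and 160.0 at
    # t=12.0s yields a rate of (160.0 - 100.0) / 2.0 = 30.0 per second; the
    # very first sample has no history, so False is reported instead.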
def send_updates(self, samples: Samples, last_samples: Samples):
data: List[Union[float, List[Optional[Union[float, bool]]]]] = []
timestamp = time.time()
self.next_timestamp = timestamp
for metricinfo in self.metrics:
value = samples[metricinfo.desc.name]
if metricinfo.desc.instanced:
old_value = last_samples[metricinfo.desc.name]
assert isinstance(value, dict)
assert isinstance(old_value, dict)
                # If the set of keys changed, the instances changed; send a meta message.
if value.keys() != old_value.keys():
self.need_meta = True
if metricinfo.derive == 'rate':
instances: List[Optional[Union[float, bool]]] = []
for key, val in value.items():
instances.append(self.calculate_sample_rate(val, old_value.get(key)))
data.append(instances)
else:
data.append(list(value.values()))
else:
old_value = last_samples.get(metricinfo.desc.name)
assert not isinstance(value, dict)
assert not isinstance(old_value, dict)
if metricinfo.derive == 'rate':
data.append(self.calculate_sample_rate(value, old_value))
else:
data.append(value)
if self.need_meta:
self.send_meta(samples, timestamp)
self.last_timestamp = self.next_timestamp
self.send_data(json.dumps([data]).encode())
async def run(self, options):
self.metrics = []
self.samplers = set()
InternalMetricsChannel.ensure_samplers()
self.parse_options(options)
self.ready()
last_samples = defaultdict(dict)
while True:
samples = self.sample()
self.send_updates(samples, last_samples)
last_samples = samples
try:
await asyncio.wait_for(self.read(), self.interval / 1000)
return
except asyncio.TimeoutError:
# Continue the while loop, we use wait_for as an interval timer.
continue

View File

@ -1,101 +0,0 @@
# This file is part of Cockpit.
#
# Copyright (C) 2022 Red Hat, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import logging
from typing import Optional
from ..channel import AsyncChannel
from ..data import read_cockpit_data_file
from ..jsonutil import JsonObject, get_dict, get_str
from ..packages import Packages
logger = logging.getLogger(__name__)
class PackagesChannel(AsyncChannel):
payload = 'http-stream1'
restrictions = [("internal", "packages")]
# used to carry data forward from open to done
options: Optional[JsonObject] = None
def http_error(self, status: int, message: str) -> None:
template = read_cockpit_data_file('fail.html')
self.send_json(status=status, reason='ERROR', headers={'Content-Type': 'text/html; charset=utf-8'})
self.send_data(template.replace(b'@@message@@', message.encode('utf-8')))
self.done()
self.close()
async def run(self, options: JsonObject) -> None:
packages: Packages = self.router.packages # type: ignore[attr-defined] # yes, this is evil
try:
if get_str(options, 'method') != 'GET':
raise ValueError(f'Unsupported HTTP method {options["method"]}')
self.ready()
if await self.read() != b'':
raise ValueError('Received unexpected data')
path = get_str(options, 'path')
headers = get_dict(options, 'headers')
document = packages.load_path(path, headers)
# Note: we can't cache documents right now. See
# https://github.com/cockpit-project/cockpit/issues/19071
# for future plans.
out_headers = {
'Cache-Control': 'no-cache, no-store',
'Content-Type': document.content_type,
}
if document.content_encoding is not None:
out_headers['Content-Encoding'] = document.content_encoding
if document.content_security_policy is not None:
policy = document.content_security_policy
# https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/connect-src
#
# Note: connect-src 'self' does not resolve to websocket
# schemes in all browsers, more info in this issue.
#
# https://github.com/w3c/webappsec-csp/issues/7
if "connect-src 'self';" in policy:
protocol = headers.get('X-Forwarded-Proto')
host = headers.get('X-Forwarded-Host')
if not isinstance(protocol, str) or not isinstance(host, str):
raise ValueError('Invalid host or protocol header')
websocket_scheme = "wss" if protocol == "https" else "ws"
websocket_origin = f"{websocket_scheme}://{host}"
policy = policy.replace("connect-src 'self';", f"connect-src {websocket_origin} 'self';")
out_headers['Content-Security-Policy'] = policy
except ValueError as exc:
self.http_error(400, str(exc))
except KeyError:
self.http_error(404, 'Not found')
except OSError as exc:
self.http_error(500, f'Internal error: {exc!s}')
else:
self.send_json(status=200, reason='OK', headers=out_headers)
await self.sendfile(document.data)

View File

@ -1,120 +0,0 @@
# This file is part of Cockpit.
#
# Copyright (C) 2022 Red Hat, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import asyncio
import logging
import os
import subprocess
from typing import Dict
from ..channel import ChannelError, ProtocolChannel
from ..jsonutil import JsonDict, JsonObject, get_bool, get_int, get_object, get_str, get_strv
from ..transports import SubprocessProtocol, SubprocessTransport, WindowSize
logger = logging.getLogger(__name__)
class SocketStreamChannel(ProtocolChannel):
payload = 'stream'
async def create_transport(self, loop: asyncio.AbstractEventLoop, options: JsonObject) -> asyncio.Transport:
if 'unix' in options and 'port' in options:
raise ChannelError('protocol-error', message='cannot specify both "port" and "unix" options')
try:
# Unix
if 'unix' in options:
path = get_str(options, 'unix')
label = f'Unix socket {path}'
transport, _ = await loop.create_unix_connection(lambda: self, path)
# TCP
elif 'port' in options:
port = get_int(options, 'port')
host = get_str(options, 'address', 'localhost')
label = f'TCP socket {host}:{port}'
transport, _ = await loop.create_connection(lambda: self, host, port)
else:
raise ChannelError('protocol-error',
message='no "port" or "unix" or other address option for channel')
logger.debug('SocketStreamChannel: connected to %s', label)
except OSError as error:
logger.info('SocketStreamChannel: connecting to %s failed: %s', label, error)
if isinstance(error, ConnectionRefusedError):
problem = 'not-found'
else:
problem = 'terminated'
raise ChannelError(problem, message=str(error)) from error
self.close_on_eof()
assert isinstance(transport, asyncio.Transport)
return transport
class SubprocessStreamChannel(ProtocolChannel, SubprocessProtocol):
payload = 'stream'
restrictions = (('spawn', None),)
def process_exited(self) -> None:
self.close_on_eof()
def _get_close_args(self) -> JsonObject:
assert isinstance(self._transport, SubprocessTransport)
args: JsonDict = {'exit-status': self._transport.get_returncode()}
stderr = self._transport.get_stderr()
if stderr is not None:
args['message'] = stderr
return args
def do_options(self, options):
window = get_object(options, 'window', WindowSize, None)
if window is not None:
self._transport.set_window_size(window)
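    # Example "open" options accepted below (a sketch; the command and
    # environment are hypothetical):
    #
    #     {"payload": "stream", "spawn": ["ls", "-l"], "directory": "/tmp",
    #      "err": "out", "environ": ["LC_ALL=C"]}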
async def create_transport(self, loop: asyncio.AbstractEventLoop, options: JsonObject) -> SubprocessTransport:
args = get_strv(options, 'spawn')
err = get_str(options, 'err', 'msg')
cwd = get_str(options, 'directory', '.')
pty = get_bool(options, 'pty', default=False)
window = get_object(options, 'window', WindowSize, None)
environ = get_strv(options, 'environ', [])
if err == 'out':
stderr = subprocess.STDOUT
elif err == 'ignore':
stderr = subprocess.DEVNULL
else:
stderr = subprocess.PIPE
env: Dict[str, str] = dict(os.environ)
try:
env.update(dict(e.split('=', 1) for e in environ))
except ValueError:
raise ChannelError('protocol-error', message='invalid "environ" option for stream channel') from None
try:
transport = SubprocessTransport(loop, self, args, pty=pty, window=window, env=env, cwd=cwd, stderr=stderr)
logger.debug('Spawned process args=%s pid=%i', args, transport.get_pid())
return transport
except FileNotFoundError as error:
raise ChannelError('not-found') from error
except PermissionError as error:
raise ChannelError('access-denied') from error
except OSError as error:
logger.info("Failed to spawn %s: %s", args, str(error))
raise ChannelError('internal-error') from error

View File

@ -1,46 +0,0 @@
# This file is part of Cockpit.
#
# Copyright (C) 2022 Red Hat, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import logging
from ..channel import Channel
logger = logging.getLogger(__name__)
class EchoChannel(Channel):
payload = 'echo'
def do_open(self, options):
self.ready()
def do_data(self, data):
self.send_data(data)
def do_done(self):
self.done()
self.close()
class NullChannel(Channel):
payload = 'null'
def do_open(self, options):
self.ready()
def do_close(self):
self.close()

View File

@ -1,89 +0,0 @@
# This file is part of Cockpit.
#
# Copyright (C) 2023 Red Hat, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import configparser
import logging
import os
from pathlib import Path
from cockpit._vendor.systemd_ctypes import bus
logger = logging.getLogger(__name__)
XDG_CONFIG_HOME = Path(os.getenv('XDG_CONFIG_HOME') or os.path.expanduser('~/.config'))
DOT_CONFIG_COCKPIT = XDG_CONFIG_HOME / 'cockpit'
def lookup_config(filename: str) -> Path:
config_dirs = os.environ.get('XDG_CONFIG_DIRS', '/etc').split(':')
fallback = None
for config_dir in config_dirs:
config_path = Path(config_dir, 'cockpit', filename)
if not fallback:
fallback = config_path
if config_path.exists():
logger.debug('lookup_config(%s): found %s', filename, config_path)
return config_path
# default to the first entry in XDG_CONFIG_DIRS; that's not according to the spec,
# but what Cockpit has done for years
logger.debug('lookup_config(%s): defaulting to %s', filename, fallback)
assert fallback # mypy; config_dirs always has at least one string
return fallback
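# For example (a sketch): with XDG_CONFIG_DIRS=/etc:/usr/local/etc,
# lookup_config('cockpit.conf') returns /etc/cockpit/cockpit.conf if that
# file exists, then /usr/local/etc/cockpit/cockpit.conf, and otherwise
# falls back to the (possibly non-existent) /etc/cockpit/cockpit.conf.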
class Config(bus.Object, interface='cockpit.Config'):
def __init__(self):
self.reload()
@bus.Interface.Method(out_types='s', in_types='ss')
def get_string(self, section, key):
try:
return self.config[section][key]
except KeyError as exc:
raise bus.BusError('cockpit.Config.KeyError', f'key {key} in section {section} does not exist') from exc
@bus.Interface.Method(out_types='u', in_types='ssuuu')
def get_u_int(self, section, key, default, maximum, minimum):
try:
value = self.config[section][key]
except KeyError:
return default
try:
int_val = int(value)
except ValueError:
logger.warning('cockpit.conf: [%s] %s is not an integer', section, key)
return default
return min(max(int_val, minimum), maximum)
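    # For example: a configured value of 900 with minimum=60 and maximum=600
    # is clamped to 600; a missing or non-integer value yields `default`.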
@bus.Interface.Method()
def reload(self):
self.config = configparser.ConfigParser(interpolation=None)
cockpit_conf = lookup_config('cockpit.conf')
logger.debug("cockpit.Config: loading %s", cockpit_conf)
# this may not exist, but it's ok to not have a config file and thus leave self.config empty
self.config.read(cockpit_conf)
class Environment(bus.Object, interface='cockpit.Environment'):
variables = bus.Interface.Property('a{ss}')
@variables.getter
def get_variables(self):
return os.environ.copy()

View File

@ -1,18 +0,0 @@
import sys
if sys.version_info >= (3, 11):
import importlib.resources
def read_cockpit_data_file(filename: str) -> bytes:
return (importlib.resources.files('cockpit.data') / filename).read_bytes()
else:
import importlib.abc
def read_cockpit_data_file(filename: str) -> bytes:
# https://github.com/python/mypy/issues/4182
loader = __loader__ # type: ignore[name-defined]
assert isinstance(loader, importlib.abc.ResourceLoader)
path = __file__.replace('__init__.py', filename)
return loader.get_data(path)
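# Usage example: read_cockpit_data_file('fail.html') returns the raw bytes
# of the fail.html resource shipped in the cockpit.data package.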

View File

@ -1 +0,0 @@
../../../src/common/fail.html

View File

@ -1,157 +0,0 @@
# This file is part of Cockpit.
#
# Copyright (C) 2022 Red Hat, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import asyncio
import glob
import grp
import json
import logging
import os
import pwd
from pathlib import Path
from typing import Dict, Optional
from cockpit._vendor.systemd_ctypes import Variant, bus, inotify, pathwatch
from . import config
logger = logging.getLogger(__name__)
class cockpit_LoginMessages(bus.Object):
messages: Optional[str] = None
def __init__(self):
fdstr = os.environ.pop('COCKPIT_LOGIN_MESSAGES_MEMFD', None)
if fdstr is None:
logger.debug("COCKPIT_LOGIN_MESSAGES_MEMFD wasn't set. No login messages today.")
return
logger.debug("Trying to read login messages from fd %s", fdstr)
try:
with open(int(fdstr), 'r') as login_messages:
login_messages.seek(0)
self.messages = login_messages.read()
except (ValueError, OSError, UnicodeDecodeError) as exc:
# ValueError - the envvar wasn't an int
# OSError - the fd wasn't open, or other read failure
# UnicodeDecodeError - didn't contain utf-8
# For all of these, we simply failed to get the message.
logger.debug("Reading login messages failed: %s", exc)
else:
logger.debug("Successfully read login messages: %s", self.messages)
@bus.Interface.Method(out_types=['s'])
def get(self):
return self.messages or '{}'
@bus.Interface.Method(out_types=[])
def dismiss(self):
self.messages = None
class cockpit_Machines(bus.Object):
path: Path
watch: pathwatch.PathWatch
pending_notify: Optional[asyncio.Handle]
# D-Bus implementation
machines = bus.Interface.Property('a{sa{sv}}')
@machines.getter
def get_machines(self) -> Dict[str, Dict[str, Variant]]:
results: Dict[str, Dict[str, Variant]] = {}
for filename in glob.glob(f'{self.path}/*.json'):
with open(filename) as fp:
try:
contents = json.load(fp)
except json.JSONDecodeError:
logger.warning('Invalid JSON in file %s. Ignoring.', filename)
continue
# merge
for hostname, attrs in contents.items():
results[hostname] = {key: Variant(value) for key, value in attrs.items()}
return results
@bus.Interface.Method(in_types=['s', 's', 'a{sv}'])
def update(self, filename: str, hostname: str, attrs: Dict[str, Variant]) -> None:
try:
with self.path.joinpath(filename).open() as fp:
contents = json.load(fp)
except json.JSONDecodeError as exc:
# Refuse to replace corrupted file
raise bus.BusError('cockpit.Machines.Error', f'File {filename} is in invalid format: {exc}.') from exc
except FileNotFoundError:
# But an empty file is an expected case
contents = {}
contents.setdefault(hostname, {}).update({key: value.value for key, value in attrs.items()})
self.path.mkdir(parents=True, exist_ok=True)
with open(self.path.joinpath(filename), 'w') as fp:
json.dump(contents, fp, indent=2)
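    # For example (a sketch with hypothetical names): update('99-webui.json',
    # 'web1', {'address': <Variant 's' '10.0.0.1'>}) leaves the file with
    # {"web1": {"address": "10.0.0.1"}} merged into any existing contents.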
def notify(self):
def _notify_now():
self.properties_changed('cockpit.Machines', {}, ['Machines'])
self.pending_notify = None
# avoid a flurry of update notifications
if self.pending_notify is None:
self.pending_notify = asyncio.get_running_loop().call_later(1.0, _notify_now)
# inotify events
def do_inotify_event(self, mask: inotify.Event, cookie: int, name: Optional[str]) -> None:
self.notify()
def do_identity_changed(self, fd: Optional[int], errno: Optional[int]) -> None:
self.notify()
def __init__(self):
self.path = config.lookup_config('machines.d')
# ignore the first callback
self.pending_notify = ...
self.watch = pathwatch.PathWatch(str(self.path), self)
self.pending_notify = None
class cockpit_User(bus.Object):
name = bus.Interface.Property('s', value='')
full = bus.Interface.Property('s', value='')
id = bus.Interface.Property('i', value=0)
home = bus.Interface.Property('s', value='')
shell = bus.Interface.Property('s', value='')
groups = bus.Interface.Property('as', value=[])
def __init__(self):
user = pwd.getpwuid(os.getuid())
self.name = user.pw_name
self.full = user.pw_gecos
self.id = user.pw_uid
self.home = user.pw_dir
self.shell = user.pw_shell
self.groups = [gr.gr_name for gr in grp.getgrall() if user.pw_name in gr.gr_mem]
EXPORTS = [
('/LoginMessages', cockpit_LoginMessages),
('/machines', cockpit_Machines),
('/user', cockpit_User),
]

View File

@ -1,180 +0,0 @@
# This file is part of Cockpit.
#
# Copyright (C) 2023 Red Hat, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from enum import Enum
from typing import Callable, Dict, List, Mapping, Optional, Sequence, Type, TypeVar, Union
JsonLiteral = Union[str, float, bool, None]
# immutable
JsonValue = Union['JsonObject', Sequence['JsonValue'], JsonLiteral]
JsonObject = Mapping[str, JsonValue]
# mutable
JsonDocument = Union['JsonDict', 'JsonList', JsonLiteral]
JsonDict = Dict[str, JsonDocument]
JsonList = List[JsonDocument]
DT = TypeVar('DT')
T = TypeVar('T')
class JsonError(Exception):
value: object
def __init__(self, value: object, msg: str):
super().__init__(msg)
self.value = value
def typechecked(value: JsonValue, expected_type: Type[T]) -> T:
"""Ensure a JSON value has the expected type, returning it if so."""
if not isinstance(value, expected_type):
raise JsonError(value, f'must have type {expected_type.__name__}')
return value
# We can't use None as a sentinel because it's often the actual default value
# EllipsisType is difficult because it's not available before 3.10.
# See https://peps.python.org/pep-0484/#support-for-singleton-types-in-unions
class _Empty(Enum):
TOKEN = 0
_empty = _Empty.TOKEN
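# A sketch of why the sentinel matters (illustrative, not part of the original
# file): None must remain usable as a real default, distinct from "no default":
#   get_str({}, 'user')        # raises JsonError: attribute 'user' required
#   get_str({}, 'user', None)  # returns None, a perfectly ordinary default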
def _get(obj: JsonObject, cast: Callable[[JsonValue], T], key: str, default: Union[DT, _Empty]) -> Union[T, DT]:
try:
return cast(obj[key])
except KeyError:
if default is not _empty:
return default
raise JsonError(obj, f"attribute '{key}' required") from None
except JsonError as exc:
target = f"attribute '{key}'" + (' elements:' if exc.value is not obj[key] else ':')
raise JsonError(obj, f"{target} {exc!s}") from exc
def get_bool(obj: JsonObject, key: str, default: Union[DT, _Empty] = _empty) -> Union[DT, bool]:
return _get(obj, lambda v: typechecked(v, bool), key, default)
def get_int(obj: JsonObject, key: str, default: Union[DT, _Empty] = _empty) -> Union[DT, int]:
return _get(obj, lambda v: typechecked(v, int), key, default)
def get_str(obj: JsonObject, key: str, default: Union[DT, _Empty] = _empty) -> Union[DT, str]:
return _get(obj, lambda v: typechecked(v, str), key, default)
def get_str_or_none(obj: JsonObject, key: str, default: Optional[str]) -> Optional[str]:
return _get(obj, lambda v: None if v is None else typechecked(v, str), key, default)
def get_dict(obj: JsonObject, key: str, default: Union[DT, _Empty] = _empty) -> Union[DT, JsonObject]:
return _get(obj, lambda v: typechecked(v, dict), key, default)
def get_object(
obj: JsonObject,
key: str,
constructor: Callable[[JsonObject], T],
default: Union[DT, _Empty] = _empty
) -> Union[DT, T]:
return _get(obj, lambda v: constructor(typechecked(v, dict)), key, default)
def get_strv(obj: JsonObject, key: str, default: Union[DT, _Empty] = _empty) -> Union[DT, Sequence[str]]:
def as_strv(value: JsonValue) -> Sequence[str]:
return tuple(typechecked(item, str) for item in typechecked(value, list))
return _get(obj, as_strv, key, default)
def get_objv(obj: JsonObject, key: str, constructor: Callable[[JsonObject], T]) -> Union[DT, Sequence[T]]:
def as_objv(value: JsonValue) -> Sequence[T]:
return tuple(constructor(typechecked(item, dict)) for item in typechecked(value, list))
return _get(obj, as_objv, key, ())
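# Accessor behaviour sketch (illustrative, not part of the original file):
#   get_str({'payload': 'echo'}, 'payload')   == 'echo'
#   get_strv({'env': ['A=1']}, 'env')         == ('A=1',)
#   get_int({'size': 'big'}, 'size')          # raises JsonError: attribute 'size': must have type int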
def create_object(message: 'JsonObject | None', kwargs: JsonObject) -> JsonObject:
"""Constructs a JSON object based on message and kwargs.
If only message is given, it is returned, unmodified. If message is None,
it is equivalent to an empty dictionary. A copy is always made.
If kwargs are present, then any underscore ('_') present in a key name is
rewritten to a dash ('-'). This is intended to bridge between the required
Python syntax when providing kwargs and idiomatic JSON (which uses '-' for
attributes). These values override values in message.
The idea is that `message` should be used for passing data along, and
kwargs used for data originating at a given call site, possibly including
modifications to an original message.
"""
result = dict(message or {})
for key, value in kwargs.items():
# rewrite '_' (necessary in Python syntax kwargs list) to '-' (idiomatic JSON)
json_key = key.replace('_', '-')
result[json_key] = value
return result
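# Behaviour sketch (illustrative, not part of the original file):
#   create_object({'channel': 'ch1'}, {'flow_control': True})
#   == {'channel': 'ch1', 'flow-control': True}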
def json_merge_patch(current: JsonObject, patch: JsonObject) -> JsonObject:
"""Perform a JSON merge patch (RFC 7396) using 'current' and 'patch'.
Neither of the original dictionaries is modified; the result is returned.
"""
# Always take a copy ('result') — we never modify the input ('current')
result = dict(current)
for key, patch_value in patch.items():
if isinstance(patch_value, Mapping):
current_value = current.get(key, None)
if not isinstance(current_value, Mapping):
current_value = {}
result[key] = json_merge_patch(current_value, patch_value)
elif patch_value is not None:
result[key] = patch_value
else:
result.pop(key, None)
return result
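# RFC 7396 behaviour sketch (illustrative, not part of the original file):
# null deletes, objects recurse, everything else replaces:
#   json_merge_patch({'a': 1, 'b': {'c': 2}}, {'b': {'c': None}, 'd': 3})
#   == {'a': 1, 'b': {}, 'd': 3}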
def json_merge_and_filter_patch(current: JsonDict, patch: JsonDict) -> None:
"""Perform a JSON merge patch (RFC 7396) modifying 'current' with 'patch'.
Also modifies 'patch' to remove redundant operations.
"""
for key, patch_value in tuple(patch.items()):
current_value = current.get(key, None)
if isinstance(patch_value, dict):
if not isinstance(current_value, dict):
current[key] = current_value = {}
json_merge_and_filter_patch(current_value, patch_value)
else:
json_merge_and_filter_patch(current_value, patch_value)
if not patch_value:
del patch[key]
elif current_value == patch_value:
del patch[key]
elif patch_value is not None:
current[key] = patch_value
else:
del current[key]

View File

@ -1,56 +0,0 @@
# COCKPIT BOOTLOADER
import os
import sys
from hashlib import sha256
class Bootloader:
version = None
checksum = None
source = None
def start(self, name, version, checksum, size):
self.version = version
self.checksum = checksum
cachedir = os.environ.get("XDG_CACHE_HOME") or os.path.expanduser("~/.cache")
filename = f"{cachedir}/{name}/{version}-{checksum}.py"
# step one: try to find a cached local copy
if self.source is None:
try:
with open(filename, "rb") as file:
data = file.read()
if sha256(data).hexdigest() == checksum:
self.source = data
except OSError:
pass
# step two: request from the sender
if self.source is None:
message = f"""\n{{"command":"need-script","sha256":"{checksum}","size":{size}}}\n"""
os.write(1, f"{len(message)}\n{message}".encode("ascii"))
data = b""
while len(data) < size:
data += os.read(0, size - len(data))
if sha256(data).hexdigest() == checksum:
self.source = data
else:
sys.exit("checksum of sent data is incorrect")
# step three: cache it locally (best effort)
try:
os.makedirs(f"{cachedir}/{name}", exist_ok=True)
with open(filename, "w+b") as file:
file.write(self.source)
except OSError:
pass
exec(self.source)
sys.exit(0)
BOOTLOADER = Bootloader()
BOOTLOADER.start("hello", "300", "a0c22dc5d16db10ca0e3d99859ffccb2b4d536b21d6788cfbe2d2cfac60e8117", 22)
# echo 'print("Hello world!")' | python3 bootloader.py

View File

@ -1,93 +0,0 @@
class CockpitBufferedProtocol(CockpitBaseProtocol, asyncio.BufferedProtocol):
"""A hopefully-reasonably-efficient implementation of the Cockpit frame protocol
We try to operate on a principle of finding a good balance between not
making too many small reads, and not making too many large copies.
Perfection is impossible here: in order to avoid all copies, we would need
to know the size of the frames in advance, and we can't know that without
doing too many small reads.
Most incoming messages to the bridge are high tens to low hundreds of
bytes, and they often come grouped together. We should be able to manage
the vast majority by reading in medium-sized chunks and processing multiple
frames in a single iteration. If we have a partial frame in a chunk, we
leave it for the next iteration (which is the only place we do copies).
A larger buffer allows processing more frames in a single read, and also
makes it less likely that we'll need to adjust the buffer for larger
frames, or copy data when we get flooded with lots of requests. It also
means that in the event that we *do* need to adjust the buffer or copy
data, we'll end up copying more. 4096 is picked out of thin air.
We handle large frame sizes by adjusting the buffer size for the next read,
so that we don't have to copy (most of) the content of large frames.
"""
min_buffer_size = None
max_buffer_size = None
buffer = None
in_buffer = 0
def __init__(self, buffer_size=4096):
self.min_buffer_size = buffer_size
self.max_buffer_size = buffer_size * 4
self.buffer = memoryview(bytearray(self.min_buffer_size))
self.in_buffer = 0
def get_buffer(self, sizehint=-1):
return self.buffer[self.in_buffer:]
def buffer_updated(self, nbytes):
self.in_buffer += nbytes
# This is all the data we have so far
view = self.buffer[:self.in_buffer]
# Try to consume some frames
consumed = 0
while (result := self.consume_one_frame(view)) > 0:
view = view[result:]
consumed += result
if not consumed and len(view) - result < len(self.buffer):
# This is the easy case: we didn't consume anything, and there's
# enough room in the buffer for the next packet.
return
if not view and len(self.buffer) <= self.max_buffer_size:
# There is no unconsumed data. That also implies that there is no
# size hint, because where would it have come from? If we're happy
# with the buffer size, then we can just reuse it.
self.in_buffer = 0
return
# Otherwise, the general case. We need to allocate a new buffer, and
# possibly copy the leftover data into it.
new_buffer_size = max(len(view) - result, self.min_buffer_size)
new_buffer = bytearray(new_buffer_size)
new_buffer[:len(view)] = view
self.buffer = memoryview(new_buffer)
self.in_buffer = len(view)
def eof_received(self):
raise ValueError('eof received')
class BufferedProtocolFeeder:
def __init__(self, fd, protocol, *args, **kwargs):
self.fd = fd
self.fileio = io.FileIO(self.fd, closefd=False)
self.listener = protocol(*args, **kwargs)
self.loop = asyncio.get_event_loop()
self.loop.add_reader(self.fd, self.ready)
# From BufferedProtocol docs:
# start -> CM [-> GB [-> BU?]]* [-> ER?] -> CL -> end
def ready(self):
buffer = self.listener.get_buffer()
n_bytes = self.fileio.readinto(buffer)
if n_bytes:
self.listener.buffer_updated(n_bytes)
else:
self.listener.eof_received()
self.loop.remove_reader(self.fd)
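# Usage sketch (illustrative, not part of the original file; CockpitBaseProtocol
# is defined elsewhere): feed fd 0 (stdin) into the buffered protocol:
#   loop = asyncio.new_event_loop()
#   asyncio.set_event_loop(loop)
#   feeder = BufferedProtocolFeeder(0, CockpitBufferedProtocol, buffer_size=8192)
#   loop.run_forever()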

View File

@ -1,248 +0,0 @@
# This file is part of Cockpit.
#
# Copyright (C) 2023 Red Hat, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import argparse
import ast
import json
import os
import pydoc
import readline # noqa: F401, side-effecting import
import shlex
import sys
import time
from typing import Any, BinaryIO, Iterable, Optional
class Printer:
output: BinaryIO
last_channel: int
def __init__(self, output=None):
self.last_channel = 0
self.output = output or sys.stdout.buffer
def data(self, channel: str, /, data: bytes) -> None:
"""Send raw data (byte string) on a channel"""
message = channel.encode('ascii') + b'\n' + data
frame = str(len(message)).encode('ascii') + b'\n' + message
self.output.write(frame)
self.output.flush()
def json(self, channel: str, /, **kwargs: object) -> None:
"""Send a json message (built from **kwargs) on a channel"""
self.data(channel, json.dumps(kwargs, indent=2).encode('utf-8') + b'\n')
def control(self, command: str, **kwargs: Any) -> None:
"""Send a control message, build from **kwargs"""
self.json('', command=command, **kwargs)
def init(self, host: str = 'localhost', version: int = 1, **kwargs: Any) -> None:
"""Send init. This is normally done automatically, but you can override it."""
self.control('init', host=host, version=version, **kwargs)
def open(self, payload: str, channel: Optional[str] = None, **kwargs: Any) -> str:
"""Opens a channel for the named payload. A channel name is generated if not provided."""
if channel is None:
self.last_channel += 1
channel = f'ch{self.last_channel}'
self.control('open', channel=channel, payload=payload, **kwargs)
return channel
def done(self, channel: Optional[str] = None, **kwargs: Any) -> None:
"""Sends a done command on the named channel, or the last opened channel."""
if channel is None:
channel = f'ch{self.last_channel}'
self.control('done', channel=channel, **kwargs)
def http(self,
path: str,
*,
method: str = 'GET',
done: bool = True,
channel: Optional[str] = None,
**kwargs: Any) -> None:
"""Open a http-stream2 channel. Sends a 'done' as well, unless done=False."""
self.open('http-stream2', path=path, method=method, channel=channel, **kwargs)
if done:
self.done()
def packages(self, path: str,
headers: Optional[dict[str, str]] = None,
channel: Optional[str] = None,
**kwargs: Any) -> None:
"""Request a file from the internal packages webserver"""
# The packages webserver requires these for computing the content security policy
our_headers = {'X-Forwarded-Proto': 'https', 'X-Forwarded-Host': 'localhost'}
if headers is not None:
our_headers.update(headers)
# mypy is right: kwargs could include `done` or `method`, but codifying that is really awkward
self.http(path, internal='packages', channel=channel, headers=our_headers, **kwargs) # type: ignore[arg-type]
def spawn(self, *args: str, channel: Optional[str] = None, **kwargs: object) -> None:
"""Open a stream channel with a spawned command"""
self.open('stream', spawn=args, channel=channel, **kwargs)
def dbus_open(self, channel: Optional[str] = None, bus: str = 'internal', **kwargs: Any) -> str:
return self.open('dbus-json3', channel=channel, bus=bus, **kwargs)
def dbus_call(
self, *args: object, channel: Optional[str] = None, bus: str = 'internal', **kwargs: Any
) -> None:
if channel is None:
channel = self.dbus_open(bus=bus, **kwargs)
self.json(channel, call=[*args], id=1)
def packages_reload(self, channel: Optional[str] = None) -> None:
self.dbus_call('/packages', 'cockpit.Packages', 'Reload', [], channel=channel)
def fsinfo(self, path: str, *attrs: str, watch: bool = True, **kwargs: Any) -> None:
self.open('fsinfo', path=path, attrs=attrs, watch=watch, **kwargs)
def help(self) -> None:
"""Show help"""
sys.stderr.write("""
Prints cockpit frames according to given commands.
Each method has a name (the first argument) and 0 or more positional and keyword arguments.
Positional arguments are given as commandline arguments in the usual way. They
will be parsed as Python expressions. If that fails, and the argument looks
"simple" they will be treated as literal strings. This is helpful to avoid
having to double-escape simple things.
Keyword arguments are specified by prepending a positional argument with an
identifier and an `=` character.
A single ':' argument allows specifying multiple commands.
Supported methods are as follows:
""")
doc = pydoc.TextDoc()
for name, value in Printer.__dict__.items():
if name.startswith('_'):
continue
sys.stderr.write(doc.indent(doc.docroutine(value), ' ') + '\n')
sys.stderr.write("""Some examples:
python3 -m cockpit.misc.print --no-init open null
python3 -m cockpit.misc.print open echo channel=x : data x "b'foo'" : done x | python3 -m cockpit.bridge
python3 -m cockpit.misc.print packages /manifests.js | python3 -m cockpit.bridge
... etc
""")
def wait(self) -> None:
"""Wait for [Enter] on stdin"""
sys.stdin.readline()
def sleep(self, seconds: float) -> None:
"""Sleep for a number of seconds"""
time.sleep(seconds)
def split_commands(args: list[str]) -> Iterable[list[str]]:
"""split args on ':' items, yielding sub-lists"""
while ':' in args:
colon = args.index(':')
yield args[:colon]
args = args[colon + 1:]
yield args
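# Behaviour sketch (illustrative, not part of the original file):
#   list(split_commands(['open', 'echo', ':', 'done']))
#   == [['open', 'echo'], ['done']]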
def get_commands(args: list[str]) -> Iterable[list[str]]:
"""splits args on ':', yielding sub-lists and replacing '-' with input from stdin"""
for command in split_commands(args):
if command == ['-']:
# read commands from stdin
try:
while True:
# try to print the prompt after output from the bridge
time.sleep(0.2)
yield shlex.split(input('cockpit.print> '))
except EOFError:
pass
else:
yield command
def main() -> None:
parser = argparse.ArgumentParser()
parser.add_argument('--no-wait', action='store_true',
help="Don't for [Enter] after printing, before exit")
parser.add_argument('--no-init', action='store_true',
help="Don't send an init message")
parser.add_argument('command', nargs='*', default=['-'],
help="The command to invoke: try 'help'")
args = parser.parse_args()
# "The Usual Tricks"
# Our original stdout is where we need to send our messages, but in case we
# use readline, we need stdout to be attached to the user's terminal. We
# do this by duping the pipe and reopening stdout from /dev/tty.
output = open(os.dup(1), 'wb')
os.dup2(os.open('/dev/tty', os.O_WRONLY), 1)
printer = Printer(output)
need_init = not args.no_init
# Invoke the commands
for command in get_commands(args.command):
if need_init and command[0] not in ['help', 'init']:
printer.init()
need_init = False
positional: list[object] = []
kwargs: dict[str, object] = {}
func = getattr(printer, command[0])
for param in command[1:]:
left, eq, right = param.partition('=')
# Does that look like a kwarg?
if eq and left.replace('-', '_').isidentifier():
key = left
param = right
else:
key = None
# Parse the value, else take it as a literal if it's simple enough
try:
value = ast.literal_eval(param)
except (SyntaxError, ValueError):
if any(c in param for c in '\'":;<>,|\\(){}[]`~!@#$%^&='):
raise
else:
value = param
if key is not None:
kwargs[key] = value
else:
positional.append(value)
func(*positional, **kwargs)
if __name__ == '__main__':
main()

View File

@ -1,580 +0,0 @@
# This file is part of Cockpit.
#
# Copyright (C) 2022 Red Hat, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import collections
import contextlib
import functools
import gzip
import io
import itertools
import json
import logging
import mimetypes
import os
import re
import shutil
from pathlib import Path
from typing import (
BinaryIO,
Callable,
ClassVar,
Dict,
Iterable,
List,
NamedTuple,
Optional,
Pattern,
Sequence,
Tuple,
TypeVar,
)
from cockpit._vendor.systemd_ctypes import bus
from . import config
from ._version import __version__
from .jsonutil import (
JsonError,
JsonObject,
JsonValue,
get_bool,
get_dict,
get_int,
get_objv,
get_str,
get_strv,
json_merge_patch,
typechecked,
)
logger = logging.getLogger(__name__)
# In practice, this is going to get called over and over again with exactly the
# same list. Let's try to cache the result.
@functools.lru_cache()
def parse_accept_language(accept_language: str) -> Sequence[str]:
"""Parse the Accept-Language header, if it exists.
Returns an ordered list of languages, with fallbacks inserted, and
truncated to the position where 'en' would have otherwise appeared, if
applicable.
https://tools.ietf.org/html/rfc7231#section-5.3.5
https://datatracker.ietf.org/doc/html/rfc4647#section-3.4
"""
logger.debug('parse_accept_language(%r)', accept_language)
locales_with_q = []
for entry in accept_language.split(','):
entry = entry.strip().lower()
logger.debug(' entry %r', entry)
locale, _, qstr = entry.partition(';q=')
try:
q = float(qstr or 1.0)
except ValueError:
continue # ignore malformed entry
while locale:
logger.debug(' adding %r q=%r', locale, q)
locales_with_q.append((locale, q))
# strip off '-detail' suffixes until there's nothing left
locale, _, _region = locale.rpartition('-')
# Sort the list by highest q value. Otherwise, this is a stable sort.
locales_with_q.sort(key=lambda pair: pair[1], reverse=True)
logger.debug(' sorted list is %r', locales_with_q)
# If we have 'en' anywhere in our list, ignore it and all items after it.
# This will result in us getting an untranslated (ie: English) version if
# none of the more-preferred languages are found, which is what we want.
# We also take the chance to drop duplicate items. Note: both of these
# things need to happen after sorting.
results = []
for locale, _q in locales_with_q:
if locale == 'en':
break
if locale not in results:
results.append(locale)
logger.debug(' results list is %r', results)
return tuple(results)
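# Behaviour sketch (illustrative, not part of the original file): q-values sort
# first, fallbacks are inserted, and 'en' truncates the list:
#   parse_accept_language('de-CH;q=0.8, es, en;q=0.5')
#   == ('es', 'de-ch', 'de')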
def sortify_version(version: str) -> str:
"""Convert a version string to a form that can be compared"""
# 0-pad each numeric component. Only supports numeric versions like 1.2.3.
return '.'.join(part.zfill(8) for part in version.split('.'))
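# Behaviour sketch (illustrative, not part of the original file): zero-padding
# makes string comparison agree with numeric ordering:
#   sortify_version('1.10') == '00000001.00000010'
#   sortify_version('1.10') > sortify_version('1.9')   # unlike plain '1.10' > '1.9'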
@functools.lru_cache()
def get_libexecdir() -> str:
"""Detect libexecdir on current machine
This only works for systems which have cockpit-ws installed.
"""
for candidate in ['/usr/local/libexec', '/usr/libexec', '/usr/local/lib/cockpit', '/usr/lib/cockpit']:
if os.path.exists(os.path.join(candidate, 'cockpit-askpass')):
return candidate
else:
logger.warning('Could not detect libexecdir')
# give readable error messages
return '/nonexistent/libexec'
# HACK: Type narrowing over Union types is not supported in the general case,
# but this works for the case we care about: knowing that when we pass in an
# JsonObject, we'll get an JsonObject back.
J = TypeVar('J', JsonObject, JsonValue)
def patch_libexecdir(obj: J) -> J:
if isinstance(obj, str):
if '${libexecdir}/cockpit-askpass' in obj:
# extra-special case: we handle this internally
abs_askpass = shutil.which('cockpit-askpass')
if abs_askpass is not None:
return obj.replace('${libexecdir}/cockpit-askpass', abs_askpass)
return obj.replace('${libexecdir}', get_libexecdir())
elif isinstance(obj, dict):
return {key: patch_libexecdir(value) for key, value in obj.items()}
elif isinstance(obj, list):
return [patch_libexecdir(item) for item in obj]
else:
return obj
# A document is a binary stream with a Content-Type, optional Content-Encoding,
# and optional Content-Security-Policy
class Document(NamedTuple):
data: BinaryIO
content_type: str
content_encoding: Optional[str] = None
content_security_policy: Optional[str] = None
class PackagesListener:
def packages_loaded(self) -> None:
"""Called when the packages have been reloaded"""
class BridgeConfig(dict, JsonObject):
def __init__(self, value: JsonObject):
super().__init__(value)
self.label = get_str(self, 'label', None)
self.privileged = get_bool(self, 'privileged', default=False)
self.match = get_dict(self, 'match', {})
if not self.privileged and not self.match:
raise JsonError(value, 'must have match rules or be privileged')
self.environ = get_strv(self, 'environ', ())
self.spawn = get_strv(self, 'spawn')
if not self.spawn:
raise JsonError(value, 'spawn vector must be non-empty')
self.name = self.label or self.spawn[0]
class Condition:
def __init__(self, value: JsonObject):
try:
(self.name, self.value), = value.items()
except ValueError as exc:
raise JsonError(value, 'must contain exactly one key/value pair') from exc
class Manifest(dict, JsonObject):
# Skip version check when running out of the git checkout (__version__ is None)
COCKPIT_VERSION = __version__ and sortify_version(__version__)
def __init__(self, path: Path, value: JsonObject):
super().__init__(value)
self.path = path
self.name = get_str(self, 'name', self.path.name)
self.bridges = get_objv(self, 'bridges', BridgeConfig)
self.priority = get_int(self, 'priority', 1)
self.csp = get_str(self, 'content-security-policy', '')
self.conditions = get_objv(self, 'conditions', Condition)
# Skip version check when running out of the git checkout (COCKPIT_VERSION is None)
if self.COCKPIT_VERSION is not None:
requires = get_dict(self, 'requires', {})
for name, version in requires.items():
if name != 'cockpit':
raise JsonError(name, 'non-cockpit requirement listed')
if sortify_version(typechecked(version, str)) > self.COCKPIT_VERSION:
raise JsonError(version, f'required cockpit version ({version}) not met')
class Package:
# For po{,.manifest}.js files, the interesting part is the locale name
PO_JS_RE: ClassVar[Pattern] = re.compile(r'(po|po\.manifest)\.([^.]+)\.js(\.gz)?')
# immutable after __init__
manifest: Manifest
name: str
path: Path
priority: int
# computed later
translations: Optional[Dict[str, Dict[str, str]]] = None
files: Optional[Dict[str, str]] = None
def __init__(self, manifest: Manifest):
self.manifest = manifest
self.name = manifest.name
self.path = manifest.path
self.priority = manifest.priority
def ensure_scanned(self) -> None:
"""Ensure that the package has been scanned.
This allows us to defer scanning the files of the package until we know
that we'll actually use it.
"""
if self.files is not None:
return
self.files = {}
self.translations = {'po.js': {}, 'po.manifest.js': {}}
for file in self.path.rglob('*'):
name = str(file.relative_to(self.path))
if name in ['.', '..', 'manifest.json']:
continue
po_match = Package.PO_JS_RE.fullmatch(name)
if po_match:
basename = po_match.group(1)
locale = po_match.group(2)
# Accept-Language is case-insensitive and uses '-' to separate variants
lower_locale = locale.lower().replace('_', '-')
logger.debug('Adding translation %r %r -> %r', basename, lower_locale, name)
self.translations[f'{basename}.js'][lower_locale] = name
else:
# strip out trailing '.gz' components
basename = re.sub(r'\.gz$', '', name)
logger.debug('Adding content %r -> %r', basename, name)
self.files[basename] = name
# If we see a filename like `x.min.js` we want to also offer it
# at `x.js`, but only if `x.js(.gz)` itself is not present.
# Note: this works for both the case where we found the `x.js`
# first (it's already in the map) and also if we find it second
# (it will be replaced in the map by the line just above).
# See https://github.com/cockpit-project/cockpit/pull/19716
self.files.setdefault(basename.replace('.min.', '.'), name)
# support old cockpit-po-plugin which didn't write po.manifest.??.js
if not self.translations['po.manifest.js']:
self.translations['po.manifest.js'] = self.translations['po.js']
def get_content_security_policy(self) -> str:
policy = {
"default-src": "'self'",
"connect-src": "'self'",
"form-action": "'self'",
"base-uri": "'self'",
"object-src": "'none'",
"font-src": "'self' data:",
"img-src": "'self' data:",
}
for item in self.manifest.csp.split(';'):
item = item.strip()
if item:
key, _, value = item.strip().partition(' ')
policy[key] = value
return ' '.join(f'{k} {v};' for k, v in policy.items()) + ' block-all-mixed-content'
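# Result sketch (illustrative, not part of the original file): a manifest with
#   "content-security-policy": "connect-src 'self' ws:"
# overrides only that directive, yielding roughly
#   "default-src 'self'; connect-src 'self' ws:; ...; block-all-mixed-content"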
def load_file(self, filename: str) -> Document:
content_type, content_encoding = mimetypes.guess_type(filename)
content_security_policy = None
if content_type is None:
content_type = 'text/plain'
elif content_type.startswith('text/html'):
content_security_policy = self.get_content_security_policy()
path = self.path / filename
logger.debug(' loading data from %s', path)
return Document(path.open('rb'), content_type, content_encoding, content_security_policy)
def load_translation(self, path: str, locales: Sequence[str]) -> Document:
self.ensure_scanned()
assert self.translations is not None
# First match wins
for locale in locales:
with contextlib.suppress(KeyError):
return self.load_file(self.translations[path][locale])
# We prefer to return an empty document than 404 in order to avoid
# errors in the console when a translation can't be found
return Document(io.BytesIO(), 'text/javascript')
def load_path(self, path: str, headers: JsonObject) -> Document:
self.ensure_scanned()
assert self.files is not None
assert self.translations is not None
if path in self.translations:
locales = parse_accept_language(get_str(headers, 'Accept-Language', ''))
return self.load_translation(path, locales)
else:
return self.load_file(self.files[path])
class PackagesLoader:
CONDITIONS: ClassVar[Dict[str, Callable[[str], bool]]] = {
'path-exists': os.path.exists,
'path-not-exists': lambda p: not os.path.exists(p),
}
@classmethod
def get_xdg_data_dirs(cls) -> Iterable[str]:
try:
yield os.environ['XDG_DATA_HOME']
except KeyError:
yield os.path.expanduser('~/.local/share')
try:
yield from os.environ['XDG_DATA_DIRS'].split(':')
except KeyError:
yield from ('/usr/local/share', '/usr/share')
@classmethod
def patch_manifest(cls, manifest: JsonObject, parent: Path) -> JsonObject:
override_files = [
parent / 'override.json',
config.lookup_config(f'{parent.name}.override.json'),
config.DOT_CONFIG_COCKPIT / f'{parent.name}.override.json',
]
for override_file in override_files:
try:
override: JsonValue = json.loads(override_file.read_bytes())
except FileNotFoundError:
continue
except json.JSONDecodeError as exc:
# User input error: report a warning
logger.warning('%s: %s', override_file, exc)
continue
if not isinstance(override, dict):
logger.warning('%s: override file is not a dictionary', override_file)
continue
manifest = json_merge_patch(manifest, override)
return patch_libexecdir(manifest)
@classmethod
def load_manifests(cls) -> Iterable[Manifest]:
for datadir in cls.get_xdg_data_dirs():
logger.debug("Scanning for manifest files under %s", datadir)
for file in Path(datadir).glob('cockpit/*/manifest.json'):
logger.debug("Considering file %s", file)
try:
manifest = json.loads(file.read_text())
except json.JSONDecodeError as exc:
logger.error("%s: %s", file, exc)
continue
if not isinstance(manifest, dict):
logger.error("%s: json document isn't an object", file)
continue
parent = file.parent
manifest = cls.patch_manifest(manifest, parent)
try:
yield Manifest(parent, manifest)
except JsonError as exc:
logger.warning('%s %s', file, exc)
def check_condition(self, condition: str, value: object) -> bool:
check_fn = self.CONDITIONS[condition]
# All known predicates currently only work on strings
if not isinstance(value, str):
return False
return check_fn(value)
def check_conditions(self, manifest: Manifest) -> bool:
for condition in manifest.conditions:
try:
okay = self.check_condition(condition.name, condition.value)
except KeyError:
# do *not* ignore manifests with unknown predicates, for forward compatibility
logger.warning(' %s: ignoring unknown predicate in manifest: %s', manifest.path, condition.name)
continue
if not okay:
logger.debug(' hiding package %s as its %s condition is not met', manifest.path, condition)
return False
return True
def load_packages(self) -> Iterable[Tuple[str, Package]]:
logger.debug('Scanning for available package manifests:')
# Sort all available packages into buckets by to their claimed name
names: Dict[str, List[Manifest]] = collections.defaultdict(list)
for manifest in self.load_manifests():
logger.debug(' %s/manifest.json', manifest.path)
names[manifest.name].append(manifest)
logger.debug('done.')
logger.debug('Selecting packages to serve:')
for name, candidates in names.items():
# For each package name, iterate the candidates in descending
# priority order and select the first one which passes all checks
for candidate in sorted(candidates, key=lambda manifest: manifest.priority, reverse=True):
try:
if self.check_conditions(candidate):
logger.debug(' creating package %s -> %s', name, candidate.path)
yield name, Package(candidate)
break
except JsonError:
logger.warning(' %s: ignoring package with invalid manifest file', candidate.path)
logger.debug(' ignoring %s: unmet conditions', candidate.path)
logger.debug('done.')
class Packages(bus.Object, interface='cockpit.Packages'):
loader: PackagesLoader
listener: Optional[PackagesListener]
packages: Dict[str, Package]
saw_first_reload_hint: bool
def __init__(self, listener: Optional[PackagesListener] = None, loader: Optional[PackagesLoader] = None):
self.listener = listener
self.loader = loader or PackagesLoader()
self.load()
# Reloading the Shell in the browser should reload the
# packages. This is implemented by having the Shell call
# reload_hint whenever it starts. The first call of this
# method in each session is ignored so that packages are not
# loaded twice right after logging in.
#
self.saw_first_reload_hint = False
def load(self) -> None:
self.packages = dict(self.loader.load_packages())
self.manifests = json.dumps({name: dict(package.manifest) for name, package in self.packages.items()})
logger.debug('Packages loaded: %s', list(self.packages))
def show(self):
for name in sorted(self.packages):
package = self.packages[name]
menuitems = []
for entry in itertools.chain(
package.manifest.get('menu', {}).values(),
package.manifest.get('tools', {}).values()):
with contextlib.suppress(KeyError):
menuitems.append(entry['label'])
print(f'{name:20} {", ".join(menuitems):40} {package.path}')
def get_bridge_configs(self) -> Sequence[BridgeConfig]:
def yield_configs():
for package in sorted(self.packages.values(), key=lambda package: -package.priority):
yield from package.manifest.bridges
return tuple(yield_configs())
# D-Bus Interface
manifests = bus.Interface.Property('s', value="{}")
@bus.Interface.Method()
def reload(self):
self.load()
if self.listener is not None:
self.listener.packages_loaded()
@bus.Interface.Method()
def reload_hint(self):
if self.saw_first_reload_hint:
self.reload()
self.saw_first_reload_hint = True
def load_manifests_js(self, headers: JsonObject) -> Document:
logger.debug('Serving /manifests.js')
chunks: List[bytes] = []
# Send the translations required for the manifest files, from each package
locales = parse_accept_language(get_str(headers, 'Accept-Language', ''))
for name, package in self.packages.items():
if name in ['static', 'base1']:
continue
# load_translation always returns a document, falling back to an empty one
translation = package.load_translation('po.manifest.js', locales)
with translation.data:
if translation.content_encoding == 'gzip':
data = gzip.decompress(translation.data.read())
else:
data = translation.data.read()
chunks.append(data)
chunks.append(b"""
(function (root, data) {
if (typeof define === 'function' && define.amd) {
define(data);
}
if (typeof cockpit === 'object') {
cockpit.manifests = data;
} else {
root.manifests = data;
}
}(this, """ + self.manifests.encode() + b"""))""")
return Document(io.BytesIO(b'\n'.join(chunks)), 'text/javascript')
def load_manifests_json(self) -> Document:
logger.debug('Serving /manifests.json')
return Document(io.BytesIO(self.manifests.encode()), 'application/json')
PATH_RE = re.compile(
r'/' # leading '/'
r'(?:([^/]+)/)?' # optional leading path component
r'((?:[^/]+/)*[^/]+)' # remaining path components
)
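# Matching sketch (illustrative, not part of the original file):
#   PATH_RE.fullmatch('/base1/cockpit.js').groups()  == ('base1', 'cockpit.js')
#   PATH_RE.fullmatch('/manifests.js').groups()      == (None, 'manifests.js')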
def load_path(self, path: str, headers: JsonObject) -> Document:
logger.debug('packages: serving %s', path)
match = self.PATH_RE.fullmatch(path)
if match is None:
raise ValueError(f'Invalid HTTP path {path}')
packagename, filename = match.groups()
if packagename is not None:
return self.packages[packagename].load_path(filename, headers)
elif filename == 'manifests.js':
return self.load_manifests_js(headers)
elif filename == 'manifests.json':
return self.load_manifests_json()
else:
raise KeyError

View File

@ -1,330 +0,0 @@
# This file is part of Cockpit.
#
# Copyright (C) 2022 Red Hat, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import asyncio
import logging
import os
from typing import Callable, List, Optional, Sequence
from .jsonutil import JsonObject, JsonValue
from .packages import BridgeConfig
from .protocol import CockpitProblem, CockpitProtocol, CockpitProtocolError
from .router import Endpoint, Router, RoutingRule
from .transports import SubprocessProtocol, SubprocessTransport
logger = logging.getLogger(__name__)
class PeerError(CockpitProblem):
pass
class PeerExited(Exception):
def __init__(self, exit_code: int):
self.exit_code = exit_code
class Peer(CockpitProtocol, SubprocessProtocol, Endpoint):
done_callbacks: List[Callable[[], None]]
init_future: Optional[asyncio.Future]
def __init__(self, router: Router):
super().__init__(router)
# All Peers start out frozen — we only unfreeze after we see the first 'init' message
self.freeze_endpoint()
self.init_future = asyncio.get_running_loop().create_future()
self.done_callbacks = []
# Initialization
async def do_connect_transport(self) -> None:
raise NotImplementedError
async def spawn(self, argv: Sequence[str], env: Sequence[str], **kwargs) -> asyncio.Transport:
# Not actually async...
loop = asyncio.get_running_loop()
user_env = dict(e.split('=', 1) for e in env)
return SubprocessTransport(loop, self, argv, env=dict(os.environ, **user_env), **kwargs)
async def start(self, init_host: Optional[str] = None, **kwargs: JsonValue) -> JsonObject:
"""Request that the Peer is started and connected to the router.
Creates the transport, connects it to the protocol, and participates in
exchanging of init messages. If anything goes wrong, the connection
will be closed and an exception will be raised.
The Peer starts out in a frozen state (ie: attempts to send messages to
it will initially be queued). If init_host is not None then an init
message is sent with the given 'host' field, plus any extra kwargs, and
the queue is thawed. Otherwise, the caller is responsible for sending
the init message and thawing the peer.
In any case, the return value is the init message from the peer.
"""
assert self.init_future is not None
def _connect_task_done(task: asyncio.Task) -> None:
assert task is connect_task
try:
task.result()
except asyncio.CancelledError: # we did that (below)
pass # we want to ignore it
except Exception as exc:
self.close(exc)
connect_task = asyncio.create_task(self.do_connect_transport())
connect_task.add_done_callback(_connect_task_done)
try:
# Wait for something to happen:
# - exception from our connection function
# - receiving "init" from the other side
# - receiving EOF from the other side
# - .close() was called
# - other transport exception
init_message = await self.init_future
except (PeerExited, BrokenPipeError):
# These are fairly generic errors. PeerExited means that we observed the process exiting.
# BrokenPipeError means that we got EPIPE when attempting to write() to it. In both cases,
# the process is gone, but it's not clear why. If the connection process is still running,
# perhaps we'd get a better error message from it.
await connect_task
# Otherwise, re-raise
raise
finally:
self.init_future = None
# In any case (failure or success) make sure this is done.
if not connect_task.done():
connect_task.cancel()
if init_host is not None:
logger.debug(' sending init message back, host %s', init_host)
# Send "init" back
self.write_control(None, command='init', version=1, host=init_host, **kwargs)
# Thaw the queued messages
self.thaw_endpoint()
return init_message
# Background initialization
def start_in_background(self, init_host: Optional[str] = None, **kwargs: JsonValue) -> None:
def _start_task_done(task: asyncio.Task) -> None:
assert task is start_task
try:
task.result()
except (OSError, PeerExited, CockpitProblem, asyncio.CancelledError):
pass # Those are expected. Others will throw.
start_task = asyncio.create_task(self.start(init_host, **kwargs))
start_task.add_done_callback(_start_task_done)
# Shutdown
def add_done_callback(self, callback: Callable[[], None]) -> None:
self.done_callbacks.append(callback)
# Handling of interesting events
def do_superuser_init_done(self) -> None:
pass
def do_authorize(self, message: JsonObject) -> None:
pass
def transport_control_received(self, command: str, message: JsonObject) -> None:
if command == 'init' and self.init_future is not None:
logger.debug('Got init message with active init_future. Setting result.')
self.init_future.set_result(message)
elif command == 'authorize':
self.do_authorize(message)
elif command == 'superuser-init-done':
self.do_superuser_init_done()
else:
raise CockpitProtocolError(f'Received unexpected control message {command}')
def eof_received(self) -> bool:
# We always expect to be the ones to close the connection, so if we get
# an EOF, then we consider it to be an error. This allows us to
# distinguish close caused by unexpected EOF (but no errno from a
# syscall failure) vs. close caused by calling .close() on our side.
# The process is still running at this point, so keep it and handle
# the error in process_exited().
logger.debug('Peer %s received unexpected EOF', self.__class__.__name__)
return True
def do_closed(self, exc: Optional[Exception]) -> None:
logger.debug('Peer %s connection lost %s %s', self.__class__.__name__, type(exc), exc)
if exc is None:
self.shutdown_endpoint(problem='terminated')
elif isinstance(exc, PeerExited):
# a common case is that the called peer does not exist
if exc.exit_code == 127:
self.shutdown_endpoint(problem='no-cockpit')
else:
self.shutdown_endpoint(problem='terminated', message=f'Peer exited with status {exc.exit_code}')
elif isinstance(exc, CockpitProblem):
self.shutdown_endpoint(exc.attrs)
else:
self.shutdown_endpoint(problem='internal-error',
message=f"[{exc.__class__.__name__}] {exc!s}")
# If .start() is running, we need to make sure it stops running,
# raising the correct exception.
if self.init_future is not None and not self.init_future.done():
if exc is not None:
self.init_future.set_exception(exc)
else:
self.init_future.cancel()
for callback in self.done_callbacks:
callback()
def process_exited(self) -> None:
assert isinstance(self.transport, SubprocessTransport)
logger.debug('Peer %s exited, status %d', self.__class__.__name__, self.transport.get_returncode())
returncode = self.transport.get_returncode()
assert isinstance(returncode, int)
self.close(PeerExited(returncode))
# Forwarding data: from the peer to the router
def channel_control_received(self, channel: str, command: str, message: JsonObject) -> None:
if self.init_future is not None:
raise CockpitProtocolError('Received unexpected channel control message before init')
self.send_channel_control(channel, command, message)
def channel_data_received(self, channel: str, data: bytes) -> None:
if self.init_future is not None:
raise CockpitProtocolError('Received unexpected channel data before init')
self.send_channel_data(channel, data)
# Forwarding data: from the router to the peer
def do_channel_control(self, channel: str, command: str, message: JsonObject) -> None:
assert self.init_future is None
self.write_control(message)
def do_channel_data(self, channel: str, data: bytes) -> None:
assert self.init_future is None
self.write_channel_data(channel, data)
def do_kill(self, host: 'str | None', group: 'str | None', message: JsonObject) -> None:
assert self.init_future is None
self.write_control(message)
def do_close(self) -> None:
self.close()
class ConfiguredPeer(Peer):
config: BridgeConfig
args: Sequence[str]
env: Sequence[str]
def __init__(self, router: Router, config: BridgeConfig):
self.config = config
self.args = config.spawn
self.env = config.environ
super().__init__(router)
async def do_connect_transport(self) -> None:
await self.spawn(self.args, self.env)
class PeerRoutingRule(RoutingRule):
config: BridgeConfig
match: JsonObject
peer: Optional[Peer]
def __init__(self, router: Router, config: BridgeConfig):
super().__init__(router)
self.config = config
self.match = config.match
self.peer = None
def apply_rule(self, options: JsonObject) -> Optional[Peer]:
# Check that we match
for key, value in self.match.items():
if key not in options:
logger.debug(' rejecting because key %s is missing', key)
return None
if value is not None and options[key] != value:
logger.debug(' rejecting because key %s has wrong value %s (vs %s)', key, options[key], value)
return None
# Start the peer if it's not running already
if self.peer is None:
self.peer = ConfiguredPeer(self.router, self.config)
self.peer.add_done_callback(self.peer_closed)
assert self.router.init_host
self.peer.start_in_background(init_host=self.router.init_host)
return self.peer
def peer_closed(self):
self.peer = None
def shutdown(self):
if self.peer is not None:
self.peer.close()
class PeersRoutingRule(RoutingRule):
rules: List[PeerRoutingRule] = []
def apply_rule(self, options: JsonObject) -> Optional[Endpoint]:
logger.debug(' considering %d rules', len(self.rules))
for rule in self.rules:
logger.debug(' considering %s', rule.config.name)
endpoint = rule.apply_rule(options)
if endpoint is not None:
logger.debug(' selected')
return endpoint
logger.debug(' no peer rules matched')
return None
def set_configs(self, bridge_configs: Sequence[BridgeConfig]) -> None:
old_rules = self.rules
self.rules = []
for config in bridge_configs:
# Those are handled elsewhere...
if config.privileged or 'host' in config.match:
continue
# Try to reuse an existing rule, if one exists...
for rule in list(old_rules):
if rule.config == config:
old_rules.remove(rule)
break
else:
# ... otherwise, create a new one.
rule = PeerRoutingRule(self.router, config)
self.rules.append(rule)
# close down the old rules that didn't get reclaimed
for rule in old_rules:
rule.shutdown()
def shutdown(self):
for rule in self.rules:
rule.shutdown()

View File

@ -1,171 +0,0 @@
# This file is part of Cockpit.
#
# Copyright (C) 2023 Red Hat, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import asyncio
import locale
import logging
import os
import pwd
from typing import Dict, List, Sequence, Tuple
from cockpit._vendor.ferny import AskpassHandler
from cockpit._vendor.systemd_ctypes import Variant, bus
# that path is valid on at least Debian, Fedora/RHEL, and Arch
HELPER_PATH = '/usr/lib/polkit-1/polkit-agent-helper-1'
AGENT_DBUS_PATH = '/PolkitAgent'
logger = logging.getLogger(__name__)
Identity = Tuple[str, Dict[str, Variant]]
# https://www.freedesktop.org/software/polkit/docs/latest/eggdbus-interface-org.freedesktop.PolicyKit1.AuthenticationAgent.html
# Note that we don't implement the CancelAuthentication() API. pkexec gets called in a way that has no opportunity to
# cancel an ongoing authentication from the pkexec side. On the UI side cancellation is implemented via the standard
# asyncio process mechanism. If we ever need CancelAuthentication(), we could keep a cookie → get_current_task()
mapping, but that method is not available on Python 3.6.
class org_freedesktop_PolicyKit1_AuthenticationAgent(bus.Object):
def __init__(self, responder: AskpassHandler):
super().__init__()
self.responder = responder
# confusingly named: this actually does the whole authentication dialog, see docs
@bus.Interface.Method('', ['s', 's', 's', 'a{ss}', 's', 'a(sa{sv})'])
async def begin_authentication(self, action_id: str, message: str, icon_name: str,
details: Dict[str, str], cookie: str, identities: Sequence[Identity]) -> None:
logger.debug('BeginAuthentication: action %s, message "%s", icon %s, details %s, cookie %s, identities %r',
action_id, message, icon_name, details, cookie, identities)
# only support authentication as ourselves, as we don't yet have the
# protocol plumbing and UI to select an admin user
my_uid = os.geteuid()
for (auth_type, subject) in identities:
if auth_type == 'unix-user' and 'uid' in subject and subject['uid'].value == my_uid:
logger.debug('Authentication subject %s matches our uid %d', subject, my_uid)
break
else:
logger.warning('Not supporting authentication as any of %s', identities)
return
user_name = pwd.getpwuid(my_uid).pw_name
process = await asyncio.create_subprocess_exec(HELPER_PATH, user_name, cookie,
stdin=asyncio.subprocess.PIPE,
stdout=asyncio.subprocess.PIPE)
try:
await self._communicate(process)
except asyncio.CancelledError:
logger.debug('Cancelled authentication')
process.terminate()
finally:
res = await process.wait()
logger.debug('helper exited with code %i', res)
async def _communicate(self, process: asyncio.subprocess.Process) -> None:
assert process.stdin
assert process.stdout
messages: List[str] = []
async for line in process.stdout:
logger.debug('Read line from helper: %s', line)
command, _, value = line.strip().decode().partition(' ')
# usually: PAM_PROMPT_ECHO_OFF Password: \n
if command.startswith('PAM_PROMPT'):
# Don't pass this to the UI if it's "Password" (the usual case),
# so that superuser.py uses the translated default
if value.startswith('Password'):
value = ''
# flush out accumulated info/error messages
passwd = await self.responder.do_askpass('\n'.join(messages), value, '')
messages.clear()
if passwd is None:
logger.debug('got PAM_PROMPT %s, but do_askpass returned None', value)
raise asyncio.CancelledError('no password given')
logger.debug('got PAM_PROMPT %s, do_askpass returned a password', value)
process.stdin.write(passwd.encode())
process.stdin.write(b'\n')
del passwd # don't keep this around longer than necessary
await process.stdin.drain()
logger.debug('got PAM_PROMPT, wrote password to helper')
elif command in ('PAM_TEXT_INFO', 'PAM_ERROR'):
messages.append(value)
elif command == 'SUCCESS':
logger.debug('Authentication succeeded')
break
elif command == 'FAILURE':
logger.warning('Authentication failed')
break
else:
logger.warning('Unknown line from helper, aborting: %s', line)
process.terminate()
break
class PolkitAgent:
"""Register polkit agent when required
Use this as a context manager to ensure that the agent gets unregistered again.
"""
def __init__(self, responder: AskpassHandler):
self.responder = responder
self.agent_slot = None
async def __aenter__(self):
try:
self.system_bus = bus.Bus.default_system()
except OSError as e:
logger.warning('cannot connect to system bus, not registering polkit agent: %s', e)
return self
try:
# may refine that with a D-Bus call to logind
self.subject = ('unix-session', {'session-id': Variant(os.environ['XDG_SESSION_ID'], 's')})
except KeyError:
logger.debug('XDG_SESSION_ID not set, not registering polkit agent')
return self
agent_object = org_freedesktop_PolicyKit1_AuthenticationAgent(self.responder)
self.agent_slot = self.system_bus.add_object(AGENT_DBUS_PATH, agent_object)
# register agent
locale_name = locale.setlocale(locale.LC_MESSAGES, None)
await self.system_bus.call_method_async(
'org.freedesktop.PolicyKit1',
'/org/freedesktop/PolicyKit1/Authority',
'org.freedesktop.PolicyKit1.Authority',
'RegisterAuthenticationAgent',
'(sa{sv})ss',
self.subject, locale_name, AGENT_DBUS_PATH)
logger.debug('Registered agent for %r and locale %s', self.subject, locale_name)
return self
async def __aexit__(self, _exc_type, _exc_value, _traceback):
if self.agent_slot:
await self.system_bus.call_method_async(
'org.freedesktop.PolicyKit1',
'/org/freedesktop/PolicyKit1/Authority',
'org.freedesktop.PolicyKit1.Authority',
'UnregisterAuthenticationAgent',
'(sa{sv})s',
self.subject, AGENT_DBUS_PATH)
self.agent_slot.cancel()
logger.debug('Unregistered agent for %r', self.subject)

View File

@ -1,57 +0,0 @@
# This file is part of Cockpit.
#
# Copyright (C) 2023 Red Hat, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import contextlib
import socket
def install():
"""Add shims for older Python versions"""
# introduced in 3.9
if not hasattr(socket, 'recv_fds'):
import array
import _socket
def recv_fds(sock, bufsize, maxfds, flags=0):
fds = array.array("i")
msg, ancdata, flags, addr = sock.recvmsg(bufsize, _socket.CMSG_LEN(maxfds * fds.itemsize))
for cmsg_level, cmsg_type, cmsg_data in ancdata:
if (cmsg_level == _socket.SOL_SOCKET and cmsg_type == _socket.SCM_RIGHTS):
fds.frombytes(cmsg_data[:len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
return msg, list(fds), flags, addr
socket.recv_fds = recv_fds
# introduced in 3.7
if not hasattr(contextlib, 'AsyncExitStack'):
class AsyncExitStack:
async def __aenter__(self):
self.cms = []
return self
async def enter_async_context(self, cm):
result = await cm.__aenter__()
self.cms.append(cm)
return result
async def __aexit__(self, exc_type, exc_value, traceback):
# unwind in reverse order and actually await each context manager's exit
for cm in reversed(self.cms):
    await cm.__aexit__(exc_type, exc_value, traceback)
contextlib.AsyncExitStack = AsyncExitStack

View File

@ -1,248 +0,0 @@
# This file is part of Cockpit.
#
# Copyright (C) 2022 Red Hat, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import asyncio
import json
import logging
import uuid
from .jsonutil import JsonError, JsonObject, JsonValue, create_object, get_int, get_str, typechecked
logger = logging.getLogger(__name__)
class CockpitProblem(Exception):
"""A type of exception that carries a problem code and a message.
Depending on the scope, this is used to handle shutting down:
- an individual channel (sends problem code in the close message)
- peer connections (sends problem code in close message for each open channel)
- the main stdio interaction with the bridge
It is usually thrown in response to some violation of expected protocol
when parsing messages, connecting to a peer, or opening a channel.
"""
attrs: JsonObject
def __init__(self, problem: str, _msg: 'JsonObject | None' = None, **kwargs: JsonValue) -> None:
kwargs['problem'] = problem
self.attrs = create_object(_msg, kwargs)
super().__init__(get_str(self.attrs, 'message', problem))
class CockpitProtocolError(CockpitProblem):
def __init__(self, message: str, problem: str = 'protocol-error'):
super().__init__(problem, message=message)
class CockpitProtocol(asyncio.Protocol):
"""A naive implementation of the Cockpit frame protocol
We need to use this because Python's SelectorEventLoop doesn't support
buffered protocols.
"""
transport: 'asyncio.Transport | None' = None
buffer = b''
_closed: bool = False
_communication_done: 'asyncio.Future[None] | None' = None
def do_ready(self) -> None:
pass
def do_closed(self, exc: 'Exception | None') -> None:
pass
def transport_control_received(self, command: str, message: JsonObject) -> None:
raise NotImplementedError
def channel_control_received(self, channel: str, command: str, message: JsonObject) -> None:
raise NotImplementedError
def channel_data_received(self, channel: str, data: bytes) -> None:
raise NotImplementedError
def frame_received(self, frame: bytes) -> None:
header, _, data = frame.partition(b'\n')
if header != b'':
channel = header.decode('ascii')
logger.debug('data received: %d bytes of data for channel %s', len(data), channel)
self.channel_data_received(channel, data)
else:
self.control_received(data)
def control_received(self, data: bytes) -> None:
try:
message = typechecked(json.loads(data), dict)
command = get_str(message, 'command')
channel = get_str(message, 'channel', None)
if channel is not None:
logger.debug('channel control received %s', message)
self.channel_control_received(channel, command, message)
else:
logger.debug('transport control received %s', message)
self.transport_control_received(command, message)
except (json.JSONDecodeError, JsonError) as exc:
raise CockpitProtocolError(f'control message: {exc!s}') from exc
def consume_one_frame(self, data: bytes) -> int:
"""Consumes a single frame from view.
Returns positive if a number of bytes were consumed, or negative if no
work can be done because of a given number of bytes missing.
"""
try:
newline = data.index(b'\n')
except ValueError as exc:
if len(data) < 10:
# Let's try reading more
return len(data) - 10
raise CockpitProtocolError("size line is too long") from exc
try:
length = int(data[:newline])
except ValueError as exc:
raise CockpitProtocolError("frame size is not an integer") from exc
start = newline + 1
end = start + length
if end > len(data):
# We need to read more
return len(data) - end
# We can consume a full frame
self.frame_received(data[start:end])
return end
def connection_made(self, transport: asyncio.BaseTransport) -> None:
logger.debug('connection_made(%s)', transport)
assert isinstance(transport, asyncio.Transport)
self.transport = transport
self.do_ready()
if self._closed:
logger.debug(' but the protocol already was closed, so closing transport')
transport.close()
def connection_lost(self, exc: 'Exception | None') -> None:
logger.debug('connection_lost')
assert self.transport is not None
self.transport = None
self.close(exc)
def close(self, exc: 'Exception | None' = None) -> None:
if self._closed:
return
self._closed = True
if self.transport:
self.transport.close()
self.do_closed(exc)
def write_channel_data(self, channel: str, payload: bytes) -> None:
"""Send a given payload (bytes) on channel (string)"""
# Channel is certainly ascii (as enforced by .encode() below)
frame_length = len(channel + '\n') + len(payload)
header = f'{frame_length}\n{channel}\n'.encode('ascii')
if self.transport is not None:
logger.debug('writing to transport %s', self.transport)
self.transport.write(header + payload)
else:
logger.debug('cannot write to closed transport')
def write_control(self, _msg: 'JsonObject | None' = None, **kwargs: JsonValue) -> None:
"""Write a control message. See jsonutil.create_object() for details."""
logger.debug('sending control message %r %r', _msg, kwargs)
pretty = json.dumps(create_object(_msg, kwargs), indent=2) + '\n'
self.write_channel_data('', pretty.encode())
def data_received(self, data: bytes) -> None:
try:
self.buffer += data
while self.buffer:
result = self.consume_one_frame(self.buffer)
if result <= 0:
return
self.buffer = self.buffer[result:]
except CockpitProtocolError as exc:
self.close(exc)
def eof_received(self) -> bool:
return False
# Helpful functionality for "server"-side protocol implementations
class CockpitProtocolServer(CockpitProtocol):
init_host: 'str | None' = None
authorizations: 'dict[str, asyncio.Future[str]] | None' = None
def do_send_init(self) -> None:
raise NotImplementedError
def do_init(self, message: JsonObject) -> None:
pass
def do_kill(self, host: 'str | None', group: 'str | None', message: JsonObject) -> None:
raise NotImplementedError
def transport_control_received(self, command: str, message: JsonObject) -> None:
if command == 'init':
if get_int(message, 'version') != 1:
raise CockpitProtocolError('incorrect version number')
self.init_host = get_str(message, 'host')
self.do_init(message)
elif command == 'kill':
self.do_kill(get_str(message, 'host', None), get_str(message, 'group', None), message)
elif command == 'authorize':
self.do_authorize(message)
else:
raise CockpitProtocolError(f'unexpected control message {command} received')
def do_ready(self) -> None:
self.do_send_init()
# authorize request/response API
async def request_authorization(
self, challenge: str, timeout: 'int | None' = None, **kwargs: JsonValue
) -> str:
if self.authorizations is None:
self.authorizations = {}
cookie = str(uuid.uuid4())
future = asyncio.get_running_loop().create_future()
try:
self.authorizations[cookie] = future
self.write_control(None, command='authorize', challenge=challenge, cookie=cookie, **kwargs)
return await asyncio.wait_for(future, timeout)
finally:
self.authorizations.pop(cookie)
def do_authorize(self, message: JsonObject) -> None:
cookie = get_str(message, 'cookie')
response = get_str(message, 'response')
if self.authorizations is None or cookie not in self.authorizations:
logger.warning('no matching authorize request')
return
self.authorizations[cookie].set_result(response)
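
For reference, the frame format handled by consume_one_frame() and write_channel_data() above is: a decimal byte count, a newline, the channel id (empty for control messages), another newline, then the payload. A minimal standalone sketch of that framing, independent of the deleted module:

    import json
    from typing import Tuple

    def make_frame(channel: str, payload: bytes) -> bytes:
        # the length counts the channel id, its newline, and the payload
        body = channel.encode('ascii') + b'\n' + payload
        return str(len(body)).encode('ascii') + b'\n' + body

    def parse_frame(data: bytes) -> Tuple[str, bytes]:
        size, _, rest = data.partition(b'\n')
        body = rest[:int(size)]
        channel, _, payload = body.partition(b'\n')
        return channel.decode('ascii'), payload

    # control messages travel on the empty channel id
    frame = make_frame('', json.dumps({'command': 'init', 'version': 1}).encode())
    assert parse_frame(frame) == ('', b'{"command": "init", "version": 1}')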

View File

@ -1,233 +0,0 @@
# This file is part of Cockpit.
#
# Copyright (C) 2022 Red Hat, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import getpass
import logging
import re
import socket
from typing import Dict, List, Optional, Tuple
from cockpit._vendor import ferny
from .jsonutil import JsonObject, JsonValue, get_str, get_str_or_none
from .peer import Peer, PeerError
from .router import Router, RoutingRule
logger = logging.getLogger(__name__)
class PasswordResponder(ferny.AskpassHandler):
PASSPHRASE_RE = re.compile(r"Enter passphrase for key '(.*)': ")
password: Optional[str]
hostkeys_seen: List[Tuple[str, str, str, str, str]]
error_message: Optional[str]
password_attempts: int
def __init__(self, password: Optional[str]):
self.password = password
self.hostkeys_seen = []
self.error_message = None
self.password_attempts = 0
async def do_hostkey(self, reason: str, host: str, algorithm: str, key: str, fingerprint: str) -> bool:
self.hostkeys_seen.append((reason, host, algorithm, key, fingerprint))
return False
async def do_askpass(self, messages: str, prompt: str, hint: str) -> Optional[str]:
logger.debug('Got askpass(%s): %s', hint, prompt)
match = PasswordResponder.PASSPHRASE_RE.fullmatch(prompt)
if match is not None:
# We never unlock private keys — we rather need to throw a
# specially-formatted error message which will cause the frontend
# to load the named key into the agent for us and try again.
path = match.group(1)
logger.debug("This is a passphrase request for %s, but we don't do those. Abort.", path)
self.error_message = f'locked identity: {path}'
return None
assert self.password is not None
assert self.password_attempts == 0
self.password_attempts += 1
return self.password
class SshPeer(Peer):
session: Optional[ferny.Session] = None
host: str
user: Optional[str]
password: Optional[str]
private: bool
async def do_connect_transport(self) -> None:
assert self.session is not None
logger.debug('Starting ssh session user=%s, host=%s, private=%s', self.user, self.host, self.private)
basename, colon, portstr = self.host.rpartition(':')
if colon and portstr.isdigit():
host = basename
port = int(portstr)
else:
host = self.host
port = None
responder = PasswordResponder(self.password)
options = {"StrictHostKeyChecking": 'yes'}
if self.password is not None:
options.update(NumberOfPasswordPrompts='1')
else:
options.update(PasswordAuthentication="no", KbdInteractiveAuthentication="no")
try:
await self.session.connect(host, login_name=self.user, port=port,
handle_host_key=self.private, options=options,
interaction_responder=responder)
except (OSError, socket.gaierror) as exc:
logger.debug('connecting to host %s failed: %s', host, exc)
raise PeerError('no-host', error='no-host', message=str(exc)) from exc
except ferny.SshHostKeyError as exc:
if responder.hostkeys_seen:
# If we saw a hostkey then we can issue a detailed error message
# containing the key that would need to be accepted. That will
# cause the front-end to present a dialog.
_reason, host, algorithm, key, fingerprint = responder.hostkeys_seen[0]
error_args = {'host-key': f'{host} {algorithm} {key}', 'host-fingerprint': fingerprint}
else:
error_args = {}
if isinstance(exc, ferny.SshChangedHostKeyError):
error = 'invalid-hostkey'
elif self.private:
error = 'unknown-hostkey'
else:
# non-private session case. throw a generic error.
error = 'unknown-host'
logger.debug('SshPeer got a %s %s; private %s, seen hostkeys %r; raising %s with extra args %r',
type(exc), exc, self.private, responder.hostkeys_seen, error, error_args)
raise PeerError(error, error_args, error=error, auth_method_results={}) from exc
except ferny.SshAuthenticationError as exc:
logger.debug('authentication to host %s failed: %s', host, exc)
results = {method: 'not-provided' for method in exc.methods}
if 'password' in results and self.password is not None:
if responder.password_attempts == 0:
results['password'] = 'not-tried'
else:
results['password'] = 'denied'
raise PeerError('authentication-failed',
error=responder.error_message or 'authentication-failed',
auth_method_results=results) from exc
except ferny.SshError as exc:
logger.debug('unknown failure connecting to host %s: %s', host, exc)
raise PeerError('internal-error', message=str(exc)) from exc
args = self.session.wrap_subprocess_args(['cockpit-bridge'])
await self.spawn(args, [])
def do_kill(self, host: 'str | None', group: 'str | None', message: JsonObject) -> None:
if host == self.host:
self.close()
elif host is None:
super().do_kill(host, group, message)
def do_authorize(self, message: JsonObject) -> None:
if get_str(message, 'challenge').startswith('plain1:'):
cookie = get_str(message, 'cookie')
self.write_control(command='authorize', cookie=cookie, response=self.password or '')
self.password = None # once is enough...
def do_superuser_init_done(self) -> None:
self.password = None
def __init__(self, router: Router, host: str, user: Optional[str], options: JsonObject, *, private: bool) -> None:
super().__init__(router)
self.host = host
self.user = user
self.password = get_str(options, 'password', None)
self.private = private
self.session = ferny.Session()
superuser: JsonValue
init_superuser = get_str_or_none(options, 'init-superuser', None)
if init_superuser in (None, 'none'):
superuser = False
else:
superuser = {'id': init_superuser}
self.start_in_background(init_host=host, superuser=superuser)
class HostRoutingRule(RoutingRule):
remotes: Dict[Tuple[str, Optional[str], Optional[str]], Peer]
def __init__(self, router):
super().__init__(router)
self.remotes = {}
def apply_rule(self, options: JsonObject) -> Optional[Peer]:
assert self.router is not None
assert self.router.init_host is not None
host = get_str(options, 'host', self.router.init_host)
if host == self.router.init_host:
return None
user = get_str(options, 'user', None)
# HACK: the front-end relies on this for tracking connections without an explicit user name;
# the user will then be determined by SSH (`User` in the config or the current user)
# See cockpit_router_normalize_host_params() in src/bridge/cockpitrouter.c
if user == getpass.getuser():
user = None
if not user:
user_from_host, _, _ = host.rpartition('@')
user = user_from_host or None # avoid ''
if get_str(options, 'session', None) == 'private':
nonce = get_str(options, 'channel')
else:
nonce = None
assert isinstance(host, str)
assert user is None or isinstance(user, str)
assert nonce is None or isinstance(nonce, str)
key = host, user, nonce
logger.debug('Request for channel %s is remote.', options)
logger.debug('key=%s', key)
if key not in self.remotes:
logger.debug('%s is not among the existing remotes %s. Opening a new connection.', key, self.remotes)
peer = SshPeer(self.router, host, user, options, private=nonce is not None)
peer.add_done_callback(lambda: self.remotes.__delitem__(key))
self.remotes[key] = peer
return self.remotes[key]
def shutdown(self):
for peer in set(self.remotes.values()):
peer.close()
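
Note that do_connect_transport() above only treats a host suffix as a port when it is entirely numeric, so user@host strings and names without a colon pass through unchanged. A small sketch of just that rule (host names are invented examples):

    from typing import Optional, Tuple

    def split_host_port(host: str) -> Tuple[str, Optional[int]]:
        # only a purely numeric suffix after the last ':' is a port
        basename, colon, portstr = host.rpartition(':')
        if colon and portstr.isdigit():
            return basename, int(portstr)
        return host, None

    assert split_host_port('srv.example.com:2222') == ('srv.example.com', 2222)
    assert split_host_port('srv.example.com') == ('srv.example.com', None)
    assert split_host_port('admin@srv:ssh') == ('admin@srv:ssh', None)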

View File

@ -1,266 +0,0 @@
# This file is part of Cockpit.
#
# Copyright (C) 2022 Red Hat, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import asyncio
import collections
import logging
from typing import Dict, List, Optional
from .jsonutil import JsonObject, JsonValue
from .protocol import CockpitProblem, CockpitProtocolError, CockpitProtocolServer
logger = logging.getLogger(__name__)
class ExecutionQueue:
"""Temporarily delay calls to a given set of class methods.
Functions by replacing the named function at the instance __dict__
level, effectively providing an override for exactly one instance
of `method`'s object.
Queues the invocations. Run them later with .run(), which also reverses
the redirection by deleting the named methods from the instance.
"""
def __init__(self, methods):
self.queue = collections.deque()
self.methods = methods
for method in self.methods:
self._wrap(method)
def _wrap(self, method):
# NB: this function is stored in the instance dict and therefore
# doesn't function as a descriptor, isn't a method, doesn't get bound,
# and therefore doesn't receive a self parameter
setattr(method.__self__, method.__func__.__name__, lambda *args: self.queue.append((method, args)))
def run(self):
logger.debug('ExecutionQueue: Running %d queued method calls', len(self.queue))
for method, args in self.queue:
method(*args)
for method in self.methods:
delattr(method.__self__, method.__func__.__name__)
class Endpoint:
router: 'Router'
__endpoint_frozen_queue: Optional[ExecutionQueue] = None
def __init__(self, router: 'Router'):
router.add_endpoint(self)
self.router = router
def freeze_endpoint(self):
assert self.__endpoint_frozen_queue is None
logger.debug('Freezing endpoint %s', self)
self.__endpoint_frozen_queue = ExecutionQueue({self.do_channel_control, self.do_channel_data, self.do_kill})
def thaw_endpoint(self):
assert self.__endpoint_frozen_queue is not None
logger.debug('Thawing endpoint %s', self)
self.__endpoint_frozen_queue.run()
self.__endpoint_frozen_queue = None
# interface for receiving messages
def do_close(self):
raise NotImplementedError
def do_channel_control(self, channel: str, command: str, message: JsonObject) -> None:
raise NotImplementedError
def do_channel_data(self, channel: str, data: bytes) -> None:
raise NotImplementedError
def do_kill(self, host: 'str | None', group: 'str | None', message: JsonObject) -> None:
raise NotImplementedError
# interface for sending messages
def send_channel_data(self, channel: str, data: bytes) -> None:
self.router.write_channel_data(channel, data)
def send_channel_control(
self, channel: str, command: str, _msg: 'JsonObject | None', **kwargs: JsonValue
) -> None:
self.router.write_control(_msg, channel=channel, command=command, **kwargs)
if command == 'close':
self.router.endpoints[self].remove(channel)
self.router.drop_channel(channel)
def shutdown_endpoint(self, _msg: 'JsonObject | None' = None, **kwargs: JsonValue) -> None:
self.router.shutdown_endpoint(self, _msg, **kwargs)
class RoutingError(CockpitProblem):
pass
class RoutingRule:
router: 'Router'
def __init__(self, router: 'Router'):
self.router = router
def apply_rule(self, options: JsonObject) -> Optional[Endpoint]:
"""Check if a routing rule applies to a given 'open' message.
This should inspect the options dictionary and do one of the following three things:
- return an Endpoint to handle this channel
- raise a RoutingError to indicate that the open should be rejected
- return None to let the next rule run
"""
raise NotImplementedError
def shutdown(self):
raise NotImplementedError
class Router(CockpitProtocolServer):
routing_rules: List[RoutingRule]
open_channels: Dict[str, Endpoint]
endpoints: 'dict[Endpoint, set[str]]'
no_endpoints: asyncio.Event # set if endpoints dict is empty
_eof: bool = False
def __init__(self, routing_rules: List[RoutingRule]):
for rule in routing_rules:
rule.router = self
self.routing_rules = routing_rules
self.open_channels = {}
self.endpoints = {}
self.no_endpoints = asyncio.Event()
self.no_endpoints.set() # at first there are no endpoints
def check_rules(self, options: JsonObject) -> Endpoint:
for rule in self.routing_rules:
logger.debug(' applying rule %s', rule)
endpoint = rule.apply_rule(options)
if endpoint is not None:
logger.debug(' resulting endpoint is %s', endpoint)
return endpoint
else:
logger.debug(' No rules matched')
raise RoutingError('not-supported')
def drop_channel(self, channel: str) -> None:
try:
self.open_channels.pop(channel)
logger.debug('router dropped channel %s', channel)
except KeyError:
logger.error('trying to drop non-existent channel %s from %s', channel, self.open_channels)
def add_endpoint(self, endpoint: Endpoint) -> None:
self.endpoints[endpoint] = set()
self.no_endpoints.clear()
def shutdown_endpoint(self, endpoint: Endpoint, _msg: 'JsonObject | None' = None, **kwargs: JsonValue) -> None:
channels = self.endpoints.pop(endpoint)
logger.debug('shutdown_endpoint(%s, %s) will close %s', endpoint, kwargs, channels)
for channel in channels:
self.write_control(_msg, command='close', channel=channel, **kwargs)
self.drop_channel(channel)
if not self.endpoints:
self.no_endpoints.set()
# were we waiting to exit?
if self._eof:
logger.debug(' endpoints remaining: %r', self.endpoints)
if not self.endpoints and self.transport:
logger.debug(' close transport')
self.transport.close()
def do_kill(self, host: 'str | None', group: 'str | None', message: JsonObject) -> None:
endpoints = set(self.endpoints)
logger.debug('do_kill(%s, %s). Considering %d endpoints.', host, group, len(endpoints))
for endpoint in endpoints:
endpoint.do_kill(host, group, message)
def channel_control_received(self, channel: str, command: str, message: JsonObject) -> None:
# If this is an open message then we need to apply the routing rules to
# figure out the correct endpoint to connect. If it's not an open
# message, then we expect the endpoint to already exist.
if command == 'open':
if channel in self.open_channels:
raise CockpitProtocolError('channel is already open')
try:
logger.debug('Trying to find endpoint for new channel %s payload=%s', channel, message.get('payload'))
endpoint = self.check_rules(message)
except RoutingError as exc:
self.write_control(exc.attrs, command='close', channel=channel)
return
self.open_channels[channel] = endpoint
self.endpoints[endpoint].add(channel)
else:
try:
endpoint = self.open_channels[channel]
except KeyError:
# sending to a non-existent channel can happen due to races and is not an error
return
# At this point, we have the endpoint. Route the message.
endpoint.do_channel_control(channel, command, message)
def channel_data_received(self, channel: str, data: bytes) -> None:
try:
endpoint = self.open_channels[channel]
except KeyError:
return
endpoint.do_channel_data(channel, data)
def eof_received(self) -> bool:
logger.debug('eof_received(%r)', self)
endpoints = set(self.endpoints)
for endpoint in endpoints:
endpoint.do_close()
self._eof = True
logger.debug(' endpoints remaining: %r', self.endpoints)
return bool(self.endpoints)
_communication_done: Optional[asyncio.Future] = None
def do_closed(self, exc: Optional[Exception]) -> None:
# If we didn't send EOF yet, do it now.
if not self._eof:
self.eof_received()
if self._communication_done is not None:
if exc is None:
self._communication_done.set_result(None)
else:
self._communication_done.set_exception(exc)
async def communicate(self) -> None:
"""Wait until communication is complete on the router and all endpoints are done."""
assert self._communication_done is None
self._communication_done = asyncio.get_running_loop().create_future()
try:
await self._communication_done
except (BrokenPipeError, ConnectionResetError):
pass # these are normal occurrences when closed from the other side
finally:
self._communication_done = None
# In an orderly exit, this is already done, but in case it wasn't
# orderly, we need to make sure the endpoints shut down anyway...
await self.no_endpoints.wait()
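
check_rules() above implements first-match routing: each rule may claim the channel, reject the open, or return None to pass. A toy illustration of those semantics (EchoEndpoint and PayloadRule are invented names, not part of the deleted code):

    from typing import List, Optional

    class EchoEndpoint:
        pass

    class PayloadRule:
        def __init__(self, payload: str):
            self.payload = payload
            self.endpoint = EchoEndpoint()

        def apply_rule(self, options: dict) -> Optional[EchoEndpoint]:
            # claim the channel, or return None to let the next rule run
            return self.endpoint if options.get('payload') == self.payload else None

    rules: List[PayloadRule] = [PayloadRule('echo'), PayloadRule('null')]

    def check_rules(options: dict) -> EchoEndpoint:
        for rule in rules:
            endpoint = rule.apply_rule(options)
            if endpoint is not None:
                return endpoint
        raise LookupError('not-supported')

    assert check_rules({'payload': 'null'}) is rules[1].endpoint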

View File

@ -1,438 +0,0 @@
# This file is part of Cockpit.
#
# Copyright (C) 2022 Red Hat, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import errno
import logging
import os
import re
from typing import Any, DefaultDict, Iterable, List, NamedTuple, Optional, Tuple
from cockpit._vendor.systemd_ctypes import Handle
USER_HZ = os.sysconf(os.sysconf_names['SC_CLK_TCK'])
MS_PER_JIFFY = 1000 / (USER_HZ if (USER_HZ > 0) else 100)
HWMON_PATH = '/sys/class/hwmon'
# we would like to do this, but mypy complains; https://github.com/python/mypy/issues/2900
# Samples = collections.defaultdict[str, Union[float, Dict[str, Union[float, None]]]]
Samples = DefaultDict[str, Any]
logger = logging.getLogger(__name__)
def read_int_file(rootfd: int, statfile: str, default: Optional[int] = None, key: bytes = b'') -> Optional[int]:
# Not every stat is available, such as cpu.weight
try:
fd = os.open(statfile, os.O_RDONLY, dir_fd=rootfd)
except FileNotFoundError:
return None
try:
data = os.read(fd, 1024)
except OSError as e:
# cgroups can disappear between the open and read
if e.errno != errno.ENODEV:
logger.warning('Failed to read %s: %s', statfile, e)
return None
finally:
os.close(fd)
if key:
start = data.index(key) + len(key)
end = data.index(b'\n', start)
data = data[start:end]
try:
# 0 often means "none", so replace it with default value
return int(data) or default
except ValueError:
# Some samples such as "memory.max" contain "max" when there is no limit
return None
class SampleDescription(NamedTuple):
name: str
units: str
semantics: str
instanced: bool
class Sampler:
descriptions: List[SampleDescription]
def sample(self, samples: Samples) -> None:
raise NotImplementedError
class CPUSampler(Sampler):
descriptions = [
SampleDescription('cpu.basic.nice', 'millisec', 'counter', instanced=False),
SampleDescription('cpu.basic.user', 'millisec', 'counter', instanced=False),
SampleDescription('cpu.basic.system', 'millisec', 'counter', instanced=False),
SampleDescription('cpu.basic.iowait', 'millisec', 'counter', instanced=False),
SampleDescription('cpu.core.nice', 'millisec', 'counter', instanced=True),
SampleDescription('cpu.core.user', 'millisec', 'counter', instanced=True),
SampleDescription('cpu.core.system', 'millisec', 'counter', instanced=True),
SampleDescription('cpu.core.iowait', 'millisec', 'counter', instanced=True),
]
def sample(self, samples: Samples) -> None:
with open('/proc/stat') as stat:
for line in stat:
if not line.startswith('cpu'):
continue
cpu, user, nice, system, _idle, iowait = line.split()[:6]
core = cpu[3:] or None
if core:
prefix = 'cpu.core'
samples[f'{prefix}.nice'][core] = int(nice) * MS_PER_JIFFY
samples[f'{prefix}.user'][core] = int(user) * MS_PER_JIFFY
samples[f'{prefix}.system'][core] = int(system) * MS_PER_JIFFY
samples[f'{prefix}.iowait'][core] = int(iowait) * MS_PER_JIFFY
else:
prefix = 'cpu.basic'
samples[f'{prefix}.nice'] = int(nice) * MS_PER_JIFFY
samples[f'{prefix}.user'] = int(user) * MS_PER_JIFFY
samples[f'{prefix}.system'] = int(system) * MS_PER_JIFFY
samples[f'{prefix}.iowait'] = int(iowait) * MS_PER_JIFFY
class MemorySampler(Sampler):
descriptions = [
SampleDescription('memory.free', 'bytes', 'instant', instanced=False),
SampleDescription('memory.used', 'bytes', 'instant', instanced=False),
SampleDescription('memory.cached', 'bytes', 'instant', instanced=False),
SampleDescription('memory.swap-used', 'bytes', 'instant', instanced=False),
]
def sample(self, samples: Samples) -> None:
with open('/proc/meminfo') as meminfo:
items = {k: int(v.strip(' kB\n')) for line in meminfo for k, v in [line.split(':', 1)]}
samples['memory.free'] = 1024 * items['MemFree']
samples['memory.used'] = 1024 * (items['MemTotal'] - items['MemAvailable'])
samples['memory.cached'] = 1024 * (items['Buffers'] + items['Cached'])
samples['memory.swap-used'] = 1024 * (items['SwapTotal'] - items['SwapFree'])
class CPUTemperatureSampler(Sampler):
# Cache found sensors, as they can't be hotplugged.
sensors: Optional[List[str]] = None
descriptions = [
SampleDescription('cpu.temperature', 'celsius', 'instant', instanced=True),
]
@staticmethod
def detect_cpu_sensors(dir_fd: int) -> Iterable[str]:
# Read the name file to decide what to do with this directory
try:
with Handle.open('name', os.O_RDONLY, dir_fd=dir_fd) as fd:
name = os.read(fd, 1024).decode().strip()
except FileNotFoundError:
return
if name == 'atk0110':
# only sample 'CPU Temperature' in atk0110
predicate = (lambda label: label == 'CPU Temperature')
elif name == 'cpu_thermal':
# labels are not used on ARM
predicate = None
elif name == 'coretemp':
# accept all labels on Intel
predicate = None
elif name in ['k8temp', 'k10temp']:
predicate = None
else:
# Not a CPU sensor
return
# Now scan the directory for inputs
for input_filename in os.listdir(dir_fd):
if not input_filename.endswith('_input'):
continue
if predicate:
# We need to check the label
try:
label_filename = input_filename.replace('_input', '_label')
with Handle.open(label_filename, os.O_RDONLY, dir_fd=dir_fd) as fd:
label = os.read(fd, 1024).decode().strip()
except FileNotFoundError:
continue
if not predicate(label):
continue
yield input_filename
@staticmethod
def scan_sensors() -> Iterable[str]:
try:
top_fd = Handle.open(HWMON_PATH, os.O_RDONLY | os.O_DIRECTORY)
except FileNotFoundError:
return
with top_fd:
for hwmon_name in os.listdir(top_fd):
with Handle.open(hwmon_name, os.O_RDONLY | os.O_DIRECTORY, dir_fd=top_fd) as subdir_fd:
for sensor in CPUTemperatureSampler.detect_cpu_sensors(subdir_fd):
yield f'{HWMON_PATH}/{hwmon_name}/{sensor}'
def sample(self, samples: Samples) -> None:
if self.sensors is None:
self.sensors = list(CPUTemperatureSampler.scan_sensors())
for sensor_path in self.sensors:
with open(sensor_path) as sensor:
temperature = int(sensor.read().strip())
if temperature == 0:
return
samples['cpu.temperature'][sensor_path] = temperature / 1000
class DiskSampler(Sampler):
descriptions = [
SampleDescription('disk.all.read', 'bytes', 'counter', instanced=False),
SampleDescription('disk.all.written', 'bytes', 'counter', instanced=False),
SampleDescription('disk.dev.read', 'bytes', 'counter', instanced=True),
SampleDescription('disk.dev.written', 'bytes', 'counter', instanced=True),
]
def sample(self, samples: Samples) -> None:
with open('/proc/diskstats') as diskstats:
all_read_bytes = 0
all_written_bytes = 0
for line in diskstats:
# https://www.kernel.org/doc/Documentation/ABI/testing/procfs-diskstats
fields = line.strip().split()
dev_major = fields[0]
dev_name = fields[2]
num_sectors_read = fields[5]
num_sectors_written = fields[9]
# ignore mdraid
if dev_major == '9':
continue
# ignore device-mapper
if dev_name.startswith('dm-'):
continue
# Skip partitions
if dev_name[:2] in ['sd', 'hd', 'vd'] and dev_name[-1].isdigit():
continue
# Ignore nvme partitions
if dev_name.startswith('nvme') and 'p' in dev_name:
continue
read_bytes = int(num_sectors_read) * 512
written_bytes = int(num_sectors_written) * 512
all_read_bytes += read_bytes
all_written_bytes += written_bytes
samples['disk.dev.read'][dev_name] = read_bytes
samples['disk.dev.written'][dev_name] = written_bytes
samples['disk.all.read'] = all_read_bytes
samples['disk.all.written'] = all_written_bytes
class CGroupSampler(Sampler):
descriptions = [
SampleDescription('cgroup.memory.usage', 'bytes', 'instant', instanced=True),
SampleDescription('cgroup.memory.limit', 'bytes', 'instant', instanced=True),
SampleDescription('cgroup.memory.sw-usage', 'bytes', 'instant', instanced=True),
SampleDescription('cgroup.memory.sw-limit', 'bytes', 'instant', instanced=True),
SampleDescription('cgroup.cpu.usage', 'millisec', 'counter', instanced=True),
SampleDescription('cgroup.cpu.shares', 'count', 'instant', instanced=True),
]
cgroups_v2: Optional[bool] = None
def sample(self, samples: Samples) -> None:
if self.cgroups_v2 is None:
self.cgroups_v2 = os.path.exists('/sys/fs/cgroup/cgroup.controllers')
if self.cgroups_v2:
cgroups_v2_path = '/sys/fs/cgroup/'
for path, _, _, rootfd in os.fwalk(cgroups_v2_path):
cgroup = path.replace(cgroups_v2_path, '')
if not cgroup:
continue
samples['cgroup.memory.usage'][cgroup] = read_int_file(rootfd, 'memory.current', 0)
samples['cgroup.memory.limit'][cgroup] = read_int_file(rootfd, 'memory.max')
samples['cgroup.memory.sw-usage'][cgroup] = read_int_file(rootfd, 'memory.swap.current', 0)
samples['cgroup.memory.sw-limit'][cgroup] = read_int_file(rootfd, 'memory.swap.max')
samples['cgroup.cpu.shares'][cgroup] = read_int_file(rootfd, 'cpu.weight')
usage_usec = read_int_file(rootfd, 'cpu.stat', 0, key=b'usage_usec')
if usage_usec:
samples['cgroup.cpu.usage'][cgroup] = usage_usec / 1000
else:
memory_path = '/sys/fs/cgroup/memory/'
for path, _, _, rootfd in os.fwalk(memory_path):
cgroup = path.replace(memory_path, '')
if not cgroup:
continue
samples['cgroup.memory.usage'][cgroup] = read_int_file(rootfd, 'memory.usage_in_bytes', 0)
samples['cgroup.memory.limit'][cgroup] = read_int_file(rootfd, 'memory.limit_in_bytes')
samples['cgroup.memory.sw-usage'][cgroup] = read_int_file(rootfd, 'memory.memsw.usage_in_bytes', 0)
samples['cgroup.memory.sw-limit'][cgroup] = read_int_file(rootfd, 'memory.memsw.limit_in_bytes')
cpu_path = '/sys/fs/cgroup/cpu/'
for path, _, _, rootfd in os.fwalk(cpu_path):
cgroup = path.replace(cpu_path, '')
if not cgroup:
continue
samples['cgroup.cpu.shares'][cgroup] = read_int_file(rootfd, 'cpu.shares')
usage_nsec = read_int_file(rootfd, 'cpuacct.usage')
if usage_nsec:
samples['cgroup.cpu.usage'][cgroup] = usage_nsec / 1000000
class CGroupDiskIO(Sampler):
IO_RE = re.compile(rb'\bread_bytes: (?P<read>\d+).*\nwrite_bytes: (?P<write>\d+)', flags=re.S)
descriptions = [
SampleDescription('disk.cgroup.read', 'bytes', 'counter', instanced=True),
SampleDescription('disk.cgroup.written', 'bytes', 'counter', instanced=True),
]
@staticmethod
def get_cgroup_name(fd: int) -> str:
with Handle.open('cgroup', os.O_RDONLY, dir_fd=fd) as cgroup_fd:
cgroup_name = os.read(cgroup_fd, 2048).decode().strip()
# Skip leading ::0/
return cgroup_name[4:]
@staticmethod
def get_proc_io(fd: int) -> Tuple[int, int]:
with Handle.open('io', os.O_RDONLY, dir_fd=fd) as io_fd:
data = os.read(io_fd, 4096)
match = re.search(CGroupDiskIO.IO_RE, data)
if match:
proc_read = int(match.group('read'))
proc_write = int(match.group('write'))
return proc_read, proc_write
return 0, 0
def sample(self, samples: Samples):
with Handle.open('/proc', os.O_RDONLY | os.O_DIRECTORY) as proc_fd:
reads = samples['disk.cgroup.read']
writes = samples['disk.cgroup.written']
for path in os.listdir(proc_fd):
# non-pid entries in proc are guaranteed to start with a character a-z
if path[0] < '0' or path[0] > '9':
continue
try:
with Handle.open(path, os.O_PATH, dir_fd=proc_fd) as pid_fd:
cgroup_name = self.get_cgroup_name(pid_fd)
proc_read, proc_write = self.get_proc_io(pid_fd)
except (FileNotFoundError, PermissionError, ProcessLookupError):
continue
reads[cgroup_name] = reads.get(cgroup_name, 0) + proc_read
writes[cgroup_name] = writes.get(cgroup_name, 0) + proc_write
class NetworkSampler(Sampler):
descriptions = [
SampleDescription('network.interface.tx', 'bytes', 'counter', instanced=True),
SampleDescription('network.interface.rx', 'bytes', 'counter', instanced=True),
]
def sample(self, samples: Samples) -> None:
with open("/proc/net/dev") as network_samples:
for line in network_samples:
fields = line.split()
# Skip header line
if fields[0][-1] != ':':
continue
iface = fields[0][:-1]
samples['network.interface.rx'][iface] = int(fields[1])
samples['network.interface.tx'][iface] = int(fields[9])
class MountSampler(Sampler):
descriptions = [
SampleDescription('mount.total', 'bytes', 'instant', instanced=True),
SampleDescription('mount.used', 'bytes', 'instant', instanced=True),
]
def sample(self, samples: Samples) -> None:
with open('/proc/mounts') as mounts:
for line in mounts:
# Only look at real devices
if line[0] != '/':
continue
path = line.split()[1]
try:
res = os.statvfs(path)
except OSError:
continue
frsize = res.f_frsize
total = frsize * res.f_blocks
samples['mount.total'][path] = total
samples['mount.used'][path] = total - frsize * res.f_bfree
class BlockSampler(Sampler):
descriptions = [
SampleDescription('block.device.read', 'bytes', 'counter', instanced=True),
SampleDescription('block.device.written', 'bytes', 'counter', instanced=True),
]
def sample(self, samples: Samples) -> None:
with open('/proc/diskstats') as diskstats:
for line in diskstats:
# https://www.kernel.org/doc/Documentation/ABI/testing/procfs-diskstats
[_, _, dev_name, _, _, sectors_read, _, _, _, sectors_written, *_] = line.strip().split()
samples['block.device.read'][dev_name] = int(sectors_read) * 512
samples['block.device.written'][dev_name] = int(sectors_written) * 512
SAMPLERS = [
BlockSampler,
CGroupSampler,
CGroupDiskIO,
CPUSampler,
CPUTemperatureSampler,
DiskSampler,
MemorySampler,
MountSampler,
NetworkSampler,
]
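
read_int_file()'s key= parameter above extracts a single field from flat "name value" files such as cpu.stat. The extraction step in isolation (the sample data is invented):

    def extract_int(data: bytes, key: bytes) -> int:
        # slice out everything between the key and the next newline;
        # int() tolerates the leading space
        start = data.index(key) + len(key)
        end = data.index(b'\n', start)
        return int(data[start:end])

    stat = b'usage_usec 2500000\nuser_usec 2000000\nsystem_usec 500000\n'
    assert extract_int(stat, b'usage_usec') == 2500000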

View File

@ -1,245 +0,0 @@
# This file is part of Cockpit.
#
# Copyright (C) 2022 Red Hat, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import array
import asyncio
import contextlib
import getpass
import logging
import os
import socket
from tempfile import TemporaryDirectory
from typing import List, Optional, Sequence, Tuple
from cockpit._vendor import ferny
from cockpit._vendor.bei.bootloader import make_bootloader
from cockpit._vendor.systemd_ctypes import Variant, bus
from .beipack import BridgeBeibootHelper
from .jsonutil import JsonObject, get_str
from .packages import BridgeConfig
from .peer import ConfiguredPeer, Peer, PeerError
from .polkit import PolkitAgent
from .router import Router, RoutingError, RoutingRule
logger = logging.getLogger(__name__)
class SuperuserPeer(ConfiguredPeer):
responder: ferny.AskpassHandler
def __init__(self, router: Router, config: BridgeConfig, responder: ferny.AskpassHandler):
super().__init__(router, config)
self.responder = responder
async def do_connect_transport(self) -> None:
async with contextlib.AsyncExitStack() as context:
if 'pkexec' in self.args:
logger.debug('connecting polkit superuser peer transport %r', self.args)
await context.enter_async_context(PolkitAgent(self.responder))
else:
logger.debug('connecting non-polkit superuser peer transport %r', self.args)
responders: 'list[ferny.InteractionHandler]' = [self.responder]
if '# cockpit-bridge' in self.args:
logger.debug('going to beiboot superuser bridge %r', self.args)
helper = BridgeBeibootHelper(self, ['--privileged'])
responders.append(helper)
stage1 = make_bootloader(helper.steps, gadgets=ferny.BEIBOOT_GADGETS).encode()
else:
stage1 = None
agent = ferny.InteractionAgent(responders)
if 'SUDO_ASKPASS=ferny-askpass' in self.env:
tmpdir = context.enter_context(TemporaryDirectory())
ferny_askpass = ferny.write_askpass_to_tmpdir(tmpdir)
env: Sequence[str] = [f'SUDO_ASKPASS={ferny_askpass}']
else:
env = self.env
transport = await self.spawn(self.args, env, stderr=agent, start_new_session=True)
if stage1 is not None:
transport.write(stage1)
try:
await agent.communicate()
except ferny.InteractionError as exc:
raise PeerError('authentication-failed', message=str(exc)) from exc
class CockpitResponder(ferny.AskpassHandler):
commands = ('ferny.askpass', 'cockpit.send-stderr')
async def do_custom_command(self, command: str, args: Tuple, fds: List[int], stderr: str) -> None:
if command == 'cockpit.send-stderr':
with socket.socket(fileno=fds[0]) as sock:
fds.pop(0)
# socket.send_fds(sock, [b'\0'], [2]) # New in Python 3.9
sock.sendmsg([b'\0'], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, array.array("i", [2]))])
class AuthorizeResponder(CockpitResponder):
def __init__(self, router: Router):
self.router = router
async def do_askpass(self, messages: str, prompt: str, hint: str) -> str:
hexuser = ''.join(f'{c:02x}' for c in getpass.getuser().encode('ascii'))
return await self.router.request_authorization(f'plain1:{hexuser}')
class SuperuserRoutingRule(RoutingRule, CockpitResponder, bus.Object, interface='cockpit.Superuser'):
superuser_configs: Sequence[BridgeConfig] = ()
pending_prompt: Optional[asyncio.Future]
peer: Optional[SuperuserPeer]
# D-Bus signals
prompt = bus.Interface.Signal('s', 's', 's', 'b', 's') # message, prompt, default, echo, error
# D-Bus properties
bridges = bus.Interface.Property('as', value=[])
current = bus.Interface.Property('s', value='none')
methods = bus.Interface.Property('a{sv}', value={})
# RoutingRule
def apply_rule(self, options: JsonObject) -> Optional[Peer]:
superuser = options.get('superuser')
if not superuser or self.current == 'root':
# superuser not requested, or already superuser? Next rule.
return None
elif self.peer or superuser == 'try':
# superuser requested and active? Return it.
# 'try' requested? Either return the peer, or None.
return self.peer
else:
# superuser requested, but not active? That's an error.
raise RoutingError('access-denied')
# ferny.AskpassHandler
async def do_askpass(self, messages: str, prompt: str, hint: str) -> Optional[str]:
assert self.pending_prompt is None
echo = hint == "confirm"
self.pending_prompt = asyncio.get_running_loop().create_future()
try:
logger.debug('prompting for %s', prompt)
# with sudo, all stderr messages are treated as warning/errors by the UI
# (such as the lecture or "wrong password"), so pass them in the "error" field
self.prompt('', prompt, '', echo, messages)
return await self.pending_prompt
finally:
self.pending_prompt = None
def __init__(self, router: Router, *, privileged: bool = False):
super().__init__(router)
self.pending_prompt = None
self.peer = None
self.startup = None
if privileged or os.getuid() == 0:
self.current = 'root'
def peer_done(self):
self.current = 'none'
self.peer = None
async def go(self, name: str, responder: ferny.AskpassHandler) -> None:
if self.current != 'none':
raise bus.BusError('cockpit.Superuser.Error', 'Superuser bridge already running')
assert self.peer is None
assert self.startup is None
for config in self.superuser_configs:
if name in (config.name, 'any'):
break
else:
raise bus.BusError('cockpit.Superuser.Error', f'Unknown superuser bridge type "{name}"')
self.current = 'init'
self.peer = SuperuserPeer(self.router, config, responder)
self.peer.add_done_callback(self.peer_done)
try:
await self.peer.start(init_host=self.router.init_host)
except asyncio.CancelledError:
raise bus.BusError('cockpit.Superuser.Error.Cancelled', 'Operation aborted') from None
except (OSError, PeerError) as exc:
raise bus.BusError('cockpit.Superuser.Error', str(exc)) from exc
self.current = self.peer.config.name
def set_configs(self, configs: Sequence[BridgeConfig]):
logger.debug("set_configs() with %d items", len(configs))
configs = [config for config in configs if config.privileged]
self.superuser_configs = tuple(configs)
self.bridges = [config.name for config in self.superuser_configs]
self.methods = {c.label: Variant({'label': Variant(c.label)}, 'a{sv}') for c in configs if c.label}
logger.debug(" bridges are now %s", self.bridges)
# If the currently active bridge config is not in the new set of configs, stop it
if self.peer is not None:
if self.peer.config not in self.superuser_configs:
logger.debug(" stopping superuser bridge '%s': it disappeared from configs", self.peer.config.name)
self.stop()
def cancel_prompt(self):
if self.pending_prompt is not None:
self.pending_prompt.cancel()
self.pending_prompt = None
def shutdown(self):
self.cancel_prompt()
if self.peer is not None:
self.peer.close()
# close() should have disconnected the peer immediately
assert self.peer is None
# Connect-on-startup functionality
def init(self, params: JsonObject) -> None:
name = get_str(params, 'id', 'any')
responder = AuthorizeResponder(self.router)
self._init_task = asyncio.create_task(self.go(name, responder))
self._init_task.add_done_callback(self._init_done)
def _init_done(self, task: 'asyncio.Task[None]') -> None:
logger.debug('superuser init done! %s', task.exception())
self.router.write_control(command='superuser-init-done')
del self._init_task
# D-Bus methods
@bus.Interface.Method(in_types=['s'])
async def start(self, name: str) -> None:
await self.go(name, self)
@bus.Interface.Method()
def stop(self) -> None:
self.shutdown()
@bus.Interface.Method(in_types=['s'])
def answer(self, reply: str) -> None:
if self.pending_prompt is not None:
logger.debug('responding to pending prompt')
self.pending_prompt.set_result(reply)
else:
logger.debug('got Answer, but no prompt pending')
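
AuthorizeResponder above issues a 'plain1:' challenge carrying the hex-encoded user name. A sketch of the encoding and, for clarity, its inverse:

    def plain1_challenge(user: str) -> str:
        # hex-encode the ASCII user name, as AuthorizeResponder.do_askpass() does
        hexuser = ''.join(f'{c:02x}' for c in user.encode('ascii'))
        return f'plain1:{hexuser}'

    def plain1_user(challenge: str) -> str:
        assert challenge.startswith('plain1:')
        return bytes.fromhex(challenge[len('plain1:'):]).decode('ascii')

    assert plain1_challenge('admin') == 'plain1:61646d696e'
    assert plain1_user('plain1:61646d696e') == 'admin'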

View File

@ -1,552 +0,0 @@
# This file is part of Cockpit.
#
# Copyright (C) 2022 Red Hat, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""Bi-directional asyncio.Transport implementations based on file descriptors."""
import asyncio
import collections
import ctypes
import errno
import fcntl
import logging
import os
import select
import signal
import struct
import subprocess
import termios
from typing import Any, ClassVar, Sequence
from .jsonutil import JsonObject, get_int
libc6 = ctypes.cdll.LoadLibrary('libc.so.6')
def prctl(*args: int) -> None:
if libc6.prctl(*args) != 0:
raise OSError('prctl() failed')
SET_PDEATHSIG = 1
logger = logging.getLogger(__name__)
IOV_MAX = 1024 # man 2 writev
class _Transport(asyncio.Transport):
BLOCK_SIZE: ClassVar[int] = 1024 * 1024
# A transport always has a loop and a protocol
_loop: asyncio.AbstractEventLoop
_protocol: asyncio.Protocol
_queue: 'collections.deque[bytes] | None'
_in_fd: int
_out_fd: int
_closing: bool
_is_reading: bool
_eof: bool
_eio_is_eof: bool = False
def __init__(self,
loop: asyncio.AbstractEventLoop,
protocol: asyncio.Protocol,
in_fd: int = -1, out_fd: int = -1,
extra: 'dict[str, object] | None' = None):
super().__init__(extra)
self._loop = loop
self._protocol = protocol
logger.debug('Created transport %s for protocol %s, fds %d %d', self, protocol, in_fd, out_fd)
self._queue = None
self._is_reading = False
self._eof = False
self._closing = False
self._in_fd = in_fd
self._out_fd = out_fd
os.set_blocking(in_fd, False)
if out_fd != in_fd:
os.set_blocking(out_fd, False)
self._protocol.connection_made(self)
self.resume_reading()
def _read_ready(self) -> None:
logger.debug('Read ready on %s %s %d', self, self._protocol, self._in_fd)
try:
data = os.read(self._in_fd, _Transport.BLOCK_SIZE)
except BlockingIOError: # pragma: no cover
return
except OSError as exc:
if self._eio_is_eof and exc.errno == errno.EIO:
# PTY devices return EIO to mean "EOF"
data = b''
else:
# Other errors: terminate the connection
self.abort(exc)
return
if data != b'':
logger.debug(' read %d bytes', len(data))
self._protocol.data_received(data)
else:
logger.debug(' got EOF')
self._close_reader()
keep_open = self._protocol.eof_received()
if not keep_open:
self.close()
def is_reading(self) -> bool:
return self._is_reading
def _close_reader(self) -> None:
self.pause_reading()
self._in_fd = -1
def pause_reading(self) -> None:
if self._is_reading:
self._loop.remove_reader(self._in_fd)
self._is_reading = False
def resume_reading(self) -> None:
# It's possible that the Protocol could decide to attempt to unpause
# reading after _close_reader() got called. Check that the fd is != -1
# before actually resuming.
if not self._is_reading and self._in_fd != -1:
self._loop.add_reader(self._in_fd, self._read_ready)
self._is_reading = True
def _close(self) -> None:
pass
def abort(self, exc: 'Exception | None' = None) -> None:
self._closing = True
self._close_reader()
self._remove_write_queue()
self._protocol.connection_lost(exc)
self._close()
def can_write_eof(self) -> bool:
raise NotImplementedError
def write_eof(self) -> None:
assert not self._eof
self._eof = True
if self._queue is None:
logger.debug('%s got EOF. closing backend.', self)
self._write_eof_now()
else:
logger.debug('%s got EOF. bytes in queue, deferring close', self)
def get_write_buffer_size(self) -> int:
if self._queue is None:
return 0
return sum(len(block) for block in self._queue)
def get_write_buffer_limits(self) -> 'tuple[int, int]':
return (0, 0)
def set_write_buffer_limits(self, high: 'int | None' = None, low: 'int | None' = None) -> None:
assert high is None or high == 0
assert low is None or low == 0
def _write_eof_now(self) -> None:
raise NotImplementedError
def _write_ready(self) -> None:
logger.debug('%s _write_ready', self)
assert self._queue is not None
try:
n_bytes = os.writev(self._out_fd, self._queue)
except BlockingIOError: # pragma: no cover
n_bytes = 0
except OSError as exc:
self.abort(exc)
return
logger.debug(' successfully wrote %d bytes from the queue', n_bytes)
while n_bytes:
block = self._queue.popleft()
if len(block) > n_bytes:
# This block wasn't completely written.
logger.debug(' incomplete block. Stop.')
self._queue.appendleft(block[n_bytes:])
break
n_bytes -= len(block)
logger.debug(' removed complete block. %d remains.', n_bytes)
if not self._queue:
logger.debug('%s queue drained.', self)
self._remove_write_queue()
if self._eof:
logger.debug('%s queue drained. closing backend now.', self)
self._write_eof_now()
if self._closing:
self.abort()
def _remove_write_queue(self) -> None:
if self._queue is not None:
self._protocol.resume_writing()
self._loop.remove_writer(self._out_fd)
self._queue = None
def _create_write_queue(self, data: bytes) -> None:
logger.debug('%s creating write queue for fd %s', self, self._out_fd)
assert self._queue is None
self._loop.add_writer(self._out_fd, self._write_ready)
self._queue = collections.deque((data,))
self._protocol.pause_writing()
def write(self, data: bytes) -> None:
# this is a race condition with subprocesses: if we get and process the "exited"
# event before seeing BrokenPipeError, we'll try to write to a closed pipe.
# Do what the standard library does and ignore it, instead of asserting
if self._closing:
logger.debug('ignoring write() to closing transport fd %i', self._out_fd)
return
assert not self._eof
if self._queue is not None:
self._queue.append(data)
# writev() will complain if the queue is too long. Consolidate it.
if len(self._queue) > IOV_MAX:
all_data = b''.join(self._queue)
self._queue.clear()
self._queue.append(all_data)
return
try:
n_bytes = os.write(self._out_fd, data)
except BlockingIOError:
n_bytes = 0
except OSError as exc:
self.abort(exc)
return
if n_bytes != len(data):
self._create_write_queue(data[n_bytes:])
def close(self) -> None:
if self._closing:
return
self._closing = True
self._close_reader()
if self._queue is not None:
# abort() will be called from _write_ready() when it's done
return
self.abort()
def get_protocol(self) -> asyncio.BaseProtocol:
return self._protocol
def is_closing(self) -> bool:
return self._closing
def set_protocol(self, protocol: asyncio.BaseProtocol) -> None:
raise NotImplementedError
def __del__(self) -> None:
self._close()
class SubprocessProtocol(asyncio.Protocol):
"""An extension to asyncio.Protocol for use with SubprocessTransport."""
def process_exited(self) -> None:
"""Called when subprocess has exited."""
raise NotImplementedError
class WindowSize:
def __init__(self, value: JsonObject):
self.rows = get_int(value, 'rows')
self.cols = get_int(value, 'cols')
class SubprocessTransport(_Transport, asyncio.SubprocessTransport):
"""A bi-directional transport speaking with stdin/out of a subprocess.
Note: this is not really a normal SubprocessTransport. Although it
implements the entire API of asyncio.SubprocessTransport, it is not
designed to be used with asyncio.SubprocessProtocol objects. Instead, it
pairs with normal Protocol objects which also implement the
SubprocessProtocol defined in this module (which only has a
process_exited() method). Whatever the protocol writes is sent to stdin,
and whatever comes from stdout is given to the Protocol via the
.data_received() function.
If stderr is configured as a pipe, the transport will separately collect
data from it, making it available via the .get_stderr() method.
"""
_returncode: 'int | None' = None
_pty_fd: 'int | None' = None
_process: 'subprocess.Popen[bytes] | None' = None
_stderr: 'Spooler | None'
@staticmethod
def _create_watcher() -> asyncio.AbstractChildWatcher:
try:
os.close(os.pidfd_open(os.getpid(), 0)) # check for kernel support
return asyncio.PidfdChildWatcher()
except (AttributeError, OSError):
pass
return asyncio.SafeChildWatcher()
@staticmethod
def _get_watcher(loop: asyncio.AbstractEventLoop) -> asyncio.AbstractChildWatcher:
quark = '_cockpit_transports_child_watcher'
watcher = getattr(loop, quark, None)
if watcher is None:
watcher = SubprocessTransport._create_watcher()
watcher.attach_loop(loop)
setattr(loop, quark, watcher)
return watcher
def get_stderr(self, *, reset: bool = False) -> str:
if self._stderr is not None:
return self._stderr.get(reset=reset).decode(errors='replace')
else:
return ''
def _exited(self, pid: int, code: int) -> None:
# NB: per AbstractChildWatcher API, this handler should be thread-safe,
# but we only ever use non-threaded child watcher implementations, so
# we can assume we'll always be called in the main thread.
# NB: the subprocess is going to want to waitpid() itself as well, but
# will get ECHILD since we already reaped it. Fortunately, since
# Python 3.2 this is supported, and process gets a return status of
# zero. For that reason, we need to store our own copy of the return
# status. See https://github.com/python/cpython/issues/59960
assert isinstance(self._protocol, SubprocessProtocol)
assert self._process is not None
assert self._process.pid == pid
self._returncode = code
logger.debug('Process exited with status %d', self._returncode)
if not self._closing:
self._protocol.process_exited()
def __init__(self,
loop: asyncio.AbstractEventLoop,
protocol: SubprocessProtocol,
args: Sequence[str],
*,
pty: bool = False,
window: 'WindowSize | None' = None,
**kwargs: Any):
# go down as a team -- we don't want any leaked processes when the bridge terminates
def preexec_fn() -> None:
prctl(SET_PDEATHSIG, signal.SIGTERM)
if pty:
fcntl.ioctl(0, termios.TIOCSCTTY, 0)
if pty:
self._pty_fd, session_fd = os.openpty()
if window is not None:
self.set_window_size(window)
kwargs['stderr'] = session_fd
self._process = subprocess.Popen(args,
stdin=session_fd, stdout=session_fd,
preexec_fn=preexec_fn, start_new_session=True, **kwargs)
os.close(session_fd)
in_fd, out_fd = self._pty_fd, self._pty_fd
self._eio_is_eof = True
else:
self._process = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
preexec_fn=preexec_fn, **kwargs)
assert self._process.stdin
assert self._process.stdout
in_fd = self._process.stdout.fileno()
out_fd = self._process.stdin.fileno()
if self._process.stderr is not None:
self._stderr = Spooler(loop, self._process.stderr.fileno())
else:
self._stderr = None
super().__init__(loop, protocol, in_fd, out_fd)
self._get_watcher(loop).add_child_handler(self._process.pid, self._exited)
def set_window_size(self, size: WindowSize) -> None:
assert self._pty_fd is not None
fcntl.ioctl(self._pty_fd, termios.TIOCSWINSZ, struct.pack('2H4x', size.rows, size.cols))
def can_write_eof(self) -> bool:
assert self._process is not None
return self._process.stdin is not None
def _write_eof_now(self) -> None:
assert self._process is not None
assert self._process.stdin is not None
self._process.stdin.close()
self._out_fd = -1
def get_pid(self) -> int:
assert self._process is not None
return self._process.pid
def get_returncode(self) -> 'int | None':
return self._returncode
def get_pipe_transport(self, fd: int) -> asyncio.Transport:
raise NotImplementedError
def send_signal(self, sig: signal.Signals) -> None: # type: ignore[override] # mypy/issues/13885
assert self._process is not None
# We try to avoid using subprocess.send_signal(). It contains a call
# to waitpid() internally to avoid signalling the wrong process (if a
# PID gets reused), but:
#
# - we already detect the process exiting via our PidfdChildWatcher
#
# - the check is actually harmful since collecting the process via
# waitpid() prevents the PidfdChildWatcher from doing the same,
# resulting in an error.
#
# It's on us now to check it, but that's easy:
if self._returncode is not None:
logger.debug("won't attempt %s to process %i. It exited already.", sig, self._process.pid)
return
try:
os.kill(self._process.pid, sig)
logger.debug('sent %s to process %i', sig, self._process.pid)
except ProcessLookupError:
# already gone? fine
logger.debug("can't send %s to process %i. It's exited just now.", sig, self._process.pid)
def terminate(self) -> None:
self.send_signal(signal.SIGTERM)
def kill(self) -> None:
self.send_signal(signal.SIGKILL)
def _close(self) -> None:
if self._pty_fd is not None:
os.close(self._pty_fd)
self._pty_fd = None
if self._process is not None:
if self._process.stdin is not None:
self._process.stdin.close()
self._process.stdin = None
try:
self.terminate() # best effort...
except PermissionError:
logger.debug("can't kill %i due to EPERM", self._process.pid)
class StdioTransport(_Transport):
"""A bi-directional transport that corresponds to stdin/out.
Can talk to just about anything:
- files
- pipes
- character devices (including terminals)
- sockets
"""
def __init__(self, loop: asyncio.AbstractEventLoop, protocol: asyncio.Protocol, stdin: int = 0, stdout: int = 1):
super().__init__(loop, protocol, stdin, stdout)
def can_write_eof(self) -> bool:
return False
def _write_eof_now(self) -> None:
raise RuntimeError("Can't write EOF to stdout")
class Spooler:
"""Consumes data from an fd, storing it in a buffer.
This makes a copy of the fd, so you don't have to worry about holding it
open.
"""
_loop: asyncio.AbstractEventLoop
_fd: int
_contents: 'list[bytes]'
def __init__(self, loop: asyncio.AbstractEventLoop, fd: int):
self._loop = loop
self._fd = -1 # in case dup() raises an exception
self._contents = []
self._fd = os.dup(fd)
os.set_blocking(self._fd, False)
loop.add_reader(self._fd, self._read_ready)
def _read_ready(self) -> None:
try:
data = os.read(self._fd, 8192)
except BlockingIOError: # pragma: no cover
return
except OSError:
# all other errors -> EOF
data = b''
if data != b'':
self._contents.append(data)
else:
self.close()
def _is_ready(self) -> bool:
if self._fd == -1:
return False
return select.select([self._fd], [], [], 0) != ([], [], [])
def get(self, *, reset: bool = False) -> bytes:
while self._is_ready():
self._read_ready()
result = b''.join(self._contents)
if reset:
self._contents = []
return result
def close(self) -> None:
if self._fd != -1:
self._loop.remove_reader(self._fd)
os.close(self._fd)
self._fd = -1
def __del__(self) -> None:
self.close()
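
_write_ready() above pops fully written blocks off the queue and re-queues the tail of a partially written one; write() consolidates the queue once it exceeds IOV_MAX because writev() caps the vector length. The bookkeeping on its own:

    import collections

    IOV_MAX = 1024  # man 2 writev

    def account_written(queue: 'collections.deque[bytes]', n_bytes: int) -> None:
        # drop complete blocks; put back the unwritten remainder of a partial one
        while n_bytes:
            block = queue.popleft()
            if len(block) > n_bytes:
                queue.appendleft(block[n_bytes:])
                return
            n_bytes -= len(block)

    queue = collections.deque([b'aaaa', b'bbbb'])
    account_written(queue, 6)  # as if writev() reported 6 bytes written
    assert list(queue) == [b'bb']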

View File

@ -36,10 +36,8 @@ cockpit_ssh_SOURCES = src/ssh/ssh.c
# -----------------------------------------------------------------------------
# C bridge config; Python bridge handles it internally
if WITH_OLD_BRIDGE
sshmanifestdir = $(datadir)/cockpit/ssh
dist_sshmanifest_DATA = src/ssh/manifest.json
endif
# -----------------------------------------------------------------------------
# mock-ssh

View File

@ -132,8 +132,6 @@ test_kerberos_CPPFLAGS = $(libcockpit_ws_a_CPPFLAGS) $(TEST_CPP)
test_kerberos_LDADD = $(libcockpit_ws_a_LIBS) $(TEST_LIBS) $(krb5_LIBS)
test_kerberos_SOURCES = src/ws/test-kerberos.c
if WITH_OLD_BRIDGE
# These are -ws tests but they involve invoking ./cockpit-bridge.
TEST_PROGRAM += test-channelresponse
@ -158,7 +156,6 @@ test_authssh_CPPFLAGS = $(libcockpit_ws_a_CPPFLAGS) $(TEST_CPP)
test_authssh_LDADD = $(libcockpit_ws_a_LIBS) $(TEST_LIBS)
test_authssh_SOURCES = src/ws/test-authssh.c
endif
endif
noinst_PROGRAMS += mock-pam-conv-mod.so

View File

@ -1,63 +0,0 @@
import asyncio
import os
import subprocess
import sys
from typing import Iterable
import pytest
from cockpit import polyfills
from cockpit._vendor.systemd_ctypes import EventLoopPolicy
polyfills.install()
def any_subprocesses() -> bool:
# Make sure we don't leak subprocesses
try:
os.waitid(os.P_ALL, -1, os.WEXITED | os.WNOHANG | os.WNOWAIT)
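        # os.WNOWAIT leaves the child in a waitable state, so this probe
        # never reaps a process out from under the event loop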
except ChildProcessError:
return False # good !
else:
return True # at least one process (or zombie) still waitable
@pytest.fixture(autouse=True)
def event_loop(monkeypatch) -> Iterable[asyncio.AbstractEventLoop]:
loop = EventLoopPolicy().new_event_loop()
if sys.version_info < (3, 7, 0):
# Polyfills for Python 3.6:
def all_tasks(loop=loop):
return {t for t in asyncio.Task.all_tasks(loop=loop) if not t.done()}
monkeypatch.setattr(asyncio, 'get_running_loop', lambda: loop, raising=False)
monkeypatch.setattr(asyncio, 'create_task', loop.create_task, raising=False)
monkeypatch.setattr(asyncio, 'all_tasks', all_tasks, raising=False)
yield loop
# Let all tasks and subprocesses run to completion
for _ in range(200):
if not (asyncio.all_tasks(loop) or any_subprocesses()):
break
loop.run_until_complete(asyncio.sleep(0.005))
# No tasks left
assert asyncio.all_tasks(loop=loop) == set()
# No subprocesses left
if any_subprocesses():
# Bad news. Show some helpful output.
subprocess.run(['ps', 'f', f'--pid={os.getpid()}', f'--ppid={os.getpid()}'])
# clear it out for the sake of the other tests
subprocess.run(['pkill', '-9', '-P', f'{os.getpid()}'])
try:
for _ in range(100): # zombie vacuum
os.wait()
except ChildProcessError:
pass
pytest.fail('Some subprocesses still running!')
loop.close()

View File

@ -1,46 +0,0 @@
import asyncio
import os
import sys
from cockpit._vendor import systemd_ctypes
from cockpit.router import Router
from cockpit.transports import StdioTransport
class MockPeer(Router):
def do_send_init(self):
init_type = os.environ.get('INIT_TYPE', None)
if init_type == 'wrong-command':
self.write_control(command='xnit', version=1)
elif init_type == 'wrong-version':
self.write_control(command='init', version=2)
elif init_type == 'channel-control':
self.write_control(command='init', channel='x')
elif init_type == 'data':
self.write_channel_data('x', b'123')
elif init_type == 'break-protocol':
print('i like printf debugging', flush=True)
elif init_type == 'exit':
sys.exit()
elif init_type == 'exit-not-found':
# shell error code for "command not found"
sys.exit(127)
elif init_type != 'silence':
self.write_control(command='init', version=1)
def channel_control_received(self, channel, command, message):
if command == 'open':
self.write_control(command='ready', channel=channel)
def channel_data_received(self, channel, data):
pass
async def run():
protocol = MockPeer([])
StdioTransport(asyncio.get_running_loop(), protocol)
await protocol.communicate()
if __name__ == '__main__':
systemd_ctypes.run_async(run())
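# Illustrative invocation (assumed; the test suite sets INIT_TYPE through
# monkeypatch.setenv and spawns this file with sys.executable):
#     INIT_TYPE=wrong-version python3 mockpeer.py
# makes the peer answer the handshake with an unsupported protocol version.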

View File

@ -1,245 +0,0 @@
import asyncio
import json
from typing import Any, Dict, Iterable, Optional, Tuple
from cockpit.jsonutil import JsonObject, JsonValue
from cockpit.router import Router
MOCK_HOSTNAME = 'mockbox'
class MockTransport(asyncio.Transport):
queue: 'asyncio.Queue[Tuple[str, bytes]]'
next_id: int = 0
close_future: Optional[asyncio.Future] = None
async def assert_empty(self):
await asyncio.sleep(0.1)
assert self.queue.qsize() == 0
def send_json(self, _channel: str, **kwargs) -> None:
        # max_read_size is special and keeps its underscores; every other key maps '_' to '-'
msg = {k.replace('_', '-') if k != "max_read_size" else k: v for k, v in kwargs.items()}
self.send_data(_channel, json.dumps(msg).encode('ascii'))
def send_data(self, channel: str, data: bytes) -> None:
msg = channel.encode('ascii') + b'\n' + data
msg = str(len(msg)).encode('ascii') + b'\n' + msg
self.protocol.data_received(msg)
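    # Worked example (illustrative, not in the original file):
    # send_data('ch1', b'hello') emits the frame b'9\nch1\nhello' -- the
    # leading "9" is the byte length of "ch1" + "\n" + "hello".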
def send_init(self, version=1, host=MOCK_HOSTNAME, **kwargs):
self.send_json('', command='init', version=version, host=host, **kwargs)
def init(self, **kwargs: Any) -> Dict[str, object]:
channel, data = self.queue.get_nowait()
assert channel == ''
msg = json.loads(data)
assert msg['command'] == 'init'
self.send_init(**kwargs)
return msg
def get_id(self, prefix: str) -> str:
self.next_id += 1
return f'{prefix}.{self.next_id}'
def send_open(self, payload, channel=None, **kwargs):
if channel is None:
channel = self.get_id('channel')
self.send_json('', command='open', channel=channel, payload=payload, **kwargs)
return channel
async def check_open(
self,
payload,
channel=None,
problem=None,
reply_keys: Optional[JsonObject] = None,
absent_keys: 'Iterable[str]' = (),
**kwargs,
):
assert isinstance(self.protocol, Router)
ch = self.send_open(payload, channel, **kwargs)
if problem is None:
await self.assert_msg('', command='ready', channel=ch, absent_keys=absent_keys, **(reply_keys or {}))
# it's possible that the channel already closed
else:
await self.assert_msg('', command='close', channel=ch, problem=problem, absent_keys=absent_keys,
**(reply_keys or {}))
assert ch not in self.protocol.open_channels
return ch
def send_done(self, channel, **kwargs):
self.send_json('', command='done', channel=channel, **kwargs)
def send_close(self, channel, **kwargs):
self.send_json('', command='close', channel=channel, **kwargs)
async def check_close(self, channel, **kwargs):
self.send_close(channel, **kwargs)
await self.assert_msg('', command='close', channel=channel)
def send_ping(self, **kwargs):
self.send_json('', command='ping', **kwargs)
def __init__(self, protocol: asyncio.Protocol):
self.queue = asyncio.Queue()
self.protocol = protocol
protocol.connection_made(self)
def write(self, data: bytes) -> None:
# We know that the bridge only ever writes full frames at once, so we
# can disassemble them immediately.
_, channel, data = data.split(b'\n', 2)
self.queue.put_nowait((channel.decode('ascii'), data))
def stop(self, event_loop: Optional[asyncio.AbstractEventLoop] = None) -> None:
keep_open = self.protocol.eof_received()
if keep_open:
assert event_loop is not None
self.close_future = event_loop.create_future()
try:
event_loop.run_until_complete(self.close_future)
finally:
self.close_future = None
def close(self) -> None:
if self.close_future is not None:
self.close_future.set_result(None)
if self.protocol is not None:
self.protocol.connection_lost(None)
async def next_frame(self) -> Tuple[str, bytes]:
return await self.queue.get()
async def next_msg(self, expected_channel) -> Dict[str, Any]:
channel, data = await self.next_frame()
assert channel == expected_channel, data
return json.loads(data)
async def assert_data(self, expected_channel: str, expected_data: bytes) -> None:
channel, data = await self.next_frame()
assert channel == expected_channel
assert data == expected_data
async def assert_msg(self, expected_channel: str, absent_keys: 'Iterable[str]' = (),
**kwargs: JsonValue) -> JsonObject:
msg = await self.next_msg(expected_channel)
assert msg == dict(msg, **{k.replace('_', '-'): v for k, v in kwargs.items()}), msg
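        # (msg == dict(msg, **expected) is a subset check: every expected
        # key/value must appear in msg; extra keys in msg are fine)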
for absent_key in absent_keys:
assert absent_key not in msg
return msg
# D-Bus helpers
internal_bus: str = ''
async def ensure_internal_bus(self):
if not self.internal_bus:
self.internal_bus = await self.check_open('dbus-json3', bus='internal')
assert self.protocol.open_channels[self.internal_bus].bus == self.protocol.internal_bus.client
return self.internal_bus
def send_bus_call(self, bus: str, path: str, iface: str, name: str, args: list) -> str:
tag = self.get_id('call')
self.send_json(bus, call=[path, iface, name, args], id=tag)
return tag
async def assert_bus_reply(
self,
tag: str,
expected_reply: Optional[list] = None,
bus: Optional[str] = None,
) -> list:
if bus is None:
bus = await self.ensure_internal_bus()
reply = await self.next_msg(bus)
assert 'id' in reply, reply
assert reply['id'] == tag, reply
assert 'reply' in reply, reply
if expected_reply is not None:
assert reply['reply'] == [expected_reply]
return reply['reply'][0]
async def assert_bus_error(self, tag: str, code: str, message: str, bus: Optional[str] = None) -> None:
if bus is None:
bus = await self.ensure_internal_bus()
reply = await self.next_msg(bus)
assert 'id' in reply, reply
assert reply['id'] == tag, reply
assert 'error' in reply, reply
assert reply['error'] == [code, [message]], reply['error']
async def check_bus_call(
self,
path: str,
iface: str,
name: str,
args: list,
expected_reply: Optional[list] = None,
bus: Optional[str] = None,
) -> list:
if bus is None:
bus = await self.ensure_internal_bus()
tag = self.send_bus_call(bus, path, iface, name, args)
return await self.assert_bus_reply(tag, expected_reply, bus=bus)
async def assert_bus_props(
self, path: str, iface: str, expected_values: JsonObject, bus: Optional[str] = None
) -> None:
(values,) = await self.check_bus_call(path, 'org.freedesktop.DBus.Properties', 'GetAll', [iface], bus=bus)
for key, value in expected_values.items():
assert values[key]['v'] == value
async def assert_bus_meta(
self,
path: str,
iface: str,
expected: Iterable[str],
bus: Optional[str] = None,
) -> None:
if bus is None:
bus = await self.ensure_internal_bus()
meta = await self.next_msg(bus)
assert 'meta' in meta, meta
assert set(meta['meta'][iface]['properties']) == set(expected)
async def assert_bus_notify(
self,
path: str,
iface: str,
expected: JsonObject,
bus: Optional[str] = None,
) -> None:
if bus is None:
bus = await self.ensure_internal_bus()
notify = await self.next_msg(bus)
assert 'notify' in notify
assert notify['notify'][path][iface] == expected
async def watch_bus(self, path: str, iface: str, expected: JsonObject, bus: Optional[str] = None) -> None:
if bus is None:
bus = await self.ensure_internal_bus()
tag = self.get_id('watch')
self.send_json(bus, watch={'path': path, 'interface': iface}, id=tag)
await self.assert_bus_meta(path, iface, expected, bus)
await self.assert_bus_notify(path, iface, expected, bus)
await self.assert_msg(bus, id=tag, reply=[])
async def assert_bus_signal(
self,
path: str,
iface: str,
name: str,
args: list,
bus: Optional[str] = None,
) -> None:
if bus is None:
bus = await self.ensure_internal_bus()
signal = await self.next_msg(bus)
assert 'signal' in signal, signal
assert signal['signal'] == [path, iface, name, args]
async def add_bus_match(self, path: str, iface: str, bus: Optional[str] = None) -> None:
if bus is None:
bus = await self.ensure_internal_bus()
self.send_json(bus, add_match={'path': path, 'interface': iface})

View File

@ -1,18 +0,0 @@
import os
import sys
from cockpit._vendor.ferny import interaction_client
pw = os.environ.get('PSEUDO_PASSWORD')
if pw:
reader, writer = os.pipe()
# '-' is the (ignored) argv[0], and 'can haz pw' is the message in argv[1]
interaction_client.askpass(2, writer, ['-', 'can haz pw?'], {})
os.close(writer)
response = os.read(reader, 1024).decode('utf-8').strip()
if response != pw:
sys.stderr.write('pseudo says: Bad password\n')
sys.exit(1)
os.execvp(sys.argv[1], sys.argv[1:])
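# Illustrative use (assumed, based on the code above): wrap a command so that
# it only runs once the right password has been supplied via the ferny
# askpass protocol on fd 2, e.g.
#     PSEUDO_PASSWORD=foobar python3 pseudo.py cockpit-bridge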

View File

@ -1,28 +0,0 @@
import sys
import pytest
from cockpit._vendor import ferny
from cockpit._vendor.bei import bootloader
from cockpit.beipack import BridgeBeibootHelper
from cockpit.peer import Peer
from cockpit.router import Router
class BeibootPeer(Peer):
async def do_connect_transport(self) -> None:
helper = BridgeBeibootHelper(self)
agent = ferny.InteractionAgent([helper])
transport = await self.spawn([sys.executable, '-iq'], env=[], stderr=agent)
transport.write(bootloader.make_bootloader(helper.steps, gadgets=ferny.BEIBOOT_GADGETS).encode())
await agent.communicate()
@pytest.mark.asyncio
async def test_bridge_beiboot():
# Try to beiboot a copy of the bridge and read its init message
peer = BeibootPeer(Router([]))
init_msg = await peer.start()
assert init_msg['version'] == 1
assert 'packages' not in init_msg
peer.close()

File diff suppressed because it is too large

View File

@ -1,48 +0,0 @@
import glob
import os
import subprocess
import sys
from typing import Iterable
import pytest
SRCDIR = os.path.realpath(f'{__file__}/../../..')
BUILDDIR = os.environ.get('abs_builddir', SRCDIR)
SKIP = {
'base1/test-dbus-address.html',
}
XFAIL = {
'base1/test-websocket.html',
}
# glob.glob() only gained its root_dir parameter in Python 3.10 ("Changed in
# version 3.10: Added the root_dir and dir_fd parameters"), so provide a
# small shim for older interpreters:
def glob_py310(fnmatch: str, *, root_dir: str, recursive: bool = False) -> Iterable[str]:
prefix = f'{root_dir}/'
prefixlen = len(prefix)
for result in glob.glob(f'{prefix}{fnmatch}', recursive=recursive):
assert result.startswith(prefix)
yield result[prefixlen:]
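# e.g. (illustrative) glob_py310('**/test-*.html', root_dir='/src/qunit',
# recursive=True) yields 'base1/test-websocket.html'-style paths relative to
# root_dir, matching glob.glob(..., root_dir=...) on Python 3.10+.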
@pytest.mark.parametrize('html', glob_py310('**/test-*.html', root_dir=f'{SRCDIR}/qunit', recursive=True))
def test_browser(html):
if not os.path.exists(f'{BUILDDIR}/test-server'):
pytest.skip('no test-server')
if html in SKIP:
pytest.skip()
elif html in XFAIL:
pytest.xfail()
if 'COVERAGE_RCFILE' in os.environ:
coverage = ['coverage', 'run', '--parallel-mode', '--module']
else:
coverage = []
# Merge 2>&1 so that pytest displays an interleaved log
subprocess.run(['test/common/tap-cdp', f'{BUILDDIR}/test-server',
sys.executable, '-m', *coverage, 'cockpit.bridge', '--debug',
f'./qunit/{html}'], check=True, stderr=subprocess.STDOUT)

View File

@ -1,286 +0,0 @@
# This file is part of Cockpit.
#
# Copyright (C) 2023 Red Hat, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import json
import pytest
from cockpit.packages import Packages, parse_accept_language
@pytest.mark.parametrize(("test_input", "expected"), [
    # correctly handles empty values
('', ()),
(' ', ()),
(' , ', ()),
(' , ,xx', ('xx',)),
# english → empty list
('en', ()),
(' , en', ()),
# invalid q values get ignored
('aa;q===,bb;q=abc,cc;q=.,zz', ('zz',)),
# variant-peeling works
('aa-bb-cc-dd,ee-ff-gg-hh', ('aa-bb-cc-dd', 'aa-bb-cc', 'aa-bb', 'aa', 'ee-ff-gg-hh', 'ee-ff-gg', 'ee-ff', 'ee')),
# sorting and english-truncation are working
('fr-ch;q=0.8,es-mx;q=1.0,en-ca;q=0.9', ('es-mx', 'es', 'en-ca')),
('de-at, zh-CN, en,', ('de-at', 'de', 'zh-cn', 'zh')),
('es-es, nl;q=0.8, fr;q=0.9', ('es-es', 'es', 'fr', 'nl')),
('fr-CH, fr;q=0.9, en;q=0.8, de;q=0.7, *;q=0.5', ('fr-ch', 'fr'))
])
def test_parse_accept_language(test_input: str, expected: 'tuple[str]') -> None:
assert parse_accept_language(test_input) == expected
@pytest.fixture
def pkgdir(tmp_path, monkeypatch):
monkeypatch.setenv('XDG_DATA_DIRS', str(tmp_path))
monkeypatch.setenv('XDG_DATA_HOME', '/nonexisting')
self = tmp_path / 'cockpit'
self.mkdir()
make_package(self, 'basic', description="standard package", requires={"cockpit": "42"})
return self
@pytest.fixture
def confdir(tmp_path, monkeypatch):
monkeypatch.setenv('XDG_CONFIG_DIRS', str(tmp_path))
return tmp_path / 'cockpit'
def make_package(pkgdir, dirname: str, **kwargs: object) -> None:
(pkgdir / dirname).mkdir()
with (pkgdir / dirname / 'manifest.json').open('w') as file:
json.dump(kwargs, file, indent=2)
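# e.g. (illustrative) make_package(pkgdir, 'basic', priority=1) creates
# <pkgdir>/basic/manifest.json containing {"priority": 1}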
def test_basic(pkgdir):
packages = Packages()
assert len(packages.packages) == 1
assert packages.packages['basic'].name == 'basic'
assert packages.packages['basic'].manifest['description'] == 'standard package'
assert packages.packages['basic'].manifest['requires'] == {'cockpit': "42"}
assert packages.packages['basic'].priority == 1
assert packages.manifests == '{"basic": {"description": "standard package", "requires": {"cockpit": "42"}}}'
def test_override_etc(pkgdir, confdir):
(confdir / 'basic.override.json').write_text('{"description": null, "priority": 5, "does-not-exist": null}')
packages = Packages()
assert len(packages.packages) == 1
# original attributes
assert packages.packages['basic'].name == 'basic'
assert packages.packages['basic'].manifest['requires'] == {'cockpit': '42'}
# overridden attributes
assert 'description' not in packages.packages['basic'].manifest
assert packages.packages['basic'].priority == 5
assert json.loads(packages.manifests) == {
'basic': {
'requires': {'cockpit': '42'},
'priority': 5,
}
}
def test_priority(pkgdir):
make_package(pkgdir, 'vip', name='basic', description='VIP', priority=100)
make_package(pkgdir, 'guest', description='Guest')
packages = Packages()
assert len(packages.packages) == 2
assert packages.packages['basic'].name == 'basic'
assert packages.packages['basic'].priority == 100
assert packages.packages['basic'].manifest['description'] == 'VIP'
assert packages.packages['guest'].name == 'guest'
assert packages.packages['guest'].priority == 1
parsed = json.loads(packages.manifests)
assert parsed['basic'] == {'name': 'basic', 'description': 'VIP', 'priority': 100}
assert parsed['guest'] == {'description': 'Guest'}
def test_conditions(pkgdir):
make_package(pkgdir, 'empty', conditions=[])
# path-exists only
make_package(pkgdir, 'exists-1-yes', conditions=[{'path-exists': '/usr'}])
make_package(pkgdir, 'exists-1-no', conditions=[{'path-exists': '/nonexisting'}])
make_package(pkgdir, 'exists-2-yes', conditions=[{"path-exists": "/usr"},
{"path-exists": "/bin/sh"}])
make_package(pkgdir, 'exists-2-no', conditions=[{"path-exists": "/usr"},
{"path-exists": "/nonexisting"}])
# path-not-exists only
make_package(pkgdir, 'notexists-1-yes', conditions=[{"path-not-exists": "/nonexisting"}])
make_package(pkgdir, 'notexists-1-no', conditions=[{"path-not-exists": "/usr"}])
make_package(pkgdir, 'notexists-2-yes', conditions=[{"path-not-exists": "/nonexisting"},
{"path-not-exists": "/obscure"}])
make_package(pkgdir, 'notexists-2-no', conditions=[{"path-not-exists": "/nonexisting"},
{"path-not-exists": "/usr"}])
# mixed
make_package(pkgdir, 'mixed-yes', conditions=[{"path-exists": "/usr"},
{"path-not-exists": "/nonexisting"}])
make_package(pkgdir, 'mixed-no', conditions=[{"path-exists": "/nonexisting"},
{"path-not-exists": "/obscure"}])
packages = Packages()
assert set(packages.packages.keys()) == {
'basic', 'empty', 'exists-1-yes', 'exists-2-yes', 'notexists-1-yes', 'notexists-2-yes', 'mixed-yes'
}
def test_conditions_errors(pkgdir):
make_package(pkgdir, 'broken-syntax-1', conditions=[1])
make_package(pkgdir, 'broken-syntax-2', conditions=[["path-exists"]])
make_package(pkgdir, 'broken-syntax-3', conditions=[{"path-exists": "/foo", "path-not-exists": "/bar"}])
make_package(pkgdir, 'unknown-predicate-good', conditions=[{"path-exists": "/usr"},
{"frobnicated": True}])
make_package(pkgdir, 'unknown-predicate-bad', conditions=[{"path-exists": "/nonexisting"},
{"frobnicated": True}])
packages = Packages()
assert set(packages.packages.keys()) == {'basic', 'unknown-predicate-good'}
def test_condition_hides_priority(pkgdir):
make_package(pkgdir, 'vip', name="basic", description="VIP", priority=100,
conditions=[{"path-exists": "/nonexisting"}])
packages = Packages()
assert packages.packages['basic'].name == 'basic'
assert packages.packages['basic'].manifest['description'] == 'standard package'
assert packages.packages['basic'].manifest['requires'] == {'cockpit': "42"}
assert packages.packages['basic'].priority == 1
def test_english_translation(pkgdir):
make_package(pkgdir, 'one')
(pkgdir / 'one' / 'po.de.js').write_text('eins')
packages = Packages()
# make sure we get German
document = packages.load_path('/one/po.js', {'Accept-Language': 'de'})
assert '/javascript' in document.content_type
assert document.data.read() == b'eins'
# make sure we get German here (higher q-value) even with English first
document = packages.load_path('/one/po.js', {'Accept-Language': 'en;q=0.9, de-ch'})
assert '/javascript' in document.content_type
assert document.data.read() == b'eins'
# make sure we get the empty ("English") translation, and not German
document = packages.load_path('/one/po.js', {'Accept-Language': 'en, de'})
assert '/javascript' in document.content_type
assert document.data.read() == b''
document = packages.load_path('/one/po.js', {'Accept-Language': 'de;q=0.9, fr;q=0.7, en'})
assert '/javascript' in document.content_type
assert document.data.read() == b''
document = packages.load_path('/one/po.js', {'Accept-Language': 'de;q=0.9, fr, en-ca'})
assert '/javascript' in document.content_type
assert document.data.read() == b''
document = packages.load_path('/one/po.js', {'Accept-Language': ''})
assert '/javascript' in document.content_type
assert document.data.read() == b''
document = packages.load_path('/one/po.js', {})
assert '/javascript' in document.content_type
assert document.data.read() == b''
def test_translation(pkgdir):
# old style: make sure po.de.js is served as fallback for manifest translations
make_package(pkgdir, 'one')
(pkgdir / 'one' / 'po.de.js').write_text('eins')
# new style: separated translations
make_package(pkgdir, 'two')
(pkgdir / 'two' / 'po.de.js').write_text('zwei')
(pkgdir / 'two' / 'po.manifest.de.js').write_text('zwo')
packages = Packages()
# make sure we can read a po.js file with language fallback
document = packages.load_path('/one/po.js', {'Accept-Language': 'es, de'})
assert '/javascript' in document.content_type
assert document.data.read() == b'eins'
# make sure we fall back cleanly to an empty file with correct mime
document = packages.load_path('/one/po.js', {'Accept-Language': 'es'})
assert '/javascript' in document.content_type
assert document.data.read() == b''
# make sure the manifest translations get sent along with manifests.js
document = packages.load_path('/manifests.js', {'Accept-Language': 'de'})
contents = document.data.read()
assert b'eins\n' in contents
assert b'zwo\n' in contents
assert b'zwei\n' not in contents
def test_filename_mangling(pkgdir):
make_package(pkgdir, 'one')
# test various filename variations
(pkgdir / 'one' / 'one.js').write_text('this is one.js')
(pkgdir / 'one' / 'two.js.gz').write_text('this is two.js')
(pkgdir / 'one' / 'three.min.js.gz').write_text('this is three.js')
(pkgdir / 'one' / 'four.min.js').write_text('this is four.js')
packages = Packages()
encodings = set()
for name in ['one', 'two', 'three', 'four']:
document = packages.load_path(f'/one/{name}.js', {})
assert document.data.read().decode() == f'this is {name}.js'
assert '/javascript' in document.content_type
encodings.add(document.content_encoding)
assert encodings == {None, 'gzip'} # make sure we saw both compressed and uncompressed
def test_overlapping_minified(pkgdir):
make_package(pkgdir, 'one')
(pkgdir / 'one' / 'one.min.js').write_text('min')
(pkgdir / 'one' / 'one.js').write_text('max')
    # try the other way around, in the hope of listing the files in reverse order
(pkgdir / 'one' / 'two.js').write_text('max')
(pkgdir / 'one' / 'two.min.js').write_text('min')
packages = Packages()
# if both files are present, we should find the original one
document = packages.load_path('/one/one.js', {})
assert document.data.read().decode() == 'max'
document = packages.load_path('/one/two.js', {})
assert document.data.read().decode() == 'max'
# but requesting .min. explicitly will load it
document = packages.load_path('/one/one.min.js', {})
assert document.data.read().decode() == 'min'
document = packages.load_path('/one/two.min.js', {})
assert document.data.read().decode() == 'min'

View File

@ -1,214 +0,0 @@
import asyncio
import os
import sys
import time
import pytest
from cockpit.channel import ChannelError
from cockpit.packages import BridgeConfig
from cockpit.peer import ConfiguredPeer, PeerRoutingRule
from cockpit.protocol import CockpitProtocolError
from cockpit.router import Router
from cockpit.transports import SubprocessTransport
from . import mockpeer
from .mocktransport import MockTransport
PEER_CONFIG = BridgeConfig({
"spawn": [sys.executable, mockpeer.__file__],
"environ": ['PYTHONPATH=' + ':'.join(sys.path)],
"match": {"payload": "test"},
})
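# Any channel opened with payload "test" is routed to the mock peer above;
# opening any other payload (the tests use "xest") fails with "not-supported".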
class Bridge(Router):
init_host = 'localhost'
def __init__(self):
rule = PeerRoutingRule(self, PEER_CONFIG)
super().__init__([rule])
def do_send_init(self):
pass
@pytest.fixture
def bridge():
return Bridge()
@pytest.fixture
def transport(bridge):
return MockTransport(bridge)
@pytest.fixture
def rule(bridge):
return bridge.routing_rules[0]
@pytest.mark.asyncio
async def test_shutdown(transport, rule):
await transport.check_open('test')
await transport.check_open('xest', problem='not-supported')
# Force the Peer closed
rule.peer.close()
await transport.assert_msg('', command='close', channel='channel.1', problem='terminated')
# But it should spawn again
await transport.check_open('test')
await transport.check_open('xest', problem='not-supported')
rule.peer.close()
@pytest.mark.asyncio
@pytest.mark.parametrize('init_type', ['wrong-command', 'channel-control', 'data', 'break-protocol'])
async def test_init_failure(rule, init_type, monkeypatch, transport):
monkeypatch.setenv('INIT_TYPE', init_type)
await transport.check_open('test', problem='protocol-error')
@pytest.mark.asyncio
async def test_immediate_shutdown(rule):
peer = rule.apply_rule({'payload': 'test'})
assert peer is not None
peer.close()
@pytest.mark.asyncio
async def test_shutdown_before_init(monkeypatch, transport, rule):
monkeypatch.setenv('INIT_TYPE', 'silence')
channel = transport.send_open('test')
assert rule.peer is not None
assert rule.peer.transport is None
while rule.peer.transport is None:
await asyncio.sleep(0)
rule.peer.close()
await transport.assert_msg('', command='close', channel=channel, problem='terminated')
@pytest.mark.asyncio
async def test_exit_without_init(monkeypatch, transport):
monkeypatch.setenv('INIT_TYPE', 'exit')
await transport.check_open('test', problem='terminated')
@pytest.mark.asyncio
async def test_exit_not_found(monkeypatch, transport):
monkeypatch.setenv('INIT_TYPE', 'exit-not-found')
await transport.check_open('test', problem='no-cockpit')
@pytest.mark.asyncio
async def test_killed(monkeypatch, transport, rule):
channel = await transport.check_open('test')
os.kill(rule.peer.transport._process.pid, 9)
await transport.assert_msg('', command='close', channel=channel, problem='terminated')
@pytest.mark.asyncio
@pytest.mark.parametrize('init_type', ['wrong-command', 'channel-control', 'data', 'break-protocol'])
async def test_await_failure(init_type, monkeypatch, bridge):
monkeypatch.setenv('INIT_TYPE', init_type)
peer = ConfiguredPeer(bridge, PEER_CONFIG)
with pytest.raises(CockpitProtocolError):
await peer.start()
peer.close()
@pytest.mark.asyncio
async def test_await_broken_connect(bridge):
class BrokenConnect(ConfiguredPeer):
async def do_connect_transport(self):
_ = 42 / 0
peer = BrokenConnect(bridge, PEER_CONFIG)
with pytest.raises(ZeroDivisionError):
await peer.start()
peer.close()
@pytest.mark.asyncio
async def test_await_broken_after_connect(bridge):
class BrokenConnect(ConfiguredPeer):
async def do_connect_transport(self):
await super().do_connect_transport()
_ = 42 / 0
peer = BrokenConnect(bridge, PEER_CONFIG)
with pytest.raises(ZeroDivisionError):
await peer.start()
peer.close()
class CancellableConnect(ConfiguredPeer):
was_cancelled = False
async def do_connect_transport(self):
await super().do_connect_transport()
try:
# We should get cancelled here when the mockpeer sends "init"
await asyncio.sleep(10000)
except asyncio.CancelledError:
self.was_cancelled = True
raise
@pytest.mark.asyncio
async def test_await_cancellable_connect_init(bridge):
peer = CancellableConnect(bridge, PEER_CONFIG)
await peer.start()
peer.close()
while len(asyncio.all_tasks()) > 1:
await asyncio.sleep(0.1)
assert peer.was_cancelled
@pytest.mark.asyncio
async def test_await_cancellable_connect_close(monkeypatch, event_loop, bridge):
monkeypatch.setenv('INIT_TYPE', 'silence') # make sure we never get "init"
peer = CancellableConnect(bridge, PEER_CONFIG)
event_loop.call_later(0.1, peer.close) # call peer.close() after .start() is running
with pytest.raises(asyncio.CancelledError):
await peer.start()
# we already called .close()
while len(asyncio.all_tasks()) > 1:
await asyncio.sleep(0.1)
assert peer.was_cancelled
@pytest.mark.asyncio
async def test_spawn_broken_pipe(bridge):
class BrokenPipePeer(ConfiguredPeer):
def __init__(self, *, specific_error=False):
super().__init__(bridge, PEER_CONFIG)
self.specific_error = specific_error
async def do_connect_transport(self) -> None:
transport = await self.spawn(['sh', '-c', 'read a; exit 9'], ())
assert isinstance(transport, SubprocessTransport)
# Make the process exit by writing a newline (causing `read` to finish)
transport.write(b'\n')
# The process will exit soon — try writing to it until a write fails.
while not transport.is_closing():
transport.write(b'x')
time.sleep(0.1)
while transport.get_returncode() is None:
await asyncio.sleep(0.1)
if self.specific_error:
raise ChannelError('not-supported', message='kaputt')
# BrokenPipe bubbles up without an error returned by do_connect_transport
peer = BrokenPipePeer(specific_error=False)
with pytest.raises(BrokenPipeError):
await peer.start()
peer.close()
# BrokenPipe gets trumped by specific error returned by do_connect_transport
peer = BrokenPipePeer(specific_error=True)
with pytest.raises(ChannelError) as raises:
await peer.start()
assert raises.value.attrs == {'message': 'kaputt', 'problem': 'not-supported'}
peer.close()

View File

@ -1,158 +0,0 @@
# This file is part of Cockpit.
#
# Copyright (C) 2022 Red Hat, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import collections
import multiprocessing
import numbers
import os
import pytest
import cockpit.samples
@pytest.fixture
def hwmon_mock(tmpdir_factory, monkeypatch):
hwmon_dir = tmpdir_factory.mktemp('hwmon')
monkeypatch.setattr(cockpit.samples, "HWMON_PATH", str(hwmon_dir))
# hwmon1 - no name
(hwmon_dir / 'hwmon1').mkdir()
# hwmon2 - no label (on ARM)
hwmon2_dir = hwmon_dir / 'hwmon2'
hwmon2_dir.mkdir()
with open(hwmon2_dir / 'name', 'w') as fp:
fp.write('cpu_thermal')
with open(hwmon2_dir / 'temp1_input', 'w') as fp:
fp.write('32000')
# hwmon3 - AMD workaround #18098
hwmon3_dir = hwmon_dir / 'hwmon3'
hwmon3_dir.mkdir()
with open(hwmon3_dir / 'name', 'w') as fp:
fp.write('k10temp')
with open(hwmon3_dir / 'temp1_input', 'w') as fp:
fp.write('27500')
with open(hwmon3_dir / 'temp1_label', 'w') as fp:
fp.write('Tctl')
with open(hwmon3_dir / 'temp3_input', 'w') as fp:
fp.write('37000')
with open(hwmon3_dir / 'temp3_label', 'w') as fp:
fp.write('Tccd1')
# hwmon4 - Intel coretemp
hwmon4_dir = hwmon_dir / 'hwmon4'
hwmon4_dir.mkdir()
with open(hwmon4_dir / 'name', 'w') as fp:
fp.write('coretemp')
with open(hwmon4_dir / 'temp1_input', 'w') as fp:
fp.write('47000')
with open(hwmon4_dir / 'temp1_label', 'w') as fp:
fp.write('Package id 0')
with open(hwmon4_dir / 'temp2_input', 'w') as fp:
fp.write('46000')
with open(hwmon4_dir / 'temp2_label', 'w') as fp:
fp.write('Core 0')
with open(hwmon4_dir / 'temp3_input', 'w') as fp:
fp.write('46000')
with open(hwmon4_dir / 'temp3_label', 'w') as fp:
fp.write('Core 1')
with open(hwmon4_dir / 'temp4_input', 'w') as fp:
fp.write('46000')
with open(hwmon4_dir / 'temp4_label', 'w') as fp:
fp.write('Core 2')
return hwmon_dir
def get_checked_samples(sampler: cockpit.samples.Sampler) -> cockpit.samples.Samples:
cls = sampler.__class__
samples: cockpit.samples.Samples = collections.defaultdict(dict)
sampler.sample(samples)
assert set(samples) == {descr.name for descr in cls.descriptions}
for descr in cls.descriptions:
sample = samples[descr.name]
if descr.instanced:
assert isinstance(sample, dict)
else:
assert isinstance(sample, numbers.Real)
return samples
def test_descriptions():
for cls in cockpit.samples.SAMPLERS:
# currently broken in containers with no cgroups or temperatures present
if cls in [cockpit.samples.CGroupSampler, cockpit.samples.CPUTemperatureSampler]:
continue
get_checked_samples(cls())
def test_cgroup_descriptions():
if not os.path.exists('/sys/fs/cgroup/system.slice'):
pytest.xfail('No cgroups present')
get_checked_samples(cockpit.samples.CGroupSampler())
def test_temperature_descriptions():
samples = collections.defaultdict(dict)
cockpit.samples.CPUTemperatureSampler().sample(samples)
if not samples:
pytest.xfail('No CPU temperature present')
get_checked_samples(cockpit.samples.CPUTemperatureSampler())
def test_cpu():
samples = get_checked_samples(cockpit.samples.CPUSampler())
assert len(samples['cpu.core.user']) == multiprocessing.cpu_count()
def test_cpu_temperature(hwmon_mock):
samples = collections.defaultdict(dict)
cockpit.samples.CPUTemperatureSampler().sample(samples)
samples = get_checked_samples(cockpit.samples.CPUTemperatureSampler())
for name, temperature in samples['cpu.temperature'].items():
# no name
assert 'hwmon1' not in name
assert 20 < temperature < 50
expected = ['hwmon4/temp4_input', 'hwmon4/temp3_input', 'hwmon4/temp2_input',
'hwmon4/temp1_input', 'hwmon3/temp3_input', 'hwmon3/temp1_input',
'hwmon2/temp1_input']
sensors = [os.path.relpath(p, start=hwmon_mock) for p in samples['cpu.temperature']]
assert sorted(sensors) == sorted(expected)
def test_cgroup_disk_io():
samples = collections.defaultdict(dict)
cockpit.samples.CGroupDiskIO().sample(samples)
samples = get_checked_samples(cockpit.samples.CGroupDiskIO())
assert len(samples['disk.cgroup.read']) == len(samples['disk.cgroup.written'])
for cgroup in samples['disk.cgroup.read']:
assert samples['disk.cgroup.read'][cgroup] >= 0
assert samples['disk.cgroup.written'][cgroup] >= 0

View File

@ -1,392 +0,0 @@
# This file is part of Cockpit.
#
# Copyright (C) 2022 Red Hat, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import asyncio
import contextlib
import errno
import os
import signal
import subprocess
import unittest.mock
from typing import Any, List, Optional, Tuple
import pytest
import cockpit.transports
class Protocol(cockpit.transports.SubprocessProtocol):
transport: Optional[asyncio.Transport] = None
paused: bool = False
sent: int = 0
received: int = 0
exited: bool = False
close_on_eof: bool = True
eof: bool = False
exc: Optional[Exception] = None
output: Optional[List[bytes]] = None
def connection_made(self, transport: asyncio.BaseTransport) -> None:
assert isinstance(transport, asyncio.Transport)
self.transport = transport
def connection_lost(self, exc: Optional[Exception] = None) -> None:
self.transport = None
self.exc = exc
def data_received(self, data: bytes) -> None:
if self.output is not None:
self.output.append(data)
self.received += len(data)
def eof_received(self) -> bool:
self.eof = True
return not self.close_on_eof
def pause_writing(self) -> None:
self.paused = True
def write_until_backlogged(self) -> None:
while not self.paused:
self.write(b'a' * 4096)
def write(self, data: bytes) -> None:
assert self.transport is not None
self.transport.write(data)
self.sent += len(data)
def write_a_lot(self) -> None:
assert self.transport is not None
self.write_until_backlogged()
assert self.transport.get_write_buffer_size() != 0
for _ in range(20):
self.write(b'b' * 1024 * 1024)
assert self.transport.get_write_buffer_size() > 20 * 1024 * 1024
def process_exited(self) -> None:
self.exited = True
def get_output(self) -> bytes:
assert self.output is not None
return b''.join(self.output)
async def eof_and_exited_with_code(self, returncode) -> None:
self.close_on_eof = False # otherwise we won't get process_exited()
transport = self.transport
assert isinstance(transport, cockpit.transports.SubprocessTransport)
while not self.exited or not self.eof:
await asyncio.sleep(0.1)
assert transport.get_returncode() == returncode
class TestSpooler:
@pytest.mark.asyncio
async def test_bad_fd(self) -> None:
# Make sure failing to construct succeeds without further failures
loop = asyncio.get_running_loop()
with pytest.raises(OSError) as raises:
cockpit.transports.Spooler(loop, -1)
assert raises.value.errno == errno.EBADF
def create_spooler(self, to_write: bytes = b'') -> cockpit.transports.Spooler:
loop = asyncio.get_running_loop()
reader, writer = os.pipe()
try:
spooler = cockpit.transports.Spooler(loop, reader)
finally:
os.close(reader)
try:
os.write(writer, to_write)
finally:
os.close(writer)
return spooler
@pytest.mark.asyncio
async def test_poll_eof(self) -> None:
spooler = self.create_spooler()
while spooler._fd != -1:
await asyncio.sleep(0.1)
assert spooler.get() == b''
@pytest.mark.asyncio
async def test_nopoll_eof(self) -> None:
spooler = self.create_spooler()
assert spooler.get() == b''
assert spooler._fd == -1
@pytest.mark.asyncio
async def test_poll_small(self) -> None:
spooler = self.create_spooler(b'abcd')
while spooler._fd != -1:
await asyncio.sleep(0.1)
assert spooler.get() == b'abcd'
@pytest.mark.asyncio
async def test_nopoll_small(self) -> None:
spooler = self.create_spooler(b'abcd')
assert spooler.get() == b'abcd'
assert spooler._fd == -1
@pytest.mark.asyncio
async def test_big(self) -> None:
loop = asyncio.get_running_loop()
reader, writer = os.pipe()
try:
spooler = cockpit.transports.Spooler(loop, reader)
finally:
os.close(reader)
try:
os.set_blocking(writer, False)
written = 0
blob = b'a' * 64 * 1024 # NB: pipe buffer is 64k
while written < 1024 * 1024:
# Note: we should never get BlockingIOError here since we always
# give the reader a chance to drain the pipe.
written += os.write(writer, blob)
while len(spooler.get()) < written:
await asyncio.sleep(0.01)
assert spooler._fd != -1
finally:
os.close(writer)
await asyncio.sleep(0.1)
assert spooler._fd == -1
assert len(spooler.get()) == written
class TestEpollLimitations:
# https://github.com/python/cpython/issues/73903
#
# There are some types of files that epoll doesn't work with, returning
# EPERM. We might be in a situation where we receive one of those on
# stdin/stdout for AsyncioTransport, so we'd theoretically like to support
# them.
async def spool_file(self, filename: str) -> None:
loop = asyncio.get_running_loop()
with open(filename) as fp:
spooler = cockpit.transports.Spooler(loop, fp.fileno())
while spooler._fd != -1:
await asyncio.sleep(0.1)
@pytest.mark.xfail
@pytest.mark.asyncio
async def test_read_file(self) -> None:
await self.spool_file(__file__)
@pytest.mark.xfail
@pytest.mark.asyncio
async def test_dev_null(self) -> None:
await self.spool_file('/dev/null')
class TestStdio:
@contextlib.contextmanager
def create_terminal(self):
ours, theirs = os.openpty()
stdin = os.dup(theirs)
stdout = os.dup(theirs)
os.close(theirs)
loop = asyncio.get_running_loop()
protocol = Protocol()
yield ours, protocol, cockpit.transports.StdioTransport(loop, protocol, stdin=stdin, stdout=stdout)
os.close(stdin)
os.close(stdout)
@pytest.mark.asyncio
async def test_terminal_write_eof(self):
# Make sure write_eof() fails
with self.create_terminal() as (ours, protocol, transport):
assert not transport.can_write_eof()
with pytest.raises(RuntimeError):
transport.write_eof()
os.close(ours)
@pytest.mark.asyncio
async def test_terminal_disconnect(self):
# Make sure disconnecting the session shows up as an EOF
with self.create_terminal() as (ours, protocol, transport):
os.close(ours)
while not protocol.eof:
await asyncio.sleep(0.1)
class TestSubprocessTransport:
def subprocess(self, args, **kwargs: Any) -> Tuple[Protocol, cockpit.transports.SubprocessTransport]:
loop = asyncio.get_running_loop()
protocol = Protocol()
transport = cockpit.transports.SubprocessTransport(loop, protocol, args, **kwargs)
assert transport._protocol == protocol
assert protocol.transport == transport
return protocol, transport
@pytest.mark.asyncio
async def test_true(self) -> None:
protocol, transport = self.subprocess(['true'])
await protocol.eof_and_exited_with_code(0)
assert transport.get_stderr() == ''
@pytest.mark.asyncio
async def test_cat(self) -> None:
protocol, transport = self.subprocess(['cat'])
protocol.close_on_eof = False
protocol.write_a_lot()
assert transport.can_write_eof()
transport.write_eof()
await protocol.eof_and_exited_with_code(0)
assert protocol.transport is not None # should not have automatically closed
assert transport.get_returncode() == 0
assert protocol.sent == protocol.received
transport.close()
assert protocol.transport is None
@pytest.mark.asyncio
async def test_send_signal(self) -> None:
protocol, transport = self.subprocess(['cat'])
transport.send_signal(signal.SIGINT)
await protocol.eof_and_exited_with_code(-signal.SIGINT)
@pytest.mark.asyncio
async def test_pid(self) -> None:
protocol, transport = self.subprocess(['sh', '-c', 'echo $$'])
protocol.output = []
await protocol.eof_and_exited_with_code(0)
assert int(protocol.get_output()) == transport.get_pid()
@pytest.mark.asyncio
async def test_terminate(self) -> None:
protocol, transport = self.subprocess(['cat'])
transport.kill()
await protocol.eof_and_exited_with_code(-signal.SIGKILL)
protocol, transport = self.subprocess(['cat'])
transport.terminate()
await protocol.eof_and_exited_with_code(-signal.SIGTERM)
@pytest.mark.asyncio
async def test_stderr(self) -> None:
loop = asyncio.get_running_loop()
protocol = Protocol()
transport = cockpit.transports.SubprocessTransport(loop, protocol, ['cat', '/nonexistent'],
stderr=subprocess.PIPE)
await protocol.eof_and_exited_with_code(1)
assert protocol.received == protocol.sent == 0
# Unless we reset it, we should get the same result repeatedly
assert '/nonexistent' in transport.get_stderr()
assert '/nonexistent' in transport.get_stderr()
assert '/nonexistent' in transport.get_stderr(reset=True)
# After we reset, it should be the empty string
assert transport.get_stderr() == ''
assert transport.get_stderr(reset=True) == ''
@pytest.mark.asyncio
async def test_safe_watcher_ENOSYS(self, monkeypatch: pytest.MonkeyPatch) -> None:
monkeypatch.setattr(asyncio, 'PidfdChildWatcher', unittest.mock.Mock(side_effect=OSError), raising=False)
protocol, transport = self.subprocess(['true'])
watcher = transport._get_watcher(asyncio.get_running_loop())
assert isinstance(watcher, asyncio.SafeChildWatcher)
await protocol.eof_and_exited_with_code(0)
@pytest.mark.asyncio
async def test_safe_watcher_oldpy(self, monkeypatch: pytest.MonkeyPatch) -> None:
monkeypatch.delattr(asyncio, 'PidfdChildWatcher', raising=False)
protocol, transport = self.subprocess(['true'])
watcher = transport._get_watcher(asyncio.get_running_loop())
assert isinstance(watcher, asyncio.SafeChildWatcher)
await protocol.eof_and_exited_with_code(0)
@pytest.mark.asyncio
async def test_true_pty(self) -> None:
loop = asyncio.get_running_loop()
protocol = Protocol()
transport = cockpit.transports.SubprocessTransport(loop, protocol, ['true'], pty=True)
assert not transport.can_write_eof()
await protocol.eof_and_exited_with_code(0)
assert protocol.received == protocol.sent == 0
@pytest.mark.asyncio
async def test_broken_pipe(self) -> None:
loop = asyncio.get_running_loop()
protocol = Protocol()
transport = cockpit.transports.SubprocessTransport(loop, protocol, ['true'])
protocol.close_on_eof = False
while not protocol.exited:
await asyncio.sleep(0.1)
assert protocol.transport is transport # should not close on EOF
# Now let's write to the stdin with the other side closed.
# This should be enough to immediately disconnect us (EPIPE)
protocol.write(b'abc')
assert protocol.transport is None
assert isinstance(protocol.exc, BrokenPipeError)
@pytest.mark.asyncio
async def test_broken_pipe_backlog(self) -> None:
loop = asyncio.get_running_loop()
protocol = Protocol()
transport = cockpit.transports.SubprocessTransport(loop, protocol, ['cat'])
protocol.close_on_eof = False
# Since we're not reading, cat's stdout will back up and it will be
# forced to stop reading at some point. We'll still have a rather full
# write buffer.
protocol.write_a_lot()
# This will result in the stdin closing. Our next attempt to write to
# the buffer should end badly (EPIPE).
transport.kill()
while protocol.transport:
await asyncio.sleep(0.1)
assert protocol.transport is None
assert isinstance(protocol.exc, BrokenPipeError)
@pytest.mark.asyncio
async def test_window_size(self) -> None:
protocol, transport = self.subprocess(['bash', '-ic',
"""
while true; do
sleep 0.1
echo ${LINES}x${COLUMNS}
done
"""],
pty=True,
window=cockpit.transports.WindowSize({'rows': 22, 'cols': 33}))
protocol.output = []
while b'22x33\r\n' not in protocol.get_output():
await asyncio.sleep(0.1)
transport.set_window_size(cockpit.transports.WindowSize({'rows': 44, 'cols': 55}))
while b'44x55\r\n' not in protocol.get_output():
await asyncio.sleep(0.1)
transport.close()
@pytest.mark.asyncio
async def test_env(self) -> None:
protocol, transport = self.subprocess(['bash', '-ic', 'echo $HOME'],
pty=True,
env={'HOME': '/test'})
protocol.output = []
while b'/test\r\n' not in protocol.get_output():
await asyncio.sleep(0.1)
transport.close()

View File

@ -44,34 +44,6 @@ test_ruff() {
find_python_files | xargs -r -0 ruff check --no-cache
}
if [ "${WITH_PARTIAL_TREE:-0}" = 0 ]; then
mypy_strict_files='
src/cockpit/__init__.py
src/cockpit/_version.py
src/cockpit/jsonutil.py
src/cockpit/protocol.py
src/cockpit/transports.py
'
test_mypy() {
command -v mypy >/dev/null || skip 'no mypy'
for pkg in systemd_ctypes ferny bei; do
test -e "src/cockpit/_vendor/${pkg}/__init__.py" || skip "no ${pkg}"
done
mypy --no-error-summary src/cockpit test/pytest
# test scripts individually, to avoid clashing on `__main__`
# also skip integration tests, they are too big and not annotated
find_scripts 'python3' "*.none" | grep -zv 'test/' | xargs -r -0 -n1 mypy --no-error-summary
mypy --no-error-summary --strict $mypy_strict_files
}
test_vulture() {
# vulture to find unused variables/functions
command -v vulture >/dev/null || skip 'no vulture'
find_python_files | xargs -r -0 vulture
}
fi
test_js_translatable_strings() {
# Translatable strings must be marked with _(""), not _('')

View File

@ -53,11 +53,6 @@ Version: 0
Release: 1%{?dist}
Source0: https://github.com/cockpit-project/cockpit/releases/download/%{version}/cockpit-%{version}.tar.xz
# Don't change the bridge on RHEL 8; the old SSH breaks some features, see @todoPybridgeRHEL8
%if 0%{?rhel} == 8 && !%{defined enable_old_bridge}
%define enable_old_bridge 1
%endif
# in RHEL 8 the source package is duplicated: cockpit (building basic packages like cockpit-{bridge,system})
# and cockpit-appstream (building optional packages like cockpit-{pcp})
# This split does not apply to EPEL/COPR nor packit c8s builds, only to our own
@ -179,20 +174,6 @@ Suggests: cockpit-selinux
Requires: subscription-manager-cockpit
%endif
%if 0%{?enable_old_bridge} == 0
BuildRequires: python3-devel
BuildRequires: python3-pip
%if 0%{?rhel} == 0
# All of these are only required for running pytest (which we only do on Fedora)
BuildRequires: procps-ng
BuildRequires: pyproject-rpm-macros
BuildRequires: python3-pytest-asyncio
BuildRequires: python3-pytest-cov
BuildRequires: python3-pytest-timeout
BuildRequires: python3-tox-current-env
%endif
%endif
%prep
%setup -q -n cockpit-%{version}
@ -205,9 +186,6 @@ BuildRequires: python3-tox-current-env
--docdir=%_defaultdocdir/%{name} \
%endif
--with-pamdir='%{pamdir}' \
%if 0%{?enable_old_bridge}
--enable-old-bridge \
%endif
%if 0%{?build_basic} == 0
--disable-ssh \
%endif
@ -220,10 +198,6 @@ BuildRequires: python3-tox-current-env
%check
make -j$(nproc) check
%if 0%{?enable_old_bridge} == 0 && 0%{?rhel} == 0
%tox
%endif
%install
%make_install
make install-tests DESTDIR=%{buildroot}
@ -239,7 +213,7 @@ echo '%dir %{_datadir}/cockpit/base1' >> base.list
find %{buildroot}%{_datadir}/cockpit/base1 -type f -o -type l >> base.list
echo '%{_sysconfdir}/cockpit/machines.d' >> base.list
echo %{buildroot}%{_datadir}/polkit-1/actions/org.cockpit-project.cockpit-bridge.policy >> base.list
%if 0%{?enable_old_bridge} && 0%{?build_basic}
%if 0%{?build_basic}
echo '%dir %{_datadir}/cockpit/ssh' >> base.list
find %{buildroot}%{_datadir}/cockpit/ssh -type f >> base.list
%endif
@ -390,9 +364,6 @@ system on behalf of the web based user interface.
%doc %{_mandir}/man1/cockpit-bridge.1.gz
%{_bindir}/cockpit-bridge
%{_libexecdir}/cockpit-askpass
%if 0%{?enable_old_bridge} == 0
%{python3_sitelib}/%{name}*
%endif
%package doc
Summary: Cockpit deployment and developer guide

View File

@ -1,3 +0,0 @@
import pytest
pytest.Module._obj # type: ignore[attr-defined]