osbs: raise a glass for its service

This removes osbs and almost all of its associated playbooks and files.

It served long and well, but we no longer need it.
Flatpaks are now built with the koji-flatpak plugin.
Base/minimal/toolbox containers are built with kiwi.
We aren't building any other containers right now, and if we did, they
could be added to kiwi.
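
For the record, these builds now go straight through koji. A minimal
sketch of the kiwi path, assuming the koji kiwi plugin's kiwi-build
subcommand, with every argument left as a placeholder:

    # submit a kiwi-based image/container build to koji; the target and
    # description repo/path are placeholders, not real values
    koji kiwi-build <target> <description-scm-url> <description-path>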

This is the end of an era... I look back with nostalgia on
ansible-ansible-openshift-ansible (a role to set up ansible on a control
host and run it from our ansible).

Goodbye, osbs!

Signed-off-by: Kevin Fenzi <kevin@scrye.com>
Kevin Fenzi 2024-03-28 12:52:07 -07:00
parent 8fb54fb26e
commit c84b99223c
153 changed files with 2 additions and 9297 deletions

View File

@@ -1,50 +0,0 @@
source 'http://rubygems.org'
gem 'rails', '~> 3.0.13'
gem 'json'
gem 'parseconfig'
gem 'mongo'
gem 'xml-simple'
gem 'rack'
gem 'regin'
gem 'open4'
gem 'stickshift-common'
gem 'stickshift-controller'
gem 'rest-client'
gem 'systemu'
# Add plugin gems here
gem 'gearchanger-mcollective-plugin'
gem 'uplift-bind-plugin'
gem 'swingshift-mongo-plugin'
gem 'dnsruby'
# Bundle edge Rails instead:
# gem 'rails', :git => 'git://github.com/rails/rails.git'
# Use unicorn as the web server
# gem 'unicorn'
# Deploy with Capistrano
# gem 'capistrano'
# To use debugger (ruby-debug for Ruby 1.8.7+, ruby-debug19 for Ruby 1.9.2+)
# gem 'ruby-debug'
# gem 'ruby-debug19', :require => 'ruby-debug'
# Bundle the extra gems:
# gem 'bj'
# gem 'nokogiri'
# gem 'sqlite3-ruby', :require => 'sqlite3'
# gem 'aws-s3', :require => 'aws/s3'
# Bundle gems for the local environment. Make sure to
# put test-only gems in this group so their generators
# and rake tasks are available in development mode:
group :development, :test do
# The require part from http://tinyurl.com/3pf68ho
gem 'mocha', :require => nil
gem 'cucumber'
gem 'rcov'
end

View File

@@ -1,5 +0,0 @@
[jenkins]
name=Jenkins
baseurl=http://pkg.jenkins-ci.org/redhat
gpgcheck=1
gpgkey=http://pkg.jenkins-ci.org/redhat/jenkins-ci.org.key

View File

@@ -1,18 +0,0 @@
topicprefix = /topic/
main_collective = mcollective
collectives = mcollective
libdir = /usr/libexec/mcollective
loglevel = debug
logfile = /var/log/mcollective-client.log
# Plugins
securityprovider = psk
plugin.psk = unset
connector = qpid
plugin.qpid.host=127.0.0.1
plugin.qpid.secure=false
plugin.qpid.timeout=5
# Facts
factsource = yaml
plugin.yaml = /etc/mcollective/facts.yaml

View File

@@ -1,7 +0,0 @@
#!/bin/bash
mongo stickshift_broker_dev --eval 'db.addUser("stickshift", "mooo")'
mongo stickshift_broker_dev --eval 'db.auth_user.update({"_id":"admin"}, {"_id":"admin","user":"admin","password":"2a8462d93a13e51387a5e607cbd1139f"} , true)'
echo "Acct setup done on `date`" > /etc/mongo-acct-setup

View File

@@ -1,91 +0,0 @@
##
### Basic Defaults
##
bind_ip = 127.0.0.1
port = 27017
fork = true
pidfilepath = /var/run/mongodb/mongodb.pid
logpath = /var/log/mongodb/mongodb.log
dbpath =/var/lib/mongodb
journal = true
# Enables periodic logging of CPU utilization and I/O wait
#cpu = true
# Turn on/off security. Off is currently the default
#noauth = true
auth = true
# Verbose logging output.
#verbose = true
# Inspect all client data for validity on receipt (useful for
# developing drivers)
#objcheck = true
# Enable db quota management
#quota = true
# Set oplogging level where n is
# 0=off (default)
# 1=W
# 2=R
# 3=both
# 7=W+some reads
#oplog = 0
# Diagnostic/debugging option
#nocursors = true
# Ignore query hints
#nohints = true
# Disable the HTTP interface (Defaults to port+1000).
nohttpinterface = true
# Turns off server-side scripting. This will result in greatly limited
# functionality
#noscripting = true
# Turns off table scans. Any query that would do a table scan fails.
#notablescan = true
# Disable data file preallocation.
#noprealloc = true
# Specify .ns file size for new databases.
# nssize = <size>
# Account token for Mongo monitoring server.
#mms-token = <token>
# Server name for Mongo monitoring server.
#mms-name = <server-name>
# Ping interval for Mongo monitoring server.
#mms-interval = <seconds>
# Replication Options
# in replicated mongo databases, specify here whether this is a slave or master
#slave = true
#source = master.example.com
# Slave only: specify a single database to replicate
#only = master.example.com
# or
#master = true
#source = slave.example.com
# Address of a server to pair with.
#pairwith = <server:port>
# Address of arbiter server.
#arbiter = <server:port>
# Automatically resync if slave data is stale
#autoresync
# Custom size for replication operation log.
#oplogSize = <MB>
# Size limit for in-memory storage of op ids.
#opIdMem = <bytes>
# smallfiles
smallfiles = true

View File

@@ -1,5 +0,0 @@
[openshift]
name=OpenShift
baseurl=http://mirror.openshift.com/pub/crankcase/rhel-6/x86_64/
enabled=1
gpgcheck=0

View File

@@ -1,14 +0,0 @@
{% if inventory_hostname.startswith('osbs') %}
#TODO : remove this after freeze
[rhel7-openshift-3.11]
name = rhel7 openshift 3.11 $basearch
baseurl=http://infrastructure.fedoraproject.org/repo/rhel/rhel7/$basearch/rhel-7-openshift-3.11-rpms/
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
enabled=0
{% elif inventory_hostname.startswith('os') %}
[rhel7-openshift-3.11]
name = rhel7 openshift 3.11 $basearch
baseurl=http://infrastructure.fedoraproject.org/repo/rhel/rhel7/$basearch/rhel-7-openshift-3.11-rpms/
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
enabled=0
{% endif %}

View File

@@ -1,3 +0,0 @@
cluster-mechanism=DIGEST-MD5 ANONYMOUS
auth=no

View File

@@ -1,13 +0,0 @@
FROM registry.fedoraproject.org/fedora:37
RUN dnf -y install --refresh dnf-plugins-core && dnf -y install moby-engine git python3-setuptools e2fsprogs koji osbs-client \
python3-osbs-client gssproxy fedpkg python3-docker-squash atomic-reactor python3-atomic-reactor* go-md2man python3-productmd sed \
python3-gobject python3-libmodulemd python3-pdc-client ostree flatpak-module-tools flatpak skopeo && dnf clean all
ADD ./orchestrator_customize.json /usr/share/osbs/orchestrator_customize.json
ADD ./worker_customize.json /usr/share/osbs/worker_customize.json
ADD ./krb5.conf /etc
RUN printf '[libdefaults]\n default_ccache_name = DIR:/tmp/ccache_%%{uid}' >/etc/krb5.conf.d/ccache.conf
ADD ./krb5.osbs_{{osbs_url}}.keytab /etc/
RUN sed -i -e 's|/var/lib/rpm|/usr/lib/sysimage/rpm|' /usr/lib/python*/site-packages/atomic_reactor/plugins/post_rpmqa.py
ADD ./ca.crt /etc/pki/ca-trust/source/anchors/osbs.ca.crt
RUN update-ca-trust
CMD ["python3", "/usr/bin/atomic-reactor", "--verbose", "inside-build"]
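
A buildroot image like this would have been produced with a plain
docker build; a minimal sketch, assuming a hypothetical tag name:

    # build the osbs buildroot image from the directory holding this
    # Dockerfile and its templated files (keytab, ca.crt, json);
    # the tag name is hypothetical
    docker build -t osbs-buildroot:latest .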

View File

@@ -1,12 +0,0 @@
FROM registry.fedoraproject.org/fedora:37
RUN dnf -y install --refresh dnf-plugins-core && dnf -y install moby-engine git python3-setuptools e2fsprogs koji osbs-client \
python3-osbs-client gssproxy fedpkg python3-docker-squash atomic-reactor python3-atomic-reactor* go-md2man python3-productmd \
python3-gobject python3-libmodulemd python3-pdc-client ostree flatpak-module-tools flatpak skopeo && dnf clean all
ADD ./orchestrator_customize.json /usr/share/osbs/orchestrator_customize.json
ADD ./worker_customize.json /usr/share/osbs/worker_customize.json
ADD ./krb5.conf /etc
RUN printf '[libdefaults]\n default_ccache_name = DIR:/tmp/ccache_%%{uid}' >/etc/krb5.conf.d/ccache.conf
ADD ./krb5.osbs_{{osbs_url}}.keytab /etc/
ADD ./ca.crt /etc/pki/ca-trust/source/anchors/osbs.ca.crt
RUN update-ca-trust
CMD ["python3", "/usr/bin/atomic-reactor", "--verbose", "inside-build"]

View File

@@ -1,5 +0,0 @@
SHELL=/bin/bash
MAILTO=maxamillion@fedoraproject.org
5 0 * * * root for i in $(docker ps -a | awk '/Exited/ { print $1 }'); do docker rm $i; done && for i in $(docker images -q -f 'dangling=true'); do docker rmi $i; done
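
This cleanup loop predates docker's built-in pruning; on newer engines
roughly the same effect is a single command:

    # remove stopped containers and dangling images (plus unused
    # networks and build cache) without prompting
    docker system prune --force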

View File

@@ -1,3 +0,0 @@
SHELL=/bin/bash
0 0 * * * root oc adm prune builds --keep-complete=0 --keep-failed=0 --keep-younger-than=72h0m0s --orphans --confirm

View File

@@ -1 +0,0 @@
VG="vg-docker"

View File

@@ -1 +0,0 @@
STORAGE_DRIVER="overlay2"

View File

@@ -1,8 +0,0 @@
# Ansible managed
[Unit]
Wants=iptables.service
After=iptables.service
[Service]
ExecStartPost=/usr/local/bin/fix-docker-iptables

View File

@@ -1,2 +0,0 @@
[Service]
ExecStartPost=/usr/local/bin/fix-docker-iptables

View File

@@ -1 +0,0 @@
{"auths":{"candidate-registry.fedoraproject.org":{"username":"{{candidate_registry_osbs_prod_username}}","password":"{{candidate_registry_osbs_prod_password}}","email":"","auth":"{{ auth_info_prod | b64encode }}"}}}

View File

@@ -1 +0,0 @@
{"auths":{"candidate-registry.stg.fedoraproject.org":{"username":"{{candidate_registry_osbs_stg_username}}","password":"{{candidate_registry_osbs_stg_password}}","email":"","auth":"{{ auth_info_stg | b64encode }}"}}}

View File

@@ -1,693 +0,0 @@
# Configuration file for dnsmasq.
#
# Format is one option per line, legal options are the same
# as the long options legal on the command line. See
# "/usr/sbin/dnsmasq --help" or "man 8 dnsmasq" for details.
# Listen on this specific port instead of the standard DNS port
# (53). Setting this to zero completely disables DNS function,
# leaving only DHCP and/or TFTP.
#port=5353
# The following two options make you a better netizen, since they
# tell dnsmasq to filter out queries which the public DNS cannot
# answer, and which load the servers (especially the root servers)
# unnecessarily. If you have a dial-on-demand link they also stop
# these requests from bringing up the link unnecessarily.
# Never forward plain names (without a dot or domain part)
#domain-needed
# Never forward addresses in the non-routed address spaces.
#bogus-priv
# Uncomment these to enable DNSSEC validation and caching:
# (Requires dnsmasq to be built with DNSSEC option.)
#conf-file=/usr/share/dnsmasq/trust-anchors.conf
#dnssec
# Replies which are not DNSSEC signed may be legitimate, because the domain
# is unsigned, or may be forgeries. Setting this option tells dnsmasq to
# check that an unsigned reply is OK, by finding a secure proof that a DS
# record somewhere between the root and the domain does not exist.
# The cost of setting this is that even queries in unsigned domains will need
# one or more extra DNS queries to verify.
#dnssec-check-unsigned
# Uncomment this to filter useless windows-originated DNS requests
# which can trigger dial-on-demand links needlessly.
# Note that (amongst other things) this blocks all SRV requests,
# so don't use it if you use eg Kerberos, SIP, XMPP or Google-talk.
# This option only affects forwarding, SRV records originating for
# dnsmasq (via srv-host= lines) are not suppressed by it.
#filterwin2k
# Change this line if you want dns to get its upstream servers from
# somewhere other than /etc/resolv.conf
#resolv-file=
# By default, dnsmasq will send queries to any of the upstream
# servers it knows about and tries to favour servers that are known
# to be up. Uncommenting this forces dnsmasq to try each query
# with each server strictly in the order they appear in
# /etc/resolv.conf
#strict-order
# If you don't want dnsmasq to read /etc/resolv.conf or any other
# file, getting its servers from this file instead (see below), then
# uncomment this.
#no-resolv
# If you don't want dnsmasq to poll /etc/resolv.conf or other resolv
# files for changes and re-read them then uncomment this.
#no-poll
# Add other name servers here, with domain specs if they are for
# non-public domains.
#server=/localnet/192.168.0.1
# Example of routing PTR queries to nameservers: this will send all
# address->name queries for 192.168.3/24 to nameserver 10.1.2.3
#server=/3.168.192.in-addr.arpa/10.1.2.3
# Add local-only domains here, queries in these domains are answered
# from /etc/hosts or DHCP only.
#local=/localnet/
# Add domains which you want to force to an IP address here.
# The example below send any host in double-click.net to a local
# web-server.
#address=/double-click.net/127.0.0.1
# --address (and --server) work with IPv6 addresses too.
#address=/www.thekelleys.org.uk/fe80::20d:60ff:fe36:f83
# Add the IPs of all queries to yahoo.com, google.com, and their
# subdomains to the vpn and search ipsets:
#ipset=/yahoo.com/google.com/vpn,search
# You can control how dnsmasq talks to a server: this forces
# queries to 10.1.2.3 to be routed via eth1
# server=10.1.2.3@eth1
# and this sets the source (ie local) address used to talk to
# 10.1.2.3 to 192.168.1.1 port 55 (there must be an interface with that
# IP on the machine, obviously).
# server=10.1.2.3@192.168.1.1#55
# If you want dnsmasq to change uid and gid to something other
# than the default, edit the following lines.
user=dnsmasq
group=dnsmasq
# If you want dnsmasq to listen for DHCP and DNS requests only on
# specified interfaces (and the loopback) give the name of the
# interface (eg eth0) here.
# Repeat the line for more than one interface.
#interface=
# Listen only on localhost by default
interface=lo
# Or you can specify which interface _not_ to listen on
#except-interface=
# Or which to listen on by address (remember to include 127.0.0.1 if
# you use this.)
#listen-address=
# If you want dnsmasq to provide only DNS service on an interface,
# configure it as shown above, and then use the following line to
# disable DHCP and TFTP on it.
#no-dhcp-interface=
# Serve DNS and DHCP only to networks directly connected to this machine.
# Any interface= line will override it.
#local-service
# On systems which support it, dnsmasq binds the wildcard address,
# even when it is listening on only some interfaces. It then discards
# requests that it shouldn't reply to. This has the advantage of
# working even when interfaces come and go and change address. If you
# want dnsmasq to really bind only the interfaces it is listening on,
# uncomment this option. About the only time you may need this is when
# running another nameserver on the same machine.
#
# To listen only on localhost and do not receive packets on other
# interfaces, bind only to lo device. Comment out to bind on single
# wildcard socket.
bind-interfaces
# If you don't want dnsmasq to read /etc/hosts, uncomment the
# following line.
#no-hosts
# or if you want it to read another file, as well as /etc/hosts, use
# this.
#addn-hosts=/etc/banner_add_hosts
# Set this (and domain: see below) if you want to have a domain
# automatically added to simple names in a hosts-file.
#expand-hosts
# Set the domain for dnsmasq. This is optional, but if it is set, it
# does the following things.
# 1) Allows DHCP hosts to have fully qualified domain names, as long
# as the domain part matches this setting.
# 2) Sets the "domain" DHCP option thereby potentially setting the
# domain of all systems configured by DHCP
# 3) Provides the domain part for "expand-hosts"
#domain=thekelleys.org.uk
# Set a different domain for a particular subnet
#domain=wireless.thekelleys.org.uk,192.168.2.0/24
# Same idea, but range rather than subnet
#domain=reserved.thekelleys.org.uk,192.68.3.100,192.168.3.200
# Uncomment this to enable the integrated DHCP server, you need
# to supply the range of addresses available for lease and optionally
# a lease time. If you have more than one network, you will need to
# repeat this for each network on which you want to supply DHCP
# service.
#dhcp-range=192.168.0.50,192.168.0.150,12h
# This is an example of a DHCP range where the netmask is given. This
# is needed for networks where we reach the dnsmasq DHCP server via a relay
# agent. If you don't know what a DHCP relay agent is, you probably
# don't need to worry about this.
#dhcp-range=192.168.0.50,192.168.0.150,255.255.255.0,12h
# This is an example of a DHCP range which sets a tag, so that
# some DHCP options may be set only for this network.
#dhcp-range=set:red,192.168.0.50,192.168.0.150
# Use this DHCP range only when the tag "green" is set.
#dhcp-range=tag:green,192.168.0.50,192.168.0.150,12h
# Specify a subnet which can't be used for dynamic address allocation,
# is available for hosts with matching --dhcp-host lines. Note that
# dhcp-host declarations will be ignored unless there is a dhcp-range
# of some type for the subnet in question.
# In this case the netmask is implied (it comes from the network
# configuration on the machine running dnsmasq) it is possible to give
# an explicit netmask instead.
#dhcp-range=192.168.0.0,static
# Enable DHCPv6. Note that the prefix-length does not need to be specified
# and defaults to 64 if missing.
#dhcp-range=1234::2, 1234::500, 64, 12h
# Do Router Advertisements, BUT NOT DHCP for this subnet.
#dhcp-range=1234::, ra-only
# Do Router Advertisements, BUT NOT DHCP for this subnet, also try and
# add names to the DNS for the IPv6 address of SLAAC-configured dual-stack
# hosts. Use the DHCPv4 lease to derive the name, network segment and
# MAC address and assume that the host will also have an
# IPv6 address calculated using the SLAAC algorithm.
#dhcp-range=1234::, ra-names
# Do Router Advertisements, BUT NOT DHCP for this subnet.
# Set the lifetime to 46 hours. (Note: minimum lifetime is 2 hours.)
#dhcp-range=1234::, ra-only, 48h
# Do DHCP and Router Advertisements for this subnet. Set the A bit in the RA
# so that clients can use SLAAC addresses as well as DHCP ones.
#dhcp-range=1234::2, 1234::500, slaac
# Do Router Advertisements and stateless DHCP for this subnet. Clients will
# not get addresses from DHCP, but they will get other configuration information.
# They will use SLAAC for addresses.
#dhcp-range=1234::, ra-stateless
# Do stateless DHCP, SLAAC, and generate DNS names for SLAAC addresses
# from DHCPv4 leases.
#dhcp-range=1234::, ra-stateless, ra-names
# Do router advertisements for all subnets where we're doing DHCPv6
# Unless overridden by ra-stateless, ra-names, et al, the router
# advertisements will have the M and O bits set, so that the clients
# get addresses and configuration from DHCPv6, and the A bit reset, so the
# clients don't use SLAAC addresses.
#enable-ra
# Supply parameters for specified hosts using DHCP. There are lots
# of valid alternatives, so we will give examples of each. Note that
# IP addresses DO NOT have to be in the range given above, they just
# need to be on the same network. The order of the parameters in these
# does not matter; it's permissible to give name, address and MAC in any
# order.
# Always allocate the host with Ethernet address 11:22:33:44:55:66
# The IP address 192.168.0.60
#dhcp-host=11:22:33:44:55:66,192.168.0.60
# Always set the name of the host with hardware address
# 11:22:33:44:55:66 to be "fred"
#dhcp-host=11:22:33:44:55:66,fred
# Always give the host with Ethernet address 11:22:33:44:55:66
# the name fred and IP address 192.168.0.60 and lease time 45 minutes
#dhcp-host=11:22:33:44:55:66,fred,192.168.0.60,45m
# Give a host with Ethernet address 11:22:33:44:55:66 or
# 12:34:56:78:90:12 the IP address 192.168.0.60. Dnsmasq will assume
# that these two Ethernet interfaces will never be in use at the same
# time, and give the IP address to the second, even if it is already
# in use by the first. Useful for laptops with wired and wireless
# addresses.
#dhcp-host=11:22:33:44:55:66,12:34:56:78:90:12,192.168.0.60
# Give the machine which says its name is "bert" IP address
# 192.168.0.70 and an infinite lease
#dhcp-host=bert,192.168.0.70,infinite
# Always give the host with client identifier 01:02:02:04
# the IP address 192.168.0.60
#dhcp-host=id:01:02:02:04,192.168.0.60
# Always give the InfiniBand interface with hardware address
# 80:00:00:48:fe:80:00:00:00:00:00:00:f4:52:14:03:00:28:05:81 the
# ip address 192.168.0.61. The client id is derived from the prefix
# ff:00:00:00:00:00:02:00:00:02:c9:00 and the last 8 pairs of
# hex digits of the hardware address.
#dhcp-host=id:ff:00:00:00:00:00:02:00:00:02:c9:00:f4:52:14:03:00:28:05:81,192.168.0.61
# Always give the host with client identifier "marjorie"
# the IP address 192.168.0.60
#dhcp-host=id:marjorie,192.168.0.60
# Enable the address given for "judge" in /etc/hosts
# to be given to a machine presenting the name "judge" when
# it asks for a DHCP lease.
#dhcp-host=judge
# Never offer DHCP service to a machine whose Ethernet
# address is 11:22:33:44:55:66
#dhcp-host=11:22:33:44:55:66,ignore
# Ignore any client-id presented by the machine with Ethernet
# address 11:22:33:44:55:66. This is useful to prevent a machine
# being treated differently when running under different OS's or
# between PXE boot and OS boot.
#dhcp-host=11:22:33:44:55:66,id:*
# Send extra options which are tagged as "red" to
# the machine with Ethernet address 11:22:33:44:55:66
#dhcp-host=11:22:33:44:55:66,set:red
# Send extra options which are tagged as "red" to
# any machine with Ethernet address starting 11:22:33:
#dhcp-host=11:22:33:*:*:*,set:red
# Give a fixed IPv6 address and name to client with
# DUID 00:01:00:01:16:d2:83:fc:92:d4:19:e2:d8:b2
# Note the MAC addresses CANNOT be used to identify DHCPv6 clients.
# Note also that the [] around the IPv6 address are obligatory.
#dhcp-host=id:00:01:00:01:16:d2:83:fc:92:d4:19:e2:d8:b2, fred, [1234::5]
# Ignore any clients which are not specified in dhcp-host lines
# or /etc/ethers. Equivalent to ISC "deny unknown-clients".
# This relies on the special "known" tag which is set when
# a host is matched.
#dhcp-ignore=tag:!known
# Send extra options which are tagged as "red" to any machine whose
# DHCP vendorclass string includes the substring "Linux"
#dhcp-vendorclass=set:red,Linux
# Send extra options which are tagged as "red" to any machine one
# of whose DHCP userclass strings includes the substring "accounts"
#dhcp-userclass=set:red,accounts
# Send extra options which are tagged as "red" to any machine whose
# MAC address matches the pattern.
#dhcp-mac=set:red,00:60:8C:*:*:*
# If this line is uncommented, dnsmasq will read /etc/ethers and act
# on the ethernet-address/IP pairs found there just as if they had
# been given as --dhcp-host options. Useful if you keep
# MAC-address/host mappings there for other purposes.
#read-ethers
# Send options to hosts which ask for a DHCP lease.
# See RFC 2132 for details of available options.
# Common options can be given to dnsmasq by name:
# run "dnsmasq --help dhcp" to get a list.
# Note that all the common settings, such as netmask and
# broadcast address, DNS server and default route, are given
# sane defaults by dnsmasq. You very likely will not need
# any dhcp-options. If you use Windows clients and Samba, there
# are some options which are recommended, they are detailed at the
# end of this section.
# Override the default route supplied by dnsmasq, which assumes the
# router is the same machine as the one running dnsmasq.
#dhcp-option=3,1.2.3.4
# Do the same thing, but using the option name
#dhcp-option=option:router,1.2.3.4
# Override the default route supplied by dnsmasq and send no default
# route at all. Note that this only works for the options sent by
# default (1, 3, 6, 12, 28) the same line will send a zero-length option
# for all other option numbers.
#dhcp-option=3
# Set the NTP time server addresses to 192.168.0.4 and 10.10.0.5
#dhcp-option=option:ntp-server,192.168.0.4,10.10.0.5
# Send DHCPv6 option. Note [] around IPv6 addresses.
#dhcp-option=option6:dns-server,[1234::77],[1234::88]
# Send DHCPv6 option for nameservers as the machine running
# dnsmasq and another.
#dhcp-option=option6:dns-server,[::],[1234::88]
# Ask client to poll for option changes every six hours. (RFC4242)
#dhcp-option=option6:information-refresh-time,6h
# Set option 58 client renewal time (T1). Defaults to half of the
# lease time if not specified. (RFC2132)
#dhcp-option=option:T1,1m
# Set option 59 rebinding time (T2). Defaults to 7/8 of the
# lease time if not specified. (RFC2132)
#dhcp-option=option:T2,2m
# Set the NTP time server address to be the same machine as
# is running dnsmasq
#dhcp-option=42,0.0.0.0
# Set the NIS domain name to "welly"
#dhcp-option=40,welly
# Set the default time-to-live to 50
#dhcp-option=23,50
# Set the "all subnets are local" flag
#dhcp-option=27,1
# Send the etherboot magic flag and then etherboot options (a string).
#dhcp-option=128,e4:45:74:68:00:00
#dhcp-option=129,NIC=eepro100
# Specify an option which will only be sent to the "red" network
# (see dhcp-range for the declaration of the "red" network)
# Note that the tag: part must precede the option: part.
#dhcp-option = tag:red, option:ntp-server, 192.168.1.1
# The following DHCP options set up dnsmasq in the same way as is specified
# for the ISC dhcpcd in
# http://www.samba.org/samba/ftp/docs/textdocs/DHCP-Server-Configuration.txt
# adapted for a typical dnsmasq installation where the host running
# dnsmasq is also the host running samba.
# you may want to uncomment some or all of them if you use
# Windows clients and Samba.
#dhcp-option=19,0 # option ip-forwarding off
#dhcp-option=44,0.0.0.0 # set netbios-over-TCP/IP nameserver(s) aka WINS server(s)
#dhcp-option=45,0.0.0.0 # netbios datagram distribution server
#dhcp-option=46,8 # netbios node type
# Send an empty WPAD option. This may be REQUIRED to get windows 7 to behave.
#dhcp-option=252,"\n"
# Send RFC-3397 DNS domain search DHCP option. WARNING: Your DHCP client
# probably doesn't support this......
#dhcp-option=option:domain-search,eng.apple.com,marketing.apple.com
# Send RFC-3442 classless static routes (note the netmask encoding)
#dhcp-option=121,192.168.1.0/24,1.2.3.4,10.0.0.0/8,5.6.7.8
# Send vendor-class specific options encapsulated in DHCP option 43.
# The meaning of the options is defined by the vendor-class so
# options are sent only when the client supplied vendor class
# matches the class given here. (A substring match is OK, so "MSFT"
# matches "MSFT" and "MSFT 5.0"). This example sets the
# mtftp address to 0.0.0.0 for PXEClients.
#dhcp-option=vendor:PXEClient,1,0.0.0.0
# Send microsoft-specific option to tell windows to release the DHCP lease
# when it shuts down. Note the "i" flag, to tell dnsmasq to send the
# value as a four-byte integer - that's what microsoft wants. See
# http://technet2.microsoft.com/WindowsServer/en/library/a70f1bb7-d2d4-49f0-96d6-4b7414ecfaae1033.mspx?mfr=true
#dhcp-option=vendor:MSFT,2,1i
# Send the Encapsulated-vendor-class ID needed by some configurations of
# Etherboot to allow it to recognise the DHCP server.
#dhcp-option=vendor:Etherboot,60,"Etherboot"
# Send options to PXELinux. Note that we need to send the options even
# though they don't appear in the parameter request list, so we need
# to use dhcp-option-force here.
# See http://syslinux.zytor.com/pxe.php#special for details.
# Magic number - needed before anything else is recognised
#dhcp-option-force=208,f1:00:74:7e
# Configuration file name
#dhcp-option-force=209,configs/common
# Path prefix
#dhcp-option-force=210,/tftpboot/pxelinux/files/
# Reboot time. (Note 'i' to send 32-bit value)
#dhcp-option-force=211,30i
# Set the boot filename for netboot/PXE. You will only need
# this if you want to boot machines over the network and you will need
# a TFTP server; either dnsmasq's built-in TFTP server or an
# external one. (See below for how to enable the TFTP server.)
#dhcp-boot=pxelinux.0
# The same as above, but use a custom tftp-server instead of the machine running dnsmasq
#dhcp-boot=pxelinux,server.name,192.168.1.100
# Boot for iPXE. The idea is to send two different
# filenames, the first loads iPXE, and the second tells iPXE what to
# load. The dhcp-match sets the ipxe tag for requests from iPXE.
#dhcp-boot=undionly.kpxe
#dhcp-match=set:ipxe,175 # iPXE sends a 175 option.
#dhcp-boot=tag:ipxe,http://boot.ipxe.org/demo/boot.php
# Encapsulated options for iPXE. All the options are
# encapsulated within option 175
#dhcp-option=encap:175, 1, 5b # priority code
#dhcp-option=encap:175, 176, 1b # no-proxydhcp
#dhcp-option=encap:175, 177, string # bus-id
#dhcp-option=encap:175, 189, 1b # BIOS drive code
#dhcp-option=encap:175, 190, user # iSCSI username
#dhcp-option=encap:175, 191, pass # iSCSI password
# Test for the architecture of a netboot client. PXE clients are
# supposed to send their architecture as option 93. (See RFC 4578)
#dhcp-match=peecees, option:client-arch, 0 #x86-32
#dhcp-match=itanics, option:client-arch, 2 #IA64
#dhcp-match=hammers, option:client-arch, 6 #x86-64
#dhcp-match=mactels, option:client-arch, 7 #EFI x86-64
# Do real PXE, rather than just booting a single file, this is an
# alternative to dhcp-boot.
#pxe-prompt="What system shall I netboot?"
# or with timeout before first available action is taken:
#pxe-prompt="Press F8 for menu.", 60
# Available boot services for PXE.
#pxe-service=x86PC, "Boot from local disk"
# Loads <tftp-root>/pxelinux.0 from dnsmasq TFTP server.
#pxe-service=x86PC, "Install Linux", pxelinux
# Loads <tftp-root>/pxelinux.0 from TFTP server at 1.2.3.4.
# Beware this fails on old PXE ROMS.
#pxe-service=x86PC, "Install Linux", pxelinux, 1.2.3.4
# Use bootserver on network, found by multicast or broadcast.
#pxe-service=x86PC, "Install windows from RIS server", 1
# Use bootserver at a known IP address.
#pxe-service=x86PC, "Install windows from RIS server", 1, 1.2.3.4
# If you have multicast-FTP available,
# information for that can be passed in a similar way using options 1
# to 5. See page 19 of
# http://download.intel.com/design/archives/wfm/downloads/pxespec.pdf
# Enable dnsmasq's built-in TFTP server
#enable-tftp
# Set the root directory for files available via TFTP.
#tftp-root=/var/ftpd
# Do not abort if the tftp-root is unavailable
#tftp-no-fail
# Make the TFTP server more secure: with this set, only files owned by
# the user dnsmasq is running as will be sent over the net.
#tftp-secure
# This option stops dnsmasq from negotiating a larger blocksize for TFTP
# transfers. It will slow things down, but may rescue some broken TFTP
# clients.
#tftp-no-blocksize
# Set the boot file name only when the "red" tag is set.
#dhcp-boot=tag:red,pxelinux.red-net
# An example of dhcp-boot with an external TFTP server: the name and IP
# address of the server are given after the filename.
# Can fail with old PXE ROMS. Overridden by --pxe-service.
#dhcp-boot=/var/ftpd/pxelinux.0,boothost,192.168.0.3
# If there are multiple external tftp servers having a same name
# (using /etc/hosts) then that name can be specified as the
# tftp_servername (the third option to dhcp-boot) and in that
# case dnsmasq resolves this name and returns the resultant IP
# addresses in round robin fashion. This facility can be used to
# load balance the tftp load among a set of servers.
#dhcp-boot=/var/ftpd/pxelinux.0,boothost,tftp_server_name
# Set the limit on DHCP leases, the default is 150
#dhcp-lease-max=150
# The DHCP server needs somewhere on disk to keep its lease database.
# This defaults to a sane location, but if you want to change it, use
# the line below.
#dhcp-leasefile=/var/lib/dnsmasq/dnsmasq.leases
# Set the DHCP server to authoritative mode. In this mode it will barge in
# and take over the lease for any client which broadcasts on the network,
# whether it has a record of the lease or not. This avoids long timeouts
# when a machine wakes up on a new network. DO NOT enable this if there's
# the slightest chance that you might end up accidentally configuring a DHCP
# server for your campus/company accidentally. The ISC server uses
# the same option, and this URL provides more information:
# http://www.isc.org/files/auth.html
#dhcp-authoritative
# Set the DHCP server to enable DHCPv4 Rapid Commit Option per RFC 4039.
# In this mode it will respond to a DHCPDISCOVER message including a Rapid Commit
# option with a DHCPACK including a Rapid Commit option and fully committed address
# and configuration information. This must only be enabled if either the server is
# the only server for the subnet, or multiple servers are present and they each
# commit a binding for all clients.
#dhcp-rapid-commit
# Run an executable when a DHCP lease is created or destroyed.
# The arguments sent to the script are "add" or "del",
# then the MAC address, the IP address and finally the hostname
# if there is one.
#dhcp-script=/bin/echo
# Set the cachesize here.
#cache-size=150
# If you want to disable negative caching, uncomment this.
#no-negcache
# Normally responses which come from /etc/hosts and the DHCP lease
# file have Time-To-Live set as zero, which conventionally means
# do not cache further. If you are happy to trade lower load on the
# server for potentially stale data, you can set a time-to-live (in
# seconds) here.
#local-ttl=
# If you want dnsmasq to detect attempts by Verisign to send queries
# to unregistered .com and .net hosts to its sitefinder service and
# have dnsmasq instead return the correct NXDOMAIN response, uncomment
# this line. You can add similar lines to do the same for other
# registries which have implemented wildcard A records.
#bogus-nxdomain=64.94.110.11
# If you want to fix up DNS results from upstream servers, use the
# alias option. This only works for IPv4.
# This alias makes a result of 1.2.3.4 appear as 5.6.7.8
#alias=1.2.3.4,5.6.7.8
# and this maps 1.2.3.x to 5.6.7.x
#alias=1.2.3.0,5.6.7.0,255.255.255.0
# and this maps 192.168.0.10->192.168.0.40 to 10.0.0.10->10.0.0.40
#alias=192.168.0.10-192.168.0.40,10.0.0.0,255.255.255.0
# Change these lines if you want dnsmasq to serve MX records.
# Return an MX record named "maildomain.com" with target
# servermachine.com and preference 50
#mx-host=maildomain.com,servermachine.com,50
# Set the default target for MX records created using the localmx option.
#mx-target=servermachine.com
# Return an MX record pointing to the mx-target for all local
# machines.
#localmx
# Return an MX record pointing to itself for all local machines.
#selfmx
# Change the following lines if you want dnsmasq to serve SRV
# records. These are useful if you want to serve ldap requests for
# Active Directory and other windows-originated DNS requests.
# See RFC 2782.
# You may add multiple srv-host lines.
# The fields are <name>,<target>,<port>,<priority>,<weight>
# If the domain part is missing from the name (so that it just has the
# service and protocol sections) then the domain given by the domain=
# config option is used. (Note that expand-hosts does not need to be
# set for this to work.)
# A SRV record sending LDAP for the example.com domain to
# ldapserver.example.com port 389
#srv-host=_ldap._tcp.example.com,ldapserver.example.com,389
# A SRV record sending LDAP for the example.com domain to
# ldapserver.example.com port 389 (using domain=)
#domain=example.com
#srv-host=_ldap._tcp,ldapserver.example.com,389
# Two SRV records for LDAP, each with different priorities
#srv-host=_ldap._tcp.example.com,ldapserver.example.com,389,1
#srv-host=_ldap._tcp.example.com,ldapserver.example.com,389,2
# A SRV record indicating that there is no LDAP server for the domain
# example.com
#srv-host=_ldap._tcp.example.com
# The following line shows how to make dnsmasq serve an arbitrary PTR
# record. This is useful for DNS-SD. (Note that the
# domain-name expansion done for SRV records _does_not
# occur for PTR records.)
#ptr-record=_http._tcp.dns-sd-services,"New Employee Page._http._tcp.dns-sd-services"
# Change the following lines to enable dnsmasq to serve TXT records.
# These are used for things like SPF and zeroconf. (Note that the
# domain-name expansion done for SRV records _does_not
# occur for TXT records.)
#Example SPF.
#txt-record=example.com,"v=spf1 a -all"
#Example zeroconf
#txt-record=_http._tcp.example.com,name=value,paper=A4
# Provide an alias for a "local" DNS name. Note that this _only_ works
# for targets which are names from DHCP or /etc/hosts. Give host
# "bert" another name, bertrand
#cname=bertrand,bert
# For debugging purposes, log each DNS query as it passes through
# dnsmasq.
#log-queries
# Log lots of extra information about DHCP transactions.
#log-dhcp
# Include another lot of configuration options.
#conf-file=/etc/dnsmasq.more.conf
#conf-dir=/etc/dnsmasq.d
# Include all the files in a directory except those ending in .bak
#conf-dir=/etc/dnsmasq.d,.bak
# Include all files in a directory which end in .conf
#conf-dir=/etc/dnsmasq.d/,*.conf
# Include all files in /etc/dnsmasq.d except RPM backup files
conf-dir=/etc/dnsmasq.d,.rpmnew,.rpmsave,.rpmorig
# If a DHCP client claims that its name is "wpad", ignore that.
# This fixes a security hole. see CERT Vulnerability VU#598349
#dhcp-name-match=set:wpad-ignore,wpad
#dhcp-ignore-names=tag:wpad-ignore

View File

@ -1,693 +0,0 @@
# Configuration file for dnsmasq.
#
# Format is one option per line, legal options are the same
# as the long options legal on the command line. See
# "/usr/sbin/dnsmasq --help" or "man 8 dnsmasq" for details.
# Listen on this specific port instead of the standard DNS port
# (53). Setting this to zero completely disables DNS function,
# leaving only DHCP and/or TFTP.
#port=5353
# The following two options make you a better netizen, since they
# tell dnsmasq to filter out queries which the public DNS cannot
# answer, and which load the servers (especially the root servers)
# unnecessarily. If you have a dial-on-demand link they also stop
# these requests from bringing up the link unnecessarily.
# Never forward plain names (without a dot or domain part)
#domain-needed
# Never forward addresses in the non-routed address spaces.
#bogus-priv
# Uncomment these to enable DNSSEC validation and caching:
# (Requires dnsmasq to be built with DNSSEC option.)
#conf-file=/usr/share/dnsmasq/trust-anchors.conf
#dnssec
# Replies which are not DNSSEC signed may be legitimate, because the domain
# is unsigned, or may be forgeries. Setting this option tells dnsmasq to
# check that an unsigned reply is OK, by finding a secure proof that a DS
# record somewhere between the root and the domain does not exist.
# The cost of setting this is that even queries in unsigned domains will need
# one or more extra DNS queries to verify.
#dnssec-check-unsigned
# Uncomment this to filter useless windows-originated DNS requests
# which can trigger dial-on-demand links needlessly.
# Note that (amongst other things) this blocks all SRV requests,
# so don't use it if you use eg Kerberos, SIP, XMMP or Google-talk.
# This option only affects forwarding, SRV records originating for
# dnsmasq (via srv-host= lines) are not suppressed by it.
#filterwin2k
# Change this line if you want dns to get its upstream servers from
# somewhere other that /etc/resolv.conf
#resolv-file=
# By default, dnsmasq will send queries to any of the upstream
# servers it knows about and tries to favour servers to are known
# to be up. Uncommenting this forces dnsmasq to try each query
# with each server strictly in the order they appear in
# /etc/resolv.conf
#strict-order
# If you don't want dnsmasq to read /etc/resolv.conf or any other
# file, getting its servers from this file instead (see below), then
# uncomment this.
#no-resolv
# If you don't want dnsmasq to poll /etc/resolv.conf or other resolv
# files for changes and re-read them then uncomment this.
#no-poll
# Add other name servers here, with domain specs if they are for
# non-public domains.
#server=/localnet/192.168.0.1
# Example of routing PTR queries to nameservers: this will send all
# address->name queries for 192.168.3/24 to nameserver 10.1.2.3
#server=/3.168.192.in-addr.arpa/10.1.2.3
# Add local-only domains here, queries in these domains are answered
# from /etc/hosts or DHCP only.
#local=/localnet/
# Add domains which you want to force to an IP address here.
# The example below send any host in double-click.net to a local
# web-server.
#address=/double-click.net/127.0.0.1
# --address (and --server) work with IPv6 addresses too.
#address=/www.thekelleys.org.uk/fe80::20d:60ff:fe36:f83
# Add the IPs of all queries to yahoo.com, google.com, and their
# subdomains to the vpn and search ipsets:
#ipset=/yahoo.com/google.com/vpn,search
# You can control how dnsmasq talks to a server: this forces
# queries to 10.1.2.3 to be routed via eth1
# server=10.1.2.3@eth1
# and this sets the source (ie local) address used to talk to
# 10.1.2.3 to 192.168.1.1 port 55 (there must be an interface with that
# IP on the machine, obviously).
# server=10.1.2.3@192.168.1.1#55
# If you want dnsmasq to change uid and gid to something other
# than the default, edit the following lines.
user=dnsmasq
group=dnsmasq
# If you want dnsmasq to listen for DHCP and DNS requests only on
# specified interfaces (and the loopback) give the name of the
# interface (eg eth0) here.
# Repeat the line for more than one interface.
#interface=
# Listen only on localhost by default
interface=lo
# Or you can specify which interface _not_ to listen on
#except-interface=
# Or which to listen on by address (remember to include 127.0.0.1 if
# you use this.)
#listen-address=
# If you want dnsmasq to provide only DNS service on an interface,
# configure it as shown above, and then use the following line to
# disable DHCP and TFTP on it.
#no-dhcp-interface=
# Serve DNS and DHCP only to networks directly connected to this machine.
# Any interface= line will override it.
#local-service
# On systems which support it, dnsmasq binds the wildcard address,
# even when it is listening on only some interfaces. It then discards
# requests that it shouldn't reply to. This has the advantage of
# working even when interfaces come and go and change address. If you
# want dnsmasq to really bind only the interfaces it is listening on,
# uncomment this option. About the only time you may need this is when
# running another nameserver on the same machine.
#
# To listen only on localhost and do not receive packets on other
# interfaces, bind only to lo device. Comment out to bind on single
# wildcard socket.
bind-interfaces
# If you don't want dnsmasq to read /etc/hosts, uncomment the
# following line.
#no-hosts
# or if you want it to read another file, as well as /etc/hosts, use
# this.
#addn-hosts=/etc/banner_add_hosts
# Set this (and domain: see below) if you want to have a domain
# automatically added to simple names in a hosts-file.
#expand-hosts
# Set the domain for dnsmasq. this is optional, but if it is set, it
# does the following things.
# 1) Allows DHCP hosts to have fully qualified domain names, as long
# as the domain part matches this setting.
# 2) Sets the "domain" DHCP option thereby potentially setting the
# domain of all systems configured by DHCP
# 3) Provides the domain part for "expand-hosts"
#domain=thekelleys.org.uk
# Set a different domain for a particular subnet
#domain=wireless.thekelleys.org.uk,192.168.2.0/24
# Same idea, but range rather then subnet
#domain=reserved.thekelleys.org.uk,192.68.3.100,192.168.3.200
# Uncomment this to enable the integrated DHCP server, you need
# to supply the range of addresses available for lease and optionally
# a lease time. If you have more than one network, you will need to
# repeat this for each network on which you want to supply DHCP
# service.
#dhcp-range=192.168.0.50,192.168.0.150,12h
# This is an example of a DHCP range where the netmask is given. This
# is needed for networks we reach the dnsmasq DHCP server via a relay
# agent. If you don't know what a DHCP relay agent is, you probably
# don't need to worry about this.
#dhcp-range=192.168.0.50,192.168.0.150,255.255.255.0,12h
# This is an example of a DHCP range which sets a tag, so that
# some DHCP options may be set only for this network.
#dhcp-range=set:red,192.168.0.50,192.168.0.150
# Use this DHCP range only when the tag "green" is set.
#dhcp-range=tag:green,192.168.0.50,192.168.0.150,12h
# Specify a subnet which can't be used for dynamic address allocation,
# is available for hosts with matching --dhcp-host lines. Note that
# dhcp-host declarations will be ignored unless there is a dhcp-range
# of some type for the subnet in question.
# In this case the netmask is implied (it comes from the network
# configuration on the machine running dnsmasq) it is possible to give
# an explicit netmask instead.
#dhcp-range=192.168.0.0,static
# Enable DHCPv6. Note that the prefix-length does not need to be specified
# and defaults to 64 if missing/
#dhcp-range=1234::2, 1234::500, 64, 12h
# Do Router Advertisements, BUT NOT DHCP for this subnet.
#dhcp-range=1234::, ra-only
# Do Router Advertisements, BUT NOT DHCP for this subnet, also try and
# add names to the DNS for the IPv6 address of SLAAC-configured dual-stack
# hosts. Use the DHCPv4 lease to derive the name, network segment and
# MAC address and assume that the host will also have an
# IPv6 address calculated using the SLAAC algorithm.
#dhcp-range=1234::, ra-names
# Do Router Advertisements, BUT NOT DHCP for this subnet.
# Set the lifetime to 46 hours. (Note: minimum lifetime is 2 hours.)
#dhcp-range=1234::, ra-only, 48h
# Do DHCP and Router Advertisements for this subnet. Set the A bit in the RA
# so that clients can use SLAAC addresses as well as DHCP ones.
#dhcp-range=1234::2, 1234::500, slaac
# Do Router Advertisements and stateless DHCP for this subnet. Clients will
# not get addresses from DHCP, but they will get other configuration information.
# They will use SLAAC for addresses.
#dhcp-range=1234::, ra-stateless
# Do stateless DHCP, SLAAC, and generate DNS names for SLAAC addresses
# from DHCPv4 leases.
#dhcp-range=1234::, ra-stateless, ra-names
# Do router advertisements for all subnets where we're doing DHCPv6
# Unless overridden by ra-stateless, ra-names, et al, the router
# advertisements will have the M and O bits set, so that the clients
# get addresses and configuration from DHCPv6, and the A bit reset, so the
# clients don't use SLAAC addresses.
#enable-ra
# Supply parameters for specified hosts using DHCP. There are lots
# of valid alternatives, so we will give examples of each. Note that
# IP addresses DO NOT have to be in the range given above, they just
# need to be on the same network. The order of the parameters in these
# do not matter, it's permissible to give name, address and MAC in any
# order.
# Always allocate the host with Ethernet address 11:22:33:44:55:66
# The IP address 192.168.0.60
#dhcp-host=11:22:33:44:55:66,192.168.0.60
# Always set the name of the host with hardware address
# 11:22:33:44:55:66 to be "fred"
#dhcp-host=11:22:33:44:55:66,fred
# Always give the host with Ethernet address 11:22:33:44:55:66
# the name fred and IP address 192.168.0.60 and lease time 45 minutes
#dhcp-host=11:22:33:44:55:66,fred,192.168.0.60,45m
# Give a host with Ethernet address 11:22:33:44:55:66 or
# 12:34:56:78:90:12 the IP address 192.168.0.60. Dnsmasq will assume
# that these two Ethernet interfaces will never be in use at the same
# time, and give the IP address to the second, even if it is already
# in use by the first. Useful for laptops with wired and wireless
# addresses.
#dhcp-host=11:22:33:44:55:66,12:34:56:78:90:12,192.168.0.60
# Give the machine which says its name is "bert" IP address
# 192.168.0.70 and an infinite lease
#dhcp-host=bert,192.168.0.70,infinite
# Always give the host with client identifier 01:02:02:04
# the IP address 192.168.0.60
#dhcp-host=id:01:02:02:04,192.168.0.60
# Always give the InfiniBand interface with hardware address
# 80:00:00:48:fe:80:00:00:00:00:00:00:f4:52:14:03:00:28:05:81 the
# ip address 192.168.0.61. The client id is derived from the prefix
# ff:00:00:00:00:00:02:00:00:02:c9:00 and the last 8 pairs of
# hex digits of the hardware address.
#dhcp-host=id:ff:00:00:00:00:00:02:00:00:02:c9:00:f4:52:14:03:00:28:05:81,192.168.0.61
# Always give the host with client identifier "marjorie"
# the IP address 192.168.0.60
#dhcp-host=id:marjorie,192.168.0.60
# Enable the address given for "judge" in /etc/hosts
# to be given to a machine presenting the name "judge" when
# it asks for a DHCP lease.
#dhcp-host=judge
# Never offer DHCP service to a machine whose Ethernet
# address is 11:22:33:44:55:66
#dhcp-host=11:22:33:44:55:66,ignore
# Ignore any client-id presented by the machine with Ethernet
# address 11:22:33:44:55:66. This is useful to prevent a machine
# being treated differently when running under different OS's or
# between PXE boot and OS boot.
#dhcp-host=11:22:33:44:55:66,id:*
# Send extra options which are tagged as "red" to
# the machine with Ethernet address 11:22:33:44:55:66
#dhcp-host=11:22:33:44:55:66,set:red
# Send extra options which are tagged as "red" to
# any machine with Ethernet address starting 11:22:33:
#dhcp-host=11:22:33:*:*:*,set:red
# Give a fixed IPv6 address and name to client with
# DUID 00:01:00:01:16:d2:83:fc:92:d4:19:e2:d8:b2
# Note the MAC addresses CANNOT be used to identify DHCPv6 clients.
# Note also that the [] around the IPv6 address are obligatory.
#dhcp-host=id:00:01:00:01:16:d2:83:fc:92:d4:19:e2:d8:b2, fred, [1234::5]
# Ignore any clients which are not specified in dhcp-host lines
# or /etc/ethers. Equivalent to ISC "deny unknown-clients".
# This relies on the special "known" tag which is set when
# a host is matched.
#dhcp-ignore=tag:!known
# Send extra options which are tagged as "red" to any machine whose
# DHCP vendorclass string includes the substring "Linux"
#dhcp-vendorclass=set:red,Linux
# Send extra options which are tagged as "red" to any machine one
# of whose DHCP userclass strings includes the substring "accounts"
#dhcp-userclass=set:red,accounts
# Send extra options which are tagged as "red" to any machine whose
# MAC address matches the pattern.
#dhcp-mac=set:red,00:60:8C:*:*:*
# If this line is uncommented, dnsmasq will read /etc/ethers and act
# on the ethernet-address/IP pairs found there just as if they had
# been given as --dhcp-host options. Useful if you keep
# MAC-address/host mappings there for other purposes.
#read-ethers
# Send options to hosts which ask for a DHCP lease.
# See RFC 2132 for details of available options.
# Common options can be given to dnsmasq by name:
# run "dnsmasq --help dhcp" to get a list.
# Note that all the common settings, such as netmask and
# broadcast address, DNS server and default route, are given
# sane defaults by dnsmasq. You very likely will not need
# any dhcp-options. If you use Windows clients and Samba, there
# are some options which are recommended, they are detailed at the
# end of this section.
# Override the default route supplied by dnsmasq, which assumes the
# router is the same machine as the one running dnsmasq.
#dhcp-option=3,1.2.3.4
# Do the same thing, but using the option name
#dhcp-option=option:router,1.2.3.4
# Override the default route supplied by dnsmasq and send no default
# route at all. Note that this only works for the options sent by
# default (1, 3, 6, 12, 28) the same line will send a zero-length option
# for all other option numbers.
#dhcp-option=3
# Set the NTP time server addresses to 192.168.0.4 and 10.10.0.5
#dhcp-option=option:ntp-server,192.168.0.4,10.10.0.5
# Send DHCPv6 option. Note [] around IPv6 addresses.
#dhcp-option=option6:dns-server,[1234::77],[1234::88]
# Send DHCPv6 option for namservers as the machine running
# dnsmasq and another.
#dhcp-option=option6:dns-server,[::],[1234::88]
# Ask client to poll for option changes every six hours. (RFC4242)
#dhcp-option=option6:information-refresh-time,6h
# Set option 58 client renewal time (T1). Defaults to half of the
# lease time if not specified. (RFC2132)
#dhcp-option=option:T1,1m
# Set option 59 rebinding time (T2). Defaults to 7/8 of the
# lease time if not specified. (RFC2132)
#dhcp-option=option:T2,2m
# Set the NTP time server address to be the same machine as
# is running dnsmasq
#dhcp-option=42,0.0.0.0
# Set the NIS domain name to "welly"
#dhcp-option=40,welly
# Set the default time-to-live to 50
#dhcp-option=23,50
# Set the "all subnets are local" flag
#dhcp-option=27,1
# Send the etherboot magic flag and then etherboot options (a string).
#dhcp-option=128,e4:45:74:68:00:00
#dhcp-option=129,NIC=eepro100
# Specify an option which will only be sent to the "red" network
# (see dhcp-range for the declaration of the "red" network)
# Note that the tag: part must precede the option: part.
#dhcp-option = tag:red, option:ntp-server, 192.168.1.1
# The following DHCP options set up dnsmasq in the same way as is specified
# for the ISC dhcpcd in
# http://www.samba.org/samba/ftp/docs/textdocs/DHCP-Server-Configuration.txt
# adapted for a typical dnsmasq installation where the host running
# dnsmasq is also the host running samba.
# you may want to uncomment some or all of them if you use
# Windows clients and Samba.
#dhcp-option=19,0 # option ip-forwarding off
#dhcp-option=44,0.0.0.0 # set netbios-over-TCP/IP nameserver(s) aka WINS server(s)
#dhcp-option=45,0.0.0.0 # netbios datagram distribution server
#dhcp-option=46,8 # netbios node type
# Send an empty WPAD option. This may be REQUIRED to get windows 7 to behave.
#dhcp-option=252,"\n"
# Send RFC-3397 DNS domain search DHCP option. WARNING: Your DHCP client
# probably doesn't support this......
#dhcp-option=option:domain-search,eng.apple.com,marketing.apple.com
# Send RFC-3442 classless static routes (note the netmask encoding)
#dhcp-option=121,192.168.1.0/24,1.2.3.4,10.0.0.0/8,5.6.7.8
# Send vendor-class specific options encapsulated in DHCP option 43.
# The meaning of the options is defined by the vendor-class so
# options are sent only when the client supplied vendor class
# matches the class given here. (A substring match is OK, so "MSFT"
# matches "MSFT" and "MSFT 5.0"). This example sets the
# mtftp address to 0.0.0.0 for PXEClients.
#dhcp-option=vendor:PXEClient,1,0.0.0.0
# Send microsoft-specific option to tell windows to release the DHCP lease
# when it shuts down. Note the "i" flag, to tell dnsmasq to send the
# value as a four-byte integer - that's what microsoft wants. See
# http://technet2.microsoft.com/WindowsServer/en/library/a70f1bb7-d2d4-49f0-96d6-4b7414ecfaae1033.mspx?mfr=true
#dhcp-option=vendor:MSFT,2,1i
# Send the Encapsulated-vendor-class ID needed by some configurations of
# Etherboot to allow is to recognise the DHCP server.
#dhcp-option=vendor:Etherboot,60,"Etherboot"
# Send options to PXELinux. Note that we need to send the options even
# though they don't appear in the parameter request list, so we need
# to use dhcp-option-force here.
# See http://syslinux.zytor.com/pxe.php#special for details.
# Magic number - needed before anything else is recognised
#dhcp-option-force=208,f1:00:74:7e
# Configuration file name
#dhcp-option-force=209,configs/common
# Path prefix
#dhcp-option-force=210,/tftpboot/pxelinux/files/
# Reboot time. (Note 'i' to send 32-bit value)
#dhcp-option-force=211,30i
# Set the boot filename for netboot/PXE. You will only need
# this if you want to boot machines over the network and you will need
# a TFTP server; either dnsmasq's built-in TFTP server or an
# external one. (See below for how to enable the TFTP server.)
#dhcp-boot=pxelinux.0
# The same as above, but use custom tftp-server instead machine running dnsmasq
#dhcp-boot=pxelinux,server.name,192.168.1.100
# Boot for iPXE. The idea is to send two different
# filenames, the first loads iPXE, and the second tells iPXE what to
# load. The dhcp-match sets the ipxe tag for requests from iPXE.
#dhcp-boot=undionly.kpxe
#dhcp-match=set:ipxe,175 # iPXE sends a 175 option.
#dhcp-boot=tag:ipxe,http://boot.ipxe.org/demo/boot.php
# Encapsulated options for iPXE. All the options are
# encapsulated within option 175
#dhcp-option=encap:175, 1, 5b # priority code
#dhcp-option=encap:175, 176, 1b # no-proxydhcp
#dhcp-option=encap:175, 177, string # bus-id
#dhcp-option=encap:175, 189, 1b # BIOS drive code
#dhcp-option=encap:175, 190, user # iSCSI username
#dhcp-option=encap:175, 191, pass # iSCSI password
# Test for the architecture of a netboot client. PXE clients are
# supposed to send their architecture as option 93. (See RFC 4578)
#dhcp-match=peecees, option:client-arch, 0 #x86-32
#dhcp-match=itanics, option:client-arch, 2 #IA64
#dhcp-match=hammers, option:client-arch, 6 #x86-64
#dhcp-match=mactels, option:client-arch, 7 #EFI x86-64
# Do real PXE, rather than just booting a single file, this is an
# alternative to dhcp-boot.
#pxe-prompt="What system shall I netboot?"
# or with timeout before first available action is taken:
#pxe-prompt="Press F8 for menu.", 60
# Available boot services. for PXE.
#pxe-service=x86PC, "Boot from local disk"
# Loads <tftp-root>/pxelinux.0 from dnsmasq TFTP server.
#pxe-service=x86PC, "Install Linux", pxelinux
# Loads <tftp-root>/pxelinux.0 from TFTP server at 1.2.3.4.
# Beware this fails on old PXE ROMS.
#pxe-service=x86PC, "Install Linux", pxelinux, 1.2.3.4
# Use bootserver on network, found my multicast or broadcast.
#pxe-service=x86PC, "Install windows from RIS server", 1
# Use bootserver at a known IP address.
#pxe-service=x86PC, "Install windows from RIS server", 1, 1.2.3.4
# If you have multicast-FTP available,
# information for that can be passed in a similar way using options 1
# to 5. See page 19 of
# http://download.intel.com/design/archives/wfm/downloads/pxespec.pdf
# Enable dnsmasq's built-in TFTP server
#enable-tftp
# Set the root directory for files available via TFTP.
#tftp-root=/var/ftpd
# Do not abort if the tftp-root is unavailable
#tftp-no-fail
# Make the TFTP server more secure: with this set, only files owned by
# the user dnsmasq is running as will be sent over the net.
#tftp-secure
# This option stops dnsmasq from negotiating a larger blocksize for TFTP
# transfers. It will slow things down, but may rescue some broken TFTP
# clients.
#tftp-no-blocksize
# Set the boot file name only when the "red" tag is set.
#dhcp-boot=tag:red,pxelinux.red-net
# An example of dhcp-boot with an external TFTP server: the name and IP
# address of the server are given after the filename.
# Can fail with old PXE ROMS. Overridden by --pxe-service.
#dhcp-boot=/var/ftpd/pxelinux.0,boothost,192.168.0.3
# If there are multiple external tftp servers having the same name
# (using /etc/hosts) then that name can be specified as the
# tftp_servername (the third option to dhcp-boot) and in that
# case dnsmasq resolves this name and returns the resultant IP
# addresses in round robin fashion. This facility can be used to
# load balance the tftp load among a set of servers.
#dhcp-boot=/var/ftpd/pxelinux.0,boothost,tftp_server_name
# Set the limit on DHCP leases, the default is 150
#dhcp-lease-max=150
# The DHCP server needs somewhere on disk to keep its lease database.
# This defaults to a sane location, but if you want to change it, use
# the line below.
#dhcp-leasefile=/var/lib/dnsmasq/dnsmasq.leases
# Set the DHCP server to authoritative mode. In this mode it will barge in
# and take over the lease for any client which broadcasts on the network,
# whether it has a record of the lease or not. This avoids long timeouts
# when a machine wakes up on a new network. DO NOT enable this if there's
# the slightest chance that you might end up accidentally configuring a DHCP
# server for your campus/company. The ISC server uses
# the same option, and this URL provides more information:
# http://www.isc.org/files/auth.html
#dhcp-authoritative
# Set the DHCP server to enable DHCPv4 Rapid Commit Option per RFC 4039.
# In this mode it will respond to a DHCPDISCOVER message including a Rapid Commit
# option with a DHCPACK including a Rapid Commit option and fully committed address
# and configuration information. This must only be enabled if either the server is
# the only server for the subnet, or multiple servers are present and they each
# commit a binding for all clients.
#dhcp-rapid-commit
# Run an executable when a DHCP lease is created or destroyed.
# The arguments sent to the script are "add" or "del",
# then the MAC address, the IP address and finally the hostname
# if there is one.
#dhcp-script=/bin/echo
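# A minimal lease-event logger, as a sketch (the script path and log file
# are placeholders; the arguments are those described above):
#   #!/bin/sh
#   # $1 is "add" or "del", $2 the MAC, $3 the IP, $4 the hostname (if any)
#   echo "$(date) $1 $2 $3 ${4:-}" >> /var/log/dhcp-events.log
#dhcp-script=/usr/local/bin/dhcp-event.sh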
# Set the cachesize here.
#cache-size=150
# If you want to disable negative caching, uncomment this.
#no-negcache
# Normally responses which come from /etc/hosts and the DHCP lease
# file have Time-To-Live set as zero, which conventionally means
# do not cache further. If you are happy to trade lower load on the
# server for potentially stale data, you can set a time-to-live (in
# seconds) here.
#local-ttl=
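# For example, to cache such answers for five minutes (illustrative value):
#local-ttl=300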
# If you want dnsmasq to detect attempts by Verisign to send queries
# to unregistered .com and .net hosts to its sitefinder service and
# have dnsmasq instead return the correct NXDOMAIN response, uncomment
# this line. You can add similar lines to do the same for other
# registries which have implemented wildcard A records.
#bogus-nxdomain=64.94.110.11
# If you want to fix up DNS results from upstream servers, use the
# alias option. This only works for IPv4.
# This alias makes a result of 1.2.3.4 appear as 5.6.7.8
#alias=1.2.3.4,5.6.7.8
# and this maps 1.2.3.x to 5.6.7.x
#alias=1.2.3.0,5.6.7.0,255.255.255.0
# and this maps 192.168.0.10->192.168.0.40 to 10.0.0.10->10.0.0.40
#alias=192.168.0.10-192.168.0.40,10.0.0.0,255.255.255.0
# Change these lines if you want dnsmasq to serve MX records.
# Return an MX record named "maildomain.com" with target
# servermachine.com and preference 50
#mx-host=maildomain.com,servermachine.com,50
# Set the default target for MX records created using the localmx option.
#mx-target=servermachine.com
# Return an MX record pointing to the mx-target for all local
# machines.
#localmx
# Return an MX record pointing to itself for all local machines.
#selfmx
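# For example, these two lines together would return mail.example.com
# (a placeholder target) as the MX for every local machine:
#mx-target=mail.example.com
#localmx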
# Change the following lines if you want dnsmasq to serve SRV
# records. These are useful if you want to serve ldap requests for
# Active Directory and other windows-originated DNS requests.
# See RFC 2782.
# You may add multiple srv-host lines.
# The fields are <name>,<target>,<port>,<priority>,<weight>
# If the domain part is missing from the name (so that it just has the
# service and protocol sections) then the domain given by the domain=
# config option is used. (Note that expand-hosts does not need to be
# set for this to work.)
# A SRV record sending LDAP for the example.com domain to
# ldapserver.example.com port 389
#srv-host=_ldap._tcp.example.com,ldapserver.example.com,389
# A SRV record sending LDAP for the example.com domain to
# ldapserver.example.com port 389 (using domain=)
#domain=example.com
#srv-host=_ldap._tcp,ldapserver.example.com,389
# Two SRV records for LDAP, each with different priorities
#srv-host=_ldap._tcp.example.com,ldapserver.example.com,389,1
#srv-host=_ldap._tcp.example.com,ldapserver.example.com,389,2
# A SRV record indicating that there is no LDAP server for the domain
# example.com
#srv-host=_ldap._tcp.example.com
# The following line shows how to make dnsmasq serve an arbitrary PTR
# record. This is useful for DNS-SD. (Note that the
# domain-name expansion done for SRV records does _not_
# occur for PTR records.)
#ptr-record=_http._tcp.dns-sd-services,"New Employee Page._http._tcp.dns-sd-services"
# Change the following lines to enable dnsmasq to serve TXT records.
# These are used for things like SPF and zeroconf. (Note that the
# domain-name expansion done for SRV records does _not_
# occur for TXT records.)
#Example SPF.
#txt-record=example.com,"v=spf1 a -all"
#Example zeroconf
#txt-record=_http._tcp.example.com,name=value,paper=A4
# Provide an alias for a "local" DNS name. Note that this _only_ works
# for targets which are names from DHCP or /etc/hosts. Give host
# "bert" another name, bertrand
#cname=bertrand,bert
# For debugging purposes, log each DNS query as it passes through
# dnsmasq.
#log-queries
# Log lots of extra information about DHCP transactions.
#log-dhcp
# Include another lot of configuration options.
#conf-file=/etc/dnsmasq.more.conf
#conf-dir=/etc/dnsmasq.d
# Include all the files in a directory except those ending in .bak
#conf-dir=/etc/dnsmasq.d,.bak
# Include all files in a directory which end in .conf
#conf-dir=/etc/dnsmasq.d/,*.conf
# Include all files in /etc/dnsmasq.d except RPM backup files
conf-dir=/etc/dnsmasq.d,.rpmnew,.rpmsave,.rpmorig
# If a DHCP client claims that its name is "wpad", ignore that.
# This fixes a security hole. See CERT Vulnerability VU#598349
#dhcp-name-match=set:wpad-ignore,wpad
#dhcp-ignore-names=tag:wpad-ignore

View File

@ -1,2 +0,0 @@
server=/fedoraproject.org/10.3.163.33
server=/fedoraproject.org/10.3.163.34

View File

@ -1,2 +0,0 @@
server=/fedoraproject.org/10.3.163.33
server=/fedoraproject.org/10.3.163.34

View File

@ -1,90 +0,0 @@
#!/bin/bash -xe
# Note: this is done as a script because it needs to be run after
# every docker service restart.
# And just doing an iptables-restore is going to mess up kubernetes'
# NAT table.
# And it gets even better with openshift! It thinks I'm stupid and need
# to be corrected by automatically adding the "allow all" rules back at
# the top as soon as I remove them.
# To circumvent that, we're just adding a new chain for this, as it seems
# that it doesn't do anything with the firewall if we keep its rules in
# place. (it doesn't check the order of its rules, only that they exist)
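# (A quick sanity check, as a sketch: after a docker restart, "iptables -S
# FORWARD | head" should still show the FILTER_FORWARD jump at the top.)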
if [ "`iptables -nL | grep FILTER_FORWARD`" == "" ];
then
iptables -N FILTER_FORWARD
fi
if [ "`iptables -nL | grep 'FILTER_FORWARD all'`" == "" ];
then
iptables -I FORWARD 1 -j FILTER_FORWARD
iptables -I FORWARD 2 -j REJECT
iptables -I DOCKER-ISOLATION 1 -j FILTER_FORWARD
fi
# Delete all old rules
iptables --flush FILTER_FORWARD
# Re-insert some basic rules
iptables -A FILTER_FORWARD -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
iptables -A FILTER_FORWARD --src 10.1.0.0/16 --dst 10.1.0.0/16 -j ACCEPT
# Now insert access to allowed boxes
# docker-registry no cdn
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.169.119 --dport 80 -j ACCEPT
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.169.127 --dport 80 -j ACCEPT
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.169.119 --dport 443 -j ACCEPT
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.169.127 --dport 443 -j ACCEPT
# Candidate registry
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.169.102 --dport 80 -j ACCEPT
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.169.102 --dport 443 -j ACCEPT
#koji.fp.o
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.169.104 --dport 80 -j ACCEPT
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.169.104 --dport 443 -j ACCEPT
# pkgs
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.169.116 --dport 80 -j ACCEPT
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.169.116 --dport 443 -j ACCEPT
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.169.116 --dport 9418 -j ACCEPT
# DNS
iptables -A FILTER_FORWARD -p udp -m udp -d 10.3.163.33 --dport 53 -j ACCEPT
iptables -A FILTER_FORWARD -p udp -m udp -d 10.3.163.34 --dport 53 -j ACCEPT
# mirrors.fp.o
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.76 --dport 443 -j ACCEPT
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.77 --dport 443 -j ACCEPT
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.75 --dport 443 -j ACCEPT
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.74 --dport 443 -j ACCEPT
# infrastructure.fp.o (infra repos)
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.35 --dport 443 -j ACCEPT
# Kerberos
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.76 --dport 1088 -j ACCEPT
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.77 --dport 1088 -j ACCEPT
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.75 --dport 1088 -j ACCEPT
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.74 --dport 1088 -j ACCEPT
# dl.phx2
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.49 --dport 80 -j ACCEPT
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.49 --dport 443 -j ACCEPT
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.50 --dport 80 -j ACCEPT
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.50 --dport 443 -j ACCEPT
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.51 --dport 80 -j ACCEPT
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.51 --dport 443 -j ACCEPT
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.85 --dport 80 -j ACCEPT
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.85 --dport 443 -j ACCEPT
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.84 --dport 80 -j ACCEPT
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.84 --dport 443 -j ACCEPT
# Docker is CRAZY and forces Google DNS upon us.....
iptables -A FILTER_FORWARD -p udp -m udp -d 8.8.8.8 --dport 53 -j ACCEPT
iptables -A FILTER_FORWARD -p udp -m udp -d 8.8.4.4 --dport 53 -j ACCEPT
# aarch64 cluster
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.170.147 --dport 8443 -j ACCEPT
iptables -A FORWARD -j REJECT --reject-with icmp-host-prohibited

View File

@ -1,86 +0,0 @@
#!/bin/bash -xe
# Note: this is done as a script because it needs to be run after
# every docker service restart.
# And just doing an iptables-restore is going to mess up kubernetes'
# NAT table.
# And it gets even better with openshift! It thinks I'm stupid and need
# to be corrected by automatically adding the "allow all" rules back at
# the top as soon as I remove them.
# To circumvent that, we're just adding a new chain for this, as it seems
# that it doesn't do anything with the firewall if we keep its rules in
# place. (it doesn't check the order of its rules, only that they exist)
if [ "`iptables -nL | grep FILTER_FORWARD`" == "" ];
then
iptables -N FILTER_FORWARD
fi
if [ "`iptables -nL | grep 'FILTER_FORWARD all'`" == "" ];
then
iptables -I FORWARD 1 -j FILTER_FORWARD
iptables -I FORWARD 2 -j REJECT
iptables -I DOCKER-ISOLATION 1 -j FILTER_FORWARD
fi
# Delete all old rules
iptables --flush FILTER_FORWARD
# Re-insert some basic rules
iptables -A FILTER_FORWARD -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
iptables -A FILTER_FORWARD --src 10.1.0.0/16 --dst 10.1.0.0/16 -j ACCEPT
# Now insert access to allowed boxes
# osbs
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.166.74 --dport 443 -j ACCEPT
# docker-registry
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.5.128.123 --dport 443 -j ACCEPT
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.5.128.124 --dport 443 -j ACCEPT
#koji.fp.o
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.167.64 --dport 80 -j ACCEPT
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.167.64 --dport 443 -j ACCEPT
# pkgs.stg
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.167.74 --dport 80 -j ACCEPT
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.167.74 --dport 443 -j ACCEPT
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.167.74 --dport 9418 -j ACCEPT
# DNS
iptables -A FILTER_FORWARD -p udp -m udp -d 10.3.163.33 --dport 53 -j ACCEPT
iptables -A FILTER_FORWARD -p udp -m udp -d 10.3.163.34 --dport 53 -j ACCEPT
# mirrors.fp.o
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.76 --dport 443 -j ACCEPT
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.77 --dport 443 -j ACCEPT
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.75 --dport 443 -j ACCEPT
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.167.74 --dport 443 -j ACCEPT
# infrastructure.fp.o (infra repos)
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.35 --dport 443 -j ACCEPT
# dl.phx2
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.49 --dport 80 -j ACCEPT
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.49 --dport 443 -j ACCEPT
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.50 --dport 80 -j ACCEPT
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.50 --dport 443 -j ACCEPT
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.51 --dport 80 -j ACCEPT
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.51 --dport 443 -j ACCEPT
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.85 --dport 80 -j ACCEPT
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.85 --dport 443 -j ACCEPT
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.84 --dport 80 -j ACCEPT
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.84 --dport 443 -j ACCEPT
# Docker is CRAZY and forces Google DNS upon us.....
iptables -A FILTER_FORWARD -p udp -m udp -d 8.8.8.8 --dport 53 -j ACCEPT
iptables -A FILTER_FORWARD -p udp -m udp -d 8.8.4.4 --dport 53 -j ACCEPT
# proxy
iptables -A FILTER_FORWARD -p tcp --dst 10.3.166.74 --dport 443 -j ACCEPT
# Kerberos
iptables -A FILTER_FORWARD -p tcp --dst 10.3.166.74 --dport 1088 -j ACCEPT
iptables -A FILTER_FORWARD -j REJECT --reject-with icmp-host-prohibited

View File

@ -1,9 +0,0 @@
{
"disable_plugins": [
{
"plugin_type": "exit_plugins",
"plugin_name": "import_image"
}
],
"enable_plugins": []
}

View File

@ -1,13 +0,0 @@
{
"disable_plugins": [
{
"plugin_type": "prebuild_plugins",
"plugin_name": "fetch_maven_artifacts"
},
{
"plugin_type": "exit_plugins",
"plugin_name": "import_image"
}
],
"enable_plugins": []
}

View File

@ -160,7 +160,6 @@ bvmhost-a64-14.iad2.fedoraproject.org
bvmhost-a64-15.iad2.fedoraproject.org
# These are lenovo emags in IAD2
bvmhost-a64-01.stg.iad2.fedoraproject.org
bvmhost-a64-osbs-01.iad2.fedoraproject.org
# ppc
bvmhost-p09-01.iad2.fedoraproject.org
bvmhost-p09-02.iad2.fedoraproject.org

View File

@ -57,7 +57,6 @@ ipa_client_shell_groups:
- sysadmin-messaging
- sysadmin-noc
- sysadmin-odcs
- sysadmin-osbs
- sysadmin-osbuild
- sysadmin-openscanhub
- sysadmin-qa

View File

@ -18,6 +18,4 @@ koji_root: "koji.fedoraproject.org/koji"
koji_server_url: "https://koji.fedoraproject.org/kojihub"
koji_topurl: "https://kojipkgs.fedoraproject.org/"
koji_weburl: "https://koji.fedoraproject.org/koji"
# These variables are for koji-containerbuild/osbs
osbs_url: "osbs.fedoraproject.org"
source_registry: "registry.fedoraproject.org"

View File

@ -26,8 +26,6 @@ lvm_size: 262144
max_mem_size: "{{ mem_size }}"
mem_size: 15360
num_cpus: 6
# These variables are for koji-containerbuild/osbs
osbs_url: "osbs.fedoraproject.org"
source_registry: "registry.fedoraproject.org"
virt_install_command: "{{ virt_install_command_one_nic_unsafe }}"
volgroup: /dev/BuildGuests

View File

@ -27,8 +27,6 @@ max_cpu: "{{ num_cpus }}"
max_mem_size: "{{ mem_size }}"
mem_size: 36864
num_cpus: 12
# These variables are for koji-containerbuild/osbs
osbs_url: "osbs.fedoraproject.org"
source_registry: "registry.fedoraproject.org"
virt_install_command: "{{ virt_install_command_aarch64_one_nic_unsafe }}"
volgroup: /dev/vg_guests

View File

@ -34,8 +34,6 @@ max_mem_size: "{{ mem_size }}"
mem_size: 40960
nfs_mount_opts: "rw,hard,bg,intr,noatime,nodev,nosuid,sec=sys,nfsvers=3"
num_cpus: 5
# These variables are for koji-containerbuild/osbs
osbs_url: "osbs.stg.fedoraproject.org"
source_registry: "registry.stg.fedoraproject.org"
# this is to enable nested virt, which we need for some builds
virt_install_command: "{{ virt_install_command_aarch64_one_nic_unsafe }}"

View File

@ -34,8 +34,6 @@ max_mem_size: "{{ mem_size }}"
mem_size: 10240
nfs_mount_opts: "rw,hard,bg,intr,noatime,nodev,nosuid,sec=sys,nfsvers=3"
num_cpus: 4
# These variables are for koji-containerbuild/osbs
osbs_url: "osbs.stg.fedoraproject.org"
source_registry: "registry.stg.fedoraproject.org"
virt_install_command: "{{ virt_install_command_ppc64le_one_nic_unsafe }}"
volgroup: /dev/vg_guests

View File

@ -33,8 +33,6 @@ max_mem_size: "{{ mem_size }}"
mem_size: 10240
nfs_mount_opts: "rw,hard,bg,intr,noatime,nodev,nosuid,sec=sys,nfsvers=4"
num_cpus: 4
# These variables are for koji-containerbuild/osbs
osbs_url: "osbs.stg.fedoraproject.org"
resolvconf: "resolv.conf/iad2"
source_registry: "registry.fedoraproject.org"
virt_install_command: "{{ virt_install_command_one_nic_unsafe }}"

View File

@ -36,7 +36,6 @@ mem_size: 32768
max_mem_size: 65536
nfs_mount_opts: "rw,hard,bg,intr,noatime,nodev,nosuid,sec=sys,nfsvers=4"
num_cpus: 16
osbs_url: "osbs.fedoraproject.org"
primary_auth_source: ipa
source_registry: "registry.fedoraproject.org"
# for systems that do not match the above - specify the same parameter in

View File

@ -1,9 +1,5 @@
---
# Define resources for this group of hosts here.
# Add custom iptable rule to allow stage koji to talk to
# osbs-dev.fedorainfracloud.org (will move to stage osbs later, this is for the
# sake of testing).
custom_rules: ['-A OUTPUT -p tcp -m tcp -d 209.132.184.60 --dport 8443 -j ACCEPT']
docker_registry: "candidate-registry.stg.fedoraproject.org"
# These are consumed by a task in roles/fedmsg/base/main.yml
fedmsg_certs:
@ -27,11 +23,9 @@ fedmsg_certs:
ipa_client_shell_groups:
- fi-apprentice
- sysadmin-noc
- sysadmin-osbs
- sysadmin-releng
- sysadmin-veteran
ipa_client_sudo_groups:
- sysadmin-osbs
- sysadmin-releng
ipa_host_group: kojihub
ipa_host_group_desc: Koji Hub hosts
@ -45,7 +39,6 @@ mem_size: 8192
# NOTE -- staging mounts read-only
nfs_mount_opts: "ro,hard,bg,intr,noatime,nodev,nosuid,nfsvers=3"
num_cpus: 8
osbs_url: "osbs.stg.fedoraproject.org"
source_registry: "registry.stg.fedoraproject.org"
# for systems that do not match the above - specify the same parameter in
# the host_vars/$hostname file

View File

@ -72,7 +72,6 @@ iad2_management_hosts:
- bvmhost-a64-09.mgmt.iad2.fedoraproject.org.
- bvmhost-a64-10.mgmt.iad2.fedoraproject.org.
- bvmhost-a64-11.mgmt.iad2.fedoraproject.org.
- bvmhost-a64-osbs-01.mgmt.iad2.fedoraproject.org.
- bvmhost-p09-01.mgmt.iad2.fedoraproject.org.
- bvmhost-p09-02.mgmt.iad2.fedoraproject.org.
- bvmhost-p09-03.mgmt.iad2.fedoraproject.org.

View File

@ -22,7 +22,6 @@ odcs_allowed_clients_users:
kevin: {"source_types": ["tag", "module", "build", "raw_config"], "target_dirs": ["private"]}
# This is token used by CCCC service running on https://jenkins-fedora-infra.apps.ci.centos.org/job/cccc.
odcs@service: {"source_types": ["tag", "module", "build", "raw_config"], "target_dirs": ["private"]}
osbs@service: {}
releng-odcs@service: {"source_types": ["tag", "module", "build", "raw_config"], "target_dirs": ["private"]}
# Default queues for general ODCS backends.
odcs_celery_queues:

View File

@ -17,7 +17,6 @@ odcs_allowed_clients_users:
humaton: {"source_types": ["tag", "module", "build", "raw_config"], "target_dirs": ["private"]}
jkaluza: {"source_types": ["tag", "module", "build", "raw_config"], "target_dirs": ["private"]}
mohanboddu: {"source_types": ["tag", "module", "build", "raw_config"], "target_dirs": ["private"]}
osbs@service: {}
# Default queues for general ODCS backends.
odcs_celery_queues:
- pungi_composes

View File

@ -1,36 +0,0 @@
---
# Define resources for this group of hosts here.
baseiptables: False
docker_cert_dir: "/etc/docker/certs.d/candidate-registry.fedoraproject.org"
docker_registry: "candidate-registry.fedoraproject.org"
# fedora container images required by buildroot
fedora_required_images:
- "fedora:latest"
ipa_client_shell_groups:
- fi-apprentice
- sysadmin-noc
- sysadmin-osbs
- sysadmin-releng
- sysadmin-veteran
ipa_client_sudo_groups:
- sysadmin-osbs
- sysadmin-releng
ipa_host_group: osbs
ipa_host_group_desc: OpenShift Build Service
koji_url: "koji.fedoraproject.org"
lvm_size: 60000
mem_size: 8192
num_cpus: 2
#openshift_ansible_upgrading: True
# docker images required by OpenShift Origin
openshift_required_images:
- "openshift/origin-pod"
osbs_client_conf_path: /etc/osbs.conf
osbs_koji_username: "kojibuilder"
osbs_url: "osbs.fedoraproject.org"
package_excludes: "docker*"
primary_auth_source: ipa
source_registry: "registry.fedoraproject.org"
sudoers: "{{ private }}/files/sudo/osbs-sudoers"
tcp_ports: [80, 443, 8443]

View File

@ -1,42 +0,0 @@
---
# Define resources for this group of hosts here.
#Docker command delegated host
composer: compose-x86-01.iad2.fedoraproject.org
docker_cert_dir: "/etc/docker/certs.d/candidate-registry.fedoraproject.org"
docker_registry: "candidate-registry.fedoraproject.org"
koji_url: "koji.fedoraproject.org"
lvm_size: 60000
max_cpu: "{{ num_cpus }}"
max_mem_size: "{{ mem_size }}"
mem_size: 8192
# Nagios configuration
nagios_Check_Services:
dhcpd: false
httpd: false
named: false
nrpe: true
sshd: true
swap: false
num_cpus: 2
openshift_node_labels: {'region': 'infra'}
openshift_schedulable: False
osbs_client_conf_path: /etc/osbs.conf
osbs_conf_readwrite_users:
- "system:serviceaccount:{{ osbs_namespace }}:default"
- "system:serviceaccount:{{ osbs_namespace }}:builder"
osbs_conf_service_accounts:
- koji
- builder
osbs_conf_sources_command: fedpkg sources
osbs_namespace: "osbs-fedora"
osbs_orchestrator_cpu_limitrange: "95m"
osbs_orchestrator_default_nodeselector: "orchestrator=true"
osbs_url: "osbs.fedoraproject.org"
osbs_worker_default_nodeselector: "worker=true"
osbs_worker_namespace: worker
osbs_worker_service_accounts:
- orchestrator
- builder
source_registry: "registry.fedoraproject.org"
tcp_ports: [80, 443, 8443]
virt_install_command: "{{ virt_install_command_aarch64_one_nic }}"

View File

@ -1,42 +0,0 @@
---
# Define resources for this group of hosts here.
#Docker command delegated host
composer: compose-x86-01.stg.iad2.fedoraproject.org
docker_cert_dir: "/etc/docker/certs.d/candidate-registry.stg.fedoraproject.org"
docker_registry: "candidate-registry.stg.fedoraproject.org"
koji_url: "koji.stg.fedoraproject.org"
lvm_size: 60000
max_cpu: "{{ num_cpus }}"
max_mem_size: "{{ mem_size }}"
mem_size: 8192
# Nagios configuration
nagios_Check_Services:
dhcpd: false
httpd: false
named: false
nrpe: true
sshd: true
swap: false
num_cpus: 2
openshift_node_labels: {'region': 'infra'}
openshift_schedulable: False
osbs_client_conf_path: /etc/osbs.conf
osbs_conf_readwrite_users:
- "system:serviceaccount:{{ osbs_namespace }}:default"
- "system:serviceaccount:{{ osbs_namespace }}:builder"
osbs_conf_service_accounts:
- koji
- builder
osbs_conf_sources_command: fedpkg sources
osbs_namespace: "osbs-fedora"
osbs_orchestrator_cpu_limitrange: "95m"
osbs_orchestrator_default_nodeselector: "orchestrator=true"
osbs_url: "osbs.stg.fedoraproject.org"
osbs_worker_default_nodeselector: "worker=true"
osbs_worker_namespace: worker
osbs_worker_service_accounts:
- orchestrator
- builder
source_registry: "registry.stg.fedoraproject.org"
tcp_ports: [80, 443, 8443]
virt_install_command: "{{ virt_install_command_aarch64_one_nic_unsafe }}"

View File

@ -1,17 +0,0 @@
---
# Define resources for this group of hosts here.
lvm_size: 60000
max_cpu: "{{ num_cpus }}"
max_mem_size: "{{ mem_size }}"
mem_size: 8192
nagios_Check_Services:
dhcpd: false
httpd: false
named: false
nrpe: true
sshd: true
swap: false
num_cpus: 2
openshift_node_labels: {'region': 'primary', 'zone': 'default'}
tcp_ports: [80, 443, 8443, 10250]
virt_install_command: "{{ virt_install_command_aarch64_one_nic_unsafe }}"

View File

@ -1,17 +0,0 @@
---
# Define resources for this group of hosts here.
lvm_size: 60000
max_cpu: "{{ num_cpus }}"
max_mem_size: "{{ mem_size }}"
mem_size: 8192
nagios_Check_Services:
dhcpd: false
httpd: false
named: false
nrpe: true
sshd: true
swap: false
num_cpus: 2
openshift_node_labels: {'region': 'primary', 'zone': 'default'}
tcp_ports: [80, 443, 8443, 10250]
virt_install_command: "{{ virt_install_command_aarch64_one_nic_unsafe }}"

View File

@ -1,17 +0,0 @@
---
# Define resources for this group of hosts here.
lvm_size: 60000
max_cpu: "{{ num_cpus }}"
max_mem_size: "{{ mem_size }}"
mem_size: 8192
nagios_Check_Services:
dhcpd: false
httpd: false
named: false
nrpe: true
sshd: true
swap: false
num_cpus: 2
openshift_node_labels: {'region': 'primary', 'zone': 'default'}
tcp_ports: [80, 443, 8443, 10250]
virt_install_command: "{{ virt_install_command_aarch64_one_nic_unsafe }}"

View File

@ -1,13 +0,0 @@
---
# Define resources for this group of hosts here.
aarch_infra_group: "osbs_aarch64_masters"
# Aarch64 variables
aarch_masters_group: "osbs_aarch64_masters"
aarch_nodes_group: "osbs_aarch64_nodes"
cluster_infra_group: "osbs_masters"
cluster_masters_group: "osbs_masters"
cluster_nodes_group: "osbs_nodes"
inventory_filename: "cluster-inventory"
# Variables used in the ansible-ansible-openshift-ansible role in osbs-cluster playbook
osbs_url: "osbs.fedoraproject.org"
sudoers: "{{ private }}/files/sudo/osbs-sudoers"

View File

@ -1,13 +0,0 @@
---
# Define resources for this group of hosts here.
# Variables used in the ansible-ansible-openshift-ansible role in osbs-cluster playbook
aarch_infra_group: "osbs_aarch64_masters_stg"
# Aarch64 variables
aarch_masters_group: "osbs_aarch64_masters_stg"
aarch_nodes_group: "osbs_aarch64_nodes_stg"
cluster_infra_group: "osbs_masters_stg"
cluster_masters_group: "osbs_masters_stg"
cluster_nodes_group: "osbs_nodes_stg"
inventory_filename: "cluster-inventory-stg"
osbs_url: "osbs.stg.fedoraproject.org"

View File

@ -1,134 +0,0 @@
---
# Define resources for this group of hosts here.
_osbs_reactor_config_map:
artifacts_allowed_domains: []
#- download.devel.redhat.com/released
#- download.devel.redhat.com/devel/candidates
clusters:
aarch64:
- enabled: True
max_concurrent_builds: 1
name: "aarch64"
x86_64:
- enabled: True
max_concurrent_builds: 2
name: "x86_64"
clusters_client_config_dir: "/var/run/secrets/atomic-reactor/client-config-secret"
content_versions:
- v2
flatpak:
base_image: "registry.fedoraproject.org/flatpak-build-base:latest"
metadata: both
group_manifests: True
image_equal_labels:
- ['description', 'io.k8s.description']
image_labels:
authoritative-source-url: "{{ source_registry }}"
distribution-scope: public
vendor: "Fedora Project"
koji:
auth:
krb_keytab_path: "FILE:/etc/krb5.osbs_{{ osbs_url }}.keytab"
krb_principal: "osbs/{{osbs_url}}@{{ ipa_realm }}"
hub_url: "https://koji{{ env_suffix }}.fedoraproject.org/kojihub"
root_url: "https://koji{{ env_suffix }}.fedoraproject.org/"
odcs:
api_url: "https://odcs{{ env_suffix }}.fedoraproject.org/api/1"
auth:
openidc_dir: "/var/run/secrets/atomic-reactor/odcs-oidc-secret"
default_signing_intent: "unsigned"
signing_intents:
- keys: []
name: unsigned
openshift:
auth:
enable: True
build_json_dir: /usr/share/osbs
insecure: true
url: "https://{{ osbs_url }}"
platform_descriptors: "{{ osbs_platform_descriptors }}"
prefer_schema1_digest: False
registries:
- auth:
cfg_path: /var/run/secrets/atomic-reactor/v2-registry-dockercfg
url: https://candidate-registry.fedoraproject.org/v2
required_secrets:
- v2-registry-dockercfg
- odcs-oidc-secret
skip_koji_check_for_base_image: True
source_registry:
insecure: True
url: "{{ source_registry }}"
sources_command: "{{ osbs_conf_sources_command }}"
version: 1
worker_token_secrets:
- x86-64-orchestrator
- aarch64-orchestrator
- client-config-secret
_osbs_scratch_reactor_config_map_overrides:
image_labels:
distribution-scope: private
#Docker command delegated host
composer: compose-x86-01.iad2.fedoraproject.org
docker_cert_dir: "/etc/docker/certs.d/candidate-registry.fedoraproject.org"
docker_registry: "candidate-registry.fedoraproject.org"
koji_url: "koji.fedoraproject.org"
lvm_size: 60000
mem_size: 8192
# Nagios configuration
nagios_Check_Services:
dhcpd: false
httpd: false
named: false
nrpe: true
sshd: true
swap: false
num_cpus: 2
openshift_node_labels: {'region': 'infra'}
openshift_schedulable: False
osbs_client_conf_path: /etc/osbs.conf
osbs_conf_readwrite_users:
- "system:serviceaccount:{{ osbs_namespace }}:default"
- "system:serviceaccount:{{ osbs_namespace }}:builder"
osbs_conf_service_accounts:
- koji
- builder
osbs_conf_sources_command: fedpkg sources
osbs_conf_worker_clusters:
aarch64:
- max_concurrent_builds: 1
name: aarch64
openshift_url: "https://osbs-aarch64-master01.iad2.fedoraproject.org:8443/"
verify_ssl: 'false'
x86_64:
- max_concurrent_builds: 2
name: x86_64
openshift_url: "https://osbs.fedoraproject.org/"
verify_ssl: 'false'
osbs_koji_username: "kojibuilder"
osbs_namespace: "osbs-fedora"
osbs_odcs_enabled: true
osbs_orchestrator_cpu_limitrange: "95m"
osbs_orchestrator_default_nodeselector: "orchestrator=true"
osbs_platform_descriptors:
- architecture: amd64
platform: x86_64
- architecture: arm64
platform: aarch64
osbs_reactor_config_maps:
- data: "{{ _osbs_reactor_config_map }}"
name: reactor-config-map
- data: >
{{ _osbs_reactor_config_map |
combine(_osbs_scratch_reactor_config_map_overrides, recursive=True) }}
name: reactor-config-map-scratch
osbs_url: "osbs.fedoraproject.org"
osbs_worker_default_nodeselector: "worker=true"
osbs_worker_namespace: worker
osbs_worker_service_accounts:
- orchestrator
- builder
source_registry: "registry.fedoraproject.org"
tcp_ports: [80, 443, 8443]

View File

@ -1,129 +0,0 @@
---
# Define resources for this group of hosts here.
_osbs_reactor_config_map:
artifacts_allowed_domains: []
#- download.devel.redhat.com/released
#- download.devel.redhat.com/devel/candidates
clusters:
aarch64:
- enabled: True
max_concurrent_builds: 1
name: "aarch64"
x86_64:
- enabled: True
max_concurrent_builds: 2
name: "x86_64"
clusters_client_config_dir: "/var/run/secrets/atomic-reactor/client-config-secret"
content_versions:
- v2
flatpak:
base_image: "registry.fedoraproject.org/flatpak-build-base:latest"
metadata: both
group_manifests: True
image_equal_labels:
- ['description', 'io.k8s.description']
image_labels:
authoritative-source-url: "{{ source_registry }}"
distribution-scope: public
vendor: "Fedora Project"
koji:
auth:
krb_keytab_path: "FILE:/etc/krb5.osbs_{{ osbs_url }}.keytab"
krb_principal: "osbs/{{osbs_url}}@{{ ipa_realm }}"
hub_url: "https://koji{{ env_suffix }}.fedoraproject.org/kojihub"
root_url: "https://koji{{ env_suffix }}.fedoraproject.org/"
odcs:
api_url: "https://odcs{{ env_suffix }}.fedoraproject.org/api/1"
auth:
openidc_dir: "/var/run/secrets/atomic-reactor/odcs-oidc-secret"
default_signing_intent: "unsigned"
signing_intents:
- keys: []
name: unsigned
openshift:
auth:
enable: True
build_json_dir: /usr/share/osbs
insecure: true
url: "https://{{ osbs_url }}"
platform_descriptors: "{{ osbs_platform_descriptors }}"
prefer_schema1_digest: False
registries:
- auth:
cfg_path: /var/run/secrets/atomic-reactor/v2-registry-dockercfg
insecure: False
url: https://candidate-registry.stg.fedoraproject.org/v2
required_secrets:
- v2-registry-dockercfg
- odcs-oidc-secret
skip_koji_check_for_base_image: True
source_registry:
insecure: True
url: "{{ source_registry }}"
sources_command: "{{ osbs_conf_sources_command }}"
version: 1
worker_token_secrets:
- x86-64-orchestrator
- aarch64-orchestrator
- client-config-secret
_osbs_scratch_reactor_config_map_overrides:
image_labels:
distribution-scope: private
#Docker command delegated host
composer: compose-x86-01.stg.iad2.fedoraproject.org
docker_cert_dir: "/etc/docker/certs.d/candidate-registry.stg.fedoraproject.org"
docker_registry: "candidate-registry.stg.fedoraproject.org"
koji_url: "koji.stg.fedoraproject.org"
lvm_size: 60000
mem_size: 8192
# Nagios configuration
nagios_Check_Services:
dhcpd: false
httpd: false
named: false
nrpe: true
sshd: true
swap: false
num_cpus: 2
openshift_node_labels: {'region': 'infra'}
openshift_schedulable: False
osbs_client_conf_path: /etc/osbs.conf
osbs_conf_readwrite_users:
- "system:serviceaccount:{{ osbs_namespace }}:default"
- "system:serviceaccount:{{ osbs_namespace }}:builder"
osbs_conf_service_accounts:
- koji
- builder
osbs_conf_sources_command: fedpkg sources
osbs_conf_worker_clusters:
x86_64:
- max_concurrent_builds: 2
name: x86_64
openshift_url: "https://osbs-master01.stg.iad2.fedoraproject.org:8443"
verify_ssl: 'false'
osbs_namespace: "osbs-fedora"
osbs_odcs_enabled: true
osbs_orchestrator_cpu_limitrange: "95m"
osbs_orchestrator_default_nodeselector: "orchestrator=true"
osbs_platform_descriptors:
- architecture: amd64
platform: x86_64
- architecture: arm64
platform: aarch64
osbs_reactor_config_maps:
- data: "{{ _osbs_reactor_config_map }}"
name: reactor-config-map
- data: >
{{ _osbs_reactor_config_map |
combine(_osbs_scratch_reactor_config_map_overrides, recursive=True) }}
name: reactor-config-map-scratch
osbs_url: "osbs.stg.fedoraproject.org"
osbs_worker_default_nodeselector: "worker=true"
osbs_worker_namespace: worker
osbs_worker_service_accounts:
- orchestrator
- builder
source_registry: "registry.fedoraproject.org"
tcp_ports: [80, 443, 8443]

View File

@ -1,20 +0,0 @@
---
# Define resources for this group of hosts here.
docker_cert_dir: "/etc/docker/certs.d/candidate-registry.fedoraproject.org"
docker_registry: "candidate-registry.fedoraproject.org"
koji_url: "koji.fedoraproject.org"
lvm_size: 60000
mem_size: 8192
nagios_Check_Services:
dhcpd: false
httpd: false
named: false
nrpe: true
sshd: true
swap: false
num_cpus: 2
osbs_client_conf_path: /etc/osbs.conf
osbs_koji_username: "kojibuilder"
osbs_url: "osbs.fedoraproject.org"
source_registry: "registry.fedoraproject.org"
tcp_ports: [80, 443, 8443, 10250]

View File

@ -1,14 +0,0 @@
---
# Define resources for this group of hosts here.
lvm_size: 60000
mem_size: 8192
nagios_Check_Services:
dhcpd: false
httpd: false
named: false
nrpe: true
sshd: true
swap: false
num_cpus: 2
openshift_node_labels: {'region': 'primary', 'zone': 'default'}
tcp_ports: [80, 443, 8443, 10250]

View File

@ -1,32 +0,0 @@
---
# Define resources for this group of hosts here.
baseiptables: False
docker_cert_dir: "/etc/docker/certs.d/candidate-registry.stg.fedoraproject.org"
docker_registry: "candidate-registry.stg.fedoraproject.org"
# fedora container images required by buildroot
fedora_required_images:
- "fedora:latest"
ipa_client_shell_groups:
- fi-apprentice
- sysadmin-noc
- sysadmin-osbs
- sysadmin-releng
- sysadmin-veteran
ipa_client_sudo_groups:
- sysadmin-osbs
- sysadmin-releng
ipa_host_group: osbs
ipa_host_group_desc: OpenShift Build Service
koji_url: "koji.stg.fedoraproject.org"
lvm_size: 60000
mem_size: 8192
num_cpus: 2
openshift_ansible_upgrading: True
# docker images required by OpenShift Origin
openshift_required_images:
- "openshift/origin-pod"
osbs_client_conf_path: /etc/osbs.conf
osbs_koji_username: "kojibuilder_stg"
osbs_url: "osbs.stg.fedoraproject.org"
source_registry: "registry.fedoraproject.org"
tcp_ports: [80, 443, 8443]

View File

@ -110,7 +110,6 @@ bvmhost-a64-07.iad2.fedoraproject.org
bvmhost-a64-08.iad2.fedoraproject.org
bvmhost-a64-09.iad2.fedoraproject.org
bvmhost-a64-11.iad2.fedoraproject.org
bvmhost-a64-osbs-01.iad2.fedoraproject.org
openqa-a64-worker01.iad2.fedoraproject.org
openqa-a64-worker02.iad2.fedoraproject.org
openqa-a64-worker03.iad2.fedoraproject.org

View File

@ -1,15 +0,0 @@
---
datacenter: iad2
eth0_ipv4_gw: 10.3.170.254
eth0_ipv4_ip: 10.3.170.147
host_group: osbs-aarch64-masters
ks_repo: http://10.3.163.35/pub/fedora/linux/releases/33/Everything/aarch64/os/
ks_url: http://10.3.163.35/repo/rhel/ks/kvm-fedora-33-aarch64-osbs
lvm_size: 60g
max_mem_size: 16384
mem_size: 16384
nrpe_procs_crit: 1000
nrpe_procs_warn: 900
num_cpus: 4
vmhost: bvmhost-a64-osbs-01.iad2.fedoraproject.org
volgroup: /dev/vg_guests

View File

@ -1,15 +0,0 @@
---
datacenter: iad2
eth0_ipv4_gw: 10.3.170.254
eth0_ipv4_ip: 10.3.170.148
host_group: osbs-aarch64-nodes
ks_repo: http://10.3.163.35/pub/fedora/linux/releases/33/Everything/aarch64/os/
ks_url: http://10.3.163.35/repo/rhel/ks/kvm-fedora-33-aarch64-osbs
lvm_size: 60g
max_mem_size: 16384
mem_size: 16384
nrpe_procs_crit: 1000
nrpe_procs_warn: 900
num_cpus: 4
vmhost: bvmhost-a64-osbs-01.iad2.fedoraproject.org
volgroup: /dev/vg_guests

View File

@ -1,15 +0,0 @@
---
datacenter: iad2
eth0_ipv4_gw: 10.3.170.254
eth0_ipv4_ip: 10.3.170.149
host_group: osbs-aarch64-nodes
ks_repo: http://10.3.163.35/pub/fedora/linux/releases/33/Everything/aarch64/os/
ks_url: http://10.3.163.35/repo/rhel/ks/kvm-fedora-33-aarch64-osbs
lvm_size: 60g
max_mem_size: 16384
mem_size: 16384
nrpe_procs_crit: 1000
nrpe_procs_warn: 900
num_cpus: 4
vmhost: bvmhost-a64-osbs-01.iad2.fedoraproject.org
volgroup: /dev/vg_guests

View File

@ -1,13 +0,0 @@
---
datacenter: iad2
eth0_ipv4_gw: 10.3.169.254
eth0_ipv4_ip: 10.3.169.112
ks_repo: http://10.3.163.35/repo/rhel/RHEL7-x86_64/
ks_url: http://10.3.163.35/repo/rhel/ks/kvm-rhel-7-iad2
max_mem_size: 4096
mem_size: 4096
nagios_Check_Services:
mail: false
nrpe: false
vmhost: bvmhost-x86-02.iad2.fedoraproject.org
volgroup: /dev/vg_guests

View File

@ -1,10 +0,0 @@
---
datacenter: iad2
eth0_ipv4_gw: 10.3.167.254
eth0_ipv4_ip: 10.3.167.38
ks_repo: http://10.3.163.35/repo/rhel/RHEL7-x86_64/
ks_url: http://10.3.163.35/repo/rhel/ks/kvm-rhel-7-iad2
max_mem_size: 4096
mem_size: 4096
vmhost: bvmhost-x86-01.stg.iad2.fedoraproject.org
volgroup: /dev/vg_guests

View File

@ -1,14 +0,0 @@
---
datacenter: iad2
eth0_ipv4_gw: 10.3.169.254
eth0_ipv4_ip: 10.3.169.113
ks_repo: http://10.3.163.35/repo/rhel/RHEL7-x86_64/
ks_url: http://10.3.163.35/repo/rhel/ks/kvm-rhel-7-osbs-iad2
lvm_size: 120g
max_mem_size: 16384
mem_size: 16384
nrpe_procs_crit: 1000
nrpe_procs_warn: 900
num_cpus: 4
vmhost: bvmhost-x86-02.iad2.fedoraproject.org
volgroup: /dev/vg_guests

View File

@ -1,15 +0,0 @@
---
datacenter: iad2
eth0_ipv4_gw: 10.3.167.254
eth0_ipv4_ip: 10.3.167.39
host_group: osbs-stg
ks_repo: http://10.3.163.35/repo/rhel/RHEL7-x86_64/
ks_url: http://10.3.163.35/repo/rhel/ks/kvm-rhel-7-osbs-iad2
lvm_size: 120g
max_mem_size: 16384
mem_size: 16384
nrpe_procs_crit: 1000
nrpe_procs_warn: 900
num_cpus: 4
vmhost: bvmhost-x86-01.stg.iad2.fedoraproject.org
volgroup: /dev/vg_guests

View File

@ -1,14 +0,0 @@
---
datacenter: iad2
eth0_ipv4_gw: 10.3.169.254
eth0_ipv4_ip: 10.3.169.114
ks_repo: http://10.3.163.35/repo/rhel/RHEL7-x86_64/
ks_url: http://10.3.163.35/repo/rhel/ks/kvm-rhel-7-osbs-iad2
lvm_size: 240g
max_mem_size: 16384
mem_size: 16384
nrpe_procs_crit: 1000
nrpe_procs_warn: 900
num_cpus: 4
vmhost: bvmhost-x86-04.iad2.fedoraproject.org
volgroup: /dev/vg_guests

View File

@ -1,15 +0,0 @@
---
datacenter: iad2
eth0_ipv4_gw: 10.3.167.254
eth0_ipv4_ip: 10.3.167.40
host_group: osbs-stg
ks_repo: http://10.3.163.35/repo/rhel/RHEL7-x86_64/
ks_url: http://10.3.163.35/repo/rhel/ks/kvm-rhel-7-osbs-iad2
lvm_size: 120g
max_mem_size: 16384
mem_size: 16384
nrpe_procs_crit: 1000
nrpe_procs_warn: 900
num_cpus: 4
vmhost: bvmhost-x86-01.stg.iad2.fedoraproject.org
volgroup: /dev/vg_guests

View File

@ -1,14 +0,0 @@
---
datacenter: iad2
eth0_ipv4_gw: 10.3.169.254
eth0_ipv4_ip: 10.3.169.115
ks_repo: http://10.3.163.35/repo/rhel/RHEL7-x86_64/
ks_url: http://10.3.163.35/repo/rhel/ks/kvm-rhel-7-osbs-iad2
lvm_size: 240g
max_mem_size: 16384
mem_size: 16384
nrpe_procs_crit: 1000
nrpe_procs_warn: 900
num_cpus: 4
vmhost: bvmhost-x86-05.iad2.fedoraproject.org
volgroup: /dev/vg_guests

View File

@ -1,15 +0,0 @@
---
datacenter: iad2
eth0_ipv4_gw: 10.3.167.254
eth0_ipv4_ip: 10.3.167.41
host_group: osbs-stg
ks_repo: http://10.3.163.35/repo/rhel/RHEL7-x86_64/
ks_url: http://10.3.163.35/repo/rhel/ks/kvm-rhel-7-osbs-iad2
lvm_size: 120g
max_mem_size: 16384
mem_size: 16384
nrpe_procs_crit: 1000
nrpe_procs_warn: 900
num_cpus: 4
vmhost: bvmhost-x86-01.stg.iad2.fedoraproject.org
volgroup: /dev/vg_guests

View File

@ -666,10 +666,6 @@ mm-crawler-dev.stg.iad2.fedoraproject.org
odcs-backend01.stg.iad2.fedoraproject.org
odcs-frontend01.stg.iad2.fedoraproject.org
os-control01.stg.iad2.fedoraproject.org
osbs-control01.stg.iad2.fedoraproject.org
osbs-master01.stg.iad2.fedoraproject.org
osbs-node01.stg.iad2.fedoraproject.org
osbs-node02.stg.iad2.fedoraproject.org
pdc-web01.stg.iad2.fedoraproject.org
pkgs01.stg.iad2.fedoraproject.org
proxy01.stg.iad2.fedoraproject.org
@ -981,45 +977,6 @@ pagure02.fedoraproject.org
[pagure_stg]
pagure-stg01.fedoraproject.org
[osbs_control]
osbs-control01.iad2.fedoraproject.org
[osbs_control_stg]
osbs-control01.stg.iad2.fedoraproject.org
[osbs_nodes]
osbs-node01.iad2.fedoraproject.org
osbs-node02.iad2.fedoraproject.org
[osbs_masters]
osbs-master01.iad2.fedoraproject.org
[osbs_aarch64_masters]
osbs-aarch64-master01.iad2.fedoraproject.org
[osbs_aarch64_nodes]
osbs-aarch64-node01.iad2.fedoraproject.org
osbs-aarch64-node02.iad2.fedoraproject.org
[osbs:children]
osbs_control
osbs_nodes
osbs_masters
osbs_aarch64_nodes
osbs_aarch64_masters
[osbs_masters_stg]
osbs-master01.stg.iad2.fedoraproject.org
[osbs_nodes_stg]
osbs-node01.stg.iad2.fedoraproject.org
osbs-node02.stg.iad2.fedoraproject.org
[osbs_stg:children]
osbs_control_stg
osbs_masters_stg
osbs_nodes_stg
[ocp:children]
os_control
ocp_controlplane

View File

@ -52,8 +52,6 @@
- import_playbook: /srv/web/infra/ansible/playbooks/groups/odcs.yml
- import_playbook: /srv/web/infra/ansible/playbooks/groups/openqa-workers.yml
- import_playbook: /srv/web/infra/ansible/playbooks/groups/openqa.yml
- import_playbook: /srv/web/infra/ansible/playbooks/groups/osbs/deploy-cluster.yml
- import_playbook: /srv/web/infra/ansible/playbooks/groups/osbs/configure-osbs.yml
- import_playbook: /srv/web/infra/ansible/playbooks/groups/pagure.yml
- import_playbook: /srv/web/infra/ansible/playbooks/groups/pdc.yml
- import_playbook: /srv/web/infra/ansible/playbooks/groups/people.yml

View File

@ -14,7 +14,6 @@
pre_tasks:
- import_tasks: "{{ tasks_path }}/yumrepos.yml"
- import_tasks: "{{ tasks_path }}/osbs_certs.yml"
- name: override nbde_client-network-flush to work around a bug
copy:

View File

@ -67,18 +67,6 @@
- role: keytab/service
kt_location: /etc/kojid/kojid.keytab
service: compile
- role: keytab/service
owner_user: root
owner_group: root
service: osbs
host: "osbs.fedoraproject.org"
when: env == "production"
- role: keytab/service
owner_user: root
owner_group: root
service: osbs
host: "osbs.stg.fedoraproject.org"
when: env == "staging"
- role: keytab/service
owner_user: root
owner_group: root
@ -110,106 +98,6 @@
handlers:
- import_tasks: "{{ handlers_path }}/restart_services.yml"
- name: configure osbs on koji builders
hosts: buildvm:buildvm_stg
tags:
- osbs
user: root
gather_facts: True
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
pre_tasks:
- import_tasks: "{{ tasks_path }}/osbs_certs.yml"
- import_tasks: "{{ tasks_path }}/osbs_koji_token.yml"
roles:
- {
role: osbs-client,
when: env == 'staging' and ansible_architecture == 'x86_64',
general:
{
verbose: 0,
build_json_dir: "/usr/share/osbs/",
openshift_required_version: 1.1.0,
},
default:
{
username: "{{ osbs_koji_stg_username }}",
password: "{{ osbs_koji_stg_password }}",
koji_use_kerberos: True,
koji_kerberos_keytab: "FILE:/etc/krb5.osbs_{{osbs_url}}.keytab",
koji_kerberos_principal: "osbs/{{osbs_url}}@{{ipa_realm}}",
openshift_url: "https://{{ osbs_url }}/",
build_host: "{{ osbs_url }}",
koji_root: "http://{{ koji_root }}",
koji_hub: "https://koji.stg.fedoraproject.org/kojihub",
sources_command: "fedpkg sources",
build_type: "prod",
verify_ssl: true,
use_auth: true,
builder_use_auth: true,
registry_api_versions: "v2",
builder_openshift_url: "https://{{osbs_url}}",
client_config_secret: "client-config-secret",
reactor_config_secret: "reactor-config-secret",
token_secrets: "x86-64-osbs:/var/run/secrets/atomic-reactor/x86-64-orchestrator",
token_file: "/etc/osbs/x86-64-osbs-koji",
namespace: "osbs-fedora",
can_orchestrate: true,
builder_odcs_url: "https://odcs{{ env_suffix }}.fedoraproject.org",
builder_odcs_openidc_secret: "odcs-oidc-secret",
builder_pdc_url: "https://pdc.stg.fedoraproject.org/api/1",
reactor_config_map: "reactor-config-map",
reactor_config_map_scratch: "reactor-config-map-scratch",
build_from: "image:buildroot:latest",
},
}
- {
role: osbs-client,
when: env == 'production' and ansible_architecture == 'x86_64',
general:
{
verbose: 0,
build_json_dir: "/usr/share/osbs/",
openshift_required_version: 1.1.0,
},
default:
{
username: "{{ osbs_koji_prod_username }}",
password: "{{ osbs_koji_prod_password }}",
koji_use_kerberos: True,
koji_kerberos_keytab: "FILE:/etc/krb5.osbs_{{osbs_url}}.keytab",
koji_kerberos_principal: "osbs/{{osbs_url}}@{{ipa_realm}}",
openshift_url: "https://{{ osbs_url }}/",
build_host: "{{ osbs_url }}",
koji_root: "http://{{ koji_root }}",
koji_hub: "https://koji.fedoraproject.org/kojihub",
sources_command: "fedpkg sources",
build_type: "prod",
verify_ssl: true,
use_auth: true,
builder_use_auth: true,
registry_api_versions: "v2",
builder_openshift_url: "https://{{osbs_url}}",
token_secrets: "x86-64-osbs:/var/run/secrets/atomic-reactor/x86-64-orchestrator",
token_file: "/etc/osbs/x86-64-osbs-koji",
namespace: "osbs-fedora",
can_orchestrate: true,
builder_odcs_url: "https://odcs{{ env_suffix }}.fedoraproject.org",
builder_odcs_openidc_secret: "odcs-oidc-secret",
builder_pdc_url: "https://pdc.fedoraproject.org/api/1",
reactor_config_map: "reactor-config-map",
reactor_config_map_scratch: "reactor-config-map-scratch",
build_from: "image:buildroot:latest",
},
}
handlers:
- import_tasks: "{{ handlers_path }}/restart_services.yml"
- name: configure varnish cache
hosts: buildvm-s390x-24.s390.fedoraproject.org:buildvm-s390x-01.stg.s390.fedoraproject.org:buildvm-s390x-14.s390.fedoraproject.org
tags:

View File

@ -21,7 +21,6 @@
pre_tasks:
- include_vars: dir=/srv/web/infra/ansible/vars/all/ ignore_files=README
tags: always
- import_tasks: "{{ tasks_path }}/osbs_certs.yml"
- import_tasks: "{{ tasks_path }}/yumrepos.yml"
roles:

View File

@ -1,3 +0,0 @@
- import_playbook: "/srv/web/infra/ansible/playbooks/groups/osbs/setup-worker-namespace.yml"
- import_playbook: "/srv/web/infra/ansible/playbooks/groups/osbs/setup-orchestrator-namespace.yml"
- import_playbook: "/srv/web/infra/ansible/playbooks/groups/osbs/osbs-post-install.yml"

View File

@ -1,334 +0,0 @@
# create an osbs server
- import_playbook: "/srv/web/infra/ansible/playbooks/include/virt-create.yml"
vars:
myhosts: osbs_control
- import_playbook: "/srv/web/infra/ansible/playbooks/include/virt-create.yml"
vars:
myhosts: osbs_control_stg
- import_playbook: "/srv/web/infra/ansible/playbooks/include/virt-create.yml"
vars:
myhosts: osbs_nodes:osbs_masters
- import_playbook: "/srv/web/infra/ansible/playbooks/include/virt-create.yml"
vars:
myhosts: osbs_nodes_stg:osbs_masters_stg
- import_playbook: "/srv/web/infra/ansible/playbooks/include/virt-create.yml"
vars:
myhosts: osbs_aarch64_nodes_stg:osbs_aarch64_masters_stg:osbs_aarch64_nodes
- import_playbook: "/srv/web/infra/ansible/playbooks/include/virt-create.yml"
vars:
myhosts: osbs_aarch64_masters
- name: make the box be real
hosts: osbs_control:osbs_masters:osbs_nodes:osbs_control_stg:osbs_masters_stg:osbs_nodes_stg:osbs_aarch64_masters_stg:osbs_aarch64_nodes_stg:osbs_aarch64_masters:osbs_aarch64_nodes
tags:
- osbs-cluster-prereq
user: root
gather_facts: True
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
pre_tasks:
- include_vars: dir=/srv/web/infra/ansible/vars/all/ ignore_files=README
- import_tasks: "{{ tasks_path }}/yumrepos.yml"
roles:
- base
- rkhunter
- nagios_client
- hosts
- ipa/client
- sudo
- rsyncd
tasks:
- name: put openshift repo on os- systems
template: src="{{ files }}/openshift/openshift.repo" dest="/etc/yum.repos.d/openshift.repo"
tags:
- config
- packages
- yumrepos
- name: install redhat ca file
package:
name: subscription-manager-rhsm-certificates
state: present
- import_tasks: "{{ tasks_path }}/motd.yml"
handlers:
- import_tasks: "{{ handlers_path }}/restart_services.yml"
- name: OSBS control hosts pre-req setup
hosts: osbs_control:osbs_control_stg
tags:
- osbs-cluster-prereq
user: root
gather_facts: True
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
tasks:
- name: deploy private key to control hosts
copy:
src: "{{private}}/files/osbs/{{env}}/control_key"
dest: "/root/.ssh/id_rsa"
owner: root
mode: 0600
- name: set ansible to use pipelining
ini_file:
dest: /etc/ansible/ansible.cfg
section: ssh_connection
option: pipelining
value: "True"
- name: Setup cluster masters pre-reqs
hosts: osbs_masters_stg:osbs_masters:osbs_aarch64_masters_stg:osbs_aarch64_masters
tags:
- osbs-cluster-prereq
user: root
gather_facts: True
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
tasks:
- name: ensure origin conf dir exists
file:
path: "/etc/origin"
state: "directory"
- name: create cert dir for openshift public facing REST API SSL
file:
path: "/etc/origin/master/named_certificates"
state: "directory"
- name: install cert for openshift public facing REST API SSL
copy:
src: "{{private}}/files/osbs/{{env}}/osbs-internal.pem"
dest: "/etc/origin/master/named_certificates/{{osbs_url}}.pem"
- name: install key for openshift public facing REST API SSL
copy:
src: "{{private}}/files/osbs/{{env}}/osbs-internal.key"
dest: "/etc/origin/master/named_certificates/{{osbs_url}}.key"
- name: place htpasswd file
copy:
src: "{{private}}/files/httpd/osbs-{{env}}.htpasswd"
dest: /etc/origin/master/htpasswd
- name: Setup cluster hosts pre-reqs
hosts: osbs_masters_stg:osbs_nodes_stg:osbs_masters:osbs_nodes:osbs_aarch64_masters_stg:osbs_aarch64_nodes_stg:osbs_aarch64_masters:osbs_aarch64_nodes
tags:
- osbs-cluster-prereq
user: root
gather_facts: True
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
handlers:
- name: restart NetworkManager
service:
name: NetworkManager
state: restarted
tasks:
- name: Install necessary packages that openshift-ansible needs
package:
state: installed
name:
- tar
- rsync
- python3-dbus
- NetworkManager
- python3-libselinux
- python3-PyYAML
- name: Deploy controller public ssh keys to osbs cluster hosts
authorized_key:
user: root
key: "{{ lookup('file', '{{private}}/files/osbs/{{env}}/control_key.pub') }}"
- name: Create file for eth0 config
file:
path: "/etc/sysconfig/network-scripts/ifcfg-eth0"
state: touch
mode: 0644
owner: root
group: root
# This is required for OpenShift built-in SkyDNS inside the overlay network
# of the cluster
- name: ensure NM_CONTROLLED is set to "yes" for osbs cluster
lineinfile:
dest: "/etc/sysconfig/network-scripts/ifcfg-eth0"
line: "NM_CONTROLLED=yes"
notify:
- restart NetworkManager
# This is required for OpenShift built-in SkyDNS inside the overlay network
# of the cluster
- name: ensure NetworkManager is enabled and started
service:
name: NetworkManager
state: started
enabled: yes
- name: cron entry to clean up docker storage
copy:
src: "{{files}}/osbs/cleanup-docker-storage"
dest: "/etc/cron.d/cleanup-docker-storage"
- name: copy docker-storage-setup config
copy:
src: "{{files}}/osbs/docker-storage-setup"
dest: "/etc/sysconfig/docker-storage-setup"
- name: update ca certificates
command: 'update-ca-trust'
- name: Deploy kerberos keytab to cluster hosts
hosts: osbs_masters_stg:osbs_nodes_stg:osbs_masters:osbs_nodes:osbs_aarch64_masters_stg:osbs_aarch64_nodes_stg:osbs_aarch64_masters:osbs_aarch64_nodes
tags:
- osbs-cluster-prereq
user: root
gather_facts: True
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
roles:
- role: keytab/service
owner_user: root
owner_group: root
service: osbs
host: "osbs.fedoraproject.org"
when: env == "production"
- role: keytab/service
owner_user: root
owner_group: root
service: osbs
host: "osbs.stg.fedoraproject.org"
when: env == "staging"
- name: Deploy OpenShift Cluster x86_64
hosts: osbs_control:osbs_control_stg
tags:
- osbs-deploy-openshift
- osbs-x86-deploy-openshift
user: root
gather_facts: True
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
roles:
- role: ansible-ansible-openshift-ansible
cluster_inventory_filename: "{{ inventory_filename }}"
openshift_master_public_api_url: "https://{{ osbs_url }}:8443"
openshift_release: "v3.11"
openshift_version: "v3.11"
openshift_pkg_version: "-3.11*"
openshift_ansible_path: "/root/openshift-ansible"
openshift_ansible_pre_playbook: "playbooks/prerequisites.yml"
openshift_ansible_playbook: "playbooks/deploy_cluster.yml"
openshift_ansible_version: "openshift-ansible-3.11.51-1"
openshift_ansible_ssh_user: root
openshift_ansible_install_examples: false
openshift_ansible_containerized_deploy: false
openshift_cluster_masters_group: "{{ cluster_masters_group }}"
openshift_cluster_nodes_group: "{{ cluster_nodes_group }}"
openshift_cluster_infra_group: "{{ cluster_infra_group }}"
openshift_auth_profile: "osbs"
openshift_cluster_url: "{{osbs_url}}"
openshift_master_ha: false
openshift_debug_level: 2
openshift_shared_infra: true
openshift_deployment_type: "openshift-enterprise"
openshift_ansible_use_crio: false
openshift_ansible_crio_only: false
tags: ['openshift-cluster-x86','ansible-ansible-openshift-ansible']
- name: Deploy OpenShift Cluster aarch64
hosts: osbs_control:osbs_control_stg
tags:
- osbs-deploy-openshift
- osbs-aarch-deploy-openshift
user: root
gather_facts: True
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
roles:
- role: ansible-ansible-openshift-ansible
cluster_inventory_filename: "{{ inventory_filename }}"
openshift_htpasswd_file: "/etc/origin/htpasswd"
openshift_master_public_api_url: "https://{{ osbs_url }}:8443"
openshift_release: "v3.11"
openshift_version: "v3.11"
openshift_pkg_version: "-3.11.2"
openshift_ansible_path: "/root/openshift-ansible"
openshift_ansible_pre_playbook: "playbooks/prerequisites.yml"
openshift_ansible_playbook: "playbooks/deploy_cluster.yml"
openshift_ansible_version: "openshift-ansible-3.11.51-1"
openshift_ansible_ssh_user: root
openshift_ansible_install_examples: false
openshift_ansible_containerized_deploy: false
openshift_cluster_masters_group: "{{ aarch_masters_group }}"
openshift_cluster_nodes_group: "{{ aarch_nodes_group }}"
openshift_cluster_infra_group: "{{ aarch_infra_group }}"
openshift_auth_profile: "osbs"
openshift_cluster_url: "{{osbs_url}}"
openshift_master_ha: false
openshift_debug_level: 2
openshift_shared_infra: true
openshift_deployment_type: "origin"
openshift_ansible_python_interpreter: "/usr/bin/python3"
openshift_ansible_use_crio: false
openshift_ansible_crio_only: false
openshift_arch: "aarch64"
tags: ['openshift-cluster-aarch','ansible-ansible-openshift-ansible']
- name: Setup OSBS requirements for OpenShift cluster hosts
hosts: osbs_masters_stg:osbs_nodes_stg:osbs_masters:osbs_nodes
tags:
- osbs-cluster-req
user: root
gather_facts: True
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
tasks:
- name: Ensure /etc/dnsmasq.d/ dir exists
file: path="/etc/dnsmasq.d/" state=directory
- name: install Fedora-specific dnsmasq top-config
copy:
src: "{{files}}/osbs/fedora-dnsmasq-master.conf.{{env}}"
dest: "/etc/dnsmasq.conf"
when:
is_fedora is defined or (ansible_distribution_major_version|int > 8 and ansible_distribution == 'RedHat')
- name: install Fedora-specific dnsmasq sub-config
copy:
src: "{{files}}/osbs/fedora-dnsmasq.conf.{{env}}"
dest: "/etc/dnsmasq.d/fedora-dns.conf"

View File

@ -1,222 +0,0 @@
- name: post-install master host osbs tasks
hosts: osbs_masters_stg:osbs_masters:osbs_aarch64_masters_stg[0]:osbs_aarch64_masters[0]
tags:
- osbs-post-install
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- /srv/private/ansible/vars.yml
- /srv/private/ansible/files/openstack/passwords.yml
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
vars:
osbs_kubeconfig_path: /etc/origin/master/admin.kubeconfig
osbs_environment:
KUBECONFIG: "{{ osbs_kubeconfig_path }}"
tasks:
- name: cron entry to clean up old builds
copy:
src: "{{files}}/osbs/cleanup-old-osbs-builds"
dest: "/etc/cron.d/cleanup-old-osbs-builds"
- name: post-install osbs control tasks
hosts: osbs_control
tags: osbs-post-install
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- /srv/private/ansible/vars.yml
- /srv/private/ansible/files/openstack/passwords.yml
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
tasks:
- name: enable nrpe for monitoring (noc01)
iptables: action=insert chain=INPUT destination_port=5666 protocol=tcp source=10.3.163.10 state=present jump=ACCEPT
tags:
- iptables
- name: post-install node host osbs tasks
hosts: osbs_masters:osbs_masters_stg:osbs_aarch64_masters:osbs_nodes_stg:osbs_nodes:osbs_aarch64_nodes_stg:osbs_aarch64_nodes
tags:
- osbs-post-install
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- /srv/private/ansible/vars.yml
- /srv/private/ansible/files/openstack/passwords.yml
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
vars:
osbs_kubeconfig_path: /etc/origin/master/admin.kubeconfig
osbs_environment:
KUBECONFIG: "{{ osbs_kubeconfig_path }}"
handlers:
- name: Remove the previous buildroot image
docker_image:
state: absent
name: buildroot
- name: Build the new buildroot container
docker_image:
path: /etc/osbs/buildroot/
name: buildroot
nocache: yes
- name: restart and reload docker service
systemd:
name: docker
state: restarted
daemon_reload: yes
tasks:
- name: enable nrpe for monitoring (noc01)
iptables: action=insert chain=INPUT destination_port=5666 protocol=tcp source=10.3.163.10 state=present jump=ACCEPT
tags:
- iptables
- name: copy docker iptables script
copy:
src: "{{files}}/osbs/fix-docker-iptables.{{ env }}"
dest: /usr/local/bin/fix-docker-iptables
mode: 0755
tags:
- iptables
notify:
- restart and reload docker service
- name: copy docker custom service config
copy:
src: "{{files}}/osbs/docker.firewall.service"
dest: /etc/systemd/system/docker.service.d/firewall.conf
tags:
- docker
notify:
- restart and reload docker service
- name: copy the osbs customization files
copy:
src: "{{item}}"
dest: "/etc/osbs/buildroot/"
owner: root
mode: 0600
with_items:
- "{{files}}/osbs/worker_customize.json"
- "{{files}}/osbs/orchestrator_customize.json"
- name: Create buildroot container conf directory
file:
path: "/etc/osbs/buildroot/"
state: directory
- name: Upload Dockerfile for buildroot container
template:
src: "{{ files }}/osbs/buildroot-Dockerfile-{{env}}.j2"
dest: "/etc/osbs/buildroot/Dockerfile"
mode: 0400
notify:
- Remove the previous buildroot image
- Build the new buildroot container
- name: Upload krb5.conf for buildroot container
template:
src: "{{ roles_path }}/base/templates/krb5.conf.j2"
dest: "/etc/osbs/buildroot/krb5.conf"
mode: 0644
notify:
- Remove the previous buildroot image
- Build the new buildroot container
- name: Upload internal CA for buildroot
copy:
src: "{{private}}/files/osbs/{{env}}/osbs-internal.pem"
dest: "/etc/osbs/buildroot/ca.crt"
mode: 0400
notify:
- Remove the previous buildroot image
- Build the new buildroot container
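# Docker cannot COPY files from outside the build context, so everything the
# buildroot image needs (repo file, keytab, CA cert) has to live under
# /etc/osbs/buildroot/. The stat/remove/copy steps below only refresh those
# copies when the source file actually changed, so the buildroot image is
# not rebuilt needlessly.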
- name: stat infra repofile
stat:
path: "/etc/yum.repos.d/infra-tags.repo"
register: infra_repo_stat
- name: stat /etc/osbs/buildroot/ infra repofile
stat:
path: "/etc/osbs/buildroot/infra-tags.repo"
register: etcosbs_infra_repo_stat
- name: remove old /etc/osbs/buildroot/ infra repofile
file:
path: "/etc/osbs/buildroot/infra-tags.repo"
state: absent
when: etcosbs_infra_repo_stat.stat.exists and infra_repo_stat.stat.checksum != etcosbs_infra_repo_stat.stat.checksum
- name: Copy repofile for buildroot container (because Docker)
copy:
src: "/etc/yum.repos.d/infra-tags.repo"
dest: "/etc/osbs/buildroot/infra-tags.repo"
remote_src: true
notify:
- Remove the previous buildroot image
- Build the new buildroot container
when: not etcosbs_infra_repo_stat.stat.exists
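# The keytab gets the same treatment, but via a hardlink (file state=hard) so
# the build context stays in sync with the keytab managed under /etc.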
- name: stat /etc/ keytab
stat:
path: "/etc/krb5.osbs_{{osbs_url}}.keytab"
register: etc_kt_stat
- name: stat /etc/osbs/buildroot/ keytab
stat:
path: "/etc/osbs/buildroot/krb5.osbs_{{osbs_url}}.keytab"
register: etcosbs_kt_stat
- name: remove old hardlink to /etc/osbs/buildroot/ keytab
file:
path: "/etc/osbs/buildroot/krb5.osbs_{{osbs_url}}.keytab"
state: absent
when: etcosbs_kt_stat.stat.exists and etc_kt_stat.stat.checksum != etcosbs_kt_stat.stat.checksum
- name: Hardlink keytab for buildroot container (because Docker)
file:
src: "/etc/krb5.osbs_{{osbs_url}}.keytab"
dest: "/etc/osbs/buildroot/krb5.osbs_{{osbs_url}}.keytab"
state: hard
notify:
- Remove the previous buildroot image
- Build the new buildroot container
when: not etcosbs_kt_stat.stat.exists
- name: pull fedora required docker images
command: "docker pull registry.fedoraproject.org/fedora:latest"
register: docker_pull_fedora
changed_when: "'Downloaded newer image' in docker_pull_fedora.stdout"
- name: enable nrpe for monitoring (noc01)
iptables: action=insert chain=INPUT destination_port=5666 protocol=tcp source=10.3.163.10 state=present jump=ACCEPT
- name: make directory for cni config
file:
path: /etc/cni/net.d/
state: directory
- name: Add cni config
copy:
dest: /etc/cni/net.d/80-openshift-network.conf
content: |
{
"cniVersion": "0.2.0",
"name": "openshift-sdn",
"type": "openshift-sdn"
}
- name: Set ulimit for docker
copy:
dest: /etc/systemd/system/docker.service.d/override.conf
content: |
[Service]
LimitNOFILE=1048576
notify: restart and reload docker service

View File

@ -1,15 +0,0 @@
# This playbook can be used to rebuild the buildroot image of
# OSBS. This is useful when we want to update some dependencies in the image.
- name: rebuild the osbs buildroot image.
hosts: osbs_nodes:osbs_nodes_stg:osbs_aarch64_nodes_stg:osbs_aarch64_nodes
gather_facts: false
user: root
tasks:
- name: Backup the current buildroot
command: "docker tag buildroot:latest buildroot:backup"
- name: rebuild the buildroot container image.
command: "docker build /etc/osbs/buildroot -t buildroot --no-cache --pull"

View File

@ -1,168 +0,0 @@
- name: Create orchestrator namespace
hosts: osbs_masters_stg[0]:osbs_masters[0]
roles:
- role: osbs-namespace
osbs_orchestrator: true
osbs_worker_clusters: "{{ osbs_conf_worker_clusters }}"
osbs_cpu_limitrange: "{{ osbs_orchestrator_cpu_limitrange }}"
osbs_nodeselector: "{{ osbs_orchestrator_default_nodeselector|default('') }}"
osbs_sources_command: "{{ osbs_conf_sources_command }}"
osbs_readwrite_users: "{{ osbs_conf_readwrite_users }}"
osbs_service_accounts: "{{ osbs_conf_service_accounts }}"
koji_use_kerberos: true
koji_kerberos_keytab: "FILE:/etc/krb5.osbs_{{ osbs_url }}.keytab"
koji_kerberos_principal: "osbs/{{osbs_url}}@{{ ipa_realm }}"
tags:
- osbs-orchestrator-namespace
- name: setup reactor config secret in orchestrator namespace
hosts: osbs_masters_stg[0]:osbs_masters[0]
roles:
- role: osbs-secret
osbs_secret_name: reactor-config-secret
osbs_secret_files:
- source: "/tmp/{{ osbs_namespace }}-{{ env }}-reactor-config-secret.yml"
dest: config.yaml
tags:
- osbs-orchestrator-namespace
- name: setup client config secret in orchestrator namespace
hosts: osbs_masters_stg[0]:osbs_masters[0]
roles:
- role: osbs-secret
osbs_secret_name: client-config-secret
osbs_secret_files:
- source: "/tmp/{{ osbs_namespace }}-{{ env }}-client-config-secret.conf"
dest: osbs.conf
tags:
- osbs-orchestrator-namespace
- name: setup ODCS secret in orchestrator namespace
hosts: osbs_masters_stg[0]:osbs_masters[0]
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
roles:
- role: osbs-secret
osbs_secret_name: odcs-oidc-secret
osbs_secret_files:
- source: "{{ private }}/files/osbs/{{ env }}/odcs-oidc-token"
dest: token
tags:
- osbs-orchestrator-namespace
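# The next plays wire the orchestrator up to its per-arch worker clusters:
# the worker "orchestrator" service account token is read with oc from the
# worker namespace, staged in a local temp file, uploaded as a secret into
# the orchestrator namespace, and the temp file is deleted again.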
- name: Save orchestrator token x86_64
hosts: osbs_masters_stg[0]:osbs_masters[0]
tasks:
- name: get orchestrator service account token
command: "oc -n {{ osbs_worker_namespace }} sa get-token orchestrator"
register: orchestrator_token_x86_64
- name: save the token locally
local_action: >
copy
content="{{ orchestator_token_x86_64.stdout }}"
dest=/tmp/.orchestator-token-x86_64
mode=0400
tags:
- osbs-orchestrator-namespace
- name: setup orchestrator token for x86_64-osbs
hosts: osbs_masters_stg[0]:osbs_masters[0]
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
roles:
- role: osbs-secret
osbs_secret_name: x86-64-orchestrator
osbs_secret_files:
- source: "/tmp/.orchestator-token-x86_64"
dest: token
post_tasks:
- name: Delete the temporary secret file
local_action: >
file
state=absent
path="/tmp/.orchestator-token-x86_64"
tags:
- osbs-orchestrator-namespace
- name: Save orchestrator token aarch64
hosts: osbs_aarch64_masters_stg[0]:osbs_aarch64_masters[0]
tasks:
- name: get orchestrator service account token
command: "oc -n {{ osbs_worker_namespace }} sa get-token orchestrator"
register: orchestrator_token_aarch64
- name: save the token locally
local_action: >
copy
content="{{ orchestator_token_aarch64.stdout }}"
dest=/tmp/.orchestator-token-aarch64
mode=0400
tags:
- osbs-orchestrator-namespace
- name: setup orchestrator token for aarch64-osbs
hosts: osbs_masters_stg[0]:osbs_masters[0]
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
roles:
- role: osbs-secret
osbs_secret_can_fail: true
osbs_secret_name: aarch64-orchestrator
osbs_secret_files:
- source: "/tmp/.orchestator-token-aarch64"
dest: token
post_tasks:
- name: Delete the temporary secret file
local_action: >
file
state=absent
path="/tmp/.orchestator-token-aarch64"
tags:
- osbs-orchestrator-namespace
- name: Add dockercfg secret to allow registry push orchestrator
hosts: osbs_masters_stg[0]:osbs_masters[0]
tags:
- osbs-dockercfg-secret
user: root
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
pre_tasks:
- name: Create the username:password string needed by the template
set_fact:
auth_info_prod: "{{candidate_registry_osbs_prod_username}}:{{candidate_registry_osbs_prod_password}}"
auth_info_stg: "{{candidate_registry_osbs_stg_username}}:{{candidate_registry_osbs_stg_password}}"
- name: Create the dockercfg secret file
local_action: >
template
src="{{ files }}/osbs/dockercfg-{{env}}-secret.j2"
dest="/tmp/.dockercfg{{ env }}"
mode=0400
roles:
- role: osbs-secret
osbs_secret_name: "v2-registry-dockercfg"
osbs_secret_type: kubernetes.io/dockercfg
osbs_secret_files:
- source: "/tmp/.dockercfg{{ env }}"
dest: .dockercfg
post_tasks:
- name: Delete the temporary secret file
local_action: >
file
state=absent
path="/tmp/.dockercfg{{ env }}"

View File

@ -1,78 +0,0 @@
- name: Create worker namespace
hosts: osbs_masters_stg[0]:osbs_masters[0]:osbs_aarch64_masters_stg[0]:osbs_aarch64_masters[0]
tags:
- osbs-worker-namespace
user: root
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
vars:
osbs_kubeconfig_path: /etc/origin/master/admin.kubeconfig
osbs_environment:
KUBECONFIG: "{{ osbs_kubeconfig_path }}"
roles:
- role: osbs-namespace
osbs_namespace: "{{ osbs_worker_namespace }}"
osbs_service_accounts: "{{ osbs_worker_service_accounts }}"
osbs_nodeselector: "{{ osbs_worker_default_nodeselector|default('') }}"
osbs_sources_command: "{{ osbs_conf_sources_command }}"
- name: setup ODCS secret in worker namespace
hosts: osbs_masters_stg[0]:osbs_masters[0]:osbs_aarch64_masters_stg[0]:osbs_aarch64_masters[0]
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
roles:
- role: osbs-secret
osbs_namespace: "{{ osbs_worker_namespace }}"
osbs_secret_name: odcs-oidc-secret
osbs_secret_files:
- source: "{{ private }}/files/osbs/{{ env }}/odcs-oidc-token"
dest: token
tags:
- osbs-worker-namespace
- name: Add dockercfg secret to allow registry push worker
hosts: osbs_masters_stg[0]:osbs_masters[0]:osbs_aarch64_masters_stg[0]:osbs_aarch64_masters[0]
tags:
- osbs-dockercfg-secret
- osbs-worker-namespace
user: root
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
pre_tasks:
- name: Create the username:password string needed by the template
set_fact:
auth_info_prod: "{{candidate_registry_osbs_prod_username}}:{{candidate_registry_osbs_prod_password}}"
auth_info_stg: "{{candidate_registry_osbs_stg_username}}:{{candidate_registry_osbs_stg_password}}"
- name: Create the dockercfg secret file
local_action: >
template
src="{{ files }}/osbs/dockercfg-{{env}}-secret.j2"
dest="/tmp/.dockercfg{{ env }}"
mode=0400
roles:
- role: osbs-secret
osbs_namespace: "{{ osbs_worker_namespace }}"
osbs_secret_name: "v2-registry-dockercfg"
osbs_secret_type: kubernetes.io/dockercfg
osbs_secret_files:
- source: "/tmp/.dockercfg{{ env }}"
dest: .dockercfg
post_tasks:
- name: Delete the temporary secret file
local_action: >
file
state=absent
path="/tmp/.dockercfg{{ env }}"

View File

@ -613,11 +613,6 @@
header_scheme: true
keephost: true
- role: httpd/reverseproxy
website: osbs.fedoraproject.org
destname: osbs
proxyurl: http://localhost:10047
- role: httpd/reverseproxy
website: registry.fedoraproject.org
destname: registry-fedora

View File

@ -660,12 +660,6 @@
tags:
- fedoraloveskde
- role: httpd/website
site_name: osbs.fedoraproject.org
server_aliases: [osbs.stg.fedoraproject.org]
sslonly: true
cert_name: "{{wildcard_cert_name}}"
- role: httpd/website
site_name: "provision{{ env_suffix }}.fedoraproject.org"
# Zezere needs non-HTTPS for netboot

View File

@ -1,5 +1,5 @@
- name: Uninstall IPA client
hosts: bodhi_backend_stg:bugzilla2fedmsg_stg:github2fedmsg_stg:ipsilon_stg:mbs_stg:osbs_control_stg:osbs_masters_stg:osbs_nodes_stg:osbs_aarch64_masters_stg:osbs_aarch64_nodes_stg:buildvm_stg:buildvm_ppc64le_stg:buildvm_aarch64_stg:buildvm_armv7_stg:buildvm_s390x_stg
hosts: bodhi_backend_stg:bugzilla2fedmsg_stg:github2fedmsg_stg:ipsilon_stg:mbs_stg:buildvm_stg:buildvm_ppc64le_stg:buildvm_aarch64_stg:buildvm_armv7_stg:buildvm_s390x_stg
user: root
vars_files:
- /srv/web/infra/ansible/vars/global.yml
@ -16,7 +16,6 @@
- import_playbook: "/srv/web/infra/ansible/playbooks/groups/github2fedmsg.yml"
- import_playbook: "/srv/web/infra/ansible/playbooks/groups/ipsilon.yml"
- import_playbook: "/srv/web/infra/ansible/playbooks/groups/mbs.yml"
- import_playbook: "/srv/web/infra/ansible/playbooks/groups/osbs/deploy-cluster.yml"
- import_playbook: "/srv/web/infra/ansible/playbooks/groups/buildvm.yml"

View File

@ -1,29 +0,0 @@
---
language: python
python: "2.7"
# Use the new container infrastructure
sudo: false
# Install ansible
addons:
apt:
packages:
- python-pip
install:
# Install ansible
- pip install ansible
# Check ansible version
- ansible --version
# Create ansible.cfg with correct roles_path
- printf '[defaults]\nroles_path=../' >ansible.cfg
script:
# Basic role syntax check
- ansible-playbook tests/test.yml -i tests/inventory --syntax-check
notifications:
webhooks: https://galaxy.ansible.com/api/v1/notifications/

View File

@ -1,54 +0,0 @@
ansible-ansible-openshift-ansible
#################################
Ansible role to run ansible on a remote "openshift control" host that will run
`openshift-ansible`_ to deploy a cluster.
This is a Fedora Infrastructure-specific adaptation, as a role, of the original
prototype located in pagure:
https://pagure.io/ansible-ansible-openshift-ansible/tree/master
What? Why?
----------
The `openshift-ansible`_ playbooks require that various tasks be run on
``localhost`` in order to build their internal abstracted representation of the
inventory list. Running potentially arbitrary code from external sources on a
bastion host (which is what ``localhost`` would be as the ansible control
machine) is often frowned upon. The goal here is to allow for the deployment of
`openshift-ansible`_ via an intermediate host.
.. note::
There is a requirement to set up the SSH keys such that the bastion host
can passwordless ssh into the openshift control host and such that the
openshift control host can passwordless ssh into each of the hosts in
the openshift cluster. This is outside the scope of this document.
::
+---------------+ +-------------------+
| | | |
| bastion host +----[ansible]----->| openshift control |
| | | |
+---------------+ +---------+---------+
|
|
[ansible]
|
|
V
+--------------------------------------------------------------------------+
| |
| openshift cluster |
| |
| +-----------+ +-----------+ +-----------+ |
| | | | | | | |
| | openshift | ...[masters] | openshift | | openshift | ...[nodes] |
| | master | | node | | node | |
| | | | | | | |
| +-----------+ +-----------+ +-----------+ |
| |
+--------------------------------------------------------------------------+
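As a rough sketch (host and playbook names here are illustrative, not
prescriptive), the flow amounts to::

    # on the bastion host: apply this role to the openshift control host
    ansible-playbook -i inventory playbooks/groups/osbs/deploy-cluster.yml

    # the role then makes the openshift control host itself run, roughly:
    ansible-playbook playbooks/deploy_cluster.yml -i cluster-inventory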

View File

@ -1,87 +0,0 @@
---
# defaults file for ansible-ansible-openshift-ansible
#
#
#
# Auth Profile
# These are Fedora Infra specific auth profiles
#
# Acceptable values:
# osbs - this will configure htpasswd for use with osbs
# fedoraidp - configure for fedora idp
# fedoraidp-stg - configure for fedora idp staging env
openshift_auth_profile: osbs
# Do we want OpenShift itself to be containerized?
# This is a requirement if using Atomic Host
#
# As of v3.5.x this would mean that all our systems would completely go down
# in the event the docker daemon were to restart or crash.
#
# In the future (as of v3.6 devel branch), this is done with system containers
# and won't be bound to the docker daemon.
openshift_ansible_containerized_deploy: false
# This will co-host the infra nodes with the primary nodes
openshift_shared_infra: false
# OpenShift Cluster URL
# Example: openshift.fedoraproject.org
openshift_cluster_url: None
# OpenShift Console and API listening ports
# These default to 8443 in openshift-ansible
openshift_api_port: 8443
openshift_console_port: 8443
# OpenShift Applications Ingress subdomain (OpenShift routes)
openshift_app_subdomain: None
# Setup native OpenShift Master High Availability (true or false)
openshift_master_ha: false
# Destination file name for template-generated cluster inventory
cluster_inventory_filename: "cluster-inventory"
# Ansible user for use with openshift-ansible playbooks
openshift_ansible_ssh_user: root
# OpenShift Debug level (Default is 2 upstream)
openshift_debug_level: 2
# Release required as per the openshift-ansible
openshift_release: "v1.5.0"
# OpenShift Deployment Type
# Possible options:
# origin
# openshift-enterprise
deployment_type: origin
# Install the OpenShift App Examples (value should be "true" or "false")
openshift_ansible_install_examples: false
# Path to clone the openshift-ansible git repo into
openshift_ansible_path: "/root/openshift-ansible"
# Relative path inside the openshift-ansible git repo of the playbook to execute
# remotely
openshift_ansible_playbook: "playbooks/byo/config.yml"
# openshift-ansible version tag, this is the git tag of the "release" of the
# openshift-ansible git repo. We need to track OpenShift v1.x to
# openshift-ansible-3.x.y-1 as that's the release/tag standard upstream.
openshift_ansible_version: "openshift-ansible-3.2.35-1"
# The group names assigned to these variables are used to create the "effective"
# inventory (via a template) that is used to deploy the OpenShift Cluster via
# openshift-ansible (https://github.com/openshift/openshift-ansible). The values
# assigned here must match group names in the current running inventory, or the
# remote effective inventory that actually deploys the OpenShift Cluster will be
# empty, causing undesired effects.
openshift_cluster_masters_group: "openshift-cluster-masters"
openshift_cluster_nodes_group: "openshift-cluster-nodes"
openshift_cluster_infra_group: "openshift-cluster-nodes"
openshift_arch: "x86_64"

View File

@ -1,81 +0,0 @@
---
# tasks file for ansible-ansible-openshift-ansible
#
- name: Install required packages
package: name="{{ item }}" state=present
with_items:
- ansible
- git
- pyOpenSSL
- ca-certificates
tags:
- ansible-ansible-openshift-ansible
- ansible-ansible-openshift-ansible-config
- name: git clone the openshift-ansible repo
git:
repo: "https://github.com/openshift/openshift-ansible.git"
dest: "{{ openshift_ansible_path }}"
version: "{{ openshift_ansible_version }}"
tags:
- ansible-ansible-openshift-ansible
- ansible-ansible-openshift-ansible-config
ignore_errors: true
- name: generate the inventory file (staging)
template:
src: "cluster-inventory-stg.j2"
dest: "{{ openshift_ansible_path }}/{{ cluster_inventory_filename }}"
tags:
- ansible-ansible-openshift-ansible
- ansible-ansible-openshift-ansible-config
when: env == 'staging' and inventory_hostname.startswith('os-')
- name: generate the inventory file (production) (iad2)
template:
src: "cluster-inventory-iad2-prod.j2"
dest: "{{ openshift_ansible_path }}/{{ cluster_inventory_filename }}"
tags:
- ansible-ansible-openshift-ansible
- ansible-ansible-openshift-ansible-config
when: env == 'production' and inventory_hostname.startswith('os-') and datacenter == 'iad2'
- name: generate the inventory file (osbs)
template:
src: "cluster-inventory-osbs.j2"
dest: "{{ openshift_ansible_path }}/{{ cluster_inventory_filename }}"
tags:
- ansible-ansible-openshift-ansible
- ansible-ansible-openshift-ansible-config
when: inventory_hostname.startswith('osbs')
- name: run ansible prereqs playbook
shell: "ansible-playbook {{ openshift_ansible_pre_playbook }} -i {{ cluster_inventory_filename }}"
args:
chdir: "{{ openshift_ansible_path }}"
register: run_ansible_out
when: openshift_ansible_pre_playbook is defined and openshift_ansible_upgrading is defined
tags:
- ansible-ansible-openshift-ansible
- name: run ansible
shell: "ansible-playbook {{ openshift_ansible_playbook }} -i {{ cluster_inventory_filename }}"
args:
chdir: "{{ openshift_ansible_path }}"
register: run_ansible_out
tags:
- ansible-ansible-openshift-ansible
when: openshift_ansible_upgrading is defined
- name: display run ansible stdout_lines
debug:
var: run_ansible_out.stdout_lines
tags:
- ansible-ansible-openshift-ansible
- name: display run ansible stderr
debug:
var: run_ansible_out.stderr
tags:
- ansible-ansible-openshift-ansible

View File

@ -1,882 +0,0 @@
# This is based on the example inventories provided by the upstream
# openshift-ansible project available:
# https://github.com/openshift/openshift-ansible/tree/master/inventory/byo
[masters]
{% for host in groups[openshift_cluster_masters_group] %}
{% if hostvars[host].datacenter == datacenter %}
{{ host }}
{% endif %}
{% endfor %}
[etcd]
{% for host in groups[openshift_cluster_masters_group] %}
{% if hostvars[host].datacenter == datacenter %}
{{ host }}
{% endif %}
{% endfor %}
[nodes]
{% for host in groups[openshift_cluster_masters_group] %}
{% if hostvars[host].datacenter == datacenter %}
{{ host }} openshift_node_group_name='node-config-master'
{% endif %}
{% endfor %}
{% for host in groups[openshift_cluster_nodes_group] %}
{% if hostvars[host].datacenter == datacenter %}
{{ host }} openshift_node_group_name='node-config-compute'
{% endif %}
{% endfor %}
# Create an OSEv3 group that contains the masters and nodes groups
[OSEv3:children]
masters
nodes
etcd
# Add this if using nfs and have defined the nfs group
#nfs
# Set variables common for all OSEv3 hosts
[OSEv3:vars]
openshift_node_groups=[{'name': 'node-config-master', 'labels': ['node-role.kubernetes.io/master=true', 'orchestrator=true']}, {'name': 'node-config-infra', 'labels': ['node-role.kubernetes.io/infra=true',]}, {'name': 'node-config-compute', 'labels': ['node-role.kubernetes.io/compute=true', 'node-role.kubernetes.io/infra=true', 'worker=true'], 'edits': [{ 'key': 'kubeletArguments.pods-per-core','value': ['20']}]}]
# Disable the service catalog. We don't use it and it needs persistent storage.
openshift_enable_service_catalog=false
# Set this because we have nfs which isn't supported
openshift_enable_unsupported_configurations=true
# Have upgrader also restart systems in a rolling manner.
openshift_rolling_restart_mode=system
# Disable the disk and package version tests
openshift_disable_check=disk_availability,package_version,docker_image_availability,memory_availability,docker_storage
# SSH user, this user should allow ssh based auth without requiring a
# password. If using ssh key based auth, then the key should be managed by an
# ssh agent.
ansible_ssh_user={{openshift_ansible_ssh_user}}
# Specify the deployment type. Valid values are origin and openshift-enterprise.
deployment_type={{openshift_deployment_type}}
# Specify the generic release of OpenShift to install. This is used mainly just during installation, after which we
# rely on the version running on the first master. Works best for containerized installs where we can usually
# use this to lookup the latest exact version of the container images, which is the tag actually used to configure
# the cluster. For RPM installations we just verify the version detected in your configured repos matches this
# release.
openshift_release={{openshift_release}}
openshift_version={{openshift_version}}
# For whatever reason, this keeps hitting a race condition and docker is
# excluded before docker is installed so we're just going to remove it.
openshift_enable_docker_excluder=False
# OpenShift Containerized deployment or not?
containerized={{openshift_ansible_containerized_deploy}}
{% if openshift_ansible_ssh_user != "root" %}
# If ansible_ssh_user is not root, ansible_become must be set to true and the
# user must be configured for passwordless sudo
ansible_become=yes
{% endif %}
{% if openshift_ansible_python_interpreter is defined %}
ansible_python_interpreter={{openshift_ansible_python_interpreter}}
{% endif %}
# Debug level for all OpenShift components (Defaults to 2)
debug_level={{openshift_debug_level}}
# Specify an exact container image tag to install or configure.
# WARNING: This value will be used for all hosts in containerized environments, even those that have another version installed.
# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
openshift_image_tag={{openshift_release}}
# Specify an exact rpm version to install or configure.
# WARNING: This value will be used for all hosts in RPM based environments, even those that have another version installed.
# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
openshift_pkg_version={{openshift_pkg_version}}
# Install the openshift examples
{% if openshift_ansible_install_examples is defined %}
openshift_install_examples={{openshift_ansible_install_examples}}
{% endif %}
openshift_cluster_monitoring_operator_install=false
openshift_web_console_install=false
openshift_console_install=false
openshift_enable_olm=false
# Configure logoutURL in the master config for console customization
# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#changing-the-logout-url
#openshift_master_logout_url=http://example.com
# Configure extensionScripts in the master config for console customization
# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets
#openshift_master_extension_scripts=['/path/to/script1.js','/path/to/script2.js']
# Configure extensionStylesheets in the master config for console customization
# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets
#openshift_master_extension_stylesheets=['/path/to/stylesheet1.css','/path/to/stylesheet2.css']
# Configure extensions in the master config for console customization
# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files
#openshift_master_extensions=[{'name': 'images', 'sourceDirectory': '/path/to/my_images'}]
# Configure extensions in the master config for console customization
# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files
#openshift_master_oauth_template=/path/to/login-template.html
# Configure imagePolicyConfig in the master config
# See: https://godoc.org/github.com/openshift/origin/pkg/cmd/server/api#ImagePolicyConfig
#openshift_master_image_policy_config={"maxImagesBulkImportedPerRepository": 3, "disableScheduledImport": true}
# Docker Configuration
# Add additional, insecure, and blocked registries to global docker configuration
# For enterprise deployment types we ensure that registry.access.redhat.com is
# included if you do not include it
#openshift_docker_additional_registries=registry.example.com
#openshift_docker_insecure_registries=registry.example.com
#openshift_docker_blocked_registries=registry.hacker.com
# Disable pushing to dockerhub
#openshift_docker_disable_push_dockerhub=True
# Install and run cri-o.
{% if openshift_ansible_use_crio is defined %}
openshift_use_crio={{ openshift_ansible_use_crio }}
{% endif %}
{% if openshift_ansible_crio_only is defined %}
openshift_use_crio_only={{ openshift_ansible_crio_only }}
{% endif %}
# The following two variables are used when openshift_use_crio is True
# and clean up after builds that pass through docker. When openshift_use_crio is True
# these variables are set to the defaults shown. You may override them here.
# NOTE: You will still need to tag crio nodes with your given label(s)!
# Enable docker garbage collection when using cri-o
#openshift_crio_enable_docker_gc=True
# Node Selectors to run the garbage collection
#openshift_crio_docker_gc_node_selector={'runtime': 'cri-o'}
openshift_crio_docker_gc_node_selector={}
openshift_crio_systemcontainer_image_override="registry.access.redhat.com/openshift3/cri-o:v3.9"
# Use Docker inside a System Container. Note that this is a tech preview and should
# not be used to upgrade!
# The following options for docker are ignored:
# - docker_version
# - docker_upgrade
# The following options must not be used
# - openshift_docker_options
#openshift_docker_use_system_container=False
# Force the registry to use for the system container. By default the registry
# will be built off of the deployment type and ansible_distribution. Only
# use this option if you are sure you know what you are doing!
#openshift_docker_systemcontainer_image_registry_override="registry.example.com"
# Items added, as is, to end of /etc/sysconfig/docker OPTIONS
# Default value: "--log-driver=journald"
#openshift_docker_options="-l warn --ipv6=false"
# Specify exact version of Docker to configure or upgrade to.
# Downgrades are not supported and will error out. Be careful when upgrading docker from < 1.10 to > 1.10.
# docker_version="1.12.1"
# Skip upgrading Docker during an OpenShift upgrade, leaves the current Docker version alone.
# docker_upgrade=False
# Specify exact version of etcd to configure or upgrade to.
# etcd_version="3.1.0"
# Enable etcd debug logging, defaults to false
# etcd_debug=true
# Set etcd log levels by package
# etcd_log_package_levels="etcdserver=WARNING,security=DEBUG"
# Upgrade Hooks
#
# Hooks are available to run custom tasks at various points during a cluster
# upgrade. Each hook should point to a file with Ansible tasks defined. Use
# absolute paths where possible; otherwise the path will be treated as relative
# to the file where the hook is actually used.
#
# Tasks to run before each master is upgraded.
# openshift_master_upgrade_pre_hook=/usr/share/custom/pre_master.yml
#
# Tasks to run to upgrade the master. These tasks run after the main openshift-ansible
# upgrade steps, but before we restart system/services.
# openshift_master_upgrade_hook=/usr/share/custom/master.yml
#
# Tasks to run after each master is upgraded and system/services have been restarted.
# openshift_master_upgrade_post_hook=/usr/share/custom/post_master.yml
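# As an illustrative sketch (file name and task are hypothetical), a hook file
# such as /usr/share/custom/pre_master.yml just holds ordinary Ansible tasks:
#   - name: pause before upgrading this master
#     pause:
#       seconds: 30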
# Alternate image format string, useful if you've got your own registry mirror
#oreg_url=example.com/openshift3/ose-${component}:${version}
# If oreg_url points to a registry other than registry.access.redhat.com we can
# modify image streams to point at that registry by setting the following to true
#openshift_examples_modify_imagestreams=true
# Additional yum repos to install
#openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://example.com/puddle/build/AtomicOpenShift/3.1/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]
# Defining htpasswd users
#openshift_master_htpasswd_users={'user1': '<pre-hashed password>', 'user2': '<pre-hashed password>'}
# or
#openshift_master_htpasswd_file=<path to local pre-generated htpasswd file>
# OSBS Specific Auth
{% if openshift_auth_profile == "osbs" %}
openshift_master_manage_htpasswd=false
openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider'}]
{% endif %}
{% if openshift_auth_profile == "fedoraidp" %}
openshift_master_identity_providers=[{"name": "fedoraidp", "login": "true", "challenge": "false", "kind": "OpenIDIdentityProvider", "client_id": "openshift", "client_secret": "{{openshift_prod_client_secret}}", "extraScopes": ["profile", "email", "https://id.fedoraproject.org/scope/groups"], "claims": {"id": ["sub"], "preferredUsername": ["sub"], "name": ["name"], "email": ["email"]}, "urls": {"authorize": "https://id{{env_suffix}}.fedoraproject.org/openidc/Authorization", "token": "https://id{{env_suffix}}.fedoraproject.org/openidc/Token", "userInfo": "https://id{{env_suffix}}.fedoraproject.org/openidc/UserInfo"}}]
{% endif %}
{% if openshift_auth_profile == "fedoraidp-stg" %}
openshift_master_identity_providers=[{"name": "fedoraidp", "login": "true", "challenge": "false", "kind": "OpenIDIdentityProvider", "client_id": "openshift", "client_secret": "{{openshift_stg_client_secret}}", "claims": {"id": ["sub"], "preferredUsername": ["sub"], "name": ["name"], "email": ["email"]}, "urls": {"authorize": "https://id{{env_suffix}}.fedoraproject.org/openidc/Authorization", "token": "https://id{{env_suffix}}.fedoraproject.org/openidc/Token", "userInfo": "https://id{{env_suffix}}.fedoraproject.org/openidc/UserInfo"}}]
{% endif %}
# If oreg_url points to a registry requiring authentication, provide the following:
{% if openshift_arch == "aarch64" %}
oreg_url=quay.io/multi-arch/aarch64-openshift3-ose-${component}:v3.11
oreg_auth_user="{{ os_multiarch_registry_user }}"
oreg_auth_password="{{ os_multiarch_registry_password }}"
oreg_test_login=false
{% elif env == "staging" %}
oreg_auth_user="{{ os_stg_registry_user }}"
oreg_auth_password="{{ os_stg_registry_password }}"
{% elif datacenter != 'iad2' %}
oreg_auth_user="{{ os_prod_registry_user }}"
oreg_auth_password="{{ os_prod_registry_password }}"
{% else %}
oreg_auth_user="{{ os_prod_iad2_registry_user }}"
oreg_auth_password="{{ os_prod_iad2_registry_password }}"
{% endif %}
# NOTE: oreg_url must be defined by the user for oreg_auth_* to have any effect.
# oreg_auth_pass should be generated from running docker login.
# To update registry auth credentials, uncomment the following:
#oreg_auth_credentials_replace=True
# Allow all auth
#openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}]
# LDAP auth
#openshift_master_identity_providers=[{'name': 'my_ldap_provider', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider', 'attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': '', 'bindPassword': '', 'ca': 'my-ldap-ca.crt', 'insecure': 'false', 'url': 'ldap://ldap.example.com:389/ou=users,dc=example,dc=com?uid'}]
#
# Configure LDAP CA certificate
# Specify either the ASCII contents of the certificate or the path to
# the local file that will be copied to the remote host. CA
# certificate contents will be copied to master systems and saved
# within /etc/origin/master/ with a filename matching the "ca" key set
# within the LDAPPasswordIdentityProvider.
#
#openshift_master_ldap_ca=<ca text>
# or
#openshift_master_ldap_ca_file=<path to local ca file to use>
# OpenID auth
#openshift_master_identity_providers=[{"name": "openid_auth", "login": "true", "challenge": "false", "kind": "OpenIDIdentityProvider", "client_id": "my_client_id", "client_secret": "my_client_secret", "claims": {"id": ["sub"], "preferredUsername": ["preferred_username"], "name": ["name"], "email": ["email"]}, "urls": {"authorize": "https://myidp.example.com/oauth2/authorize", "token": "https://myidp.example.com/oauth2/token"}, "ca": "my-openid-ca-bundle.crt"}]
#
# Configure OpenID CA certificate
# Specify either the ASCII contents of the certificate or the path to
# the local file that will be copied to the remote host. CA
# certificate contents will be copied to master systems and saved
# within /etc/origin/master/ with a filename matching the "ca" key set
# within the OpenIDIdentityProvider.
#
#openshift_master_openid_ca=<ca text>
# or
#openshift_master_openid_ca_file=<path to local ca file to use>
# Request header auth
#openshift_master_identity_providers=[{"name": "my_request_header_provider", "challenge": "true", "login": "true", "kind": "RequestHeaderIdentityProvider", "challengeURL": "https://www.example.com/challenging-proxy/oauth/authorize?${query}", "loginURL": "https://www.example.com/login-proxy/oauth/authorize?${query}", "clientCA": "my-request-header-ca.crt", "clientCommonNames": ["my-auth-proxy"], "headers": ["X-Remote-User", "SSO-User"], "emailHeaders": ["X-Remote-User-Email"], "nameHeaders": ["X-Remote-User-Display-Name"], "preferredUsernameHeaders": ["X-Remote-User-Login"]}]
#
# Configure request header CA certificate
# Specify either the ASCII contents of the certificate or the path to
# the local file that will be copied to the remote host. CA
# certificate contents will be copied to master systems and saved
# within /etc/origin/master/ with a filename matching the "clientCA"
# key set within the RequestHeaderIdentityProvider.
#
#openshift_master_request_header_ca=<ca text>
# or
#openshift_master_request_header_ca_file=<path to local ca file to use>
{% if openshift_master_ha is defined %}
{% if openshift_master_ha %}
# Native high availability cluster method with optional load balancer.
# If no lb group is defined, the installer assumes that a load balancer has
# been preconfigured. For installation the value of
# openshift_master_cluster_hostname must resolve to the load balancer
# or to one or all of the masters defined in the inventory if no load
# balancer is present.
openshift_master_cluster_method=native
openshift_master_cluster_hostname={{openshift_internal_cluster_url}}
openshift_master_cluster_public_hostname={{openshift_cluster_url}}
{% endif %}
{% endif %}
# Override the default controller lease ttl
#osm_controller_lease_ttl=30
# Configure controller arguments
#osm_controller_args={'resource-quota-sync-period': ['10s']}
# Configure api server arguments
#osm_api_server_args={'max-requests-inflight': ['400']}
# default subdomain to use for exposed routes
{% if openshift_app_subdomain is defined %}
{% if openshift_app_subdomain %}
openshift_master_default_subdomain={{openshift_app_subdomain}}
{% endif %}
{% endif %}
# additional cors origins
#osm_custom_cors_origins=['foo.example.com', 'bar.example.com']
# default project node selector
#osm_default_node_selector='region=primary'
# Override the default pod eviction timeout
#openshift_master_pod_eviction_timeout=5m
# Override the default oauth tokenConfig settings:
# openshift_master_access_token_max_seconds=86400
# openshift_master_auth_token_max_seconds=500
# Override master servingInfo.maxRequestsInFlight
#openshift_master_max_requests_inflight=500
# Override master and node servingInfo.minTLSVersion and .cipherSuites
# valid TLS versions are VersionTLS10, VersionTLS11, VersionTLS12
# example cipher suites override, valid cipher suites are https://golang.org/pkg/crypto/tls/#pkg-constants
#openshift_master_min_tls_version=VersionTLS12
#openshift_master_cipher_suites=['TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', '...']
#
#openshift_node_min_tls_version=VersionTLS12
#openshift_node_cipher_suites=['TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', '...']
# default storage plugin dependencies to install, by default the ceph and
# glusterfs plugin dependencies will be installed, if available.
#osn_storage_plugin_deps=['ceph','glusterfs']
# OpenShift Router Options
#
# An OpenShift router will be created during install if there are
# nodes present with labels matching the default router selector,
# "region=infra". Set openshift_node_labels per node as needed in
# order to label nodes.
#
# Example:
# [nodes]
# node.example.com openshift_node_labels="{'region': 'infra'}"
#
# Router selector (optional)
# Router will only be created if nodes matching this label are present.
# Default value: 'region=infra'
#openshift_hosted_router_selector='region=infra'
#
# Router replicas (optional)
# Unless specified, openshift-ansible will calculate the replica count
# based on the number of nodes matching the openshift router selector.
#openshift_hosted_router_replicas=2
#
# Router force subdomain (optional)
# A router path format to force on all routes used by this router
# (will ignore the route host value)
#openshift_hosted_router_force_subdomain='${name}-${namespace}.apps.example.com'
#
# Router certificate (optional)
# Provide local certificate paths which will be configured as the
# router's default certificate.
#openshift_hosted_router_certificate={"certfile": "/path/to/router.crt", "keyfile": "/path/to/router.key", "cafile": "/path/to/router-ca.crt"}
#
# Disable management of the OpenShift Router
openshift_hosted_manage_router=false
#
# Router sharding support has been added and can be achieved by supplying the correct
# data to the inventory. The variable to house the data is openshift_hosted_routers
# and is in the form of a list. If no data is passed then a default router will be
# created. There are multiple combinations of router sharding. The one described
# below supports routers on separate nodes.
#openshift_hosted_routers:
#- name: router1
# stats_port: 1936
# ports:
# - 80:80
# - 443:443
# replicas: 1
# namespace: default
# serviceaccount: router
# selector: type=router1
# images: "openshift3/ose-${component}:${version}"
# edits: []
# certificates:
# certfile: /path/to/certificate/abc.crt
# keyfile: /path/to/certificate/abc.key
# cafile: /path/to/certificate/ca.crt
#- name: router2
# stats_port: 1936
# ports:
# - 80:80
# - 443:443
# replicas: 1
# namespace: default
# serviceaccount: router
# selector: type=router2
# images: "openshift3/ose-${component}:${version}"
# certificates:
# certfile: /path/to/certificate/xyz.crt
# keyfile: /path/to/certificate/xyz.key
# cafile: /path/to/certificate/ca.crt
# edits:
# # ROUTE_LABELS sets the router to listen for routes
# # tagged with the provided values
# - key: spec.template.spec.containers[0].env
# value:
# name: ROUTE_LABELS
# value: "route=external"
# action: append
# OpenShift Registry Console Options
# Override the console image prefix for enterprise deployments, not used in origin
# default is "registry.access.redhat.com/openshift3/" and the image appended is "registry-console"
#openshift_cockpit_deployer_prefix=registry.example.com/myrepo/
# Override image version, defaults to latest for origin, matches the product version for enterprise
#openshift_cockpit_deployer_version=1.4.1
# Openshift Registry Options
#
# An OpenShift registry will be created during install if there are
# nodes present with labels matching the default registry selector,
# "region=infra". Set openshift_node_labels per node as needed in
# order to label nodes.
#
# Example:
# [nodes]
# node.example.com openshift_node_labels="{'region': 'infra'}"
#
# Registry selector (optional)
# Registry will only be created if nodes matching this label are present.
# Default value: 'region=infra'
#openshift_hosted_registry_selector='region=infra'
#
# Registry replicas (optional)
# Unless specified, openshift-ansible will calculate the replica count
# based on the number of nodes matching the openshift registry selector.
#openshift_hosted_registry_replicas=2
#
# Validity of the auto-generated certificate in days (optional)
#openshift_hosted_registry_cert_expire_days=730
#
# Disable management of the OpenShift Registry
#openshift_hosted_manage_registry=false
# Registry Storage Options
#
# NFS Host Group
# An NFS volume will be created with path "nfs_directory/volume_name"
# on the host within the [nfs] host group. For example, the volume
# path using these options would be "/exports/registry"
#openshift_hosted_registry_storage_kind=nfs
#openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
#openshift_hosted_registry_storage_nfs_directory=/exports
#openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)'
#openshift_hosted_registry_storage_volume_name=registry
#openshift_hosted_registry_storage_volume_size=10Gi
#
# External NFS Host
# NFS volume must already exist with path "nfs_directory/_volume_name" on
# the storage_host. For example, the remote volume path using these
# options would be "nfs.example.com:/exports/registry"
#openshift_hosted_registry_storage_kind=nfs
#openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
#openshift_hosted_registry_storage_host=ntap-phx2-c01-fedora01-nfs.storage.phx2.redhat.com
#openshift_hosted_registry_storage_nfs_directory=/{{ansible_architecture}}
#openshift_hosted_registry_storage_volume_name=osbs-stg-registry
#openshift_hosted_registry_storage_volume_size=10Gi
#openshift_hosted_registry_storage_kind=nfs
#openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
#openshift_hosted_registry_storage_host=ntap-phx2-c01-fedora01-nfs.storage.phx2.redhat.com
#openshift_hosted_registry_storage_nfs_directory=/{{ansible_architecture}}
#openshift_hosted_registry_storage_volume_name=osbs-prod-registry
#openshift_hosted_registry_storage_volume_size=10Gi
# Openstack
# Volume must already exist.
#openshift_hosted_registry_storage_kind=openstack
#openshift_hosted_registry_storage_access_modes=['ReadWriteOnce']
#openshift_hosted_registry_storage_openstack_filesystem=ext4
#openshift_hosted_registry_storage_openstack_volumeID=3a650b4f-c8c5-4e0a-8ca5-eaee11f16c57
#openshift_hosted_registry_storage_volume_size=10Gi
#
# Native GlusterFS Registry Storage
#openshift_hosted_registry_storage_kind=glusterfs
#
# AWS S3
#
# S3 bucket must already exist.
#openshift_hosted_registry_storage_kind=object
#openshift_hosted_registry_storage_provider=s3
#openshift_hosted_registry_storage_s3_accesskey=aws_access_key_id
#openshift_hosted_registry_storage_s3_secretkey=aws_secret_access_key
#openshift_hosted_registry_storage_s3_bucket=bucket_name
#openshift_hosted_registry_storage_s3_region=bucket_region
#openshift_hosted_registry_storage_s3_chunksize=26214400
#openshift_hosted_registry_storage_s3_rootdirectory=/registry
#openshift_hosted_registry_pullthrough=true
#openshift_hosted_registry_acceptschema2=true
#openshift_hosted_registry_enforcequota=true
#
# Any S3 service (Minio, ExoScale, ...): Basically the same as above
# but with regionendpoint configured
# S3 bucket must already exist.
#openshift_hosted_registry_storage_kind=object
#openshift_hosted_registry_storage_provider=s3
#openshift_hosted_registry_storage_s3_accesskey=access_key_id
#openshift_hosted_registry_storage_s3_secretkey=secret_access_key
#openshift_hosted_registry_storage_s3_regionendpoint=https://myendpoint.example.com/
#openshift_hosted_registry_storage_s3_bucket=bucket_name
#openshift_hosted_registry_storage_s3_region=bucket_region
#openshift_hosted_registry_storage_s3_chunksize=26214400
#openshift_hosted_registry_storage_s3_rootdirectory=/registry
#openshift_hosted_registry_pullthrough=true
#openshift_hosted_registry_acceptschema2=true
#openshift_hosted_registry_enforcequota=true
#
# Additional CloudFront Options. When using CloudFront all three
# of the following variables must be defined.
#openshift_hosted_registry_storage_s3_cloudfront_baseurl=https://myendpoint.cloudfront.net/
#openshift_hosted_registry_storage_s3_cloudfront_privatekeyfile=/full/path/to/secret.pem
#openshift_hosted_registry_storage_s3_cloudfront_keypairid=yourpairid
# Metrics deployment
# See: https://docs.openshift.com/enterprise/latest/install_config/cluster_metrics.html
#
# By default metrics are not automatically deployed, set this to enable them
#
# openshift_hosted_metrics_deploy=true
{% if openshift_metrics_deploy is defined %}
{% if openshift_metrics_deploy %}
#
openshift_hosted_metrics_deploy=false
# Storage Options
# If openshift_hosted_metrics_storage_kind is unset then metrics will be stored
# in an EmptyDir volume and will be deleted when the cassandra pod terminates.
# Storage options A & B currently support only one cassandra pod which is
# generally enough for up to 1000 pods. Additional volumes can be created
# manually after the fact and metrics scaled per the docs.
#
# Option A - NFS Host Group
# An NFS volume will be created with path "nfs_directory/volume_name"
# on the host within the [nfs] host group. For example, the volume
# path using these options would be "/exports/metrics"
#openshift_hosted_metrics_storage_kind=nfs
#openshift_hosted_metrics_storage_access_modes=['ReadWriteOnce']
#openshift_hosted_metrics_storage_nfs_directory=/exports
#openshift_hosted_metrics_storage_nfs_options='*(rw,root_squash)'
#openshift_hosted_metrics_storage_volume_name=metrics
#openshift_hosted_metrics_storage_volume_size=10Gi
#
# Option B - External NFS Host
# NFS volume must already exist with path "nfs_directory/_volume_name" on
# the storage_host. For example, the remote volume path using these
# options would be "nfs.example.com:/exports/metrics"
#openshift_hosted_metrics_storage_kind=nfs
#openshift_hosted_metrics_storage_access_modes=['ReadWriteOnce']
#openshift_hosted_metrics_storage_host=nfs.example.com
#openshift_hosted_metrics_storage_nfs_directory=/exports
#openshift_hosted_metrics_storage_volume_name=metrics
#openshift_hosted_metrics_storage_volume_size=10Gi
#
# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
# your cloud platform use this.
#openshift_hosted_metrics_storage_kind=dynamic
#
# Other Metrics Options -- Common items you may wish to reconfigure, for the complete
# list of options please see roles/openshift_metrics/README.md
#
# Override metricsPublicURL in the master config for cluster metrics
# Defaults to https://hawkular-metrics.openshift_master_default_subdomain/hawkular/metrics
# Currently, you may only alter the hostname portion of the url; altering the
# `/hawkular/metrics` path will break installation of metrics.
#openshift_hosted_metrics_public_url=https://hawkular-metrics.{{openshift_cluster_url}}/hawkular/metrics
{% endif %}
{% endif %}
# Logging deployment
#
# Currently logging deployment is disabled by default, enable it by setting this
#openshift_hosted_logging_deploy=true
#
# Logging storage config
# Option A - NFS Host Group
# An NFS volume will be created with path "nfs_directory/volume_name"
# on the host within the [nfs] host group. For example, the volume
# path using these options would be "/exports/logging"
#openshift_hosted_logging_storage_kind=nfs
#openshift_hosted_logging_storage_access_modes=['ReadWriteOnce']
#openshift_hosted_logging_storage_nfs_directory=/exports
#openshift_hosted_logging_storage_nfs_options='*(rw,root_squash)'
#openshift_hosted_logging_storage_volume_name=logging
#openshift_hosted_logging_storage_volume_size=10Gi
#
# Option B - External NFS Host
# NFS volume must already exist with path "nfs_directory/_volume_name" on
# the storage_host. For example, the remote volume path using these
# options would be "nfs.example.com:/exports/logging"
#openshift_hosted_logging_storage_kind=nfs
#openshift_hosted_logging_storage_access_modes=['ReadWriteOnce']
#openshift_hosted_logging_storage_host=nfs.example.com
#openshift_hosted_logging_storage_nfs_directory=/exports
#openshift_hosted_logging_storage_volume_name=logging
#openshift_hosted_logging_storage_volume_size=10Gi
#
# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
# your cloud platform use this.
#openshift_hosted_logging_storage_kind=dynamic
#
# Option D - none -- Logging will use emptydir volumes which are destroyed when
# pods are deleted
#
# Other Logging Options -- Common items you may wish to reconfigure, for the complete
# list of options please see roles/openshift_logging/README.md
#
# Configure loggingPublicURL in the master config for aggregate logging, defaults
# to kibana.openshift_master_default_subdomain
#openshift_hosted_logging_hostname=logging.apps.example.com
# Configure the number of elastic search nodes, unless you're using dynamic provisioning
# this value must be 1
#openshift_hosted_logging_elasticsearch_cluster_size=1
# Configure the prefix and version for the component images
#openshift_hosted_logging_deployer_prefix=registry.example.com:8888/openshift3/
#openshift_hosted_logging_deployer_version=3.5.0
# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
# Disable the OpenShift SDN plugin
# openshift_use_openshift_sdn=False
# Configure SDN cluster network and kubernetes service CIDR blocks. These
# network blocks should be private and should not conflict with network blocks
# in your infrastructure that pods may require access to. Can not be changed
# after deployment.
#
# WARNING : Do not pick subnets that overlap with the default Docker bridge subnet of
# 172.17.0.0/16. Your installation will fail and/or your configuration change will
# cause the Pod SDN or Cluster SDN to fail.
#
# WORKAROUND : If you must use an overlapping subnet, you can configure a non conflicting
# docker0 CIDR range by adding '--bip=192.168.2.1/24' to DOCKER_NETWORK_OPTIONS
# environment variable located in /etc/sysconfig/docker-network.
#osm_cluster_network_cidr=10.128.0.0/14
#openshift_portal_net=172.30.0.0/16
# ExternalIPNetworkCIDRs controls what values are acceptable for the
# service external IP field. If empty, no externalIP may be set. It
# may contain a list of CIDRs which are checked for access. If a CIDR
# is prefixed with !, IPs in that CIDR will be rejected. Rejections
# will be applied first, then the IP checked against one of the
# allowed CIDRs. You should ensure this range does not overlap with
# your nodes, pods, or service CIDRs for security reasons.
#openshift_master_external_ip_network_cidrs=['0.0.0.0/0']
# IngressIPNetworkCIDR controls the range to assign ingress IPs from for
# services of type LoadBalancer on bare metal. If empty, ingress IPs will not
# be assigned. It may contain a single CIDR that will be allocated from. For
# security reasons, you should ensure that this range does not overlap with
# the CIDRs reserved for external IPs, nodes, pods, or services.
#openshift_master_ingress_ip_network_cidr=172.46.0.0/16
# Configure number of bits to allocate to each host's subnet e.g. 9
# would mean a /23 network on the host.
#osm_host_subnet_length=9
# Configure master API and console ports.
# These will default to 8443
{% if openshift_api_port is defined and openshift_console_port is defined %}
{% if openshift_api_port and openshift_console_port %}
openshift_master_api_port={{openshift_api_port}}
openshift_master_console_port={{openshift_console_port}}
{% endif %}
{% endif %}
# set RPM version for debugging purposes
#openshift_pkg_version=-3.1.0.0
# Configure custom ca certificate
#openshift_master_ca_certificate={'certfile': '/path/to/ca.crt', 'keyfile': '/path/to/ca.key'}
#
# NOTE: The CA certificate will not be replaced on existing clusters.
# This option may only be specified when creating a new cluster or
# when redeploying cluster certificates with the redeploy-certificates
# playbook.
# Configure custom named certificates (SNI certificates)
#
# https://docs.openshift.com/enterprise/latest/install_config/certificate_customization.html
#
# NOTE: openshift_master_named_certificates is cached on masters and is an
# additive fact, meaning that each run with a different set of certificates
# will add the newly provided certificates to the cached set of certificates.
#
# An optional CA may be specified for each named certificate. CAs will
# be added to the OpenShift CA bundle which allows for the named
# certificate to be served for internal cluster communication.
#
# If you would like openshift_master_named_certificates to be overwritten with
# the provided value, specify openshift_master_overwrite_named_certificates.
#openshift_master_overwrite_named_certificates=true
#
# Provide local certificate paths which will be deployed to masters
#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "cafile": "/path/to/custom-ca1.crt"}]
#
# Detected names may be overridden by specifying the "names" key
#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "names": ["public-master-host.com"], "cafile": "/path/to/custom-ca1.crt"}]
# Session options
#openshift_master_session_name=ssn
#openshift_master_session_max_seconds=3600
# An authentication and encryption secret will be generated if secrets
# are not provided. If provided, openshift_master_session_auth_secrets
# and openshift_master_session_encryption_secrets must be lists of equal length.
#
# Signing secrets, used to authenticate sessions using
# HMAC. Recommended to use secrets with 32 or 64 bytes.
#openshift_master_session_auth_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO']
#
# Encrypting secrets, used to encrypt sessions. Must be 16, 24, or 32
# characters long, to select AES-128, AES-192, or AES-256.
#openshift_master_session_encryption_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO']
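# For example (placeholder strings, do not reuse): one-element lists satisfy the
# equal-length requirement on the two lists, and 32-character secrets select AES-256:
#   openshift_master_session_auth_secrets=['REPLACE+WITH+32+BYTE+SECRET+AAAA']
#   openshift_master_session_encryption_secrets=['REPLACE+WITH+32+CHAR+SECRET+BBBB']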
# Configure how often node iptables rules are refreshed
#openshift_node_iptables_sync_period=5s
# Configure nodeIP in the node config
# This is needed when node traffic should go over an
# interface other than the default network interface.
#openshift_set_node_ip=True
# Force setting of system hostname when configuring OpenShift
# This works around issues related to installations that do not have valid DNS
# entries for the interfaces attached to the host.
#openshift_set_hostname=True
# Configure dnsIP in the node config
#openshift_dns_ip=172.30.0.1
# Configure node kubelet arguments. pods-per-core is valid in OpenShift Origin 1.3 or OpenShift Container Platform 3.3 and later.
#openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['90'], 'image-gc-low-threshold': ['80']}
# Configure logrotate scripts
# See: https://github.com/nickhammond/ansible-logrotate
#logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}]
# openshift-ansible will wait indefinitely for your input when it detects that the
# value of openshift_hostname resolves to an IP address not bound to any local
# interfaces. This misconfiguration is problematic for any pod leveraging host
# networking and liveness or readiness probes.
# Setting this variable to true will override that check.
#openshift_override_hostname_check=true
# Configure dnsmasq for cluster DNS, switch the host's local resolver to use dnsmasq,
# and configure each node's dnsIP to point at the node's local dnsmasq instance. Defaults
# to True for Origin 1.2 and OSE 3.2, and False for 1.1 / 3.1 installs; this cannot
# be used with 1.0 and 3.0.
#openshift_use_dnsmasq=False
# Define an additional dnsmasq.conf file to deploy to /etc/dnsmasq.d/openshift-ansible.conf
# This is useful for POC environments where DNS may not actually be available yet,
# or to set options like 'strict-order' to alter the dnsmasq configuration.
#openshift_node_dnsmasq_additional_config_file=/home/bob/ose-dnsmasq.conf
# Global Proxy Configuration
# These options configure HTTP_PROXY, HTTPS_PROXY, and NOPROXY environment
# variables for docker and master services.
#openshift_http_proxy=http://USER:PASSWORD@IPADDR:PORT
#openshift_https_proxy=https://USER:PASSWORD@IPADDR:PORT
#openshift_no_proxy='.hosts.example.com,some-host.com'
#
# Most environments don't require a proxy between OpenShift masters, nodes, and
# etcd hosts, so those hostnames are automatically added to the openshift_no_proxy
# list. If all of your hosts share a common domain, you may wish to disable this
# and specify that domain above.
#openshift_generate_no_proxy_hosts=True
#
# These options configure the BuildDefaults admission controller which injects
# configuration into Builds. Proxy related values will default to the global proxy
# config values. You only need to set these if they differ from the global proxy settings.
# See BuildDefaults documentation at
# https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html
#openshift_builddefaults_http_proxy=http://USER:PASSWORD@HOST:PORT
#openshift_builddefaults_https_proxy=https://USER:PASSWORD@HOST:PORT
#openshift_builddefaults_no_proxy=mycorp.com
#openshift_builddefaults_git_http_proxy=http://USER:PASSWORD@HOST:PORT
#openshift_builddefaults_git_https_proxy=https://USER:PASSWORD@HOST:PORT
#openshift_builddefaults_git_no_proxy=mycorp.com
#openshift_builddefaults_image_labels=[{'name':'imagelabelname1','value':'imagelabelvalue1'}]
#openshift_builddefaults_nodeselectors={'nodelabel1':'nodelabelvalue1'}
#openshift_builddefaults_annotations={'annotationkey1':'annotationvalue1'}
#openshift_builddefaults_resources_requests_cpu=100m
#openshift_builddefaults_resources_requests_memory=256Mi
#openshift_builddefaults_resources_limits_cpu=1000m
#openshift_builddefaults_resources_limits_memory=512Mi
# Or you may optionally define your own build defaults configuration serialized as json
#openshift_builddefaults_json='{"BuildDefaults":{"configuration":{"apiVersion":"v1","env":[{"name":"HTTP_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"NO_PROXY","value":"ose3-master.example.com"}],"gitHTTPProxy":"http://proxy.example.com:3128","gitNoProxy":"ose3-master.example.com","kind":"BuildDefaultsConfig"}}}'
# These options configure the BuildOverrides admission controller which injects
# configuration into Builds.
# See BuildOverrides documentation at
# https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html
#openshift_buildoverrides_force_pull=true
#openshift_buildoverrides_image_labels=[{'name':'imagelabelname1','value':'imagelabelvalue1'}]
#openshift_buildoverrides_nodeselectors={'nodelabel1':'nodelabelvalue1'}
#openshift_buildoverrides_annotations={'annotationkey1':'annotationvalue1'}
# Or you may optionally define your own build overrides configuration serialized as json
#openshift_buildoverrides_json='{"BuildOverrides":{"configuration":{"apiVersion":"v1","kind":"BuildDefaultsConfig","forcePull":"true"}}}'
# masterConfig.volumeConfig.dynamicProvisioningEnabled, configurable as of 1.2/3.2, enabled by default
#openshift_master_dynamic_provisioning_enabled=False
# Admission plugin config
#openshift_master_admission_plugin_config={"ProjectRequestLimit":{"configuration":{"apiVersion":"v1","kind":"ProjectRequestLimitConfig","limits":[{"selector":{"admin":"true"}},{"maxProjects":"1"}]}},"PodNodeConstraints":{"configuration":{"apiVersion":"v1","kind":"PodNodeConstraintsConfig"}}}
# Configure usage of openshift_clock role.
#openshift_clock_enabled=true
# OpenShift Per-Service Environment Variables
# Environment variables are added to /etc/sysconfig files for
# each OpenShift service: node, master (api and controllers).
# API and controllers environment variables are merged in single
# master environments.
{% if no_http2 is defined %}
{% if no_http2 %}
openshift_master_api_env_vars={"ENABLE_HTTP2": "true"}
openshift_master_controllers_env_vars={"ENABLE_HTTP2": "true"}
openshift_node_env_vars={"ENABLE_HTTP2": "true"}
{% endif %}
{% endif %}
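# As a sketch of the effect (assuming the usual sysconfig handling in
# openshift-ansible): with no_http2 defined and truthy, each service's
# /etc/sysconfig file would gain a line such as
#   ENABLE_HTTP2=true
# for the master API, master controllers, and node services alike.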
# Enable API service auditing, available as of 3.2
#openshift_master_audit_config={"enabled": true}
# Validity of the auto-generated OpenShift certificates in days.
# See also openshift_hosted_registry_cert_expire_days above.
#
#openshift_ca_cert_expire_days=1825
#openshift_node_cert_expire_days=730
#openshift_master_cert_expire_days=730
# Validity of the auto-generated external etcd certificates in days.
# Controls validity for etcd CA, peer, server and client certificates.
#
#etcd_ca_default_days=1825
# NOTE: Currently we require that masters be part of the SDN, which requires that they also be nodes.
# However, to ensure that your masters are not burdened with running pods, you should
# make them unschedulable by adding openshift_schedulable=False to any node that's also a master.


@@ -235,14 +235,6 @@ backend pdc-backend
timeout server 3600000
timeout connect 3600000
frontend osbs-frontend
bind 0.0.0.0:10047
default_backend osbs-backend
backend osbs-backend
balance hdr(appserver)
server osbs-master01 osbs-master01:8443 check inter 10s rise 1 fall 2 check ssl verify none
frontend oci-registry-frontend
bind 0.0.0.0:10048
default_backend oci-registry-backend


@@ -16,8 +16,6 @@ RewriteCond %{HTTP:VIA} !cloudfront
RewriteCond %{SERVER_NAME} !^registry-no-cdn\.fedoraproject\.org$
# We don't want some methods to go to the cdn so we can update it
RewriteCond %{REQUEST_METHOD} !^(PATCH|POST|PUT|DELETE|HEAD)$
# osbs hosts shouldn't use the cdn
RewriteCond %{REMOTE_HOST} !^osbs-*$
# builders shouldn't use the cdn for flatpak building.
RewriteCond expr "! -R '10.3.169.0/24'"
RewriteCond expr "! -R '10.3.170.0/24'"


@@ -278,16 +278,6 @@
tags:
- koji_builder
# non-bkernel x86_64 builders run container_build, which needs osbs
- name: special pkgs for the x86_64 builders
package:
state: present
name:
- python3-osbs-client.noarch
when: "ansible_architecture == 'x86_64' and not inventory_hostname.startswith('bkernel')"
tags:
- koji_builder
# Before, the builders had the "apache" role. This is a temporary play to remove the httpd daemon everywhere
- name: Uninstall httpd
package: name=httpd


@@ -56,7 +56,6 @@
- check_readonly_fs
- check_lock_file_age
- check_testcloud
- check_osbs_api.py
- check_ipa_replication
- check_redis_queue.sh
- check_timestamp_from_file
@@ -160,7 +159,6 @@
- check_lock_file_age.cfg
- check_basset.cfg
- check_fmn.cfg
- check_osbs.cfg
- check_testcloud.cfg
- check_mirrorlist_docker_proxy.cfg
- check_mirrorlist_cache.cfg


@@ -1 +0,0 @@
command[check_osbs_api]={{ libdir }}/nagios/plugins/check_osbs_api.py


@@ -1,7 +0,0 @@
define service {
host_name osbs-master01.iad2.fedoraproject.org
service_description Check OSBS API endpoint paths
check_command check_by_nrpe!check_osbs_api
max_check_attempts 5
use defaulttemplate
}
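For context, a sketch of the check_by_nrpe command this service references (a
typical Nagios wrapper around check_nrpe, assumed rather than taken from this
repository):

define command {
        command_name    check_by_nrpe
        command_line    $USER1$/check_nrpe -H $HOSTADDRESS$ -c $ARG1$
}

Here check_osbs_api is passed as $ARG1$, matching the NRPE command defined in
the nrpe fragment above.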


@@ -191,7 +191,6 @@
- locking.cfg
- mailman.cfg
- nrpe.cfg
- osbs.cfg
- pgsql.cfg
tags:
- nagios_config
@@ -213,7 +212,6 @@
- locking.cfg
- mailman.cfg
- nrpe.cfg
- osbs.cfg
- pgsql.cfg
- rabbitmq.cfg
tags:


@@ -6,7 +6,6 @@ odcs_pdc_develop: True
odcs_target_dir: /srv/odcs
odcs_target_dir_url: http://{{ inventory_hostname }}/composes
odcs_allowed_clients_groups: {"sysadmin-odcs": {}, "pungi-devel": {}, "packager": {"source_types": ["module"]}}
odcs_allowed_clients_users: {"osbs@service": {}}
odcs_admin_groups: ["sysadmin-odcs", "pungi-devel"]
odcs_admin_users: []
odcs_raw_config_urls: {}


@@ -1,46 +0,0 @@
---
# defaults file for osbs-client
#
# config file path
osbs_client_conf_path: /etc/osbs.conf
# Settings for the [general] section of the osbs.conf file
general:
verbose: 1
build_json_dir: /usr/share/osbs/
openshift_required_version: 1.0.8
# Settings for the [default] section of the osbs.conf file
default:
username: ""
password: ""
openshift_url: https://osbs.localdomain:8443/
koji_root: http://koji.fedoraproject.org/koji
koji_hub: http://koji.fedoraproject.org/kojihub
sources_command: fedpkg sources
build_type: prod
registry_uri: https://osbs.localdomain:5000/v2
source_registry_uri: https://osbs.localdomain:5000/v2
vendor: Fedora Project
build_host: osbs.localdomain
verify_ssl: false
use_auth: false
builder_use_auth: true
registry_api_versions: v2
builder_openshift_url: https://172.17.0.1:8443/
koji_certs_secret: ""
koji_use_kerberos: false
koji_kerberos_keytab: ""
koji_kerberos_principal: ""
use_kerberos: false
kerberos_keytab: ""
kerberos_principal: ""
registry_secret_name: ""
builder_odcs_url: ""
builder_odcs_insecure: true
builder_odcs_openidc_secret: ""
builder_pdc_url: ""
builder_pdc_insecure: true
flatpak_base_image: ""


@@ -1,7 +0,0 @@
The site-customize file here additionally disables or enables plugins on top of
the default set.
The default set ships with osbs-client and can be found here:
https://github.com/projectatomic/osbs-client/blob/master/inputs/prod_inner.json
See also: https://github.com/projectatomic/osbs-client/blob/master/docs/build_process.md


@@ -1,11 +0,0 @@
{
"disable_plugins": [
{
"plugin_type": "exit_plugins",
"plugin_name": "import_image"
}
],
"enable_plugins": [
]
}


@@ -1,44 +0,0 @@
{
"disable_plugins": [
{
"plugin_type": "postbuild_plugins",
"plugin_name": "pulp_push"
},
{
"plugin_type": "postbuild_plugins",
"plugin_name": "pulp_sync"
},
{
"plugin_type": "postbuild_plugins",
"plugin_name": "pulp_pull"
},
{
"plugin_type": "prebuild_plugins",
"plugin_name": "resolve_module_compose"
},
{
"plugin_type": "prebuild_plugins",
"plugin_name": "flatpak_create_dockerfile"
},
{
"plugin_type": "prepublish_plugins",
"plugin_name": "flatpak_create_oci"
},
{
"plugin_type": "postbuild_plugins",
"plugin_name": "import_image"
}
],
"enable_plugins": [
{
"plugin_type": "postbuild_plugins",
"plugin_name": "tag_and_push",
"plugin_args": {
"registries": {
"{{REGISTRY_URI}}": { "insecure": false }
}
}
}
]
}


@@ -1,12 +0,0 @@
---
# tasks file for osbs-client
- name: install osbs-client package
action: "{{ ansible_pkg_mgr }} name=osbs-client state=present"
tags:
- osbs-client
- name: apply osbs-client templated config
template: src=osbs.conf.j2 dest={{ osbs_client_conf_path }} mode=0640
tags:
- osbs-client
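The same tasks in current Ansible module syntax (a sketch with equivalent
behavior; the role used the older inline action form):

- name: install osbs-client package
  package:
    name: osbs-client
    state: present
  tags:
  - osbs-client

- name: apply osbs-client templated config
  template:
    src: osbs.conf.j2
    dest: "{{ osbs_client_conf_path }}"
    mode: "0640"
  tags:
  - osbs-client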


@@ -1,170 +0,0 @@
[general]
verbose = {{ general.verbose }}
build_json_dir = {{ general.build_json_dir }}
openshift_required_version = {{ general.openshift_required_version }}
[default]
{% if default.username is defined %}
username = {{ default.username }}
{% endif %}
{% if default.password is defined %}
password = {{ default.password }}
{% endif %}
{% if default.koji_certs_secret != "" %}
koji_certs_secret = {{ default.koji_certs_secret }}
{% endif %}
{% if default.koji_use_kerberos is defined %}
koji_use_kerberos = {{ default.koji_use_kerberos }}
{% endif %}
{% if default.koji_kerberos_keytab is defined %}
koji_kerberos_keytab = {{ default.koji_kerberos_keytab }}
{% endif %}
{% if default.koji_kerberos_principal is defined %}
koji_kerberos_principal = {{ default.koji_kerberos_principal }}
{% endif %}
{% if default.use_kerberos is defined %}
use_kerberos = {{ default.use_kerberos }}
{% endif %}
{% if default.kerberos_keytab is defined %}
kerberos_keytab = {{ default.kerberos_keytab }}
{% endif %}
{% if default.kerberos_principal is defined %}
kerberos_principal = {{ default.kerberos_principal }}
{% endif %}
{% if default.token_file is defined %}
token_file = {{ default.token_file }}
{% endif %}
{% if default.can_orchestrate is defined %}
# Orchestrator/Worker Architecture split additions
can_orchestrate = {{ default.can_orchestrate }}
{% endif %}
{% if default.namespace is defined %}
namespace = {{ default.namespace }}
{% endif %}
{% if default.client_config_secret is defined %}
client_config_secret = {{ default.client_config_secret }}
{% endif %}
{% if default.reactor_config_secret is defined %}
reactor_config_secret = {{ default.reactor_config_secret }}
{% endif %}
{% if default.token_secrets is defined %}
token_secrets = {{ default.token_secrets }}
{% endif %}
openshift_url = {{ default.openshift_url }}
koji_root = {{ default.koji_root }}
koji_hub = {{ default.koji_hub }}
sources_command = {{ default.sources_command }}
build_type = {{ default.build_type }}
registry_uri = {{ default.registry_uri }}
source_registry_uri = {{ default.source_registry_uri }}
vendor = {{ default.vendor }}
build_host = {{ default.build_host }}
verify_ssl = {{ default.verify_ssl }}
use_auth = {{ default.use_auth }}
builder_use_auth = {{ default.builder_use_auth }}
registry_api_versions = {{ default.registry_api_versions }}
{% if default.registry_secret_name %}
registry_secret = {{ default.registry_secret_name }}
{% endif %}
builder_openshift_url = {{ default.builder_openshift_url }}
{% if default.builder_odcs_url %}
odcs_url = {{ default.builder_odcs_url }}
odcs_insecure = {{ default.builder_odcs_insecure }}
odcs_openidc_secret = {{ default.builder_odcs_openidc_secret }}
{% endif %}
{% if default.reactor_config_map is defined %}
reactor_config_map = {{ default.reactor_config_map }}
{% endif %}
{% if default.build_from is defined %}
build_from = {{ default.build_from }}
{% endif %}
[scratch]
scratch = true
{% if default.username is defined %}
username = {{ default.username }}
{% endif %}
{% if default.password is defined %}
password = {{ default.password }}
{% endif %}
{% if default.koji_certs_secret != "" %}
koji_certs_secret = {{ default.koji_certs_secret }}
{% endif %}
{% if default.koji_use_kerberos is defined %}
koji_use_kerberos = {{ default.koji_use_kerberos }}
{% endif %}
{% if default.koji_kerberos_keytab is defined %}
koji_kerberos_keytab = {{ default.koji_kerberos_keytab }}
{% endif %}
{% if default.koji_kerberos_principal is defined %}
koji_kerberos_principal = {{ default.koji_kerberos_principal }}
{% endif %}
{% if default.use_kerberos is defined %}
use_kerberos = {{ default.use_kerberos }}
{% endif %}
{% if default.kerberos_keytab is defined %}
kerberos_keytab = {{ default.kerberos_keytab }}
{% endif %}
{% if default.kerberos_principal is defined %}
kerberos_principal = {{ default.kerberos_principal }}
{% endif %}
{% if default.token_file is defined %}
token_file = {{ default.token_file }}
{% endif %}
{% if default.can_orchestrate is defined %}
# Orchestrator/Worker Architecture split additions
can_orchestrate = {{ default.can_orchestrate }}
{% endif %}
{% if default.namespace is defined %}
namespace = {{ default.namespace }}
{% endif %}
{% if default.client_config_secret is defined %}
client_config_secret = {{ default.client_config_secret }}
{% endif %}
{% if default.reactor_config_secret is defined %}
reactor_config_secret = {{ default.reactor_config_secret }}
{% endif %}
{% if default.token_secrets is defined %}
token_secrets = {{ default.token_secrets }}
{% endif %}
openshift_url = {{ default.openshift_url }}
koji_root = {{ default.koji_root }}
koji_hub = {{ default.koji_hub }}
sources_command = {{ default.sources_command }}
build_type = {{ default.build_type }}
registry_uri = {{ default.registry_uri }}
source_registry_uri = {{ default.source_registry_uri }}
vendor = {{ default.vendor }}
build_host = {{ default.build_host }}
verify_ssl = {{ default.verify_ssl }}
use_auth = {{ default.use_auth }}
builder_use_auth = {{ default.builder_use_auth }}
registry_api_versions = {{ default.registry_api_versions }}
{% if default.registry_secret_name %}
registry_secret = {{ default.registry_secret_name }}
{% endif %}
builder_openshift_url = {{ default.builder_openshift_url }}
unique_tag_only = true
{% if default.builder_odcs_url %}
odcs_url = {{ default.builder_odcs_url }}
odcs_insecure = {{ default.builder_odcs_insecure }}
odcs_openidc_secret = {{ default.builder_odcs_openidc_secret }}
{% endif %}
{% if default.reactor_config_map_scratch is defined %}
reactor_config_map = {{ default.reactor_config_map_scratch }}
{% endif %}
{% if default.build_from is defined %}
build_from = {{ default.build_from }}
{% endif %}
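Rendered against the role defaults shown earlier (an abridged sketch; real
deployments override the empty credentials and the localdomain URLs, and the
kerberos and secret options are omitted here), the top of the generated
osbs.conf would look like:

[general]
verbose = 1
build_json_dir = /usr/share/osbs/
openshift_required_version = 1.0.8

[default]
username =
password =
openshift_url = https://osbs.localdomain:8443/
koji_root = http://koji.fedoraproject.org/koji
koji_hub = http://koji.fedoraproject.org/kojihub
sources_command = fedpkg sources
build_type = prod
vendor = Fedora Project
verify_ssl = False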


@@ -1,165 +0,0 @@
osbs-namespace
==============
Set up an OpenShift namespace as required by OSBS:
- Create namespace, also referred to as project (`osbs_namespace`)
- Create service accounts (`osbs_service_accounts`)
If the user is a cluster admin (`osbs_is_admin`), the following is also performed:
- Create policy binding
- Create osbs-custom-build role to allow custom builds
- Set up rolebindings for specified users, groups, and service accounts
For orchestrator namespaces (`osbs_orchestrator`):
- reactor-config-secret is generated and stored in `osbs_generated_config_path`;
  use osbs-secret to import it
- client-config-secret is generated and stored in `osbs_generated_config_path`;
  use osbs-secret to import it
Requirements
------------
A running instance of OpenShift.
Role Variables
--------------
# Namespace name to be used
osbs_namespace: 'my-namespace'
# Is the user running the playbook as a cluster admin?
osbs_is_admin: true
# Will the namespace be used for orchestrator builds?
osbs_orchestrator: true
# Worker clusters to be used for generating reactor and client config secrets
# in orchestrator workspace
osbs_worker_clusters:
x86_64:
- name: prod-first-x86_64
max_concurrent_builds: 6
openshift_url: https://my-first-x86_64-cluster.fedoraproject.org:8443
- name: prod-second-x86_64
max_concurrent_builds: 16
openshift_url: https://my-second-x86_64-cluster.fedoraproject.org
# optional params, and their defaults:
enabled: true # yaml boolean
namespace: worker
use_auth: 'true' # yaml string
verify_ssl: 'true' # yaml string
ppc64le:
- name: prod-ppc64le
max_concurrent_builds: 6
openshift_url: https://my-ppc64le-cluster.fedoraproject.org:8443
# Reactor config maps to be created in orchestrator namespace
osbs_reactor_config_maps:
- name: reactor-config-map
# See config.json schema in atomic-reactor project for details:
# https://github.com/projectatomic/atomic-reactor/blob/master/atomic_reactor/schemas/config.json
data:
clusters:
x86_64:
- enabled: true
max_concurrent_builds: 10
name: x86_64-on-premise
version: 1
# Service accounts to be created - these accounts will also be bound to
# edit clusterrole and osbs-custom-build role in specified namespace
osbs_service_accounts:
- bot
- ci
# Users and groups to be assigned view clusterrole in specified namespace
osbs_readonly_groups:
- group1
- group2
osbs_readonly_users:
- user1
- user2
# Users and groups to be assigned edit clusterrole and osbs-custom-build
# role in specified namespace
osbs_readwrite_groups:
- group1
- group2
osbs_readwrite_users:
- user1
- user2
# Users and groups to be assigned admin clusterrole and osbs-custom-build
# role in specified namespace
osbs_admin_groups:
- group1
- group2
osbs_admin_users:
- user1
- user2
# Users and groups to be assigned cluster-reader clusterrole cluster wide
osbs_cluster_reader_groups:
- group1
- group2
osbs_cluster_reader_users:
- user1
- user2
# Koji integration
osbs_koji_hub: https://koji.fedoraproject.org/kojihub # Empty default value
osbs_koji_root: https://koji.fedoraproject.org # Empty default value
# Pulp integration
osbs_pulp_secret_name: pulpsecret
osbs_pulp_registry_name: brew-qa # Empty default value
# Distribution registry integration
osbs_registry_secret_name: v2-registry-dockercfg
osbs_registry_api_version:
- v1
- v2
osbs_registry_uri: https://distribution.registry.fedoraproject.org/v2 # Empty default value
# Dist-git integration
osbs_sources_command: fedpkg sources
osbs_source_registry_uri: https://source.registry.fedoraproject.org # Empty default value
# Pruning
osbs_prune: false
osbs_prune_schedule: '0 0 */8 * *'
osbs_prune_secret: ''
osbs_prune_image: ''
osbs_prune_commands: ["/prune.sh"]
For a full list, see defaults/main.yml
Dependencies
------------
None.
Example Playbook
----------------
- name: setup worker namespace
hosts: master
roles:
- role: osbs-namespace
osbs_namespace: worker
- name: setup orchestrator namespace
hosts: master
roles:
- role: osbs-namespace
osbs_namespace: orchestrator
osbs_orchestrator: true
License
-------
BSD
Author Information
------------------
Luiz Carvalho <lui@redhat.com>
