Fix up healthcheck generation for workers docker image (#12405)

This wasn't quite generating the right thing.
Richard van der Hoff 2022-04-11 14:38:58 +01:00 committed by GitHub
parent 961ee75a9b
commit 3cdf5a1386
4 changed files with 11 additions and 11 deletions
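
In outline, the fix moves healthcheck URL collection out of the nginx upstream loop and into the per-worker loop, so every worker port ends up in the generated healthcheck rather than only the ports that sit behind an nginx upstream (the main process was already covered). A minimal, self-contained Python sketch of the corrected collection logic (a hypothetical helper, not the real generate_worker_files; port numbers are illustrative):

# Hypothetical, simplified illustration of the fixed behaviour: the main
# process is always checked, and every worker contributes its own /health
# endpoint as its port is assigned.
from typing import List


def collect_healthcheck_urls(worker_ports: List[int]) -> List[str]:
    # Start with the main process, which exists even if no workers do.
    urls = ["http://localhost:8080/health"]
    for worker_port in worker_ports:
        urls.append("http://localhost:%d/health" % (worker_port,))
    return urls


# Example: three workers on consecutive ports (illustrative values).
print(collect_healthcheck_urls([18009, 18010, 18011]))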

changelog.d/12405.misc (new file)
@@ -0,0 +1 @@
+Fix up healthcheck generation for workers docker image.

@@ -14,9 +14,6 @@ COPY ./docker/conf-workers/* /conf/
 # Expose nginx listener port
 EXPOSE 8080/tcp
 
-# Volume for user-editable config files, logs etc.
-VOLUME ["/data"]
-
 # A script to read environment variables and create the necessary
 # files to run the desired worker configuration. Will start supervisord.
 COPY ./docker/configure_workers_and_start.py /configure_workers_and_start.py

@@ -308,7 +308,7 @@ def generate_worker_files(environ, config_path: str, data_dir: str):
 
     Args:
         environ: _Environ[str]
-        config_path: Where to output the generated Synapse main worker config file.
+        config_path: The location of the generated Synapse main worker config file.
         data_dir: The location of the synapse data directory. Where log and
             user-facing config files live.
     """
@ -321,7 +321,8 @@ def generate_worker_files(environ, config_path: str, data_dir: str):
# and adding a replication listener. # and adding a replication listener.
# First read the original config file and extract the listeners block. Then we'll add # First read the original config file and extract the listeners block. Then we'll add
# another listener for replication. Later we'll write out the result. # another listener for replication. Later we'll write out the result to the shared
# config file.
listeners = [ listeners = [
{ {
"port": 9093, "port": 9093,
@@ -387,6 +388,10 @@ def generate_worker_files(environ, config_path: str, data_dir: str):
     # worker_type + instance #
     worker_type_counter: Dict[str, int] = {}
 
+    # A list of internal endpoints to healthcheck, starting with the main process
+    # which exists even if no workers do.
+    healthcheck_urls = ["http://localhost:8080/health"]
+
     # For each worker type specified by the user, create config values
     for worker_type in worker_types:
         worker_type = worker_type.strip()
@@ -411,6 +416,8 @@ def generate_worker_files(environ, config_path: str, data_dir: str):
         # Update the shared config with any worker-type specific options
         shared_config.update(worker_config["shared_extra_conf"])
 
+        healthcheck_urls.append("http://localhost:%d/health" % (worker_port,))
+
         # Check if more than one instance of this worker type has been specified
         worker_type_total_count = worker_types.count(worker_type)
         if worker_type_total_count > 1:
@@ -476,15 +483,10 @@ def generate_worker_files(environ, config_path: str, data_dir: str):
     # Determine the load-balancing upstreams to configure
     nginx_upstream_config = ""
 
-    # At the same time, prepare a list of internal endpoints to healthcheck
-    # starting with the main process which exists even if no workers do.
-    healthcheck_urls = ["http://localhost:8080/health"]
-
     for upstream_worker_type, upstream_worker_ports in nginx_upstreams.items():
         body = ""
         for port in upstream_worker_ports:
             body += " server localhost:%d;\n" % (port,)
-            healthcheck_urls.append("http://localhost:%d/health" % (port,))
 
         # Add to the list of configured upstreams
         nginx_upstream_config += NGINX_UPSTREAM_CONFIG_BLOCK.format(
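
Before this change, healthcheck_urls was only populated inside the nginx upstream loop above, so worker ports that never appeared in an nginx upstream got no healthcheck entry; now the list is built as each worker's config is generated. The resulting list presumably gets rendered into a small script for the image's Docker HEALTHCHECK to run. A hedged sketch of such a rendering step (the output path, script contents and curl flags are assumptions for illustration, not taken from this repository):

# Hypothetical rendering of healthcheck_urls into a shell script that a
# Docker HEALTHCHECK instruction could invoke. The path and curl options
# are assumptions, not the image's real template.
from typing import List


def write_healthcheck_script(healthcheck_urls: List[str], path: str = "healthcheck.sh") -> None:
    lines = ["#!/bin/sh", "# Exit non-zero if any endpoint is unhealthy."]
    for url in healthcheck_urls:
        lines.append("curl -fSs %s || exit 1" % (url,))
    with open(path, "w") as f:
        f.write("\n".join(lines) + "\n")


# Example usage (illustrative URLs):
# write_healthcheck_script(
#     ["http://localhost:8080/health", "http://localhost:18009/health"]
# )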

@@ -52,7 +52,7 @@ if [[ -n "$WORKERS" ]]; then
   COMPLEMENT_DOCKERFILE=SynapseWorkers.Dockerfile
   # And provide some more configuration to complement.
-  export COMPLEMENT_SPAWN_HS_TIMEOUT_SECS=25
+  export COMPLEMENT_SPAWN_HS_TIMEOUT_SECS=60
 else
   export COMPLEMENT_BASE_IMAGE=complement-synapse
   COMPLEMENT_DOCKERFILE=Dockerfile