creme 2021-08-01 14:34:00 +02:00
commit 490b2ecba7
39 changed files with 5451 additions and 0 deletions

README.md
@ -0,0 +1,5 @@
# envs.net matrix instance configs & tools
**HS instance: envs.net**
more information at https://envs.net/chat/matrix/

@ -0,0 +1,5 @@
# Specify environment variables used when running Synapse
# SYNAPSE_CACHE_FACTOR=0.5 (default)
SYNAPSE_CACHE_FACTOR=2.0
#LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so
#LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.2
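# Assumption/illustration: Synapse also reads per-cache overrides from variables
# of the form SYNAPSE_CACHE_FACTOR_<CACHE_NAME>, should one cache need a factor
# different from the global one above, e.g.:
# SYNAPSE_CACHE_FACTOR_GET_USERS_IN_ROOM=1.0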

@ -0,0 +1,571 @@
# General repo configuration
repo:
bindAddress: '127.0.0.1'
port: 8000
# Where to store the logs, relative to where the repo is started from. Logs will be automatically
# rotated every day and held for 14 days. To disable the repo logging to files, set this to
# "-" (including quotation marks).
#
# Note: to change the log directory you'll have to restart the repository. This setting cannot be
# live reloaded.
logDirectory: /var/log/matrix-media
# Set to true to enable color coding in your logs. Note that this may cause escape sequences to
# appear in logs which render them unreadable, which is why colors are disabled by default.
logColors: false
# Set to true to enable JSON logging for consumption by things like logstash. Note that this is
# incompatible with the log color option and will always render without colors.
jsonLogs: false
# If true, the media repo will accept any X-Forwarded-For header without validation. In most cases
# this option should be left as "false". Note that the media repo already expects an X-Forwarded-For
# header, but validates it to ensure the IP being given makes sense.
trustAnyForwardedAddress: false
# If false, the media repo will not use the X-Forwarded-Host header commonly added by reverse proxies.
# Typically this should remain as true, though in some circumstances it may need to be disabled.
# See https://github.com/turt2live/matrix-media-repo/issues/202 for more information.
useForwardedHost: true
# Options for dealing with federation
federation:
# On a per-host basis, the number of consecutive failures in calling the host before the
# media repo will back off. This defaults to 20 if not given. Note that 404 errors from
# the remote server do not count towards this.
backoffAt: 20
# The database configuration for the media repository
# Do NOT put your homeserver's existing database credentials here. Create a new database and
# user instead. Using the same server is fine, just not the same username and database.
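# A minimal sketch of creating such a dedicated user and database (names and
# password are illustrative; run as the postgres superuser):
#   CREATE USER matrixmedia WITH PASSWORD 'ChangeMe';
#   CREATE DATABASE matrixmedia OWNER matrixmedia;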
database:
# Currently only "postgres" is supported.
postgres: "postgres://matrix:password@localhost/matrixmedia?sslmode=disable"
# The database pooling options
pool:
# The maximum number of connections to hold open. More of these allow for more concurrent
# processes to happen.
maxConnections: 25
# The maximum number of connections to leave idle. More of these reduces the time it takes
# to serve requests in low-traffic scenarios.
maxIdleConnections: 5
# The configuration for the homeservers this media repository is known to control. Servers
# not listed here will not be able to upload media.
homeservers:
- name: envs.net # This should match the server_name of your homeserver, and the Host header
# provided to the media repo.
csApi: "https://matrix.envs.net/" # The base URL to where the homeserver can actually be reached
backoffAt: 10 # The number of consecutive failures in calling this homeserver before the
# media repository will start backing off. This defaults to 10 if not given.
adminApiKind: "matrix" # The kind of admin API the homeserver supports. If set to "matrix",
# the media repo will use the Synapse-defined endpoints under the
# unstable client-server API. When this is "synapse", the new /_synapse
# endpoints will be used instead. Unknown values are treated as the
# default, "matrix".
# Options for controlling how access tokens work with the media repo. It is recommended that if
# you are going to use these options that the `/logout` and `/logout/all` client-server endpoints
# be proxied through this process. They will also be called on the homeserver, and the response
# sent straight through to the client - they are simply used to invalidate the cache faster for
# a particular user. Without these, the access tokens might still work for a short period of time
# after the user has already invalidated them.
#
# This will also cache errors from the homeserver.
#
# Note that when this config block is used outside of a per-domain config, all hosts will be
# subject to the same cache. This also means that application services on limited homeservers
# could be authorized on the wrong domain.
#
# ***************************************************************************
# * IT IS HIGHLY RECOMMENDED TO USE PER-DOMAIN CONFIGS WITH THIS FEATURE. *
# ***************************************************************************
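# As a rough sketch, proxying the logout endpoints through this process could
# look like the following on the nginx side (illustrative; the nginx config in
# this repo does not currently do this):
#   location ~* ^/_matrix/client/(r0|unstable)/logout(/all)?$ {
#       proxy_pass http://localhost:8000;
#   }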
accessTokens:
# The maximum time a cached access token will be considered valid. Set to zero (the default)
# to disable the cache and constantly hit the homeserver. This is recommended to be set to
# 43200 (12 hours) on servers with the logout endpoints proxied through the media repo, and
# zero for servers that do not proxy the endpoints through.
maxCacheTimeSeconds: 0
# Whether or not to use the `appservices` config option below. If disabled (the default),
# the regular access token cache will be used for each user, potentially leading to high
# memory usage.
useLocalAppserviceConfig: false
# The application services (and their namespaces) registered on the homeserver. Only used
# if `useLocalAppserviceConfig` is enabled (recommended).
#
# Usually the appservice will provide you with these config details - they'll just need
# translating from the appservice registration to here. Note that this does not require
# all options from the registration, and only requires the bare minimum required to run
# the media repo.
appservices:
- id: Name_of_appservice_for_your_reference
asToken: Secret_token_for_appservices_to_use
senderUserId: "@_example_bridge:envs.net"
userNamespaces:
- regex: "@_example_bridge_.+:envs.net"
# A note about regexes: it is best to suffix *all* namespaces with the homeserver
# domain users are valid for, as otherwise the appservice can use any user with
# any domain name it feels like, even if that domain is not configured with the
# media repo. This will lead to inaccurate reporting in the case of the media
# repo, and can potentially lead to media being considered "remote".
# These users have full access to the administrative functions of the media repository.
# See docs/admin.md for information on what these people can do. They must belong to one of the
# configured homeservers above.
admins:
- "@creme:envs.net"
# Shared secret auth is useful for applications building on top of the media repository, such
# as a management interface. The `token` provided here is treated as a repository administrator
# when shared secret auth is enabled: if the `token` is used in place of an access token, the
# request will be authorized. This is not limited to any particular domain, giving applications
# the ability to use it on any configured hostname.
sharedSecretAuth:
# Set this to true to enable shared secret auth.
enabled: false
# Use a secure value here to prevent unauthorized access to the media repository.
token: "PutSomeRandomSecureValueHere"
# Datastores are places where media should be persisted. This isn't dedicated for just uploads:
# thumbnails and other misc data is also stored in these places. The media repo, when looking
# for a datastore to use, will always use the smallest datastore first.
datastores:
- type: file
enabled: true
# Datastores can be split into many areas when handling uploads. Media is still de-duplicated
# across all datastores (local content which duplicates remote content will re-use the remote
# content's location). This option is useful if your datastore is becoming very large, or if
# you want faster storage for a particular kind of media.
#
# The kinds available are:
# thumbnails - Used to store thumbnails of media (local and remote).
# remote_media - Original copies of remote media (servers not configured by this repo).
# local_media - Original uploads for local media.
# archives - Archives of content (GDPR and similar requests).
forKinds: ["all"]
opts:
path: /var/matrix-media
# - type: file
# enabled: true
#forKinds: ["all"]
# opts:
# path: /var/matrix/media
- type: s3
enabled: false # Enable this to set up s3 uploads
forKinds: ["thumbnails", "remote_media", "local_media", "archives"]
opts:
# The s3 uploader needs a temporary location to buffer files to reduce memory usage on
# small file uploads. If the file size is unknown, the file is written to this location
# before being uploaded to s3 (then the file is deleted). If you aren't concerned about
# memory usage, set this to an empty string.
tempPath: "/tmp/mediarepo_s3_upload"
endpoint: sfo2.digitaloceanspaces.com
accessKeyId: ""
accessSecret: ""
ssl: true
bucketName: "your-media-bucket"
# An optional region for where this S3 endpoint is located. Typically not needed, though
# some providers will need this (like Scaleway). Uncomment to use.
#region: "sfo2"
# The media repo does support an IPFS datastore, but only if the IPFS feature is enabled. If
# the feature is not enabled, this will not work. Note that IPFS support is experimental at
# the moment and not recommended for general use.
#
# NOTE: Everything you upload to IPFS will be publicly accessible, even when the media repo
# puts authentication on the download endpoints. Only use this option for cases where you
# expect your media to be publicly accessible.
- type: ipfs
enabled: false
forKinds: ["local_media"]
# The IPFS datastore currently has no options. It will use the daemon or HTTP API configured
# in the IPFS section of your main config.
opts: {}
# Options for controlling archives. Archives are exports of a particular user's content for
# the purpose of GDPR or moving media to a different server.
archiving:
# Whether archiving is enabled or not. Default enabled.
enabled: true
# If true, users can request a copy of their own data. By default, only repository administrators
# can request a copy.
# This includes the ability for homeserver admins to request a copy of their own server's
# data, as known to the repo.
selfService: false
# The number of bytes to target per archive before breaking up the files. This is independent
# of any file upload limits and will require a similar amount of memory when performing an export.
# The file size is also a target, not a guarantee - it is possible to have files that are smaller
# or larger than the target. This is recommended to be approximately double the size of your
# file upload limit, provided there is enough memory available for the demand of exporting.
targetBytesPerPart: 209715200 # 200MB default
# The file upload settings for the media repository
uploads:
# The maximum individual file size a user can upload.
maxBytes: 104857600 # 100MB default, 0 to disable
# The minimum number of bytes to let people upload. This is recommended to be non-zero to
# ensure that the "cost" of running the media repo is worthwhile - small file uploads tend
# to waste more CPU and database resources than larger files, thus a default of 100 bytes
# is applied here as an approximate break-even point.
minBytes: 100 # 100 bytes by default
# The number of bytes to claim as the maximum size for uploads for the limits API. If this
# is not provided then the maxBytes setting will be used instead. This is useful to provide
# if the media repo's settings and the reverse proxy do not match for maximum request size.
# This is purely for informational reasons and does not actually limit any functionality.
# Set this to -1 to indicate that there is no limit. Zero will force the use of maxBytes.
#reportedMaxBytes: 104857600
# Options for limiting how much content a user can upload. Quotas are applied to content
# associated with a user regardless of de-duplication. Quotas which affect remote servers
# or users will not take effect. When a user exceeds their quota they will be unable to
# upload any more media.
quotas:
# Whether or not quotas are enabled/enforced. Note that even when disabled the media repo
# will track how much media a user has uploaded. This is disabled by default.
enabled: false
# The quota rules that affect users. The first rule to match the uploader will take effect.
# An implied rule which matches all users and has no quota is always last in this list,
# meaning that if no rules are supplied then users will be able to upload anything. Similarly,
# if no rules match a user then the implied rule will match, allowing the user to have no
# quota. The quota will let the user upload up to 1 media past their quota, meaning that from
# a statistics perspective the user might exceed their quota, though only by a small amount.
users:
- glob: "@*:*" # Affect all users. Use asterisks (*) to match any character.
maxBytes: 53687063712 # 50GB default, 0 to disable
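# A stricter per-domain rule could be placed before the catch-all, since the
# first matching rule wins (illustrative values):
#- glob: "@*:envs.net"
#  maxBytes: 10737418240 # 10GB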
# Settings related to downloading files from the media repository
downloads:
# The maximum number of bytes to download from other servers
maxBytes: 104857600 # 100MB default, 0 to disable
# The number of workers to use when downloading remote media. Raise this number if remote
# media is downloading slowly or timing out.
#
# Maximum memory usage = numWorkers multiplied by the maximum download size
# Average memory usage is dependent on how many concurrent downloads your users are doing.
numWorkers: 10
# How long, in minutes, to cache errors related to downloading remote media. Once this time
# has passed, the media is able to be re-requested.
failureCacheMinutes: 5
# The cache control settings for downloads. This can help speed up downloads for users by
# keeping popular media in the cache. This cache is also used for thumbnails.
cache:
enabled: true
# The maximum size of cache to have. Higher numbers are better.
maxSizeBytes: 1048576000 # 1GB default
# The maximum file size to cache. This should normally be the same size as your maximum
# upload size.
maxFileSizeBytes: 104857600 # 100MB default
# The number of minutes to track how many downloads a file gets
trackedMinutes: 30
# The number of downloads a file must receive in the window above (trackedMinutes) in
# order to be cached.
minDownloads: 5
# The minimum amount of time an item should remain in the cache. This prevents the cache
# from cycling out the file if it needs more room during this time. Note that the media
# repo regularly cleans out media which is past this point from the cache, so this number
# may need increasing depending on your use case. If the maxSizeBytes is reached for the
# media repo, and some cached items are still under this timer, new items will not be able
# to enter the cache. When this happens, consider raising maxSizeBytes or lowering this
# timer.
minCacheTimeSeconds: 300
# The minimum amount of time an item should remain outside the cache once it is removed.
minEvictedTimeSeconds: 60
# How many days after a piece of remote content is downloaded before it expires. It can be
# re-downloaded on demand, this just helps free up space in your datastore. Set to zero or
# negative to disable. Defaults to disabled.
expireAfterDays: 90
# URL Preview settings
urlPreviews:
enabled: true # If enabled, the preview_url routes will be accessible
maxPageSizeBytes: 10485760 # 10MB default, 0 to disable
# If true, the media repository will try to provide previews for URLs with invalid or unsafe
# certificates. If false (the default), the media repo will fail requests to said URLs.
previewUnsafeCertificates: false
# Note: URL previews are limited to a given number of words, which are then limited to a number
# of characters, dropping the last word if necessary. This also applies to the title.
numWords: 50 # The number of words to include in a preview (maximum)
maxLength: 200 # The maximum number of characters for a description
numTitleWords: 30 # The maximum number of words to include in a preview's title
maxTitleLength: 150 # The maximum number of characters for a title
# The mime types to preview when OpenGraph previews cannot be rendered. OpenGraph previews are
# calculated on anything matching "text/*". To have a thumbnail in the preview the URL must be
# an image and the image's type must be allowed by the thumbnailer.
filePreviewTypes:
- "image/*"
# The number of workers to use when generating url previews. Raise this number if url
# previews are slow or timing out.
#
# Maximum memory usage = numWorkers multiplied by the maximum page size
# Average memory usage is dependent on how many concurrent urls your users are previewing.
numWorkers: 10
# Either allowedNetworks or disallowedNetworks must be provided. If both are provided, they
# will be merged. URL previews will be disabled if neither is supplied. Each entry must be
# a CIDR range.
disallowedNetworks:
- "127.0.0.1/8"
- "10.0.0.0/8"
- "172.16.0.0/12"
- "192.168.0.0/16"
- "100.64.0.0/10"
- "169.254.0.0/16"
- '::1/128'
- 'fe80::/64'
- 'fc00::/7'
allowedNetworks:
- "0.0.0.0/0" # "Everything". The blacklist will help limit this.
# This is the default value for this field.
# How many days after a preview is generated before it expires and is deleted. The preview
# can be regenerated safely - this just helps free up some space in your database. Set to
# zero or negative to disable. Defaults to disabled.
expireAfterDays: 90
# The default Accept-Language header to supply when generating URL previews when one isn't
# supplied by the client.
# Reference: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Accept-Language
defaultLanguage: "en-US,en"
# When true, oEmbed previews will be enabled. Typically these kinds of previews are used for
# sites that do not support OpenGraph or page scraping, such as Twitter. For information on
# specifying providers for oEmbed, including your own, see the following documentation:
# https://docs.t2bot.io/matrix-media-repo/url-previews/oembed.html
# Defaults to disabled.
oEmbed: true
# The thumbnail configuration for the media repository.
thumbnails:
# The maximum number of bytes an image can be before the thumbnailer refuses.
maxSourceBytes: 10485760 # 10MB default, 0 to disable
# The maximum number of pixels an image can have before the thumbnailer refuses. Note that
# this only applies to image types: file types like audio and video are affected solely by
# the maxSourceBytes.
maxPixels: 96000000 # 96M here (the default is 32M)
#maxPixels: 0
# The number of workers to use when generating thumbnails. Raise this number if thumbnails
# are slow to generate or timing out.
#
# Maximum memory usage = numWorkers multiplied by the maximum image source size
# Average memory usage is dependent on how many thumbnails are being generated by your users
numWorkers: 100
# All thumbnails are generated into one of the sizes listed here. The first size is used as
# the default for when no width or height is requested. The media repository will return
# either an exact match or the next largest size of thumbnail.
sizes:
- width: 32
height: 32
- width: 96
height: 96
- width: 320
height: 240
- width: 640
height: 480
- width: 768 # This size is primarily used for audio thumbnailing.
height: 240
- width: 800
height: 600
# To allow for thumbnails to be any size, not just in the sizes specified above, set this to
# true (default false). When enabled, whatever size requested by the client will be generated
# up to a maximum of the largest possible dimensions in the `sizes` list. For best results,
# specify only one size in the `sizes` list when this option is enabled.
dynamicSizing: false
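# For reference, clients request thumbnails through the client-server API, e.g.
# (illustrative media ID):
#   GET /_matrix/media/r0/thumbnail/envs.net/abc123?width=500&height=400&method=scale
# With dynamicSizing left false, the request above is served the next largest
# configured size (640x480).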
# The content types to thumbnail when requested. Types that are not supported by the media repo
# will not be thumbnailed (adding application/json here won't work). Clients may still not request
# thumbnails for these types - this won't make clients automatically thumbnail these file types.
types:
- "image/jpeg"
- "image/jpg"
- "image/png"
- "image/apng"
- "image/gif"
- "image/heif"
- "image/webp"
- "image/svg+xml" # Be sure to have ImageMagick installed to thumbnail SVG files
- "audio/mpeg"
- "audio/ogg"
- "audio/wav"
- "audio/flac"
- "video/mp4" # Be sure to have ffmpeg installed to thumbnail video files
# Animated thumbnails can be CPU intensive to generate. To disable the generation of animated
# thumbnails, set this to false. If disabled, regular thumbnails will be returned.
allowAnimated: true
# Default to animated thumbnails, if available
defaultAnimated: false
# The maximum file size to thumbnail when a capable animated thumbnail is requested. If the image
# is larger than this, the thumbnail will be generated as a static image.
maxAnimateSizeBytes: 10485760 # 10MB default, 0 to disable
# On a scale of 0 (start of animation) to 1 (end of animation), where should the thumbnailer try
# and thumbnail animated content? Defaults to 0.5 (middle of animation).
stillFrame: 0.5
# How many days after a thumbnail is generated before it expires and is deleted. The thumbnail
# can be regenerated safely - this just helps free up some space in your datastores. Set to
# zero or negative to disable. Defaults to disabled.
expireAfterDays: 90
# Controls for the rate limit functionality
rateLimit:
# Set this to false if rate limiting is handled at a higher level or you don't want it enabled.
enabled: true
# The number of requests per second before an IP will be rate limited. Must be a whole number.
requestsPerSecond: 1
# The number of requests an IP can send at once before the rate limit is actually considered.
burst: 10
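# Worked example with the values above: an IP can fire 10 requests at once (the
# burst), after which sustained traffic above 1 request/second is limited until
# the allowance refills.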
# Identicons are generated avatars for a given username. Some clients use these to give users a
# default avatar after signing up. Identicons are not part of the official matrix spec, therefore
# this feature is completely optional.
identicons:
enabled: true
# The quarantine media settings.
quarantine:
# If true, when a thumbnail of quarantined media is requested an image will be returned. If no
# image is given in the thumbnailPath below then a generated image will be provided. This does
# not affect regular downloads of files.
replaceThumbnails: true
# If true, when media which has been quarantined is requested an image will be returned. If
# no image is given in the thumbnailPath below then a generated image will be provided. This
# will replace media which is not an image (ie: quarantining a PDF will replace the PDF with
# an image).
replaceDownloads: false
# If provided, the given image will be returned as a thumbnail for media that is quarantined.
#thumbnailPath: "/path/to/thumbnail.png"
# If true, administrators of the configured homeservers may quarantine media for their server
# only. Global administrators can quarantine any media (local or remote) regardless of this
# flag.
allowLocalAdmins: true
# The various timeouts that the media repo will use.
timeouts:
# The maximum amount of time the media repo should spend trying to fetch a resource that is
# being previewed.
urlPreviewTimeoutSeconds: 10
# The maximum amount of time the media repo will spend making remote requests to other repos
# or homeservers. This is primarily used to download media.
federationTimeoutSeconds: 120
# The maximum amount of time the media repo will spend talking to your configured homeservers.
# This is usually used to verify a user's identity.
clientServerTimeoutSeconds: 30
# Prometheus metrics configuration
# For an example Grafana dashboard, import the following JSON:
# https://github.com/turt2live/matrix-media-repo/blob/master/docs/grafana.json
metrics:
# If true, the bindAddress and port below will serve GET /metrics for Prometheus to scrape.
enabled: true
# The address to listen on. Typically "127.0.0.1" or "0.0.0.0" for all interfaces.
bindAddress: "0.0.0.0"
# The port to listen on. Cannot be the same as the general web server port.
port: 9001
# Options for controlling various MSCs/unstable features of the media repo
# Sections of this config might disappear or be added over time. By default all
# features are disabled in here and must be explicitly enabled to be used.
featureSupport:
# MSC2448 - Blurhash
MSC2448:
# Whether or not this MSC is enabled for use in the media repo
enabled: false
# Maximum dimensions for converting a blurhash to an image. When no width and
# height options are supplied, the default will be half these values.
maxWidth: 1024
maxHeight: 1024
# Thumbnail size in pixels to use to generate the blurhash string
thumbWidth: 64
thumbHeight: 64
# The X and Y components to use. Higher numbers blur less, lower numbers blur more.
xComponents: 4
yComponents: 3
# The amount of contrast to apply when converting a blurhash to an image. Lower values
# make the effect more subtle, larger values make it stronger.
punch: 1
# IPFS Support
# This is currently experimental and might not work at all.
IPFS:
# Whether or not IPFS support is enabled for use in the media repo.
enabled: false
# Options for the built in IPFS daemon
builtInDaemon:
# Enable this to spawn an in-process IPFS node to use instead of a localhost
# HTTP agent. If this is disabled, the media repo will assume you have an HTTP
# IPFS agent running and accessible. Defaults to using a daemon (true).
enabled: true
# If the Daemon is enabled, set this to the location where the IPFS files should
# be stored. If you're using Docker, this should be something like "/data/ipfs"
# so it can be mapped to a volume.
repoPath: "./ipfs"
# Support for redis as a cache mechanism
#
# Note: Enabling Redis support will mean that the existing cache mechanism will do nothing.
# It can be safely disabled once Redis support is enabled.
#
# See docs/redis.md for more information on how this works and how to set it up.
redis:
# Whether or not to use Redis instead of in-process caching.
enabled: false
# The Redis shards that should be used by the media repo in the ring. The names of the
# shards are for your reference and have no bearing on the connection, but must be unique.
shards:
- name: "server1"
addr: ":7000"
- name: "server2"
addr: ":7001"
- name: "server3"
addr: ":7002"

@ -0,0 +1,5 @@
# This file is autogenerated, and will be recreated on upgrade if it is deleted.
# Any changes you make will be preserved.
# Whether to report anonymized homeserver usage statistics.
report_stats: true

@ -0,0 +1,9 @@
# This file is autogenerated, and will be recreated on upgrade if it is deleted.
# Any changes you make will be preserved.
# The domain name of the server, with optional explicit port.
# This is used by remote servers to connect to this server,
# e.g. matrix.org, localhost:8080, etc.
# This is also the last part of your UserID.
#
server_name: envs.net

File diff suppressed because it is too large

@ -0,0 +1,74 @@
# Log configuration for Synapse.
#
# This is a YAML file containing a standard Python logging configuration
# dictionary. See [1] for details on the valid settings.
#
# Synapse also supports structured logging for machine readable logs which can
# be ingested by ELK stacks. See [2] for details.
#
# [1]: https://docs.python.org/3.7/library/logging.config.html#configuration-dictionary-schema
# [2]: https://github.com/matrix-org/synapse/blob/master/docs/structured_logging.md
version: 1
formatters:
precise:
format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
handlers:
file:
class: logging.handlers.TimedRotatingFileHandler
formatter: precise
filename: /var/log/matrix-synapse/homeserver.log
when: midnight
backupCount: 3 # Does not include the current log file.
encoding: utf8
# Default to buffering writes to log file for efficiency. This means that
# there will be a delay for INFO/DEBUG logs to get written, but WARNING/ERROR
# logs will still be flushed immediately.
buffer:
class: logging.handlers.MemoryHandler
target: file
# The capacity is the number of log lines that are buffered before
# being written to disk. Increasing this will lead to better
# performance, at the expense of it taking longer for log lines to
# be written to disk.
capacity: 10
flushLevel: 30 # Flush for WARNING logs as well
# A handler that writes logs to stderr. Unused by default, but can be used
# instead of "buffer" and "file" in the logger handlers.
console:
class: logging.StreamHandler
formatter: precise
loggers:
synapse.storage.SQL:
# beware: increasing this to DEBUG will make synapse log sensitive
# information such as access tokens.
level: WARN
synapse.state.metrics:
level: DEBUG
twisted:
# We send the twisted logging directly to the file handler,
# to work around https://github.com/matrix-org/synapse/issues/3471
# when using "buffer" logger. Use "console" to log to stderr instead.
handlers: [file]
propagate: false
root:
level: WARN
# Write logs to the `buffer` handler, which will buffer them together in memory,
# then write them to a file.
#
# Replace "buffer" with "console" to log to stderr instead. (Note that you'll
# also need to update the configuration for the `twisted` logger above, in
# this case.)
#
handlers: [buffer]
disable_existing_loggers: false

@ -0,0 +1,13 @@
worker_app: synapse.app.appservice
worker_name: appservice
# The replication listener on the synapse to talk to.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093
worker_log_config: /etc/matrix-synapse/workers/appservice_log_config.yaml
worker_listeners:
- type: metrics
bind_addresses: ['0.0.0.0']
port: 9121
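# A worker like this is typically started with its app type plus both config
# files, e.g. (illustrative paths):
#   python -m synapse.app.appservice \
#     --config-path=/etc/matrix-synapse/homeserver.yaml \
#     --config-path=/etc/matrix-synapse/workers/appservice.yaml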

@ -0,0 +1,42 @@
version: 1
formatters:
precise:
format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
filters:
context:
(): synapse.util.logcontext.LoggingContextFilter
request: ""
handlers:
file:
class: logging.handlers.RotatingFileHandler
formatter: precise
filename: /var/log/matrix-synapse/worker_appservice.log
maxBytes: 104857600
backupCount: 5
filters: [context]
encoding: utf8
console:
class: logging.StreamHandler
formatter: precise
filters: [context]
loggers:
synapse:
level: WARN
synapse.storage.SQL:
# beware: increasing this to DEBUG will make synapse log sensitive
# information such as access tokens.
level: WARN
root:
level: WARN
handlers: [file, console]
disable_existing_loggers: false

@ -0,0 +1,13 @@
worker_app: synapse.app.federation_sender
worker_name: federation_sender1
# The replication listener on the synapse to talk to.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093
worker_log_config: /etc/matrix-synapse/workers/federation_sender1_log_config.yaml
worker_listeners:
- type: metrics
bind_addresses: ['0.0.0.0']
port: 9101

@ -0,0 +1,42 @@
version: 1
formatters:
precise:
format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
filters:
context:
(): synapse.util.logcontext.LoggingContextFilter
request: ""
handlers:
file:
class: logging.handlers.RotatingFileHandler
formatter: precise
filename: /var/log/matrix-synapse/worker_federation_sender1.log
maxBytes: 104857600
backupCount: 5
filters: [context]
encoding: utf8
console:
class: logging.StreamHandler
formatter: precise
filters: [context]
loggers:
synapse:
level: WARN
synapse.storage.SQL:
# beware: increasing this to DEBUG will make synapse log sensitive
# information such as access tokens.
level: WARN
root:
level: WARN
handlers: [file, console]
disable_existing_loggers: false

@ -0,0 +1,13 @@
worker_app: synapse.app.federation_sender
worker_name: federation_sender2
# The replication listener on the synapse to talk to.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093
worker_log_config: /etc/matrix-synapse/workers/federation_sender2_log_config.yaml
worker_listeners:
- type: metrics
bind_addresses: ['0.0.0.0']
port: 9102

@ -0,0 +1,42 @@
version: 1
formatters:
precise:
format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
filters:
context:
(): synapse.util.logcontext.LoggingContextFilter
request: ""
handlers:
file:
class: logging.handlers.RotatingFileHandler
formatter: precise
filename: /var/log/matrix-synapse/worker_federation_sender2.log
maxBytes: 104857600
backupCount: 5
filters: [context]
encoding: utf8
console:
class: logging.StreamHandler
formatter: precise
filters: [context]
loggers:
synapse:
level: WARN
synapse.storage.SQL:
# beware: increasing this to DEBUG will make synapse log sensitive
# information such as access tokens.
level: WARN
root:
level: WARN
handlers: [file, console]
disable_existing_loggers: false

@ -0,0 +1,13 @@
worker_app: synapse.app.federation_sender
worker_name: federation_sender3
# The replication listener on the synapse to talk to.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093
worker_log_config: /etc/matrix-synapse/workers/federation_sender3_log_config.yaml
worker_listeners:
- type: metrics
bind_addresses: ['0.0.0.0']
port: 9103

@ -0,0 +1,42 @@
version: 1
formatters:
precise:
format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
filters:
context:
(): synapse.util.logcontext.LoggingContextFilter
request: ""
handlers:
file:
class: logging.handlers.RotatingFileHandler
formatter: precise
filename: /var/log/matrix-synapse/worker_federation_sender3.log
maxBytes: 104857600
backupCount: 5
filters: [context]
encoding: utf8
console:
class: logging.StreamHandler
formatter: precise
filters: [context]
loggers:
synapse:
level: WARN
synapse.storage.SQL:
# beware: increasing this to DEBUG will make synapse log sensitive
# information such as access tokens.
level: WARN
root:
level: WARN
handlers: [file, console]
disable_existing_loggers: false

@ -0,0 +1,28 @@
worker_app: synapse.app.generic_worker
worker_name: generic_worker1
# The replication listener on the synapse to talk to.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093
worker_log_config: /etc/matrix-synapse/workers/generic_worker1_log_config.yaml
worker_listeners:
- type: http
bind_addresses: ['127.0.0.1']
port: 8510
tls: false
x_forwarded: true
resources:
- names: [client, federation]
compress: false
- type: http
bind_addresses: ['127.0.0.1']
port: 9094
resources:
- names: [replication]
- type: metrics
bind_addresses: ['0.0.0.0']
port: 9131
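# The first http listener above (port 8510) is what the nginx generic_worker
# upstreams point at. A start command would look like (illustrative paths):
#   python -m synapse.app.generic_worker \
#     --config-path=/etc/matrix-synapse/homeserver.yaml \
#     --config-path=/etc/matrix-synapse/workers/generic_worker1.yaml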

@ -0,0 +1,42 @@
version: 1
formatters:
precise:
format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
filters:
context:
(): synapse.util.logcontext.LoggingContextFilter
request: ""
handlers:
file:
class: logging.handlers.RotatingFileHandler
formatter: precise
filename: /var/log/matrix-synapse/worker_generic_worker1.log
maxBytes: 104857600
backupCount: 5
filters: [context]
encoding: utf8
console:
class: logging.StreamHandler
formatter: precise
filters: [context]
loggers:
synapse:
level: WARN
synapse.storage.SQL:
# beware: increasing this to DEBUG will make synapse log sensitive
# information such as access tokens.
level: WARN
root:
level: WARN
handlers: [file, console]
disable_existing_loggers: false

@ -0,0 +1,28 @@
worker_app: synapse.app.generic_worker
worker_name: generic_worker2
# The replication listener on the synapse to talk to.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093
worker_log_config: /etc/matrix-synapse/workers/generic_worker2_log_config.yaml
worker_listeners:
- type: http
bind_addresses: ['127.0.0.1']
port: 8511
tls: false
x_forwarded: true
resources:
- names: [client, federation]
compress: false
- type: http
bind_addresses: ['127.0.0.1']
port: 9095
resources:
- names: [replication]
- type: metrics
bind_addresses: ['0.0.0.0']
port: 9132

@ -0,0 +1,42 @@
version: 1
formatters:
precise:
format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
filters:
context:
(): synapse.util.logcontext.LoggingContextFilter
request: ""
handlers:
file:
class: logging.handlers.RotatingFileHandler
formatter: precise
filename: /var/log/matrix-synapse/worker_generic_worker2.log
maxBytes: 104857600
backupCount: 5
filters: [context]
encoding: utf8
console:
class: logging.StreamHandler
formatter: precise
filters: [context]
loggers:
synapse:
level: WARN
synapse.storage.SQL:
# beware: increasing this to DEBUG will make synapse log sensitive
# information such as access tokens.
level: WARN
root:
level: WARN
handlers: [file, console]
disable_existing_loggers: false

@ -0,0 +1,28 @@
worker_app: synapse.app.generic_worker
worker_name: generic_worker3
# The replication listener on the synapse to talk to.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093
worker_log_config: /etc/matrix-synapse/workers/generic_worker3_log_config.yaml
worker_listeners:
- type: http
bind_addresses: ['127.0.0.1']
port: 8512
tls: false
x_forwarded: true
resources:
- names: [client, federation]
compress: false
- type: http
bind_addresses: ['127.0.0.1']
port: 9096
resources:
- names: [replication]
- type: metrics
bind_addresses: ['0.0.0.0']
port: 9133

@ -0,0 +1,42 @@
version: 1
formatters:
precise:
format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
filters:
context:
(): synapse.util.logcontext.LoggingContextFilter
request: ""
handlers:
file:
class: logging.handlers.RotatingFileHandler
formatter: precise
filename: /var/log/matrix-synapse/worker_generic_worker3.log
maxBytes: 104857600
backupCount: 5
filters: [context]
encoding: utf8
console:
class: logging.StreamHandler
formatter: precise
filters: [context]
loggers:
synapse:
level: WARN
synapse.storage.SQL:
# beware: increasing this to DEBUG will make synapse log sensitive
# information such as access tokens.
level: WARN
root:
level: WARN
handlers: [file, console]
disable_existing_loggers: false

@ -0,0 +1,13 @@
worker_app: synapse.app.pusher
worker_name: pusher
# The replication listener on the synapse to talk to.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093
worker_log_config: /etc/matrix-synapse/workers/pusher_log_config.yaml
worker_listeners:
- type: metrics
bind_addresses: ['0.0.0.0']
port: 9111

@ -0,0 +1,42 @@
version: 1
formatters:
precise:
format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
filters:
context:
(): synapse.util.logcontext.LoggingContextFilter
request: ""
handlers:
file:
class: logging.handlers.RotatingFileHandler
formatter: precise
filename: /var/log/matrix-synapse/worker_pusher.log
maxBytes: 104857600
backupCount: 5
filters: [context]
encoding: utf8
console:
class: logging.StreamHandler
formatter: precise
filters: [context]
loggers:
synapse:
level: WARN
synapse.storage.SQL:
# beware: increasing this to DEBUG will make synapse log sensitive
# information such as access tokens.
level: WARN
root:
level: WARN
handlers: [file, console]
disable_existing_loggers: false

@ -0,0 +1,11 @@
homeserver_url: http://localhost:8008
listener: localhost:8083
synchrotrons:
- address: localhost:8510
pid_file: /etc/matrix-synapse/workers/generic_worker1.pid
- address: localhost:8511
pid_file: /etc/matrix-synapse/workers/generic_worker2.pid
- address: localhost:8512
pid_file: /etc/matrix-synapse/workers/generic_worker3.pid
balancer:
interval: 2
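# The balancer listens on localhost:8083 and spreads /sync traffic over the
# three generic workers above; the nginx sync locations proxy to that listener.
# The pid_file entries are presumably used to monitor each worker process when
# deciding where to send the next client.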

@ -0,0 +1,272 @@
# Generic Worker
## Sync requests
location ~* ^/_matrix/client/(v2_alpha|r0)/sync$ {
proxy_pass http://localhost:8083;
include include.d/synapse-proxy.conf;
}
location ~* ^/_matrix/client/(api/v1|v2_alpha|r0)/events$ {
proxy_pass http://generic_worker_lc;
include include.d/synapse-proxy.conf;
}
location ~* ^/_matrix/client/(api/v1|r0)/initialSync$ {
proxy_pass http://localhost:8083;
include include.d/synapse-proxy.conf;
}
location ~* ^/_matrix/client/(api/v1|r0)/rooms/[^/]+/initialSync$ {
proxy_pass http://localhost:8083;
include include.d/synapse-proxy.conf;
}
## Federation requests
location ~* ^/_matrix/federation/v1/event/ {
proxy_pass http://generic_worker_lc;
include include.d/synapse-proxy.conf;
}
location ~* ^/_matrix/federation/v1/state/ {
proxy_pass http://generic_worker_lc;
include include.d/synapse-proxy.conf;
}
location ~* ^/_matrix/federation/v1/state_ids/ {
proxy_pass http://generic_worker_lc;
include include.d/synapse-proxy.conf;
}
location ~* ^/_matrix/federation/v1/backfill/ {
proxy_pass http://generic_worker_lc;
include include.d/synapse-proxy.conf;
}
location ~* ^/_matrix/federation/v1/get_missing_events/ {
proxy_pass http://generic_worker_lc;
include include.d/synapse-proxy.conf;
}
location ~* ^/_matrix/federation/v1/publicRooms {
proxy_pass http://generic_worker_lc;
include include.d/synapse-proxy.conf;
}
location ~* ^/_matrix/federation/v1/query/ {
proxy_pass http://generic_worker_lc;
include include.d/synapse-proxy.conf;
}
location ~* ^/_matrix/federation/v1/make_join/ {
proxy_pass http://generic_worker_lc;
include include.d/synapse-proxy.conf;
}
location ~* ^/_matrix/federation/v1/make_leave/ {
proxy_pass http://generic_worker_lc;
include include.d/synapse-proxy.conf;
}
location ~* ^/_matrix/federation/v1/send_join/ {
proxy_pass http://generic_worker_lc;
include include.d/synapse-proxy.conf;
}
location ~* ^/_matrix/federation/v2/send_join/ {
proxy_pass http://generic_worker_lc;
include include.d/synapse-proxy.conf;
}
location ~* ^/_matrix/federation/v1/send_leave/ {
proxy_pass http://generic_worker_lc;
include include.d/synapse-proxy.conf;
}
location ~* ^/_matrix/federation/v2/send_leave/ {
proxy_pass http://generic_worker_lc;
include include.d/synapse-proxy.conf;
}
location ~* ^/_matrix/federation/v1/invite/ {
proxy_pass http://generic_worker_lc;
include include.d/synapse-proxy.conf;
}
location ~* ^/_matrix/federation/v2/invite/ {
proxy_pass http://generic_worker_lc;
include include.d/synapse-proxy.conf;
}
location ~* ^/_matrix/federation/v1/query_auth/ {
proxy_pass http://generic_worker_lc;
include include.d/synapse-proxy.conf;
}
location ~* ^/_matrix/federation/v1/event_auth/ {
proxy_pass http://generic_worker_lc;
include include.d/synapse-proxy.conf;
}
location ~* ^/_matrix/federation/v1/exchange_third_party_invite/ {
proxy_pass http://generic_worker_lc;
include include.d/synapse-proxy.conf;
}
location ~* ^/_matrix/federation/v1/user/devices/ {
proxy_pass http://generic_worker_lc;
include include.d/synapse-proxy.conf;
}
location ~* ^/_matrix/federation/v1/get_groups_publicised$ {
proxy_pass http://generic_worker_lc;
include include.d/synapse-proxy.conf;
}
location ~* ^/_matrix/key/v2/query {
proxy_pass http://generic_worker_lc;
include include.d/synapse-proxy.conf;
}
## Inbound federation transaction request
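# Transactions from a given origin server must keep hitting the same worker,
# hence the ip_hash upstream (generic_worker_ih) here instead of least_conn.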
location ~* ^/_matrix/federation/v1/send/ {
proxy_pass http://generic_worker_ih;
include include.d/synapse-proxy.conf;
}
## Client API requests
location ~* ^/_matrix/client/(api/v1|r0|unstable)/publicRooms$ {
proxy_pass http://generic_worker_lc;
include include.d/synapse-proxy.conf;
}
location ~* ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/joined_members$ {
proxy_pass http://generic_worker_lc;
include include.d/synapse-proxy.conf;
}
location ~* ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/context/.*$ {
proxy_pass http://generic_worker_lc;
include include.d/synapse-proxy.conf;
}
location ~* ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/members$ {
proxy_pass http://generic_worker_lc;
include include.d/synapse-proxy.conf;
}
location ~* ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/state$ {
proxy_pass http://generic_worker_lc;
include include.d/synapse-proxy.conf;
}
location ~* ^/_matrix/client/(api/v1|r0|unstable)/account/3pid$ {
proxy_pass http://generic_worker_lc;
include include.d/synapse-proxy.conf;
}
location ~* ^/_matrix/client/(api/v1|r0|unstable)/devices$ {
proxy_pass http://generic_worker_lc;
include include.d/synapse-proxy.conf;
}
location ~* ^/_matrix/client/(api/v1|r0|unstable)/keys/query$ {
proxy_pass http://generic_worker_lc;
include include.d/synapse-proxy.conf;
}
location ~* ^/_matrix/client/(api/v1|r0|unstable)/keys/changes$ {
proxy_pass http://generic_worker_lc;
include include.d/synapse-proxy.conf;
}
location ~* ^/_matrix/client/versions$ {
proxy_pass http://generic_worker_lc;
include include.d/synapse-proxy.conf;
}
location ~* ^/_matrix/client/(api/v1|r0|unstable)/voip/turnServer$ {
proxy_pass http://generic_worker_lc;
include include.d/synapse-proxy.conf;
}
location ~* ^/_matrix/client/(api/v1|r0|unstable)/joined_groups$ {
proxy_pass http://generic_worker_lc;
include include.d/synapse-proxy.conf;
}
location ~* ^/_matrix/client/(api/v1|r0|unstable)/publicised_groups$ {
proxy_pass http://generic_worker_lc;
include include.d/synapse-proxy.conf;
}
location ~* ^/_matrix/client/(api/v1|r0|unstable)/publicised_groups/ {
proxy_pass http://generic_worker_lc;
include include.d/synapse-proxy.conf;
}
location ~* ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/event/ {
proxy_pass http://generic_worker_lc;
include include.d/synapse-proxy.conf;
}
location ~* ^/_matrix/client/(api/v1|r0|unstable)/joined_rooms$ {
proxy_pass http://generic_worker_lc;
include include.d/synapse-proxy.conf;
}
location ~* ^/_matrix/client/(api/v1|r0|unstable)/search$ {
proxy_pass http://generic_worker_lc;
include include.d/synapse-proxy.conf;
}
## Registration/login requests
location ~* ^/_matrix/client/(api/v1|r0|unstable)/login$ {
proxy_pass http://generic_worker_lc;
include include.d/synapse-proxy.conf;
}
location ~* ^/_matrix/client/(r0|unstable)/register$ {
proxy_pass http://generic_worker_lc;
include include.d/synapse-proxy.conf;
}
location ~* ^/_matrix/client/(r0|unstable)/auth/.*/fallback/web$ {
proxy_pass http://generic_worker_lc;
include include.d/synapse-proxy.conf;
}
## Event sending requests
location ~* ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/redact {
proxy_pass http://generic_worker_lc;
include include.d/synapse-proxy.conf;
}
location ~* ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/send {
proxy_pass http://generic_worker_lc;
include include.d/synapse-proxy.conf;
}
location ~* ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/state/ {
proxy_pass http://generic_worker_lc;
include include.d/synapse-proxy.conf;
}
location ~* ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$ {
proxy_pass http://generic_worker_lc;
include include.d/synapse-proxy.conf;
}
location ~* ^/_matrix/client/(api/v1|r0|unstable)/join/ {
proxy_pass http://generic_worker_lc;
include include.d/synapse-proxy.conf;
}
location ~* ^/_matrix/client/(api/v1|r0|unstable)/profile/ {
proxy_pass http://generic_worker_lc;
include include.d/synapse-proxy.conf;
}

@ -0,0 +1,14 @@
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header X-Forwarded-Proto $scheme;
port_in_redirect off;
proxy_redirect off;
proxy_connect_timeout 360;
proxy_read_timeout 600;
proxy_send_timeout 600;
#proxy_buffering off;
proxy_buffers 8 16k;
proxy_buffer_size 32k;
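# Note that client_max_body_size is not set in this include; locations that
# accept uploads set their own (the media-repo location in the main vhost uses
# 100M, matching the repo's uploads.maxBytes).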

@ -0,0 +1,19 @@
location ^~ /_synapse/admin/v1/users/@creme:envs.net/admin {
proxy_pass http://localhost:8008;
include include.d/synapse-proxy.conf;
}
location ^~ /_synapse/admin/v1/whois/@creme:envs.net {
proxy_pass http://localhost:8008;
include include.d/synapse-proxy.conf;
}
location ^~ /_synapse/admin/v1/deactivate {
proxy_pass http://localhost:8008;
include include.d/synapse-proxy.conf;
}
location ^~ /_synapse/admin {
allow 127.0.0.1;
deny all;
proxy_pass http://localhost:8008;
include include.d/synapse-proxy.conf;
}
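# Example call through one of the allowed routes above (illustrative token;
# Synapse itself checks that the caller is an admin):
#   curl -H 'Authorization: Bearer <admin_access_token>' \
#        https://matrix.envs.net/_synapse/admin/v1/whois/@creme:envs.net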

@ -0,0 +1,141 @@
server {
listen 80 default_server;
#listen [::]:80;
server_name matrix.envs.net;
location / {
return 307 https://$host$request_uri;
}
location /.well-known/acme-challenge/ {
alias /var/lib/letsencrypt/.well-known/acme-challenge/;
}
}
server {
listen 80;
#listen [::]:80;
server_name turn.envs.net;
location / {
return 307 https://matrix.envs.net$request_uri;
}
location /.well-known/acme-challenge/ {
alias /var/lib/letsencrypt/.well-known/acme-challenge/;
}
}
map $http_origin $DO_CORS {
# indicates all map values are hostnames and should be parsed as such
hostnames;
# default value
default 'true';
# blocked domains
renaissance.eu.org 'false';
element.renaissance.eu.org 'false';
}
upstream generic_worker_rr {
server localhost:8510;
server localhost:8511;
server localhost:8512;
}
upstream generic_worker_ih {
ip_hash;
server localhost:8510;
server localhost:8511;
server localhost:8512;
}
upstream generic_worker_lc {
least_conn;
server localhost:8510;
server localhost:8511;
server localhost:8512;
}
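# Three balancing strategies over the same workers: _rr is nginx's default
# round-robin, _ih pins a source address to one worker (needed for inbound
# federation transactions), and _lc picks the least busy worker by connections.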
server {
listen 443 ssl http2 default_server;
#listen [::]:443 ssl;
server_name matrix.envs.net;
include snippets/ssl.conf;
## well-known
location /.well-known/matrix/support {
add_header Access-Control-Allow-Origin "$DO_CORS";
add_header Content-Type application/json;
return 200 '{"admins": [{"matrix_id": "@creme:envs.net", "email_address": "hostmaster@envs.net", "role": "admin"}]}';
}
location /.well-known/matrix/ {
add_header Access-Control-Allow-Origin "$DO_CORS";
add_header Content-Type application/json;
return 200 '{"m.server": "matrix.envs.net:443", "m.homeserver": {"base_url": "https://matrix.envs.net"}, "m.integrations": {"managers": [{"ui_url": "https://dimension.envs.net/riot", "api_url": "https://dimension.envs.net/api/v1/scalar"}, {"ui_url": "https://scalar.vector.im/", "api_url": "https://scalar.vector.im/api"}]}, "m.integrations_widget": {"url": "https://dimension.envs.net/riot", "data": {"api_url": "https://dimension.envs.net/api/v1/scalar"}}}';
}
## workers
include include.d/generic_worker.conf;
##
location ~* ^(\/_matrix|\/_synapse\/client) {
proxy_pass http://localhost:8008;
include include.d/synapse-proxy.conf;
}
# /synapse/admin
include include.d/synapse_admin.conf;
## media-repo
location ^~ /_matrix/media {
proxy_pass http://localhost:8000;
client_max_body_size 100M;
proxy_set_header Host envs.net;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $remote_addr;
proxy_read_timeout 3600;
}
## appservice-irc
location ^~ /_matrix/provision {
allow 127.0.0.1;
deny all;
proxy_pass http://localhost:9999/_matrix/provision;
include include.d/synapse-proxy.conf;
}
## MAUBOT
location ^~ /_matrix/maubot {
proxy_pass http://localhost:29316/_matrix/maubot;
include include.d/synapse-proxy.conf;
}
# log
location ^~ /_matrix/maubot/v1/logs {
proxy_pass http://localhost:29316/_matrix/maubot/v1/logs;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "Upgrade";
include include.d/synapse-proxy.conf;
}
##
location / {
proxy_pass http://localhost:8008;
include include.d/synapse-proxy.conf;
}
}
server {
listen 8448 ssl http2;
server_name matrix.envs.net;
include snippets/ssl.conf;
location / {
proxy_pass http://localhost:8008;
include include.d/synapse-proxy.conf;
}
}

@ -0,0 +1,690 @@
# -----------------------------
# PostgreSQL configuration file
# -----------------------------
#
# This file consists of lines of the form:
#
# name = value
#
# (The "=" is optional.) Whitespace may be used. Comments are introduced with
# "#" anywhere on a line. The complete list of parameter names and allowed
# values can be found in the PostgreSQL documentation.
#
# The commented-out settings shown in this file represent the default values.
# Re-commenting a setting is NOT sufficient to revert it to the default value;
# you need to reload the server.
#
# This file is read on server startup and when the server receives a SIGHUP
# signal. If you edit the file on a running system, you have to SIGHUP the
# server for the changes to take effect, run "pg_ctl reload", or execute
# "SELECT pg_reload_conf()". Some parameters, which are marked below,
# require a server shutdown and restart to take effect.
#
# Any parameter can also be given as a command-line option to the server, e.g.,
# "postgres -c log_connections=on". Some parameters can be changed at run time
# with the "SET" SQL command.
#
# Memory units: kB = kilobytes Time units: ms = milliseconds
# MB = megabytes s = seconds
# GB = gigabytes min = minutes
# TB = terabytes h = hours
# d = days
#------------------------------------------------------------------------------
# FILE LOCATIONS
#------------------------------------------------------------------------------
# The default values of these variables are driven from the -D command-line
# option or PGDATA environment variable, represented here as ConfigDir.
data_directory = '/var/lib/postgresql/11/main' # use data in another directory
# (change requires restart)
hba_file = '/etc/postgresql/11/main/pg_hba.conf' # host-based authentication file
# (change requires restart)
ident_file = '/etc/postgresql/11/main/pg_ident.conf' # ident configuration file
# (change requires restart)
# If external_pid_file is not explicitly set, no extra PID file is written.
external_pid_file = '/var/run/postgresql/11-main.pid' # write an extra PID file
# (change requires restart)
#------------------------------------------------------------------------------
# CONNECTIONS AND AUTHENTICATION
#------------------------------------------------------------------------------
# - Connection Settings -
listen_addresses = '127.0.0.1' # what IP address(es) to listen on;
# comma-separated list of addresses;
# defaults to 'localhost'; use '*' for all
# (change requires restart)
port = 5432 # (change requires restart)
max_connections = 200 # (change requires restart)
#superuser_reserved_connections = 3 # (change requires restart)
unix_socket_directories = '/var/run/postgresql' # comma-separated list of directories
# (change requires restart)
#unix_socket_group = '' # (change requires restart)
#unix_socket_permissions = 0777 # begin with 0 to use octal notation
# (change requires restart)
#bonjour = off # advertise server via Bonjour
# (change requires restart)
#bonjour_name = '' # defaults to the computer name
# (change requires restart)
# - TCP Keepalives -
# see "man 7 tcp" for details
#tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds;
# 0 selects the system default
#tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds;
# 0 selects the system default
#tcp_keepalives_count = 0 # TCP_KEEPCNT;
# 0 selects the system default
# - Authentication -
#authentication_timeout = 1min # 1s-600s
password_encryption = 'scram-sha-256' # md5 or scram-sha-256
#db_user_namespace = off
# GSSAPI using Kerberos
#krb_server_keyfile = ''
#krb_caseins_users = off
# - SSL -
ssl = on
#ssl_ca_file = ''
ssl_cert_file = '/etc/ssl/certs/ssl-cert-snakeoil.pem'
#ssl_crl_file = ''
ssl_key_file = '/etc/ssl/private/ssl-cert-snakeoil.key'
#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers
#ssl_prefer_server_ciphers = on
#ssl_ecdh_curve = 'prime256v1'
#ssl_dh_params_file = ''
#ssl_passphrase_command = ''
#ssl_passphrase_command_supports_reload = off
#------------------------------------------------------------------------------
# RESOURCE USAGE (except WAL)
#------------------------------------------------------------------------------
# - Memory -
shared_buffers = 6GB # min 128kB
# (change requires restart)
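# (Illustrative assumption: 6GB follows the common ~25%-of-RAM guidance, which
# would suggest a host with roughly 24GB of memory.)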
#huge_pages = try # on, off, or try
# (change requires restart)
#temp_buffers = 8MB # min 800kB
#max_prepared_transactions = 0 # zero disables the feature
# (change requires restart)
# Caution: it is not advisable to set max_prepared_transactions nonzero unless
# you actively intend to use prepared transactions.
work_mem = 15728kB # min 64kB
maintenance_work_mem = 1536MB # min 1MB
#autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem
#max_stack_depth = 2MB # min 100kB
dynamic_shared_memory_type = posix # the default is the first option
# supported by the operating system:
# posix
# sysv
# windows
# mmap
# use none to disable dynamic shared memory
# (change requires restart)
# - Disk -
#temp_file_limit = -1 # limits per-process temp file space
# in kB, or -1 for no limit
# - Kernel Resources -
#max_files_per_process = 1000 # min 25
# (change requires restart)
# - Cost-Based Vacuum Delay -
#vacuum_cost_delay = 0 # 0-100 milliseconds
#vacuum_cost_page_hit = 1 # 0-10000 credits
#vacuum_cost_page_miss = 10 # 0-10000 credits
#vacuum_cost_page_dirty = 20 # 0-10000 credits
#vacuum_cost_limit = 200 # 1-10000 credits
# - Background Writer -
#bgwriter_delay = 200ms # 10-10000ms between rounds
#bgwriter_lru_maxpages = 100 # max buffers written/round, 0 disables
#bgwriter_lru_multiplier = 2.0 # 0-10.0 multiplier on buffers scanned/round
#bgwriter_flush_after = 512kB # measured in pages, 0 disables
# - Asynchronous Behavior -
effective_io_concurrency = 200 # 1-1000; 0 disables prefetching
#max_worker_processes = 4 # (change requires restart)
#max_parallel_maintenance_workers = 2 # taken from max_parallel_workers
#max_parallel_workers_per_gather = 2 # taken from max_parallel_workers
#parallel_leader_participation = on
#max_parallel_workers = 4 # maximum number of max_worker_processes that
# can be used in parallel operations
#old_snapshot_threshold = -1 # 1min-60d; -1 disables; 0 is immediate
# (change requires restart)
#backend_flush_after = 0 # measured in pages, 0 disables
#------------------------------------------------------------------------------
# WRITE-AHEAD LOG
#------------------------------------------------------------------------------
# - Settings -
#wal_level = replica # minimal, replica, or logical
# (change requires restart)
#fsync = on # flush data to disk for crash safety
# (turning this off can cause
# unrecoverable data corruption)
synchronous_commit = off # synchronization level;
# off, local, remote_write, remote_apply, or on
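# Note (hedged): "off" trades durability for latency; a crash can lose the
# most recently committed transactions, but it cannot corrupt the database.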
#wal_sync_method = fsync # the default is the first option
# supported by the operating system:
# open_datasync
# fdatasync (default on Linux)
# fsync
# fsync_writethrough
# open_sync
#full_page_writes = on # recover from partial page writes
#wal_compression = off # enable compression of full-page writes
#wal_log_hints = off # also do full page writes of non-critical updates
# (change requires restart)
wal_buffers = 16MB # min 32kB, -1 sets based on shared_buffers
# (change requires restart)
#wal_writer_delay = 200ms # 1-10000 milliseconds
#wal_writer_flush_after = 1MB # measured in pages, 0 disables
#commit_delay = 0 # range 0-100000, in microseconds
#commit_siblings = 5 # range 1-1000
# - Checkpoints -
#checkpoint_timeout = 5min # range 30s-1d
max_wal_size = 4GB
min_wal_size = 1GB
checkpoint_completion_target = 0.8 # checkpoint target duration, 0.0 - 1.0
#checkpoint_flush_after = 256kB # measured in pages, 0 disables
#checkpoint_warning = 30s # 0 disables
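# Tuning note (hedged): a larger max_wal_size spaces checkpoints further apart,
# and checkpoint_completion_target = 0.8 spreads checkpoint writes over 80% of
# the interval, smoothing I/O spikes at the cost of keeping more WAL on disk.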
# - Archiving -
#archive_mode = off # enables archiving; off, on, or always
# (change requires restart)
#archive_command = '' # command to use to archive a logfile segment
# placeholders: %p = path of file to archive
# %f = file name only
# e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f'
#archive_timeout = 0 # force a logfile segment switch after this
# number of seconds; 0 disables
#------------------------------------------------------------------------------
# REPLICATION
#------------------------------------------------------------------------------
# - Sending Servers -
# Set these on the master and on any standby that will send replication data.
#max_wal_senders = 10 # max number of walsender processes
# (change requires restart)
#wal_keep_segments = 0 # in logfile segments; 0 disables
#wal_sender_timeout = 60s # in milliseconds; 0 disables
#max_replication_slots = 10 # max number of replication slots
# (change requires restart)
#track_commit_timestamp = off # collect timestamp of transaction commit
# (change requires restart)
# - Master Server -
# These settings are ignored on a standby server.
#synchronous_standby_names = '' # standby servers that provide sync rep
# method to choose sync standbys, number of sync standbys,
# and comma-separated list of application_name
# from standby(s); '*' = all
#vacuum_defer_cleanup_age = 0 # number of xacts by which cleanup is delayed
# - Standby Servers -
# These settings are ignored on a master server.
#hot_standby = on # "off" disallows queries during recovery
# (change requires restart)
#max_standby_archive_delay = 30s # max delay before canceling queries
# when reading WAL from archive;
# -1 allows indefinite delay
#max_standby_streaming_delay = 30s # max delay before canceling queries
# when reading streaming WAL;
# -1 allows indefinite delay
#wal_receiver_status_interval = 10s # send replies at least this often
# 0 disables
#hot_standby_feedback = off # send info from standby to prevent
# query conflicts
#wal_receiver_timeout = 60s # time that receiver waits for
# communication from master
# in milliseconds; 0 disables
#wal_retrieve_retry_interval = 5s # time to wait before retrying to
# retrieve WAL after a failed attempt
# - Subscribers -
# These settings are ignored on a publisher.
#max_logical_replication_workers = 4 # taken from max_worker_processes
# (change requires restart)
#max_sync_workers_per_subscription = 2 # taken from max_logical_replication_workers
#------------------------------------------------------------------------------
# QUERY TUNING
#------------------------------------------------------------------------------
# - Planner Method Configuration -
#enable_bitmapscan = on
#enable_hashagg = on
#enable_hashjoin = on
#enable_indexscan = on
#enable_indexonlyscan = on
#enable_material = on
#enable_mergejoin = on
#enable_nestloop = on
#enable_parallel_append = on
#enable_seqscan = on
#enable_sort = on
#enable_tidscan = on
#enable_partitionwise_join = off
#enable_partitionwise_aggregate = off
#enable_parallel_hash = on
#enable_partition_pruning = on
# - Planner Cost Constants -
#seq_page_cost = 1.0 # measured on an arbitrary scale
random_page_cost = 1.1 # same scale as above
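# Note (hedged): 1.1 instead of the 4.0 default assumes SSD/NVMe storage,
# consistent with effective_io_concurrency = 200 above.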
#cpu_tuple_cost = 0.01 # same scale as above
#cpu_index_tuple_cost = 0.005 # same scale as above
#cpu_operator_cost = 0.0025 # same scale as above
#parallel_tuple_cost = 0.1 # same scale as above
#parallel_setup_cost = 1000.0 # same scale as above
#jit_above_cost = 100000 # perform JIT compilation if available
# and query more expensive than this;
# -1 disables
#jit_inline_above_cost = 500000 # inline small functions if query is
# more expensive than this; -1 disables
#jit_optimize_above_cost = 500000 # use expensive JIT optimizations if
# query is more expensive than this;
# -1 disables
#min_parallel_table_scan_size = 8MB
#min_parallel_index_scan_size = 512kB
effective_cache_size = 14GB
# - Genetic Query Optimizer -
#geqo = on
#geqo_threshold = 12
#geqo_effort = 5 # range 1-10
#geqo_pool_size = 0 # selects default based on effort
#geqo_generations = 0 # selects default based on effort
#geqo_selection_bias = 2.0 # range 1.5-2.0
#geqo_seed = 0.0 # range 0.0-1.0
# - Other Planner Options -
default_statistics_target = 100 # range 1-10000
#constraint_exclusion = partition # on, off, or partition
#cursor_tuple_fraction = 0.1 # range 0.0-1.0
#from_collapse_limit = 8
#join_collapse_limit = 8 # 1 disables collapsing of explicit
# JOIN clauses
#force_parallel_mode = off
#jit = off # allow JIT compilation
#------------------------------------------------------------------------------
# REPORTING AND LOGGING
#------------------------------------------------------------------------------
# - Where to Log -
#log_destination = 'stderr' # Valid values are combinations of
# stderr, csvlog, syslog, and eventlog,
# depending on platform. csvlog
# requires logging_collector to be on.
# This is used when logging to stderr:
#logging_collector = off # Enable capturing of stderr and csvlog
# into log files. Required to be on for
# csvlogs.
# (change requires restart)
# These are only used if logging_collector is on:
#log_directory = 'log' # directory where log files are written,
# can be absolute or relative to PGDATA
#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' # log file name pattern,
# can include strftime() escapes
#log_file_mode = 0600 # creation mode for log files,
# begin with 0 to use octal notation
#log_truncate_on_rotation = off # If on, an existing log file with the
# same name as the new log file will be
# truncated rather than appended to.
# But such truncation only occurs on
# time-driven rotation, not on restarts
# or size-driven rotation. Default is
# off, meaning append to existing files
# in all cases.
#log_rotation_age = 1d # Automatic rotation of logfiles will
# happen after that time. 0 disables.
#log_rotation_size = 10MB # Automatic rotation of logfiles will
# happen after that much log output.
# 0 disables.
# These are relevant when logging to syslog:
#syslog_facility = 'LOCAL0'
#syslog_ident = 'postgres'
#syslog_sequence_numbers = on
#syslog_split_messages = on
# This is only relevant when logging to eventlog (win32):
# (change requires restart)
#event_source = 'PostgreSQL'
# - When to Log -
#log_min_messages = warning # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# info
# notice
# warning
# error
# log
# fatal
# panic
#log_min_error_statement = error # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# info
# notice
# warning
# error
# log
# fatal
# panic (effectively off)
#log_min_duration_statement = -1 # -1 is disabled, 0 logs all statements
# and their durations, > 0 logs only
# statements running at least this number
# of milliseconds
# - What to Log -
#debug_print_parse = off
#debug_print_rewritten = off
#debug_print_plan = off
#debug_pretty_print = on
#log_checkpoints = off
#log_connections = off
#log_disconnections = off
#log_duration = off
#log_error_verbosity = default # terse, default, or verbose messages
#log_hostname = off
log_line_prefix = '%m [%p] %q%u@%d ' # special values:
# %a = application name
# %u = user name
# %d = database name
# %r = remote host and port
# %h = remote host
# %p = process ID
# %t = timestamp without milliseconds
# %m = timestamp with milliseconds
# %n = timestamp with milliseconds (as a Unix epoch)
# %i = command tag
# %e = SQL state
# %c = session ID
# %l = session line number
# %s = session start timestamp
# %v = virtual transaction ID
# %x = transaction ID (0 if none)
# %q = stop here in non-session
# processes
# %% = '%'
# e.g. '<%u%%%d> '
#log_lock_waits = off # log lock waits >= deadlock_timeout
#log_statement = 'none' # none, ddl, mod, all
#log_replication_commands = off
#log_temp_files = -1 # log temporary files equal or larger
# than the specified size in kilobytes;
# -1 disables, 0 logs all temp files
log_timezone = 'Etc/UTC'
#------------------------------------------------------------------------------
# PROCESS TITLE
#------------------------------------------------------------------------------
cluster_name = '11/main' # added to process titles if nonempty
# (change requires restart)
#update_process_title = on
#------------------------------------------------------------------------------
# STATISTICS
#------------------------------------------------------------------------------
# - Query and Index Statistics Collector -
#track_activities = on
#track_counts = on
#track_io_timing = off
#track_functions = none # none, pl, all
#track_activity_query_size = 1024 # (change requires restart)
stats_temp_directory = '/var/run/postgresql/11-main.pg_stat_tmp'
# - Monitoring -
#log_parser_stats = off
#log_planner_stats = off
#log_executor_stats = off
#log_statement_stats = off
#------------------------------------------------------------------------------
# AUTOVACUUM
#------------------------------------------------------------------------------
#autovacuum = on # Enable autovacuum subprocess? 'on'
# requires track_counts to also be on.
#log_autovacuum_min_duration = -1 # -1 disables, 0 logs all actions and
# their durations, > 0 logs only
# actions running at least this number
# of milliseconds.
#autovacuum_max_workers = 3 # max number of autovacuum subprocesses
# (change requires restart)
#autovacuum_naptime = 1min # time between autovacuum runs
#autovacuum_vacuum_threshold = 50 # min number of row updates before
# vacuum
#autovacuum_analyze_threshold = 50 # min number of row updates before
# analyze
#autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum
#autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze
#autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum
# (change requires restart)
#autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age
# before forced vacuum
# (change requires restart)
#autovacuum_vacuum_cost_delay = 20ms # default vacuum cost delay for
# autovacuum, in milliseconds;
# -1 means use vacuum_cost_delay
#autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for
# autovacuum, -1 means use
# vacuum_cost_limit
#------------------------------------------------------------------------------
# CLIENT CONNECTION DEFAULTS
#------------------------------------------------------------------------------
# - Statement Behavior -
#client_min_messages = notice # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# log
# notice
# warning
# error
#search_path = '"$user", public' # schema names
#row_security = on
#default_tablespace = '' # a tablespace name, '' uses the default
#temp_tablespaces = '' # a list of tablespace names, '' uses
# only default tablespace
#check_function_bodies = on
#default_transaction_isolation = 'read committed'
#default_transaction_read_only = off
#default_transaction_deferrable = off
#session_replication_role = 'origin'
#statement_timeout = 0 # in milliseconds, 0 is disabled
#lock_timeout = 0 # in milliseconds, 0 is disabled
#idle_in_transaction_session_timeout = 0 # in milliseconds, 0 is disabled
#vacuum_freeze_min_age = 50000000
#vacuum_freeze_table_age = 150000000
#vacuum_multixact_freeze_min_age = 5000000
#vacuum_multixact_freeze_table_age = 150000000
#vacuum_cleanup_index_scale_factor = 0.1 # fraction of total number of tuples
# before index cleanup, 0 always performs
# index cleanup
#bytea_output = 'hex' # hex, escape
#xmlbinary = 'base64'
#xmloption = 'content'
#gin_fuzzy_search_limit = 0
#gin_pending_list_limit = 4MB
# - Locale and Formatting -
datestyle = 'iso, mdy'
#intervalstyle = 'postgres'
timezone = 'Etc/UTC'
#timezone_abbreviations = 'Default' # Select the set of available time zone
# abbreviations. Currently, there are
# Default
# Australia (historical usage)
# India
# You can create your own file in
# share/timezonesets/.
#extra_float_digits = 0 # min -15, max 3
#client_encoding = sql_ascii # actually, defaults to database
# encoding
# These settings are initialized by initdb, but they can be changed.
lc_messages = 'en_US.UTF-8' # locale for system error message
# strings
lc_monetary = 'en_US.UTF-8' # locale for monetary formatting
lc_numeric = 'en_US.UTF-8' # locale for number formatting
lc_time = 'en_US.UTF-8' # locale for time formatting
# default configuration for text search
default_text_search_config = 'pg_catalog.english'
# - Shared Library Preloading -
#shared_preload_libraries = '' # (change requires restart)
#local_preload_libraries = ''
#session_preload_libraries = ''
#jit_provider = 'llvmjit' # JIT library to use
# - Other Defaults -
#dynamic_library_path = '$libdir'
#------------------------------------------------------------------------------
# LOCK MANAGEMENT
#------------------------------------------------------------------------------
#deadlock_timeout = 1s
#max_locks_per_transaction = 64 # min 10
# (change requires restart)
#max_pred_locks_per_transaction = 64 # min 10
# (change requires restart)
#max_pred_locks_per_relation = -2 # negative values mean
# (max_pred_locks_per_transaction
# / -max_pred_locks_per_relation) - 1
#max_pred_locks_per_page = 2 # min 0
#------------------------------------------------------------------------------
# VERSION AND PLATFORM COMPATIBILITY
#------------------------------------------------------------------------------
# - Previous PostgreSQL Versions -
#array_nulls = on
#backslash_quote = safe_encoding # on, off, or safe_encoding
#default_with_oids = off
#escape_string_warning = on
#lo_compat_privileges = off
#operator_precedence_warning = off
#quote_all_identifiers = off
#standard_conforming_strings = on
#synchronize_seqscans = on
# - Other Platforms and Clients -
#transform_null_equals = off
#------------------------------------------------------------------------------
# ERROR HANDLING
#------------------------------------------------------------------------------
#exit_on_error = off # terminate session on any error?
#restart_after_crash = on # reinitialize after backend crash?
#data_sync_retry = off # retry or panic on failure to fsync
# data?
# (change requires restart)
#------------------------------------------------------------------------------
# CONFIG FILE INCLUDES
#------------------------------------------------------------------------------
# These options allow settings to be loaded from files other than the
# default postgresql.conf.
include_dir = 'conf.d' # include files ending in '.conf' from
# a directory, e.g., 'conf.d'
#include_if_exists = '' # include file only if it exists
#include = '' # include file
#------------------------------------------------------------------------------
# CUSTOMIZED OPTIONS
#------------------------------------------------------------------------------
# Add settings for extensions here
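# Verification sketch (assumption: superuser access via psql). Reloadable
# settings can be applied and checked with:
#   psql -U postgres -c "SELECT pg_reload_conf();"
#   psql -U postgres -c "SHOW shared_buffers;"        -- expect 6GB
#   psql -U postgres -c "SHOW effective_cache_size;"  -- expect 14GB
# Settings marked "(change requires restart)" above only take effect after a
# full restart, e.g. systemctl restart postgresql@11-main.service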

View File

@ -0,0 +1,13 @@
[Unit]
Description=matrix-media-repo
After=network.target postgresql@11-main.service matrix-synapse.service
[Service]
Type=simple
User=matrix-media
WorkingDirectory=/opt/matrix-media-repo
ExecStart=/opt/matrix-media-repo/media_repo --config /etc/matrix-media-repo/media-repo.yaml
Restart=always
[Install]
WantedBy=multi-user.target
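# Deployment sketch (assumption: this unit is installed as
# /etc/systemd/system/matrix-media.service, the name used by the update
# script in this repo):
#   systemctl daemon-reload
#   systemctl enable --now matrix-media.service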

View File

@ -0,0 +1,27 @@
[Unit]
Description=Synapse %i
AssertPathExists=/etc/matrix-synapse/workers/%i.yaml
# This service should be restarted when the synapse target is restarted.
PartOf=matrix-synapse.target
ReloadPropagatedFrom=matrix-synapse.target
# If this is started at the same time as the main process, let the main
# process start first, to initialise the database schema.
After=matrix-synapse.service
[Service]
Type=notify
NotifyAccess=main
User=matrix-synapse
WorkingDirectory=/var/lib/matrix-synapse
EnvironmentFile=/etc/default/matrix-synapse
ExecStart=/opt/venvs/matrix-synapse/bin/python -m synapse.app.generic_worker --config-path=/etc/matrix-synapse/homeserver.yaml --config-path=/etc/matrix-synapse/conf.d/ --config-path=/etc/matrix-synapse/workers/%i.yaml
ExecStartPost=/bin/sh -c "echo $MAINPID > /etc/matrix-synapse/workers/%i.pid"
ExecReload=/bin/kill -HUP $MAINPID
Restart=always
RestartSec=3
SyslogIdentifier=matrix-synapse-%i
[Install]
WantedBy=matrix-synapse.target
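# Usage sketch (assumption: this template is installed as
# matrix-synapse-worker@.service). Each instance name must match a config
# file under /etc/matrix-synapse/workers/, e.g.:
#   systemctl enable --now matrix-synapse-worker@generic_worker1.service
# reads /etc/matrix-synapse/workers/generic_worker1.yaml (hypothetical name).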

View File

@ -0,0 +1,22 @@
[Unit]
Description=Synapse master
# This service should be restarted when the synapse target is restarted.
PartOf=matrix-synapse.target
ReloadPropagatedFrom=matrix-synapse.target
[Service]
Type=notify
NotifyAccess=main
User=matrix-synapse
WorkingDirectory=/var/lib/matrix-synapse
EnvironmentFile=/etc/default/matrix-synapse
ExecStartPre=/opt/venvs/matrix-synapse/bin/python -m synapse.app.homeserver --config-path=/etc/matrix-synapse/homeserver.yaml --config-path=/etc/matrix-synapse/conf.d/ --generate-keys
ExecStart=/opt/venvs/matrix-synapse/bin/python -m synapse.app.homeserver --config-path=/etc/matrix-synapse/homeserver.yaml --config-path=/etc/matrix-synapse/conf.d/
ExecReload=/bin/kill -HUP $MAINPID
Restart=always
RestartSec=3
SyslogIdentifier=matrix-synapse
[Install]
WantedBy=matrix-synapse.target

View File

@ -0,0 +1,8 @@
[Unit]
Description=Synapse parent target
After=network.target
Requires=postgresql@11-main.service
Requires=redis-server.service
[Install]
WantedBy=multi-user.target
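# With the main service and the worker template hooked into this target via
# their [Install] sections, the whole stack restarts as one group (sketch):
#   systemctl restart matrix-synapse.target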

View File

@ -0,0 +1,13 @@
[Unit]
Description=matrix-synchrotron-balancer
After=network.target matrix-synapse.service
[Service]
Type=simple
User=synchrotron
WorkingDirectory=/opt/matrix-synchrotron-balancer
ExecStart=/opt/matrix-synchrotron-balancer/matrix-synchrotron-balancer
Restart=always
[Install]
WantedBy=multi-user.target
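# Note (assumption): the balancer only helps if the reverse proxy in front of
# Synapse routes client /sync requests to it rather than to a single
# synchrotron worker; see the project's README for the exact routing.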

View File

@ -0,0 +1,58 @@
#!/usr/bin/env bash
#
# You don't need to stop Synapse.
#
DOMAIN=''
TOKEN=''
file_rooms='/tmp/matrix_rooms.txt'
file_obsolete_rooms='/tmp/matrix_obsolete_rooms.txt'
print_usage() {
printf '%s\n\n' "${0##*/}"
printf 'usage:\n'
printf ' -h print this help\n'
printf ' -d domain\n'
printf ' -t token\n'
printf 'example:\n'
printf ' %s -d "domain.com" -t "token"\n' "${0##*/}"
exit 1
}
while getopts ":d:t:h" opt "${@}"; do
case $opt in
d) DOMAIN="$OPTARG" ;;
t) TOKEN="$OPTARG" ;;
\?) printf 'Invalid option: -%s\n\n' "$OPTARG" ; print_usage ;;
h|*) print_usage ;;
esac
done
if [ -z "$DOMAIN" ] || [ "$DOMAIN" == '-t' ]; then print_usage; fi
if [ -z "$TOKEN" ] || [ "$TOKEN" == '-d' ]; then print_usage; fi
curl -s -X GET -H "Authorization: Bearer $TOKEN" "https://$DOMAIN/_synapse/admin/v1/rooms?limit=10000" | \
jq -r '.rooms[] | .room_id' > $file_rooms
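# Note (hedged): /_synapse/admin/v1/rooms is paginated; on servers with more
# than 10000 rooms, repeat the request with ?from=<next_batch> until the
# response no longer contains next_batch.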
while IFS= read -r room_id
do
curl -s -X GET -H "Authorization: Bearer $TOKEN" "https://$DOMAIN/_synapse/admin/v1/rooms/$room_id/state" | \
jq -r '.state[] | select(.type == "m.room.tombstone") | .room_id' >> $file_obsolete_rooms
done < $file_rooms
printf '\nobsolete rooms:\n'
cat $file_obsolete_rooms
#while IFS= read -r room_id
#do
# printf 'remove room %s\n' "$room_id"
# curl -s -X POST -H "Authorization: Bearer $TOKEN" "https://$DOMAIN/_synapse/admin/v1/rooms/$room_id/delete" \
# -d "{ \"new_room_user_id\": \"@notices:$DOMAIN\", \"room_name\": \"Content Notification\", \"message\": \"remove obsolet room $room_id.\", \"block\": false, \"purge\": true, \"force-purge\": true }"
# printf 'done.\n'
#done < $file_obsolete_rooms
rm $file_rooms $file_obsolete_rooms
#
exit 0

View File

@ -0,0 +1,47 @@
#!/usr/bin/env bash
#
# You don't need to stop Synapse.
#
DOMAIN=''
TOKEN=''
tmp_file='/tmp/matrix_rooms_to_purge.txt'
print_usage() {
printf '%s\n\n' "${0##*/}"
printf 'usage:\n'
printf ' -h print this help\n'
printf ' -d domain\n'
printf ' -t token\n'
printf 'example:\n'
printf ' %s -d "domain.com" -t "token"\n' "${0##*/}"
exit 1
}
while getopts ":d:t:h" opt "${@}"; do
case $opt in
d) DOMAIN="$OPTARG" ;;
t) TOKEN="$OPTARG" ;;
\?) printf 'Invalid option: -%s\n\n' "$OPTARG" ; print_usage ;;
h|*) print_usage ;;
esac
done
if [ -z "$DOMAIN" ] || [ "$DOMAIN" == '-t' ]; then print_usage; fi
if [ -z "$TOKEN" ] || [ "$TOKEN" == '-d' ]; then print_usage; fi
curl -s -X GET -H "Authorization: Bearer $TOKEN" "https://$DOMAIN/_synapse/admin/v1/rooms?limit=10000" | \
jq -r '.rooms[] | select(.joined_local_members == 0) | .room_id' > $tmp_file
while IFS= read -r room_id
do
printf 'remove room %s ' "$room_id"
curl -s -X POST -H "Authorization: Bearer $TOKEN" "https://$DOMAIN/_synapse/admin/v1/purge_room" \
-H "Content-Type: application/json" -d "{ \"room_id\": \"$room_id\" }"
printf ' done.\n'
done < $tmp_file
rm $tmp_file
#
exit 0

View File

@ -0,0 +1,76 @@
#!/bin/sh
#
# You don't need to stop Synapse.
#
# from: https://jo-so.de/2018-03/Matrix.html#state_groups_stateaufrumen
#
# This script uses:
# https://github.com/matrix-org/rust-synapse-compress-state
#
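## build sketch (assumption: a Rust toolchain is installed):
# $ git clone https://github.com/matrix-org/rust-synapse-compress-state
# $ cargo build --release
# then put the resulting synapse-compress-state binary on the PATH of the
# user running this script.
#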
## start task via:
# $ sudo systemd-run --nice=4 -pCPUSchedulingPolicy=batch \
# -pIOSchedulingClass=idle --uid=matrix-synapse --collect \
# --unit=synapse-clear-state ~/bin/synapse-clear-state
#
## watch task-log via:
# $ journalctl -Stoday -u synapse-clear-state -f
#
set -e -u
username='matrix'
db='matrix'
conn_str="host=localhost user=$username dbname=$db application_name=state_compress"
###
export RUST_BACKTRACE=short LC_ALL=C.UTF-8
if which time >/dev/null
then
export TIME='%E elapsed %M rss'
time=time
else
time=
fi
cd "$(mktemp -d)"
echo "Writing SQL files to $PWD"
if [ $# -eq 0 ]
then
set -- $(psql -X -t -A -U "$username" -c 'SELECT room_id FROM state_groups
GROUP BY room_id ORDER BY count(*)' $db)
fi
echo 'Disabling autovacuum on state_groups_state'
psql -X -q -b -U "$username" -c 'alter table state_groups_state set (autovacuum_enabled = false);' $db
no=0
for room in "$@"
do
echo '--------------------'
echo
sql_file=state-compress-$((no += 1)).sql
echo "Writing SQL commands to $sql_file"
$time synapse-compress-state -t -p "$conn_str" -o "$sql_file" -r "$room" -m 1000
if test -s "$sql_file"
then
$time psql -X -q -b -U "$username" -c '\set ON_ERROR_STOP on' -f "$sql_file" $db
else
rm "$sql_file"
fi
done
echo 'Enabling autovacuum on state_groups_state'
psql -X -q -b -U "$username" -c 'alter table state_groups_state set (autovacuum_enabled = true);' $db
echo 'Running VACUUM FULL ANALYZE on state_groups_state ...'
$time psql -X -q -b -U "$username" -c 'VACUUM FULL ANALYZE state_groups_state' $db
echo "All SQL scripts are in $PWD"
exit 0

View File

@ -0,0 +1,17 @@
#!/usr/bin/env bash
[ "$(id -u)" -ne 0 ] && printf 'Please run as root!\n' && exit 1
ver="$1"
[ -z "$ver" ] && printf 'use: %s <version>\n' "${0##*/}" && exit 1
systemctl stop matrix-media.service
wget -O /opt/matrix-media-repo/media_repo-linux-x64 https://github.com/turt2live/matrix-media-repo/releases/download/v"$ver"/media_repo-linux-x64
chmod +x /opt/matrix-media-repo/media_repo-linux-x64
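# Assumption: /opt/matrix-media-repo/media_repo (the ExecStart path in the
# systemd unit) is a symlink to the media_repo-linux-x64 binary fetched above;
# otherwise adjust one of the two paths.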
wget -O /opt/matrix-media-repo/config.sample.yaml https://raw.githubusercontent.com/turt2live/matrix-media-repo/master/config.sample.yaml
systemctl start matrix-media.service
exit 0

45
usr/local/bin/update_riot.sh Executable file
View File

@ -0,0 +1,45 @@
#!/usr/bin/env bash
[[ ! -d /opt/Riot/resources ]] && mkdir -p /opt/Riot/resources
cd /opt/Riot/resources
old_version="v$(cat webapp/version 2>/dev/null)"
version="latest"
if [[ -n "${1:-}" ]]; then
version="tags/$1"
fi
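# Usage sketch: run without arguments to follow the latest release, or pass a
# release tag (assumption: tags look like v1.7.34) to pin a version:
#   update_riot.sh
#   update_riot.sh v1.7.34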
echo "Fetching latest release info from GitHub"
version_info=$(curl -s "https://api.github.com/repositories/39487546/releases/$version")
new_version=$(echo "$version_info" | jq -r '.name')
URL=$(echo "$version_info" | jq -r '.assets[0].browser_download_url')
if [[ "$new_version" == "$old_version" ]]; then
echo "No updates found"
exit
fi
# Remove previous Riot backup
rm -rf webapp.bak
# Create temp directory for new riot
mkdir riot.new
cd riot.new
echo "Downloading Riot $new_version"
curl -L "$URL" -o riot-tmp.tar.gz
echo "Unpacking archive"
tar -xzf riot-tmp.tar.gz --strip-components=1
rm -f riot-tmp.tar.gz
echo "Replacing files"
cd ..
cp -f webapp/config.json riot.new/config.json
# Back up old version and activate new version
mv webapp webapp.bak
mv riot.new webapp
echo "Updated to Riot from $old_version to $new_version"
exit 0