Mirror of https://github.com/glitch-soc/mastodon.git (synced 2024-11-24 00:54:02 -05:00)
* Mark job pods not to use Istio's envoy sidecar

  Istio injects sidecars into pods to implement mTLS between pods. Jobs usually don't know about this, so they don't signal the Envoy process to stop when the job finishes. Since at least one process is still running in the pod, Kubernetes doesn't consider the job to be completed, so it lingers. By adding the `sidecar.istio.io/inject` annotation set to `"false"`, we let Istio know that it should not inject the sidecar. If Istio is not installed, this has no impact.

* Support arbitrary job annotations in the Helm chart

  Rather than focus on Istio, this allows arbitrary annotations for job pods.

* Add in-line documentation for pod/job annotations
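A minimal values override showing how the new `jobAnnotations` value might be used (a sketch, not part of the chart; the file name, release name, and chart path are illustrative, and it assumes Istio sidecar injection is enabled in the target namespace):

```yaml
# my-values.yaml: keep Istio's Envoy sidecar out of job pods only
jobAnnotations:
  sidecar.istio.io/inject: "false"
```

Applied with, for example, `helm upgrade --install mastodon ./mastodon -f my-values.yaml`, deployment-managed pods keep their sidecar (and mTLS) while the short-lived job pods opt out, so they can complete once the main container exits.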
replicaCount: 1

image:
  repository: tootsuite/mastodon
  # https://hub.docker.com/r/tootsuite/mastodon/tags
  #
  # alternatively, use `latest` for the latest release or `edge` for the image
  # built from the most recent commit
  #
  # tag: latest
  tag: v3.5.2
  # use `Always` when using `latest` tag
  pullPolicy: IfNotPresent
mastodon:
  # create an initial administrator user; the password is autogenerated and will
  # have to be reset
  createAdmin:
    enabled: false
    username: not_gargron
    email: not@example.com
  cron:
    # run `tootctl media remove` every week
    removeMedia:
      enabled: true
      schedule: "0 0 * * 0"
  # available locales: https://github.com/tootsuite/mastodon/blob/master/config/application.rb#L43
  locale: en
  local_domain: mastodon.local
  # Use of WEB_DOMAIN requires careful consideration: https://docs.joinmastodon.org/admin/config/#federation
  # You must redirect the path LOCAL_DOMAIN/.well-known/ to WEB_DOMAIN/.well-known/ as described
  # web_domain: mastodon.example.com
  persistence:
    assets:
      # ReadWriteOnce is more widely supported than ReadWriteMany, but limits
      # scalability, since it requires the Rails and Sidekiq pods to run on the
      # same node.
      accessMode: ReadWriteOnce
      resources:
        requests:
          storage: 10Gi
    system:
      accessMode: ReadWriteOnce
      resources:
        requests:
          storage: 100Gi
  s3:
    enabled: false
    access_key: ""
    access_secret: ""
    # you can also specify the name of an existing Secret
    # with keys AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY
    existingSecret: ""
    bucket: ""
    endpoint: https://us-east-1.linodeobjects.com
    hostname: us-east-1.linodeobjects.com
    region: ""
    # If you have a caching proxy, enter its base URL here.
    alias_host: ""
  # these must be set manually; autogenerated keys are rotated on each upgrade
  secrets:
    secret_key_base: ""
    otp_secret: ""
    vapid:
      private_key: ""
      public_key: ""
    # you can also specify the name of an existing Secret
    # with keys SECRET_KEY_BASE and OTP_SECRET and
    # VAPID_PRIVATE_KEY and VAPID_PUBLIC_KEY
    existingSecret: ""
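    # Sketch (not chart-provided; verify against the current Mastodon docs): these values
    # are typically generated with `bundle exec rake secret` (SECRET_KEY_BASE, OTP_SECRET)
    # and `bundle exec rake mastodon:webpush:generate_vapid_key` (VAPID keys). To supply
    # them via an existing Secret instead, create it with matching keys, e.g. (the Secret
    # name is illustrative):
    #   kubectl create secret generic mastodon-secrets \
    #     --from-literal=SECRET_KEY_BASE=... --from-literal=OTP_SECRET=... \
    #     --from-literal=VAPID_PRIVATE_KEY=... --from-literal=VAPID_PUBLIC_KEY=...
    # existingSecret: "mastodon-secrets"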
  sidekiq:
    concurrency: 25
  smtp:
    auth_method: plain
    ca_file: /etc/ssl/certs/ca-certificates.crt
    delivery_method: smtp
    domain:
    enable_starttls_auto: true
    from_address: notifications@example.com
    openssl_verify_mode: peer
    port: 587
    reply_to:
    server: smtp.mailgun.org
    tls: false
    login:
    password:
    # you can also specify the name of an existing Secret
    # with the keys login and password
    existingSecret:
  streaming:
    port: 4000
    # this should be set manually since os.cpus() returns the number of CPUs on
    # the node running the pod, which is unrelated to the resources allocated to
    # the pod by k8s
    workers: 1
    # The base url for streaming can be set if the streaming API is deployed to
    # a different domain/subdomain.
    # base_url: wss://streaming.example.com
  web:
    port: 3000
ingress:
  enabled: true
  annotations:
    kubernetes.io/ingress.class: nginx
    kubernetes.io/tls-acme: "true"
    # cert-manager.io/cluster-issuer: "letsencrypt"
    #
    # ensure that NGINX's upload size matches Mastodon's
    #   for the K8s ingress controller:
    # nginx.ingress.kubernetes.io/proxy-body-size: 40m
    #   for the NGINX ingress controller:
    # nginx.org/client-max-body-size: 40m
  hosts:
    - host: mastodon.local
      paths:
        - path: '/'
  tls:
    - secretName: mastodon-tls
      hosts:
        - mastodon.local
# https://github.com/bitnami/charts/tree/master/bitnami/elasticsearch#parameters
elasticsearch:
  # `false` will disable full-text search
  #
  # if you enable ES after the initial install, you will need to manually run
  # RAILS_ENV=production bundle exec rake chewy:sync
  # (https://docs.joinmastodon.org/admin/optional/elasticsearch/)
  enabled: true
  image:
    tag: 7
# https://github.com/bitnami/charts/tree/master/bitnami/postgresql#parameters
postgresql:
  # disable if you want to use an existing db; in which case the values below
  # must match those of that external postgres instance
  enabled: true
  # postgresqlHostname: preexisting-postgresql
  auth:
    database: mastodon_production
    username: postgres
    # you must set a password; the password generated by the postgresql chart will
    # be rotated on each upgrade:
    # https://github.com/bitnami/charts/tree/master/bitnami/postgresql#upgrade
    password: ""
    # you can also specify the name of an existing Secret
    # with a key of postgres-password set to the password you want
    existingSecret: ""
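  # Sketch of the external-database case described above (values are placeholders,
  # not defaults; the hostname reuses the commented example above):
  # enabled: false
  # postgresqlHostname: preexisting-postgresql
  # auth:
  #   database: mastodon_production
  #   username: postgres
  #   password: "<password of the existing instance>"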

# https://github.com/bitnami/charts/tree/master/bitnami/redis#parameters
redis:
  # you must set a password; the password generated by the redis chart will be
  # rotated on each upgrade:
  password: ""
  # you can also specify the name of an existing Secret
  # with a key of redis-password set to the password you want
  # auth:
  #   existingSecret: ""

service:
  type: ClusterIP
  port: 80
externalAuth:
  oidc:
    # OpenID Connect support is proposed in PR #16221 and awaiting merge.
    enabled: false
    # display_name: "example-label"
    # issuer: https://login.example.space/auth/realms/example-space
    # discovery: true
    # scope: "openid,profile"
    # uid_field: uid
    # client_id: mastodon
    # client_secret: SECRETKEY
    # redirect_uri: https://example.com/auth/auth/openid_connect/callback
    # assume_email_is_verified: true
    # client_auth_method:
    # response_type:
    # response_mode:
    # display:
    # prompt:
    # send_nonce:
    # send_scope_to_token_endpoint:
    # idp_logout_redirect_uri:
    # http_scheme:
    # host:
    # port:
    # jwks_uri:
    # auth_endpoint:
    # token_endpoint:
    # user_info_endpoint:
    # end_session_endpoint:
  saml:
    enabled: false
    # acs_url: http://mastodon.example.com/auth/auth/saml/callback
    # issuer: mastodon
    # idp_sso_target_url: https://login.example.com/auth/realms/example/protocol/saml
    # idp_cert: '-----BEGIN CERTIFICATE-----[your_cert_content]-----END CERTIFICATE-----'
    # idp_cert_fingerprint:
    # name_identifier_format: urn:oasis:names:tc:SAML:1.1:nameid-format:unspecified
    # cert:
    # private_key:
    # want_assertion_signed: true
    # want_assertion_encrypted: true
    # assume_email_is_verified: true
    # uid_attribute: "urn:oid:0.9.2342.19200300.100.1.1"
    # attributes_statements:
    #   uid: "urn:oid:0.9.2342.19200300.100.1.1"
    #   email: "urn:oid:1.3.6.1.4.1.5923.1.1.1.6"
    #   full_name: "urn:oid:2.16.840.1.113730.3.1.241"
    #   first_name: "urn:oid:2.5.4.42"
    #   last_name: "urn:oid:2.5.4.4"
    #   verified:
    #   verified_email:
  oauth_global:
    # Force redirect local login to CAS. Does not function with SAML or LDAP.
    oauth_redirect_at_sign_in: false
  cas:
    enabled: false
    # url: https://sso.myserver.com
    # host: sso.myserver.com
    # port: 443
    # ssl: true
    # validate_url:
    # callback_url:
    # logout_url:
    # login_url:
    # uid_field: 'user'
    # ca_path:
    # disable_ssl_verification: false
    # assume_email_is_verified: true
    # keys:
    #   uid: 'user'
    #   name: 'name'
    #   email: 'email'
    #   nickname: 'nickname'
    #   first_name: 'firstname'
    #   last_name: 'lastname'
    #   location: 'location'
    #   image: 'image'
    #   phone: 'phone'
  pam:
    enabled: false
    # email_domain: example.com
    # default_service: rpam
    # controlled_service: rpam
  ldap:
    enabled: false
    # host: myservice.namespace.svc
    # port: 389
    # method: simple_tls
    # base:
    # bind_on:
    # password:
    # uid: cn
    # mail: mail
    # search_filter: "(|(%{uid}=%{email})(%{mail}=%{email}))"
    # uid_conversion:
    #   enabled: true
    #   search: "., -"
    #   replace: _

# https://github.com/tootsuite/mastodon/blob/master/Dockerfile#L88
#
# if you manually change the UID/GID environment variables, ensure these values
# match:
podSecurityContext:
  runAsUser: 991
  runAsGroup: 991
  fsGroup: 991

securityContext: {}

serviceAccount:
  # Specifies whether a service account should be created
  create: true
  # Annotations to add to the service account
  annotations: {}
  # The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  name: ""

# Kubernetes manages pods for jobs and pods for deployments differently, so you might
# need to apply different annotations to the two different sets of pods. The annotations
# set with podAnnotations will be added to all deployment-managed pods.
podAnnotations: {}

# The annotations set with jobAnnotations will be added to all job pods.
jobAnnotations: {}
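# For example, to keep Istio's Envoy sidecar out of job pods as described in the
# commit above (a sketch; only relevant when Istio sidecar injection is enabled):
# jobAnnotations:
#   sidecar.istio.io/inject: "false"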

resources: {}
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi

autoscaling:
  enabled: false
  minReplicas: 1
  maxReplicas: 100
  targetCPUUtilizationPercentage: 80
  # targetMemoryUtilizationPercentage: 80

nodeSelector: {}

tolerations: []

affinity: {}