From ac3b5b301569e803faa469d0b7a5fccafb577f59 Mon Sep 17 00:00:00 2001 From: Dohyun Lim Date: Tue, 3 Feb 2026 17:34:25 +0900 Subject: [PATCH] first commit --- README.md | 9 + compose/.env | 11 + compose/docker-compose.yml | 94 + compose/mysql/.gitkeep | 0 compose/pgdata/.gitkeep | 0 compose/redis/conf/redis.conf | 2277 +++++++++++++++++ compose/redis/data/.gitkeep | 0 compose/set_mysql_permission.sh | 102 + compose/ssl/certs/.gitkeep | 1 + compose/ssl/letsencrypt/.gitkeep | 1 + config/app-server/gunicorn_uvicorn.conf.py | 256 ++ config/database/mysql/init.sql | 5 + config/database/mysql/my.cnf | 546 ++++ config/database/postgresql/init.sql | 4 + config/database/postgresql/pg_hba.conf | 128 + config/database/postgresql/postgresql.conf | 766 ++++++ config/web-server/conf.d/LICENSE | 21 + config/web-server/conf.d/backup/backend.conf | 104 + config/web-server/conf.d/bad_bot.conf | 563 ++++ .../conf.d/castad_gunicorn_https_ng.conf | 245 ++ config/web-server/nginx_conf.sh | 71 + config/web-server/nginx_conf/nginx.conf | 126 + config/web-server/nginx_https_conf.sh | 71 + config/web-server/proxy_params/proxy_params | 14 + config/web-server/sample_nginx.conf | 73 + config/web-server/sample_nginx_https.conf | 232 ++ docker/gunicorn/Dockerfile | 75 + docker/nginx/Dockerfile | 45 + log/mysql/.gitkeep | 0 log/nginx/.gitkeep | 0 log/postgresql/.gitkeep | 0 log/uvicorn/celery/.gitkeep | 0 log/uvicorn/celerybeat/.gitkeep | 0 script/crontab_gunicorn_set.sh | 3 + script/letsencrypt.sh | 91 + script/logrotate/nginx/nginx | 21 + .../logrotate/uvicorn/celery/uvicorn-celery | 14 + .../uvicorn/celerybeat/uvicorn-celerybeat | 13 + script/logrotate/uvicorn/uvicorn | 14 + www/.gitkeep | 0 40 files changed, 5996 insertions(+) create mode 100644 README.md create mode 100644 compose/.env create mode 100644 compose/docker-compose.yml create mode 100644 compose/mysql/.gitkeep create mode 100644 compose/pgdata/.gitkeep create mode 100644 compose/redis/conf/redis.conf create mode 100644 
compose/redis/data/.gitkeep create mode 100755 compose/set_mysql_permission.sh create mode 100644 compose/ssl/certs/.gitkeep create mode 100644 compose/ssl/letsencrypt/.gitkeep create mode 100644 config/app-server/gunicorn_uvicorn.conf.py create mode 100644 config/database/mysql/init.sql create mode 100644 config/database/mysql/my.cnf create mode 100644 config/database/postgresql/init.sql create mode 100644 config/database/postgresql/pg_hba.conf create mode 100644 config/database/postgresql/postgresql.conf create mode 100644 config/web-server/conf.d/LICENSE create mode 100644 config/web-server/conf.d/backup/backend.conf create mode 100644 config/web-server/conf.d/bad_bot.conf create mode 100644 config/web-server/conf.d/castad_gunicorn_https_ng.conf create mode 100755 config/web-server/nginx_conf.sh create mode 100644 config/web-server/nginx_conf/nginx.conf create mode 100755 config/web-server/nginx_https_conf.sh create mode 100644 config/web-server/proxy_params/proxy_params create mode 100644 config/web-server/sample_nginx.conf create mode 100644 config/web-server/sample_nginx_https.conf create mode 100644 docker/gunicorn/Dockerfile create mode 100644 docker/nginx/Dockerfile create mode 100644 log/mysql/.gitkeep create mode 100644 log/nginx/.gitkeep create mode 100644 log/postgresql/.gitkeep create mode 100644 log/uvicorn/celery/.gitkeep create mode 100644 log/uvicorn/celerybeat/.gitkeep create mode 100755 script/crontab_gunicorn_set.sh create mode 100755 script/letsencrypt.sh create mode 100644 script/logrotate/nginx/nginx create mode 100644 script/logrotate/uvicorn/celery/uvicorn-celery create mode 100644 script/logrotate/uvicorn/celerybeat/uvicorn-celerybeat create mode 100644 script/logrotate/uvicorn/uvicorn create mode 100644 www/.gitkeep diff --git a/README.md b/README.md new file mode 100644 index 0000000..351cd26 --- /dev/null +++ b/README.md @@ -0,0 +1,9 @@ +# aio2o-infrakit + +aio2o의 분산된 인프라 서비스 및 설정을 단일 프로젝트로 통합하여 구축한 범용 인프라 솔루션입니다. 
+ +## 기반 오픈소스 + +이 프로젝트는 임도현 책임연구원의 [devspoon-web](https://github.com/devspoons/devspoon-web) 오픈소스를 기반으로 개발되었습니다. + +# \ No newline at end of file diff --git a/compose/.env b/compose/.env new file mode 100644 index 0000000..eb3fc39 --- /dev/null +++ b/compose/.env @@ -0,0 +1,11 @@ +PROJECT_DIR=o2o-castad-backend +LOG_DRIVER=json-file +LOG_OPT_MAXF=5 +LOG_OPT_MAXS=100m +CELERY_BROKER_URL=redis://redis:6379/3 +FLOWER_ID=admin +FLOWER_PWD=admin +POSTGRES_DB=ado3_dev +POSTGRES_USER=ado3_dev_admin +POSTGRES_PASSWORD=ado31324 +MYSQL_PASSWORD=ado31324 \ No newline at end of file diff --git a/compose/docker-compose.yml b/compose/docker-compose.yml new file mode 100644 index 0000000..ba35659 --- /dev/null +++ b/compose/docker-compose.yml @@ -0,0 +1,94 @@ +services: + webserver: + build: ../docker/nginx/ + logging: + driver: "${LOG_DRIVER}" + options: + max-file: "${LOG_OPT_MAXF}" + max-size: "${LOG_OPT_MAXS}" + working_dir: /application + container_name: nginx-uvicorn-webserver + volumes: + - ../www:/www + - ../script/:/script/ + - ../config/web-server/conf.d/:/etc/nginx/conf.d/ + - ../config/web-server/nginx_conf/nginx.conf:/etc/nginx/nginx.conf + - ../config/web-server/proxy_params:/etc/nginx/proxy_params + - ./ssl/certs/:/etc/ssl/certs/ + - ./ssl/letsencrypt/:/etc/letsencrypt/ + - ../log/:/log/ + - ../script/logrotate/nginx/nginx:/etc/logrotate.d/nginx + ports: + - 80:80 + - 443:443 + # - 8000:8000 + environment: + TZ: "Asia/Seoul" + restart: always + depends_on: + # - frontend-app + - uvicorn-app + + uvicorn-app: + build: ../docker/gunicorn/ + logging: + driver: "${LOG_DRIVER}" + options: + max-file: "${LOG_OPT_MAXF}" + max-size: "${LOG_OPT_MAXS}" + working_dir: /www/${PROJECT_DIR} + container_name: uvicorn-app + ports: + - 8000:8000 + volumes: + - ../www:/www + - ../log:/log + - ../config/app-server/:/uvicorn + - ../script/logrotate/uvicorn/uvicorn:/etc/logrotate.d/uvicorn + command: bash -c "uv pip install --system --no-cache . 
&& playwright install-deps && playwright install && exec gunicorn -c /uvicorn/gunicorn_uvicorn.conf.py " + environment: + TZ: "Asia/Seoul" + restart: always + + redis: + image: redis:latest + logging: + driver: "${LOG_DRIVER}" + options: + max-file: "${LOG_OPT_MAXF}" + max-size: "${LOG_OPT_MAXS}" + container_name: redis_db + ports: + - 6379:6379 + volumes: + - ./redis/data:/data + - ./redis/conf/:/usr/local/etc/redis/ + labels: + - "name=redis" + - "mode=standalone" + environment: + TZ: "Asia/Seoul" + restart: always + command: redis-server /usr/local/etc/redis/redis.conf + + # mysql: + # image: percona/percona-server:latest + # logging: + # driver: "${LOG_DRIVER}" + # options: + # max-file: "${LOG_OPT_MAXF}" + # max-size: "${LOG_OPT_MAXS}" + # container_name: mysql + # environment: + # MYSQL_ROOT_PASSWORD: ${MYSQL_PASSWORD} + # TZ: Asia/Seoul + # ports: + # - "3306:3306" + # volumes: + # - ./mysql:/var/lib/mysql + # - ../config/database/mysql/my.cnf:/etc/my.cnf + # # - ../config/mysql/init.sql:/docker-entrypoint-initdb.d/init.sql:ro + # - ../log/mysql:/var/log/mysql + # restart: always + # profiles: + # - mysql diff --git a/compose/mysql/.gitkeep b/compose/mysql/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/compose/pgdata/.gitkeep b/compose/pgdata/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/compose/redis/conf/redis.conf b/compose/redis/conf/redis.conf new file mode 100644 index 0000000..6479191 --- /dev/null +++ b/compose/redis/conf/redis.conf @@ -0,0 +1,2277 @@ +# Redis configuration file example. 
+# +# Note that in order to read the configuration file, Redis must be +# started with the file path as first argument: +# +# ./redis-server /path/to/redis.conf + +# Note on units: when memory size is needed, it is possible to specify +# it in the usual form of 1k 5GB 4M and so forth: +# +# 1k => 1000 bytes +# 1kb => 1024 bytes +# 1m => 1000000 bytes +# 1mb => 1024*1024 bytes +# 1g => 1000000000 bytes +# 1gb => 1024*1024*1024 bytes +# +# units are case insensitive so 1GB 1Gb 1gB are all the same. + +################################## INCLUDES ################################### + +# Include one or more other config files here. This is useful if you +# have a standard template that goes to all Redis servers but also need +# to customize a few per-server settings. Include files can include +# other files, so use this wisely. +# +# Note that option "include" won't be rewritten by command "CONFIG REWRITE" +# from admin or Redis Sentinel. Since Redis always uses the last processed +# line as value of a configuration directive, you'd better put includes +# at the beginning of this file to avoid overwriting config change at runtime. +# +# If instead you are interested in using includes to override configuration +# options, it is better to use include as the last line. +# +# Included paths may contain wildcards. All files matching the wildcards will +# be included in alphabetical order. +# Note that if an include path contains a wildcards but no files match it when +# the server is started, the include statement will be ignored and no error will +# be emitted. It is safe, therefore, to include wildcard files from empty +# directories. +# +# include /path/to/local.conf +# include /path/to/other.conf +# include /path/to/fragments/*.conf +# + +################################## MODULES ##################################### + +# Load modules at startup. If the server is not able to load modules +# it will abort. It is possible to use multiple loadmodule directives. 
+# +# loadmodule /path/to/my_module.so +# loadmodule /path/to/other_module.so + +################################## NETWORK ##################################### + +# By default, if no "bind" configuration directive is specified, Redis listens +# for connections from all available network interfaces on the host machine. +# It is possible to listen to just one or multiple selected interfaces using +# the "bind" configuration directive, followed by one or more IP addresses. +# Each address can be prefixed by "-", which means that redis will not fail to +# start if the address is not available. Being not available only refers to +# addresses that does not correspond to any network interface. Addresses that +# are already in use will always fail, and unsupported protocols will always BE +# silently skipped. +# +# Examples: +# +# bind 192.168.1.100 10.0.0.1 # listens on two specific IPv4 addresses +# bind 127.0.0.1 ::1 # listens on loopback IPv4 and IPv6 +# bind * -::* # like the default, all available interfaces +# +# ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the +# internet, binding to all the interfaces is dangerous and will expose the +# instance to everybody on the internet. So by default we uncomment the +# following bind directive, that will force Redis to listen only on the +# IPv4 and IPv6 (if available) loopback interface addresses (this means Redis +# will only be able to accept client connections from the same host that it is +# running on). +# +# IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES +# COMMENT OUT THE FOLLOWING LINE. +# +# You will also need to set a password unless you explicitly disable protected +# mode. +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +#bind 127.0.0.1 -::1 +bind 0.0.0.0 + +# By default, outgoing connections (from replica to master, from Sentinel to +# instances, cluster bus, etc.) are not bound to a specific local address. 
In +# most cases, this means the operating system will handle that based on routing +# and the interface through which the connection goes out. +# +# Using bind-source-addr it is possible to configure a specific address to bind +# to, which may also affect how the connection gets routed. +# +# Example: +# +# bind-source-addr 10.0.0.1 + +# Protected mode is a layer of security protection, in order to avoid that +# Redis instances left open on the internet are accessed and exploited. +# +# When protected mode is on and the default user has no password, the server +# only accepts local connections from the IPv4 address (127.0.0.1), IPv6 address +# (::1) or Unix domain sockets. +# +# By default protected mode is enabled. You should disable it only if +# you are sure you want clients from other hosts to connect to Redis +# even if no authentication is configured. +protected-mode no + +# Redis uses default hardened security configuration directives to reduce the +# attack surface on innocent users. Therefore, several sensitive configuration +# directives are immutable, and some potentially-dangerous commands are blocked. +# +# Configuration directives that control files that Redis writes to (e.g., 'dir' +# and 'dbfilename') and that aren't usually modified during runtime +# are protected by making them immutable. +# +# Commands that can increase the attack surface of Redis and that aren't usually +# called by users are blocked by default. +# +# These can be exposed to either all connections or just local ones by setting +# each of the configs listed below to either of these values: +# +# no - Block for any connection (remain immutable) +# yes - Allow for any connection (no protection) +# local - Allow only for local connections. Ones originating from the +# IPv4 address (127.0.0.1), IPv6 address (::1) or Unix domain sockets. 
+# +# enable-protected-configs no +# enable-debug-command no +# enable-module-command no + +# Accept connections on the specified port, default is 6379 (IANA #815344). +# If port 0 is specified Redis will not listen on a TCP socket. +port 6379 + +# TCP listen() backlog. +# +# In high requests-per-second environments you need a high backlog in order +# to avoid slow clients connection issues. Note that the Linux kernel +# will silently truncate it to the value of /proc/sys/net/core/somaxconn so +# make sure to raise both the value of somaxconn and tcp_max_syn_backlog +# in order to get the desired effect. +tcp-backlog 511 + +# Unix socket. +# +# Specify the path for the Unix socket that will be used to listen for +# incoming connections. There is no default, so Redis will not listen +# on a unix socket when not specified. +# +# unixsocket /run/redis.sock +# unixsocketperm 700 + +# Close the connection after a client is idle for N seconds (0 to disable) +timeout 0 + +# TCP keepalive. +# +# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence +# of communication. This is useful for two reasons: +# +# 1) Detect dead peers. +# 2) Force network equipment in the middle to consider the connection to be +# alive. +# +# On Linux, the specified value (in seconds) is the period used to send ACKs. +# Note that to close the connection the double of the time is needed. +# On other kernels the period depends on the kernel configuration. +# +# A reasonable value for this option is 300 seconds, which is the new +# Redis default starting with Redis 3.2.1. +tcp-keepalive 300 + +# Apply OS-specific mechanism to mark the listening socket with the specified +# ID, to support advanced routing and filtering capabilities. +# +# On Linux, the ID represents a connection mark. +# On FreeBSD, the ID represents a socket cookie ID. +# On OpenBSD, the ID represents a route table ID. +# +# The default value is 0, which implies no marking is required. 
+# socket-mark-id 0 + +################################# TLS/SSL ##################################### + +# By default, TLS/SSL is disabled. To enable it, the "tls-port" configuration +# directive can be used to define TLS-listening ports. To enable TLS on the +# default port, use: +# +# port 0 +# tls-port 6379 + +# Configure a X.509 certificate and private key to use for authenticating the +# server to connected clients, masters or cluster peers. These files should be +# PEM formatted. +# +# tls-cert-file redis.crt +# tls-key-file redis.key +# +# If the key file is encrypted using a passphrase, it can be included here +# as well. +# +# tls-key-file-pass secret + +# Normally Redis uses the same certificate for both server functions (accepting +# connections) and client functions (replicating from a master, establishing +# cluster bus connections, etc.). +# +# Sometimes certificates are issued with attributes that designate them as +# client-only or server-only certificates. In that case it may be desired to use +# different certificates for incoming (server) and outgoing (client) +# connections. To do that, use the following directives: +# +# tls-client-cert-file client.crt +# tls-client-key-file client.key +# +# If the key file is encrypted using a passphrase, it can be included here +# as well. +# +# tls-client-key-file-pass secret + +# Configure a DH parameters file to enable Diffie-Hellman (DH) key exchange, +# required by older versions of OpenSSL (<3.0). Newer versions do not require +# this configuration and recommend against it. +# +# tls-dh-params-file redis.dh + +# Configure a CA certificate(s) bundle or directory to authenticate TLS/SSL +# clients and peers. Redis requires an explicit configuration of at least one +# of these, and will not implicitly use the system wide configuration. 
+# +# tls-ca-cert-file ca.crt +# tls-ca-cert-dir /etc/ssl/certs + +# By default, clients (including replica servers) on a TLS port are required +# to authenticate using valid client side certificates. +# +# If "no" is specified, client certificates are not required and not accepted. +# If "optional" is specified, client certificates are accepted and must be +# valid if provided, but are not required. +# +# tls-auth-clients no +# tls-auth-clients optional + +# By default, a Redis replica does not attempt to establish a TLS connection +# with its master. +# +# Use the following directive to enable TLS on replication links. +# +# tls-replication yes + +# By default, the Redis Cluster bus uses a plain TCP connection. To enable +# TLS for the bus protocol, use the following directive: +# +# tls-cluster yes + +# By default, only TLSv1.2 and TLSv1.3 are enabled and it is highly recommended +# that older formally deprecated versions are kept disabled to reduce the attack surface. +# You can explicitly specify TLS versions to support. +# Allowed values are case insensitive and include "TLSv1", "TLSv1.1", "TLSv1.2", +# "TLSv1.3" (OpenSSL >= 1.1.1) or any combination. +# To enable only TLSv1.2 and TLSv1.3, use: +# +# tls-protocols "TLSv1.2 TLSv1.3" + +# Configure allowed ciphers. See the ciphers(1ssl) manpage for more information +# about the syntax of this string. +# +# Note: this configuration applies only to <= TLSv1.2. +# +# tls-ciphers DEFAULT:!MEDIUM + +# Configure allowed TLSv1.3 ciphersuites. See the ciphers(1ssl) manpage for more +# information about the syntax of this string, and specifically for TLSv1.3 +# ciphersuites. +# +# tls-ciphersuites TLS_CHACHA20_POLY1305_SHA256 + +# When choosing a cipher, use the server's preference instead of the client +# preference. By default, the server follows the client's preference. 
+# +# tls-prefer-server-ciphers yes + +# By default, TLS session caching is enabled to allow faster and less expensive +# reconnections by clients that support it. Use the following directive to disable +# caching. +# +# tls-session-caching no + +# Change the default number of TLS sessions cached. A zero value sets the cache +# to unlimited size. The default size is 20480. +# +# tls-session-cache-size 5000 + +# Change the default timeout of cached TLS sessions. The default timeout is 300 +# seconds. +# +# tls-session-cache-timeout 60 + +################################# GENERAL ##################################### + +# By default Redis does not run as a daemon. Use 'yes' if you need it. +# Note that Redis will write a pid file in /var/run/redis.pid when daemonized. +# When Redis is supervised by upstart or systemd, this parameter has no impact. +daemonize no + +# If you run Redis from upstart or systemd, Redis can interact with your +# supervision tree. Options: +# supervised no - no supervision interaction +# supervised upstart - signal upstart by putting Redis into SIGSTOP mode +# requires "expect stop" in your upstart job config +# supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET +# on startup, and updating Redis status on a regular +# basis. +# supervised auto - detect upstart or systemd method based on +# UPSTART_JOB or NOTIFY_SOCKET environment variables +# Note: these supervision methods only signal "process is ready." +# They do not enable continuous pings back to your supervisor. +# +# The default is "no". To run under upstart/systemd, you can simply uncomment +# the line below: +# +# supervised auto + +# If a pid file is specified, Redis writes it where specified at startup +# and removes it at exit. +# +# When the server runs non daemonized, no pid file is created if none is +# specified in the configuration. When the server is daemonized, the pid file +# is used even if not specified, defaulting to "/var/run/redis.pid". 
+# +# Creating a pid file is best effort: if Redis is not able to create it +# nothing bad happens, the server will start and run normally. +# +# Note that on modern Linux systems "/run/redis.pid" is more conforming +# and should be used instead. +pidfile /var/run/redis_6379.pid + +# Specify the server verbosity level. +# This can be one of: +# debug (a lot of information, useful for development/testing) +# verbose (many rarely useful info, but not a mess like the debug level) +# notice (moderately verbose, what you want in production probably) +# warning (only very important / critical messages are logged) +loglevel notice + +# Specify the log file name. Also the empty string can be used to force +# Redis to log on the standard output. Note that if you use standard +# output for logging but daemonize, logs will be sent to /dev/null +logfile "" + +# To enable logging to the system logger, just set 'syslog-enabled' to yes, +# and optionally update the other syslog parameters to suit your needs. +# syslog-enabled no + +# Specify the syslog identity. +# syslog-ident redis + +# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. +# syslog-facility local0 + +# To disable the built in crash log, which will possibly produce cleaner core +# dumps when they are needed, uncomment the following: +# +# crash-log-enabled no + +# To disable the fast memory check that's run as part of the crash log, which +# will possibly let redis terminate sooner, uncomment the following: +# +# crash-memcheck-enabled no + +# Set the number of databases. The default database is DB 0, you can select +# a different one on a per-connection basis using SELECT where +# dbid is a number between 0 and 'databases'-1 +databases 16 + +# By default Redis shows an ASCII art logo only when started to log to the +# standard output and if the standard output is a TTY and syslog logging is +# disabled. Basically this means that normally a logo is displayed only in +# interactive sessions. 
+# +# However it is possible to force the pre-4.0 behavior and always show a +# ASCII art logo in startup logs by setting the following option to yes. +always-show-logo no + +# By default, Redis modifies the process title (as seen in 'top' and 'ps') to +# provide some runtime information. It is possible to disable this and leave +# the process name as executed by setting the following to no. +set-proc-title yes + +# When changing the process title, Redis uses the following template to construct +# the modified title. +# +# Template variables are specified in curly brackets. The following variables are +# supported: +# +# {title} Name of process as executed if parent, or type of child process. +# {listen-addr} Bind address or '*' followed by TCP or TLS port listening on, or +# Unix socket if only that's available. +# {server-mode} Special mode, i.e. "[sentinel]" or "[cluster]". +# {port} TCP port listening on, or 0. +# {tls-port} TLS port listening on, or 0. +# {unixsocket} Unix domain socket listening on, or "". +# {config-file} Name of configuration file used. +# +proc-title-template "{title} {listen-addr} {server-mode}" + +################################ SNAPSHOTTING ################################ + +# Save the DB to disk. +# +# save [ ...] +# +# Redis will save the DB if the given number of seconds elapsed and it +# surpassed the given number of write operations against the DB. +# +# Snapshotting can be completely disabled with a single empty string argument +# as in following example: +# +# save "" +# +# Unless specified otherwise, by default Redis will save the DB: +# * After 3600 seconds (an hour) if at least 1 change was performed +# * After 300 seconds (5 minutes) if at least 100 changes were performed +# * After 60 seconds if at least 10000 changes were performed +# +# You can set these explicitly by uncommenting the following line. 
+# +# save 3600 1 300 100 60 10000 + +# By default Redis will stop accepting writes if RDB snapshots are enabled +# (at least one save point) and the latest background save failed. +# This will make the user aware (in a hard way) that data is not persisting +# on disk properly, otherwise chances are that no one will notice and some +# disaster will happen. +# +# If the background saving process will start working again Redis will +# automatically allow writes again. +# +# However if you have setup your proper monitoring of the Redis server +# and persistence, you may want to disable this feature so that Redis will +# continue to work as usual even if there are problems with disk, +# permissions, and so forth. +stop-writes-on-bgsave-error yes + +# Compress string objects using LZF when dump .rdb databases? +# By default compression is enabled as it's almost always a win. +# If you want to save some CPU in the saving child set it to 'no' but +# the dataset will likely be bigger if you have compressible values or keys. +rdbcompression yes + +# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. +# This makes the format more resistant to corruption but there is a performance +# hit to pay (around 10%) when saving and loading RDB files, so you can disable it +# for maximum performances. +# +# RDB files created with checksum disabled have a checksum of zero that will +# tell the loading code to skip the check. +rdbchecksum yes + +# Enables or disables full sanitization checks for ziplist and listpack etc when +# loading an RDB or RESTORE payload. This reduces the chances of a assertion or +# crash later on while processing commands. +# Options: +# no - Never perform full sanitization +# yes - Always perform full sanitization +# clients - Perform full sanitization only for user connections. +# Excludes: RDB files, RESTORE commands received from the master +# connection, and client connections which have the +# skip-sanitize-payload ACL flag. 
+# The default should be 'clients' but since it currently affects cluster +# resharding via MIGRATE, it is temporarily set to 'no' by default. +# +# sanitize-dump-payload no + +# The filename where to dump the DB +dbfilename dump.rdb + +# Remove RDB files used by replication in instances without persistence +# enabled. By default this option is disabled, however there are environments +# where for regulations or other security concerns, RDB files persisted on +# disk by masters in order to feed replicas, or stored on disk by replicas +# in order to load them for the initial synchronization, should be deleted +# ASAP. Note that this option ONLY WORKS in instances that have both AOF +# and RDB persistence disabled, otherwise is completely ignored. +# +# An alternative (and sometimes better) way to obtain the same effect is +# to use diskless replication on both master and replicas instances. However +# in the case of replicas, diskless is not always an option. +rdb-del-sync-files no + +# The working directory. +# +# The DB will be written inside this directory, with the filename specified +# above using the 'dbfilename' configuration directive. +# +# The Append Only File will also be created inside this directory. +# +# Note that you must specify a directory here, not a file name. +dir ./ + +################################# REPLICATION ################################# + +# Master-Replica replication. Use replicaof to make a Redis instance a copy of +# another Redis server. A few things to understand ASAP about Redis replication. +# +# +------------------+ +---------------+ +# | Master | ---> | Replica | +# | (receive writes) | | (exact copy) | +# +------------------+ +---------------+ +# +# 1) Redis replication is asynchronous, but you can configure a master to +# stop accepting writes if it appears to be not connected with at least +# a given number of replicas. 
+# 2) Redis replicas are able to perform a partial resynchronization with the +# master if the replication link is lost for a relatively small amount of +# time. You may want to configure the replication backlog size (see the next +# sections of this file) with a sensible value depending on your needs. +# 3) Replication is automatic and does not need user intervention. After a +# network partition replicas automatically try to reconnect to masters +# and resynchronize with them. +# +# replicaof + +# If the master is password protected (using the "requirepass" configuration +# directive below) it is possible to tell the replica to authenticate before +# starting the replication synchronization process, otherwise the master will +# refuse the replica request. +# +# masterauth +# +# However this is not enough if you are using Redis ACLs (for Redis version +# 6 or greater), and the default user is not capable of running the PSYNC +# command and/or other commands needed for replication. In this case it's +# better to configure a special user to use with replication, and specify the +# masteruser configuration as such: +# +# masteruser +# +# When masteruser is specified, the replica will authenticate against its +# master using the new AUTH form: AUTH . + +# When a replica loses its connection with the master, or when the replication +# is still in progress, the replica can act in two different ways: +# +# 1) if replica-serve-stale-data is set to 'yes' (the default) the replica will +# still reply to client requests, possibly with out of date data, or the +# data set may just be empty if this is the first synchronization. 
+# +# 2) If replica-serve-stale-data is set to 'no' the replica will reply with error +# "MASTERDOWN Link with MASTER is down and replica-serve-stale-data is set to 'no'" +# to all data access commands, excluding commands such as: +# INFO, REPLICAOF, AUTH, SHUTDOWN, REPLCONF, ROLE, CONFIG, SUBSCRIBE, +# UNSUBSCRIBE, PSUBSCRIBE, PUNSUBSCRIBE, PUBLISH, PUBSUB, COMMAND, POST, +# HOST and LATENCY. +# +replica-serve-stale-data yes + +# You can configure a replica instance to accept writes or not. Writing against +# a replica instance may be useful to store some ephemeral data (because data +# written on a replica will be easily deleted after resync with the master) but +# may also cause problems if clients are writing to it because of a +# misconfiguration. +# +# Since Redis 2.6 by default replicas are read-only. +# +# Note: read only replicas are not designed to be exposed to untrusted clients +# on the internet. It's just a protection layer against misuse of the instance. +# Still a read only replica exports by default all the administrative commands +# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve +# security of read only replicas using 'rename-command' to shadow all the +# administrative / dangerous commands. +replica-read-only yes + +# Replication SYNC strategy: disk or socket. +# +# New replicas and reconnecting replicas that are not able to continue the +# replication process just receiving differences, need to do what is called a +# "full synchronization". An RDB file is transmitted from the master to the +# replicas. +# +# The transmission can happen in two different ways: +# +# 1) Disk-backed: The Redis master creates a new process that writes the RDB +# file on disk. Later the file is transferred by the parent +# process to the replicas incrementally. +# 2) Diskless: The Redis master creates a new process that directly writes the +# RDB file to replica sockets, without touching the disk at all. 
+# +# With disk-backed replication, while the RDB file is generated, more replicas +# can be queued and served with the RDB file as soon as the current child +# producing the RDB file finishes its work. With diskless replication instead +# once the transfer starts, new replicas arriving will be queued and a new +# transfer will start when the current one terminates. +# +# When diskless replication is used, the master waits a configurable amount of +# time (in seconds) before starting the transfer in the hope that multiple +# replicas will arrive and the transfer can be parallelized. +# +# With slow disks and fast (large bandwidth) networks, diskless replication +# works better. +repl-diskless-sync yes + +# When diskless replication is enabled, it is possible to configure the delay +# the server waits in order to spawn the child that transfers the RDB via socket +# to the replicas. +# +# This is important since once the transfer starts, it is not possible to serve +# new replicas arriving, that will be queued for the next RDB transfer, so the +# server waits a delay in order to let more replicas arrive. +# +# The delay is specified in seconds, and by default is 5 seconds. To disable +# it entirely just set it to 0 seconds and the transfer will start ASAP. +repl-diskless-sync-delay 5 + +# When diskless replication is enabled with a delay, it is possible to let +# the replication start before the maximum delay is reached if the maximum +# number of replicas expected have connected. Default of 0 means that the +# maximum is not defined and Redis will wait the full delay. +repl-diskless-sync-max-replicas 0 + +# ----------------------------------------------------------------------------- +# WARNING: RDB diskless load is experimental. Since in this setup the replica +# does not immediately store an RDB on disk, it may cause data loss during +# failovers. 
RDB diskless load + Redis modules not handling I/O reads may also +# cause Redis to abort in case of I/O errors during the initial synchronization +# stage with the master. Use only if you know what you are doing. +# ----------------------------------------------------------------------------- +# +# Replica can load the RDB it reads from the replication link directly from the +# socket, or store the RDB to a file and read that file after it was completely +# received from the master. +# +# In many cases the disk is slower than the network, and storing and loading +# the RDB file may increase replication time (and even increase the master's +# Copy on Write memory and replica buffers). +# However, parsing the RDB file directly from the socket may mean that we have +# to flush the contents of the current database before the full rdb was +# received. For this reason we have the following options: +# +# "disabled" - Don't use diskless load (store the rdb file to the disk first) +# "on-empty-db" - Use diskless load only when it is completely safe. +# "swapdb" - Keep current db contents in RAM while parsing the data directly +# from the socket. Replicas in this mode can keep serving current +# data set while replication is in progress, except for cases where +# they can't recognize master as having a data set from same +# replication history. +# Note that this requires sufficient memory, if you don't have it, +# you risk an OOM kill. +repl-diskless-load disabled + +# Master send PINGs to its replicas in a predefined interval. It's possible to +# change this interval with the repl_ping_replica_period option. The default +# value is 10 seconds. +# +# repl-ping-replica-period 10 + +# The following option sets the replication timeout for: +# +# 1) Bulk transfer I/O during SYNC, from the point of view of replica. +# 2) Master timeout from the point of view of replicas (data, pings). +# 3) Replica timeout from the point of view of masters (REPLCONF ACK pings). 
+# +# It is important to make sure that this value is greater than the value +# specified for repl-ping-replica-period otherwise a timeout will be detected +# every time there is low traffic between the master and the replica. The default +# value is 60 seconds. +# +# repl-timeout 60 + +# Disable TCP_NODELAY on the replica socket after SYNC? +# +# If you select "yes" Redis will use a smaller number of TCP packets and +# less bandwidth to send data to replicas. But this can add a delay for +# the data to appear on the replica side, up to 40 milliseconds with +# Linux kernels using a default configuration. +# +# If you select "no" the delay for data to appear on the replica side will +# be reduced but more bandwidth will be used for replication. +# +# By default we optimize for low latency, but in very high traffic conditions +# or when the master and replicas are many hops away, turning this to "yes" may +# be a good idea. +repl-disable-tcp-nodelay no + +# Set the replication backlog size. The backlog is a buffer that accumulates +# replica data when replicas are disconnected for some time, so that when a +# replica wants to reconnect again, often a full resync is not needed, but a +# partial resync is enough, just passing the portion of data the replica +# missed while disconnected. +# +# The bigger the replication backlog, the longer the replica can endure the +# disconnect and later be able to perform a partial resynchronization. +# +# The backlog is only allocated if there is at least one replica connected. +# +# repl-backlog-size 1mb + +# After a master has no connected replicas for some time, the backlog will be +# freed. The following option configures the amount of seconds that need to +# elapse, starting from the time the last replica disconnected, for the backlog +# buffer to be freed. 
+# +# Note that replicas never free the backlog for timeout, since they may be +# promoted to masters later, and should be able to correctly "partially +# resynchronize" with other replicas: hence they should always accumulate backlog. +# +# A value of 0 means to never release the backlog. +# +# repl-backlog-ttl 3600 + +# The replica priority is an integer number published by Redis in the INFO +# output. It is used by Redis Sentinel in order to select a replica to promote +# into a master if the master is no longer working correctly. +# +# A replica with a low priority number is considered better for promotion, so +# for instance if there are three replicas with priority 10, 100, 25 Sentinel +# will pick the one with priority 10, that is the lowest. +# +# However a special priority of 0 marks the replica as not able to perform the +# role of master, so a replica with priority of 0 will never be selected by +# Redis Sentinel for promotion. +# +# By default the priority is 100. +replica-priority 100 + +# The propagation error behavior controls how Redis will behave when it is +# unable to handle a command being processed in the replication stream from a master +# or processed while reading from an AOF file. Errors that occur during propagation +# are unexpected, and can cause data inconsistency. However, there are edge cases +# in earlier versions of Redis where it was possible for the server to replicate or persist +# commands that would fail on future versions. For this reason the default behavior +# is to ignore such errors and continue processing commands. +# +# If an application wants to ensure there is no data divergence, this configuration +# should be set to 'panic' instead. The value can also be set to 'panic-on-replicas' +# to only panic when a replica encounters an error on the replication stream. 
One of +# these two panic values will become the default value in the future once there are +# sufficient safety mechanisms in place to prevent false positive crashes. +# +# propagation-error-behavior ignore + +# Replica ignore disk write errors controls the behavior of a replica when it is +# unable to persist a write command received from its master to disk. By default, +# this configuration is set to 'no' and will crash the replica in this condition. +# It is not recommended to change this default, however in order to be compatible +# with older versions of Redis this config can be toggled to 'yes' which will just +# log a warning and execute the write command it got from the master. +# +# replica-ignore-disk-write-errors no + +# ----------------------------------------------------------------------------- +# By default, Redis Sentinel includes all replicas in its reports. A replica +# can be excluded from Redis Sentinel's announcements. An unannounced replica +# will be ignored by the 'sentinel replicas ' command and won't be +# exposed to Redis Sentinel's clients. +# +# This option does not change the behavior of replica-priority. Even with +# replica-announced set to 'no', the replica can be promoted to master. To +# prevent this behavior, set replica-priority to 0. +# +# replica-announced yes + +# It is possible for a master to stop accepting writes if there are less than +# N replicas connected, having a lag less or equal than M seconds. +# +# The N replicas need to be in "online" state. +# +# The lag in seconds, that must be <= the specified value, is calculated from +# the last ping received from the replica, that is usually sent every second. +# +# This option does not GUARANTEE that N replicas will accept the write, but +# will limit the window of exposure for lost writes in case not enough replicas +# are available, to the specified number of seconds. 
+# +# For example to require at least 3 replicas with a lag <= 10 seconds use: +# +# min-replicas-to-write 3 +# min-replicas-max-lag 10 +# +# Setting one or the other to 0 disables the feature. +# +# By default min-replicas-to-write is set to 0 (feature disabled) and +# min-replicas-max-lag is set to 10. + +# A Redis master is able to list the address and port of the attached +# replicas in different ways. For example the "INFO replication" section +# offers this information, which is used, among other tools, by +# Redis Sentinel in order to discover replica instances. +# Another place where this info is available is in the output of the +# "ROLE" command of a master. +# +# The listed IP address and port normally reported by a replica is +# obtained in the following way: +# +# IP: The address is auto detected by checking the peer address +# of the socket used by the replica to connect with the master. +# +# Port: The port is communicated by the replica during the replication +# handshake, and is normally the port that the replica is using to +# listen for connections. +# +# However when port forwarding or Network Address Translation (NAT) is +# used, the replica may actually be reachable via different IP and port +# pairs. The following two options can be used by a replica in order to +# report to its master a specific set of IP and port, so that both INFO +# and ROLE will report those values. +# +# There is no need to use both the options if you need to override just +# the port or the IP address. +# +# replica-announce-ip 5.5.5.5 +# replica-announce-port 1234 + +############################### KEYS TRACKING ################################# + +# Redis implements server assisted support for client side caching of values. +# This is implemented using an invalidation table that remembers, using +# a radix key indexed by key name, what clients have which keys. In turn +# this is used in order to send invalidation messages to clients. 
Please +# check this page to understand more about the feature: +# +# https://redis.io/topics/client-side-caching +# +# When tracking is enabled for a client, all the read only queries are assumed +# to be cached: this will force Redis to store information in the invalidation +# table. When keys are modified, such information is flushed away, and +# invalidation messages are sent to the clients. However if the workload is +# heavily dominated by reads, Redis could use more and more memory in order +# to track the keys fetched by many clients. +# +# For this reason it is possible to configure a maximum fill value for the +# invalidation table. By default it is set to 1M of keys, and once this limit +# is reached, Redis will start to evict keys in the invalidation table +# even if they were not modified, just to reclaim memory: this will in turn +# force the clients to invalidate the cached values. Basically the table +# maximum size is a trade off between the memory you want to spend server +# side to track information about who cached what, and the ability of clients +# to retain cached objects in memory. +# +# If you set the value to 0, it means there are no limits, and Redis will +# retain as many keys as needed in the invalidation table. +# In the "stats" INFO section, you can find information about the number of +# keys in the invalidation table at every given moment. +# +# Note: when key tracking is used in broadcasting mode, no memory is used +# in the server side so this setting is useless. +# +# tracking-table-max-keys 1000000 + +################################## SECURITY ################################### + +# Warning: since Redis is pretty fast, an outside user can try up to +# 1 million passwords per second against a modern box. This means that you +# should use very strong passwords, otherwise they will be very easy to break. 
+# Note that because the password is really a shared secret between the client
+# and the server, and should not be memorized by any human, the password
+# can be easily a long string from /dev/urandom or whatever, so by using a
+# long and unguessable password no brute force attack will be possible.
+
+# Redis ACL users are defined in the following format:
+#
+# user <username> ... acl rules ...
+#
+# For example:
+#
+# user worker +@list +@connection ~jobs:* on >ffa9203c493aa99
+#
+# The special username "default" is used for new connections. If this user
+# has the "nopass" rule, then new connections will be immediately authenticated
+# as the "default" user without the need of any password provided via the
+# AUTH command. Otherwise if the "default" user is not flagged with "nopass"
+# the connections will start in not authenticated state, and will require
+# AUTH (or the HELLO command AUTH option) in order to be authenticated and
+# start to work.
+#
+# The ACL rules that describe what a user can do are the following:
+#
+# on Enable the user: it is possible to authenticate as this user.
+# off Disable the user: it's no longer possible to authenticate
+# with this user, however the already authenticated connections
+# will still work.
+# skip-sanitize-payload RESTORE dump-payload sanitization is skipped.
+# sanitize-payload RESTORE dump-payload is sanitized (default).
+# +<command> Allow the execution of that command.
+# May be used with `|` for allowing subcommands (e.g "+config|get")
+# -<command> Disallow the execution of that command.
+# May be used with `|` for blocking subcommands (e.g "-config|set")
+# +@<category> Allow the execution of all the commands in such category
+# with valid categories are like @admin, @set, @sortedset, ...
+# and so forth, see the full list in the server.c file where
+# the Redis command table is described and defined.
+# The special category @all means all the commands, but currently
+# present in the server, and that will be loaded in the future
+# via modules.
+#
+# +<command>|first-arg Allow a specific first argument of an otherwise
+# disabled command. It is only supported on commands with
+# no sub-commands, and is not allowed as negative form
+# like -SELECT|1, only additive starting with "+". This
+# feature is deprecated and may be removed in the future.
+# allcommands Alias for +@all. Note that it implies the ability to execute
+# all the future commands loaded via the modules system.
+# nocommands Alias for -@all.
+# ~<pattern> Add a pattern of keys that can be mentioned as part of
+# commands. For instance ~* allows all the keys. The pattern
+# is a glob-style pattern like the one of KEYS.
+# It is possible to specify multiple patterns.
+# %R~<pattern> Add key read pattern that specifies which keys can be read
+# from.
+# %W~<pattern> Add key write pattern that specifies which keys can be
+# written to.
+# allkeys Alias for ~*
+# resetkeys Flush the list of allowed keys patterns.
+# &<pattern> Add a glob-style pattern of Pub/Sub channels that can be
+# accessed by the user. It is possible to specify multiple channel
+# patterns.
+# allchannels Alias for &*
+# resetchannels Flush the list of allowed channel patterns.
+# ><password> Add this password to the list of valid password for the user.
+# For example >mypass will add "mypass" to the list.
+# This directive clears the "nopass" flag (see later).
+# <<password> Remove this password from the list of valid passwords.
+# nopass All the set passwords of the user are removed, and the user
+# is flagged as requiring no password: it means that every
+# password will work against this user. If this directive is
+# used for the default user, every new connection will be
+# immediately authenticated with the default user without
+# any explicit AUTH command required. Note that the "resetpass"
+# directive will clear this condition.
+# resetpass Flush the list of allowed passwords. Moreover removes the
+# "nopass" status.
After "resetpass" the user has no associated +# passwords and there is no way to authenticate without adding +# some password (or setting it as "nopass" later). +# reset Performs the following actions: resetpass, resetkeys, off, +# -@all. The user returns to the same state it has immediately +# after its creation. +# () Create a new selector with the options specified within the +# parentheses and attach it to the user. Each option should be +# space separated. The first character must be ( and the last +# character must be ). +# clearselectors Remove all of the currently attached selectors. +# Note this does not change the "root" user permissions, +# which are the permissions directly applied onto the +# user (outside the parentheses). +# +# ACL rules can be specified in any order: for instance you can start with +# passwords, then flags, or key patterns. However note that the additive +# and subtractive rules will CHANGE MEANING depending on the ordering. +# For instance see the following example: +# +# user alice on +@all -DEBUG ~* >somepassword +# +# This will allow "alice" to use all the commands with the exception of the +# DEBUG command, since +@all added all the commands to the set of the commands +# alice can use, and later DEBUG was removed. However if we invert the order +# of two ACL rules the result will be different: +# +# user alice on -DEBUG +@all ~* >somepassword +# +# Now DEBUG was removed when alice had yet no commands in the set of allowed +# commands, later all the commands are added, so the user will be able to +# execute everything. +# +# Basically ACL rules are processed left-to-right. +# +# The following is a list of command categories and their meanings: +# * keyspace - Writing or reading from keys, databases, or their metadata +# in a type agnostic way. Includes DEL, RESTORE, DUMP, RENAME, EXISTS, DBSIZE, +# KEYS, EXPIRE, TTL, FLUSHALL, etc. Commands that may modify the keyspace, +# key or metadata will also have `write` category. 
Commands that only read +# the keyspace, key or metadata will have the `read` category. +# * read - Reading from keys (values or metadata). Note that commands that don't +# interact with keys, will not have either `read` or `write`. +# * write - Writing to keys (values or metadata) +# * admin - Administrative commands. Normal applications will never need to use +# these. Includes REPLICAOF, CONFIG, DEBUG, SAVE, MONITOR, ACL, SHUTDOWN, etc. +# * dangerous - Potentially dangerous (each should be considered with care for +# various reasons). This includes FLUSHALL, MIGRATE, RESTORE, SORT, KEYS, +# CLIENT, DEBUG, INFO, CONFIG, SAVE, REPLICAOF, etc. +# * connection - Commands affecting the connection or other connections. +# This includes AUTH, SELECT, COMMAND, CLIENT, ECHO, PING, etc. +# * blocking - Potentially blocking the connection until released by another +# command. +# * fast - Fast O(1) commands. May loop on the number of arguments, but not the +# number of elements in the key. +# * slow - All commands that are not Fast. +# * pubsub - PUBLISH / SUBSCRIBE related +# * transaction - WATCH / MULTI / EXEC related commands. +# * scripting - Scripting related. +# * set - Data type: sets related. +# * sortedset - Data type: zsets related. +# * list - Data type: lists related. +# * hash - Data type: hashes related. +# * string - Data type: strings related. +# * bitmap - Data type: bitmaps related. +# * hyperloglog - Data type: hyperloglog related. +# * geo - Data type: geo related. +# * stream - Data type: streams related. +# +# For more information about ACL configuration please refer to +# the Redis web site at https://redis.io/topics/acl + +# ACL LOG +# +# The ACL Log tracks failed commands and authentication events associated +# with ACLs. The ACL Log is useful to troubleshoot failed commands blocked +# by ACLs. The ACL Log is stored in memory. You can reclaim memory with +# ACL LOG RESET. Define the maximum entry length of the ACL Log below. 
+acllog-max-len 128
+
+# Using an external ACL file
+#
+# Instead of configuring users here in this file, it is possible to use
+# a stand-alone file just listing users. The two methods cannot be mixed:
+# if you configure users here and at the same time you activate the external
+# ACL file, the server will refuse to start.
+#
+# The format of the external ACL user file is exactly the same as the
+# format that is used inside redis.conf to describe users.
+#
+# aclfile /etc/redis/users.acl
+
+# IMPORTANT NOTE: starting with Redis 6 "requirepass" is just a compatibility
+# layer on top of the new ACL system. The option effect will be just setting
+# the password for the default user. Clients will still authenticate using
+# AUTH <password> as usually, or more explicitly with AUTH default <password>
+# if they follow the new protocol: both will work.
+#
+# The requirepass is not compatible with aclfile option and the ACL LOAD
+# command, these will cause requirepass to be ignored.
+#
+# requirepass foobared
+
+# New users are initialized with restrictive permissions by default, via the
+# equivalent of this ACL rule 'off resetkeys -@all'. Starting with Redis 6.2, it
+# is possible to manage access to Pub/Sub channels with ACL rules as well. The
+# default Pub/Sub channels permission for new users is controlled by the
+# acl-pubsub-default configuration directive, which accepts one of these values:
+#
+# allchannels: grants access to all Pub/Sub channels
+# resetchannels: revokes access to all Pub/Sub channels
+#
+# From Redis 7.0, acl-pubsub-default defaults to 'resetchannels' permission.
+#
+# acl-pubsub-default resetchannels
+
+# Command renaming (DEPRECATED).
+#
+# ------------------------------------------------------------------------
+# WARNING: avoid using this option if possible. Instead use ACLs to remove
+# commands from the default user, and put them only in some admin user you
+# create for administrative purposes.
+# ------------------------------------------------------------------------ +# +# It is possible to change the name of dangerous commands in a shared +# environment. For instance the CONFIG command may be renamed into something +# hard to guess so that it will still be available for internal-use tools +# but not available for general clients. +# +# Example: +# +# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 +# +# It is also possible to completely kill a command by renaming it into +# an empty string: +# +# rename-command CONFIG "" +# +# Please note that changing the name of commands that are logged into the +# AOF file or transmitted to replicas may cause problems. + +################################### CLIENTS #################################### + +# Set the max number of connected clients at the same time. By default +# this limit is set to 10000 clients, however if the Redis server is not +# able to configure the process file limit to allow for the specified limit +# the max number of allowed clients is set to the current file limit +# minus 32 (as Redis reserves a few file descriptors for internal uses). +# +# Once the limit is reached Redis will close all the new connections sending +# an error 'max number of clients reached'. +# +# IMPORTANT: When Redis Cluster is used, the max number of connections is also +# shared with the cluster bus: every node in the cluster will use two +# connections, one incoming and another outgoing. It is important to size the +# limit accordingly in case of very large clusters. +# +# maxclients 10000 + +############################## MEMORY MANAGEMENT ################################ + +# Set a memory usage limit to the specified amount of bytes. +# When the memory limit is reached Redis will try to remove keys +# according to the eviction policy selected (see maxmemory-policy). 
+# +# If Redis can't remove keys according to the policy, or if the policy is +# set to 'noeviction', Redis will start to reply with errors to commands +# that would use more memory, like SET, LPUSH, and so on, and will continue +# to reply to read-only commands like GET. +# +# This option is usually useful when using Redis as an LRU or LFU cache, or to +# set a hard memory limit for an instance (using the 'noeviction' policy). +# +# WARNING: If you have replicas attached to an instance with maxmemory on, +# the size of the output buffers needed to feed the replicas are subtracted +# from the used memory count, so that network problems / resyncs will +# not trigger a loop where keys are evicted, and in turn the output +# buffer of replicas is full with DELs of keys evicted triggering the deletion +# of more keys, and so forth until the database is completely emptied. +# +# In short... if you have replicas attached it is suggested that you set a lower +# limit for maxmemory so that there is some free RAM on the system for replica +# output buffers (but this is not needed if the policy is 'noeviction'). +# +# maxmemory + +# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory +# is reached. You can select one from the following behaviors: +# +# volatile-lru -> Evict using approximated LRU, only keys with an expire set. +# allkeys-lru -> Evict any key using approximated LRU. +# volatile-lfu -> Evict using approximated LFU, only keys with an expire set. +# allkeys-lfu -> Evict any key using approximated LFU. +# volatile-random -> Remove a random key having an expire set. +# allkeys-random -> Remove a random key, any key. +# volatile-ttl -> Remove the key with the nearest expire time (minor TTL) +# noeviction -> Don't evict anything, just return an error on write operations. +# +# LRU means Least Recently Used +# LFU means Least Frequently Used +# +# Both LRU, LFU and volatile-ttl are implemented using approximated +# randomized algorithms. 
+# +# Note: with any of the above policies, when there are no suitable keys for +# eviction, Redis will return an error on write operations that require +# more memory. These are usually commands that create new keys, add data or +# modify existing keys. A few examples are: SET, INCR, HSET, LPUSH, SUNIONSTORE, +# SORT (due to the STORE argument), and EXEC (if the transaction includes any +# command that requires memory). +# +# The default is: +# +# maxmemory-policy noeviction + +# LRU, LFU and minimal TTL algorithms are not precise algorithms but approximated +# algorithms (in order to save memory), so you can tune it for speed or +# accuracy. By default Redis will check five keys and pick the one that was +# used least recently, you can change the sample size using the following +# configuration directive. +# +# The default of 5 produces good enough results. 10 Approximates very closely +# true LRU but costs more CPU. 3 is faster but not very accurate. +# +# maxmemory-samples 5 + +# Eviction processing is designed to function well with the default setting. +# If there is an unusually large amount of write traffic, this value may need to +# be increased. Decreasing this value may reduce latency at the risk of +# eviction processing effectiveness +# 0 = minimum latency, 10 = default, 100 = process without regard to latency +# +# maxmemory-eviction-tenacity 10 + +# Starting from Redis 5, by default a replica will ignore its maxmemory setting +# (unless it is promoted to master after a failover or manually). It means +# that the eviction of keys will be just handled by the master, sending the +# DEL commands to the replica as keys evict in the master side. 
+# +# This behavior ensures that masters and replicas stay consistent, and is usually +# what you want, however if your replica is writable, or you want the replica +# to have a different memory setting, and you are sure all the writes performed +# to the replica are idempotent, then you may change this default (but be sure +# to understand what you are doing). +# +# Note that since the replica by default does not evict, it may end using more +# memory than the one set via maxmemory (there are certain buffers that may +# be larger on the replica, or data structures may sometimes take more memory +# and so forth). So make sure you monitor your replicas and make sure they +# have enough memory to never hit a real out-of-memory condition before the +# master hits the configured maxmemory setting. +# +# replica-ignore-maxmemory yes + +# Redis reclaims expired keys in two ways: upon access when those keys are +# found to be expired, and also in background, in what is called the +# "active expire key". The key space is slowly and interactively scanned +# looking for expired keys to reclaim, so that it is possible to free memory +# of keys that are expired and will never be accessed again in a short time. +# +# The default effort of the expire cycle will try to avoid having more than +# ten percent of expired keys still in memory, and will try to avoid consuming +# more than 25% of total memory and to add latency to the system. However +# it is possible to increase the expire "effort" that is normally set to +# "1", to a greater value, up to the value "10". At its maximum value the +# system will use more CPU, longer cycles (and technically may introduce +# more latency), and will tolerate less already expired keys still present +# in the system. It's a tradeoff between memory, CPU and latency. +# +# active-expire-effort 1 + +############################# LAZY FREEING #################################### + +# Redis has two primitives to delete keys. 
One is called DEL and is a blocking +# deletion of the object. It means that the server stops processing new commands +# in order to reclaim all the memory associated with an object in a synchronous +# way. If the key deleted is associated with a small object, the time needed +# in order to execute the DEL command is very small and comparable to most other +# O(1) or O(log_N) commands in Redis. However if the key is associated with an +# aggregated value containing millions of elements, the server can block for +# a long time (even seconds) in order to complete the operation. +# +# For the above reasons Redis also offers non blocking deletion primitives +# such as UNLINK (non blocking DEL) and the ASYNC option of FLUSHALL and +# FLUSHDB commands, in order to reclaim memory in background. Those commands +# are executed in constant time. Another thread will incrementally free the +# object in the background as fast as possible. +# +# DEL, UNLINK and ASYNC option of FLUSHALL and FLUSHDB are user-controlled. +# It's up to the design of the application to understand when it is a good +# idea to use one or the other. However the Redis server sometimes has to +# delete keys or flush the whole database as a side effect of other operations. +# Specifically Redis deletes objects independently of a user call in the +# following scenarios: +# +# 1) On eviction, because of the maxmemory and maxmemory policy configurations, +# in order to make room for new data, without going over the specified +# memory limit. +# 2) Because of expire: when a key with an associated time to live (see the +# EXPIRE command) must be deleted from memory. +# 3) Because of a side effect of a command that stores data on a key that may +# already exist. For example the RENAME command may delete the old key +# content when it is replaced with another one. Similarly SUNIONSTORE +# or SORT with STORE option may delete existing keys. 
The SET command +# itself removes any old content of the specified key in order to replace +# it with the specified string. +# 4) During replication, when a replica performs a full resynchronization with +# its master, the content of the whole database is removed in order to +# load the RDB file just transferred. +# +# In all the above cases the default is to delete objects in a blocking way, +# like if DEL was called. However you can configure each case specifically +# in order to instead release memory in a non-blocking way like if UNLINK +# was called, using the following configuration directives. + +lazyfree-lazy-eviction no +lazyfree-lazy-expire no +lazyfree-lazy-server-del no +replica-lazy-flush no + +# It is also possible, for the case when to replace the user code DEL calls +# with UNLINK calls is not easy, to modify the default behavior of the DEL +# command to act exactly like UNLINK, using the following configuration +# directive: + +lazyfree-lazy-user-del no + +# FLUSHDB, FLUSHALL, SCRIPT FLUSH and FUNCTION FLUSH support both asynchronous and synchronous +# deletion, which can be controlled by passing the [SYNC|ASYNC] flags into the +# commands. When neither flag is passed, this directive will be used to determine +# if the data should be deleted asynchronously. + +lazyfree-lazy-user-flush no + +################################ THREADED I/O ################################# + +# Redis is mostly single threaded, however there are certain threaded +# operations such as UNLINK, slow I/O accesses and other things that are +# performed on side threads. +# +# Now it is also possible to handle Redis clients socket reads and writes +# in different I/O threads. Since especially writing is so slow, normally +# Redis users use pipelining in order to speed up the Redis performances per +# core, and spawn multiple instances in order to scale more. 
Using I/O +# threads it is possible to easily speedup two times Redis without resorting +# to pipelining nor sharding of the instance. +# +# By default threading is disabled, we suggest enabling it only in machines +# that have at least 4 or more cores, leaving at least one spare core. +# Using more than 8 threads is unlikely to help much. We also recommend using +# threaded I/O only if you actually have performance problems, with Redis +# instances being able to use a quite big percentage of CPU time, otherwise +# there is no point in using this feature. +# +# So for instance if you have a four cores boxes, try to use 2 or 3 I/O +# threads, if you have a 8 cores, try to use 6 threads. In order to +# enable I/O threads use the following configuration directive: +# +# io-threads 4 +# +# Setting io-threads to 1 will just use the main thread as usual. +# When I/O threads are enabled, we only use threads for writes, that is +# to thread the write(2) syscall and transfer the client buffers to the +# socket. However it is also possible to enable threading of reads and +# protocol parsing using the following configuration directive, by setting +# it to yes: +# +# io-threads-do-reads no +# +# Usually threading reads doesn't help much. +# +# NOTE 1: This configuration directive cannot be changed at runtime via +# CONFIG SET. Also, this feature currently does not work when SSL is +# enabled. +# +# NOTE 2: If you want to test the Redis speedup using redis-benchmark, make +# sure you also run the benchmark itself in threaded mode, using the +# --threads option to match the number of Redis threads, otherwise you'll not +# be able to notice the improvements. + +############################ KERNEL OOM CONTROL ############################## + +# On Linux, it is possible to hint the kernel OOM killer on what processes +# should be killed first when out of memory. 
+# +# Enabling this feature makes Redis actively control the oom_score_adj value +# for all its processes, depending on their role. The default scores will +# attempt to have background child processes killed before all others, and +# replicas killed before masters. +# +# Redis supports these options: +# +# no: Don't make changes to oom-score-adj (default). +# yes: Alias to "relative" see below. +# absolute: Values in oom-score-adj-values are written as is to the kernel. +# relative: Values are used relative to the initial value of oom_score_adj when +# the server starts and are then clamped to a range of -1000 to 1000. +# Because typically the initial value is 0, they will often match the +# absolute values. +oom-score-adj no + +# When oom-score-adj is used, this directive controls the specific values used +# for master, replica and background child processes. Values range -2000 to +# 2000 (higher means more likely to be killed). +# +# Unprivileged processes (not root, and without CAP_SYS_RESOURCE capabilities) +# can freely increase their value, but not decrease it below its initial +# settings. This means that setting oom-score-adj to "relative" and setting the +# oom-score-adj-values to positive values will always succeed. +oom-score-adj-values 0 200 800 + + +#################### KERNEL transparent hugepage CONTROL ###################### + +# Usually the kernel Transparent Huge Pages control is set to "madvise" or +# "never" by default (/sys/kernel/mm/transparent_hugepage/enabled), in which +# case this config has no effect. On systems in which it is set to "always", +# redis will attempt to disable it specifically for the redis process in order +# to avoid latency problems specifically with fork(2) and CoW. +# If for some reason you prefer to keep it enabled, you can set this config to +# "no" and the kernel global to "always". 
+ +disable-thp yes + +############################## APPEND ONLY MODE ############################### + +# By default Redis asynchronously dumps the dataset on disk. This mode is +# good enough in many applications, but an issue with the Redis process or +# a power outage may result into a few minutes of writes lost (depending on +# the configured save points). +# +# The Append Only File is an alternative persistence mode that provides +# much better durability. For instance using the default data fsync policy +# (see later in the config file) Redis can lose just one second of writes in a +# dramatic event like a server power outage, or a single write if something +# wrong with the Redis process itself happens, but the operating system is +# still running correctly. +# +# AOF and RDB persistence can be enabled at the same time without problems. +# If the AOF is enabled on startup Redis will load the AOF, that is the file +# with the better durability guarantees. +# +# Please check https://redis.io/topics/persistence for more information. + +appendonly no + +# The base name of the append only file. +# +# Redis 7 and newer use a set of append-only files to persist the dataset +# and changes applied to it. There are two basic types of files in use: +# +# - Base files, which are a snapshot representing the complete state of the +# dataset at the time the file was created. Base files can be either in +# the form of RDB (binary serialized) or AOF (textual commands). +# - Incremental files, which contain additional commands that were applied +# to the dataset following the previous file. +# +# In addition, manifest files are used to track the files and the order in +# which they were created and should be applied. +# +# Append-only file names are created by Redis following a specific pattern. +# The file name's prefix is based on the 'appendfilename' configuration +# parameter, followed by additional information about the sequence and type. 
+# +# For example, if appendfilename is set to appendonly.aof, the following file +# names could be derived: +# +# - appendonly.aof.1.base.rdb as a base file. +# - appendonly.aof.1.incr.aof, appendonly.aof.2.incr.aof as incremental files. +# - appendonly.aof.manifest as a manifest file. + +appendfilename "appendonly.aof" + +# For convenience, Redis stores all persistent append-only files in a dedicated +# directory. The name of the directory is determined by the appenddirname +# configuration parameter. + +appenddirname "appendonlydir" + +# The fsync() call tells the Operating System to actually write data on disk +# instead of waiting for more data in the output buffer. Some OS will really flush +# data on disk, some other OS will just try to do it ASAP. +# +# Redis supports three different modes: +# +# no: don't fsync, just let the OS flush the data when it wants. Faster. +# always: fsync after every write to the append only log. Slow, Safest. +# everysec: fsync only one time every second. Compromise. +# +# The default is "everysec", as that's usually the right compromise between +# speed and data safety. It's up to you to understand if you can relax this to +# "no" that will let the operating system flush the output buffer when +# it wants, for better performances (but if you can live with the idea of +# some data loss consider the default persistence mode that's snapshotting), +# or on the contrary, use "always" that's very slow but a bit safer than +# everysec. +# +# More details please check the following article: +# http://antirez.com/post/redis-persistence-demystified.html +# +# If unsure, use "everysec". + +# appendfsync always +appendfsync everysec +# appendfsync no + +# When the AOF fsync policy is set to always or everysec, and a background +# saving process (a background save or AOF log background rewriting) is +# performing a lot of I/O against the disk, in some Linux configurations +# Redis may block too long on the fsync() call. 
Note that there is no fix for +# this currently, as even performing fsync in a different thread will block +# our synchronous write(2) call. +# +# In order to mitigate this problem it's possible to use the following option +# that will prevent fsync() from being called in the main process while a +# BGSAVE or BGREWRITEAOF is in progress. +# +# This means that while another child is saving, the durability of Redis is +# the same as "appendfsync no". In practical terms, this means that it is +# possible to lose up to 30 seconds of log in the worst scenario (with the +# default Linux settings). +# +# If you have latency problems turn this to "yes". Otherwise leave it as +# "no" that is the safest pick from the point of view of durability. + +no-appendfsync-on-rewrite no + +# Automatic rewrite of the append only file. +# Redis is able to automatically rewrite the log file implicitly calling +# BGREWRITEAOF when the AOF log size grows by the specified percentage. +# +# This is how it works: Redis remembers the size of the AOF file after the +# latest rewrite (if no rewrite has happened since the restart, the size of +# the AOF at startup is used). +# +# This base size is compared to the current size. If the current size is +# bigger than the specified percentage, the rewrite is triggered. Also +# you need to specify a minimal size for the AOF file to be rewritten, this +# is useful to avoid rewriting the AOF file even if the percentage increase +# is reached but it is still pretty small. +# +# Specify a percentage of zero in order to disable the automatic AOF +# rewrite feature. + +auto-aof-rewrite-percentage 100 +auto-aof-rewrite-min-size 64mb + +# An AOF file may be found to be truncated at the end during the Redis +# startup process, when the AOF data gets loaded back into memory. 
+# This may happen when the system where Redis is running +# crashes, especially when an ext4 filesystem is mounted without the +# data=ordered option (however this can't happen when Redis itself +# crashes or aborts but the operating system still works correctly). +# +# Redis can either exit with an error when this happens, or load as much +# data as possible (the default now) and start if the AOF file is found +# to be truncated at the end. The following option controls this behavior. +# +# If aof-load-truncated is set to yes, a truncated AOF file is loaded and +# the Redis server starts emitting a log to inform the user of the event. +# Otherwise if the option is set to no, the server aborts with an error +# and refuses to start. When the option is set to no, the user requires +# to fix the AOF file using the "redis-check-aof" utility before to restart +# the server. +# +# Note that if the AOF file will be found to be corrupted in the middle +# the server will still exit with an error. This option only applies when +# Redis will try to read more data from the AOF file but not enough bytes +# will be found. +aof-load-truncated yes + +# Redis can create append-only base files in either RDB or AOF formats. Using +# the RDB format is always faster and more efficient, and disabling it is only +# supported for backward compatibility purposes. +aof-use-rdb-preamble yes + +# Redis supports recording timestamp annotations in the AOF to support restoring +# the data from a specific point-in-time. However, using this capability changes +# the AOF format in a way that may not be compatible with existing AOF parsers. +aof-timestamp-enabled no + +################################ SHUTDOWN ##################################### + +# Maximum time to wait for replicas when shutting down, in seconds. +# +# During shut down, a grace period allows any lagging replicas to catch up with +# the latest replication offset before the master exits. 
This period can +# prevent data loss, especially for deployments without configured disk backups. +# +# The 'shutdown-timeout' value is the grace period's duration in seconds. It is +# only applicable when the instance has replicas. To disable the feature, set +# the value to 0. +# +# shutdown-timeout 10 + +# When Redis receives a SIGINT or SIGTERM, shutdown is initiated and by default +# an RDB snapshot is written to disk in a blocking operation if save points are configured. +# The options used on signaled shutdown can include the following values: +# default: Saves RDB snapshot only if save points are configured. +# Waits for lagging replicas to catch up. +# save: Forces a DB saving operation even if no save points are configured. +# nosave: Prevents DB saving operation even if one or more save points are configured. +# now: Skips waiting for lagging replicas. +# force: Ignores any errors that would normally prevent the server from exiting. +# +# Any combination of values is allowed as long as "save" and "nosave" are not set simultaneously. +# Example: "nosave force now" +# +# shutdown-on-sigint default +# shutdown-on-sigterm default + +################ NON-DETERMINISTIC LONG BLOCKING COMMANDS ##################### + +# Maximum time in milliseconds for EVAL scripts, functions and in some cases +# modules' commands before Redis can start processing or rejecting other clients. +# +# If the maximum execution time is reached Redis will start to reply to most +# commands with a BUSY error. +# +# In this state Redis will only allow a handful of commands to be executed. +# For instance, SCRIPT KILL, FUNCTION KILL, SHUTDOWN NOSAVE and possibly some +# module specific 'allow-busy' commands. 
+# +# SCRIPT KILL and FUNCTION KILL will only be able to stop a script that did not +# yet call any write commands, so SHUTDOWN NOSAVE may be the only way to stop +# the server in the case a write command was already issued by the script when +# the user doesn't want to wait for the natural termination of the script. +# +# The default is 5 seconds. It is possible to set it to 0 or a negative value +# to disable this mechanism (uninterrupted execution). Note that in the past +# this config had a different name, which is now an alias, so both of these do +# the same: +# lua-time-limit 5000 +# busy-reply-threshold 5000 + +################################ REDIS CLUSTER ############################### + +# Normal Redis instances can't be part of a Redis Cluster; only nodes that are +# started as cluster nodes can. In order to start a Redis instance as a +# cluster node enable the cluster support uncommenting the following: +# +# cluster-enabled yes + +# Every cluster node has a cluster configuration file. This file is not +# intended to be edited by hand. It is created and updated by Redis nodes. +# Every Redis Cluster node requires a different cluster configuration file. +# Make sure that instances running in the same system do not have +# overlapping cluster configuration file names. +# +# cluster-config-file nodes-6379.conf + +# Cluster node timeout is the amount of milliseconds a node must be unreachable +# for it to be considered in failure state. +# Most other internal time limits are a multiple of the node timeout. +# +# cluster-node-timeout 15000 + +# The cluster port is the port that the cluster bus will listen for inbound connections on. When set +# to the default value, 0, it will be bound to the command port + 10000. Setting this value requires +# you to specify the cluster bus port when executing cluster meet. +# cluster-port 0 + +# A replica of a failing master will avoid to start a failover if its data +# looks too old. 
+# +# There is no simple way for a replica to actually have an exact measure of +# its "data age", so the following two checks are performed: +# +# 1) If there are multiple replicas able to failover, they exchange messages +# in order to try to give an advantage to the replica with the best +# replication offset (more data from the master processed). +# Replicas will try to get their rank by offset, and apply to the start +# of the failover a delay proportional to their rank. +# +# 2) Every single replica computes the time of the last interaction with +# its master. This can be the last ping or command received (if the master +# is still in the "connected" state), or the time that elapsed since the +# disconnection with the master (if the replication link is currently down). +# If the last interaction is too old, the replica will not try to failover +# at all. +# +# The point "2" can be tuned by user. Specifically a replica will not perform +# the failover if, since the last interaction with the master, the time +# elapsed is greater than: +# +# (node-timeout * cluster-replica-validity-factor) + repl-ping-replica-period +# +# So for example if node-timeout is 30 seconds, and the cluster-replica-validity-factor +# is 10, and assuming a default repl-ping-replica-period of 10 seconds, the +# replica will not try to failover if it was not able to talk with the master +# for longer than 310 seconds. +# +# A large cluster-replica-validity-factor may allow replicas with too old data to failover +# a master, while a too small value may prevent the cluster from being able to +# elect a replica at all. +# +# For maximum availability, it is possible to set the cluster-replica-validity-factor +# to a value of 0, which means, that replicas will always try to failover the +# master regardless of the last time they interacted with the master. +# (However they'll always try to apply a delay proportional to their +# offset rank). 
+# +# Zero is the only value able to guarantee that when all the partitions heal +# the cluster will always be able to continue. +# +# cluster-replica-validity-factor 10 + +# Cluster replicas are able to migrate to orphaned masters, that are masters +# that are left without working replicas. This improves the cluster ability +# to resist to failures as otherwise an orphaned master can't be failed over +# in case of failure if it has no working replicas. +# +# Replicas migrate to orphaned masters only if there are still at least a +# given number of other working replicas for their old master. This number +# is the "migration barrier". A migration barrier of 1 means that a replica +# will migrate only if there is at least 1 other working replica for its master +# and so forth. It usually reflects the number of replicas you want for every +# master in your cluster. +# +# Default is 1 (replicas migrate only if their masters remain with at least +# one replica). To disable migration just set it to a very large value or +# set cluster-allow-replica-migration to 'no'. +# A value of 0 can be set but is useful only for debugging and dangerous +# in production. +# +# cluster-migration-barrier 1 + +# Turning off this option allows to use less automatic cluster configuration. +# It both disables migration to orphaned masters and migration from masters +# that became empty. +# +# Default is 'yes' (allow automatic migrations). +# +# cluster-allow-replica-migration yes + +# By default Redis Cluster nodes stop accepting queries if they detect there +# is at least a hash slot uncovered (no available node is serving it). +# This way if the cluster is partially down (for example a range of hash slots +# are no longer covered) all the cluster becomes, eventually, unavailable. +# It automatically returns available as soon as all the slots are covered again. 
+# +# However sometimes you want the subset of the cluster which is working, +# to continue to accept queries for the part of the key space that is still +# covered. In order to do so, just set the cluster-require-full-coverage +# option to no. +# +# cluster-require-full-coverage yes + +# This option, when set to yes, prevents replicas from trying to failover its +# master during master failures. However the replica can still perform a +# manual failover, if forced to do so. +# +# This is useful in different scenarios, especially in the case of multiple +# data center operations, where we want one side to never be promoted if not +# in the case of a total DC failure. +# +# cluster-replica-no-failover no + +# This option, when set to yes, allows nodes to serve read traffic while the +# cluster is in a down state, as long as it believes it owns the slots. +# +# This is useful for two cases. The first case is for when an application +# doesn't require consistency of data during node failures or network partitions. +# One example of this is a cache, where as long as the node has the data it +# should be able to serve it. +# +# The second use case is for configurations that don't meet the recommended +# three shards but want to enable cluster mode and scale later. A +# master outage in a 1 or 2 shard configuration causes a read/write outage to the +# entire cluster without this option set, with it set there is only a write outage. +# Without a quorum of masters, slot ownership will not change automatically. +# +# cluster-allow-reads-when-down no + +# This option, when set to yes, allows nodes to serve pubsub shard traffic while +# the cluster is in a down state, as long as it believes it owns the slots. +# +# This is useful if the application would like to use the pubsub feature even when +# the cluster global stable state is not OK. If the application wants to make sure only +# one shard is serving a given channel, this feature should be kept as yes. 
+# +# cluster-allow-pubsubshard-when-down yes + +# Cluster link send buffer limit is the limit on the memory usage of an individual +# cluster bus link's send buffer in bytes. Cluster links would be freed if they exceed +# this limit. This is to primarily prevent send buffers from growing unbounded on links +# toward slow peers (E.g. PubSub messages being piled up). +# This limit is disabled by default. Enable this limit when 'mem_cluster_links' INFO field +# and/or 'send-buffer-allocated' entries in the 'CLUSTER LINKS' command output continuously increase. +# Minimum limit of 1gb is recommended so that cluster link buffer can fit in at least a single +# PubSub message by default. (client-query-buffer-limit default value is 1gb) +# +# cluster-link-sendbuf-limit 0 + +# Clusters can configure their announced hostname using this config. This is a common use case for +# applications that need to use TLS Server Name Indication (SNI) or dealing with DNS based +# routing. By default this value is only shown as additional metadata in the CLUSTER SLOTS +# command, but can be changed using 'cluster-preferred-endpoint-type' config. This value is +# communicated along the clusterbus to all nodes, setting it to an empty string will remove +# the hostname and also propagate the removal. +# +# cluster-announce-hostname "" + +# Clusters can advertise how clients should connect to them using either their IP address, +# a user defined hostname, or by declaring they have no endpoint. Which endpoint is +# shown as the preferred endpoint is set by using the cluster-preferred-endpoint-type +# config with values 'ip', 'hostname', or 'unknown-endpoint'. This value controls how +# the endpoint returned for MOVED/ASKING requests as well as the first field of CLUSTER SLOTS. +# If the preferred endpoint type is set to hostname, but no announced hostname is set, a '?' +# will be returned instead. 
+# +# When a cluster advertises itself as having an unknown endpoint, it's indicating that +# the server doesn't know how clients can reach the cluster. This can happen in certain +# networking situations where there are multiple possible routes to the node, and the +# server doesn't know which one the client took. In this case, the server is expecting +# the client to reach out on the same endpoint it used for making the last request, but use +# the port provided in the response. +# +# cluster-preferred-endpoint-type ip + +# In order to setup your cluster make sure to read the documentation +# available at https://redis.io web site. + +########################## CLUSTER DOCKER/NAT support ######################## + +# In certain deployments, Redis Cluster nodes address discovery fails, because +# addresses are NAT-ted or because ports are forwarded (the typical case is +# Docker and other containers). +# +# In order to make Redis Cluster working in such environments, a static +# configuration where each node knows its public address is needed. The +# following four options are used for this scope, and are: +# +# * cluster-announce-ip +# * cluster-announce-port +# * cluster-announce-tls-port +# * cluster-announce-bus-port +# +# Each instructs the node about its address, client ports (for connections +# without and with TLS) and cluster message bus port. The information is then +# published in the header of the bus packets so that other nodes will be able to +# correctly map the address of the node publishing the information. +# +# If cluster-tls is set to yes and cluster-announce-tls-port is omitted or set +# to zero, then cluster-announce-port refers to the TLS port. Note also that +# cluster-announce-tls-port has no effect if cluster-tls is set to no. +# +# If the above options are not used, the normal Redis Cluster auto-detection +# will be used instead. 
+# +# Note that when remapped, the bus port may not be at the fixed offset of +# clients port + 10000, so you can specify any port and bus-port depending +# on how they get remapped. If the bus-port is not set, a fixed offset of +# 10000 will be used as usual. +# +# Example: +# +# cluster-announce-ip 10.1.1.5 +# cluster-announce-tls-port 6379 +# cluster-announce-port 0 +# cluster-announce-bus-port 6380 + +################################## SLOW LOG ################################### + +# The Redis Slow Log is a system to log queries that exceeded a specified +# execution time. The execution time does not include the I/O operations +# like talking with the client, sending the reply and so forth, +# but just the time needed to actually execute the command (this is the only +# stage of command execution where the thread is blocked and can not serve +# other requests in the meantime). +# +# You can configure the slow log with two parameters: one tells Redis +# what is the execution time, in microseconds, to exceed in order for the +# command to get logged, and the other parameter is the length of the +# slow log. When a new command is logged the oldest one is removed from the +# queue of logged commands. + +# The following time is expressed in microseconds, so 1000000 is equivalent +# to one second. Note that a negative number disables the slow log, while +# a value of zero forces the logging of every command. +slowlog-log-slower-than 10000 + +# There is no limit to this length. Just be aware that it will consume memory. +# You can reclaim memory used by the slow log with SLOWLOG RESET. +slowlog-max-len 128 + +################################ LATENCY MONITOR ############################## + +# The Redis latency monitoring subsystem samples different operations +# at runtime in order to collect data related to possible sources of +# latency of a Redis instance. 
+# +# Via the LATENCY command this information is available to the user that can +# print graphs and obtain reports. +# +# The system only logs operations that were performed in a time equal or +# greater than the amount of milliseconds specified via the +# latency-monitor-threshold configuration directive. When its value is set +# to zero, the latency monitor is turned off. +# +# By default latency monitoring is disabled since it is mostly not needed +# if you don't have latency issues, and collecting data has a performance +# impact, that while very small, can be measured under big load. Latency +# monitoring can easily be enabled at runtime using the command +# "CONFIG SET latency-monitor-threshold <milliseconds>" if needed. +latency-monitor-threshold 0 + +################################ LATENCY TRACKING ############################## + +# The Redis extended latency monitoring tracks the per command latencies and enables +# exporting the percentile distribution via the INFO latencystats command, +# and cumulative latency distributions (histograms) via the LATENCY command. +# +# By default, the extended latency monitoring is enabled since the overhead +# of keeping track of the command latency is very small. +# latency-tracking yes + +# By default the exported latency percentiles via the INFO latencystats command +# are the p50, p99, and p999. +# latency-tracking-info-percentiles 50 99 99.9 + +############################# EVENT NOTIFICATION ############################## + +# Redis can notify Pub/Sub clients about events happening in the key space. +# This feature is documented at https://redis.io/topics/notifications +# +# For instance if keyspace events notification is enabled, and a client +# performs a DEL operation on key "foo" stored in the Database 0, two +# messages will be published via Pub/Sub: +# +# PUBLISH __keyspace@0__:foo del +# PUBLISH __keyevent@0__:del foo +# +# It is possible to select the events that Redis will notify among a set +# of classes. 
Every class is identified by a single character: +# +# K Keyspace events, published with __keyspace@<db>__ prefix. +# E Keyevent events, published with __keyevent@<db>__ prefix. +# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ... +# $ String commands +# l List commands +# s Set commands +# h Hash commands +# z Sorted set commands +# x Expired events (events generated every time a key expires) +# e Evicted events (events generated when a key is evicted for maxmemory) +# n New key events (Note: not included in the 'A' class) +# t Stream commands +# d Module key type events +# m Key-miss events (Note: It is not included in the 'A' class) +# A Alias for g$lshzxetd, so that the "AKE" string means all the events +# (Except key-miss events which are excluded from 'A' due to their +# unique nature). +# +# The "notify-keyspace-events" takes as argument a string that is composed +# of zero or multiple characters. The empty string means that notifications +# are disabled. +# +# Example: to enable list and generic events, from the point of view of the +# event name, use: +# +# notify-keyspace-events Elg +# +# Example 2: to get the stream of the expired keys subscribing to channel +# name __keyevent@0__:expired use: +# +# notify-keyspace-events Ex +# +# By default all notifications are disabled because most users don't need +# this feature and the feature has some overhead. Note that if you don't +# specify at least one of K or E, no events will be delivered. +notify-keyspace-events "" + +############################### ADVANCED CONFIG ############################### + +# Hashes are encoded using a memory efficient data structure when they have a +# small number of entries, and the biggest entry does not exceed a given +# threshold. These thresholds can be configured using the following directives. +hash-max-listpack-entries 512 +hash-max-listpack-value 64 + +# Lists are also encoded in a special way to save a lot of space. 
+# The number of entries allowed per internal list node can be specified +# as a fixed maximum size or a maximum number of elements. +# For a fixed maximum size, use -5 through -1, meaning: +# -5: max size: 64 Kb <-- not recommended for normal workloads +# -4: max size: 32 Kb <-- not recommended +# -3: max size: 16 Kb <-- probably not recommended +# -2: max size: 8 Kb <-- good +# -1: max size: 4 Kb <-- good +# Positive numbers mean store up to _exactly_ that number of elements +# per list node. +# The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size), +# but if your use case is unique, adjust the settings as necessary. +list-max-listpack-size -2 + +# Lists may also be compressed. +# Compress depth is the number of quicklist ziplist nodes from *each* side of +# the list to *exclude* from compression. The head and tail of the list +# are always uncompressed for fast push/pop operations. Settings are: +# 0: disable all list compression +# 1: depth 1 means "don't start compressing until after 1 node into the list, +# going from either the head or tail" +# So: [head]->node->node->...->node->[tail] +# [head], [tail] will always be uncompressed; inner nodes will compress. +# 2: [head]->[next]->node->node->...->node->[prev]->[tail] +# 2 here means: don't compress head or head->next or tail->prev or tail, +# but compress all nodes between them. +# 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail] +# etc. +list-compress-depth 0 + +# Sets have a special encoding in just one case: when a set is composed +# of just strings that happen to be integers in radix 10 in the range +# of 64 bit signed integers. +# The following configuration setting sets the limit in the size of the +# set in order to use this special memory saving encoding. +set-max-intset-entries 512 + +# Similarly to hashes and lists, sorted sets are also specially encoded in +# order to save a lot of space. 
This encoding is only used when the length and +# elements of a sorted set are below the following limits: +zset-max-listpack-entries 128 +zset-max-listpack-value 64 + +# HyperLogLog sparse representation bytes limit. The limit includes the +# 16 bytes header. When an HyperLogLog using the sparse representation crosses +# this limit, it is converted into the dense representation. +# +# A value greater than 16000 is totally useless, since at that point the +# dense representation is more memory efficient. +# +# The suggested value is ~ 3000 in order to have the benefits of +# the space efficient encoding without slowing down too much PFADD, +# which is O(N) with the sparse encoding. The value can be raised to +# ~ 10000 when CPU is not a concern, but space is, and the data set is +# composed of many HyperLogLogs with cardinality in the 0 - 15000 range. +hll-sparse-max-bytes 3000 + +# Streams macro node max size / items. The stream data structure is a radix +# tree of big nodes that encode multiple items inside. Using this configuration +# it is possible to configure how big a single node can be in bytes, and the +# maximum number of items it may contain before switching to a new node when +# appending new stream entries. If any of the following settings are set to +# zero, the limit is ignored, so for instance it is possible to set just a +# max entries limit by setting max-bytes to 0 and max-entries to the desired +# value. +stream-node-max-bytes 4096 +stream-node-max-entries 100 + +# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in +# order to help rehashing the main Redis hash table (the one mapping top-level +# keys to values). The hash table implementation Redis uses (see dict.c) +# performs a lazy rehashing: the more operation you run into a hash table +# that is rehashing, the more rehashing "steps" are performed, so if the +# server is idle the rehashing is never complete and some more memory is used +# by the hash table. 
+# +# The default is to use this millisecond 10 times every second in order to +# actively rehash the main dictionaries, freeing memory when possible. +# +# If unsure: +# use "activerehashing no" if you have hard latency requirements and it is +# not a good thing in your environment that Redis can reply from time to time +# to queries with 2 milliseconds delay. +# +# use "activerehashing yes" if you don't have such hard requirements but +# want to free memory asap when possible. +activerehashing yes + +# The client output buffer limits can be used to force disconnection of clients +# that are not reading data from the server fast enough for some reason (a +# common reason is that a Pub/Sub client can't consume messages as fast as the +# publisher can produce them). +# +# The limit can be set differently for the three different classes of clients: +# +# normal -> normal clients including MONITOR clients +# replica -> replica clients +# pubsub -> clients subscribed to at least one pubsub channel or pattern +# +# The syntax of every client-output-buffer-limit directive is the following: +# +# client-output-buffer-limit +# +# A client is immediately disconnected once the hard limit is reached, or if +# the soft limit is reached and remains reached for the specified number of +# seconds (continuously). +# So for instance if the hard limit is 32 megabytes and the soft limit is +# 16 megabytes / 10 seconds, the client will get disconnected immediately +# if the size of the output buffers reach 32 megabytes, but will also get +# disconnected if the client reaches 16 megabytes and continuously overcomes +# the limit for 10 seconds. +# +# By default normal clients are not limited because they don't receive data +# without asking (in a push way), but just after a request, so only +# asynchronous clients may create a scenario where data is requested faster +# than it can read. 
+# +# Instead there is a default limit for pubsub and replica clients, since +# subscribers and replicas receive data in a push fashion. +# +# Note that it doesn't make sense to set the replica clients output buffer +# limit lower than the repl-backlog-size config (partial sync will succeed +# and then replica will get disconnected). +# Such a configuration is ignored (the size of repl-backlog-size will be used). +# This doesn't have memory consumption implications since the replica client +# will share the backlog buffers memory. +# +# Both the hard or the soft limit can be disabled by setting them to zero. +client-output-buffer-limit normal 0 0 0 +client-output-buffer-limit replica 256mb 64mb 60 +client-output-buffer-limit pubsub 32mb 8mb 60 + +# Client query buffers accumulate new commands. They are limited to a fixed +# amount by default in order to avoid that a protocol desynchronization (for +# instance due to a bug in the client) will lead to unbound memory usage in +# the query buffer. However you can configure it here if you have very special +# needs, such us huge multi/exec requests or alike. +# +# client-query-buffer-limit 1gb + +# In some scenarios client connections can hog up memory leading to OOM +# errors or data eviction. To avoid this we can cap the accumulated memory +# used by all client connections (all pubsub and normal clients). Once we +# reach that limit connections will be dropped by the server freeing up +# memory. The server will attempt to drop the connections using the most +# memory first. We call this mechanism "client eviction". +# +# Client eviction is configured using the maxmemory-clients setting as follows: +# 0 - client eviction is disabled (default) +# +# A memory value can be used for the client eviction threshold, +# for example: +# maxmemory-clients 1g +# +# A percentage value (between 1% and 100%) means the client eviction threshold +# is based on a percentage of the maxmemory setting. 
For example to set client +# eviction at 5% of maxmemory: +# maxmemory-clients 5% + +# In the Redis protocol, bulk requests, that are, elements representing single +# strings, are normally limited to 512 mb. However you can change this limit +# here, but must be 1mb or greater +# +# proto-max-bulk-len 512mb + +# Redis calls an internal function to perform many background tasks, like +# closing connections of clients in timeout, purging expired keys that are +# never requested, and so forth. +# +# Not all tasks are performed with the same frequency, but Redis checks for +# tasks to perform according to the specified "hz" value. +# +# By default "hz" is set to 10. Raising the value will use more CPU when +# Redis is idle, but at the same time will make Redis more responsive when +# there are many keys expiring at the same time, and timeouts may be +# handled with more precision. +# +# The range is between 1 and 500, however a value over 100 is usually not +# a good idea. Most users should use the default of 10 and raise this up to +# 100 only in environments where very low latency is required. +hz 10 + +# Normally it is useful to have an HZ value which is proportional to the +# number of clients connected. This is useful in order, for instance, to +# avoid too many clients are processed for each background task invocation +# in order to avoid latency spikes. +# +# Since the default HZ value by default is conservatively set to 10, Redis +# offers, and enables by default, the ability to use an adaptive HZ value +# which will temporarily raise when there are many connected clients. +# +# When dynamic HZ is enabled, the actual configured HZ will be used +# as a baseline, but multiples of the configured HZ value will be actually +# used as needed once more clients are connected. In this way an idle +# instance will use very little CPU time while a busy instance will be +# more responsive. 
+dynamic-hz yes + +# When a child rewrites the AOF file, if the following option is enabled +# the file will be fsync-ed every 4 MB of data generated. This is useful +# in order to commit the file to the disk more incrementally and avoid +# big latency spikes. +aof-rewrite-incremental-fsync yes + +# When redis saves RDB file, if the following option is enabled +# the file will be fsync-ed every 4 MB of data generated. This is useful +# in order to commit the file to the disk more incrementally and avoid +# big latency spikes. +rdb-save-incremental-fsync yes + +# Redis LFU eviction (see maxmemory setting) can be tuned. However it is a good +# idea to start with the default settings and only change them after investigating +# how to improve the performances and how the keys LFU change over time, which +# is possible to inspect via the OBJECT FREQ command. +# +# There are two tunable parameters in the Redis LFU implementation: the +# counter logarithm factor and the counter decay time. It is important to +# understand what the two parameters mean before changing them. +# +# The LFU counter is just 8 bits per key, it's maximum value is 255, so Redis +# uses a probabilistic increment with logarithmic behavior. Given the value +# of the old counter, when a key is accessed, the counter is incremented in +# this way: +# +# 1. A random number R between 0 and 1 is extracted. +# 2. A probability P is calculated as 1/(old_value*lfu_log_factor+1). +# 3. The counter is incremented only if R < P. +# +# The default lfu-log-factor is 10. 
This is a table of how the frequency +# counter changes with a different number of accesses with different +# logarithmic factors: +# +# +--------+------------+------------+------------+------------+------------+ +# | factor | 100 hits | 1000 hits | 100K hits | 1M hits | 10M hits | +# +--------+------------+------------+------------+------------+------------+ +# | 0 | 104 | 255 | 255 | 255 | 255 | +# +--------+------------+------------+------------+------------+------------+ +# | 1 | 18 | 49 | 255 | 255 | 255 | +# +--------+------------+------------+------------+------------+------------+ +# | 10 | 10 | 18 | 142 | 255 | 255 | +# +--------+------------+------------+------------+------------+------------+ +# | 100 | 8 | 11 | 49 | 143 | 255 | +# +--------+------------+------------+------------+------------+------------+ +# +# NOTE: The above table was obtained by running the following commands: +# +# redis-benchmark -n 1000000 incr foo +# redis-cli object freq foo +# +# NOTE 2: The counter initial value is 5 in order to give new objects a chance +# to accumulate hits. +# +# The counter decay time is the time, in minutes, that must elapse in order +# for the key counter to be divided by two (or decremented if it has a value +# less <= 10). +# +# The default value for the lfu-decay-time is 1. A special value of 0 means to +# decay the counter every time it happens to be scanned. +# +# lfu-log-factor 10 +# lfu-decay-time 1 + +########################### ACTIVE DEFRAGMENTATION ####################### +# +# What is active defragmentation? +# ------------------------------- +# +# Active (online) defragmentation allows a Redis server to compact the +# spaces left between small allocations and deallocations of data in memory, +# thus allowing to reclaim back memory. +# +# Fragmentation is a natural process that happens with every allocator (but +# less so with Jemalloc, fortunately) and certain workloads. 
Normally a server +# restart is needed in order to lower the fragmentation, or at least to flush +# away all the data and create it again. However thanks to this feature +# implemented by Oran Agra for Redis 4.0 this process can happen at runtime +# in a "hot" way, while the server is running. +# +# Basically when the fragmentation is over a certain level (see the +# configuration options below) Redis will start to create new copies of the +# values in contiguous memory regions by exploiting certain specific Jemalloc +# features (in order to understand if an allocation is causing fragmentation +# and to allocate it in a better place), and at the same time, will release the +# old copies of the data. This process, repeated incrementally for all the keys +# will cause the fragmentation to drop back to normal values. +# +# Important things to understand: +# +# 1. This feature is disabled by default, and only works if you compiled Redis +# to use the copy of Jemalloc we ship with the source code of Redis. +# This is the default with Linux builds. +# +# 2. You never need to enable this feature if you don't have fragmentation +# issues. +# +# 3. Once you experience fragmentation, you can enable this feature when +# needed with the command "CONFIG SET activedefrag yes". +# +# The configuration parameters are able to fine tune the behavior of the +# defragmentation process. If you are not sure about what they mean it is +# a good idea to leave the defaults untouched. 
+ +# Active defragmentation is disabled by default +# activedefrag no + +# Minimum amount of fragmentation waste to start active defrag +# active-defrag-ignore-bytes 100mb + +# Minimum percentage of fragmentation to start active defrag +# active-defrag-threshold-lower 10 + +# Maximum percentage of fragmentation at which we use maximum effort +# active-defrag-threshold-upper 100 + +# Minimal effort for defrag in CPU percentage, to be used when the lower +# threshold is reached +# active-defrag-cycle-min 1 + +# Maximal effort for defrag in CPU percentage, to be used when the upper +# threshold is reached +# active-defrag-cycle-max 25 + +# Maximum number of set/hash/zset/list fields that will be processed from +# the main dictionary scan +# active-defrag-max-scan-fields 1000 + +# Jemalloc background thread for purging will be enabled by default +jemalloc-bg-thread yes + +# It is possible to pin different threads and processes of Redis to specific +# CPUs in your system, in order to maximize the performances of the server. +# This is useful both in order to pin different Redis threads in different +# CPUs, but also in order to make sure that multiple Redis instances running +# in the same host will be pinned to different CPUs. +# +# Normally you can do this using the "taskset" command, however it is also +# possible to this via Redis configuration directly, both in Linux and FreeBSD. +# +# You can pin the server/IO threads, bio threads, aof rewrite child process, and +# the bgsave child process. 
The syntax to specify the cpu list is the same as +# the taskset command: +# +# Set redis server/io threads to cpu affinity 0,2,4,6: +# server_cpulist 0-7:2 +# +# Set bio threads to cpu affinity 1,3: +# bio_cpulist 1,3 +# +# Set aof rewrite child process to cpu affinity 8,9,10,11: +# aof_rewrite_cpulist 8-11 +# +# Set bgsave child process to cpu affinity 1,10,11 +# bgsave_cpulist 1,10-11 + +# In some cases redis will emit warnings and even refuse to start if it detects +# that the system is in bad state, it is possible to suppress these warnings +# by setting the following config which takes a space delimited list of warnings +# to suppress +# +# ignore-warnings ARM64-COW-BUG diff --git a/compose/redis/data/.gitkeep b/compose/redis/data/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/compose/set_mysql_permission.sh b/compose/set_mysql_permission.sh new file mode 100755 index 0000000..55f3ce0 --- /dev/null +++ b/compose/set_mysql_permission.sh @@ -0,0 +1,102 @@ +#!/usr/bin/env bash +# ============================================================ +# set_mysql_permission.sh +# MySQL, PostgreSQL, Redis 데이터 디렉터리 권한 설정 스크립트 +# ============================================================ + +# 스크립트 실행 경로 기준 +MYSQL_DIR="./mysql" +PGDATA_DIR="./pgdata" +REDIS_DIR="./redis" + +# ============================================================ +# MySQL (Percona) 권한 설정 +# ============================================================ +echo "[MySQL] 데이터 디렉터리 권한 설정을 시작합니다..." +echo "[MySQL] 대상 디렉터리: $MYSQL_DIR" + +if [ ! -d "$MYSQL_DIR" ]; then + echo "[MySQL] $MYSQL_DIR 디렉터리가 존재하지 않아 새로 생성합니다." + sudo mkdir -p "$MYSQL_DIR" +fi + +# Percona의 mysql 유저 UID는 보통 1001 +MYSQL_UID=1001 +MYSQL_GID=1001 + +echo "[MySQL] 소유권을 $MYSQL_UID:$MYSQL_GID 로 변경합니다..." +sudo chown -R ${MYSQL_UID}:${MYSQL_GID} "$MYSQL_DIR" + +echo "[MySQL] 권한을 750 (rwxr-x---) 으로 설정합니다..." 
+sudo chmod -R 750 "$MYSQL_DIR" + +echo "[MySQL] 적용 결과:" +ls -ld "$MYSQL_DIR" + +# ============================================================ +# PostgreSQL 권한 설정 +# ============================================================ +echo "" +echo "[PostgreSQL] 데이터 디렉터리 권한 설정을 시작합니다..." +echo "[PostgreSQL] 대상 디렉터리: $PGDATA_DIR" + +if [ ! -d "$PGDATA_DIR" ]; then + echo "[PostgreSQL] $PGDATA_DIR 디렉터리가 존재하지 않아 새로 생성합니다." + sudo mkdir -p "$PGDATA_DIR" +fi + +# PostgreSQL의 postgres 유저 UID는 보통 999 (공식 Docker 이미지 기준) +POSTGRES_UID=999 +POSTGRES_GID=999 + +echo "[PostgreSQL] 소유권을 $POSTGRES_UID:$POSTGRES_GID 로 변경합니다..." +sudo chown -R ${POSTGRES_UID}:${POSTGRES_GID} "$PGDATA_DIR" + +echo "[PostgreSQL] 권한을 700 (rwx------) 으로 설정합니다..." +sudo chmod -R 700 "$PGDATA_DIR" + +echo "[PostgreSQL] 적용 결과:" +ls -ld "$PGDATA_DIR" + +# ============================================================ +# Redis 권한 설정 +# ============================================================ +echo "" +echo "[Redis] 데이터 디렉터리 권한 설정을 시작합니다..." +echo "[Redis] 대상 디렉터리: $REDIS_DIR" + +if [ ! -d "$REDIS_DIR" ]; then + echo "[Redis] $REDIS_DIR 디렉터리가 존재하지 않아 새로 생성합니다." + sudo mkdir -p "$REDIS_DIR/data" + sudo mkdir -p "$REDIS_DIR/conf" +fi + +if [ ! -d "$REDIS_DIR/data" ]; then + echo "[Redis] $REDIS_DIR/data 디렉터리가 존재하지 않아 새로 생성합니다." + sudo mkdir -p "$REDIS_DIR/data" +fi + +if [ ! -d "$REDIS_DIR/conf" ]; then + echo "[Redis] $REDIS_DIR/conf 디렉터리가 존재하지 않아 새로 생성합니다." + sudo mkdir -p "$REDIS_DIR/conf" +fi + +# Redis의 redis 유저 UID는 보통 999 (공식 Docker 이미지 기준) +REDIS_UID=999 +REDIS_GID=999 + +echo "[Redis] 소유권을 $REDIS_UID:$REDIS_GID 로 변경합니다..." +sudo chown -R ${REDIS_UID}:${REDIS_GID} "$REDIS_DIR" + +echo "[Redis] 권한을 750 (rwxr-x---) 으로 설정합니다..." +sudo chmod -R 750 "$REDIS_DIR" + +echo "[Redis] 적용 결과:" +ls -ld "$REDIS_DIR" +ls -la "$REDIS_DIR" + +# ============================================================ +# 완료 +# ============================================================ +echo "" +echo "[완료] 모든 권한 설정이 완료되었습니다." 
diff --git a/compose/ssl/certs/.gitkeep b/compose/ssl/certs/.gitkeep new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/compose/ssl/certs/.gitkeep @@ -0,0 +1 @@ + diff --git a/compose/ssl/letsencrypt/.gitkeep b/compose/ssl/letsencrypt/.gitkeep new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/compose/ssl/letsencrypt/.gitkeep @@ -0,0 +1 @@ + diff --git a/config/app-server/gunicorn_uvicorn.conf.py b/config/app-server/gunicorn_uvicorn.conf.py new file mode 100644 index 0000000..be4fab4 --- /dev/null +++ b/config/app-server/gunicorn_uvicorn.conf.py @@ -0,0 +1,256 @@ +""" +Gunicorn Configuration for Production FastAPI Service +===================================================== +환경: Production Level +서버 사양: 쿼드코어 CPU, 4GB RAM, ~50 req/s +백엔드: FastAPI REST API with Uvicorn Workers +프록시: Nginx (SSL/TLS, DDoS, Rate Limiting 처리) +===================================================== +""" + +# ============================================================================ +# 네트워크 바인딩 설정 +# ============================================================================ +# 바인딩 주소 및 포트 +# 역할: Gunicorn이 수신할 네트워크 주소 +# 0.0.0.0:8000 - 모든 네트워크 인터페이스에서 수신 (Docker/컨테이너 환경) +# 127.0.0.1:8000 - 로컬 전용 (Nginx와 동일 호스트, 보안 강화) +# Nginx 연동: Nginx가 외부 트래픽 처리, Gunicorn은 내부 전용 +bind = "0.0.0.0:8000" + +# ============================================================================ +# Worker 프로세스 설정 +# ============================================================================ +# Worker 수 +# 역할: 동시 요청 처리를 위한 프로세스 수 +# 공식: I/O 집약적 작업 = (CPU 코어 * 2) + 1 +# 계산: 쿼드코어(4) → 9개 권장, 하지만 4GB RAM 제약으로 4개 설정 +# 각 워커 메모리: 200-500MB +# 4 워커 × 500MB = 2GB (시스템 예비 2GB 확보) +# 성능: ~50 req/s는 4개 워커로 충분 (워커당 ~12.5 req/s) +# 환경 변수: GUNICORN_WORKERS=6 으로 오버라이드 가능 +# workers = multiprocessing.cpu_count() * 2 + 1 +workers = 4 + +# Worker 클래스 +# 역할: ASGI 애플리케이션(FastAPI) 처리를 위한 Worker 타입 +# uvicorn.workers.UvicornWorker: 비동기 I/O, uvloop 이벤트 루프 +# 기대 효과: sync 워커 대비 2-5배 높은 동시성, 메모리 효율적 
+worker_class = "uvicorn.workers.UvicornWorker" + +# Worker 동시 연결 수 (주석 처리 - UvicornWorker는 이 설정 미사용) +# Nginx가 앞단에서 연결 관리하므로 불필요 +# worker_connections = 1000 + +# ============================================================================ +# 프로세스 관리 설정 +# ============================================================================ +# 데몬 모드 +# 역할: 백그라운드 실행 여부 +# False: systemd/Docker가 프로세스 관리 (현대적 방식) +# True: 수동 관리 시 사용 (pidfile 필수) +daemon = False + +# PID 파일 (주석 처리 - systemd/Docker 사용 시 불필요) +# 역할: Master 프로세스 ID 저장 +# 수동 관리 시 활성화: pidfile = '/var/run/gunicorn/gunicorn.pid' +# pidfile = '/tmp/gunicorn.pid' + +# ============================================================================ +# ASGI 애플리케이션 경로 +# ============================================================================ +# ASGI 애플리케이션 경로 +# 역할: Gunicorn이 실행할 FastAPI 앱 지정 +# 형식: "모듈경로:변수명" +# 예: config.asgi:application (Django 스타일) +# main:app (FastAPI 기본) +#wsgi_app = "main:app" +wsgi_app = "main:app" + + +# ============================================================================ +# 타임아웃 설정 +# ============================================================================ +# Worker 타임아웃 +# 역할: Worker가 요청 처리 최대 허용 시간 (초) +# 동작: 타임아웃 초과 시 Worker 강제 종료 후 재시작 +# 계산: FastAPI REST API 평균 응답 1-5초 +# 파일 업로드 고려 (100MB / 10Mbps = 80초) +# 비디오 생성, 이미지 처리 등 장기 실행 작업 고려 +# Nginx 연동: proxy_read_timeout(300s)과 동일하게 설정 +# 300초: 장기 실행 API (비디오 생성, 이미지 업로드 등) 지원 +timeout = 300 + +# Keep-Alive 타임아웃 +# 역할: HTTP Keep-Alive 연결 유지 시간 (초) +# 동작: 연결 재사용으로 핸드셰이크 오버헤드 감소 +# Nginx 연동: Nginx keepalive_timeout(30s)보다 짧게 설정 +# Nginx가 먼저 종료하도록 하여 리소스 효율화 +# 2초: Nginx 앞단에서 연결 관리하므로 짧게 설정 +keepalive = 2 + +# Graceful 종료 타임아웃 +# 역할: Worker 재시작/종료 시 진행 중인 요청 완료 대기 시간 (초) +# 동작: SIGTERM 수신 후 새 요청 거부, 기존 요청 처리 +# 타임아웃 초과 시 SIGKILL로 강제 종료 +# 기대 효과: Graceful reload로 무중단 배포 +# kill -HUP 또는 systemctl reload gunicorn +# timeout과 동일하게 설정 +graceful_timeout = 300 + +# ============================================================================ 
+# 프로세스 리소스 관리 (메모리 누수 방지) +# ============================================================================ +# Worker 최대 요청 수 +# 역할: Worker가 처리할 최대 요청 후 자동 재시작 +# 목적: 메모리 누수 방어, 장기 운영 안정성 +# 동작: max_requests 도달 시 Worker graceful 종료 후 재시작 +# 계산: ~50 req/s 기준 +# 1000으로 설정 시 워커당 20초마다 재시작 (1000/50) +# 메모리 누수 우려 시 유지, 안정적이면 5000-10000 증가 가능 +# 모니터링: htop으로 워커 메모리 사용량 추이 확인 +max_requests = 1000 + +# 최대 요청 수 Jitter +# 역할: max_requests에 랜덤성 추가 +# 목적: 모든 Worker가 동시에 재시작하는 것 방지 +# 동작: 실제 재시작 = max_requests ± random(0, jitter) +# 기대 효과: 재시작 부하 분산, 서비스 연속성 보장 +# 권장: max_requests의 5-10% +max_requests_jitter = 50 + +# ============================================================================ +# 애플리케이션 로딩 설정 +# ============================================================================ +# 애플리케이션 사전 로딩 +# 역할: Worker fork 전 앱 로딩 방식 결정 +# False: 각 Worker가 독립적으로 앱 로딩 +# - 장점: Graceful reload 가능 (무중단 배포) +# - 단점: 메모리 중복 사용 (Worker 수만큼) +# True: Master 프로세스에서 앱 로딩 후 Worker fork +# - 장점: 메모리 20-40% 절감 (Copy-on-Write) +# - 단점: reload 시 전체 재시작 필요 (다운타임) +# 프로덕션 권장: False (무중단 배포 우선) +preload_app = False + +# 코드 변경 감지 자동 재시작 +# 역할: 파일 변경 시 Worker 자동 재시작 +# 성능 영향: CPU 5-10% 오버헤드, 메모리 50-100MB 추가 +# 보안: 예기치 않은 재시작으로 서비스 불안정 +# **프로덕션에서는 반드시 False** +# 개발 환경에서만 True 사용 +# 배포: CI/CD 파이프라인에서 명시적 재시작 +reload = False + +# ============================================================================ +# 로깅 설정 +# ============================================================================ +# 애플리케이션 출력 캡처 +# 역할: FastAPI의 print(), logging을 Gunicorn 로그로 리다이렉트 +# False: 앱 로거가 독립적으로 관리 (권장) +# True: stdout/stderr를 errorlog로 통합 +# FastAPI 권장: False (자체 로거 사용) +capture_output = False + +# 로그 레벨 +# 역할: 출력할 로그의 최소 수준 +# 레벨: critical > error > warning > info > debug +# info: 일반 정보 + 에러 (프로덕션 권장) +# warning: 경고 이상만 (로그 양 감소) +# debug: 모든 세부 정보 (성능 저하, 개발용) +loglevel = "info" + +# 액세스 로그 파일 +# 역할: HTTP 요청 로그 저장 +# 형식: 시간별 로그 파일 생성 (access_2025-01-05_14.log) +# 로테이션: 시간별 자동 분리, logrotate 추가 권장 +# 성능: 버퍼링으로 I/O 
최적화
+# Docker/K8s: "-" 사용으로 stdout 출력 권장
+accesslog = "/log/uvicorn/uvicorn_access.log"
+
+# 에러 로그 파일
+# 역할: Gunicorn 에러, Worker 크래시 로그
+# 포함: Worker 타임아웃, 메모리 에러, 예외 등
+# 모니터링: 장애 탐지를 위한 핵심 로그
+errorlog = "/log/uvicorn/uvicorn_error.log"
+
+# 액세스 로그 포맷 (현재 활성화됨 — 기본 포맷으로 충분하면 주석 처리)
+# 역할: 요청 로그 형식 커스터마이징
+# 기본 포맷으로도 충분, 상세 정보 필요 시 활성화
+access_log_format = (
+    '%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s" %(D)s %(p)s'
+)
+
+# 워커 프로세스 이름 설정 (모니터링 시 유용)
+proc_name = "fastapi_gunicorn"
+
+# ==========================================
+# 성능 최적화
+# ==========================================
+# - 최대 2048개의 연결이 Accept Queue에서 대기 가능
+# - 동시에 많은 연결 요청이 들어와도 2048개까지는 거부되지 않음
+# - 실제 적용값 = min(2048, 시스템 somaxconn)
+# 낮은 트래픽 (기본값 충분)
+# backlog = 2048 # 기본값 사용
+# workers = 4
+#
+# 높은 트래픽 (증가 필요)
+# backlog = 4096
+# workers = 8
+# OS 설정도 함께 조정 필요
+# /etc/sysctl.conf
+# net.core.somaxconn = 4096
+#
+# 증가해야 할 때:
+# ss -lnt 명령으로 Send-Q가 계속 가득 찬 경우
+# ss -lnt | grep :8000
+# 순간적인 트래픽 급증이 예상되는 경우
+# connection refused 에러가 자주 발생하는 경우
+
+backlog = 2048
+
+# 임시 파일 디렉토리 (업로드 처리 시 사용)
+# 재부팅 시 자동 삭제
+# Gunicorn 기본값
+# worker_tmp_dir = None # /tmp 디렉토리 사용 (디스크 기반)
+# 사용하는 경우 (권장):
+# 파일 업로드가 많은 서비스
+# 큰 요청/응답 처리
+# 충분한 RAM이 있는 경우
+# RAM이 부족한 경우
+# worker_tmp_dir = "/tmp" # 디스크 사용
+# 파일 업로드/다운로드가 거의 없는 경우
+# worker_tmp_dir = None # 기본값 사용
+# Docker 사용시 고려 필요
+# shm_size: '2gb' # /dev/shm 크기 증가
+# volumes:
+# - /dev/shm:/dev/shm # 호스트 공유 (선택)
+
+# worker_tmp_dir = "/dev/shm" # RAM 기반 tmpfs 사용으로 I/O 성능 향상
+worker_tmp_dir = None
+
+"""
+배포 전 체크리스트:
+
+[권장]
+1. 로그 로테이션 설정 (logrotate)
+2. 헬스체크 엔드포인트 구현 (/health)
+3. 
성능 테스트 (wrk, locust)
+
+성능 모니터링:
+    # 프로세스 확인
+    ps aux | grep gunicorn
+
+    # 리소스 사용량
+    htop -p $(pgrep -d',' gunicorn)
+
+    # 로그 실시간 확인 (accesslog/errorlog 설정과 동일한 경로)
+    tail -f /log/uvicorn/uvicorn_access.log
+    tail -f /log/uvicorn/uvicorn_error.log
+
+설정 최적화 가이드:
+    - workers: CPU 사용률 80% 이하 유지
+    - timeout: 응답 시간 + 여유분 (평균 * 2)
+    - max_requests: 메모리 누수 없으면 5000-10000으로 증가
+    - keepalive: Nginx keepalive_timeout보다 짧게
+"""
diff --git a/config/database/mysql/init.sql b/config/database/mysql/init.sql
new file mode 100644
index 0000000..1ccc7dd
--- /dev/null
+++ b/config/database/mysql/init.sql
@@ -0,0 +1,5 @@
+-- 1. admin 권한 계정 생성 (주의: 'test!'는 예시 비밀번호 — 배포 전 반드시 강력한 값으로 교체)
+CREATE USER IF NOT EXISTS 'devadmin'@'%' IDENTIFIED BY 'test!';
+-- 2. 전체 권한 부여 (모든 호스트 '%' 대상, GRANT OPTION 포함 — 운영 환경에서는 접속 호스트/권한 범위 축소 권장)
+GRANT ALL PRIVILEGES ON *.* TO 'devadmin'@'%' WITH GRANT OPTION;
+FLUSH PRIVILEGES;
\ No newline at end of file
diff --git a/config/database/mysql/my.cnf b/config/database/mysql/my.cnf
new file mode 100644
index 0000000..4f36fcb
--- /dev/null
+++ b/config/database/mysql/my.cnf
@@ -0,0 +1,546 @@
+# ========================================================================
+# Percona Server / MySQL 8.0 최적화 설정
+# 하드웨어 사양: 4코어 CPU, 4GB RAM, SSD, 1GB LAN
+# ========================================================================
+
+[mysqld]
+
+# ------------------------------------------------------------------------
+# 기본 경로 설정
+# ------------------------------------------------------------------------
+datadir=/var/lib/mysql
+socket=/var/lib/mysql/mysql.sock
+pid-file=/var/run/mysqld/mysqld.pid
+
+# ------------------------------------------------------------------------
+# 네트워크 및 연결 설정
+# ------------------------------------------------------------------------
+
+# 바인드 주소
+bind-address = 0.0.0.0
+# 기본값: 127.0.0.1 (로컬만)
+# 변경값: 0.0.0.0 (모든 IP)
+# 목적: 원격 접속 허용
+# 보안: 방화벽 설정 필수
+
+# 포트
+port = 3306
+# 기본값: 3306
+# MySQL 표준 포트
+
+# 최대 연결 수
+max_connections = 200
+# 기본값: 151
+# 변경값: 200
+# 목적: 4GB RAM 환경에서 충분한 연결 수 제공
+# 계산: 각 연결당 약 4-8MB 메모리 사용
+# 200 연결 = 최대 1.6GB 메모리 (버퍼 포함)
+# 참고: 연결 풀링(ProxySQL, MaxScale) 사용 시 
더 효율적 + +max_connect_errors = 1000000 +# 기본값: 100 +# 변경값: 1000000 +# 목적: 연결 오류로 인한 호스트 차단 방지 +# 성능: 네트워크 이슈로 인한 불필요한 차단 감소 + +# 대기 시간 설정 +wait_timeout = 600 +# 기본값: 28800 (8시간) +# 변경값: 600 (10분) +# 목적: 유휴 연결 자동 정리 +# 성능: 불필요한 연결 점유 방지 + +interactive_timeout = 600 +# 기본값: 28800 +# 변경값: 600 +# 목적: 대화형 클라이언트 타임아웃 +# 성능: 리소스 효율적 관리 + +connect_timeout = 10 +# 기본값: 10 +# 유지 이유: 연결 시도 타임아웃 + +# 스레드 캐시 +thread_cache_size = 50 +# 기본값: 8 +# 변경값: 50 +# 목적: 스레드 재사용으로 연결 생성 오버헤드 감소 +# 성능: 연결 빈도가 높은 환경에서 효과적 +# 계산: max_connections의 약 25% + +# 백로그 큐 +back_log = 512 +# 기본값: 80 +# 변경값: 512 +# 목적: 대기 중인 연결 요청 큐 크기 +# 성능: 트래픽 버스트 시 연결 손실 방지 + +# ------------------------------------------------------------------------ +# InnoDB 버퍼 풀 설정 (가장 중요!) +# ------------------------------------------------------------------------ + +innodb_buffer_pool_size = 2G +# 기본값: 128MB +# 변경값: 2GB (전체 RAM 4GB의 50%) +# 목적: 데이터와 인덱스를 메모리에 캐싱 +# 성능: 디스크 I/O를 크게 감소시키는 가장 중요한 설정 +# 권장: 전용 서버는 RAM의 70-80%, 혼합 환경은 50-60% +# 계산: 2GB buffer pool + 1GB 연결/쿼리 + 1GB OS/기타 + +innodb_buffer_pool_instances = 4 +# 기본값: 1 (또는 자동) +# 변경값: 4 +# 목적: 버퍼 풀을 여러 인스턴스로 분할하여 동시성 향상 +# 성능: 멀티 코어 환경에서 잠금 경합 감소 +# 권장: CPU 코어 수와 동일하게 설정 +# 참고: buffer_pool_size >= 1GB일 때만 효과적 + +innodb_buffer_pool_chunk_size = 128M +# 기본값: 128MB +# 유지 이유: buffer_pool_size가 instances × chunk_size의 배수여야 함 +# 계산: 2GB = 4 instances × 4 chunks × 128MB + +# ------------------------------------------------------------------------ +# InnoDB 로그 설정 +# ------------------------------------------------------------------------ + +innodb_log_file_size = 512M +# 기본값: 48MB +# 변경값: 512MB +# 목적: Redo 로그 파일 크기 +# 성능: 쓰기 집약적 워크로드에서 체크포인트 빈도 감소 +# 권장: buffer_pool_size의 25% 정도 +# 주의: 너무 크면 크래시 복구 시간 증가 + +innodb_log_buffer_size = 32M +# 기본값: 16MB +# 변경값: 32MB +# 목적: Redo 로그 버퍼 +# 성능: 디스크 쓰기 전 로그를 메모리에 버퍼링 +# 권장: 대용량 트랜잭션이 많으면 증가 + +innodb_flush_log_at_trx_commit = 1 +# 기본값: 1 +# 유지 이유: ACID 보장 (데이터 무결성) +# 옵션: +# 0: 로그를 메모리에만 (속도↑, 안정성↓↓) +# 1: 매 커밋마다 디스크에 flush (속도↓, 안정성↑↑) 
← 권장 +# 2: OS 캐시까지만 (속도↑, 안정성↑) +# 주의: 성능을 위해 2로 변경 가능하나 크래시 시 1초 데이터 손실 + +innodb_flush_method = O_DIRECT +# 기본값: fsync (Linux) +# 변경값: O_DIRECT +# 목적: OS 파일 시스템 캐시 우회 +# 성능: 이중 버퍼링 방지, SSD 환경에서 효과적 +# 권장: SSD 사용 시 필수 설정 + +# ------------------------------------------------------------------------ +# InnoDB I/O 설정 (SSD 최적화) +# ------------------------------------------------------------------------ + +innodb_io_capacity = 2000 +# 기본값: 200 (HDD 기준) +# 변경값: 2000 +# 목적: InnoDB가 초당 수행할 수 있는 I/O 작업 수 +# 성능: SSD의 높은 IOPS 활용 +# 권장: SSD는 2000-5000, NVMe는 10000+ +# 측정: fio 벤치마크로 실제 IOPS 측정 후 70% 수준으로 설정 + +innodb_io_capacity_max = 4000 +# 기본값: 2000 +# 변경값: 4000 (io_capacity의 2배) +# 목적: 긴급 상황(체크포인트 등)에서 최대 I/O +# 성능: 버스트 상황에서 더 많은 I/O 허용 + +innodb_read_io_threads = 4 +# 기본값: 4 +# 유지 이유: CPU 코어 수와 일치 +# 목적: 읽기 작업을 위한 I/O 스레드 + +innodb_write_io_threads = 4 +# 기본값: 4 +# 유지 이유: CPU 코어 수와 일치 +# 목적: 쓰기 작업을 위한 I/O 스레드 + +innodb_flush_neighbors = 0 +# 기본값: 1 (HDD 최적화) +# 변경값: 0 +# 목적: 인접 페이지 flush 비활성화 +# 성능: SSD는 랜덤 쓰기가 빠르므로 불필요 +# 권장: SSD 환경에서는 반드시 0으로 설정 + +# ------------------------------------------------------------------------ +# InnoDB 동시성 설정 +# ------------------------------------------------------------------------ + +innodb_thread_concurrency = 0 +# 기본값: 0 (무제한) +# 유지 이유: MySQL이 자동으로 최적화 +# 목적: 동시 실행 스레드 수 제한 +# 참고: 특정 워크로드에서 제한이 필요한 경우 CPU 코어 수 × 2 + +innodb_lock_wait_timeout = 50 +# 기본값: 50 +# 유지 이유: 락 대기 타임아웃 (초) +# 성능: 데드락 상황 빠른 감지 + +# ------------------------------------------------------------------------ +# 테이블 및 파일 설정 +# ------------------------------------------------------------------------ + +innodb_file_per_table = ON +# 기본값: ON (MySQL 5.6.6+) +# 유지 이유: 테이블별로 별도 파일 생성 +# 성능: 테이블 삭제 시 공간 즉시 반환, 관리 용이 + +innodb_open_files = 2000 +# 기본값: 300 +# 변경값: 2000 +# 목적: InnoDB가 동시에 열 수 있는 파일 수 +# 성능: 많은 테이블이 있을 때 파일 열기 오버헤드 감소 + +table_open_cache = 4000 +# 기본값: 2000 +# 변경값: 4000 +# 목적: 열린 테이블 캐시 +# 성능: 테이블 열기/닫기 오버헤드 감소 +# 계산: max_connections × 평균 조인 테이블 수 + 
+table_open_cache_instances = 16 +# 기본값: 16 +# 유지 이유: 캐시를 여러 인스턴스로 분할 +# 성능: 동시성 향상 + +table_definition_cache = 2000 +# 기본값: 400 +# 변경값: 2000 +# 목적: 테이블 정의 캐시 (.frm 파일) +# 성능: 테이블 메타데이터 접근 속도 향상 + +# ------------------------------------------------------------------------ +# 쿼리 캐시 (MySQL 8.0에서는 제거됨) +# ------------------------------------------------------------------------ + +# MySQL 8.0에서는 쿼리 캐시가 제거되었습니다. +# 대신 애플리케이션 레벨 캐싱(Redis, Memcached) 사용 권장 + +# ------------------------------------------------------------------------ +# 임시 테이블 설정 +# ------------------------------------------------------------------------ + +tmp_table_size = 64M +# 기본값: 16MB +# 변경값: 64MB +# 목적: 메모리 내 임시 테이블 최대 크기 +# 성능: 복잡한 쿼리의 임시 테이블을 메모리에 유지 +# 주의: max_heap_table_size와 함께 설정 + +max_heap_table_size = 64M +# 기본값: 16MB +# 변경값: 64MB +# 목적: MEMORY 테이블 최대 크기 +# 성능: tmp_table_size와 동일하게 설정 + +# ------------------------------------------------------------------------ +# 정렬 및 조인 버퍼 +# ------------------------------------------------------------------------ + +sort_buffer_size = 4M +# 기본값: 256KB +# 변경값: 4MB +# 목적: 정렬 작업에 사용되는 버퍼 +# 성능: ORDER BY, GROUP BY 성능 향상 +# 주의: 세션별로 할당되므로 너무 크면 메모리 부족 +# 계산: 200 연결 × 4MB = 최대 800MB + +read_buffer_size = 2M +# 기본값: 128KB +# 변경값: 2MB +# 목적: 순차 스캔 버퍼 +# 성능: 전체 테이블 스캔 시 성능 향상 + +read_rnd_buffer_size = 4M +# 기본값: 256KB +# 변경값: 4MB +# 목적: 정렬 후 행 읽기 버퍼 +# 성능: ORDER BY 후 행 검색 속도 향상 + +join_buffer_size = 4M +# 기본값: 256KB +# 변경값: 4MB +# 목적: 인덱스를 사용하지 않는 조인 버퍼 +# 성능: 조인 성능 향상 + +# ------------------------------------------------------------------------ +# 바이너리 로그 설정 +# ------------------------------------------------------------------------ + +# 바이너리 로그 활성화 (복제 및 Point-in-Time 복구에 필수) +# log_bin = /var/lib/mysql/mysql-bin +# 목적: 데이터 변경 사항 기록 +# 용도: 복제(Replication), 백업, 복구 +# 참고: 복제를 사용하지 않으면 disable_log_bin 설정 가능 + +server_id = 1 +# 기본값: 1 +# 목적: 복제 환경에서 서버 식별자 +# 참고: 각 서버마다 고유한 값 필요 + +binlog_format = ROW +# 기본값: ROW (MySQL 8.0+) +# 옵션: +# STATEMENT: SQL 문 저장 (크기↓, 
안정성↓) +# ROW: 실제 행 변경 저장 (크기↑, 안정성↑) ← 권장 +# MIXED: 자동 선택 +# 권장: ROW (가장 안전하고 일관성 있음) + +binlog_expire_logs_seconds = 604800 +# 기본값: 2592000 (30일) +# 변경값: 604800 (7일) +# 목적: 오래된 바이너리 로그 자동 삭제 +# 성능: 디스크 공간 관리 +# 참고: 백업 주기에 따라 조정 + +max_binlog_size = 100M +# 기본값: 1GB +# 변경값: 100MB +# 목적: 단일 바이너리 로그 파일 최대 크기 +# 성능: 작은 파일로 관리 용이성 향상 + +sync_binlog = 1 +# 기본값: 1 +# 유지 이유: 매 커밋마다 바이너리 로그를 디스크에 동기화 +# 성능: 안정성 최우선 (크래시 시 데이터 손실 방지) +# 참고: 성능이 중요하면 0으로 설정 가능하나 권장하지 않음 + +# ------------------------------------------------------------------------ +# 에러 로그 설정 +# ------------------------------------------------------------------------ + +# log_error = /var/log/mysql/error.log +log_error = /var/log/mysqld.log +# 목적: 에러 로그 파일 위치 +# 참고: 디렉토리가 존재하고 mysql 사용자가 쓰기 권한 필요 + +log_error_verbosity = 2 +# 기본값: 2 +# 옵션: 1 (오류만), 2 (오류+경고), 3 (오류+경고+정보) +# 권장: 2 (운영 환경) + +# ------------------------------------------------------------------------ +# 슬로우 쿼리 로그 +# ------------------------------------------------------------------------ + +slow_query_log = 1 +# 기본값: 0 (비활성화) +# 변경값: 1 (활성화) +# 목적: 느린 쿼리 기록 +# 성능: 쿼리 최적화에 필수 + +slow_query_log_file = /var/log/mysql/slow-query.log +# 목적: 슬로우 쿼리 로그 파일 위치 + +long_query_time = 2 +# 기본값: 10 +# 변경값: 2 +# 목적: 2초 이상 걸리는 쿼리 기록 +# 권장: 1-2초 (애플리케이션 특성에 따라 조정) + +log_queries_not_using_indexes = 1 +# 기본값: 0 +# 변경값: 1 +# 목적: 인덱스를 사용하지 않는 쿼리도 기록 +# 성능: 인덱스 누락 쿼리 발견 + +# ------------------------------------------------------------------------ +# 일반 쿼리 로그 (개발 환경에서만 사용) +# ------------------------------------------------------------------------ + +# 운영 환경에서는 비활성화 권장 (과도한 로그 생성) +# general_log = 0 +# general_log_file = /var/log/mysql/general.log + +# ------------------------------------------------------------------------ +# 문자셋 및 콜레이션 +# ------------------------------------------------------------------------ + +character_set_server = utf8mb4 +# 기본값: utf8mb4 (MySQL 8.0+) +# 목적: 서버 기본 문자셋 +# 참고: 이모지 등 4바이트 문자 지원 + +collation_server = utf8mb4_unicode_ci +# 기본값: 
utf8mb4_0900_ai_ci (MySQL 8.0+) +# 변경값: utf8mb4_unicode_ci +# 목적: 다국어 정렬 규칙 +# 참고: 호환성을 위해 unicode_ci 사용 + +# ------------------------------------------------------------------------ +# SQL 모드 +# ------------------------------------------------------------------------ + +sql_mode = STRICT_TRANS_TABLES,NO_ENGINE_SUBSTITUTION +# 기본값: STRICT_TRANS_TABLES,NO_ENGINE_SUBSTITUTION,... (MySQL 8.0+) +# 목적: SQL 엄격 모드 설정 +# 권장: STRICT_TRANS_TABLES (데이터 무결성) +# 참고: 레거시 애플리케이션은 모드 조정 필요 + +# ------------------------------------------------------------------------ +# 타임존 +# ------------------------------------------------------------------------ + +# default_time_zone = '+09:00' +# 기본값: SYSTEM +# 변경값: '+09:00' (한국 시간) +# 목적: 서버 타임존 설정 +# 참고: 글로벌 서비스는 '+00:00' (UTC) 권장 + +# ------------------------------------------------------------------------ +# 성능 스키마 (Performance Schema) +# ------------------------------------------------------------------------ + +performance_schema = ON +# 기본값: ON +# 유지 이유: 성능 모니터링 및 진단 +# 참고: 약간의 오버헤드 있지만 필수 모니터링 도구 + +# ------------------------------------------------------------------------ +# 보안 설정 +# ------------------------------------------------------------------------ + +# 로컬 파일 로드 비활성화 (보안) +local_infile = 0 +# 기본값: 0 (MySQL 8.0+) +# 목적: LOAD DATA LOCAL INFILE 비활성화 +# 보안: 로컬 파일 접근 방지 + +# 심볼릭 링크 비활성화 +symbolic_links = 0 +# 기본값: 0 +# 목적: 심볼릭 링크 사용 비활성화 +# 보안: 디렉토리 탐색 공격 방지 + +# ------------------------------------------------------------------------ +# 기타 최적화 +# ------------------------------------------------------------------------ + +# 쿼리 결과 캐시 (애플리케이션 레벨 권장) +# MySQL 8.0에서는 쿼리 캐시 제거됨 + +# 오픈 파일 제한 +open_files_limit = 65535 +# 기본값: 5000 +# 변경값: 65535 +# 목적: 동시에 열 수 있는 파일 수 +# 성능: 많은 테이블과 연결을 처리할 때 필요 + +# 최대 허용 패킷 크기 +max_allowed_packet = 64M +# 기본값: 64MB (MySQL 8.0+) +# 유지 이유: 대용량 데이터 처리 +# 참고: 필요시 증가 가능 (최대 1GB) + +# 그룹 커밋 최적화 +binlog_group_commit_sync_delay = 0 +# 기본값: 0 +# 목적: 바이너리 로그 그룹 커밋 지연 (마이크로초) +# 성능: 0보다 크면 처리량 증가, 지연 약간 증가 +# 참고: 
초당 수천 개 트랜잭션 환경에서 1000-10000 설정 + +binlog_group_commit_sync_no_delay_count = 0 +# 기본값: 0 +# 목적: 지연 없이 커밋할 트랜잭션 수 + +# ------------------------------------------------------------------------ +# 설정 파일 추가 포함 +# ------------------------------------------------------------------------ +!includedir /etc/my.cnf.d + +# ======================================================================== +# 주요 변경사항 요약 +# ======================================================================== +# +# 1. 메모리 설정 (4GB RAM 기준) +# - innodb_buffer_pool_size: 128MB → 2GB (50% of RAM) +# - tmp_table_size / max_heap_table_size: 16MB → 64MB +# - sort_buffer_size: 256KB → 4MB +# - read_buffer_size: 128KB → 2MB +# - join_buffer_size: 256KB → 4MB +# +# 2. 연결 설정 +# - max_connections: 151 → 200 +# - thread_cache_size: 8 → 50 +# - wait_timeout: 28800 → 600 (10분) +# +# 3. InnoDB 최적화 (SSD 특화) +# - innodb_io_capacity: 200 → 2000 +# - innodb_io_capacity_max: 2000 → 4000 +# - innodb_flush_neighbors: 1 → 0 (SSD 최적화) +# - innodb_flush_method: fsync → O_DIRECT +# +# 4. 로그 설정 +# - innodb_log_file_size: 48MB → 512MB +# - innodb_log_buffer_size: 16MB → 32MB +# - slow_query_log: 활성화 (2초 이상 쿼리) +# +# 5. 버퍼 풀 설정 +# - innodb_buffer_pool_instances: 1 → 4 (CPU 코어 수) +# +# ======================================================================== +# 예상 성능 향상 +# ======================================================================== +# +# - 읽기 성능: 40-60% 향상 (innodb_buffer_pool_size 증가) +# - 쓰기 성능: 30-50% 향상 (SSD 최적화, 로그 버퍼 증가) +# - 복잡한 쿼리: 50-100% 향상 (정렬/조인 버퍼 증가) +# - 동시 연결: 연결 처리 능력 향상 (thread_cache, max_connections) +# - 전체 처리량: 30-50% 향상 +# +# ======================================================================== +# 적용 방법 +# ======================================================================== +# +# 1. 이 파일을 /etc/my.cnf로 저장 (기존 파일 백업) +# sudo cp /etc/my.cnf /etc/my.cnf.backup +# sudo vi /etc/my.cnf +# +# 2. 
로그 디렉토리 생성 및 권한 설정 +# sudo mkdir -p /var/log/mysql +# sudo chown mysql:mysql /var/log/mysql +# sudo chmod 755 /var/log/mysql +# +# 3. MySQL 재시작 +# sudo systemctl restart mysql +# 또는 +# sudo service mysql restart +# +# 4. 설정 확인 +# mysql -u root -p -e "SHOW VARIABLES LIKE 'innodb_buffer_pool_size';" +# mysql -u root -p -e "SHOW VARIABLES LIKE 'max_connections';" +# +# 5. 슬로우 쿼리 로그 분석 (정기적으로) +# mysqldumpslow /var/log/mysql/slow-query.log +# +# 6. 성능 모니터링 +# mysql -u root -p -e "SHOW ENGINE INNODB STATUS\G" +# mysql -u root -p -e "SHOW GLOBAL STATUS LIKE 'Threads%';" +# +# ======================================================================== +# Docker 환경 주의사항 +# ======================================================================== +# +# Docker 환경에서 사용 시: +# 1. 로그 디렉토리를 볼륨 마운트 +# volumes: +# - ./logs/mysql:/var/log/mysql +# +# 2. 권한 문제 방지 +# - 컨테이너 시작 전 호스트에서 디렉토리 생성 +# mkdir -p ./logs/mysql +# chmod 777 ./logs/mysql # 또는 적절한 권한 +# +# 3. 메모리 제한 확인 +# - Docker 컨테이너에 최소 4GB RAM 할당 +# +# ======================================================================== diff --git a/config/database/postgresql/init.sql b/config/database/postgresql/init.sql new file mode 100644 index 0000000..b38e384 --- /dev/null +++ b/config/database/postgresql/init.sql @@ -0,0 +1,4 @@ +CREATE ROLE devadmin WITH + LOGIN + PASSWORD 'test!' + SUPERUSER; diff --git a/config/database/postgresql/pg_hba.conf b/config/database/postgresql/pg_hba.conf new file mode 100644 index 0000000..b5be892 --- /dev/null +++ b/config/database/postgresql/pg_hba.conf @@ -0,0 +1,128 @@ +# PostgreSQL Client Authentication Configuration File +# =================================================== +# +# Refer to the "Client Authentication" section in the PostgreSQL +# documentation for a complete description of this file. A short +# synopsis follows. 
+# +# ---------------------- +# Authentication Records +# ---------------------- +# +# This file controls: which hosts are allowed to connect, how clients +# are authenticated, which PostgreSQL user names they can use, which +# databases they can access. Records take one of these forms: +# +# local DATABASE USER METHOD [OPTIONS] +# host DATABASE USER ADDRESS METHOD [OPTIONS] +# hostssl DATABASE USER ADDRESS METHOD [OPTIONS] +# hostnossl DATABASE USER ADDRESS METHOD [OPTIONS] +# hostgssenc DATABASE USER ADDRESS METHOD [OPTIONS] +# hostnogssenc DATABASE USER ADDRESS METHOD [OPTIONS] +# +# (The uppercase items must be replaced by actual values.) +# +# The first field is the connection type: +# - "local" is a Unix-domain socket +# - "host" is a TCP/IP socket (encrypted or not) +# - "hostssl" is a TCP/IP socket that is SSL-encrypted +# - "hostnossl" is a TCP/IP socket that is not SSL-encrypted +# - "hostgssenc" is a TCP/IP socket that is GSSAPI-encrypted +# - "hostnogssenc" is a TCP/IP socket that is not GSSAPI-encrypted +# +# DATABASE can be "all", "sameuser", "samerole", "replication", a +# database name, a regular expression (if it starts with a slash (/)) +# or a comma-separated list thereof. The "all" keyword does not match +# "replication". Access to replication must be enabled in a separate +# record (see example below). +# +# USER can be "all", a user name, a group name prefixed with "+", a +# regular expression (if it starts with a slash (/)) or a comma-separated +# list thereof. In both the DATABASE and USER fields you can also write +# a file name prefixed with "@" to include names from a separate file. +# +# ADDRESS specifies the set of hosts the record matches. It can be a +# host name, or it is made up of an IP address and a CIDR mask that is +# an integer (between 0 and 32 (IPv4) or 128 (IPv6) inclusive) that +# specifies the number of significant bits in the mask. A host name +# that starts with a dot (.) matches a suffix of the actual host name. 
+# Alternatively, you can write an IP address and netmask in separate +# columns to specify the set of hosts. Instead of a CIDR-address, you +# can write "samehost" to match any of the server's own IP addresses, +# or "samenet" to match any address in any subnet that the server is +# directly connected to. +# +# METHOD can be "trust", "reject", "md5", "password", "scram-sha-256", +# "gss", "sspi", "ident", "peer", "pam", "oauth", "ldap", "radius" or +# "cert". Note that "password" sends passwords in clear text; "md5" or +# "scram-sha-256" are preferred since they send encrypted passwords. +# +# OPTIONS are a set of options for the authentication in the format +# NAME=VALUE. The available options depend on the different +# authentication methods -- refer to the "Client Authentication" +# section in the documentation for a list of which options are +# available for which authentication methods. +# +# Database and user names containing spaces, commas, quotes and other +# special characters must be quoted. Quoting one of the keywords +# "all", "sameuser", "samerole" or "replication" makes the name lose +# its special character, and just match a database or username with +# that name. +# +# --------------- +# Include Records +# --------------- +# +# This file allows the inclusion of external files or directories holding +# more records, using the following keywords: +# +# include FILE +# include_if_exists FILE +# include_dir DIRECTORY +# +# FILE is the file name to include, and DIR is the directory name containing +# the file(s) to include. Any file in a directory will be loaded if suffixed +# with ".conf". The files of a directory are ordered by name. +# include_if_exists ignores missing files. FILE and DIRECTORY can be +# specified as a relative or an absolute path, and can be double-quoted if +# they contain spaces. +# +# ------------- +# Miscellaneous +# ------------- +# +# This file is read on server startup and when the server receives a +# SIGHUP signal. 
If you edit the file on a running system, you have to +# SIGHUP the server for the changes to take effect, run "pg_ctl reload", +# or execute "SELECT pg_reload_conf()". +# +# ---------------------------------- +# Put your actual configuration here +# ---------------------------------- +# +# If you want to allow non-local connections, you need to add more +# "host" records. In that case you will also need to make PostgreSQL +# listen on a non-local interface via the listen_addresses +# configuration parameter, or via the -i or -h command line switches. + +# CAUTION: Configuring the system for local "trust" authentication +# allows any local user to connect as any PostgreSQL user, including +# the database superuser. If you do not trust all your local users, +# use another authentication method. + + +# TYPE DATABASE USER ADDRESS METHOD + +# "local" is for Unix domain socket connections only +local all all trust +# IPv4 local connections: +host all all 127.0.0.1/32 trust +# IPv6 local connections: +host all all ::1/128 trust +# Allow replication connections from localhost, by a user with the +# replication privilege. 
+local replication all trust +host replication all 127.0.0.1/32 trust +host replication all ::1/128 trust + +host all all all scram-sha-256 diff --git a/config/database/postgresql/postgresql.conf b/config/database/postgresql/postgresql.conf new file mode 100644 index 0000000..0ab73ce --- /dev/null +++ b/config/database/postgresql/postgresql.conf @@ -0,0 +1,766 @@ +# ----------------------------- +# PostgreSQL 18 최적화 설정 +# 하드웨어 사양: 4코어 CPU, 4GB RAM, SSD, 1GB LAN +# ----------------------------- + +#------------------------------------------------------------------------------ +# FILE LOCATIONS +#------------------------------------------------------------------------------ + +#data_directory = 'ConfigDir' +#hba_file = 'ConfigDir/pg_hba.conf' +#ident_file = 'ConfigDir/pg_ident.conf' +#external_pid_file = '' + +#------------------------------------------------------------------------------ +# CONNECTIONS AND AUTHENTICATION +#------------------------------------------------------------------------------ + +# - Connection Settings - + +listen_addresses = '*' +# 기본값: 'localhost' +# 변경값: '*' (모든 IP에서 접근 허용) +# 목적: 네트워크를 통한 원격 접속 허용 + +#port = 5432 + +max_connections = 100 +# 기본값: 100 +# 유지 이유: 4GB RAM 환경에서 적절한 연결 수 +# 성능: 각 연결은 약 10MB의 메모리를 사용하므로 100개 연결 = 약 1GB +# 참고: 연결 풀링(pgBouncer 등) 사용 시 더 효율적 + +#reserved_connections = 0 +#superuser_reserved_connections = 3 +#unix_socket_directories = '/var/run/postgresql' +#unix_socket_group = '' +#unix_socket_permissions = 0777 +#bonjour = off +#bonjour_name = '' + +# - TCP settings - + +tcp_keepalives_idle = 60 +# 기본값: 0 (시스템 기본값 사용, 보통 7200초) +# 변경값: 60초 +# 목적: 유휴 연결을 60초마다 체크하여 죽은 연결을 빠르게 감지 +# 성능: 네트워크 장애 시 빠른 연결 정리로 리소스 확보 + +tcp_keepalives_interval = 10 +# 기본값: 0 (시스템 기본값 사용, 보통 75초) +# 변경값: 10초 +# 목적: keepalive 재전송 간격 +# 성능: 연결 문제를 빠르게 탐지 + +tcp_keepalives_count = 3 +# 기본값: 0 (시스템 기본값 사용, 보통 9회) +# 변경값: 3회 +# 목적: 연결 실패 판정까지의 재시도 횟수 +# 성능: 60초 + (10초 × 3) = 최대 90초 내에 죽은 연결 정리 + +#tcp_user_timeout = 0 
+#client_connection_check_interval = 0 + +# - Authentication - + +#authentication_timeout = 1min +#password_encryption = scram-sha-256 +#scram_iterations = 4096 + +# - SSL - + +#ssl = off +#ssl_ca_file = '' +#ssl_cert_file = 'server.crt' + +#------------------------------------------------------------------------------ +# RESOURCE USAGE (except WAL) +#------------------------------------------------------------------------------ + +# - Memory - + +shared_buffers = 1GB +# 기본값: 128MB +# 변경값: 1GB (전체 RAM 4GB의 25%) +# 목적: 데이터베이스가 디스크에서 읽은 데이터를 캐시하는 메모리 +# 성능: 자주 사용되는 데이터를 메모리에 유지하여 디스크 I/O 크게 감소 +# 참고: PostgreSQL에서 가장 중요한 메모리 설정 중 하나 + +huge_pages = try +# 기본값: try +# 유지 이유: 가능한 경우 huge pages 사용으로 메모리 관리 효율 향상 +# 성능: TLB 미스 감소, 대용량 shared_buffers 사용 시 특히 효과적 + +#huge_page_size = 0 +#temp_buffers = 8MB +#max_prepared_transactions = 0 + +work_mem = 16MB +# 기본값: 4MB +# 변경값: 16MB +# 목적: 정렬, 해시 테이블 등 쿼리 작업에 사용되는 메모리 +# 성능: 복잡한 쿼리의 정렬/조인 성능 향상, 디스크 임시 파일 사용 감소 +# 주의: (max_connections × work_mem)이 너무 크면 OOM 위험 +# 계산: 100 연결 × 16MB = 최대 1.6GB (복잡한 쿼리가 동시 실행될 경우) + +#hash_mem_multiplier = 2.0 + +maintenance_work_mem = 256MB +# 기본값: 64MB +# 변경값: 256MB (RAM의 약 6%) +# 목적: VACUUM, CREATE INDEX, ALTER TABLE 등 유지보수 작업에 사용 +# 성능: 인덱스 생성 및 VACUUM 작업 속도 대폭 향상 +# 참고: 유지보수 작업은 동시에 많이 실행되지 않으므로 크게 설정 가능 + +autovacuum_work_mem = 256MB +# 기본값: -1 (maintenance_work_mem 사용) +# 변경값: 256MB +# 목적: autovacuum 전용 메모리 할당 +# 성능: autovacuum 성능 향상으로 테이블 bloat 감소 + +#logical_decoding_work_mem = 64MB + +max_stack_depth = 2MB +# 기본값: 2MB +# 유지 이유: 기본값이 대부분의 경우에 적절 + +#shared_memory_type = mmap + +dynamic_shared_memory_type = posix +# 기본값: posix (Linux에서) +# 유지 이유: Linux에서 가장 효율적인 방식 + +#min_dynamic_shared_memory = 0MB + +# - Disk - + +#temp_file_limit = -1 +#file_copy_method = copy + +# - Kernel Resources - + +#max_files_per_process = 1000 + +# - Background Writer - + +bgwriter_delay = 200ms +# 기본값: 200ms +# 유지 이유: SSD 환경에서도 기본값이 적절 + +bgwriter_lru_maxpages = 100 +# 기본값: 100 +# 유지 이유: 백그라운드 쓰기 작업의 균형 유지 + 
+#bgwriter_lru_multiplier = 2.0 + +bgwriter_flush_after = 512kB +# 기본값: 512kB +# 유지 이유: SSD에서 적절한 flush 크기 + +# - I/O - + +#backend_flush_after = 0 + +effective_io_concurrency = 200 +# 기본값: 1 (HDD), 16 (SSD 감지 시) +# 변경값: 200 +# 목적: SSD의 높은 IOPS를 활용한 병렬 I/O 요청 수 +# 성능: bitmap heap scan 등에서 여러 페이지를 동시에 prefetch +# 참고: SSD는 동시 I/O 처리 능력이 뛰어나므로 높게 설정 + +maintenance_io_concurrency = 200 +# 기본값: 10 +# 변경값: 200 +# 목적: VACUUM, CREATE INDEX 등 유지보수 작업의 병렬 I/O +# 성능: 유지보수 작업 속도 향상 + +#io_max_combine_limit = 128kB +#io_combine_limit = 128kB +#io_method = worker +#io_max_concurrency = -1 +#io_workers = 3 + +# - Worker Processes - + +max_worker_processes = 8 +# 기본값: 8 +# 유지 이유: 4코어 환경에서 적절 (코어 수 × 2) +# 성능: 병렬 쿼리, autovacuum 등 다양한 백그라운드 작업 처리 + +max_parallel_workers_per_gather = 2 +# 기본값: 2 +# 변경값: 2 (4코어 환경에서 적절) +# 목적: 단일 쿼리가 사용할 수 있는 최대 병렬 worker 수 +# 성능: 대용량 테이블 스캔 시 쿼리 속도 향상 +# 참고: 너무 높으면 다른 쿼리의 리소스 부족 발생 가능 + +max_parallel_maintenance_workers = 2 +# 기본값: 2 +# 유지 이유: CREATE INDEX 등 유지보수 작업의 병렬화 +# 성능: 인덱스 생성 속도 향상 + +max_parallel_workers = 4 +# 기본값: 8 +# 변경값: 4 (CPU 코어 수) +# 목적: 시스템 전체에서 동시 실행 가능한 병렬 worker 총 수 +# 성능: CPU 코어 수에 맞춰 과도한 컨텍스트 스위칭 방지 + +#parallel_leader_participation = on + +#------------------------------------------------------------------------------ +# WRITE-AHEAD LOG +#------------------------------------------------------------------------------ + +# - Settings - + +#wal_level = replica +#fsync = on +#synchronous_commit = on +#wal_sync_method = fsync +#full_page_writes = on +#wal_log_hints = off + +wal_compression = lz4 +# 기본값: off +# 변경값: lz4 +# 목적: WAL 파일 압축으로 I/O 및 스토리지 사용량 감소 +# 성능: 네트워크를 통한 복제 시 대역폭 절약, 아카이빙 효율 향상 +# 참고: CPU 사용량은 약간 증가하지만 4코어 환경에서 무리 없음 + +#wal_init_zero = on +#wal_recycle = on + +wal_buffers = 16MB +# 기본값: -1 (shared_buffers의 3%, 최소 64kB, 최대 약 16MB) +# 변경값: 16MB +# 목적: WAL 데이터를 디스크에 쓰기 전 버퍼링 +# 성능: 쓰기 집약적 워크로드에서 WAL 쓰기 성능 향상 + +#wal_writer_delay = 200ms + +wal_writer_flush_after = 1MB +# 기본값: 1MB +# 유지 이유: SSD에서 적절한 flush 크기 + 
+#wal_skip_threshold = 2MB +#commit_delay = 0 +#commit_siblings = 5 + +# - Checkpoints - + +checkpoint_timeout = 15min +# 기본값: 5min +# 변경값: 15min +# 목적: 체크포인트 발생 간격 조정 +# 성능: 체크포인트 빈도 감소로 I/O spike 완화, 전체 성능 향상 +# 참고: 크래시 복구 시간은 약간 증가하지만 일반적으로 허용 가능 + +checkpoint_completion_target = 0.9 +# 기본값: 0.9 +# 유지 이유: 체크포인트를 시간에 걸쳐 분산하여 I/O spike 방지 +# 성능: 90%의 시간에 걸쳐 checkpoint 완료하여 부하 분산 + +#checkpoint_flush_after = 256kB +#checkpoint_warning = 30s + +max_wal_size = 2GB +# 기본값: 1GB +# 변경값: 2GB +# 목적: 체크포인트 간 생성 가능한 최대 WAL 크기 +# 성능: 쓰기 집약적 워크로드에서 체크포인트 빈도 감소 +# 참고: SSD 환경에서 더 큰 WAL 크기는 성능에 유리 + +min_wal_size = 1GB +# 기본값: 80MB +# 변경값: 1GB +# 목적: 항상 유지할 최소 WAL 크기 +# 성능: WAL 파일 재사용으로 파일 생성/삭제 오버헤드 감소 + +# - Prefetching during recovery - + +#recovery_prefetch = try +#wal_decode_buffer_size = 512kB + +# - Archiving - + +#archive_mode = off +#archive_library = '' +#archive_command = '' +#archive_timeout = 0 + +#------------------------------------------------------------------------------ +# REPLICATION +#------------------------------------------------------------------------------ + +# - Sending Servers - + +#max_wal_senders = 10 +#max_replication_slots = 10 +#wal_keep_size = 0 +#max_slot_wal_keep_size = -1 +#idle_replication_slot_timeout = 0 +#wal_sender_timeout = 60s +#track_commit_timestamp = off + +# - Primary Server - + +#synchronous_standby_names = '' +#synchronized_standby_slots = '' + +# - Standby Servers - + +#primary_conninfo = '' +#primary_slot_name = '' +#hot_standby = on +#max_standby_archive_delay = 30s +#max_standby_streaming_delay = 30s +#wal_receiver_create_temp_slot = off +#wal_receiver_status_interval = 10s +#hot_standby_feedback = off +#wal_receiver_timeout = 60s +#wal_retrieve_retry_interval = 5s +#recovery_min_apply_delay = 0 +#sync_replication_slots = off + +# - Subscribers - + +#max_active_replication_origins = 10 +#max_logical_replication_workers = 4 +#max_sync_workers_per_subscription = 2 +#max_parallel_apply_workers_per_subscription = 2 + 
+#------------------------------------------------------------------------------ +# QUERY TUNING +#------------------------------------------------------------------------------ + +# - Planner Method Configuration - + +#enable_async_append = on +#enable_bitmapscan = on +#enable_gathermerge = on +#enable_hashagg = on +#enable_hashjoin = on +#enable_incremental_sort = on +#enable_indexscan = on +#enable_indexonlyscan = on +#enable_material = on +#enable_memoize = on +#enable_mergejoin = on +#enable_nestloop = on +#enable_parallel_append = on +#enable_parallel_hash = on +#enable_partition_pruning = on +#enable_partitionwise_join = off +#enable_partitionwise_aggregate = off +#enable_presorted_aggregate = on +#enable_seqscan = on +#enable_sort = on +#enable_tidscan = on + +# - Planner Cost Constants - + +#seq_page_cost = 1.0 + +random_page_cost = 1.1 +# 기본값: 4.0 (HDD), 1.1 (SSD 자동 감지 시) +# 변경값: 1.1 +# 목적: SSD의 랜덤 액세스 특성을 반영 +# 성능: 인덱스 스캔 선호도 증가, 쿼리 플래너의 더 나은 결정 +# 참고: HDD는 4.0, SSD는 1.1-1.5가 적절 + +#cpu_tuple_cost = 0.01 +#cpu_index_tuple_cost = 0.005 +#cpu_operator_cost = 0.0025 +#parallel_setup_cost = 1000.0 +#parallel_tuple_cost = 0.1 +#min_parallel_table_scan_size = 8MB +#min_parallel_index_scan_size = 512kB + +effective_cache_size = 3GB +# 기본값: 4GB (시스템에 따라 다름) +# 변경값: 3GB (전체 RAM 4GB의 75%) +# 목적: OS와 PostgreSQL이 파일 캐싱에 사용 가능한 메모리 추정 +# 성능: 쿼리 플래너가 인덱스 스캔 비용을 더 정확히 계산 +# 참고: shared_buffers + OS 파일 캐시 = 약 3GB + +#jit_above_cost = 100000 +#jit_inline_above_cost = 500000 +#jit_optimize_above_cost = 500000 + +# - Genetic Query Optimizer - + +#geqo = on +#geqo_threshold = 12 + +# - Other Planner Options - + +default_statistics_target = 100 +# 기본값: 100 +# 유지 이유: 통계 정확도와 ANALYZE 시간의 균형 +# 성능: 쿼리 플래너의 정확한 비용 추정 +# 참고: 특정 컬럼에 대해 개별적으로 높일 수 있음 + +#constraint_exclusion = partition +#cursor_tuple_fraction = 0.1 +#from_collapse_limit = 8 +#jit = on +#join_collapse_limit = 8 +#plan_cache_mode = auto +#recursive_worktable_factor = 10.0 + 
+#------------------------------------------------------------------------------ +# REPORTING AND LOGGING +#------------------------------------------------------------------------------ + +# 로깅 설정은 기본값 유지 (사용자 요청사항) + +#log_destination = 'stderr' +#logging_collector = off +#log_directory = 'log' +#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' +#log_file_mode = 0600 +#log_rotation_age = 1d +#log_rotation_size = 10MB +#log_truncate_on_rotation = off +#syslog_facility = 'LOCAL0' +#syslog_ident = 'postgres' +#syslog_sequence_numbers = on +#syslog_split_messages = on +#event_source = 'PostgreSQL' +#log_min_messages = warning +#log_min_error_statement = error +#log_min_duration_statement = -1 +#log_min_duration_sample = -1 +#log_statement_sample_rate = 1.0 +#log_transaction_sample_rate = 0.0 +#log_startup_progress_interval = 10s +#debug_print_parse = off +#debug_print_rewritten = off +#debug_print_plan = off +#debug_pretty_print = on +#log_autovacuum_min_duration = 10min +#log_checkpoints = on +#log_connections = '' +#log_disconnections = off +#log_duration = off +#log_error_verbosity = default +#log_hostname = off +#log_line_prefix = '%m [%p] ' +#log_lock_waits = off +#log_lock_failures = off +#log_recovery_conflict_waits = off +#log_parameter_max_length = -1 +#log_parameter_max_length_on_error = 0 +#log_statement = 'none' +#log_replication_commands = off +#log_temp_files = -1 +log_timezone = 'Etc/UTC' +#cluster_name = '' +#update_process_title = on + +#------------------------------------------------------------------------------ +# STATISTICS +#------------------------------------------------------------------------------ + +#track_activities = on +#track_activity_query_size = 1024 +#track_counts = on +#track_cost_delay_timing = off + +track_io_timing = on +# 기본값: off +# 변경값: on +# 목적: I/O 작업의 시간 추적으로 성능 병목 지점 파악 +# 성능: EXPLAIN ANALYZE 등으로 I/O 병목 진단 가능 +# 참고: 약간의 오버헤드 있지만 성능 튜닝에 매우 유용 + +#track_wal_io_timing = off +#track_functions = none +#stats_fetch_consistency 
= cache + +# - Monitoring - + +#compute_query_id = auto +#log_statement_stats = off +#log_parser_stats = off +#log_planner_stats = off +#log_executor_stats = off + +#------------------------------------------------------------------------------ +# VACUUMING +#------------------------------------------------------------------------------ + +# - Automatic Vacuuming - + +#autovacuum = on + +autovacuum_worker_slots = 8 +# 기본값: 16 (PostgreSQL 18 신규 파라미터) +# 변경값: 8 +# 목적: autovacuum worker 슬롯 수 (동시 실행 가능한 worker 총량) +# 성능: 4코어 환경에서 적절한 슬롯 수로 CPU 리소스 균형 +# 참고: max_workers와 별개로 동적으로 worker 생성 가능 + +autovacuum_max_workers = 3 +# 기본값: 3 +# 변경값: 3 +# 목적: 동시에 실행 가능한 autovacuum worker 프로세스 수 +# 성능: 여러 테이블을 동시에 vacuum 처리 +# 참고: 4코어 환경에서 적절한 수준 + +autovacuum_naptime = 30s +# 기본값: 1min +# 변경값: 30s +# 목적: autovacuum이 데이터베이스를 체크하는 주기 +# 성능: 더 빈번한 체크로 테이블 bloat 감소, 성능 유지 +# 참고: 쓰기가 많은 환경에서 효과적 + +#autovacuum_vacuum_threshold = 50 +#autovacuum_vacuum_insert_threshold = 1000 +#autovacuum_analyze_threshold = 50 + +autovacuum_vacuum_scale_factor = 0.1 +# 기본값: 0.2 (테이블의 20%) +# 변경값: 0.1 (테이블의 10%) +# 목적: vacuum 실행 trigger 조건 (dead tuple 비율) +# 성능: 더 자주 vacuum 실행으로 테이블 bloat 최소화 +# 참고: 대형 테이블에서 특히 효과적 + +#autovacuum_vacuum_insert_scale_factor = 0.2 + +autovacuum_analyze_scale_factor = 0.05 +# 기본값: 0.1 (테이블의 10%) +# 변경값: 0.05 (테이블의 5%) +# 목적: analyze 실행 trigger 조건 +# 성능: 더 빈번한 통계 업데이트로 쿼리 플래너의 정확도 향상 + +#autovacuum_vacuum_max_threshold = 100000000 +#autovacuum_freeze_max_age = 200000000 +#autovacuum_multixact_freeze_max_age = 400000000 +#autovacuum_vacuum_cost_delay = 2ms +#autovacuum_vacuum_cost_limit = -1 + +# - Cost-Based Vacuum Delay - + +#vacuum_cost_delay = 0 +#vacuum_cost_page_hit = 1 +#vacuum_cost_page_miss = 2 +#vacuum_cost_page_dirty = 20 +#vacuum_cost_limit = 200 + +# - Default Behavior - + +#vacuum_truncate = on + +# - Freezing - + +#vacuum_freeze_table_age = 150000000 +#vacuum_freeze_min_age = 50000000 +#vacuum_failsafe_age = 1600000000 +#vacuum_multixact_freeze_table_age = 
150000000 +#vacuum_multixact_freeze_min_age = 5000000 +#vacuum_multixact_failsafe_age = 1600000000 +#vacuum_max_eager_freeze_failure_rate = 0.03 + +#------------------------------------------------------------------------------ +# CLIENT CONNECTION DEFAULTS +#------------------------------------------------------------------------------ + +# - Statement Behavior - + +#client_min_messages = notice +#search_path = '"$user", public' +#row_security = on +#default_table_access_method = 'heap' +#default_tablespace = '' +#default_toast_compression = 'pglz' +#temp_tablespaces = '' +#check_function_bodies = on +#default_transaction_isolation = 'read committed' +#default_transaction_read_only = off +#default_transaction_deferrable = off +#session_replication_role = 'origin' + +statement_timeout = 30000 +# 기본값: 0 (무제한) +# 변경값: 30000ms (30초) +# 목적: 장시간 실행되는 쿼리 자동 종료 +# 성능: 문제있는 쿼리로 인한 리소스 점유 방지 +# 참고: 애플리케이션 특성에 따라 조정 필요, 0으로 비활성화 가능 + +#transaction_timeout = 0 + +lock_timeout = 5000 +# 기본값: 0 (무제한) +# 변경값: 5000ms (5초) +# 목적: 락 대기 시간 제한 +# 성능: 데드락 상황 빠른 감지, 애플리케이션 응답성 향상 + +#idle_in_transaction_session_timeout = 0 + +idle_session_timeout = 300000 +# 기본값: 0 (무제한) +# 변경값: 300000ms (5분) +# 목적: 유휴 세션 자동 종료 +# 성능: 불필요한 연결로 인한 리소스 낭비 방지 +# 참고: 연결 풀 사용 시 조정 필요 + +#bytea_output = 'hex' +#xmlbinary = 'base64' +#xmloption = 'content' +#gin_pending_list_limit = 4MB +#createrole_self_grant = '' +#event_triggers = on + +# - Locale and Formatting - + +datestyle = 'iso, mdy' +#intervalstyle = 'postgres' +timezone = 'Etc/UTC' +#timezone_abbreviations = 'Default' +#extra_float_digits = 1 +#client_encoding = sql_ascii + +lc_messages = 'en_US.utf8' +lc_monetary = 'en_US.utf8' +lc_numeric = 'en_US.utf8' +lc_time = 'en_US.utf8' +#icu_validation_level = warning + +default_text_search_config = 'pg_catalog.english' + +# - Shared Library Preloading - + +#local_preload_libraries = '' +#session_preload_libraries = '' +#shared_preload_libraries = '' +#jit_provider = 'llvmjit' + +# - Other Defaults - + 
+#dynamic_library_path = '$libdir' +#extension_control_path = '$system' +#gin_fuzzy_search_limit = 0 + +#------------------------------------------------------------------------------ +# LOCK MANAGEMENT +#------------------------------------------------------------------------------ + +deadlock_timeout = 1s +# 기본값: 1s +# 유지 이유: 데드락 감지를 위한 적절한 대기 시간 + +#max_locks_per_transaction = 64 +#max_pred_locks_per_transaction = 64 +#max_pred_locks_per_relation = -2 +#max_pred_locks_per_page = 2 + +#------------------------------------------------------------------------------ +# VERSION AND PLATFORM COMPATIBILITY +#------------------------------------------------------------------------------ + +#array_nulls = on +#backslash_quote = safe_encoding +#escape_string_warning = on +#lo_compat_privileges = off +#quote_all_identifiers = off +#standard_conforming_strings = on +#synchronize_seqscans = on +#transform_null_equals = off +#allow_alter_system = on + +#------------------------------------------------------------------------------ +# ERROR HANDLING +#------------------------------------------------------------------------------ + +#exit_on_error = off +#restart_after_crash = on +#data_sync_retry = off +#recovery_init_sync_method = fsync + +#------------------------------------------------------------------------------ +# CONFIG FILE INCLUDES +#------------------------------------------------------------------------------ + +#include_dir = '...' +#include_if_exists = '...' +#include = '...' + +#------------------------------------------------------------------------------ +# CUSTOMIZED OPTIONS +#------------------------------------------------------------------------------ + +# ============================================================================= +# 주요 변경사항 요약 +# ============================================================================= +# +# 1. 
메모리 설정 (4GB RAM 기준) +# - shared_buffers: 128MB → 1GB (25% of RAM) +# - effective_cache_size: 4GB → 3GB (75% of RAM) +# - work_mem: 4MB → 16MB (쿼리 성능 향상) +# - maintenance_work_mem: 64MB → 256MB (유지보수 작업 가속) +# +# 2. 연결 관리 +# - tcp_keepalives 설정: 죽은 연결 빠른 감지 (90초 이내) +# - statement_timeout: 30초 (장시간 쿼리 방지) +# - lock_timeout: 5초 (락 대기 제한) +# - idle_session_timeout: 5분 (유휴 세션 정리) +# +# 3. 병렬 처리 (4코어 최적화) +# - max_parallel_workers: 4 (CPU 코어 수) +# - max_parallel_workers_per_gather: 2 +# - max_worker_processes: 8 +# +# 4. I/O 최적화 (SSD 특화) +# - random_page_cost: 4.0 → 1.1 +# - effective_io_concurrency: 1 → 200 +# - maintenance_io_concurrency: 10 → 200 +# +# 5. WAL 및 체크포인트 +# - wal_compression: off → lz4 (I/O 감소) +# - wal_buffers: 자동 → 16MB +# - checkpoint_timeout: 5min → 15min +# - max_wal_size: 1GB → 2GB +# - min_wal_size: 80MB → 1GB +# +# 6. Autovacuum 튜닝 +# - autovacuum_worker_slots: 16 → 8 +# - autovacuum_naptime: 1min → 30s (더 빈번한 체크) +# - autovacuum_vacuum_scale_factor: 0.2 → 0.1 (더 자주 실행) +# - autovacuum_analyze_scale_factor: 0.1 → 0.05 +# +# 7. 모니터링 +# - track_io_timing: off → on (I/O 성능 진단) +# +# ============================================================================= +# 예상 성능 향상 +# ============================================================================= +# +# - 읽기 성능: 30-50% 향상 (shared_buffers, effective_cache_size) +# - 쓰기 성능: 20-40% 향상 (WAL 설정, checkpoint 최적화) +# - 복잡한 쿼리: 40-100% 향상 (work_mem, 병렬 처리) +# - 유지보수 작업: 100-300% 향상 (maintenance_work_mem, I/O 동시성) +# - 전체 처리량: 25-50% 향상 (모든 최적화의 시너지) +# +# ============================================================================= +# 적용 방법 +# ============================================================================= +# +# 1. 이 파일을 postgresql.conf로 저장 (또는 기존 파일 백업 후 교체) +# 2. PostgreSQL 재시작: +# sudo systemctl restart postgresql +# 또는 +# sudo pg_ctl restart -D /var/lib/postgresql/data +# +# 3. 설정 확인: +# SHOW shared_buffers; +# SHOW effective_cache_size; +# SHOW work_mem; +# +# 4. 
모니터링 (첫 며칠간): +# - 메모리 사용량: free -h, htop +# - 체크포인트 빈도: 로그 확인 +# - 쿼리 성능: pg_stat_statements 활용 +# +# ============================================================================= \ No newline at end of file diff --git a/config/web-server/conf.d/LICENSE b/config/web-server/conf.d/LICENSE new file mode 100644 index 0000000..e7b2769 --- /dev/null +++ b/config/web-server/conf.d/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 devspoons + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/config/web-server/conf.d/backup/backend.conf b/config/web-server/conf.d/backup/backend.conf new file mode 100644 index 0000000..d50bdb8 --- /dev/null +++ b/config/web-server/conf.d/backup/backend.conf @@ -0,0 +1,104 @@ +server { + listen 80; + server_name demo.castad.net; + + if ($bad_bot) { + return 403; + } + + access_log /log/nginx/demo.castad.net.gunicorn_access.log main; + error_log /log/nginx/demo.castad.net.gunicorn_error.log warn; + + # if ($host !~* ^(domain\.com|www\.domain\.com)$) { + # return 444; + # } + + # 프론트엔드 정적 파일 루트 + root /www/o2o-castad-frontend/dist; + index index.html; + + # Django media + location /media { + autoindex off; + gzip_static on; + expires max; + #alias /www/django_sample/media; + alias /www/o2o-castad-backend/media; # your Django project's media files - amend as required + #include /etc/nginx/mime.types; + } + + location /static { + autoindex off; + gzip_static on; + expires max; + #alias /www/django_sample/static; + # normally static folder is named as /static + alias /www/o2o-castad-backend/static; # your Django project's static files - amend as required + #include /etc/nginx/mime.types; + } + + location /api/ { + autoindex off; + + # upstream 연결 풀 사용 (nginx.conf에서 정의) + proxy_pass http://uvicorn-app:8000/; + + # HTTP/1.1 사용 (keepalive 연결 필수) + proxy_http_version 1.1; + + # WebSocket 지원 및 HTTP keepalive 동시 지원 + # - WebSocket: Upgrade 헤더 전달, Connection: upgrade + # - 일반 HTTP: Connection: "" (keepalive 유지) + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection $connection_upgrade; + + # 프록시 헤더 설정 + proxy_set_header Host $http_host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + + # 프록시 캐시 우회 (WebSocket 및 동적 콘텐츠) + proxy_cache_bypass $http_upgrade; + + # 타임아웃 설정 (파일 업로드, AI 생성 등 오래 걸리는 작업용) + proxy_connect_timeout 300s; + proxy_send_timeout 300s; + proxy_read_timeout 300s; + + # 파일 업로드 설정 + 
client_max_body_size 100M; + proxy_request_buffering off; + } + + # Allow Lets Encrypt Domain Validation Program + location ^~ /.well-known/acme-challenge/ { + allow all; + root /www/o2o-castad-backend; + } + + # Block dot file (.htaccess .htpasswd .svn .git .env and so on.) + location ~ /\. { + deny all; + } + + # Block (log file, binary, certificate, shell script, sql dump file) access. + location ~* \.(log|binary|pem|enc|crt|conf|cnf|sql|sh|key|yml|lock)$ { + deny all; + } + + # Block access + location ~* (composer\.json|composer\.lock|composer\.phar|contributing\.md|license\.txt|readme\.rst|readme\.md|readme\.txt|copyright|artisan|gulpfile\.js|package\.json|phpunit\.xml|access_log|error_log|gruntfile\.js)$ { + deny all; + } + + location = /favicon.ico { + log_not_found off; + access_log off; + } + + location = /robots.txt { + log_not_found off; + access_log off; + } +} diff --git a/config/web-server/conf.d/bad_bot.conf b/config/web-server/conf.d/bad_bot.conf new file mode 100644 index 0000000..d9eeb68 --- /dev/null +++ b/config/web-server/conf.d/bad_bot.conf @@ -0,0 +1,563 @@ +map $http_user_agent $bad_bot { + default 0; + ~*360Spider 1; + ~*360Spider 1; + ~*80legs 1; + ~*Abonti 1; + ~*Aboundex 1; + ~*AcoonBot 1; + ~*Acunetix 1; + ~*adbeat_bot 1; + ~*AddThis.com 1; + ~*adidxbot 1; + ~*ADmantX 1; + ~*AhrefsBot 1; + ~*AIBOT 1; + ~*aiHitBot 1; + ~*Alexibot 1; + ~*Alligator 1; + ~*AllSubmitter 1; + ~*AngloINFO 1; + ~*Antelope 1; + ~*Apexoo 1; + ~*asterias 1; + ~*attach 1; + ~*BackDoorBot 1; + ~*BackStreet 1; + ~*BackWeb 1; + ~*Badass 1; + ~*Baid 1; + ~*Bandit 1; + ~*BatchFTP 1; + ~*BBBike 1; + ~*BeetleBot 1; + ~*Bigfoot 1; + ~*billigerbot 1; + ~*binlar 1; + ~*bitlybot 1; + ~*Black.Hole 1; + ~*BlackWidow 1; + ~*BLEXBot 1; + ~*Blow 1; + ~*BlowFish 1; + ~*BLP_bbot 1; + ~*BoardReader 1; + ~*Bolt\ 0 1; + ~*BOT\ for\ JCE 1; + ~*Bot\ mailto\:craftbot@yahoo\.com 1; + ~*BotALot 1; + ~*Buddy 1; + ~*BuiltBotTough 1; + ~*Bullseye 1; + ~*BunnySlippers 1; + ~*casper 1; + 
~*CazoodleBot 1; + ~*CCBot 1; + ~*Cegbfeieh 1; + ~*checkprivacy 1; + ~*CheeseBot 1; + ~*CherryPicker 1; + ~*ChinaClaw 1; + ~*chromeframe 1; + ~*Clerkbot 1; + ~*Cliqzbot 1; + ~*clshttp 1; + ~*Cogentbot 1; + ~*cognitiveseo 1; + ~*Collector 1; + ~*CommonCrawler 1; + ~*comodo 1; + ~*Copier 1; + ~*CopyRightCheck 1; + ~*cosmos 1; + ~*CPython 1; + ~*crawler4j 1; + ~*Crawlera 1; + ~*CRAZYWEBCRAWLER 1; + ~*Crescent 1; + ~*CSHttp 1; + ~*Curious 1; + # ~*Curl 1; # 개발/테스트용으로 허용 + ~*Custo 1; + ~*CWS_proxy 1; + ~*Default\ Browser\ 0 1; + ~*Demon 1; + ~*DeuSu 1; + ~*Devil 1; + ~*diavol 1; + ~*DigExt 1; + ~*Digincore 1; + ~*DIIbot 1; + ~*DISCo 1; + ~*discobot 1; + ~*DittoSpyder 1; + ~*DoCoMo 1; + ~*DotBot 1; + ~*Download.Demon 1; + ~*Download.Devil 1; + ~*Download.Wonder 1; + ~*Download\ Demo 1; + ~*dragonfly 1; + ~*Drip 1; + ~*DTS.Agent 1; + ~*EasouSpider 1; + ~*EasyDL 1; + ~*ebingbong 1; + ~*eCatch 1; + ~*ecxi 1; + ~*EirGrabber 1; + ~*Elmer 1; + ~*EmailCollector 1; + ~*EmailSiphon 1; + ~*EmailWolf 1; + ~*EroCrawler 1; + ~*Exabot 1; + ~*ExaleadCloudView 1; + ~*ExpertSearch 1; + ~*ExpertSearchSpider 1; + ~*Express 1; + ~*Express\ WebPictures 1; + ~*extract 1; + ~*Extractor 1; + ~*ExtractorPro 1; + ~*EyeNetIE 1; + ~*Ezooms 1; + ~*F2S 1; + ~*FastSeek 1; + ~*feedfinder 1; + ~*FeedlyBot 1; + ~*FHscan 1; + ~*finbot 1; + ~*Flamingo_SearchEngine 1; + ~*FlappyBot 1; + ~*FlashGet 1; + ~*flicky 1; + ~*Flipboard 1; + ~*FlipboardProxy 1; + ~*flunky 1; + ~*Foobot 1; + ~*FrontPage 1; + ~*g00g1e 1; + ~*GalaxyBot 1; + ~*genieo 1; + ~*Genieo 1; + ~*GetRight 1; + ~*GetWeb\! 
1; + ~*GigablastOpenSource 1; + ~*Go\-Ahead\-Got\-It 1; + ~*Go\!Zilla 1; + ~*gotit 1; + ~*GozaikBot 1; + ~*grab 1; + ~*Grabber 1; + ~*GrabNet 1; + ~*Grafula 1; + ~*GrapeshotCrawler 1; + ~*GT\:\:WWW 1; + ~*GTB5 1; + ~*Guzzle 1; + ~*harvest 1; + ~*Harvest 1; + ~*HEADMasterSEO 1; + ~*heritrix 1; + ~*hloader 1; + ~*HMView 1; + ~*HomePageBot 1; + ~*htmlparser 1; + ~*HTTP\:\:Lite 1; + ~*httrack 1; + ~*HTTrack 1; + ~*HubSpot 1; + ~*humanlinks 1; + ~*ia_archiver 1; + ~*icarus6 1; + ~*id\-search 1; + ~*IDBot 1; + ~*IlseBot 1; + ~*Image.Stripper 1; + ~*Image.Sucker 1; + ~*Image\ Stripper 1; + ~*Image\ Sucker 1; + ~*imagefetch 1; + ~*Indigonet 1; + ~*Indy\ Library 1; + ~*InfoNaviRobot 1; + ~*InfoTekies 1; + ~*integromedb 1; + ~*Intelliseek 1; + ~*InterGET 1; + ~*Internet\ Ninja 1; + ~*InternetSeer\.com 1; + ~*Iria 1; + ~*IRLbot 1; + ~*ISC\ Systems\ iRc\ Search\ 2\.1 1; + ~*jakarta 1; + ~*Jakarta 1; + ~*Java 1; + ~*JennyBot 1; + ~*JetCar 1; + ~*JikeSpider 1; + ~*JobdiggerSpider 1; + ~*JOC 1; + ~*JOC\ Web\ Spider 1; + ~*Jooblebot 1; + ~*JustView 1; + ~*Jyxobot 1; + ~*kanagawa 1; + ~*Kenjin.Spider 1; + ~*Keyword.Density 1; + ~*KINGSpider 1; + ~*kmccrew 1; + ~*larbin 1; + ~*LeechFTP 1; + ~*LeechGet 1; + ~*LexiBot 1; + ~*lftp 1; + ~*libWeb 1; + ~*libwww 1; + ~*libwww-perl 1; + ~*likse 1; + ~*Lingewoud 1; + ~*LinkChecker 1; + ~*linkdexbot 1; + ~*LinkextractorPro 1; + ~*LinkScan 1; + ~*LinksCrawler 1; + ~*LinksManager\.com_bot 1; + ~*linkwalker 1; + ~*LinkWalker 1; + ~*LinqiaRSSBot 1; + ~*LivelapBot 1; + ~*LNSpiderguy 1; + ~*ltx71 1; + ~*LubbersBot 1; + ~*lwp\-trivial 1; + ~*Mag-Net 1; + ~*Magnet 1; + ~*Mail.RU_Bot 1; + ~*majestic12 1; + ~*MarkWatch 1; + ~*Mass.Downloader 1; + ~*Mass\ Downloader 1; + ~*masscan 1; + ~*Mata.Hari 1; + ~*maverick 1; + ~*Maxthon$ 1; + ~*Mediatoolkitbot 1; + ~*megaindex 1; + ~*MegaIndex 1; + ~*Memo 1; + ~*MetaURI 1; + ~*MFC_Tear_Sample 1; + ~*Microsoft\ URL\ Control 1; + ~*microsoft\.url 1; + ~*MIDown\ tool 1; + ~*MIIxpc 1; + ~*miner 1; + ~*Missigua\ 
Locator 1; + ~*Mister\ PiX 1; + ~*MJ12bot 1; + ~*Mozilla.*Indy 1; + ~*Mozilla.*NEWT 1; + ~*MSFrontPage 1; + ~*MSIECrawler 1; + ~*msnbot 1; + ~*NAMEPROTECT 1; + ~*Navroad 1; + ~*NearSite 1; + ~*Net\ Vampire 1; + ~*NetAnts 1; + ~*Netcraft 1; + ~*netEstate 1; + ~*NetMechanic 1; + ~*NetSpider 1; + ~*NetZIP 1; + ~*NextGenSearchBot 1; + ~*NICErsPRO 1; + ~*niki\-bot 1; + ~*NimbleCrawler 1; + ~*Nimbostratus\-Bot 1; + ~*Ninja 1; + ~*nmap 1; + ~*Nmap 1; + ~*NPbot 1; + ~*nutch 1; + ~*Octopus 1; + ~*Offline\.Explorer 1; + ~*Offline\.Navigator 1; + ~*Offline\ Explorer 1; + ~*Offline\ Navigator 1; + ~*Openfind 1; + ~*OpenindexSpider 1; + ~*OpenLinkProfiler 1; + ~*OpenWebSpider 1; + ~*OrangeBot 1; + ~*OutfoxBot 1; + ~*Owlin 1; + ~*PageGrabber 1; + ~*PagesInventory 1; + ~*panopta 1; + ~*panscient\.com 1; + ~*Papa\ Foto 1; + ~*pavuk 1; + ~*pcBrowser 1; + ~*PECL\:\:HTTP 1; + ~*PeoplePal 1; + ~*Photon 1; + ~*PHPCrawl 1; + ~*Pixray 1; + ~*planetwork 1; + ~*PleaseCrawl 1; + ~*PNAMAIN\.EXE 1; + ~*Pockey 1; + ~*PodcastPartyBot 1; + ~*prijsbest 1; + ~*probethenet 1; + ~*ProPowerBot 1; + ~*ProWebWalker 1; + ~*proximic 1; + ~*psbot 1; + ~*Pump 1; + ~*purebot 1; + ~*pycurl 1; + ~*python\-requests 1; + ~*QueryN\.Metasearch 1; + ~*QuerySeekerSpider 1; + ~*R6_CommentReader 1; + ~*R6_FeedFetcher 1; + ~*RealDownload 1; + ~*Reaper 1; + ~*Recorder 1; + ~*ReGet 1; + ~*RepoMonkey 1; + ~*Riddler 1; + ~*Ripper 1; + ~*Rippers\ 0 1; + ~*RMA 1; + ~*rogerbot 1; + ~*RSSingBot 1; + ~*rv\:1\.9\.1 1; + ~*RyzeCrawler 1; + ~*SafeSearch 1; + ~*SBIder 1; + ~*scanbot 1; + ~*Scrapy 1; + ~*Screaming 1; + ~*SeaMonkey$ 1; + ~*search_robot 1; + ~*SearchmetricsBot 1; + ~*Semrush 1; + ~*SemrushBot 1; + ~*semrush\.com 1; + ~*SemrushBot-BA 1; + ~*SentiBot 1; + ~*SEOkicks 1; + ~*SEOkicks\-Robot 1; + ~*seoscanners 1; + ~*SeznamBot 1; + ~*ShowyouBot 1; + ~*SightupBot 1; + ~*Siphon 1; + ~*SISTRIX 1; + ~*sitecheck\.internetseer\.com 1; + ~*siteexplorer\.info 1; + ~*Siteimprove 1; + ~*SiteSnagger 1; + ~*SiteSucker 1; + ~*skygrid 
1; + ~*Slackbot 1; + ~*Slurp 1; + ~*SlySearch 1; + ~*SmartDownload 1; + ~*Snake 1; + ~*Snapbot 1; + ~*Snoopy 1; + ~*sogou 1; + ~*Sogou 1; + ~*Sosospider 1; + ~*SpaceBison 1; + ~*SpankBot 1; + ~*spanner 1; + ~*spaumbot 1; + ~*spbot 1; + ~*Spinn4r 1; + ~*Sqworm 1; + ~*Steeler 1; + ~*Stripper 1; + ~*sucker 1; + ~*Sucker 1; + ~*SuperBot 1; + ~*Superfeedr 1; + ~*SuperHTTP 1; + ~*SurdotlyBot 1; + ~*Surfbot 1; + ~*suzuran 1; + ~*Szukacz 1; + ~*tAkeOut 1; + ~*Teleport 1; + ~*Teleport\ Pro 1; + ~*Telesoft 1; + ~*The\.Intraformant 1; + ~*TheNomad 1; + ~*TightTwatBot 1; + ~*TinEye 1; + ~*TinEye\-bot 1; + ~*Titan 1; + ~*Toata\ dragostea\ mea\ pentru\ diavola 1; + ~*Toplistbot 1; + ~*trendictionbot 1; + ~*trovitBot 1; + ~*True_Robot 1; + ~*turingos 1; + ~*turnit 1; + ~*TurnitinBot 1; + ~*Twitterbot 1; + ~*URI\:\:Fetch 1; + ~*urllib 1; + ~*URLy\.Warning 1; + ~*Vacuum 1; + ~*Vagabondo 1; + ~*VCI 1; + ~*VidibleScraper 1; + ~*vikspider 1; + ~*VoidEYE 1; + ~*VoilaBot 1; + ~*WallpapersHD 1; + ~*WBSearchBot 1; + ~*Web.Image.Collector 1; + ~*Web\ Image\ Collector 1; + ~*Web\ Sucker 1; + ~*webalta 1; + ~*WebAuto 1; + ~*WebBandit 1; + ~*WebCollage 1; + ~*WebCopier 1; + ~*WebEnhancer 1; + ~*WebFetch 1; + ~*WebFuck 1; + ~*WebGo\ IS 1; + ~*WebLeacher 1; + ~*WebmasterWorldForumBot 1; + ~*WebPix 1; + ~*WebReaper 1; + ~*WebSauger 1; + ~*WebShag 1; + ~*Website\.eXtractor 1; + ~*Website\ eXtractor 1; + ~*Website\ Quester 1; + ~*Webster 1; + ~*WebStripper 1; + ~*WebSucker 1; + ~*WebWhacker 1; + ~*WebZIP 1; + ~*Wells\ Search\ II 1; + ~*WEP\ Search 1; + ~*WeSEE 1; + ~*Wget 1; + ~*Whack 1; + ~*Whacker 1; + ~*Widow 1; + ~*WinHTTrack 1; + ~*WinInet 1; + ~*WISENutbot 1; + ~*woobot 1; + ~*woopingbot 1; + ~*worldwebheritage.org 1; + ~*Wotbox 1; + ~*WPScan 1; + ~*WWW\-Collector\-E 1; + ~*WWW\-Mechanize 1; + ~*WWWOFFLE 1; + ~*Xaldon 1; + ~*Xaldon\ WebSpider 1; + ~*Xenu 1; + ~*XoviBot 1; + ~*yacybot 1; + ~*YisouSpider 1; + ~*Zade 1; + ~*zermelo 1; + ~*Zeus 1; + ~*zh\-CN 1; + ~*ZmEu 1; + ~*ZumBot 1; + 
~*Zyborg 1; + ~*ZyBorg 1; + ~*Yandex 1; + ~*YandexBot 1; + ~*Baiduspider 1; + ~*BaiduSpider 1; + ~*Slackbot 1; +} + +map $http_user_agent $bad_bot1 { +default 0; +~*^Lynx 0; # Let Lynx go through +libwww-perl 1; +~*(?i)(80legs|360Spider|Aboundex|AhrefsBot|Daumoa|DataForSeoBot|DaumBot|applebot|BLEXBot|serpstatbot|MediaMathbot|Abonti|Acunetix|^AIBOT|^Alexibot|Alligator|AllSubmitter|Apexoo|^asterias|^attach|^BackDoorBot|^BackStreet|^BackWeb|Badass|Bandit|petalbot|Baid|Baiduspider|^BatchFTP|^Bigfoot|^Black.Hole|^BlackWidow|BlackWidow|^BlowFish|Blow|^BotALot|Buddy|^BuiltBotTough|^Bullseye|^BunnySlippers|BBBike|^Cegbfeieh|^CheeseBot|^CherryPicker|^ChinaClaw|^Cogentbot|CPython|Collector|cognitiveseo|Copier|^CopyRightCheck|^cosmos|^Crescent|CSHttp|^Custo|^Demon|^Devil|^DISCo|^DIIbot|discobot|^DittoSpyder|Download.Demon|Download.Devil|Download.Wonder|^dragonfly|^Drip|^eCatch|^EasyDL|^ebingbong|^EirGrabber|^EmailCollector|^EmailSiphon|^EmailWolf|^EroCrawler|^Exabot|^Express|Extractor|^EyeNetIE|FHscan|^FHscan|^flunky|^Foobot|^FrontPage|GalaxyBot|^gotit|Grabber|^GrabNet|^Grafula|^Harvest|^HEADMasterSEO|^hloader|^HMView|^HTTrack|httrack|HTTrack|htmlparser|^humanlinks|^IlseBot|Image.Stripper|Image.Sucker|imagefetch|^InfoNaviRobot|^InfoTekies|^Intelliseek|^InterGET|^Iria|^Jakarta|^JennyBot|^JetCar|JikeSpider|^JOC|^JustView|^Jyxobot|^Kenjin.Spider|^Keyword.Density|libwww|^larbin|LeechFTP|LeechGet|^LexiBot|^lftp|^libWeb|^likse|^LinkextractorPro|^LinkScan|^LNSpiderguy|^LinkWalker|msnbot|MSIECrawler|MJ12bot|MegaIndex|^Magnet|^Mag-Net|^MarkWatch|Mass.Downloader|masscan|^Mata.Hari|^Memo|^MIIxpc|^NAMEPROTECT|^Navroad|^NearSite|^NetAnts|^Netcraft|^NetMechanic|^NetSpider|^NetZIP|^NextGenSearchBot|^NICErsPRO|^niki-bot|^NimbleCrawler|^Nimbostratus-Bot|^Ninja|^Nmap|nmap|^NPbot|Offline.Explorer|Offline.Navigator|OpenLinkProfiler|^Octopus|^Openfind|^OutfoxBot|Pixray|probethenet|proximic|^PageGrabber|^pavuk|^pcBrowser|^Pockey|^ProPowerBot|^ProWebWalker|^psbot|^Pump|python-requests|^QueryN.Metase
arch|^RealDownload|Reaper|^Reaper|^Ripper|Ripper|Recorder|^ReGet|^RepoMonkey|^RMA|scanbot|SEOkicks-Robot|seoscanners|^Stripper|^Sucker|Siphon|Siteimprove|^SiteSnagger|SiteSucker|^SlySearch|^SmartDownload|^Snake|^Snapbot|^Snoopy|Sosospider|^sogou|spbot|^SpaceBison|^spanner|^SpankBot|Spinn4r|^Sqworm|Sqworm|Stripper|Sucker|^SuperBot|SuperHTTP|^SuperHTTP|^Surfbot|^suzuran|^Szukacz|^tAkeOut|^Teleport|^Telesoft|^TurnitinBot|^The.Intraformant|^TheNomad|^TightTwatBot|^Titan|^True_Robot|^turingos|^TurnitinBot|^URLy.Warning|^Vacuum|^VCI|VidibleScraper|^VoidEYE|^WebAuto|^WebBandit|^WebCopier|^WebEnhancer|^WebFetch|^Web.Image.Collector|^WebLeacher|^WebmasterWorldForumBot|WebPix|^WebReaper|^WebSauger|Website.eXtractor|^Webster|WebShag|^WebStripper|WebSucker|^WebWhacker|^WebZIP|Whack|Whacker|^Widow|Widow|WinHTTrack|^WISENutbot|WWWOFFLE|^WWWOFFLE|^WWW-Collector-E|^Xaldon|^Xenu|^Zade|^Zeus|ZmEu|^Zyborg|SemrushBot|^WebFuck|^MJ12bot|^majestic12|^WallpapersHD) 1; +} + +## Add here all referrers that are to blocked. 
+map $http_referer $bad_referer { +default 0; +~(?i)(adcash|advair|allegra|ambien|amoxicillin|adult|anal|asshole|babes|baccarat|betting|bithack|blackjack|cash|casino|celeb|cheap|cialis|craps|credit|click|cunt|deal|debt|drug|diamond|effexor|equity|faxo|finance|fisting|forsale|gambling|gaysex|girl|hardcore|hold-em|holdem|iconsurf|ilovevitaly|insurance|interest|internetsupervision|jewelry|keno|levitra|lipitor|loan|loans|love|makemoneyonline|make-money-online|meds|money|mortgage|myftpupload|nudit|omaha|organic|paxil|pharmacy|pharmacies|phentermine|pheromone|pills|piss|poker|porn|poweroversoftware|refinance|replica|rimming|roulette|screentoolkit|seoexperimenty|sex|snuff|scout|seventwentyfour|slot|slots|syntryx|teen|texas|t0phackteam|tournament|tramadol|tramidol|valtrex|vvakhrin-ws1|viagra|vicodin|webcam|xanax|xnxx|xxxrus|zanax|zippo|zoloft) 1; +} + +## Add here all bad referer domains to be blocked - broken up into sections +## Alphabetical A - E (incl numbers) +map $http_referer $bad_urls1 { +default 0; 
+~(?i)(^http://(www\.)?38ha(-|.).*$|^http://(www\.)?4free(-|.).*$|^http://(www\.)?4hs8(-|.).*$|^http://(www\.)?4t(-|.).*$|^http://(www\.)?4u(-|.).*$|^http://(www\.)?6q(-|.).*$|^http://(www\.)?7makemoneyonline(-|.).*$|^http://(www\.)?8gold(-|.).*$|^http://(www\.)?911(-|.).*$|^http://(www\.)?adcash(-|.).*$|^http://(www\.)?.*(-|.)?adult(-|.).*$|^http://(www\.)?.*(-|.)?acunetix-referrer(-|.).*$|^http://(www\.)?abalone(-|.).*$|^http://(www\.)?adminshop(-|.).*$|^http://(www\.)?adultactioncam(-|.).*$|^http://(www\.)?aizzo(-|.).*$|^http://(www\.)?alphacarolinas(-|.).*$|^http://(www\.)?amateur(-|.).*$|^http://(www\.)?amateurxpass(-|.).*$|^http://(www\.)?.*(-|.)?anal(-|.).*$|^http://(www\.)?ansar-u-deen(-|.).*$|^http://(www\.)?atelebanon(-|.).*$|^http://(www\.)?beastiality(-|.).*$|^http://(www\.)?bestiality(-|.).*$|^http://(www\.)?belize(-|.).*$|^http://(www\.)?best-deals(-|.).*$|^http://(www\.)?bithack(-|.).*$|^http://(www\.)?blogincome(-|.).*$|^http://(www\.)?bontril(-|.).*$|^http://(www\.)?bruce-holdeman(-|.).*$|^http://(www\.)?.*(-|.)?blow.?job(-|.).*$|^http://(www\.)?buttons-for-website(-|.).*$|^http://(www\.)?ca-america(-|.).*$|^http://(www\.)?chatt-net(-|.).*$|^http://(www\.)?cenokos(-|.).*$|^http://(www\.)?cenoval(-|.).*$|^http://(www\.)?cityadspix(-|.).*$|^http://(www\.)?commerce(-|.).*$|^http://(www\.)?condo(-|.).*$|^http://(www\.)?conjuratia(-|.).*$|^http://(www\.)?consolidate(-|.).*$|^http://(www\.)?coswap(-|.).*$|^http://(www\.)?crescentarian(-|.).*$|^http://(www\.)?crepesuzette(-|.).*$|^http://(www\.)?darodar(-|.).*$|^http://(www\.)?dating(-|.).*$|^http://(www\.)?devaddict(-|.).*$|^http://(www\.)?discount(-|.).*$|^http://(www\.)?doobu(-|.).*$|^http://(www\.)?domainsatcost(-|.).*$|^http://(www\.)?econom.co(-|.).*$|^http://(www\.)?edakgfvwql(-|.).*$|^http://(www\.)?.*(-|.)?sex(-|.).*$|^http://(www\.)?e-site(-|.).*$|^http://(www\.)?egygift(-|.).*$|^http://(www\.)?empathica(-|.).*$|^http://(www\.)?empirepoker(-|.).*$|^http://(www\.)?e-poker-2005(-|.).*$|^http://(www
\.)?escal8(-|.).*$|^http://(www\.)?eurip(-|.).*$|^http://(www\.)?exitq(-|.).*$|^http://(www\.)?eyemagination(-|.).*$) 1; +} +## F - I +map $http_referer $bad_urls2 { +default 0; +~(?i)(^http://(www\.)?fastcrawl(-|.).*$|^http://(www\.)?fearcrow(-|.).*$|^http://(www\.)?ferretsoft(-|.).*$|^http://(www\.)?fick(-|.).*$|^http://(www\.)?finance(-|.).*$|^http://(www\.)?flafeber(-|.).*$|^http://(www\.)?fidelityfunding(-|.).*$|^http://(www\.)?freakycheats(-|.).*$|^http://(www\.)?freeality(-|.).*$|^http://(www\.)?fuck(-|.).*$|^http://(www\.)?future-2000(-|.).*$|^http://(www\.)?.*(-|.)?gay(-|.).*$|^http://(www\.)?gobongo.info(-|.).*$|^http://(www\.)?gabriola(-|.).*$|^http://(www\.)?gallerylisting(-|.).*$|^http://(www\.)?gb.com(-|.).*$|^http://(www\.)?ghostvisitor(-|.).*$|^http://(www\.)?globusy(-|.).*$|^http://(www\.)?golf-e-course(-|.).*$|^http://(www\.)?gospelcom(-|.).*$|^http://(www\.)?gradfinder(-|.).*$|^http://(www\.)?hasfun(-|.).*$|^http://(www\.)?herbal(-|.).*$|^http://(www\.)?hermosa(-|.).*$|^http://(www\.)?highprofitclub(-|.).*$|^http://(www\.)?hilton(-|.).*$|^http://(www\.)?teaminspection(-|.).*$|^http://(www\.)?hotel(-|.).*$|^http://(www\.)?houseofseven(-|.).*$|^http://(www\.)?hurricane(-|.).*$|^http://(www\.)?.*(-|.)?incest(-|.).*$|^http://(www\.)?iaea(-|.).*$|^http://(www\.)?ilovevitality(-|.).*$|^http://(www\.)?ime(-|.).*$|^http://(www\.)?info(-|.).*$|^http://(www\.)?ingyensms(-|.).*$|^http://(www\.)?inkjet-toner(-|.).*$|^http://(www\.)?isacommie(-|.).*$|^http://(www\.)?istarthere(-|.).*$|^http://(www\.)?it.tt(-|.).*$|^http://(www\.)?italiancharms(-|.).*$|^http://(www\.)?iwantu(-|.).*$|^http://(www\.)?ilovevitality(-|.).*$|^http://(www\.)?iskalko.ru(-|.).*$) 1; +} +## J - P +map $http_referer $bad_urls3 { +default 0; 
+~(?i)(^http://(www\.)?jfcadvocacy(-|.).*$|^http://(www\.)?jmhic(-|.).*$|^http://(www\.)?juris(-|.).*$|^http://(www\.)?kylos(-|.).*$|^http://(www\.)?laser-eye(-|.).*$|^http://(www\.)?leathertree(-|.).*$|^http://(www\.)?lillystar(-|.).*$|^http://(www\.)?linkerdome(-|.).*$|^http://(www\.)?livenet(-|.).*$|^http://(www\.)?low-limit(-|.).*$|^http://(www\.)?lowest-price(-|.).*$|^http://(www\.)?luxup.ru(-|.).*$|^http://(www\.)?macsurfer(-|.).*$|^http://(www\.)?mall.uk(-|.).*$|^http://(www\.)?maloylawn(-|.).*$|^http://(www\.)?marketing(-|.).*$|^http://(www\.)?.*(-|.)?mature(-|.).*$|^http://(www\.)?mcdortaklar(-|.).*$|^http://(www\.)?mediavisor(-|.).*$|^http://(www\.)?medications(-|.).*$|^http://(www\.)?mirror.sytes(-|.).*$|^http://(www\.)?mp3(-|.).*$|^http://(www\.)?(-|.)musicbox1(-|.).*$|^http://(www\.)?myftpupload(-|.).*$|^http://(www\.)?naked(-|.).*$|^http://(www\.)?netdisaster(-|.).*$|^http://(www\.)?netfirms(-|.).*$|^http://(www\.)?newtruths(-|.).*$|^http://(www\.)?no-limit(-|.).*$|^http://(www\.)?nude(-|.).*$|^http://(www\.)?nudeceleb(-|.).*$|^http://(www\.)?nutzu(-|.).*$|^http://(www\.)?odge(-|.).*$|^http://(www\.)?oiline(-|.).*$|^http://(www\.)?onlinegamingassoc(-|.).*$|^http://(www\.)?outpersonals(-|.).*$|^http://(www\.)?o-o-6-o-o.ru(-|.).*$|^http://(www\.)?o-o-8-o-o.ru(-|.).*$|^http://(www\.)?pagetwo(-|.).*$|^http://(www\.)?paris(-|.).*$|^http://(www\.)?passions(-|.).*$|^http://(www\.)?peblog(-|.).*$|^http://(www\.)?peng(-|.).*$|^http://(www\.)?perfume-cologne(-|.).*$|^http://(www\.)?personal(-|.).*$|^http://(www\.)?php-soft(-|.).*$|^http://(www\.)?pisoc(-|.).*$|^http://(www\.)?pisx(-|.).*$|^http://(www\.)?popwow(-|.).*$|^http://(www\.)?porn(-|.).*$|^http://(www\.)?prescriptions(-|.).*$|^http://(www\.)?priceg(-|.).*$|^http://(www\.)?.*(-|.)?pus*y(-|.).*$|^http://(www\.)?printdirectforless(-|.).*$|^http://(www\.)?ps2cool(-|.).*$|^http://(www\.)?psnarones(-|.).*$|^http://(www\.)?psxtreme(-|.).*$) 1; +} +## Q - Z +map $http_referer $bad_urls4 { +default 0; 
+~(?i)(^http://(www\.)?quality-traffic(-|.).*$|^http://(www\.)?registrarprice(-|.).*$|^http://(www\.)?reliableresults(-|.).*$|^http://(www\.)?rimpim(-|.).*$|^http://(www\.)?ro7kalbe(-|.).*$|^http://(www\.)?rohkalby(-|.).*$|^http://(www\.)?ronnieazza(-|.).*$|^http://(www\.)?rulo.biz(-|.).*$|^http://(www\.)?responsinator(-|.).*$|^http://(www\.)?s5(-|.).*$|^http://(www\.)?samiuls(-|.).*$|^http://(www\.)?savefrom(-|.).*$|^http://(www\.)?savetubevideo.com(-|.).*$|^http://(www\.)?screentoolkit.com(-|.).*$|^http://(www\.)?searchedu(-|.).*$|^http://(www\.)?semalt.com(-|.).*$|^http://(www\.)?seoexperimenty(-|.).*$|^http://(www\.)?seventwentyfour(-|.).*$|^http://(www\.)?seventwentyfour.*$|^http://(www\.)?sex(-|.).*$|^http://(www\.)?sexsearch(-|.).*$|^http://(www\.)?sexsq(-|.).*$|^http://(www\.)?shoesdiscount(-|.).*$|^http://(www\.)?site-4u(-|.).*$|^http://(www\.)?site5(-|.).*$|^http://(www\.)?slatersdvds(-|.).*$|^http://(www\.)?slftsdybbg.ru(-|.).*$|^http://(www\.)?sml338(-|.).*$|^http://(www\.)?sms(-|.).*$|^http://(www\.)?smsportali(-|.).*$|^http://(www\.)?socialseet.ru(-|.).*$|^http://(www\.)?software(-|.).*$|^http://(www\.)?sortthemesitesby(-|.).*$|^http://(www\.)?spears(-|.).*$|^http://(www\.)?spoodles(-|.).*$|^http://(www\.)?sportsparent(-|.).*$|^http://(www\.)?srecorder(-|.).*$|^http://(www\.)?stmaryonline(-|.).*$|^http://(www\.)?superiends.org(-|.).*$|^http://(www\.)?strip(-|.).*$|^http://(www\.)?suttonjames(-|.).*$|^http://(www\.)?talk.uk-yankee(-|.).*$|^http://(www\.)?tecrep-inc(-|.).*$|^http://(www\.)?teen(-|.).*$|^http://(www\.)?terashells(-|.).*$|^http://(www\.)?thatwhichis(-|.).*$|^http://(www\.)?thorcarlson(-|.).*$|^http://(www\.)?.*(-|.)?tits(-|.).*$|^http://(www\.)?.*(-|.)?titten(-|.).*$|^http://(www\.)?tmsathai(-|.).*$|^http://(www\.)?traffixer(-|.).*$|^http://(www\.)?tranny(-|.).*$|^http://(www\.)?valeof(-|.).*$|^http://(www\.)?video(-|.).*$|^http://(www\.)?vinhas(-|.).*$|^http://(www\.)?vixen1(-|.).*$|^http://(www\.)?vpshs(-|.).*$|^http://(www\.)?vrajitor(-
|.).*$|^http://(www\.)?vodkoved.ru(-|.).*$|^http://(www\.)?w3md(-|.).*$|^http://(www\.)?websocial.me(-|.).*$|^http://(www\.)?webdevsquare(-|.).*$|^http://(www\.)?whois(-|.).*$|^http://(www\.)?withdrawal(-|.).*$|^http://(www\.)?worldemail(-|.).*$|^http://(www\.)?wslp24(-|.).*$|^http://(www\.)?ws-op(-|.).*$|^http://(www\.)?xnxx(-|.).*$|^http://(www\.)?xopy(-|.).*$|^http://(www\.)?xxx(-|.).*$|^http://(www\.)?yelucie(-|.).*$|^http://(www\.)?youradulthosting(-|.).*$|^http://(www\.)?ykecwqlixx.ru(-|.).*$|^http://(www\.)?yougetsignal.com(-|.).*$|^http://(www\.)?(-|.)zindagi(-|.).*$) 1; +} +## Domains Linked to Yontoo Browser Malware and a Few Other New Ones +## Have split this into it's own section to keep lines shorter NOTE: changes to instructions +## adding if ($bad_urls5) and if ($bad_urls6) to your site(s) config. +map $http_referer $bad_urls5 { +default 0; +~(?i)(^http://(www\.)?101raccoon.ru(-|.).*$|^http://(www\.)?28n2gl3wfyb0.ru(-|.).*$|^http://(www\.)?627ad6438b58439cad1fc8cf6d67a92e.com(-|.).*$|^http://(www\.)?6ab9743d0152486387559b4abaa02ada.com(-|.).*$|^http://(www\.)?a342ae9750004b14b55f7310eff0ab65.com(-|.).*$|^http://(www\.)?aa08daf7e13b6345e09e92f771507fa5f4.com(-|.).*$|^http://(www\.)?aa14ab57a3339c4064bd9ae6fad7495b5f.com(-|.).*$|^http://(www\.)?aa625d84f1587749c1ab011d6f269f7d64.com(-|.).*$|^http://(www\.)?aa81bf391151884adfa3dd677e41f94be1.com(-|.).*$|^http://(www\.)?aa8780bb28a1de4eb5bff33c28a218a930.com(-|.).*$|^http://(www\.)?aa8b68101d388c446389283820863176e7.com(-|.).*$|^http://(www\.)?aa9bd78f328a6a41279d0fad0a88df1901.com(-|.).*$|^http://(www\.)?aa9d046aab36af4ff182f097f840430d51.com(-|.).*$|^http://(www\.)?aaa38852e886ac4af1a3cff9b47cab6272.com(-|.).*$|^http://(www\.)?aab94f698f36684c5a852a2ef272e031bb.com(-|.).*$|^http://(www\.)?aac500b7a15b2646968f6bd8c6305869d7.com(-|.).*$|^http://(www\.)?aac52006ec82a24e08b665f4db2b5013f7.com(-|.).*$|^http://(www\.)?aad1f4acb0a373420d9b0c4202d38d94fa.com(-|.).*$|^http://(www\.)?asrv-a.akamoihd.net(-|.).*$|^
http://(www\.)?asrvrep-a.akamaihd.net(-|.).*$|^http://(www\.)?bestpriceninja.com(-|.).*$|^http://(www\.)?bronzeaid-a.akamaihd.net(-|.).*$|^http://(www\.)?browsepulse-a.akamaihd.net(-|.).*$|^http://(www\.)?cashkitten-a.akamaihd.net(-|.).*$|^http://(www\.)?coolbar.pro(-|.).*$) 1; +} +map $http_referer $bad_urls6 { +default 0; +~(?i)(^http://(www\.)?davebestdeals.com(-|.).*$|^http://(www\.)?discovertreasure-a.akamaihd.net(-|.).*$|^http://(www\.)?discovertreasurenow.com(-|.).*$|^http://(www\.)?foxydeal.com(-|.).*$|^http://(www\.)?gameonasia.com(-|.).*$|^http://(www\.)?gameplexcity.com(-|.).*$|^http://(www\.)?gamerextra.com(-|.).*$|^http://(www\.)?gamerscorps.com(-|.).*$|^http://(www\.)?gamewrath.com(-|.).*$|^http://(www\.)?generousdeal-a.akamaihd.net(-|.).*$|^http://(www\.)?girlgamerdaily.com(-|.).*$|^http://(www\.)?hdapp1008-a.akamaihd.net(-|.).*$|^http://(www\.)?highstairs-a.akamaihd.net(-|.).*$|^http://(www\.)?hotshoppymac.com(-|.).*$|^http://(www\.)?matchpal-a.akamaihd.net(-|.).*$|^http://(www\.)?mecash.ru(-|.).*$|^http://(www\.)?monarchfind-a.akamaihd.net(-|.).*$|^http://(www\.)?myshopmatemac.com(-|.).*$|^http://(www\.)?nottyu.xyz(-|.).*$|^http://(www\.)?onlinemegax.com(-|.).*$|^http://(www\.)?outrageousdeal-a.akamaihd.net(-|.).*$|^http://(www\.)?pijoto.net(-|.).*$|^http://(www\.)?recordpage-a.akamaihd.net(-|.).*$|^http://(www\.)?resultshub-a.akamaihd.net(-|.).*$|^http://(www\.)?rvzr-a.akamaihd.net(-|.).*$|^http://(www\.)?savingsslider-a.akamaihd.net(-|.).*$|^http://(www\.)?searchinterneat-a.akamaihd.net(-|.).*$|^http://(www\.)?searchwebknow-a.akamaihd.net(-|.).*$|^http://(www\.)?seeresultshub-a.akamaihd.net(-|.).*$|^http://(www\.)?shoppytoolmac.com(-|.).*$|^http://(www\.)?skytraf.xyz(-|.).*$|^http://(www\.)?splendorsearch-a.akamaihd.net(-|.).*$|^http://(www\.)?strongsignal-a.akamaihd.net(-|.).*$|^http://(www\.)?surfbuyermac.com(-|.).*$|^http://(www\.)?treasuretrack-a.akamaihd.net(-|.).*$|^http://(www\.)?webshoppermac.com(-|.).*$|^http://(www\.)?pospr.waw.pl(-|.).*
$|^http://(www\.)?abclauncher.com(-|.).*$|^http://(www\.)?alert-fjg.xyz(-|.).*$|^http://(www\.)?analytics-ads.xyz(-|.).*$|^http://(www\.)?bamo.xsl.pt(-|.).*$|^http://(www\.)?compliance-olga.top(-|.).*$|^http://(www\.)?digital-video-processing.com(-|.).*$|^http://(www\.)?eu-cookie-law.info(-|.).*$|^http://(www\.)?findpik.com(-|.).*$|^http://(www\.)?forum20.smailik.org(-|.).*$|^http://(www\.)?free-share-buttons.top(-|.).*$|^http://(www\.)?free-social-buttons2.xyz(-|.).*$|^http://(www\.)?free-social-buttons3.xyz(-|.).*$|^http://(www\.)?free-social-buttons4.xyz(-|.).*$|^http://(www\.)?free-social-buttons5.xyz(-|.).*$|^http://(www\.)?front.to(-|.).*$|^http://(www\.)?infokonkurs.ru(-|.).*$|^http://(www\.)?mapquestz.us(-|.).*$|^http://(www\.)?quick-offer.com(-|.).*$|^http://(www\.)?rank-checker.online(-|.).*$|^http://(www\.)?rankchecker.online(-|.).*$|^http://(www\.)?rapidokbrain.com(-|.).*$|^http://(www\.)?real-time-analytics.com(-|.).*$|^http://(www\.)?sharebutton.net(-|.).*$|^http://(www\.)?sharebutton.org(-|.).*$|^http://(www\.)?shemale-sex.net(-|.).*$|^http://(www\.)?site-speed-check.site(-|.).*$|^http://(www\.)?site-speed-checker.site(-|.).*$|^http://(www\.)?trafficmania.com(-|.).*$|^http://(www\.)?website-speed-up.site(-|.).*$|^http://(www\.)?website-speed-up.top(-|.).*$|^http://(www\.)?xn--80aagddcgkbcqbad7amllnejg6dya.xn--p1ai(-|.).*$|^http://(www\.)?xn--80aikhbrhr.net(-|.).*$|^http://(www\.)?pila.pl(-|.).*$|^http://(www\.)?dytohqka.su(-|.).*$|^http://(www\.)?fqvjhqciw.net.ru(-|.).*$|^http://(www\.)?wycjrqzy.ua(-|.).*$|^http://(www\.)?0ca29773681c7e82.com(-|.).*$|^http://(www\.)?intervsem.ru(-|.).*$|^http://(www\.)?candy-glam-hp.com(-|.).*$|^http://(www\.)?thecoolimages.net(-|.).*$|^http://(www\.)?rebuildermedical.com(-|.).*$|^http://(www\.)?gaygalls.net(-|.).*$|^http://(www\.)?keywordteam.net(-|.).*$|^http://(www\.)?netfacet.net(-|.).*$|^http://(www\.)?pattersonsweb.com(-|.).*$|^http://(www\.)?trapit.com.gg(-|.).*$) 1; +} +## Add here all hosts that should be 
spared any referrer checking. +## Whitelist all your own IPs in this section, each IP followed by a 0; +geo $bad_referer { +127.0.0.1 0; +40.82.153.189 0; +111.111.111.111 0; +} + +# Geo directive to deny certain ip addresses +geo $validate_client { +default 0; + +# Cyveillance +38.100.19.8/29 1; +38.100.21.0/24 1; +38.100.41.64/26 1; +38.105.71.0/25 1; +38.105.83.0/27 1; +38.112.21.140/30 1; +38.118.42.32/29 1; +65.213.208.128/27 1; +65.222.176.96/27 1; +65.222.185.72/29 1; +85.25.176.0/20 1; +85.25.192.0/20 1; +85.25.208.0/22 1; +} diff --git a/config/web-server/conf.d/castad_gunicorn_https_ng.conf b/config/web-server/conf.d/castad_gunicorn_https_ng.conf new file mode 100644 index 0000000..a17a439 --- /dev/null +++ b/config/web-server/conf.d/castad_gunicorn_https_ng.conf @@ -0,0 +1,245 @@ +# =============================================== +# Production Level Nginx Configuration (Sample Template) +# 서버 사양: 쿼드코어 CPU, 4GB RAM, ~50 req/s +# 백엔드: FastAPI REST API Server +# =============================================== + +# HTTP 서버 블록 (포트 80) - HTTPS로 리다이렉트 +server { + listen 80; + server_name demo.castad.net www.demo.castad.net; + + # 보안을 위한 호스트 검증 - 허용되지 않은 도메인 차단 + # 도메인 확장자가 다르다면 추가해줘야함 + # if ($host !~* ^(www\.)?demo.castad.net\.(com|kr|net|org)$) { + # return 444; + # } + + # Let's Encrypt 도메인 검증 프로그램 허용 (리다이렉트 전에 처리) + # SSL 인증서 갱신을 위해 필수 + location ^~ /.well-known/acme-challenge/ { + allow all; + root /www/certbot; + try_files $uri =404; # 디렉토리 순회 공격 방지 + } + + # HTTP를 HTTPS로 리다이렉트 (acme-challenge 제외) + # return 301은 rewrite보다 효율적이며 $server_name이 $host보다 안전함 + location / { + return 301 https://$server_name$request_uri; + } +} + +# HTTPS 서버 블록 (포트 443) - 메인 애플리케이션 +server { + listen 443 ssl; + http2 on; + + server_name demo.castad.net www.demo.castad.net; + + # 악성 봇 차단 (nginx.conf의 http 블록에서 $bad_bot 맵 정의 필요) + if ($bad_bot) { + return 403; + } + + # SSL/TLS 인증서 설정 + ssl_certificate /etc/letsencrypt/live/demo.castad.net/fullchain.pem; + ssl_certificate_key 
/etc/letsencrypt/live/demo.castad.net/privkey.pem; + ssl_dhparam /etc/ssl/certs/demo.castad.net/dhparam.pem; # openssl dhparam -out /etc/ssl/certs/demo.castad.net/dhparam.pem 2048 + + # 최신 SSL/TLS 설정 - SSL Labs A+ 등급 달성 가능 + ssl_session_cache shared:SSL:50m; # SSL 세션 캐시 크기 증가 (트래픽 많을 시 유용) + ssl_session_timeout 10m; # SSL 세션 타임아웃 + ssl_session_tickets off; # 보안 향상을 위해 세션 티켓 비활성화 (nginx >= 1.5.9) + ssl_protocols TLSv1.2 TLSv1.3; # 최신 TLS 프로토콜만 사용 + ssl_prefer_server_ciphers off; # TLSv1.3에서는 클라이언트 선호 암호화 사용 (모범 사례) + # 최신 암호화 스위트 - CHACHA20-POLY1305 포함 (모바일 최적화) + ssl_ciphers "ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384"; + ssl_ecdh_curve secp384r1; # ECDH 곡선 설정 (nginx >= 1.1.0) + + # OCSP 스테이플링 - SSL 핸드셰이크 성능 향상 + # 인증서에 OCSP URL이 없으면 자동으로 비활성화됨 (경고는 정상) + #ssl_stapling on; + #ssl_stapling_verify on; + ssl_trusted_certificate /etc/letsencrypt/live/demo.castad.net/chain.pem; + resolver 1.1.1.1 8.8.8.8 valid=300s; # Cloudflare와 Google DNS 사용 + resolver_timeout 5s; + + # 보안 헤더 - 다양한 공격으로부터 보호 + add_header Strict-Transport-Security "max-age=63072000; includeSubDomains; preload" always; # HSTS - 다운그레이드 공격 방지 + add_header X-Frame-Options "DENY" always; # 클릭재킹 방지 + add_header X-Content-Type-Options "nosniff" always; # MIME 스니핑 방지 + add_header X-XSS-Protection "1; mode=block" always; # 레거시 XSS 보호 + add_header Referrer-Policy "strict-origin-when-cross-origin" always; # 리퍼러 정보 제어 + #add_header Content-Security-Policy "default-src 'self'; script-src 'self' 'unsafe-inline' ''; style-src 'self' 'unsafe-inline'; img-src 'self' data: https:; font-src 'self' data:; connect-src 'self' wss: ws:;" always; + #add_header Content-Security-Policy "default-src 'self'; script-src 'self' 'unsafe-inline' https://cdn.tailwindcss.com https://www.youtube.com https://s.ytimg.com; style-src 'self' 'unsafe-inline' 
https://fonts.googleapis.com https://cdn.tailwindcss.com; font-src 'self' data: https://fonts.gstatic.com; img-src 'self' data: https: https://i.ytimg.com https://img.youtube.com; connect-src 'self' wss: ws: https://cdn.tailwindcss.com; frame-src 'self' https://www.youtube.com https://www.youtube-nocookie.com; media-src 'self' https://www.youtube.com;" always; + add_header Content-Security-Policy "default-src * 'unsafe-inline' 'unsafe-eval' data: blob:;" always; + + # XSS 방지를 위한 CSP + # 'unsafe-inline'은 범용 설정이 아니며, 보안상 위험한 설정입니다. CSP의 핵심 보호 기능을 무력화시키므로, 개발 환경이나 레거시 코드 마이그레이션 과정에서만 임시로 사용하고, 프로덕션 환경에서는 nonce, hash, 또는 외부 파일 분리 방식으로 대체해야 합니다. + + # 클라이언트가 업로드할 수 있는 전체 요청 본문(body) 의 최대 허용 크기 + # 요청 바디(파일, 폼 데이터 등)가 이 값을 초과하면 Nginx는 즉시 413 Request Entity Too Large 에러를 반환 + # 업로드 제한선. + client_max_body_size 100M; # 최대 업로드 크기 제한 (애플리케이션에 맞게 조정) + + # 파일 캐시 - I/O 성능 향상 + open_file_cache max=1000 inactive=20s; # 최대 1000개 파일 캐시, 20초 비활성 시 제거 + open_file_cache_valid 30s; # 캐시 유효성 검사 주기 + open_file_cache_min_uses 2; # 최소 2회 사용 시 캐시 + open_file_cache_errors on; # 파일 오류도 캐시 + + # 로깅 설정 - 버퍼링으로 I/O 감소 + access_log /log/nginx/demo.castad.net.com.gunicorn_access.log main buffer=32k flush=5s; + error_log /log/nginx/demo.castad.net.com.gunicorn_error.log warn; + + # frontend에 오류 페이지 정의가 되어 있는 경우 사용 가능 + # # 커스텀 오류 페이지 - 더 나은 사용자 경험 및 정보 노출 방지 + # error_page 404 /404.html; + # error_page 500 502 503 504 /50x.html; + + # location = /404.html { + # internal; # 내부 리다이렉트만 허용 + # root /www/error_pages; + # } + + # location = /50x.html { + # internal; # 내부 리다이렉트만 허용 + # root /www/error_pages; + # } + + # 프론트엔드 정적 파일 루트 + root /www/o2o-castad-frontend/dist; + index index.html; + + location / { + try_files $uri $uri/ /index.html; + } + + # Fastapi 미디어 파일 - 사용자 업로드 파일 + location /media { + autoindex off; # 디렉토리 목록 비활성화 + # gzip_static on; # 사전 압축된 .gz 파일 사용 + expires 30d; # 브라우저 캐시 30일 후 브라우저가 다시 요청할 때 재검증 + alias /www/o2o-castad-backend/media; # Fastapi 프로젝트의 미디어 파일 경로 + + # 정적 파일 캐싱 - 브라우저 
캐시 최적화 + add_header Cache-Control "public, immutable"; + access_log off; # 액세스 로그 비활성화로 성능 향상 + } + + # Fastapi 정적 파일 - CSS, JS, 이미지 등 + location /static { + autoindex off; # 디렉토리 목록 비활성화 + # gzip_static on; # 사전 압축된 .gz 파일 사용 압축된 파일이 없다면 설정 무의미 + expires 30d; # 브라우저 캐시 30일 후 브라우저가 다시 요청할 때 재검증 + alias /www/o2o-castad-backend/static; # Fastapi 프로젝트의 정적 파일 경로 + + # 정적 파일 캐싱 - 브라우저 캐시 최적화 + add_header Cache-Control "public, immutable"; + access_log off; # 액세스 로그 비활성화로 성능 향상 + } + + # 메인 애플리케이션 - 백엔드로 프록시 + location /api/ { + autoindex off; # 디렉토리 목록 비활성화 + + # 속도 제한 - DDoS 및 무차별 대입 공격 방지 + # (nginx.conf의 http 블록에서 limit_req_zone과 limit_conn_zone 정의 필요) + #limit_req zone=general burst=20 nodelay; # 초당 요청 제한 + #limit_conn addr 10; # IP당 동시 연결 제한 + + # HTTP 메서드 제한 - HTTP 동사 변조 공격 방지 + limit_except GET POST HEAD OPTIONS DELETE { + deny all; + } + + # 백엔드 애플리케이션으로 프록시 + proxy_pass http://uvicorn-app:8000/; + + # WebSocket 지원 - 실시간 통신 애플리케이션에 필수 + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection $connection_upgrade; # nginx.conf에서 $connection_upgrade 맵 정의 필요 + + # 프록시 헤더 - 클라이언트 정보 전달 + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Forwarded-Host $host; + proxy_set_header X-Forwarded-Port $server_port; + + # 타임아웃 설정 - 애플리케이션에 맞게 조정 + proxy_connect_timeout 300s; + proxy_send_timeout 300s; + proxy_read_timeout 300s; + + # 버퍼 설정 - 대화형 애플리케이션에 최적화 - Websocket 사용시 설정 + # proxy_buffering off; # 즉시 응답 전달 + # proxy_request_buffering off; # 즉시 요청 전달 + + # 버퍼링 활성화 (기본값) - 기본 fastapi 사용시 설정 + proxy_buffering on; + proxy_request_buffering on; + + # 버퍼 크기 설정 - 기본 fastapi 사용시 설정 + proxy_buffer_size 4k; + proxy_buffers 8 4k; + proxy_busy_buffers_size 8k; + + proxy_set_header Accept-Encoding gzip; + } + + # Let's Encrypt 도메인 검증 프로그램 허용 (HTTPS에서도 필요시) + location ^~ 
/.well-known/acme-challenge/ { + allow all; + root /www/certbot; + try_files $uri =404; # 디렉토리 순회 공격 방지 + } + + # 정적 리소스 캐싱 - 이미지, 폰트, CSS, JS 등 + # 브라우저 캐시로 로드 시간 단축 및 서버 부하 감소 + location ~* \.(jpg|jpeg|png|gif|ico|css|js|woff|woff2|ttf|svg)$ { + expires 1y; # 1년 캐시 + add_header Cache-Control "public, immutable"; + access_log off; # 액세스 로그 비활성화로 성능 향상 + } + + # 닷 파일 차단 - .htaccess, .htpasswd, .svn, .git, .env 등 + # 민감한 설정 파일 노출 방지 + location ~ /\. { + deny all; + access_log off; # 차단된 시도 로그 비활성화 + log_not_found off; + } + + # 민감한 파일 확장자 차단 - 로그, 인증서, 스크립트, SQL 등 + # 보안을 위해 직접 접근 차단 + location ~* \.(log|binary|pem|enc|crt|conf|cnf|sql|sh|key|yml|lock)$ { + deny all; + access_log off; # 차단된 시도 로그 비활성화 + log_not_found off; + } + + # 민감한 설정 파일 차단 - composer, package.json, phpunit 등 + # 프로젝트 메타데이터 및 설정 파일 노출 방지 + location ~* (composer\.json|composer\.lock|composer\.phar|contributing\.md|license\.txt|readme\.rst|readme\.md|readme\.txt|copyright|artisan|gulpfile\.js|package\.json|phpunit\.xml|access_log|error_log|gruntfile\.js)$ { + deny all; + access_log off; # 차단된 시도 로그 비활성화 + log_not_found off; + } + + # 파비콘 - 로그 노이즈 제거 + location = /favicon.ico { + log_not_found off; + access_log off; + } + + # robots.txt - 검색 엔진 크롤러 제어 + location = /robots.txt { + log_not_found off; + access_log off; + allow all; + } +} diff --git a/config/web-server/nginx_conf.sh b/config/web-server/nginx_conf.sh new file mode 100755 index 0000000..30cb514 --- /dev/null +++ b/config/web-server/nginx_conf.sh @@ -0,0 +1,71 @@ +#!/bin/bash + +while : +do + echo "* if your webroot has sub-level, you should be insert as \\\/A\\\/B\\\/C" + echo "ex) shop\\\/django_sample" + echo -n "Enter the service web root without the path of '/www/' >" + read webroot + echo "Entered service web root: $webroot" + if [[ "$webroot" != "" ]]; then + break + fi +done + +while : +do + echo -n "Enter the service portnumber >" + read portnumber + echo "Entered service portnumber: $portnumber" + if [[ "$portnumber" != "" ]]; 
then + break + fi +done + +while : +do + echo -n "Enter the service domain >" + read domain + echo "Entered service domain: $domain" + if [[ "$domain" != "" ]]; then + break + fi +done + +while : +do + echo -n "Enter the app name >" + read appname + echo "Entered app name: $appname" + if [[ "$appname" != "" ]]; then + break + fi +done + +echo "Enter the serviceport" +echo -n "if you push enter with none, there are no port number >" +read serviceport +echo "Entered proxy port: $serviceport" + +while : +do + echo -n "Enter the file name >" + read filename + echo "Entered file name: $filename" + if [[ "$filename" != "" ]]; then + break + fi +done + +sed 's/webroot/'$webroot'/g' sample_nginx.conf > $filename'1'.temp +sed 's/portnumber/'$portnumber'/g' $filename'1'.temp > $filename'2'.temp +sed 's/domain/'$domain'/g' $filename'2'.temp > $filename'3'.temp +sed 's/appname/'$appname'/g' $filename'3'.temp > $filename'4'.temp +if [[ "$serviceport" == "" ]]; then + sed 's/:serviceport/''/g' $filename'4'.temp > $filename'5'.temp +else + sed 's/serviceport/'$serviceport'/g' $filename'4'.temp > $filename'5'.temp +fi +sed 's/filename/'$filename'/g' $filename'5'.temp > ./conf.d/$filename'_gunicorn_ng'.conf + +rm *.temp diff --git a/config/web-server/nginx_conf/nginx.conf b/config/web-server/nginx_conf/nginx.conf new file mode 100644 index 0000000..b99c79e --- /dev/null +++ b/config/web-server/nginx_conf/nginx.conf @@ -0,0 +1,126 @@ +user www-data; +worker_processes auto; + +# worker_rlimit_nofile directive +# CPU: 쿼드코어, RAM: 4GB, 요청수: ~50/s 기반 설정시 +worker_rlimit_nofile 8192; +# worker_rlimit_nofile 8192; +# worker_priority 0; +# worker_cpu_affinity 0001 0010 0100 1000; + +error_log /var/log/nginx/error.log warn; +pid /var/run/nginx.pid; + + +# Load ModSecurity dynamic module +# load_module /etc/nginx/modules/ngx_http_modsecurity_module.so + +events { + use epoll; # 리눅스에서 권장 + accept_mutex on; # 기본값이지만 명시해두면 좋음 + + # 단일 워커당 동시 연결 수 + # worker_processes * worker_connections = 최대 동시 
연결 수 + # 여기에 워커가 상시 쓰는 FD(에러/액세스 로그, 리스닝 소켓, epoll 등) 오버헤드를 조금 빼고 잡아야 안전하다 (약 50~150 정도 버퍼). + + # CPU: 쿼드코어, RAM: 4GB, 요청수: ~50/s 기반 설정시 + # off 설정에 대한 효과 + # 부하 분산: 워커 간 균등한 연결 분배 + # 안정성: 한 워커가 과부하되는 것 방지 + # 예측 가능한 성능: 일관된 응답 시간 + multi_accept off; + worker_connections 1024; +} + + +http { + include /etc/nginx/mime.types; + default_type application/octet-stream; + + # WebSocket 및 HTTP keepalive 동시 지원을 위한 Connection 헤더 동적 설정 + # - WebSocket 요청 ($http_upgrade가 있는 경우): "upgrade" 반환 + # - 일반 HTTP 요청 ($http_upgrade가 없는 경우): "" 반환 (keepalive 연결 유지) + map $http_upgrade $connection_upgrade { + default upgrade; + '' ''; + } + + log_format main '$remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for"'; + + access_log /var/log/nginx/main_access.log main; + error_log /var/log/nginx/main_error.log; + + server_tokens off; + + charset utf-8; + + # Docker 내부 DNS 사용 (컨테이너 이름 해석) + resolver 127.0.0.11 valid=30s ipv6=off; + resolver_timeout 5s; + + # 기본 헤더 버퍼 (대부분의 요청 처리) + client_header_buffer_size 4k; + + # 큰 헤더 버퍼 (JWT, 큰 쿠키 등) + large_client_header_buffers 4 16k; + + # POST나 PUT 요청으로 전송하는 본문(body) 데이터를 받을 때, Nginx가 메모리(RAM)에 임시 저장할 버퍼의 크기 + client_body_buffer_size 128k; # 일반적으로 64KB~256KB 권장 + client_body_timeout 15s; # 클라이언트 본문 수신 타임아웃 + client_header_timeout 15s; # 클라이언트 헤더 수신 타임아웃 + + # 효율적인 파일 전송 설정 + sendfile on; # 커널 공간에서 직접 파일 전송 (제로카피) + tcp_nopush on; # sendfile 사용 시 패킷 효율 향상 + tcp_nodelay on; # Keep-alive 연결에서 지연 없이 전송 (실시간 통신에 유리) + + # Keep-alive 연결 설정 + keepalive_timeout 30s; # 연결 유지 시간 + keepalive_requests 1000; # 연결당 최대 요청 수 + send_timeout 15s; # 클라이언트로 응답 전송 타임아웃 + + # ===== 파일 업로드 설정 ===== + client_max_body_size 50M; # 최대 업로드 크기 (필요에 따라 조정) + + # ===== 프록시 타임아웃 설정 ===== + proxy_connect_timeout 300s; # 백엔드 연결 타임아웃 + proxy_send_timeout 300s; # 백엔드로 요청 전송 타임아웃 + proxy_read_timeout 300s; # 백엔드 응답 수신 타임아웃 + proxy_request_buffering off; # 대용량 파일 스트리밍 업로드 (버퍼링 비활성화) + + # 
===== 해시 테이블 ===== + # MIME 타입 해시 테이블 설정 + # mime.types 파일에 정의된 파일 확장자와 MIME 타입 매핑을 저장 + # 예: .html -> text/html, .jpg -> image/jpeg 등의 매핑을 빠르게 찾기 위해 사용됨 + + types_hash_max_size 2048; # 기본 mime.types의 수백 개 타입을 충분히 수용하는 크기 + types_hash_bucket_size 64; # 각 해시 버킷 크기, CPU 캐시 라인과 정렬하여 성능 최적화 + + # 서버 이름 해시 테이블 설정 (server_name 지시자에 정의된 도메인명만 해당) + # server 블록의 server_name 지시자에 설정된 도메인명들을 빠르게 매칭하기 위한 해시 테이블 + server_names_hash_max_size 1024; # 여러 도메인/서브도메인 운영 시 충분한 공간 확보 + server_names_hash_bucket_size 64; # 도메인명 길이를 고려한 버킷 크기 (일반적으로 32~64면 충분) + + # Nginx 변수 해시 테이블 설정 + # $host, $remote_addr 같은 내장 변수와 map/set으로 정의한 커스텀 변수들을 저장하는 해시 테이블 + # HTTP 헤더를 변수로 변환한 $http_* 변수들도 여기에 포함됨 (예: $http_user_agent, $http_referer) + variables_hash_max_size 2048; # 내장 변수 + 커스텀 변수를 위한 충분한 공간 + variables_hash_bucket_size 64; # 변수명 길이와 충돌 방지를 위한 적절한 버킷 크기 + + # ===== 압축 설정 ===== + # 실시간 압축 비활성화 (CPU 부하 감소) + gzip off; + + # 미리 압축된 .gz 파일 제공 + # 예: style.css 요청 시 → style.css.gz 파일을 찾아서 전송 + gzip_static on; + + # Vary: Accept-Encoding 헤더 추가 (프록시 캐시 호환성) + gzip_vary on; + + include /etc/nginx/proxy_params/*; + include /etc/nginx/conf.d/*.conf; + include /etc/nginx/sites-enabled/*.conf; +} diff --git a/config/web-server/nginx_https_conf.sh b/config/web-server/nginx_https_conf.sh new file mode 100755 index 0000000..63acab5 --- /dev/null +++ b/config/web-server/nginx_https_conf.sh @@ -0,0 +1,71 @@ +#!/bin/bash + +while : +do + echo "* if your webroot has sub-level, you should be insert as \\\/A\\\/B\\\/C" + echo "ex) shop\\\/django_sample" + echo -n "Enter the service web root without the path of '/www/' >" + read webroot + echo "Entered service web root: $webroot" + if [[ "$webroot" != "" ]]; then + break + fi +done + +while : +do + echo -n "Enter the service portnumber >" + read portnumber + echo "Entered service portnumber: $portnumber" + if [[ "$portnumber" != "" ]]; then + break + fi +done + +while : +do + echo -n "Enter the service domain >" + read domain + echo "Entered service domain: 
$domain" + if [[ "$domain" != "" ]]; then + break + fi +done + +while : +do + echo -n "Enter the app name >" + read appname + echo "Entered app name: $appname" + if [[ "$appname" != "" ]]; then + break + fi +done + +echo "Enter the serviceport" +echo -n "if you push enter with none, there are no port number >" +read serviceport +echo "Entered proxy port: $serviceport" + +while : +do + echo -n "Enter the file name >" + read filename + echo "Entered file name: $filename" + if [[ "$filename" != "" ]]; then + break + fi +done + +sed 's/webroot/'$webroot'/g' sample_nginx_https.conf > $filename'1'.temp +sed 's/portnumber/'$portnumber'/g' $filename'1'.temp > $filename'2'.temp +sed 's/domain/'$domain'/g' $filename'2'.temp > $filename'3'.temp +sed 's/appname/'$appname'/g' $filename'3'.temp > $filename'4'.temp +if [[ "$serviceport" == "" ]]; then + sed 's/:serviceport/''/g' $filename'4'.temp > $filename'5'.temp +else + sed 's/serviceport/'$serviceport'/g' $filename'4'.temp > $filename'5'.temp +fi +sed 's/filename/'$filename'/g' $filename'5'.temp > ./conf.d/$filename'_gunicorn_https_ng'.conf + +rm *.temp diff --git a/config/web-server/proxy_params/proxy_params b/config/web-server/proxy_params/proxy_params new file mode 100644 index 0000000..edcfd0f --- /dev/null +++ b/config/web-server/proxy_params/proxy_params @@ -0,0 +1,14 @@ +proxy_set_header Host $http_host; +proxy_set_header X-Real-IP $remote_addr; +proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; +proxy_set_header X-Forwarded-Proto $scheme; +proxy_set_header Upgrade $http_upgrade; +proxy_set_header Connection $connection_upgrade; + +proxy_cache_bypass $http_upgrade; +proxy_buffering off; +proxy_redirect off; +# proxy_connect_timeout, proxy_send_timeout, proxy_read_timeout은 nginx.conf에서 설정 +proxy_buffers 32 4k; +proxy_headers_hash_max_size 512; +proxy_headers_hash_bucket_size 64; diff --git a/config/web-server/sample_nginx.conf b/config/web-server/sample_nginx.conf new file mode 100644 index 0000000..be171d5 
--- /dev/null +++ b/config/web-server/sample_nginx.conf @@ -0,0 +1,73 @@ +server { + listen portnumber; + server_name domain www.domain; + + if ($bad_bot) { + return 403; + } + + access_log /log/nginx/filename.com.gunicorn_access.log main; + error_log /log/nginx/filename.com.gunicorn_error.log warn; + + # if ($host !~* ^(domain\.com|www\.domain\.com)$) { + # return 444; + # } + + # Django media + location /media { + autoindex off; + gzip_static on; + expires max; + #alias /www/django_sample/media; + alias /www/webroot/media; # your Django project's media files - amend as required + #include /etc/nginx/mime.types; + } + + location /static { + autoindex off; + gzip_static on; + expires max; + #alias /www/django_sample/static; + # normally static folder is named as /static + alias /www/webroot/static; # your Django project's static files - amend as required + #include /etc/nginx/mime.types; + } + + location / { + autoindex off; + proxy_pass http://appname:serviceport; + # proxy_redirect http:// https://; + + } + + # Allow Lets Encrypt Domain Validation Program + location ^~ /.well-known/acme-challenge/ { + allow all; + root /www/webroot; + } + + # Block dot file (.htaccess .htpasswd .svn .git .env and so on.) + location ~ /\. { + deny all; + } + + # Block (log file, binary, certificate, shell script, sql dump file) access. 
+ location ~* \.(log|binary|pem|enc|crt|conf|cnf|sql|sh|key|yml|lock)$ { + deny all; + } + + # Block access + location ~* (composer\.json|composer\.lock|composer\.phar|contributing\.md|license\.txt|readme\.rst|readme\.md|readme\.txt|copyright|artisan|gulpfile\.js|package\.json|phpunit\.xml|access_log|error_log|gruntfile\.js)$ { + deny all; + } + + location = /favicon.ico { + log_not_found off; + access_log off; + } + + location = /robots.txt { + log_not_found off; + access_log off; + } +} diff --git a/config/web-server/sample_nginx_https.conf b/config/web-server/sample_nginx_https.conf new file mode 100644 index 0000000..3d206e2 --- /dev/null +++ b/config/web-server/sample_nginx_https.conf @@ -0,0 +1,232 @@ +# =============================================== +# Production Level Nginx Configuration (Sample Template) +# 서버 사양: 쿼드코어 CPU, 4GB RAM, ~50 req/s +# 백엔드: FastAPI REST API Server +# =============================================== + +# HTTP 서버 블록 (포트 80) - HTTPS로 리다이렉트 +server { + listen 80; + server_name domain www.domain; + + # 보안을 위한 호스트 검증 - 허용되지 않은 도메인 차단 + # 도메인 확장자가 다르다면 추가해줘야함 + if ($host !~* ^(www\.)?domain\.(com|kr|net|org)$) { + return 444; + } + + # Let's Encrypt 도메인 검증 프로그램 허용 (리다이렉트 전에 처리) + # SSL 인증서 갱신을 위해 필수 + location ^~ /.well-known/acme-challenge/ { + allow all; + root /www/certbot; + try_files $uri =404; # 디렉토리 순회 공격 방지 + } + + # HTTP를 HTTPS로 리다이렉트 (acme-challenge 제외) + # return 301은 rewrite보다 효율적이며 $server_name이 $host보다 안전함 + location / { + return 301 https://$server_name$request_uri; + } +} + +# HTTPS 서버 블록 (포트 443) - 메인 애플리케이션 +server { + listen 443 ssl http2; + server_name domain.com www.domain.com; + + # 악성 봇 차단 (nginx.conf의 http 블록에서 $bad_bot 맵 정의 필요) + if ($bad_bot) { + return 403; + } + + # SSL/TLS 인증서 설정 + ssl_certificate /etc/letsencrypt/live/domain/fullchain.pem; + ssl_certificate_key /etc/letsencrypt/live/domain/privkey.pem; + ssl_dhparam /etc/ssl/certs/domain/dhparam.pem; # openssl dhparam -out /etc/ssl/certs/domain/dhparam.pem 
2048 + + # 최신 SSL/TLS 설정 - SSL Labs A+ 등급 달성 가능 + ssl_session_cache shared:SSL:50m; # SSL 세션 캐시 크기 증가 (트래픽 많을 시 유용) + ssl_session_timeout 10m; # SSL 세션 타임아웃 + ssl_session_tickets off; # 보안 향상을 위해 세션 티켓 비활성화 (nginx >= 1.5.9) + ssl_protocols TLSv1.2 TLSv1.3; # 최신 TLS 프로토콜만 사용 + ssl_prefer_server_ciphers off; # TLSv1.3에서는 클라이언트 선호 암호화 사용 (모범 사례) + # 최신 암호화 스위트 - CHACHA20-POLY1305 포함 (모바일 최적화) + ssl_ciphers "ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384"; + ssl_ecdh_curve secp384r1; # ECDH 곡선 설정 (nginx >= 1.1.0) + + # OCSP 스테이플링 - SSL 핸드셰이크 성능 향상 + # 인증서에 OCSP URL이 없으면 자동으로 비활성화됨 (경고는 정상) + ssl_stapling on; + ssl_stapling_verify on; + ssl_trusted_certificate /etc/letsencrypt/live/domain/chain.pem; + resolver 1.1.1.1 8.8.8.8 valid=300s; # Cloudflare와 Google DNS 사용 + resolver_timeout 5s; + + # 보안 헤더 - 다양한 공격으로부터 보호 + add_header Strict-Transport-Security "max-age=63072000; includeSubDomains; preload" always; # HSTS - 다운그레이드 공격 방지 + add_header X-Frame-Options "DENY" always; # 클릭재킹 방지 + add_header X-Content-Type-Options "nosniff" always; # MIME 스니핑 방지 + add_header X-XSS-Protection "1; mode=block" always; # 레거시 XSS 보호 + add_header Referrer-Policy "strict-origin-when-cross-origin" always; # 리퍼러 정보 제어 + add_header Content-Security-Policy "default-src 'self'; script-src 'self' 'unsafe-inline' ''; style-src 'self' 'unsafe-inline'; img-src 'self' data: https:; font-src 'self' data:; connect-src 'self' wss: ws:;" always; + # XSS 방지를 위한 CSP + # 'unsafe-inline'은 범용 설정이 아니며, 보안상 위험한 설정입니다. CSP의 핵심 보호 기능을 무력화시키므로, 개발 환경이나 레거시 코드 마이그레이션 과정에서만 임시로 사용하고, 프로덕션 환경에서는 nonce, hash, 또는 외부 파일 분리 방식으로 대체해야 합니다. + + # 클라이언트가 업로드할 수 있는 전체 요청 본문(body) 의 최대 허용 크기 + # 요청 바디(파일, 폼 데이터 등)가 이 값을 초과하면 Nginx는 즉시 413 Request Entity Too Large 에러를 반환 + # 업로드 제한선. 
+ client_max_body_size 100M; # 최대 업로드 크기 제한 (애플리케이션에 맞게 조정) + + # 파일 캐시 - I/O 성능 향상 + open_file_cache max=1000 inactive=20s; # 최대 1000개 파일 캐시, 20초 비활성 시 제거 + open_file_cache_valid 30s; # 캐시 유효성 검사 주기 + open_file_cache_min_uses 2; # 최소 2회 사용 시 캐시 + open_file_cache_errors on; # 파일 오류도 캐시 + + # 로깅 설정 - 버퍼링으로 I/O 감소 + access_log /log/nginx/domain.com.gunicorn_access.log main buffer=32k flush=5s; + error_log /log/nginx/domain.com.gunicorn_error.log warn; + + # frontend에 오류 페이지 정의가 되어 있는 경우 사용 가능 + # # 커스텀 오류 페이지 - 더 나은 사용자 경험 및 정보 노출 방지 + # error_page 404 /404.html; + # error_page 500 502 503 504 /50x.html; + + # location = /404.html { + # internal; # 내부 리다이렉트만 허용 + # root /www/error_pages; + # } + + # location = /50x.html { + # internal; # 내부 리다이렉트만 허용 + # root /www/error_pages; + # } + + # Fastapi 미디어 파일 - 사용자 업로드 파일 + location /media { + autoindex off; # 디렉토리 목록 비활성화 + # gzip_static on; # 사전 압축된 .gz 파일 사용 + expires 30d; # 브라우저 캐시 30일 후 브라우저가 다시 요청할 때 재검증 + alias /www/webroot/media; # Fastapi 프로젝트의 미디어 파일 경로 + + # 정적 파일 캐싱 - 브라우저 캐시 최적화 + add_header Cache-Control "public, immutable"; + access_log off; # 액세스 로그 비활성화로 성능 향상 + } + + # Fastapi 정적 파일 - CSS, JS, 이미지 등 + location /static { + autoindex off; # 디렉토리 목록 비활성화 + # gzip_static on; # 사전 압축된 .gz 파일 사용 압축된 파일이 없다면 설정 무의미 + expires 30d; # 브라우저 캐시 30일 후 브라우저가 다시 요청할 때 재검증 + alias /www/webroot/static; # Fastapi 프로젝트의 정적 파일 경로 + + # 정적 파일 캐싱 - 브라우저 캐시 최적화 + add_header Cache-Control "public, immutable"; + access_log off; # 액세스 로그 비활성화로 성능 향상 + } + + # 메인 애플리케이션 - 백엔드로 프록시 + location / { + autoindex off; # 디렉토리 목록 비활성화 + + # 속도 제한 - DDoS 및 무차별 대입 공격 방지 + # (nginx.conf의 http 블록에서 limit_req_zone과 limit_conn_zone 정의 필요) + limit_req zone=general burst=20 nodelay; # 초당 요청 제한 + limit_conn addr 10; # IP당 동시 연결 제한 + + # HTTP 메서드 제한 - HTTP 동사 변조 공격 방지 + limit_except GET POST HEAD OPTIONS { + deny all; + } + + # 백엔드 애플리케이션으로 프록시 + proxy_pass http://appname:serviceport; + + # WebSocket 지원 - 실시간 통신 애플리케이션에 필수 + proxy_http_version 1.1; 
+ proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection $connection_upgrade; # nginx.conf에서 $connection_upgrade 맵 정의 필요 + + # 프록시 헤더 - 클라이언트 정보 전달 + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Forwarded-Host $host; + proxy_set_header X-Forwarded-Port $server_port; + + # 타임아웃 설정 - 애플리케이션에 맞게 조정 + proxy_connect_timeout 60s; + proxy_send_timeout 60s; + proxy_read_timeout 60s; + + # 버퍼 설정 - 대화형 애플리케이션에 최적화 - Websocket 사용시 설정 + # proxy_buffering off; # 즉시 응답 전달 + # proxy_request_buffering off; # 즉시 요청 전달 + + # 버퍼링 활성화 (기본값) - 기본 fastapi 사용시 설정 + proxy_buffering on; + proxy_request_buffering on; + + # 버퍼 크기 설정 - 기본 fastapi 사용시 설정 + proxy_buffer_size 4k; + proxy_buffers 8 4k; + proxy_busy_buffers_size 8k; + + proxy_set_header Accept-Encoding gzip; + } + + # Let's Encrypt 도메인 검증 프로그램 허용 (HTTPS에서도 필요시) + location ^~ /.well-known/acme-challenge/ { + allow all; + root /www/certbot; + try_files $uri =404; # 디렉토리 순회 공격 방지 + } + + # 정적 리소스 캐싱 - 이미지, 폰트, CSS, JS 등 + # 브라우저 캐시로 로드 시간 단축 및 서버 부하 감소 + location ~* \.(jpg|jpeg|png|gif|ico|css|js|woff|woff2|ttf|svg)$ { + expires 1y; # 1년 캐시 + add_header Cache-Control "public, immutable"; + access_log off; # 액세스 로그 비활성화로 성능 향상 + } + + # 닷 파일 차단 - .htaccess, .htpasswd, .svn, .git, .env 등 + # 민감한 설정 파일 노출 방지 + location ~ /\. 
{ + deny all; + access_log off; # 차단된 시도 로그 비활성화 + log_not_found off; + } + + # 민감한 파일 확장자 차단 - 로그, 인증서, 스크립트, SQL 등 + # 보안을 위해 직접 접근 차단 + location ~* \.(log|binary|pem|enc|crt|conf|cnf|sql|sh|key|yml|lock)$ { + deny all; + access_log off; # 차단된 시도 로그 비활성화 + log_not_found off; + } + + # 민감한 설정 파일 차단 - composer, package.json, phpunit 등 + # 프로젝트 메타데이터 및 설정 파일 노출 방지 + location ~* (composer\.json|composer\.lock|composer\.phar|contributing\.md|license\.txt|readme\.rst|readme\.md|readme\.txt|copyright|artisan|gulpfile\.js|package\.json|phpunit\.xml|access_log|error_log|gruntfile\.js)$ { + deny all; + access_log off; # 차단된 시도 로그 비활성화 + log_not_found off; + } + + # 파비콘 - 로그 노이즈 제거 + location = /favicon.ico { + log_not_found off; + access_log off; + } + + # robots.txt - 검색 엔진 크롤러 제어 + location = /robots.txt { + log_not_found off; + access_log off; + allow all; + } +} diff --git a/docker/gunicorn/Dockerfile b/docker/gunicorn/Dockerfile new file mode 100644 index 0000000..b92c9e5 --- /dev/null +++ b/docker/gunicorn/Dockerfile @@ -0,0 +1,75 @@ +FROM ubuntu:24.04 + +ENV TZ=Asia/Seoul +ENV PYTHONUNBUFFERED=1 +ENV DEBIAN_FRONTEND=noninteractive + +# ======================================== +# 1. Base packages & timezone setup +# ======================================== +RUN apt-get update && \ + apt-get install -y --no-install-recommends apt-utils && \ + apt-get install -yq tzdata && \ + ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone && \ + apt-get install -y \ + curl wget git tar gnupg2 lsb-release lz4 zstd vim \ + build-essential zlib1g-dev libncurses5-dev libgdbm-dev \ + libnss3-dev libssl-dev libreadline-dev libffi-dev libsqlite3-dev \ + python3-dev libmysqlclient-dev pkg-config ca-certificates + +# ======================================== +# 2. 
Python 3.13 build & install +# ======================================== +RUN cd /usr/src && \ + wget https://www.python.org/ftp/python/3.13.11/Python-3.13.11.tar.xz && \ + tar -xf Python-3.13.11.tar.xz && \ + cd Python-3.13.11 && \ + ./configure --enable-optimizations && \ + make altinstall && \ + rm -rf /usr/src/Python-3.13.11 /usr/src/Python-3.13.11.tar.xz + +# ======================================== +# 3. Python symlinks +# ======================================== +RUN rm -f /usr/bin/python /usr/bin/python3 && \ + ln -s /usr/local/bin/python3.13 /usr/bin/python && \ + ln -s /usr/local/bin/python3.13 /usr/bin/python3 && \ + ln -s /usr/local/bin/python3.13 /usr/local/bin/python && \ + ln -s /usr/local/bin/python3.13 /usr/local/bin/python3 && \ + ln -s /usr/local/bin/pip3.13 /usr/bin/pip && \ + ln -s /usr/local/bin/pip3.13 /usr/bin/pip3 && \ + ln -s /usr/local/bin/pip3.13 /usr/local/bin/pip && \ + ln -s /usr/local/bin/pip3.13 /usr/local/bin/pip3 + +# ======================================== +# 4. Python packages +# ======================================== +RUN pip install --upgrade pip && \ + pip install wheel && \ + pip install sqlalchemy alembic pydantic && \ + pip install psycopg2-binary asyncpg && \ + pip install mysqlclient asyncmy && \ + pip install gunicorn uvicorn[standard] && \ + pip install fastapi uv poetry + +# ======================================== +# 5. Percona XtraBackup (mysql backup) +# ======================================== +RUN curl -O https://repo.percona.com/apt/percona-release_latest.generic_all.deb && \ + apt-get install -y ./percona-release_latest.generic_all.deb && \ + rm -f percona-release_latest.generic_all.deb && \ + apt-get update && \ + percona-release enable pxb-84-lts && \ + apt-get install -y percona-xtrabackup-84 + +# ======================================== +# 6. PostgreSQL backup (pgbackrest) +# ======================================== +RUN apt-get install -y pgbackrest + +# ======================================== +# 7. 
Cleanup +# ======================================== +RUN apt-get clean && \ + rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && \ + update-ca-certificates diff --git a/docker/nginx/Dockerfile b/docker/nginx/Dockerfile new file mode 100644 index 0000000..8367dda --- /dev/null +++ b/docker/nginx/Dockerfile @@ -0,0 +1,45 @@ +FROM nginx:1.26-bookworm + +ENV TZ=Asia/Seoul +ENV DEBIAN_FRONTEND=noninteractive + +# ======================================== +# 1. Base packages & timezone setup +# ======================================== +RUN apt-get update && \ + apt-get install -y --no-install-recommends apt-utils && \ + apt-get install -yq tzdata && \ + ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone + +# ======================================== +# 2. Required packages +# ======================================== +RUN apt-get install -y sendmail wget gnupg ca-certificates + +# ======================================== +# 3. Cron & Certbot (SSL auto-renewal) +# ======================================== +RUN apt-get install -y cron certbot python3-certbot-nginx + +# ======================================== +# 4. CA certificates +# ======================================== +RUN update-ca-certificates && \ + chmod 644 /etc/ssl/certs/ca-certificates.crt + +# ======================================== +# 5. Cleanup +# ======================================== +RUN apt-get autoremove -y && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* + +# ======================================== +# 6. Certbot auto-renewal cron job +# ======================================== +RUN crontab -l 2>/dev/null | { cat; echo "0 5 * * 1 certbot renew --quiet --deploy-hook \"nginx -t && service nginx reload\" >> /log/nginx/crontab_\$(date +\%Y\%m\%d).log 2>&1"; } | crontab - + +# ======================================== +# 7. 
Add cron to nginx entrypoint +# ======================================== +RUN sed -i'' -r -e "/set/i\cron" /docker-entrypoint.sh || true diff --git a/log/mysql/.gitkeep b/log/mysql/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/log/nginx/.gitkeep b/log/nginx/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/log/postgresql/.gitkeep b/log/postgresql/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/log/uvicorn/celery/.gitkeep b/log/uvicorn/celery/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/log/uvicorn/celerybeat/.gitkeep b/log/uvicorn/celerybeat/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/script/crontab_gunicorn_set.sh b/script/crontab_gunicorn_set.sh new file mode 100755 index 0000000..75abad4 --- /dev/null +++ b/script/crontab_gunicorn_set.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +echo "0 6 * * 1 root docker restart nginx-gunicorn-webserver" >> /etc/crontab diff --git a/script/letsencrypt.sh b/script/letsencrypt.sh new file mode 100755 index 0000000..cccfcb6 --- /dev/null +++ b/script/letsencrypt.sh @@ -0,0 +1,91 @@ +#!/bin/bash + +my_array=() +delimiter="-d" +domain_string="" + +apt-get update && apt-get install -y sendmail wget vim cron certbot python3-certbot-nginx ca-certificates +while : +do + echo -n "Enter the service webroot_folder >" + read webroot_folder + echo "Entered service webroot_folder: $webroot_folder" + if [[ "$webroot_folder" != "" ]]; then + break + fi +done + +while : +do + echo -n "To add a subdomain, type something like 'aaa.com www.aaa.com sub.aaa.com', but all domains refer to the same web root" + echo -n "A domain in aaa.com format must be entered first." 
+	echo -n "Enter the service domain >"
+	read domain
+	echo "Entered service domain: $domain"
+	if [[ "$domain" != "" ]]; then
+		break
+	fi
+done
+
+IFS=' ' read -ra my_array <<< "$domain"
+
+while :
+do
+	echo -n "Enter the user e-mail >"
+	read mail
+	echo "Entered user e-mail: $mail"
+	if [[ "$mail" != "" ]]; then
+		break
+	fi
+done
+
+for element in "${my_array[@]}"; do
+	domain_string+=" $delimiter $element"
+done
+
+# Remove leading space
+# domain_string="${domain_string# }"
+
+# for element in "${my_array[@]}"; do
+if ! test -f /ssl/${my_array[0]}/dhparam.pem ; then
+	if ! test -f /etc/ssl/certs/${my_array[0]}/dhparam.pem ; then
+		echo "try to create ssl key using openssl "
+		if ! test -d /etc/ssl/certs/${my_array[0]}/ ; then
+			echo "create "${my_array[0]}" folder: /etc/ssl/certs/"${my_array[0]}"/"
+			mkdir -p /etc/ssl/certs/${my_array[0]}/
+		fi
+		openssl dhparam -out /etc/ssl/certs/${my_array[0]}/dhparam.pem 4096
+		if ! test -d /ssl/${my_array[0]}/ ; then
+			echo "create "${my_array[0]}" folder: /ssl/"${my_array[0]}"/"
+			mkdir -p /ssl/${my_array[0]}/
+		fi
+		cp /etc/ssl/certs/${my_array[0]}/dhparam.pem /ssl/${my_array[0]}/ -r
+	# else
+	# 	echo "copy ssl folder by already made"
+	# 	cp /ssl/certs/$domain/dhparam.pem /etc/ssl/certs/dhparam.pem -r
+	fi
+else
+	if ! test -d /etc/ssl/certs/${my_array[0]}/ ; then
+		echo "create "${my_array[0]}" folder: /etc/ssl/certs/"${my_array[0]}"/"
+		mkdir -p /etc/ssl/certs/${my_array[0]}/
+	fi
+	cp /ssl/${my_array[0]}/dhparam.pem /etc/ssl/certs/${my_array[0]}/ -r
+fi
+# done
+
+#if ! test -d /etc/letsencrypt/live/test.com ;
+if ! test -d /etc/letsencrypt/live/${my_array[0]} ; then
+	echo "try to create authentication key using certbot "
+	certbot certonly --non-interactive --agree-tos --email $mail --webroot -w /www/$webroot_folder$domain_string
+	echo "certbot certonly --non-interactive --agree-tos --email "$mail" --webroot -w /www/"$webroot_folder$domain_string
+	# if ! 
test -d /ssl/letsencrypt/$domain/ ; then
+	# 	echo "create domain folder: /ssl/letsencrypt/"$domain"/"
+	# 	mkdir -p /ssl/letsencrypt/$domain/
+	# fi
+	#cp /etc/letsencrypt/ /ssl/letsencrypt/$domain/ -r
+# else
+# 	echo "copy letsencrypt folder by already made"
+# 	cp /ssl/letsencrypt/$domain/ /etc/letsencrypt/ -r
+fi
+
+cat <(crontab -l) <(echo '0 5 * * 1 certbot renew --quiet --deploy-hook "service nginx restart" > /log/nginx/crontab_renew.log 2>&1') | crontab -
\ No newline at end of file
diff --git a/script/logrotate/nginx/nginx b/script/logrotate/nginx/nginx
new file mode 100644
index 0000000..9913439
--- /dev/null
+++ b/script/logrotate/nginx/nginx
@@ -0,0 +1,21 @@
+# Nginx log rotation settings
+# - Uses create + USR1 signal (NOT copytruncate): a new log file is created and
+#   nginx reopens its logs on USR1, so no log lines are lost and no restart is needed
+# - Requires the nginx master PID file at /var/run/nginx.pid
+/log/nginx/*.log {
+    daily
+    maxsize 100M
+    rotate 30
+    missingok
+    notifempty
+    compress
+    delaycompress
+    create 0640 nginx nginx
+    sharedscripts
+    postrotate
+        # Send USR1 to nginx so it reopens its log files
+        if [ -f /var/run/nginx.pid ]; then
+            kill -USR1 `cat /var/run/nginx.pid`
+        fi
+    endscript
+}
\ No newline at end of file
diff --git a/script/logrotate/uvicorn/celery/uvicorn-celery b/script/logrotate/uvicorn/celery/uvicorn-celery
new file mode 100644
index 0000000..0713d77
--- /dev/null
+++ b/script/logrotate/uvicorn/celery/uvicorn-celery
@@ -0,0 +1,14 @@
+# Uvicorn Celery Worker 로그 로테이션 설정
+# - Docker 컨테이너 환경에서 무중단 운영을 위해 copytruncate 방식 사용
+# - copytruncate: 로그 파일을 복사 후 원본을 비우는 방식 (서비스 재시작 불필요)
+# - 로테이션 순간 극소량의 로그가 누락될 수 있으나 서비스는 중단되지 않음
+/log/uvicorn/celery/*.log {
+    daily
+    maxsize 100M
+    rotate 30
+    missingok
+    notifempty
+    compress
+    delaycompress
+    copytruncate
+}
diff --git a/script/logrotate/uvicorn/celerybeat/uvicorn-celerybeat b/script/logrotate/uvicorn/celerybeat/uvicorn-celerybeat
new file mode 100644
index 0000000..53c0f33
--- /dev/null
+++ b/script/logrotate/uvicorn/celerybeat/uvicorn-celerybeat
@@ -0,0 +1,13 @@
+# Uvicorn Celery Beat 
로그 로테이션 설정
+# - Docker 컨테이너 환경에서 무중단 운영을 위해 copytruncate 방식 사용
+# - copytruncate: 로그 파일을 복사 후 원본을 비우는 방식 (서비스 재시작 불필요)
+# - 로테이션 순간 극소량의 로그가 누락될 수 있으나 서비스는 중단되지 않음
+/log/uvicorn/celerybeat/*.log {
+    daily
+    rotate 30
+    missingok
+    notifempty
+    compress
+    delaycompress
+    copytruncate
+}
diff --git a/script/logrotate/uvicorn/uvicorn b/script/logrotate/uvicorn/uvicorn
new file mode 100644
index 0000000..1275088
--- /dev/null
+++ b/script/logrotate/uvicorn/uvicorn
@@ -0,0 +1,14 @@
+# Uvicorn 로그 로테이션 설정
+# - Docker 컨테이너 환경에서 무중단 운영을 위해 copytruncate 방식 사용
+# - copytruncate: 로그 파일을 복사 후 원본을 비우는 방식 (서비스 재시작 불필요)
+# - 로테이션 순간 극소량의 로그가 누락될 수 있으나 서비스는 중단되지 않음
+/log/uvicorn/*.log {
+    daily
+    maxsize 100M
+    rotate 30
+    missingok
+    notifempty
+    compress
+    delaycompress
+    copytruncate
+}
diff --git a/www/.gitkeep b/www/.gitkeep
new file mode 100644
index 0000000..e69de29