diff --git a/.gitignore b/.gitignore index e12717009..d6fe435c0 100644 --- a/.gitignore +++ b/.gitignore @@ -94,3 +94,9 @@ Dockerfile # User provided certs for TLS. certs acme + +database_dump.sql + +idc-staging.cloud.library.jhu.edu.csr + +idc-staging.cloud.library.jhu.edu.key diff --git a/IDC-README.md b/IDC-README.md new file mode 100644 index 000000000..f623d97da --- /dev/null +++ b/IDC-README.md @@ -0,0 +1,16 @@ +## To run +```shell +make jhu_up +``` + +## To stop +Stop without losing data. +```shell +make jhu_down +``` + +## To stop and reset +Stop and reset without removing codebase directory. +```shell +make jhu_clean +``` diff --git a/Makefile b/Makefile index 48870a1e8..2d00ee923 100644 --- a/Makefile +++ b/Makefile @@ -154,9 +154,9 @@ starter: QUOTED_CURDIR = "$(CURDIR)" starter: generate-secrets $(MAKE) starter-init ENVIRONMENT=starter if [ -z "$$(ls -A $(QUOTED_CURDIR)/codebase)" ]; then \ - docker container run --rm -v $(CURDIR)/codebase:/home/root $(REPOSITORY)/nginx:$(TAG) with-contenv bash -lc 'composer create-project $(CODEBASE_PACKAGE) /tmp/codebase; mv /tmp/codebase/* /home/root;'; \ + docker container run --rm -v $(CURDIR)/codebase:/home/root -u nginx $(REPOSITORY)/nginx:$(TAG) bash -c 'composer create-project $(CODEBASE_PACKAGE) /tmp/codebase && mv /tmp/codebase/* /home/root'; \ else \ - docker container run --rm -v $(CURDIR)/codebase:/home/root $(REPOSITORY)/nginx:$(TAG) with-contenv bash -lc 'cd /home/root; composer install'; \ + docker container run --rm -v $(CURDIR)/codebase:/home/root -u nginx $(REPOSITORY)/nginx:$(TAG) bash -c 'cd /home/root && composer install'; \ fi $(MAKE) set-files-owner SRC=$(CURDIR)/codebase ENVIRONMENT=starter $(MAKE) compose-up @@ -173,14 +173,14 @@ starter_dev: generate-secrets fi $(MAKE) set-files-owner SRC=$(CURDIR)/codebase ENVIRONMENT=starter_dev $(MAKE) compose-up - docker compose exec -T drupal with-contenv bash -lc 'chown -R nginx:nginx /var/www/drupal/ ; su nginx -s /bin/bash -c "composer install"' + docker compose exec -T -u nginx drupal sh -c 'composer install && chown -R nginx:nginx .' $(MAKE) starter-finalize ENVIRONMENT=starter_dev .PHONY: production production: init $(MAKE) compose-up - docker compose exec -T drupal with-contenv bash -lc 'composer install; chown -R nginx:nginx .' + docker compose exec -T -u nginx drupal sh -c 'composer install && chown -R nginx:nginx .' $(MAKE) starter-finalize ENVIRONMENT=starter diff --git a/build/configs/custom_www.conf b/build/configs/custom_www.conf new file mode 100644 index 000000000..db96ecba3 --- /dev/null +++ b/build/configs/custom_www.conf @@ -0,0 +1,439 @@ +; Start a new pool named 'www'. +; the variable $pool can be used in any directive and will be replaced by the +; pool name ('www' here) +[www] + +; Per pool prefix +; It only applies on the following directives: +; - 'access.log' +; - 'slowlog' +; - 'listen' (unixsocket) +; - 'chroot' +; - 'chdir' +; - 'php_values' +; - 'php_admin_values' +; When not set, the global prefix (or /usr) applies instead. +; Note: This directive can also be relative to the global prefix. +; Default Value: none +;prefix = /path/to/pools/$pool + +; Unix user/group of processes +; Note: The user is mandatory. If the group is not set, the default user's group +; will be used. +user = nginx +group = nginx + +; The address on which to accept FastCGI requests. 
+; Valid syntaxes are: +; 'ip.add.re.ss:port' - to listen on a TCP socket to a specific IPv4 address on +; a specific port; +; '[ip:6:addr:ess]:port' - to listen on a TCP socket to a specific IPv6 address on +; a specific port; +; 'port' - to listen on a TCP socket to all addresses +; (IPv6 and IPv4-mapped) on a specific port; +; '/path/to/unix/socket' - to listen on a unix socket. +; Note: This value is mandatory. +listen = php-fpm83.sock + +; Set listen(2) backlog. +; Default Value: 511 (-1 on FreeBSD and OpenBSD) +;listen.backlog = 511 + +; Set permissions for unix socket, if one is used. In Linux, read/write +; permissions must be set in order to allow connections from a web server. Many +; BSD-derived systems allow connections regardless of permissions. +; Default Values: user and group are set as the running user +; mode is set to 0660 +listen.owner = nginx +listen.group = nginx +listen.mode = 0660 +; When POSIX Access Control Lists are supported you can set them using +; these options, value is a comma separated list of user/group names. +; When set, listen.owner and listen.group are ignored +;listen.acl_users = +;listen.acl_groups = + +; List of addresses (IPv4/IPv6) of FastCGI clients which are allowed to connect. +; Equivalent to the FCGI_WEB_SERVER_ADDRS environment variable in the original +; PHP FCGI (5.2.2+). Makes sense only with a tcp listening socket. Each address +; must be separated by a comma. If this value is left blank, connections will be +; accepted from any ip address. +; Default Value: any +;listen.allowed_clients = 127.0.0.1 + +; Specify the nice(2) priority to apply to the pool processes (only if set) +; The value can vary from -19 (highest priority) to 20 (lower priority) +; Note: - It will only work if the FPM master process is launched as root +; - The pool processes will inherit the master process priority +; unless it specified otherwise +; Default Value: no set +; process.priority = -19 + +; Set the process dumpable flag (PR_SET_DUMPABLE prctl) even if the process user +; or group is differrent than the master process user. It allows to create process +; core dump and ptrace the process for the pool user. +; Default Value: no +; process.dumpable = yes + +; Choose how the process manager will control the number of child processes. +; Possible Values: +; static - a fixed number (pm.max_children) of child processes; +; dynamic - the number of child processes are set dynamically based on the +; following directives. With this process management, there will be +; always at least 1 children. +; pm.max_children - the maximum number of children that can +; be alive at the same time. +; pm.start_servers - the number of children created on startup. +; pm.min_spare_servers - the minimum number of children in 'idle' +; state (waiting to process). If the number +; of 'idle' processes is less than this +; number then some children will be created. +; pm.max_spare_servers - the maximum number of children in 'idle' +; state (waiting to process). If the number +; of 'idle' processes is greater than this +; number then some children will be killed. +; ondemand - no children are created at startup. Children will be forked when +; new requests will connect. The following parameter are used: +; pm.max_children - the maximum number of children that +; can be alive at the same time. +; pm.process_idle_timeout - The number of seconds after which +; an idle process will be killed. +; Note: This value is mandatory. 
+pm = dynamic + +; The number of child processes to be created when pm is set to 'static' and the +; maximum number of child processes when pm is set to 'dynamic' or 'ondemand'. +; This value sets the limit on the number of simultaneous requests that will be +; served. Equivalent to the ApacheMaxClients directive with mpm_prefork. +; Equivalent to the PHP_FCGI_CHILDREN environment variable in the original PHP +; CGI. The below defaults are based on a server without much resources. Don't +; forget to tweak pm.* to fit your needs. +; Note: Used when pm is set to 'static', 'dynamic' or 'ondemand' +; Note: This value is mandatory. +pm.max_children = 5 + +; The number of child processes created on startup. +; Note: Used only when pm is set to 'dynamic' +; Default Value: (min_spare_servers + max_spare_servers) / 2 +pm.start_servers = 2 + +; The desired minimum number of idle server processes. +; Note: Used only when pm is set to 'dynamic' +; Note: Mandatory when pm is set to 'dynamic' +pm.min_spare_servers = 1 + +; The desired maximum number of idle server processes. +; Note: Used only when pm is set to 'dynamic' +; Note: Mandatory when pm is set to 'dynamic' +pm.max_spare_servers = 3 + +; The number of seconds after which an idle process will be killed. +; Note: Used only when pm is set to 'ondemand' +; Default Value: 10s +;pm.process_idle_timeout = 10s; + +; The number of requests each child process should execute before respawning. +; This can be useful to work around memory leaks in 3rd party libraries. For +; endless request processing specify '0'. Equivalent to PHP_FCGI_MAX_REQUESTS. +; Default Value: 0 +;pm.max_requests = 500 + +; The URI to view the FPM status page. If this value is not set, no URI will be +; recognized as a status page. It shows the following informations: +; pool - the name of the pool; +; process manager - static, dynamic or ondemand; +; start time - the date and time FPM has started; +; start since - number of seconds since FPM has started; +; accepted conn - the number of request accepted by the pool; +; listen queue - the number of request in the queue of pending +; connections (see backlog in listen(2)); +; max listen queue - the maximum number of requests in the queue +; of pending connections since FPM has started; +; listen queue len - the size of the socket queue of pending connections; +; idle processes - the number of idle processes; +; active processes - the number of active processes; +; total processes - the number of idle + active processes; +; max active processes - the maximum number of active processes since FPM +; has started; +; max children reached - number of times, the process limit has been reached, +; when pm tries to start more children (works only for +; pm 'dynamic' and 'ondemand'); +; Value are updated in real time. +; Example output: +; pool: www +; process manager: static +; start time: 01/Jul/2011:17:53:49 +0200 +; start since: 62636 +; accepted conn: 190460 +; listen queue: 0 +; max listen queue: 1 +; listen queue len: 42 +; idle processes: 4 +; active processes: 11 +; total processes: 15 +; max active processes: 12 +; max children reached: 0 +; +; By default the status page output is formatted as text/plain. Passing either +; 'html', 'xml' or 'json' in the query string will return the corresponding +; output syntax. Example: +; http://www.foo.bar/status +; http://www.foo.bar/status?json +; http://www.foo.bar/status?html +; http://www.foo.bar/status?xml +; +; By default the status page only outputs short status. 
Passing 'full' in the +; query string will also return status for each pool process. +; Example: +; http://www.foo.bar/status?full +; http://www.foo.bar/status?json&full +; http://www.foo.bar/status?html&full +; http://www.foo.bar/status?xml&full +; The Full status returns for each process: +; pid - the PID of the process; +; state - the state of the process (Idle, Running, ...); +; start time - the date and time the process has started; +; start since - the number of seconds since the process has started; +; requests - the number of requests the process has served; +; request duration - the duration in µs of the requests; +; request method - the request method (GET, POST, ...); +; request URI - the request URI with the query string; +; content length - the content length of the request (only with POST); +; user - the user (PHP_AUTH_USER) (or '-' if not set); +; script - the main script called (or '-' if not set); +; last request cpu - the %cpu the last request consumed +; it's always 0 if the process is not in Idle state +; because CPU calculation is done when the request +; processing has terminated; +; last request memory - the max amount of memory the last request consumed +; it's always 0 if the process is not in Idle state +; because memory calculation is done when the request +; processing has terminated; +; If the process is in Idle state, then informations are related to the +; last request the process has served. Otherwise informations are related to +; the current request being served. +; Example output: +; ************************ +; pid: 31330 +; state: Running +; start time: 01/Jul/2011:17:53:49 +0200 +; start since: 63087 +; requests: 12808 +; request duration: 1250261 +; request method: GET +; request URI: /test_mem.php?N=10000 +; content length: 0 +; user: - +; script: /home/fat/web/docs/php/test_mem.php +; last request cpu: 0.00 +; last request memory: 0 +; +; Note: There is a real-time FPM status monitoring sample web page available +; It's available in: /usr/share/php83/fpm/status.html +; +; Note: The value must start with a leading slash (/). The value can be +; anything, but it may not be a good idea to use the .php extension or it +; may conflict with a real PHP file. +; Default Value: not set +pm.status_path = /status + +; The ping URI to call the monitoring page of FPM. If this value is not set, no +; URI will be recognized as a ping page. This could be used to test from outside +; that FPM is alive and responding, or to +; - create a graph of FPM availability (rrd or such); +; - remove a server from a group if it is not responding (load balancing); +; - trigger alerts for the operating team (24/7). +; Note: The value must start with a leading slash (/). The value can be +; anything, but it may not be a good idea to use the .php extension or it +; may conflict with a real PHP file. +; Default Value: not set +;ping.path = /ping + +; This directive may be used to customize the response of a ping request. The +; response is formatted as text/plain with a 200 response code. +; Default Value: pong +;ping.response = pong + +; The access log file +; Default: not set +;access.log = log/php83/$pool.access.log + +; The access log format. 
+; The following syntax is allowed +; %%: the '%' character +; %C: %CPU used by the request +; it can accept the following format: +; - %{user}C for user CPU only +; - %{system}C for system CPU only +; - %{total}C for user + system CPU (default) +; %d: time taken to serve the request +; it can accept the following format: +; - %{seconds}d (default) +; - %{miliseconds}d +; - %{mili}d +; - %{microseconds}d +; - %{micro}d +; %e: an environment variable (same as $_ENV or $_SERVER) +; it must be associated with embraces to specify the name of the env +; variable. Some exemples: +; - server specifics like: %{REQUEST_METHOD}e or %{SERVER_PROTOCOL}e +; - HTTP headers like: %{HTTP_HOST}e or %{HTTP_USER_AGENT}e +; %f: script filename +; %l: content-length of the request (for POST request only) +; %m: request method +; %M: peak of memory allocated by PHP +; it can accept the following format: +; - %{bytes}M (default) +; - %{kilobytes}M +; - %{kilo}M +; - %{megabytes}M +; - %{mega}M +; %n: pool name +; %o: output header +; it must be associated with embraces to specify the name of the header: +; - %{Content-Type}o +; - %{X-Powered-By}o +; - %{Transfert-Encoding}o +; - .... +; %p: PID of the child that serviced the request +; %P: PID of the parent of the child that serviced the request +; %q: the query string +; %Q: the '?' character if query string exists +; %r: the request URI (without the query string, see %q and %Q) +; %R: remote IP address +; %s: status (response code) +; %t: server time the request was received +; it can accept a strftime(3) format: +; %d/%b/%Y:%H:%M:%S %z (default) +; The strftime(3) format must be encapsuled in a %{}t tag +; e.g. for a ISO8601 formatted timestring, use: %{%Y-%m-%dT%H:%M:%S%z}t +; %T: time the log has been written (the request has finished) +; it can accept a strftime(3) format: +; %d/%b/%Y:%H:%M:%S %z (default) +; The strftime(3) format must be encapsuled in a %{}t tag +; e.g. for a ISO8601 formatted timestring, use: %{%Y-%m-%dT%H:%M:%S%z}t +; %u: remote user +; +; Default: "%R - %u %t \"%m %r\" %s" +;access.format = "%R - %u %t \"%m %r%Q%q\" %s %f %{mili}d %{kilo}M %C%%" + +; The log file for slow requests +; Default Value: not set +; Note: slowlog is mandatory if request_slowlog_timeout is set +;slowlog = log/php83/$pool.slow.log + +; The timeout for serving a single request after which a PHP backtrace will be +; dumped to the 'slowlog' file. A value of '0s' means 'off'. +; Available units: s(econds)(default), m(inutes), h(ours), or d(ays) +; Default Value: 0 +;request_slowlog_timeout = 0 + +; Depth of slow log stack trace. +; Default Value: 20 +;request_slowlog_trace_depth = 20 + +; The timeout for serving a single request after which the worker process will +; be killed. This option should be used when the 'max_execution_time' ini option +; does not stop script execution for some reason. A value of '0' means 'off'. +; Available units: s(econds)(default), m(inutes), h(ours), or d(ays) +; Default Value: 0 +request_terminate_timeout = 300 + +; The timeout set by 'request_terminate_timeout' ini option is not engaged after +; application calls 'fastcgi_finish_request' or when application has finished and +; shutdown functions are being called (registered via register_shutdown_function). +; This option will enable timeout limit to be applied unconditionally +; even in such cases. +; Default Value: no +;request_terminate_timeout_track_finished = no + +; Set open file descriptor rlimit. 
+; Default Value: system defined value +;rlimit_files = 1024 + +; Set max core size rlimit. +; Possible Values: 'unlimited' or an integer greater or equal to 0 +; Default Value: system defined value +;rlimit_core = 0 + +; Chroot to this directory at the start. This value must be defined as an +; absolute path. When this value is not set, chroot is not used. +; Note: you can prefix with '$prefix' to chroot to the pool prefix or one +; of its subdirectories. If the pool prefix is not set, the global prefix +; will be used instead. +; Note: chrooting is a great security feature and should be used whenever +; possible. However, all PHP paths will be relative to the chroot +; (error_log, sessions.save_path, ...). +; Default Value: not set +;chroot = + +; Chdir to this directory at the start. +; Note: relative path can be used. +; Default Value: current directory or / when chroot +;chdir = /var/www + +; Redirect worker stdout and stderr into main error log. If not set, stdout and +; stderr will be redirected to /dev/null according to FastCGI specs. +; Note: on highloaded environement, this can cause some delay in the page +; process time (several ms). +; Default Value: no +catch_workers_output = yes + +; Decorate worker output with prefix and suffix containing information about +; the child that writes to the log and if stdout or stderr is used as well as +; log level and time. This options is used only if catch_workers_output is yes. +; Settings to "no" will output data as written to the stdout or stderr. +; Default value: yes +decorate_workers_output = no + +; Clear environment in FPM workers +; Prevents arbitrary environment variables from reaching FPM worker processes +; by clearing the environment in workers before env vars specified in this +; pool configuration are added. +; Setting to "no" will make all environment variables available to PHP code +; via getenv(), $_ENV and $_SERVER. +; Default Value: yes +clear_env = yes + +; Limits the extensions of the main script FPM will allow to parse. This can +; prevent configuration mistakes on the web server side. You should only limit +; FPM to .php extensions to prevent malicious users to use other extensions to +; execute php code. +; Note: set an empty value to allow all extensions. +; Default Value: .php +;security.limit_extensions = .php .php3 .php4 .php5 .php83 + +; Pass environment variables like LD_LIBRARY_PATH. All $VARIABLEs are taken from +; the current environment. +; Default Value: clean env +;env[HOSTNAME] = $HOSTNAME +;env[PATH] = /usr/local/bin:/usr/bin:/bin +;env[TMP] = /tmp +;env[TMPDIR] = /tmp +;env[TEMP] = /tmp + +; Additional php.ini defines, specific to this pool of workers. These settings +; overwrite the values previously defined in the php.ini. The directives are the +; same as the PHP SAPI: +; php_value/php_flag - you can set classic ini defines which can +; be overwritten from PHP call 'ini_set'. +; php_admin_value/php_admin_flag - these directives won't be overwritten by +; PHP call 'ini_set' +; For php_*flag, valid values are on, off, 1, 0, true, false, yes or no. + +; Defining 'extension' will load the corresponding shared extension from +; extension_dir. Defining 'disable_functions' or 'disable_classes' will not +; overwrite previously defined php.ini values, but will append the new value +; instead. 
+ +; Note: path INI options can be relative and will be expanded with the prefix +; (pool, global or /usr) + +; Default Value: nothing is defined by default except the values in php.ini and +; specified at startup with the -d argument +;php_admin_value[sendmail_path] = /usr/sbin/sendmail -t -i -f www@my.domain.com +;php_flag[display_errors] = off +;php_admin_value[error_log] = /var/log/php83/$pool.error.log +;php_admin_flag[log_errors] = on +;php_admin_value[memory_limit] = 32M +; xoxox \ No newline at end of file diff --git a/build/docker-compose/docker-compose.acme.yml b/build/docker-compose/docker-compose.acme.yml index 229be0e1d..37fb5c933 100644 --- a/build/docker-compose/docker-compose.acme.yml +++ b/build/docker-compose/docker-compose.acme.yml @@ -1,4 +1,3 @@ -version: "3.7" services: traefik: # Do not set `api.insecure`, `api.dashboard`, `api.debug` to `true` in production. diff --git a/build/docker-compose/docker-compose.activemq.yml b/build/docker-compose/docker-compose.activemq.yml index edc39cdae..340d2ae58 100644 --- a/build/docker-compose/docker-compose.activemq.yml +++ b/build/docker-compose/docker-compose.activemq.yml @@ -1,4 +1,3 @@ -version: "3.7" networks: default: internal: true diff --git a/build/docker-compose/docker-compose.alpaca.yml b/build/docker-compose/docker-compose.alpaca.yml index 0d75afab0..2cd0fab43 100644 --- a/build/docker-compose/docker-compose.alpaca.yml +++ b/build/docker-compose/docker-compose.alpaca.yml @@ -1,4 +1,3 @@ -version: "3.7" networks: default: internal: true @@ -6,11 +5,20 @@ services: alpaca: restart: ${RESTART_POLICY:-unless-stopped} image: ${REPOSITORY:-islandora}/alpaca:${TAG:-latest} + volumes: + - /etc/docker/custom-resolv.conf:/etc/resolv.conf environment: ALPACA_OCR_TIMEOUT: ${ALPACA_OCR_TIMEOUT:-10000} ALPACA_HOUDINI_TIMEOUT: ${ALPACA_HOUDINI_TIMEOUT:-10000} ALPACA_HOMARUS_TIMEOUT: ${ALPACA_HOMARUS_TIMEOUT:-10000} ALPACA_FITS_TIMEOUT: ${ALPACA_HOMARUS_TIMEOUT:-10000} + LOG_LEVEL: ${LOG_LEVEL:-DEBUG} + depends_on: + - activemq + - crayfits + - homarus + - houdini + - hypercube deploy: resources: limits: diff --git a/build/docker-compose/docker-compose.blazegraph.yml b/build/docker-compose/docker-compose.blazegraph.yml index b195bf4d4..adf0b4e46 100644 --- a/build/docker-compose/docker-compose.blazegraph.yml +++ b/build/docker-compose/docker-compose.blazegraph.yml @@ -1,4 +1,3 @@ -version: "3.7" networks: default: internal: true @@ -10,6 +9,7 @@ services: image: ${REPOSITORY:-islandora}/blazegraph:${TAG:-latest} volumes: - blazegraph-data:/data + - /etc/docker/custom-resolv.conf:/etc/resolv.conf networks: default: labels: diff --git a/build/docker-compose/docker-compose.cantaloupe.yml b/build/docker-compose/docker-compose.cantaloupe.yml index 0dd012d7d..bbaffedca 100644 --- a/build/docker-compose/docker-compose.cantaloupe.yml +++ b/build/docker-compose/docker-compose.cantaloupe.yml @@ -1,4 +1,3 @@ -version: "3.7" networks: default: internal: true @@ -14,6 +13,7 @@ services: CANTALOUPE_HTTPSOURCE_LOOKUP_STRATEGY: ${CANTALOUPE_HTTPSOURCE_LOOKUP_STRATEGY} # ScriptLookupStrategy volumes: - cantaloupe-data:/data + - /etc/docker/custom-resolv.conf:/etc/resolv.conf labels: - traefik.enable=${EXPOSE_CANTALOUPE:-true} - traefik.http.services.${COMPOSE_PROJECT_NAME-isle-dc}-cantaloupe.loadbalancer.server.port=8182 diff --git a/build/docker-compose/docker-compose.crayfish.yml b/build/docker-compose/docker-compose.crayfish.yml index 16d1b33bf..92e81c8f2 100644 --- a/build/docker-compose/docker-compose.crayfish.yml +++ 
b/build/docker-compose/docker-compose.crayfish.yml @@ -1,4 +1,3 @@ -version: "3.7" networks: default: internal: true @@ -6,6 +5,11 @@ services: homarus: restart: ${RESTART_POLICY:-unless-stopped} image: ${REPOSITORY:-islandora}/homarus:${TAG:-latest} + volumes: + - /etc/docker/custom-resolv.conf:/etc/resolv.conf + - ../build/configs/custom_www.conf:/etc/php83/php-fpm.d/www.conf:ro + environment: + - LOG_LEVEL=${LOG_LEVEL:-DEBUG} deploy: resources: limits: @@ -15,6 +19,15 @@ services: houdini: restart: ${RESTART_POLICY:-unless-stopped} image: ${REPOSITORY:-islandora}/houdini:${TAG:-latest} + volumes: + - /etc/docker/custom-resolv.conf:/etc/resolv.conf + environment: + - LOG_LEVEL=${LOG_LEVEL:-DEBUG} + networks: + - default + depends_on: + - drupal + - fcrepo deploy: resources: limits: @@ -24,6 +37,10 @@ services: hypercube: restart: ${RESTART_POLICY:-unless-stopped} image: ${REPOSITORY:-islandora}/hypercube:${TAG:-latest} + volumes: + - /etc/docker/custom-resolv.conf:/etc/resolv.conf + environment: + - LOG_LEVEL=${LOG_LEVEL:-DEBUG} deploy: resources: limits: @@ -33,8 +50,11 @@ services: milliner: restart: ${RESTART_POLICY:-unless-stopped} image: ${REPOSITORY:-islandora}/milliner:${TAG:-latest} + volumes: + - /etc/docker/custom-resolv.conf:/etc/resolv.conf environment: MILLINER_FEDORA6: ${FEDORA_6} + LOG_LEVEL: ${LOG_LEVEL:-DEBUG} networks: default: gateway: diff --git a/build/docker-compose/docker-compose.crayfits.yml b/build/docker-compose/docker-compose.crayfits.yml index 2f31c657b..2d952fdfc 100644 --- a/build/docker-compose/docker-compose.crayfits.yml +++ b/build/docker-compose/docker-compose.crayfits.yml @@ -1,4 +1,3 @@ -version: "3.7" networks: default: internal: true @@ -6,6 +5,8 @@ services: crayfits: restart: ${RESTART_POLICY:-unless-stopped} image: ${REPOSITORY:-islandora}/crayfits:${TAG:-latest} + volumes: + - /etc/docker/custom-resolv.conf:/etc/resolv.conf depends_on: - fits deploy: @@ -17,6 +18,8 @@ services: fits: restart: ${RESTART_POLICY:-unless-stopped} image: ${REPOSITORY:-islandora}/fits:${TAG:-latest} + volumes: + - /etc/docker/custom-resolv.conf:/etc/resolv.conf deploy: resources: limits: diff --git a/build/docker-compose/docker-compose.custom.yml b/build/docker-compose/docker-compose.custom.yml index 6896cd707..ccba80f45 100644 --- a/build/docker-compose/docker-compose.custom.yml +++ b/build/docker-compose/docker-compose.custom.yml @@ -1,5 +1,4 @@ # This file builds a local image from the codebase folder -version: "3.7" networks: default: internal: true diff --git a/build/docker-compose/docker-compose.demo.yml b/build/docker-compose/docker-compose.demo.yml index d62f6e28b..45a3334db 100644 --- a/build/docker-compose/docker-compose.demo.yml +++ b/build/docker-compose/docker-compose.demo.yml @@ -5,7 +5,6 @@ # # Organizations should not use this as their base instead they should refer # to the documentation in this project for the correct approach. -version: "3.7" networks: default: internal: true diff --git a/build/docker-compose/docker-compose.drupal.mariadb.yml b/build/docker-compose/docker-compose.drupal.mariadb.yml index 5e00bbb35..297681c0b 100644 --- a/build/docker-compose/docker-compose.drupal.mariadb.yml +++ b/build/docker-compose/docker-compose.drupal.mariadb.yml @@ -1,4 +1,3 @@ -version: "3.7" services: # Override defaults so Mariadb is used as the database for this service. 
drupal: diff --git a/build/docker-compose/docker-compose.drupal.postgresql.yml b/build/docker-compose/docker-compose.drupal.postgresql.yml index 40d16db6b..5901d23c4 100644 --- a/build/docker-compose/docker-compose.drupal.postgresql.yml +++ b/build/docker-compose/docker-compose.drupal.postgresql.yml @@ -1,4 +1,3 @@ -version: "3.7" services: # Override defaults so PostgreSQL is used as the database for this service. drupal: diff --git a/build/docker-compose/docker-compose.drupal.yml b/build/docker-compose/docker-compose.drupal.yml index 22859bd72..afd2a98b4 100644 --- a/build/docker-compose/docker-compose.drupal.yml +++ b/build/docker-compose/docker-compose.drupal.yml @@ -1,5 +1,4 @@ # These are the common settings for any drupal when used with any of the environment types. -version: "3.7" networks: default: internal: true diff --git a/build/docker-compose/docker-compose.etcd.yml b/build/docker-compose/docker-compose.etcd.yml index 25f123de7..aa5f36cda 100644 --- a/build/docker-compose/docker-compose.etcd.yml +++ b/build/docker-compose/docker-compose.etcd.yml @@ -1,6 +1,5 @@ # Example of using a different backend for confd rather then # environment variables. -version: "3.7" networks: default: internal: true @@ -21,6 +20,7 @@ services: ETCD_LISTEN_CLIENT_URLS: "http://0.0.0.0:2379" volumes: - etcd-data:/data + - /etc/docker/custom-resolv.conf:/etc/resolv.conf ports: - 2379 - 2380 diff --git a/build/docker-compose/docker-compose.fcrepo.mariadb.yml b/build/docker-compose/docker-compose.fcrepo.mariadb.yml index 14cd6f83b..2a5d7d8ee 100644 --- a/build/docker-compose/docker-compose.fcrepo.mariadb.yml +++ b/build/docker-compose/docker-compose.fcrepo.mariadb.yml @@ -1,4 +1,3 @@ -version: "3.7" services: # Override defaults so Mariadb is used as the database for this service. fcrepo: diff --git a/build/docker-compose/docker-compose.fcrepo.postgresql.yml b/build/docker-compose/docker-compose.fcrepo.postgresql.yml index b4c272363..f502b634b 100644 --- a/build/docker-compose/docker-compose.fcrepo.postgresql.yml +++ b/build/docker-compose/docker-compose.fcrepo.postgresql.yml @@ -1,4 +1,3 @@ -version: "3.7" services: # Override defaults so PostgreSQL is used as the database for this service. fcrepo: diff --git a/build/docker-compose/docker-compose.fcrepo.yml b/build/docker-compose/docker-compose.fcrepo.yml index 85482deff..4ba3bcb9b 100644 --- a/build/docker-compose/docker-compose.fcrepo.yml +++ b/build/docker-compose/docker-compose.fcrepo.yml @@ -1,4 +1,3 @@ -version: "3.7" networks: default: internal: true @@ -16,6 +15,7 @@ services: FCREPO_DISABLE_SYN: ${DISABLE_SYN} volumes: - fcrepo-data:/data + - /etc/docker/custom-resolv.conf:/etc/resolv.conf depends_on: - activemq networks: diff --git a/build/docker-compose/docker-compose.fcrepo6.mariadb.yml b/build/docker-compose/docker-compose.fcrepo6.mariadb.yml index 14cd6f83b..2a5d7d8ee 100644 --- a/build/docker-compose/docker-compose.fcrepo6.mariadb.yml +++ b/build/docker-compose/docker-compose.fcrepo6.mariadb.yml @@ -1,4 +1,3 @@ -version: "3.7" services: # Override defaults so Mariadb is used as the database for this service. fcrepo: diff --git a/build/docker-compose/docker-compose.fcrepo6.postgresql.yml b/build/docker-compose/docker-compose.fcrepo6.postgresql.yml index b4c272363..f502b634b 100644 --- a/build/docker-compose/docker-compose.fcrepo6.postgresql.yml +++ b/build/docker-compose/docker-compose.fcrepo6.postgresql.yml @@ -1,4 +1,3 @@ -version: "3.7" services: # Override defaults so PostgreSQL is used as the database for this service. 
fcrepo: diff --git a/build/docker-compose/docker-compose.fcrepo6.yml b/build/docker-compose/docker-compose.fcrepo6.yml index 67d0a4fad..a9a977348 100644 --- a/build/docker-compose/docker-compose.fcrepo6.yml +++ b/build/docker-compose/docker-compose.fcrepo6.yml @@ -1,9 +1,6 @@ -version: "3.7" networks: default: internal: true -volumes: - fcrepo-data: services: fcrepo: restart: ${RESTART_POLICY:-unless-stopped} @@ -15,7 +12,9 @@ services: FCREPO_TOMCAT_ADMIN_USER: admin FCREPO_DISABLE_SYN: ${DISABLE_SYN} volumes: - - fcrepo-data:/data + # - fcrepo-data:/data + - /mnt/data2/local/fedora:/data:rw + - /etc/docker/custom-resolv.conf:/etc/resolv.conf depends_on: - activemq networks: diff --git a/build/docker-compose/docker-compose.local.yml b/build/docker-compose/docker-compose.local.yml index ed23964c8..3f358a6f4 100644 --- a/build/docker-compose/docker-compose.local.yml +++ b/build/docker-compose/docker-compose.local.yml @@ -4,7 +4,6 @@ # - composer requires / install # - Drush commands # - Manual changes to the codebase directory -version: "3.7" networks: default: internal: true @@ -18,7 +17,10 @@ services: volumes: - ../../codebase:/var/www/drupal:delegated - drupal-sites-data:/var/www/drupal/web/sites/default/files + - /mnt/data2/local/public/:/mnt/data2/local/public/ + - ../rootfs/custom_php.ini:/etc/php83/php.ini - solr-data:/opt/solr/server/solr + - /etc/docker/custom-resolv.conf:/etc/resolv.conf environment: DRUPAL_DEFAULT_INSTALL_EXISTING_CONFIG: ${INSTALL_EXISTING_CONFIG} DRUPAL_DEFAULT_PROFILE: ${DRUPAL_INSTALL_PROFILE} diff --git a/build/docker-compose/docker-compose.mariadb.yml b/build/docker-compose/docker-compose.mariadb.yml index ee6ca9c14..66ff79999 100644 --- a/build/docker-compose/docker-compose.mariadb.yml +++ b/build/docker-compose/docker-compose.mariadb.yml @@ -1,4 +1,3 @@ -version: "3.7" networks: default: internal: true diff --git a/build/docker-compose/docker-compose.matomo.yml b/build/docker-compose/docker-compose.matomo.yml new file mode 100644 index 000000000..be12f2ddd --- /dev/null +++ b/build/docker-compose/docker-compose.matomo.yml @@ -0,0 +1,43 @@ +version: "3.7" +networks: + default: + internal: true + gateway: + external: true +volumes: + matomo-config-data: +services: + matomo: + restart: ${RESTART_POLICY:-unless-stopped} + image: ${REPOSITORY:-islandora}/matomo:${TAG:-latest} + environment: + MATOMO_DEFAULT_HOST: ${DOMAIN} + volumes: + - matomo-config-data:/var/www/matomo + depends_on: + - mariadb + networks: + default: + labels: + # Do not expose in production over http, setup https. 
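+      # These labels route https://${DOMAIN}/matomo through traefik to port 80 in this
+      # container, strip the /matomo prefix, and redirect plain HTTP to HTTPS.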
+ - traefik.enable=${EXPOSE_MATOMO:-true} + - traefik.http.services.${COMPOSE_PROJECT_NAME-isle-dc}-matomo.loadbalancer.server.port=80 + - traefik.http.middlewares.${COMPOSE_PROJECT_NAME-isle-dc}-matomo-redirectscheme.redirectscheme.scheme=https + - traefik.http.middlewares.${COMPOSE_PROJECT_NAME-isle-dc}-matomo-redirectscheme.redirectscheme.permanent=true + - traefik.http.routers.${COMPOSE_PROJECT_NAME-isle-dc}-matomo_http.service=${COMPOSE_PROJECT_NAME-isle-dc}-matomo + - traefik.http.routers.${COMPOSE_PROJECT_NAME-isle-dc}-matomo_http.entrypoints=http + - traefik.http.routers.${COMPOSE_PROJECT_NAME-isle-dc}-matomo_http.rule=Host(`${DOMAIN}`) && PathPrefix(`/matomo`) + - traefik.http.routers.${COMPOSE_PROJECT_NAME-isle-dc}-matomo_http.middlewares=${COMPOSE_PROJECT_NAME-isle-dc}-matomo-redirectscheme + - traefik.http.middlewares.${COMPOSE_PROJECT_NAME-isle-dc}-matomo-stripprefix.stripprefix.prefixes=/matomo + - traefik.http.middlewares.${COMPOSE_PROJECT_NAME-isle-dc}-matomo-customrequestheaders.headers.customrequestheaders.X-Forwarded-Uri=/matomo + - traefik.http.middlewares.${COMPOSE_PROJECT_NAME-isle-dc}-matomo.chain.middlewares=${COMPOSE_PROJECT_NAME-isle-dc}-matomo-stripprefix,${COMPOSE_PROJECT_NAME-isle-dc}-matomo-customrequestheaders + - traefik.http.routers.${COMPOSE_PROJECT_NAME-isle-dc}-matomo_https.entrypoints=https + - traefik.http.routers.${COMPOSE_PROJECT_NAME-isle-dc}-matomo_https.rule=Host(`${DOMAIN}`) && PathPrefix(`/matomo`) + - traefik.http.routers.${COMPOSE_PROJECT_NAME-isle-dc}-matomo_https.tls=true + - traefik.http.routers.${COMPOSE_PROJECT_NAME-isle-dc}-matomo_https.middlewares=${COMPOSE_PROJECT_NAME-isle-dc}-matomo + deploy: + resources: + limits: + memory: ${MATOMO_MEMORY_LIMIT:-1G} + reservations: + memory: 512M diff --git a/build/docker-compose/docker-compose.postgresql.yml b/build/docker-compose/docker-compose.postgresql.yml index ee95d2102..1c3417ccd 100644 --- a/build/docker-compose/docker-compose.postgresql.yml +++ b/build/docker-compose/docker-compose.postgresql.yml @@ -1,4 +1,3 @@ -version: "3.7" networks: default: internal: true diff --git a/build/docker-compose/docker-compose.secrets.yml b/build/docker-compose/docker-compose.secrets.yml index dfb9e91e9..b5c61a99d 100644 --- a/build/docker-compose/docker-compose.secrets.yml +++ b/build/docker-compose/docker-compose.secrets.yml @@ -1,4 +1,3 @@ -version: "3.7" secrets: ACTIVEMQ_PASSWORD: file: "../../secrets/live/ACTIVEMQ_PASSWORD" diff --git a/build/docker-compose/docker-compose.solr.yml b/build/docker-compose/docker-compose.solr.yml index f11f7b844..3d3373469 100644 --- a/build/docker-compose/docker-compose.solr.yml +++ b/build/docker-compose/docker-compose.solr.yml @@ -1,4 +1,3 @@ -version: "3.7" networks: default: internal: true diff --git a/build/docker-compose/docker-compose.starter.yml b/build/docker-compose/docker-compose.starter.yml index ed23964c8..ae5a31043 100644 --- a/build/docker-compose/docker-compose.starter.yml +++ b/build/docker-compose/docker-compose.starter.yml @@ -4,7 +4,6 @@ # - composer requires / install # - Drush commands # - Manual changes to the codebase directory -version: "3.7" networks: default: internal: true @@ -19,9 +18,15 @@ services: - ../../codebase:/var/www/drupal:delegated - drupal-sites-data:/var/www/drupal/web/sites/default/files - solr-data:/opt/solr/server/solr + - /mnt/data2/local/public/:/mnt/data2/local/public/ + - /mnt/data2/local/private/:/mnt/data2/local/private/ + - /mnt/data2/local/fedora/:/mnt/data2/local/fedora/ environment: 
DRUPAL_DEFAULT_INSTALL_EXISTING_CONFIG: ${INSTALL_EXISTING_CONFIG} DRUPAL_DEFAULT_PROFILE: ${DRUPAL_INSTALL_PROFILE} + PHP_MEMORY_LIMIT: ${PHP_MEMORY_LIMIT} + PHP_POST_MAX_SIZE: ${PHP_POST_MAX_SIZE} + PHP_UPLOAD_MAX_FILESIZE: ${PHP_UPLOAD_MAX_FILESIZE} depends_on: # Requires a the very minimum a database. - ${DRUPAL_DATABASE_SERVICE} diff --git a/build/docker-compose/docker-compose.starter_dev.yml b/build/docker-compose/docker-compose.starter_dev.yml index ed23964c8..ff77d2529 100644 --- a/build/docker-compose/docker-compose.starter_dev.yml +++ b/build/docker-compose/docker-compose.starter_dev.yml @@ -4,7 +4,6 @@ # - composer requires / install # - Drush commands # - Manual changes to the codebase directory -version: "3.7" networks: default: internal: true @@ -19,10 +18,15 @@ services: - ../../codebase:/var/www/drupal:delegated - drupal-sites-data:/var/www/drupal/web/sites/default/files - solr-data:/opt/solr/server/solr + - /mnt/data2/local/public/:/mnt/data2/local/public/:rw + - /mnt/data2/local/private/:/mnt/data2/local/private/:rw + # - /mnt/data2/local/fedora/:/mnt/data2/local/fedora/ + # - fcrepo-data:/mnt/data2/local/fedora/ environment: DRUPAL_DEFAULT_INSTALL_EXISTING_CONFIG: ${INSTALL_EXISTING_CONFIG} DRUPAL_DEFAULT_PROFILE: ${DRUPAL_INSTALL_PROFILE} depends_on: + - traefik # Requires a the very minimum a database. - ${DRUPAL_DATABASE_SERVICE} # Extends docker-compose.solr.yml diff --git a/build/docker-compose/docker-compose.traefik.yml b/build/docker-compose/docker-compose.traefik.yml index cb9c65d8e..04b0504be 100644 --- a/build/docker-compose/docker-compose.traefik.yml +++ b/build/docker-compose/docker-compose.traefik.yml @@ -7,7 +7,6 @@ # For a traefik to be able to route traffic to a given container, that # container needs to be on the `gateway` network, otherwise traefik will # discover it via Docker but will not be able to redirect traffic to it. -version: "3.7" networks: gateway: driver: bridge @@ -56,7 +55,6 @@ services: - ../../build/traefik-tls.yml:/etc/traefik/tls.yml - ../../certs:/etc/ssl/traefik labels: - # Do not expose in production. - traefik.http.routers.api.service=api@internal networks: default: @@ -69,6 +67,3 @@ services: memory: ${TRAEFIK_MEMORY_LIMIT:-8G} reservations: memory: 4G - drupal: - depends_on: - - traefik diff --git a/build/docker-compose/docker-compose.watchtower.yml b/build/docker-compose/docker-compose.watchtower.yml index 3c869ea97..82b02d6b7 100644 --- a/build/docker-compose/docker-compose.watchtower.yml +++ b/build/docker-compose/docker-compose.watchtower.yml @@ -1,13 +1,13 @@ # This service will automatically restart a service if a newer image is # available on the system, useful for local development but should not # be used in production. -version: "3.7" services: watchtower: image: containrrr/watchtower restart: ${RESTART_POLICY:-unless-stopped} volumes: - /var/run/docker.sock:/var/run/docker.sock + - /etc/docker/custom-resolv.conf:/etc/resolv.conf command: --interval 1 --no-pull deploy: resources: diff --git a/build/scripts/generate-jwt-secrets.sh b/build/scripts/generate-jwt-secrets.sh new file mode 100755 index 000000000..6dbe8315c --- /dev/null +++ b/build/scripts/generate-jwt-secrets.sh @@ -0,0 +1,82 @@ +#!/usr/bin/env bash +set -e +already_generated=false + +# This script is meant to only be called via the Makefile not independently. 
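+# In outline: random_secret() writes a random JWT_ADMIN_TOKEN, generate_jwt_keys() creates an
+# RSA key pair with openssl, main() backs up any existing JWT_* files in secrets/live/ with a
+# date suffix before replacing them, and check_key_validity() verifies the keys with
+# `openssl pkey`. The `jhu_reg_jwt` target in custom.Makefile is presumably the caller.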
+function generate_jwt_keys() {
+    openssl genrsa -out /tmp/private.key 2048 &>/dev/null
+    openssl rsa -pubout -in /tmp/private.key -out /tmp/public.key &>/dev/null
+}
+
+function random_secret() {
+    local characters=${1}
+    local size=${2}
+    local name=${3}
+    tr -dc "${characters}" < /dev/urandom | head -c "${size}" > /secrets/live/"${name}"
+}
+
+function main() {
+    echo -e "\nGenerating JWT Secrets\n-----------------------------------\n"
+    today=$(date +"%Y-%m-%d")
+    if [ "$already_generated" = true ]; then
+        echo -e "\n\tJWT keys already generated. Skipping.\n\n"
+        exit 0
+    fi
+    echo "Backing up the old ones"
+    if [ -f secrets/live/JWT_PUBLIC_KEY_$today ]; then
+        echo "Backup already exists. Skipping."
+    else
+        mv secrets/live/JWT_PUBLIC_KEY secrets/live/JWT_PUBLIC_KEY_$today
+        mv secrets/live/JWT_PRIVATE_KEY secrets/live/JWT_PRIVATE_KEY_$today
+        mv secrets/live/JWT_ADMIN_TOKEN secrets/live/JWT_ADMIN_TOKEN_$today
+        echo "Moved the old ones to secrets/live/JWT_ADMIN_TOKEN_$today, secrets/live/JWT_PUBLIC_KEY_$today, and secrets/live/JWT_PRIVATE_KEY_$today"
+    fi
+    echo -e "\tdone.\n"
+    echo "Generating JWT token, public and private keys"
+    echo "- generating token"
+    random_secret 'A-Za-z0-9' 64 JWT_ADMIN_TOKEN
+    echo -e "\tdone.\n"
+    echo "- generating keys"
+    generate_jwt_keys
+    echo -e "\tdone.\n"
+
+    echo "Moving the keys to the secrets/ directory"
+    mv /tmp/private.key secrets/live/JWT_PRIVATE_KEY
+    mv /tmp/public.key secrets/live/JWT_PUBLIC_KEY
+    echo -e "\tdone.\n"
+
+    echo "Setting permissions for the JWT keys"
+    chmod 600 /secrets/live/*
+    echo -e "\tdone.\n"
+
+}
+
+function check_key_validity() {
+    if [ -f secrets/live/JWT_PUBLIC_KEY ] && [ -f secrets/live/JWT_PRIVATE_KEY ]; then
+        echo "Checking the validity of the JWT keys"
+
+        # Verify the public key
+        if ! openssl pkey -pubin -in secrets/live/JWT_PUBLIC_KEY -noout 2>/dev/null; then
+            echo "Error: Invalid public key. Generating new keys."
+            main
+            return
+        fi
+
+        # Verify the private key
+        if ! openssl pkey -in secrets/live/JWT_PRIVATE_KEY -noout 2>/dev/null; then
+            echo "Error: Invalid private key. Generating new keys."
+            main
+            return
+        fi
+
+        # If we get here, both keys are valid
+        echo -e "\tJWT keys are valid. No action needed.\n"
+    else
+        echo "JWT keys are missing. Generating new ones."
+        main
+    fi
+    echo -e "\n----------------------------- Done.\n"
+}
+
+main
+check_key_validity
\ No newline at end of file
diff --git a/custom.Makefile b/custom.Makefile
new file mode 100644
index 000000000..6089e7ae3
--- /dev/null
+++ b/custom.Makefile
@@ -0,0 +1,364 @@
+# To write custom Makefile commands and have them show up under `make help`.
+#
+# .PHONY: function_name
+# .SILENT: function_name
+# ## JHU: Updates codebase folder to be owned by the host user and nginx group.
+# function_name:
+# ⟼ Tab (not space characters) and each line is executed as part of this function.
+#
+export $(shell sed 's/=.*//' .env)
+## Debug output to verify DOMAIN
+
+print-domain:
+	@echo "DOMAIN is set to $(DOMAIN)"
+
+DOCKCOMPOSE_FILE = $(CURDIR)/docker-compose.yml
+ifneq ("$(wildcard $(DOCKCOMPOSE_FILE))","")
+  DF_FILE_EXISTS = 1
+else
+  DF_FILE_EXISTS = 0
+endif
+
+ifneq ("$(wildcard /etc/server-type.conf)","")
+  SERVER_TYPE = $(shell cat /etc/server-type.conf)
+else
+  SERVER_TYPE = local
+endif
+
+# Define the command you want to block
+BLOCKED_COMMANDS := jhu_clean
+
+.PHONY: wait-for-endpoint
+.SILENT: wait-for-endpoint
+wait-for-endpoint:
+	@echo "Checking if the https://$(DOMAIN)/ endpoint is available..."
+	@while ! 
curl -k -s -o /dev/null -w "%{http_code}" https://$(DOMAIN)/ | grep -q "200"; do \ + echo "Waiting for https://$(DOMAIN)/ endpoint to be available..."; \ + sleep 5; \ + done + @echo "Endpoint is available!" + +.PHONY: jhu_check_and_warn +.SILENT: jhu_check_and_warn +jhu_check_and_warn: + if [ $(SERVER_TYPE) != 'local' ]; then \ + echo "Not allowed on the $(SERVER_TYPE) environment"; \ + exit 1; \ + fi + +.PHONY: jhu_generate-secrets +.SILENT: jhu_generate-secrets +jhu_generate-secrets: QUOTED_CURDIR = "$(CURDIR)" +jhu_generate-secrets: + @echo "" + # cp -r secrets/template/* secrets/live + $(MAKE) generate-secrets + @echo " jhu_generate-secrets └─ Done" + @echo "" + +.PHONY: set-codebase-owner +.SILENT: set-codebase-owner +## JHU: Updates codebase folder to be owned by the host user and nginx group. +set-codebase-owner: + @echo "" + @echo "Setting codebase/ folder owner back to $(shell id -u):101" + if [ -n "$$(docker ps -q -f name=drupal)" ]; then \ + echo " └─ Using docker compose codebase/ directory"; \ + docker compose exec -T drupal with-contenv bash -lc "find . -not -user $(shell id -u) -not -path '*/sites/default/files' -exec chown $(shell id -u):101 {} \;" ; \ + docker compose exec -T drupal with-contenv bash -lc "find . -not -group 101 -not -path '*/sites/default/files' -exec chown $(shell id -u):101 {} \;" ; \ + elif [ -d "codebase" ]; then \ + echo " └─ Using local codebase/ directory"; \ + sudo find ./codebase -not -user $(shell id -u) -not -path '*/sites/default/files' -exec chown $(shell id -u):101 {} \; ; \ + sudo find ./codebase -not -group 101 -not -path '*/sites/default/files' -exec chown $(shell id -u):101 {} \; ; \ + else \ + echo " └─ No codebase/ directory found, skipping"; \ + fi + @echo " └─ Done" + @echo "" + +.PHONY: jhu_solr +.SILENT: jhu_solr +## JHU: This pulls the Solr config from Drupal and puts it in the Solr container. +jhu_solr: + @echo "" + @echo "Installing missing field types" + docker compose exec -T drupal with-contenv bash -lc "drush search-api-solr:install-missing-fieldtypes" + # docker compose exec -T drupal bash -c '/bin/rm -f /opt/solr/server/solr/ISLANDORA/conf/solrconfig_extra.xml ; /bin/cp -f web/modules/contrib/search_api_solr/jump-start/solr7/config-set/solrconfig_extra.xml /opt/solr/server/solr/ISLANDORA/conf/solrconfig_extra.xml' + @echo "Removing solrconfig_extra.xml" + docker compose exec -T drupal bash -c '/bin/rm -rf /opt/solr/server/solr/ISLANDORA/conf/' + @echo "Pulling Solr config from Drupal" + -docker compose exec -T drupal with-contenv bash -lc "touch /var/www/drupal/solrconfig.zip && chown nginx: /var/www/drupal/solrconfig.zip" + docker compose exec -T drupal with-contenv bash -lc "drush search-api-solr:get-server-config default_solr_server /var/www/drupal/solrconfig.zip" + docker compose exec -T drupal with-contenv bash -lc "unzip /var/www/drupal/solrconfig.zip -d /opt/solr/server/solr/ISLANDORA/conf/ -o" + docker compose exec -T drupal with-contenv bash -lc "rm -f /var/www/drupal/solrconfig.zip" + @echo "Restarting solr" + docker compose restart solr + # Check if Solr is up + @echo "Checking if Solr's healthy" + sleep 5 + docker compose exec -T solr bash -c 'curl -s http://localhost:8983/solr/admin/info/system?wt=json' | jq -r .lucene || (echo "Solr is not healthy, waiting 10 seconds." 
&& sleep 10) + docker compose exec -T drupal with-contenv bash -lc "drush cr" + docker compose exec -T drupal with-contenv bash -lc "drush search-api:clear" + docker compose exec -T drupal with-contenv bash -lc "drush search-api:disable-all" + docker compose exec -T drupal with-contenv bash -lc "drush search-api:enable-all" + docker compose exec -T drupal with-contenv bash -lc "drush search-api-solr:finalize-index --force" + docker compose exec -T drupal with-contenv bash -lc "drush search-api-reindex" + docker compose exec -T drupal with-contenv bash -lc "drush search-api-index" + @echo " └─ Done" + +.PHONY: jhu_up_without_rebuilding +## JHU: Make a local site with codebase directory bind mounted, using cloned starter site but without rebuilding the build process. +jhu_up_without_rebuilding: + @echo "" + if [ $(DF_FILE_EXISTS) -eq 0 ]; then \ + echo "docker-compose.yml does not exist, creating starter site"; \ + fi + docker compose up -d --build + $(MAKE) set-codebase-owner + $(MAKE) jhu_config_import + @echo " └─ Done" + +jhu_first_time: + @echo "Running Starter-init" + $(MAKE) starter-init ENVIRONMENT=starter_dev + if [ -z "$$(ls -A $(QUOTED_CURDIR)/codebase)" ]; then \ + echo "codebase/ directory is empty, cloning it"; \ + docker container run --rm -v $(CURDIR)/codebase:/home/root $(REPOSITORY)/nginx:$(TAG) with-contenv bash -lc 'git clone -b main https://github.com/jhu-idc/idc-codebase /home/root;'; \ + echo "codebase/ was cloned"; \ + fi + $(MAKE) set-codebase-owner + @echo "Wait for the /var/www/drupal/composer.json file to be available" + while ! test -f codebase/composer.json; do \ + echo "Waiting for /var/www/drupal/composer.json file to be available..."; \ + sleep 2; \ + done + docker compose up -d --remove-orphans + # The rest of this should be moved into another function. 
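+# In outline, the remaining steps: run composer install as nginx, install Drupal from the
+# existing config with drush, grant the fedoraadmin role, hydrate Islandora content, import the
+# site and Solr configuration, and seed the homepage and browse-collections nodes over REST.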
+ # -docker compose exec -T drupal with-contenv bash -lc 'rm -rf vendor/ web/modules/contrib/* web/themes/contrib/*' + docker compose exec -T drupal with-contenv bash -lc 'chown -R nginx:nginx /var/www/drupal/ && su nginx -s /bin/bash -c "composer install"' + -docker compose exec -T drupal with-contenv bash -lc 'git config --global --add safe.directory /var/www/drupal/web/modules/contrib/islandora_fits' + $(MAKE) set-codebase-owner + $(MAKE) drupal-database update-settings-php + -docker compose exec -T drupal with-contenv bash -lc "drush si -y --existing-config minimal --account-pass $(shell cat secrets/live/DRUPAL_DEFAULT_ACCOUNT_PASSWORD)" + docker compose exec -T drupal with-contenv bash -lc "drush -l $(SITE) user:role:add fedoraadmin admin" + MIGRATE_IMPORT_USER_OPTION=--userid=1 $(MAKE) hydrate + docker compose exec -T drupal with-contenv bash -lc 'drush -l $(SITE) migrate:import --userid=1 islandora_fits_tags' + $(MAKE) jhu_config_import + docker compose exec -T drupal with-contenv bash -lc 'composer require drupal/migrate_tools ; drush pm:enable -y migrate_tools,idc_default_migration && drush migrate:import idc_default_migration_menu_link_main' + $(MAKE) jhu_solr + docker compose exec -T drupal with-contenv bash -lc 'mkdir -p web/sites/default/files/styles/thumbnail/public/media-icons/generic' + docker compose exec -T drupal with-contenv bash -lc 'cp web/core/modules/media/images/icons/* web/sites/default/files/media-icons/generic/' + docker compose exec -T drupal with-contenv bash -lc 'cp web/core/modules/media/images/icons/generic.png web/sites/default/files/media-icons/generic' + docker compose exec -T drupal with-contenv bash -lc 'chown nginx: web/sites/default/files/media-icons/generic/generic.png' + docker compose exec -T drupal with-contenv bash -lc 'mkdir -p /var/www/drupal/private ; chown -R nginx:nginx /var/www/drupal/private ; chmod -R 755 /var/www/drupal/private' + sudo rsync -avz scripts/services.yml codebase/web/sites/default/services.yml + sudo rsync -avz scripts/default.services.yml codebase/web/sites/default/default.services.yml + $(MAKE) wait-for-endpoint + # This needs to be a check to see if the page exists already. + curl -k -u admin:$(shell cat secrets/live/DRUPAL_DEFAULT_ACCOUNT_PASSWORD) -H "Content-Type: application/json" -d "@build/demo-data/jhu_homepage.json" https://${DOMAIN}/node?_format=json + curl -k -u admin:$(shell cat secrets/live/DRUPAL_DEFAULT_ACCOUNT_PASSWORD) -H "Content-Type: application/json" -d "@build/demo-data/browse-collections.json" https://${DOMAIN}/node?_format=json + docker compose down + docker compose up -d + +.PHONY: jhu_up +## JHU: Make a local site with codebase directory bind mounted, using cloned starter site. 
+jhu_up: + @echo "" + if [ $(DF_FILE_EXISTS) -eq 1 ]; then \ + $(MAKE) jhu_generate-secrets ; \ + echo "docker-compose.yml already exists, skipping starter site creation"; \ + docker compose up -d ; \ + echo " └─ Done"; \ + echo ""; \ + echo " Forcing an exit to prevent running creation steps again."; \ + echo ""; \ + elif [ -z "$$(ls -A $(QUOTED_CURDIR)/codebase)" ]; then \ + $(MAKE) jhu_first_time ; \ + fi + # docker compose exec -T drupal with-contenv bash -lc 'drush config:set system.logging error_level verbose -y' + +.PHONY: jhu_demo_content +.SILENT: jhu_demo_content +## JHU: Helper function for demo sites: do a workbench import of sample objects +jhu_demo_content: QUOTED_CURDIR = "$(CURDIR)" +jhu_demo_content: + # fetch repo that has csv and binaries to data/samples + # if prod do this by default + # [ -f "secrets/live/DRUPAL_DEFAULT_ADMIN_USERNAME" ] || (echo "admin" > secrets/live/DRUPAL_DEFAULT_ADMIN_USERNAME) + # [ -f "secrets/live/DRUPAL_DEFAULT_ACCOUNT_PASSWORD" ] || (echo "password" > secrets/live/DRUPAL_DEFAULT_ACCOUNT_PASSWORD) + # @echo "\n\nChecking if the workbench module is enabled." + # -docker compose exec -T drupal with-contenv bash -lc "composer require mjordan/islandora_workbench_integration" + # -docker compose exec -T drupal with-contenv bash -lc "drush en -y islandora_workbench_integration" + # [ -d "islandora_workbench" ] || (git clone https://github.com/mjordan/islandora_workbench) + # [ -d "islandora_workbench/islandora_workbench_demo_content" ] || (git clone https://github.com/DonRichards/islandora_workbench_demo_content islandora_workbench/islandora_workbench_demo_content) + # @echo "\n\nSet the Username/Password within the YAML files." + # $(SED_DASH_I) 's/^nopassword.*/password\: $(shell cat secrets/live/DRUPAL_DEFAULT_ACCOUNT_PASSWORD) /g' islandora_workbench/islandora_workbench_demo_content/example_content.yml + # # Hack to disable SSL verification/warnings | InsecureRequestWarning: Unverified HTTPS request is being made to host 'islandora.traefik.me'. Adding certificate verification is strongly advised. See: https://urllib3.readthedocs.io/en/latest/advanced-usage.html#tls-warnings + # FILE="islandora_workbench/workbench"; \ + # if ! grep -q "import requests" "$$FILE"; then \ + # echo "Fixing the file: $$FILE"; \ + # sed -i '/def create():/i import requests\nfrom requests.packages.urllib3.exceptions import InsecureRequestWarning\n\n# Disable SSL/TLS related warnings\nrequests.packages.urllib3.disable_warnings(InsecureRequestWarning)' "$$FILE"; \ + # fi + # find islandora_workbench/islandora_workbench_demo_content/ -type f -name '*.yml' -exec $(SED_DASH_I) 's/^username.*/username\: $(shell cat secrets/live/DRUPAL_DEFAULT_ADMIN_USERNAME)/g' {} + + # find islandora_workbench/dgi_test/ -type f -name '*.yml' -exec $(SED_DASH_I) 's/^username.*/username\: $(shell cat secrets/live/DRUPAL_DEFAULT_ADMIN_USERNAME)/g' {} + + # # Set the Password within the YAML files. + # find islandora_workbench/islandora_workbench_demo_content/ -type f -name '*.yml' -exec $(SED_DASH_I) 's/^nopassword.*/password\: $(shell cat secrets/live/DRUPAL_DEFAULT_ACCOUNT_PASSWORD)/g' {} + + # find islandora_workbench/dgi_test/ -type f -name '*.yml' -exec $(SED_DASH_I) 's/^password.*/password\: $(shell cat secrets/live/DRUPAL_DEFAULT_ACCOUNT_PASSWORD)/g' {} + + # # Set the Domain within the YAML files. 
+ # find islandora_workbench/islandora_workbench_demo_content/ -type f -name '*.yml' -exec $(SED_DASH_I) '/^host:/s/.*/host: "$(subst /,\/,$(subst .,\.,$(SITE)))\/"/' {} + + # find islandora_workbench/dgi_test/ -type f -name '*.yml' -exec $(SED_DASH_I) '/^host:/s/.*/host: "$(subst /,\/,$(subst .,\.,$(SITE)))\/"/' {} + + # # Look up the Node ID of the collection you want to import into and replace the value with one live on the site. + # # This is a hack to work around for workbench. There is a better method. + # # @echo "Starting hack for workbench." + # # bash temp_migrate_fix.sh + # @echo "\n\nBuild the workbench docker image." + # DOCKER_BUILDKIT=0 docker build -t workbench-docker islandora_workbench + # @echo "Migrate/Import taxonomy terms." + # cd islandora_workbench && docker run -it --rm --network="host" -v .:/workbench/ --name my-running-workbench workbench-docker bash -lc "./workbench --config /workbench/dgi_test/geo_location.yml" + # cd islandora_workbench && docker run -it --rm --network="host" -v .:/workbench/ --name my-running-workbench workbench-docker bash -lc "./workbench --config /workbench/dgi_test/copyright_and_use.yml" + # cd islandora_workbench && docker run -it --rm --network="host" -v .:/workbench/ --name my-running-workbench workbench-docker bash -lc "./workbench --config /workbench/dgi_test/family.yml" + cd islandora_workbench && docker run -it --rm --network="host" -v .:/workbench/ --name my-running-workbench workbench-docker bash -lc "./workbench --config /workbench/dgi_test/person.yml" + cd islandora_workbench && docker run -it --rm --network="host" -v .:/workbench/ --name my-running-workbench workbench-docker bash -lc "./workbench --config /workbench/dgi_test/corporate_body.yml" + cd islandora_workbench && docker run -it --rm --network="host" -v .:/workbench/ --name my-running-workbench workbench-docker bash -lc "./workbench --config /workbench/dgi_test/genre.yml" + cd islandora_workbench && docker run -it --rm --network="host" -v .:/workbench/ --name my-running-workbench workbench-docker bash -lc "./workbench --config /workbench/dgi_test/subject.yml" + @echo "Migrate/Import taxonomy terms complete." + @echo "Migrate/Import collections and objects." + # NOPE # cd islandora_workbench && docker run -it --rm --network="host" -v .:/workbench/ --name my-running-workbench workbench-docker bash -lc "./workbench --config /workbench/islandora_workbench_demo_content/jhu_root_collections.yml" + cd islandora_workbench && docker run -it --rm --network="host" -v .:/workbench -v /mnt/idc_store/tmp:/tmp -v /mnt/data/local/:/mnt/data/local/ --name my-running-workbench workbench-docker bash -lc "./workbench --config /workbench/dgi_test/islandora_objects.yml" + @echo "Migrate/Import collections and objects complete." + # Example of a rollback + # cd islandora_workbench && docker run -it --rm --network="host" -v .:/workbench/ --name my-running-workbench workbench-docker bash -lc "./workbench --config /workbench/rollback.yml" + $(MAKE) jhu_solr + @echo " └─ Done" + +.PHONY: jhu_clean +.SILENT: jhu_clean +## JHU: Destroys all local data, including codebase, docker volumes, and untracked/ignored files. +jhu_clean: + @echo "Domain: $(DOMAIN)." +ifeq ($(DOMAIN),islandora.traefik.me) + @echo "**DANGER** About to rm your SERVER data subdirs, your docker volumes, islandora_workbench, certs, secrets, codebase/, and all untracked/ignored files (including changes to .env)." 
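+# Note: the `sudo rm -fr ...` and `git stash`/`git clean` steps below are currently commented
+# out, so this target only stops the stack and removes its containers and volumes.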
+	docker compose down -v --remove-orphans || true
+	$(MAKE) confirm
+	# sudo rm -fr certs secrets/live/* docker-compose.yml codebase islandora_workbench
+	# -git stash
+	# -git clean -xffd .
+	# -git checkout .
+	@echo "Codebase/ was completely removed."
+	@echo " └─ Done"
+else
+	@echo "Warning: jhu_clean command is blocked on production server (DOMAIN=$(DOMAIN))."
+endif
+
+.PHONY: jhu_reset
+.SILENT: jhu_reset
+## JHU: Destroys all local data and docker volumes without removing the codebase or workbench.
+jhu_reset:
+	@echo "**DANGER** About to rm your SERVER data subdirs, your docker volumes, certs, secrets."
+	$(MAKE) confirm
+	docker compose down -v --remove-orphans || true
+	sudo rm -fr certs docker-compose.yml
+	@echo " └─ Done shutting down containers and removing volumes."
+	$(MAKE) jhu_up
+
+.PHONY: jhu_down
+.SILENT: jhu_down
+## JHU: Brings the local site down without destroying data.
+jhu_down:
+	-docker compose down
+
+.PHONY: jhu_config_export
+.SILENT: jhu_config_export
+## JHU: Exports the site's configuration.
+jhu_config_export:
+	# rm -rf codebase/config/sync/* && git checkout -- config/sync
+	docker compose exec drupal with-contenv bash -lc "chown -R nginx: /var/www/drupal/config/sync/"
+	docker compose exec -T drupal drush -l $(SITE) config:export -y
+	$(MAKE) set-codebase-owner
+
+.PHONY: jhu_config_import
+.SILENT: jhu_config_import
+## JHU: Imports the site's configuration.
+jhu_config_import:
+	$(MAKE) set-codebase-owner
+	@echo "Wait for the /var/www/drupal directory to be available"
+	while ! docker compose exec -T drupal with-contenv bash -lc 'test -d /var/www/drupal'; do \
+		echo "Waiting for /var/www/drupal directory to be available..."; \
+		sleep 2; \
+	done
+	docker compose exec -T drupal with-contenv bash -lc 'chown -R nginx:nginx /var/www/drupal/ && su nginx -s /bin/bash -c "composer install"'
+	docker compose exec drupal with-contenv bash -lc "chown -R nginx: /var/www/drupal/config/sync/"
+	docker compose exec -T drupal drush -l $(SITE) config:import -y --debug
+	$(MAKE) set-codebase-owner
+
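+# Illustrative config round trip using the two targets above (assumes config/sync
+# is tracked inside the codebase repo, as the commented rm/checkout line suggests):
+#   make jhu_config_export
+#   cd codebase && git add config/sync && git commit -m "Export config" && cd ..
+#   make jhu_config_import
+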
+.PHONY: jhu_dev_tools_enable
+.SILENT: jhu_dev_tools_enable
+## JHU: Enables the devel module and sets up coding-standards tooling (coder, phpcs, phpcbf).
+jhu_dev_tools_enable:
+	$(MAKE) set-codebase-owner
+	docker compose exec drupal with-contenv bash -lc "git config --global --add safe.directory /var/www/drupal/vendor/drupal/coder"
+	-docker compose exec drupal with-contenv bash -lc "cp ~/.bashrc ~/.bashrc_BAK || echo 'No .bashrc file to backup'"
+	docker compose exec drupal with-contenv bash -lc "echo \"alias drupal='vendor/drupal/console/bin/drupal'\" > ~/.bashrc"
+	docker compose exec drupal with-contenv bash -lc "echo \"alias phpcs='vendor/squizlabs/php_codesniffer/bin/phpcs'\" >> ~/.bashrc"
+	docker compose exec drupal with-contenv bash -lc "echo \"alias phpcbf='vendor/squizlabs/php_codesniffer/bin/phpcbf'\" >> ~/.bashrc"
+	docker compose exec drupal with-contenv bash -lc "composer require drupal/coder --dev && git config --global --add safe.directory /var/www/drupal/vendor/drupal/coder"
+	docker compose exec drupal with-contenv bash -lc "vendor/squizlabs/php_codesniffer/bin/phpcs --config-set installed_paths vendor/slevomat/coding-standard vendor/phpcompatibility/php-compatibility vendor/drupal/coder/coder_sniffer && vendor/squizlabs/php_codesniffer/bin/phpcs --config-set default_standard Drupal"
+	if [ ! -f "codebase/web/sites/default/services.yml" ]; then cp scripts/services.yml codebase/web/sites/default/services.yml; fi
+	docker compose exec drupal with-contenv bash -lc "drush en devel -y && drush cr"
+
+.PHONY: jhu_repos_export
+.SILENT: jhu_repos_export
+## JHU: Copies the codebase, theme, migration module, and demo content to the parent directory.
+jhu_repos_export:
+	$(MAKE) jhu_config_export
+	-sudo rsync -avz --exclude '.git' --exclude '.gitignore' --exclude '.github' codebase/ ../idc-codebase --delete
+	-sudo chown -R $(USER): ../idc-codebase
+	-sudo rsync -avz --exclude '.git' --exclude '.gitignore' --exclude '.github' codebase/web/themes/contrib/idc_ui_theme_boots ../ --delete
+	-sudo rsync -avz --exclude '.git' --exclude '.gitignore' --exclude '.github' codebase/web/modules/contrib/idc_default_migration ../ --delete
+	-sudo rsync -avz --exclude '.git' --exclude '.gitignore' --exclude '.github' islandora_workbench/islandora_workbench_demo_content ../ --delete
+	$(MAKE) set-codebase-owner
+
+.PHONY: jhu_sync_repos
+.SILENT: jhu_sync_repos
+## JHU: Copies the codebase, theme, migration module, and demo content from the parent directory.
+jhu_sync_repos:
+	$(MAKE) set-codebase-owner
+	[ -d "../idc-codebase/" ] && rsync -avz ../idc-codebase/ codebase
+	[ -d "../idc_ui_theme_boots/" ] && rsync -avz ../idc_ui_theme_boots/ codebase/web/themes/contrib/idc_ui_theme_boots --delete
+	[ -d "../idc_default_migration/" ] && rsync -avz ../idc_default_migration codebase/web/modules/contrib/idc_default_migration --delete
+	mkdir -p islandora_workbench/islandora_workbench_demo_content
+	[ -d "../islandora_workbench_demo_content/" ] && rsync -avz ../islandora_workbench_demo_content/ islandora_workbench/islandora_workbench_demo_content --delete
+	$(MAKE) set-codebase-owner
+
+.PHONY: jhu_update_theme
+.SILENT: jhu_update_theme
+## JHU: Updates the theme so composer picks up its latest commit hash.
+jhu_update_theme:
+	rm -rf codebase/web/themes/contrib/idc_ui_theme_boots
+	docker compose exec drupal with-contenv bash -lc 'composer clearcache && composer update --prefer-source "islandora/idc_ui_theme_boots" --with-all-dependencies'
+
+.PHONY: jhu_change_domain
+.SILENT: jhu_change_domain
+## JHU: Changes the DOMAIN value in .env.
+jhu_change_domain:
+	# Change the DOMAIN= in .env
+	sed -i "s/DOMAIN=.*/DOMAIN=$(DOMAIN)/g" .env
+
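+# Example (illustrative): pass the new value on the command line and it is
+# written verbatim into .env:
+#   make jhu_change_domain DOMAIN=islandora.example.edu
+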
+.PHONY: jhu_stop
+.SILENT: jhu_stop
+## JHU: Turns off IDC without losing anything.
+jhu_stop:
+	@echo ""
+	@echo "Stopping containers"
+	docker compose stop
+	@echo " └─ Done"
+
+.PHONY: jhu_reg_jwt
+.SILENT: jhu_reg_jwt
+## JHU: Regenerate JWT secrets.
+jhu_reg_jwt:
+	docker run --rm -t \
+		-v "$(CURDIR)/secrets":/secrets \
+		-v "$(CURDIR)/build/scripts/generate-jwt-secrets.sh":/generate-jwt-secrets.sh \
+		-w / \
+		--entrypoint bash \
+		$(REPOSITORY)/drupal:$(TAG) -c "/generate-jwt-secrets.sh && chown -R `id -u`:`id -g` /secrets"
\ No newline at end of file
diff --git a/scripts/services.yml b/scripts/services.yml
new file mode 100644
index 000000000..56da8aba2
--- /dev/null
+++ b/scripts/services.yml
@@ -0,0 +1,4 @@
+parameters:
+  twig.config:
+    debug: true
+    auto_reload: true
\ No newline at end of file
diff --git a/secrets/template/MATOMO_DB_PASSWORD b/secrets/template/MATOMO_DB_PASSWORD
new file mode 100644
index 000000000..7aa311adf
--- /dev/null
+++ b/secrets/template/MATOMO_DB_PASSWORD
@@ -0,0 +1 @@
+password
\ No newline at end of file
diff --git a/secrets/template/MATOMO_USER_PASS b/secrets/template/MATOMO_USER_PASS
new file mode 100644
index 000000000..7aa311adf
--- /dev/null
+++ b/secrets/template/MATOMO_USER_PASS
@@ -0,0 +1 @@
+password
\ No newline at end of file
diff --git a/server_specs_and_performance.sh b/server_specs_and_performance.sh
new file mode 100755
index 000000000..21e5af13e
--- /dev/null
+++ b/server_specs_and_performance.sh
@@ -0,0 +1,125 @@
+#!/bin/bash
+
+# Description:
+# server_specs_and_performance.sh is a comprehensive shell script designed to provide an easy-to-read
+# summary of a server's hardware specifications and performance metrics. Specifically, the script:
+#
+# 1. Benchmarks CPU and I/O performance:
+#    - Runs `vmstat` every 5 seconds for 1 minute to collect performance metrics.
+#    - Analyzes metrics such as `r`, `us`, `sy`, `id`, `wa`, `bi`, and `bo` to determine if there are
+#      potential issues with CPU or disk I/O performance.
+#    - Provides recommendations based on the analysis, such as increasing the number of CPUs or
+#      investigating disk performance issues.
+#
+# 2. Displays hardware specifications:
+#    - Number of CPU cores, CPU model, and CPU speed.
+#    - Total, free, and available memory.
+#    - Storage information including mount path, size, used, and available space.
+#
+# 3. Handles potential errors gracefully to ensure the output is easy to read and understand.
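+#
+# Usage (illustrative): run it on the host with no arguments, e.g.
+#   ./server_specs_and_performance.sh
+#
+# For reference, the vmstat data columns parsed below appear in this order:
+#   r b swpd free buff cache si so bi bo in cs us sy id wa st
+# so r is field 1, bi/bo are fields 9/10, and us/sy/id/wa are fields 13-16.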
+
+# Global variables
+CPU_INFO_FILE="/proc/cpuinfo"
+MEM_INFO_FILE="/proc/meminfo"
+DISK_INFO_CMD="df -h --output=source,size,used,avail,pcent,target"
+VMSTAT_CMD="vmstat 5 12"
+VMSTAT_OUTPUT="/tmp/vmstat_output.txt"
+
+CPU_CORES=$(grep -c '^processor' "$CPU_INFO_FILE")
+# Exported so the awk block in display_vmstat_summary can read it via ENVIRON.
+export CPU_CORES
+CPU_MODEL=$(grep -m 1 'model name' "$CPU_INFO_FILE" | awk -F: '{print $2}' | xargs)
+CPU_SPEED=$(grep -m 1 'cpu MHz' "$CPU_INFO_FILE" | awk -F: '{print $2}' | xargs)
+TOTAL_MEM=$(grep -i 'MemTotal' "$MEM_INFO_FILE" | awk '{print $2}')
+FREE_MEM=$(grep -i 'MemFree' "$MEM_INFO_FILE" | awk '{print $2}')
+AVAILABLE_MEM=$(grep -i 'MemAvailable' "$MEM_INFO_FILE" | awk '{print $2}')
+TOTAL_MEM_HUMAN=$(awk "BEGIN {printf \"%.2f GB\", $TOTAL_MEM / 1024 / 1024}")
+FREE_MEM_HUMAN=$(awk "BEGIN {printf \"%.2f GB\", $FREE_MEM / 1024 / 1024}")
+AVAILABLE_MEM_HUMAN=$(awk "BEGIN {printf \"%.2f GB\", $AVAILABLE_MEM / 1024 / 1024}")
+DISK_INFO=$(eval "$DISK_INFO_CMD")
+
+# Function to display CPU information
+display_cpu_info() {
+    printf "\n=== CPU Information ===\n"
+    printf "Model : %s\n" "$CPU_MODEL"
+    printf "Cores : %d\n" "$CPU_CORES"
+    printf "Speed : %.2f MHz\n" "$CPU_SPEED"
+}
+
+# Function to display Memory information
+display_memory_info() {
+    printf "\n=== Memory Information ===\n"
+    printf "Total Memory : %s\n" "$TOTAL_MEM_HUMAN"
+    printf "Free Memory : %s\n" "$FREE_MEM_HUMAN"
+    printf "Available Memory : %s\n" "$AVAILABLE_MEM_HUMAN"
+}
+
+# Function to display Disk information
+display_disk_info() {
+    printf "\n=== Disk Information ===\n"
+    printf "%s\n" "$DISK_INFO"
+}
+
+# Function to display VMStat summary
+display_vmstat_summary() {
+    printf "\n=== VMStat Summary ===\n"
+    if [[ -f "$VMSTAT_OUTPUT" ]]; then
+        awk '
+        BEGIN {
+            r_sum = 0; us_sum = 0; sy_sum = 0; id_sum = 0; wa_sum = 0; bi_sum = 0; bo_sum = 0;
+            count = 0;
+        }
+        NR > 2 {
+            r_sum += $1; us_sum += $13; sy_sum += $14; id_sum += $15; wa_sum += $16; bi_sum += $9; bo_sum += $10;
+            count++;
+        }
+        END {
+            r_avg = r_sum / count;
+            us_avg = us_sum / count;
+            sy_avg = sy_sum / count;
+            id_avg = id_sum / count;
+            wa_avg = wa_sum / count;
+            bi_avg = bi_sum / count;
+            bo_avg = bo_sum / count;
+
+            printf "Average r : %.2f\n", r_avg;
+            printf "Average us : %.2f\n", us_avg;
+            printf "Average sy : %.2f\n", sy_avg;
+            printf "Average id : %.2f\n", id_avg;
+            printf "Average wa : %.2f\n", wa_avg;
+            printf "Average bi : %.2f\n", bi_avg;
+            printf "Average bo : %.2f\n", bo_avg;
+
+            if (r_avg > ENVIRON["CPU_CORES"] + 0) {
+                printf "\nRecommendation: Increase the number of CPUs. (Average r is higher than the number of CPUs)\n";
+            }
+            if (wa_avg > 10) {
+                printf "\nRecommendation: Investigate disk performance. (Average wa is high)\n";
+            }
+            if (bi_avg > 1000 || bo_avg > 1000) {
+                printf "\nRecommendation: Investigate disk performance. (High bi and bo values indicate heavy disk I/O)\n";
+            }
+        }' "$VMSTAT_OUTPUT"
+    else
+        printf "Error: VMStat output file not found.\n" >&2
+    fi
+}
+
+# Function to run VMStat and collect data
+collect_vmstat_data() {
+    printf "Collecting VMStat data... (This will take 1 minute)\n"
+    if ! $VMSTAT_CMD > "$VMSTAT_OUTPUT"; then
+        printf "Error: Failed to collect VMStat data.\n" >&2
+        return 1
+    fi
+}
+
+# Main function
+main() {
+    collect_vmstat_data
+    display_vmstat_summary
+    display_cpu_info
+    display_memory_info
+    display_disk_info
+}
+
+main
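+
+# Illustrative reading of the summary above (not real output): on a 4-core host,
+# an average r of 6 would trigger the "increase the number of CPUs" note, and an
+# average wa above 10 or bi/bo above 1000 would trigger the disk I/O notes.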