Added node-red and pgadmin to the apps role

b0xxer 2025-04-16 13:13:49 -05:00
parent 5deb8114ed
commit 98989b8a68
17 changed files with 659 additions and 3 deletions

hosts (10 changes)
View File

@@ -22,7 +22,7 @@ zerotier_network=
[n0xb0x:vars]
hostname=n0xb0x
available_apps=['caddy.yml','bitcoin.yml','electrs.yml','clightning.yml','lnbits.yml','rtl.yml', 'docs.yml']
available_apps=['caddy.yml','bitcoin.yml','electrs.yml','clightning.yml','lnbits.yml','rtl.yml', 'docs.yml', 'node-red.yml', 'pgadmin.yml', 'postgres.yml']
ansible_user=n0xb0x
ansible_password=n0xb0x
#registry_url=git.b0xx.org/b0xxer
@@ -45,7 +45,15 @@ rtl_enabled=true
rtl_version=0.15.0
rtl_password=n0xb0x
lnbits_version=v1.0.0
nodered_version=4.0.9
nodered_enabled=true
pgadmin_enabled=true
pgadmin_email=pgadmin@b0xx.org
pgadmin_password=b0xxer
postgres_version=16.1
postgres_enabled=true
tor_version=1.0
tor_enabled=true
zerotier_network=74a75ebfb84ab0db
#Update wariness - 1 = very reluctant to update, 0 = eager to update
#timezone - which timezone the update schedule is in

View File

@@ -0,0 +1 @@
podman build --tag node-red:{{nodered_version}} .

View File

@@ -0,0 +1,19 @@
[
{
"id": "f6f2187d.f17ca8",
"type": "tab",
"label": "Flow 1",
"disabled": false,
"info": ""
},
{
"id": "3cc11d24.ff01a2",
"type": "comment",
"z": "f6f2187d.f17ca8",
"name": "WARNING: please check you have started this container with a volume that is mounted to /data\\n otherwise any flow changes are lost when you redeploy or upgrade the container\\n (e.g. upgrade to a more recent node-red docker image).\\n If you are using named volumes you can ignore this warning.\\n Double click or see info side panel to learn how to start Node-RED in Docker to save your work",
"info": "\nTo start docker with a bind mount volume (-v option), for example:\n\n```\ndocker run -it -p 1880:1880 -v /home/user/node_red_data:/data --name mynodered nodered/node-red\n```\n\nwhere `/home/user/node_red_data` is a directory on your host machine where you want to store your flows.\n\nIf you do not do this then you can experiment and redploy flows, but if you restart or upgrade the container the flows will be disconnected and lost. \n\nThey will still exist in a hidden data volume, which can be recovered using standard docker techniques, but that is much more complex than just starting with a named volume as described above.",
"x": 350,
"y": 80,
"wires": []
}
]

View File

@@ -0,0 +1,34 @@
var http = require('http');
var https = require('https');
var settings = require('/data/settings.js');
var request;
process.env["NODE_TLS_REJECT_UNAUTHORIZED"] = 0;
var options = {
host : "127.0.0.1",
port : settings.uiPort || 1880,
timeout : 4000
};
if (settings.hasOwnProperty("https")) {
request = https.request(options, (res) => {
//console.log(`STATUS: ${res.statusCode}`);
if ((res.statusCode >= 200) && (res.statusCode < 500)) { process.exit(0); }
else { process.exit(1); }
});
}
else {
request = http.request(options, (res) => {
//console.log(`STATUS: ${res.statusCode}`);
if ((res.statusCode >= 200) && (res.statusCode < 500)) { process.exit(0); }
else { process.exit(1); }
});
}
request.on('error', function(err) {
//console.log('ERROR',err);
process.exit(1);
});
request.end();
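
For a quick manual check, the same probe can be run inside a live container; this sketch assumes the container is named node-red (as in the quadlet and run command elsewhere in this commit) and that healthcheck.js sits at / as the Containerfile copies it:

# Exercise the healthcheck by hand; exit code 0 means the editor answered with HTTP < 500
podman exec node-red node /healthcheck.js
echo "healthcheck exit code: $?"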

View File

@@ -0,0 +1,37 @@
{
"name": "node-red-docker",
"version": "4.0.9",
"description": "Low-code programming for event-driven applications",
"homepage": "http://nodered.org",
"license": "Apache-2.0",
"repository": {
"type": "git",
"url": "https://github.com/node-red/node-red-docker.git"
},
"main": "node_modules/node-red/red/red.js",
"scripts": {
"start": "node $NODE_OPTIONS node_modules/node-red/red.js $FLOWS",
"debug": "node --inspect=0.0.0.0:9229 $NODE_OPTIONS node_modules/node-red/red.js $FLOWS",
"debug_brk": "node --inspect=0.0.0.0:9229 --inspect-brk $NODE_OPTIONS node_modules/node-red/red.js $FLOWS"
},
"contributors": [
{
"name": "Dave Conway-Jones"
},
{
"name": "Nick O'Leary"
},
{
"name": "James Thomas"
},
{
"name": "Raymond Mouthaan"
}
],
"dependencies": {
"node-red": "4.0.9"
},
"engines": {
"node": ">=18"
}
}

View File

@@ -0,0 +1 @@
podman run --rm -v node-red:/data:Z -p 1880:1880 --name node-red node-red:latest

View File

@@ -0,0 +1,14 @@
#!/bin/bash
trap stop SIGINT SIGTERM
function stop() {
kill $CHILD_PID
wait $CHILD_PID
}
node $NODE_OPTIONS node_modules/node-red/red.js --userDir /data $FLOWS "${@}" &
CHILD_PID="$!"
wait "${CHILD_PID}"

View File

@@ -0,0 +1,10 @@
#!/bin/bash
set -ex
# Installing Devtools
if [[ ${TAG_SUFFIX} != *"minimal" ]]; then
echo "Installing devtools"
apk add --no-cache --virtual devtools build-base linux-headers udev python3
else
echo "Skip installing devtools"
fi

View File

@@ -0,0 +1,10 @@
#!/bin/bash
set -ex
# Remove native GPIO node if exists
if [[ -d "/usr/src/node-red/node_modules/@node-red/nodes/core/hardware" ]]; then
echo "Removing native GPIO node"
rm -r /usr/src/node-red/node_modules/@node-red/nodes/core/hardware
else
echo "Skip removing native GPIO node"
fi

View File

@@ -0,0 +1,97 @@
- name: node-red - Create containers/node-red dir
  ansible.builtin.file:
    path: ~/containers/node-red
    state: directory
    mode: '0750'
  notify: rebuild_node-red
  tags: [apps,node-red]

- name: node-red - Create ~/vol/node-red dir
  ansible.builtin.file:
    path: ~/vol/node-red
    state: directory
    mode: '0750'
  tags: [apps,node-red]

- name: node-red - Copy package.json
  ansible.builtin.copy:
    src: node-red/package.json
    dest: ~/containers/node-red/package.json
    mode: '0640'
  notify: rebuild_node-red
  tags: [apps,node-red]

- name: node-red - Copy flows.json
  ansible.builtin.copy:
    src: node-red/flows.json
    dest: ~/containers/node-red/flows.json
    mode: '0640'
  notify: rebuild_node-red
  tags: [apps,node-red]

- name: node-red - Copy healthcheck.js
  ansible.builtin.copy:
    src: node-red/healthcheck.js
    dest: ~/containers/node-red/healthcheck.js
    mode: '0640'
  notify: rebuild_node-red
  tags: [apps,node-red]

- name: node-red - Copy run.sh
  ansible.builtin.copy:
    src: node-red/run.sh
    dest: ~/containers/node-red/run.sh
    mode: '0640'
  tags: [apps,node-red]

- name: node-red - Copy scripts directory and files
  ansible.builtin.copy:
    src: node-red/scripts/
    dest: ~/containers/node-red/scripts
    mode: '0750'
  notify: rebuild_node-red
  tags: [apps,node-red]

- name: node-red - Copy Containerfile
  ansible.builtin.template:
    src: nodered/Containerfile.j2
    dest: ~/containers/node-red/Containerfile
    mode: '0640'
  notify: reload_systemctl
  tags: [apps,node-red]

- name: node-red - Copy node-red-node.container
  ansible.builtin.template:
    src: nodered/node-red-node.container.j2
    dest: ~/containers/node-red/node-red-node.container
    mode: '0640'
  notify: reload_systemctl
  tags: [apps,node-red]

- name: node-red - Copy build.sh
  ansible.builtin.template:
    src: node-red/build.sh.j2
    dest: ~/containers/node-red/build.sh
    mode: "0700"
  notify: rebuild_node-red
  tags: [apps,node-red]

- name: node-red - Link node-red-node to .config/containers/systemd
  ansible.builtin.file:
    src: ~/containers/node-red/node-red-node.container
    dest: ~/.config/containers/systemd/node-red-node.container
    state: link
    force: true
    mode: '0640'
  when: nodered_enabled | default(false)
  notify: reload_systemctl
  tags: [apps,node-red]

- name: node-red - Remove node-red-node.container if disabled
  ansible.builtin.file:
    path: ~/.config/containers/systemd/node-red-node.container
    state: absent
  when: not (nodered_enabled | default(false))
  notify: reload_systemctl
  tags: [apps,node-red]
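
Since every task above carries the apps and node-red tags, a scoped run against just this app is possible; the playbook name site.yml below is only a placeholder for whatever play includes the apps role:

# Apply only the node-red tasks of the apps role to the n0xb0x group
ansible-playbook -i hosts site.yml --limit n0xb0x --tags node-red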

View File

@@ -0,0 +1,104 @@
FROM docker.io/almalinux/9-base:latest AS base
ARG NODE_VERSION=20
ARG NODE_RED_VERSION={{ nodered_version }}
# Copy scripts
COPY scripts/*.sh /tmp/
COPY healthcheck.js /
# Install tools, create Node-RED app and data dir, add user and set rights
RUN set -ex && \
dnf update -y && dnf install -y epel-release \
&& dnf install -y --allowerasing bash tzdata curl nano wget git openssl ca-certificates iputils \
&& mkdir -p /usr/src/node-red /data \
&& dnf module -y enable nodejs:${NODE_VERSION} \
&& dnf install -y nodejs \
&& dnf clean all \
## && deluser --remove-home node \
# adduser --home /usr/src/node-red --disabled-password --no-create-home node-red --uid 1000 && \
&& useradd --home-dir /usr/src/node-red --uid 1000 node-red \
&& chown -R node-red:root /data && chmod -R g+rwX /data \
&& chown -R node-red:root /usr/src/node-red && chmod -R g+rwX /usr/src/node-red
# chown -R node-red:node-red /data && \
# chown -R node-red:node-red /usr/src/node-red
# Set work directory
WORKDIR /usr/src/node-red
# Setup SSH known_hosts file
#COPY known_hosts.sh .
#RUN ./known_hosts.sh /etc/ssh/ssh_known_hosts && rm /usr/src/node-red/known_hosts.sh
#RUN echo "PubkeyAcceptedKeyTypes +ssh-rsa" >> /etc/ssh/ssh_config
# package.json contains Node-RED NPM module and node dependencies
COPY package.json .
COPY flows.json /data
COPY scripts/entrypoint.sh .
RUN chmod u+x ./entrypoint.sh
#### Stage BUILD #######################################################################################################
FROM base AS build
# Install Build tools
RUN dnf update -y && dnf install -y epel-release python3 \
&& dnf clean all \
&& rm -rf /var/cache/* /var/log* /tmp/*
RUN npm install --unsafe-perm --no-update-notifier --no-fund --only=production && \
npm uninstall node-red-node-gpio && \
cp -R node_modules prod_node_modules
#### Stage RELEASE #####################################################################################################
FROM base AS release
ARG BUILD_DATE
ARG BUILD_VERSION
ARG BUILD_REF
ARG NODE_RED_VERSION
ARG ARCH
ARG TAG_SUFFIX=default
LABEL org.label-schema.build-date=${BUILD_DATE} \
org.label-schema.docker.dockerfile="nodered/Containerfile" \
org.label-schema.license="Apache-2.0" \
org.label-schema.name="Node-RED" \
org.label-schema.version=${BUILD_VERSION} \
org.label-schema.description="Low-code programming for event-driven applications." \
org.label-schema.url="https://nodered.org" \
org.label-schema.vcs-ref=${BUILD_REF} \
org.label-schema.vcs-type="Git" \
org.label-schema.vcs-url="https://github.com/node-red/node-red-docker" \
org.opencontainers.image.source="https://github.com/node-red/node-red-docker" \
org.label-schema.arch=${ARCH} \
authors="Dave Conway-Jones, Nick O'Leary, James Thomas, Raymond Mouthaan"
COPY --from=build /usr/src/node-red/prod_node_modules ./node_modules
# Chown, install devtools & Clean up
RUN chown -R node-red:root /usr/src/node-red \
&& dnf update -y && dnf install -y python3-devel python3 \
&& dnf groupinstall -y "Development Tools" \
&& rm -r /tmp/*
RUN npm config set cache /data/.npm --global
USER node-red
# Env variables
ENV NODE_RED_VERSION=$NODE_RED_VERSION \
NODE_PATH=/usr/src/node-red/node_modules:/data/node_modules \
PATH=/usr/src/node-red/node_modules/.bin:${PATH} \
FLOWS=flows.json
# ENV NODE_RED_ENABLE_SAFE_MODE=true # Uncomment to enable safe start mode (flows not running)
# ENV NODE_RED_ENABLE_PROJECTS=true # Uncomment to enable projects option
# Expose the listening port of node-red
EXPOSE 1880
# Add a healthcheck (default every 30 secs)
HEALTHCHECK CMD node /healthcheck.js
ENTRYPOINT ["./entrypoint.sh"]
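
The build.sh template only passes the image tag; if the label-schema metadata declared by the ARGs above is wanted, a fuller (hypothetical) invocation could pass it explicitly:

# Build with the optional metadata args the Containerfile declares; values are examples
podman build \
  --build-arg NODE_RED_VERSION=4.0.9 \
  --build-arg BUILD_DATE="$(date -u +%Y-%m-%dT%H:%M:%SZ)" \
  --build-arg BUILD_VERSION=4.0.9 \
  --build-arg TAG_SUFFIX=default \
  --tag node-red:4.0.9 .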

View File

@@ -0,0 +1 @@
podman build --tag node-red:{{ nodered_version }} .

View File

@@ -3,9 +3,10 @@ Description=Node-Red Container
After=network-online.target
[Container]
Image=docker.io/nodered/node-red:4.0.2-22
#Image=docker.io/nodered/node-red:{{ nodered_version }}
Image={{podman_registry}}/node-red:{{ nodered_version }}
ContainerName=node-red
Volume=nodered:/data:Z
Volume=node-red:/data:Z
PublishPort=0.0.0.0:1880:1880
[Service]
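
Once the link task drops this file into ~/.config/containers/systemd, quadlet should generate a matching user unit (conventionally node-red-node.service); a rough way to activate and verify it:

# Regenerate quadlet-backed units and start the container for the unprivileged user
systemctl --user daemon-reload
systemctl --user start node-red-node.service
# Confirm the editor is up on the published port
podman logs node-red
curl -s -o /dev/null -w '%{http_code}\n' http://127.0.0.1:1880/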

View File

@@ -0,0 +1,33 @@
FROM docker.io/almalinux/9-init:latest
COPY entrypoint.sh entrypoint.sh
RUN dnf install -y epel-release \
&& dnf update -y \
&& dnf install -y https://ftp.postgresql.org/pub/pgadmin/pgadmin4/yum/pgadmin4-redhat-repo-2-1.noarch.rpm \
&& dnf install -y pgadmin4-web policycoreutils-python-utils \
&& dnf clean all \
&& rm -rf /var/cache/* /tmp/* \
&& mkdir -p /var/log/pgadmin /var/lib/pgadmin \
&& PGADMIN_SETUP_EMAIL=pgadmin@b0xx.org PGADMIN_SETUP_PASSWORD=b0xx /usr/pgadmin4/venv/bin/python3 /usr/pgadmin4/web/setup.py setup-db \
&& chown -R apache:apache /var/log/pgadmin \
&& chown -R apache:apache /var/lib/pgadmin \
&& ln -s /usr/lib/systemd/system/httpd.service /etc/systemd/system/multi-user.target.wants/httpd.service
# && chown apache: /var/log/pgadmin /var/lib/pgadmin -R
# && systemctl enable --now httpd
# && sudo AUTOMATED=1 PGADMIN_SETUP_EMAIL={{pgadmin_email}} PGADMIN_SETUP_PASSWORD={{pgadmin_password}} /usr/pgadmin4/bin/setup-web.sh
#CMD ["httpd", "-D", "FOREGROUND"]
#CMD ["/entrypoint.sh"]
EXPOSE 80
CMD ["/usr/sbin/init"]

View File

@@ -0,0 +1,33 @@
FROM docker.io/almalinux/9-init:latest
COPY entrypoint.sh entrypoint.sh
RUN dnf install -y epel-release \
&& dnf update -y \
&& dnf install -y https://ftp.postgresql.org/pub/pgadmin/pgadmin4/yum/pgadmin4-redhat-repo-2-1.noarch.rpm \
&& dnf install -y pgadmin4-web policycoreutils-python-utils \
&& dnf clean all \
&& rm -rf /var/cache/* /tmp/* \
&& mkdir -p /var/log/pgadmin /var/lib/pgadmin \
&& PGADMIN_SETUP_EMAIL={{pgadmin_email}} PGADMIN_SETUP_PASSWORD={{pgadmin_password}} /usr/pgadmin4/venv/bin/python3 /usr/pgadmin4/web/setup.py setup-db \
&& chown -R apache:apache /var/log/pgadmin \
&& chown -R apache:apache /var/lib/pgadmin \
&& ln -s /usr/lib/systemd/system/httpd.service /etc/systemd/system/multi-user.target.wants/httpd.service
# && chown apache: /var/log/pgadmin /var/lib/pgadmin -R
# && systemctl enable --now httpd
# && sudo AUTOMATED=1 PGADMIN_SETUP_EMAIL={{pgadmin_email}} PGADMIN_SETUP_PASSWORD={{pgadmin_password}} /usr/pgadmin4/bin/setup-web.sh
#CMD ["httpd", "-D", "FOREGROUND"]
#CMD ["/entrypoint.sh"]
EXPOSE 80
CMD ["/usr/sbin/init"]
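
This commit does not include a run command or quadlet for pgadmin, so a manual smoke test is a guess; host port 8080 and the volume name pgadmin below are illustrative only, while /var/lib/pgadmin is the data directory the image prepares:

# Build the rendered Containerfile and run the systemd-init based image with persistent state
podman build --tag pgadmin:latest .
podman run -d --name pgadmin -p 8080:80 -v pgadmin:/var/lib/pgadmin:Z pgadmin:latest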

View File

@@ -0,0 +1,192 @@
#!/usr/bin/env bash
# Fixup the passwd file, in case we're on OpenShift
if ! whoami > /dev/null 2>&1; then
if [ "$(id -u)" -ne 5050 ]; then
if [ -w /etc/passwd ]; then
echo "${USER_NAME:-pgadminr}:x:$(id -u):0:${USER_NAME:-pgadminr} user:${HOME}:/sbin/nologin" >> /etc/passwd
fi
fi
fi
# usage: file_env VAR [DEFAULT] ie: file_env 'XYZ_DB_PASSWORD' 'example'
# (will allow for "$XYZ_DB_PASSWORD_FILE" to fill in the value of
# "$XYZ_DB_PASSWORD" from a file, for Docker's secrets feature)
function file_env() {
local var="$1"
local fileVar="${var}_FILE"
local def="${2:-}"
if [ "${!var:-}" ] && [ "${!fileVar:-}" ]; then
printf >&2 'error: both %s and %s are set (but are exclusive)\n' "$var" "$fileVar"
exit 1
fi
local val="$def"
if [ "${!var:-}" ]; then
val="${!var}"
elif [ "${!fileVar:-}" ]; then
val="$(< "${!fileVar}")"
fi
export "$var"="$val"
unset "$fileVar"
}
# Set values for config variables that can be passed using secrets
if [ -n "${PGADMIN_CONFIG_CONFIG_DATABASE_URI_FILE}" ]; then
file_env PGADMIN_CONFIG_CONFIG_DATABASE_URI
fi
file_env PGADMIN_DEFAULT_PASSWORD
# TO enable custom path for config_distro, pass config distro path via environment variable.
export CONFIG_DISTRO_FILE_PATH="${PGADMIN_CUSTOM_CONFIG_DISTRO_FILE:-/pgadmin4/config_distro.py}"
# Populate config_distro.py. This has some default config, as well as anything
# provided by the user through the PGADMIN_CONFIG_* environment variables.
# Only update the file on first launch. The empty file is created only in default path during the
# container build so it can have the required ownership.
if [ ! -e "${CONFIG_DISTRO_FILE_PATH}" ] || [ "$(wc -m "${CONFIG_DISTRO_FILE_PATH}" 2>/dev/null | awk '{ print $1 }')" = "0" ]; then
cat << EOF > "${CONFIG_DISTRO_FILE_PATH}"
CA_FILE = '/etc/ssl/certs/ca-certificates.crt'
LOG_FILE = '/dev/null'
HELP_PATH = '../../docs'
DEFAULT_BINARY_PATHS = {
'pg': '/usr/local/pgsql-17',
'pg-17': '/usr/local/pgsql-17',
'pg-16': '/usr/local/pgsql-16',
'pg-15': '/usr/local/pgsql-15',
'pg-14': '/usr/local/pgsql-14',
'pg-13': '/usr/local/pgsql-13'
}
EOF
# This is a bit kludgy, but necessary as the container uses BusyBox/ash as
# its shell and not bash which would allow a much cleaner implementation
for var in $(env | grep "^PGADMIN_CONFIG_" | cut -d "=" -f 1); do
# shellcheck disable=SC2086
# shellcheck disable=SC2046
echo ${var#PGADMIN_CONFIG_} = $(eval "echo \$$var") >> "${CONFIG_DISTRO_FILE_PATH}"
done
fi
# Check whether the external configuration database exists if it is being used.
external_config_db_exists="False"
if [ -n "${PGADMIN_CONFIG_CONFIG_DATABASE_URI}" ]; then
external_config_db_exists=$(cd /pgadmin4/pgadmin/utils && /venv/bin/python3 -c "from check_external_config_db import check_external_config_db; val = check_external_config_db('${PGADMIN_CONFIG_CONFIG_DATABASE_URI}'); print(val)")
fi
# DRY of the code to load the PGADMIN_SERVER_JSON_FILE
function load_server_json_file() {
export PGADMIN_SERVER_JSON_FILE="${PGADMIN_SERVER_JSON_FILE:-/pgadmin4/servers.json}"
EXTRA_ARGS=""
if [ "${PGADMIN_REPLACE_SERVERS_ON_STARTUP}" = "True" ]; then
EXTRA_ARGS="--replace"
fi
if [ -f "${PGADMIN_SERVER_JSON_FILE}" ]; then
# When running in Desktop mode, no user is created
# so we have to import servers anonymously
if [ "${PGADMIN_CONFIG_SERVER_MODE}" = "False" ]; then
/venv/bin/python3 /pgadmin4/setup.py load-servers "${PGADMIN_SERVER_JSON_FILE}" ${EXTRA_ARGS}
else
/venv/bin/python3 /pgadmin4/setup.py load-servers "${PGADMIN_SERVER_JSON_FILE}" --user "${PGADMIN_DEFAULT_EMAIL}" ${EXTRA_ARGS}
fi
fi
}
if [ ! -f /var/lib/pgadmin/pgadmin4.db ] && [ "${external_config_db_exists}" = "False" ]; then
if [ -z "${PGADMIN_DEFAULT_EMAIL}" ] || { [ -z "${PGADMIN_DEFAULT_PASSWORD}" ] && [ -z "${PGADMIN_DEFAULT_PASSWORD_FILE}" ]; }; then
echo 'You need to define the PGADMIN_DEFAULT_EMAIL and PGADMIN_DEFAULT_PASSWORD or PGADMIN_DEFAULT_PASSWORD_FILE environment variables.'
exit 1
fi
# Validate PGADMIN_DEFAULT_EMAIL
CHECK_EMAIL_DELIVERABILITY="False"
if [ -n "${PGADMIN_CONFIG_CHECK_EMAIL_DELIVERABILITY}" ]; then
CHECK_EMAIL_DELIVERABILITY=${PGADMIN_CONFIG_CHECK_EMAIL_DELIVERABILITY}
fi
ALLOW_SPECIAL_EMAIL_DOMAINS="[]"
if [ -n "${PGADMIN_CONFIG_ALLOW_SPECIAL_EMAIL_DOMAINS}" ]; then
ALLOW_SPECIAL_EMAIL_DOMAINS=${PGADMIN_CONFIG_ALLOW_SPECIAL_EMAIL_DOMAINS}
fi
GLOBALLY_DELIVERABLE="True"
if [ -n "${PGADMIN_CONFIG_GLOBALLY_DELIVERABLE}" ]; then
GLOBALLY_DELIVERABLE=${PGADMIN_CONFIG_GLOBALLY_DELIVERABLE}
fi
email_config="{'CHECK_EMAIL_DELIVERABILITY': ${CHECK_EMAIL_DELIVERABILITY}, 'ALLOW_SPECIAL_EMAIL_DOMAINS': ${ALLOW_SPECIAL_EMAIL_DOMAINS}, 'GLOBALLY_DELIVERABLE': ${GLOBALLY_DELIVERABLE}}"
echo "email config is ${email_config}"
is_valid_email=$(cd /pgadmin4/pgadmin/utils && /venv/bin/python3 -c "from validation_utils import validate_email; val = validate_email('${PGADMIN_DEFAULT_EMAIL}', ${email_config}); print(val)")
if echo "${is_valid_email}" | grep "False" > /dev/null; then
echo "'${PGADMIN_DEFAULT_EMAIL}' does not appear to be a valid email address. Please reset the PGADMIN_DEFAULT_EMAIL environment variable and try again."
echo "Validation output: ${is_valid_email}"
exit 1
fi
# Switch back to root directory for further process
cd /pgadmin4
# Set the default username and password in a
# backwards compatible way
export PGADMIN_SETUP_EMAIL="${PGADMIN_DEFAULT_EMAIL}"
export PGADMIN_SETUP_PASSWORD="${PGADMIN_DEFAULT_PASSWORD}"
# Initialize DB before starting Gunicorn
# Importing pgadmin4 (from this script) is enough
/venv/bin/python3 run_pgadmin.py
export PGADMIN_PREFERENCES_JSON_FILE="${PGADMIN_PREFERENCES_JSON_FILE:-/pgadmin4/preferences.json}"
# Pre-load any required servers
load_server_json_file
# Pre-load any required preferences
if [ -f "${PGADMIN_PREFERENCES_JSON_FILE}" ]; then
if [ "${PGADMIN_CONFIG_SERVER_MODE}" = "False" ]; then
DESKTOP_USER=$(cd /pgadmin4 && /venv/bin/python3 -c 'import config; print(config.DESKTOP_USER)')
/venv/bin/python3 /pgadmin4/setup.py set-prefs "${DESKTOP_USER}" --input-file "${PGADMIN_PREFERENCES_JSON_FILE}"
else
/venv/bin/python3 /pgadmin4/setup.py set-prefs "${PGADMIN_DEFAULT_EMAIL}" --input-file "${PGADMIN_PREFERENCES_JSON_FILE}"
fi
fi
# Copy the pgpass file passed using secrets
if [ -f "${PGPASS_FILE}" ]; then
if [ "${PGADMIN_CONFIG_SERVER_MODE}" = "False" ]; then
cp ${PGPASS_FILE} /var/lib/pgadmin/.pgpass
chmod 600 /var/lib/pgadmin/.pgpass
else
PGADMIN_USER_CONFIG_DIR=$(echo "${PGADMIN_DEFAULT_EMAIL}" | sed 's/@/_/g')
mkdir -p /var/lib/pgadmin/storage/${PGADMIN_USER_CONFIG_DIR}
cp ${PGPASS_FILE} /var/lib/pgadmin/storage/${PGADMIN_USER_CONFIG_DIR}/.pgpass
chmod 600 /var/lib/pgadmin/storage/${PGADMIN_USER_CONFIG_DIR}/.pgpass
fi
fi
# If already initialised and PGADMIN_REPLACE_SERVERS_ON_STARTUP is set to true, then load the server json file.
elif [ "${PGADMIN_REPLACE_SERVERS_ON_STARTUP}" = "True" ]; then
load_server_json_file
fi
# Start Postfix to handle password resets etc.
if [ -z "${PGADMIN_DISABLE_POSTFIX}" ]; then
sudo /usr/sbin/postfix start
fi
# Get the session timeout from the pgAdmin config. We'll use this (in seconds)
# to define the Gunicorn worker timeout
TIMEOUT=$(cd /pgadmin4 && /venv/bin/python3 -c 'import config; print(config.SESSION_EXPIRATION_TIME * 60 * 60 * 24)')
# NOTE: currently pgadmin can run only with 1 worker due to sessions implementation
# Using --threads to have multi-threaded single-process worker
if [ -n "${PGADMIN_ENABLE_SOCK}" ]; then
BIND_ADDRESS="unix:/run/pgadmin/pgadmin.sock"
else
if [ -n "${PGADMIN_ENABLE_TLS}" ]; then
BIND_ADDRESS="${PGADMIN_LISTEN_ADDRESS:-[::]}:${PGADMIN_LISTEN_PORT:-443}"
else
BIND_ADDRESS="${PGADMIN_LISTEN_ADDRESS:-[::]}:${PGADMIN_LISTEN_PORT:-80}"
fi
fi
if [ -n "${PGADMIN_ENABLE_TLS}" ]; then
exec /venv/bin/gunicorn --limit-request-line "${GUNICORN_LIMIT_REQUEST_LINE:-8190}" --timeout "${TIMEOUT}" --bind "${BIND_ADDRESS}" -w 1 --threads "${GUNICORN_THREADS:-25}" --access-logfile "${GUNICORN_ACCESS_LOGFILE:--}" --keyfile /certs/server.key --certfile /certs/server.cert -c gunicorn_config.py run_pgadmin:app
else
exec /venv/bin/gunicorn --limit-request-line "${GUNICORN_LIMIT_REQUEST_LINE:-8190}" --timeout "${TIMEOUT}" --bind "${BIND_ADDRESS}" -w 1 --threads "${GUNICORN_THREADS:-25}" --access-logfile "${GUNICORN_ACCESS_LOGFILE:--}" -c gunicorn_config.py run_pgadmin:app
fi
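
This entrypoint is currently commented out in the pgadmin Containerfile (the image boots systemd instead), but if it were wired back in, the script insists on a default account at first start; its file_env helper also accepts the password from a file, which maps onto podman secrets roughly like this (secret and image names are illustrative):

# Create a secret and pass it to the container; the entrypoint reads PGADMIN_DEFAULT_PASSWORD_FILE
printf 'changeme' | podman secret create pgadmin_password -
podman run -d --name pgadmin \
  -e PGADMIN_DEFAULT_EMAIL=pgadmin@b0xx.org \
  -e PGADMIN_DEFAULT_PASSWORD_FILE=/run/secrets/pgadmin_password \
  --secret pgadmin_password \
  -p 8080:80 pgadmin:latest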

View File

@@ -0,0 +1,61 @@
###############################################################################
#
# IMPORTANT:
#
# If runtime or build time dependencies are changed in this file, the committer
# *must* ensure the DEB and RPM package maintainers are informed as soon as
# possible.
#
###############################################################################
Flask==3.0.*; python_version <= '3.8'
Flask==3.1.*; python_version >= '3.9'
Flask-Login==0.*
Flask-Mail==0.*
Flask-Migrate==4.*
Flask-SQLAlchemy==3.1.*
Flask-WTF==1.2.*
Flask-Compress==1.*
Flask-Paranoid==0.*
Flask-Babel==4.0.*
Flask-Security-Too==5.5.*; python_version >= '3.10'
Flask-Security-Too==5.4.*; python_version <= '3.9'
Flask-SocketIO==5.5.*
WTForms==3.2.*; python_version >= '3.10'
WTForms==3.1.*; python_version <= '3.9'
passlib==1.*
pytz==2024.*; python_version <= '3.8'
pytz==2025.*; python_version >= '3.9'
speaklater3==1.*
sqlparse==0.*
psutil==6.1.*
psycopg[c]==3.2.4
python-dateutil==2.*
SQLAlchemy==2.*
bcrypt==4.2.*
cryptography==44.0.*
sshtunnel==0.*
ldap3==2.*
gssapi==1.9.*
user-agents==2.2.0
pywinpty==2.0.*; sys_platform=="win32"
Authlib==1.3.*; python_version <= '3.8'
Authlib==1.4.*; python_version >= '3.9'
pyotp==2.*
qrcode==7.*; python_version <= '3.8'
qrcode[pil]==8.*; python_version >= '3.9'
boto3==1.36.*
urllib3==1.26.*
azure-mgmt-rdbms==10.1.0
azure-mgmt-resource==23.2.0
azure-mgmt-subscription==3.1.1
azure-identity==1.19.0
google-api-python-client==2.*
google-auth-oauthlib==1.2.1
keyring==25.*
Werkzeug==3.0.*; python_version <= '3.8'
Werkzeug==3.1.*; python_version >= '3.9'
typer[all]==0.15.*
setuptools==75.*; python_version >= '3.12'
jsonformatter~=0.3.4
libgravatar==1.0.*