一、 registry 配置文件

PATH: /etc/docker/registry/config.yml

version: 0.1
log:
  accesslog:
    disabled: true
  level: debug
  formatter: text
  fields:
    service: registry
    environment: staging
storage:
  delete:
    enabled: true
  cache:
    blobdescriptor: inmemory
  filesystem:
    rootdirectory: /var/lib/registry
auth:
  htpasswd:
    realm: basic-realm
    path: /etc/docker/registry/auth/nginx.htpasswd  # htpasswd password file
http:
  addr: ":5000"  # listen port
  host: https://docker.domain.com  # externally visible hostname
  headers:
    X-Content-Type-Options: [nosniff]
  http2:
    disabled: false
  tls:
    certificate: /etc/docker/registry/ssl/docker.domain.com.crt  # public certificate
    key: /etc/docker/registry/ssl/docker.domain.com.key  # private key
health:
  storagedriver:
    enabled: true
    interval: 10s
    threshold: 3

二、 registry docker-compose配置

PATH: /usr/local/docker/docker-compose/registry.yaml

version: "3"
services:
  registry:
    image: registry:latest
    ports:
      # host 443 -> registry's default port 5000 (quoted to avoid YAML
      # sexagesimal parsing of colon-separated numbers)
      - "443:5000"
    container_name: registry
    restart: always
    volumes:
      - /etc/docker/registry:/etc/docker/registry
      - /var/lib/registry:/var/lib/registry
    networks:
      - "my-network"
networks:
  my-network:
    driver: bridge

三、配置 http 认证文件

docker run --rm \
--entrypoint htpasswd \
httpd:alpine \
-Bbn $USERNAME $PASSWORD > /etc/docker/registry/auth/nginx.htpasswd

四、 https证书文件

自签https证书, 推荐ZeroSSL申请免费证书

公钥: /etc/docker/registry/ssl/docker.domain.com.crt
私钥: /etc/docker/registry/ssl/docker.domain.com.key
请求: /etc/docker/registry/ssl/docker.domain.com.csr

五、 完成部署

5.1 启动 registry

docker-compose -f /usr/local/docker/docker-compose/registry.yaml up -d

5.2 修改 hosts

vim /etc/hosts
127.0.0.1 docker.domain.com

5.3 测试 register

浏览器访问或者curl https://docker.domain.com

5.4 登录

一开始我以为会有很多有依赖问题,先做了DVD挂载然后制作离线yum源, 结果就这两个包降级了就行了。

yum downgrade initscripts-9.49.30-1.el7.x86_64 centos-release-7-2.1511.el7.centos.2.10.x86_64

离线源制作

1. 挂载镜像文件

2. centos 里将镜像挂载到指定目录

ls /dev/cdrom
mount -t iso9660 -o loop /dev/cdrom /mnt/cdrom/

如果你想开机自动挂载
vim /etc/fstab
/dev/cdrom /mnt/cdrom iso9660 defaults 0 0

3. yum 源编写

vim /etc/yum.repos.d/centos7.2-old.repo

# vim centos7.2-old.repo
[centos7.2-old]
name=centos7.2-old
baseurl=file:///mnt/cdrom
# yum's option is spelled "enabled" (the original "enable=1" is not a
# recognized yum.conf repo option)
enabled=1
gpgcheck=0

yum makecache

4. 查看源里的软件包信息

yum repo-pkgs centos7.2-old list | grep "centos-release"

yum --disablerepo "*" --enablerepo centos7.2-old list | grep "centos-release"

nginx

version: "3"
services:
  nginx:
    # image built earlier with `docker build -t nginx1.24 .`
    # (original said "ngin124", which is not the tag that was built)
    image: nginx1.24:latest
    ports:
      # map host port 8080 to container port 80
      - "8080:80"
    container_name: nginx-yaml
    restart: always
    volumes:
      # NOTE(review): the Dockerfile installs nginx with
      # --prefix=/usr/local/nginx-1.24.0, so the container-side paths must
      # use nginx-1.24.0 (original used nginx-1.24) — confirm against image
      - /usr/local/nginx1.24/html:/usr/local/nginx-1.24.0/html
      - /usr/local/nginx1.24/conf/nginx.conf:/usr/local/nginx-1.24.0/conf/nginx.conf
    networks:
      - "my-network"
networks:
  my-network:
    driver: bridge

redis

version: "3"
services:
  redis:
    image: redis7.2.1:latest
    ports:
      - "6379:6379"
    container_name: redis-yaml
    restart: always
    volumes:
      # NOTE(review): the image's ENTRYPOINT reads
      # /usr/local/redis7.2.1/redis.conf, so the host config must be mounted
      # onto that exact file (original mounted onto ".../conf", a path the
      # image does not read) — confirm against the image
      - /etc/redis.conf:/usr/local/redis7.2.1/redis.conf
    networks:
      - "my-network"
networks:
  my-network:
    driver: bridge

tomcat

version: "3"
services:
  tomcat:
    image: tomcat8.5.93:latest
    ports:
      # map host port 9090 to Tomcat's port 8080
      - "9090:8080"
    container_name: tomcat-yaml
    restart: always
    volumes:
      - /usr/local/tomcat-8.5.93/conf/server.xml:/usr/local/apache-tomcat-8.5.93/conf/server.xml
    networks:
      - "my-network"
networks:
  my-network:
    driver: bridge

零、 配置 docker 自动补齐

1 安装 bash-completion

yum install bash-completion bash-completion-extras.noarch

2 下载配置文件

curl https://raw.githubusercontent.com/docker/docker-ce/master/components/cli/contrib/completion/bash/docker -o /etc/bash_completion.d/docker.sh

3 source 生效

source /etc/bash_completion.d/docker.sh

一、 CentOS7.9 官方镜像

docker pull centos:centos7.9.2009

二、 使用 docker 构建 nginx镜像

2.1 Dockerfile

FROM centos:centos7.9.2009

# MAINTAINER is deprecated in modern Docker; LABEL is the supported form
LABEL maintainer="zhuang"

# Point the CentOS 7 repos at a working mirror
RUN sed -e 's|^mirrorlist=|#mirrorlist=|g' \
    -e 's|^#baseurl=http://mirror.centos.org/centos|baseurl=https://mirror.nju.edu.cn/centos|g' \
    -i.bak \
    /etc/yum.repos.d/CentOS-*.repo

# Build dependencies for compiling nginx from source
RUN yum install -y gcc gcc-c++ make automake autoconf libtool pcre pcre-devel zlib openssl openssl-devel

WORKDIR /usr/local

# Download, unpack, build and install nginx 1.24.0 into /usr/local/nginx-1.24.0
# (source tree is renamed to nginx-1.24.0-src so the install prefix can reuse
# the nginx-1.24.0 name)
RUN curl -O https://nginx.org/download/nginx-1.24.0.tar.gz && \
    tar -xzvf nginx-1.24.0.tar.gz -C /usr/local && \
    mv /usr/local/nginx-1.24.0 /usr/local/nginx-1.24.0-src && \
    cd nginx-1.24.0-src && \
    ./configure --prefix=/usr/local/nginx-1.24.0 && \
    make && make install

# Expose nginx's HTTP port
EXPOSE 80

# Run nginx in the foreground so it stays PID 1 and keeps the container alive
CMD ["/usr/local/nginx-1.24.0/sbin/nginx", "-g", "daemon off;"]

2.2 build

docker build -t nginx1.24 .

2.3 run

docker run -d -p 80:80 nginx1.24

三、 使用 docker 构建 redis

3.1 Dockerfile

FROM centos:centos7.9.2009

# MAINTAINER is deprecated in modern Docker; LABEL is the supported form
LABEL maintainer="zhuang"

# Point the CentOS 7 repos at a working mirror
RUN sed -e 's|^mirrorlist=|#mirrorlist=|g' \
    -e 's|^#baseurl=http://mirror.centos.org/centos|baseurl=https://mirror.nju.edu.cn/centos|g' \
    -i.bak \
    /etc/yum.repos.d/CentOS-*.repo

# Build dependencies for compiling redis from source
RUN yum -y install cpp binutils glibc glibc-kernheaders glibc-common glibc-devel gcc make gcc-c++ libstdc++-devel tcl

WORKDIR /usr/local

COPY ./redis-7.2.1.tar.gz /usr/local

# Build redis and install it under /usr/local/redis7.2.1, keeping a copy of
# the default config next to the binaries
RUN tar -xzvf redis-7.2.1.tar.gz && \
    cd redis-7.2.1 && \
    make PREFIX=/usr/local/redis7.2.1 install && \
    cp ./redis.conf /usr/local/redis7.2.1/redis.conf

# Expose the redis port
EXPOSE 6379

# Exec form: redis-server runs as PID 1 and receives signals directly for a
# clean shutdown (the original shell form wrapped it in /bin/sh -c)
ENTRYPOINT ["/usr/local/redis7.2.1/bin/redis-server", "/usr/local/redis7.2.1/redis.conf"]

3.2 build

docker build -t redis7.2.1 .

3.3 run & verification

docker run -d -p 6379:6379 redis7.2.1
docker exec -it peaceful_fermi /bin/sh

四、 使用 docker 构建tomcat

4.1 Dockerfile

FROM centos:centos7.9.2009

# MAINTAINER is deprecated in modern Docker; LABEL is the supported form
LABEL maintainer="zhuang"

# # Optionally switch the CentOS 7 repos to a mirror (not needed here since
# # nothing is installed with yum below)
# RUN sed -e 's|^mirrorlist=|#mirrorlist=|g' \
# -e 's|^#baseurl=http://mirror.centos.org/centos|baseurl=https://mirror.nju.edu.cn/centos|g' \
# -i.bak \
# /etc/yum.repos.d/CentOS-*.repo

# ADD auto-extracts local tarballs into the target directory
ADD ./apache-tomcat-8.5.93.tar.gz /usr/local/
ADD ./jdk-8u202-linux-x64.tar.gz /usr/local/
#ADD ./setenv.sh /usr/local/

ENV MYPATH /usr/local/
WORKDIR $MYPATH

#RUN cp setenv.sh /usr/local/apache-tomcat-8.5.93/bin/

# Java and Tomcat environment variables.
# NOTE: Tomcat's startup scripts read CATALINA_HOME / CATALINA_BASE —
# the original misspelled them as "CATALINE_*", which Tomcat ignores.
ENV JAVA_HOME /usr/local/jdk1.8.0_202
ENV CLASSPATH $JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
ENV CATALINA_HOME /usr/local/apache-tomcat-8.5.93
ENV CATALINA_BASE /usr/local/apache-tomcat-8.5.93
ENV PATH $PATH:$JAVA_HOME/bin:$CATALINA_HOME/lib:$CATALINA_HOME/bin

# Expose Tomcat's HTTP port
EXPOSE 8080

# Start Tomcat, then tail the log so the container keeps a foreground process
CMD /usr/local/apache-tomcat-8.5.93/bin/startup.sh && tail -F /usr/local/apache-tomcat-8.5.93/logs/catalina.out

4.2 build

docker build -t tomcat8.5.93 .

4.3 run & verification

docker run -d -p 8080:8080 tomcat8.5.93

五、 常见问题

5.1 docker build 过程 yum install 报错

确保 FROM 的镜像 和 Dockerfile 里 yum 源版本对应起来。

一、 下载 docker 二进制文件

根据自己环境选择合适的版本:
https://download.docker.com/linux/static/stable/x86_64/

二、 解压到 /usr/local/docker

创建docker用户及组

groupadd docker
useradd -g docker docker

tar -xzvf docker-19.03.1.tgz
mv docker /usr/local/

三、 使用 systemctl 管理 docker 服务

文件引用自:
https://github.com/moby/moby/blob/master/contrib/init/systemd/docker.service

docker.service

# vim /usr/local/docker/docker.service
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target docker.socket firewalld.service
Wants=network-online.target
Requires=docker.socket

[Service]
Type=notify
# the default is not to use systemd for cgroups because the delegate issues still
# exists and systemd currently does not support the cgroup feature set required
# for containers run by docker
ExecStart=/usr/local/docker/dockerd
# ExecStart=/usr/local/docker/dockerd -H fd:// --containerd=/run/containerd/containerd.sock ## NOTE: the difference between this form and the one above is explained later in this document
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutStartSec=0
RestartSec=2
Restart=always

# Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229.
# Both the old, and new location are accepted by systemd 229 and up, so using the old location
# to make them work for either version of systemd.
StartLimitBurst=3

# Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230.
# Both the old, and new name are accepted by systemd 230 and up, so using the old name to make
# this option work for either version of systemd.
StartLimitInterval=60s

# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNPROC=infinity
LimitCORE=infinity

# Comment TasksMax if your systemd version does not support it.
# Only systemd 226 and above support this option.
TasksMax=infinity

# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes

# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500

[Install]
WantedBy=multi-user.target

docker.socket

# vim /usr/local/docker/docker.socket
[Unit]
Description=Docker Socket for the API

[Socket]
# If /var/run is not implemented as a symlink to /run, you may need to
# specify ListenStream=/var/run/docker.sock instead.
ListenStream=/var/run/docker.sock
SocketMode=0660
SocketUser=root
SocketGroup=docker

[Install]
WantedBy=sockets.target

3.1 配置 containerd 及相关环境

ln -s /usr/local/docker/containerd /usr/bin/
ln -s /usr/local/docker/dockerd /usr/bin/
ln -s /usr/local/docker/docker-init /usr/bin/
ln -s /usr/local/docker/docker-proxy /usr/bin/
ln -s /usr/local/docker/runc /usr/bin/

3.2 重载 systemd

systemctl daemon-reload

3.3 确保 network-online.target 和 firewalld.service 正常

systemctl status network-online.target
systemctl status firewalld.service

3.4 systemctl 配置 docker 并设置自启

systemctl enable docker.socket --now
systemctl enable docker.service --now

3.5 方便后续操作将 docker 等软连接到 PATH

ln -s /usr/local/docker/docker /usr/bin/
ln -s /usr/local/docker/dockerd /usr/bin/

四、 相关问题

4.1 为什么是 /usr/local/docker

答: 如果按照标准的FHS规范应该是/usr/bin等其他目录,选择/usr/local/docker目录的原因是方便后续的升级。

4.2 为什么要软连接containerd到/usr/bin

答:

4.3 两种不同的 ExecStart 写法的区别

第一种: ExecStart=/usr/local/docker/dockerd -H fd:// --containerd=/run/containerd/containerd.sock

  1. -H fd
    这个参数告诉 Docker 守护进程使用文件描述符 (fd) 监听 Docker 客户端的连接。
    这通常用于与 Docker 代理一起使用,以便在代理与 Docker 守护进程之间建立通信。
  2. --containerd=/run/containerd/containerd.sock
    这个参数指定 Docker 守护进程应该使用的 containerd 服务的 Unix 套接字。
    Containerd 是一个用于管理容器的基础设施组件,Docker 守护进程通常会与它一起工作。

第二种: ExecStart=/usr/local/docker/dockerd
未指定参数意味着 Docker 守护进程将使用默认配置来运行,通常会监听 Unix 套接字 /var/run/docker.sock 来接收 Docker 客户端的请求。
即我们在 docker.socket 里配置的路径ListenStream=/var/run/docker.sock

注意: 如果需要使用ExecStart=/usr/local/docker/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
需要将containerd注册成系统服务
并且在 After 和 Wants 中需要引用 containerd.service

4.4 systemctl 另外一种管理 docker 的配置写法

请注意这种写法需要将 containerd 注册成系统服务

docker.service

[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target docker.socket firewalld.service containerd.service time-set.target
Wants=network-online.target containerd.service
Requires=docker.socket

[Service]
Type=notify
WorkingDirectory=/usr/local/docker/
ExecStart=/usr/local/docker/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutStartSec=0
RestartSec=2
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
Delegate=yes
KillMode=process
OOMScoreAdjust=-500

[Install]
WantedBy=multi-user.target

docker.socket

[Unit]
Description=Docker Socket for the API

[Socket]
# If /var/run is not implemented as a symlink to /run, you may need to
# specify ListenStream=/var/run/docker.sock instead.
ListenStream=/var/run/docker.sock
SocketMode=0660
SocketUser=root
SocketGroup=docker

[Install]
WantedBy=sockets.target


ENV
OS: Debian GNU/Linux 12 (bookworm) x86_64

一、 xray-core 部署

1.1 下载

https://github.com/XTLS/Xray-core/releases
根据自己的环境选择合适的版本

1.2 安装

注意: 本文xray安装的目录不符合 FHS规范

sudo mkdir -p /usr/local/xray
sudo unzip Xray-linux-64.zip -d /usr/local/xray

1.3 使用systemd管理

# /usr/local/xray/xray.service (下面通过 ln -s 链接到 /etc/systemd/system/)
[Unit]
Description=Xray Service
Documentation=https://github.com/xtls
After=network.target nss-lookup.target

[Service]
User=root
CapabilityBoundingSet=CAP_NET_ADMIN CAP_NET_BIND_SERVICE
AmbientCapabilities=CAP_NET_ADMIN CAP_NET_BIND_SERVICE
NoNewPrivileges=true
ExecStart=/usr/local/xray/xray run -config /usr/local/xray/config.json
Restart=on-failure
RestartPreventExitStatus=23
LimitNPROC=10000
LimitNOFILE=1000000

[Install]
WantedBy=multi-user.target

ln -s /usr/local/xray/xray.service /etc/systemd/system/
sudo systemctl daemon-reload

1.4 使用脚本安装以符合FHS规范

bash -c "$(curl -L https://github.com/XTLS/Xray-install/raw/main/install-release.sh)" @ install

这个脚本在没有代理的情况下基本上无法实行,卡在下载过程,TMD这不是winrar.rar行为吗

手动使其符合FHS规范

按照下面目录移动文件,没有就新建

二、 V2RayA 部署

2.1 软件源安装

添加 V2RayA 公钥

wget -qO - https://apt.v2raya.org/key/public-key.asc | sudo tee /etc/apt/trusted.gpg.d/v2raya.asc

添加 V2RayA 源

echo "deb https://apt.v2raya.org/ v2raya main" | sudo tee /etc/apt/sources.list.d/v2raya.list

安装 V2RayA

sudo apt update && sudo apt install v2raya

2.2 deb 包安装

下载

https://github.com/v2rayA/v2rayA/releases

安装

sudo apt install ./$DOWNLOAD_DIR/installer_debian_amd64_2.0.5.deb

两种安装方式效果相同

三、 V2RayA 配置相关

确保V2RayA正常运行,xray可以不运行。

报错: 检测到geosite.dat, geoip.dat文件或v2ray-core 可能未正确配置

解决: 出现上述原因,大多数是因为 v2ray或xray没有按照 FHS的规范安装,导致 v2raya 找不到相关文件

linux 中有时候需要查看进程相关的文件情况,简单的情况下使用lsof -p $PID即可,但是某些情况下没有lsof这个命令。 这个时候我们可以在 /proc/$PID下面查看相关信息。

/proc 目录介绍

Linux 内核提供了一种通过 /proc 文件系统,在运行时访问内核内部数据结构、改变内核设置的机制。proc文件系统是一个伪文件系统,它只存在内存当中,而不占用外存空间。它以文件系统的方式为访问系统内核数据的操作提供接口。

PATH 说明
/proc/cpuinfo cpu的信息
/proc/stat 所有的CPU活动信息
/proc/devices 已经加载的设备并分类
/proc/filesystems 内核当前支持的文件系统类型
/proc/modules 所有加载到内核的模块列表
/proc/uptime 系统已经运行了多久
/proc/net 网卡设备信息

公司服务器被挖矿,已经好几次了,之前htop找到进程号, 然后lsof找到相应的文件,kill进程删除文件即可。
这次的病毒,有点不一样, 删除之后还会自启,最后发现是因为删除病毒后没有重启服务器,记录一下过程

服务器环境

CentOS 7.5 on PVE 8C 12G

一、 htop查看进程详情

USER: oracle
COMMAND: /bin/bash/systemd

二、 lsof 查看进程相关文件

lsof -p $PID

PATH: /var/tmp/x86_64(deleted)
外链: 45.10.20.137 (荷兰)

三、 netstat 查看外链详情

netstat -antlp

四、 /proc 查看进程详情

ls -ahl /proc/$PID

  • cwd 进程运行目录;
  • exe 执行程序的绝对路径; /var/tmp/x86_64(deleted)
  • cmdline 程序运行时输入的命令行命令; /bin/bash/systemd
  • environ 记录了进程运行时的环境变量;
  • fd 目录下是进程打开或使用的文件的符号连接

值得注意, 杀死进程又删除文件后还会自启,考虑PPID为1, 在清理完病毒之后重启服务器才彻底解决了问题。

五、 老朋友 xmrig

/var/tmp/x86_64 之前删除过一次, 但是病毒还是复活了, 现在已经没有这个目录了。

想到进程的用户是Oracle,那么到Oracle的家目录看下有没有收获。

果然在Oracle家目录 /home/oracle 发现了可疑的隐藏目录。 看到 xmrig 基本上可以确认就这里了。

XMrig 是一个开源的 CPU/GPU 挖矿软件,用于挖掘加密货币 Monero(XMR)以及其他基于 Cryptonight 算法的数字资产。它是一个跨平台的软件,可在多种操作系统上运行,包括 Windows、Linux 和 macOS。 balabalbal……

一、Hardware Requirements

Minimal:

  • CPU Cores: 2
  • Random-Access Memory: 4GB
  • Hard Disk: 20GB free

Recommended:

  • CPU Cores: 4
  • Random-Access Memory: 8GB
  • Hard Disk: 60GB free

二、 Prerequisites

install curl

yum install curl -y

sudo apt install curl -y

install docker

yum install -y yum-utils
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
yum install docker-ce docker-ce-cli containerd.io

sudo apt-get install ca-certificates curl gnupg
sudo install -m 0755 -d /etc/apt/keyrings
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
sudo chmod a+r /etc/apt/keyrings/docker.gpg

echo \
"deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \
"$(. /etc/os-release && echo "$VERSION_CODENAME")" stable" | \
sudo tee /etc/apt/sources.list.d/docker.list > /dev/null

sudo apt install docker.io

install docker-compose

yum install python3-pip -y
python3 -m pip install --user docker-compose

sudo apt install python3 python3-pip -y
python3 -m pip install --user docker-compose

三、 SET UP

Docker Compose File


services:
  vulnerability-tests:
    image: greenbone/vulnerability-tests
    environment:
      STORAGE_PATH: /var/lib/openvas/22.04/vt-data/nasl
    volumes:
      - vt_data_vol:/mnt

  notus-data:
    image: greenbone/notus-data
    volumes:
      - notus_data_vol:/mnt

  scap-data:
    image: greenbone/scap-data
    volumes:
      - scap_data_vol:/mnt

  cert-bund-data:
    image: greenbone/cert-bund-data
    volumes:
      - cert_data_vol:/mnt

  dfn-cert-data:
    image: greenbone/dfn-cert-data
    volumes:
      - cert_data_vol:/mnt
    depends_on:
      - cert-bund-data

  data-objects:
    image: greenbone/data-objects
    volumes:
      - data_objects_vol:/mnt

  report-formats:
    image: greenbone/report-formats
    volumes:
      - data_objects_vol:/mnt
    depends_on:
      - data-objects

  gpg-data:
    image: greenbone/gpg-data
    volumes:
      - gpg_data_vol:/mnt

  redis-server:
    image: greenbone/redis-server
    restart: on-failure
    volumes:
      - redis_socket_vol:/run/redis/

  pg-gvm:
    image: greenbone/pg-gvm:stable
    restart: on-failure
    volumes:
      - psql_data_vol:/var/lib/postgresql
      - psql_socket_vol:/var/run/postgresql

  gvmd:
    image: greenbone/gvmd:stable
    restart: on-failure
    volumes:
      - gvmd_data_vol:/var/lib/gvm
      - scap_data_vol:/var/lib/gvm/scap-data/
      - cert_data_vol:/var/lib/gvm/cert-data
      - data_objects_vol:/var/lib/gvm/data-objects/gvmd
      - vt_data_vol:/var/lib/openvas/plugins
      - psql_data_vol:/var/lib/postgresql
      - gvmd_socket_vol:/run/gvmd
      - ospd_openvas_socket_vol:/run/ospd
      - psql_socket_vol:/var/run/postgresql
    depends_on:
      pg-gvm:
        condition: service_started
      scap-data:
        condition: service_completed_successfully
      cert-bund-data:
        condition: service_completed_successfully
      dfn-cert-data:
        condition: service_completed_successfully
      data-objects:
        condition: service_completed_successfully
      report-formats:
        condition: service_completed_successfully

  gsa:
    image: greenbone/gsa:stable
    restart: on-failure
    ports:
      - "9392:80"
    volumes:
      - gvmd_socket_vol:/run/gvmd
    depends_on:
      - gvmd

  ospd-openvas:
    image: greenbone/ospd-openvas:stable
    restart: on-failure
    init: true
    hostname: ospd-openvas.local
    cap_add:
      - NET_ADMIN  # for capturing packages in promiscuous mode
      - NET_RAW  # for raw sockets e.g. used for the boreas alive detection
    security_opt:
      - seccomp=unconfined
      - apparmor=unconfined
    command:
      [
        "ospd-openvas",
        "-f",
        "--config",
        "/etc/gvm/ospd-openvas.conf",
        "--mqtt-broker-address",
        "mqtt-broker",
        "--notus-feed-dir",
        "/var/lib/notus/advisories",
        "-m",
        "666"
      ]
    volumes:
      - gpg_data_vol:/etc/openvas/gnupg
      - vt_data_vol:/var/lib/openvas/plugins
      - notus_data_vol:/var/lib/notus
      - ospd_openvas_socket_vol:/run/ospd
      - redis_socket_vol:/run/redis/
    depends_on:
      redis-server:
        condition: service_started
      gpg-data:
        condition: service_completed_successfully
      vulnerability-tests:
        condition: service_completed_successfully

  mqtt-broker:
    restart: on-failure
    image: greenbone/mqtt-broker
    ports:
      - "1883:1883"
    networks:
      default:
        aliases:
          - mqtt-broker
          - broker

  notus-scanner:
    restart: on-failure
    image: greenbone/notus-scanner:stable
    volumes:
      - notus_data_vol:/var/lib/notus
      - gpg_data_vol:/etc/openvas/gnupg
    environment:
      NOTUS_SCANNER_MQTT_BROKER_ADDRESS: mqtt-broker
      NOTUS_SCANNER_PRODUCTS_DIRECTORY: /var/lib/notus/products
    depends_on:
      - mqtt-broker
      - gpg-data
      - vulnerability-tests

  gvm-tools:
    image: greenbone/gvm-tools
    volumes:
      - gvmd_socket_vol:/run/gvmd
      - ospd_openvas_socket_vol:/run/ospd
    depends_on:
      - gvmd
      - ospd-openvas

volumes:
  gpg_data_vol:
  scap_data_vol:
  cert_data_vol:
  data_objects_vol:
  gvmd_data_vol:
  psql_data_vol:
  vt_data_vol:
  notus_data_vol:
  psql_socket_vol:
  gvmd_socket_vol:
  ospd_openvas_socket_vol:
  redis_socket_vol:

Downloading the Greenbone Community Containers

docker-compose -f $DOWNLOAD_DIR/docker-compose.yml -p greenbone-community-edition pull

Starting the Greenbone Community Containers

docker-compose -f $DOWNLOAD_DIR/docker-compose.yml -p greenbone-community-edition up -d

Show log messages of all services from the running containers

docker-compose -f $DOWNLOAD_DIR/docker-compose.yml -p greenbone-community-edition logs -f

一、 安装

系统 –> 软件包 –> 更新列表

搜索 frpc 根据实际安装

二、 luci界面报错

/usr/lib/lua/luci/model/cbi/frp/basic.lua:2: module 'luci.model.ipkg' not found:
no field package.preload['luci.model.ipkg']
no file './luci/model/ipkg.lua'
no file '/usr/share/lua/luci/model/ipkg.lua'
no file '/usr/share/lua/luci/model/ipkg/init.lua'
no file '/usr/lib/lua/luci/model/ipkg.lua'
no file '/usr/lib/lua/luci/model/ipkg/init.lua'
no file './luci/model/ipkg.so'
no file '/usr/lib/lua/luci/model/ipkg.so'
no file '/usr/lib/lua/loadall.so'
no file './luci.so'
no file '/usr/lib/lua/luci.so'
no file '/usr/lib/lua/loadall.so'
stack traceback:
[C]: in function 'require'
/usr/lib/lua/luci/model/cbi/frp/basic.lua:2: in function 'func'
/usr/lib/lua/luci/cbi.lua:66: in function 'load'
/usr/lib/lua/luci/dispatcher.lua:1385: in function '_cbi'
/usr/lib/lua/luci/dispatcher.lua:1056: in function 'dispatch'
/usr/lib/lua/luci/dispatcher.lua:483: in function </usr/lib/lua/luci/dispatcher.lua:482>

三、 问题解决

编辑文件 /usr/lib/lua/luci/model/cbi/frp/basic.lua

注释 require("luci.model.ipkg")