1. Environment Overview

Architecture diagram

[Architecture notes]: Harbor's Redis cache and PostgreSQL database components are moved outside the system and made highly available there; external shared storage provides data sharing across multiple Harbor instances, so the Harbor instances themselves can be scaled horizontally.
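A rough text sketch of the topology (addresses and ports are detailed in the sections below):

                        clients
                           |
           VIP 192.168.255.126 (Keepalived)
                           |
                Nginx stream LB (:88)
                  /                \
         harbor1 (:81)        harbor2 (:81)
                  \                /
        shared services behind both instances:
          - NFS share:       common:/harbordata
          - PostgreSQL:      harbor1 (primary) -> harbor2 (replica)
          - Redis Sentinel:  common:26379-26381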

Host inventory

IP Address         Hostname   Description

192.168.255.120    harbor1    harbor, pgsql primary, nginx

192.168.255.121    harbor2    harbor, pgsql replica, nginx

192.168.255.123    common     nfs-server, redis-sentinel

2. Host Initialization

  • Install Docker

Omitted here; see the single-node tutorial.

  • Install docker-compose

Omitted here; see the single-node tutorial.

3. Deploying the NFS Server

Install NFS on all nodes

yum -y install nfs-utils

Create the shared directory on the NFS server

mkdir -p /harbordata
chown nobody:nobody /harbordata
echo "/harbordata 192.168.255.0/24(rw,sync,no_root_squash)"  >> /etc/exports
systemctl restart nfs

Check the NFS exports from a client

showmount -e 192.168.255.123
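If the export is visible, the output looks like:

Export list for 192.168.255.123:
/harbordata 192.168.255.0/24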

Mount the NFS share on the Harbor servers

mkdir /harbordata
mount -t nfs 192.168.255.123:/harbordata /harbordata

cat <<EOF >> /etc/fstab
192.168.255.123:/harbordata   /harbordata    nfs    defaults    0 0
EOF
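A quick check that the share is mounted:

df -hT /harbordata    # should show an nfs/nfs4 filesystem served from 192.168.255.123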

4. Deploying Redis Sentinel

The directory layout is as follows (as implied by the build contexts and volume mounts in docker-compose.yml below):
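redis/
├── docker-compose.yml
├── redis-master/
│   └── data/
├── redis-slave1/
│   ├── Dockerfile
│   └── data/
├── redis-slave2/
│   ├── Dockerfile
│   └── data/
├── redis-sentinel/
│   ├── Dockerfile
│   └── sentinel.conf
├── redis-sentinel1/
│   ├── Dockerfile
│   └── sentinel.conf
└── redis-sentinel2/
    ├── Dockerfile
    └── sentinel.conf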

docker-compose.yml

version: "3.9"

services:
  redis-master:
    image: redis:5
    command: redis-server  --requirepass 123456  --masterauth 123456
    network_mode: "host"
    volumes:
      - ./redis-master/data:/data

  redis-slave1:
    build: ./redis-slave1
    command: redis-server --port 6380 --slaveof 192.168.255.123 6379 --masterauth 123456 --requirepass 123456
    network_mode: "host"
    volumes:
      - ./redis-slave1/data:/data
    depends_on:
      - redis-master

  redis-slave2:
    build: ./redis-slave2
    command: redis-server --port 6381 --slaveof 192.168.255.123 6379 --masterauth 123456 --requirepass 123456
    network_mode: "host"
    volumes:
      - ./redis-slave2/data:/data
    depends_on:
      - redis-master

  redis-sentinel:
    build: ./redis-sentinel
    depends_on:
      - redis-master
    network_mode: "host"

  redis-sentinel1:
    build: ./redis-sentinel1
    depends_on:
      - redis-master
    network_mode: "host"

  redis-sentinel2:
    build: ./redis-sentinel2
    depends_on:
      - redis-master
    network_mode: "host"

redis-slave1/Dockerfile

FROM redis:6.2

# each slave's Dockerfile exposes its own port (slave1: 6380, slave2: 6381)
EXPOSE 6380

redis-sentinel/Dockerfile

FROM redis:6.2

ENV SENTINEL_QUORUM 2
ENV SENTINEL_DOWN_AFTER 1000
ENV SENTINEL_FAILOVER 1000

WORKDIR /redis

COPY sentinel.conf .

RUN chown redis:redis /redis/* && \
    sed -i "s/SENTINEL_QUORUM/$SENTINEL_QUORUM/g" /redis/sentinel.conf && \
    sed -i "s/SENTINEL_DOWN_AFTER/$SENTINEL_DOWN_AFTER/g" /redis/sentinel.conf && \
    sed -i "s/SENTINEL_FAILOVER/$SENTINEL_FAILOVER/g" /redis/sentinel.conf

# each sentinel's Dockerfile exposes its own port (26379, 26380, or 26381)
EXPOSE 26379

CMD ["redis-server", "/redis/sentinel.conf", "--sentinel"]

redis-sentinel/sentinel.conf

# each sentinel's config uses its own port (26379, 26380, or 26381)
port 26379

dir /tmp

sentinel monitor redismaster 192.168.255.123 6379 SENTINEL_QUORUM
sentinel down-after-milliseconds redismaster SENTINEL_DOWN_AFTER
sentinel parallel-syncs redismaster 1
sentinel failover-timeout redismaster SENTINEL_FAILOVER
sentinel auth-pass redismaster 123456

Start the Redis sentinel cluster

# docker-compose up -d
# docker-compose ps
         NAME                     IMAGE                   COMMAND                  SERVICE              STATUS
------------------------------------------------------------------------------------------------------
redis-redis-master-1      redis:5                 "docker-entrypoint.s…"   redis-master         Up 7 days   
redis-redis-sentinel-1    redis-redis-sentinel    "docker-entrypoint.s…"   redis-sentinel       Up 7 days   
redis-redis-sentinel1-1   redis-redis-sentinel1   "docker-entrypoint.s…"   redis-sentinel1      Up 7 days   
redis-redis-sentinel2-1   redis-redis-sentinel2   "docker-entrypoint.s…"   redis-sentinel2      Up 7 days   
redis-redis-slave1-1      redis-redis-slave1      "docker-entrypoint.s…"   redis-slave1         Up 7 days   
redis-redis-slave2-1      redis-redis-slave2      "docker-entrypoint.s…"   redis-slave2         Up 7 days   

Check the sentinel status

[root@120 redis]# docker exec -it redis-redis-sentinel-1 /bin/bash
root@120:/redis# redis-cli -p 26379
127.0.0.1:26379> info sentinel
# Sentinel
sentinel_masters:1
sentinel_tilt:0
sentinel_running_scripts:0
sentinel_scripts_queue_length:0
sentinel_simulate_failure_flags:0
master0:name=redismaster,status=ok,address=192.168.255.123:6379,slaves=2,sentinels=3
127.0.0.1:26379> 
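To verify that failover actually works, stop the master container and ask any sentinel which master it now reports (container and master names as in the output above):

# stop the current master and give the sentinels a moment to fail over
docker stop redis-redis-master-1
sleep 5

# should now return one of the former slaves (port 6380 or 6381)
docker exec redis-redis-sentinel-1 redis-cli -p 26379 sentinel get-master-addr-by-name redismaster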

5. Deploying PostgreSQL Primary/Replica

Install PostgreSQL on both Harbor nodes (harbor1 runs the primary, harbor2 the replica)

yum install -y https://download.postgresql.org/pub/repos/yum/reporpms/EL-7-x86_64/pgdg-redhat-repo-latest.noarch.rpm

yum install -y postgresql12-server

/usr/pgsql-12/bin/postgresql-12-setup initdb

systemctl enable postgresql-12
systemctl start postgresql-12

Configure the primary

Edit /var/lib/pgsql/12/data/postgresql.conf and set listen_addresses as follows:

listen_addresses = '*'

Create the replication user

$ sudo -u postgres psql
psql (12.6)
Type "help" for help.

postgres=# CREATE USER rep_user WITH REPLICATION ENCRYPTED PASSWORD 'password';
CREATE ROLE
postgres=#

Append the following to pg_hba.conf on the primary to allow replication connections from the replica (192.168.255.121 is the replica's IP):

host    replication     rep_user        192.168.255.121/32            md5

Restart PostgreSQL

systemctl restart postgresql-12.service

Configure the replica

systemctl stop postgresql-12.service

cp -R /var/lib/pgsql/12/data /var/lib/pgsql/12/data_bak
rm -rf /var/lib/pgsql/12/data/*

sudo -u postgres pg_basebackup -h 192.168.255.120 -p 5432 -U rep_user -D /var/lib/pgsql/12/data/ -Fp -Xs -R

systemctl start postgresql-12.service
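On the replica, PostgreSQL should report that it is in recovery, i.e. streaming from the primary:

$ sudo -u postgres psql -c 'SELECT pg_is_in_recovery();'
 pg_is_in_recovery
-------------------
 t
(1 row)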

Check the replication status on the primary

$ sudo -u postgres psql
psql (12.6)
Type "help" for help.

postgres=# select usename, application_name, client_addr, state, sync_priority, sync_state from pg_stat_replication;
 usename  | application_name |   client_addr   |   state   | sync_priority | sync_state
----------+------------------+-----------------+-----------+---------------+------------
 rep_user | walreceiver      | 192.168.255.121 | streaming |             0 | async
(1 row)
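As an end-to-end check, create a throwaway table on the primary and confirm it shows up on the replica (repl_test is a hypothetical name):

# on the primary (192.168.255.120)
sudo -u postgres psql -c 'CREATE TABLE repl_test(id int);'

# on the replica (192.168.255.121): the table is visible, but the replica is read-only
sudo -u postgres psql -c '\d repl_test'

# clean up on the primary
sudo -u postgres psql -c 'DROP TABLE repl_test;'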

6. Creating the Harbor Databases

Connect to the external PostgreSQL instance and create the databases Harbor needs:

# sudo -u postgres psql
CREATE DATABASE notarysigner;
CREATE DATABASE notaryserver;
CREATE DATABASE harbor;

Create the harbor database user and grant privileges:

CREATE USER harbor;
ALTER USER harbor WITH ENCRYPTED PASSWORD '123456';
GRANT ALL PRIVILEGES ON DATABASE notaryserver TO harbor;
GRANT ALL PRIVILEGES ON DATABASE notarysigner TO harbor;
GRANT ALL PRIVILEGES ON DATABASE harbor TO harbor;

Append the following to pg_hba.conf on the primary to allow connections from the Harbor servers (192.168.255.120 and 192.168.255.121), then reload PostgreSQL (systemctl reload postgresql-12). Note that trust skips password authentication entirely; md5 would be stricter, since the harbor user does have a password.

host    all     all        192.168.255.120/32            trust
host    all     all        192.168.255.121/32            trust
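Once the new rules are loaded, a quick connectivity check from either Harbor node (the postgresql12 client is already installed there from section 5):

psql -h 192.168.255.120 -p 5432 -U harbor -d harbor -c '\conninfo'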

7. Deploying the Harbor Cluster

Installation is the same as a single-node deployment; only harbor.yml changes. The modified harbor.yml is shown below (for harbor2, change only hostname). Note that the HTTP port is set to 81 rather than the default 80: section 9 runs nginx on these same hosts listening on ports 80 and 88, and its upstream targets :81.

[root@120 harbor]# grep -E -v '^$|#' harbor.yml
hostname: 192.168.255.120
http:
  port: 81
harbor_admin_password: 123456
data_volume: /harbordata
trivy:
  ignore_unfixed: false
  skip_update: false
  skip_java_db_update: false
  offline_scan: false
  security_check: vuln
  insecure: false
jobservice:
  max_job_workers: 10
  job_loggers:
    - STD_OUTPUT
    - FILE
notification:
  webhook_job_max_retry: 3
log:
  level: info
  local:
    rotate_count: 50
    rotate_size: 200M
    location: /var/log/harbor
_version: 2.10.0
external_database:
   harbor:
     host: 192.168.255.120
     port: 5432
     db_name: harbor
     username: harbor
     password: 123456
     ssl_mode: disable
     max_idle_conns: 2
     max_open_conns: 0
   notary_signer:
     host: 192.168.255.120
     port: 5432
     db_name: notarysigner
     username: harbor
     password: 123456
     ssl_mode: disable
   notary_server:
     host: 192.168.255.120
     port: 5432
     db_name: notaryserver
     username: harbor
     password: 123456
     ssl_mode: disable
external_redis:
   host: 192.168.255.123:26379,192.168.255.123:26380,192.168.255.123:26381
   # password is the Redis password; a separate Sentinel password is not supported
   password: 123456
   sentinel_master_set: redismaster
   registry_db_index: 1
   jobservice_db_index: 2
   chartmuseum_db_index: 3
   trivy_db_index: 5
   idle_timeout_seconds: 30
proxy:
  http_proxy:
  https_proxy:
  no_proxy:
  components:
    - core
    - jobservice
    - trivy
upload_purging:
  enabled: true
  age: 168h
  interval: 24h
  dryrun: false
cache:
  enabled: false
  expire_hours: 24
[root@120 harbor]# 
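With harbor.yml in place, run the installer exactly as for a single node; a sketch, assuming the offline installer is unpacked under /root/harbor on each node:

cd /root/harbor
./prepare                      # render component configs from harbor.yml
./install.sh --with-trivy      # matches the trivy section in harbor.yml
docker-compose ps              # all harbor components should be Up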

8. Bidirectional Replication Between the Harbor Instances

1. Create a replication endpoint on each instance (Administration → Registries → New Endpoint), pointing at the other instance.

2. Create a replication rule on each instance (Administration → Replications → New Replication Rule), push-based and bound to that endpoint, so every push to one instance is replicated to the other.
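Both steps can also be scripted against Harbor's REST API instead of clicking through the UI; a sketch that registers harbor2 as an endpoint on harbor1 (credentials and ports follow the configuration above; the rule itself can be created the same way via /api/v2.0/replication/policies):

curl -u admin:123456 -X POST "http://192.168.255.120:81/api/v2.0/registries" \
  -H "Content-Type: application/json" \
  -d '{"name": "harbor2", "type": "harbor", "url": "http://192.168.255.121:81",
       "credential": {"type": "basic", "access_key": "admin", "access_secret": "123456"},
       "insecure": true}'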

9. High-Availability Configuration (Nginx + Keepalived)

Keepalived and Nginx provide the high-availability entry point for Harbor. Keepalived is installed on the harbor1 and harbor2 nodes to provide the VIP; Nginx forwards requests arriving at the VIP to the harbor backend group.

1. Install keepalived and nginx

Install on both harbor nodes.

Keepalived for Linux: a specific version can be downloaded from the official site if needed.

wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo

yum install -y nginx keepalived

yum -y install nginx-all-modules.noarch     # installs nginx's stream module

2. Configure nginx

nginx.conf (identical on both servers)

$ vim /etc/nginx/nginx.conf
user nginx;
worker_processes auto;   # size the worker process count automatically
error_log /var/log/nginx/error.log;
pid /run/nginx.pid;
include /usr/share/nginx/modules/*.conf;
 
events {
    worker_connections 1024;   # connections per worker process
}
 
# L4 load balancing in front of the two harbor instances
stream {
    log_format  main  '$remote_addr $upstream_addr - [$time_local] $status $upstream_bytes_sent';
    access_log  /var/log/nginx/harbor-access.log  main;
    upstream harbor {
       server 192.168.255.120:81;   # harbor1
       server 192.168.255.121:81;   # harbor2
    }
    server {
       listen  88;   # nginx shares these hosts with harbor (ports 80/81), so listen on 88 to avoid a conflict
       proxy_pass harbor;
    }
}
http {
    log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
                      '$status $body_bytes_sent "$http_referer" '
                      '"$http_user_agent" "$http_x_forwarded_for"';
    access_log  /var/log/nginx/access.log  main;
    sendfile            on;
    tcp_nopush          on;
    tcp_nodelay         on;
    keepalive_timeout   65;
    types_hash_max_size 2048;
    include             /etc/nginx/mime.types;
    default_type        application/octet-stream;
    server {
        listen       80 default_server;
        server_name  _;
        location / {
        }
    }
}
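Validate the configuration before reloading nginx:

nginx -t                   # syntax check
systemctl reload nginx     # apply the change if nginx is already running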

3. Configure keepalived

1. Primary node - harbor1
[root@harbor1 ~]# cat  /etc/keepalived/keepalived.conf
! Configuration File for keepalived
 
global_defs {
   notification_email {
     123456@qq.com
   }
   router_id master1
}
 
##### health check; declared before the vrrp_instance that references it
vrrp_script chk_nginx {
    script "/etc/keepalived/check_nginx.sh"
    interval 2
    weight -20
}

vrrp_instance lidabai {
    state MASTER
    interface ens33
    mcast_src_ip 192.168.255.120
    virtual_router_id 107
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.255.126/24  # the virtual IP (VIP)
    }
    track_script {
        chk_nginx
    }
}
2. Backup node - harbor2
[root@harbor2 ~]# cat  /etc/keepalived/keepalived.conf
! Configuration File for keepalived
 
global_defs {
   notification_email {
     123456@qq.com
   }
   router_id master2
}
 
##### health check; declared before the vrrp_instance that references it
vrrp_script chk_nginx {
    script "/etc/keepalived/check_nginx.sh"
    interval 2
    weight -20
}

vrrp_instance lidabai {
    state BACKUP
    interface ens33
    mcast_src_ip 192.168.255.121
    virtual_router_id 107   # must match the primary's virtual_router_id
    priority 80
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.255.126/24  # the virtual IP (VIP)
    }
    track_script {
        chk_nginx
    }
}
3. Write the health-check script

Perform the same steps on both nodes (harbor1 and harbor2).

$ vim /etc/keepalived/check_nginx.sh

#!/bin/bash
# 1. Check whether nginx is running
counter=$(ps -C nginx --no-header | wc -l)
if [ $counter -eq 0 ]; then
    # 2. If it is not, try to start it
    service nginx start
    sleep 2
    # 3. Check nginx again after waiting 2 seconds
    counter=$(ps -C nginx --no-header | wc -l)
    # 4. If nginx is still down, stop keepalived so the VIP fails over
    if [ $counter -eq 0 ]; then
        service keepalived stop
    fi
fi
$ chmod +x /etc/keepalived/check_nginx.sh
4. Start the services

Start the nginx service on both nodes (harbor1 and harbor2) first, then start keepalived:

[root@harbor1 ~]# systemctl enable --now nginx   # start nginx and enable it at boot
[root@harbor2 ~]# systemctl enable --now nginx

[root@harbor1 ~]# systemctl enable --now keepalived
[root@harbor2 ~]# systemctl enable --now keepalived
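The VIP should now be bound on the active node (interface ens33, per the keepalived config):

[root@harbor1 ~]# ip addr show ens33 | grep 192.168.255.126
    inet 192.168.255.126/24 scope global secondary ens33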

5. Verify

Browser access test

http://192.168.255.126:88

Command-line login test

docker login 192.168.255.126:88 -u admin -p 123456
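A simple failover drill: stop keepalived on the active node; the VIP should move to harbor2, and both tests above should still succeed through the VIP:

[root@harbor1 ~]# systemctl stop keepalived
[root@harbor2 ~]# ip addr show ens33 | grep 192.168.255.126    # the VIP has moved here
[root@harbor2 ~]# docker login 192.168.255.126:88 -u admin -p 123456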
