paulwong

          部署docker版的人工智能OPEN-WEBUI+OLLAMA+NGINX

          一鍵部署人工智能中的OPEN-WEBUI,OLLAMA, NGINX,也就是類似OPEN-AI的對(duì)話機(jī)器人
          docker-compose.yaml
          services:

            # Ollama service kept for reference; replaced by the vllm service below.
            # ollama:
            #   deploy:
            #     resources:
            #       reservations:
            #         devices:
            #           - driver: nvidia
            #             count: all
            #             capabilities:
            #               - gpu  # enable GPU acceleration
            #   volumes:
            #     - ollama-volume:/root/.ollama  # persist Ollama data on the host
            #     - /etc/localtime:/etc/localtime:ro
            #   container_name: ollama
            #   image: ollama/ollama
            #   restart: unless-stopped
            #   networks:
            #     - isolated  # Docker-internal network with no internet access
            #     - internet

            # vLLM exposes an OpenAI-compatible API that open-webui consumes.
            vllm:
              container_name: vllm
              image: vllm/vllm-openai:latest
              # ipc: host
              volumes:
                - ${HUGGINGFACE_MODELS_DIR}:/models
                - /etc/localtime:/etc/localtime:ro
              command: >
                --model /models/models--unsloth--llama-3-8b-Instruct-lawdata
                --served-model-name llama-3-8b-Instruct-lawdata
                --gpu-memory-utilization 0.90
                --max_model_len 1072
                --quantization bitsandbytes
                --load_format bitsandbytes
              # NOTE(review): vllm is attached only to the internal "isolated"
              # network; Docker does not publish ports for internal-only
              # networks, so "8000:8000" may be unreachable from the host —
              # confirm intent.
              ports:
                - "8000:8000"
              deploy:
                resources:
                  reservations:
                    devices:
                      - driver: nvidia
                        count: all
                        capabilities: [gpu]
              networks:
                - isolated  # Docker-internal network with no internet access

            # https://github.com/open-webui/open-webui
            open-webui:  # globally unique service name
              volumes:
                - open-webui-volume:/app/backend/data  # persist open-webui data on the host
                - /etc/localtime:/etc/localtime:ro
              container_name: open-webui
              restart: unless-stopped
              image: ghcr.io/open-webui/open-webui:main
              # network_mode: host
              ports:
                # Fixed: open-webui listens on 8080 inside the container (the
                # nginx config proxies to open-webui:8080), so the host port
                # must map to container port 8080, not 3000:3000.
                - "3000:8080"
              environment:
                # - OLLAMA_BASE_URL=http://ollama:11434  # reach ollama via its service name
                - ENABLE_OLLAMA_API=False
                # Fixed: removed the stray space before /v1 that corrupted the URL.
                - OPENAI_API_BASE_URL=http://vllm:8000/v1
                # Fixed: dropped the "/etc/localtime:/etc/localtime:ro" entry
                # here — it is a volume mount (already listed under volumes:),
                # not an environment variable.
                - LOG_LEVEL=DEBUG
              depends_on:
                # - ollama
                - vllm
              networks:
                - isolated

            # Nginx fronts open-webui with basic auth (.htpasswd) on port 81.
            nginx-webui:
              volumes:
                - ${NGINX_DATA_DIR}/html:/usr/share/nginx/html:ro
                - ${NGINX_DATA_DIR}/conf/nginx.conf:/etc/nginx/nginx.conf:ro
                - ${NGINX_DATA_DIR}/conf/conf.d/default.conf:/etc/nginx/conf.d/default.conf:ro
                - ${NGINX_DATA_DIR}/conf/.htpasswd:/etc/nginx/.htpasswd:ro
                - /etc/localtime:/etc/localtime:ro
                - ${NGINX_DATA_DIR}/log/access.log:/var/log/nginx/access.log
                - ${NGINX_DATA_DIR}/log/error.log:/var/log/nginx/error.log
              container_name: nginx-webui
              ports:
                - "81:81"
              image: nginx:latest
              # image: quay.io/ricardbejarano/nginx
              depends_on:
                - open-webui
              restart: unless-stopped
              networks:
                - isolated
                - internet

          volumes:
            # Named volumes bind-mounted onto host directories supplied via
            # the environment variables exported by 00_varible.sh.
            ollama-volume:
              driver: local
              driver_opts:
                type: none
                o: bind
                device: ${OLLAMA_DATA_DIR}
            open-webui-volume:
              driver: local
              driver_opts:
                type: none
                o: bind
                device: ${OPEN_WEBUI_DATA_DIR}

          networks:
            isolated:
              driver: bridge
              internal: true  # containers on this network have no outbound internet access
            internet:
              driver: bridge

          nginx.conf
          # Main nginx configuration for the nginx-webui container
          # (bind-mounted to /etc/nginx/nginx.conf, read-only).
          user  nginx;
          worker_processes  auto;

          error_log  /var/log/nginx/error.log warn;
          pid        /var/run/nginx.pid;

          events {
              worker_connections  1024;
          }

          http {
              include       /etc/nginx/mime.types;
              default_type  application/octet-stream;

              log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
                                '$status $body_bytes_sent "$http_referer" '
                                '"$http_user_agent" "$http_x_forwarded_for"';

              access_log  /var/log/nginx/access.log  main;

              sendfile        on;
              keepalive_timeout  65;

              include /etc/nginx/conf.d/*.conf;  # load the server configs from conf.d
          }

          docker/docker-nginx/data/conf/conf.d/default.conf
          # Disabled sample static-site server, kept for reference.
          # server {
          #     listen       80;
          #     server_name  example.com www.example.com;

          #     root   /usr/share/nginx/html;
          #     index  index.html index.htm;

          #     location / {
          #         try_files $uri $uri/ =404;
          #     }

          #     error_page   500 502 503 504  /50x.html;
          #     location = /50x.html {
          #         root   /usr/share/nginx/html;
          #     }
          # }
          # Reverse proxy: forward requests arriving on port 81 to the
          # open-webui container, which listens on 8080 inside the Docker
          # network (service name "open-webui" resolves via Docker DNS).
          server {
              listen 81;
              server_name localhost;

              location / {
                  proxy_pass http://open-webui:8080;
                  # proxy_pass http://localhost:8080;
                  proxy_set_header Host $host;
                  proxy_set_header X-Real-IP $remote_addr;
                  proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
                  proxy_set_header X-Forwarded-Proto $scheme;
              }

              # Proxy WebSocket requests: HTTP/1.1 plus the Upgrade/Connection
              # headers are required for the WebSocket handshake to pass through.
              location /ws/ {
                  proxy_pass http://open-webui:8080;
                  proxy_http_version 1.1;
                  proxy_set_header Upgrade $http_upgrade;
                  proxy_set_header Connection "Upgrade";
                  proxy_set_header Host $host;
                  proxy_set_header X-Real-IP $remote_addr;
                  proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
                  proxy_set_header X-Forwarded-Proto $scheme;
              }

              access_log /var/log/nginx/access.log;
              error_log /var/log/nginx/error.log;
          }

          00_varible.sh
          #!/bin/bash
          # Shared environment variables for the docker-compose stack; sourced
          # by the 01_-04_ helper scripts before invoking docker compose.

          # Resolve the absolute path of this script (disabled).
          # SCRIPT_PATH="$(realpath "$0")"
          # echo "當(dāng)前腳本的路徑是: $SCRIPT_PATH"

          # Resolve the directory containing this script (disabled).
          # SCRIPT_DIR="$(dirname "$SCRIPT_PATH")"
          # echo "當(dāng)前腳本所在的目錄是: $SCRIPT_DIR"
          # cd $SCRIPT_DIR

          # Optional HTTP(S) proxy settings (disabled).
          # export HTTP_PROXY=http://192.168.0.102:7890
          # export HTTPS_PROXY=https://192.168.0.102:7890


          # Root of the docker config tree; the data directories below hang off it.
          export DOCKER_ROOT_DIR=/home/paul/paulwong/work/workspaces/python-ai-project/docker
          export NGINX_DATA_DIR=${DOCKER_ROOT_DIR}/docker-nginx/data
          export OLLAMA_DATA_DIR=${DOCKER_ROOT_DIR}/docker-ollama/data
          export OPEN_WEBUI_DATA_DIR=${DOCKER_ROOT_DIR}/docker-webui/data
          # Host directory holding downloaded HuggingFace models, mounted into vllm.
          export HUGGINGFACE_MODELS_DIR=/home/paul/.cache/huggingface/models

          01_start-nginx-ollama-webui.sh
          #!/bin/bash
          # Recreate the whole stack: tear down any running containers, then
          # start them again in the foreground.

          # Resolve the absolute path of this script.
          SCRIPT_PATH="$(realpath "$0")"
          echo "當(dāng)前腳本的路徑是: $SCRIPT_PATH"

          # Run from the script's own directory so relative paths resolve.
          SCRIPT_DIR="$(dirname "$SCRIPT_PATH")"
          echo "當(dāng)前腳本所在的目錄是: $SCRIPT_DIR"
          # Fixed: quote the path (word-splitting on spaces) and abort on failure.
          cd "$SCRIPT_DIR" || exit 1

          # Load the environment variables the compose file interpolates.
          source ./00_varible.sh
          docker compose -f configs/docker-compose.yaml down
          docker compose -f configs/docker-compose.yaml up

          02_restart-nginx-ollama-webui.sh
          #!/bin/bash
          # Restart every service in the compose stack without recreating it.

          # Resolve the absolute path of this script.
          SCRIPT_PATH="$(realpath "$0")"
          echo "當(dāng)前腳本的路徑是: $SCRIPT_PATH"

          # Run from the script's own directory so relative paths resolve.
          SCRIPT_DIR="$(dirname "$SCRIPT_PATH")"
          echo "當(dāng)前腳本所在的目錄是: $SCRIPT_DIR"
          # Fixed: quote the path (word-splitting on spaces) and abort on failure.
          cd "$SCRIPT_DIR" || exit 1

          # Load the environment variables the compose file interpolates.
          source ./00_varible.sh
          docker compose -f configs/docker-compose.yaml restart

          03_login_ollama.sh
          #!/bin/bash
          # Open an interactive shell inside the running ollama container.
          # NOTE(review): the ollama service is commented out in
          # docker-compose.yaml, so this exec will fail while vllm is the
          # active backend — confirm whether this script is still needed.

          # Resolve the absolute path of this script.
          SCRIPT_PATH="$(realpath "$0")"
          echo "當(dāng)前腳本的路徑是: $SCRIPT_PATH"

          # Run from the script's own directory so relative paths resolve.
          SCRIPT_DIR="$(dirname "$SCRIPT_PATH")"
          echo "當(dāng)前腳本所在的目錄是: $SCRIPT_DIR"
          # Fixed: quote the path (word-splitting on spaces) and abort on failure.
          cd "$SCRIPT_DIR" || exit 1

          # Load the environment variables the compose file interpolates.
          source ./00_varible.sh
          docker compose -f configs/docker-compose.yaml exec ollama /bin/bash
          # echo ${DOCKER_ROOT_DIR}

          04_restart_open_webui.sh
          #!/bin/bash
          # Restart only the open-webui service of the compose stack.

          # Resolve the absolute path of this script.
          SCRIPT_PATH="$(realpath "$0")"
          echo "當(dāng)前腳本的路徑是: $SCRIPT_PATH"

          # Run from the script's own directory so relative paths resolve.
          SCRIPT_DIR="$(dirname "$SCRIPT_PATH")"
          echo "當(dāng)前腳本所在的目錄是: $SCRIPT_DIR"
          # Fixed: quote the path (word-splitting on spaces) and abort on failure.
          cd "$SCRIPT_DIR" || exit 1

          # Load the environment variables the compose file interpolates.
          source ./00_varible.sh
          docker compose -f configs/docker-compose.yaml restart open-webui
          # echo ${DOCKER_ROOT_DIR}

          posted on 2024-06-19 22:23 paulwong 閱讀(218) 評(píng)論(0)  編輯  收藏 所屬分類: AI-LLM

          主站蜘蛛池模板: 钟山县| 屯昌县| 安平县| 榆中县| 桐梓县| 清远市| 伊吾县| 汪清县| 内黄县| 崇明县| 平舆县| 贡嘎县| 阿合奇县| 内丘县| 东丽区| 聂荣县| 灵山县| 金乡县| 田东县| 沭阳县| 北川| 泸西县| 汝城县| 靖宇县| 拉孜县| 金坛市| 黎平县| 阿巴嘎旗| 建昌县| 灌阳县| 敖汉旗| 波密县| 庆安县| 岳阳县| 开阳县| 商都县| 浦江县| 如东县| 兖州市| 枣庄市| 正阳县|