#!/bin/bash
# This script only prepares the sync scripts; the sync process itself does not run during the build stage
# Create the data directory
mkdir -p ./data
# Define a helper that prints a file's MD5 hash
get_file_hash() {
    local file_path="$1"
    if [ -f "$file_path" ]; then
        md5sum "$file_path" | awk '{print $1}'
    else
        # Sentinel that never matches a real hash, so comparisons fail safely
        echo "file_not_found"
    fi
}
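# Illustrative sketch (not executed here): the sync loop defined below uses
# this helper for change detection, roughly:
#   local_hash=$(get_file_hash "./data/webui.db")
#   remote_hash=$(get_file_hash "/tmp/webui_remote.db")
#   [ "$local_hash" = "$remote_hash" ] && echo "unchanged, skip upload"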
# Create the Hugging Face sync script, optimized for low memory use
cat > /tmp/hf_sync.py << 'EOL'
from huggingface_hub import HfApi
import sys
import os
import gc
import shutil
import time
def manage_backups(api, repo_id, max_files=10):
    """Manage backups: keep only the newest max_files files (memory-optimized)."""
    try:
        files = api.list_repo_files(repo_id=repo_id, repo_type="dataset")
        backup_files = [f for f in files if f.startswith('webui_backup_') and f.endswith('.db')]
        # Group files by date (extracted from the file name)
        backup_by_date = {}
        for file in backup_files:
            try:
                date_part = file.split('_')[2].split('.')[0]
                backup_by_date[date_part] = file
            except IndexError:
                continue
        # Keep the newest max_files files
        sorted_dates = sorted(backup_by_date.keys(), reverse=True)
        if len(sorted_dates) > max_files:
            files_to_delete = [backup_by_date[date] for date in sorted_dates[max_files:]]
            # Delete files in batches to limit memory use
            batch_size = 3
            for i in range(0, len(files_to_delete), batch_size):
                batch = files_to_delete[i:i + batch_size]
                for file in batch:
                    api.delete_file(path_in_repo=file, repo_id=repo_id, repo_type="dataset")
                    print(f"Deleted old backup: {file}")
                # Force garbage collection
                gc.collect()
                # Pause between batches
                if i + batch_size < len(files_to_delete):
                    time.sleep(2)
    except Exception as e:
        print(f"Backup management error: {e}")
    finally:
        # Make sure garbage collection runs
        gc.collect()
def upload_backup(file_path, file_name, token, repo_id):
    """Upload a backup file to Hugging Face (memory-optimized)."""
    api = HfApi(token=token)
    try:
        # Check whether a file with the same name already exists
        try:
            files = api.list_repo_files(repo_id=repo_id, repo_type="dataset")
            if file_name in files:
                api.delete_file(path_in_repo=file_name, repo_id=repo_id, repo_type="dataset")
                print(f"Deleted existing file with the same name: {file_name}")
        except Exception as e:
            print(f"Error checking for existing file: {e}")
        gc.collect()
        # Upload the new file
        api.upload_file(
            path_or_fileobj=file_path,
            path_in_repo=file_name,
            repo_id=repo_id,
            repo_type="dataset"
        )
        print(f"Upload succeeded: {file_name}")
        # Prune old backups (optional)
        if os.environ.get("MANAGE_BACKUPS", "true").lower() == "true":
            manage_backups(api, repo_id)
    except Exception as e:
        print(f"Upload failed: {e}")
    finally:
        gc.collect()
def download_latest_backup(token, repo_id):
    """Download the latest backup from Hugging Face (memory-optimized)."""
    api = HfApi(token=token)
    try:
        files = api.list_repo_files(repo_id=repo_id, repo_type="dataset")
        backup_files = [f for f in files if f.startswith('webui_backup_') and f.endswith('.db')]
        if not backup_files:
            return False
        # Pick the newest file (by the date embedded in the file name)
        latest_file = max(backup_files, key=lambda x: x.split('_')[2].split('.')[0])
        file_path = api.hf_hub_download(
            repo_id=repo_id,
            filename=latest_file,
            repo_type="dataset"
        )
        if file_path and os.path.exists(file_path):
            os.makedirs('./data', exist_ok=True)
            shutil.copyfile(file_path, './data/webui.db')
            print(f"Restored from Hugging Face: {latest_file}")
            return True
        else:
            return False
    except Exception as e:
        print(f"Download failed: {e}")
        return False
    finally:
        gc.collect()
if __name__ == "__main__":
    try:
        action = sys.argv[1]
        token = sys.argv[2]
        repo_id = sys.argv[3]
        if action == "upload":
            file_path = sys.argv[4]
            file_name = sys.argv[5]
            upload_backup(file_path, file_name, token, repo_id)
        elif action == "download":
            success = download_latest_backup(token, repo_id)
            # Exit non-zero on failure so shell callers can chain with &&
            sys.exit(0 if success else 1)
    except Exception as e:
        print(f"Script error: {e}")
    finally:
        # Final garbage collection
        gc.collect()
EOL
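# For reference, the helper above is invoked by the sync functions below as
# (the date in the backup name is an illustrative value):
#   python /tmp/hf_sync.py upload   "$HF_TOKEN" "$DATASET_ID" "/tmp/webui_backup_20240101.db" "webui_backup_20240101.db"
#   python /tmp/hf_sync.py download "$HF_TOKEN" "$DATASET_ID"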
# Create the sync service startup script (run at container start, not at build time)
cat > /tmp/start_sync.sh << 'EOL'
#!/bin/bash
# Re-define get_file_hash here: this script runs in a fresh shell at container
# start, so functions defined in the build-time script are not available.
get_file_hash() {
    local file_path="$1"
    if [ -f "$file_path" ]; then
        md5sum "$file_path" | awk '{print $1}'
    else
        echo "file_not_found"
    fi
}
# Check the required environment variables
if [ -z "$WEBDAV_URL" ] || [ -z "$WEBDAV_USERNAME" ] || [ -z "$WEBDAV_PASSWORD" ]; then
    echo "Missing required environment variables: WEBDAV_URL, WEBDAV_USERNAME or WEBDAV_PASSWORD"
    export WEBDAV_ENABLED="false"
else
    export WEBDAV_ENABLED="true"
fi
if [ -z "$HF_TOKEN" ] || [ -z "$DATASET_ID" ]; then
    echo "Missing required environment variables: HF_TOKEN or DATASET_ID"
    export HF_ENABLED="false"
else
    export HF_ENABLED="true"
fi
# Initialize the data-restore strategy
echo "Initializing data restore..."
# Try to restore data, falling back through the available sources
restore_data() {
    # Try WebDAV first
    if [ "$WEBDAV_ENABLED" = "true" ]; then
        echo "Fetching file list from WebDAV..."
        # Extract bare backup file names from the PROPFIND response
        webdav_files=$(curl -s -X PROPFIND --user "$WEBDAV_USERNAME:$WEBDAV_PASSWORD" -H "Depth: 1" "$WEBDAV_URL/openwebui/" | grep -oE 'webui_[0-9]{8}\.db' | sort -u)
        if [ -n "$webdav_files" ]; then
            latest_file=$(echo "$webdav_files" | sort -r | head -n 1)
            download_url="$WEBDAV_URL/openwebui/$latest_file"
            curl -fL -o "./data/webui.db" --user "$WEBDAV_USERNAME:$WEBDAV_PASSWORD" "$download_url" && {
                echo "Downloaded latest database from WebDAV: $latest_file"
                return 0
            }
        fi
        echo "WebDAV restore failed"
    fi
    # If WebDAV failed, try Hugging Face
    if [ "$HF_ENABLED" = "true" ]; then
        echo "Trying to restore from Hugging Face..."
        python /tmp/hf_sync.py download "$HF_TOKEN" "$DATASET_ID" && {
            echo "Restored from Hugging Face"
            return 0
        }
    fi
    # All restore methods failed
    echo "All restore attempts failed, creating an empty database..."
    touch ./data/webui.db
    return 1
}
# WebDAV sync function (invoked on a fixed interval by the main loop below)
webdav_sync() {
    if [ "$WEBDAV_ENABLED" != "true" ]; then
        echo "WebDAV sync is disabled"
        return
    fi
    echo "Running WebDAV sync: $(date)"
    if [ ! -f "./data/webui.db" ]; then
        echo "webui.db not found, skipping sync"
        return
    fi
    # Build the dated file name (YYYYMMDD)
    current_date=$(date +'%Y%m%d')
    file_name="webui_${current_date}.db"
    upload_url="$WEBDAV_URL/openwebui/${file_name}"
    # Hash the local file
    local_hash=$(get_file_hash "./data/webui.db")
    # Hash the remote file via a temporary download (-f: fail on HTTP errors)
    remote_temp="/tmp/webui_remote.db"
    curl -sf -o "$remote_temp" --user "$WEBDAV_USERNAME:$WEBDAV_PASSWORD" "$upload_url"
    remote_hash=$(get_file_hash "$remote_temp")
    rm -f "$remote_temp"
    if [ "$local_hash" = "$remote_hash" ]; then
        echo "File unchanged, skipping WebDAV upload"
        return
    fi
    echo "File changed, uploading to WebDAV..."
    curl -fL -T "./data/webui.db" --user "$WEBDAV_USERNAME:$WEBDAV_PASSWORD" "$upload_url" && {
        echo "WebDAV upload succeeded: $file_name"
        # Update the main file (overwrite webui.db)
        main_url="$WEBDAV_URL/openwebui/webui.db"
        curl -fL -T "./data/webui.db" --user "$WEBDAV_USERNAME:$WEBDAV_PASSWORD" "$main_url" && {
            echo "Main file updated"
        } || {
            echo "Main file update failed"
        }
    } || {
        echo "WebDAV upload failed"
    }
    # Remove expired WebDAV files (keep the most recent 7 days)
    echo "Cleaning up expired WebDAV files..."
    webdav_files=$(curl -s -X PROPFIND --user "$WEBDAV_USERNAME:$WEBDAV_PASSWORD" -H "Depth: 1" "$WEBDAV_URL/openwebui/" | grep -oE 'webui_[0-9]{8}\.db' | sort -u)
    cleanup_days=7
    cutoff_date=$(date -d "-${cleanup_days} days" +%Y%m%d)
    for file in $webdav_files; do
        file_date=$(echo "$file" | grep -oE '[0-9]{8}')
        if [ -n "$file_date" ] && [ "$file_date" -lt "$cutoff_date" ]; then
            delete_url="$WEBDAV_URL/openwebui/$file"
            curl -X DELETE --user "$WEBDAV_USERNAME:$WEBDAV_PASSWORD" "$delete_url" && echo "Deleted expired file: $file"
        fi
    done
}
# Hugging Face sync function
hf_sync() {
    if [ "$HF_ENABLED" != "true" ]; then
        echo "Hugging Face sync is disabled"
        return
    fi
    echo "Running Hugging Face sync: $(date)"
    if [ ! -f "./data/webui.db" ]; then
        echo "Database file not found, skipping Hugging Face sync"
        return
    fi
    current_date=$(date +'%Y%m%d')
    backup_file="webui_backup_${current_date}.db"
    temp_path="/tmp/${backup_file}"
    cp "./data/webui.db" "$temp_path"
    echo "Uploading to Hugging Face..."
    python /tmp/hf_sync.py upload "$HF_TOKEN" "$DATASET_ID" "$temp_path" "$backup_file"
    rm -f "$temp_path"
}
# Main function
main() {
    # Restore data first
    restore_data
    # Set the sync interval (default: 2 hours)
    SYNC_INTERVAL=${SYNC_INTERVAL:-7200}
    echo "Sync interval set to ${SYNC_INTERVAL} seconds"
    # Run the syncs in a loop
    while true; do
        # Sleep before each pass so we do not sync immediately at startup
        sleep "$SYNC_INTERVAL"
        # Run the WebDAV sync
        if [ "$WEBDAV_ENABLED" = "true" ]; then
            webdav_sync
        fi
        # Flush writes; dropping page caches needs root, so ignore failures
        sync
        { echo 3 > /proc/sys/vm/drop_caches; } 2>/dev/null || true
        # Run the Hugging Face sync
        if [ "$HF_ENABLED" = "true" ]; then
            hf_sync
        fi
        # Flush writes again
        sync
        { echo 3 > /proc/sys/vm/drop_caches; } 2>/dev/null || true
    done
}
# Start the main function in the background (non-blocking)
main &
EOL
# Make the script executable
chmod +x /tmp/start_sync.sh
# Hook the sync service into startup: it runs when the container starts, not at build time
cat > /tmp/sync_starter.sh << 'EOL'
# Start the sync service once the container is up
/bin/bash /tmp/start_sync.sh &
EOL
# Note: the scripts are only prepared here, never executed at build time
echo "Sync scripts are ready; they will run when the container starts"