...
 
Commits (7)
https://gitcode.net/awesome-mirrors/OpenAtomFoundation/pika/-/commit/76d437d3ee0340e20c4708864d6523a0f345bc45 Add helm install based on kubeblocks (#1788) 2023-08-08T17:52:23+08:00 machinly <machinlyg@gmail.com>
    * helm: add part of pika cluster definition
https://gitcode.net/awesome-mirrors/OpenAtomFoundation/pika/-/commit/f25159c14887e100421e88650efbca5f0167810b ci: add nightly and release image build action (#1890) 2023-08-10T09:33:21+08:00 machinly <machinlyg@gmail.com>
https://gitcode.net/awesome-mirrors/OpenAtomFoundation/pika/-/commit/5fd92fd1637d0bfbbee341ae3ec8e6f0c601d89c fix lpop/rpop response (#1893) 2023-08-10T14:30:13+08:00 cheniujh <41671101+cheniujh@users.noreply.github.com>
    * fix lpop/rpop response
    * formatting: added space for "if"
    * formatting code
    Co-authored-by: cjh <1271435567@qq.com>
https://gitcode.net/awesome-mirrors/OpenAtomFoundation/pika/-/commit/1f219c117315afcac5bbb35a3e6f5195565cd448 Fix: info replication slow (#1898) 2023-08-10T17:30:36+08:00 chejinge <945997690@qq.com>
    * Update rsync_client.cc
https://gitcode.net/awesome-mirrors/OpenAtomFoundation/pika/-/commit/2446a445fe21b4a39be388b9b816699fec195dd4 Update README_CN.md (#1908) 2023-08-16T11:40:59+08:00 xiezheng-XD <141627292+xiezheng-XD@users.noreply.github.com>
    * Update README_CN.md
https://gitcode.net/awesome-mirrors/OpenAtomFoundation/pika/-/commit/a31c1016d02535daeb4d833e23c4dd33078bd1fd fix (#1922) 2023-08-23T12:54:26+08:00 chejinge <945997690@qq.com>
https://gitcode.net/awesome-mirrors/OpenAtomFoundation/pika/-/commit/5c6e25d36fdc335291b041655ef8dcd3a2af8085 fix: rsync rate limiting configuration (#1915) 2023-08-23T15:27:07+08:00 Mixficsol <838844609@qq.com>
    * Rsync Rate limiting configuration
    * Optimized code
    Co-authored-by: wuxianrong <wuxianrong@360.cn>
@@ -27,4 +27,32 @@ jobs:
- name: Test
run: |
cd codis && make -j
build_codis_image:
name: Build Codis Docker image
runs-on: ubuntu-latest
steps:
- name: Check out the repo
uses: actions/checkout@v3
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Extract metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@9ec57ed1fcdbf14dcef7dfbe97b2010124a938b7
with:
images: pikadb/codis
- name: Build Docker image
uses: docker/build-push-action@3b5e8027fcad23fda98b2e3ac259d8d67585f671
with:
context: ./codis
file: ./codis/Dockerfile
push: false
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
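For a local sanity check of the same build (a sketch; the `:dev` tag is illustrative, and nothing is pushed):

```bash
# Approximate local equivalent of the build_codis_image job above.
docker build -t pikadb/codis:dev -f ./codis/Dockerfile ./codis
```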
@@ -204,4 +204,33 @@ jobs:
run: |
cd ../tests/integration/
chmod +x integrate_test.sh
sh integrate_test.sh
build_pika_image:
name: Build Pika Docker image
runs-on: ubuntu-latest
steps:
- name: Check out the repo
uses: actions/checkout@v3
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Extract metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@9ec57ed1fcdbf14dcef7dfbe97b2010124a938b7
with:
images: pikadb/pika
- name: Build Docker image
uses: docker/build-push-action@3b5e8027fcad23fda98b2e3ac259d8d67585f671
with:
context: .
file: ./Dockerfile
push: false
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
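The pika job follows the same pattern with the repository root as the build context; locally that is roughly (tag illustrative):

```bash
# Approximate local equivalent of the build_pika_image job.
docker build -t pikadb/pika:dev -f ./Dockerfile .
```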
name: Publish Docker image Nightly
on:
schedule:
- cron: '0 2 * * *' # run at 2 AM UTC
jobs:
push_pika_to_registry:
name: Push Pika Docker image to Docker Hub
runs-on: ubuntu-latest
steps:
- name: Check out the repo
uses: actions/checkout@v3
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Log in to Docker Hub
uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Extract metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@9ec57ed1fcdbf14dcef7dfbe97b2010124a938b7
with:
images: pikadb/pika-dev-nightly
tags: |
type=schedule,prefix={{branch}},pattern={{date 'YYYYMMDD'}}
- name: Build and push Docker image
uses: docker/build-push-action@3b5e8027fcad23fda98b2e3ac259d8d67585f671
with:
context: .
platforms: linux/amd64,linux/arm64
file: ./Dockerfile
push: true
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
push_codis_to_registry:
name: Push Codis Docker image to Docker Hub
runs-on: ubuntu-latest
steps:
- name: Check out the repo
uses: actions/checkout@v3
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Log in to Docker Hub
uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Extract metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@9ec57ed1fcdbf14dcef7dfbe97b2010124a938b7
with:
images: pikadb/codis-dev-nightly
tags: |
type=schedule,prefix={{branch}},pattern={{date 'YYYYMMDD'}}
- name: Build and push Docker image
uses: docker/build-push-action@3b5e8027fcad23fda98b2e3ac259d8d67585f671
with:
context: ./codis
platforms: linux/amd64,linux/arm64
file: ./codis/Dockerfile
push: true
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
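With this cron schedule, the `type=schedule` rule derives each tag from the branch name plus the build date. The exact string is up to docker/metadata-action, but a nightly built from master on 2023-08-23 should be pullable roughly as:

```bash
docker pull pikadb/pika-dev-nightly:master20230823
docker pull pikadb/codis-dev-nightly:master20230823
```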
name: Publish Docker image
on:
release:
types: [published]
jobs:
push_pika_to_registry:
name: Push Pika Docker image to Docker Hub
runs-on: ubuntu-latest
steps:
- name: Check out the repo
uses: actions/checkout@v3
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Log in to Docker Hub
uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Extract metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@9ec57ed1fcdbf14dcef7dfbe97b2010124a938b7
with:
images: pikadb/pika
- name: Build and push Docker image
uses: docker/build-push-action@3b5e8027fcad23fda98b2e3ac259d8d67585f671
with:
context: .
platforms: linux/amd64,linux/arm64
file: ./Dockerfile
push: true
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
push_codis_to_registry:
name: Push Codis Docker image to Docker Hub
runs-on: ubuntu-latest
steps:
- name: Check out the repo
uses: actions/checkout@v3
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Log in to Docker Hub
uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Extract metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@9ec57ed1fcdbf14dcef7dfbe97b2010124a938b7
with:
images: pikadb/codis
- name: Build and push Docker image
uses: docker/build-push-action@3b5e8027fcad23fda98b2e3ac259d8d67585f671
with:
context: ./codis
platforms: linux/amd64,linux/arm64
file: ./codis/Dockerfile
push: true
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
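Since no explicit tag rules are given here, docker/metadata-action falls back to its defaults for a `release` event, which derive the tag from the git ref. Publishing a release tagged `v3.5.0` (an illustrative version) should therefore yield:

```bash
docker pull pikadb/pika:v3.5.0
docker pull pikadb/codis:v3.5.0
```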
@@ -4,7 +4,8 @@ LABEL maintainer="SvenDowideit@home.org.au, zhangshaomin_1990@126.com"
ENV PIKA=/pika \
PIKA_BUILD_DIR=/tmp/pika \
-    PATH=${PIKA}:${PIKA}/bin:${PATH}
+    PATH=${PIKA}:${PIKA}/bin:${PATH} \
+    BUILD_TYPE=RelWithDebInfo
ARG ENABLE_PROXY=false
@@ -18,16 +19,20 @@ RUN apt-get update && apt-get install -y \
build-essential \
git \
cmake \
-    autoconf
+    autoconf \
+    clang-tidy-12
WORKDIR ${PIKA_BUILD_DIR}
COPY . ${PIKA_BUILD_DIR}
-RUN ${PIKA_BUILD_DIR}/build.sh
+RUN cmake -B ${PIKA_BUILD_DIR}/build -DCMAKE_BUILD_TYPE=${BUILD_TYPE} -DUSE_PIKA_TOOLS=OFF
+RUN cmake --build ${PIKA_BUILD_DIR}/build --config ${BUILD_TYPE}
FROM ubuntu:22.04
LABEL maintainer="SvenDwideit@home.org.au, zhangshaomin_1990@126.com"
ARG ENABLE_PROXY=false
RUN if [ "$ENABLE_PROXY" = "true" ] ; \
@@ -47,7 +52,7 @@ ENV PIKA=/pika \
WORKDIR ${PIKA}
-COPY --from=builder ${PIKA_BUILD_DIR}/output/pika ${PIKA}/bin/pika
+COPY --from=builder ${PIKA_BUILD_DIR}/build/pika ${PIKA}/bin/pika
COPY --from=builder ${PIKA_BUILD_DIR}/entrypoint.sh /entrypoint.sh
COPY --from=builder ${PIKA_BUILD_DIR}/conf/pika.conf ${PIKA}/conf/pika.conf
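A quick local exercise of this multi-stage build (a sketch; `pika:dev` is an arbitrary tag, and `ENABLE_PROXY=true` merely switches package downloads to the Aliyun/goproxy mirrors):

```bash
docker build --build-arg ENABLE_PROXY=false -t pika:dev .
docker run -d -p 9221:9221 pika:dev
redis-cli -p 9221 info
```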
# Pika
## Introduction [English](https://github.com/Qihoo360/pika/blob/master/README.md)
Pika is a persistent, large-capacity Redis-compatible storage service. It is compatible with the vast majority of the string, hash, list, zset and set interfaces ([compatibility details](https://github.com/Qihoo360/pika/wiki/pika-支持的redis接口及兼容情况)), and removes the capacity bottleneck of Redis running out of memory on huge data sets. Like Redis, Pika supports master-slave replication via the slaveof command, with both full and partial sync. Pika can also be used behind twemproxy or codis for static data sharding (Pika now supports codis' dynamic slot migration, already merged into the master branch; you are welcome to use it, and thanks to [left2right](https://github.com/left2right) and [fancy-rabbit](https://github.com/fancy-rabbit) for their PRs)
## Pika Users
@@ -44,7 +44,7 @@
## Features

* Large capacity: supports hundreds of gigabytes of data
* Redis-compatible: migrate from Redis to Pika smoothly without changing application code
* Supports master-slave mode (slaveof)
* Comprehensive [operations](https://github.com/Qihoo360/pika/wiki/pika的一些管理命令方式说明) commands
@@ -52,7 +52,7 @@
### Using the binary package

You can download the latest binary release from [releases](https://github.com/Qihoo360/pika/releases) and use it directly.
### Compilation
@@ -97,7 +97,7 @@
c. scl enable devtoolset-7 bash
```
For the first build, it is recommended to use the build script `build.sh`, which checks whether the software required for compilation is present on the machine.
```
./build.sh
```
@@ -119,9 +119,9 @@
## Cleaning the build

If you need to clean up the build output, use one of the following two methods depending on the situation:
```
1. Run cd output && make clean to clean Pika's build output
2. Run rm -fr output to remove the output directory and regenerate the CMake files (usually for a complete rebuild)
```
@@ -149,7 +149,7 @@ redis-cli -p 9221 "info"
The script accepts several optional arguments (see the example after this list):

- `-t tag`: specify the Docker tag for the image. By default the tag is `pikadb/pika:<git tag>`.
- `-p platform`: specify the platform of the Docker image. Options are `all`, `linux/amd64`, `linux/arm`, `linux/arm64`; by default the current docker platform setting is used.
- `--proxy`: use a proxy to download packages to speed up the build; the Aliyun mirror sources are used during the build.
- `--help`: show the help message.
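A combined invocation might look like the following (a sketch; the script name `build_docker.sh` is assumed here, as the script itself is not shown in this excerpt):

```bash
# Build an image tagged pikadb/pika:v3.5.0 for linux/amd64, using Aliyun mirrors.
./build_docker.sh -t pikadb/pika:v3.5.0 -p linux/amd64 --proxy
```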
@@ -161,8 +161,8 @@ redis-cli -p 9221 "info"
### Deploying with pika-operator

With `pika-operator` you can easily deploy a single-instance `pika` in a Kubernetes environment.
__Do not use this feature in production.__

Local installation:
-FROM golang:1.8
+FROM golang:1.20 AS builder

RUN apt-get update
RUN apt-get install -y autoconf

ARG ENABLE_PROXY=false

-ENV GOPATH /gopath
-ENV CODIS ${GOPATH}/src/github.com/CodisLabs/codis
-ENV PATH ${GOPATH}/bin:${PATH}:${CODIS}/bin
-COPY . ${CODIS}

RUN if [ "$ENABLE_PROXY" = "true" ] ; \
    then go env -w GOPROXY=https://goproxy.io,direct; \
    fi

-RUN make -C ${CODIS} distclean
-RUN make -C ${CODIS} build-all
+WORKDIR /codis
+COPY . /codis
+RUN go env && make build-all
FROM ubuntu:22.04
ARG ENABLE_PROXY=false
RUN if [ "$ENABLE_PROXY" = "true" ] ; \
then sed -i 's/http:\/\/archive.ubuntu.com/http:\/\/mirrors.aliyun.com/g' /etc/apt/sources.list ; \
sed -i 's/http:\/\/ports.ubuntu.com/http:\/\/mirrors.aliyun.com/g' /etc/apt/sources.list ; \
fi
RUN apt-get update && \
apt-get install -y ca-certificates && \
rm -rf /var/lib/apt/lists/*
COPY --from=builder /codis/bin /codis/bin
COPY --from=builder /codis/config /codis/config
COPY --from=builder /codis/scripts /codis/scripts
COPY --from=builder /codis/admin /codis/admin
WORKDIR /codis
@@ -410,3 +410,7 @@ default-slot-num : 1024
# blob-num-shard-bits default -1, the number of bits from cache keys to be use as shard id.
# The cache will be sharded into 2^blob-num-shard-bits shards.
# blob-num-shard-bits : -1
# Rsync Rate limiting configuration
throttle-bytes-per-second : 307200000
max-rsync-parallel-num : 4
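PR #1915 also wires both items into CONFIG GET/SET (see the pika_admin.cc hunks below), so they can be inspected and tuned at runtime; note that 307200000 bytes/s is roughly 293 MiB/s:

```bash
redis-cli -p 9221 config get throttle-bytes-per-second
redis-cli -p 9221 config set throttle-bytes-per-second 104857600   # ~100 MiB/s
redis-cli -p 9221 config get max-rsync-parallel-num
```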
@@ -450,16 +450,16 @@ class HelloCmd : public Cmd {
};
class DiskRecoveryCmd : public Cmd {
 public:
  DiskRecoveryCmd(const std::string& name, int arity, uint16_t flag) : Cmd(name, arity, flag) {}
  void Do(std::shared_ptr<Slot> slot = nullptr) override;
  void Split(std::shared_ptr<Slot> slot, const HintKeys& hint_keys) override{};
  void Merge() override{};
  Cmd* Clone() override { return new DiskRecoveryCmd(*this); }

 private:
  void DoInitial() override;
  std::map<std::string, uint64_t> background_errors_;
};
#ifdef WITH_COMMAND_DOCS
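Assuming the command is registered under the name `diskrecovery` (inferred from the class name; the registration is not part of this excerpt), it would be issued like any other Pika command:

```bash
# Hypothetical invocation; the command name is an assumption.
redis-cli -p 9221 diskrecovery
```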
@@ -328,6 +328,15 @@ class PikaConf : public pstd::BaseConf {
int64_t blob_cache() { return blob_cache_; }
int64_t blob_num_shard_bits() { return blob_num_shard_bits_; }
// Rsync Rate limiting configuration
int throttle_bytes_per_second() {
std::shared_lock l(rwlock_);
return throttle_bytes_per_second_;
}
int max_rsync_parallel_num() {
std::shared_lock l(rwlock_);
return max_rsync_parallel_num_;
}
// Immutable config items, we don't use lock.
bool daemonize() { return daemonize_; }
std::string pidfile() { return pidfile_; }
@@ -544,6 +553,19 @@ class PikaConf : public pstd::BaseConf {
log_level_ = value;
}
// Rsync Rate limiting configuration
void SetThrottleBytesPerSecond(const int value) {
std::lock_guard l(rwlock_);
TryPushDiffCommands("throttle-bytes-per-second", std::to_string(value));
throttle_bytes_per_second_ = value;
}
void SetMaxRsyncParallelNum(const int value) {
std::lock_guard l(rwlock_);
TryPushDiffCommands("max-rsync-parallel-num", std::to_string(value));
max_rsync_parallel_num_ = value;
}
pstd::Status DBSlotsSanityCheck(const std::string& db_name, const std::set<uint32_t>& slot_ids,
bool is_add);
pstd::Status AddDBSlots(const std::string& db_name, const std::set<uint32_t>& slot_ids);
@@ -666,6 +688,10 @@ class PikaConf : public pstd::BaseConf {
std::unique_ptr<PikaMeta> local_meta_;
std::shared_mutex rwlock_;
// Rsync Rate limiting configuration
int throttle_bytes_per_second_ = 307200000;
int max_rsync_parallel_num_ = 4;
};
#endif
@@ -29,6 +29,8 @@
#include "include/throttle.h"
#include "rsync_service.pb.h"
extern std::unique_ptr<PikaConf> g_pika_conf;
const std::string kDumpMetaFileName = "DUMP_META_DATA";
const std::string kUuidPrefix = "snapshot-uuid:";
@@ -50,6 +52,7 @@
void* ThreadMain() override;
void Copy(const std::set<std::string>& file_set, int index);
bool Init();
int GetParallelNum();
Status Start();
Status Stop();
bool IsRunning() {
@@ -93,9 +96,9 @@
std::condition_variable cond_;
std::mutex mu_;
-  std::unique_ptr<Throttle> throttle_;
std::string master_ip_;
int master_port_;
int parallel_num_;
};
class RsyncWriter {
@@ -155,7 +158,7 @@
pstd::Status s = Status::Timeout("rsync timeout", "timeout");
{
std::unique_lock<std::mutex> lock(mu_);
-      auto cv_s = cond_.wait_for(lock, std::chrono::seconds(3), [this] {
+      auto cv_s = cond_.wait_for(lock, std::chrono::seconds(1), [this] {
return resp_ != nullptr;
});
if (!cv_s) {
@@ -8,6 +8,9 @@
#include <atomic>
#include "pstd/include/pstd_mutex.h"
#include "pika_conf.h"
extern std::unique_ptr<PikaConf> g_pika_conf;
namespace rsync {
class Throttle {
@@ -17,6 +20,10 @@ class Throttle {
~Throttle();
size_t ThrottledByThroughput(size_t bytes);
void ReturnUnusedThroughput(size_t acquired, size_t consumed, size_t elaspe_time_us);
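  // Lazily constructed on first use with the throughput configured in pika.conf
  // (throttle-bytes-per-second); the single instance is shared by all rsync workers.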
static Throttle& GetInstance() {
static Throttle instance(g_pika_conf->throttle_bytes_per_second(), 10);
return instance;
}
private:
std::atomic<size_t> throttle_throughput_bytes_ = 100 * 1024 * 1024;
@@ -1837,6 +1837,18 @@ void ConfigCmd::ConfigGet(std::string& ret) {
EncodeString(&config_body, g_pika_conf->slave_read_only() ? "yes" : "no");
}
if (pstd::stringmatch(pattern.data(), "throttle-bytes-per-second", 1) != 0) {
elements += 2;
EncodeString(&config_body, "throttle-bytes-per-second");
EncodeNumber(&config_body, g_pika_conf->throttle_bytes_per_second());
}
if (pstd::stringmatch(pattern.data(), "max-rsync-parallel-num", 1) != 0) {
elements += 2;
EncodeString(&config_body, "max-rsync-parallel-num");
EncodeNumber(&config_body, g_pika_conf->max_rsync_parallel_num());
}
std::stringstream resp;
resp << "*" << std::to_string(elements) << "\r\n" << config_body;
ret = resp.str();
@@ -1879,6 +1891,8 @@ void ConfigCmd::ConfigSet(std::string& ret) {
EncodeString(&ret, "write-buffer-size");
EncodeString(&ret, "max-write-buffer-num");
EncodeString(&ret, "arena-block-size");
EncodeString(&ret, "throttle-bytes-per-second");
EncodeString(&ret, "max-rsync-parallel-num");
return;
}
long int ival;
@@ -2161,6 +2175,20 @@
}
g_pika_conf->SetArenaBlockSize(static_cast<int>(ival));
ret = "+OK\r\n";
} else if (set_item == "throttle-bytes-per-second") {
if ((pstd::string2int(value.data(), value.size(), &ival) == 0) || ival <= 0) {
ret = "-ERR Invalid argument \'" + value + "\' for CONFIG SET 'throttle-bytes-per-second'\r\n";
return;
}
g_pika_conf->SetThrottleBytesPerSecond(static_cast<int>(ival));
ret = "+OK\r\n";
} else if (set_item == "max-rsync-parallel-num") {
if ((pstd::string2int(value.data(), value.size(), &ival) == 0) || ival > kMaxRsyncParallelNum) {
ret = "-ERR Invalid argument \'" + value + "\' for CONFIG SET 'max-rsync-parallel-num'\r\n";
return;
}
g_pika_conf->SetMaxRsyncParallelNum(static_cast<int>(ival));
ret = "+OK\r\n";
} else {
ret = "-ERR Unsupported CONFIG parameter: " + set_item + "\r\n";
}
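The branches above reject non-positive throughput values and any parallelism above the compile-time cap `kMaxRsyncParallelNum`; an illustrative session (assuming 4 is within the cap):

```bash
redis-cli -p 9221 config set max-rsync-parallel-num 4
# OK
redis-cli -p 9221 config set throttle-bytes-per-second 0
# (error) ERR Invalid argument '0' for CONFIG SET 'throttle-bytes-per-second'
```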
@@ -601,6 +601,17 @@ int PikaConf::Load() {
GetConfInt64("blob-num-shard-bits", &blob_num_shard_bits_);
// throttle-bytes-per-second
GetConfInt("throttle-bytes-per-second", &throttle_bytes_per_second_);
if (throttle_bytes_per_second_ <= 0) {
throttle_bytes_per_second_ = 307200000;
}
GetConfInt("max-rsync-parallel-num", &max_rsync_parallel_num_);
if (max_rsync_parallel_num_ <= 0) {
max_rsync_parallel_num_ = 4;
}
return ret;
}
void PikaConf::TryPushDiffCommands(const std::string& command, const std::string& value) {
@@ -641,6 +652,8 @@ int PikaConf::ConfigRewrite() {
SetConfInt64("manually-resume-interval", resume_check_interval_);
SetConfDouble("min-check-resume-ratio", min_check_resume_ratio_);
SetConfInt("slave-priority", slave_priority_);
SetConfInt("throttle-bytes-per-second", throttle_bytes_per_second_);
SetConfInt("max-rsync-parallel-num", max_rsync_parallel_num_);
SetConfInt("sync-window-size", sync_window_size_.load());
SetConfInt("consensus-level", consensus_level_.load());
SetConfInt("replication-num", replication_num_.load());
@@ -333,7 +333,9 @@ void LPopCmd::Do(std::shared_ptr<Slot> slot) {
rocksdb::Status s = slot->db()->LPop(key_, count_, &elements);
if (s.ok()) {
-    res_.AppendArrayLenUint64(elements.size());
+    if (elements.size() > 1) {
+      res_.AppendArrayLenUint64(elements.size());
+    }
for (const auto& element : elements) {
res_.AppendString(element);
}
@@ -558,11 +560,13 @@ void RPopCmd::DoInitial() {
}
}
void RPopCmd::Do(std::shared_ptr<Slot> slot) {
std::vector<std::string> elements;
rocksdb::Status s = slot->db()->RPop(key_, count_, &elements);
if (s.ok()) {
-    res_.AppendArrayLenUint64(elements.size());
+    if (elements.size() > 1) {
+      res_.AppendArrayLenUint64(elements.size());
+    }
    for (const auto& element : elements) {
res_.AppendString(element);
}
} else if (s.IsNotFound()) {
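After this fix the reply shapes mirror Redis: plain `LPOP`/`RPOP` answers with a single bulk string, while the count form answers with an array. An illustrative session:

```bash
redis-cli -p 9221 rpush mylist a b c
redis-cli -p 9221 lpop mylist       # "a"             (single bulk string)
redis-cli -p 9221 lpop mylist 2     # 1) "b" 2) "c"   (array reply)
```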
@@ -206,14 +206,6 @@ bool Slot::TryUpdateMasterOffset() {
<< ", master_ip: " << master_ip << ", master_port: " << master_port << ", filenum: " << filenum
<< ", offset: " << offset << ", term: " << term << ", index: " << index;
-  // Sanity check
-  if (master_ip != slave_slot->MasterIp() || master_port != slave_slot->MasterPort()) {
-    LOG(WARNING) << "Slot: " << slot_name_ << " Error master node ip port: " << master_ip << ":"
-                 << master_port;
-    slave_slot->SetReplState(ReplState::kError);
-    return false;
-  }
pstd::DeleteFile(info_path);
if (!ChangeDb(dbsync_path_)) {
LOG(WARNING) << "Slot: " << slot_name_ << ", Failed to change db";
......
@@ -18,18 +18,17 @@ using namespace RsyncService;
extern PikaServer* g_pika_server;
const int kFlushIntervalUs = 10 * 1000 * 1000;
-const int kThrottleBytesPerSecond = 300 << 20;
const int kBytesPerRequest = 4 << 20;
const int kThrottleCheckCycle = 10;
namespace rsync {
RsyncClient::RsyncClient(const std::string& dir, const std::string& db_name, const uint32_t slot_id)
: snapshot_uuid_(""), dir_(dir), db_name_(db_name), slot_id_(slot_id),
-      state_(IDLE), max_retries_(10), master_ip_(""), master_port_(0) {
+      state_(IDLE), max_retries_(10), master_ip_(""), master_port_(0),
+      parallel_num_(g_pika_conf->max_rsync_parallel_num()) {
wo_mgr_.reset(new WaitObjectManager());
-  client_thread_ = std::make_unique<RsyncClientThread>(10 * 1000, 60 * 1000, wo_mgr_.get());
-  work_threads_.resize(kMaxRsyncParallelNum);
-  throttle_.reset(new Throttle(kThrottleBytesPerSecond, kThrottleCheckCycle));
+  client_thread_ = std::make_unique<RsyncClientThread>(3000, 60, wo_mgr_.get());
+  work_threads_.resize(GetParallelNum());
finished_work_cnt_.store(0);
}
@@ -83,13 +82,13 @@ void* RsyncClient::ThreadMain() {
Status s = Status::OK();
LOG(INFO) << "RsyncClient begin to copy remote files";
-  std::vector<std::set<std::string> > file_vec(kMaxRsyncParallelNum);
+  std::vector<std::set<std::string> > file_vec(GetParallelNum());
int index = 0;
for (const auto& file : file_set_) {
-    file_vec[index++ % kMaxRsyncParallelNum].insert(file);
+    file_vec[index++ % GetParallelNum()].insert(file);
}
-  for (int i = 0; i < kMaxRsyncParallelNum; i++) {
+  for (int i = 0; i < GetParallelNum(); i++) {
work_threads_[i] = std::move(std::thread(&RsyncClient::Copy, this, file_vec[i], i));
}
@@ -126,12 +125,12 @@ void* RsyncClient::ThreadMain() {
outfile.flush();
meta_rep.clear();
-    if (finished_work_cnt_.load() == kMaxRsyncParallelNum) {
+    if (finished_work_cnt_.load() == GetParallelNum()) {
break;
}
}
-  for (int i = 0; i < kMaxRsyncParallelNum; i++) {
+  for (int i = 0; i < GetParallelNum(); i++) {
work_threads_[i].join();
}
finished_work_cnt_.store(0);
@@ -157,8 +156,11 @@ Status RsyncClient::CopyRemoteFile(const std::string& filename, int index) {
};
while (retries < max_retries_) {
if (state_.load() != RUNNING) {
break;
}
size_t copy_file_begin_time = pstd::NowMicros();
-    size_t count = throttle_->ThrottledByThroughput(kBytesPerRequest);
+    size_t count = Throttle::GetInstance().ThrottledByThroughput(kBytesPerRequest);
if (count == 0) {
std::this_thread::sleep_for(std::chrono::milliseconds(1000 / kThrottleCheckCycle));
continue;
@@ -197,7 +199,7 @@ Status RsyncClient::CopyRemoteFile(const std::string& filename, int index) {
size_t ret_count = resp->file_resp().count();
size_t elaspe_time_us = pstd::NowMicros() - copy_file_begin_time;
-    throttle_->ReturnUnusedThroughput(count, ret_count, elaspe_time_us);
+    Throttle::GetInstance().ReturnUnusedThroughput(count, ret_count, elaspe_time_us);
if (resp->code() != RsyncService::kOk) {
//TODO: handle different error
@@ -489,5 +491,9 @@ std::string RsyncClient::GetLocalMetaFilePath() {
return db_path + kDumpMetaFileName;
}
int RsyncClient::GetParallelNum() {
return parallel_num_;
}
} // end namespace rsync
# Install a pika cluster with KubeBlocks

Follow the KubeBlocks docs to [install kbcli](https://kubeblocks.io/docs/preview/user_docs/installation/install-kbcli)

## Install KubeBlocks

Follow the KubeBlocks docs to [install KubeBlocks](https://kubeblocks.io/docs/preview/user_docs/installation/install-kubeblocks)

## Install the pika cluster

### Install the pika cluster definition and pika cluster

First, use Helm to install the pika cluster definition, then install the pika cluster.
```bash
cd ./tools/kubeblocks-helm/
helm install pika ./pika
helm install pika-cluster ./pika-cluster
```
Wait until the pika cluster status is `Running`.
```bash
kubectl get cluster --watch
```
### Add pika instances to codis

Then connect to the codis front end.
```bash
kubectl port-forward svc/pika-cluster-codis-fe 8080
```
Open a browser and visit `http://localhost:8080`

Then add the pika instances to codis:
1. Enter group id `1` and click the `Add Group` button
2. Enter the following pika instance addresses with the group id, then click the `Add Server` button
  - pika-cluster-pika-0.pika-cluster-pika-headless:9221
  - pika-cluster-pika-1.pika-cluster-pika-headless:9221
3. Click the `Rebalance all slots` button
### Connect to the pika cluster
```bash
kubectl port-forward svc/pika-cluster-codis-proxy 19000
# start new terminal
redis-cli -p 19000 info
```
## Scale the pika cluster

Use kbcli to horizontally scale the pika component; you will get 2 new pika instances.
```bash
kbcli cluster hscale pika-cluster --replicas 4 --components pika
```
Add the new pika instances to codis:

1. Enter group id `2` and click the `Add Group` button
2. Enter the following pika instance addresses with the group id, then click the `Add Server` button
  - pika-cluster-pika-2.pika-cluster-pika-headless:9221
  - pika-cluster-pika-3.pika-cluster-pika-headless:9221
3. Click the `Rebalance all slots` button
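Once rebalanced, the new pods can be checked with the chart's standard selector labels (assuming the release name `pika-cluster` used above):

```bash
kubectl get pods -l app.kubernetes.io/instance=pika-cluster
```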
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
apiVersion: v2
name: pika-cluster
description: A Pika Codis Cluster Helm chart for KubeBlocks.
type: application
version: 0.6.0-alpha.21
appVersion: "3.5.0-alpha"
home: https://github.com/OpenAtomFoundation/pika
keywords:
- pika
- redis
- database
- nosql
- replication
- codis
maintainers:
- name: pika
url: https://github.com/OpenAtomFoundation/pika/tools/kubeblocks_helm
1. Get the application URL by running these commands:
{{/*
Expand the name of the chart.
*/}}
{{- define "pika-cluster.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "pika-cluster.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "pika-cluster.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "pika-cluster.labels" -}}
helm.sh/chart: {{ include "pika-cluster.chart" . }}
{{ include "pika-cluster.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "pika-cluster.selectorLabels" -}}
app.kubernetes.io/name: {{ include "pika-cluster.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{- define "clustername" -}}
{{ include "pika-cluster.fullname" .}}
{{- end}}
{{/*
Create the name of the service account to use
*/}}
{{- define "pika-cluster.serviceAccountName" -}}
{{- default (printf "kb-%s" (include "clustername" .)) .Values.serviceAccount.name }}
{{- end }}
apiVersion: apps.kubeblocks.io/v1alpha1
kind: Cluster
metadata:
name: {{ include "clustername" . }}
labels: {{ include "pika-cluster.labels" . | nindent 4 }}
spec:
clusterDefinitionRef: pika # ref clusterDefinition.name
clusterVersionRef: pika-{{ default .Chart.AppVersion .Values.clusterVersionOverride }} # ref clusterVersion.name
terminationPolicy: {{ .Values.terminationPolicy }}
affinity:
{{- with .Values.topologyKeys }}
topologyKeys: {{ . | toYaml | nindent 6 }}
{{- end }}
{{- with $.Values.tolerations }}
tolerations: {{ . | toYaml | nindent 4 }}
{{- end }}
componentSpecs:
- name: pika # user-defined
componentDefRef: pika # ref clusterDefinition componentDefs.name
monitor: {{ .Values.monitor.enabled | default false }}
enabledLogs: {{ .Values.enabledLogs | toJson | indent 4 }}
replicas: {{ .Values.replicaCount | default 2 }}
serviceAccountName: {{ include "pika-cluster.serviceAccountName" . }}
switchPolicy:
type: {{ .Values.switchPolicy.type}}
{{- with .Values.resources }}
resources:
limits:
cpu: {{ .limits.cpu | quote }}
memory: {{ .limits.memory | quote }}
requests:
cpu: {{ .requests.cpu | quote }}
memory: {{ .requests.memory | quote }}
{{- end }}
{{- if .Values.persistence.enabled }}
volumeClaimTemplates:
- name: data # ref clusterdefinition components.containers.volumeMounts.name
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: {{ .Values.persistence.data.size }}
{{- end }}
- name: etcd # user-defined
componentDefRef: etcd # ref clusterdefinition components.name
monitor: {{ .Values.monitor.enabled | default false }}
replicas: {{ .Values.etcdReplicaCount| default 3 }}
{{- with .Values.resources }}
resources:
{{- with .limits }}
limits:
cpu: {{ .cpu | quote }}
memory: {{ .memory | quote }}
{{- end }}
{{- with .requests }}
requests:
cpu: {{ .cpu | quote }}
memory: {{ .memory | quote }}
{{- end }}
{{- end }}
{{- if .Values.persistence.enabled }}
volumeClaimTemplates:
- name: data # ref clusterdefinition components.containers.volumeMounts.name
spec:
storageClassName: {{ .Values.persistence.data.storageClassName }}
accessModes:
- ReadWriteOnce
resources:
requests:
storage: {{ .Values.persistence.data.size }}
{{- end }}
- name: codis-proxy
componentDefRef: codis-proxy # ref clusterDefinition componentDefs.name
replicas: {{ .Values.codisProxyReplicaCount | default 2 }}
{{- with .Values.resources }}
resources:
limits:
cpu: {{ .limits.cpu | quote }}
memory: {{ .limits.memory | quote }}
requests:
cpu: {{ .requests.cpu | quote }}
memory: {{ .requests.memory | quote }}
{{- end }}
{{- if .Values.persistence.enabled }}
volumeClaimTemplates:
- name: data # ref clusterdefinition components.containers.volumeMounts.name
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: {{ .Values.persistence.data.size }}
{{- end }}
- name: codis-fe
componentDefRef: codis-fe # ref clusterDefinition componentDefs.name
replicas: {{ .Values.codisFeReplicaCount | default 1 }}
{{- with .Values.resources }}
resources:
limits:
cpu: {{ .limits.cpu | quote }}
memory: {{ .limits.memory | quote }}
requests:
cpu: {{ .requests.cpu | quote }}
memory: {{ .requests.memory | quote }}
{{- end }}
{{- if .Values.persistence.enabled }}
volumeClaimTemplates:
- name: data # ref clusterdefinition components.containers.volumeMounts.name
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: {{ .Values.persistence.data.size }}
{{- end }}
- name: codis-dashboard
componentDefRef: codis-dashboard # ref clusterDefinition componentDefs.name
replicas: 1
{{- with .Values.resources }}
resources:
limits:
cpu: {{ .limits.cpu | quote }}
memory: {{ .limits.memory | quote }}
requests:
cpu: {{ .requests.cpu | quote }}
memory: {{ .requests.memory | quote }}
{{- end }}
{{- if .Values.persistence.enabled }}
volumeClaimTemplates:
- name: data # ref clusterdefinition components.containers.volumeMounts.name
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: {{ .Values.persistence.data.size }}
{{- end }}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: kb-{{ include "clustername" . }}
namespace: {{ .Release.Namespace }}
labels:
{{ include "pika-cluster.labels" . | nindent 4 }}
rules:
- apiGroups:
- ""
resources:
- events
verbs:
- create
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: kb-{{ include "clustername" . }}
labels:
{{ include "pika-cluster.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kb-{{ include "clustername" . }}
subjects:
- kind: ServiceAccount
name: {{ include "pika-cluster.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "pika-cluster.serviceAccountName" . }}
labels:
{{ include "pika-cluster.labels" . | nindent 4 }}
# Default values for redis-cluster.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
nameOverride: ""
fullnameOverride: ""
replicaCount: 2
etcdReplicaCount: 3
codisProxyReplicaCount: 2
codisFeReplicaCount: 1
codisDashboardReplicaCount: 1
terminationPolicy: Delete
clusterVersionOverride: ""
monitor:
enabled: false
switchPolicy:
type: Noop
resources:
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
limits:
cpu: 500m
memory: 3Gi
requests:
cpu: 500m
memory: 1Gi
persistence:
enabled: true
data:
storageClassName:
size: 1Gi
topologyKeys:
- kubernetes.io/hostname
## @param tolerations
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: [ ]
#enabledLogs:
# - running
# The RBAC permission used by cluster component pod, now include event.create
serviceAccount:
name: ""
etcdAddrs: "etcd-0.etcd-headless.default.svc.cluster.local:2379,etcd-1.etcd-headless.default.svc.cluster.local:2379,etcd-2.etcd-headless.default.svc.cluster.local:2379"
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
apiVersion: v2
name: pika
description: A Pika Codis cluster definition Helm chart for Kubernetes
type: application
version: 0.6.0-alpha.21
appVersion: "3.5.0-alpha"
home: https://github.com/OpenAtomFoundation/pika
keywords:
- pika
- redis
- database
- nosql
- replication
- codis
maintainers:
- name: pika
url: https://github.com/OpenAtomFoundation/pika/tools/kubeblocks_helm
##################################################
# #
# Codis-Dashboard #
# #
##################################################
# Set Coordinator, only accepts "zookeeper" & "etcd" & "filesystem".
# for zookeeper/etcd, coordinator_auth accepts "user:password"
# Quick Start
#coordinator_name = "filesystem"
#coordinator_addr = "/tmp/codis"
coordinator_name = "etcd"
coordinator_addr = "pika-cluster-etcd-0.pika-cluster-etcd-headless:2379,pika-cluster-etcd-1.pika-cluster-etcd-headless:2379,pika-cluster-etcd-1.pika-cluster-etcd-headless:2379"
#coordinator_auth = ""
# Set Codis Product Name/Auth.
product_name = "codis-demo"
product_auth = ""
# Set bind address for admin(rpc), tcp only.
admin_addr = "0.0.0.0:18080"
# Set slot num
max_slot_num = 1024
# Set arguments for data migration (only accept 'sync' & 'semi-async').
migration_method = "semi-async"
migration_parallel_slots = 100
migration_async_maxbulks = 200
migration_async_maxbytes = "32mb"
migration_async_numkeys = 500
migration_timeout = "30s"
# Set configs for redis sentinel.
sentinel_check_server_state_interval = "5s"
sentinel_check_master_failover_interval = "1s"
sentinel_master_dead_check_times = 5
sentinel_client_timeout = "10s"
sentinel_quorum = 2
sentinel_parallel_syncs = 1
sentinel_down_after = "30s"
sentinel_failover_timeout = "5m"
sentinel_notification_script = ""
sentinel_client_reconfig_script = ""
##################################################
# #
# Codis-Proxy #
# #
##################################################
# Set Codis Product Name/Auth.
product_name = "codis-demo"
product_auth = ""
# Set auth for client session
# 1. product_auth is used for auth validation among codis-dashboard,
# codis-proxy and codis-server.
# 2. session_auth is different from product_auth, it requires clients
# to issue AUTH <PASSWORD> before processing any other commands.
session_auth = ""
# Set bind address for admin(rpc), tcp only.
admin_addr = "0.0.0.0:11080"
# Set bind address for proxy, proto_type can be "tcp", "tcp4", "tcp6", "unix" or "unixpacket".
proto_type = "tcp4"
proxy_addr = "0.0.0.0:19000"
# Set jodis address & session timeout
# 1. jodis_name is short for jodis_coordinator_name, only accept "zookeeper" & "etcd".
# 2. jodis_addr is short for jodis_coordinator_addr
# 3. jodis_auth is short for jodis_coordinator_auth, for zookeeper/etcd, "user:password" is accepted.
# 4. proxy will be registered as node:
# if jodis_compatible = true (not suggested):
# /zk/codis/db_{PRODUCT_NAME}/proxy-{HASHID} (compatible with Codis2.0)
# or else
# /jodis/{PRODUCT_NAME}/proxy-{HASHID}
jodis_name = ""
jodis_addr = ""
jodis_auth = ""
jodis_timeout = "20s"
jodis_compatible = false
# Set datacenter of proxy.
proxy_datacenter = ""
# Set max number of alive sessions.
proxy_max_clients = 1000
# Set max offheap memory size. (0 to disable)
proxy_max_offheap_size = "1024mb"
# Set heap placeholder to reduce GC frequency.
proxy_heap_placeholder = "256mb"
# Proxy will ping backend redis (and clear 'MASTERDOWN' state) in a predefined interval. (0 to disable)
backend_ping_period = "5s"
# Set backend recv buffer size & timeout.
backend_recv_bufsize = "128kb"
backend_recv_timeout = "30s"
# Set backend send buffer & timeout.
backend_send_bufsize = "128kb"
backend_send_timeout = "30s"
# Set backend pipeline buffer size.
backend_max_pipeline = 20480
# Set backend never read replica groups, default is false
backend_primary_only = false
# Set backend parallel connections per server
backend_primary_parallel = 1
backend_replica_parallel = 1
# Set slot num
max_slot_num = 1024
# Set backend tcp keepalive period. (0 to disable)
backend_keepalive_period = "75s"
# Set number of databases of backend.
backend_number_databases = 1
# If there is no request from client for a long time, the connection will be closed. (0 to disable)
# Set session recv buffer size & timeout.
session_recv_bufsize = "128kb"
session_recv_timeout = "30m"
# Set session send buffer size & timeout.
session_send_bufsize = "64kb"
session_send_timeout = "30s"
# Make sure this is higher than the max number of requests for each pipeline request, or your client may be blocked.
# Set session pipeline buffer size.
session_max_pipeline = 10000
# Set session tcp keepalive period. (0 to disable)
session_keepalive_period = "75s"
# Set session to be sensitive to failures. Default is false, instead of closing socket, proxy will send an error response to client.
session_break_on_failure = false
# Set metrics server (such as http://localhost:28000), proxy will report json formatted metrics to specified server in a predefined period.
metrics_report_server = ""
metrics_report_period = "1s"
# Set influxdb server (such as http://localhost:8086), proxy will report metrics to influxdb.
metrics_report_influxdb_server = ""
metrics_report_influxdb_period = "1s"
metrics_report_influxdb_username = ""
metrics_report_influxdb_password = ""
metrics_report_influxdb_database = ""
# Set statsd server (such as localhost:8125), proxy will report metrics to statsd.
metrics_report_statsd_server = ""
metrics_report_statsd_period = "1s"
metrics_report_statsd_prefix = ""
###########################
# Pika configuration file #
###########################
# Pika port, the default value is 9221.
# [NOTICE] Magic port offsets of port+1000 / port+2000 are reserved by Pika at present:
# Port 10221 is used for Rsync, and port 11221 is used for Replication, while the listening port is 9221.
port : 9221
# Random value identifying the Pika server, its string length must be 40.
# If not set, Pika will generate a random string with a length of 40 random characters.
# run-id:
# The number of threads for running Pika.
# It's not recommended to set this value to exceed
# the number of CPU cores on the deployment server.
thread-num : 1
# Size of the thread pool; the threads within this pool
# are dedicated to handling user requests.
thread-pool-size : 12
# The number of sync threads for data replication from the master; these threads work on slave nodes
# and are used to execute commands sent from master node when replicating.
sync-thread-num : 6
# Directory to store log files of Pika, which contains multiple types of logs,
# including: INFO, WARNING, ERROR logs, as well as the binlog(write2file) files which
# are used for replication.
log-path : ./log/
# Directory to store the data of Pika.
db-path : ./db/
# The size of a single RocksDB memtable at Pika's bottom layer (Pika uses RocksDB to store persistent data).
# [Tip] A big write-buffer-size can improve writing performance,
# but it will generate heavier IO load when flushing from buffer to disk,
# so you should configure it based on your usage scenario.
# Supported Units [K|M|G], write-buffer-size default unit is in [bytes].
write-buffer-size : 256M
# The size of one block in arena memory allocation.
# If <= 0, a proper value is automatically calculated.
# (usually 1/8 of write-buffer-size, rounded up to a multiple of 4KB)
# Supported Units [K|M|G], arena-block-size default unit is in [bytes].
arena-block-size :
# Timeout of Pika's connections: the countdown starts when there are no requests
# on a connection (it enters sleep state); when the countdown reaches 0, the connection
# will be closed by Pika.
# [Tip] The issue of running out of Pika's connections may be avoided if this value
# is configured properly.
# The Unit of timeout is in [seconds] and its default value is 60(s).
timeout : 60
# The [password of administrator], which is empty by default.
# [NOTICE] If this admin password is the same as user password (including both being empty),
# the value of userpass will be ignored and all users are considered as administrators,
# in this scenario, users are not subject to the restrictions imposed by the userblacklist.
# PS: "user password" refers to value of the parameter below: userpass.
requirepass :
# Password for replication verify, used for authentication when a slave
# connects to a master to request replication.
# [NOTICE] The value of this parameter must match the "requirepass" setting on the master.
masterauth :
# The [password of user], which is empty by default.
# [NOTICE] If this user password is the same as admin password (including both being empty),
# the value of this parameter will be ignored and all users are considered as administrators,
# in this scenario, users are not subject to the restrictions imposed by the userblacklist.
# PS: "admin password" refers to value of the parameter above: requirepass.
userpass :
# The blacklist of commands for users that logged in by userpass,
# the commands that added to this list will not be available for users except for administrator.
# [Advice] It's recommended to add high-risk commands to this list.
# [Format] Commands should be separated by ",". For example: FLUSHALL, SHUTDOWN, KEYS, CONFIG
# By default, this list is empty.
userblacklist :
# Running Mode of Pika; the current version only supports running in "classic mode".
# If set to 'classic', Pika will create multiple DBs whose number is the value of configure item "databases".
instance-mode : classic
# The number of databases when Pika runs in classic mode.
# The default database id is DB 0. You can select a different one on
# a per-connection basis using SELECT. The db id range is [0, 'databases' value - 1].
# The value range of this parameter is [1, 8].
databases : 1
# The number of followers of a master. Only [0, 1, 2, 3, 4] is valid at present.
# By default, this num is set to 0, which means this feature is [not enabled]
# and the Pika runs in standalone mode.
replication-num : 0
# The consensus level defines the number of confirms (ACKs) the leader node needs to receive from
# follower nodes before returning the result to the client that sent the request.
# The [value range] of this parameter is: [0, ...replication-num].
# The default value of consensus-level is 0, which means this feature is not enabled.
consensus-level : 0
# The Prefix of dump file's name.
# All the files that generated by command "bgsave" will be name with this prefix.
dump-prefix :
# daemonize [yes | no].
#daemonize : yes
# The directory to stored dump files that generated by command "bgsave".
dump-path : ./dump/
# TTL of dump files that generated by command "bgsave".
# Any dump files which exceed this TTL will be deleted.
# Unit of dump-expire is in [days] and the default value is 0(day),
# which means dump files never expire.
dump-expire : 0
# Pid file Path of Pika.
pidfile : ./pika.pid
# The Maximum number of Pika's Connection.
maxclients : 20000
# The size of sst file in RocksDB(Pika is based on RocksDB).
# sst files are hierarchical, the smaller the sst file size, the higher the performance and the lower the merge cost,
# the price is that the number of sst files could be huge. On the contrary, the bigger the sst file size, the lower
# the performance and the higher the merge cost, while the number of files is fewer.
# Supported Units [K|M|G], target-file-size-base default unit is in [bytes] and the default value is 20M.
target-file-size-base : 20M
# Expire-time of binlog(write2file) files that stored within log-path.
# Any binlog(write2file) files that exceed this expire time will be cleaned up.
# The unit of expire-logs-days is in [days] and the default value is 7(days).
# The [Minimum value] of this parameter is 1(day).
expire-logs-days : 7
# The maximum number of binlog(write2file) files.
# Once the total number of binlog files exceed this value,
# automatic cleaning will start to ensure the maximum number
# of binlog files is equal to expire-logs-nums.
# The [Minimum value] of this parameter is 10.
expire-logs-nums : 10
# The number of guaranteed connections for root user.
# This parameter guarantees that there are 2 (by default) connections available
# for the root user to log in to Pika from 127.0.0.1, even if the maximum connection limit is reached.
# PS: The maximum connection refers to the parameter above: maxclients.
# The default value of root-connection-num is 2.
root-connection-num : 2
# Slowlog-write-errorlog
slowlog-write-errorlog : no
# The time threshold for slow log recording.
# Any command whose execution time exceeds this threshold will be recorded in pika-ERROR.log,
# which is stored in log-path.
# The unit of slowlog-log-slower-than is in [microseconds(μs)] and the default value is 10000 μs / 10 ms.
slowlog-log-slower-than : 10000
# Slowlog-max-len
slowlog-max-len : 128
# Pika db sync path
db-sync-path : ./dbsync/
# The maximum transmission speed during full synchronization.
# The exhaustion of network can be prevented by setting this parameter properly.
# The value range of this parameter is [1,1024] with unit in [MB/s].
# [NOTICE] If this parameter is set to an invalid value(smaller than 0 or bigger than 1024),
# it will be automatically reset to 1024.
# The default value of db-sync-speed is -1 (1024MB/s).
db-sync-speed : -1
# The priority of slave node when electing new master node.
# The slave node with [lower] value of slave-priority will have [higher priority] to be elected as the new master node.
# This parameter is only used in conjunction with sentinel and serves no other purpose.
# The default value of slave-priority is 100.
slave-priority : 100
# Specify network interface that work with Pika.
#network-interface : eth1
# The IP and port of the master node are specified by this parameter for
# replication between master and slaves.
# [Format] is "ip:port" , for example: "192.168.1.2:6666" indicates that
# the slave instances that configured with this value will automatically send
# SLAVEOF command to port 6666 of 192.168.1.2 after startup.
# This parameter should be configured on slave nodes.
#slaveof : master-ip:master-port
# Daily/Weekly Automatic full compaction task is configured by compact-cron.
#
# [Format-daily]: start time(hour)-end time(hour)/disk-free-space-ratio,
# example: with value of "02-04/60", Pika will perform full compaction task between 2:00-4:00 AM everyday if
# the disk-free-size / disk-size > 60%.
#
# [Format-weekly]: week/start time(hour)-end time(hour)/disk-free-space-ratio,
# example: with value of "3/02-04/60", Pika will perform full compaction task between 2:00-4:00 AM every Wednesday if
# the disk-free-size / disk-size > 60%.
#
# [Tip] Automatic full compaction is suitable for scenarios with multiple data structures
# and lots of items are expired or deleted, or key names are frequently reused.
#
# [NOTICE]: If compact-interval is set, compact-cron will be masked and disabled.
#
#compact-cron : 3/02-04/60
# Automatic full synchronization task between a time interval is configured by compact-interval.
# [Format]: time interval(hour)/disk-free-space-ratio, example: "6/60", Pika will perform full compaction every 6 hours,
# if the disk-free-size / disk-size > 60%.
# [NOTICE]: compact-interval is prior than compact-cron.
#compact-interval :
# This window-size determines the amount of data that can be transmitted in a single synchronization process.
# [Tip] In scenarios of high network latency, increasing this size can improve synchronization efficiency.
# Its default value is 9000; the [maximum] value is 90000.
sync-window-size : 9000
# Maximum buffer size of a client connection.
# Only three values are valid here: [67108864(64MB) | 268435456(256MB) | 536870912(512MB)].
# [NOTICE] Master and slaves must have exactly the same value for the max-conn-rbuf-size.
# Supported Units [K|M|G]. Its default unit is in [bytes] and its default value is 268435456(256MB).
max-conn-rbuf-size : 268435456
###############################################################################
#! Critical Settings !#
###############################################################################
# write_binlog [yes | no]
write-binlog : yes
# The size of binlog file, which can not be modified once Pika instance started.
# [NOTICE] Master and slaves must have exactly the same value for the binlog-file-size.
# The [value range] of binlog-file-size is [1K, 2G].
# Supported Units [K|M|G], binlog-file-size default unit is in [bytes] and the default value is 100M.
binlog-file-size : 104857600
# Automatically triggers a small compaction according to statistics
# Use the cache to store up to 'max-cache-statistic-keys' keys
# If 'max-cache-statistic-keys' set to '0', that means turn off the statistics function
# and this automatic small compaction feature is disabled.
max-cache-statistic-keys : 0
# When 'delete' or 'overwrite' a specific multi-data structure key 'small-compaction-threshold' times,
# a small compact is triggered automatically if the small compaction feature is enabled.
# small-compaction-threshold default value is 5000 and the value range is [1, 100000].
small-compaction-threshold : 5000
# The maximum total size of all live memtables of the RocksDB instance that owned by Pika.
# Flushing from memtable to disk will be triggered if the actual memory usage of RocksDB
# exceeds max-write-buffer-size when next write operation is issued.
# [RocksDB-Basic-Tuning](https://github.com/facebook/rocksdb/wiki/Setup-Options-and-Basic-Tuning)
# Supported Units [K|M|G], max-write-buffer-size default unit is in [bytes].
max-write-buffer-size : 10737418240
# The maximum number of write buffers(memtables) that are built up in memory for one ColumnFamily in DB.
# The default and the minimum number is 2. It means that Pika(RocksDB) will write to a write buffer
# when it flushes the data of another write buffer to storage.
# If max-write-buffer-num > 3, writing will be slowed down.
max-write-buffer-num : 2
# The maximum size of the response package to client to prevent memory
# exhaustion caused by commands like 'keys *' and 'Scan' which can generate huge response.
# Supported Units [K|M|G]. The default unit is in [bytes].
max-client-response-size : 1073741824
# The compression algorithm. It cannot be changed once Pika has started.
# Supported types: [snappy, zlib, lz4, zstd]. If you do not want to compress the SST files, please set its value to none.
# [NOTICE] The official Pika binary release just links the snappy library statically, which means that
# you should compile Pika from source and statically link other compression algorithm libraries yourself.
compression : snappy
# if the vector size is smaller than the level number, the undefined lower level uses the
# last option in the configurable array, for example, for 3 level
# LSM tree the following settings are the same:
# configurable array: [none:snappy]
# LSM settings: [none:snappy:snappy]
# When this configurable is enabled, compression is ignored,
# default l0 l1 noCompression, l2 and more use `compression` option
# https://github.com/facebook/rocksdb/wiki/Compression
#compression_per_level : [none:none:snappy:lz4:lz4]
# The number of background flushing threads.
# max-background-flushes default value is 1 and the value range is [1, 4].
max-background-flushes : 1
# The number of background compacting threads.
# max-background-compactions default value is 2 and the value range is [1, 8].
max-background-compactions : 2
# maximum value of RocksDB cached open file descriptors
max-cache-files : 5000
# The ratio between the total size of RocksDB level-(L+1) files and the total size of RocksDB level-L files for all L.
# Its default value is 10(x). You can also change it to 5(x).
max-bytes-for-level-multiplier : 10
# slotmigrate is mainly used to migrate slots, usually we will set it to no.
# When you migrate slots, you need to set it to yes, and reload slotskeys before.
# slotmigrate [yes | no]
slotmigrate : no
# BlockBasedTable block_size, default 4k
# block-size: 4096
# block LRU cache, default 8M, 0 to disable
# Supported Units [K|M|G], default unit [bytes]
# block-cache: 8388608
# num-shard-bits default -1, the number of bits from cache keys to be use as shard id.
# The cache will be sharded into 2^num_shard_bits shards.
# https://github.com/EighteenZi/rocksdb_wiki/blob/master/Block-Cache.md#lru-cache
# num-shard-bits: -1
# whether the block cache is shared among the RocksDB instances, default is per CF
# share-block-cache: no
# The slot number of pika when used with codis.
default-slot-num : 1024
# whether or not index and filter blocks is stored in block cache
# cache-index-and-filter-blocks: no
# pin_l0_filter_and_index_blocks_in_cache [yes | no]
# When `cache-index-and-filter-blocks` is enabled, `pin_l0_filter_and_index_blocks_in_cache` is suggested to be enabled
# pin_l0_filter_and_index_blocks_in_cache : no
# when set to yes, bloomfilter of the last level will not be built
# optimize-filters-for-hits: no
# https://github.com/facebook/rocksdb/wiki/Leveled-Compaction#levels-target-size
# level-compaction-dynamic-level-bytes: no
################################## RocksDB Rate Limiter #######################
# rocksdb rate limiter
# https://rocksdb.org/blog/2017/12/18/17-auto-tuned-rate-limiter.html
# https://github.com/EighteenZi/rocksdb_wiki/blob/master/Rate-Limiter.md
###############################################################################
# rate limiter bandwidth, default 200MB
#rate-limiter-bandwidth : 209715200
#rate-limiter-refill-period-us : 100000
#
#rate-limiter-fairness: 10
# rate limiter auto tune https://rocksdb.org/blog/2017/12/18/17-auto-tuned-rate-limiter.html. the default value is false.
#rate-limiter-auto-tuned : true
################################## RocksDB Blob Configure #####################
# rocksdb blob configure
# https://rocksdb.org/blog/2021/05/26/integrated-blob-db.html
# wiki https://github.com/facebook/rocksdb/wiki/BlobDB
###############################################################################
# enable rocksdb blob, default no
# enable-blob-files : yes
# values at or above this threshold will be written to blob files during flush or compaction.
# Supported units [K|M|G]; the default unit is bytes.
# min-blob-size : 4K
# the size limit for blob files
# Supported units [K|M|G]; the default unit is bytes.
# blob-file-size : 256M
# the compression type to use for blob files. All blobs in the same file are compressed with the same algorithm.
# Supported types: [snappy, zlib, lz4, zstd]. If you do not want to compress the blob files, set it to none.
# [NOTICE] The official Pika binary release statically links only the snappy library, which means that
# to use another compression algorithm you should compile Pika from source and statically link that algorithm's library yourself.
# blob-compression-type : lz4
# set this to yes to make BlobDB actively relocate valid blobs from the oldest blob files as they are encountered during compaction.
# Valid options: [yes | no]
# enable-blob-garbage-collection : no
# the cutoff that the GC logic uses to determine which blob files should be considered "old".
# This parameter can be tuned to adjust the trade-off between write amplification and space amplification.
# blob-garbage-collection-age-cutoff : 0.25
# if the ratio of garbage in the oldest blob files exceeds this threshold,
# targeted compactions are scheduled in order to force garbage collecting the blob files in question
# blob-garbage-collection-force-threshold : 1.0
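# Example (illustrative): with blob-garbage-collection-age-cutoff 0.25, the oldest 25%
# of blob files are GC candidates; with a force threshold of 1.0, a targeted compaction
# is scheduled only when such a file consists entirely of garbage.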
# the cache to use for blobs; the default 0 means the blob cache is disabled
# blob-cache : 0
# blob-num-shard-bits default -1, the number of bits from cache keys to be used as the shard id.
# The cache will be sharded into 2^blob-num-shard-bits shards.
# blob-num-shard-bits : -1
{{/*
Expand the name of the chart.
*/}}
{{- define "pika.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "pika.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
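{{/*
Example (illustrative): with release name "demo" and chart name "pika",
"pika.fullname" renders "demo-pika"; a release already named "pika-demo"
renders unchanged as "pika-demo".
*/}}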
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "pika.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "pika.labels" -}}
helm.sh/chart: {{ include "pika.chart" . }}
{{ include "pika.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "pika.selectorLabels" -}}
app.kubernetes.io/name: {{ include "pika.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Define image
*/}}
{{- define "pika.image" -}}
{{ .Values.image.pika.registry | default "docker.io" }}/{{ .Values.image.pika.repository }}:{{ .Values.image.pika.tag }}
{{- end }}
{{- define "pika.imagePullPolicy" -}}
{{ .Values.image.pika.pullPolicy | default "IfNotPresent" }}
{{- end }}
{{/*
Define codis image
*/}}
{{- define "codis.image" -}}
{{ .Values.image.codis.registry | default "docker.io" }}/{{ .Values.image.codis.repository }}:{{ .Values.image.codis.tag }}
{{- end }}
{{- define "codis.imagePullPolicy" -}}
{{ .Values.image.codis.pullPolicy | default "IfNotPresent" }}
{{- end }}
{{/*
Define etcd image
*/}}
{{- define "etcd.image" -}}
{{ .Values.image.etcd.registry | default "docker.io" }}/{{ .Values.image.etcd.repository }}:{{ .Values.image.etcd.tag }}
{{- end }}
{{- define "etcd.imagePullPolicy" -}}
{{ .Values.image.etcd.pullPolicy | default "IfNotPresent" }}
{{- end }}
{{/*
Define ETCD env
*/}}
{{- define "etcd.name" -}}
$(KB_CLUSTER_NAME)-etcd
{{- end}}
{{- define "etcd.clusterDomain" -}}
{{ include "etcd.name" .}}-headless.$(KB_NAMESPACE).svc{{.Values.clusterDomain}}
{{- end}}
{{- define "etcd.clusterDomainNoHeadless" -}}
{{ include "etcd.name" .}}.$(KB_NAMESPACE).svc{{.Values.clusterDomain}}
{{- end}}
{{- define "etcd.clusterDomainPort" -}}
{{ include "etcd.clusterDomain" .}}:2380
{{- end}}
{{- define "etcd.initialCluster" -}}
{{- include "etcd.name" .}}-0=http://{{ include "etcd.name" .}}-0.{{ include "etcd.clusterDomainPort" .}},
{{- include "etcd.name" .}}-1=http://{{ include "etcd.name" .}}-1.{{ include "etcd.clusterDomainPort" .}},
{{- include "etcd.name" .}}-2=http://{{ include "etcd.name" .}}-2.{{ include "etcd.clusterDomainPort" .}}
{{- end}}
{{- define "etcd.advertiseClientURLs" -}}
http://$(KB_POD_FQDN):2379,http://{{ include "etcd.clusterDomain" .}}:2379,http://{{ include "etcd.clusterDomainNoHeadless" .}}:2379
{{- end}}
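{{/*
Example (illustrative, assuming KubeBlocks resolves $(KB_CLUSTER_NAME) to "demo" and
$(KB_NAMESPACE) to "default", with the default clusterDomain ".cluster.local"):
"etcd.initialCluster" renders
demo-etcd-0=http://demo-etcd-0.demo-etcd-headless.default.svc.cluster.local:2380,... for members 0-2.
*/}}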
apiVersion: apps.kubeblocks.io/v1alpha1
kind: ClusterDefinition
metadata:
name: pika
labels:
{{- include "pika.labels" . | nindent 4 }}
spec:
type: pika
connectionCredential:
username: default
password: "$(RANDOM_PASSWD)"
endpoint: "$(SVC_FQDN):$(SVC_PORT_pika)"
host: "$(SVC_FQDN)"
port: "$(SVC_PORT_pika)"
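    # Note: $(RANDOM_PASSWD), $(SVC_FQDN) and $(SVC_PORT_pika) are KubeBlocks
    # connection-credential placeholders, resolved when a cluster referencing this
    # definition is created.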
componentDefs:
- name: pika
workloadType: Replication
characterType: pika
service:
ports:
- name: pika
port: 9221
targetPort: pika
configSpecs:
- name: pika-config
templateRef: pika-conf-template
namespace: {{ .Release.Namespace }}
volumeName: config
volumeTypes:
- name: data
type: data
podSpec:
containers:
- name: pika
ports:
- name: pika
containerPort: 9221
volumeMounts:
- name: data
mountPath: /data
- name: config
mountPath: /etc/pika
command:
- "/pika/bin/pika"
args:
- "-c"
- "/etc/pika/pika.conf"
- name: etcd
workloadType: Stateful
characterType: etcd
service:
ports:
- name: client
port: 2379
targetPort: client
- name: peer
port: 2380
targetPort: peer
volumeTypes:
- name: data
type: data
podSpec:
containers:
- name: etcd
imagePullPolicy: "IfNotPresent"
securityContext:
runAsNonRoot: true
runAsUser: 1001
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /bitnami/etcd
name: data
ports:
- name: client
containerPort: 2379
- name: peer
containerPort: 2380
env:
- name: BITNAMI_DEBUG
value: "true"
- name: MY_POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
- name: MY_POD_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.name
- name: MY_STS_NAME
value: $(KB_CLUSTER_COMP_NAME)
- name: ETCDCTL_API
value: "3"
- name: ETCD_ON_K8S
value: "yes"
- name: ETCD_START_FROM_SNAPSHOT
value: "no"
- name: ETCD_DISASTER_RECOVERY
value: "no"
- name: ETCD_NAME
value: $(MY_POD_NAME)
- name: ETCD_DATA_DIR
value: /bitnami/etcd/data
- name: ETCD_LOG_LEVEL
value: info
- name: ALLOW_NONE_AUTHENTICATION
value: "yes"
- name: ETCD_INITIAL_CLUSTER_TOKEN
value: "$(KB_CLUSTER_NAME)"
- name: ETCD_INITIAL_CLUSTER_STATE
value: "new"
- name: ETCD_INITIAL_CLUSTER
value: "{{ include "etcd.initialCluster" .}}"
- name: ETCD_CLUSTER_DOMAIN
value: "{{ include "etcd.clusterDomain" .}}"
- name: ETCD_AUTO_COMPACTION_MODE
value: "periodic"
- name: ETCD_AUTO_COMPACTION_RETENTION
value: "1h"
- name: ETCD_ADVERTISE_CLIENT_URLS
value: "{{ include "etcd.advertiseClientURLs" .}}"
- name: ETCD_LISTEN_CLIENT_URLS
value: http://0.0.0.0:2379
- name: ETCD_INITIAL_ADVERTISE_PEER_URLS
value: http://$(KB_POD_FQDN){{ .Values.clusterDomain }}:2380
- name: ETCD_LISTEN_PEER_URLS
value: http://0.0.0.0:2380
- name: ETCD_QUOTA_BACKEND_BYTES
value: "4294967296"
- name: ETCD_HEARTBEAT_INTERVAL
value: "500"
- name: ETCD_ELECTION_TIMEOUT
value: "2500"
- name: ETCD_ENABLE_V2
value: "true"
- name: codis-proxy
workloadType: Stateless
characterType: pika
service:
ports:
        - name: admin
          targetPort: admin
          port: 11080
        - name: proxy
          targetPort: proxy
          port: 19000
configSpecs:
- name: codis-proxy-config
templateRef: pika-conf-template
namespace: {{ .Release.Namespace }}
volumeName: config
podSpec:
initContainers:
- name: wait-etcd
env:
- name: ETCD_ADDR
value: "{{ include "etcd.clusterDomain" .}}"
- name: DASHBOARD_ADDR
value: "$(KB_CLUSTER_NAME)-codis-dashboard"
image: busybox:1.28
command:
- 'sh'
- '-c'
        - "until nc -z ${ETCD_ADDR} 2379; do echo waiting for etcd; sleep 2; done; until nc -z ${DASHBOARD_ADDR} 18080; do echo waiting for dashboard; sleep 2; done;"
containers:
- name: codis-proxy
imagePullPolicy: IfNotPresent
ports:
          - containerPort: 11080
            name: admin
          - containerPort: 19000
            name: proxy
volumeMounts:
- name: config
mountPath: /etc/codis
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: ETCD_ADDR
value: "{{ include "etcd.clusterDomain" .}}:2379"
- name: DASHBOARD_ADDR
value: "$(KB_CLUSTER_NAME)-codis-dashboard:18080"
- name: PRODUCT_NAME
value: "$(KB_CLUSTER_NAME)"
command:
- "/codis/bin/codis-proxy"
args:
- "-c"
- "/etc/codis/proxy.toml"
- "--host-admin"
- "$(POD_IP):11080"
- "--host-proxy"
- "$(POD_IP):19000"
- "--etcd"
- "$(ETCD_ADDR)"
- "--product_name"
- "$(PRODUCT_NAME)"
- "--pidfile"
- "log/proxy.pid"
- "--log-level=DEBUG"
lifecycle:
preStop:
exec:
command:
- "/bin/sh"
- "-c"
- "/codis/bin/codis-admin --dashboard=${DASHBOARD_ADDR} --remove-proxy --addr=${POD_IP}:11080 1>/dev/null 2>&1"
- name: codis-fe
workloadType: Stateless
characterType: pika
service:
ports:
- name: fe
targetPort: fe
port: 8080
podSpec:
initContainers:
- name: wait-etcd
env:
- name: ETCD_ADDR
value: "{{ include "etcd.clusterDomain" .}}"
- name: DASHBOARD_ADDR
value: "$(KB_CLUSTER_NAME)-codis-dashboard"
image: busybox:1.28
command:
- 'sh'
- '-c'
        - "until nc -z ${ETCD_ADDR} 2379; do echo waiting for etcd; sleep 2; done; until nc -z ${DASHBOARD_ADDR} 18080; do echo waiting for dashboard; sleep 2; done;"
containers:
- name: codis-fe
imagePullPolicy: IfNotPresent
ports:
- containerPort: 8080
name: fe
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: ETCD_ADDR
value: "{{ include "etcd.clusterDomain" .}}:2379"
command:
- "/codis/bin/codis-fe"
args:
- "--etcd"
- "$(ETCD_ADDR)"
- "--listen=0.0.0.0:8080"
- "--assets=/codis/bin/assets"
- "--log-level=DEBUG"
- name: codis-dashboard
workloadType: Stateful
characterType: pika
service:
ports:
- name: dashboard
targetPort: dashboard
port: 18080
configSpecs:
- name: codis-dashboard-config
templateRef: pika-conf-template
namespace: {{ .Release.Namespace }}
volumeName: config
volumeTypes:
- name: data
type: data
podSpec:
initContainers:
- name: wait-etcd
env:
- name: ETCD_ADDR
value: "{{ include "etcd.clusterDomain" .}}"
image: busybox:1.28
command:
- 'sh'
- '-c'
- "until nc -z ${ETCD_ADDR} 2379; do echo waiting for etcd; sleep 2; done;"
containers:
- name: codis-dashboard
imagePullPolicy: IfNotPresent
ports:
- containerPort: 18080
name: dashboard
volumeMounts:
- name: data
mountPath: /data
- name: config
mountPath: /etc/codis
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: ETCD_ADDR
value: "{{ include "etcd.clusterDomain" .}}:2379"
- name: PRODUCT_NAME
value: "$(KB_CLUSTER_NAME)"
command:
- "/codis/bin/codis-dashboard"
args:
- "-c"
- "/etc/codis/dashboard.toml"
- "--host-admin"
- "$(POD_IP):18080"
- "--etcd"
- "$(ETCD_ADDR)"
- "--product_name"
- "$(PRODUCT_NAME)"
- "--pidfile"
- "log/dashboard.pid"
- "--remove-lock"
- "--log-level=DEBUG"
lifecycle:
postStart:
exec:
command: [ "/bin/bash", "-c", "/codis/bin/codis-admin --dashboard-list --etcd=${ETCD_ADDR}" ]
preStop:
exec:
            command: [ "/bin/sh", "-c", "PID=$(cat log/dashboard.pid) && kill $PID && while ps -p $PID > /dev/null; do sleep 1; done" ]
apiVersion: apps.kubeblocks.io/v1alpha1
kind: ClusterVersion
metadata:
name: pika-{{ default .Chart.AppVersion .Values.clusterVersionOverride }}
labels:
{{- include "pika.labels" . | nindent 4 }}
spec:
clusterDefinitionRef: pika
componentVersions:
- componentDefRef: pika
versionsContext:
containers:
- name: pika
image: {{ include "pika.image" . }}
imagePullPolicy: {{ include "pika.imagePullPolicy" . }}
- componentDefRef: etcd
versionsContext:
containers:
- name: etcd
image: {{ include "etcd.image" . }}
imagePullPolicy: {{ include "etcd.imagePullPolicy" . }}
- componentDefRef: codis-proxy
versionsContext:
containers:
- name: codis-proxy
image: {{ include "codis.image" . }}
imagePullPolicy: {{ include "codis.imagePullPolicy" . }}
- componentDefRef: codis-fe
versionsContext:
containers:
- name: codis-fe
image: {{ include "codis.image" . }}
imagePullPolicy: {{ include "codis.imagePullPolicy" . }}
- componentDefRef: codis-dashboard
versionsContext:
containers:
- name: codis-dashboard
image: {{ include "codis.image" . }}
imagePullPolicy: {{ include "codis.imagePullPolicy" . }}
apiVersion: v1
kind: ConfigMap
metadata:
name: pika-conf-template
labels:
{{- include "pika.labels" . | nindent 4 }}
data:
pika.conf: |-
{{- .Files.Get "config/pika-config.tpl" | nindent 4 }}
dashboard.toml: |-
{{- .Files.Get "config/codis-dashboard.tpl" | nindent 4 }}
proxy.toml: |-
{{- .Files.Get "config/codis-proxy.tpl" | nindent 4 }}
pika:
version: v3.5.0-alpha
image:
pika:
registry: docker.io
repository: machinly/pika
tag: v3.5.0-alpha
pullPolicy: IfNotPresent
codis:
registry: docker.io
repository: machinly/codis
tag: v3.5.0-alpha
pullPolicy: IfNotPresent
etcd:
registry: docker.io
repository: bitnami/etcd
tag: 3.5.9
pullPolicy: IfNotPresent
roleProbe:
pika:
failureThreshold: 2
periodSeconds: 1
timeoutSeconds: 1
codis-proxy:
failureThreshold: 2
periodSeconds: 1
timeoutSeconds: 1
codis-dashboard:
failureThreshold: 2
periodSeconds: 1
timeoutSeconds: 1
codis-fe:
failureThreshold: 2
periodSeconds: 1
timeoutSeconds: 1
etcd:
failureThreshold: 2
periodSeconds: 1
timeoutSeconds: 1
clusterVersionOverride: 3.5.0-alpha
nameOverride: ""
fullnameOverride: ""
clusterDomain: ".cluster.local"
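# Example (a sketch; assumes KubeBlocks is installed in the target cluster and this
# chart's directory is the working directory):
#   helm install pika .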