[Citus] Citus Installation and Basic Operations


Citus is a PostgreSQL extension that provides sharding.

Before applying it to a production site, I ran a simple test and am recording the results here as baseline material for later refinement.

1. Citus Installation

1-1. CentOS 7.9 / PostgreSQL 15 / Citus 11.3-2

# run these commands as the root OS user
yum install -y https://download.postgresql.org/pub/repos/yum/reporpms/EL-7-x86_64/pgdg-redhat-repo-latest.noarch.rpm
yum -y repolist
yum -y update
yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
yum -y install postgresql15-server
psql -V
/usr/pgsql-15/bin/postgresql-15-setup initdb
systemctl enable --now postgresql-15
systemctl status postgresql*
# the postgres OS user is already created by the postgresql15-server package; just set its password
passwd postgres
su - postgres
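
Once the service is up and you are connected as the postgres user, a quick sanity check from psql confirms the version and the data directory created by initdb (a minimal check, not part of the original steps):

-- confirm the running server version
SELECT version();

-- confirm where initdb placed the cluster
SHOW data_directory;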

1-2. Rocky Linux 8.9 / PostgreSQL 15 / Citus 12.1-1

# run these commands as the root OS user
curl https://install.citusdata.com/community/rpm.sh > /tmp/add-citus-repo.sh
vi /tmp/add-citus-repo.sh

======================================================================================
# Rocky-related handling needs to be added to the script (the three edits below)

1)

epel_check()
{
  echo "Checking for EPEL repositories..."

  epel_release_check=$(rpm -qa | grep -qw epel-release)
  if [ "$?" == "0" ]; then
    echo "Detected EPEL repositories"
  else
    echo -n "Installing epel-release repo... "

    if [ "${os}" = "centos" ]; then
      yum install -d0 -e0 -y epel-release &> /dev/null
    elif [ "${os}" = "rhel" ] || [ "${os}" = "redhatenterpriseserver" ] || [ "${os}" = "ol" ]; then
      if [ "${dist}" = "7" ]; then
        yum install -d0 -e0 -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-${dist}.noarch.rpm &> /dev/null
      elif [ "${dist}" = "8" ]; then
        dnf install -d0 -e0 -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-${dist}.noarch.rpm &> /dev/null
      fi
    elif [ "${os}" = "rocky" ]; then
      if [ "${dist}" = "7" ]; then
        yum install -d0 -e0 -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-${dist}.noarch.rpm &> /dev/null
      elif [ "${dist}" = "8" ]; then
        dnf install -d0 -e0 -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-${dist}.noarch.rpm &> /dev/null
      fi
    fi

    echo "done."
  fi
}
    
    
2)

elif [ -e /etc/redhat-release ]; then
      os_hint=$(cat /etc/redhat-release | awk '{ print tolower($1) }')
      if [ "${os_hint}" = "centos" ]; then
        dist=$(cat /etc/redhat-release | awk '{ print $3 }' | awk -F '.' '{ print $1 }')
        os='centos'
      elif [ "${os_hint}" = "scientific" ]; then
        dist=$(cat /etc/redhat-release | awk '{ print $4 }' | awk -F '.' '{ print $1 }')
        os='scientific'
      elif [ "${os_hint}" = "rocky" ]; then
        dist=$(cat /etc/redhat-release | awk '{ print $3 }' | awk -F '.' '{ print $1 }')
        os='rocky'
      else
        dist=$(cat /etc/redhat-release | awk '{ print tolower($7) }' | cut -f1 --delimiter='.')
        os='redhatenterpriseserver'
      fi
      
3)
      
detect_repo_url ()
{
  # set common defaults used by all flavors
  # Various repositories of different flavors are stored in EL (e.g. CentOS and OL)
  family='redhat'
  family_short='EL'
  pkg_dist="${dist}"

  case "${os}" in
    ol|centos|rocky|rhel|redhatenterpriseserver)
      ;;
    *)
      unknown_os
      ;;
  esac

  repo_url="https://download.postgresql.org/pub/repos/yum/reporpms"
  repo_url+="/${family_short}-${pkg_dist}-x86_64"
  repo_url+="/pgdg-${family}-repo-latest.noarch.rpm"
}


# change the postgresql16-server references below to postgresql15-server

pgdg_check ()
{
  echo "Checking for postgresql15-server..."
  if yum list -q postgresql15-server &> /dev/null; then
    echo "Detected postgresql15-server..."
  else
    echo -n "Installing pgdg repo... "

    if [ "${dist}" = "8" ]; then
      dnf -qy module disable postgresql
    fi

    yum install -d0 -e0 -y "${repo_url}"

    echo "done."
  fi
}

======================================================================================

bash /tmp/add-citus-repo.sh
yum install -y citus121_15
yum install -y postgresql15-contrib
/usr/bin/postgresql-15-setup initdb

# edit the configuration files
vi $PGDATA/postgresql.conf

listen_addresses = '*'
shared_preload_libraries = 'citus'

vi $PGDATA/pg_hba.conf

# trust from 0.0.0.0/0 is for testing only
host    all    all    0.0.0.0/0    trust

# start the server so that shared_preload_libraries takes effect
systemctl enable --now postgresql-15

psql
create database citusdb;
-- the extension is per database; connect to citusdb first (\c citusdb) if it should live there
create extension citus;
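
Before moving on, it is worth confirming that the citus library was actually preloaded and the extension is installed in the database you plan to work in (a minimal check):

-- extension installed in the current database
SELECT extname, extversion FROM pg_extension WHERE extname = 'citus';

-- version of the loaded Citus build
SELECT citus_version();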

2. Configuring Citus

2-1. Configuring the Coordinator Node

Run the following command on the coordinator node.

SELECT citus_set_coordinator_host('192.168.54.120', 5432);
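
citus_set_coordinator_host records the coordinator's own address in the Citus metadata so that the workers can reach it. The registration can be verified in the pg_dist_node catalog, where the coordinator appears with groupid 0:

SELECT nodeid, groupid, nodename, nodeport, noderole, isactive
FROM pg_dist_node;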

2-2. Configuring Two Worker Nodes

Run the following commands on the coordinator node.

select master_add_node('192.168.54.121', 5432);
select master_add_node('192.168.54.122', 5432);
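
Each worker must already have been prepared the same way as in section 1 (Citus installed, shared_preload_libraries = 'citus', pg_hba access from the coordinator, and the citus extension created) before it can be added. Once added, the active workers can be listed from the coordinator:

-- list the registered, active worker nodes
select * from master_get_active_worker_nodes();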

2-3. Creating a Distributed Table

# Enter the following commands on the coordinator node

# create the table (a primary key is not required, but if one exists it must include the distribution column)
create table sales (
	deptno int not null,
	deptname varchar(20),
	total_amount int,
	constraint pk_sales primary key (deptno));

# create distributed table 
select create_distributed_table('sales', 'deptno', 'hash');

# Insert Data to distributed table
insert into sales (deptno,deptname,total_amount) values (1,'french_dept',10000);
insert into sales (deptno,deptname,total_amount) values (2,'german_dept',15000);
insert into sales (deptno,deptname,total_amount) values (3,'china_dept',21000);
insert into sales (deptno,deptname,total_amount) values (4,'gambia_dept',8750);
insert into sales (deptno,deptname,total_amount) values (5,'japan_dept',12010);
insert into sales (deptno,deptname,total_amount) values (6,'china_dept',35000);
insert into sales (deptno,deptname,total_amount) values (7,'nigeria_dept',10000);
insert into sales (deptno,deptname,total_amount) values (8,'senegal_dept',33000);
insert into sales (deptno,deptname,total_amount) values (9,'senegal_dept',33000);
insert into sales (deptno,deptname,total_amount) values (10,'senegal_dept',33000);
insert into sales (deptno,deptname,total_amount) values (11,'senegal_dept',33000);
insert into sales (deptno,deptname,total_amount) values (12,'senegal_dept',33000);
insert into sales (deptno,deptname,total_amount) values (13,'senegal_dept',33000);
insert into sales (deptno,deptname,total_amount) values (14,'senegal_dept',33000);
insert into sales (deptno,deptname,total_amount) values (15,'senegal_dept',33000);
insert into sales (deptno,deptname,total_amount) values (16,'senegal_dept',33000);
insert into sales (deptno,deptname,total_amount) values (17,'senegal_dept',33000);
insert into sales (deptno,deptname,total_amount) values (18,'senegal_dept',33000);
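
With hash distribution, every deptno value is deterministically routed to one shard. As an aside, Citus provides a helper to see which shard a given key lands in (the shard IDs will differ between installations):

-- find the shard that stores deptno = 3
SELECT get_shard_id_for_distribution_column('sales', 3);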

We can check the shard placements of the distributed table with the query below.

select * from citus_shards;

postgres=# select * from citus_shards ;
 table_name | shardid |  shard_name  | citus_table_type | colocation_id |    nodename    | nodeport | shard_size
------------+---------+--------------+------------------+---------------+----------------+----------+------------
 sales      |  102040 | sales_102040 | distributed      |             2 | 192.168.54.121 |     5432 |      24576
 sales      |  102041 | sales_102041 | distributed      |             2 | 192.168.54.122 |     5432 |      24576
 sales      |  102042 | sales_102042 | distributed      |             2 | 192.168.54.121 |     5432 |       8192
 sales      |  102043 | sales_102043 | distributed      |             2 | 192.168.54.122 |     5432 |       8192
 sales      |  102044 | sales_102044 | distributed      |             2 | 192.168.54.121 |     5432 |      24576
 sales      |  102045 | sales_102045 | distributed      |             2 | 192.168.54.122 |     5432 |       8192
 sales      |  102046 | sales_102046 | distributed      |             2 | 192.168.54.121 |     5432 |      24576
 sales      |  102047 | sales_102047 | distributed      |             2 | 192.168.54.122 |     5432 |       8192
 sales      |  102048 | sales_102048 | distributed      |             2 | 192.168.54.121 |     5432 |      24576
 sales      |  102049 | sales_102049 | distributed      |             2 | 192.168.54.122 |     5432 |      24576
 sales      |  102050 | sales_102050 | distributed      |             2 | 192.168.54.121 |     5432 |      24576
 sales      |  102051 | sales_102051 | distributed      |             2 | 192.168.54.122 |     5432 |       8192
 sales      |  102052 | sales_102052 | distributed      |             2 | 192.168.54.121 |     5432 |       8192
 sales      |  102053 | sales_102053 | distributed      |             2 | 192.168.54.122 |     5432 |       8192
 sales      |  102054 | sales_102054 | distributed      |             2 | 192.168.54.121 |     5432 |       8192
 sales      |  102055 | sales_102055 | distributed      |             2 | 192.168.54.122 |     5432 |      24576
 sales      |  102056 | sales_102056 | distributed      |             2 | 192.168.54.121 |     5432 |       8192
 sales      |  102057 | sales_102057 | distributed      |             2 | 192.168.54.122 |     5432 |       8192
 sales      |  102058 | sales_102058 | distributed      |             2 | 192.168.54.121 |     5432 |       8192
 sales      |  102059 | sales_102059 | distributed      |             2 | 192.168.54.122 |     5432 |       8192
 sales      |  102060 | sales_102060 | distributed      |             2 | 192.168.54.121 |     5432 |      24576
 sales      |  102061 | sales_102061 | distributed      |             2 | 192.168.54.122 |     5432 |       8192
 sales      |  102062 | sales_102062 | distributed      |             2 | 192.168.54.121 |     5432 |       8192
 sales      |  102063 | sales_102063 | distributed      |             2 | 192.168.54.122 |     5432 |      24576
 sales      |  102064 | sales_102064 | distributed      |             2 | 192.168.54.121 |     5432 |      24576
 sales      |  102065 | sales_102065 | distributed      |             2 | 192.168.54.122 |     5432 |       8192
 sales      |  102066 | sales_102066 | distributed      |             2 | 192.168.54.121 |     5432 |       8192
 sales      |  102067 | sales_102067 | distributed      |             2 | 192.168.54.122 |     5432 |       8192
 sales      |  102068 | sales_102068 | distributed      |             2 | 192.168.54.121 |     5432 |      24576
 sales      |  102069 | sales_102069 | distributed      |             2 | 192.168.54.122 |     5432 |       8192
 sales      |  102070 | sales_102070 | distributed      |             2 | 192.168.54.121 |     5432 |      24576
 sales      |  102071 | sales_102071 | distributed      |             2 | 192.168.54.122 |     5432 |      24576

(32 rows) 
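
Queries are written against sales exactly as against a regular table; the coordinator routes single-key queries to one shard and fans out multi-shard queries. A quick way to see this is EXPLAIN, which should show a Citus custom scan node (exact plan text varies by version):

-- single-shard (router) query
EXPLAIN SELECT * FROM sales WHERE deptno = 3;

-- multi-shard aggregate across all 32 shards
EXPLAIN SELECT deptname, sum(total_amount) FROM sales GROUP BY deptname;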

2-4. Adding Two More Worker Nodes

# Enter the following commands on the coordinator node

select master_add_node('192.168.54.123', 5432);
select master_add_node('192.168.54.124', 5432);

select * from master_get_active_worker_nodes();

postgres=# select * from master_get_active_worker_nodes();
   node_name    | node_port
----------------+-----------
 192.168.54.122 |      5432
 192.168.54.124 |      5432
 192.168.54.123 |      5432
 192.168.54.121 |      5432
(4 rows)

2-5. Rebalancing

Before actually executing rebalancing, you can check the execution plan.

select * from get_rebalance_table_shards_plan();

postgres=# select * from get_rebalance_table_shards_plan();
 table_name | shardid | shard_size |   sourcename   | sourceport |   targetname   | targetport
------------+---------+------------+----------------+------------+----------------+------------
 sales      |  102041 |          0 | 192.168.54.122 |       5432 | 192.168.54.123 |       5432
 sales      |  102040 |          0 | 192.168.54.121 |       5432 | 192.168.54.124 |       5432
 sales      |  102043 |          0 | 192.168.54.122 |       5432 | 192.168.54.123 |       5432
 sales      |  102042 |          0 | 192.168.54.121 |       5432 | 192.168.54.124 |       5432
 sales      |  102045 |          0 | 192.168.54.122 |       5432 | 192.168.54.123 |       5432
 sales      |  102044 |          0 | 192.168.54.121 |       5432 | 192.168.54.124 |       5432
 sales      |  102047 |          0 | 192.168.54.122 |       5432 | 192.168.54.123 |       5432
 sales      |  102046 |          0 | 192.168.54.121 |       5432 | 192.168.54.124 |       5432
 sales      |  102049 |          0 | 192.168.54.122 |       5432 | 192.168.54.123 |       5432
 sales      |  102048 |          0 | 192.168.54.121 |       5432 | 192.168.54.124 |       5432
 sales      |  102051 |          0 | 192.168.54.122 |       5432 | 192.168.54.123 |       5432
 sales      |  102050 |          0 | 192.168.54.121 |       5432 | 192.168.54.124 |       5432
 sales      |  102053 |          0 | 192.168.54.122 |       5432 | 192.168.54.123 |       5432
 sales      |  102052 |          0 | 192.168.54.121 |       5432 | 192.168.54.124 |       5432
 sales      |  102055 |          0 | 192.168.54.122 |       5432 | 192.168.54.123 |       5432
 sales      |  102054 |          0 | 192.168.54.121 |       5432 | 192.168.54.124 |       5432
 (16 rows)
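
The plan above uses the default rebalance strategy. As an aside, the strategies known to the rebalancer are kept in a catalog table and can be listed (in recent Citus versions by_disk_size and by_shard_count are built in):

-- strategies available to citus_rebalance_start()
SELECT name, default_strategy FROM pg_dist_rebalance_strategy;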

Shards are not moved onto the new workers automatically; to actually use them we have to start the rebalance manually.

select citus_rebalance_start();

postgres=# select citus_rebalance_start();
NOTICE:  Scheduled 48 moves as job 1
DETAIL:  Rebalance scheduled as background job
HINT:  To monitor progress, run: SELECT * FROM citus_rebalance_status();
 citus_rebalance_start
-----------------------
                     1
(1 row)
postgres=# select * from citus_shards;
 table_name | shardid |  shard_name  | citus_table_type | colocation_id |    nodename    | nodeport | shard_size
------------+---------+--------------+------------------+---------------+----------------+----------+------------
 sales      |  102040 | sales_102040 | distributed      |             2 | 192.168.54.124 |     5432 |      24576
 sales      |  102041 | sales_102041 | distributed      |             2 | 192.168.54.123 |     5432 |      24576
 sales      |  102042 | sales_102042 | distributed      |             2 | 192.168.54.124 |     5432 |       8192
 sales      |  102043 | sales_102043 | distributed      |             2 | 192.168.54.123 |     5432 |       8192
 sales      |  102044 | sales_102044 | distributed      |             2 | 192.168.54.121 |     5432 |      24576
 sales      |  102045 | sales_102045 | distributed      |             2 | 192.168.54.123 |     5432 |       8192
 sales      |  102046 | sales_102046 | distributed      |             2 | 192.168.54.121 |     5432 |      24576
 sales      |  102047 | sales_102047 | distributed      |             2 | 192.168.54.122 |     5432 |       8192
 sales      |  102048 | sales_102048 | distributed      |             2 | 192.168.54.121 |     5432 |      24576
 sales      |  102049 | sales_102049 | distributed      |             2 | 192.168.54.122 |     5432 |      24576
 sales      |  102050 | sales_102050 | distributed      |             2 | 192.168.54.121 |     5432 |      24576
 sales      |  102051 | sales_102051 | distributed      |             2 | 192.168.54.122 |     5432 |       8192
 sales      |  102052 | sales_102052 | distributed      |             2 | 192.168.54.121 |     5432 |       8192
 sales      |  102053 | sales_102053 | distributed      |             2 | 192.168.54.122 |     5432 |       8192
 sales      |  102054 | sales_102054 | distributed      |             2 | 192.168.54.121 |     5432 |       8192
 sales      |  102055 | sales_102055 | distributed      |             2 | 192.168.54.122 |     5432 |      24576
 sales      |  102056 | sales_102056 | distributed      |             2 | 192.168.54.121 |     5432 |       8192
 sales      |  102057 | sales_102057 | distributed      |             2 | 192.168.54.122 |     5432 |       8192
 sales      |  102058 | sales_102058 | distributed      |             2 | 192.168.54.121 |     5432 |       8192
 sales      |  102059 | sales_102059 | distributed      |             2 | 192.168.54.122 |     5432 |       8192
 sales      |  102060 | sales_102060 | distributed      |             2 | 192.168.54.121 |     5432 |      24576
 sales      |  102061 | sales_102061 | distributed      |             2 | 192.168.54.122 |     5432 |       8192
 sales      |  102062 | sales_102062 | distributed      |             2 | 192.168.54.121 |     5432 |       8192
 sales      |  102063 | sales_102063 | distributed      |             2 | 192.168.54.122 |     5432 |      24576
 sales      |  102064 | sales_102064 | distributed      |             2 | 192.168.54.121 |     5432 |      24576
 sales      |  102065 | sales_102065 | distributed      |             2 | 192.168.54.122 |     5432 |       8192
 sales      |  102066 | sales_102066 | distributed      |             2 | 192.168.54.121 |     5432 |       8192
 sales      |  102067 | sales_102067 | distributed      |             2 | 192.168.54.122 |     5432 |       8192
 sales      |  102068 | sales_102068 | distributed      |             2 | 192.168.54.121 |     5432 |      24576
 sales      |  102069 | sales_102069 | distributed      |             2 | 192.168.54.122 |     5432 |       8192
 sales      |  102070 | sales_102070 | distributed      |             2 | 192.168.54.121 |     5432 |      24576
 sales      |  102071 | sales_102071 | distributed      |             2 | 192.168.54.122 |     5432 |      24576
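
Because the rebalance runs as a background job, progress can be monitored from the coordinator as the HINT suggests, or waited on synchronously (a minimal sketch, assuming Citus 11.2 or later where the background rebalancer is available):

-- current state of the rebalance job
SELECT * FROM citus_rebalance_status();

-- block until the running rebalance has finished
SELECT citus_rebalance_wait();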

3. Details

3-1. Redundancy 1 (default 1)

The default redundancy in Citus is 1, controlled by the citus.shard_replication_factor setting. A redundancy of 1 means each shard is placed on exactly one worker: the table is only split, never copied.

For shards to be replicated across workers and provide availability, the redundancy value must be set to 2 or higher.
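
The current value of citus.shard_replication_factor can be checked with SHOW, and it can be persisted cluster-wide with ALTER SYSTEM instead of a session-level SET (a minimal sketch):

-- current value (default 1)
SHOW citus.shard_replication_factor;

-- persist the new default and reload the configuration
ALTER SYSTEM SET citus.shard_replication_factor = 2;
SELECT pg_reload_conf();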

3-2. Redundancy 2

Let's set the Citus redundancy (citus.shard_replication_factor) to 2.

set citus.shard_replication_factor=2;

postgres=# set citus.shard_replication_factor=2;
SET
postgres=# select * from get_rebalance_table_shards_plan();
 table_name | shardid | shard_size | sourcename | sourceport | targetname | targetport
------------+---------+------------+------------+------------+------------+------------
(0 rows)

Checking the rebalance plan after changing the setting shows no planned moves. This confirms that the new redundancy value is not applied retroactively to tables created before it was set; it only affects tables distributed afterwards.
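
One way to confirm this is to count placements per shard in the Citus metadata: sales, created while the replication factor was 1, should still show a single placement per shard (a hedged example using the pg_dist_shard and pg_dist_placement catalogs):

-- number of placements (copies) per shard of sales
SELECT s.logicalrelid::regclass AS table_name,
       s.shardid,
       count(p.placementid)     AS placements
FROM pg_dist_shard s
JOIN pg_dist_placement p USING (shardid)
WHERE s.logicalrelid = 'sales'::regclass
GROUP BY 1, 2
ORDER BY s.shardid;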

# create table 
postgres=# create table test_2 (id int, name text, sex varchar(6));
CREATE TABLE

# set primary key
postgres=# alter table test_2 add constraint test2_pk primary key (id) ;
ALTER TABLE

# create distributed_table
postgres=# select create_distributed_table('test_2', 'id');
 create_distributed_table
--------------------------

# insert data
postgres=# insert into test_2 values (1, 'jason', 'male');
INSERT 0 1
postgres=# insert into test_2 values (2, 'son', 'male');
INSERT 0 1
postgres=# insert into test_2 values (3, 'ellisa', 'female');
INSERT 0 1

select * from citus_shards where table_name = 'test_2'::regclass ;
 table_name | shardid |  shard_name   | citus_table_type | colocation_id |    nodename    | nodeport | shard_size
------------+---------+---------------+------------------+---------------+----------------+----------+------------
 test_2     |  102104 | test_2_102104 | distributed      |             3 | 192.168.54.123 |     5432 |      16384
 test_2     |  102104 | test_2_102104 | distributed      |             3 | 192.168.54.124 |     5432 |      16384
 test_2     |  102105 | test_2_102105 | distributed      |             3 | 192.168.54.123 |     5432 |      32768
 test_2     |  102105 | test_2_102105 | distributed      |             3 | 192.168.54.124 |     5432 |      32768
 test_2     |  102106 | test_2_102106 | distributed      |             3 | 192.168.54.123 |     5432 |      16384
 test_2     |  102106 | test_2_102106 | distributed      |             3 | 192.168.54.124 |     5432 |      16384
 test_2     |  102107 | test_2_102107 | distributed      |             3 | 192.168.54.123 |     5432 |      16384
 test_2     |  102107 | test_2_102107 | distributed      |             3 | 192.168.54.124 |     5432 |      16384
 test_2     |  102108 | test_2_102108 | distributed      |             3 | 192.168.54.123 |     5432 |      16384
 test_2     |  102108 | test_2_102108 | distributed      |             3 | 192.168.54.124 |     5432 |      16384
 test_2     |  102109 | test_2_102109 | distributed      |             3 | 192.168.54.123 |     5432 |      16384
 test_2     |  102109 | test_2_102109 | distributed      |             3 | 192.168.54.124 |     5432 |      16384
 test_2     |  102110 | test_2_102110 | distributed      |             3 | 192.168.54.123 |     5432 |      16384
 test_2     |  102110 | test_2_102110 | distributed      |             3 | 192.168.54.124 |     5432 |      16384
 test_2     |  102111 | test_2_102111 | distributed      |             3 | 192.168.54.123 |     5432 |      16384
 test_2     |  102111 | test_2_102111 | distributed      |             3 | 192.168.54.124 |     5432 |      16384
 test_2     |  102112 | test_2_102112 | distributed      |             3 | 192.168.54.123 |     5432 |      16384
 test_2     |  102112 | test_2_102112 | distributed      |             3 | 192.168.54.124 |     5432 |      16384
 test_2     |  102113 | test_2_102113 | distributed      |             3 | 192.168.54.123 |     5432 |      16384
 test_2     |  102113 | test_2_102113 | distributed      |             3 | 192.168.54.124 |     5432 |      16384
 test_2     |  102114 | test_2_102114 | distributed      |             3 | 192.168.54.123 |     5432 |      16384
 test_2     |  102114 | test_2_102114 | distributed      |             3 | 192.168.54.124 |     5432 |      16384
 test_2     |  102115 | test_2_102115 | distributed      |             3 | 192.168.54.123 |     5432 |      16384
 test_2     |  102115 | test_2_102115 | distributed      |             3 | 192.168.54.124 |     5432 |      16384
 test_2     |  102116 | test_2_102116 | distributed      |             3 | 192.168.54.123 |     5432 |      16384
 test_2     |  102116 | test_2_102116 | distributed      |             3 | 192.168.54.124 |     5432 |      16384
 test_2     |  102117 | test_2_102117 | distributed      |             3 | 192.168.54.123 |     5432 |      16384
 test_2     |  102117 | test_2_102117 | distributed      |             3 | 192.168.54.124 |     5432 |      16384
 test_2     |  102118 | test_2_102118 | distributed      |             3 | 192.168.54.123 |     5432 |      16384
 test_2     |  102118 | test_2_102118 | distributed      |             3 | 192.168.54.124 |     5432 |      16384
 test_2     |  102119 | test_2_102119 | distributed      |             3 | 192.168.54.123 |     5432 |      32768
 test_2     |  102119 | test_2_102119 | distributed      |             3 | 192.168.54.124 |     5432 |      32768
 test_2     |  102120 | test_2_102120 | distributed      |             3 | 192.168.54.121 |     5432 |      16384
 test_2     |  102120 | test_2_102120 | distributed      |             3 | 192.168.54.122 |     5432 |      16384
 test_2     |  102121 | test_2_102121 | distributed      |             3 | 192.168.54.121 |     5432 |      16384
 test_2     |  102121 | test_2_102121 | distributed      |             3 | 192.168.54.122 |     5432 |      16384
 test_2     |  102122 | test_2_102122 | distributed      |             3 | 192.168.54.121 |     5432 |      16384
 test_2     |  102122 | test_2_102122 | distributed      |             3 | 192.168.54.122 |     5432 |      16384
 test_2     |  102123 | test_2_102123 | distributed      |             3 | 192.168.54.121 |     5432 |      16384
 test_2     |  102123 | test_2_102123 | distributed      |             3 | 192.168.54.122 |     5432 |      16384
 test_2     |  102124 | test_2_102124 | distributed      |             3 | 192.168.54.121 |     5432 |      16384
 test_2     |  102124 | test_2_102124 | distributed      |             3 | 192.168.54.122 |     5432 |      16384
 test_2     |  102125 | test_2_102125 | distributed      |             3 | 192.168.54.121 |     5432 |      16384
 test_2     |  102125 | test_2_102125 | distributed      |             3 | 192.168.54.122 |     5432 |      16384
 test_2     |  102126 | test_2_102126 | distributed      |             3 | 192.168.54.121 |     5432 |      16384
 test_2     |  102126 | test_2_102126 | distributed      |             3 | 192.168.54.122 |     5432 |      16384
 test_2     |  102127 | test_2_102127 | distributed      |             3 | 192.168.54.121 |     5432 |      16384
 test_2     |  102127 | test_2_102127 | distributed      |             3 | 192.168.54.122 |     5432 |      16384
 test_2     |  102128 | test_2_102128 | distributed      |             3 | 192.168.54.121 |     5432 |      32768
 test_2     |  102128 | test_2_102128 | distributed      |             3 | 192.168.54.122 |     5432 |      32768
 test_2     |  102129 | test_2_102129 | distributed      |             3 | 192.168.54.121 |     5432 |      16384
 test_2     |  102129 | test_2_102129 | distributed      |             3 | 192.168.54.122 |     5432 |      16384
 test_2     |  102130 | test_2_102130 | distributed      |             3 | 192.168.54.121 |     5432 |      16384
 test_2     |  102130 | test_2_102130 | distributed      |             3 | 192.168.54.122 |     5432 |      16384
 test_2     |  102131 | test_2_102131 | distributed      |             3 | 192.168.54.121 |     5432 |      16384
 test_2     |  102131 | test_2_102131 | distributed      |             3 | 192.168.54.122 |     5432 |      16384
 test_2     |  102132 | test_2_102132 | distributed      |             3 | 192.168.54.121 |     5432 |      16384
 test_2     |  102132 | test_2_102132 | distributed      |             3 | 192.168.54.122 |     5432 |      16384
 test_2     |  102133 | test_2_102133 | distributed      |             3 | 192.168.54.121 |     5432 |      16384
 test_2     |  102133 | test_2_102133 | distributed      |             3 | 192.168.54.122 |     5432 |      16384
 test_2     |  102134 | test_2_102134 | distributed      |             3 | 192.168.54.121 |     5432 |      16384
 test_2     |  102134 | test_2_102134 | distributed      |             3 | 192.168.54.122 |     5432 |      16384
 test_2     |  102135 | test_2_102135 | distributed      |             3 | 192.168.54.121 |     5432 |      16384
 test_2     |  102135 | test_2_102135 | distributed      |             3 | 192.168.54.122 |     5432 |      16384
(64 rows)

It works!

3-3. Setting Shard Count (default 32)

set citus.shard_count=4;

postgres=#  set citus.shard_count=4;
SET
postgres=# select * from get_rebalance_table_shards_plan();
 table_name | shardid | shard_size | sourcename | sourceport | targetname | targetport
------------+---------+------------+------------+------------+------------+------------
(0 rows)
# create table 
postgres=# create table test_3 (id int, name text, sex varchar(6));
CREATE TABLE

# set primary key
postgres=# alter table test_3 add constraint test3_pk primary key (id) ;
ALTER TABLE

# create distributed_table
postgres=# select create_distributed_table('test_3', 'id');
 create_distributed_table
--------------------------

# insert data
postgres=# insert into test_3 values (1, 'jason', 'male');
INSERT 0 1
postgres=# insert into test_3 values (2, 'son', 'male');
INSERT 0 1
postgres=# insert into test_3 values (3, 'ellisa', 'female');
INSERT 0 1
postgres=# select * from citus_shards where table_name = 'test_3'::regclass;
 table_name | shardid |  shard_name   | citus_table_type | colocation_id |    nodename    | nodeport | shard_size
------------+---------+---------------+------------------+---------------+----------------+----------+------------
 test_3     |  102136 | test_3_102136 | distributed      |             4 | 192.168.54.121 |     5432 |      32768
 test_3     |  102136 | test_3_102136 | distributed      |             4 | 192.168.54.122 |     5432 |      32768
 test_3     |  102137 | test_3_102137 | distributed      |             4 | 192.168.54.122 |     5432 |      32768
 test_3     |  102137 | test_3_102137 | distributed      |             4 | 192.168.54.123 |     5432 |      32768
 test_3     |  102138 | test_3_102138 | distributed      |             4 | 192.168.54.123 |     5432 |      16384
 test_3     |  102138 | test_3_102138 | distributed      |             4 | 192.168.54.124 |     5432 |      16384
 test_3     |  102139 | test_3_102139 | distributed      |             4 | 192.168.54.124 |     5432 |      32768
 test_3     |  102139 | test_3_102139 | distributed      |             4 | 192.168.54.121 |     5432 |      32768
(8 rows)
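
The default of 32 comes from citus.shard_count and, like the replication factor, it only affects tables distributed after the change. For a table that is already distributed, the shard count can be changed with alter_distributed_table, which copies the data into new shards (a hedged sketch; available since Citus 10, shown here on the sales table from section 2):

-- current default shard count for new distributed tables
SHOW citus.shard_count;

-- re-shard an existing distributed table (data is rewritten into new shards)
SELECT alter_distributed_table('sales', shard_count := 16);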

3-4. Command Summary

select * from master_get_active_worker_nodes();

select * from citus_shards;

select * from get_rebalance_table_shards_plan();

select citus_rebalance_start();

select * from citus_rebalance_status();

set citus.shard_replication_factor=2;

set citus.shard_count=4;
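
A few more commands that came in handy while testing (a hedged aside; all of these are standard Citus functions and catalogs):

-- all registered nodes (coordinator and workers)
select * from pg_dist_node;

-- version of the loaded Citus build
select citus_version();

-- remove a worker that no longer holds any shard placements
select citus_remove_node('192.168.54.124', 5432);

-- turn a distributed table back into a regular local table
select undistribute_table('test_3');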
