# TiKV config template
# Human-readable big numbers:
# File size(based on byte): KB, MB, GB, TB, PB
# e.g.: 1_048_576 = "1MB"
# Time(based on ms): ms, s, m, h
# e.g.: 78_000 = "1.3m"
# log level: trace, debug, info, warning, error, critical.
# Note that `debug` and `trace` are only available in development builds.
# log-level = "info"
# File to store logs. If it is empty, logs are written to stderr.
# log-file = ""
# Timespan between rotating the log files.
# Once this timespan passes, the existing log file gets a timestamp appended to its name
# and a new file is created.
# log-rotation-timespan = "24h"
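# For illustration only (the level and file path below are hypothetical, not defaults),
# warning-level logs could be written to a file and rotated every 24 hours:
# log-level = "warning"
# log-file = "/var/log/tikv/tikv.log"
# log-rotation-timespan = "24h"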
[readpool.storage]
# size of thread pool for high-priority operations
# high-concurrency = 4
# size of thread pool for normal-priority operations
# normal-concurrency = 4
# size of thread pool for low-priority operations
# low-concurrency = 4
# Max number of running high-priority operations per worker; new tasks are rejected if exceeded.
# max-tasks-per-worker-high = 2000
# Max number of running normal-priority operations per worker; new tasks are rejected if exceeded.
# max-tasks-per-worker-normal = 2000
# Max number of running low-priority operations per worker; new tasks are rejected if exceeded.
# max-tasks-per-worker-low = 2000
# Stack size for each thread in the thread pool.
# stack-size = "10MB"
[readpool.coprocessor]
# Notice: if CPU_NUM > 8, default thread pool size for coprocessors
# will be set to CPU_NUM * 0.8.
# high-concurrency = 8
# normal-concurrency = 8
# low-concurrency = 8
# max-tasks-per-worker-high = 2000
# max-tasks-per-worker-normal = 2000
# max-tasks-per-worker-low = 2000
# stack-size = "10MB"
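# Worked example of the notice above (illustrative arithmetic, not a recommendation):
# on a 16-core machine, CPU_NUM * 0.8 = 12.8, so the default coprocessor pool size
# becomes roughly 12 threads per priority level instead of 8. Uncomment the
# *-concurrency options above to override the computed default.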
[server]
# set listening address.
# addr = "127.0.0.1:20160"
# Set the advertised listening address for client communication; if not set, addr is used instead.
# advertise-addr = ""
# notify capacity, 40960 is suitable for about 7000 regions.
# notify-capacity = 40960
# Maximum number of messages that can be processed in one tick.
# messages-per-tick = 4096
## Status address.
## This is used for reporting the status of TiKV directly through the HTTP address.
## Empty string means disabling it.
# status-addr = "127.0.0.1:20180"
## Set the maximum number of worker threads for the status report HTTP service.
# status-thread-pool-size = 1
## Compression type for gRPC channel: none, deflate or gzip.
# grpc-compression-type = "none"
# size of thread pool for grpc server.
# grpc-concurrency = 4
# The maximum number of concurrent streams/requests on a client connection.
# grpc-concurrent-stream = 1024
# The number of connections with each tikv server to send raft messages.
# grpc-raft-conn-num = 10
# Amount to read ahead on individual grpc streams.
# grpc-stream-initial-window-size = "2MB"
# Time to wait before sending out a ping to check if server is still alive.
# This is only for communications between tikv instances.
# grpc-keepalive-time = "10s"
# Time to wait before closing the connection without receiving keepalive ping
# ack.
# grpc-keepalive-timeout = "3s"
# How many snapshots can be sent concurrently.
# concurrent-send-snap-limit = 32
# How many snapshots can be received concurrently.
# concurrent-recv-snap-limit = 32
# Max recursion level allowed when decoding DAG expressions.
# end-point-recursion-limit = 1000
# Max time to handle a coprocessor request before timing out.
# end-point-request-max-handle-duration = "60s"
# The max bytes per second at which snapshots can be written to disk;
# set this based on your disk performance.
# snap-max-write-bytes-per-sec = "100MB"
# set attributes about this server, e.g. { zone = "us-west-1", disk = "ssd" }.
# labels = {}
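# For example, uncommenting the line below (values taken from the example above,
# adjusted to your own topology) attaches location and disk-type labels to this server:
# labels = { zone = "us-west-1", disk = "ssd" }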
[storage]
# set the path to rocksdb directory.
data-dir = "/var/lib/tikv/store"
# notify capacity of scheduler's channel
# scheduler-notify-capacity = 10240
# The number of slots in scheduler latches, which controls concurrency for writes.
# scheduler-concurrency = 2048000
# Scheduler's worker pool size; increase it for write-heavy workloads,
# but keep it below the total number of CPU cores.
# scheduler-worker-pool-size = 4
# When the pending write bytes exceed this threshold,
# the "scheduler too busy" error is returned.
# scheduler-pending-write-threshold = "100MB"
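# Illustrative tuning sketch (host size and value are hypothetical): on a 16-core
# machine with a write-heavy workload, the scheduler pool might be enlarged while
# staying below the core count, per the guidance above:
# scheduler-worker-pool-size = 8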
[pd]
# pd endpoints
endpoints = ["127.0.0.1:2379"]
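# A highly-available deployment typically lists every PD instance; the hostnames
# below are placeholders, not defaults:
# endpoints = ["pd1.internal:2379", "pd2.internal:2379", "pd3.internal:2379"]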
[raftstore]
# true (the default) for high reliability; this can prevent data loss on power failure.
# sync-log = true
# Minimizes disruption when a partitioned node rejoins the cluster by using a two-phase election.
# prevote = true
# set the path to raftdb directory, default value is data-dir/raft
# raftdb-path = ""
# Set the store capacity; if not set, the disk capacity is used.
# capacity = 0
# notify capacity, 40960 is suitable for about 7000 regions.
# notify-capacity = 40960
# Maximum number of messages that can be processed in one tick.
# messages-per-tick = 4096
# Region heartbeat tick interval for reporting to pd.
# pd-heartbeat-tick-interval = "60s"
# Store heartbeat tick interval for reporting to pd.
# pd-store-heartbeat-tick-interval = "10s"
# When the accumulated size change of a region exceeds region-split-check-diff,
# check whether the region should be split.
# region-split-check-diff = "6MB"
# Interval to check whether a region needs to be split.
# split-region-check-tick-interval = "10s"
# When a raft entry exceeds this max size, proposing the entry is rejected.
# raft-entry-max-size = "8MB"
# Interval to gc unnecessary raft log.
# raft-log-gc-tick-interval = "10s"
# A threshold to GC stale raft logs; must be >= 1.
# raft-log-gc-threshold = 50
# When the entry count exceeds this value, GC is forcibly triggered.
# raft-log-gc-count-limit = 72000
# When the approximate size of raft log entries exceeds this value, GC is forcibly triggered.
# It's recommended to set it to 3/4 of region-split-size (see the worked example below).
# raft-log-gc-size-limit = "72MB"
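# Worked example of the recommendation above: with the default region-split-size
# of 96MB (see the [coprocessor] section), 3/4 * 96MB = 72MB, which matches the
# default raft-log-gc-size-limit.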
# When a peer hasn't been active for max-peer-down-duration,
# we will consider this peer to be down and report it to pd.
# max-peer-down-duration = "5m"
# Interval to check whether to start a manual compaction for a region.
# region-compact-check-interval = "5m"
# Number of regions for each time to check.
# region-compact-check-step = 100
# The minimum number of delete tombstones to trigger manual compaction.
# region-compact-min-tombstones = 10000
# The minimum percentage of delete tombstones to trigger manual compaction.
# Should be between 1 and 100. Manual compaction is only triggered when the number
# of delete tombstones exceeds region-compact-min-tombstones and the percentage
# of delete tombstones exceeds region-compact-tombstones-percent.
# region-compact-tombstones-percent = 30
# Interval to check whether to start a manual compaction for the lock column family.
# If the written bytes for the lock column family reach lock-cf-compact-bytes-threshold,
# a manual compaction for the lock column family is triggered.
# lock-cf-compact-interval = "10m"
# lock-cf-compact-bytes-threshold = "256MB"
# Interval (in seconds) to check whether region data is consistent.
# consistency-check-interval = 0
# Use delete range to drop a large number of continuous keys.
# use-delete-range = false
# Delay before deleting a stale peer.
# clean-stale-peer-delay = "10m"
# Interval to cleanup import sst files.
# cleanup-import-sst-interval = "10m"
[coprocessor]
# When true, TiKV tries to split a region at table boundaries if the region
# spans multiple tables. It is recommended to turn this option off if a large
# number of tables will be created.
# split-region-on-table = true
# When a region's size exceeds region-max-size, the region is split into two,
# with the left region's size being region-split-size or a little smaller.
# region-max-size = "144MB"
# region-split-size = "96MB"
# When a region's key count exceeds region-max-keys, the region is split into two,
# with the left region's key count being region-split-keys or a little smaller.
# region-max-keys = 1440000
# region-split-keys = 960000
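# Worked example with the defaults above: a region that grows past 144MB is split
# so that the left region holds about 96MB and the right region the remaining
# ~48MB; key-based splitting works the same way with 1_440_000 / 960_000 keys.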
[rocksdb]
# Maximum number of concurrent background jobs (compactions and flushes)
# max-background-jobs = 8
# This value represents the maximum number of threads that will concurrently perform a
# compaction job by breaking it into multiple, smaller ones that are run simultaneously.
# Default: 1 (i.e. no subcompactions)
# max-sub-compactions = 1
# Number of open files that can be used by the DB. You may need to
# increase this if your database has a large working set. Value -1 means
# files opened are always kept open. You can estimate number of files based
# on target_file_size_base and target_file_size_multiplier for level-based
# compaction.
# If max-open-files = -1, RocksDB will prefetch index and filter blocks into
# block cache at startup, so if your database has a large working set, it will
# take several minutes to open the db.
# max-open-files = 40960
# Max size of rocksdb's MANIFEST file.
# For detailed explanation please refer to https://github.com/facebook/rocksdb/wiki/MANIFEST
# max-manifest-file-size = "128MB"
# If true, the database will be created if it is missing.
# create-if-missing = true
# rocksdb wal recovery mode
# 0 : TolerateCorruptedTailRecords, tolerate incomplete record in trailing data on all logs;
# 1 : AbsoluteConsistency, We don't expect to find any corruption in the WAL;
# 2 : PointInTimeRecovery, Recover to point-in-time consistency;
# 3 : SkipAnyCorruptedRecords, Recovery after a disaster;
# wal-recovery-mode = 2
# rocksdb write-ahead logs dir path
# This specifies the absolute dir path for write-ahead logs (WAL).
# If it is empty, the log files will be in the same dir as data.
# When the rocksdb data directory is set to an in-memory path such as /dev/shm,
# you may want to set wal-dir to a directory on persistent storage.
# See https://github.com/facebook/rocksdb/wiki/How-to-persist-in-memory-RocksDB-database
# wal-dir = "/tmp/tikv/store"
# The following two fields affect how archived write-ahead logs will be deleted.
# 1. If both are set to 0, logs will be deleted ASAP and will not get into the archive.
# 2. If wal-ttl-seconds is 0 and wal-size-limit is not 0,
# WAL files will be checked every 10 min and if the total size is greater
# than wal-size-limit, they will be deleted starting with the
# earliest until wal-size-limit is met. All empty files will be deleted.
# 3. If wal-ttl-seconds is not 0 and wal-size-limit is 0, then
# WAL files will be checked every wal-ttl-seconds / 2 and those that
# are older than wal-ttl-seconds will be deleted.
# 4. If both are not 0, WAL files will be checked every 10 min and both
# checks will be performed with ttl being first.
# When the rocksdb data directory is set to an in-memory path such as /dev/shm, you may want to set
# wal-ttl-seconds to a value greater than 0 (like 86400) and back up your db on a regular basis.
# See https://github.com/facebook/rocksdb/wiki/How-to-persist-in-memory-RocksDB-database
# wal-ttl-seconds = 0
# wal-size-limit = 0
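# Illustrative example of case 4 above (values are hypothetical): keep archived WAL
# files for one day, with their total size capped at 1GB.
# wal-ttl-seconds = 86400
# wal-size-limit = "1GB"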
# rocksdb max total wal size
# max-total-wal-size = "4GB"
# RocksDB Statistics provides cumulative stats over time.
# Turning statistics on introduces about 5%-10% overhead for RocksDB,
# but it is worthwhile for understanding RocksDB's internal status.
# enable-statistics = true
# Dump statistics periodically in information logs.
# Same as rocksdb's default value (10 min).
# stats-dump-period = "10m"
# Per the RocksDB FAQ (https://github.com/facebook/rocksdb/wiki/RocksDB-FAQ),
# if you run rocksdb on multiple disks or spinning disks, you should set this
# value to at least 2MB.
# compaction-readahead-size = 0
# This is the maximum buffer size that is used by WritableFileWriter.
# writable-file-max-buffer-size = "1MB"
# Use O_DIRECT for both reads and writes in background flush and compactions
# use-direct-io-for-flush-and-compaction = false
# Limit the disk IO of compaction and flush. Compaction and flush can cause
# terrible spikes if they exceed a certain threshold. Consider setting this to
# 50% ~ 80% of the disk throughput for a more stable result. However, in heavy
# write workload, limiting compaction and flush speed can cause write stalls too.
# rate-bytes-per-sec = 0
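# Illustrative sizing for the guidance above (disk throughput is hypothetical):
# for a disk that sustains about 500MB/s, 50%-80% suggests a limit between roughly
# 250MB and 400MB per second, e.g.:
# rate-bytes-per-sec = "300MB"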
# Enable or disable the pipelined write
# enable-pipelined-write = true
# Allows OS to incrementally sync files to disk while they are being
# written, asynchronously, in the background.
# bytes-per-sync = "1MB"
# Allows OS to incrementally sync WAL to disk while it is being written.
# wal-bytes-per-sync = "512KB"
# Specify the maximal size of the Rocksdb info log file. If the log file
# is larger than `max_log_file_size`, a new info log file will be created.
# If max_log_file_size == 0, all logs will be written to one log file.
# info-log-max-size = "1GB"
# Time for the Rocksdb info log file to roll (in seconds).
# If specified with non-zero value, log file will be rolled
# if it has been active longer than `log_file_time_to_roll`.
# 0 means disabled.
# info-log-roll-time = "0"
# Maximum number of Rocksdb info log files to be kept.
# info-log-keep-log-file-num = 10
# This specifies the Rocksdb info LOG dir.
# If it is empty, the log files will be in the same dir as data.
# If it is non empty, the log files will be in the specified dir,
# and the db data dir's absolute path will be used as the log file
# name's prefix.
# info-log-dir = ""
# The default column family is used to store the actual data of the database.
[rocksdb.defaultcf]
# Compression method (if any) used to compress a block.
# no: kNoCompression
# snappy: kSnappyCompression
# zlib: kZlibCompression
# bzip2: kBZip2Compression
# lz4: kLZ4Compression
# lz4hc: kLZ4HCCompression
# zstd: kZSTD
# per level compression
# compression-per-level = ["no", "no", "lz4", "lz4", "lz4", "zstd", "zstd"]
# Approximate size of user data packed per block. Note that the
# block size specified here corresponds to uncompressed data.
# block-size = "64KB"
# If you're doing point lookups you definitely want to turn bloom filters on. We use
# bloom filters to avoid unnecessary disk reads. The default bits_per_key is 10, which
# yields a ~1% false positive rate. Larger bits_per_key values reduce the false positive
# rate but increase memory usage and space amplification.
# bloom-filter-bits-per-key = 10
# false means one bloom filter per SST file; true means every block has a corresponding bloom filter.
# block-based-bloom-filter = false
# level0-file-num-compaction-trigger = 4
# Soft limit on number of level-0 files. We start slowing down writes at this point.
# level0-slowdown-writes-trigger = 20
# Maximum number of level-0 files. We stop writes at this point.
# level0-stop-writes-trigger = 36
# Amount of data to build up in memory (backed by an unsorted log
# on disk) before converting to a sorted on-disk file.
# write-buffer-size = "128MB"
# The maximum number of write buffers that are built up in memory.
# max-write-buffer-number = 5
# The minimum number of write buffers that will be merged together
# before writing to storage.
# min-write-buffer-number-to-merge = 1
# Control maximum total data size for base level (level 1).
# max-bytes-for-level-base = "512MB"
# Target file size for compaction.
# target-file-size-base = "8MB"
# Max total bytes in one compaction (RocksDB's max_compaction_bytes).
# max-compaction-bytes = "2GB"
# There are four different algorithms to pick files to compact.
# 0 : ByCompensatedSize
# 1 : OldestLargestSeqFirst
# 2 : OldestSmallestSeqFirst
# 3 : MinOverlappingRatio
# compaction-pri = 3
# The block cache is used to cache uncompressed blocks; a big block cache can speed up reads.
# In normal cases it should be tuned to 30%-50% of the system's total memory.
# block-cache-size = "1GB"
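# Illustrative sizing for the guidance above (machine size is hypothetical):
# on a host with 16GB of RAM, 30%-50% of total memory is roughly 5GB-8GB, e.g.:
# block-cache-size = "6GB"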
# Indicates whether index/filter blocks are put in the block cache.
# If not specified, each "table reader" object pre-loads the index/filter block
# during table initialization.
# cache-index-and-filter-blocks = true
# Pin level0 filter and index blocks in cache.
# pin-l0-filter-and-index-blocks = true
# Enable read amplification statistics.
# value => memory usage (percentage of loaded blocks memory)
# 1 => 12.50 %
# 2 => 06.25 %
# 4 => 03.12 %
# 8 => 01.56 %
# 16 => 00.78 %
# read-amp-bytes-per-bit = 0
# Pick target size of each level dynamically.
# dynamic-level-bytes = true
# Options for the write column family.
# The write column family is used to store commit information in the MVCC model.
[rocksdb.writecf]
# compression-per-level = ["no", "no", "lz4", "lz4", "lz4", "zstd", "zstd"]
# block-size = "64KB"
# write-buffer-size = "128MB"
# max-write-buffer-number = 5
# min-write-buffer-number-to-merge = 1
# max-bytes-for-level-base = "512MB"
# target-file-size-base = "8MB"
# In normal cases it should be tuned to 10%-30% of the system's total memory.
# block-cache-size = "256MB"
# level0-file-num-compaction-trigger = 4
# level0-slowdown-writes-trigger = 20
# level0-stop-writes-trigger = 36
# cache-index-and-filter-blocks = true
# pin-l0-filter-and-index-blocks = true
# compaction-pri = 3
# read-amp-bytes-per-bit = 0
# dynamic-level-bytes = true
[rocksdb.lockcf]
# compression-per-level = ["no", "no", "no", "no", "no", "no", "no"]
# block-size = "16KB"
# write-buffer-size = "128MB"
# max-write-buffer-number = 5
# min-write-buffer-number-to-merge = 1
# max-bytes-for-level-base = "128MB"
# target-file-size-base = "8MB"
# block-cache-size = "256MB"
# level0-file-num-compaction-trigger = 1
# level0-slowdown-writes-trigger = 20
# level0-stop-writes-trigger = 36
# cache-index-and-filter-blocks = true
# pin-l0-filter-and-index-blocks = true
# compaction-pri = 0
# read-amp-bytes-per-bit = 0
# dynamic-level-bytes = true
[raftdb]
# max-sub-compactions = 1
# max-open-files = 40960
# max-manifest-file-size = "20MB"
# create-if-missing = true
# enable-statistics = true
# stats-dump-period = "10m"
# compaction-readahead-size = 0
# writable-file-max-buffer-size = "1MB"
# use-direct-io-for-flush-and-compaction = false
# enable-pipelined-write = true
# allow-concurrent-memtable-write = false
# bytes-per-sync = "1MB"
# wal-bytes-per-sync = "512KB"
# info-log-max-size = "1GB"
# info-log-roll-time = "0"
# info-log-keep-log-file-num = 10
# info-log-dir = ""
[raftdb.defaultcf]
# compression-per-level = ["no", "no", "lz4", "lz4", "lz4", "zstd", "zstd"]
# block-size = "64KB"
# write-buffer-size = "128MB"
# max-write-buffer-number = 5
# min-write-buffer-number-to-merge = 1
# max-bytes-for-level-base = "512MB"
# target-file-size-base = "8MB"
# Should be tuned to 256MB~2GB.
# block-cache-size = "256MB"
# level0-file-num-compaction-trigger = 4
# level0-slowdown-writes-trigger = 20
# level0-stop-writes-trigger = 36
# cache-index-and-filter-blocks = true
# pin-l0-filter-and-index-blocks = true
# compaction-pri = 0
# read-amp-bytes-per-bit = 0
# dynamic-level-bytes = true
[security]
# Set the path for certificates. An empty string means secure connections are disabled.
# ca-path = ""
# cert-path = ""
# key-path = ""
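# To enable TLS, point all three options at existing files; the paths below are
# placeholders, not defaults:
# ca-path = "/etc/tikv/tls/ca.pem"
# cert-path = "/etc/tikv/tls/server.pem"
# key-path = "/etc/tikv/tls/server-key.pem"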
[import]
# number of threads to handle RPC requests.
# num-threads = 8
# Stream channel window size; the stream is blocked when the channel is full.
# stream-channel-window = 128