// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Package opt provides sets of options used by LevelDB.
package opt

import (
	"math"

	"github.com/syndtr/goleveldb/leveldb/cache"
	"github.com/syndtr/goleveldb/leveldb/comparer"
	"github.com/syndtr/goleveldb/leveldb/filter"
)

// Commonly used unit sizes.
const (
	KiB = 1024
	MiB = KiB * 1024
	GiB = MiB * 1024
)

// Default values for the DB options.
var (
	DefaultBlockCacher                   = LRUCacher
	DefaultBlockCacheCapacity            = 8 * MiB
	DefaultBlockRestartInterval          = 16
	DefaultBlockSize                     = 4 * KiB
	DefaultCompactionExpandLimitFactor   = 25
	DefaultCompactionGPOverlapsFactor    = 10
	DefaultCompactionL0Trigger           = 4
	DefaultCompactionSourceLimitFactor   = 1
	DefaultCompactionTableSize           = 2 * MiB
	DefaultCompactionTableSizeMultiplier = 1.0
	DefaultCompactionTotalSize           = 10 * MiB
	DefaultCompactionTotalSizeMultiplier = 10.0
	DefaultCompressionType               = SnappyCompression
	DefaultIteratorSamplingRate          = 1 * MiB
	DefaultOpenFilesCacher               = LRUCacher
	DefaultOpenFilesCacheCapacity        = 500
	DefaultWriteBuffer                   = 4 * MiB
	DefaultWriteL0PauseTrigger           = 12
	DefaultWriteL0SlowdownTrigger        = 8
)

// Cacher is a caching algorithm.
type Cacher interface {
	New(capacity int) cache.Cacher
}

// CacherFunc adapts a constructor function to the Cacher interface.
type CacherFunc struct {
	NewFunc func(capacity int) cache.Cacher
}

func (f *CacherFunc) New(capacity int) cache.Cacher {
	if f.NewFunc != nil {
		return f.NewFunc(capacity)
	}
	return nil
}

func noCacher(int) cache.Cacher { return nil }

var (
	// LRUCacher is the LRU-cache algorithm.
	LRUCacher = &CacherFunc{cache.NewLRU}

	// NoCacher is the value to disable caching algorithm.
	NoCacher = &CacherFunc{}
)
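
// exampleCustomCacher is a minimal illustrative sketch, not part of the
// original package: any caching algorithm can be plugged in by wrapping its
// constructor in a CacherFunc. Here it simply reuses the LRU constructor
// from the cache package, which is exactly what LRUCacher already does.
var exampleCustomCacher Cacher = &CacherFunc{NewFunc: cache.NewLRU}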

// Compression is the 'sorted table' block compression algorithm to use.
type Compression uint

func (c Compression) String() string {
	switch c {
	case DefaultCompression:
		return "default"
	case NoCompression:
		return "none"
	case SnappyCompression:
		return "snappy"
	}
	return "invalid"
}

const (
	DefaultCompression Compression = iota
	NoCompression
	SnappyCompression
	nCompression
)

// Strict is the DB 'strict level'.
type Strict uint

const (
	// If present then a corrupted or invalid chunk or block in the manifest
	// journal will cause an error instead of being dropped.
	// This will prevent a database with a corrupted manifest from being opened.
	StrictManifest Strict = 1 << iota

	// If present then journal chunk checksums will be verified.
	StrictJournalChecksum

	// If present then a corrupted or invalid chunk or block in the journal
	// will cause an error instead of being dropped.
	// This will prevent a database with a corrupted journal from being opened.
	StrictJournal

	// If present then 'sorted table' block checksums will be verified.
	// This has effect on both 'read operation' and compaction.
	StrictBlockChecksum

	// If present then a corrupted 'sorted table' will fail compaction.
	// The database will enter read-only mode.
	StrictCompaction

	// If present then a corrupted 'sorted table' will halt the 'read operation'.
	StrictReader

	// If present then leveldb.Recover will drop corrupted 'sorted table' files.
	StrictRecovery

	// This is only applicable to ReadOptions; if present then this ReadOptions
	// 'strict level' will override the global one.
	StrictOverride

	// StrictAll enables all strict flags.
	StrictAll = StrictManifest | StrictJournalChecksum | StrictJournal | StrictBlockChecksum | StrictCompaction | StrictReader | StrictRecovery

	// DefaultStrict is the default strict flags. Specifying any strict flag
	// will override the default strict flags as a whole (i.e. not OR'ed).
	DefaultStrict = StrictJournalChecksum | StrictBlockChecksum | StrictCompaction | StrictReader

	// NoStrict disables all strict flags. Overrides the default strict flags.
	NoStrict = ^StrictAll
)
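
// exampleStrictOverride is a minimal sketch, not part of the original
// package: it shows that setting Strict on Options replaces DefaultStrict as
// a whole, so extra flags are normally OR'ed onto DefaultStrict explicitly.
func exampleStrictOverride() (manifest, recovery bool) {
	o := &Options{Strict: DefaultStrict | StrictManifest}
	// StrictManifest was added on top of the defaults; StrictRecovery was not.
	return o.GetStrict(StrictManifest), o.GetStrict(StrictRecovery)
}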

// Options holds the optional parameters for the DB at large.
type Options struct {
	// AltFilters defines one or more 'alternative filters'.
	// 'alternative filters' will be used during reads if a filter block
	// does not match the 'effective filter'.
	//
	// The default value is nil.
	AltFilters []filter.Filter

	// BlockCacher provides the cache algorithm for LevelDB 'sorted table' block caching.
	// Specify NoCacher to disable caching algorithm.
	//
	// The default value is LRUCacher.
	BlockCacher Cacher

	// BlockCacheCapacity defines the capacity of the 'sorted table' block caching.
	// Use -1 for zero; this has the same effect as specifying NoCacher to BlockCacher.
	//
	// The default value is 8MiB.
	BlockCacheCapacity int

	// BlockRestartInterval is the number of keys between restart points for
	// delta encoding of keys.
	//
	// The default value is 16.
	BlockRestartInterval int

	// BlockSize is the minimum uncompressed size in bytes of each 'sorted table'
	// block.
	//
	// The default value is 4KiB.
	BlockSize int

	// CompactionExpandLimitFactor limits the compaction size after it is expanded.
	// This will be multiplied by the table size limit at the compaction target level.
	//
	// The default value is 25.
	CompactionExpandLimitFactor int

	// CompactionGPOverlapsFactor limits the overlaps in the grandparent (Level + 2)
	// that a single 'sorted table' generates.
	// This will be multiplied by the table size limit at the grandparent level.
	//
	// The default value is 10.
	CompactionGPOverlapsFactor int

	// CompactionL0Trigger defines the number of 'sorted table' files at level-0
	// that will trigger compaction.
	//
	// The default value is 4.
	CompactionL0Trigger int

	// CompactionSourceLimitFactor limits the compaction source size. This doesn't
	// apply to level-0.
	// This will be multiplied by the table size limit at the compaction target level.
	//
	// The default value is 1.
	CompactionSourceLimitFactor int

	// CompactionTableSize limits the size of the 'sorted table' that compaction
	// generates. The limit for each level will be calculated as:
	//	CompactionTableSize * (CompactionTableSizeMultiplier ^ Level)
	// The multiplier for each level can also be fine-tuned using
	// CompactionTableSizeMultiplierPerLevel.
	//
	// The default value is 2MiB.
	CompactionTableSize int

	// CompactionTableSizeMultiplier defines the multiplier for CompactionTableSize.
	//
	// The default value is 1.
	CompactionTableSizeMultiplier float64

	// CompactionTableSizeMultiplierPerLevel defines the per-level multiplier for
	// CompactionTableSize.
	// Use zero to skip a level.
	//
	// The default value is nil.
	CompactionTableSizeMultiplierPerLevel []float64

	// CompactionTotalSize limits the total size of 'sorted table' files for each
	// level. The limit for each level will be calculated as:
	//	CompactionTotalSize * (CompactionTotalSizeMultiplier ^ Level)
	// The multiplier for each level can also be fine-tuned using
	// CompactionTotalSizeMultiplierPerLevel.
	//
	// The default value is 10MiB.
	CompactionTotalSize int

	// CompactionTotalSizeMultiplier defines the multiplier for CompactionTotalSize.
	//
	// The default value is 10.
	CompactionTotalSizeMultiplier float64

	// CompactionTotalSizeMultiplierPerLevel defines the per-level multiplier for
	// CompactionTotalSize.
	// Use zero to skip a level.
	//
	// The default value is nil.
	CompactionTotalSizeMultiplierPerLevel []float64

	// Comparer defines a total ordering over the space of []byte keys: a 'less
	// than' relationship. The same comparison algorithm must be used for reads
	// and writes over the lifetime of the DB.
	//
	// The default value uses the same ordering as bytes.Compare.
	Comparer comparer.Comparer

	// Compression defines the 'sorted table' block compression to use.
	//
	// The default value (DefaultCompression) uses snappy compression.
	Compression Compression

	// DisableBufferPool allows disabling the use of util.BufferPool functionality.
	//
	// The default value is false.
	DisableBufferPool bool

	// DisableBlockCache allows disabling the use of cache.Cache functionality on
	// 'sorted table' blocks.
	//
	// The default value is false.
	DisableBlockCache bool

	// DisableCompactionBackoff allows disabling compaction retry backoff.
	//
	// The default value is false.
	DisableCompactionBackoff bool

	// DisableLargeBatchTransaction allows disabling the switch-to-transaction
	// mode on large batch writes. If not disabled, batch writes larger than
	// WriteBuffer will be applied using a transaction.
	//
	// The default is false.
	DisableLargeBatchTransaction bool

	// ErrorIfExist defines whether an error should be returned if the DB already
	// exists.
	//
	// The default value is false.
	ErrorIfExist bool

	// ErrorIfMissing defines whether an error should be returned if the DB is
	// missing. If false then the database will be created if missing, otherwise
	// an error will be returned.
	//
	// The default value is false.
	ErrorIfMissing bool

	// Filter defines an 'effective filter' to use. An 'effective filter',
	// if defined, will be used to generate a per-table filter block.
	// The filter name will be stored on disk.
	// During reads LevelDB will try to find a matching filter among the
	// 'effective filter' and the 'alternative filters'.
	//
	// Filter can be changed after a DB has been created. It is recommended
	// to put the old filter into the 'alternative filters' to mitigate the lack
	// of a filter during the transition period.
	//
	// A filter is used to reduce disk reads when looking for a specific key.
	//
	// The default value is nil.
	Filter filter.Filter

	// IteratorSamplingRate defines the approximate gap (in bytes) between read
	// sampling of an iterator. The samples will be used to determine when
	// compaction should be triggered.
	//
	// The default is 1MiB.
	IteratorSamplingRate int

	// NoSync allows completely disabling fsync.
	//
	// The default is false.
	NoSync bool

	// NoWriteMerge allows disabling write merge.
	//
	// The default is false.
	NoWriteMerge bool

	// OpenFilesCacher provides the cache algorithm for open files caching.
	// Specify NoCacher to disable caching algorithm.
	//
	// The default value is LRUCacher.
	OpenFilesCacher Cacher

	// OpenFilesCacheCapacity defines the capacity of the open files caching.
	// Use -1 for zero; this has the same effect as specifying NoCacher to
	// OpenFilesCacher.
	//
	// The default value is 500.
	OpenFilesCacheCapacity int

	// ReadOnly, if true, opens the DB in read-only mode.
	//
	// The default value is false.
	ReadOnly bool

	// Strict defines the DB strict level.
	Strict Strict

	// WriteBuffer defines the maximum size of a 'memdb' before it is flushed to a
	// 'sorted table'. 'memdb' is an in-memory DB backed by an on-disk
	// journal file.
	//
	// LevelDB may hold up to two 'memdb' at the same time.
	//
	// The default value is 4MiB.
	WriteBuffer int

	// WriteL0PauseTrigger defines the number of 'sorted table' files at level-0
	// that will pause writes.
	//
	// The default value is 12.
	WriteL0PauseTrigger int

	// WriteL0SlowdownTrigger defines the number of 'sorted table' files at level-0
	// that will trigger write slowdown.
	//
	// The default value is 8.
	WriteL0SlowdownTrigger int
}
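
// exampleOptions is a minimal sketch, not part of the original package, of
// how a caller might tune a few of the more common knobs; any field left at
// its zero value falls back to the corresponding Default* value through the
// Get* accessors. The bloom filter with 10 bits per key is only an
// illustrative choice.
func exampleOptions() *Options {
	return &Options{
		BlockCacheCapacity: 16 * MiB,
		WriteBuffer:        8 * MiB,
		Compression:        SnappyCompression,
		Filter:             filter.NewBloomFilter(10),
	}
}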

func (o *Options) GetAltFilters() []filter.Filter {
	if o == nil {
		return nil
	}
	return o.AltFilters
}

func (o *Options) GetBlockCacher() Cacher {
	if o == nil || o.BlockCacher == nil {
		return DefaultBlockCacher
	} else if o.BlockCacher == NoCacher {
		return nil
	}
	return o.BlockCacher
}

func (o *Options) GetBlockCacheCapacity() int {
	if o == nil || o.BlockCacheCapacity == 0 {
		return DefaultBlockCacheCapacity
	} else if o.BlockCacheCapacity < 0 {
		return 0
	}
	return o.BlockCacheCapacity
}

func (o *Options) GetBlockRestartInterval() int {
	if o == nil || o.BlockRestartInterval <= 0 {
		return DefaultBlockRestartInterval
	}
	return o.BlockRestartInterval
}

func (o *Options) GetBlockSize() int {
	if o == nil || o.BlockSize <= 0 {
		return DefaultBlockSize
	}
	return o.BlockSize
}

func (o *Options) GetCompactionExpandLimit(level int) int {
	factor := DefaultCompactionExpandLimitFactor
	if o != nil && o.CompactionExpandLimitFactor > 0 {
		factor = o.CompactionExpandLimitFactor
	}
	return o.GetCompactionTableSize(level+1) * factor
}

func (o *Options) GetCompactionGPOverlaps(level int) int {
	factor := DefaultCompactionGPOverlapsFactor
	if o != nil && o.CompactionGPOverlapsFactor > 0 {
		factor = o.CompactionGPOverlapsFactor
	}
	return o.GetCompactionTableSize(level+2) * factor
}

func (o *Options) GetCompactionL0Trigger() int {
	if o == nil || o.CompactionL0Trigger == 0 {
		return DefaultCompactionL0Trigger
	}
	return o.CompactionL0Trigger
}

func (o *Options) GetCompactionSourceLimit(level int) int {
	factor := DefaultCompactionSourceLimitFactor
	if o != nil && o.CompactionSourceLimitFactor > 0 {
		factor = o.CompactionSourceLimitFactor
	}
	return o.GetCompactionTableSize(level+1) * factor
}

func (o *Options) GetCompactionTableSize(level int) int {
	var (
		base = DefaultCompactionTableSize
		mult float64
	)
	if o != nil {
		if o.CompactionTableSize > 0 {
			base = o.CompactionTableSize
		}
		if level < len(o.CompactionTableSizeMultiplierPerLevel) && o.CompactionTableSizeMultiplierPerLevel[level] > 0 {
			mult = o.CompactionTableSizeMultiplierPerLevel[level]
		} else if o.CompactionTableSizeMultiplier > 0 {
			mult = math.Pow(o.CompactionTableSizeMultiplier, float64(level))
		}
	}
	if mult == 0 {
		mult = math.Pow(DefaultCompactionTableSizeMultiplier, float64(level))
	}
	return int(float64(base) * mult)
}
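
// exampleCompactionTableSizes is a small sketch, not part of the original
// package, of the per-level limit CompactionTableSize * (multiplier ^ level):
// with the hypothetical settings below the limits come out to 2 MiB, 4 MiB
// and 8 MiB for levels 0, 1 and 2.
func exampleCompactionTableSizes() [3]int {
	o := &Options{
		CompactionTableSize:           2 * MiB,
		CompactionTableSizeMultiplier: 2.0,
	}
	return [3]int{
		o.GetCompactionTableSize(0), // 2 * MiB
		o.GetCompactionTableSize(1), // 4 * MiB
		o.GetCompactionTableSize(2), // 8 * MiB
	}
}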

func (o *Options) GetCompactionTotalSize(level int) int64 {
	var (
		base = DefaultCompactionTotalSize
		mult float64
	)
	if o != nil {
		if o.CompactionTotalSize > 0 {
			base = o.CompactionTotalSize
		}
		if level < len(o.CompactionTotalSizeMultiplierPerLevel) && o.CompactionTotalSizeMultiplierPerLevel[level] > 0 {
			mult = o.CompactionTotalSizeMultiplierPerLevel[level]
		} else if o.CompactionTotalSizeMultiplier > 0 {
			mult = math.Pow(o.CompactionTotalSizeMultiplier, float64(level))
		}
	}
	if mult == 0 {
		mult = math.Pow(DefaultCompactionTotalSizeMultiplier, float64(level))
	}
	return int64(float64(base) * mult)
}
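
// exampleCompactionTotalSizes sketches, outside the original API, the
// per-level total-size limit CompactionTotalSize * (multiplier ^ level):
// with the defaults (10 MiB base, 10x multiplier) the limits are 10 MiB,
// 100 MiB and 1000 MiB for levels 0, 1 and 2.
func exampleCompactionTotalSizes() [3]int64 {
	var o *Options // a nil Options falls back to every default
	return [3]int64{
		o.GetCompactionTotalSize(0), // 10 * MiB
		o.GetCompactionTotalSize(1), // 100 * MiB
		o.GetCompactionTotalSize(2), // 1000 * MiB
	}
}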

func (o *Options) GetComparer() comparer.Comparer {
	if o == nil || o.Comparer == nil {
		return comparer.DefaultComparer
	}
	return o.Comparer
}

func (o *Options) GetCompression() Compression {
	if o == nil || o.Compression <= DefaultCompression || o.Compression >= nCompression {
		return DefaultCompressionType
	}
	return o.Compression
}

func (o *Options) GetDisableBufferPool() bool {
	return o != nil && o.DisableBufferPool
}

func (o *Options) GetDisableBlockCache() bool {
	return o != nil && o.DisableBlockCache
}

func (o *Options) GetDisableCompactionBackoff() bool {
	return o != nil && o.DisableCompactionBackoff
}

func (o *Options) GetDisableLargeBatchTransaction() bool {
	return o != nil && o.DisableLargeBatchTransaction
}

func (o *Options) GetErrorIfExist() bool {
	return o != nil && o.ErrorIfExist
}

func (o *Options) GetErrorIfMissing() bool {
	return o != nil && o.ErrorIfMissing
}

func (o *Options) GetFilter() filter.Filter {
	if o == nil {
		return nil
	}
	return o.Filter
}

func (o *Options) GetIteratorSamplingRate() int {
	if o == nil || o.IteratorSamplingRate <= 0 {
		return DefaultIteratorSamplingRate
	}
	return o.IteratorSamplingRate
}

func (o *Options) GetNoSync() bool {
	return o != nil && o.NoSync
}

func (o *Options) GetNoWriteMerge() bool {
	return o != nil && o.NoWriteMerge
}

func (o *Options) GetOpenFilesCacher() Cacher {
	if o == nil || o.OpenFilesCacher == nil {
		return DefaultOpenFilesCacher
	}
	if o.OpenFilesCacher == NoCacher {
		return nil
	}
	return o.OpenFilesCacher
}

func (o *Options) GetOpenFilesCacheCapacity() int {
	if o == nil || o.OpenFilesCacheCapacity == 0 {
		return DefaultOpenFilesCacheCapacity
	} else if o.OpenFilesCacheCapacity < 0 {
		return 0
	}
	return o.OpenFilesCacheCapacity
}

func (o *Options) GetReadOnly() bool {
	return o != nil && o.ReadOnly
}

func (o *Options) GetStrict(strict Strict) bool {
	if o == nil || o.Strict == 0 {
		return DefaultStrict&strict != 0
	}
	return o.Strict&strict != 0
}

func (o *Options) GetWriteBuffer() int {
	if o == nil || o.WriteBuffer <= 0 {
		return DefaultWriteBuffer
	}
	return o.WriteBuffer
}

func (o *Options) GetWriteL0PauseTrigger() int {
	if o == nil || o.WriteL0PauseTrigger == 0 {
		return DefaultWriteL0PauseTrigger
	}
	return o.WriteL0PauseTrigger
}

func (o *Options) GetWriteL0SlowdownTrigger() int {
	if o == nil || o.WriteL0SlowdownTrigger == 0 {
		return DefaultWriteL0SlowdownTrigger
	}
	return o.WriteL0SlowdownTrigger
}

// ReadOptions holds the optional parameters for 'read operation'. The
// 'read operation' includes Get, Find and NewIterator.
type ReadOptions struct {
	// DontFillCache defines whether block reads for this 'read operation'
	// should be cached. If false then the block will be cached. This does
	// not affect already cached blocks.
	//
	// The default value is false.
	DontFillCache bool

	// Strict will be OR'ed with the global DB 'strict level' unless StrictOverride
	// is present. Currently only StrictReader has an effect here.
	Strict Strict
}
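
// exampleScanReadOptions is a minimal sketch, not part of the original
// package: read options for a large one-off scan that should not evict
// hotter blocks from the block cache.
func exampleScanReadOptions() *ReadOptions {
	return &ReadOptions{DontFillCache: true}
}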

func (ro *ReadOptions) GetDontFillCache() bool {
	return ro != nil && ro.DontFillCache
}

func (ro *ReadOptions) GetStrict(strict Strict) bool {
	if ro == nil {
		return false
	}
	return ro.Strict&strict != 0
}

// WriteOptions holds the optional parameters for 'write operation'. The
// 'write operation' includes Write, Put and Delete.
type WriteOptions struct {
	// NoWriteMerge allows disabling write merge.
	//
	// The default is false.
	NoWriteMerge bool

	// Sync is whether to sync underlying writes from the OS buffer cache
	// through to actual disk, if applicable. Setting Sync can result in
	// slower writes.
	//
	// If false, and the machine crashes, then some recent writes may be lost.
	// Note that if it is just the process that crashes (and the machine does
	// not) then no writes will be lost.
	//
	// In other words, Sync being false has the same semantics as a write
	// system call. Sync being true means write followed by fsync.
	//
	// The default value is false.
	Sync bool
}
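
// exampleSyncedWrite is a minimal sketch, not part of the original package:
// write options that force the journal write for this operation to be
// followed by an fsync, trading throughput for durability on this write only.
func exampleSyncedWrite() *WriteOptions {
	return &WriteOptions{Sync: true}
}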

func (wo *WriteOptions) GetNoWriteMerge() bool {
	return wo != nil && wo.NoWriteMerge
}

func (wo *WriteOptions) GetSync() bool {
	return wo != nil && wo.Sync
}

// GetStrict returns whether the given strict flag is in effect for a 'read
// operation', taking a ReadOptions-level override into account.
func GetStrict(o *Options, ro *ReadOptions, strict Strict) bool {
	if ro.GetStrict(StrictOverride) {
		return ro.GetStrict(strict)
	}
	return o.GetStrict(strict) || ro.GetStrict(strict)
}
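
// exampleReadStrict is a minimal sketch, not part of the original package:
// without StrictOverride the per-read flags are OR'ed with the DB-wide ones,
// while with StrictOverride the ReadOptions flags win outright.
func exampleReadStrict() (ored, overridden bool) {
	o := &Options{Strict: DefaultStrict}
	ored = GetStrict(o, &ReadOptions{}, StrictReader)                             // true, from DefaultStrict
	overridden = GetStrict(o, &ReadOptions{Strict: StrictOverride}, StrictReader) // false, override with no flags set
	return
}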