This repository was archived by the owner on May 14, 2021. It is now read-only.

removed unused settings for 2.1.2 #27

Open

Wants to merge 4 commits into master.
spec/classes/cassandra_config_spec.rb: 0 additions & 2 deletions
@@ -32,7 +32,6 @@
:incremental_backups => 'false',
:snapshot_before_compaction => 'false',
:auto_snapshot => 'true',
:multithreaded_compaction => 'false',
:endpoint_snitch => 'SimpleSnitch',
:internode_compression => 'all',
:disk_failure_policy => 'stop',
@@ -110,7 +109,6 @@
:incremental_backups => 'false',
:snapshot_before_compaction => 'false',
:auto_snapshot => 'true',
:multithreaded_compaction => 'false',
:endpoint_snitch => 'SimpleSnitch',
:internode_compression => 'all',
:disk_failure_policy => 'stop',
spec/classes/cassandra_spec.rb: 0 additions & 2 deletions
@@ -105,7 +105,6 @@
:incremental_backups => 'false',
:snapshot_before_compaction => 'false',
:auto_snapshot => 'true',
:multithreaded_compaction => 'false',
:endpoint_snitch => 'SimpleSnitch',
:internode_compression => 'all',
:disk_failure_policy => 'stop',
@@ -154,7 +153,6 @@
:incremental_backups => [['true', 'false'], [9, 'bozo']],
:snapshot_before_compaction => [['true', 'false'], [9, 'bozo']],
:auto_snapshot => [['true', 'false'], [9, 'bozo']],
:multithreaded_compaction => [['true', 'false'], [9, 'bozo']],
:concurrent_reads => [[1, 256, 42], ['bozo', 0.5, true]],
:concurrent_writes => [[1, 256, 42], ['bozo', 0.5, true]],
:additional_jvm_opts => [[['a', 'b']], ['bozo']],
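The `[[valid], [invalid]]` pairs in cassandra_spec.rb above form a value-validation table: each parameter maps to values the class should accept and values it should reject. The loop that consumes this table sits outside the visible hunk, so the following is only a sketch of the usual rspec-puppet pattern; the `validators` name, the sample values, and the pass/fail expectations are assumptions, not this module's actual code.

```ruby
# Hypothetical sketch (not from this repository) of how a parameter/value
# table like the one above is typically driven in an rspec-puppet spec.
require 'spec_helper'

describe 'cassandra' do
  validators = {
    :auto_snapshot    => [['true', 'false'], [9, 'bozo']],
    :concurrent_reads => [[1, 256, 42], ['bozo', 0.5, true]]
  }

  validators.each do |param, (valid, invalid)|
    valid.each do |value|
      context "with #{param} => #{value.inspect}" do
        let(:params) { { param => value } }
        # An accepted value should produce a catalogue containing the class.
        it { should contain_class('cassandra') }
      end
    end

    invalid.each do |value|
      context "with rejected #{param} => #{value.inspect}" do
        let(:params) { { param => value } }
        # A rejected value should make catalogue compilation fail.
        it { expect { catalogue }.to raise_error(Puppet::Error) }
      end
    end
  end
end
```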
templates/cassandra-env.sh.erb: 0 additions & 7 deletions
@@ -157,13 +157,6 @@ JMX_PORT="<%= @jmx_port %>"
# performance benefit (around 5%).
JVM_OPTS="$JVM_OPTS -ea"

# add the jamm javaagent
if [ "$JVM_VENDOR" != "OpenJDK" -o "$JVM_VERSION" \> "1.6.0" ] \
|| [ "$JVM_VERSION" = "1.6.0" -a "$JVM_PATCH_VERSION" -ge 23 ]
then
JVM_OPTS="$JVM_OPTS -javaagent:$CASSANDRA_HOME/lib/jamm-0.2.5.jar"
fi

# enable thread priorities, primarily so we can give periodic tasks
# a lower priority to avoid interfering with client workload
JVM_OPTS="$JVM_OPTS -XX:+UseThreadPriorities"
templates/cassandra.yaml.erb: 0 additions & 63 deletions
@@ -169,22 +169,6 @@ row_cache_save_period: 0
# Disabled by default, meaning all keys are going to be saved
# row_cache_keys_to_save: 100

# The provider for the row cache to use.
#
# Supported values are: ConcurrentLinkedHashCacheProvider, SerializingCacheProvider
#
# SerializingCacheProvider serialises the contents of the row and stores
# it in native memory, i.e., off the JVM Heap. Serialized rows take
# significantly less memory than "live" rows in the JVM, so you can cache
# more rows in a given memory footprint. And storing the cache off-heap
# means you can use smaller heap sizes, reducing the impact of GC pauses.
#
# It is also valid to specify the fully-qualified class name to a class
# that implements org.apache.cassandra.cache.IRowCacheProvider.
#
# Defaults to SerializingCacheProvider
row_cache_provider: SerializingCacheProvider

# saved caches
saved_caches_directory: <%= @saved_caches_directory %>

@@ -227,31 +211,6 @@ seed_provider:
# Ex: "<ip1>,<ip2>,<ip3>"
- seeds: <%= @seeds.join(',') %>

# emergency pressure valve: each time heap usage after a full (CMS)
# garbage collection is above this fraction of the max, Cassandra will
# flush the largest memtables.
#
# Set to 1.0 to disable. Setting this lower than
# CMSInitiatingOccupancyFraction is not likely to be useful.
#
# RELYING ON THIS AS YOUR PRIMARY TUNING MECHANISM WILL WORK POORLY:
# it is most effective under light to moderate load, or read-heavy
# workloads; under truly massive write load, it will often be too
# little, too late.
flush_largest_memtables_at: 0.75

# emergency pressure valve #2: the first time heap usage after a full
# (CMS) garbage collection is above this fraction of the max,
# Cassandra will reduce cache maximum _capacity_ to the given fraction
# of the current _size_. Should usually be set substantially above
# flush_largest_memtables_at, since that will have less long-term
# impact on the system.
#
# Set to 1.0 to disable. Setting this lower than
# CMSInitiatingOccupancyFraction is not likely to be useful.
reduce_cache_sizes_at: 0.85
reduce_cache_capacity_to: 0.6

# For workloads with more data than can fit in memory, Cassandra's
# bottleneck will be reads that need to fetch data from
# disk. "concurrent_reads" should be set to (16 * number_of_drives) in
@@ -286,11 +245,6 @@ concurrent_writes: <%= @concurrent_writes %>
# By default this will be set to the amount of data directories defined.
#memtable_flush_writers: 1

# the number of full memtables to allow pending flush, that is,
# waiting for a writer thread. At a minimum, this should be set to
# the maximum number of secondary indexes created on a single CF.
memtable_flush_queue_size: 4

# Whether to, when doing sequential writing, fsync() at intervals in
# order to force the operating system to flush the dirty
# buffers. Enable this to avoid sudden dirty buffer flushing from
@@ -441,11 +395,6 @@ auto_snapshot: <%= @auto_snapshot %>
# that wastefully either.
column_index_size_in_kb: 64

# Size limit for rows being compacted in memory. Larger rows will spill
# over to disk and use a slower two-pass compaction process. A message
# will be logged specifying the row key.
in_memory_compaction_limit_in_mb: 64

# Number of simultaneous compactions to allow, NOT including
# validation "compactions" for anti-entropy repair. Simultaneous
# compactions can help preserve read performance in a mixed read/write
@@ -459,13 +408,6 @@ in_memory_compaction_limit_in_mb: 64
# Uncomment to make compaction mono-threaded, the pre-0.8 default.
#concurrent_compactors: 1

# Multi-threaded compaction. When enabled, each compaction will use
# up to one thread per core, plus one thread per sstable being merged.
# This is usually only useful for SSD-based hardware: otherwise,
# your concern is usually to get compaction to do LESS i/o (see:
# compaction_throughput_mb_per_sec), not more.
multithreaded_compaction: <%= @multithreaded_compaction %>

# Throttles compaction to the given total throughput across the entire
# system. The faster you insert data, the faster you need to compact in
# order to keep the sstable count down, but in general, setting this to
@@ -474,11 +416,6 @@ multithreaded_compaction: <%= @multithreaded_compaction %>
# of compaction, including validation compaction.
compaction_throughput_mb_per_sec: 16

# Track cached row keys during compaction, and re-cache their new
# positions in the compacted sstable. Disable if you use really large
# key caches.
compaction_preheat_key_cache: true

# Throttles all outbound streaming file transfers on this node to the
# given total throughput in Mbps. This is necessary because Cassandra does
# mostly sequential IO when streaming data during bootstrap or repair, which
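For reference, the `<%= @... %>` placeholders that remain in cassandra.yaml.erb after these deletions are filled from the Puppet class's parameters, which the template sees as instance variables. Below is a minimal standalone illustration of that rendering in plain Ruby ERB; it is not the module's own code, and the `Scope` helper and the sample values are invented for the demo.

```ruby
# Standalone illustration of how ERB resolves the @-placeholders left in
# cassandra.yaml.erb; the Scope class and values here are made up.
require 'erb'

template = <<-'YAML_ERB'
concurrent_reads: <%= @concurrent_reads %>
concurrent_writes: <%= @concurrent_writes %>
auto_snapshot: <%= @auto_snapshot %>
saved_caches_directory: <%= @saved_caches_directory %>
YAML_ERB

class Scope
  def initialize(vars)
    # Expose each value as an instance variable, the way the template expects.
    vars.each { |name, value| instance_variable_set("@#{name}", value) }
  end

  def render(text)
    ERB.new(text).result(binding)
  end
end

scope = Scope.new(
  :concurrent_reads       => 32,
  :concurrent_writes      => 32,
  :auto_snapshot          => 'true',
  :saved_caches_directory => '/var/lib/cassandra/saved_caches'
)

puts scope.render(template)
# concurrent_reads: 32
# concurrent_writes: 32
# auto_snapshot: true
# saved_caches_directory: /var/lib/cassandra/saved_caches
```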