Commit 65aa646c authored by Marc Gravell, committed by GitHub

Merge pull request #500 from StackExchange/core-rtm

Merging Core-RTM
parents d4c9ddec a2ee0280
@@ -19,3 +19,4 @@ RedisQFork*.dat
StackExchange.Redis.*.zip
.vs/
*.lock.json
packages/
\ No newline at end of file
{
"runtimes": { "win": {} },
"frameworks": {
"net45": {
"dependencies": {
}
}
}
}
<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="14.0" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<Project ToolsVersion="14.0.25420" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<PropertyGroup>
<VisualStudioVersion Condition="'$(VisualStudioVersion)' == ''">14.0</VisualStudioVersion>
<VisualStudioVersion Condition="'$(VisualStudioVersion)' == ''">14.0.25420</VisualStudioVersion>
<VSToolsPath Condition="'$(VSToolsPath)' == ''">$(MSBuildExtensionsPath32)\Microsoft\VisualStudio\v$(VisualStudioVersion)</VSToolsPath>
</PropertyGroup>
<Import Project="$(VSToolsPath)\DotNet\Microsoft.DotNet.Props" Condition="'$(VSToolsPath)' != ''" />
<PropertyGroup Label="Globals">
<ProjectGuid>9d83baba-a92e-495f-bf63-deb4f6b09355</ProjectGuid>
<RootNamespace>BasicTest_dnxcore50</RootNamespace>
<ProjectGuid>939fa5f7-16aa-4847-812b-6ebc3748a86d</ProjectGuid>
<RootNamespace>BasicTest</RootNamespace>
<BaseIntermediateOutputPath Condition="'$(BaseIntermediateOutputPath)'=='' ">.\obj</BaseIntermediateOutputPath>
<OutputPath Condition="'$(OutputPath)'=='' ">.\bin\</OutputPath>
</PropertyGroup>
<PropertyGroup>
<SchemaVersion>2.0</SchemaVersion>
</PropertyGroup>
......
@@ -14,7 +14,7 @@ class Program
{
static void Main(string[] args)
{
int AsyncOpsQty = 10000;
int AsyncOpsQty = 500000;
if(args.Length == 1)
{
int tmp;
@@ -68,6 +68,5 @@ protected static string Me([CallerMemberName] string caller = null)
{
return caller;
}
}
}
{
"version": "1.0.0-*",
"description": "StackExchange.Redis.BasicTest dnxcore50",
"description": "StackExchange.Redis.BasicTest .NET Core",
"buildOptions": {
"compile": {
"include": [
"../BasicTest/Program.cs"
]
}
"emitEntryPoint": true
},
"dependencies": {
"StackExchange.Redis": {
@@ -14,17 +10,20 @@
"target": "project"
}
},
"commands": {
"run": "BasicTest_dnxcore50"
"runtimes": {
"win7-x64": {}
},
"frameworks": {
"netcoreapp1.0": {
"buildOptions": {
"define": [ "CORE_CLR" ]
},
"imports": [ "dnxcore50" ],
"dependencies": {
"System.Console": "4.0.0-rc2-24027"
"Microsoft.NETCore.App": {
"version": "1.0.0-*",
"type": "platform"
},
"System.Console": "4.0.0"
}
}
}
......
@@ -41,7 +41,7 @@ public void ExecuteMassiveDelete()
var originallyTask = conn.SetLengthAsync(todoKey);
int keepChecking = 1;
Task last = null;
while (Thread.VolatileRead(ref keepChecking) == 1)
while (Volatile.Read(ref keepChecking) == 1)
{
throttle.Wait(); // acquire
conn.SetPopAsync(todoKey).ContinueWith(task =>
@@ -51,7 +51,7 @@ public void ExecuteMassiveDelete()
{
if ((string)task.Result == null)
{
Thread.VolatileWrite(ref keepChecking, 0);
Volatile.Write(ref keepChecking, 0);
}
else
{
......
{
"runtimes": { "win": {} },
"frameworks": {
"net45": {
"dependencies": {
"NUnit": "3.0.0"
}
}
}
}
<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="14.0.25420" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<PropertyGroup>
<VisualStudioVersion Condition="'$(VisualStudioVersion)' == ''">14.0.25420</VisualStudioVersion>
<VSToolsPath Condition="'$(VSToolsPath)' == ''">$(MSBuildExtensionsPath32)\Microsoft\VisualStudio\v$(VisualStudioVersion)</VSToolsPath>
</PropertyGroup>
<Import Project="$(VSToolsPath)\DotNet\Microsoft.DotNet.Props" Condition="'$(VSToolsPath)' != ''" />
<PropertyGroup Label="Globals">
<ProjectGuid>0fa2f7c5-1d36-40c4-82d1-93dbf43765d7</ProjectGuid>
<RootNamespace>MigratedBookSleeveTestSuite</RootNamespace>
<BaseIntermediateOutputPath Condition="'$(BaseIntermediateOutputPath)'=='' ">.\obj</BaseIntermediateOutputPath>
<OutputPath Condition="'$(OutputPath)'=='' ">.\bin\</OutputPath>
</PropertyGroup>
<PropertyGroup>
<SchemaVersion>2.0</SchemaVersion>
</PropertyGroup>
<ItemGroup>
<Service Include="{82a7f48d-3b50-4b1e-b82e-3ada8210c358}" />
</ItemGroup>
<Import Project="$(VSToolsPath)\DotNet\Microsoft.DotNet.targets" Condition="'$(VSToolsPath)' != ''" />
</Project>
\ No newline at end of file
@@ -28,6 +28,7 @@ static void Main()
}
static void Main2()
{
#if !CORE_CLR
// why is this here? because some dumbass forgot to install a decent test-runner before going to the airport
var epicFail = new List<string>();
var testTypes = from type in typeof(Program).Assembly.GetTypes()
@@ -163,6 +164,7 @@ where Attribute.IsDefined(method, typeof(TestAttribute))
// BookSleeve.RedisConnectionBase.AllSyncCallbacks, BookSleeve.RedisConnectionBase.AllAsyncCallbacks);
//#endif
#endif
}
}
}
......
@@ -247,21 +247,21 @@ public void TestSubscribeUnsubscribeAndSubscribeAgain()
sub.WaitAll(t1, t2);
pub.Publish("abc", "");
AllowReasonableTimeToPublishAndProcess();
Assert.AreEqual(1, Thread.VolatileRead(ref x));
Assert.AreEqual(1, Thread.VolatileRead(ref y));
Assert.AreEqual(1, Volatile.Read(ref x));
Assert.AreEqual(1, Volatile.Read(ref y));
t1 = sub.UnsubscribeAsync("abc", null);
t2 = sub.UnsubscribeAsync("ab*", null);
sub.WaitAll(t1, t2);
pub.Publish("abc", "");
Assert.AreEqual(1, Thread.VolatileRead(ref x));
Assert.AreEqual(1, Thread.VolatileRead(ref y));
Assert.AreEqual(1, Volatile.Read(ref x));
Assert.AreEqual(1, Volatile.Read(ref y));
t1 = sub.SubscribeAsync("abc", delegate { Interlocked.Increment(ref x); });
t2 = sub.SubscribeAsync("ab*", delegate { Interlocked.Increment(ref y); });
sub.WaitAll(t1, t2);
pub.Publish("abc", "");
AllowReasonableTimeToPublishAndProcess();
Assert.AreEqual(2, Thread.VolatileRead(ref x));
Assert.AreEqual(2, Thread.VolatileRead(ref y));
Assert.AreEqual(2, Volatile.Read(ref x));
Assert.AreEqual(2, Volatile.Read(ref y));
}
}
......
<?xml version="1.0" encoding="utf-8"?>
<packages>
<package id="NUnit" version="3.0.0" targetFramework="net45" />
</packages>
\ No newline at end of file
{
"version": "1.0.0-*",
"description": "MigratedBookSleeveTestSuite",
"dependencies": {
"StackExchange.Redis": {
"version": "1.1.*",
"target": "project"
},
"nunit": "3.4.1"
},
"testRunner": "nunit",
"frameworks": {
"net45": {
},
"netcoreapp1.0": {
"imports": [
"portable-net45+win8"
],
"buildOptions": {
"define": [ "PLAT_SAFE_CONTINUATIONS", "CORE_CLR" ]
},
"dependencies": {
"Microsoft.NETCore.App": {
"version": "1.0.0-*",
"type": "platform"
},
"System.Console": "4.0.0",
"System.Diagnostics.Debug": "4.0.11",
"System.Diagnostics.TraceSource": "4.0.0",
"System.Linq.Expressions": "4.1.0",
"System.Reflection.Extensions": "4.0.1",
"System.Runtime.InteropServices": "4.1.0",
"System.Threading.Tasks.Parallel": "4.0.1",
"Microsoft.CSharp": "4.0.1",
"dotnet-test-nunit": "3.4.0-beta-1"
}
}
}
}
@@ -238,7 +238,7 @@ void Connect()
socket.Connect(Host, Port);
if (!socket.Connected)
{
socket.Close();
socket.Dispose();
socket = null;
return;
}
@@ -272,7 +272,7 @@ bool SendDataCommand(byte[] data, string cmd, params object[] args)
catch (SocketException)
{
// timeout;
socket.Close();
socket.Dispose();
socket = null;
return false;
@@ -297,7 +297,7 @@ bool SendCommand(string cmd, params object[] args)
catch (SocketException)
{
// timeout;
socket.Close();
socket.Dispose();
socket = null;
return false;
@@ -842,7 +842,7 @@ protected virtual void Dispose(bool disposing)
if (disposing)
{
SendCommand("QUIT\r\n");
socket.Close();
socket.Dispose();
socket = null;
}
}
......
ea828c6074663c8bd4e705d3e3024d9d1721ef3b 127.0.0.1:7001 master - 0 1467584830754 2 connected 5461-10922
5ffbeb7f7872923ee0ed0913aedfe806265f5090 127.0.0.1:7003 slave 780813af558af81518e58e495d63b6e248e80adf 0 1467584830349 4 connected
780813af558af81518e58e495d63b6e248e80adf 127.0.0.1:7000 myself,master - 0 0 1 connected 0-5460
eaf2fc952dad107ffa28c0f436f73c3d52112276 127.0.0.1:7004 slave ea828c6074663c8bd4e705d3e3024d9d1721ef3b 0 1467584830854 5 connected
b1dc6b4fd60ffcff552d669b3294f6991ccb2b68 127.0.0.1:7005 slave 17f2a81c7fc22283ef93bf118cedb193a1122399 0 1467584829346 6 connected
17f2a81c7fc22283ef93bf118cedb193a1122399 127.0.0.1:7002 master - 0 1467584830754 3 connected 10923-16383
vars currentEpoch 6 lastVoteEpoch 0
cluster-enabled yes
cluster-config-file nodes.conf
cluster-node-timeout 5000
appendonly yes
\ No newline at end of file
eaf2fc952dad107ffa28c0f436f73c3d52112276 127.0.0.1:7004 slave ea828c6074663c8bd4e705d3e3024d9d1721ef3b 0 1467584830397 5 connected
b1dc6b4fd60ffcff552d669b3294f6991ccb2b68 127.0.0.1:7005 slave 17f2a81c7fc22283ef93bf118cedb193a1122399 0 1467584829393 6 connected
17f2a81c7fc22283ef93bf118cedb193a1122399 127.0.0.1:7002 master - 0 1467584829092 3 connected 10923-16383
780813af558af81518e58e495d63b6e248e80adf 127.0.0.1:7000 master - 0 1467584829895 1 connected 0-5460
5ffbeb7f7872923ee0ed0913aedfe806265f5090 127.0.0.1:7003 slave 780813af558af81518e58e495d63b6e248e80adf 0 1467584831402 4 connected
ea828c6074663c8bd4e705d3e3024d9d1721ef3b 127.0.0.1:7001 myself,master - 0 0 2 connected 5461-10922
vars currentEpoch 6 lastVoteEpoch 0
cluster-enabled yes
cluster-config-file nodes.conf
cluster-node-timeout 5000
appendonly yes
\ No newline at end of file
ea828c6074663c8bd4e705d3e3024d9d1721ef3b 127.0.0.1:7001 master - 0 1467584829405 2 connected 5461-10922
5ffbeb7f7872923ee0ed0913aedfe806265f5090 127.0.0.1:7003 slave 780813af558af81518e58e495d63b6e248e80adf 0 1467584831415 4 connected
b1dc6b4fd60ffcff552d669b3294f6991ccb2b68 127.0.0.1:7005 slave 17f2a81c7fc22283ef93bf118cedb193a1122399 0 1467584831615 6 connected
eaf2fc952dad107ffa28c0f436f73c3d52112276 127.0.0.1:7004 slave ea828c6074663c8bd4e705d3e3024d9d1721ef3b 0 1467584830408 5 connected
780813af558af81518e58e495d63b6e248e80adf 127.0.0.1:7000 master - 0 1467584830914 1 connected 0-5460
17f2a81c7fc22283ef93bf118cedb193a1122399 127.0.0.1:7002 myself,master - 0 0 3 connected 10923-16383
vars currentEpoch 6 lastVoteEpoch 0
cluster-enabled yes
cluster-config-file nodes.conf
cluster-node-timeout 5000
appendonly yes
\ No newline at end of file
ea828c6074663c8bd4e705d3e3024d9d1721ef3b 127.0.0.1:7001 master - 0 1467584829099 2 connected 5461-10922
b1dc6b4fd60ffcff552d669b3294f6991ccb2b68 127.0.0.1:7005 slave 17f2a81c7fc22283ef93bf118cedb193a1122399 0 1467584829099 6 connected
eaf2fc952dad107ffa28c0f436f73c3d52112276 127.0.0.1:7004 slave ea828c6074663c8bd4e705d3e3024d9d1721ef3b 0 1467584830405 5 connected
17f2a81c7fc22283ef93bf118cedb193a1122399 127.0.0.1:7002 master - 0 1467584829399 3 connected 10923-16383
5ffbeb7f7872923ee0ed0913aedfe806265f5090 127.0.0.1:7003 myself,slave 780813af558af81518e58e495d63b6e248e80adf 0 0 4 connected
780813af558af81518e58e495d63b6e248e80adf 127.0.0.1:7000 master - 0 1467584831436 1 connected 0-5460
vars currentEpoch 6 lastVoteEpoch 0
cluster-enabled yes
cluster-config-file nodes.conf
cluster-node-timeout 5000
appendonly yes
\ No newline at end of file
17f2a81c7fc22283ef93bf118cedb193a1122399 127.0.0.1:7002 master - 0 1467584828911 3 connected 10923-16383
5ffbeb7f7872923ee0ed0913aedfe806265f5090 127.0.0.1:7003 slave 780813af558af81518e58e495d63b6e248e80adf 0 1467584829103 4 connected
780813af558af81518e58e495d63b6e248e80adf 127.0.0.1:7000 master - 0 1467584829407 1 connected 0-5460
ea828c6074663c8bd4e705d3e3024d9d1721ef3b 127.0.0.1:7001 master - 0 1467584830411 2 connected 5461-10922
b1dc6b4fd60ffcff552d669b3294f6991ccb2b68 127.0.0.1:7005 slave 17f2a81c7fc22283ef93bf118cedb193a1122399 0 1467584829207 6 connected
eaf2fc952dad107ffa28c0f436f73c3d52112276 127.0.0.1:7004 myself,slave ea828c6074663c8bd4e705d3e3024d9d1721ef3b 0 0 5 connected
vars currentEpoch 6 lastVoteEpoch 0
cluster-enabled yes
cluster-config-file nodes.conf
cluster-node-timeout 5000
appendonly yes
\ No newline at end of file
eaf2fc952dad107ffa28c0f436f73c3d52112276 127.0.0.1:7004 slave ea828c6074663c8bd4e705d3e3024d9d1721ef3b 0 1467584831428 5 connected
5ffbeb7f7872923ee0ed0913aedfe806265f5090 127.0.0.1:7003 slave 780813af558af81518e58e495d63b6e248e80adf 0 1467584831528 4 connected
780813af558af81518e58e495d63b6e248e80adf 127.0.0.1:7000 master - 0 1467584830927 1 connected 0-5460
ea828c6074663c8bd4e705d3e3024d9d1721ef3b 127.0.0.1:7001 master - 0 1467584830407 2 connected 5461-10922
b1dc6b4fd60ffcff552d669b3294f6991ccb2b68 127.0.0.1:7005 myself,slave 17f2a81c7fc22283ef93bf118cedb193a1122399 0 0 6 connected
17f2a81c7fc22283ef93bf118cedb193a1122399 127.0.0.1:7002 master - 0 1467584829400 3 connected 10923-16383
vars currentEpoch 6 lastVoteEpoch 0
cluster-enabled yes
cluster-config-file nodes.conf
cluster-node-timeout 5000
appendonly yes
\ No newline at end of file
@..\packages\Redis-64.2.8.19\redis-cli.exe -h cluster -p 7000
@..\packages\Redis-64.2.8.19\redis-cli.exe -h 127.0.0.1 -p 7000
@..\packages\Redis-64.2.8.19\redis-cli.exe -h cluster -p 7001
@..\packages\Redis-64.2.8.19\redis-cli.exe -h 127.0.0.1 -p 7001
@..\packages\Redis-64.2.8.19\redis-cli.exe -h cluster -p 7002
@..\packages\Redis-64.2.8.19\redis-cli.exe -h 127.0.0.1 -p 7002
@..\packages\Redis-64.2.8.19\redis-cli.exe -h cluster -p 7003
@..\packages\Redis-64.2.8.19\redis-cli.exe -h 127.0.0.1 -p 7003
@..\packages\Redis-64.2.8.19\redis-cli.exe -h cluster -p 7004
@..\packages\Redis-64.2.8.19\redis-cli.exe -h 127.0.0.1 -p 7004
@..\packages\Redis-64.2.8.19\redis-cli.exe -h cluster -p 7005
@..\packages\Redis-64.2.8.19\redis-cli.exe -h 127.0.0.1 -p 7005
@echo off
pushd Cluster\7000
@start /min ..\..\..\packages\Redis-64.3.0.503\redis-server.exe redis.conf --port 7000
popd
pushd Cluster\7001
@start /min ..\..\..\packages\Redis-64.3.0.503\redis-server.exe redis.conf --port 7001
popd
pushd Cluster\7002
@start /min ..\..\..\packages\Redis-64.3.0.503\redis-server.exe redis.conf --port 7002
popd
pushd Cluster\7003
@start /min ..\..\..\packages\Redis-64.3.0.503\redis-server.exe redis.conf --port 7003
popd
pushd Cluster\7004
@start /min ..\..\..\packages\Redis-64.3.0.503\redis-server.exe redis.conf --port 7004
popd
pushd Cluster\7005
@start /min ..\..\..\packages\Redis-64.3.0.503\redis-server.exe redis.conf --port 7005
popd
\ No newline at end of file
#!/usr/bin/env ruby
# redis-trib from the main redis repo at:
# https://github.com/antirez/redis/blob/unstable/src/redis-trib.rb
# TODO (temporary here, we'll move this into the Github issues once
# redis-trib initial implementation is completed).
#
# - Make sure that if the rehashing fails in the middle redis-trib will try
# to recover.
# - When redis-trib performs a cluster check, if it detects a slot move in
# progress it should prompt the user to continue the move from where it
# stopped.
# - Gracefully handle Ctrl+C in move_slot by prompting the user to confirm
# whether to really stop while rehashing, and performing the best cleanup
# possible if the user forces the quit.
# - When doing "fix" set a global Fix to true, and prompt the user to
# fix the problem if automatically fixable every time there is something
# to fix. For instance:
# 1) If there is a node that pretends to receive a slot, or to migrate a
# slot, but has no entries in that slot, fix it.
# 2) If there is a node having keys in slots that are not owned by it,
# fix this condition by moving the entries into a single owning node.
# 3) Perform more possibly slow tests about the state of the cluster.
# 4) When aborted slot migration is detected, fix it.
require 'rubygems'
require 'redis'
ClusterHashSlots = 16384
MigrateDefaultTimeout = 60000
MigrateDefaultPipeline = 10
RebalanceDefaultThreshold = 2
$verbose = false
def xputs(s)
case s[0..2]
when ">>>"
color="29;1"
when "[ER"
color="31;1"
when "[WA"
color="31;1"
when "[OK"
color="32"
when "[FA","***"
color="33"
else
color=nil
end
color = nil if ENV['TERM'] != "xterm"
print "\033[#{color}m" if color
print s
print "\033[0m" if color
print "\n"
end
class ClusterNode
def initialize(addr)
s = addr.split("@")[0].split(":")
if s.length < 2
puts "Invalid IP or Port (given as #{addr}) - use IP:Port format"
exit 1
end
port = s.pop # removes port from split array
ip = s.join(":") # if s.length > 1 here, it's IPv6, so restore address
@r = nil
@info = {}
@info[:host] = ip
@info[:port] = port
@info[:slots] = {}
@info[:migrating] = {}
@info[:importing] = {}
@info[:replicate] = false
@dirty = false # True if we need to flush slots info into node.
@friends = []
end
def friends
@friends
end
def slots
@info[:slots]
end
def has_flag?(flag)
@info[:flags].index(flag)
end
def to_s
"#{@info[:host]}:#{@info[:port]}"
end
def connect(o={})
return if @r
print "Connecting to node #{self}: " if $verbose
STDOUT.flush
begin
@r = Redis.new(:host => @info[:host], :port => @info[:port], :timeout => 60)
@r.ping
rescue
xputs "[ERR] Sorry, can't connect to node #{self}"
exit 1 if o[:abort]
@r = nil
end
xputs "OK" if $verbose
end
def assert_cluster
info = @r.info
if !info["cluster_enabled"] || info["cluster_enabled"].to_i == 0
xputs "[ERR] Node #{self} is not configured as a cluster node."
exit 1
end
end
def assert_empty
if !(@r.cluster("info").split("\r\n").index("cluster_known_nodes:1")) ||
(@r.info['db0'])
xputs "[ERR] Node #{self} is not empty. Either the node already knows other nodes (check with CLUSTER NODES) or contains some key in database 0."
exit 1
end
end
def load_info(o={})
self.connect
nodes = @r.cluster("nodes").split("\n")
nodes.each{|n|
# name addr flags master_id ping_sent ping_recv config_epoch link_status slots
split = n.split
name,addr,flags,master_id,ping_sent,ping_recv,config_epoch,link_status = split[0..7]
slots = split[8..-1]
info = {
:name => name,
:addr => addr,
:flags => flags.split(","),
:replicate => master_id,
:ping_sent => ping_sent.to_i,
:ping_recv => ping_recv.to_i,
:link_status => link_status
}
info[:replicate] = false if master_id == "-"
if info[:flags].index("myself")
@info = @info.merge(info)
@info[:slots] = {}
slots.each{|s|
if s[0..0] == '['
if s.index("->-") # Migrating
slot,dst = s[1..-1].split("->-")
@info[:migrating][slot.to_i] = dst
elsif s.index("-<-") # Importing
slot,src = s[1..-1].split("-<-")
@info[:importing][slot.to_i] = src
end
elsif s.index("-")
start,stop = s.split("-")
self.add_slots((start.to_i)..(stop.to_i))
else
self.add_slots((s.to_i)..(s.to_i))
end
} if slots
@dirty = false
@r.cluster("info").split("\n").each{|e|
k,v=e.split(":")
k = k.to_sym
v.chop!
if k != :cluster_state
@info[k] = v.to_i
else
@info[k] = v
end
}
elsif o[:getfriends]
@friends << info
end
}
end
def add_slots(slots)
slots.each{|s|
@info[:slots][s] = :new
}
@dirty = true
end
def set_as_replica(node_id)
@info[:replicate] = node_id
@dirty = true
end
def flush_node_config
return if !@dirty
if @info[:replicate]
begin
@r.cluster("replicate",@info[:replicate])
rescue
# If the cluster has not fully joined yet, it is possible that
# the slave does not know the master node yet. So on errors
# we return ASAP leaving the dirty flag set, to flush the
# config later.
return
end
else
new = []
@info[:slots].each{|s,val|
if val == :new
new << s
@info[:slots][s] = true
end
}
@r.cluster("addslots",*new)
end
@dirty = false
end
def info_string
# We want to display the hash slots assigned to this node
# as ranges, like in: "1-5,8-9,20-25,30"
#
# Note: this could be easily written without side effects,
# we use 'slots' just to split the computation into steps.
# First step: we want an increasing array of integers
# for instance: [1,2,3,4,5,8,9,20,21,22,23,24,25,30]
slots = @info[:slots].keys.sort
# As we want to aggregate adjacent slots we convert all the
# slot integers into ranges (with just one element)
# So we have something like [1..1,2..2, ... and so forth.
slots.map!{|x| x..x}
# Finally we group ranges with adjacent elements.
slots = slots.reduce([]) {|a,b|
if !a.empty? && b.first == (a[-1].last)+1
a[0..-2] + [(a[-1].first)..(b.last)]
else
a + [b]
end
}
# Now our task is easy, we just convert ranges with just one
# element into a number, and a real range into a start-end format.
# Finally we join the array using the comma as separator.
slots = slots.map{|x|
x.count == 1 ? x.first.to_s : "#{x.first}-#{x.last}"
}.join(",")
role = self.has_flag?("master") ? "M" : "S"
if self.info[:replicate] and @dirty
is = "S: #{self.info[:name]} #{self.to_s}"
else
is = "#{role}: #{self.info[:name]} #{self.to_s}\n"+
" slots:#{slots} (#{self.slots.length} slots) "+
"#{(self.info[:flags]-["myself"]).join(",")}"
end
if self.info[:replicate]
is += "\n replicates #{info[:replicate]}"
elsif self.has_flag?("master") && self.info[:replicas]
is += "\n #{info[:replicas].length} additional replica(s)"
end
is
end
# Return a single string representing nodes and associated slots.
# TODO: remove slaves from config when slaves will be handled
# by Redis Cluster.
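# With the three masters from the nodes.conf files in this commit, the
# signature would look something like (IDs abbreviated):
#   "17f2a81c...:10923-16383|780813af...:0-5460|ea828c60...:5461-10922"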
def get_config_signature
config = []
@r.cluster("nodes").each_line{|l|
s = l.split
slots = s[8..-1].select {|x| x[0..0] != "["}
next if slots.length == 0
config << s[0]+":"+(slots.sort.join(","))
}
config.sort.join("|")
end
def info
@info
end
def is_dirty?
@dirty
end
def r
@r
end
end
class RedisTrib
def initialize
@nodes = []
@fix = false
@errors = []
@timeout = MigrateDefaultTimeout
end
def check_arity(req_args, num_args)
if ((req_args > 0 and num_args != req_args) ||
(req_args < 0 and num_args < req_args.abs))
xputs "[ERR] Wrong number of arguments for specified sub command"
exit 1
end
end
def add_node(node)
@nodes << node
end
def reset_nodes
@nodes = []
end
def cluster_error(msg)
@errors << msg
xputs msg
end
# Return the node with the specified ID or Nil.
def get_node_by_name(name)
@nodes.each{|n|
return n if n.info[:name] == name.downcase
}
return nil
end
# Like get_node_by_name but the specified name can be just the first
# part of the node ID as long as the prefix is unique across the
# cluster.
def get_node_by_abbreviated_name(name)
l = name.length
candidates = []
@nodes.each{|n|
if n.info[:name][0...l] == name.downcase
candidates << n
end
}
return nil if candidates.length != 1
candidates[0]
end
# This function returns the master that has the least number of replicas
# in the cluster. If there are multiple masters with the same smallest
# number of replicas, one of them is returned.
def get_master_with_least_replicas
masters = @nodes.select{|n| n.has_flag? "master"}
sorted = masters.sort{|a,b|
a.info[:replicas].length <=> b.info[:replicas].length
}
sorted[0]
end
def check_cluster(opt={})
xputs ">>> Performing Cluster Check (using node #{@nodes[0]})"
show_nodes if !opt[:quiet]
check_config_consistency
check_open_slots
check_slots_coverage
end
def show_cluster_info
masters = 0
keys = 0
@nodes.each{|n|
if n.has_flag?("master")
puts "#{n} (#{n.info[:name][0...8]}...) -> #{n.r.dbsize} keys | #{n.slots.length} slots | "+
"#{n.info[:replicas].length} slaves."
masters += 1
keys += n.r.dbsize
end
}
xputs "[OK] #{keys} keys in #{masters} masters."
keys_per_slot = sprintf("%.2f",keys/16384.0)
puts "#{keys_per_slot} keys per slot on average."
end
# Merge slots of every known node. If the resulting slots are equal
# to ClusterHashSlots, then all slots are served.
def covered_slots
slots = {}
@nodes.each{|n|
slots = slots.merge(n.slots)
}
slots
end
def check_slots_coverage
xputs ">>> Check slots coverage..."
slots = covered_slots
if slots.length == ClusterHashSlots
xputs "[OK] All #{ClusterHashSlots} slots covered."
else
cluster_error \
"[ERR] Not all #{ClusterHashSlots} slots are covered by nodes."
fix_slots_coverage if @fix
end
end
def check_open_slots
xputs ">>> Check for open slots..."
open_slots = []
@nodes.each{|n|
if n.info[:migrating].size > 0
cluster_error \
"[WARNING] Node #{n} has slots in migrating state (#{n.info[:migrating].keys.join(",")})."
open_slots += n.info[:migrating].keys
end
if n.info[:importing].size > 0
cluster_error \
"[WARNING] Node #{n} has slots in importing state (#{n.info[:importing].keys.join(",")})."
open_slots += n.info[:importing].keys
end
}
open_slots.uniq!
if open_slots.length > 0
xputs "[WARNING] The following slots are open: #{open_slots.join(",")}"
end
if @fix
open_slots.each{|slot| fix_open_slot slot}
end
end
def nodes_with_keys_in_slot(slot)
nodes = []
@nodes.each{|n|
next if n.has_flag?("slave")
nodes << n if n.r.cluster("getkeysinslot",slot,1).length > 0
}
nodes
end
def fix_slots_coverage
not_covered = (0...ClusterHashSlots).to_a - covered_slots.keys
xputs ">>> Fixing slots coverage..."
xputs "List of not covered slots: " + not_covered.join(",")
# For every slot, take action depending on the actual condition:
# 1) No node has keys for this slot.
# 2) A single node has keys for this slot.
# 3) Multiple nodes have keys for this slot.
slots = {}
not_covered.each{|slot|
nodes = nodes_with_keys_in_slot(slot)
slots[slot] = nodes
xputs "Slot #{slot} has keys in #{nodes.length} nodes: #{nodes.join(", ")}"
}
none = slots.select {|k,v| v.length == 0}
single = slots.select {|k,v| v.length == 1}
multi = slots.select {|k,v| v.length > 1}
# Handle case "1": keys in no node.
if none.length > 0
xputs "The folowing uncovered slots have no keys across the cluster:"
xputs none.keys.join(",")
yes_or_die "Fix these slots by covering with a random node?"
none.each{|slot,nodes|
node = @nodes.sample
xputs ">>> Covering slot #{slot} with #{node}"
node.r.cluster("addslots",slot)
}
end
# Handle case "2": keys only in one node.
if single.length > 0
xputs "The folowing uncovered slots have keys in just one node:"
puts single.keys.join(",")
yes_or_die "Fix these slots by covering with those nodes?"
single.each{|slot,nodes|
xputs ">>> Covering slot #{slot} with #{nodes[0]}"
nodes[0].r.cluster("addslots",slot)
}
end
# Handle case "3": keys in multiple nodes.
if multi.length > 0
xputs "The folowing uncovered slots have keys in multiple nodes:"
xputs multi.keys.join(",")
yes_or_die "Fix these slots by moving keys into a single node?"
multi.each{|slot,nodes|
target = get_node_with_most_keys_in_slot(nodes,slot)
xputs ">>> Covering slot #{slot} moving keys to #{target}"
target.r.cluster('addslots',slot)
target.r.cluster('setslot',slot,'stable')
nodes.each{|src|
next if src == target
# Set the source node in 'importing' state (even if we will
# actually migrate keys away) in order to avoid receiving
# redirections for MIGRATE.
src.r.cluster('setslot',slot,'importing',target.info[:name])
move_slot(src,target,slot,:dots=>true,:fix=>true,:cold=>true)
src.r.cluster('setslot',slot,'stable')
}
}
end
end
# Return the owners of the specified slot
def get_slot_owners(slot)
owners = []
@nodes.each{|n|
next if n.has_flag?("slave")
n.slots.each{|s,_|
owners << n if s == slot
}
}
owners
end
# Return the node, among 'nodes' with the greatest number of keys
# in the specified slot.
def get_node_with_most_keys_in_slot(nodes,slot)
best = nil
best_numkeys = 0
nodes.each{|n|
next if n.has_flag?("slave")
numkeys = n.r.cluster("countkeysinslot",slot)
if numkeys > best_numkeys || best == nil
best = n
best_numkeys = numkeys
end
}
return best
end
# Slot 'slot' was found to be in importing or migrating state in one or
# more nodes. This function fixes this condition by migrating keys where
# it seems more sensible.
def fix_open_slot(slot)
puts ">>> Fixing open slot #{slot}"
# Try to obtain the current slot owner, according to the current
# nodes configuration.
owners = get_slot_owners(slot)
owner = owners[0] if owners.length == 1
migrating = []
importing = []
@nodes.each{|n|
next if n.has_flag? "slave"
if n.info[:migrating][slot]
migrating << n
elsif n.info[:importing][slot]
importing << n
elsif n.r.cluster("countkeysinslot",slot) > 0 && n != owner
xputs "*** Found keys about slot #{slot} in node #{n}!"
importing << n
end
}
puts "Set as migrating in: #{migrating.join(",")}"
puts "Set as importing in: #{importing.join(",")}"
# If there is no slot owner, set as owner the node with the biggest
# number of keys, among the set of migrating / importing nodes.
if !owner
xputs ">>> Nobody claims ownership, selecting an owner..."
owner = get_node_with_most_keys_in_slot(@nodes,slot)
# If we still don't have an owner, we can't fix it.
if !owner
xputs "[ERR] Can't select a slot owner. Impossible to fix."
exit 1
end
# Use ADDSLOTS to assign the slot.
puts "*** Configuring #{owner} as the slot owner"
owner.r.cluster("setslot",slot,"stable")
owner.r.cluster("addslots",slot)
# Make sure this information will propagate. Not strictly needed
# since there is no past owner, so all the other nodes will accept
# whatever epoch this node will claim the slot with.
owner.r.cluster("bumpepoch")
# Remove the owner from the list of migrating/importing
# nodes.
migrating.delete(owner)
importing.delete(owner)
end
# If there are multiple owners of the slot, we need to fix it
# so that a single node is the owner and all the other nodes
# are in importing state. Later the fix can be handled by one
# of the base cases above.
#
# Note that this case also covers multiple nodes having the slot
# in migrating state, since migrating is a valid state only for
# slot owners.
if owners.length > 1
owner = get_node_with_most_keys_in_slot(owners,slot)
owners.each{|n|
next if n == owner
n.r.cluster('delslots',slot)
n.r.cluster('setslot',slot,'importing',owner.info[:name])
importing.delete(n) # Avoid duplicates
importing << n
}
owner.r.cluster('bumpepoch')
end
# Case 1: The slot is in migrating state in one node, and in
# importing state in another node. That's trivial to address.
if migrating.length == 1 && importing.length == 1
move_slot(migrating[0],importing[0],slot,:dots=>true,:fix=>true)
# Case 2: There are multiple nodes that claim the slot as importing,
# they probably got keys about the slot after a restart so opened
# the slot. In this case we just move all the keys to the owner
# according to the configuration.
elsif migrating.length == 0 && importing.length > 0
xputs ">>> Moving all the #{slot} slot keys to its owner #{owner}"
importing.each {|node|
next if node == owner
move_slot(node,owner,slot,:dots=>true,:fix=>true,:cold=>true)
xputs ">>> Setting #{slot} as STABLE in #{node}"
node.r.cluster("setslot",slot,"stable")
}
# Case 3: There are no nodes claiming to be in importing state, but
# there is a migrating node that actually doesn't have any keys. We
# can just close the slot, probably a reshard interrupted in the middle.
elsif importing.length == 0 && migrating.length == 1 &&
migrating[0].r.cluster("getkeysinslot",slot,10).length == 0
migrating[0].r.cluster("setslot",slot,"stable")
else
xputs "[ERR] Sorry, Redis-trib can't fix this slot yet (work in progress). Slot is set as migrating in #{migrating.join(",")}, as importing in #{importing.join(",")}, owner is #{owner}"
end
end
# Check if all the nodes agree about the cluster configuration
def check_config_consistency
if !is_config_consistent?
cluster_error "[ERR] Nodes don't agree about configuration!"
else
xputs "[OK] All nodes agree about slots configuration."
end
end
def is_config_consistent?
signatures=[]
@nodes.each{|n|
signatures << n.get_config_signature
}
return signatures.uniq.length == 1
end
def wait_cluster_join
print "Waiting for the cluster to join"
while !is_config_consistent?
print "."
STDOUT.flush
sleep 1
end
print "\n"
end
def alloc_slots
nodes_count = @nodes.length
masters_count = @nodes.length / (@replicas+1)
masters = []
# The first step is to split instances by IP. This is useful as
# we'll try to allocate master nodes in different physical machines
# (as much as possible) and to allocate slaves of a given master in
# different physical machines as well.
#
# This code simply assumes that if the IP is different, then it is more
# likely that the instance is running in a different physical host
# or at least a different virtual machine.
ips = {}
@nodes.each{|n|
ips[n.info[:host]] = [] if !ips[n.info[:host]]
ips[n.info[:host]] << n
}
# Select master instances
puts "Using #{masters_count} masters:"
interleaved = []
stop = false
while not stop do
# Take one node from each IP until we run out of nodes
# across every IP.
ips.each do |ip,nodes|
if nodes.empty?
# if this IP has no remaining nodes, check for termination
if interleaved.length == nodes_count
# stop when 'interleaved' has accumulated all nodes
stop = true
next
end
else
# else, move one node from this IP to 'interleaved'
interleaved.push nodes.shift
end
end
end
masters = interleaved.slice!(0, masters_count)
nodes_count -= masters.length
masters.each{|m| puts m}
# Alloc slots on masters
slots_per_node = ClusterHashSlots.to_f / masters_count
first = 0
cursor = 0.0
masters.each_with_index{|n,masternum|
last = (cursor+slots_per_node-1).round
if last > ClusterHashSlots || masternum == masters.length-1
last = ClusterHashSlots-1
end
last = first if last < first # Min step is 1.
n.add_slots first..last
first = last+1
cursor += slots_per_node
}
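# As a worked example of the rounding above: with 16384 slots and
# 3 masters, slots_per_node is ~5461.33 and the computed ranges are
# 0-5460, 5461-10922 and 10923-16383, matching the ranges in the
# nodes.conf files earlier in this commit.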
# Select N replicas for every master.
# We try to split the replicas among all the IPs with spare nodes
# trying to avoid the host where the master is running, if possible.
#
# Note we loop two times. The first loop assigns the requested
# number of replicas to each master. The second loop assigns any
# remaining instances as extra replicas to masters. Some masters
# may end up with more than their requested number of replicas, but
# all nodes will be used.
assignment_verbose = false
[:requested,:unused].each do |assign|
masters.each do |m|
assigned_replicas = 0
while assigned_replicas < @replicas
break if nodes_count == 0
if assignment_verbose
if assign == :requested
puts "Requesting total of #{@replicas} replicas " \
"(#{assigned_replicas} replicas assigned " \
"so far with #{nodes_count} total remaining)."
elsif assign == :unused
puts "Assigning extra instance to replication " \
"role too (#{nodes_count} remaining)."
end
end
# Return the first node not matching our current master
node = interleaved.find{|n| n.info[:host] != m.info[:host]}
# If we found a node, use it as a best-first match.
# Otherwise, we didn't find a node on a different IP, so we
# go ahead and use a same-IP replica.
if node
slave = node
interleaved.delete node
else
slave = interleaved.shift
end
slave.set_as_replica(m.info[:name])
nodes_count -= 1
assigned_replicas += 1
puts "Adding replica #{slave} to #{m}"
# If we are in the "assign extra nodes" loop,
# we want to assign one extra replica to each
# master before repeating masters.
# This break lets us assign extra replicas to masters
# in a round-robin way.
break if assign == :unused
end
end
end
end
def flush_nodes_config
@nodes.each{|n|
n.flush_node_config
}
end
def show_nodes
@nodes.each{|n|
xputs n.info_string
}
end
# Redis Cluster config epoch collision resolution code is able to eventually
# set a different epoch to each node after a new cluster is created, but
# it is slow compared to assigning a progressive config epoch to each node
# before joining the cluster. However we just make a best-effort attempt
# here, since it is not a problem if it fails.
def assign_config_epoch
config_epoch = 1
@nodes.each{|n|
begin
n.r.cluster("set-config-epoch",config_epoch)
rescue
end
config_epoch += 1
}
end
def join_cluster
# We use a brute force approach to make sure the nodes will meet
# each other, that is, sending CLUSTER MEET messages to all the nodes
# about the very same node.
# Thanks to gossip this information should propagate across all the
# cluster in a matter of seconds.
first = false
@nodes.each{|n|
if !first then first = n.info; next; end # Skip the first node
n.r.cluster("meet",first[:host],first[:port])
}
end
def yes_or_die(msg)
print "#{msg} (type 'yes' to accept): "
STDOUT.flush
if !(STDIN.gets.chomp.downcase == "yes")
xputs "*** Aborting..."
exit 1
end
end
def load_cluster_info_from_node(nodeaddr)
node = ClusterNode.new(nodeaddr)
node.connect(:abort => true)
node.assert_cluster
node.load_info(:getfriends => true)
add_node(node)
node.friends.each{|f|
next if f[:flags].index("noaddr") ||
f[:flags].index("disconnected") ||
f[:flags].index("fail")
fnode = ClusterNode.new(f[:addr])
fnode.connect()
next if !fnode.r
begin
fnode.load_info()
add_node(fnode)
rescue => e
xputs "[ERR] Unable to load info for node #{fnode}"
end
}
populate_nodes_replicas_info
end
# This function is called by load_cluster_info_from_node in order to
# add additional information to every node as a list of replicas.
def populate_nodes_replicas_info
# Start adding the new field to every node.
@nodes.each{|n|
n.info[:replicas] = []
}
# Populate the replicas field using the replicate field of slave
# nodes.
@nodes.each{|n|
if n.info[:replicate]
master = get_node_by_name(n.info[:replicate])
if !master
xputs "*** WARNING: #{n} claims to be slave of unknown node ID #{n.info[:replicate]}."
else
master.info[:replicas] << n
end
end
}
end
# Given a list of source nodes return a "resharding plan"
# with what slots to move in order to move "numslots" slots to another
# instance.
def compute_reshard_table(sources,numslots)
moved = []
# Sort from bigger to smaller instance, for two reasons:
# 1) If we take less slots than instances it is better to start
# getting from the biggest instances.
# 2) We take one slot more from the first instance in the case of
# imperfect divisibility. For example, if we have 3 nodes and need to
# get 10 slots, we take 4 from the first, and 3 from the rest. So the
# biggest is always the first.
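# (Checking the arithmetic below: with three equal sources of 10 slots
# each and numslots=10, each share is 10.0/30*10 = 3.33; the first
# source is ceiled to 4 and the others floored to 3.)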
sources = sources.sort{|a,b| b.slots.length <=> a.slots.length}
source_tot_slots = sources.inject(0) {|sum,source|
sum+source.slots.length
}
sources.each_with_index{|s,i|
# Every node will provide a number of slots proportional to the
# slots it has assigned.
n = (numslots.to_f/source_tot_slots*s.slots.length)
if i == 0
n = n.ceil
else
n = n.floor
end
s.slots.keys.sort[(0...n)].each{|slot|
if moved.length < numslots
moved << {:source => s, :slot => slot}
end
}
}
return moved
end
def show_reshard_table(table)
table.each{|e|
puts " Moving slot #{e[:slot]} from #{e[:source].info[:name]}"
}
end
# Move slots between source and target nodes using MIGRATE.
#
# Options:
# :verbose -- Print a dot for every moved key.
# :fix -- We are moving in the context of a fix. Use REPLACE.
# :cold -- Move keys without opening slots / reconfiguring the nodes.
# :update -- Update nodes.info[:slots] for source/target nodes.
# :quiet -- Don't print info messages.
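# A hypothetical invocation, moving one slot with progress dots and a
# custom pipeline size:
#   move_slot(src_node, dst_node, 42, :dots=>true, :pipeline=>32)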
def move_slot(source,target,slot,o={})
o = {:pipeline => MigrateDefaultPipeline}.merge(o)
# We start marking the slot as importing in the destination node,
# and the slot as migrating in the source node. Note that the order of
# the operations is important, as otherwise a client may be redirected
# to the target node that does not yet know it is importing this slot.
if !o[:quiet]
print "Moving slot #{slot} from #{source} to #{target}: "
STDOUT.flush
end
if !o[:cold]
target.r.cluster("setslot",slot,"importing",source.info[:name])
source.r.cluster("setslot",slot,"migrating",target.info[:name])
end
# Migrate all the keys from source to target using the MIGRATE command
while true
keys = source.r.cluster("getkeysinslot",slot,o[:pipeline])
break if keys.length == 0
begin
source.r.client.call(["migrate",target.info[:host],target.info[:port],"",0,@timeout,:keys,*keys])
rescue => e
if o[:fix] && e.to_s =~ /BUSYKEY/
xputs "*** Target key exists. Replacing it for FIX."
source.r.client.call(["migrate",target.info[:host],target.info[:port],"",0,@timeout,:replace,:keys,*keys])
else
puts ""
xputs "[ERR] Calling MIGRATE: #{e}"
exit 1
end
end
print "."*keys.length if o[:dots]
STDOUT.flush
end
puts if !o[:quiet]
# Set the new node as the owner of the slot in all the known nodes.
if !o[:cold]
@nodes.each{|n|
next if n.has_flag?("slave")
n.r.cluster("setslot",slot,"node",target.info[:name])
}
end
# Update the node logical config
if o[:update] then
source.info[:slots].delete(slot)
target.info[:slots][slot] = true
end
end
# redis-trib subcommands implementations.
def check_cluster_cmd(argv,opt)
load_cluster_info_from_node(argv[0])
check_cluster
end
def info_cluster_cmd(argv,opt)
load_cluster_info_from_node(argv[0])
show_cluster_info
end
def rebalance_cluster_cmd(argv,opt)
opt = {
'pipeline' => MigrateDefaultPipeline,
'threshold' => RebalanceDefaultThreshold
}.merge(opt)
# Load nodes info before parsing options, otherwise we can't
# handle --weight.
load_cluster_info_from_node(argv[0])
# Options parsing
threshold = opt['threshold'].to_i
autoweights = opt['auto-weights']
weights = {}
opt['weight'].each{|w|
fields = w.split("=")
node = get_node_by_abbreviated_name(fields[0])
if !node || !node.has_flag?("master")
puts "*** No such master node #{fields[0]}"
exit 1
end
weights[node.info[:name]] = fields[1].to_f
} if opt['weight']
useempty = opt['use-empty-masters']
# Assign a weight to each node, and compute the total cluster weight.
total_weight = 0
nodes_involved = 0
@nodes.each{|n|
if n.has_flag?("master")
next if !useempty && n.slots.length == 0
n.info[:w] = weights[n.info[:name]] ? weights[n.info[:name]] : 1
total_weight += n.info[:w]
nodes_involved += 1
end
}
# Check cluster, only proceed if it looks sane.
check_cluster(:quiet => true)
if @errors.length != 0
puts "*** Please fix your cluster problems before rebalancing"
exit 1
end
# Calculate the slots balance for each node. It's the number of
# slots the node should lose (if positive) or gain (if negative)
# in order to be balanced.
threshold = opt['threshold'].to_f
threshold_reached = false
@nodes.each{|n|
if n.has_flag?("master")
next if !n.info[:w]
expected = ((ClusterHashSlots.to_f / total_weight) *
n.info[:w]).to_i
n.info[:balance] = n.slots.length - expected
# Compute the percentage of difference between the
# expected number of slots and the real one, to see
# if it's over the threshold specified by the user.
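# Illustrative numbers: with expected=5461 but only 5000 actual slots,
# err_perc = (100-(100.0*5461/5000)).abs ~= 9.22, well over the default
# threshold of 2.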
over_threshold = false
if threshold > 0
if n.slots.length > 0
err_perc = (100-(100.0*expected/n.slots.length)).abs
over_threshold = true if err_perc > threshold
elsif expected > 0
over_threshold = true
end
end
threshold_reached = true if over_threshold
end
}
if !threshold_reached
xputs "*** No rebalancing needed! All nodes are within the #{threshold}% threshold."
return
end
# Only consider nodes we want to change
sn = @nodes.select{|n|
n.has_flag?("master") && n.info[:w]
}
# Because of rounding, it is possible that the balance of all nodes
# summed does not give 0. Make sure that nodes that have to provide
# slots are always matched by nodes receiving slots.
total_balance = sn.map{|x| x.info[:balance]}.reduce{|a,b| a+b}
while total_balance > 0
sn.each{|n|
if n.info[:balance] < 0 && total_balance > 0
n.info[:balance] -= 1
total_balance -= 1
end
}
end
# Sort nodes by their slots balance.
sn = sn.sort{|a,b|
a.info[:balance] <=> b.info[:balance]
}
xputs ">>> Rebalancing across #{nodes_involved} nodes. Total weight = #{total_weight}"
if $verbose
sn.each{|n|
puts "#{n} balance is #{n.info[:balance]} slots"
}
end
# Now we have at the start of the 'sn' array nodes that should get
# slots, at the end nodes that must give slots.
# We take two indexes, one at the start, and one at the end,
# incrementing or decrementing the indexes accordingly until we
# find nodes that need to get/provide slots.
dst_idx = 0
src_idx = sn.length - 1
while dst_idx < src_idx
dst = sn[dst_idx]
src = sn[src_idx]
numslots = [dst.info[:balance],src.info[:balance]].map{|n|
n.abs
}.min
if numslots > 0
puts "Moving #{numslots} slots from #{src} to #{dst}"
# Actually move the slots.
reshard_table = compute_reshard_table([src],numslots)
if reshard_table.length != numslots
xputs "*** Assertio failed: Reshard table != number of slots"
exit 1
end
if opt['simulate']
print "#"*reshard_table.length
else
reshard_table.each{|e|
move_slot(e[:source],dst,e[:slot],
:quiet=>true,
:dots=>false,
:update=>true,
:pipeline=>opt['pipeline'])
print "#"
STDOUT.flush
}
end
puts
end
# Update nodes balance.
dst.info[:balance] += numslots
src.info[:balance] -= numslots
dst_idx += 1 if dst.info[:balance] == 0
src_idx -= 1 if src.info[:balance] == 0
end
end
def fix_cluster_cmd(argv,opt)
@fix = true
@timeout = opt['timeout'].to_i if opt['timeout']
load_cluster_info_from_node(argv[0])
check_cluster
end
def reshard_cluster_cmd(argv,opt)
opt = {'pipeline' => MigrateDefaultPipeline}.merge(opt)
load_cluster_info_from_node(argv[0])
check_cluster
if @errors.length != 0
puts "*** Please fix your cluster problems before resharding"
exit 1
end
@timeout = opt['timeout'].to_i if opt['timeout']
# Get number of slots
if opt['slots']
numslots = opt['slots'].to_i
else
numslots = 0
while numslots <= 0 or numslots > ClusterHashSlots
print "How many slots do you want to move (from 1 to #{ClusterHashSlots})? "
numslots = STDIN.gets.to_i
end
end
# Get the target instance
if opt['to']
target = get_node_by_name(opt['to'])
if !target || target.has_flag?("slave")
xputs "*** The specified node is not known or not a master, please retry."
exit 1
end
else
target = nil
while not target
print "What is the receiving node ID? "
target = get_node_by_name(STDIN.gets.chop)
if !target || target.has_flag?("slave")
xputs "*** The specified node is not known or not a master, please retry."
target = nil
end
end
end
# Get the source instances
sources = []
if opt['from']
opt['from'].split(',').each{|node_id|
if node_id == "all"
sources = "all"
break
end
src = get_node_by_name(node_id)
if !src || src.has_flag?("slave")
xputs "*** The specified node is not known or is not a master, please retry."
exit 1
end
sources << src
}
else
xputs "Please enter all the source node IDs."
xputs " Type 'all' to use all the nodes as source nodes for the hash slots."
xputs " Type 'done' once you entered all the source nodes IDs."
while true
print "Source node ##{sources.length+1}:"
line = STDIN.gets.chop
src = get_node_by_name(line)
if line == "done"
break
elsif line == "all"
sources = "all"
break
elsif !src || src.has_flag?("slave")
xputs "*** The specified node is not known or is not a master, please retry."
elsif src.info[:name] == target.info[:name]
xputs "*** It is not possible to use the target node as source node."
else
sources << src
end
end
end
if sources.length == 0
puts "*** No source nodes given, operation aborted"
exit 1
end
# Handle sources == all.
if sources == "all"
sources = []
@nodes.each{|n|
next if n.info[:name] == target.info[:name]
next if n.has_flag?("slave")
sources << n
}
end
# Check if the destination node is the same as any of the source nodes.
if sources.index(target)
xputs "*** Target node is also listed among the source nodes!"
exit 1
end
puts "\nReady to move #{numslots} slots."
puts " Source nodes:"
sources.each{|s| puts " "+s.info_string}
puts " Destination node:"
puts " #{target.info_string}"
reshard_table = compute_reshard_table(sources,numslots)
puts " Resharding plan:"
show_reshard_table(reshard_table)
if !opt['yes']
print "Do you want to proceed with the proposed reshard plan (yes/no)? "
yesno = STDIN.gets.chop
exit(1) if (yesno != "yes")
end
reshard_table.each{|e|
move_slot(e[:source],target,e[:slot],
:dots=>true,
:pipeline=>opt['pipeline'])
}
end
# This is a helper function for create_cluster_cmd that verifies that
# the number of nodes and the specified replicas form a valid configuration
# where there are at least three master nodes and enough replicas per node.
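# For example, 6 nodes created with --replicas 1 yield 6/(1+1) = 3
# masters and pass; 5 nodes with --replicas 1 yield only 2 masters and
# abort, asking for at least 3*(1+1) = 6 nodes.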
def check_create_parameters
masters = @nodes.length/(@replicas+1)
if masters < 3
puts "*** ERROR: Invalid configuration for cluster creation."
puts "*** Redis Cluster requires at least 3 master nodes."
puts "*** This is not possible with #{@nodes.length} nodes and #{@replicas} replicas per node."
puts "*** At least #{3*(@replicas+1)} nodes are required."
exit 1
end
end
def create_cluster_cmd(argv,opt)
opt = {'replicas' => 0}.merge(opt)
@replicas = opt['replicas'].to_i
xputs ">>> Creating cluster"
argv[0..-1].each{|n|
node = ClusterNode.new(n)
node.connect(:abort => true)
node.assert_cluster
node.load_info
node.assert_empty
add_node(node)
}
check_create_parameters
xputs ">>> Performing hash slots allocation on #{@nodes.length} nodes..."
alloc_slots
show_nodes
yes_or_die "Can I set the above configuration?"
flush_nodes_config
xputs ">>> Nodes configuration updated"
xputs ">>> Assign a different config epoch to each node"
assign_config_epoch
xputs ">>> Sending CLUSTER MEET messages to join the cluster"
join_cluster
# Give one second for the join to start, in order to avoid that
# wait_cluster_join finds all the nodes in agreement about the config
# simply because they are still empty with unassigned slots.
sleep 1
wait_cluster_join
flush_nodes_config # Useful for the replicas
check_cluster
end
def addnode_cluster_cmd(argv,opt)
xputs ">>> Adding node #{argv[0]} to cluster #{argv[1]}"
# Check the existing cluster
load_cluster_info_from_node(argv[1])
check_cluster
# If --master-id was specified, try to resolve it now so that we
# abort before starting with the node configuration.
if opt['slave']
if opt['master-id']
master = get_node_by_name(opt['master-id'])
if !master
xputs "[ERR] No such master ID #{opt['master-id']}"
end
else
master = get_master_with_least_replicas
xputs "Automatically selected master #{master}"
end
end
# Add the new node
new = ClusterNode.new(argv[0])
new.connect(:abort => true)
new.assert_cluster
new.load_info
new.assert_empty
first = @nodes.first.info
add_node(new)
# Send CLUSTER MEET command to the new node
xputs ">>> Send CLUSTER MEET to node #{new} to make it join the cluster."
new.r.cluster("meet",first[:host],first[:port])
# Additional configuration is needed if the node is added as
# a slave.
if opt['slave']
wait_cluster_join
xputs ">>> Configure node as replica of #{master}."
new.r.cluster("replicate",master.info[:name])
end
xputs "[OK] New node added correctly."
end
def delnode_cluster_cmd(argv,opt)
id = argv[1].downcase
xputs ">>> Removing node #{id} from cluster #{argv[0]}"
# Load cluster information
load_cluster_info_from_node(argv[0])
# Check if the node exists and is not empty
node = get_node_by_name(id)
if !node
xputs "[ERR] No such node ID #{id}"
exit 1
end
if node.slots.length != 0
xputs "[ERR] Node #{node} is not empty! Reshard data away and try again."
exit 1
end
# Send CLUSTER FORGET to all the nodes but the node to remove
xputs ">>> Sending CLUSTER FORGET messages to the cluster..."
@nodes.each{|n|
next if n == node
if n.info[:replicate] && n.info[:replicate].downcase == id
# Reconfigure the slave to replicate with some other node
master = get_master_with_least_replicas
xputs ">>> #{n} as replica of #{master}"
n.r.cluster("replicate",master.info[:name])
end
n.r.cluster("forget",argv[1])
}
# Finally shutdown the node
xputs ">>> SHUTDOWN the node."
node.r.shutdown
end
def set_timeout_cluster_cmd(argv,opt)
timeout = argv[1].to_i
if timeout < 100
puts "Setting a node timeout of less than 100 milliseconds is a bad idea."
exit 1
end
# Load cluster information
load_cluster_info_from_node(argv[0])
ok_count = 0
err_count = 0
# Send CONFIG SET cluster-node-timeout to all the nodes
xputs ">>> Reconfiguring node timeout in every cluster node..."
@nodes.each{|n|
begin
n.r.config("set","cluster-node-timeout",timeout)
n.r.config("rewrite")
ok_count += 1
xputs "*** New timeout set for #{n}"
rescue => e
puts "ERR setting node-timeot for #{n}: #{e}"
err_count += 1
end
}
xputs ">>> New node timeout set. #{ok_count} OK, #{err_count} ERR."
end
def call_cluster_cmd(argv,opt)
cmd = argv[1..-1]
cmd[0] = cmd[0].upcase
# Load cluster information
load_cluster_info_from_node(argv[0])
xputs ">>> Calling #{cmd.join(" ")}"
@nodes.each{|n|
begin
res = n.r.send(*cmd)
puts "#{n}: #{res}"
rescue => e
puts "#{n}: #{e}"
end
}
end
def import_cluster_cmd(argv,opt)
source_addr = opt['from']
xputs ">>> Importing data from #{source_addr} to cluster #{argv[1]}"
use_copy = opt['copy']
use_replace = opt['replace']
# Check the existing cluster.
load_cluster_info_from_node(argv[0])
check_cluster
# Connect to the source node.
xputs ">>> Connecting to the source Redis instance"
src_host,src_port = source_addr.split(":")
source = Redis.new(:host =>src_host, :port =>src_port)
if source.info['cluster_enabled'].to_i == 1
xputs "[ERR] The source node should not be a cluster node."
end
xputs "*** Importing #{source.dbsize} keys from DB 0"
# Build a slot -> node map
slots = {}
@nodes.each{|n|
n.slots.each{|s,_|
slots[s] = n
}
}
# Use SCAN to iterate over the keys, migrating to the
# right node as needed.
cursor = nil
while cursor != 0
cursor,keys = source.scan(cursor, :count => 1000)
cursor = cursor.to_i
keys.each{|k|
# Migrate keys using the MIGRATE command.
slot = key_to_slot(k)
target = slots[slot]
print "Migrating #{k} to #{target}: "
STDOUT.flush
begin
cmd = ["migrate",target.info[:host],target.info[:port],k,0,@timeout]
cmd << :copy if use_copy
cmd << :replace if use_replace
source.client.call(cmd)
rescue => e
puts e
else
puts "OK"
end
}
end
end
def help_cluster_cmd(argv,opt)
show_help
exit 0
end
# Parse the options for the specific command "cmd".
# Returns a hash populated with option => value pairs, and the index of
# the first non-option argument in ARGV.
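# A hypothetical example: "./redis-trib.rb reshard --slots 100 --yes
# 127.0.0.1:7000" returns {"slots"=>"100","yes"=>true} and the index of
# the 127.0.0.1:7000 argument.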
def parse_options(cmd)
idx = 1 ; # Current index into ARGV
options={}
while idx < ARGV.length && ARGV[idx][0..1] == '--'
if ARGV[idx][0..1] == "--"
option = ARGV[idx][2..-1]
idx += 1
# --verbose is a global option
if option == "verbose"
$verbose = true
next
end
if ALLOWED_OPTIONS[cmd] == nil || ALLOWED_OPTIONS[cmd][option] == nil
puts "Unknown option '#{option}' for command '#{cmd}'"
exit 1
end
if ALLOWED_OPTIONS[cmd][option] != false
value = ARGV[idx]
idx += 1
else
value = true
end
# If the option is set to [], it's a multiple arguments
# option. We just queue every new value into an array.
if ALLOWED_OPTIONS[cmd][option] == []
options[option] = [] if !options[option]
options[option] << value
else
options[option] = value
end
else
# Remaining arguments are not options.
break
end
end
# Enforce mandatory options
if ALLOWED_OPTIONS[cmd]
ALLOWED_OPTIONS[cmd].each {|option,val|
if !options[option] && val == :required
puts "Option '--#{option}' is required "+ \
"for subcommand '#{cmd}'"
exit 1
end
}
end
return options,idx
end
end
#################################################################################
# Libraries
#
# We try not to depend on external libs since this is a critical part
# of Redis Cluster.
#################################################################################
# This is the CRC16 algorithm used by Redis Cluster to hash keys.
# Implementation according to CCITT standards.
#
# This is actually the XMODEM CRC 16 algorithm, using the
# following parameters:
#
# Name : "XMODEM", also known as "ZMODEM", "CRC-16/ACORN"
# Width : 16 bit
# Poly : 1021 (That is actually x^16 + x^12 + x^5 + 1)
# Initialization : 0000
# Reflect Input byte : False
# Reflect Output CRC : False
# Xor constant to output CRC : 0000
# Output for "123456789" : 31C3
module RedisClusterCRC16
def RedisClusterCRC16.crc16(bytes)
crc = 0
bytes.each_byte{|b|
crc = ((crc<<8) & 0xffff) ^ XMODEMCRC16Lookup[((crc>>8)^b) & 0xff]
}
crc
end
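# Sanity check against the reference output quoted above:
#   RedisClusterCRC16.crc16("123456789") #=> 0x31c3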
private
XMODEMCRC16Lookup = [
0x0000,0x1021,0x2042,0x3063,0x4084,0x50a5,0x60c6,0x70e7,
0x8108,0x9129,0xa14a,0xb16b,0xc18c,0xd1ad,0xe1ce,0xf1ef,
0x1231,0x0210,0x3273,0x2252,0x52b5,0x4294,0x72f7,0x62d6,
0x9339,0x8318,0xb37b,0xa35a,0xd3bd,0xc39c,0xf3ff,0xe3de,
0x2462,0x3443,0x0420,0x1401,0x64e6,0x74c7,0x44a4,0x5485,
0xa56a,0xb54b,0x8528,0x9509,0xe5ee,0xf5cf,0xc5ac,0xd58d,
0x3653,0x2672,0x1611,0x0630,0x76d7,0x66f6,0x5695,0x46b4,
0xb75b,0xa77a,0x9719,0x8738,0xf7df,0xe7fe,0xd79d,0xc7bc,
0x48c4,0x58e5,0x6886,0x78a7,0x0840,0x1861,0x2802,0x3823,
0xc9cc,0xd9ed,0xe98e,0xf9af,0x8948,0x9969,0xa90a,0xb92b,
0x5af5,0x4ad4,0x7ab7,0x6a96,0x1a71,0x0a50,0x3a33,0x2a12,
0xdbfd,0xcbdc,0xfbbf,0xeb9e,0x9b79,0x8b58,0xbb3b,0xab1a,
0x6ca6,0x7c87,0x4ce4,0x5cc5,0x2c22,0x3c03,0x0c60,0x1c41,
0xedae,0xfd8f,0xcdec,0xddcd,0xad2a,0xbd0b,0x8d68,0x9d49,
0x7e97,0x6eb6,0x5ed5,0x4ef4,0x3e13,0x2e32,0x1e51,0x0e70,
0xff9f,0xefbe,0xdfdd,0xcffc,0xbf1b,0xaf3a,0x9f59,0x8f78,
0x9188,0x81a9,0xb1ca,0xa1eb,0xd10c,0xc12d,0xf14e,0xe16f,
0x1080,0x00a1,0x30c2,0x20e3,0x5004,0x4025,0x7046,0x6067,
0x83b9,0x9398,0xa3fb,0xb3da,0xc33d,0xd31c,0xe37f,0xf35e,
0x02b1,0x1290,0x22f3,0x32d2,0x4235,0x5214,0x6277,0x7256,
0xb5ea,0xa5cb,0x95a8,0x8589,0xf56e,0xe54f,0xd52c,0xc50d,
0x34e2,0x24c3,0x14a0,0x0481,0x7466,0x6447,0x5424,0x4405,
0xa7db,0xb7fa,0x8799,0x97b8,0xe75f,0xf77e,0xc71d,0xd73c,
0x26d3,0x36f2,0x0691,0x16b0,0x6657,0x7676,0x4615,0x5634,
0xd94c,0xc96d,0xf90e,0xe92f,0x99c8,0x89e9,0xb98a,0xa9ab,
0x5844,0x4865,0x7806,0x6827,0x18c0,0x08e1,0x3882,0x28a3,
0xcb7d,0xdb5c,0xeb3f,0xfb1e,0x8bf9,0x9bd8,0xabbb,0xbb9a,
0x4a75,0x5a54,0x6a37,0x7a16,0x0af1,0x1ad0,0x2ab3,0x3a92,
0xfd2e,0xed0f,0xdd6c,0xcd4d,0xbdaa,0xad8b,0x9de8,0x8dc9,
0x7c26,0x6c07,0x5c64,0x4c45,0x3ca2,0x2c83,0x1ce0,0x0cc1,
0xef1f,0xff3e,0xcf5d,0xdf7c,0xaf9b,0xbfba,0x8fd9,0x9ff8,
0x6e17,0x7e36,0x4e55,0x5e74,0x2e93,0x3eb2,0x0ed1,0x1ef0
]
end
# Turn a key name into the corresponding Redis Cluster slot.
def key_to_slot(key)
# Only hash what is inside {...} if there is such a pattern in the key.
# Note that the specification requires hashing only the content between
# the first { and the first } after it. If we find {} with nothing in
# the middle, the whole key is hashed as usual.
s = key.index "{"
if s
e = key.index "}",s+1
if e && e != s+1
key = key[s+1..e-1]
end
end
RedisClusterCRC16.crc16(key) % 16384
end
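For readers following along in C#, a minimal sketch of the same hash-tag rule (it reuses the XModemCrc16 helper from the sketch above; names and the UTF-8 choice are illustrative assumptions, not part of this commit):

using System.Text;

static class RedisSlot
{
    // Mirrors key_to_slot above: hash only the content of the first {...}
    // group, unless it is empty ("{}"), in which case the whole key is hashed.
    public static int KeyToSlot(string key)
    {
        int s = key.IndexOf('{');
        if (s >= 0)
        {
            int e = key.IndexOf('}', s + 1);
            if (e > s + 1)                        // e == s + 1 means "{}": ignore it
                key = key.Substring(s + 1, e - s - 1);
        }
        // hash the raw key bytes (UTF-8 here for illustration) over 16384 slots
        return XModemCrc16.Crc16(Encoding.UTF8.GetBytes(key)) % 16384;
    }
}
// e.g. KeyToSlot("{user1000}.following") == KeyToSlot("{user1000}.followers"),
// so both keys land in the same slot and hence on the same cluster node.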
#################################################################################
# Definition of commands
#################################################################################
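# Each entry maps a subcommand to [handler, arity, usage hint]. Arity counts
# the subcommand itself; a negative value means "at least that many
# arguments" (see the check_arity call below).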
COMMANDS={
"create" => ["create_cluster_cmd", -2, "host1:port1 ... hostN:portN"],
"check" => ["check_cluster_cmd", 2, "host:port"],
"info" => ["info_cluster_cmd", 2, "host:port"],
"fix" => ["fix_cluster_cmd", 2, "host:port"],
"reshard" => ["reshard_cluster_cmd", 2, "host:port"],
"rebalance" => ["rebalance_cluster_cmd", -2, "host:port"],
"add-node" => ["addnode_cluster_cmd", 3, "new_host:new_port existing_host:existing_port"],
"del-node" => ["delnode_cluster_cmd", 3, "host:port node_id"],
"set-timeout" => ["set_timeout_cluster_cmd", 3, "host:port milliseconds"],
"call" => ["call_cluster_cmd", -3, "host:port command arg arg .. arg"],
"import" => ["import_cluster_cmd", 2, "host:port"],
"help" => ["help_cluster_cmd", 1, "(show this help)"]
}
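# Option specs: false marks a boolean flag, true an option that takes a
# value, and :required an option that takes a value and must be supplied
# (enforced in parse_options above).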
ALLOWED_OPTIONS={
"create" => {"replicas" => true},
"add-node" => {"slave" => false, "master-id" => true},
"import" => {"from" => :required, "copy" => false, "replace" => false},
"reshard" => {"from" => true, "to" => true, "slots" => true, "yes" => false, "timeout" => true, "pipeline" => true},
"rebalance" => {"weight" => [], "auto-weights" => false, "use-empty-masters" => false, "timeout" => true, "simulate" => false, "pipeline" => true, "threshold" => true},
"fix" => {"timeout" => MigrateDefaultTimeout},
}
def show_help
puts "Usage: redis-trib <command> <options> <arguments ...>\n\n"
COMMANDS.each{|k,v|
o = ""
puts " #{k.ljust(15)} #{v[2]}"
if ALLOWED_OPTIONS[k]
ALLOWED_OPTIONS[k].each{|optname,has_arg|
puts " --#{optname}" + (has_arg ? " <arg>" : "")
}
end
}
puts "\nFor check, fix, reshard, del-node, set-timeout you can specify the host and port of any working node in the cluster.\n"
end
# Sanity check
if ARGV.length == 0
show_help
exit 1
end
rt = RedisTrib.new
cmd_spec = COMMANDS[ARGV[0].downcase]
if !cmd_spec
puts "Unknown redis-trib subcommand '#{ARGV[0]}'"
exit 1
end
# Parse options
cmd_options,first_non_option = rt.parse_options(ARGV[0].downcase)
rt.check_arity(cmd_spec[1],ARGV.length-(first_non_option-1))
# Dispatch
rt.send(cmd_spec[0],ARGV[first_non_option..-1],cmd_options)
Microsoft Visual Studio Solution File, Format Version 12.00
# Visual Studio 14
VisualStudioVersion = 14.0.25420.1
MinimumVisualStudioVersion = 10.0.40219.1
Project("{8BB2217D-0F2D-49D1-97BC-3654ED321F3B}") = "StackExchange.Redis", "StackExchange.Redis\StackExchange.Redis.xproj", "{EF84877F-59BE-41BE-9013-E765AF0BB72E}"
EndProject
Project("{8BB2217D-0F2D-49D1-97BC-3654ED321F3B}") = "StackExchange.Redis.StrongName", "StackExchange.Redis.StrongName\StackExchange.Redis.StrongName.xproj", "{46754D2A-AC16-4686-B113-3DB08ACF4269}"
EndProject
Project("{8BB2217D-0F2D-49D1-97BC-3654ED321F3B}") = "StackExchange.Redis.Tests", "StackExchange.Redis.Tests\StackExchange.Redis.Tests.xproj", "{3B8BD8F1-8BFC-4D8C-B4DA-25FFAF3D1DBE}"
EndProject
Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution Items", "{3AD17044-6BFF-4750-9AC2-2CA466375F2A}"
ProjectSection(SolutionItems) = preProject
global.json = global.json
NuGet.Config = NuGet.Config
EndProjectSection
EndProject
Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Redis Configs", "Redis Configs", "{96E891CD-2ED7-4293-A7AB-4C6F5D8D2B05}"
ProjectSection(SolutionItems) = preProject
Redis Configs\master.conf = Redis Configs\master.conf
Redis Configs\redis-cli 7000.cmd = Redis Configs\redis-cli 7000.cmd
Redis Configs\redis-cli 7001.cmd = Redis Configs\redis-cli 7001.cmd
Redis Configs\redis-cli 7002.cmd = Redis Configs\redis-cli 7002.cmd
Redis Configs\redis-cli 7003.cmd = Redis Configs\redis-cli 7003.cmd
Redis Configs\redis-cli 7004.cmd = Redis Configs\redis-cli 7004.cmd
Redis Configs\redis-cli 7005.cmd = Redis Configs\redis-cli 7005.cmd
Redis Configs\redis-cli master.cmd = Redis Configs\redis-cli master.cmd
Redis Configs\redis-cli secure.cmd = Redis Configs\redis-cli secure.cmd
Redis Configs\redis-cli slave.cmd = Redis Configs\redis-cli slave.cmd
Redis Configs\redis-server all local.cmd = Redis Configs\redis-server all local.cmd
Redis Configs\redis-server master.cmd = Redis Configs\redis-server master.cmd
Redis Configs\redis-server secure.cmd = Redis Configs\redis-server secure.cmd
Redis Configs\redis-server slave.cmd = Redis Configs\redis-server slave.cmd
Redis Configs\secure.conf = Redis Configs\secure.conf
Redis Configs\slave.conf = Redis Configs\slave.conf
EndProjectSection
EndProject
Project("{8BB2217D-0F2D-49D1-97BC-3654ED321F3B}") = "BasicTest", "BasicTest\BasicTest.xproj", "{939FA5F7-16AA-4847-812B-6EBC3748A86D}"
EndProject
Project("{8BB2217D-0F2D-49D1-97BC-3654ED321F3B}") = "MigratedBookSleeveTestSuite", "MigratedBookSleeveTestSuite\MigratedBookSleeveTestSuite.xproj", "{0FA2F7C5-1D36-40C4-82D1-93DBF43765D7}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Docs", "Docs\Docs.csproj", "{7909952C-0F38-4E62-A7BA-1A77E1452FDA}"
EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
Debug|Any CPU = Debug|Any CPU
Log Output|Any CPU = Log Output|Any CPU
Mono|Any CPU = Mono|Any CPU
Release|Any CPU = Release|Any CPU
Verbose|Any CPU = Verbose|Any CPU
EndGlobalSection
GlobalSection(ProjectConfigurationPlatforms) = postSolution
{EF84877F-59BE-41BE-9013-E765AF0BB72E}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{EF84877F-59BE-41BE-9013-E765AF0BB72E}.Debug|Any CPU.Build.0 = Debug|Any CPU
{EF84877F-59BE-41BE-9013-E765AF0BB72E}.Log Output|Any CPU.ActiveCfg = Release|Any CPU
{EF84877F-59BE-41BE-9013-E765AF0BB72E}.Log Output|Any CPU.Build.0 = Release|Any CPU
{EF84877F-59BE-41BE-9013-E765AF0BB72E}.Mono|Any CPU.ActiveCfg = Release|Any CPU
{EF84877F-59BE-41BE-9013-E765AF0BB72E}.Mono|Any CPU.Build.0 = Release|Any CPU
{EF84877F-59BE-41BE-9013-E765AF0BB72E}.Release|Any CPU.ActiveCfg = Release|Any CPU
{EF84877F-59BE-41BE-9013-E765AF0BB72E}.Release|Any CPU.Build.0 = Release|Any CPU
{EF84877F-59BE-41BE-9013-E765AF0BB72E}.Verbose|Any CPU.ActiveCfg = Release|Any CPU
{EF84877F-59BE-41BE-9013-E765AF0BB72E}.Verbose|Any CPU.Build.0 = Release|Any CPU
{46754D2A-AC16-4686-B113-3DB08ACF4269}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{46754D2A-AC16-4686-B113-3DB08ACF4269}.Debug|Any CPU.Build.0 = Debug|Any CPU
{46754D2A-AC16-4686-B113-3DB08ACF4269}.Log Output|Any CPU.ActiveCfg = Release|Any CPU
{46754D2A-AC16-4686-B113-3DB08ACF4269}.Log Output|Any CPU.Build.0 = Release|Any CPU
{46754D2A-AC16-4686-B113-3DB08ACF4269}.Mono|Any CPU.ActiveCfg = Release|Any CPU
{46754D2A-AC16-4686-B113-3DB08ACF4269}.Mono|Any CPU.Build.0 = Release|Any CPU
{46754D2A-AC16-4686-B113-3DB08ACF4269}.Release|Any CPU.ActiveCfg = Release|Any CPU
{46754D2A-AC16-4686-B113-3DB08ACF4269}.Release|Any CPU.Build.0 = Release|Any CPU
{46754D2A-AC16-4686-B113-3DB08ACF4269}.Verbose|Any CPU.ActiveCfg = Release|Any CPU
{46754D2A-AC16-4686-B113-3DB08ACF4269}.Verbose|Any CPU.Build.0 = Release|Any CPU
{3B8BD8F1-8BFC-4D8C-B4DA-25FFAF3D1DBE}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{3B8BD8F1-8BFC-4D8C-B4DA-25FFAF3D1DBE}.Debug|Any CPU.Build.0 = Debug|Any CPU
{3B8BD8F1-8BFC-4D8C-B4DA-25FFAF3D1DBE}.Log Output|Any CPU.ActiveCfg = Release|Any CPU
{3B8BD8F1-8BFC-4D8C-B4DA-25FFAF3D1DBE}.Log Output|Any CPU.Build.0 = Release|Any CPU
{3B8BD8F1-8BFC-4D8C-B4DA-25FFAF3D1DBE}.Mono|Any CPU.ActiveCfg = Release|Any CPU
{3B8BD8F1-8BFC-4D8C-B4DA-25FFAF3D1DBE}.Mono|Any CPU.Build.0 = Release|Any CPU
{3B8BD8F1-8BFC-4D8C-B4DA-25FFAF3D1DBE}.Release|Any CPU.ActiveCfg = Release|Any CPU
{3B8BD8F1-8BFC-4D8C-B4DA-25FFAF3D1DBE}.Release|Any CPU.Build.0 = Release|Any CPU
{3B8BD8F1-8BFC-4D8C-B4DA-25FFAF3D1DBE}.Verbose|Any CPU.ActiveCfg = Release|Any CPU
{3B8BD8F1-8BFC-4D8C-B4DA-25FFAF3D1DBE}.Verbose|Any CPU.Build.0 = Release|Any CPU
{939FA5F7-16AA-4847-812B-6EBC3748A86D}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{939FA5F7-16AA-4847-812B-6EBC3748A86D}.Debug|Any CPU.Build.0 = Debug|Any CPU
{939FA5F7-16AA-4847-812B-6EBC3748A86D}.Log Output|Any CPU.ActiveCfg = Debug|Any CPU
{939FA5F7-16AA-4847-812B-6EBC3748A86D}.Log Output|Any CPU.Build.0 = Debug|Any CPU
{939FA5F7-16AA-4847-812B-6EBC3748A86D}.Mono|Any CPU.ActiveCfg = Debug|Any CPU
{939FA5F7-16AA-4847-812B-6EBC3748A86D}.Mono|Any CPU.Build.0 = Debug|Any CPU
{939FA5F7-16AA-4847-812B-6EBC3748A86D}.Release|Any CPU.ActiveCfg = Release|Any CPU
{939FA5F7-16AA-4847-812B-6EBC3748A86D}.Release|Any CPU.Build.0 = Release|Any CPU
{939FA5F7-16AA-4847-812B-6EBC3748A86D}.Verbose|Any CPU.ActiveCfg = Debug|Any CPU
{939FA5F7-16AA-4847-812B-6EBC3748A86D}.Verbose|Any CPU.Build.0 = Debug|Any CPU
{0FA2F7C5-1D36-40C4-82D1-93DBF43765D7}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{0FA2F7C5-1D36-40C4-82D1-93DBF43765D7}.Debug|Any CPU.Build.0 = Debug|Any CPU
{0FA2F7C5-1D36-40C4-82D1-93DBF43765D7}.Log Output|Any CPU.ActiveCfg = Debug|Any CPU
{0FA2F7C5-1D36-40C4-82D1-93DBF43765D7}.Log Output|Any CPU.Build.0 = Debug|Any CPU
{0FA2F7C5-1D36-40C4-82D1-93DBF43765D7}.Mono|Any CPU.ActiveCfg = Debug|Any CPU
{0FA2F7C5-1D36-40C4-82D1-93DBF43765D7}.Mono|Any CPU.Build.0 = Debug|Any CPU
{0FA2F7C5-1D36-40C4-82D1-93DBF43765D7}.Release|Any CPU.ActiveCfg = Release|Any CPU
{0FA2F7C5-1D36-40C4-82D1-93DBF43765D7}.Release|Any CPU.Build.0 = Release|Any CPU
{0FA2F7C5-1D36-40C4-82D1-93DBF43765D7}.Verbose|Any CPU.ActiveCfg = Debug|Any CPU
{0FA2F7C5-1D36-40C4-82D1-93DBF43765D7}.Verbose|Any CPU.Build.0 = Debug|Any CPU
{7909952C-0F38-4E62-A7BA-1A77E1452FDA}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{7909952C-0F38-4E62-A7BA-1A77E1452FDA}.Debug|Any CPU.Build.0 = Debug|Any CPU
{7909952C-0F38-4E62-A7BA-1A77E1452FDA}.Log Output|Any CPU.ActiveCfg = Mono|Any CPU
{7909952C-0F38-4E62-A7BA-1A77E1452FDA}.Log Output|Any CPU.Build.0 = Mono|Any CPU
{7909952C-0F38-4E62-A7BA-1A77E1452FDA}.Mono|Any CPU.ActiveCfg = Mono|Any CPU
{7909952C-0F38-4E62-A7BA-1A77E1452FDA}.Mono|Any CPU.Build.0 = Mono|Any CPU
{7909952C-0F38-4E62-A7BA-1A77E1452FDA}.Release|Any CPU.ActiveCfg = Release|Any CPU
{7909952C-0F38-4E62-A7BA-1A77E1452FDA}.Release|Any CPU.Build.0 = Release|Any CPU
{7909952C-0F38-4E62-A7BA-1A77E1452FDA}.Verbose|Any CPU.ActiveCfg = Mono|Any CPU
{7909952C-0F38-4E62-A7BA-1A77E1452FDA}.Verbose|Any CPU.Build.0 = Mono|Any CPU
EndGlobalSection
GlobalSection(SolutionProperties) = preSolution
HideSolutionNode = FALSE
EndGlobalSection
EndGlobal
......@@ -13,7 +13,7 @@
}
},
"title": "StackExchange.Redis.StrongName",
"version": "1.1.604-*",
"version": "1.1.607-*",
"description": "High performance Redis client, incorporating both synchronous and asynchronous usage.",
"authors": [ "Stack Exchange inc., marc.gravell" ],
"copyright": "Stack Exchange inc. 2014-",
......@@ -43,7 +43,7 @@
"Microsoft.Bcl.Async": "1.0.168"
},
"buildOptions": {
"define": [ "FEATURE_SERIALIZATION" ]
"define": [ "FEATURE_SERIALIZATION", "FEATURE_SOCKET_MODE_POLL" ]
}
},
"net45": {
......@@ -51,34 +51,44 @@
"System.IO.Compression": "4.0.0.0"
},
"buildOptions": {
"define": [ "FEATURE_SERIALIZATION" ]
"define": [ "FEATURE_SERIALIZATION", "FEATURE_SOCKET_MODE_POLL" ]
}
},
"net46": {
"frameworkAssemblies": {
"System.IO.Compression": "4.0.0.0"
},
"define": [ "FEATURE_SERIALIZATION", "PLAT_SAFE_CONTINUATIONS" ]
"buildOptions": {
"define": [ "FEATURE_SERIALIZATION", "FEATURE_SOCKET_MODE_POLL" ]
}
},
"netstandard1.5": {
"buildOptions": {
"define": [ "PLAT_SAFE_CONTINUATIONS", "CORE_CLR" ]
"define": [ "CORE_CLR" ]
},
"dependencies": {
"NETStandard.Library": "1.5.0-rc2-24027",
"System.Collections.NonGeneric": "4.0.1-rc2-24027",
"System.IO.FileSystem": "4.0.1-rc2-24027",
"System.Net.NameResolution": "4.0.0-rc2-24027",
"System.Net.Security": "4.0.0-rc2-24027",
"System.Net.Sockets": "4.1.0-rc2-24027",
"System.Reflection.Emit": "4.0.1-rc2-24027",
"System.Reflection.Emit.Lightweight": "4.0.1-rc2-24027",
"System.Reflection.TypeExtensions": "4.1.0-rc2-24027",
"System.Security.Cryptography.Algorithms": "4.1.0-rc2-24027",
"System.Security.Cryptography.X509Certificates": "4.1.0-rc2-24027",
"System.Threading.Thread": "4.0.0-rc2-24027",
"System.Threading.ThreadPool": "4.0.10-rc2-24027",
"System.Threading.Timer": "4.0.1-rc2-24027"
"System.Collections": "4.0.11",
"System.Collections.Concurrent": "4.0.12",
"System.Collections.NonGeneric": "4.0.1",
"System.Diagnostics.Tools": "4.0.1",
"System.IO.Compression": "4.1.0",
"System.IO.FileSystem": "4.0.1",
"System.Linq": "4.1.0",
"System.Net.NameResolution": "4.0.0",
"System.Net.Security": "4.0.0",
"System.Net.Sockets": "4.1.0",
"System.Reflection.Emit": "4.0.1",
"System.Reflection.Emit.Lightweight": "4.0.1",
"System.Reflection.TypeExtensions": "4.1.0",
"System.Runtime.Extensions": "4.1.0",
"System.Runtime.InteropServices.RuntimeInformation": "4.0.0",
"System.Security.Cryptography.Algorithms": "4.2.0",
"System.Security.Cryptography.X509Certificates": "4.1.0",
"System.Text.RegularExpressions": "4.1.0",
"System.Threading": "4.0.11",
"System.Threading.Thread": "4.0.0",
"System.Threading.ThreadPool": "4.0.10",
"System.Threading.Timer": "4.0.1"
}
}
}
......
......@@ -13,17 +13,14 @@ namespace StackExchange.Redis.Tests
public class Cluster : TestBase
{
//private const string ClusterIp = "192.168.0.15"; // marc
private const string ClusterIp = "10.110.11.102"; // kmontrose
//private const string ClusterIp = "10.110.11.102"; // kmontrose
private const string ClusterIp = "127.0.0.1";
private const int ServerCount = 6, FirstPort = 7000;
protected override string GetConfiguration()
{
var server = ClusterIp;
#if !CORE_CLR
if (string.Equals(Environment.MachineName, "MARC-LAPTOP", StringComparison.InvariantCultureIgnoreCase))
#else
if (string.Equals(Environment.GetEnvironmentVariable("COMPUTERNAME"), "MARC-LAPTOP", StringComparison.OrdinalIgnoreCase))
#endif
if (string.Equals(Environment.MachineName, "MARC-LAPTOP", StringComparison.OrdinalIgnoreCase))
{
server = "192.168.56.101";
}
......
......@@ -86,7 +86,7 @@ public void SocketFailureError()
Assert.That(rde.FailureType, Is.EqualTo(ConnectionFailureType.SocketFailure));
}
}
#if DEBUG // needs AllowConnect, which is DEBUG only
[Test]
public void CheckFailureRecovered()
{
......@@ -117,7 +117,7 @@ public void CheckFailureRecovered()
ClearAmbientFailures();
}
}
#endif
[Test]
public void TryGetAzureRoleInstanceIdNoThrow()
{
......
......@@ -25,7 +25,7 @@ public void NullSnapshot()
var ex = ExceptionFactory.NoConnectionAvailable(true, new RedisCommand(), null, null, null);
Assert.Null(ex.InnerException);
}
#if DEBUG // needs debug connection features
[Test]
public void MultipleEndpointsThrowAggregateException()
{
......@@ -108,6 +108,6 @@ public void ServerTakesPrecendenceOverSnapshot()
}
}
#endif
}
}
#if NUNITLITE
#if NUNITLITE && !CORE_CLR
using System;
using System.Reflection;
using NUnit.Common;
......
......@@ -38,15 +38,6 @@ public void ConnectToAzure(int? port, bool ssl)
}
}
private static void GetAzureCredentials(out string name, out string password)
{
var lines = File.ReadAllLines(@"d:\dev\azure.txt");
if (lines == null || lines.Length != 2)
Assert.Inconclusive("azure credentials missing");
name = lines[0];
password = lines[1];
}
[Test]
[TestCase(false, false)]
[TestCase(true, false)]
......
......@@ -14,5 +14,8 @@
<PropertyGroup>
<SchemaVersion>2.0</SchemaVersion>
</PropertyGroup>
<ItemGroup>
<Service Include="{82a7f48d-3b50-4b1e-b82e-3ada8210c358}" />
</ItemGroup>
<Import Project="$(VSToolsPath)\DotNet\Microsoft.DotNet.targets" Condition="'$(VSToolsPath)' != ''" />
</Project>
\ No newline at end of file
......@@ -168,7 +168,7 @@ protected IServer GetServer(ConnectionMultiplexer muxer)
bool checkConnect = true, bool pause = true, string failMessage = null,
string channelPrefix = null, bool useSharedSocketManager = true, Proxy? proxy = null)
{
if(pause) Thread.Sleep(500); // get a lot of glitches when hammering new socket creations etc; pace it out a bit
if(pause) Thread.Sleep(250); // get a lot of glitches when hammering new socket creations etc; pace it out a bit
string configuration = GetConfiguration();
var config = ConfigurationOptions.Parse(configuration);
if (disabledCommands != null && disabledCommands.Length != 0)
......
......@@ -10,7 +10,7 @@ public class VPNTest : TestBase
[Test]
[MaxTime(100000)]
[TestCase("or-devredis01.ds.stackexchange.com:6379")]
[TestCase("co-devredis01.ds.stackexchange.com:6379")]
public void Execute(string config)
{
for (int i = 0; i < 50; i++)
......
......@@ -5,31 +5,31 @@
"StackExchange.Redis": {
"version": "1.1.*",
"target": "project"
}
},
"commands": {
"run": "StackExchange.Redis.Tests"
},
"buildOptions": {
"define": [ "NUNITLITE" ],
"emitEntryPoint": true
},
"runtimes": {
"win81-x64": {}
"nunit": "3.4.1",
"dotnet-test-nunit": "3.4.0-beta-1"
},
"testRunner": "nunit",
"frameworks": {
"netcoreapp1.0": {
"imports": [ "dnxcore50" ],
"imports": [
"portable-net45+win8"
],
"buildOptions": {
"define": [ "PLAT_SAFE_CONTINUATIONS", "CORE_CLR" ]
"define": [ "CORE_CLR" ]
},
"dependencies": {
"System.Console": "4.0.0-rc2-24027",
"System.Linq.Expressions": "4.0.11-rc2-24027",
"System.Reflection.Extensions": "4.0.1-rc2-24027",
"System.Threading.Tasks.Parallel": "4.0.1-rc2-24027",
"Microsoft.CSharp": "4.0.1-rc2-24027",
"nunitlite": "3.2.1"
"Microsoft.NETCore.App": {
"version": "1.0.0-*",
"type": "platform"
},
"System.Console": "4.0.0",
"System.Diagnostics.Debug": "4.0.11",
"System.Linq.Expressions": "4.1.0",
"System.Reflection.Extensions": "4.0.1",
"System.Runtime.InteropServices": "4.1.0",
"System.Threading.Tasks.Parallel": "4.0.1",
"Microsoft.CSharp": "4.0.1"
}
}
}
......
......@@ -77,7 +77,7 @@
<Reference Include="Microsoft.CSharp" />
</ItemGroup>
<ItemGroup>
<Compile Include="..\StackExchange.Redis.Tests\**\*.cs"/>
<Compile Include="..\StackExchange.Redis.Tests\**\*.cs" Exclude="..\StackExchange.Redis.Tests\obj\**\*.cs"/>
</ItemGroup>
<ItemGroup>
<None Include="packages.config">
......
Microsoft Visual Studio Solution File, Format Version 12.00
# Visual Studio 14
VisualStudioVersion = 14.0.25123.0
VisualStudioVersion = 14.0.25420.1
MinimumVisualStudioVersion = 10.0.40219.1
Project("{8BB2217D-0F2D-49D1-97BC-3654ED321F3B}") = "BasicTest_dnxcore50", "BasicTest_dnxcore50\BasicTest_dnxcore50.xproj", "{9D83BABA-A92E-495F-BF63-DEB4F6B09355}"
EndProject
Project("{8BB2217D-0F2D-49D1-97BC-3654ED321F3B}") = "StackExchange.Redis", "StackExchange.Redis\StackExchange.Redis.xproj", "{EF84877F-59BE-41BE-9013-E765AF0BB72E}"
EndProject
Project("{8BB2217D-0F2D-49D1-97BC-3654ED321F3B}") = "StackExchange.Redis.StrongName", "StackExchange.Redis.StrongName\StackExchange.Redis.StrongName.xproj", "{46754D2A-AC16-4686-B113-3DB08ACF4269}"
......@@ -13,19 +11,18 @@ Project("{8BB2217D-0F2D-49D1-97BC-3654ED321F3B}") = "StackExchange.Redis.Tests",
EndProject
Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution Items", "{3AD17044-6BFF-4750-9AC2-2CA466375F2A}"
ProjectSection(SolutionItems) = preProject
global.json = global.json
NuGet.Config = NuGet.Config
EndProjectSection
EndProject
Project("{8BB2217D-0F2D-49D1-97BC-3654ED321F3B}") = "BasicTest", "BasicTest\BasicTest.xproj", "{939FA5F7-16AA-4847-812B-6EBC3748A86D}"
EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
Debug|Any CPU = Debug|Any CPU
Release|Any CPU = Release|Any CPU
EndGlobalSection
GlobalSection(ProjectConfigurationPlatforms) = postSolution
{9D83BABA-A92E-495F-BF63-DEB4F6B09355}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{9D83BABA-A92E-495F-BF63-DEB4F6B09355}.Debug|Any CPU.Build.0 = Debug|Any CPU
{9D83BABA-A92E-495F-BF63-DEB4F6B09355}.Release|Any CPU.ActiveCfg = Release|Any CPU
{9D83BABA-A92E-495F-BF63-DEB4F6B09355}.Release|Any CPU.Build.0 = Release|Any CPU
{EF84877F-59BE-41BE-9013-E765AF0BB72E}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{EF84877F-59BE-41BE-9013-E765AF0BB72E}.Debug|Any CPU.Build.0 = Debug|Any CPU
{EF84877F-59BE-41BE-9013-E765AF0BB72E}.Release|Any CPU.ActiveCfg = Release|Any CPU
......@@ -38,6 +35,10 @@ Global
{3B8BD8F1-8BFC-4D8C-B4DA-25FFAF3D1DBE}.Debug|Any CPU.Build.0 = Debug|Any CPU
{3B8BD8F1-8BFC-4D8C-B4DA-25FFAF3D1DBE}.Release|Any CPU.ActiveCfg = Release|Any CPU
{3B8BD8F1-8BFC-4D8C-B4DA-25FFAF3D1DBE}.Release|Any CPU.Build.0 = Release|Any CPU
{939FA5F7-16AA-4847-812B-6EBC3748A86D}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{939FA5F7-16AA-4847-812B-6EBC3748A86D}.Debug|Any CPU.Build.0 = Debug|Any CPU
{939FA5F7-16AA-4847-812B-6EBC3748A86D}.Release|Any CPU.ActiveCfg = Release|Any CPU
{939FA5F7-16AA-4847-812B-6EBC3748A86D}.Release|Any CPU.Build.0 = Release|Any CPU
EndGlobalSection
GlobalSection(SolutionProperties) = preSolution
HideSolutionNode = FALSE
......
......@@ -1222,10 +1222,14 @@ internal async Task<bool> ReconfigureAsync(bool first, bool reconfigureAll, Text
serverSnapshot = new ServerEndPoint[configuration.EndPoints.Count];
foreach (var endpoint in configuration.EndPoints)
{
var server = new ServerEndPoint(this, endpoint, log);
serverSnapshot[index++] = server;
var server = (ServerEndPoint)servers[endpoint];
if (server == null)
{
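// endpoint not seen before: create it and register it, so later
// reconfigures reuse the same ServerEndPoint instead of duplicating it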
server = new ServerEndPoint(this, endpoint, log);
servers.Add(endpoint, server);
}
serverSnapshot[index++] = server;
}
}
foreach (var server in serverSnapshot)
{
......
......@@ -175,6 +175,17 @@ public partial interface IServer : IRedis
/// <remarks>http://redis.io/commands/dbsize</remarks>
Task<long> DatabaseSizeAsync(int database = 0, CommandFlags flags = CommandFlags.None);
/// <summary>
/// Return the same message passed in
/// </summary>
/// <remarks>http://redis.io/commands/echo</remarks>
RedisValue Echo(RedisValue message, CommandFlags flags = CommandFlags.None);
/// <summary>
/// Return the same message passed in
/// </summary>
/// <remarks>http://redis.io/commands/echo</remarks>
Task<RedisValue> EchoAsync(RedisValue message, CommandFlags flags = CommandFlags.None);
/// <summary>
/// Delete all the keys of all databases on the server.
......
using System.Text.RegularExpressions;
#if CORE_CLR
using System;
#endif
namespace StackExchange.Redis
{
/// <summary>
/// Credits to Sam Harwell https://github.com/dotnet/corefx/issues/340#issuecomment-120749951
/// </summary>
internal static class InternalRegexCompiledOption
{
static InternalRegexCompiledOption()
{
#if CORE_CLR
RegexOptions tmp;
if (!Enum.TryParse("Compiled", out tmp))
tmp = RegexOptions.None;
Default = tmp;
#else
Default = RegexOptions.Compiled;
#endif
}
/// <summary>
/// Gets the default <see cref="RegexOptions"/> to use.
/// <see cref="System.Text.RegularExpressions.RegexOptions.Compiled"/> option isn't available yet for dnxcore50.
/// This returns <see cref="System.Text.RegularExpressions.RegexOptions.Compiled"/> if it is supported;
/// <see cref="System.Text.RegularExpressions.RegexOptions.None"/> otherwise.
/// </summary>
public static RegexOptions Default { get; }
}
}
......@@ -32,6 +32,10 @@ private static Action<Task<int>> EndReadFactory(PhysicalConnection physical)
{
return result =>
{ // can't capture AsyncState on SocketRead, so we'll do it once per physical instead
if (result.IsFaulted)
{
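// reading Task.Exception marks the fault as observed, so it won't
// later surface as an UnobservedTaskException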
GC.KeepAlive(result.Exception);
}
try
{
physical.Multiplexer.Trace("Completed asynchronously: processing in callback", physical.physicalName);
......@@ -690,7 +694,7 @@ void BeginReading()
Multiplexer.Trace("Beginning async read...", physicalName);
#if CORE_CLR
var result = netStream.ReadAsync(ioBuffer, ioBufferBytes, space);
switch(result.Status)
switch (result.Status)
{
case TaskStatus.RanToCompletion:
case TaskStatus.Faulted:
......
......@@ -206,6 +206,17 @@ public Task<long> DatabaseSizeAsync(int database = 0, CommandFlags flags = Comma
return ExecuteAsync(msg, ResultProcessor.Int64);
}
public RedisValue Echo(RedisValue message, CommandFlags flags)
{
var msg = Message.Create(-1, flags, RedisCommand.ECHO, message);
return ExecuteSync(msg, ResultProcessor.RedisValue);
}
public Task<RedisValue> EchoAsync(RedisValue message, CommandFlags flags)
{
var msg = Message.Create(-1, flags, RedisCommand.ECHO, message);
return ExecuteAsync(msg, ResultProcessor.RedisValue);
}
public void FlushAllDatabases(CommandFlags flags = CommandFlags.None)
{
var msg = Message.Create(-1, flags, RedisCommand.FLUSHALL);
......
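A minimal usage sketch for the ECHO members added above (not part of the commit; it assumes an already-connected ConnectionMultiplexer named muxer, an async context for the await, and an illustrative endpoint):

IServer server = muxer.GetServer("127.0.0.1", 6379);
RedisValue reply = server.Echo("ping-me");               // synchronous round-trip
RedisValue replyAsync = await server.EchoAsync("ping-me");
// Because IServer targets one specific endpoint, ECHO is a handy way to
// verify connectivity (or measure latency) against that exact server.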
......@@ -356,7 +356,7 @@ protected override bool SetResultCore(PhysicalConnection connection, Message mes
internal sealed class ScriptLoadProcessor : ResultProcessor<byte[]>
{
static readonly Regex sha1 = new Regex("^[0-9a-f]{40}$", InternalRegexCompiledOption.Default | RegexOptions.IgnoreCase);
static readonly Regex sha1 = new Regex("^[0-9a-f]{40}$", RegexOptions.Compiled | RegexOptions.IgnoreCase);
internal static bool IsSHA1(string script)
{
......
......@@ -23,7 +23,7 @@ public ScriptParameters(RedisKey[] keys, RedisValue[] args)
}
}
static readonly Regex ParameterExtractor = new Regex(@"@(?<paramName> ([a-z]|_) ([a-z]|_|\d)*)", InternalRegexCompiledOption.Default | RegexOptions.IgnoreCase | RegexOptions.IgnorePatternWhitespace);
static readonly Regex ParameterExtractor = new Regex(@"@(?<paramName> ([a-z]|_) ([a-z]|_|\d)*)", RegexOptions.Compiled | RegexOptions.IgnoreCase | RegexOptions.IgnorePatternWhitespace);
static string[] ExtractParameters(string script)
{
var ps = ParameterExtractor.Matches(script);
......
......@@ -25,7 +25,7 @@ internal sealed partial class ServerEndPoint : IDisposable
{
internal volatile ServerEndPoint Master;
internal volatile ServerEndPoint[] Slaves = NoSlaves;
private static readonly Regex nameSanitizer = new Regex("[^!-~]", InternalRegexCompiledOption.Default);
private static readonly Regex nameSanitizer = new Regex("[^!-~]", RegexOptions.Compiled);
private static readonly ServerEndPoint[] NoSlaves = new ServerEndPoint[0];
private readonly EndPoint endpoint;
......
......@@ -69,17 +69,13 @@ static TaskSource()
tcs.SetResult(0);
if (!expectTrue || expectFalse)
{
Debug.WriteLine("IsSyncSafe reported incorrectly!");
Trace.WriteLine("IsSyncSafe reported incorrectly!");
// revert to not trusting them
IsSyncSafe = null;
}
}
}
catch (Exception ex)
catch (Exception)
{
Debug.WriteLine(ex.Message);
Trace.WriteLine(ex.Message);
IsSyncSafe = null;
}
if (IsSyncSafe == null)
......
......@@ -13,7 +13,7 @@
}
},
"title": "StackExchange.Redis",
"version": "1.1.604-*",
"version": "1.1.607-*",
"description": "High performance Redis client, incorporating both synchronous and asynchronous usage.",
"authors": [ "Stack Exchange inc., marc.gravell" ],
"copyright": "Stack Exchange inc. 2014-",
......@@ -33,38 +33,54 @@
"dependencies": {
"Microsoft.Bcl": "1.1.10",
"Microsoft.Bcl.Async": "1.0.168"
},
"buildOptions": {
"define": [ "FEATURE_SERIALIZATION", "FEATURE_SOCKET_MODE_POLL" ]
}
},
"net45": {
"frameworkAssemblies": {
"System.IO.Compression": "4.0.0.0"
},
"buildOptions": {
"define": [ "FEATURE_SERIALIZATION", "FEATURE_SOCKET_MODE_POLL" ]
}
},
"net46": {
"frameworkAssemblies": {
"System.IO.Compression": "4.0.0.0"
},
"define": [ "PLAT_SAFE_CONTINUATIONS" ]
"buildOptions": {
"define": [ "FEATURE_SERIALIZATION", "FEATURE_SOCKET_MODE_POLL" ]
}
},
"netstandard1.5": {
"buildOptions": {
"define": [ "PLAT_SAFE_CONTINUATIONS", "CORE_CLR" ]
"define": [ "CORE_CLR" ]
},
"dependencies": {
"NETStandard.Library": "1.5.0-rc2-24027",
"System.Collections.NonGeneric": "4.0.1-rc2-24027",
"System.IO.FileSystem": "4.0.1-rc2-24027",
"System.Net.NameResolution": "4.0.0-rc2-24027",
"System.Net.Security": "4.0.0-rc2-24027",
"System.Net.Sockets": "4.1.0-rc2-24027",
"System.Reflection.Emit": "4.0.1-rc2-24027",
"System.Reflection.Emit.Lightweight": "4.0.1-rc2-24027",
"System.Reflection.TypeExtensions": "4.1.0-rc2-24027",
"System.Security.Cryptography.Algorithms": "4.1.0-rc2-24027",
"System.Security.Cryptography.X509Certificates": "4.1.0-rc2-24027",
"System.Threading.Thread": "4.0.0-rc2-24027",
"System.Threading.ThreadPool": "4.0.10-rc2-24027",
"System.Threading.Timer": "4.0.1-rc2-24027"
"System.Collections": "4.0.11",
"System.Collections.Concurrent": "4.0.12",
"System.Collections.NonGeneric": "4.0.1",
"System.Diagnostics.Tools": "4.0.1",
"System.IO.Compression": "4.1.0",
"System.IO.FileSystem": "4.0.1",
"System.Linq": "4.1.0",
"System.Net.NameResolution": "4.0.0",
"System.Net.Security": "4.0.0",
"System.Net.Sockets": "4.1.0",
"System.Reflection.Emit": "4.0.1",
"System.Reflection.Emit.Lightweight": "4.0.1",
"System.Reflection.TypeExtensions": "4.1.0",
"System.Runtime.Extensions": "4.1.0",
"System.Runtime.InteropServices.RuntimeInformation": "4.0.0",
"System.Security.Cryptography.Algorithms": "4.2.0",
"System.Security.Cryptography.X509Certificates": "4.1.0",
"System.Text.RegularExpressions": "4.1.0",
"System.Threading": "4.0.11",
"System.Threading.Thread": "4.0.0",
"System.Threading.ThreadPool": "4.0.10",
"System.Threading.Timer": "4.0.1"
}
}
}
......
......@@ -77,7 +77,7 @@
</Reference>
</ItemGroup>
<ItemGroup>
<Compile Include="..\StackExchange.Redis\**\*.cs"/>
<Compile Include="..\StackExchange.Redis\**\*.cs" Exclude="..\StackExchange.Redis\obj\**\*.cs"/>
</ItemGroup>
<ItemGroup>
<None Include="packages.config" />
......
......@@ -74,7 +74,7 @@
</Reference>
</ItemGroup>
<ItemGroup>
<Compile Include="..\StackExchange.Redis\**\*.cs"/>
<Compile Include="..\StackExchange.Redis\**\*.cs" Exclude="..\StackExchange.Redis\obj\**\*.cs" />
</ItemGroup>
<ItemGroup>
<None Include="app.config" />
......
......@@ -79,7 +79,7 @@
<Reference Include="System.IO.Compression" />
</ItemGroup>
<ItemGroup>
<Compile Include="..\StackExchange.Redis\**\*.cs"/>
<Compile Include="..\StackExchange.Redis\**\*.cs" Exclude="..\StackExchange.Redis\obj\**\*.cs"/>
</ItemGroup>
<Import Project="$(MSBuildToolsPath)\Microsoft.CSharp.targets" />
<!-- To modify your build process, add your task inside one of the targets below and uncomment it.
......
......@@ -73,7 +73,7 @@
<Reference Include="System.IO.Compression" />
</ItemGroup>
<ItemGroup>
<Compile Include="..\StackExchange.Redis\**\*.cs"/>
<Compile Include="..\StackExchange.Redis\**\*.cs" Exclude="..\StackExchange.Redis\obj\**\*.cs"/>
</ItemGroup>
<Import Project="$(MSBuildToolsPath)\Microsoft.CSharp.targets" />
<!-- To modify your build process, add your task inside one of the targets below and uncomment it.
......
......@@ -18,7 +18,7 @@
<DebugType>full</DebugType>
<Optimize>false</Optimize>
<OutputPath>bin.snk\Debug\</OutputPath>
<DefineConstants>TRACE;DEBUG;NET46 PLAT_SAFE_CONTINUATIONS STRONG_NAME FEATURE_SERIALIZATION FEATURE_SOCKET_MODE_POLL</DefineConstants>
<DefineConstants>TRACE;DEBUG;NET46 STRONG_NAME FEATURE_SERIALIZATION FEATURE_SOCKET_MODE_POLL</DefineConstants>
<ErrorReport>prompt</ErrorReport>
<WarningLevel>4</WarningLevel>
<AllowUnsafeBlocks>true</AllowUnsafeBlocks>
......@@ -63,7 +63,7 @@
<Reference Include="System.IO.Compression" />
</ItemGroup>
<ItemGroup>
<Compile Include="..\StackExchange.Redis\**\*.cs"/>
<Compile Include="..\StackExchange.Redis\**\*.cs" Exclude="..\StackExchange.Redis\obj\**\*.cs" />
</ItemGroup>
<ItemGroup>
<None Include="packages.config" />
......
......@@ -18,7 +18,7 @@
<DebugType>full</DebugType>
<Optimize>false</Optimize>
<OutputPath>bin\Debug\</OutputPath>
<DefineConstants>TRACE;DEBUG;NET46 PLAT_SAFE_CONTINUATIONS FEATURE_SERIALIZATION;FEATURE_SOCKET_MODE_POLL</DefineConstants>
<DefineConstants>TRACE;DEBUG;NET46 FEATURE_SERIALIZATION;FEATURE_SOCKET_MODE_POLL</DefineConstants>
<ErrorReport>prompt</ErrorReport>
<WarningLevel>4</WarningLevel>
<AllowUnsafeBlocks>true</AllowUnsafeBlocks>
......@@ -57,7 +57,7 @@
<Reference Include="System.IO.Compression" />
</ItemGroup>
<ItemGroup>
<Compile Include="..\StackExchange.Redis\**\*.cs"/>
<Compile Include="..\StackExchange.Redis\**\*.cs" Exclude="..\StackExchange.Redis\obj\**\*.cs" />
</ItemGroup>
<ItemGroup>
<None Include="app.config" />
......
{
"sdk": {
"version": "1.0.0-preview1-002702"
"version": "1.0.0-preview2-003121"
}
}
# Redis configuration file example
# Note on units: when memory size is needed, it is possible to specify
# it in the usual form of 1k 5GB 4M and so forth:
#
# 1k => 1000 bytes
# 1kb => 1024 bytes
# 1m => 1000000 bytes
# 1mb => 1024*1024 bytes
# 1g => 1000000000 bytes
# 1gb => 1024*1024*1024 bytes
#
# units are case insensitive so 1GB 1Gb 1gB are all the same.
################################## INCLUDES ###################################
# Include one or more other config files here. This is useful if you
# have a standard template that goes to all Redis servers but also need
# to customize a few per-server settings. Include files can include
# other files, so use this wisely.
#
# Notice option "include" won't be rewritten by command "CONFIG REWRITE"
# from admin or Redis Sentinel. Since Redis always uses the last processed
# line as value of a configuration directive, you'd better put includes
# at the beginning of this file to avoid overwriting config changes at runtime.
#
# If instead you are interested in using includes to override configuration
# options, it is better to use include as the last line.
#
# include .\path\to\local.conf
# include c:\path\to\other.conf
################################ GENERAL #####################################
# On Windows, daemonize and pidfile are not supported.
# However, you can run redis as a Windows service, and specify a logfile.
# The logfile will contain the pid.
# Accept connections on the specified port, default is 6379.
# If port 0 is specified Redis will not listen on a TCP socket.
port 6379
# TCP listen() backlog.
#
# In high requests-per-second environments you need a high backlog in order
# to avoid slow client connection issues. Note that the Linux kernel
# will silently truncate it to the value of /proc/sys/net/core/somaxconn so
# make sure to raise both the value of somaxconn and tcp_max_syn_backlog
# in order to get the desired effect.
tcp-backlog 511
# By default Redis listens for connections from all the network interfaces
# available on the server. It is possible to listen to just one or multiple
# interfaces using the "bind" configuration directive, followed by one or
# more IP addresses.
#
# Examples:
#
# bind 192.168.1.100 10.0.0.1
# bind 127.0.0.1
# Specify the path for the Unix socket that will be used to listen for
# incoming connections. There is no default, so Redis will not listen
# on a unix socket when not specified.
#
# unixsocket /tmp/redis.sock
# unixsocketperm 700
# Close the connection after a client is idle for N seconds (0 to disable)
timeout 0
# TCP keepalive.
#
# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence
# of communication. This is useful for two reasons:
#
# 1) Detect dead peers.
# 2) Keep the connection alive from the point of view of network
# equipment in the middle.
#
# On Linux, the specified value (in seconds) is the period used to send ACKs.
# Note that to close the connection, double that time is needed.
# On other kernels the period depends on the kernel configuration.
#
# A reasonable value for this option is 60 seconds.
tcp-keepalive 0
# Specify the server verbosity level.
# This can be one of:
# debug (a lot of information, useful for development/testing)
# verbose (many rarely useful info, but not a mess like the debug level)
# notice (moderately verbose, what you want in production probably)
# warning (only very important / critical messages are logged)
loglevel notice
# Specify the log file name. Also 'stdout' can be used to force
# Redis to log on the standard output.
logfile "Logs/redis_log.txt"
# To enable logging to the Windows EventLog, just set 'syslog-enabled' to
# yes, and optionally update the other syslog parameters to suit your needs.
# If Redis is installed and launched as a Windows Service, this will
# automatically be enabled.
syslog-enabled yes
# Specify the source name of the events in the Windows Application log.
syslog-ident redis
# Set the number of databases. The default database is DB 0, you can select
# a different one on a per-connection basis using SELECT <dbid> where
# dbid is a number between 0 and 'databases'-1
databases 16
################################ SNAPSHOTTING ################################
#
# Save the DB on disk:
#
# save <seconds> <changes>
#
# Will save the DB if both the given number of seconds and the given
# number of write operations against the DB occurred.
#
# In the example below the behaviour will be to save:
# after 900 sec (15 min) if at least 1 key changed
# after 300 sec (5 min) if at least 10 keys changed
# after 60 sec if at least 10000 keys changed
#
# Note: you can disable saving completely by commenting out all "save" lines.
#
# It is also possible to remove all the previously configured save
# points by adding a save directive with a single empty string argument
# like in the following example:
#
# save ""
save 900 1
save 300 10
save 60 10000
# By default Redis will stop accepting writes if RDB snapshots are enabled
# (at least one save point) and the latest background save failed.
# This will make the user aware (in a hard way) that data is not persisting
# on disk properly, otherwise chances are that no one will notice and some
# disaster will happen.
#
# If the background saving process will start working again Redis will
# automatically allow writes again.
#
# However if you have set up proper monitoring of the Redis server
# and persistence, you may want to disable this feature so that Redis will
# continue to work as usual even if there are problems with disk,
# permissions, and so forth.
stop-writes-on-bgsave-error yes
# Compress string objects using LZF when dumping .rdb databases?
# By default it's set to 'yes' as it's almost always a win.
# If you want to save some CPU in the saving child set it to 'no' but
# the dataset will likely be bigger if you have compressible values or keys.
rdbcompression yes
# Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
# This makes the format more resistant to corruption but there is a performance
# hit to pay (around 10%) when saving and loading RDB files, so you can disable it
# for maximum performance.
#
# RDB files created with checksum disabled have a checksum of zero that will
# tell the loading code to skip the check.
rdbchecksum yes
# The filename where to dump the DB
dbfilename dump.rdb
# The working directory.
#
# The DB will be written inside this directory, with the filename specified
# above using the 'dbfilename' configuration directive.
#
# The Append Only File will also be created inside this directory.
#
# Note that you must specify a directory here, not a file name.
dir ./
################################# REPLICATION #################################
# Master-Slave replication. Use slaveof to make a Redis instance a copy of
# another Redis server. A few things to understand ASAP about Redis replication.
#
# 1) Redis replication is asynchronous, but you can configure a master to
# stop accepting writes if it appears to be not connected with at least
# a given number of slaves.
# 2) Redis slaves are able to perform a partial resynchronization with the
# master if the replication link is lost for a relatively small amount of
# time. You may want to configure the replication backlog size (see the next
# sections of this file) with a sensible value depending on your needs.
# 3) Replication is automatic and does not need user intervention. After a
# network partition slaves automatically try to reconnect to masters
# and resynchronize with them.
#
# slaveof <masterip> <masterport>
# If the master is password protected (using the "requirepass" configuration
# directive below) it is possible to tell the slave to authenticate before
# starting the replication synchronization process, otherwise the master will
# refuse the slave request.
#
# masterauth <master-password>
# When a slave loses its connection with the master, or when the replication
# is still in progress, the slave can act in two different ways:
#
# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
# still reply to client requests, possibly with out of date data, or the
# data set may just be empty if this is the first synchronization.
#
# 2) if slave-serve-stale-data is set to 'no' the slave will reply with
# an error "SYNC with master in progress" to all the kind of commands
# but to INFO and SLAVEOF.
#
slave-serve-stale-data yes
# You can configure a slave instance to accept writes or not. Writing against
# a slave instance may be useful to store some ephemeral data (because data
# written on a slave will be easily deleted after resync with the master) but
# may also cause problems if clients are writing to it because of a
# misconfiguration.
#
# Since Redis 2.6 by default slaves are read-only.
#
# Note: read only slaves are not designed to be exposed to untrusted clients
# on the internet. It's just a protection layer against misuse of the instance.
# Still a read only slave exports by default all the administrative commands
# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
# security of read only slaves using 'rename-command' to shadow all the
# administrative / dangerous commands.
slave-read-only yes
# Replication SYNC strategy: disk or socket.
#
# -------------------------------------------------------
# WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY
# -------------------------------------------------------
#
# New slaves, and reconnecting slaves that are not able to continue the
# replication process just by receiving differences, need to do what is called
# a "full synchronization". An RDB file is transmitted from the master to the slaves.
# The transmission can happen in two different ways:
#
# 1) Disk-backed: The Redis master creates a new process that writes the RDB
# file on disk. Later the file is transferred by the parent
# process to the slaves incrementally.
# 2) Diskless: The Redis master creates a new process that directly writes the
# RDB file to slave sockets, without touching the disk at all.
#
# With disk-backed replication, while the RDB file is generated, more slaves
# can be queued and served with the RDB file as soon as the current child producing
# the RDB file finishes its work. With diskless replication instead once
# the transfer starts, new slaves arriving will be queued and a new transfer
# will start when the current one terminates.
#
# When diskless replication is used, the master waits a configurable amount of
# time (in seconds) before starting the transfer in the hope that multiple slaves
# will arrive and the transfer can be parallelized.
#
# With slow disks and fast (large bandwidth) networks, diskless replication
# works better.
repl-diskless-sync no
# When diskless replication is enabled, it is possible to configure the delay
# the server waits in order to spawn the child that transfers the RDB via socket
# to the slaves.
#
# This is important since once the transfer starts, it is not possible to serve
# new slaves that arrive: they will be queued for the next RDB transfer, so the
# server waits a delay in order to let more slaves arrive.
#
# The delay is specified in seconds, and by default is 5 seconds. To disable
# it entirely just set it to 0 seconds and the transfer will start ASAP.
repl-diskless-sync-delay 5
# Slaves send PINGs to server in a predefined interval. It's possible to change
# this interval with the repl_ping_slave_period option. The default value is 10
# seconds.
#
# repl-ping-slave-period 10
# The following option sets the replication timeout for:
#
# 1) Bulk transfer I/O during SYNC, from the point of view of slave.
# 2) Master timeout from the point of view of slaves (data, pings).
# 3) Slave timeout from the point of view of masters (REPLCONF ACK pings).
#
# It is important to make sure that this value is greater than the value
# specified for repl-ping-slave-period otherwise a timeout will be detected
# every time there is low traffic between the master and the slave.
#
# repl-timeout 60
# Disable TCP_NODELAY on the slave socket after SYNC?
#
# If you select "yes" Redis will use a smaller number of TCP packets and
# less bandwidth to send data to slaves. But this can add a delay for
# the data to appear on the slave side, up to 40 milliseconds with
# Linux kernels using a default configuration.
#
# If you select "no" the delay for data to appear on the slave side will
# be reduced but more bandwidth will be used for replication.
#
# By default we optimize for low latency, but in very high traffic conditions
# or when the master and slaves are many hops away, turning this to "yes" may
# be a good idea.
repl-disable-tcp-nodelay no
# Set the replication backlog size. The backlog is a buffer that accumulates
# slave data when slaves are disconnected for some time, so that when a slave
# wants to reconnect again, often a full resync is not needed, but a partial
# resync is enough, just passing the portion of data the slave missed while
# disconnected.
#
# The bigger the replication backlog, the longer the time the slave can be
# disconnected and later be able to perform a partial resynchronization.
#
# The backlog is only allocated once there is at least a slave connected.
#
# repl-backlog-size 1mb
# After a master has no longer connected slaves for some time, the backlog
# will be freed. The following option configures the amount of seconds that
# need to elapse, starting from the time the last slave disconnected, for
# the backlog buffer to be freed.
#
# A value of 0 means to never release the backlog.
#
# repl-backlog-ttl 3600
# The slave priority is an integer number published by Redis in the INFO output.
# It is used by Redis Sentinel in order to select a slave to promote into a
# master if the master is no longer working correctly.
#
# A slave with a low priority number is considered better for promotion, so
# for instance if there are three slaves with priority 10, 100, 25 Sentinel will
# pick the one with priority 10, that is the lowest.
#
# However a special priority of 0 marks the slave as not able to perform the
# role of master, so a slave with priority of 0 will never be selected by
# Redis Sentinel for promotion.
#
# By default the priority is 100.
slave-priority 100
# It is possible for a master to stop accepting writes if there are fewer than
# N slaves connected, having a lag less than or equal to M seconds.
#
# The N slaves need to be in "online" state.
#
# The lag in seconds, which must be <= the specified value, is calculated from
# the last ping received from the slave, which is usually sent every second.
#
# This option does not GUARANTEE that N replicas will accept the write, but
# will limit the window of exposure for lost writes in case not enough slaves
# are available, to the specified number of seconds.
#
# For example to require at least 3 slaves with a lag <= 10 seconds use:
#
# min-slaves-to-write 3
# min-slaves-max-lag 10
#
# Setting one or the other to 0 disables the feature.
#
# By default min-slaves-to-write is set to 0 (feature disabled) and
# min-slaves-max-lag is set to 10.
################################## SECURITY ###################################
# Require clients to issue AUTH <PASSWORD> before processing any other
# commands. This might be useful in environments in which you do not trust
# others with access to the host running redis-server.
#
# This should stay commented out for backward compatibility and because most
# people do not need auth (e.g. they run their own servers).
#
# Warning: since Redis is pretty fast an outside user can try up to
# 150k passwords per second against a good box. This means that you should
# use a very strong password otherwise it will be very easy to break.
#
# requirepass foobared
# Command renaming.
#
# It is possible to change the name of dangerous commands in a shared
# environment. For instance the CONFIG command may be renamed into something
# hard to guess so that it will still be available for internal-use tools
# but not available for general clients.
#
# Example:
#
# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
#
# It is also possible to completely kill a command by renaming it into
# an empty string:
#
# rename-command CONFIG ""
#
# Please note that changing the name of commands that are logged into the
# AOF file or transmitted to slaves may cause problems.
################################### LIMITS ####################################
# Set the max number of connected clients at the same time. By default
# this limit is set to 10000 clients, however if the Redis server is not
# able to configure the process file limit to allow for the specified limit
# the max number of allowed clients is set to the current file limit
# minus 32 (as Redis reserves a few file descriptors for internal uses).
#
# Once the limit is reached Redis will close all the new connections sending
# an error 'max number of clients reached'.
#
# maxclients 10000
# If Redis is to be used as an in-memory-only cache without any kind of
# persistence, then the fork() mechanism used by the background AOF/RDB
# persistence is unnecessary. As an optimization, all persistence can be
# turned off in the Windows version of Redis. This will redirect heap
# allocations to the system heap allocator, and disable commands that would
# otherwise cause fork() operations: BGSAVE and BGREWRITEAOF.
# This flag may not be combined with any of the other flags that configure
# AOF and RDB operations.
# persistence-available [(yes)|no]
# Don't use more memory than the specified amount of bytes.
# When the memory limit is reached Redis will try to remove keys
# according to the eviction policy selected (see maxmemory-policy).
#
# If Redis can't remove keys according to the policy, or if the policy is
# set to 'noeviction', Redis will start to reply with errors to commands
# that would use more memory, like SET, LPUSH, and so on, and will continue
# to reply to read-only commands like GET.
#
# This option is usually useful when using Redis as an LRU cache, or to set
# a hard memory limit for an instance (using the 'noeviction' policy).
#
# WARNING: If you have slaves attached to an instance with maxmemory on,
# the size of the output buffers needed to feed the slaves are subtracted
# from the used memory count, so that network problems / resyncs will
# not trigger a loop where keys are evicted, and in turn the output
# buffer of slaves is full with DELs of keys evicted triggering the deletion
# of more keys, and so forth until the database is completely emptied.
#
# In short... if you have slaves attached it is suggested that you set a lower
# limit for maxmemory so that there is some free RAM on the system for slave
# output buffers (but this is not needed if the policy is 'noeviction').
#
# WARNING: not setting maxmemory will cause Redis to terminate with an
# out-of-memory exception if the heap limit is reached.
#
# NOTE: since Redis uses the system paging file to allocate the heap memory,
# the Working Set memory usage shown by the Windows Task Manager or by other
# tools such as ProcessExplorer will not always be accurate. For example, right
# after a background save of the RDB or the AOF files, the working set value
# may drop significantly. In order to check the correct amount of memory used
# by the redis-server to store the data, use the INFO client command. The INFO
# command shows only the memory used to store the redis data, not the extra
# memory used by the Windows process for its own requirements. The extra amount
# of memory not reported by the INFO command can be calculated by subtracting
# the used_memory_peak reported by the INFO command from the Peak Working Set
# reported by the Windows Task Manager.
#
# maxmemory <bytes>
# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
# is reached. You can select among five behaviors:
#
# volatile-lru -> remove the key with an expire set using an LRU algorithm
# allkeys-lru -> remove any key according to the LRU algorithm
# volatile-random -> remove a random key with an expire set
# allkeys-random -> remove a random key, any key
# volatile-ttl -> remove the key with the nearest expire time (minor TTL)
# noeviction -> don't expire at all, just return an error on write operations
#
# Note: with any of the above policies, Redis will return an error on write
# operations, when there are no suitable keys for eviction.
#
# At the date of writing these commands are: set setnx setex append
# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
# getset mset msetnx exec sort
#
# The default is:
#
# maxmemory-policy noeviction
# LRU and minimal TTL algorithms are not precise algorithms but approximated
# algorithms (in order to save memory), so you can also select the sample
# size to check. For instance, by default Redis will check three keys and
# pick the one that was used least recently; you can change the sample size
# using the following configuration directive.
#
# maxmemory-samples 3
############################## APPEND ONLY MODE ###############################
# By default Redis asynchronously dumps the dataset on disk. This mode is
# good enough in many applications, but an issue with the Redis process or
# a power outage may result in a few minutes of lost writes (depending on
# the configured save points).
#
# The Append Only File is an alternative persistence mode that provides
# much better durability. For instance using the default data fsync policy
# (see later in the config file) Redis can lose just one second of writes in a
# dramatic event like a server power outage, or a single write if something
# goes wrong with the Redis process itself while the operating system is
# still running correctly.
#
# AOF and RDB persistence can be enabled at the same time without problems.
# If the AOF is enabled on startup Redis will load the AOF, that is the file
# with the better durability guarantees.
#
# Please check http://redis.io/topics/persistence for more information.
appendonly no
# The name of the append only file (default: "appendonly.aof")
appendfilename "appendonly.aof"
# The fsync() call tells the Operating System to actually write data on disk
# instead of waiting for more data in the output buffer. Some OSes will really
# flush data to disk, while others will just try to do it ASAP.
#
# Redis supports three different modes:
#
# no: don't fsync, just let the OS flush the data when it wants. Faster.
# always: fsync after every write to the append only log. Slow, safest.
# everysec: fsync only one time every second. Compromise.
#
# The default is "everysec", as that's usually the right compromise between
# speed and data safety. It's up to you to understand if you can relax this to
# "no" that will let the operating system flush the output buffer when
# it wants, for better performance (but if you can live with the idea of
# some data loss consider the default persistence mode that's snapshotting),
# or on the contrary, use "always" that's very slow but a bit safer than
# everysec.
#
# For more details please check the following article:
# http://antirez.com/post/redis-persistence-demystified.html
#
# If unsure, use "everysec".
# appendfsync always
appendfsync everysec
# appendfsync no
# When the AOF fsync policy is set to always or everysec, and a background
# saving process (a background save or AOF log background rewriting) is
# performing a lot of I/O against the disk, in some Linux configurations
# Redis may block too long on the fsync() call. Note that there is no fix for
# this currently, as even performing fsync in a different thread will block
# our synchronous write(2) call.
#
# In order to mitigate this problem it's possible to use the following option
# that will prevent fsync() from being called in the main process while a
# BGSAVE or BGREWRITEAOF is in progress.
#
# This means that while another child is saving, the durability of Redis is
# the same as "appendfsync no". In practical terms, this means that it is
# possible to lose up to 30 seconds of log in the worst scenario (with the
# default Linux settings).
#
# If you have latency problems turn this to "yes". Otherwise leave it as
# "no" that is the safest pick from the point of view of durability.
no-appendfsync-on-rewrite no
# Automatic rewrite of the append only file.
# Redis is able to automatically rewrite the log file implicitly calling
# BGREWRITEAOF when the AOF log size grows by the specified percentage.
#
# This is how it works: Redis remembers the size of the AOF file after the
# latest rewrite (if no rewrite has happened since the restart, the size of
# the AOF at startup is used).
#
# This base size is compared to the current size. If the current size is
# bigger than the base size by the specified percentage, the rewrite is
# triggered. You also need to specify a minimal size for the AOF file to be
# rewritten; this is useful to avoid rewriting the AOF file even if the
# percentage increase is reached but the file is still pretty small.
# For example, with the defaults below (100, 64mb), a rewrite fires once the
# AOF is at least 64mb and has doubled (grown 100%) since the last rewrite.
#
# Specify a percentage of zero in order to disable the automatic AOF
# rewrite feature.
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
# An AOF file may be found to be truncated at the end during the Redis
# startup process, when the AOF data gets loaded back into memory.
# This may happen when the system where Redis is running
# crashes, especially when an ext4 filesystem is mounted without the
# data=ordered option (however this can't happen when Redis itself
# crashes or aborts but the operating system still works correctly).
#
# Redis can either exit with an error when this happens, or load as much
# data as possible (the default now) and start if the AOF file is found
# to be truncated at the end. The following option controls this behavior.
#
# If aof-load-truncated is set to yes, a truncated AOF file is loaded and
# the Redis server starts emitting a log to inform the user of the event.
# Otherwise if the option is set to no, the server aborts with an error
# and refuses to start. When the option is set to no, the user is required
# to fix the AOF file using the "redis-check-aof" utility before restarting
# the server.
#
# Note that if the AOF file is found to be corrupted in the middle,
# the server will still exit with an error. This option only applies when
# Redis tries to read more data from the AOF file but not enough bytes
# are found.
aof-load-truncated yes
################################ LUA SCRIPTING ###############################
# Max execution time of a Lua script in milliseconds.
#
# If the maximum execution time is reached Redis will log that a script is
# still in execution after the maximum allowed time and will start to
# reply to queries with an error.
#
# When a long running script exceeds the maximum execution time only the
# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
# used to stop a script that has not yet called write commands. The second
# is the only way to shut down the server in the case a write command was
# already issued by the script but the user doesn't want to wait for the natural
# termination of the script.
#
# Set it to 0 or a negative value for unlimited execution without warnings.
lua-time-limit 5000
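#
# Usage sketch (illustrative): if a script exceeds the limit, from a second
# redis-cli connection you can issue:
#
#   SCRIPT KILL        (stops a script that has not yet called write commands)
#   SHUTDOWN NOSAVE    (the last resort once the script has performed writes)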
################################ REDIS CLUSTER ###############################
#
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# WARNING EXPERIMENTAL: Redis Cluster is considered to be stable code, however
# in order to mark it as "mature" we need to wait for a non trivial percentage
# of users to deploy it in production.
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# Normal Redis instances can't be part of a Redis Cluster; only nodes that are
# started as cluster nodes can. In order to start a Redis instance as a
# cluster node, enable the cluster support by uncommenting the following:
#
# cluster-enabled yes
# Every cluster node has a cluster configuration file. This file is not
# intended to be edited by hand. It is created and updated by Redis nodes.
# Every Redis Cluster node requires a different cluster configuration file.
# Make sure that instances running in the same system do not have
# overlapping cluster configuration file names.
#
# cluster-config-file nodes-6379.conf
# Cluster node timeout is the number of milliseconds a node must be unreachable
# for it to be considered in a failure state.
# Most other internal time limits are multiples of the node timeout.
#
# cluster-node-timeout 15000
# A slave of a failing master will avoid starting a failover if its data
# looks too old.
#
# There is no simple way for a slave to actually have an exact measure of
# its "data age", so the following two checks are performed:
#
# 1) If there are multiple slaves able to failover, they exchange messages
# in order to try to give an advantage to the slave with the best
# replication offset (more data from the master processed).
# Slaves will try to get their rank by offset, and apply to the start
# of the failover a delay proportional to their rank.
#
# 2) Every single slave computes the time of the last interaction with
# its master. This can be the last ping or command received (if the master
# is still in the "connected" state), or the time that elapsed since the
# disconnection with the master (if the replication link is currently down).
# If the last interaction is too old, the slave will not try to failover
# at all.
#
# The point "2" can be tuned by the user. Specifically a slave will not perform
# the failover if, since the last interaction with the master, the time
# elapsed is greater than:
#
# (node-timeout * slave-validity-factor) + repl-ping-slave-period
#
# So for example if node-timeout is 30 seconds, and the slave-validity-factor
# is 10, and assuming a default repl-ping-slave-period of 10 seconds, the
# slave will not try to failover if it was not able to talk with the master
# for longer than 310 seconds.
#
# A large slave-validity-factor may allow slaves with too old data to failover
# a master, while a too small value may prevent the cluster from being able to
# elect a slave at all.
#
# For maximum availability, it is possible to set the slave-validity-factor
# to a value of 0, which means that slaves will always try to failover the
# master regardless of the last time they interacted with the master.
# (However they'll always try to apply a delay proportional to their
# offset rank).
#
# Zero is the only value able to guarantee that when all the partitions heal
# the cluster will always be able to continue.
#
# cluster-slave-validity-factor 10
# Cluster slaves are able to migrate to orphaned masters, that is, masters
# that are left without working slaves. This improves the cluster's ability
# to resist failures, as otherwise an orphaned master can't be failed over
# if it has no working slaves.
#
# Slaves migrate to orphaned masters only if there is still at least a
# given number of other working slaves for their old master. This number
# is the "migration barrier". A migration barrier of 1 means that a slave
# will migrate only if there is at least 1 other working slave for its master
# and so forth. It usually reflects the number of slaves you want for every
# master in your cluster.
#
# Default is 1 (slaves migrate only if their masters remain with at least
# one slave). To disable migration just set it to a very large value.
# A value of 0 can be set but is useful only for debugging and dangerous
# in production.
#
# cluster-migration-barrier 1
# By default Redis Cluster nodes stop accepting queries if they detect there
# is at least one hash slot uncovered (no available node is serving it).
# This way if the cluster is partially down (for example a range of hash slots
# is no longer covered) the whole cluster eventually becomes unavailable.
# It automatically becomes available again as soon as all the slots are covered.
#
# However sometimes you want the subset of the cluster which is working
# to continue to accept queries for the part of the key space that is still
# covered. In order to do so, just set the cluster-require-full-coverage
# option to no.
#
# cluster-require-full-coverage yes
# In order to set up your cluster make sure to read the documentation
# available at the http://redis.io web site.
################################## SLOW LOG ###################################
# The Redis Slow Log is a system to log queries that exceeded a specified
# execution time. The execution time does not include the I/O operations
# like talking with the client, sending the reply and so forth,
# but just the time needed to actually execute the command (this is the only
# stage of command execution where the thread is blocked and can not serve
# other requests in the meantime).
#
# You can configure the slow log with two parameters: one tells Redis
# what is the execution time, in microseconds, to exceed in order for the
# command to get logged, and the other parameter is the length of the
# slow log. When a new command is logged the oldest one is removed from the
# queue of logged commands.
# The following time is expressed in microseconds, so 1000000 is equivalent
# to one second. Note that a negative number disables the slow log, while
# a value of zero forces the logging of every command.
slowlog-log-slower-than 10000
# There is no limit to this length. Just be aware that it will consume memory.
# You can reclaim memory used by the slow log with SLOWLOG RESET.
slowlog-max-len 128
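#
# Usage sketch (illustrative): the slow log can be inspected at runtime from
# redis-cli:
#
#   SLOWLOG GET 10     (fetch the latest 10 entries)
#   SLOWLOG LEN        (number of entries currently stored)
#   SLOWLOG RESET      (discard all entries, reclaiming memory)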
################################ LATENCY MONITOR ##############################
# The Redis latency monitoring subsystem samples different operations
# at runtime in order to collect data related to possible sources of
# latency of a Redis instance.
#
# Via the LATENCY command this information is available to the user that can
# print graphs and obtain reports.
#
# The system only logs operations that were performed in a time equal to or
# greater than the amount of milliseconds specified via the
# latency-monitor-threshold configuration directive. When its value is set
# to zero, the latency monitor is turned off.
#
# By default latency monitoring is disabled since it is mostly not needed
# if you don't have latency issues, and collecting data has a performance
# impact that, while very small, can be measured under big load. Latency
# monitoring can easily be enabled at runtime using the command
# "CONFIG SET latency-monitor-threshold <milliseconds>" if needed.
latency-monitor-threshold 0
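#
# Usage sketch (illustrative): to enable monitoring at runtime and read back
# the collected samples from redis-cli:
#
#   CONFIG SET latency-monitor-threshold 100
#   LATENCY LATEST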
############################# Event notification ##############################
# Redis can notify Pub/Sub clients about events happening in the key space.
# This feature is documented at http://redis.io/topics/notifications
#
# For instance if keyspace events notification is enabled, and a client
# performs a DEL operation on key "foo" stored in the Database 0, two
# messages will be published via Pub/Sub:
#
# PUBLISH __keyspace@0__:foo del
# PUBLISH __keyevent@0__:del foo
#
# It is possible to select the events that Redis will notify among a set
# of classes. Every class is identified by a single character:
#
# K Keyspace events, published with __keyspace@<db>__ prefix.
# E Keyevent events, published with __keyevent@<db>__ prefix.
# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
# $ String commands
# l List commands
# s Set commands
# h Hash commands
# z Sorted set commands
# x Expired events (events generated every time a key expires)
# e Evicted events (events generated when a key is evicted for maxmemory)
# A Alias for g$lshzxe, so that the "AKE" string means all the events.
#
# The "notify-keyspace-events" directive takes as argument a string that is
# composed of zero or multiple characters. The empty string means that
# notifications are disabled.
#
# Example: to enable list and generic events, from the point of view of the
# event name, use:
#
# notify-keyspace-events Elg
#
# Example 2: to get the stream of the expired keys subscribing to channel
# name __keyevent@0__:expired use:
#
# notify-keyspace-events Ex
#
# By default all notifications are disabled because most users don't need
# this feature and the feature has some overhead. Note that if you don't
# specify at least one of K or E, no events will be delivered.
notify-keyspace-events ""
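#
# Usage sketch (illustrative): with "notify-keyspace-events Ex" set, the
# expired keys of database 0 can be observed with:
#
#   redis-cli psubscribe '__keyevent@0__:expired'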
############################### ADVANCED CONFIG ###############################
# Hashes are encoded using a memory efficient data structure when they have a
# small number of entries, and the biggest entry does not exceed a given
# threshold. These thresholds can be configured using the following directives.
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
# Similarly to hashes, small lists are also encoded in a special way in order
# to save a lot of space. The special representation is only used when
# you are under the following limits:
list-max-ziplist-entries 512
list-max-ziplist-value 64
# Sets have a special encoding in just one case: when a set is composed
# of just strings that happen to be integers in radix 10 in the range
# of 64 bit signed integers.
# The following configuration setting sets the limit in the size of the
# set in order to use this special memory saving encoding.
set-max-intset-entries 512
# Similarly to hashes and lists, sorted sets are also specially encoded in
# order to save a lot of space. This encoding is only used when the length and
# elements of a sorted set are below the following limits:
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
# HyperLogLog sparse representation bytes limit. The limit includes the
# 16 bytes header. When a HyperLogLog using the sparse representation crosses
# this limit, it is converted into the dense representation.
#
# A value greater than 16000 is totally useless, since at that point the
# dense representation is more memory efficient.
#
# The suggested value is ~ 3000 in order to have the benefits of
# the space efficient encoding without slowing down too much PFADD,
# which is O(N) with the sparse encoding. The value can be raised to
# ~ 10000 when CPU is not a concern, but space is, and the data set is
# composed of many HyperLogLogs with cardinality in the 0 - 15000 range.
hll-sparse-max-bytes 3000
# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
# order to help rehashing the main Redis hash table (the one mapping top-level
# keys to values). The hash table implementation Redis uses (see dict.c)
# performs a lazy rehashing: the more operations you run against a hash table
# that is rehashing, the more rehashing "steps" are performed, so if the
# server is idle the rehashing is never complete and some more memory is used
# by the hash table.
#
# The default is to use this millisecond 10 times every second in order to
# actively rehash the main dictionaries, freeing memory when possible.
#
# If unsure:
# use "activerehashing no" if you have hard latency requirements and it is
# not a good thing in your environment that Redis can reply from time to time
# to queries with a 2 millisecond delay.
#
# use "activerehashing yes" if you don't have such hard requirements but
# want to free memory asap when possible.
activerehashing yes
# The client output buffer limits can be used to force disconnection of clients
# that are not reading data from the server fast enough for some reason (a
# common reason is that a Pub/Sub client can't consume messages as fast as the
# publisher can produce them).
#
# The limit can be set differently for the three different classes of clients:
#
# normal -> normal clients including MONITOR clients
# slave -> slave clients
# pubsub -> clients subscribed to at least one pubsub channel or pattern
#
# The syntax of every client-output-buffer-limit directive is the following:
#
# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
#
# A client is immediately disconnected once the hard limit is reached, or if
# the soft limit is reached and remains reached for the specified number of
# seconds (continuously).
# So for instance if the hard limit is 32 megabytes and the soft limit is
# 16 megabytes / 10 seconds, the client will get disconnected immediately
# if the size of the output buffers reaches 32 megabytes, but will also get
# disconnected if the client reaches 16 megabytes and continuously overcomes
# the limit for 10 seconds.
#
# By default normal clients are not limited because they don't receive data
# without asking (in a push way), but just after a request, so only
# asynchronous clients may create a scenario where data is requested faster
# than it can be read.
#
# Instead there is a default limit for pubsub and slave clients, since
# subscribers and slaves receive data in a push fashion.
#
# Both the hard or the soft limit can be disabled by setting them to zero.
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
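#
# Example (illustrative values, not a recommendation): to relax the pubsub
# limit for deployments with slow subscribers, one might instead use:
#
#   client-output-buffer-limit pubsub 64mb 16mb 120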
# Redis calls an internal function to perform many background tasks, like
# closing connections of clients in timeout, purging expired keys that are
# never requested, and so forth.
#
# Not all tasks are performed with the same frequency, but Redis checks for
# tasks to perform according to the specified "hz" value.
#
# By default "hz" is set to 10. Raising the value will use more CPU when
# Redis is idle, but at the same time will make Redis more responsive when
# there are many keys expiring at the same time, and timeouts may be
# handled with more precision.
#
# The range is between 1 and 500, however a value over 100 is usually not
# a good idea. Most users should use the default of 10 and raise this up to
# 100 only in environments where very low latency is required.
hz 10
# When a child rewrites the AOF file, if the following option is enabled
# the file will be fsync-ed every 32 MB of data generated. This is useful
# in order to commit the file to the disk more incrementally and avoid
# big latency spikes.
aof-rewrite-incremental-fsync yes
################################## INCLUDES ###################################
# Include one or more other config files here. This is useful if you
# have a standard template that goes to all Redis servers but also need
# to customize a few per-server settings. Include files can include
# other files, so use this wisely.
#
# include /path/to/local.conf
# include /path/to/other.conf
# Redis configuration file example
# Note on units: when memory size is needed, it is possible to specify
# it in the usual form of 1k 5GB 4M and so forth:
#
# 1k => 1000 bytes
# 1kb => 1024 bytes
# 1m => 1000000 bytes
# 1mb => 1024*1024 bytes
# 1g => 1000000000 bytes
# 1gb => 1024*1024*1024 bytes
#
# units are case insensitive so 1GB 1Gb 1gB are all the same.
################################## INCLUDES ###################################
# Include one or more other config files here. This is useful if you
# have a standard template that goes to all Redis servers but also need
# to customize a few per-server settings. Include files can include
# other files, so use this wisely.
#
# Notice that the "include" option won't be rewritten by the "CONFIG REWRITE"
# command issued by an admin or by Redis Sentinel. Since Redis always uses the
# last processed line as the value of a configuration directive, you'd better
# put includes at the beginning of this file to avoid overwriting config
# changes at runtime.
#
# If instead you are interested in using includes to override configuration
# options, it is better to use include as the last line.
#
# include .\path\to\local.conf
# include c:\path\to\other.conf
################################ GENERAL #####################################
# On Windows, daemonize and pidfile are not supported.
# However, you can run redis as a Windows service, and specify a logfile.
# The logfile will contain the pid.
# Accept connections on the specified port, default is 6379.
# If port 0 is specified Redis will not listen on a TCP socket.
port 6379
# TCP listen() backlog.
#
# In high requests-per-second environments you need a high backlog in order
# to avoid slow client connection issues. Note that the Linux kernel
# will silently truncate it to the value of /proc/sys/net/core/somaxconn so
# make sure to raise both the value of somaxconn and tcp_max_syn_backlog
# in order to get the desired effect.
tcp-backlog 511
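#
# Illustrative note (assumes a Linux host, as in the kernel remark above):
# the kernel-side limits can be raised with, e.g.:
#
#   sysctl -w net.core.somaxconn=511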
# By default Redis listens for connections from all the network interfaces
# available on the server. It is possible to listen to just one or multiple
# interfaces using the "bind" configuration directive, followed by one or
# more IP addresses.
#
# Examples:
#
# bind 192.168.1.100 10.0.0.1
# bind 127.0.0.1
# Specify the path for the Unix socket that will be used to listen for
# incoming connections. There is no default, so Redis will not listen
# on a unix socket when not specified.
#
# unixsocket /tmp/redis.sock
# unixsocketperm 700
# Close the connection after a client is idle for N seconds (0 to disable)
timeout 0
# TCP keepalive.
#
# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence
# of communication. This is useful for two reasons:
#
# 1) Detect dead peers.
# 2) Keep the connection alive from the point of view of network
# equipment in the middle.
#
# On Linux, the specified value (in seconds) is the period used to send ACKs.
# Note that to close the connection double the time is needed.
# On other kernels the period depends on the kernel configuration.
#
# A reasonable value for this option is 60 seconds.
tcp-keepalive 0
# Specify the server verbosity level.
# This can be one of:
# debug (a lot of information, useful for development/testing)
# verbose (lots of rarely useful info, but not a mess like the debug level)
# notice (moderately verbose, what you want in production probably)
# warning (only very important / critical messages are logged)
loglevel notice
# Specify the log file name. Also 'stdout' can be used to force
# Redis to log on the standard output.
logfile ""
# To enable logging to the Windows EventLog, just set 'syslog-enabled' to
# yes, and optionally update the other syslog parameters to suit your needs.
# If Redis is installed and launched as a Windows Service, this will
# automatically be enabled.
# syslog-enabled no
# Specify the source name of the events in the Windows Application log.
# syslog-ident redis
# Set the number of databases. The default database is DB 0, you can select
# a different one on a per-connection basis using SELECT <dbid> where
# dbid is a number between 0 and 'databases'-1
databases 16
################################ SNAPSHOTTING ################################
#
# Save the DB on disk:
#
# save <seconds> <changes>
#
# Will save the DB if both the given number of seconds and the given
# number of write operations against the DB occurred.
#
# In the example below the behaviour will be to save:
# after 900 sec (15 min) if at least 1 key changed
# after 300 sec (5 min) if at least 10 keys changed
# after 60 sec if at least 10000 keys changed
#
# Note: you can disable saving completely by commenting out all "save" lines.
#
# It is also possible to remove all the previously configured save
# points by adding a save directive with a single empty string argument
# like in the following example:
#
# save ""
save 900 1
save 300 10
save 60 10000
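#
# Runtime sketch (illustrative): save points can also be changed without a
# restart, e.g. from redis-cli:
#
#   CONFIG SET save "900 1 300 10"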
# By default Redis will stop accepting writes if RDB snapshots are enabled
# (at least one save point) and the latest background save failed.
# This will make the user aware (in a hard way) that data is not persisting
# on disk properly, otherwise chances are that no one will notice and some
# disaster will happen.
#
# If the background saving process starts working again, Redis will
# automatically allow writes again.
#
# However if you have set up proper monitoring of the Redis server
# and persistence, you may want to disable this feature so that Redis will
# continue to work as usual even if there are problems with disk,
# permissions, and so forth.
stop-writes-on-bgsave-error yes
# Compress string objects using LZF when dumping .rdb databases?
# By default that's set to 'yes' as it's almost always a win.
# If you want to save some CPU in the saving child set it to 'no', but
# the dataset will likely be bigger if you have compressible values or keys.
rdbcompression yes
# Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
# This makes the format more resistant to corruption but there is a performance
# hit to pay (around 10%) when saving and loading RDB files, so you can disable it
# for maximum performance.
#
# RDB files created with checksum disabled have a checksum of zero that will
# tell the loading code to skip the check.
rdbchecksum yes
# The filename where to dump the DB
dbfilename dump.rdb
# The working directory.
#
# The DB will be written inside this directory, with the filename specified
# above using the 'dbfilename' configuration directive.
#
# The Append Only File will also be created inside this directory.
#
# Note that you must specify a directory here, not a file name.
dir ./
################################# REPLICATION #################################
# Master-Slave replication. Use slaveof to make a Redis instance a copy of
# another Redis server. A few things to understand ASAP about Redis replication.
#
# 1) Redis replication is asynchronous, but you can configure a master to
# stop accepting writes if it appears to be not connected with at least
# a given number of slaves.
# 2) Redis slaves are able to perform a partial resynchronization with the
# master if the replication link is lost for a relatively small amount of
# time. You may want to configure the replication backlog size (see the next
# sections of this file) with a sensible value depending on your needs.
# 3) Replication is automatic and does not need user intervention. After a
# network partition slaves automatically try to reconnect to masters
# and resynchronize with them.
#
# slaveof <masterip> <masterport>
# If the master is password protected (using the "requirepass" configuration
# directive below) it is possible to tell the slave to authenticate before
# starting the replication synchronization process, otherwise the master will
# refuse the slave request.
#
# masterauth <master-password>
# When a slave loses its connection with the master, or when the replication
# is still in progress, the slave can act in two different ways:
#
# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
# still reply to client requests, possibly with out of date data, or the
# data set may just be empty if this is the first synchronization.
#
# 2) if slave-serve-stale-data is set to 'no' the slave will reply with
# an error "SYNC with master in progress" to all kinds of commands
# except INFO and SLAVEOF.
#
slave-serve-stale-data yes
# You can configure a slave instance to accept writes or not. Writing against
# a slave instance may be useful to store some ephemeral data (because data
# written on a slave will be easily deleted after resync with the master) but
# may also cause problems if clients are writing to it because of a
# misconfiguration.
#
# Since Redis 2.6 by default slaves are read-only.
#
# Note: read only slaves are not designed to be exposed to untrusted clients
# on the internet. It's just a protection layer against misuse of the instance.
# Still a read only slave exports by default all the administrative commands
# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
# security of read only slaves using 'rename-command' to shadow all the
# administrative / dangerous commands.
slave-read-only yes
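#
# Illustrative sketch of the 'rename-command' shadowing mentioned above
# (the directive is real; the choice of commands is an example):
#
#   rename-command CONFIG ""
#   rename-command DEBUG ""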
# Replication SYNC strategy: disk or socket.
#
# -------------------------------------------------------
# WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY
# -------------------------------------------------------
#
# New slaves and reconnecting slaves that are not able to continue the replication
# process just by receiving differences need to do what is called a "full
# synchronization". An RDB file is transmitted from the master to the slaves.
# The transmission can happen in two different ways:
#
# 1) Disk-backed: The Redis master creates a new process that writes the RDB
# file on disk. Later the file is transferred by the parent
# process to the slaves incrementally.
# 2) Diskless: The Redis master creates a new process that directly writes the
# RDB file to slave sockets, without touching the disk at all.
#
# With disk-backed replication, while the RDB file is generated, more slaves
# can be queued and served with the RDB file as soon as the current child producing
# the RDB file finishes its work. With diskless replication instead once
# the transfer starts, new slaves arriving will be queued and a new transfer
# will start when the current one terminates.
#
# When diskless replication is used, the master waits a configurable amount of
# time (in seconds) before starting the transfer in the hope that multiple slaves
# will arrive and the transfer can be parallelized.
#
# With slow disks and fast (large bandwidth) networks, diskless replication
# works better.
repl-diskless-sync no
# When diskless replication is enabled, it is possible to configure the delay
# the server waits in order to spawn the child that transfers the RDB via socket
# to the slaves.
#
# This is important since once the transfer starts, it is not possible to serve
# new slaves arriving, which will be queued for the next RDB transfer, so the
# server waits a delay in order to let more slaves arrive.
#
# The delay is specified in seconds, and by default is 5 seconds. To disable
# it entirely just set it to 0 seconds and the transfer will start ASAP.
repl-diskless-sync-delay 5
# Slaves send PINGs to the server at a predefined interval. It's possible to
# change this interval with the repl_ping_slave_period option. The default
# value is 10 seconds.
#
# repl-ping-slave-period 10
# The following option sets the replication timeout for:
#
# 1) Bulk transfer I/O during SYNC, from the point of view of slave.
# 2) Master timeout from the point of view of slaves (data, pings).
# 3) Slave timeout from the point of view of masters (REPLCONF ACK pings).
#
# It is important to make sure that this value is greater than the value
# specified for repl-ping-slave-period otherwise a timeout will be detected
# every time there is low traffic between the master and the slave.
#
# repl-timeout 60
# Disable TCP_NODELAY on the slave socket after SYNC?
#
# If you select "yes" Redis will use a smaller number of TCP packets and
# less bandwidth to send data to slaves. But this can add a delay for
# the data to appear on the slave side, up to 40 milliseconds with
# Linux kernels using a default configuration.
#
# If you select "no" the delay for data to appear on the slave side will
# be reduced but more bandwidth will be used for replication.
#
# By default we optimize for low latency, but in very high traffic conditions
# or when the master and slaves are many hops away, turning this to "yes" may
# be a good idea.
repl-disable-tcp-nodelay no
# Set the replication backlog size. The backlog is a buffer that accumulates
# slave data when slaves are disconnected for some time, so that when a slave
# wants to reconnect again, often a full resync is not needed, but a partial
# resync is enough, just passing the portion of data the slave missed while
# disconnected.
#
# The bigger the replication backlog, the longer the time the slave can be
# disconnected and later be able to perform a partial resynchronization.
#
# The backlog is only allocated once there is at least a slave connected.
#
# repl-backlog-size 1mb
# After a master has no longer connected slaves for some time, the backlog
# will be freed. The following option configures the amount of seconds that
# need to elapse, starting from the time the last slave disconnected, for
# the backlog buffer to be freed.
#
# A value of 0 means to never release the backlog.
#
# repl-backlog-ttl 3600
# The slave priority is an integer number published by Redis in the INFO output.
# It is used by Redis Sentinel in order to select a slave to promote into a
# master if the master is no longer working correctly.
#
# A slave with a low priority number is considered better for promotion, so
# for instance if there are three slaves with priority 10, 100, 25 Sentinel will
# pick the one with priority 10, that is the lowest.
#
# However a special priority of 0 marks the slave as not able to perform the
# role of master, so a slave with priority of 0 will never be selected by
# Redis Sentinel for promotion.
#
# By default the priority is 100.
slave-priority 100
# It is possible for a master to stop accepting writes if there are fewer than
# N slaves connected, with a lag less than or equal to M seconds.
#
# The N slaves need to be in "online" state.
#
# The lag in seconds, which must be <= the specified value, is calculated from
# the last ping received from the slave, which is usually sent every second.
#
# This option does not GUARANTEE that N replicas will accept the write, but
# will limit the window of exposure for lost writes in case not enough slaves
# are available, to the specified number of seconds.
#
# For example to require at least 3 slaves with a lag <= 10 seconds use:
#
# min-slaves-to-write 3
# min-slaves-max-lag 10
#
# Setting one or the other to 0 disables the feature.
#
# By default min-slaves-to-write is set to 0 (feature disabled) and
# min-slaves-max-lag is set to 10.
################################## SECURITY ###################################
# Require clients to issue AUTH <PASSWORD> before processing any other
# commands. This might be useful in environments in which you do not trust
# others with access to the host running redis-server.
#
# This should stay commented out for backward compatibility and because most
# people do not need auth (e.g. they run their own servers).
#
# Warning: since Redis is pretty fast an outside user can try up to
# 150k passwords per second against a good box. This means that you should
# use a very strong password otherwise it will be very easy to break.
#
# requirepass foobared
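#
# Usage sketch (illustrative, reusing the example password above): with
# requirepass set, clients must authenticate before issuing other commands:
#
#   AUTH foobared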
# Command renaming.
#
# It is possible to change the name of dangerous commands in a shared
# environment. For instance the CONFIG command may be renamed into something
# hard to guess so that it will still be available for internal-use tools
# but not available for general clients.
#
# Example:
#
# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
#
# It is also possible to completely kill a command by renaming it into
# an empty string:
#
# rename-command CONFIG ""
#
# Please note that changing the name of commands that are logged into the
# AOF file or transmitted to slaves may cause problems.
################################### LIMITS ####################################
# Set the max number of connected clients at the same time. By default
# this limit is set to 10000 clients, however if the Redis server is not
# able to configure the process file limit to allow for the specified limit
# the max number of allowed clients is set to the current file limit
# minus 32 (as Redis reserves a few file descriptors for internal uses).
#
# Once the limit is reached Redis will close all new connections, sending
# an error 'max number of clients reached'.
#
# maxclients 10000
# If Redis is to be used as an in-memory-only cache without any kind of
# persistence, then the fork() mechanism used by the background AOF/RDB
# persistence is unnecessary. As an optimization, all persistence can be
# turned off in the Windows version of Redis. This will redirect heap
# allocations to the system heap allocator, and disable commands that would
# otherwise cause fork() operations: BGSAVE and BGREWRITEAOF.
# This flag may not be combined with any of the other flags that configure
# AOF and RDB operations.
# persistence-available [(yes)|no]
# Don't use more memory than the specified amount of bytes.
# When the memory limit is reached Redis will try to remove keys
# according to the eviction policy selected (see maxmemory-policy).
#
# If Redis can't remove keys according to the policy, or if the policy is
# set to 'noeviction', Redis will start to reply with errors to commands
# that would use more memory, like SET, LPUSH, and so on, and will continue
# to reply to read-only commands like GET.
#
# This option is usually useful when using Redis as an LRU cache, or to set
# a hard memory limit for an instance (using the 'noeviction' policy).
#
# WARNING: If you have slaves attached to an instance with maxmemory on,
# the size of the output buffers needed to feed the slaves is subtracted
# from the used memory count, so that network problems / resyncs will
# not trigger a loop where keys are evicted, and in turn the output
# buffer of slaves fills with DELs of the evicted keys, triggering the deletion
# of more keys, and so forth until the database is completely emptied.
#
# In short... if you have slaves attached it is suggested that you set a lower
# limit for maxmemory so that there is some free RAM on the system for slave
# output buffers (but this is not needed if the policy is 'noeviction').
#
# WARNING: not setting maxmemory will cause Redis to terminate with an
# out-of-memory exception if the heap limit is reached.
#
# NOTE: since Redis uses the system paging file to allocate the heap memory,
# the Working Set memory usage showed by the Windows Task Manager or by other
# tools such as ProcessExplorer will not always be accurate. For example, right
# after a background save of the RDB or the AOF files, the working set value
# may drop significantly. In order to check the correct amount of memory used
# by the redis-server to store the data, use the INFO client command. The INFO
# command shows only the memory used to store the redis data, not the extra
# memory used by the Windows process for its own requirements. The extra amount
# of memory not reported by the INFO command can be calculated by subtracting
# the used_memory_peak reported by the INFO command from the Peak Working Set
# reported by the Windows Task Manager.
#
# maxmemory <bytes>
# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
# is reached. You can select among five behaviors:
#
# volatile-lru -> remove the key with an expire set using an LRU algorithm
# allkeys-lru -> remove any key according to the LRU algorithm
# volatile-random -> remove a random key with an expire set
# allkeys-random -> remove a random key, any key
# volatile-ttl -> remove the key with the nearest expire time (minor TTL)
# noeviction -> don't expire at all, just return an error on write operations
#
# Note: with any of the above policies, Redis will return an error on write
# operations when there are no suitable keys for eviction.
#
# At the date of writing these commands are: set setnx setex append
# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
# getset mset msetnx exec sort
#
# The default is:
#
# maxmemory-policy noeviction
# LRU and minimal TTL algorithms are not precise algorithms but approximated
# algorithms (in order to save memory), so you can also select the sample
# size to check. For instance, by default Redis will check three keys and
# pick the one that was used least recently; you can change the sample size
# using the following configuration directive.
#
# maxmemory-samples 3
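#
# Example (illustrative values): to cap the instance at 2 gigabytes and evict
# across all keys with the approximated LRU algorithm:
#
#   maxmemory 2gb
#   maxmemory-policy allkeys-lru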
############################## APPEND ONLY MODE ###############################
# By default Redis asynchronously dumps the dataset on disk. This mode is
# good enough in many applications, but an issue with the Redis process or
# a power outage may result in a few minutes of lost writes (depending on
# the configured save points).
#
# The Append Only File is an alternative persistence mode that provides
# much better durability. For instance using the default data fsync policy
# (see later in the config file) Redis can lose just one second of writes in a
# dramatic event like a server power outage, or a single write if something
# goes wrong with the Redis process itself, while the operating system is
# still running correctly.
#
# AOF and RDB persistence can be enabled at the same time without problems.
# If the AOF is enabled on startup Redis will load the AOF, that is the file
# with the better durability guarantees.
#
# Please check http://redis.io/topics/persistence for more information.
appendonly no
# The name of the append only file (default: "appendonly.aof")
appendfilename "appendonly.aof"
# The fsync() call tells the Operating System to actually write data on disk
# instead of waiting for more data in the output buffer. Some OSes will really
# flush data on disk, while some others will just try to do it ASAP.
#
# Redis supports three different modes:
#
# no: don't fsync, just let the OS flush the data when it wants. Faster.
# always: fsync after every write to the append only log. Slow, Safest.
# everysec: fsync only one time every second. Compromise.
#
# The default is "everysec", as that's usually the right compromise between
# speed and data safety. It's up to you to understand if you can relax this to
# "no", which will let the operating system flush the output buffer when
# it wants, for better performance (but if you can live with the idea of
# some data loss consider the default persistence mode that's snapshotting),
# or on the contrary, use "always", which is very slow but a bit safer than
# everysec.
#
# For more details please check the following article:
# http://antirez.com/post/redis-persistence-demystified.html
#
# If unsure, use "everysec".
# appendfsync always
appendfsync everysec
# appendfsync no
# When the AOF fsync policy is set to always or everysec, and a background
# saving process (a background save or AOF log background rewriting) is
# performing a lot of I/O against the disk, in some Linux configurations
# Redis may block too long on the fsync() call. Note that there is no fix for
# this currently, as even performing fsync in a different thread will block
# our synchronous write(2) call.
#
# In order to mitigate this problem it's possible to use the following option
# that will prevent fsync() from being called in the main process while a
# BGSAVE or BGREWRITEAOF is in progress.
#
# This means that while another child is saving, the durability of Redis is
# the same as "appendfsync none". In practical terms, this means that it is
# possible to lose up to 30 seconds of log in the worst scenario (with the
# default Linux settings).
#
# If you have latency problems turn this to "yes". Otherwise leave it as
# "no" that is the safest pick from the point of view of durability.
no-appendfsync-on-rewrite no
# Automatic rewrite of the append only file.
# Redis is able to automatically rewrite the log file by implicitly calling
# BGREWRITEAOF when the AOF log size grows by the specified percentage.
#
# This is how it works: Redis remembers the size of the AOF file after the
# latest rewrite (if no rewrite has happened since the restart, the size of
# the AOF at startup is used).
#
# This base size is compared to the current size. If the current size is
# bigger than the base size by the specified percentage, the rewrite is
# triggered. Also you need to specify a minimal size for the AOF file to be
# rewritten; this is useful to avoid rewriting the AOF file even if the
# percentage increase is reached but it is still pretty small.
#
# Specify a percentage of zero in order to disable the automatic AOF
# rewrite feature.
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
# An AOF file may be found to be truncated at the end during the Redis
# startup process, when the AOF data gets loaded back into memory.
# This may happen when the system where Redis is running
# crashes, especially when an ext4 filesystem is mounted without the
# data=ordered option (however this can't happen when Redis itself
# crashes or aborts but the operating system still works correctly).
#
# Redis can either exit with an error when this happens, or load as much
# data as possible (the default now) and start if the AOF file is found
# to be truncated at the end. The following option controls this behavior.
#
# If aof-load-truncated is set to yes, a truncated AOF file is loaded and
# the Redis server starts emitting a log to inform the user of the event.
# Otherwise if the option is set to no, the server aborts with an error
# and refuses to start. When the option is set to no, the user is required
# to fix the AOF file using the "redis-check-aof" utility before restarting
# the server.
#
# Note that if the AOF file is found to be corrupted in the middle,
# the server will still exit with an error. This option only applies when
# Redis tries to read more data from the AOF file but not enough bytes
# are found.
aof-load-truncated yes
################################ LUA SCRIPTING ###############################
# Max execution time of a Lua script in milliseconds.
#
# If the maximum execution time is reached Redis will log that a script is
# still in execution after the maximum allowed time and will start to
# reply to queries with an error.
#
# When a long running script exceeds the maximum execution time only the
# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
# used to stop a script that has not yet called write commands. The second
# is the only way to shut down the server in the case a write command was
# already issued by the script but the user doesn't want to wait for the natural
# termination of the script.
#
# Set it to 0 or a negative value for unlimited execution without warnings.
lua-time-limit 5000
################################ REDIS CLUSTER ###############################
#
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# WARNING EXPERIMENTAL: Redis Cluster is considered to be stable code, however
# in order to mark it as "mature" we need to wait for a non trivial percentage
# of users to deploy it in production.
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# Normal Redis instances can't be part of a Redis Cluster; only nodes that are
# started as cluster nodes can. In order to start a Redis instance as a
# cluster node, enable the cluster support by uncommenting the following:
#
# cluster-enabled yes
# Every cluster node has a cluster configuration file. This file is not
# intended to be edited by hand. It is created and updated by Redis nodes.
# Every Redis Cluster node requires a different cluster configuration file.
# Make sure that instances running in the same system do not have
# overlapping cluster configuration file names.
#
# cluster-config-file nodes-6379.conf
# Cluster node timeout is the number of milliseconds a node must be unreachable
# for it to be considered in a failure state.
# Most other internal time limits are multiples of the node timeout.
#
# cluster-node-timeout 15000
# A slave of a failing master will avoid starting a failover if its data
# looks too old.
#
# There is no simple way for a slave to actually have an exact measure of
# its "data age", so the following two checks are performed:
#
# 1) If there are multiple slaves able to failover, they exchange messages
# in order to try to give an advantage to the slave with the best
# replication offset (more data from the master processed).
# Slaves will try to get their rank by offset, and apply to the start
# of the failover a delay proportional to their rank.
#
# 2) Every single slave computes the time of the last interaction with
# its master. This can be the last ping or command received (if the master
# is still in the "connected" state), or the time that elapsed since the
# disconnection with the master (if the replication link is currently down).
# If the last interaction is too old, the slave will not try to failover
# at all.
#
# The point "2" can be tuned by the user. Specifically a slave will not perform
# the failover if, since the last interaction with the master, the time
# elapsed is greater than:
#
# (node-timeout * slave-validity-factor) + repl-ping-slave-period
#
# So for example if node-timeout is 30 seconds, and the slave-validity-factor
# is 10, and assuming a default repl-ping-slave-period of 10 seconds, the
# slave will not try to failover if it was not able to talk with the master
# for longer than 310 seconds.
#
# A large slave-validity-factor may allow slaves with too old data to failover
# a master, while a too small value may prevent the cluster from being able to
# elect a slave at all.
#
# For maximum availability, it is possible to set the slave-validity-factor
# to a value of 0, which means that slaves will always try to failover the
# master regardless of the last time they interacted with the master.
# (However they'll always try to apply a delay proportional to their
# offset rank).
#
# Zero is the only value able to guarantee that when all the partitions heal
# the cluster will always be able to continue.
#
# cluster-slave-validity-factor 10
# Cluster slaves are able to migrate to orphaned masters, that is, masters
# that are left without working slaves. This improves the cluster's ability
# to resist failures, as otherwise an orphaned master can't be failed over
# if it has no working slaves.
#
# Slaves migrate to orphaned masters only if there is still at least a
# given number of other working slaves for their old master. This number
# is the "migration barrier". A migration barrier of 1 means that a slave
# will migrate only if there is at least 1 other working slave for its master
# and so forth. It usually reflects the number of slaves you want for every
# master in your cluster.
#
# Default is 1 (slaves migrate only if their masters remain with at least
# one slave). To disable migration just set it to a very large value.
# A value of 0 can be set but is useful only for debugging and dangerous
# in production.
#
# cluster-migration-barrier 1
# By default Redis Cluster nodes stop accepting queries if they detect there
# is at least one hash slot uncovered (no available node is serving it).
# This way if the cluster is partially down (for example a range of hash slots
# is no longer covered) the whole cluster eventually becomes unavailable.
# It automatically becomes available again as soon as all the slots are covered.
#
# However sometimes you want the subset of the cluster which is working
# to continue to accept queries for the part of the key space that is still
# covered. In order to do so, just set the cluster-require-full-coverage
# option to no.
#
# cluster-require-full-coverage yes
# In order to set up your cluster make sure to read the documentation
# available at the http://redis.io web site.
################################## SLOW LOG ###################################
# The Redis Slow Log is a system to log queries that exceeded a specified
# execution time. The execution time does not include the I/O operations
# like talking with the client, sending the reply and so forth,
# but just the time needed to actually execute the command (this is the only
# stage of command execution where the thread is blocked and can not serve
# other requests in the meantime).
#
# You can configure the slow log with two parameters: one tells Redis
# what is the execution time, in microseconds, to exceed in order for the
# command to get logged, and the other parameter is the length of the
# slow log. When a new command is logged the oldest one is removed from the
# queue of logged commands.
# The following time is expressed in microseconds, so 1000000 is equivalent
# to one second. Note that a negative number disables the slow log, while
# a value of zero forces the logging of every command.
slowlog-log-slower-than 10000
# There is no limit to this length. Just be aware that it will consume memory.
# You can reclaim memory used by the slow log with SLOWLOG RESET.
slowlog-max-len 128
################################ LATENCY MONITOR ##############################
# The Redis latency monitoring subsystem samples different operations
# at runtime in order to collect data related to possible sources of
# latency of a Redis instance.
#
# Via the LATENCY command this information is available to the user that can
# print graphs and obtain reports.
#
# The system only logs operations that were performed in a time equal to or
# greater than the amount of milliseconds specified via the
# latency-monitor-threshold configuration directive. When its value is set
# to zero, the latency monitor is turned off.
#
# By default latency monitoring is disabled since it is mostly not needed
# if you don't have latency issues, and collecting data has a performance
# impact that, while very small, can be measured under big load. Latency
# monitoring can easily be enabled at runtime using the command
# "CONFIG SET latency-monitor-threshold <milliseconds>" if needed.
latency-monitor-threshold 0
############################# Event notification ##############################
# Redis can notify Pub/Sub clients about events happening in the key space.
# This feature is documented at http://redis.io/topics/notifications
#
# For instance if keyspace events notification is enabled, and a client
# performs a DEL operation on key "foo" stored in the Database 0, two
# messages will be published via Pub/Sub:
#
# PUBLISH __keyspace@0__:foo del
# PUBLISH __keyevent@0__:del foo
#
# It is possible to select the events that Redis will notify among a set
# of classes. Every class is identified by a single character:
#
# K Keyspace events, published with __keyspace@<db>__ prefix.
# E Keyevent events, published with __keyevent@<db>__ prefix.
# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
# $ String commands
# l List commands
# s Set commands
# h Hash commands
# z Sorted set commands
# x Expired events (events generated every time a key expires)
# e Evicted events (events generated when a key is evicted for maxmemory)
# A Alias for g$lshzxe, so that the "AKE" string means all the events.
#
# The "notify-keyspace-events" directive takes as argument a string that is
# composed of zero or multiple characters. The empty string means that
# notifications are disabled.
#
# Example: to enable list and generic events, from the point of view of the
# event name, use:
#
# notify-keyspace-events Elg
#
# Example 2: to get the stream of the expired keys subscribing to channel
# name __keyevent@0__:expired use:
#
# notify-keyspace-events Ex
#
# By default all notifications are disabled because most users don't need
# this feature and the feature has some overhead. Note that if you don't
# specify at least one of K or E, no events will be delivered.
notify-keyspace-events ""
############################### ADVANCED CONFIG ###############################
# Hashes are encoded using a memory efficient data structure when they have a
# small number of entries, and the biggest entry does not exceed a given
# threshold. These thresholds can be configured using the following directives.
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
# Similarly to hashes, small lists are also encoded in a special way in order
# to save a lot of space. The special representation is only used when
# you are under the following limits:
list-max-ziplist-entries 512
list-max-ziplist-value 64
# Sets have a special encoding in just one case: when a set is composed
# of just strings that happen to be integers in radix 10 in the range
# of 64 bit signed integers.
# The following configuration setting sets the limit in the size of the
# set in order to use this special memory saving encoding.
set-max-intset-entries 512
# Similarly to hashes and lists, sorted sets are also specially encoded in
# order to save a lot of space. This encoding is only used when the length and
# elements of a sorted set are below the following limits:
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
# HyperLogLog sparse representation bytes limit. The limit includes the
# 16 bytes header. When a HyperLogLog using the sparse representation crosses
# this limit, it is converted into the dense representation.
#
# A value greater than 16000 is totally useless, since at that point the
# dense representation is more memory efficient.
#
# The suggested value is ~ 3000 in order to have the benefits of
# the space efficient encoding without slowing down too much PFADD,
# which is O(N) with the sparse encoding. The value can be raised to
# ~ 10000 when CPU is not a concern, but space is, and the data set is
# composed of many HyperLogLogs with cardinality in the 0 - 15000 range.
hll-sparse-max-bytes 3000
# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
# order to help rehashing the main Redis hash table (the one mapping top-level
# keys to values). The hash table implementation Redis uses (see dict.c)
# performs a lazy rehashing: the more operations you run against a hash table
# that is rehashing, the more rehashing "steps" are performed, so if the
# server is idle the rehashing is never complete and some more memory is used
# by the hash table.
#
# The default is to use this millisecond 10 times every second in order to
# actively rehash the main dictionaries, freeing memory when possible.
#
# If unsure:
# use "activerehashing no" if you have hard latency requirements and it is
# not a good thing in your environment that Redis can reply from time to time
# to queries with a 2 millisecond delay.
#
# use "activerehashing yes" if you don't have such hard requirements but
# want to free memory asap when possible.
activerehashing yes
# The client output buffer limits can be used to force disconnection of clients
# that are not reading data from the server fast enough for some reason (a
# common reason is that a Pub/Sub client can't consume messages as fast as the
# publisher can produce them).
#
# The limit can be set differently for the three different classes of clients:
#
# normal -> normal clients including MONITOR clients
# slave -> slave clients
# pubsub -> clients subscribed to at least one pubsub channel or pattern
#
# The syntax of every client-output-buffer-limit directive is the following:
#
# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
#
# A client is immediately disconnected once the hard limit is reached, or if
# the soft limit is reached and remains reached for the specified number of
# seconds (continuously).
# So for instance if the hard limit is 32 megabytes and the soft limit is
# 16 megabytes / 10 seconds, the client will get disconnected immediately
# if the size of the output buffers reaches 32 megabytes, but will also get
# disconnected if the client reaches 16 megabytes and continuously overcomes
# the limit for 10 seconds.
#
# By default normal clients are not limited because they don't receive data
# without asking (in a push way), but just after a request, so only
# asynchronous clients may create a scenario where data is requested faster
# than it can be read.
#
# Instead there is a default limit for pubsub and slave clients, since
# subscribers and slaves receive data in a push fashion.
#
# Both the hard or the soft limit can be disabled by setting them to zero.
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
# Redis calls an internal function to perform many background tasks, like
# closing connections of clients in timeout, purging expired keys that are
# never requested, and so forth.
#
# Not all tasks are performed with the same frequency, but Redis checks for
# tasks to perform according to the specified "hz" value.
#
# By default "hz" is set to 10. Raising the value will use more CPU when
# Redis is idle, but at the same time will make Redis more responsive when
# there are many keys expiring at the same time, and timeouts may be
# handled with more precision.
#
# The range is between 1 and 500, however a value over 100 is usually not
# a good idea. Most users should use the default of 10 and raise this up to
# 100 only in environments where very low latency is required.
hz 10
# When a child rewrites the AOF file, if the following option is enabled
# the file will be fsync-ed every 32 MB of data generated. This is useful
# in order to commit the file to the disk more incrementally and avoid
# big latency spikes.
aof-rewrite-incremental-fsync yes
################################## INCLUDES ###################################
# Include one or more other config files here. This is useful if you
# have a standard template that goes to all Redis servers but also need
# to customize a few per-server settings. Include files can include
# other files, so use this wisely.
#
# include /path/to/local.conf
# include /path/to/other.conf