File jc-1.2.1-git.patch of Package jc
diff --git a/.gitignore b/.gitignore
index d9f4a4a..dc2c0ba 100644
--- a/.gitignore
+++ b/.gitignore
@@ -27,3 +27,11 @@ rel/jc
target
*.gz
_build
+test/ct_run.ct@c4b301cb0a29.2017-10-09_19.10.46
+test/*.html
+test/*.css
+test/*.js
+variables-ct@*
+test/rebar.lock
+test/ct_run.ct*
+ct_log_cache
diff --git a/.travis.yml b/.travis.yml
index 433cba9..c37a4cf 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,8 +1,6 @@
language: erlang
otp_release:
- - 17.0
- - 18.0
- - 19.0
+ - 21.0
script: "sh -ex .travis_build.sh"
\ No newline at end of file
diff --git a/README.md b/README.md
index 02fc1e7..362e198 100644
--- a/README.md
+++ b/README.md
@@ -1,15 +1,15 @@
JC
====
-##Erlang, Distributable, In-Memory Cache
+## Erlang, Distributable, In-Memory Cache
-### Featruing: Pub/Sub, JSON-query, consistency support, and a simple, TCP interop. protocol.
+### Featuring: Pub/Sub, JSON-query, consistency support, and a simple, TCP interop. protocol.
[](https://travis-ci.org/jr0senblum/jc)
[](https://hex.pm/packages/jc)
-###Features
+### Features
* Cache entries are Map, Key, Value, [TTL], [Sequence]
* Maps represent a name-space for Keys - similar to the notion
of 'bucket'
@@ -52,7 +52,7 @@ JC
broadcast arbitrary messages under those topic names
* Clients can subscribe to node-up and node-down events
* Interopability
- * Binary string over TCP returning JSON
+ * Binary string over TCP returning JSON (EXPERIMENTAL)
* Bridge process that accepts messages from a client indicating
cache operations, executes the cache operations and returns the
results to the client. This has been used with JInterface to
@@ -61,7 +61,7 @@ JC
-###Cache Functions (jc)
+### Cache Functions (jc)
* Create
* put(Map, Key, Value, [TTLSecs]) -> {ok, Key} | {error, badarg}
* put_all(Map, [{K,V},{K,V},...], [TTLSecs]) -> {ok, CountOfSuccessfulPuts} |
@@ -114,12 +114,12 @@ Identical to the Create and Evict family of functions of the jc module
-###Eviction Manager Functions (jc_eviction_manager)
+### Eviction Manager Functions (jc_eviction_manager)
* set_max_ttl(Map, Secs) -> ok | {error, badarg}
* get_max_ttls() -> [{Map, Secs}, ...]
-###Pub/Sub Functions (jc_psub)
+### Pub/Sub Functions (jc_psub)
* map_subscribe(Pid, Map, Key|any, write|delete|any) -> ok | {error, badarg}
* map_unsubscribe(Pid, Map, Key|any, write|delete|any) -> ok | {error, badarg}
* client receives
@@ -142,7 +142,7 @@ Identical to the Create and Evict family of functions of the jc module
`{jc_node_events, {nodeup, UppedNode, [ActiveNodes],[ConfiguredNodes]}}`
-###Indexing Functions (jc_store)
+### Indexing Functions (jc_store)
* start_indexing(Map, Path={bed,"menu.id"}) -> ok |
{error, no_indexes_available} |
{error, Term}
@@ -152,7 +152,7 @@ Identical to the Create and Evict family of functions of the jc module
* indexes() -> {indexes, [{{Map, Path}, Position},...]} for all indexes
-###Interoperability: Bridge (jc_bridge)
+### Interoperability: Bridge (jc_bridge)
* All functions from the jc, jc_s, jc_eviction_manager, jc_psub
and jc_store are supported and are of the form:
@@ -176,7 +176,7 @@ Identical to the Create and Evict family of functions of the jc module
{From, {node_topic_unsub}} -> ok.
-### Interoperability: Socket Protocol - EXPIREMENTAL
+### Interoperability: Socket Protocol - EXPERIMENTAL
Binary-encoded, string protocol used to provide socket-based
interoperability with JC.
@@ -236,13 +236,13 @@ might look as follows:
-###Configuration
+### Configuration
* Application configuration is in sys.config which is heavily
commented
* Cookie, node-name and auto-restart of VM controlled by vm.args
-###Application Modules
+### Application Modules
* jc_cluster
* Simple, mnesia-based, cluster creation and management
* jc, jc_s, jc_store, jc_eviction_manager
@@ -266,7 +266,7 @@ might look as follows:
* Looks for evidence of node dis/apperation and implements a recovery
strategy
-###Net Split/Join Strategy
+### Net Split/Join Strategy
Mnesia does not merge on its own when a node joins (returns) to a mesh of nodes.
There are two situations where this is relevant:
@@ -287,9 +287,9 @@ Given this ClusterId, we have the following strategy:
3. _Nodeup_ Whenever a Node appears, an arbitary Node ensures that any Nodes that report
a different ClusterId (different than the arbitrary Node's ClusterId) are killed to be
restarted by the hearbeat application. If any Nodes required restarting, the entire
- cache is flushed.
+   cache is flushed or not per policy in sys.config.
-###Build Instructions
+### Build Instructions
* Ensure that Erlang 17 or higher is installed
* Get the Source Code from Stash
@@ -306,7 +306,7 @@ Given this ClusterId, we have the following strategy:
`[root@db01] ./rebar3 as prod release`
-###Documentation
+### Documentation
`[root@dbo1] ./rebar3 edoc`
diff --git a/config/sys.config b/config/sys.config
index 34dbd78..39daffb 100644
--- a/config/sys.config
+++ b/config/sys.config
@@ -1,55 +1,39 @@
-%% Typically the only line that is customer specific is the cache_nodes lines in
+%% Typically the only lines that are customer specific are the cache_nodes lines in
%% the jc stanza.
[
- %% SASL config - type of logging not typically used, but might come in handy
- %% log files will be created, but are small and rotated.
- {sasl, [
- {sasl_error_logger, {file, "log/sasl-error.log"}},
- {errlog_type, error},
- {error_logger_mf_dir, "log/sasl"}, % Log directory
- {error_logger_mf_maxbytes, 10485760}, % 10 MB max file size
- {error_logger_mf_maxfiles, 5} % 5 files max
- ]},
-
{mnesia,
[
%% Specifies the maximum number of writes allowed to the transaction log
- %% before a new dump of the log is performed. The higer the more ram is
- %% used but low numbers may result in mnesia not being able to keep up.
+    %% before a new dump of the log is performed. The higher the more RAM is
+ %% used, but low numbers may result in mnesia not being able to keep up.
%% Default is 100
{dump_log_write_threshold, 50000},
- %% Must be ram so that schema is diskless. This allows for nodes to come in
- %% and out of the cluster in any order without worying about conflicting
+ %% MUST be RAM so that schema is diskless. This allows for nodes to come
+    %% in and out of the cluster in any order without worrying about conflicting
%% masters.
{schema_location, ram}
- ]
- },
+ ]},
{lager,
[
+ %% Lager back-end handlers. Logs are small and rotated by default.
{handlers, [
- {lager_console_backend, info},
- {lager_file_backend, [{file, "log/error.log"},
- {level, error},
- {size, 10485760},
- {date, "$D0"},
- {count, 5}]},
-%% Uncomment for file-based debug log
-%% {lager_file_backend, [{file, "log/debug.log"},
-%% {level, debug},
-%% {size, 10485760},
-%% {date, "$D0"},
-%% {count, 5}]},
+ {lager_console_backend, [{level, info}]},
+ %% Uncomment for file-based debug log
+ %% {lager_file_backend, [{file, "log/debug.log"},
+ %% {level, debug},
+ %% {size, 10485760},
+ %% {date, "$D0"},
+ %% {count, 5}]},
{lager_file_backend, [{file, "log/info.log"},
{level, info},
{size, 10485760},
{date, "$D0"},
{count, 5}]}
]},
- %% We are not racist and do not want colors on the command line.
{colored, true}
]},
@@ -63,7 +47,9 @@
{table_wait_ms, 2000},
%% At what interval to run the process that looks for clients with no
- %% subscriptions and remove them from the subscriber (ps_client) tables.
+ %% subscriptions and removes them from the subscriber (ps_client) tables.
+ %% This is a safety-net activity and is used to remove subscriptions that
+    %% failed to be removed due to a failure of some sort.
{evict_deadbeats_ms, 3600000},
%% How often to run the process that evicts records that are older than the
@@ -71,16 +57,22 @@
{max_ttl_job_secs, 180},
%% Possibly empty list indicating max_ttl for records in the map. Format is
- %% {max_ttl_maps, [{Map1, Secs1}]},
+ %% {max_ttl_maps, [{Map1, Secs1}, ..., {MapN, SecsN}]},
{max_ttl_maps, [{testmap, 100}]},
- %% Initial json values upon which to index
+ %% Initial JSON values upon which to index. In a path, the number 2 indicates
+    %% the second element of an array, while the '2' indicates the string 2 in the path.
%% {indexes, [{bed, "identifier"}, {bed, "menu.2.id.'2'"}]}
- %% Frequency needed to warrant indexing {freq, Time_secs}
+ %% Frequency needed to see a particular JSON query before indexing
+ %% {freq, Time_secs}
{analyze_freq, {5, 5}},
- %% Port for the Socket protocol listener
+ %% When a node appears after a net-split, some nodes are restarted. If the
+ %% survivors should flush their contents then this should be true, else false.
+ {should_flush, false},
+
+    %% Port for the Socket protocol listener EXPERIMENTAL FEATURE
{protocol_port, 5555}
]}
diff --git a/doc/overview.edoc b/doc/overview.edoc
index c6bdd0a..4767573 100644
--- a/doc/overview.edoc
+++ b/doc/overview.edoc
@@ -240,9 +240,34 @@ client:send("{get, evs, \"1\"}"),
<li>Server that acts as a proxy between an external process and
jc functionality</li>
</ul>
+
+=== Net Split/Join Strategy ===
+
+Mnesia does not merge on its own when a node joins (returns) to a mesh of nodes.
+There are two situations where this is relevant:
+
+* j_cache nodes start in a disconnected state so more than one initiates a new
+cluster and then, subsequently, those nodes join into one cluster;
+* A node drops out of the cluster due to some network glitch and then rejoins.
+
+To handle these situations, whenever a cluster is created by a Node (node@123.45.67,
+for example), it creates a ClusterId - its Node name (node@123.45.67), for that cluster.
+
+Given this ClusterId, we have the following strategy:
+
+1. _Cluster Creation_: creates an initial ClusterId;
+2. _Nodedown_: If the Node that created the cluster disappears, a surviving Node changes the
+ ClusterId such that ClusterId is now this new Node's name. In the case of a
+   disconnected network, one of the islands will have the original ClusterId Node
+   disappear, and it will create a new one as described.
+3. _Nodeup_ Whenever a Node appears, an arbitrary Node ensures that any Nodes that report
+ a different ClusterId (different than the arbitrary Node's ClusterId) are killed to be
+   restarted by the heartbeat application. If any Nodes required restarting, the entire
+   cache is flushed or not per policy in sys.config.
+
=== Build Instructions ===
<ul>
-<li>Ensure that Erlang 17 or higher is installed</li>
+<li>Ensure that Erlang 20 or higher is installed</li>
<li>Get the Source Code from Stash</li>
</ul>
<p><code>[root@db01] git clone https://github.com/jr0senblum/jc.git</code></p>
diff --git a/include/records.hrl b/include/records.hrl
index 0e5398e..f3e094b 100644
--- a/include/records.hrl
+++ b/include/records.hrl
@@ -1,22 +1,21 @@
% Types
-type seconds() :: non_neg_integer().
+-type ttl() :: seconds().
+-type time_stamp() :: seconds().
-type map_name() :: any().
-type key() :: any().
-type value() :: any().
--type ttl() :: seconds().
+
-type rec_ref() :: reference().
--type time_stamp() :: seconds().
-% Key_to_value - an ordered_set table whose key is {key, map}. Ref is used by
-% jc_eviction manager as the key of the cache item to evict. i1 - i4 are
-% fields that can be used to hold values pulled from a json value to support a
-% querry-select feature (see README and jc_store:start_indexing/2 for more. Seq
-% is an integer supplied by the client that, if provided, is expected to be
-% strictly monotinic. If it is not, the put with the non monotonic value will
-% be evicted and the old one re-inserted.
-%
+
+% Key_to_value - an ordered_set table whose key is {key, map_name}. Ref is used
+% by jc_eviction manager as the key of the cache item to evict. i1 - i4 are
+% fields that can be used to hold values pulled from a json value to support an
+% indexed query-select feature (see README and jc_store:start_indexing/2 for
+% more).
-record (key_to_value,
{jc_key :: {key(), map_name()},
map :: map_name(),
@@ -35,14 +34,19 @@
-type key_to_value() :: #key_to_value{}.
+% Seq_no is an integer supplied by the client that, if provided, MUST be
+% strictly monotonic and is used as a sequence number to ensure that a stale
+% operation doesn't make it to jcache after the fact and clobber a more recent
+% put or evict operation.
-record(seq,
{map :: map_name(),
seq_no :: jc_sequence:seq()
}).
-% Holds information about json-path's that will be the target of a query-sellect
-% for a given map. Position indicates which column (i1-i4) in key_to_value to
-% use.
+
+% Defines the index for a given map and JSON path. Used for query-selects and
+% evicts. Position indicates which column (i1-i4) in key_to_value to store the
+% index.
-record (to_index,
{map_path :: {map_name(), tuple()},
map_name :: map_name(),
@@ -50,6 +54,8 @@
}).
+% Record that keeps track of the accounting around a JSON query as part of
+% determining whether an index should be initiated.
-record (auto_index,
{map_path :: {map_name(), tuple()} | '_',
count :: non_neg_integer() | '_',
@@ -86,7 +92,6 @@
}).
-
% Jc_psub records. Subscription patterns and the set of PIDS subscribed to those
% patterns.
-record (ps_sub,
@@ -94,6 +99,7 @@
clients = sets:new() :: sets:set()
}).
+
% Ps_client records. Unique processes that are subscribers, includings what type
% of mechanism is used to monitor the client - link to the Pid or monitor the
% node.
diff --git a/rebar.config b/rebar.config
index 4706976..ce32459 100644
--- a/rebar.config
+++ b/rebar.config
@@ -11,8 +11,8 @@
{deps, [
{jwalk, "1.1.0"},
- {jsone, "1.2.0"},
- {lager, "3.2.1"},
+ {jsone, {git, "git://github.com/sile/jsone", {branch, "otp21-rc1"}}},
+ {lager, "3.6.7"},
{ranch, "1.1.0"}
]}.
@@ -41,7 +41,6 @@
['jc',
kernel,
stdlib,
- sasl,
inets,
{observer,load},
{wx, load},
diff --git a/rebar.lock b/rebar.lock
index 69812fa..1b3db9d 100644
--- a/rebar.lock
+++ b/rebar.lock
@@ -1,5 +1,16 @@
-[{<<"goldrush">>,{pkg,<<"goldrush">>,<<"0.1.7">>},1},
- {<<"jsone">>,{pkg,<<"jsone">>,<<"1.2.0">>},0},
+{"1.1.0",
+[{<<"goldrush">>,{pkg,<<"goldrush">>,<<"0.1.9">>},1},
+ {<<"jsone">>,
+ {git,"git://github.com/sile/jsone",
+ {ref,"1db0b318a3dfbacc146c291d622f56968b88d4a0"}},
+ 0},
{<<"jwalk">>,{pkg,<<"jwalk">>,<<"1.1.0">>},0},
- {<<"lager">>,{pkg,<<"lager">>,<<"3.0.1">>},0},
- {<<"ranch">>,{pkg,<<"ranch">>,<<"1.1.0">>},0}].
+ {<<"lager">>,{pkg,<<"lager">>,<<"3.6.7">>},0},
+ {<<"ranch">>,{pkg,<<"ranch">>,<<"1.1.0">>},0}]}.
+[
+{pkg_hash,[
+ {<<"goldrush">>, <<"F06E5D5F1277DA5C413E84D5A2924174182FB108DABB39D5EC548B27424CD106">>},
+ {<<"jwalk">>, <<"4696A13FEDBB237C8DC24F659C716524CD77232C076FA2466AC5C53DC6C64EA3">>},
+ {<<"lager">>, <<"2FBF823944CAA0FC10DF5EC13F3F047524A249BB32F0D801B7900C9610264286">>},
+ {<<"ranch">>, <<"F7ED6D97DB8C2A27CCA85CACBD543558001FC5A355E93A7BFF1E9A9065A8545B">>}]}
+].
diff --git a/rebar3 b/rebar3
index f49bd25..0236d99 100755
Binary files a/rebar3 and b/rebar3 differ
diff --git a/src/jc.app.src b/src/jc.app.src
index 7541d38..1b27d3a 100644
--- a/src/jc.app.src
+++ b/src/jc.app.src
@@ -14,7 +14,6 @@
{applications, [
kernel,
stdlib,
- sasl,
stdlib,
syntax_tools,
compiler,
diff --git a/src/jc.erl b/src/jc.erl
index abb791d..739189e 100644
--- a/src/jc.erl
+++ b/src/jc.erl
@@ -1,14 +1,15 @@
%%% ----------------------------------------------------------------------------
%%% @author Jim Rosenblum
-%%% @copyright (C) 2011 - 2015, Jim Rosenblum
+%%% @copyright (C) 2011 - 2017, Jim Rosenblum
%%% @doc This module wraps the mnesia-interacting, lower-level functions
%%% implemented in {@link jc_store. jc_store} to provide a public, DIRTY,
%%% set of opperations. {@link jc_s. jc_s} provides functions that take a
%%% sequence parameter to better support serilization (consistency) without.
%%% resorting to transactions.
%%%
-%%% JC can be called directly by Erlang clients; or,
+%%% The jc module can be called directly by Erlang clients; or,
%%% Java node -> JInterface -> {@link jc_bridge. jc_bridge} -> jc; or,
+%%% experimentally,
%%% Application -> TPC/IP -> {@link jc_protocol. jc_protocol} -> jc
%%%
%%% @version {@version}
@@ -29,7 +30,6 @@
flush/0, flush/1,
remove_items/2]).
-
% Get Functions
-export([contains_key/2,
get/2,
@@ -38,8 +38,11 @@
values/1,
values_match/2]).
-% CACHE META-INFO SUPPORT
--export([cache_nodes/0, cache_size/0, map_size/1, maps/0, up/0, stop/0]).
+% Control
+-export([stop/0]).
+
+% Cache Meta-data support
+-export([cache_nodes/0, cache_size/0, map_size/1, maps/0, up/0]).
% Used by jc_s for evict_match
-export([fun_match/3]).
@@ -47,18 +50,18 @@
% Used by eviction manager to evict an entry based on a reference
-export ([delete_record_by_ref/1]).
-
% definitions of global records and types.
-include("../include/records.hrl").
+% ttl is either infinity (0) or an integer > 0.
-define(INFINITY, 0).
-define(VALID(X), is_integer(X) andalso (X >= 0)).
%% =============================================================================
-%% Meta data API
+%% Cache control
%% =============================================================================
@@ -75,6 +78,12 @@ stop()->
ok.
+
+%% =============================================================================
+%% Meta data API
+%% =============================================================================
+
+
%% -----------------------------------------------------------------------------
%% @doc Return a sorted list of all maps currently in the cache.
%%
@@ -136,7 +145,7 @@ cache_nodes() ->
Configured = application:get_env(jc, cache_nodes,[]),
MnesiaUp = jc_store:up_nodes(),
Running = [N || N <- MnesiaUp,
- undefined /= rpc:call(N, erlang, whereis, [jc_bridge], 1000)],
+ is_pid(rpc:call(N, erlang, whereis, [jc_bridge], 1000))],
{{active, lists:sort(Running)}, {configured, lists:sort(Configured)}}.
@@ -213,7 +222,8 @@ put_all(_m, _K, _T) ->
clear(Map) ->
lager:debug("~p: clear map ~p.", [?MODULE, Map]),
- jc_store:clear(Map),
+ F = fun() -> jc_store:clear(Map) end,
+ trans_execute(F),
ok.
diff --git a/src/jc_cluster.erl b/src/jc_cluster.erl
index 146a69f..5ee2600 100644
--- a/src/jc_cluster.erl
+++ b/src/jc_cluster.erl
@@ -113,7 +113,7 @@ get_cluster_id() ->
end.
-% kill this node...
+% kill the application, it will be restarted...
%
kamakazee() ->
lager:notice("~p: ~p seppuku.", [?MODULE, node()]),
@@ -182,15 +182,18 @@ dynamic_db_init([]) ->
{type, ordered_set},
{index, [map, key, ref, create_tm]}
]),
+
mnesia:create_table(seq,
[{attributes, record_info(fields, seq)},
{type, set}
]),
+
mnesia:create_table(to_index,
[{attributes, record_info(fields, to_index)},
{type, set},
{index, [map_name, position]}
]),
+
mnesia:create_table(auto_index,
[{attributes, record_info(fields, auto_index)},
{type, set}
@@ -198,17 +201,21 @@ dynamic_db_init([]) ->
mnesia:create_table(ttl,
[{attributes, record_info(fields, ttl)}
]),
+
mnesia:create_table(max_ttl,
[{attributes, record_info(fields, max_ttl)},
{type, set}
]),
+
mnesia:create_table(stats,
[{attributes, record_info(fields, stats)}
]),
+
mnesia:create_table(ps_sub,
[{attributes, record_info(fields, ps_sub)},
{local_content, true}
]),
+
mnesia:create_table(ps_client,
[{attributes, record_info(fields, ps_client)},
{local_content, true}
diff --git a/src/jc_netsplit.erl b/src/jc_netsplit.erl
index 57d883a..be73d58 100644
--- a/src/jc_netsplit.erl
+++ b/src/jc_netsplit.erl
@@ -28,8 +28,8 @@
%%% node will report different ClusterId - bad.
%%%
%%% For any bad outcome, all nodes having the 'different' ClusterId are killed
-%%% to be restarted by the heart process, and a survivor does a flush.
-%%%
+%%% to be restarted by the heartbeat process, and a survivor may do a flush per
+%%% policy in sys.config.
%%%
%%% @end
%%% Created : 18 May 2016 by Jim Rosenblum <jrosenblum@Jims-MacBook-Pro.local>
@@ -56,9 +56,10 @@
-define(LOCK, {?MODULE, self()}).
-% State: list of configured nodes.
+% State: list of configured nodes, and flush policy on join after net split.
-record(jc_ns_state,
- {nodes = [] :: [Configured::node()]}).
+ {nodes = [] :: [Configured::node()],
+ should_flush = true :: boolean()}).
@@ -90,10 +91,12 @@ start_link() ->
init([]) ->
ok = net_kernel:monitor_nodes(true),
{ok, Configured} = application:get_env(jc, cache_nodes),
+ ShouldFlush = application:get_env(jc, should_flush, false),
lager:info("~p: up and watching events for ~p.", [?SERVER, Configured]),
- {ok, #jc_ns_state{nodes = lists:sort(Configured)}}.
+ {ok, #jc_ns_state{nodes = lists:sort(Configured),
+ should_flush = ShouldFlush}}.
%% -----------------------------------------------------------------------------
@@ -121,8 +124,8 @@ handle_cast(Msg, State) ->
%%
-spec handle_info(any(), #{}) -> {noreply, #{}}.
-handle_info({nodeup, Upped}, #jc_ns_state{nodes = Ns} = State) ->
- check_cluster_health(Upped, Ns),
+handle_info({nodeup, Upped}, #jc_ns_state{nodes = Ns, should_flush = Sf} = State) ->
+ check_cluster_health(Upped, Ns, Sf),
{noreply, State};
handle_info({nodedown, Downed}, State) ->
@@ -191,23 +194,24 @@ do_change(Downed) ->
% all nodes will report the same ClusterId. If not, then 'outliers' should
% kill themselves and let the hearbeat process restart them.
%
-check_cluster_health(Upped, Nodes) ->
+check_cluster_health(Upped, Nodes, ShouldFlush) ->
case is_relevant(Upped, Nodes) of
false ->
ok;
true ->
- check(Upped, Nodes, jc_cluster:get_cluster_id()),
+ check(Upped, Nodes, jc_cluster:get_cluster_id(), ShouldFlush),
ok
end.
-% Ask each node to check that it has the same ClusterId, if not flush.
-% Any node that has a different ClusterId will kill itself and be restarted
-% by the heartbeat process.
+% Ask each node to check that it has the same ClusterId. Any node that has
+% a different ClusterId will kill itself and be restarted by the heartbeat
+% process. If any nodes were killed, flush the entire cache per policy in
+% sys.config
%
-check(Upped, Nodes, ClusterId) ->
+check(Upped, Nodes, ClusterId, ShouldFlush) ->
{Res, _Bad} =
global:trans(?LOCK,
fun() ->
@@ -223,9 +227,13 @@ check(Upped, Nodes, ClusterId) ->
infinity),
case lists:member(bad, Res) of
- true ->
- lager:notice("~p: cluster repaired, flushing.", [?SERVER]),
+ true when ShouldFlush ->
+ lager:notice("~p: cluster repaired, flushing cache per policy.",
+ [?SERVER]),
jc:flush();
+ true when not ShouldFlush ->
+ lager:notice("~p: cluster repaired, not flushing cache per policy.",
+ [?SERVER]);
false ->
lager:notice("~p: cluster showed no signs of inconsistency.",
[?SERVER])
diff --git a/src/jc_store.erl b/src/jc_store.erl
index 4fcbf9b..5cb26fb 100644
--- a/src/jc_store.erl
+++ b/src/jc_store.erl
@@ -130,12 +130,10 @@ maps() ->
-spec clear(map_name()) -> ok.
clear(Map) ->
- F = fun() ->
- Items = mnesia:index_read(key_to_value, Map, #key_to_value.map),
- [mnesia:delete_object(Rec) || Rec <- Items],
- mnesia:delete({seq, Map})
- end,
- mnesia:sync_dirty(F),
+
+ Items = mnesia:index_read(key_to_value, Map, #key_to_value.map),
+ [mnesia:delete_object(Rec) || Rec <- Items],
+ mnesia:delete({seq, Map}),
ok.
diff --git a/test/app.config b/test/app.config
index bc24935..e0fbefb 100644
--- a/test/app.config
+++ b/test/app.config
@@ -2,16 +2,6 @@
%% the jc stanza.
[
- %% SASL config - type of logging not typically used, but might come in handy
- %% log files will be created, but are small and rotated.
- {sasl, [
- {sasl_error_logger, {file, "log/sasl-error.log"}},
- {errlog_type, error},
- {error_logger_mf_dir, "log/sasl"}, % Log directory
- {error_logger_mf_maxbytes, 10485760}, % 10 MB max file size
- {error_logger_mf_maxfiles, 5} % 5 files max
- ]},
-
{mnesia,
[
%% Specifies the maximum number of writes allowed to the transaction log
@@ -31,7 +21,7 @@
{lager,
[
{handlers, [
- {lager_console_backend, info},
+ {lager_console_backend, [{level,info}]},
{lager_file_backend, [{file, "log/error.log"},
{level, error},
{size, 10485760},
diff --git a/test/app2.config b/test/app2.config
index b4b371b..4f94df6 100644
--- a/test/app2.config
+++ b/test/app2.config
@@ -2,16 +2,6 @@
%% the jc stanza.
[
- %% SASL config - type of logging not typically used, but might come in handy
- %% log files will be created, but are small and rotated.
- {sasl, [
- {sasl_error_logger, {file, "log/sasl-error.log"}},
- {errlog_type, error},
- {error_logger_mf_dir, "log/sasl"}, % Log directory
- {error_logger_mf_maxbytes, 10485760}, % 10 MB max file size
- {error_logger_mf_maxfiles, 5} % 5 files max
- ]},
-
{mnesia,
[
%% Specifies the maximum number of writes allowed to the transaction log
@@ -31,7 +21,7 @@
{lager,
[
{handlers, [
- {lager_console_backend, info},
+ {lager_console_backend, [{level, info}]},
{lager_file_backend, [{file, "log/error.log"},
{level, error},
{size, 10485760},
diff --git a/test/jc.coverspec b/test/jc.coverspec
index 7f1f459..ab22f60 100644
--- a/test/jc.coverspec
+++ b/test/jc.coverspec
@@ -30,6 +30,7 @@
jc_bridge,
jc_cluster,
jc_eviction_manager,
+ jc_netsplit,
jc_protocol,
jc_psub,
jc_sequence,
diff --git a/test/jc_SUITE.erl b/test/jc_SUITE.erl
index 898cf35..352d47b 100644
--- a/test/jc_SUITE.erl
+++ b/test/jc_SUITE.erl
@@ -388,9 +388,9 @@ put_all_test(_Config)->
{ok, 100} = bridge({put_all, bed, KVs, 2}),
{ok, TestVs} = bridge({values, bed}),
- lists:sort(Vs) == lists:sort(TestVs),
+ true = (lists:sort(Vs) == lists:sort(TestVs)),
{ok, TestKs} = bridge({key_set, bed}),
- lists:sort(Ks) == lists:sort(TestKs),
+ true = (lists:sort(Ks) == lists:sort(TestKs)),
timer:sleep(2100),
{ok, {[], M}} = bridge({get_all, bed, Ks}),
@@ -754,7 +754,8 @@ remove_items_test(_config) ->
{ok,3} = jc:put_all(bed, [{1, one},{2, two},{3, three}]),
{ok,[{1,one}]} = bridge({remove_items, bed, [1, 22]}),
{ok, []} = bridge({remove_items, bed, [1, 22]}),
- {ok, [2,3]} = jc:key_set(bed),
+ {ok, Result} = jc:key_set(bed),
+ [2,3] = lists:sort(Result),
{ok,[{3, three}, {2, two}]} = bridge({remove_items, bed, [2, 3, 3, 4]}),
{records, 0} = jc:map_size(bed),
jc:flush(),
@@ -762,7 +763,8 @@ remove_items_test(_config) ->
{ok,3} = jc_s:put_all(bed, [{1, one},{2, two},{3, three}], 10),
{ok,[{1,one}]} = bridge({remove_items_s, bed, [1, 22], 11}),
{ok, []} = bridge({remove_items_s, bed, [1, 22], 12}),
- {ok, [2,3]} = jc:key_set(bed),
+ {ok, Result} = jc:key_set(bed),
+ true = ([2,3] == lists:sort(Result)),
{error, out_of_seq} = bridge({remove_items_s, bed, [2, 3, 3, 4], 1}),
{ok,[{3, three}, {2, two}]} = bridge({remove_items_s, bed, [2, 3, 3, 4], 111}),
{records, 0} = jc:map_size(bed).