author	Linus Nordberg <linus@nordu.net>	2015-09-22 21:59:48 +0200
committer	Linus Nordberg <linus@nordu.net>	2015-11-10 12:48:46 +0100
commit	2ab3e4fcf5118b4606cbb98ae5fea8bd64183dec (patch)
tree	5d8a61fb94ee5c4e73e01610aa386da5bd083fe2 /tools/merge_backup.py
parent	a11ef0bb9db8425c4a7e1c879b4a0116d168a1fb (diff)
Split merge.py into three pieces.
Diffstat (limited to 'tools/merge_backup.py')
-rw-r--r--	tools/merge_backup.py	108
1 file changed, 108 insertions(+), 0 deletions(-)
diff --git a/tools/merge_backup.py b/tools/merge_backup.py
new file mode 100644
index 0000000..27c71a5
--- /dev/null
+++ b/tools/merge_backup.py
@@ -0,0 +1,108 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2014-2015, NORDUnet A/S.
+# See LICENSE for licensing information.
+
+import sys
+import base64
+import select
+from certtools import timing_point
+from mergetools import chunks, backup_sendlog, get_logorder, \
+ get_verifiedsize, get_missingentriesforbackup, read_chain, \
+ hexencode, setverifiedsize, sendentry_merge, verifyroot
+
+def merge_backup(args, config, localconfig, sth_in):
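+    """Send new log entries to each secondary merge node, verify its
+    root hash against sth_in and update its verified size."""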
+ paths = localconfig["paths"]
+ own_key = (localconfig["nodename"],
+ "%s/%s-private.pem" % (paths["privatekeys"],
+ localconfig["nodename"]))
+ secondaries = config.get("mergenodes", [])
+ mergedb = paths["mergedb"]
+ chainsdir = mergedb + "/chains"
+ logorderfile = mergedb + "/logorder"
+ timing = timing_point()
+
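+    # Read the current ordering of all entries in the log.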
+ logorder = get_logorder(logorderfile)
+ timing_point(timing, "get logorder")
+
+ (tree_size, root_hash, _) = sth_in
+
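+    # Back up to every secondary merge node; the primary (this node) is skipped.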
+ for secondary in secondaries:
+ if secondary["name"] == config["primarymergenode"]:
+ continue
+ nodeaddress = "https://%s/" % secondary["address"]
+ nodename = secondary["name"]
+ timing = timing_point()
+ print >>sys.stderr, "backing up to node", nodename
+ sys.stderr.flush()
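+        # Ask the secondary how much of the log it has already verified,
+        # so only the entries after that point need to be sent.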
+ verifiedsize = get_verifiedsize(nodename, nodeaddress, own_key, paths)
+ timing_point(timing, "get verified size")
+ print >>sys.stderr, "verified size", verifiedsize
+ sys.stderr.flush()
+ entries = [base64.b64encode(entry) for entry in logorder[verifiedsize:]]
+ print >>sys.stderr, "sending log:",
+ sys.stderr.flush()
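+        # Send the remaining log hashes in chunks of 1000, retrying each
+        # chunk up to five times with a ten second pause between attempts.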
+ for chunk in chunks(entries, 1000):
+ for trynumber in range(5, 0, -1):
+ sendlogresult = \
+ backup_sendlog(nodename, nodeaddress, own_key, paths,
+ {"start": verifiedsize, "hashes": chunk})
+                if sendlogresult is None:
+ if trynumber == 1:
+ sys.exit(1)
+                    select.select([], [], [], 10.0)  # no fds to watch: just wait ten seconds
+ print >>sys.stderr, "tries left:", trynumber
+ sys.stderr.flush()
+ continue
+ break
+ if sendlogresult["result"] != "ok":
+ print >>sys.stderr, "sendlog:", sendlogresult
+ sys.exit(1)
+ verifiedsize += len(chunk)
+ print >>sys.stderr, verifiedsize,
+ sys.stderr.flush()
+ print >>sys.stderr
+ timing_point(timing, "sendlog")
+ print >>sys.stderr, "log sent"
+ sys.stderr.flush()
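+        # Ask the secondary which entries it is still missing and send
+        # each one from the local chains directory.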
+ missingentries = get_missingentriesforbackup(nodename, nodeaddress,
+ own_key, paths)
+ timing_point(timing, "get missing")
+ print >>sys.stderr, "missing entries:", len(missingentries)
+ sys.stderr.flush()
+ fetched_entries = 0
+ print >>sys.stderr, "fetching missing entries",
+ sys.stderr.flush()
+ for missingentry in missingentries:
+ ehash = base64.b64decode(missingentry)
+ sendentryresult = sendentry_merge(nodename, nodeaddress,
+ own_key, paths,
+ read_chain(chainsdir, ehash),
+ ehash)
+ if sendentryresult["result"] != "ok":
+                print >>sys.stderr, "sendentry:", sendentryresult
+ sys.exit(1)
+ fetched_entries += 1
+ if fetched_entries % 1000 == 0:
+ print >>sys.stderr, fetched_entries,
+ sys.stderr.flush()
+ print >>sys.stderr
+ sys.stderr.flush()
+ timing_point(timing, "send missing")
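+        # Make sure the secondary's root hash at tree_size matches ours
+        # before recording tree_size as its new verified size.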
+ verifyrootresult = verifyroot(nodename, nodeaddress, own_key, paths,
+ tree_size)
+ if verifyrootresult["result"] != "ok":
+ print >>sys.stderr, "verifyroot:", verifyrootresult
+ sys.exit(1)
+ secondary_root_hash = base64.b64decode(verifyrootresult["root_hash"])
+ if root_hash != secondary_root_hash:
+ print >>sys.stderr, "secondary root hash was", \
+ hexencode(secondary_root_hash)
+ print >>sys.stderr, " expected", hexencode(root_hash)
+ sys.exit(1)
+ timing_point(timing, "verifyroot")
+ setverifiedsize(nodename, nodeaddress, own_key, paths, tree_size)
+ if args.timing:
+ print >>sys.stderr, timing["deltatimes"]
+ sys.stderr.flush()