author    | Linus Nordberg <linus@nordu.net> | 2016-12-03 16:35:28 +0100
committer | Linus Nordberg <linus@nordu.net> | 2016-12-03 16:35:28 +0100
commit    | 1befdbb267f9c0960b9049cb551b470dbac15ae1 (patch)
tree      | 65e26895321fb00467b1cebdfd8a85d6a0bc2ff6 /tools
parent    | d1fc3a48ff50f85ff5e3098edb3c0774d110af23 (diff)
merge_backup: Print timing info for each secondary.
Also, prefix all log entries with the name of the secondary merge node
being operated on.
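The patch achieves the prefix by threading `nodename` into every `logging` call by hand. For illustration only (not what this commit does), the standard library's `logging.LoggerAdapter` can produce the same `<node>: message` prefix without touching each call site; the node name `ssec-1` below is hypothetical:

```python
import logging

class NodePrefixAdapter(logging.LoggerAdapter):
    """Prepend the secondary node's name to every log message."""
    def process(self, msg, kwargs):
        return "%s: %s" % (self.extra["nodename"], msg), kwargs

logging.basicConfig(level=logging.INFO)
log = NodePrefixAdapter(logging.getLogger("merge_backup"),
                        {"nodename": "ssec-1"})
log.info("sending log")        # -> "ssec-1: sending log"
log.info("tries left: %d", 4)  # -> "ssec-1: tries left: 4"
```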
Diffstat (limited to 'tools')
-rwxr-xr-x | tools/merge_backup.py | 38
1 file changed, 21 insertions, 17 deletions
```diff
diff --git a/tools/merge_backup.py b/tools/merge_backup.py
index d1b6d93..8a44c65 100755
--- a/tools/merge_backup.py
+++ b/tools/merge_backup.py
@@ -33,7 +33,7 @@ def backup_loop(nodename, nodeaddress, own_key, paths, verifiedsize, chunk):
             if trynumber == 1:
                 return None
             select.select([], [], [], 10.0)
-            logging.info("tries left: %d", trynumber)
+            logging.info("%s: tries left: %d", nodename, trynumber)
             continue
         return sendlogresult
 
@@ -41,17 +41,17 @@ sendlog_discover_chunksize = 100000
 
 def sendlog_helper(entries, verifiedsize, nodename, nodeaddress, own_key, paths,
                    statusupdates):
-    logging.info("sending log")
+    logging.info("%s: sending log", nodename)
     for chunk in chunks(entries, 1000):
         sendlogresult = backup_loop(nodename, nodeaddress, own_key, paths,
                                     verifiedsize, chunk)
         if sendlogresult == None:
             sys.exit(1)
         if sendlogresult["result"] != "ok":
-            logging.error("backup_sendlog: %s", sendlogresult)
+            logging.error("%s: backup_sendlog: %s", nodename, sendlogresult)
             sys.exit(1)
         verifiedsize += len(chunk)
         statusupdates.status("PROG sending log: %d" % verifiedsize)
-    logging.info("log sent")
+    logging.info("%s: log sent", nodename)
 
 def fill_in_missing_entries(nodename, nodeaddress, own_key, paths, chainsdb,
                             timing, statusupdates):
@@ -60,8 +60,8 @@ def fill_in_missing_entries(nodename, nodeaddress, own_key, paths, chainsdb,
     timing_point(timing, "get missing")
 
     while missingentries:
-        logging.info("about to send %d missing entries", len(missingentries))
-
+        logging.info("%s: about to send %d missing entries", nodename,
+                     len(missingentries))
         fetched_entries = 0
         with requests.sessions.Session() as session:
             for missingentry_chunk in chunks(missingentries, 100):
@@ -71,7 +71,8 @@ def fill_in_missing_entries(nodename, nodeaddress, own_key, paths, chainsdb,
                                                   own_key, paths,
                                                   hashes_and_entries, session)
                 if sendentryresult["result"] != "ok":
-                    logging.error("sendentries_merge: %s", sendentryresult)
+                    logging.error("%s: sendentries_merge: %s", nodename,
+                                  sendentryresult)
                     sys.exit(1)
                 fetched_entries += len(missingentry_hashes)
                 statusupdates.status("PROG sending missing entries: %d" %
@@ -89,18 +90,19 @@ def check_root(logorder, nodename, nodeaddress, own_key, paths, tree_size, timin
     verifyrootresult = verifyroot(nodename, nodeaddress, own_key, paths,
                                   tree_size)
     if verifyrootresult["result"] != "ok":
-        logging.error("verifyroot: %s", verifyrootresult)
+        logging.error("%s: verifyroot: %s", nodename, verifyrootresult)
         sys.exit(1)
     secondary_root_hash = b64decode(verifyrootresult["root_hash"])
     if root_hash != secondary_root_hash:
-        logging.error("secondary root hash was %s while expected was %s",
-                      hexencode(secondary_root_hash), hexencode(root_hash))
+        logging.error("%s: secondary root hash was %s while expected was %s",
+                      nodename, hexencode(secondary_root_hash),
+                      hexencode(root_hash))
         sys.exit(1)
     timing_point(timing, "verifyroot")
     return root_hash
 
 def do_send(backupargs):
-    secondary, localconfig, chainsdb, logorder, s, timing = backupargs
+    args, secondary, localconfig, chainsdb, logorder, s, timing = backupargs
     maxwindow = localconfig.get("maxwindow", 1000)
     paths = localconfig["paths"]
     nodename = secondary["name"]
@@ -110,10 +112,10 @@ def do_send(backupargs):
                                             localconfig["nodename"]))
     tree_size = len(logorder)
-    logging.info("backing up to node %s", nodename)
+    logging.info("%s: backing up", nodename)
     verifiedsize = get_verifiedsize(nodename, nodeaddress, own_key, paths)
     timing_point(timing, "get verified size")
-    logging.info("verified size %d", verifiedsize)
+    logging.info("%s: verified size %d", nodename, verifiedsize)
 
     if verifiedsize == tree_size:
         root_hash = check_root(logorder, nodename, nodeaddress, own_key,
                                paths, tree_size, timing)
@@ -131,13 +133,17 @@ def do_send(backupargs):
         verifiedsize = uptopos
         setverifiedsize(nodename, nodeaddress, own_key, paths, verifiedsize)
+
+    if args.timing:
+        logging.debug("%s: timing: merge_backup: %s", nodename, timing["deltatimes"])
+
     return root_hash
 
 def update_backupfile(mergedb, nodename, tree_size, root_hash):
     backuppath = mergedb + "/verified." + nodename
     backupdata = {"tree_size": tree_size,
                   "sha256_root_hash": hexencode(root_hash)}
-    logging.debug("writing to %s: %s", backuppath, backupdata)
+    logging.debug("%s: writing to %s: %s", nodename, backuppath, backupdata)
     write_file(backuppath, backupdata)
 
 def merge_backup(args, config, localconfig, secondaries):
@@ -165,7 +171,7 @@ def merge_backup(args, config, localconfig, secondaries):
         nodename = secondary["name"]
         timing = timing_point()
 
-        backupargs = (secondary, localconfig, chainsdb, logorder, s, timing)
+        backupargs = (args, secondary, localconfig, chainsdb, logorder, s, timing)
         if args.mergeinterval:
             name = 'backup_%s' % nodename
             p, pipe = start_worker(name,
@@ -193,8 +199,6 @@ def merge_backup(args, config, localconfig, secondaries):
                 break
             sleep(1)
 
-    if args.timing:
-        logging.debug("timing: merge_backup: %s", timing["deltatimes"])
     return failures
 
 def main():
```
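The timing half of the change moves the `args.timing` log line from `merge_backup` into `do_send`, which is why `args` joins the `backupargs` tuple: each secondary can be backed up in its own worker process, and each worker holds its own per-node `timing` dict, so only the worker can report it. A minimal sketch of that shape, with a simplified stand-in for the real tuple and helpers (not the project's code):

```python
import logging
from multiprocessing import Process

def do_send(backupargs):
    # `args` travels inside the tuple so the worker process can check
    # args.timing itself and log its own node's deltas.
    args, nodename, timing = backupargs
    # ... back up this secondary, recording steps in timing["deltatimes"] ...
    if args.timing:
        logging.debug("%s: timing: merge_backup: %s",
                      nodename, timing["deltatimes"])

def merge_backup(args, secondaries):
    for secondary in secondaries:
        timing = {"deltatimes": []}  # one timing dict per secondary
        backupargs = (args, secondary["name"], timing)
        Process(target=do_send, args=(backupargs,)).start()
```

Logging inside the worker also means each line carries the right node name prefix, which is what makes the per-secondary timing output readable when several backups run concurrently.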