From b4dc2c37be0ed51b629e91f4d3e1dfc44cb192c0 Mon Sep 17 00:00:00 2001
From: Magnus Ahltorp
Date: Wed, 2 Nov 2016 03:34:35 +0100
Subject: Send list of entries to frontend

Handle connection error
---
 tools/merge_backup.py |  2 +-
 tools/merge_dist.py   | 52 +++++++++++++++++++++++++++++++--------------------
 tools/mergetools.py   | 26 +++++++++++++++++---------
 3 files changed, 50 insertions(+), 30 deletions(-)

diff --git a/tools/merge_backup.py b/tools/merge_backup.py
index abe9f36..05679a1 100755
--- a/tools/merge_backup.py
+++ b/tools/merge_backup.py
@@ -124,7 +124,7 @@ def merge_backup(args, config, localconfig, secondaries):
                                                     own_key, paths,
                                                     hashes_and_entries, session)
                 if sendentryresult["result"] != "ok":
-                    print >>sys.stderr, "sendentry_merge:", sendentryresult
+                    print >>sys.stderr, "sendentries_merge:", sendentryresult
                     sys.exit(1)
                 fetched_entries += len(missingentry_hashes)
                 print >>sys.stderr, fetched_entries,
diff --git a/tools/merge_dist.py b/tools/merge_dist.py
index 2af1d6c..9d66cfd 100755
--- a/tools/merge_dist.py
+++ b/tools/merge_dist.py
@@ -6,12 +6,14 @@
 
 import sys
 import json
+import base64
+import requests
 from time import sleep
 from base64 import b64encode, b64decode
 from certtools import timing_point, \
     create_ssl_context
 from mergetools import get_curpos, get_logorder, chunks, get_missingentries, \
-    sendsth, sendlog, sendentry, parse_args, perm
+    sendsth, sendlog, sendentries, parse_args, perm
 
 def merge_dist(args, localconfig, frontendnodes, timestamp):
     paths = localconfig["paths"]
@@ -84,25 +86,35 @@ def merge_dist(args, localconfig, frontendnodes, timestamp):
                                           paths)
         timing_point(timing, "get missing")
 
-        print >>sys.stderr, "missing entries:", len(missingentries)
-        sys.stderr.flush()
-        sent_entries = 0
-        print >>sys.stderr, "send missing entries",
-        sys.stderr.flush()
-        for missingentry in missingentries:
-            ehash = b64decode(missingentry)
-            sendentryresult = sendentry(nodename, nodeaddress, own_key, paths,
-                                        chainsdb.get(ehash), ehash)
-            if sendentryresult["result"] != "ok":
-                print >>sys.stderr, "sendentry:", sendentryresult
-                sys.exit(1)
-            sent_entries += 1
-            if sent_entries % 1000 == 0:
-                print >>sys.stderr, sent_entries,
-                sys.stderr.flush()
-        print >>sys.stderr
-        sys.stderr.flush()
-        timing_point(timing, "send missing")
+
+        while missingentries:
+            print >>sys.stderr, "missing entries:", len(missingentries)
+            sys.stderr.flush()
+
+            sent_entries = 0
+            print >>sys.stderr, "sending missing entries",
+            sys.stderr.flush()
+            with requests.sessions.Session() as session:
+                for missingentry_chunk in chunks(missingentries, 100):
+                    missingentry_hashes = [base64.b64decode(missingentry) for missingentry in missingentry_chunk]
+                    hashes_and_entries = [(hash, chainsdb.get(hash)) for hash in missingentry_hashes]
+                    sendentryresult = sendentries(nodename, nodeaddress,
+                                                  own_key, paths,
+                                                  hashes_and_entries, session)
+                    if sendentryresult["result"] != "ok":
+                        print >>sys.stderr, "sendentries:", sendentryresult
+                        sys.exit(1)
+                    sent_entries += len(missingentry_hashes)
+                    print >>sys.stderr, sent_entries,
+                    sys.stderr.flush()
+            print >>sys.stderr
+            sys.stderr.flush()
+            timing_point(timing, "send missing")
+
+            missingentries = get_missingentries(nodename, nodeaddress,
+                                                own_key, paths)
+            timing_point(timing, "get missing")
 
     print >>sys.stderr, "sending sth to node", nodename
     sys.stderr.flush()
diff --git a/tools/mergetools.py b/tools/mergetools.py
index 80fbf0b..bea09e9 100644
--- a/tools/mergetools.py
+++ b/tools/mergetools.py
@@ -258,6 +258,10 @@ def backup_sendlog(node, baseurl, own_key, paths, submission):
         print >>sys.stderr, "ERROR: backup_sendlog", e.response
         sys.stderr.flush()
         return None
+    except requests.packages.urllib3.exceptions.NewConnectionError, e:
+        print >>sys.stderr, "ERROR: backup_sendlog new connection error"
+        sys.stderr.flush()
+        return None
     except ValueError, e:
         print >>sys.stderr, "==== FAILED REQUEST ===="
         print >>sys.stderr, submission
@@ -267,16 +271,17 @@ def backup_sendlog(node, baseurl, own_key, paths, submission):
         sys.stderr.flush()
         raise e
 
-def sendentry(node, baseurl, own_key, paths, entry, ehash):
+def sendentries(node, baseurl, own_key, paths, entries, session=None):
     try:
+        json_entries = [{"entry":base64.b64encode(entry), "treeleafhash":base64.b64encode(hash)} for hash, entry in entries]
         result = http_request(
             baseurl + "plop/v1/frontend/sendentry",
-            json.dumps({"entry":base64.b64encode(entry),
-                        "treeleafhash":base64.b64encode(ehash)}),
-            key=own_key, verifynode=node, publickeydir=paths["publickeys"])
+            json.dumps(json_entries),
+            key=own_key, verifynode=node, publickeydir=paths["publickeys"],
+            session=session)
         return json.loads(result)
     except requests.exceptions.HTTPError, e:
-        print >>sys.stderr, "ERROR: sendentry", e.reponse
+        print >>sys.stderr, "ERROR: sendentries", e.response
         sys.exit(1)
     except ValueError, e:
         print >>sys.stderr, "==== FAILED REQUEST ===="
@@ -286,9 +291,9 @@
         print >>sys.stderr, "========================"
         sys.stderr.flush()
         raise e
-
-def sendentry_merge(node, baseurl, own_key, paths, entry, ehash):
-    return sendentries_merge(node, baseurl, own_key, paths, [(ehash, entry)])
+    except requests.exceptions.ConnectionError, e:
+        print >>sys.stderr, "ERROR: sendentries", baseurl, e.request, e.response
+        sys.exit(1)
 
 def sendentries_merge(node, baseurl, own_key, paths, entries, session=None):
     try:
@@ -300,7 +305,7 @@ def sendentries_merge(node, baseurl, own_key, paths, entries, session=None):
             session=session)
         return json.loads(result)
     except requests.exceptions.HTTPError, e:
-        print >>sys.stderr, "ERROR: sendentry_merge", e.response
+        print >>sys.stderr, "ERROR: sendentries_merge", e.response
         sys.exit(1)
     except ValueError, e:
         print >>sys.stderr, "==== FAILED REQUEST ===="
@@ -310,6 +315,9 @@ def sendentries_merge(node, baseurl, own_key, paths, entries, session=None):
         print >>sys.stderr, "========================"
         sys.stderr.flush()
         raise e
+    except requests.exceptions.ConnectionError, e:
+        print >>sys.stderr, "ERROR: sendentries_merge", baseurl, e.request, e.response
+        sys.exit(1)
 
 def sendsth(node, baseurl, own_key, paths, submission):
     try:
--
cgit v1.1
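The patch above batches entry submission: instead of one signed POST per entry (sendentry), the merge node now sends up to 100 entries per request (sendentries) and keeps one TLS connection open for the whole run through a requests session. A minimal sketch of that calling pattern, assuming the same JSON wire format as sendentries() above; the real code signs each request via http_request(), which this sketch omits, and baseurl/chainsdb are placeholders:

    import base64
    import json
    import requests

    def send_in_batches(baseurl, hashes, chainsdb, batchsize=100):
        # One session = one pooled TLS connection for all batches.
        sent = 0
        with requests.sessions.Session() as session:
            for i in range(0, len(hashes), batchsize):
                chunk = hashes[i:i+batchsize]
                # Same payload shape as sendentries(): a JSON list of
                # {"entry": ..., "treeleafhash": ...}, both base64-encoded.
                payload = [{"entry": base64.b64encode(chainsdb.get(h)),
                            "treeleafhash": base64.b64encode(h)}
                           for h in chunk]
                response = session.post(baseurl + "plop/v1/frontend/sendentry",
                                        data=json.dumps(payload))
                response.raise_for_status()
                sent += len(chunk)
        return sent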
From 6659a1c08dda0d6ec20f945e135b23b544db55a4 Mon Sep 17 00:00:00 2001
From: Magnus Ahltorp
Date: Wed, 2 Nov 2016 12:55:20 +0100
Subject: Added performance tests
---
 tools/parsebench.py | 19 ++++++++++++-------
 1 file changed, 12 insertions(+), 7 deletions(-)

diff --git a/tools/parsebench.py b/tools/parsebench.py
index 96c36d6..671e482 100755
--- a/tools/parsebench.py
+++ b/tools/parsebench.py
@@ -10,8 +10,8 @@ import itertools
 
 def parse_one_line(line):
     row = line.rstrip().split(":")
-    assert row[0].startswith("mergeoutput.")
-    iteration = int(row[0][12:])
+    assert row[0].startswith("bench-")
+    iteration = int(row[0][6:])
     stage = row[2].strip()
     data = ast.literal_eval(row[3].strip())
     return (iteration, stage, data)
@@ -30,29 +30,34 @@ def main():
     print ""
     print ""
 
+scale = 0.25
+
 def parse_one_file(filename):
     lines = [parse_one_line(line) for line in open(filename)]
     iterations = itertools.groupby(lines, lambda x: x[0])
     print "%s" % (filename,)
     print ""
     legend = []
+    firsttime = True
     for (i, iteration) in iterations:
-        print ""
+        print >>sys.stderr, (i, iteration)
+        print ""
         for (stagen, (_, stage, data)) in enumerate(iteration):
-            if i == 0:
+            if firsttime:
                 legend.append("%s" % (stage,))
             data = list(data)
             for (itemn, (item, useconds)) in enumerate(data):
                 seconds = useconds / 1000000
                 step = 50 / (len(data) - 1)
-                print ""
-                if i == 0:
+                if firsttime:
                     legend.append("")
-                print "" % (seconds/4, stagen * 90, itemn * step + 40, stage, item, seconds)
+                print "" % (int(seconds*scale), stagen * 90, itemn * step + 40, stage, item, seconds)
-                if i == 0:
+                if firsttime:
                     legend.append("%s" % (stagen * 90, itemn * step + 40, item))
                 print " "
         print ""
+        firsttime = False
     print ""
     print ""
     print ""
--
cgit v1.1
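parse_one_line() above fixes the expected input format: colon-separated records whose first field is now a "bench-" label carrying the iteration number, whose third field names a stage, and whose fourth field is a Python literal list of (item, microseconds) pairs; the second field is unused. A hedged round-trip example (the sample record is invented for illustration):

    import ast

    def parse_one_line(line):
        row = line.rstrip().split(":")
        assert row[0].startswith("bench-")
        iteration = int(row[0][6:])          # "bench-2" -> 2
        stage = row[2].strip()
        data = ast.literal_eval(row[3].strip())
        return (iteration, stage, data)

    # Invented sample line; row[1] ("ts") is ignored by the parser.
    print parse_one_line('bench-2: ts : merge_backup : [("sendlog", 1200000), ("verifyroot", 350000)]')
    # -> (2, 'merge_backup', [('sendlog', 1200000), ('verifyroot', 350000)])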
From b48c1689e36bdcc65a34b4ab12763478b072a716 Mon Sep 17 00:00:00 2001
From: Magnus Ahltorp
Date: Wed, 2 Nov 2016 13:10:37 +0100
Subject: Change algorithm for merge backup and merge dist
---
 tools/compileconfig.py |   3 +-
 tools/merge_backup.py  | 166 ++++++++++++++++++++++++-------------------------
 tools/merge_dist.py    | 140 +++++++++++++++++++++++------------------
 tools/mergetools.py    |  25 +++++++-
 4 files changed, 185 insertions(+), 149 deletions(-)

diff --git a/tools/compileconfig.py b/tools/compileconfig.py
index 7ba2fac..1fa352e 100755
--- a/tools/compileconfig.py
+++ b/tools/compileconfig.py
@@ -129,7 +129,8 @@ def allowed_clients_frontend(mergenodenames, primarymergenode):
     return [
         ("/plop/v1/frontend/sendentry", mergenodenames),
         ("/plop/v1/frontend/sendlog", mergenodenames),
-        ("/plop/v1/frontend/sendsth", [primarymergenode]),
+        ("/plop/v1/frontend/publish-sth", [primarymergenode]),
+        ("/plop/v1/frontend/verify-entries", [primarymergenode]),
         ("/plop/v1/frontend/currentposition", mergenodenames),
         ("/plop/v1/frontend/missingentries", mergenodenames),
     ]
diff --git a/tools/merge_backup.py b/tools/merge_backup.py
index 05679a1..2c17d90 100755
--- a/tools/merge_backup.py
+++ b/tools/merge_backup.py
@@ -9,6 +9,7 @@ import base64
 import select
 import requests
 from time import sleep
+from base64 import b64encode, b64decode
 from certtools import timing_point, build_merkle_tree, write_file, \
     create_ssl_context
 from mergetools import chunks, backup_sendlog, get_logorder, \
@@ -30,8 +31,78 @@ def backup_loop(nodename, nodeaddress, own_key, paths, verifiedsize, chunk):
             continue
         return sendlogresult
 
+sendlog_discover_chunksize = 100000
+
+def sendlog_helper(entries, verifiedsize, nodename, nodeaddress, own_key, paths):
+    print >>sys.stderr, "sending log:",
+    sys.stderr.flush()
+    for chunk in chunks(entries, 1000):
+        sendlogresult = backup_loop(nodename, nodeaddress, own_key, paths, verifiedsize, chunk)
+        if sendlogresult == None:
+            sys.exit(1)
+        if sendlogresult["result"] != "ok":
+            print >>sys.stderr, "backup_sendlog:", sendlogresult
+            sys.exit(1)
+        verifiedsize += len(chunk)
+        print >>sys.stderr, verifiedsize,
+        sys.stderr.flush()
+    print >>sys.stderr
+    print >>sys.stderr, "log sent"
+    sys.stderr.flush()
+
+def fill_in_missing_entries(nodename, nodeaddress, own_key, paths, chainsdb, timing):
+    missingentries = get_missingentriesforbackup(nodename, nodeaddress,
+                                                 own_key, paths)
+    timing_point(timing, "get missing")
+
+    while missingentries:
+        print >>sys.stderr, "missing entries:", len(missingentries)
+        sys.stderr.flush()
+
+        fetched_entries = 0
+        print >>sys.stderr, "sending missing entries",
+        sys.stderr.flush()
+        with requests.sessions.Session() as session:
+            for missingentry_chunk in chunks(missingentries, 100):
+                missingentry_hashes = [base64.b64decode(missingentry) for missingentry in missingentry_chunk]
+                hashes_and_entries = [(hash, chainsdb.get(hash)) for hash in missingentry_hashes]
+                sendentryresult = sendentries_merge(nodename, nodeaddress,
+                                                    own_key, paths,
+                                                    hashes_and_entries, session)
+                if sendentryresult["result"] != "ok":
+                    print >>sys.stderr, "sendentries_merge:", sendentryresult
+                    sys.exit(1)
+                fetched_entries += len(missingentry_hashes)
+                #print >>sys.stderr, fetched_entries,
+                #sys.stderr.flush()
+        print >>sys.stderr
+        sys.stderr.flush()
+        timing_point(timing, "send missing")
+
+        missingentries = get_missingentriesforbackup(nodename, nodeaddress,
+                                                     own_key, paths)
+        timing_point(timing, "get missing")
+
+def check_root(logorder, nodename, nodeaddress, own_key, paths, tree_size, timing):
+    tree = build_merkle_tree(logorder[:tree_size])
+    root_hash = tree[-1][0]
+    timing_point(timing, "build tree")
+    verifyrootresult = verifyroot(nodename, nodeaddress, own_key, paths,
+                                  tree_size)
+    if verifyrootresult["result"] != "ok":
+        print >>sys.stderr, "verifyroot:", verifyrootresult
+        sys.exit(1)
+    secondary_root_hash = base64.b64decode(verifyrootresult["root_hash"])
+    if root_hash != secondary_root_hash:
+        print >>sys.stderr, "secondary root hash was", \
+            hexencode(secondary_root_hash)
+        print >>sys.stderr, "  expected", hexencode(root_hash)
+        sys.exit(1)
+    timing_point(timing, "verifyroot")
+    return root_hash
+
 def merge_backup(args, config, localconfig, secondaries):
+    maxwindow = localconfig.get("maxwindow", 1000)
     paths = localconfig["paths"]
     own_key = (localconfig["nodename"],
                "%s/%s-private.pem" % (paths["privatekeys"],
@@ -48,10 +119,6 @@ def merge_backup(args, config, localconfig, secondaries):
     tree_size = len(logorder)
     timing_point(timing, "get logorder")
 
-    tree = build_merkle_tree(logorder)
-    root_hash = tree[-1][0]
-    timing_point(timing, "build tree")
-
     for secondary in secondaries:
         if secondary["name"] == config["primarymergenode"]:
             continue
@@ -65,92 +132,23 @@ def merge_backup(args, config, localconfig, secondaries):
         print >>sys.stderr, "verified size", verifiedsize
         sys.stderr.flush()
 
-        entries = [base64.b64encode(entry) for entry in logorder[verifiedsize:]]
-
-        print >>sys.stderr, "determining end of log:",
-        for chunk in chunks(entries, 100000):
-            sendlogresult = backup_loop(nodename, nodeaddress, own_key, paths, verifiedsize, chunk[:10])
-            if sendlogresult == None:
-                print >>sys.stderr, "sendlog result was None"
-                sys.exit(1)
-            if sendlogresult["result"] != "ok":
-                print >>sys.stderr, "backup_sendlog:", sendlogresult
-                sys.exit(1)
-            verifiedsize += len(chunk)
-            print >>sys.stderr, verifiedsize,
-            sys.stderr.flush()
-
-        if verifiedsize > 100000:
-            verifiedsize -= 100000
+        if verifiedsize == tree_size:
+            root_hash = check_root(logorder, nodename, nodeaddress, own_key, paths, tree_size, timing)
         else:
-            verifiedsize = 0
+            while verifiedsize < tree_size:
+                uptopos = min(verifiedsize + maxwindow, tree_size)
 
-        timing_point(timing, "checklog")
+                entries = [b64encode(entry) for entry in logorder[verifiedsize:uptopos]]
+                sendlog_helper(entries, verifiedsize, nodename, nodeaddress, own_key, paths)
+                timing_point(timing, "sendlog")
 
-        entries = [base64.b64encode(entry) for entry in logorder[verifiedsize:]]
-        print >>sys.stderr, "sending log:",
-        sys.stderr.flush()
-        for chunk in chunks(entries, 1000):
-            sendlogresult = backup_loop(nodename, nodeaddress, own_key, paths, verifiedsize, chunk)
-            if sendlogresult == None:
-                sys.exit(1)
-            if sendlogresult["result"] != "ok":
-                print >>sys.stderr, "backup_sendlog:", sendlogresult
-                sys.exit(1)
-            verifiedsize += len(chunk)
-            print >>sys.stderr, verifiedsize,
-            sys.stderr.flush()
-        print >>sys.stderr
-        timing_point(timing, "sendlog")
-        print >>sys.stderr, "log sent"
-        sys.stderr.flush()
+                fill_in_missing_entries(nodename, nodeaddress, own_key, paths, chainsdb, timing)
 
-        missingentries = get_missingentriesforbackup(nodename, nodeaddress,
-                                                     own_key, paths)
-        timing_point(timing, "get missing")
-
-        while missingentries:
-            print >>sys.stderr, "missing entries:", len(missingentries)
-            sys.stderr.flush()
+                root_hash = check_root(logorder, nodename, nodeaddress, own_key, paths, uptopos, timing)
 
-            fetched_entries = 0
-            print >>sys.stderr, "fetching missing entries",
-            sys.stderr.flush()
-            with requests.sessions.Session() as session:
-                for missingentry_chunk in chunks(missingentries, 100):
-                    missingentry_hashes = [base64.b64decode(missingentry) for missingentry in missingentry_chunk]
-                    hashes_and_entries = [(hash, chainsdb.get(hash)) for hash in missingentry_hashes]
-                    sendentryresult = sendentries_merge(nodename, nodeaddress,
-                                                        own_key, paths,
-                                                        hashes_and_entries, session)
-                    if sendentryresult["result"] != "ok":
-                        print >>sys.stderr, "sendentries_merge:", sendentryresult
-                        sys.exit(1)
-                    fetched_entries += len(missingentry_hashes)
-                    print >>sys.stderr, fetched_entries,
-                    sys.stderr.flush()
-            print >>sys.stderr
-            sys.stderr.flush()
-            timing_point(timing, "send missing")
-
-            missingentries = get_missingentriesforbackup(nodename, nodeaddress,
-                                                         own_key, paths)
-            timing_point(timing, "get missing")
-
-        verifyrootresult = verifyroot(nodename, nodeaddress, own_key, paths,
-                                      tree_size)
-        if verifyrootresult["result"] != "ok":
-            print >>sys.stderr, "verifyroot:", verifyrootresult
-            sys.exit(1)
-        secondary_root_hash = base64.b64decode(verifyrootresult["root_hash"])
-        if root_hash != secondary_root_hash:
-            print >>sys.stderr, "secondary root hash was", \
-                hexencode(secondary_root_hash)
-            print >>sys.stderr, "  expected", hexencode(root_hash)
-            sys.exit(1)
-        timing_point(timing, "verifyroot")
+                verifiedsize = uptopos
+                setverifiedsize(nodename, nodeaddress, own_key, paths, verifiedsize)
 
-    setverifiedsize(nodename, nodeaddress, own_key, paths, tree_size)
     backuppath = mergedb + "/verified." + nodename
     backupdata = {"tree_size": tree_size,
                   "sha256_root_hash": hexencode(root_hash)}
diff --git a/tools/merge_dist.py b/tools/merge_dist.py
index 9d66cfd..ded25a1 100755
--- a/tools/merge_dist.py
+++ b/tools/merge_dist.py
@@ -13,9 +13,71 @@
 from time import sleep
 from base64 import b64encode, b64decode
 from certtools import timing_point, \
     create_ssl_context
 from mergetools import get_curpos, get_logorder, chunks, get_missingentries, \
-    sendsth, sendlog, sendentries, parse_args, perm
+    publish_sth, sendlog, sendentries, parse_args, perm, get_frontend_verifiedsize, \
+    frontend_verify_entries
+
+def sendlog_helper(entries, curpos, nodename, nodeaddress, own_key, paths):
+    print >>sys.stderr, "sending log:",
+    sys.stderr.flush()
+    for chunk in chunks(entries, 1000):
+        for trynumber in range(5, 0, -1):
+            sendlogresult = sendlog(nodename, nodeaddress,
+                                    own_key, paths,
+                                    {"start": curpos, "hashes": chunk})
+            if sendlogresult == None:
+                if trynumber == 1:
+                    sys.exit(1)
+                sleep(10)
+                print >>sys.stderr, "tries left:", trynumber
+                sys.stderr.flush()
+                continue
+            break
+        if sendlogresult["result"] != "ok":
+            print >>sys.stderr, "sendlog:", sendlogresult
+            sys.exit(1)
+        curpos += len(chunk)
+        print >>sys.stderr, curpos,
+        sys.stderr.flush()
+    print >>sys.stderr
+    print >>sys.stderr, "log sent"
+    sys.stderr.flush()
+
+def fill_in_missing_entries(nodename, nodeaddress, own_key, paths, chainsdb, timing):
+    missingentries = get_missingentries(nodename, nodeaddress, own_key,
+                                        paths)
+    timing_point(timing, "get missing")
+
+    while missingentries:
+        print >>sys.stderr, "missing entries:", len(missingentries)
+        sys.stderr.flush()
+
+        sent_entries = 0
+        print >>sys.stderr, "sending missing entries",
+        sys.stderr.flush()
+        with requests.sessions.Session() as session:
+            for missingentry_chunk in chunks(missingentries, 100):
+                missingentry_hashes = [base64.b64decode(missingentry) for missingentry in missingentry_chunk]
+                hashes_and_entries = [(hash, chainsdb.get(hash)) for hash in missingentry_hashes]
+                sendentryresult = sendentries(nodename, nodeaddress,
+                                              own_key, paths,
+                                              hashes_and_entries, session)
+                if sendentryresult["result"] != "ok":
+                    print >>sys.stderr, "sendentries:", sendentryresult
+                    sys.exit(1)
+                sent_entries += len(missingentry_hashes)
+                print >>sys.stderr, sent_entries,
+                sys.stderr.flush()
+        print >>sys.stderr
+        sys.stderr.flush()
+        timing_point(timing, "send missing")
+
+        missingentries = get_missingentries(nodename, nodeaddress,
+                                            own_key, paths)
+        timing_point(timing, "get missing")
 
 def merge_dist(args, localconfig, frontendnodes, timestamp):
+    maxwindow = localconfig.get("maxwindow", 1000)
     paths = localconfig["paths"]
     own_key = (localconfig["nodename"],
                "%s/%s-private.pem" % (paths["privatekeys"],
@@ -55,72 +117,28 @@ def merge_dist(args, localconfig, frontendnodes, timestamp):
         print >>sys.stderr, "current position", curpos
         sys.stderr.flush()
 
-        entries = [b64encode(entry) for entry in logorder[curpos:]]
-        print >>sys.stderr, "sending log:",
-        sys.stderr.flush()
-        for chunk in chunks(entries, 1000):
-            for trynumber in range(5, 0, -1):
-                sendlogresult = sendlog(nodename, nodeaddress,
-                                        own_key, paths,
-                                        {"start": curpos, "hashes": chunk})
-                if sendlogresult == None:
-                    if trynumber == 1:
-                        sys.exit(1)
-                    sleep(10)
-                    print >>sys.stderr, "tries left:", trynumber
-                    sys.stderr.flush()
-                    continue
-                break
-            if sendlogresult["result"] != "ok":
-                print >>sys.stderr, "sendlog:", sendlogresult
-                sys.exit(1)
-            curpos += len(chunk)
-            print >>sys.stderr, curpos,
-            sys.stderr.flush()
-        print >>sys.stderr
-        timing_point(timing, "sendlog")
-        print >>sys.stderr, "log sent"
-        sys.stderr.flush()
-
-        missingentries = get_missingentries(nodename, nodeaddress, own_key,
-                                            paths)
-        timing_point(timing, "get missing")
+        verifiedsize = get_frontend_verifiedsize(nodename, nodeaddress, own_key, paths)
+        timing_point(timing, "get verified size")
+        print >>sys.stderr, "verified size", verifiedsize
+        assert verifiedsize >= curpos
+        while verifiedsize < len(logorder):
+            uptopos = min(verifiedsize + maxwindow, len(logorder))
 
-        while missingentries:
-            print >>sys.stderr, "missing entries:", len(missingentries)
-            sys.stderr.flush()
-
-            sent_entries = 0
-            print >>sys.stderr, "sending missing entries",
-            sys.stderr.flush()
-            with requests.sessions.Session() as session:
-                for missingentry_chunk in chunks(missingentries, 100):
-                    missingentry_hashes = [base64.b64decode(missingentry) for missingentry in missingentry_chunk]
-                    hashes_and_entries = [(hash, chainsdb.get(hash)) for hash in missingentry_hashes]
-                    sendentryresult = sendentries(nodename, nodeaddress,
-                                                  own_key, paths,
-                                                  hashes_and_entries, session)
-                    if sendentryresult["result"] != "ok":
-                        print >>sys.stderr, "sendentries:", sendentryresult
-                        sys.exit(1)
-                    sent_entries += len(missingentry_hashes)
-                    print >>sys.stderr, sent_entries,
-                    sys.stderr.flush()
-            print >>sys.stderr
-            sys.stderr.flush()
-            timing_point(timing, "send missing")
-
-            missingentries = get_missingentries(nodename, nodeaddress,
-                                                own_key, paths)
-            timing_point(timing, "get missing")
+            entries = [b64encode(entry) for entry in logorder[verifiedsize:uptopos]]
+            sendlog_helper(entries, verifiedsize, nodename, nodeaddress, own_key, paths)
+            timing_point(timing, "sendlog")
 
+            fill_in_missing_entries(nodename, nodeaddress, own_key, paths, chainsdb, timing)
+
+            verifiedsize = frontend_verify_entries(nodename, nodeaddress, own_key, paths, uptopos)
+
     print >>sys.stderr, "sending sth to node", nodename
     sys.stderr.flush()
-    sendsthresult = sendsth(nodename, nodeaddress, own_key, paths, sth)
-    if sendsthresult["result"] != "ok":
-        print >>sys.stderr, "sendsth:", sendsthresult
+    publishsthresult = publish_sth(nodename, nodeaddress, own_key, paths, sth)
+    if publishsthresult["result"] != "ok":
+        print >>sys.stderr, "publishsth:", publishsthresult
         sys.exit(1)
     timing_point(timing, "send sth")
diff --git a/tools/mergetools.py b/tools/mergetools.py
index bea09e9..ff3d08c 100644
--- a/tools/mergetools.py
+++ b/tools/mergetools.py
@@ -214,6 +214,25 @@ def get_curpos(node, baseurl, own_key, paths):
         print >>sys.stderr, "ERROR: currentposition", e.response
         sys.exit(1)
 
+def get_frontend_verifiedsize(node, baseurl, own_key, paths):
+    return frontend_verify_entries(node, baseurl, own_key, paths, 0)
+
+def frontend_verify_entries(node, baseurl, own_key, paths, size):
+    try:
+        arguments = {"verify_to": size}
+        result = http_request(baseurl + "plop/v1/frontend/verify-entries",
+                              json.dumps(arguments),
+                              key=own_key, verifynode=node,
+                              publickeydir=paths["publickeys"])
+        parsed_result = json.loads(result)
+        if parsed_result.get(u"result") == u"ok":
+            return parsed_result[u"verified"]
+        print >>sys.stderr, "ERROR: verify-entries", parsed_result
+        sys.exit(1)
+    except requests.exceptions.HTTPError, e:
+        print >>sys.stderr, "ERROR: verify-entries", e.response
+        sys.exit(1)
+
 def get_verifiedsize(node, baseurl, own_key, paths):
     try:
         result = http_request(baseurl + "plop/v1/merge/verifiedsize",
@@ -319,14 +338,14 @@ def sendentries_merge(node, baseurl, own_key, paths, entries, session=None):
         print >>sys.stderr, "ERROR: sendentries_merge", baseurl, e.request, e.response
         sys.exit(1)
 
-def sendsth(node, baseurl, own_key, paths, submission):
+def publish_sth(node, baseurl, own_key, paths, submission):
     try:
-        result = http_request(baseurl + "plop/v1/frontend/sendsth",
+        result = http_request(baseurl + "plop/v1/frontend/publish-sth",
                               json.dumps(submission),
                               key=own_key, verifynode=node,
                               publickeydir=paths["publickeys"])
        return json.loads(result)
     except requests.exceptions.HTTPError, e:
-        print >>sys.stderr, "ERROR: sendsth", e.response
+        print >>sys.stderr, "ERROR: publish-sth", e.response
         sys.exit(1)
     except ValueError, e:
         print >>sys.stderr, "==== FAILED REQUEST ===="
--
cgit v1.1
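The rewrite above changes both merge_backup and merge_dist from "push everything, then verify once" to a windowed loop: at most maxwindow entries (default 1000, read from localconfig) are sent, completed, and root-verified before the verified size is advanced and persisted. A schematic of that control flow, with no-op stand-ins for the patch's helpers so the sketch runs on its own:

    def send_log_window(entries): pass           # stands in for sendlog_helper()
    def fill_in_missing_entries(): pass          # stands in for the resend loop
    def verify_root_at(pos): return "root"       # stands in for check_root()
    def persist_verified_size(pos): pass         # stands in for setverifiedsize()

    def sync_node(logorder, verifiedsize, tree_size, maxwindow=1000):
        # Each window is fully sent, completed and verified before the
        # verified size moves forward, so an interrupted run resumes from
        # the last verified position instead of starting over.
        root_hash = None
        while verifiedsize < tree_size:
            uptopos = min(verifiedsize + maxwindow, tree_size)
            send_log_window(logorder[verifiedsize:uptopos])
            fill_in_missing_entries()
            root_hash = verify_root_at(uptopos)
            verifiedsize = uptopos
            persist_verified_size(verifiedsize)
        return root_hash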
From aa7be9990183d6e9a87037b88a1e33dca35304e3 Mon Sep 17 00:00:00 2001
From: Magnus Ahltorp
Date: Fri, 18 Nov 2016 17:13:31 +0100
Subject: Use sessions in merge_fetch
---
 tools/merge_fetch.py | 30 ++++++++++++++++--------------
 tools/mergetools.py  |  4 ++--
 2 files changed, 18 insertions(+), 16 deletions(-)

diff --git a/tools/merge_fetch.py b/tools/merge_fetch.py
index 3028b30..ddd2f06 100755
--- a/tools/merge_fetch.py
+++ b/tools/merge_fetch.py
@@ -7,6 +7,7 @@
 import sys
 import struct
 import subprocess
+import requests
 from time import sleep
 from mergetools import get_logorder, verify_entry, get_new_entries, \
     chunks, fsync_logorder, get_entries, add_to_logorder, \
@@ -63,20 +64,21 @@ def merge_fetch(args, config, localconfig):
         print >>sys.stderr, "getting %d entries from %s:" % \
           (len(entries_to_fetch[storagenode["name"]]), storagenode["name"]),
         sys.stderr.flush()
-        for chunk in chunks(entries_to_fetch[storagenode["name"]], 100):
-            entries = get_entries(storagenode["name"],
-                                  "https://%s/" % storagenode["address"],
-                                  own_key, paths, chunk)
-            for ehash in chunk:
-                entry = entries[ehash]
-                verify_entry(verifycert, entry, ehash)
-                chainsdb.add(ehash, entry)
-                add_to_logorder(logorderfile, ehash)
-                logorder.append(ehash)
-                certsinlog.add(ehash)
-                added_entries += 1
-            print >>sys.stderr, added_entries,
-            sys.stderr.flush()
+        with requests.sessions.Session() as session:
+            for chunk in chunks(entries_to_fetch[storagenode["name"]], 100):
+                entries = get_entries(storagenode["name"],
+                                      "https://%s/" % storagenode["address"],
+                                      own_key, paths, chunk, session=session)
+                for ehash in chunk:
+                    entry = entries[ehash]
+                    verify_entry(verifycert, entry, ehash)
+                    chainsdb.add(ehash, entry)
+                    add_to_logorder(logorderfile, ehash)
+                    logorder.append(ehash)
+                    certsinlog.add(ehash)
+                    added_entries += 1
+                print >>sys.stderr, added_entries,
+                sys.stderr.flush()
         print >>sys.stderr
         sys.stderr.flush()
         chainsdb.commit()
diff --git a/tools/mergetools.py b/tools/mergetools.py
index ff3d08c..94901a9 100644
--- a/tools/mergetools.py
+++ b/tools/mergetools.py
@@ -179,13 +179,13 @@ def get_new_entries(node, baseurl, own_key, paths):
         print >>sys.stderr, "ERROR: fetchnewentries", e.response
         sys.exit(1)
 
-def get_entries(node, baseurl, own_key, paths, hashes):
+def get_entries(node, baseurl, own_key, paths, hashes, session=None):
     try:
         params = {"hash":[base64.b64encode(ehash) for ehash in hashes]}
         result = http_request(baseurl + "plop/v1/storage/getentry",
                               params=params,
                               key=own_key, verifynode=node,
-                              publickeydir=paths["publickeys"])
+                              publickeydir=paths["publickeys"], session=session)
         parsed_result = json.loads(result)
         if parsed_result.get(u"result") == u"ok":
             entries = dict([(base64.b64decode(entry["hash"]),
--
cgit v1.1
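The same session reuse now covers fetching from storage nodes: every getentry batch in a run shares one pooled connection instead of paying a TCP and TLS handshake per batch. The idiom reduced to its core, with placeholder URLs:

    import requests

    def fetch_all(urls):
        # requests.get() opens a fresh connection each call; a Session lets
        # urllib3 keep the connection to each host alive across calls, which
        # is what threading session= through get_entries() buys.
        with requests.sessions.Session() as session:
            return [session.get(url).content for url in urls]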
From 88cc9feeb5c8838aad8edc0926c82b48eaa00c6c Mon Sep 17 00:00:00 2001
From: Magnus Ahltorp
Date: Sat, 19 Nov 2016 12:54:17 +0100
Subject: Only allocate one color per stage
---
 tools/parsebench.py | 49 ++++++++++++++++++++++++++++++++++++-------------
 1 file changed, 36 insertions(+), 13 deletions(-)

diff --git a/tools/parsebench.py b/tools/parsebench.py
index 671e482..067cf65 100755
--- a/tools/parsebench.py
+++ b/tools/parsebench.py
@@ -37,33 +37,56 @@ def parse_one_file(filename):
     iterations = itertools.groupby(lines, lambda x: x[0])
     print "%s" % (filename,)
     print ""
-    legend = []
     firsttime = True
+
+    stageorderdict = {}
+    stageorder = []
+    stages = {}
+    itemorder = {}
+
+    for (i, iteration) in iterations:
+        for (_, stage, data) in iteration:
+            if stage not in stages:
+                stageorderdict[stage] = len(stageorderdict)
+                stageorder.append(stage)
+                stages[stage] = {}
+                itemorder[stage] = []
+            for (item, useconds) in data:
+                if item not in stages[stage]:
+                    itemorder[stage].append(item)
+                    stages[stage][item] = len(stages[stage])
+
+    iterations = itertools.groupby(lines, lambda x: x[0])
     for (i, iteration) in iterations:
         print >>sys.stderr, (i, iteration)
         print ""
-        for (stagen, (_, stage, data)) in enumerate(iteration):
-            if firsttime:
-                legend.append("%s" % (stage,))
+        for (_, stage, data) in iteration:
             data = list(data)
             for (itemn, (item, useconds)) in enumerate(data):
                 seconds = useconds / 1000000
-                step = 50 / (len(data) - 1)
-                if firsttime:
-                    legend.append("")
-                print "" % (int(seconds*scale), stagen * 90, itemn * step + 40, stage, item, seconds)
+                shades = stages[stage]
+                step = 50 / (len(shades) - 1)
+                shade = shades[item]
+                stagen = stageorderdict[stage]
+                print "" % (int(seconds*scale), stagen * 90, shade * step + 40, stage, item, seconds)
                 print " "
         print ""
-    firsttime = False
     print ""
     print ""
     print ""
     print ""
-    for row in legend:
-        print row
+
+    for stage in stageorder:
+        print "%s" % (stage,)
+        shades = stages[stage]
+        for item in itemorder[stage]:
+            shade = shades[item]
+            step = 50 / (len(shades) - 1)
+
+            stagen = stageorderdict[stage]
+            print "%s" % (stagen * 90, shade * step + 40, item)
+    print ""
     print ""
 
 main()
--
cgit v1.1
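The fix above splits rendering into two passes: a first pass over all iterations assigns each stage a fixed column and each item within a stage a fixed shade index, and only the second pass draws, so an item keeps its colour across iterations even when some iteration lacks it. A condensed, runnable version of the allocation pass, taking the (iteration, stage, data) tuples produced by parse_one_line():

    def allocate_colors(lines):
        stageorder, stages, itemorder = [], {}, {}
        for _, stage, data in lines:
            if stage not in stages:
                stageorder.append(stage)          # column order: first appearance
                stages[stage] = {}
                itemorder[stage] = []
            for item, _useconds in data:
                if item not in stages[stage]:
                    itemorder[stage].append(item)
                    stages[stage][item] = len(stages[stage])  # stable shade index
        return stageorder, stages, itemorder

    sample = [(0, "merge", [("a", 10), ("b", 20)]),
              (1, "merge", [("b", 30)])]          # "b" keeps shade 1 here
    print allocate_colors(sample)
    # -> (['merge'], {'merge': {'a': 0, 'b': 1}}, {'merge': ['a', 'b']})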
From bc65371739172f05b76a44383147063ca9056297 Mon Sep 17 00:00:00 2001
From: Magnus Ahltorp
Date: Tue, 22 Nov 2016 15:28:35 +0100
Subject: parsebench: change from table to div:s
---
 tools/parsebench.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/tools/parsebench.py b/tools/parsebench.py
index 067cf65..6897b57 100755
--- a/tools/parsebench.py
+++ b/tools/parsebench.py
@@ -59,7 +59,7 @@ def parse_one_file(filename):
     iterations = itertools.groupby(lines, lambda x: x[0])
     for (i, iteration) in iterations:
         print >>sys.stderr, (i, iteration)
-        print ""
+        sys.stdout.write("")
         for (_, stage, data) in iteration:
             data = list(data)
             for (itemn, (item, useconds)) in enumerate(data):
@@ -68,10 +68,10 @@ def parse_one_file(filename):
                 step = 50 / (len(shades) - 1)
                 shade = shades[item]
                 stagen = stageorderdict[stage]
-                print ""
-                print "" % (int(seconds*scale), stagen * 90, shade * step + 40, stage, item, seconds)
+                print "" % (int(seconds*scale), stagen * 90, shade * step + 40, stage, item, seconds)
                 print " "
-        sys.stdout.write("")
+        sys.stdout.write("")
+        sys.stdout.write("")
         print ""
     print ""
     print ""
--
cgit v1.1
From ccfe7c55a5d1658c0f98aac2c45e76444dcd0bc2 Mon Sep 17 00:00:00 2001
From: Magnus Ahltorp
Date: Tue, 22 Nov 2016 15:57:55 +0100
Subject: Add session capability to unauth web requests
---
 tools/certtools.py | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/tools/certtools.py b/tools/certtools.py
index 1523c97..0009d5d 100644
--- a/tools/certtools.py
+++ b/tools/certtools.py
@@ -108,13 +108,16 @@ def urlget(url, params=None):
             pass
         return requests.get(url, verify=sslparameters.cafile, params=params)
 
-def urlpost(url, data):
+def urlpost(url, data, session=None):
     with warnings.catch_warnings():
         try:
             warnings.filterwarnings("ignore", category=requests.packages.urllib3.exceptions.SubjectAltNameWarning)
         except AttributeError:
             pass
-        return requests.post(url, data=data, verify=sslparameters.cafile)
+        if session:
+            return session.post(url, data=data, verify=sslparameters.cafile)
+        else:
+            return requests.post(url, data=data, verify=sslparameters.cafile)
 
 def get_sth(baseurl):
     result = urlget(baseurl + "ct/v1/get-sth")
@@ -157,9 +160,9 @@ def unpack_tls_array(packed_data, length_len):
     rest_data = packed_data[length_len+length:]
     return (unpacked_data, rest_data)
 
-def add_chain(baseurl, submission):
+def add_chain(baseurl, submission, session=None):
     try:
-        result = urlpost(baseurl + "ct/v1/add-chain", json.dumps(submission))
+        result = urlpost(baseurl + "ct/v1/add-chain", json.dumps(submission), session=session)
         if result.status_code == requests.codes.ok:
             return result.json()
         else:
@@ -175,10 +178,10 @@ def add_chain(baseurl, submission):
             print "========================"
             raise e
 
-def add_prechain(baseurl, submission):
+def add_prechain(baseurl, submission, session=None):
     try:
         result = urlpost(baseurl + "ct/v1/add-pre-chain",
-                         json.dumps(submission))
+                         json.dumps(submission), session=session)
 
         if result.status_code == requests.codes.ok:
             return result.json()
--
cgit v1.1
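The last patch extends the optional-session idiom to the unauthenticated client path: urlpost(), add_chain() and add_prechain() accept session=None and fall back to the module-level requests API when no session is given, so existing one-shot callers are untouched while bulk submitters can opt in. The idiom in isolation, with placeholder URL and payloads:

    import requests

    def urlpost(url, data, session=None):
        # A Session and the requests module share the post() signature, so
        # the caller's pooled session can stand in for the module directly.
        requester = session if session is not None else requests
        return requester.post(url, data=data)

    def submit_many(baseurl, submissions):
        # Bulk submission reuses one connection; a single call can still
        # invoke urlpost(url, data) with no session at all.
        with requests.sessions.Session() as session:
            for blob in submissions:
                urlpost(baseurl + "ct/v1/add-chain", blob, session=session)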