author    Linus Nordberg <linus@nordu.net>  2015-09-24 16:47:32 +0200
committer Linus Nordberg <linus@nordu.net>  2015-09-27 13:38:30 +0200
commit    38722592047855cedd4ef2701854638bd50e0467 (patch)
tree      229a7e5718b0d4c9750918d654484dba354f5194 /tools
parent    7fd70ad913ecdb2585d50d27763d00f30a1e5a6f (diff)
Merge is now run by the shell script tools/merge.

tools/merge runs merge_fetch.py, merge_backup.py, merge_sth.py and merge_dist.py sequentially. TODO: test backupquorum != 0
Diffstat (limited to 'tools')
-rwxr-xr-x               tools/merge              8
-rwxr-xr-x               tools/merge.py          38
-rwxr-xr-x [-rw-r--r--]  tools/merge_backup.py   58
-rwxr-xr-x [-rw-r--r--]  tools/merge_dist.py    102
-rwxr-xr-x [-rw-r--r--]  tools/merge_fetch.py    48
-rwxr-xr-x               tools/merge_sth.py     123
-rw-r--r--               tools/mergetools.py     55
-rwxr-xr-x               tools/testcase1.py       4
8 files changed, 326 insertions, 110 deletions
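The restructuring replaces the one-shot merge.py driver with four stages
that can each run in a loop, coordinated through files under mergedb/. A
sketch of the data flow between the stages, using the file names from the
patch below:

    # merge_fetch.py  -> mergedb/fetched              index/hash of last entry
    # merge_backup.py -> mergedb/verified.<secondary> tree head per secondary
    # merge_sth.py    -> mergedb/sth                  signed tree head (JSON)
    # merge_dist.py   -> frontend nodes               entries + STH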
diff --git a/tools/merge b/tools/merge
new file mode 100755
index 0000000..e11ac93
--- /dev/null
+++ b/tools/merge
@@ -0,0 +1,8 @@
+#! /bin/sh
+
+BINDIR=$(dirname "$0")
+
+"$BINDIR"/merge_fetch.py "$@"
+"$BINDIR"/merge_backup.py "$@"
+"$BINDIR"/merge_sth.py "$@"
+"$BINDIR"/merge_dist.py "$@"
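Each stage accepts the same --config/--localconfig/--interval/--timing
arguments (see parse_args in mergetools.py below), and without --interval
each one makes a single pass and exits, so the wrapper performs one
complete merge round per invocation. An illustrative run, using the test
configuration paths that testcase1.py passes to the wrapper:

    ../tools/merge --config ../test/catlfish-test.cfg \
        --localconfig ../test/catlfish-test-local-merge.cfg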
diff --git a/tools/merge.py b/tools/merge.py
deleted file mode 100755
index 212c171..0000000
--- a/tools/merge.py
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2014-2015, NORDUnet A/S.
-# See LICENSE for licensing information.
-
-import argparse
-import yaml
-import sys
-from certtools import create_ssl_context
-from merge_fetch import merge_fetch
-from merge_backup import merge_backup
-from merge_dist import merge_dist
-
-def main():
- parser = argparse.ArgumentParser(description="")
- parser.add_argument('--config', help="System configuration",
- required=True)
- parser.add_argument('--localconfig', help="Local configuration",
- required=True)
- parser.add_argument("--nomerge", action='store_true',
- help="Don't actually do merge")
- parser.add_argument("--timing", action='store_true',
- help="Print timing information")
- args = parser.parse_args()
-
- config = yaml.load(open(args.config))
- localconfig = yaml.load(open(args.localconfig))
- paths = localconfig["paths"]
-
- create_ssl_context(cafile=paths["https_cacertfile"])
-
- sth = merge_fetch(args, config, localconfig)
- merge_backup(args, config, localconfig, sth)
- merge_dist(args, config, localconfig, sth)
-
-if __name__ == '__main__':
- sys.exit(main())
diff --git a/tools/merge_backup.py b/tools/merge_backup.py
index 27c71a5..48197fc 100644..100755
--- a/tools/merge_backup.py
+++ b/tools/merge_backup.py
@@ -7,12 +7,15 @@
import sys
import base64
import select
-from certtools import timing_point
+from time import sleep
+from certtools import timing_point, build_merkle_tree, write_file, \
+ create_ssl_context
from mergetools import chunks, backup_sendlog, get_logorder, \
get_verifiedsize, get_missingentriesforbackup, read_chain, \
- hexencode, setverifiedsize, sendentry_merge, verifyroot
+ hexencode, setverifiedsize, sendentry_merge, verifyroot, \
+ get_nfetched, parse_args
-def merge_backup(args, config, localconfig, sth_in):
+def merge_backup(args, config, localconfig):
paths = localconfig["paths"]
own_key = (localconfig["nodename"],
"%s/%s-private.pem" % (paths["privatekeys"],
@@ -21,12 +24,17 @@ def merge_backup(args, config, localconfig, sth_in):
mergedb = paths["mergedb"]
chainsdir = mergedb + "/chains"
logorderfile = mergedb + "/logorder"
+ currentsizefile = mergedb + "/fetched"
timing = timing_point()
- logorder = get_logorder(logorderfile)
+ nfetched = get_nfetched(currentsizefile, logorderfile)
+ logorder = get_logorder(logorderfile, nfetched)
+ tree_size = len(logorder)
timing_point(timing, "get logorder")
- (tree_size, root_hash, _) = sth_in
+ tree = build_merkle_tree(logorder)
+ root_hash = tree[-1][0]
+ timing_point(timing, "build tree")
for secondary in secondaries:
if secondary["name"] == config["primarymergenode"]:
@@ -40,6 +48,7 @@ def merge_backup(args, config, localconfig, sth_in):
timing_point(timing, "get verified size")
print >>sys.stderr, "verified size", verifiedsize
sys.stderr.flush()
+
entries = [base64.b64encode(entry) for entry in logorder[verifiedsize:]]
print >>sys.stderr, "sending log:",
sys.stderr.flush()
@@ -57,7 +66,7 @@ def merge_backup(args, config, localconfig, sth_in):
continue
break
if sendlogresult["result"] != "ok":
- print >>sys.stderr, "sendlog:", sendlogresult
+ print >>sys.stderr, "backup_sendlog:", sendlogresult
sys.exit(1)
verifiedsize += len(chunk)
print >>sys.stderr, verifiedsize,
@@ -66,11 +75,13 @@ def merge_backup(args, config, localconfig, sth_in):
timing_point(timing, "sendlog")
print >>sys.stderr, "log sent"
sys.stderr.flush()
+
missingentries = get_missingentriesforbackup(nodename, nodeaddress,
own_key, paths)
timing_point(timing, "get missing")
print >>sys.stderr, "missing entries:", len(missingentries)
sys.stderr.flush()
+
fetched_entries = 0
print >>sys.stderr, "fetching missing entries",
sys.stderr.flush()
@@ -81,7 +92,7 @@ def merge_backup(args, config, localconfig, sth_in):
read_chain(chainsdir, ehash),
ehash)
if sendentryresult["result"] != "ok":
- print >>sys.stderr, "send sth:", sendentryresult
+ print >>sys.stderr, "sendentry_merge:", sendentryresult
sys.exit(1)
fetched_entries += 1
if fetched_entries % 1000 == 0:
@@ -90,6 +101,7 @@ def merge_backup(args, config, localconfig, sth_in):
print >>sys.stderr
sys.stderr.flush()
timing_point(timing, "send missing")
+
verifyrootresult = verifyroot(nodename, nodeaddress, own_key, paths,
tree_size)
if verifyrootresult["result"] != "ok":
@@ -102,7 +114,39 @@ def merge_backup(args, config, localconfig, sth_in):
print >>sys.stderr, " expected", hexencode(root_hash)
sys.exit(1)
timing_point(timing, "verifyroot")
+
setverifiedsize(nodename, nodeaddress, own_key, paths, tree_size)
+ backuppath = mergedb + "/verified." + nodename
+ backupdata = {"tree_size": tree_size,
+ "sha256_root_hash": hexencode(root_hash)}
+ print >>sys.stderr, "DEBUG: writing to", backuppath, ":", backupdata
+ write_file(backuppath, backupdata)
+
if args.timing:
print >>sys.stderr, timing["deltatimes"]
sys.stderr.flush()
+
+def main():
+ """
+ Read logorder file up until what's indicated by fetched file and
+ build the tree.
+
+ Distribute entries to all secondaries, write tree size and tree head
+ to verified.<secondary> files as each secondary is verified to have
+ the entries.
+
+ Sleep some and start over.
+ """
+ args, config, localconfig = parse_args()
+ paths = localconfig["paths"]
+ create_ssl_context(cafile=paths["https_cacertfile"])
+
+ while True:
+ merge_backup(args, config, localconfig)
+ if args.interval is None:
+ break
+ print >>sys.stderr, "sleeping", args.interval, "seconds"
+ sleep(args.interval)
+
+if __name__ == '__main__':
+ sys.exit(main())
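merge_backup thus no longer receives an STH: it rebuilds the Merkle tree
from the locally fetched portion of logorder and records how far each
secondary has been verified. The two small JSON state files involved look
roughly like this (values illustrative, hashes truncated; real hashes are
64 lowercase hex characters):

    # mergedb/fetched -- written by merge_fetch.py, read via get_nfetched():
    #   {"index": 1233, "hash": "5ab3..."}   # 0-based index of last entry
    #
    # mergedb/verified.<secondary> -- written above, read by merge_sth.py:
    #   {"tree_size": 1234, "sha256_root_hash": "77c1..."}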
diff --git a/tools/merge_dist.py b/tools/merge_dist.py
index bfc0e61..0e85984 100644..100755
--- a/tools/merge_dist.py
+++ b/tools/merge_dist.py
@@ -5,74 +5,56 @@
# See LICENSE for licensing information.
import sys
-import urllib2
-import base64
-import select
-from certtools import timing_point, check_sth_signature, \
- create_sth_signature, get_public_key_from_file
+import json
+from time import sleep
+from base64 import b64encode, b64decode
+from certtools import timing_point, \
+ create_ssl_context
from mergetools import get_curpos, get_logorder, chunks, get_missingentries, \
- sendsth, hexencode, sendlog, sendentry, read_chain
+ sendsth, sendlog, sendentry, read_chain, parse_args
-def merge_dist(args, config, localconfig, sth_in):
+def merge_dist(args, config, localconfig, timestamp):
paths = localconfig["paths"]
own_key = (localconfig["nodename"],
"%s/%s-private.pem" % (paths["privatekeys"],
localconfig["nodename"]))
frontendnodes = config["frontendnodes"]
- signingnodes = config["signingnodes"]
- ctbaseurl = config["baseurl"]
- logpublickey = get_public_key_from_file(paths["logpublickey"])
mergedb = paths["mergedb"]
chainsdir = mergedb + "/chains"
logorderfile = mergedb + "/logorder"
+ sthfile = mergedb + "/sth"
+ create_ssl_context(cafile=paths["https_cacertfile"])
timing = timing_point()
- logorder = get_logorder(logorderfile)
- timing_point(timing, "get logorder")
-
- (tree_size, root_hash, timestamp) = sth_in
- tree_head_signature = None
- for signingnode in signingnodes:
- try:
- tree_head_signature = \
- create_sth_signature(tree_size, timestamp,
- root_hash,
- "https://%s/" % signingnode["address"],
- key=own_key)
- break
- except urllib2.URLError, err:
- print >>sys.stderr, err
- sys.stderr.flush()
- if tree_head_signature == None:
- print >>sys.stderr, "Could not contact any signing nodes"
- sys.exit(1)
-
- sth = {"tree_size": tree_size, "timestamp": timestamp,
- "sha256_root_hash": base64.b64encode(root_hash),
- "tree_head_signature": base64.b64encode(tree_head_signature)}
-
- check_sth_signature(ctbaseurl, sth, publickey=logpublickey)
+ try:
+ sth = json.loads(open(sthfile, 'r').read())
+ except (IOError, ValueError):
+ print >>sys.stderr, "No valid STH file found in", sthfile
+ return timestamp
+ if sth['timestamp'] < timestamp:
+ print >>sys.stderr, "New STH file older than the previous one:", \
+ sth['timestamp'], "<", timestamp
+ return timestamp
+ if sth['timestamp'] == timestamp:
+ return timestamp
+ timestamp = sth['timestamp']
- timing_point(timing, "build sth")
-
- if args.timing:
- print >>sys.stderr, timing["deltatimes"]
- sys.stderr.flush()
-
- print hexencode(root_hash)
- sys.stdout.flush()
+ logorder = get_logorder(logorderfile, sth['tree_size'])
+ timing_point(timing, "get logorder")
for frontendnode in frontendnodes:
nodeaddress = "https://%s/" % frontendnode["address"]
nodename = frontendnode["name"]
timing = timing_point()
+
print >>sys.stderr, "distributing for node", nodename
sys.stderr.flush()
curpos = get_curpos(nodename, nodeaddress, own_key, paths)
timing_point(timing, "get curpos")
print >>sys.stderr, "current position", curpos
sys.stderr.flush()
- entries = [base64.b64encode(entry) for entry in logorder[curpos:]]
+
+ entries = [b64encode(entry) for entry in logorder[curpos:]]
print >>sys.stderr, "sending log:",
sys.stderr.flush()
for chunk in chunks(entries, 1000):
@@ -83,7 +65,7 @@ def merge_dist(args, config, localconfig, sth_in):
if sendlogresult == None:
if trynumber == 1:
sys.exit(1)
- select.select([], [], [], 10.0)
+ sleep(10)
print >>sys.stderr, "tries left:", trynumber
sys.stderr.flush()
continue
@@ -98,20 +80,22 @@ def merge_dist(args, config, localconfig, sth_in):
timing_point(timing, "sendlog")
print >>sys.stderr, "log sent"
sys.stderr.flush()
+
missingentries = get_missingentries(nodename, nodeaddress, own_key,
paths)
timing_point(timing, "get missing")
+
print >>sys.stderr, "missing entries:", len(missingentries)
sys.stderr.flush()
fetched_entries = 0
print >>sys.stderr, "fetching missing entries",
sys.stderr.flush()
for missingentry in missingentries:
- ehash = base64.b64decode(missingentry)
+ ehash = b64decode(missingentry)
sendentryresult = sendentry(nodename, nodeaddress, own_key, paths,
read_chain(chainsdir, ehash), ehash)
if sendentryresult["result"] != "ok":
- print >>sys.stderr, "send sth:", sendentryresult
+ print >>sys.stderr, "sendentry:", sendentryresult
sys.exit(1)
fetched_entries += 1
if fetched_entries % 1000 == 0:
@@ -120,11 +104,33 @@ def merge_dist(args, config, localconfig, sth_in):
print >>sys.stderr
sys.stderr.flush()
timing_point(timing, "send missing")
+
+ print >>sys.stderr, "sending sth to node", nodename
+ sys.stderr.flush()
sendsthresult = sendsth(nodename, nodeaddress, own_key, paths, sth)
if sendsthresult["result"] != "ok":
- print >>sys.stderr, "send sth:", sendsthresult
+ print >>sys.stderr, "sendsth:", sendsthresult
sys.exit(1)
timing_point(timing, "send sth")
+
if args.timing:
print >>sys.stderr, timing["deltatimes"]
sys.stderr.flush()
+
+ return timestamp
+
+def main():
+ """
+ Distribute missing entries and the STH to all frontend nodes.
+ """
+ args, config, localconfig = parse_args()
+ timestamp = 0
+ while True:
+ timestamp = merge_dist(args, config, localconfig, timestamp)
+ if args.interval is None:
+ break
+ print >>sys.stderr, "sleeping", args.interval, "seconds"
+ sleep(args.interval)
+
+if __name__ == '__main__':
+ sys.exit(main())
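The timestamp threaded through main() is what keeps the polling loop
cheap: merge_dist only redistributes when mergedb/sth carries a strictly
newer timestamp than the last one handled. A condensed sketch of the
guard (the code above additionally reports the gone-backwards case as an
error):

    def sth_is_new(sth, last_timestamp):
        # sth is the parsed mergedb/sth JSON, or None if missing/invalid.
        if sth is None:
            return False
        return sth['timestamp'] > last_timestamp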
diff --git a/tools/merge_fetch.py b/tools/merge_fetch.py
index a0a0396..1540c34 100644..100755
--- a/tools/merge_fetch.py
+++ b/tools/merge_fetch.py
@@ -6,13 +6,14 @@
import sys
import struct
-import time
import subprocess
+from time import sleep
from mergetools import get_logorder, verify_entry, get_new_entries, \
- chunks, fsync_logorder, get_entries, write_chain, add_to_logorder
-from certtools import timing_point, build_merkle_tree
+ chunks, fsync_logorder, get_entries, write_chain, add_to_logorder, \
+ hexencode, parse_args
+from certtools import timing_point, write_file, create_ssl_context
-def merge_fetch(args, config, localconfig):
+def merge_fetch(_args, config, localconfig):
paths = localconfig["paths"]
storagenodes = config["storagenodes"]
mergedb = paths["mergedb"]
@@ -21,7 +22,6 @@ def merge_fetch(args, config, localconfig):
own_key = (localconfig["nodename"],
"%s/%s-private.pem" % (paths["privatekeys"],
localconfig["nodename"]))
-
timing = timing_point()
logorder = get_logorder(logorderfile)
@@ -48,9 +48,6 @@ def merge_fetch(args, config, localconfig):
print >>sys.stderr, "adding", len(new_entries), "entries"
sys.stderr.flush()
- if args.nomerge:
- sys.exit(0)
-
for ehash in new_entries:
for storagenode in storagenodes:
if ehash in new_entries_per_node[storagenode["name"]]:
@@ -89,9 +86,36 @@ def merge_fetch(args, config, localconfig):
verifycert.communicate(struct.pack("I", 0))
- tree = build_merkle_tree(logorder)
tree_size = len(logorder)
- root_hash = tree[-1][0]
- timestamp = int(time.time() * 1000)
+ if tree_size == 0:
+ return (0, '')
+ else:
+ return (tree_size, logorder[tree_size-1])
+
+def main():
+ """
+ Fetch new entries from all storage nodes.
+
+ Indicate current position by writing the index in the logorder file
+ (0-based) to the 'fetched' file.
+
+ Sleep some and start over.
+ """
+ args, config, localconfig = parse_args()
+ paths = localconfig["paths"]
+ mergedb = paths["mergedb"]
+ currentsizefile = mergedb + "/fetched"
+ create_ssl_context(cafile=paths["https_cacertfile"])
+
+ while True:
+ logsize, last_hash = merge_fetch(args, config, localconfig)
+ currentsize = {"index": logsize - 1, "hash": hexencode(last_hash)}
+ print >>sys.stderr, "DEBUG: writing to", currentsizefile, ":", currentsize
+ write_file(currentsizefile, currentsize)
+ if args.interval is None:
+ break
+ print >>sys.stderr, "sleeping", args.interval, "seconds"
+ sleep(args.interval)
- return (tree_size, root_hash, timestamp)
+if __name__ == '__main__':
+ sys.exit(main())
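Note the off-by-one convention: the fetched file records the 0-based
index of the last entry, so an empty log is written as index -1 with an
empty hash, and readers recover the usable entry count as index + 1. A
minimal sketch, assuming hexencode from mergetools as in the patch:

    def fetched_record(logsize, last_hash):
        # What the loop above writes; hexencode('') == '' for an empty log.
        return {"index": logsize - 1, "hash": hexencode(last_hash)}

    # fetched_record(0, '')    -> {"index": -1, "hash": ""}
    # fetched_record(1234, h)  -> {"index": 1233, "hash": hexencode(h)}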
diff --git a/tools/merge_sth.py b/tools/merge_sth.py
new file mode 100755
index 0000000..68b52a0
--- /dev/null
+++ b/tools/merge_sth.py
@@ -0,0 +1,123 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2014-2015, NORDUnet A/S.
+# See LICENSE for licensing information.
+
+import sys
+import json
+import urllib2
+import time
+from base64 import b64encode, b64decode
+from mergetools import parse_args, get_nfetched, hexencode, hexdecode, \
+ get_logorder, get_sth
+from certtools import create_ssl_context, get_public_key_from_file, \
+ timing_point, create_sth_signature, write_file, check_sth_signature, \
+ build_merkle_tree
+
+def merge_sth(args, config, localconfig):
+ paths = localconfig["paths"]
+ own_key = (localconfig["nodename"],
+ "%s/%s-private.pem" % (paths["privatekeys"],
+ localconfig["nodename"]))
+ ctbaseurl = config["baseurl"]
+ signingnodes = config["signingnodes"]
+ mergenodes = config.get("mergenodes", [])
+ mergedb = paths["mergedb"]
+ sthfile = mergedb + "/sth"
+ logorderfile = mergedb + "/logorder"
+ logpublickey = get_public_key_from_file(paths["logpublickey"])
+ backupquorum = localconfig.get("backupquorum", 0)
+ assert backupquorum <= len(mergenodes) - 1
+ create_ssl_context(cafile=paths["https_cacertfile"])
+ timing = timing_point()
+
+ trees = [{'tree_size': 0, 'sha256_root_hash': ''}]
+ for mergenode in mergenodes:
+ if mergenode["name"] == config["primarymergenode"]:
+ continue
+ verifiedfile = mergedb + "/verified." + mergenode["name"]
+ try:
+ tree = json.loads(open(verifiedfile, "r").read())
+ except (IOError, ValueError):
+ tree = {'tree_size': 0, "sha256_root_hash": ''}
+ trees.append(tree)
+ trees.sort(key=lambda e: e['tree_size'], reverse=True)
+ print >>sys.stderr, "DEBUG: trees:", trees
+ tree_size = trees[backupquorum]['tree_size']
+ root_hash = hexdecode(trees[backupquorum]['sha256_root_hash'])
+ print >>sys.stderr, "DEBUG: tree size candidate at backupquorum", \
+ backupquorum, ":", tree_size
+
+ cur_sth = get_sth(sthfile)
+ if tree_size < cur_sth['tree_size']:
+ print >>sys.stderr, "candidate tree < current tree:", \
+ tree_size, "<", cur_sth['tree_size']
+ return
+
+ assert tree_size >= 0 # Don't read logorder without limit.
+ logorder = get_logorder(logorderfile, tree_size)
+ timing_point(timing, "get logorder")
+ if tree_size == -1:
+ tree_size = len(logorder)
+ print >>sys.stderr, "new tree size will be", tree_size
+
+ root_hash_calc = build_merkle_tree(logorder)[-1][0]
+ assert root_hash == '' or root_hash == root_hash_calc
+ root_hash = root_hash_calc
+ timestamp = int(time.time() * 1000)
+
+ tree_head_signature = None
+ for signingnode in signingnodes:
+ try:
+ tree_head_signature = \
+ create_sth_signature(tree_size, timestamp,
+ root_hash,
+ "https://%s/" % signingnode["address"],
+ key=own_key)
+ break
+ except urllib2.URLError, err:
+ print >>sys.stderr, err
+ sys.stderr.flush()
+ if tree_head_signature == None:
+ print >>sys.stderr, "Could not contact any signing nodes"
+ sys.exit(1)
+
+ sth = {"tree_size": tree_size, "timestamp": timestamp,
+ "sha256_root_hash": b64encode(root_hash),
+ "tree_head_signature": b64encode(tree_head_signature)}
+
+ check_sth_signature(ctbaseurl, sth, publickey=logpublickey)
+ timing_point(timing, "build sth")
+
+ print hexencode(root_hash), timestamp, tree_size
+ sys.stdout.flush()
+
+ write_file(sthfile, sth)
+
+ if args.timing:
+ print >>sys.stderr, timing["deltatimes"]
+ sys.stderr.flush()
+
+def main():
+ """
+ Read file 'sth' to get the current tree size, treating a missing or
+ invalid file as having no tree at all.
+
+ Read tree sizes from the verified.<secondary> files, put them in a
+ list and sort it. Let new tree size equal list[backup-quorum]. Barf
+ on a new tree size smaller than the currently published tree size.
+
+ Decide on a timestamp, build an STH and write it to file 'sth'.
+ """
+ args, config, localconfig = parse_args()
+
+ while True:
+ merge_sth(args, config, localconfig)
+ if args.interval is None:
+ break
+ print >>sys.stderr, "sleeping", args.interval, "seconds"
+ time.sleep(args.interval)
+
+if __name__ == '__main__':
+ sys.exit(main())
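A worked example of the quorum pick: the candidate size is the
(backupquorum+1)-th largest verified tree size, so a non-zero STH is only
published once at least backupquorum + 1 secondaries hold every entry in
it. With backupquorum = 1 and three secondaries verified at sizes 120,
100 and 90 (root hashes elided):

    trees = [{'tree_size': 0, 'sha256_root_hash': ''}]   # placeholder
    for size in (120, 100, 90):      # from the verified.<secondary> files
        trees.append({'tree_size': size, 'sha256_root_hash': '...'})
    trees.sort(key=lambda e: e['tree_size'], reverse=True)
    tree_size = trees[1]['tree_size']   # 100: held by at least 2 nodes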
diff --git a/tools/mergetools.py b/tools/mergetools.py
index 7644dac..86f9255 100644
--- a/tools/mergetools.py
+++ b/tools/mergetools.py
@@ -9,14 +9,43 @@ import struct
import urllib
import urllib2
import json
+import yaml
+import argparse
from certtools import get_leaf_hash, http_request
def parselogrow(row):
return base64.b16decode(row, casefold=True)
-def get_logorder(filename):
- f = open(filename, "r")
- return [parselogrow(row.rstrip()) for row in f]
+def get_logorder(filename, items=-1):
+ logorder = []
+ n = 0
+ for row in open(filename, "r"):
+ if n == items:
+ break
+ logorder.append(parselogrow(row.rstrip()))
+ n += 1
+ return logorder
+
+def get_nfetched(currentsizefile, logorderfile):
+ try:
+ limit = json.loads(open(currentsizefile).read())
+ except (IOError, ValueError):
+ return -1
+    if limit['index'] >= 0:
+        with open(logorderfile, 'r') as f:
+            f.seek(limit['index']*65)
+            assert f.read(64).lower() == limit['hash']
+    return limit['index'] + 1
+
+def get_sth(filename):
+ try:
+ sth = json.loads(open(filename, 'r').read())
+ except (IOError, ValueError):
+ sth = {'tree_size': -1,
+ 'timestamp': 0,
+ 'sha256_root_hash': '',
+ 'tree_head_signature': ''}
+ return sth
def read_chain_open(chainsdir, filename):
path = chainsdir + "/" + \
@@ -104,6 +133,9 @@ def verify_entry(verifycert, entry, ehash):
def hexencode(key):
return base64.b16encode(key).lower()
+def hexdecode(s):
+ return base64.b16decode(s.upper())
+
def write_chain(key, value, chainsdir, hashed_dir=True):
filename = hexencode(key)
if hashed_dir:
@@ -356,3 +388,20 @@ def get_missingentriesforbackup(node, baseurl, own_key, paths):
def chunks(l, n):
return [l[i:i+n] for i in range(0, len(l), n)]
+
+def parse_args():
+ parser = argparse.ArgumentParser(description="")
+ parser.add_argument('--config', help="System configuration",
+ required=True)
+ parser.add_argument('--localconfig', help="Local configuration",
+ required=True)
+ parser.add_argument('--interval', type=int, metavar="n",
+ help="Repeate every N seconds")
+ parser.add_argument("--timing", action='store_true',
+ help="Print timing information")
+ args = parser.parse_args()
+
+ config = yaml.load(open(args.config))
+ localconfig = yaml.load(open(args.localconfig))
+
+ return (args, config, localconfig)
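get_nfetched() can seek straight to a row because logorder is fixed
width: one hex-encoded sha256 leaf hash per line, 64 characters plus a
newline, i.e. 65 bytes per row (hence the seek to index*65 above). A
sketch of random access under that assumption:

    ROWLEN = 65   # 64 hex characters + '\n'

    def read_logorder_row(logorderfile, i):
        # Jump straight to row i, as get_nfetched() does when it
        # cross-checks the hash recorded in the fetched file.
        with open(logorderfile, 'r') as f:
            f.seek(i * ROWLEN)
            return f.read(ROWLEN - 1)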
diff --git a/tools/testcase1.py b/tools/testcase1.py
index c66d976..697cc99 100755
--- a/tools/testcase1.py
+++ b/tools/testcase1.py
@@ -37,7 +37,7 @@ logpublickey = get_public_key_from_file(logpublickeyfile)
def testgroup(name):
global indentation
- print name + ":"
+ print "testgroup " + name + ":"
indentation = " "
def print_error(message, *args):
@@ -147,7 +147,7 @@ def get_and_check_entry(timestamp, chain, leaf_index, baseurl):
len(submittedcertchain))
def merge():
- return subprocess.call(["../tools/merge.py", "--config", "../test/catlfish-test.cfg",
+ return subprocess.call(["../tools/merge", "--config", "../test/catlfish-test.cfg",
"--localconfig", "../test/catlfish-test-local-merge.cfg"])
mergeresult = merge()