summaryrefslogtreecommitdiff
path: root/lib
diff options
context:
space:
mode:
authorJohan Lundberg <lundberg@nordu.net>2015-04-02 10:43:33 +0200
committerJohan Lundberg <lundberg@nordu.net>2015-04-02 10:43:33 +0200
commitbd611ac59f7c4db885a2f8631ef0bcdcd1901ca0 (patch)
treee60f5333a7699cd021b33c7f5292af55b774001b /lib
Diffstat (limited to 'lib')
-rw-r--r--lib/COPYING202
-rw-r--r--lib/PKG-INFO18
-rw-r--r--lib/README10
-rw-r--r--lib/__init__.py31
-rwxr-xr-xlib/aclcheck.py302
-rwxr-xr-xlib/aclgenerator.py418
-rw-r--r--lib/cisco.py744
-rw-r--r--lib/ciscoasa.py454
-rwxr-xr-xlib/demo.py241
-rwxr-xr-xlib/html.py233
-rw-r--r--lib/ipset.py200
-rw-r--r--lib/iptables.py789
-rw-r--r--lib/juniper.py727
-rw-r--r--lib/junipersrx.py448
-rw-r--r--lib/nacaddr.py250
-rw-r--r--lib/naming.py502
-rw-r--r--lib/packetfilter.py348
-rw-r--r--lib/policy.py1821
-rw-r--r--lib/policyreader.py245
-rwxr-xr-xlib/port.py55
-rw-r--r--lib/setup.py39
-rwxr-xr-xlib/speedway.py50
22 files changed, 8127 insertions, 0 deletions
diff --git a/lib/COPYING b/lib/COPYING
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/lib/COPYING
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/lib/PKG-INFO b/lib/PKG-INFO
new file mode 100644
index 0000000..9a074f5
--- /dev/null
+++ b/lib/PKG-INFO
@@ -0,0 +1,18 @@
+Metadata-Version: 1.0
+Name: capirca
+Version: 1.0.0
+Summary: UNKNOWN
+Home-page: http://code.google.com/p/capirca/
+Author: Google
+Author-email: watson@gmail.com
+License: Apache License, Version 2.0
+Description: UNKNOWN
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Operating System :: OS Independent
+Classifier: Topic :: Internet
+Classifier: Topic :: Software Development :: Libraries
+Classifier: Topic :: System :: Networking
+Classifier: Topic :: Security
diff --git a/lib/README b/lib/README
new file mode 100644
index 0000000..6442579
--- /dev/null
+++ b/lib/README
@@ -0,0 +1,10 @@
+Capirca is a system to develop and manage access control lists
+for a variety of platforms.
+It was developed by Google for internal use, and is now open source.
+
+Project home page: http://code.google.com/p/capirca/
+
+Please send contributions to capirca-dev@googlegroups.com.
+
+Code should include unit tests and follow the Google Python style guide:
+http://code.google.com/p/soc/wiki/PythonStyleGuide
diff --git a/lib/__init__.py b/lib/__init__.py
new file mode 100644
index 0000000..4d6ecb9
--- /dev/null
+++ b/lib/__init__.py
@@ -0,0 +1,31 @@
#
# Network access control library and utilities
#
# capirca/__init__.py
#
# This package is intended to simplify the process of developing
# and working with large numbers of network access control lists
# for various platforms that share common network and service
# definitions.
#
# Typical usage is to import the individual submodules directly:
# from capirca import naming
# from capirca import policy
# from capirca import cisco
# from capirca import juniper
# from capirca import iptables
# from capirca import policyreader
# from capirca import aclcheck
# from capirca import aclgenerator
# from capirca import nacaddr
# from capirca import packetfilter
# from capirca import port
# from capirca import speedway
#

# Package release version.
__version__ = '1.0.0'

# Submodules exported by 'from capirca import *'.
__all__ = ['naming', 'policy', 'cisco', 'juniper', 'iptables',
           'policyreader', 'aclcheck', 'aclgenerator', 'nacaddr',
           'packetfilter', 'port', 'speedway']

__author__ = 'Paul (Tony) Watson (watson@gmail.com / watson@google.com)'
diff --git a/lib/aclcheck.py b/lib/aclcheck.py
new file mode 100755
index 0000000..3e36a99
--- /dev/null
+++ b/lib/aclcheck.py
@@ -0,0 +1,302 @@
+#!/usr/bin/python
+#
+# Copyright 2011 Google Inc. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Check where hosts, ports and protocols are matched in a capirca policy."""
+
+__author__ = 'watson@google.com (Tony Watson)'
+
+import logging
+import sys
+import nacaddr
+import policy
+import port
+
+
# Exception hierarchy: every aclcheck error derives from a single Error base
# so callers can catch the whole family with one except clause.
class Error(Exception):
  """Base error class."""


class AddressError(Error):
  """Incorrect IP address or format."""


class BadPolicy(Error):
  """Item is not a valid policy object."""


class NoTargetError(Error):
  """Specified target platform not available in specified policy."""
+
+
class AclCheck(object):
  """Check where hosts, ports and protocols match in a NAC policy.

  Args:
    pol:
      policy.Policy object
    src:
      string, the source address
    dst:
      string: the destination address.
    sport:
      string, the source port.
    dport:
      string, the destination port.
    proto:
      string, the protocol.

  Returns:
    An AclCheck Object

  Raises:
    port.BadPortValue: An invalid source port is used
    port.BadPortRange: A port is outside of the acceptable range 0-65535
    AddressError: Incorrect ip address or format

  """

  def __init__(self,
               pol,
               src='any',
               dst='any',
               sport='any',
               dport='any',
               proto='any',
               ):

    self.pol_obj = pol
    self.proto = proto

    # validate source port
    if sport == 'any':
      self.sport = sport
    else:
      self.sport = port.Port(sport)

    # validate destination port
    if dport == 'any':
      self.dport = dport
    else:
      self.dport = port.Port(dport)

    # validate source address
    if src == 'any':
      self.src = src
    else:
      try:
        self.src = nacaddr.IP(src)
      except ValueError:
        raise AddressError('bad source address: %s\n' % src)

    # validate destination address
    if dst == 'any':
      self.dst = dst
    else:
      try:
        self.dst = nacaddr.IP(dst)
      except ValueError:
        raise AddressError('bad destination address: %s\n' % dst)

    # isinstance (rather than an exact type comparison) also accepts
    # subclasses of policy.Policy.
    if not isinstance(self.pol_obj, policy.Policy):
      raise BadPolicy('Policy object is not valid.')

    # Walk every term of every filter and record matches. 'matches' collects
    # every term that could apply; 'exact_matches' only terms that definitely
    # terminate evaluation (no edge-case options, action is not 'next').
    self.matches = []
    self.exact_matches = []
    for header, terms in self.pol_obj.filters:
      filtername = header.target[0].options[0]
      for term in terms:
        possible = []
        logging.debug('checking term: %s', term.name)
        if not self._AddrInside(self.src, term.source_address):
          logging.debug('srcaddr does not match')
          continue
        logging.debug('srcaddr matches: %s', self.src)
        if not self._AddrInside(self.dst, term.destination_address):
          logging.debug('dstaddr does not match')
          continue
        logging.debug('dstaddr matches: %s', self.dst)
        if (self.sport != 'any' and term.source_port and not
            self._PortInside(self.sport, term.source_port)):
          logging.debug('sport does not match')
          continue
        logging.debug('sport matches: %s', self.sport)
        if (self.dport != 'any' and term.destination_port and not
            self._PortInside(self.dport, term.destination_port)):
          logging.debug('dport does not match')
          continue
        logging.debug('dport matches: %s', self.dport)
        if (self.proto != 'any' and term.protocol and
            self.proto not in term.protocol):
          logging.debug('proto does not match')
          continue
        logging.debug('proto matches: %s', self.proto)
        if term.protocol_except and self.proto in term.protocol_except:
          logging.debug('protocol excepted by term, no match.')
          continue
        logging.debug('proto not excepted: %s', self.proto)
        if not term.action:  # avoid any verbatim
          logging.debug('term had no action (verbatim?), no match.')
          continue
        logging.debug('term has an action')
        possible = self._PossibleMatch(term)
        self.matches.append(Match(filtername, term.name, possible, term.action,
                                  term.qos))
        if possible:
          logging.debug('term has options: %s, not treating as exact match',
                        possible)
          continue

        # if we get here then we have a match, and if the action isn't next and
        # there are no possibles, then this is a "definite" match and we needn't
        # look for any further matches (i.e. later terms may match, but since
        # we'll never get there we shouldn't report them)
        if 'next' not in term.action:
          self.exact_matches.append(Match(filtername, term.name, [],
                                          term.action, term.qos))
          break

  def Matches(self):
    """Return list of matched terms."""
    return self.matches

  def ExactMatches(self):
    """Return matched terms, but not terms with possibles or action next."""
    return self.exact_matches

  def ActionMatch(self, action='any'):
    """Return list of matched terms with specified actions."""
    match_list = []
    for match in self.matches:
      if match.action:
        if not match.possibles:
          # String equality, not identity: 'is' on string literals is an
          # implementation detail of interning and must not be relied upon.
          if action == 'any' or action in match.action:
            match_list.append(match)
    return match_list

  def DescribeMatches(self):
    """Provide sentence descriptions of matches.

    Returns:
      ret_str: text sentences describing matches
    """
    ret_str = []
    for match in self.matches:
      ret_str.append(str(match))
    return '\n'.join(ret_str)

  def __str__(self):
    text = []
    last_filter = ''
    for match in self.matches:
      if match.filter != last_filter:
        last_filter = match.filter
        text.append(' filter: ' + match.filter)
      if match.possibles:
        text.append(' ' * 10 + 'term: ' + match.term + ' (possible match)')
      else:
        text.append(' ' * 10 + 'term: ' + match.term)
      if match.possibles:
        text.append(' ' * 16 + match.action + ' if ' + str(match.possibles))
      else:
        text.append(' ' * 16 + match.action)
    return '\n'.join(text)

  def _PossibleMatch(self, term):
    """Ignore some options and keywords that are edge cases.

    Args:
      term: term object to examine for edge-cases

    Returns:
      ret_str: a list of reasons this term may possible match
    """
    ret_str = []
    if 'first-fragment' in term.option:
      ret_str.append('first-frag')
    if term.fragment_offset:
      ret_str.append('frag-offset')
    if term.packet_length:
      ret_str.append('packet-length')
    if 'established' in term.option:
      ret_str.append('est')
    if 'tcp-established' in term.option and 'tcp' in term.protocol:
      ret_str.append('tcp-est')
    return ret_str

  def _AddrInside(self, addr, addresses):
    """Check if address is matched in another address or group of addresses.

    Args:
      addr: An ipaddr network or host address or text 'any'
      addresses: A list of ipaddr network or host addresses

    Returns:
      bool: True or False
    """
    if addr == 'any': return True   # always true if we match for any addr
    if not addresses: return True   # always true if term has nothing to match
    for address in addresses:
      # ipaddr can incorrectly report ipv4 as contained within ipv6 addrs
      if type(addr) is type(address):
        if addr in address:
          return True
    return False

  def _PortInside(self, myport, port_list):
    """Check if port matches in a port or group of ports.

    Args:
      myport: port number
      port_list: list of ports

    Returns:
      bool: True or False
    """
    if myport == 'any': return True
    if [x for x in port_list if x[0] <= myport <= x[1]]:
      return True
    return False
+
+
class Match(object):
  """A matching term and its associate values."""

  def __init__(self, filtername, term, possibles, action, qos=None):
    # A term can carry several actions; only the first one is recorded.
    self.filter = filtername
    self.term = term
    self.possibles = possibles
    self.action = action[0]
    self.qos = qos

  def __str__(self):
    # Prefix the action with 'possible ' when edge-case factors were found.
    text = ('possible ' + self.action) if self.possibles else self.action
    text += ' in term ' + self.term + ' of filter ' + self.filter
    if self.possibles:
      text += ' with factors: ' + ', '.join(self.possibles)
    return text
+
+
def main():
  """Placeholder entry point; aclcheck is intended for use as a library."""
  pass

if __name__ == '__main__':
  main()
diff --git a/lib/aclgenerator.py b/lib/aclgenerator.py
new file mode 100755
index 0000000..c5be343
--- /dev/null
+++ b/lib/aclgenerator.py
@@ -0,0 +1,418 @@
+#!/usr/bin/python2.4
+#
+# Copyright 2011 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""ACL Generator base class."""
+
+import copy
+import re
+from string import Template
+
+import policy
+
+
# Exception hierarchy: every aclgenerator error derives from a single Error
# base so callers can catch the whole family with one except clause.
class Error(Exception):
  """Base error class."""
  pass


class NoPlatformPolicyError(Error):
  """Raised when a policy is received that doesn't support this platform."""
  pass


class UnsupportedFilter(Error):
  """Raised when we see an inappropriate filter."""
  pass


class UnknownIcmpTypeError(Error):
  """Raised when we see an unknown icmp-type."""
  pass


class MismatchIcmpInetError(Error):
  """Raised when there is a mismatch between icmp/icmpv6 and inet/inet6."""
  pass


class EstablishedError(Error):
  """Raised when a term has established option with inappropriate protocol."""
  pass


class UnsupportedAF(Error):
  """Raised when provided an unsupported address family."""
  pass


class DuplicateTermError(Error):
  """Raised when duplication of term names are detected."""
  pass


class UnsupportedFilterError(Error):
  """Raised when we see an inappropriate filter."""
  pass


class TermNameTooLongError(Error):
  """Raised when a term name cannot be abbreviated below the length limit."""
  pass
+
+
class Term(object):
  """Generic framework for a generator Term."""
  ICMP_TYPE = policy.Term.ICMP_TYPE
  # Protocol name to IP protocol number (IANA assignments).
  PROTO_MAP = {'ip': 0,
               'icmp': 1,
               'igmp': 2,
               'ggp': 3,
               'ipencap': 4,
               'tcp': 6,
               'egp': 8,
               'igp': 9,
               'udp': 17,
               'rdp': 27,
               'ipv6': 41,
               'ipv6-route': 43,
               'ipv6-frag': 44,
               'rsvp': 46,
               'gre': 47,
               'esp': 50,
               'ah': 51,
               'icmpv6': 58,
               'ipv6-nonxt': 59,
               'ipv6-opts': 60,
               'ospf': 89,
               'ipip': 94,
               'pim': 103,
               'vrrp': 112,
               'l2tp': 115,
               'sctp': 132,
              }
  AF_MAP = {'inet': 4,
            'inet6': 6,
            'bridge': 4  # if this doesn't exist, output includes v4 & v6
           }
  # provide flipped key/value dicts
  # (.items() instead of the Python-2-only .iteritems() keeps this module
  # importable under Python 3 as well; on Python 2 the behavior is identical.)
  PROTO_MAP_BY_NUMBER = dict([(v, k) for (k, v) in PROTO_MAP.items()])
  AF_MAP_BY_NUMBER = dict([(v, k) for (k, v) in AF_MAP.items()])

  NO_AF_LOG_FORMAT = Template('Term $term will not be rendered, as it has'
                              ' $direction address match specified but no'
                              ' $direction addresses of $af address family'
                              ' are present.')

  def NormalizeAddressFamily(self, af):
    """Convert (if necessary) address family name to numeric value.

    Args:
      af: Address family, can be either numeric or string (e.g. 4 or 'inet')

    Returns:
      af: Numeric address family value

    Raises:
      UnsupportedAF: Address family not in keys or values of our AF_MAP.
    """
    # ensure address family (af) is valid
    if af in self.AF_MAP_BY_NUMBER:
      return af
    elif af in self.AF_MAP:
      # convert AF name to number (e.g. 'inet' becomes 4, 'inet6' becomes 6)
      af = self.AF_MAP[af]
    else:
      raise UnsupportedAF('Address family %s is not supported, term %s.' % (
          af, self.term.name))
    return af

  def NormalizeIcmpTypes(self, icmp_types, protocols, af):
    """Return verified list of appropriate icmp-types.

    Args:
      icmp_types: list of icmp_types
      protocols: list of protocols
      af: address family of this term, either numeric or text (see self.AF_MAP)

    Returns:
      sorted list of numeric icmp-type codes.

    Raises:
      UnsupportedFilterError: icmp-types specified with non-icmp protocol.
      MismatchIcmpInetError: mismatch between icmp protocol and address family.
      UnknownIcmpTypeError: unknown icmp-type specified
    """
    if not icmp_types:
      return ['']
    # only protocols icmp or icmpv6 can be used with icmp-types
    if protocols != ['icmp'] and protocols != ['icmpv6']:
      raise UnsupportedFilterError('%s %s' % (
          'icmp-types specified for non-icmp protocols in term: ',
          self.term.name))
    # make sure we have a numeric address family (4 or 6)
    af = self.NormalizeAddressFamily(af)
    # check that addr family and protocol are appropriate
    if ((af != 4 and protocols == ['icmp']) or
        (af != 6 and protocols == ['icmpv6'])):
      raise MismatchIcmpInetError('%s %s' % (
          'ICMP/ICMPv6 mismatch with address family IPv4/IPv6 in term',
          self.term.name))
    # ensure all icmp types are valid
    for icmptype in icmp_types:
      if icmptype not in self.ICMP_TYPE[af]:
        raise UnknownIcmpTypeError('%s %s %s %s' % (
            '\nUnrecognized ICMP-type (', icmptype,
            ') specified in term ', self.term.name))
    rval = []
    rval.extend([self.ICMP_TYPE[af][x] for x in icmp_types])
    rval.sort()
    return rval
+
+
class ACLGenerator(object):
  """Generates platform specific filters and terms from a policy object.

  This class takes a policy object and renders the output into a syntax which
  is understood by a specific platform (eg. iptables, cisco, etc).
  """

  _PLATFORM = None
  # Default protocol to apply when no protocol is specified.
  _DEFAULT_PROTOCOL = 'ip'
  # Address families this generator supports.
  _SUPPORTED_AF = set(('inet', 'inet6'))
  # Unsupported protocols by address family.
  _FILTER_BLACKLIST = {}

  # Set of required keywords that every generator must support.
  _REQUIRED_KEYWORDS = set(['action',
                            'comment',
                            'destination_address',
                            'destination_address_exclude',
                            'destination_port',
                            'icmp_type',
                            'name',         # obj attribute, not keyword
                            'option',
                            'protocol',
                            'platform',
                            'platform_exclude',
                            'source_address',
                            'source_address_exclude',
                            'source_port',
                            'translated',   # obj attribute, not keyword
                            'verbatim',
                           ])
  # Generators should redefine this in subclass as optional support is added
  _OPTIONAL_SUPPORTED_KEYWORDS = set([])

  # Abbreviation table used to automatically abbreviate terms that exceed
  # specified limit. We use uppercase for abbreviations to distinguish
  # from lowercase names. This is order list - we try the ones in the
  # top of the list before the ones later in the list. Prefer clear
  # or very-space-saving abbreviations by putting them early in the
  # list. Abbreviations may be regular expressions or fixed terms;
  # prefer fixed terms unless there's a clear benefit to regular
  # expressions.
  _ABBREVIATION_TABLE = [
      ('bogons', 'BGN'),
      ('bogon', 'BGN'),
      ('reserved', 'RSV'),
      ('rfc1918', 'PRV'),
      ('rfc-1918', 'PRV'),
      ('internet', 'EXT'),
      ('global', 'GBL'),
      ('internal', 'INT'),
      ('customer', 'CUST'),
      ('google', 'GOOG'),
      ('ballmer', 'ASS'),
      ('microsoft', 'LOL'),
      ('china', 'BAN'),
      ('border', 'BDR'),
      ('service', 'SVC'),
      ('router', 'RTR'),
      ('transit', 'TRNS'),
      ('experiment', 'EXP'),
      ('established', 'EST'),
      ('unreachable', 'UNR'),
      ('fragment', 'FRG'),
      ('accept', 'OK'),
      ('discard', 'DSC'),
      ('reject', 'REJ'),
      ('replies', 'ACK'),
      ('request', 'REQ'),
      ]
  # Maximum term length. Can be overridden by generator to enforce
  # platform specific restrictions.
  _TERM_MAX_LENGTH = 62

  def __init__(self, pol, exp_info):
    """Initialise an ACLGenerator. Store policy structure for processing."""
    object.__init__(self)

    # The default list of valid keyword tokens for generators
    self._VALID_KEYWORDS = self._REQUIRED_KEYWORDS.union(
        self._OPTIONAL_SUPPORTED_KEYWORDS)

    self.policy = pol

    for header, terms in pol.filters:
      if self._PLATFORM in header.platforms:
        # Verify valid keywords
        # error on unsupported optional keywords that could result
        # in dangerous or unexpected results
        for term in terms:
          # Only verify optional keywords if the term is active on the platform.
          err = []
          if term.platform and self._PLATFORM not in term.platform:
            continue
          if term.platform_exclude and self._PLATFORM in term.platform_exclude:
            continue
          for el, val in term.__dict__.items():
            # Private attributes do not need to be valid keywords.
            if (val and el not in self._VALID_KEYWORDS
                and not el.startswith('flatten')):
              err.append(el)
          if err:
            raise UnsupportedFilterError('%s %s %s %s %s %s' % ('\n', term.name,
                'unsupported optional keywords for target', self._PLATFORM,
                'in policy:', ' '.join(err)))

    self._TranslatePolicy(pol, exp_info)

  def _TranslatePolicy(self, pol, exp_info):
    """Translate policy contents to platform specific data structures."""
    raise Error('%s does not implement _TranslatePolicies()' % self._PLATFORM)

  def FixHighPorts(self, term, af='inet', all_protocols_stateful=False):
    """Evaluate protocol and ports of term, return sane version of term.

    Args:
      term: term object to evaluate.
      af: address family, 'inet' or 'inet6'.
      all_protocols_stateful: whether the platform tracks state for every
        protocol, making high-port expansion unnecessary for non-TCP/UDP.

    Returns:
      The original term, or a deep copy with high ports added when the
      'established' option requires them.

    Raises:
      UnsupportedAF: the address family is not supported by this generator.
      UnsupportedFilter: a blacklisted protocol is used with this af.
      EstablishedError: 'established' used with a protocol that needs state
        tracking the platform does not provide.
    """
    mod = term

    # Determine which protocols this term applies to.
    if term.protocol:
      protocols = set(term.protocol)
    else:
      protocols = set((self._DEFAULT_PROTOCOL,))

    # Check that the address family matches the protocols.
    if af not in self._SUPPORTED_AF:
      raise UnsupportedAF('\nAddress family %s, found in %s, '
                          'unsupported by %s' % (af, term.name, self._PLATFORM))
    if af in self._FILTER_BLACKLIST:
      unsupported_protocols = self._FILTER_BLACKLIST[af].intersection(protocols)
      if unsupported_protocols:
        raise UnsupportedFilter('\n%s targets do not support protocol(s) %s '
                                'with address family %s (in %s)' %
                                (self._PLATFORM, unsupported_protocols,
                                 af, term.name))

    # Many renders expect high ports for terms with the established option.
    for opt in [str(x) for x in term.option]:
      if opt.startswith('established'):
        unstateful_protocols = protocols.difference(set(('tcp', 'udp')))
        if not unstateful_protocols:
          # TCP/UDP: add in high ports then collapse to eliminate overlaps.
          mod = copy.deepcopy(term)
          mod.destination_port.append((1024, 65535))
          mod.destination_port = mod.CollapsePortList(mod.destination_port)
        elif not all_protocols_stateful:
          errmsg = 'Established option supplied with inappropriate protocol(s)'
          raise EstablishedError('%s %s %s %s' %
                                 (errmsg, unstateful_protocols,
                                  'in term', term.name))
        break

    return mod

  def FixTermLength(self, term_name, abbreviate=False, truncate=False):
    """Return a term name which is equal or shorter than _TERM_MAX_LENGTH.

    New term is obtained in two steps. First, if allowed, automatic
    abbreviation is performed using hardcoded abbreviation table. Second,
    if allowed, term name is truncated to specified limit.

    Args:
      term_name: Name to abbreviate if necessary.
      abbreviate: Whether to allow abbreviations to shorten the length.
      truncate: Whether to allow truncation to shorten the length.
    Returns:
      A string based on term_name, that is equal or shorter than
      _TERM_MAX_LENGTH abbreviated and truncated as necessary.
    Raises:
      TermNameTooLongError: term_name cannot be abbreviated
      to be shorter than _TERM_MAX_LENGTH, or truncation is disabled.
    """
    new_term = term_name
    if abbreviate:
      # Apply abbreviations one at a time, stopping as soon as the name fits.
      for word, abbrev in self._ABBREVIATION_TABLE:
        if len(new_term) <= self._TERM_MAX_LENGTH:
          return new_term
        new_term = re.sub(word, abbrev, new_term)
    if truncate:
      new_term = new_term[:self._TERM_MAX_LENGTH]
    if len(new_term) <= self._TERM_MAX_LENGTH:
      return new_term
    raise TermNameTooLongError('Term %s (originally %s) is '
                               'too long. Limit is %d characters (vs. %d) '
                               'and no abbreviations remain or abbreviations '
                               'disabled.' %
                               (new_term, term_name,
                                self._TERM_MAX_LENGTH,
                                len(new_term)))
+
+
def AddRepositoryTags(prefix=''):
  """Add repository tagging into the output.

  Args:
    prefix: comment delimiter, if needed, to appear before tags

  Returns:
    list of text lines containing revision data
  """
  # Assemble the '$Id:$' / '$Date:$' markers from separate '$' pieces so a
  # revision control system never expands them inside this source file.
  dollar = '$'
  return ['%s%sId:%s' % (prefix, dollar, dollar),
          '%s%sDate:%s' % (prefix, dollar, dollar)]
+
+
def WrapWords(textlist, size, joiner='\n'):
  """Insert breaks into the listed strings at specified width.

  Note: entries longer than `size` are also rewritten in place inside
  `textlist` with the joiner inserted at the break points.

  Args:
    textlist: a list of text strings
    size: width of reformated strings
    joiner: text to insert at break. eg. '\n  ' to add an indent.
  Returns:
    list of strings
  """
  # \S*? is a non greedy match to collect words of len > size.
  # .{1,%d} collects words and spaces up to size in length.
  # (?:\s|\Z) ensures that we break on spaces or at end of string.
  breaker = re.compile(r'(\S*?.{1,%d}(?:\s|\Z))' % size)
  wrapped = []
  for position, text in enumerate(textlist):
    if len(text) > size:
      # insert joiner into the string at appropriate places.
      text = joiner.join(breaker.findall(text))
      textlist[position] = text
    # avoid empty comment lines
    wrapped.extend(piece.strip() for piece in text.strip().split(joiner)
                   if piece)
  return wrapped
diff --git a/lib/cisco.py b/lib/cisco.py
new file mode 100644
index 0000000..0156ce9
--- /dev/null
+++ b/lib/cisco.py
@@ -0,0 +1,744 @@
+#!/usr/bin/python
+#
+# Copyright 2011 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Cisco generator."""
+
+__author__ = 'pmoody@google.com (Peter Moody)'
+__author__ = 'watson@google.com (Tony Watson)'
+
+import datetime
+import logging
+import re
+
+from third_party import ipaddr
+import aclgenerator
+import nacaddr
+
+
+# Maps policy action keywords onto the corresponding Cisco ACL keyword.
+_ACTION_TABLE = {
+  'accept': 'permit',
+  'deny': 'deny',
+  'reject': 'deny',
+  'next': '! next',
+  'reject-with-tcp-rst': 'deny',  # tcp rst not supported
+}
+
+
+# generic error class
+class Error(Exception):
+  """Base error class for this module."""
+  pass
+
+
+class UnsupportedCiscoAccessListError(Error):
+  """Raised when we're given a non-named access list."""
+  pass
+
+
+class StandardAclTermError(Error):
+  """Raised when there is a problem in a standard access list."""
+  pass
+
+
+class TermStandard(object):
+  """A single standard ACL Term."""
+
+  def __init__(self, term, filter_name):
+    """Validate and wrap a policy term for standard-ACL rendering.
+
+    Args:
+      term: policy.Term object to render.
+      filter_name: name (or number) of the access list this term belongs to.
+
+    Raises:
+      StandardAclTermError: if the term uses any feature a standard ACL
+        cannot express (protocols, icmp-types, addresses, options, ports,
+        counters).
+    """
+    self.term = term
+    self.filter_name = filter_name
+    self.options = []
+    self.logstring = ''
+    # sanity checking for standard acls
+    if self.term.protocol:
+      raise StandardAclTermError(
+          'Standard ACLs cannot specify protocols')
+    if self.term.icmp_type:
+      raise StandardAclTermError(
+          'ICMP Type specifications are not permissible in standard ACLs')
+    if (self.term.source_address
+        or self.term.source_address_exclude
+        or self.term.destination_address
+        or self.term.destination_address_exclude):
+      raise StandardAclTermError(
+          'Standard ACLs cannot use source or destination addresses')
+    if self.term.option:
+      raise StandardAclTermError(
+          'Standard ACLs prohibit use of options')
+    if self.term.source_port or self.term.destination_port:
+      raise StandardAclTermError(
+          'Standard ACLs prohibit use of port numbers')
+    if self.term.counter:
+      raise StandardAclTermError(
+          'Counters are not implemented in standard ACLs')
+    if self.term.logging:
+      logging.warn(
+          'WARNING: Standard ACL logging is set in filter %s, term %s and '
+          'may not implemented on all IOS versions', self.filter_name,
+          self.term.name)
+      self.logstring = ' log'
+
+  def __str__(self):
+    """Render this term as standard-ACL configuration text.
+
+    Returns:
+      the config lines for this term, or '' when the term is excluded for
+      this platform.
+    """
+    # Verify platform specific terms. Skip whole term if platform does not
+    # match.
+    if self.term.platform:
+      if 'cisco' not in self.term.platform:
+        return ''
+    if self.term.platform_exclude:
+      if 'cisco' in self.term.platform_exclude:
+        return ''
+
+    ret_str = []
+
+    # Term verbatim output - this will skip over normal term creation
+    # code by returning early.  Warnings provided in policy.py.
+    if self.term.verbatim:
+      for next_verbatim in self.term.verbatim:
+        if next_verbatim.value[0] == 'cisco':
+          ret_str.append(str(next_verbatim.value[1]))
+      return '\n'.join(ret_str)
+
+    # Standard ACLs are IPv4-only, so drop any IPv6 entries up front.
+    v4_addresses = [x for x in self.term.address if type(x) != nacaddr.IPv6]
+    # Numbered filters use the old-style 'access-list <number> ...' syntax;
+    # named filters (else branch below) emit bare remark/permit lines.
+    if self.filter_name.isdigit():
+      ret_str.append('access-list %s remark %s' % (self.filter_name,
+                                                   self.term.name))
+
+      comment_max_width = 70
+      comments = aclgenerator.WrapWords(self.term.comment, comment_max_width)
+      if comments and comments[0]:
+        for comment in comments:
+          ret_str.append('access-list %s remark %s' % (self.filter_name,
+                                                       comment))
+
+      action = _ACTION_TABLE.get(str(self.term.action[0]))
+      if v4_addresses:
+        for addr in v4_addresses:
+          # A /32 is rendered as a bare host address, wider prefixes as
+          # network + hostmask (Cisco wildcard mask form).
+          if addr.prefixlen == 32:
+            ret_str.append('access-list %s %s %s%s' % (self.filter_name,
+                                                       action,
+                                                       addr.ip,
+                                                       self.logstring))
+          else:
+            ret_str.append('access-list %s %s %s %s%s' % (self.filter_name,
+                                                          action,
+                                                          addr.network,
+                                                          addr.hostmask,
+                                                          self.logstring))
+      else:
+        ret_str.append('access-list %s %s %s%s' % (self.filter_name, action,
+                                                   'any', self.logstring))
+
+    else:
+      ret_str.append('remark ' + self.term.name)
+      comment_max_width = 70
+      comments = aclgenerator.WrapWords(self.term.comment, comment_max_width)
+      if comments and comments[0]:
+        for comment in comments:
+          ret_str.append('remark ' + str(comment))
+
+      action = _ACTION_TABLE.get(str(self.term.action[0]))
+      if v4_addresses:
+        for addr in v4_addresses:
+          if addr.prefixlen == 32:
+            ret_str.append(' %s %s%s' % (action, addr.ip, self.logstring))
+          else:
+            ret_str.append(' %s %s %s%s' % (action, addr.network,
+                                            addr.hostmask, self.logstring))
+      else:
+        ret_str.append(' %s %s%s' % (action, 'any', self.logstring))
+
+    return '\n'.join(ret_str)
+
+
+class ObjectGroup(object):
+  """Used for printing out the object group definitions.
+
+  Since the ports don't store the token name information, we have
+  to fudge their names.  Ports will be written out like
+
+    object-group ip port <low_port>-<high_port>
+      range <low-port> <high-port>
+    exit
+
+  whereas the addresses can be written as
+
+    object-group ip address first-term-source-address
+      172.16.0.0
+      172.20.0.0 255.255.0.0
+      172.22.0.0 255.128.0.0
+      172.24.0.0
+      172.28.0.0
+    exit
+  """
+
+  def __init__(self):
+    self.filter_name = ''
+    self.terms = []
+
+  @property
+  def valid(self):
+    # True once at least one term has been collected.
+    # pylint: disable-msg=C6411
+    return len(self.terms) > 0
+    # pylint: enable-msg=C6411
+
+  def AddTerm(self, term):
+    """Collect a policy term whose addresses/ports need object-groups."""
+    self.terms.append(term)
+
+  def AddName(self, filter_name):
+    """Record the name of the filter these object-groups belong to."""
+    self.filter_name = filter_name
+
+  def __str__(self):
+    """Render all collected object-group definitions as config text."""
+    ret_str = ['\n']
+    # Track emitted group names so each address/port group appears once.
+    addresses = {}
+    ports = {}
+
+    for term in self.terms:
+      # I don't have an easy way get the token name used in the pol file
+      # w/o reading the pol file twice (with some other library) or doing
+      # some other ugly hackery. Instead, the entire block of source and dest
+      # addresses for a given term is given a unique, computable name which
+      # is not related to the NETWORK.net token name.  that's what you get
+      # for using cisco, which has decided to implement its own meta language.
+
+      # source address
+      saddrs = term.GetAddressOfVersion('source_address', 4)
+      # check to see if we've already seen this address.
+      if saddrs and saddrs[0].parent_token not in addresses:
+        addresses[saddrs[0].parent_token] = True
+        ret_str.append('object-group ip address %s' % saddrs[0].parent_token)
+        for addr in saddrs:
+          ret_str.append(' %s %s' % (addr.ip, addr.netmask))
+        ret_str.append('exit\n')
+
+      # destination address
+      daddrs = term.GetAddressOfVersion('destination_address', 4)
+      # check to see if we've already seen this address
+      if daddrs and daddrs[0].parent_token not in addresses:
+        addresses[daddrs[0].parent_token] = True
+        ret_str.append('object-group ip address %s' % daddrs[0].parent_token)
+        for addr in term.GetAddressOfVersion('destination_address', 4):
+          ret_str.append(' %s %s' % (addr.ip, addr.netmask))
+        ret_str.append('exit\n')
+
+      # source port
+      for port in term.source_port + term.destination_port:
+        if not port:
+          continue
+        # Port groups are keyed (and named) by their low-high range.
+        port_key = '%s-%s' % (port[0], port[1])
+        if port_key not in ports.keys():
+          ports[port_key] = True
+          ret_str.append('object-group ip port %s' % port_key)
+          if port[0] != port[1]:
+            ret_str.append(' range %d %d' % (port[0], port[1]))
+          else:
+            ret_str.append(' eq %d' % port[0])
+          ret_str.append('exit\n')
+
+    return '\n'.join(ret_str)
+
+
+class ObjectGroupTerm(aclgenerator.Term):
+  """An individual term of an object-group'd acl.
+
+  Object Group acls are very similar to extended acls in their
+  syntax except they use a meta language with address/service
+  definitions.
+
+  eg:
+
+    permit tcp first-term-source-address 179-179 ANY
+
+  where first-term-source-address, ANY and 179-179 are defined elsewhere
+  in the acl.
+  """
+
+  def __init__(self, term, filter_name):
+    self.term = term
+    self.filter_name = filter_name
+
+  def __str__(self):
+    """Render this term as object-group ACL configuration text."""
+    # Verify platform specific terms. Skip whole term if platform does not
+    # match.
+    if self.term.platform:
+      if 'cisco' not in self.term.platform:
+        return ''
+    if self.term.platform_exclude:
+      if 'cisco' in self.term.platform_exclude:
+        return ''
+
+    # NOTE(review): these two dicts are populated below but never read in
+    # this method -- verify whether they are dead code.
+    source_address_dict = {}
+    destination_address_dict = {}
+
+    ret_str = ['\n']
+    ret_str.append('remark %s' % self.term.name)
+    comment_max_width = 70
+    comments = aclgenerator.WrapWords(self.term.comment, comment_max_width)
+    if comments and comments[0]:
+      for comment in comments:
+        ret_str.append('remark %s' % str(comment))
+
+    # Term verbatim output - this will skip over normal term creation
+    # code by returning early.  Warnings provided in policy.py.
+    if self.term.verbatim:
+      for next_verbatim in self.term.verbatim:
+        if next_verbatim.value[0] == 'cisco':
+          ret_str.append(str(next_verbatim.value[1]))
+        return '\n'.join(ret_str)
+
+    # protocol
+    if not self.term.protocol:
+      protocol = ['ip']
+    else:
+      # Python 2 two-iterable map: PROTO_MAP.get(name, name), i.e. map each
+      # protocol name to its number, falling back to the name when unmapped.
+      # pylint: disable-msg=C6402
+      protocol = map(self.PROTO_MAP.get, self.term.protocol, self.term.protocol)
+      # pylint: enable-msg=C6402
+
+    # addresses
+    source_address = self.term.source_address
+    if not self.term.source_address:
+      # No source given: match anything via the well-known ANY group.
+      source_address = [nacaddr.IPv4('0.0.0.0/0', token='ANY')]
+    source_address_dict[source_address[0].parent_token] = True
+
+    destination_address = self.term.destination_address
+    if not self.term.destination_address:
+      destination_address = [nacaddr.IPv4('0.0.0.0/0', token='ANY')]
+    destination_address_dict[destination_address[0].parent_token] = True
+    # ports
+    source_port = [()]
+    destination_port = [()]
+    if self.term.source_port:
+      source_port = self.term.source_port
+    if self.term.destination_port:
+      destination_port = self.term.destination_port
+
+    # Emit one ACL line per (saddr, daddr, sport, dport, proto) combination.
+    for saddr in source_address:
+      for daddr in destination_address:
+        for sport in source_port:
+          for dport in destination_port:
+            for proto in protocol:
+              ret_str.append(
+                  self._TermletToStr(_ACTION_TABLE.get(str(
+                      self.term.action[0])), proto, saddr, sport, daddr, dport))
+
+    return '\n'.join(ret_str)
+
+  def _TermletToStr(self, action, proto, saddr, sport, daddr, dport):
+    """Output a portion of a cisco term/filter only, based on the 5-tuple."""
+    # fix addresses
+    if saddr:
+      saddr = 'addrgroup %s' % saddr
+    if daddr:
+      daddr = 'addrgroup %s' % daddr
+    # fix ports
+    if sport:
+      sport = 'portgroup %d-%d' % (sport[0], sport[1])
+    else:
+      sport = ''
+    if dport:
+      dport = 'portgroup %d-%d' % (dport[0], dport[1])
+    else:
+      dport = ''
+
+    return ' %s %s %s %s %s %s' % (
+        action, proto, saddr, sport, daddr, dport)
+
+
+class Term(aclgenerator.Term):
+  """A single ACL Term."""
+
+  def __init__(self, term, af=4):
+    """Wrap a policy term for extended-ACL rendering.
+
+    Args:
+      term: policy.Term object to render.
+      af: address family, 4 or 6.
+    """
+    self.term = term
+    self.options = []
+    # Our caller should have already verified the address family.
+    assert af in (4, 6)
+    self.af = af
+    self.text_af = self.AF_MAP_BY_NUMBER[self.af]
+
+  def __str__(self):
+    """Render this term as extended-ACL configuration text."""
+    # Verify platform specific terms. Skip whole term if platform does not
+    # match.
+    if self.term.platform:
+      if 'cisco' not in self.term.platform:
+        return ''
+    if self.term.platform_exclude:
+      if 'cisco' in self.term.platform_exclude:
+        return ''
+
+    ret_str = ['\n']
+
+    # Don't render icmpv6 protocol terms under inet, or icmp under inet6
+    if ((self.af == 6 and 'icmp' in self.term.protocol) or
+        (self.af == 4 and 'icmpv6' in self.term.protocol)):
+      ret_str.append('remark Term %s' % self.term.name)
+      ret_str.append('remark not rendered due to protocol/AF mismatch.')
+      return '\n'.join(ret_str)
+
+    ret_str.append('remark ' + self.term.name)
+    if self.term.owner:
+      self.term.comment.append('Owner: %s' % self.term.owner)
+    for comment in self.term.comment:
+      for line in comment.split('\n'):
+        # Cisco remarks are limited in length; truncate each line to 100.
+        ret_str.append('remark ' + str(line)[:100])
+
+    # Term verbatim output - this will skip over normal term creation
+    # code by returning early.  Warnings provided in policy.py.
+    if self.term.verbatim:
+      for next_verbatim in self.term.verbatim:
+        if next_verbatim.value[0] == 'cisco':
+          ret_str.append(str(next_verbatim.value[1]))
+      return '\n'.join(ret_str)
+
+    # protocol
+    if not self.term.protocol:
+      if self.af == 6:
+        protocol = ['ipv6']
+      else:
+        protocol = ['ip']
+    else:
+      # Python 2 two-iterable map: PROTO_MAP.get(name, name), i.e. map each
+      # protocol name to its number, falling back to the name when unmapped.
+      # pylint: disable-msg=C6402
+      protocol = map(self.PROTO_MAP.get, self.term.protocol, self.term.protocol)
+      # pylint: enable-msg=C6402
+
+    # source address
+    if self.term.source_address:
+      source_address = self.term.GetAddressOfVersion('source_address', self.af)
+      source_address_exclude = self.term.GetAddressOfVersion(
+          'source_address_exclude', self.af)
+      if source_address_exclude:
+        source_address = nacaddr.ExcludeAddrs(
+            source_address,
+            source_address_exclude)
+      if not source_address:
+        # Nothing of this address family survived the exclusion: warn and
+        # drop the whole term rather than emit an unmatched rule.
+        logging.warn(self.NO_AF_LOG_FORMAT.substitute(term=self.term.name,
+                                                      direction='source',
+                                                      af=self.text_af))
+        return ''
+    else:
+      # source address not set
+      source_address = ['any']
+
+    # destination address
+    if self.term.destination_address:
+      destination_address = self.term.GetAddressOfVersion(
+          'destination_address', self.af)
+      destination_address_exclude = self.term.GetAddressOfVersion(
+          'destination_address_exclude', self.af)
+      if destination_address_exclude:
+        destination_address = nacaddr.ExcludeAddrs(
+            destination_address,
+            destination_address_exclude)
+      if not destination_address:
+        logging.warn(self.NO_AF_LOG_FORMAT.substitute(term=self.term.name,
+                                                      direction='destination',
+                                                      af=self.text_af))
+        return ''
+    else:
+      # destination address not set
+      destination_address = ['any']
+
+    # options
+    opts = [str(x) for x in self.term.option]
+    if self.PROTO_MAP['tcp'] in protocol and ('tcp-established' in opts or
+                                              'established' in opts):
+      self.options.extend(['established'])
+
+    # ports
+    source_port = [()]
+    destination_port = [()]
+    if self.term.source_port:
+      source_port = self.term.source_port
+    if self.term.destination_port:
+      destination_port = self.term.destination_port
+
+    # logging
+    if self.term.logging:
+      self.options.append('log')
+
+    # icmp-types
+    icmp_types = ['']
+    if self.term.icmp_type:
+      icmp_types = self.NormalizeIcmpTypes(self.term.icmp_type,
+                                           self.term.protocol, self.af)
+
+    # Emit one ACL line per combination of the term's cross product.
+    for saddr in source_address:
+      for daddr in destination_address:
+        for sport in source_port:
+          for dport in destination_port:
+            for proto in protocol:
+              for icmp_type in icmp_types:
+                ret_str.extend(self._TermletToStr(
+                    _ACTION_TABLE.get(str(self.term.action[0])),
+                    proto,
+                    saddr,
+                    sport,
+                    daddr,
+                    dport,
+                    icmp_type,
+                    self.options))
+
+    return '\n'.join(ret_str)
+
+  def _TermletToStr(self, action, proto, saddr, sport, daddr, dport,
+                    icmp_type, option):
+    """Take the various components and turn them into a cisco acl line.
+
+    Args:
+      action: str, action
+      proto: str, protocol
+      saddr: str or ipaddr, source address
+      sport: str list or none, the source port
+      daddr: str or ipaddr, the destination address
+      dport: str list or none, the destination port
+      icmp_type: icmp-type numeric specification (if any)
+      option: list or none, optional, eg. 'logging' tokens.
+
+    Returns:
+      string of the cisco acl line, suitable for printing.
+
+    Raises:
+      UnsupportedCiscoAccessListError: When unknown icmp-types specified
+    """
+    # inet4
+    if type(saddr) is nacaddr.IPv4 or type(saddr) is ipaddr.IPv4Network:
+      if saddr.numhosts > 1:
+        saddr = '%s %s' % (saddr.ip, saddr.hostmask)
+      else:
+        saddr = 'host %s' % (saddr.ip)
+    if type(daddr) is nacaddr.IPv4 or type(daddr) is ipaddr.IPv4Network:
+      if daddr.numhosts > 1:
+        daddr = '%s %s' % (daddr.ip, daddr.hostmask)
+      else:
+        daddr = 'host %s' % (daddr.ip)
+    # inet6
+    if type(saddr) is nacaddr.IPv6 or type(saddr) is ipaddr.IPv6Network:
+      if saddr.numhosts > 1:
+        saddr = '%s' % (saddr.with_prefixlen)
+      else:
+        saddr = 'host %s' % (saddr.ip)
+    if type(daddr) is nacaddr.IPv6 or type(daddr) is ipaddr.IPv6Network:
+      if daddr.numhosts > 1:
+        daddr = '%s' % (daddr.with_prefixlen)
+      else:
+        daddr = 'host %s' % (daddr.ip)
+
+    # fix ports
+    if not sport:
+      sport = ''
+    elif sport[0] != sport[1]:
+      sport = 'range %d %d' % (sport[0], sport[1])
+    else:
+      sport = 'eq %d' % (sport[0])
+
+    if not dport:
+      dport = ''
+    elif dport[0] != dport[1]:
+      dport = 'range %d %d' % (dport[0], dport[1])
+    else:
+      dport = 'eq %d' % (dport[0])
+
+    if not option:
+      option = ['']
+
+    # Prevent UDP from appending 'established' to ACL line
+    sane_options = list(option)
+    if proto == self.PROTO_MAP['udp'] and 'established' in sane_options:
+      sane_options.remove('established')
+    ret_lines = []
+
+    # str(icmp_type) is needed to ensure 0 maps to '0' instead of FALSE
+    icmp_type = str(icmp_type)
+    if icmp_type:
+      ret_lines.append(' %s %s %s %s %s %s %s %s' % (action, proto, saddr,
+                                                     sport, daddr, dport,
+                                                     icmp_type,
+                                                     ' '.join(sane_options)
+                                                    ))
+    else:
+      ret_lines.append(' %s %s %s %s %s %s %s' % (action, proto, saddr,
+                                                  sport, daddr, dport,
+                                                  ' '.join(sane_options)
+                                                 ))
+
+    # remove any trailing spaces and replace multiple spaces with singles
+    stripped_ret_lines = [re.sub(r'\s+', ' ', x).rstrip() for x in ret_lines]
+    return stripped_ret_lines
+
+
+class Cisco(aclgenerator.ACLGenerator):
+  """A cisco policy object."""
+
+  _PLATFORM = 'cisco'
+  _DEFAULT_PROTOCOL = 'ip'
+  _SUFFIX = '.acl'
+
+  _OPTIONAL_SUPPORTED_KEYWORDS = set(['address',
+                                      'counter',
+                                      'expiration',
+                                      'logging',
+                                      'loss_priority',
+                                      'owner',
+                                      'policer',
+                                      'port',
+                                      'qos',
+                                      'routing_instance',
+                                     ])
+
+  def _TranslatePolicy(self, pol, exp_info):
+    """Translate a policy object into per-filter cisco term lists.
+
+    Args:
+      pol: policy.Policy object to translate.
+      exp_info: number of weeks ahead to warn about term expiration.
+
+    Raises:
+      UnsupportedCiscoAccessListError: on an unknown filter type or on a
+        filter name/number mismatch for standard/extended ACLs.
+    """
+    self.cisco_policies = []
+    current_date = datetime.date.today()
+    exp_info_date = current_date + datetime.timedelta(weeks=exp_info)
+
+    # a mixed filter outputs both ipv4 and ipv6 acls in the same output file
+    good_filters = ['extended', 'standard', 'object-group', 'inet6',
+                    'mixed']
+
+    for header, terms in pol.filters:
+      if self._PLATFORM not in header.platforms:
+        continue
+
+      obj_target = ObjectGroup()
+
+      filter_options = header.FilterOptions(self._PLATFORM)
+      filter_name = header.FilterName(self._PLATFORM)
+
+      # extended is the most common filter type.
+      filter_type = 'extended'
+      if len(filter_options) > 1:
+        filter_type = filter_options[1]
+
+      # check if filter type is renderable
+      if filter_type not in good_filters:
+        raise UnsupportedCiscoAccessListError(
+            'access list type %s not supported by %s (good types: %s)' % (
+                filter_type, self._PLATFORM, str(good_filters)))
+
+      filter_list = [filter_type]
+      if filter_type == 'mixed':
+        # Loop through filter and generate output for inet and inet6 in sequence
+        filter_list = ['extended', 'inet6']
+
+      for next_filter in filter_list:
+        # Numbered ACLs in 1-99 and 1300-1999 are reserved by IOS for
+        # standard ACLs, so enforce the split both ways.
+        # NOTE: range(a, b) + range(c, d) is Python 2 only (list concat).
+        if next_filter == 'extended':
+          try:
+            if int(filter_name) in range(1, 100) + range(1300, 2000):
+              raise UnsupportedCiscoAccessListError(
+                  'Access lists between 1-99 and 1300-1999 are reserved for '
+                  'standard ACLs')
+          except ValueError:
+            # Extended access list names do not have to be numbers.
+            pass
+        if next_filter == 'standard':
+          try:
+            if int(filter_name) not in range(1, 100) + range(1300, 2000):
+              raise UnsupportedCiscoAccessListError(
+                  'Standard access lists must be numeric in the range of 1-99'
+                  ' or 1300-1999.')
+          except ValueError:
+            # Standard access list names do not have to be numbers either.
+            pass
+
+        new_terms = []
+        for term in terms:
+          term.name = self.FixTermLength(term.name)
+          af = 'inet'
+          if next_filter == 'inet6':
+            af = 'inet6'
+          term = self.FixHighPorts(term, af=af)
+          if not term:
+            continue
+
+          if term.expiration:
+            if term.expiration <= exp_info_date:
+              logging.info('INFO: Term %s in policy %s expires '
+                           'in less than two weeks.', term.name, filter_name)
+            if term.expiration <= current_date:
+              logging.warn('WARNING: Term %s in policy %s is expired and '
+                           'will not be rendered.', term.name, filter_name)
+              continue
+
+          # render terms based on filter type
+          if next_filter == 'standard':
+            # keep track of sequence numbers across terms
+            new_terms.append(TermStandard(term, filter_name))
+          elif next_filter == 'extended':
+            new_terms.append(Term(term))
+          elif next_filter == 'object-group':
+            obj_target.AddTerm(term)
+            new_terms.append(ObjectGroupTerm(term, filter_name))
+          elif next_filter == 'inet6':
+            new_terms.append(Term(term, 6))
+
+        self.cisco_policies.append((header, filter_name, [next_filter],
+                                    new_terms, obj_target))
+
+  def __str__(self):
+    """Render the translated policies as a complete cisco config blob."""
+    target_header = []
+    target = []
+
+    # add the p4 tags
+    target.extend(aclgenerator.AddRepositoryTags('! '))
+
+    for (header, filter_name, filter_list, terms, obj_target
+        ) in self.cisco_policies:
+      for filter_type in filter_list:
+        # Remove any existing ACL of the same name before re-creating it so
+        # the rendered config replaces rather than appends.
+        if filter_type == 'standard':
+          if filter_name.isdigit():
+            target.append('no access-list %s' % filter_name)
+          else:
+            target.append('no ip access-list standard %s' % filter_name)
+            target.append('ip access-list standard %s' % filter_name)
+        elif filter_type == 'extended':
+          target.append('no ip access-list extended %s' % filter_name)
+          target.append('ip access-list extended %s' % filter_name)
+        elif filter_type == 'object-group':
+          obj_target.AddName(filter_name)
+          target.append('no ip access-list extended %s' % filter_name)
+          target.append('ip access-list extended %s' % filter_name)
+        elif filter_type == 'inet6':
+          target.append('no ipv6 access-list %s' % filter_name)
+          target.append('ipv6 access-list %s' % filter_name)
+        else:
+          raise UnsupportedCiscoAccessListError(
+              'access list type %s not supported by %s' % (
+                  filter_type, self._PLATFORM))
+
+        # Add the Perforce Id/Date tags, these must come after
+        # remove/re-create of the filter, otherwise config mode doesn't
+        # know where to place these remarks in the configuration.
+        if filter_name.isdigit():
+          target.extend(aclgenerator.AddRepositoryTags('access-list %s remark '
+                                                       % filter_name))
+        else:
+          target.extend(aclgenerator.AddRepositoryTags('remark '))
+
+        # add a header comment if one exists
+        for comment in header.comment:
+          for line in comment.split('\n'):
+            target.append('remark %s' % line)
+
+        # now add the terms
+        for term in terms:
+          term_str = str(term)
+          if term_str:
+            target.append(term_str)
+        target.append('\n')
+
+      # Object-group definitions must precede the ACL lines that reference
+      # them, so they are prepended to the output.
+      if obj_target.valid:
+        target = [str(obj_target)] + target
+      # ensure that the header is always first
+      target = target_header + target
+      target += ['end', '']
+    return '\n'.join(target)
diff --git a/lib/ciscoasa.py b/lib/ciscoasa.py
new file mode 100644
index 0000000..f3f92b5
--- /dev/null
+++ b/lib/ciscoasa.py
@@ -0,0 +1,454 @@
+#!/usr/bin/python
+
+
+
+"""Cisco ASA renderer."""
+
+__author__ = 'antony@slac.stanford.edu (Antonio Ceseracciu)'
+
+import datetime
+import socket
+import logging
+import re
+
+from third_party import ipaddr
+import aclgenerator
+import nacaddr
+
+
+# Maps policy action keywords onto the corresponding Cisco ASA ACL keyword.
+_ACTION_TABLE = {
+  'accept': 'permit',
+  'deny': 'deny',
+  'reject': 'deny',
+  'next': '! next',
+  'reject-with-tcp-rst': 'deny',  # tcp rst not supported
+  }
+
+
+# generic error class
+class Error(Exception):
+  """Base error class for this module."""
+  pass
+
+
+class UnsupportedCiscoAccessListError(Error):
+  """Raised when we're given a non-named access list."""
+  pass
+
+
+class StandardAclTermError(Error):
+  """Raised when there is a problem in a standard access list."""
+  pass
+
+
+class NoCiscoPolicyError(Error):
+  """Raised when a policy is errantly passed to this module for rendering."""
+  pass
+
+
+class Term(aclgenerator.Term):
+  """A single ACL Term."""
+
+
+  def __init__(self, term, filter_name, af=4):
+    """Wrap a policy term for ASA extended-ACL rendering.
+
+    Args:
+      term: policy.Term object to render.
+      filter_name: name of the access list this term belongs to.
+      af: address family, 4 or 6.
+    """
+    self.term = term
+    self.filter_name = filter_name
+    self.options = []
+    assert af in (4, 6)
+    self.af = af
+
+  def __str__(self):
+    """Render this term as ASA access-list configuration text."""
+    # Verify platform specific terms. Skip whole term if platform does not
+    # match.
+    if self.term.platform:
+      if 'ciscoasa' not in self.term.platform:
+        return ''
+    if self.term.platform_exclude:
+      if 'ciscoasa' in self.term.platform_exclude:
+        return ''
+
+    ret_str = ['\n']
+
+    # Don't render icmpv6 protocol terms under inet, or icmp under inet6
+    if ((self.af == 6 and 'icmp' in self.term.protocol) or
+        (self.af == 4 and 'icmpv6' in self.term.protocol)):
+      ret_str.append('remark Term %s' % self.term.name)
+      ret_str.append('remark not rendered due to protocol/AF mismatch.')
+      return '\n'.join(ret_str)
+
+    ret_str.append('access-list %s remark %s' % (self.filter_name,
+                                                 self.term.name))
+    if self.term.owner:
+      self.term.comment.append('Owner: %s' % self.term.owner)
+    for comment in self.term.comment:
+      for line in comment.split('\n'):
+        # Remark lines are truncated to 100 characters.
+        ret_str.append('access-list %s remark %s' % (self.filter_name,
+                                                     str(line)[:100]))
+
+    # Term verbatim output - this will skip over normal term creation
+    # code by returning early.  Warnings provided in policy.py.
+    # NOTE(review): the loop variable 'next' shadows the builtin; harmless
+    # here, but worth renaming when this code is next touched.
+    if self.term.verbatim:
+      for next in self.term.verbatim:
+        if next.value[0] == 'ciscoasa':
+          ret_str.append(str(next.value[1]))
+        return '\n'.join(ret_str)
+
+    # protocol
+    if not self.term.protocol:
+      protocol = ['ip']
+    else:
+      # fix the protocol
+      protocol = self.term.protocol
+
+    # source address
+    if self.term.source_address:
+      source_address = self.term.GetAddressOfVersion('source_address', self.af)
+      source_address_exclude = self.term.GetAddressOfVersion(
+          'source_address_exclude', self.af)
+      if source_address_exclude:
+        source_address = nacaddr.ExcludeAddrs(
+            source_address,
+            source_address_exclude)
+    else:
+      # source address not set
+      source_address = ['any']
+
+    # destination address
+    if self.term.destination_address:
+      destination_address = self.term.GetAddressOfVersion(
+          'destination_address', self.af)
+      destination_address_exclude = self.term.GetAddressOfVersion(
+          'destination_address_exclude', self.af)
+      if destination_address_exclude:
+        destination_address = nacaddr.ExcludeAddrs(
+            destination_address,
+            destination_address_exclude)
+    else:
+      # destination address not set
+      destination_address = ['any']
+
+    # options
+    # NOTE(review): 'protocol' holds protocol *names* here (e.g. 'tcp',
+    # or the default ['ip'] above), so the integer test '6 in protocol'
+    # can never be true -- confirm whether "'tcp' in protocol" was intended.
+    extra_options = []
+    for opt in [str(x) for x in self.term.option]:
+      if opt.find('tcp-established') == 0 and 6 in protocol:
+        extra_options.append('established')
+      elif opt.find('established') == 0 and 6 in protocol:
+        # only needed for TCP, for other protocols policy.py handles high-ports
+        extra_options.append('established')
+    self.options.extend(extra_options)
+
+    # ports
+    source_port = [()]
+    destination_port = [()]
+    if self.term.source_port:
+      source_port = self.term.source_port
+    if self.term.destination_port:
+      destination_port = self.term.destination_port
+
+    # logging
+    if self.term.logging:
+      self.options.append('log')
+      if 'disable' in [x.value for x in self.term.logging]:
+        self.options.append('disable')
+
+    # icmp-types
+    icmp_types = ['']
+    if self.term.icmp_type:
+      icmp_types = self.NormalizeIcmpTypes(self.term.icmp_type,
+                                           self.term.protocol, self.af)
+
+    # Emit one ACL line per combination of the term's cross product.
+    for saddr in source_address:
+      for daddr in destination_address:
+        for sport in source_port:
+          for dport in destination_port:
+            for proto in protocol:
+              for icmp_type in icmp_types:
+                # only output address family appropriate IP addresses
+                do_output = False
+                if self.af == 4:
+                  if (((type(saddr) is nacaddr.IPv4) or (saddr == 'any')) and
+                      ((type(daddr) is nacaddr.IPv4) or (daddr == 'any'))):
+                    do_output = True
+                if self.af == 6:
+                  if (((type(saddr) is nacaddr.IPv6) or (saddr == 'any')) and
+                      ((type(daddr) is nacaddr.IPv6) or (daddr == 'any'))):
+                    do_output = True
+                if do_output:
+                  ret_str.extend(self._TermletToStr(
+                      self.filter_name,
+                      _ACTION_TABLE.get(str(self.term.action[0])),
+                      proto,
+                      saddr,
+                      sport,
+                      daddr,
+                      dport,
+                      icmp_type,
+                      self.options))
+
+    return '\n'.join(ret_str)
+
+  def _TermPortToProtocol (self,portNumber,proto):
+    """Map a numeric port (or ICMP type) to its ASA keyword, if one exists.
+
+    Args:
+      portNumber: int, port number or ICMP type number.
+      proto: str, one of 'tcp', 'udp' or 'icmp' -- selects the lookup table.
+
+    Returns:
+      the ASA keyword string, or the original portNumber when no keyword
+      exists for it.
+    """
+    # NOTE: these lookup tables are rebuilt on every call; they could be
+    # module-level constants.
+    _ASA_PORTS_TCP = {
+5190: "aol",
+179: "bgp",
+19: "chargen",
+1494: "citrix-ica",
+514: "cmd",
+2748: "ctiqbe",
+13: "daytime",
+9: "discard",
+53: "domain",
+7: "echo",
+512: "exec",
+79: "finger",
+21: "ftp",
+20: "ftp-data",
+70: "gopher",
+443: "https",
+1720: "h323",
+101: "hostname",
+113: "ident",
+143: "imap4",
+194: "irc",
+750: "kerberos",
+543: "klogin",
+544: "kshell",
+389: "ldap",
+636: "ldaps",
+515: "lpd",
+513: "login",
+1352: "lotusnotes",
+139: "netbios-ssn",
+119: "nntp",
+5631: "pcanywhere-data",
+496: "pim-auto-rp",
+109: "pop2",
+110: "pop3",
+1723: "pptp",
+25: "smtp",
+1521: "sqlnet",
+22: "ssh",
+111: "sunrpc",
+49: "tacacs",
+517: "talk",
+23: "telnet",
+540: "uucp",
+43: "whois",
+80: "www",
+2049: "nfs"
+    }
+    _ASA_PORTS_UDP = {
+512: "biff",
+68: "bootpc",
+67: "bootps",
+9: "discard",
+53: "domain",
+195: "dnsix",
+7: "echo",
+500: "isakmp",
+750: "kerberos",
+434: "mobile-ip",
+42: "nameserver",
+137: "netbios-ns",
+138: "netbios-dgm",
+123: "ntp",
+5632: "pcanywhere-status",
+496: "pim-auto-rp",
+1645: "radius",
+1646: "radius-acct",
+520: "rip",
+5510: "secureid-udp",
+161: "snmp",
+162: "snmptrap",
+111: "sunrpc",
+514: "syslog",
+49: "tacacs",
+517: "talk",
+69: "tftp",
+37: "time",
+513: "who",
+177: "xdmcp",
+2049: "nfs"
+    }
+
+    _ASA_TYPES_ICMP = {
+6: "alternate-address",
+31: "conversion-error",
+8: "echo",
+0: "echo-reply",
+16: "information-reply",
+15: "information-request",
+18: "mask-reply",
+17: "mask-request",
+32: "mobile-redirect",
+12: "parameter-problem",
+5: "redirect",
+9: "router-advertisement",
+10: "router-solicitation",
+4: "source-quench",
+11: "time-exceeded",
+14: "timestamp-reply",
+13: "timestamp-request",
+30: "traceroute",
+3: "unreachable"
+    }
+
+
+    if proto == "tcp":
+      if portNumber in _ASA_PORTS_TCP:
+        return _ASA_PORTS_TCP[portNumber]
+    elif proto == "udp":
+      if portNumber in _ASA_PORTS_UDP:
+        return _ASA_PORTS_UDP[portNumber]
+    elif proto == "icmp":
+      if portNumber in _ASA_TYPES_ICMP:
+        return _ASA_TYPES_ICMP[portNumber]
+    return portNumber
+
+  def _TermletToStr(self, filter_name, action, proto, saddr, sport, daddr, dport,
+                    icmp_type, option):
+    """Take the various components and turn them into a cisco acl line.
+
+    Args:
+      filter_name: str, the name of the access list being rendered
+      action: str, action
+      proto: str, protocol
+      saddr: str or ipaddr, source address
+      sport: str list or none, the source port
+      daddr: str or ipaddr, the destination address
+      dport: str list or none, the destination port
+      icmp_type: icmp-type numeric specification (if any)
+      option: list or none, optional, eg. 'logging' tokens.
+
+    Returns:
+      string of the cisco acl line, suitable for printing.
+    """
+
+
+    # inet4
+    if type(saddr) is nacaddr.IPv4 or type(saddr) is ipaddr.IPv4Network:
+      if saddr.numhosts > 1:
+        saddr = '%s %s' % (saddr.ip, saddr.netmask)
+      else:
+        saddr = 'host %s' % (saddr.ip)
+    if type(daddr) is nacaddr.IPv4 or type(daddr) is ipaddr.IPv4Network:
+      if daddr.numhosts > 1:
+        daddr = '%s %s' % (daddr.ip, daddr.netmask)
+      else:
+        daddr = 'host %s' % (daddr.ip)
+    # inet6
+    if type(saddr) is nacaddr.IPv6 or type(saddr) is ipaddr.IPv6Network:
+      if saddr.numhosts > 1:
+        saddr = '%s/%s' % (saddr.ip, saddr.prefixlen)
+      else:
+        saddr = 'host %s' % (saddr.ip)
+    if type(daddr) is nacaddr.IPv6 or type(daddr) is ipaddr.IPv6Network:
+      if daddr.numhosts > 1:
+        daddr = '%s/%s' % (daddr.ip, daddr.prefixlen)
+      else:
+        daddr = 'host %s' % (daddr.ip)
+
+    # fix ports
+    if not sport:
+      sport = ''
+    elif sport[0] != sport[1]:
+      sport = ' range %s %s' % (self._TermPortToProtocol(sport[0],proto), self._TermPortToProtocol(sport[1],proto))
+    else:
+      sport = ' eq %s' % (self._TermPortToProtocol(sport[0],proto))
+
+    if not dport:
+      dport = ''
+    elif dport[0] != dport[1]:
+      dport = ' range %s %s' % (self._TermPortToProtocol(dport[0],proto), self._TermPortToProtocol(dport[1],proto))
+    else:
+      dport = ' eq %s' % (self._TermPortToProtocol(dport[0],proto))
+
+    if not option:
+      option = ['']
+
+    # Prevent UDP from appending 'established' to ACL line
+    sane_options = list(option)
+    if proto == 'udp' and 'established' in sane_options:
+      sane_options.remove('established')
+
+    ret_lines = []
+
+    # str(icmp_type) is needed to ensure 0 maps to '0' instead of FALSE
+    icmp_type = str(self._TermPortToProtocol(icmp_type,"icmp"))
+
+    ret_lines.append('access-list %s extended %s %s %s %s %s %s %s %s' %
+                     (filter_name, action, proto, saddr,
+                      sport, daddr, dport,
+                      icmp_type,
+                      ' '.join(sane_options)
+                     ))
+
+    # remove any trailing spaces and replace multiple spaces with singles
+    stripped_ret_lines = [re.sub('\s+', ' ', x).rstrip() for x in ret_lines]
+    return stripped_ret_lines
+
+# return 'access-list %s extended %s %s %s%s %s%s %s' % (
+# filter_name, action, proto, saddr, sport, daddr, dport, ' '.join(option))
+
+
+class CiscoASA(aclgenerator.ACLGenerator):
+  """A cisco ASA policy object."""
+
+  _PLATFORM = 'ciscoasa'
+  _DEFAULT_PROTOCOL = 'ip'
+  _SUFFIX = '.asa'
+
+  _OPTIONAL_SUPPORTED_KEYWORDS = set(['expiration',
+                                      'logging',
+                                      'owner',
+                                      ])
+
+  def _TranslatePolicy(self, pol, exp_info):
+    """Translate a policy object into per-filter ASA term strings.
+
+    Args:
+      pol: policy.Policy object to translate.
+      exp_info: number of weeks ahead to warn about term expiration.
+    """
+    self.ciscoasa_policies = []
+    current_date = datetime.date.today()
+    exp_info_date = current_date + datetime.timedelta(weeks=exp_info)
+
+    # NOTE(review): this iterates self.policy.filters and never uses the
+    # 'pol' argument, unlike the cisco generator which uses pol.filters --
+    # confirm which attribute is intended.  'filter_options' below is
+    # also assigned but never used.
+    for header, terms in self.policy.filters:
+      filter_options = header.FilterOptions('ciscoasa')
+      filter_name = header.FilterName('ciscoasa')
+
+      new_terms = []
+      # now add the terms
+      for term in terms:
+        if term.expiration:
+          if term.expiration <= exp_info_date:
+            logging.info('INFO: Term %s in policy %s expires '
+                         'in less than two weeks.', term.name, filter_name)
+          if term.expiration <= current_date:
+            logging.warn('WARNING: Term %s in policy %s is expired and '
+                         'will not be rendered.', term.name, filter_name)
+            continue
+
+        new_terms.append(str(Term(term,filter_name)))
+
+      self.ciscoasa_policies.append((header, filter_name, new_terms))
+
+  def __str__(self):
+    """Render the translated policies as a complete ASA config blob."""
+    target_header = []
+    target = []
+
+    for (header, filter_name, terms) in self.ciscoasa_policies:
+
+      # Clear any existing ACL of the same name so the rendered config
+      # replaces rather than appends.
+      target.append('clear configure access-list %s' % filter_name)
+
+      # add the p4 tags
+      target.extend(aclgenerator.AddRepositoryTags('access-list %s remark '
+                                                   % filter_name))
+
+      # add a header comment if one exists
+      for comment in header.comment:
+        for line in comment.split('\n'):
+          target.append('access-list %s remark %s' % (filter_name,line))
+
+      # now add the terms
+      for term in terms:
+        target.append(str(term))
+
+    # end for header, filter_name, filter_type...
+    return '\n'.join(target)
+
diff --git a/lib/demo.py b/lib/demo.py
new file mode 100755
index 0000000..9f35b72
--- /dev/null
+++ b/lib/demo.py
@@ -0,0 +1,241 @@
+#!/usr/bin/python
+#
+# Copyright 2011 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""Demo generator for capirca."""
+
+__author__ = 'robankeny@google.com (Robert Ankeny)'
+
+
+import datetime
+from lib import aclgenerator
+
+
class Term(aclgenerator.Term):
  """Render an individual policy term in the demo pseudo-config format.

  The __str__ method must be implemented.

  Args: term policy.Term object

  This is created to be a demo.
  """
  # Human-readable verbs used when printing a term's action.
  _ACTIONS = {'accept': 'allow',
              'deny': 'discard',
              'reject': 'say go away to',
              'next': 'pass it onto the next term',
              'reject-with-tcp-rst': 'reset'
             }

  def __init__(self, term, term_type):
    """Store the policy term and its address-family key.

    Args:
      term: policy.Term object.
      term_type: address-family key looked up in AF_MAP (e.g. 'inet').
    """
    self.term = term
    self.term_type = term_type

  def __str__(self):
    # Verify platform specific terms. Skip whole term if platform does not
    # match.
    if self.term.platform:
      if 'demo' not in self.term.platform:
        return ''
    if self.term.platform_exclude:
      if 'demo' in self.term.platform_exclude:
        return ''

    ret_str = []

    #NAME
    ret_str.append(' ' * 4 + 'Term: '+self.term.name+'{')

    #COMMENTS
    if self.term.comment:
      ret_str.append(' ')
      ret_str.append(' ' * 8 + '#COMMENTS')
      for comment in self.term.comment:
        for line in comment.split('\n'):
          ret_str.append(' ' * 8 + '#'+line)

    #SOURCE ADDRESS
    source_address = self.term.GetAddressOfVersion(
        'source_address', self.AF_MAP.get(self.term_type))
    source_address_exclude = self.term.GetAddressOfVersion(
        'source_address_exclude', self.AF_MAP.get(self.term_type))
    if source_address:
      ret_str.append(' ')
      ret_str.append(' ' * 8 + 'Source IP\'s')
      for saddr in source_address:
        ret_str.append(' ' * 8 + str(saddr))

    #SOURCE ADDRESS EXCLUDE
    if source_address_exclude:
      ret_str.append(' ')
      ret_str.append(' ' * 8 + 'Excluded Source IP\'s')
      # BUG FIX: iterate the exclude list; previously this printed the
      # source_address list under the "Excluded" heading.
      for ex in source_address_exclude:
        ret_str.append(' ' * 8 + str(ex))

    #SOURCE PORT
    if self.term.source_port:
      ret_str.append(' ')
      ret_str.append(' ' * 8 + 'Source ports')
      ret_str.append(' ' * 8 + self._Group(self.term.source_port))

    #DESTINATION
    destination_address = self.term.GetAddressOfVersion(
        'destination_address', self.AF_MAP.get(self.term_type))
    destination_address_exclude = self.term.GetAddressOfVersion(
        'destination_address_exclude', self.AF_MAP.get(self.term_type))
    if destination_address:
      ret_str.append(' ')
      ret_str.append(' ' * 8 + 'Destination IP\'s')
      for daddr in destination_address:
        ret_str.append(' ' * 8 + str(daddr))

    #DESTINATION ADDRESS EXCLUDE
    if destination_address_exclude:
      ret_str.append(' ')
      ret_str.append(' ' * 8 + 'Excluded Destination IP\'s')
      for ex in destination_address_exclude:
        ret_str.append(' ' * 8 + str(ex))

    #DESTINATION PORT
    if self.term.destination_port:
      ret_str.append(' ')
      ret_str.append(' ' * 8 + 'Destination Ports')
      ret_str.append(' ' * 8 + self._Group(self.term.destination_port))

    #PROTOCOL
    if self.term.protocol:
      ret_str.append(' ')
      ret_str.append(' ' * 8 + 'Protocol')
      ret_str.append(' ' * 8 + self._Group(self.term.protocol))

    #OPTION
    if self.term.option:
      ret_str.append(' ')
      ret_str.append(' ' * 8 + 'Options')
      for option in self.term.option:
        ret_str.append(' ' * 8 + option)

    #ACTION
    for action in self.term.action:
      ret_str.append(' ')
      ret_str.append(' ' * 8 + 'Action: '
                     + self._ACTIONS.get(str(action))+' all traffic')
    return '\n '.join(ret_str)

  def _Group(self, group):
    """Format a list of ports/protocols as a space-separated string."""
    def _FormattedGroup(el):
      # Strings are lowercased; ints pass through; (lo, hi) tuples render
      # as a single value or a 'lo-hi' range.
      if isinstance(el, str):
        return el.lower()
      elif isinstance(el, int):
        return str(el)
      elif el[0] == el[1]:
        return '%d' % el[0]
      else:
        return '%d-%d' % (el[0], el[1])
    if len(group) > 1:
      rval = ''
      for item in group:
        # BUG FIX: format each element; str(item[0]) printed only the first
        # character of string elements and dropped the end of port ranges.
        rval = rval + _FormattedGroup(item) + ' '
    else:
      rval = _FormattedGroup(group[0])
    return rval
+
+
class Demo(aclgenerator.ACLGenerator):
  """Demo rendering class.

  This class takes a policy object and renders output into
  a syntax which is not useable by routers. This class should
  only be used for testing and understanding how to create a
  generator of your own.

  Args:
    pol: policy.Policy object
  Steps to implement this library
  1) Import library in aclgen.py
  2) Create a 3 letter entry in the table in the render_filters
     function for the demo library and set it to False
  3) In the for header in policy.headers: use the previous entry
     to add an if statement to create a deep copy of the
     policy object
  4) Create an if statement that will be used if that specific
     policy object is present will pass the policy file
     onto the demo Class.
  5) The returned object can be then printed to a file using the
     do_output_filter function
  6) Create a policy file with a target set to use demo
  """
  _PLATFORM = 'demo'
  _SUFFIX = '.demo'

  _OPTIONAL_SUPPORTED_KEYWORDS = set(['expiration',])

  def _TranslatePolicy(self, pol, exp_info):
    """Translate the policy into demo-renderable filters.

    Args:
      pol: policy.Policy object.
      exp_info: number of weeks ahead to warn about expiring terms.

    Raises:
      DemoFilterError: if a term name is duplicated within a filter.
    """
    # BUG FIX: demo.py does not import logging at module level, so the
    # expiration messages below raised NameError; import it locally here.
    import logging
    current_date = datetime.date.today()
    exp_info_date = current_date + datetime.timedelta(weeks=exp_info)
    self.demo_policies = []
    for header, terms in pol.filters:
      if self._PLATFORM not in header.platforms:
        continue
      filter_options = header.FilterOptions('demo')
      filter_name = filter_options[0]
      # A second filter option, when present, names the interface scope.
      if len(filter_options) > 1:
        interface_specific = filter_options[1]
      else:
        interface_specific = 'none'
      filter_type = 'inet'
      term_names = set()
      new_terms = []
      for term in terms:
        if term.name in term_names:
          raise DemoFilterError('Duplicate term name')
        term_names.add(term.name)
        if term.expiration:
          if term.expiration <= exp_info_date:
            logging.info('INFO: Term %s in policy %s expires '
                         'in less than two weeks.', term.name, filter_name)
          if term.expiration <= current_date:
            logging.warn('WARNING: Term %s in policy %s is expired and '
                         'will not be rendered.', term.name, filter_name)
            continue
        new_terms.append(Term(term, filter_type))
      self.demo_policies.append((header, filter_name, filter_type,
                                 interface_specific, new_terms))

  def __str__(self):
    """Render all translated filters as indented pseudo-config text."""
    target = []
    for (header, filter_name, filter_type,
         interface_specific, terms) in self.demo_policies:
      target.append('Header {')
      target.append(' ' * 4 + 'Name: %s {' % filter_name)
      target.append(' ' * 8 + 'Type: %s ' % filter_type)
      for comment in header.comment:
        for line in comment.split('\n'):
          target.append(' ' * 8 + 'Comment: %s' % line)
      target.append(' ' * 8 + 'Family type: %s' % interface_specific)
      target.append(' ' * 4 + '}')
      for term in terms:
        target.append(str(term))
      target.append(' ' * 4 + '}')
      target.append(' ')
      target.append('}')
    return '\n'.join(target)
+
+
class Error(Exception):
  """Base error class for the demo generator."""
  pass
+
class DemoFilterError(Error):
  """Raised when a filter contains a duplicate term name."""
  pass
diff --git a/lib/html.py b/lib/html.py
new file mode 100755
index 0000000..5fb0bb1
--- /dev/null
+++ b/lib/html.py
@@ -0,0 +1,233 @@
+#!/usr/bin/python
+#
+# Copyright 2015 NORDUnet A/S All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""HTML generator for capirca."""
+
+__author__ = 'lundberg@nordu.net (Johan Lundberg)'
+
+
+import datetime
+import logging
+from lib import aclgenerator
+
+
class Term(aclgenerator.Term):
  """Render an individual policy term for the HTML generator.

  The __str__ method must be implemented.

  Args: term policy.Term object

  """
  # Human-readable verbs used when printing a term's action.
  _ACTIONS = {'accept': 'allow',
              'deny': 'discard',
              'reject': 'say go away to',
              'next': 'pass it onto the next term',
              'reject-with-tcp-rst': 'reset'
             }

  def __init__(self, term, term_type):
    """Store the policy term and its address-family key.

    Args:
      term: policy.Term object.
      term_type: address-family key looked up in AF_MAP (e.g. 'inet').
    """
    self.term = term
    self.term_type = term_type

  def __str__(self):
    # Verify platform specific terms. Skip whole term if platform does not
    # match.
    if self.term.platform:
      if 'html' not in self.term.platform:
        return ''
    if self.term.platform_exclude:
      if 'html' in self.term.platform_exclude:
        return ''

    ret_str = []

    #NAME
    ret_str.append(' ' * 4 + 'Term: '+self.term.name+'{')

    #COMMENTS
    if self.term.comment:
      ret_str.append(' ')
      ret_str.append(' ' * 8 + '#COMMENTS')
      for comment in self.term.comment:
        for line in comment.split('\n'):
          ret_str.append(' ' * 8 + '#'+line)

    #SOURCE ADDRESS
    source_address = self.term.GetAddressOfVersion(
        'source_address', self.AF_MAP.get(self.term_type))
    source_address_exclude = self.term.GetAddressOfVersion(
        'source_address_exclude', self.AF_MAP.get(self.term_type))
    if source_address:
      ret_str.append(' ')
      ret_str.append(' ' * 8 + 'Source IP\'s')
      for saddr in source_address:
        ret_str.append(' ' * 8 + str(saddr))

    #SOURCE ADDRESS EXCLUDE
    if source_address_exclude:
      ret_str.append(' ')
      ret_str.append(' ' * 8 + 'Excluded Source IP\'s')
      # BUG FIX: iterate the exclude list; previously this printed the
      # source_address list under the "Excluded" heading.
      for ex in source_address_exclude:
        ret_str.append(' ' * 8 + str(ex))

    #SOURCE PORT
    if self.term.source_port:
      ret_str.append(' ')
      ret_str.append(' ' * 8 + 'Source ports')
      ret_str.append(' ' * 8 + self._Group(self.term.source_port))

    #DESTINATION
    destination_address = self.term.GetAddressOfVersion(
        'destination_address', self.AF_MAP.get(self.term_type))
    destination_address_exclude = self.term.GetAddressOfVersion(
        'destination_address_exclude', self.AF_MAP.get(self.term_type))
    if destination_address:
      ret_str.append(' ')
      ret_str.append(' ' * 8 + 'Destination IP\'s')
      for daddr in destination_address:
        ret_str.append(' ' * 8 + str(daddr))

    #DESTINATION ADDRESS EXCLUDE
    if destination_address_exclude:
      ret_str.append(' ')
      ret_str.append(' ' * 8 + 'Excluded Destination IP\'s')
      for ex in destination_address_exclude:
        ret_str.append(' ' * 8 + str(ex))

    #DESTINATION PORT
    if self.term.destination_port:
      ret_str.append(' ')
      ret_str.append(' ' * 8 + 'Destination Ports')
      ret_str.append(' ' * 8 + self._Group(self.term.destination_port))

    #PROTOCOL
    if self.term.protocol:
      ret_str.append(' ')
      ret_str.append(' ' * 8 + 'Protocol')
      ret_str.append(' ' * 8 + self._Group(self.term.protocol))

    #OPTION
    if self.term.option:
      ret_str.append(' ')
      ret_str.append(' ' * 8 + 'Options')
      for option in self.term.option:
        ret_str.append(' ' * 8 + option)

    #ACTION
    for action in self.term.action:
      ret_str.append(' ')
      ret_str.append(' ' * 8 + 'Action: '
                     + self._ACTIONS.get(str(action))+' all traffic')
    return '\n '.join(ret_str)

  def _Group(self, group):
    """Format a list of ports/protocols as a space-separated string."""
    def _FormattedGroup(el):
      # Strings are lowercased; ints pass through; (lo, hi) tuples render
      # as a single value or a 'lo-hi' range.
      if isinstance(el, str):
        return el.lower()
      elif isinstance(el, int):
        return str(el)
      elif el[0] == el[1]:
        return '%d' % el[0]
      else:
        return '%d-%d' % (el[0], el[1])
    if len(group) > 1:
      rval = ''
      for item in group:
        # BUG FIX: format each element; str(item[0]) printed only the first
        # character of string elements and dropped the end of port ranges.
        rval = rval + _FormattedGroup(item) + ' '
    else:
      rval = _FormattedGroup(group[0])
    return rval
+
+
class HTML(aclgenerator.ACLGenerator):
  """HTML rendering class.

  This class takes a policy object and renders output into
  a syntax which is not useable by routers. This class should
  only be used for visualizing or documenting policies.

  Args:
    pol: policy.Policy object
  Steps to implement this library
  1) Import library in aclgen.py
  2) Create a 3 letter entry in the table in the render_filters
     function for the HTML library and set it to False
  3) In the for header in policy.headers: use the previous entry
     to add an if statement to create a deep copy of the
     policy object
  4) Create an if statement that will be used if that specific
     policy object is present will pass the policy file
     onto the HTML Class.
  5) The returned object can be then printed to a file using the
     do_output_filter function
  6) Create a policy file with a target set to use HTML
  """
  _PLATFORM = 'html'
  _SUFFIX = '.html'

  _OPTIONAL_SUPPORTED_KEYWORDS = set(['expiration',])

  def _TranslatePolicy(self, pol, exp_info):
    """Translate the policy into HTML-renderable filters.

    Args:
      pol: policy.Policy object.
      exp_info: weeks of advance warning for expiring terms.
        NOTE(review): currently unused -- unlike the demo generator, this
        class renders expired terms; confirm whether that is intended.

    Raises:
      HTMLFilterError: if a term name is duplicated within a filter.
    """
    self.html_policies = []
    for header, terms in pol.filters:
      if self._PLATFORM not in header.platforms:
        continue
      filter_options = header.FilterOptions('html')
      filter_name = filter_options[0]
      # A second filter option, when present, names the interface scope.
      if len(filter_options) > 1:
        interface_specific = filter_options[1]
      else:
        interface_specific = 'none'
      filter_type = 'inet'
      term_names = set()
      new_terms = []
      for term in terms:
        if term.name in term_names:
          raise HTMLFilterError('Duplicate term name')
        term_names.add(term.name)

        new_terms.append(Term(term, filter_type))
      self.html_policies.append((header, filter_name, filter_type,
                                 interface_specific, new_terms))

  def __str__(self):
    """Render all translated filters as indented pseudo-config text."""
    target = []
    for (header, filter_name, filter_type,
         interface_specific, terms) in self.html_policies:
      target.append('Header {')
      target.append(' ' * 4 + 'Name: %s {' % filter_name)
      target.append(' ' * 8 + 'Type: %s ' % filter_type)
      for comment in header.comment:
        for line in comment.split('\n'):
          target.append(' ' * 8 + 'Comment: %s' % line)
      target.append(' ' * 8 + 'Family type: %s' % interface_specific)
      target.append(' ' * 4 + '}')
      for term in terms:
        target.append(str(term))
      target.append(' ' * 4 + '}')
      target.append(' ')
      target.append('}')
    return '\n'.join(target)
+
+
class Error(Exception):
  """Base error class for the HTML generator."""
  pass
+
class HTMLFilterError(Error):
  """Raised when a filter contains a duplicate term name."""
  pass
diff --git a/lib/ipset.py b/lib/ipset.py
new file mode 100644
index 0000000..2ff4fbb
--- /dev/null
+++ b/lib/ipset.py
@@ -0,0 +1,200 @@
+#!/usr/bin/python
+#
+# Copyright 2013 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Ipset iptables generator. This is a subclass of Iptables generator.
+
+ipset is a system inside the Linux kernel, which can very efficiently store
+and match IPv4 and IPv6 addresses. This can be used to dramatically increase
+performance of iptables firewall.
+
+"""
+
+__author__ = 'vklimovs@google.com (Vjaceslavs Klimovs)'
+
+from string import Template
+
+import iptables
+import nacaddr
+
+
class Error(Exception):
  """Base error class for the ipset generator."""
  pass
+
+
class Term(iptables.Term):
  """Single Ipset term representation."""

  _PLATFORM = 'ipset'
  # Maximum length of an ipset set name; see _GenerateSetName.
  _SET_MAX_LENGTH = 31
  _POSTJUMP_FORMAT = None
  _PREJUMP_FORMAT = None
  _TERM_FORMAT = None
  _COMMENT_FORMAT = Template('-A $filter -m comment --comment "$comment"')
  _FILTER_TOP_FORMAT = Template('-A $filter')

  def __init__(self, *args, **kwargs):
    """Initialize the term and its per-direction address-set registry."""
    super(Term, self).__init__(*args, **kwargs)
    # This stores tuples of set name and set contents, keyed by direction.
    # For example:
    # { 'src': ('term_name', [ipaddr object, ipaddr object]),
    #   'dst': ('term_name', [ipaddr object, ipaddr object]) }
    self.addr_sets = dict()

  def _CalculateAddresses(self, src_addr_list, src_ex_addr_list,
                          dst_addr_list, dst_ex_addr_list):
    """Calculate source and destination address list for a term.

    Since ipset is very efficient at matching large number of
    addresses, we never return any exclude addresses. Instead
    least positive match is calculated for both source and destination
    addresses.

    For source and destination address list, three cases are possible.
    First case is when there is no addresses. In that case we return
    _all_ips.
    Second case is when there is strictly one address. In that case,
    we optimize by not generating a set, and it's then the only
    element of returned set.
    Third case is when there is more than one address in a set.
    In that case we generate a set and also return _all_ips. Note the
    difference to the first case where no set is actually generated.

    Args:
      src_addr_list: source address list of the term.
      src_ex_addr_list: source address exclude list of the term.
      dst_addr_list: destination address list of the term.
      dst_ex_addr_list: destination address exclude list of the term.

    Returns:
      tuple containing source address list, source exclude address list,
      destination address list, destination exclude address list in
      that order.

    """
    # Filter each list to the term's address family, fold any excludes in
    # via ExcludeAddrs, and register a named set when more than one
    # address survives.
    if not src_addr_list:
      src_addr_list = [self._all_ips]
    src_addr_list = [src_addr for src_addr in src_addr_list if
                     src_addr.version == self.AF_MAP[self.af]]
    if src_ex_addr_list:
      src_ex_addr_list = [src_ex_addr for src_ex_addr in src_ex_addr_list if
                          src_ex_addr.version == self.AF_MAP[self.af]]
      src_addr_list = nacaddr.ExcludeAddrs(src_addr_list, src_ex_addr_list)
    if len(src_addr_list) > 1:
      set_name = self._GenerateSetName(self.term.name, 'src')
      self.addr_sets['src'] = (set_name, src_addr_list)
      src_addr_list = [self._all_ips]

    if not dst_addr_list:
      dst_addr_list = [self._all_ips]
    dst_addr_list = [dst_addr for dst_addr in dst_addr_list if
                     dst_addr.version == self.AF_MAP[self.af]]
    if dst_ex_addr_list:
      dst_ex_addr_list = [dst_ex_addr for dst_ex_addr in dst_ex_addr_list if
                          dst_ex_addr.version == self.AF_MAP[self.af]]
      dst_addr_list = nacaddr.ExcludeAddrs(dst_addr_list, dst_ex_addr_list)
    if len(dst_addr_list) > 1:
      set_name = self._GenerateSetName(self.term.name, 'dst')
      self.addr_sets['dst'] = (set_name, dst_addr_list)
      dst_addr_list = [self._all_ips]
    # Exclude lists are always returned empty: exclusion has already been
    # folded into the positive match above.
    return (src_addr_list, [], dst_addr_list, [])

  def _GenerateAddressStatement(self, src_addr, dst_addr):
    """Return the address section of an individual iptables rule.

    See _CalculateAddresses documentation. Three cases are possible here,
    and they map directly to cases in _CalculateAddresses.
    First, there can be no addresses for a direction (value is _all_ips then)
    In that case we return empty string.
    Second there can be strictly one address. In that case we return single
    address match (-s or -d).
    Third case, is when the value is _all_ips but also the set for particular
    direction is present. That's when we return a set match.

    Args:
      src_addr: source address of the rule.
      dst_addr: destination address of the rule.

    Returns:
      tuple containing source and destination address statement, in
      that order.

    """
    src_addr_stmt = ''
    dst_addr_stmt = ''
    if src_addr and dst_addr:
      if src_addr == self._all_ips:
        if 'src' in self.addr_sets:
          src_addr_stmt = ('-m set --set %s src' % self.addr_sets['src'][0])
      else:
        src_addr_stmt = '-s %s/%d' % (src_addr.ip, src_addr.prefixlen)
      if dst_addr == self._all_ips:
        if 'dst' in self.addr_sets:
          dst_addr_stmt = ('-m set --set %s dst' % self.addr_sets['dst'][0])
      else:
        dst_addr_stmt = '-d %s/%d' % (dst_addr.ip, dst_addr.prefixlen)
    return (src_addr_stmt, dst_addr_stmt)

  def _GenerateSetName(self, term_name, suffix):
    """Build a set name 'term_name-suffix' within _SET_MAX_LENGTH chars.

    Args:
      term_name: name of the term the set belongs to.
      suffix: direction suffix ('src' or 'dst'; '-v6' appended for inet6).

    Returns:
      the set name, with term_name truncated if the combined name would
      exceed _SET_MAX_LENGTH.
    """
    if self.af == 'inet6':
      suffix += '-v6'
    if len(term_name) + len(suffix) + 1 > self._SET_MAX_LENGTH:
      # The slice index is negative here: it trims exactly the number of
      # characters by which the full name would exceed the limit.
      term_name = term_name[:self._SET_MAX_LENGTH -
                            (len(term_name) + len(suffix) + 1)]
    return term_name + '-' + suffix
+
+
class Ipset(iptables.Iptables):
  """Ipset generator."""
  _PLATFORM = 'ipset'
  _SET_TYPE = 'hash:net'
  _SUFFIX = '.ips'
  _TERM = Term

  def __str__(self):
    # Render the iptables rules first: rendering is what populates each
    # term's addr_sets, which the set definitions below depend on.
    rendered_rules = iptables.Iptables.__str__(self)
    lines = []
    for (_, _, _, _, terms) in self.iptables_policies:
      for term in terms:
        lines.extend(self._GenerateSetConfig(term))
    lines.append(rendered_rules)
    return '\n'.join(lines)

  def _GenerateSetConfig(self, term):
    """Generate ipset 'create'/'add' statements for one term.

    Args:
      term: input term.

    Returns:
      list of configuration lines defining the term's named sets.

    """
    lines = []
    # Reverse sort puts 'src' before 'dst'.
    for direction in sorted(term.addr_sets, reverse=True):
      set_name, addresses = term.addr_sets[direction]
      # Round the element count up to the next power of two; used for both
      # the hash size and the maximum number of elements.
      set_size = 2 ** len(addresses).bit_length()
      lines.append('create %s %s family %s hashsize %i maxelem %i' %
                   (set_name, self._SET_TYPE, term.af, set_size, set_size))
      lines.extend('add %s %s' % (set_name, address)
                   for address in addresses)
    return lines
diff --git a/lib/iptables.py b/lib/iptables.py
new file mode 100644
index 0000000..d465c74
--- /dev/null
+++ b/lib/iptables.py
@@ -0,0 +1,789 @@
+#!/usr/bin/python
+#
+# Copyright 2010 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Iptables generator."""
+
+__author__ = 'watson@google.com (Tony Watson)'
+
+import datetime
+import logging
+import nacaddr
+import re
+from string import Template
+
+import aclgenerator
+
+
+class Term(aclgenerator.Term):
+ """Generate Iptables policy terms."""
+
+ # Validate that term does not contain any fields we do not
+ # support. This prevents us from thinking that our output is
+ # correct in cases where we've omitted fields from term.
+ _PLATFORM = 'iptables'
+ _POSTJUMP_FORMAT = None
+ _PREJUMP_FORMAT = Template('-A $filter -j $term')
+ _TERM_FORMAT = Template('-N $term')
+ _COMMENT_FORMAT = Template('-A $term -m comment --comment "$comment"')
+ _FILTER_TOP_FORMAT = Template('-A $term')
+ _ACTION_TABLE = {
+ 'accept': '-j ACCEPT',
+ 'deny': '-j DROP',
+ 'reject': '-j REJECT --reject-with icmp-host-prohibited',
+ 'reject-with-tcp-rst': '-j REJECT --reject-with tcp-reset',
+ 'next': '-j RETURN'
+ }
+ _PROTO_TABLE = {
+ 'icmpv6': '-p icmpv6',
+ 'icmp': '-p icmp',
+ 'tcp': '-p tcp',
+ 'udp': '-p udp',
+ 'all': '-p all',
+ 'esp': '-p esp',
+ 'ah': '-p ah',
+ 'gre': '-p gre',
+ }
+ _TCP_FLAGS_TABLE = {
+ 'syn': 'SYN',
+ 'ack': 'ACK',
+ 'fin': 'FIN',
+ 'rst': 'RST',
+ 'urg': 'URG',
+ 'psh': 'PSH',
+ 'all': 'ALL',
+ 'none': 'NONE',
+ }
+ _KNOWN_OPTIONS_MATCHERS = {
+ # '! -f' also matches non-fragmented packets.
+ 'first-fragment': '-m u32 --u32 4&0x3FFF=0x2000',
+ 'initial': '--syn',
+ 'tcp-initial': '--syn',
+ 'sample': '',
+ }
+
+ def __init__(self, term, filter_name, trackstate, filter_action, af='inet'):
+ """Setup a new term.
+
+ Args:
+ term: A policy.Term object to represent in iptables.
+ filter_name: The name of the filter chan to attach the term to.
+ trackstate: Specifies if conntrack should be used for new connections
+ filter_action: The default action of the filter.
+ af: Which address family ('inet' or 'inet6') to apply the term to.
+
+ Raises:
+ UnsupportedFilterError: Filter is not supported.
+ """
+ self.trackstate = trackstate
+ self.term = term # term object
+ self.filter = filter_name # actual name of filter
+ self.default_action = filter_action
+ self.options = []
+ self.af = af
+
+ if af == 'inet6':
+ self._all_ips = nacaddr.IPv6('::/0')
+ self._ACTION_TABLE['reject'] = '-j REJECT --reject-with adm-prohibited'
+ else:
+ self._all_ips = nacaddr.IPv4('0.0.0.0/0')
+ self._ACTION_TABLE['reject'] = ('-j REJECT --reject-with '
+ 'icmp-host-prohibited')
+
+ self.term_name = '%s_%s' % (self.filter[:1], self.term.name)
+
+ def __str__(self):
+ # Verify platform specific terms. Skip whole term if platform does not
+ # match.
+ if self.term.platform:
+ if self._PLATFORM not in self.term.platform:
+ return ''
+ if self.term.platform_exclude:
+ if self._PLATFORM in self.term.platform_exclude:
+ return ''
+
+ ret_str = []
+
+ # Don't render icmpv6 protocol terms under inet, or icmp under inet6
+ if ((self.af == 'inet6' and 'icmp' in self.term.protocol) or
+ (self.af == 'inet' and 'icmpv6' in self.term.protocol)):
+ ret_str.append('# Term %s' % self.term.name)
+ ret_str.append('# not rendered due to protocol/AF mismatch.')
+ return '\n'.join(ret_str)
+
+ # Term verbatim output - this will skip over most normal term
+ # creation code by returning early. Warnings provided in policy.py
+ if self.term.verbatim:
+ for next_verbatim in self.term.verbatim:
+ if next_verbatim.value[0] == self._PLATFORM:
+ ret_str.append(str(next_verbatim.value[1]))
+ return '\n'.join(ret_str)
+
+ # We don't support these keywords for filtering, so unless users
+ # put in a "verbatim:: iptables" statement, any output we emitted
+ # would misleadingly suggest that we applied their filters.
+ # Instead, we fail loudly.
+ if self.term.ether_type:
+ raise UnsupportedFilterError('\n%s %s %s %s' % (
+ 'ether_type unsupported by', self._PLATFORM,
+ '\nError in term', self.term.name))
+ if self.term.address:
+ raise UnsupportedFilterError('\n%s %s %s %s %s' % (
+ 'address unsupported by', self._PLATFORM,
+ '- specify source or dest', '\nError in term:', self.term.name))
+ if self.term.port:
+ raise UnsupportedFilterError('\n%s %s %s %s %s' % (
+ 'port unsupported by', self._PLATFORM,
+ '- specify source or dest', '\nError in term:', self.term.name))
+
+ # Create a new term
+ if self._TERM_FORMAT:
+ ret_str.append(self._TERM_FORMAT.substitute(term=self.term_name))
+
+ if self._PREJUMP_FORMAT:
+ ret_str.append(self._PREJUMP_FORMAT.substitute(filter=self.filter,
+ term=self.term_name))
+
+ if self.term.owner:
+ self.term.comment.append('Owner: %s' % self.term.owner)
+ # reformat long comments, if needed
+ #
+ # iptables allows individual comments up to 256 chars.
+ # But our generator will limit a single comment line to < 120, using:
+ # max = 119 - 27 (static chars in comment command) - [length of term name]
+ comment_max_width = 92 - len(self.term_name)
+ if comment_max_width < 40:
+ comment_max_width = 40
+ comments = aclgenerator.WrapWords(self.term.comment, comment_max_width)
+ # append comments to output
+ if comments and comments[0]:
+ for line in comments:
+ if not line:
+ continue # iptables-restore does not like 0-length comments.
+ # term comments
+ ret_str.append(self._COMMENT_FORMAT.substitute(filter=self.filter,
+ term=self.term_name,
+ comment=str(line)))
+
+ # if terms does not specify action, use filter default action
+ if not self.term.action:
+ self.term.action[0].value = self.default_action
+
+ # Unsupported configuration; in the case of 'accept' or 'next', we
+ # skip the rule. In other cases, we blow up (raise an exception)
+ # to ensure that this is not considered valid configuration.
+ if self.term.source_prefix or self.term.destination_prefix:
+ if str(self.term.action[0]) not in set(['accept', 'next']):
+ raise UnsupportedFilterError('%s %s %s %s %s %s %s %s' % (
+ '\nTerm', self.term.name, 'has action', str(self.term.action[0]),
+ 'with source_prefix or destination_prefix,',
+ ' which is unsupported in', self._PLATFORM, 'iptables output.'))
+ return ('# skipped %s due to source or destination prefix rule' %
+ self.term.name)
+
+ # protocol
+ if self.term.protocol:
+ protocol = self.term.protocol
+ else:
+ protocol = ['all']
+ if self.term.protocol_except:
+ raise UnsupportedFilterError('%s %s %s' % (
+ '\n', self.term.name,
+ 'protocol_except logic not currently supported.'))
+
+ (term_saddr, exclude_saddr,
+ term_daddr, exclude_daddr) = self._CalculateAddresses(
+ self.term.source_address, self.term.source_address_exclude,
+ self.term.destination_address, self.term.destination_address_exclude)
+ if not term_saddr:
+ logging.warn(self.NO_AF_LOG_FORMAT.substitute(term=self.term.name,
+ direction='source',
+ af=self.af))
+ return ''
+ if not term_daddr:
+ logging.warn(self.NO_AF_LOG_FORMAT.substitute(term=self.term.name,
+ direction='destination',
+ af=self.af))
+ return ''
+
+ # ports
+ source_port = []
+ destination_port = []
+ if self.term.source_port:
+ source_port = self.term.source_port
+ if self.term.destination_port:
+ destination_port = self.term.destination_port
+
+ # icmp-types
+ icmp_types = ['']
+ if self.term.icmp_type:
+ icmp_types = self.NormalizeIcmpTypes(self.term.icmp_type, protocol,
+ self.af)
+
+ source_interface = ''
+ if self.term.source_interface:
+ source_interface = self.term.source_interface
+
+ destination_interface = ''
+ if self.term.destination_interface:
+ destination_interface = self.term.destination_interface
+
+ log_hits = False
+ if self.term.logging:
+ # Iptables sends logs to hosts configured syslog
+ log_hits = True
+
+ # options
+ tcp_flags = []
+ tcp_track_options = []
+ for next_opt in [str(x) for x in self.term.option]:
+ #
+ # Sanity checking and high-ports are added as appropriate in
+ # pre-processing that is done in __str__ within class Iptables.
+ # Option established will add destination port high-ports if protocol
+ # contains only tcp, udp or both. This is done earlier in class Iptables.
+ #
+ if ((next_opt.find('established') == 0 or
+ next_opt.find('tcp-established') == 0)
+ and 'ESTABLISHED' not in [x.strip() for x in self.options]):
+ if next_opt.find('tcp-established') == 0 and protocol != ['tcp']:
+ raise TcpEstablishedError('%s %s %s' % (
+ '\noption tcp-established can only be applied for proto tcp.',
+ '\nError in term:', self.term.name))
+
+ if self.trackstate:
+ # Use nf_conntrack to track state -- works with any proto
+ self.options.append('-m state --state ESTABLISHED,RELATED')
+ elif protocol == ['tcp']:
+ # Simple established-only rule for TCP: Must have ACK field
+ # (SYN/ACK or subsequent ACK), or RST and no other flags.
+ tcp_track_options = [(['ACK'], ['ACK']),
+ (['SYN', 'FIN', 'ACK', 'RST'], ['RST'])]
+
+ # Iterate through flags table, and create list of tcp-flags to append
+ for next_flag in self._TCP_FLAGS_TABLE:
+ if next_opt.find(next_flag) == 0:
+ tcp_flags.append(self._TCP_FLAGS_TABLE.get(next_flag))
+ if next_opt in self._KNOWN_OPTIONS_MATCHERS:
+ self.options.append(self._KNOWN_OPTIONS_MATCHERS[next_opt])
+ if self.term.packet_length:
+ # Policy format is "#-#", but iptables format is "#:#"
+ self.options.append('-m length --length %s' %
+ self.term.packet_length.replace('-', ':'))
+ if self.term.fragment_offset:
+ self.options.append('-m u32 --u32 4&0x1FFF=%s' %
+ self.term.fragment_offset.replace('-', ':'))
+
+ for saddr in exclude_saddr:
+ ret_str.extend(self._FormatPart(
+ '', saddr, '', '', '', '', '', '', '', '', '', '',
+ self._ACTION_TABLE.get('next')))
+ for daddr in exclude_daddr:
+ ret_str.extend(self._FormatPart(
+ '', '', '', daddr, '', '', '', '', '', '', '', '',
+ self._ACTION_TABLE.get('next')))
+
+ for saddr in term_saddr:
+ for daddr in term_daddr:
+ for icmp in icmp_types:
+ for proto in protocol:
+ for tcp_matcher in tcp_track_options or (([], []),):
+ ret_str.extend(self._FormatPart(
+ str(proto),
+ saddr,
+ source_port,
+ daddr,
+ destination_port,
+ self.options,
+ tcp_flags,
+ icmp,
+ tcp_matcher,
+ source_interface,
+ destination_interface,
+ log_hits,
+ self._ACTION_TABLE.get(str(self.term.action[0]))
+ ))
+
+ if self._POSTJUMP_FORMAT:
+ ret_str.append(self._POSTJUMP_FORMAT.substitute(filter=self.filter,
+ term=self.term_name))
+
+ return '\n'.join(str(v) for v in ret_str if v is not '')
+
def _CalculateAddresses(self, term_saddr, exclude_saddr,
                        term_daddr, exclude_daddr):
  """Calculate source and destination address list for a term.

  Chooses between two strategies for rendering address excludes:
  arithmetically subtracting them via nacaddr.ExcludeAddrs(), or keeping
  them as separate early "bailout" jump rules -- whichever produces the
  fewer output lines.  Also inverts very large address products into
  exclude form to avoid generating a huge cartesian product of rules.

  Args:
    term_saddr: source address list of the term
    exclude_saddr: source address exclude list of the term
    term_daddr: destination address list of the term
    exclude_daddr: destination address exclude list of the term

  Returns:
    tuple containing source address list, source exclude address list,
    destination address list, destination exclude address list in
    that order

  """
  # source address
  term_saddr_excluded = []
  if not term_saddr:
    # An empty source list means "any"; use the family's all-IPs prefix.
    term_saddr = [self._all_ips]
  if exclude_saddr:
    term_saddr_excluded.extend(nacaddr.ExcludeAddrs(term_saddr,
                                                    exclude_saddr))

  # destination address
  term_daddr_excluded = []
  if not term_daddr:
    term_daddr = [self._all_ips]
  if exclude_daddr:
    term_daddr_excluded.extend(nacaddr.ExcludeAddrs(term_daddr,
                                                    exclude_daddr))

  # Just to be safe, always have a result of at least 1 to avoid * by zero
  # returning incorrect results (10src*10dst=100, but 10src*0dst=0, not 10)
  bailout_count = len(exclude_saddr) + len(exclude_daddr) + (
      (len(self.term.source_address) or 1) *
      (len(self.term.destination_address) or 1))
  exclude_count = ((len(term_saddr_excluded) or 1) *
                   (len(term_daddr_excluded) or 1))

  # Use bailout jumps for excluded addresses if it results in fewer output
  # lines than nacaddr.ExcludeAddrs() method.
  if exclude_count < bailout_count:
    # Subtracted lists are cheaper: drop the bailout excludes and swap in
    # the pre-subtracted address lists.
    exclude_saddr = []
    exclude_daddr = []
    if term_saddr_excluded:
      term_saddr = term_saddr_excluded
    if term_daddr_excluded:
      term_daddr = term_daddr_excluded

  # With many sources and destinations, iptables needs to generate the
  # cartesian product of sources and destinations. If there are no
  # exclude rules, this can instead be written as exclude [0/0 -
  # srcs], exclude [0/0 - dsts].
  v4_src_count = len([x for x in term_saddr if x.version == 4])
  v4_dst_count = len([x for x in term_daddr if x.version == 4])
  v6_src_count = len([x for x in term_saddr if x.version == 6])
  v6_dst_count = len([x for x in term_daddr if x.version == 6])
  num_pairs = v4_src_count * v4_dst_count + v6_src_count * v6_dst_count
  if num_pairs > 100:
    new_exclude_source = nacaddr.ExcludeAddrs([self._all_ips], term_saddr)
    new_exclude_dest = nacaddr.ExcludeAddrs([self._all_ips], term_daddr)
    # Invert the shortest list that does not already have exclude addresses
    if len(new_exclude_source) < len(new_exclude_dest) and not exclude_saddr:
      if len(new_exclude_source) + len(term_daddr) < num_pairs:
        exclude_saddr = new_exclude_source
        term_saddr = [self._all_ips]
    elif not exclude_daddr:
      if len(new_exclude_dest) + len(term_saddr) < num_pairs:
        exclude_daddr = new_exclude_dest
        term_daddr = [self._all_ips]

  # Finally, restrict all four lists to this filter's address family.
  term_saddr = [x for x in term_saddr
                if x.version == self.AF_MAP[self.af]]
  exclude_saddr = [x for x in exclude_saddr
                   if x.version == self.AF_MAP[self.af]]
  term_daddr = [x for x in term_daddr
                if x.version == self.AF_MAP[self.af]]
  exclude_daddr = [x for x in exclude_daddr
                   if x.version == self.AF_MAP[self.af]]
  return (term_saddr, exclude_saddr, term_daddr, exclude_daddr)
+
def _FormatPart(self, protocol, saddr, sport, daddr, dport, options,
                tcp_flags, icmp_type, track_flags, sint, dint, log_hits,
                action):
  """Compose one iteration of the term parts into a string.

  Args:
    protocol: The network protocol
    saddr: Source IP address
    sport: Source port numbers
    daddr: Destination IP address
    dport: Destination port numbers
    options: Optional arguments to append to our rule
    tcp_flags: Which tcp_flag arguments, if any, should be appended
    icmp_type: What icmp protocol to allow, if any
    track_flags: A tuple of ([check-flags], [set-flags]) arguments to tcp-flag
    sint: Optional source interface
    dint: Optional destination interface
    log_hits: Boolean, to log matches or not
    action: What should happen if this rule matches
  Returns:
    ret_lines: a list of fully-assembled iptables rule strings, one (or
      two, when log_hits is set) per source/destination port combination
  """
  src, dst = self._GenerateAddressStatement(saddr, daddr)

  filter_top = self._FILTER_TOP_FORMAT.substitute(filter=self.filter,
                                                  term=self.term_name)

  source_int = ''
  if sint:
    source_int = '-i %s' % sint

  destination_int = ''
  if dint:
    destination_int = '-o %s' % dint

  log_jump = ''
  if log_hits:
    log_jump = '-j LOG --log-prefix %s ' % self.term.name

  if not options:
    options = []

  proto = self._PROTO_TABLE.get(str(protocol))
  # Don't drop protocol if we don't recognize it
  if protocol and not proto:
    proto = '-p %s' % str(protocol)

  # set conntrack state to NEW, unless policy requested "nostate"
  if self.trackstate:
    already_stateful = False
    # we will add new stateful arguments only if none already exist, such
    # as from "option:: established"
    for option in options:
      if 'state' in option:
        already_stateful = True
    if not already_stateful:
      if 'ACCEPT' in action:
        # We have to permit established/related since a policy may not
        # have an existing blank permit for established/related, which
        # may be more efficient, but slightly less secure.
        # NOTE(review): this appends to the caller-supplied list in place
        # (callers pass self.options) -- confirm the caller resets it
        # between invocations.
        options.append('-m state --state NEW,ESTABLISHED,RELATED')

  # Merge explicit tcp_flags with the (check, set) pair from track_flags
  # into a single --tcp-flags matcher.
  if tcp_flags or (track_flags and track_flags[0]):
    check_fields = ','.join(sorted(set(tcp_flags + track_flags[0])))
    set_fields = ','.join(sorted(set(tcp_flags + track_flags[1])))
    flags = '--tcp-flags %s %s' % (check_fields, set_fields)
  else:
    flags = ''

  icmp_type = str(icmp_type)
  if not icmp_type:
    icmp = ''
  elif str(protocol) == 'icmpv6':
    icmp = '--icmpv6-type %s' % icmp_type
  else:
    icmp = '--icmp-type %s' % icmp_type

  # format tcp and udp ports
  sports = dports = ['']
  if sport:
    sports = self._GeneratePortStatement(sport, source=True)
  if dport:
    dports = self._GeneratePortStatement(dport, dest=True)

  ret_lines = []
  for sport in sports:
    for dport in dports:
      rval = [filter_top]
      if re.search('multiport', sport) and not re.search('multiport', dport):
        # Due to bug in iptables, use of multiport module before a single
        # port specification will result in multiport trying to consume it.
        # this is a little hack to ensure single ports are listed before
        # any multiport specification.
        dport, sport = sport, dport
      # Only emit non-empty parts, in fixed iptables argument order.
      for value in (proto, flags, sport, dport, icmp, src, dst,
                    ' '.join(options), source_int, destination_int),:
        pass
      for value in (proto, flags, sport, dport, icmp, src, dst,
                    ' '.join(options), source_int, destination_int):
        if value:
          rval.append(str(value))
      if log_jump:
        # -j LOG
        ret_lines.append(' '.join(rval+[log_jump]))
      # -j ACTION
      ret_lines.append(' '.join(rval+[action]))
  return ret_lines
+
+ def _GenerateAddressStatement(self, saddr, daddr):
+ """Return the address section of an individual iptables rule.
+
+ Args:
+ saddr: source address of the rule
+ daddr: destination address of the rule
+
+ Returns:
+ tuple containing source and destination address statement, in
+ that order
+
+ """
+ src = ''
+ dst = ''
+ if not saddr or saddr == self._all_ips:
+ src = ''
+ else:
+ src = '-s %s/%d' % (saddr.ip, saddr.prefixlen)
+ if not daddr or daddr == self._all_ips:
+ dst = ''
+ else:
+ dst = '-d %s/%d' % (daddr.ip, daddr.prefixlen)
+ return (src, dst)
+
+ def _GeneratePortStatement(self, ports, source=False, dest=False):
+ """Return the 'port' section of an individual iptables rule.
+
+ Args:
+ ports: list of ports or port ranges (pairs)
+ source: (bool) generate a source port rule
+ dest: (bool) generate a dest port rule
+
+ Returns:
+ list holding the 'port' sections of an iptables rule.
+
+ Raises:
+ BadPortsError: if too many ports are passed in, or if both 'source'
+ and 'dest' are true.
+ NotImplementedError: if both 'source' and 'dest' are true.
+ """
+ if not ports:
+ return ''
+
+ direction = '' # default: no direction / '--port'. As yet, unused.
+ if source and dest:
+ raise BadPortsError('_GeneratePortStatement called ambiguously.')
+ elif source:
+ direction = 's' # source port / '--sport'
+ elif dest:
+ direction = 'd' # dest port / '--dport'
+ else:
+ raise NotImplementedError('--port support not yet implemented.')
+
+ # Normalize ports and get accurate port count.
+ # iptables multiport module limits to 15, but we use 14 to ensure a range
+ # doesn't tip us over the limit
+ max_ports = 14
+ norm_ports = []
+ portstrings = []
+ count = 0
+ for port in ports:
+ if port[0] == port[1]:
+ norm_ports.append(str(port[0]))
+ count += 1
+ else:
+ norm_ports.append('%d:%d' % (port[0], port[1]))
+ count += 2
+ if count >= max_ports:
+ count = 0
+ portstrings.append('-m multiport --%sports %s' % (direction,
+ ','.join(norm_ports)))
+ norm_ports = []
+ if len(norm_ports) == 1:
+ portstrings.append('--%sport %s' % (direction, norm_ports[0]))
+ else:
+ portstrings.append('-m multiport --%sports %s' % (direction,
+ ','.join(norm_ports)))
+ return portstrings
+
+
class Iptables(aclgenerator.ACLGenerator):
  """Generates filters and terms from provided policy object."""

  _PLATFORM = 'iptables'
  _DEFAULT_PROTOCOL = 'all'
  _SUFFIX = ''
  # No prefix/suffix text is wrapped around rendered output by default;
  # subclasses (e.g. speedway) may override these.
  _RENDER_PREFIX = None
  _RENDER_SUFFIX = None
  _DEFAULTACTION_FORMAT = '-P %s %s'
  _DEFAULT_ACTION = 'DROP'
  _TERM = Term
  _TERM_MAX_LENGTH = 24
  _OPTIONAL_SUPPORTED_KEYWORDS = set(['counter',
                                      'destination_interface',
                                      'destination_prefix', # skips these terms
                                      'expiration',
                                      'fragment_offset',
                                      'logging',
                                      'owner',
                                      'packet_length',
                                      'policer', # safely ignored
                                      'qos',
                                      'routing_instance', # safe to skip
                                      'source_interface',
                                      'source_prefix', # skips these terms
                                     ])

  def _TranslatePolicy(self, pol, exp_info):
    """Translate a policy from objects into strings.

    Populates self.iptables_policies with one tuple per matching filter:
    (header, filter_name, filter_type, default_action, new_terms).

    Args:
      pol: policy.Policy object to translate.
      exp_info: number of weeks before term expiration at which to emit
        an informational log message.

    Raises:
      UnsupportedTargetOption: if an unrecognized option follows the
        filter name in the target definition.
      UnsupportedFilterError: if more than one address family is given.
      UnsupportedDefaultAction: if the default action is not ACCEPT/DROP.
      aclgenerator.DuplicateTermError: on duplicate term names.
    """
    self.iptables_policies = []
    current_date = datetime.date.today()
    exp_info_date = current_date + datetime.timedelta(weeks=exp_info)

    default_action = None
    good_default_actions = ['ACCEPT', 'DROP']
    good_filters = ['INPUT', 'OUTPUT', 'FORWARD']
    good_afs = ['inet', 'inet6']
    good_options = ['nostate', 'abbreviateterms', 'truncateterms']
    # NOTE(review): this flag is shared across all filters in the policy;
    # once one filter specifies 'nostate' it stays False for the filters
    # that follow -- confirm this cross-filter carry-over is intended.
    all_protocols_stateful = True

    for header, terms in pol.filters:
      filter_type = None
      if self._PLATFORM not in header.platforms:
        continue

      # FilterOptions()[0] is the filter (chain) name; the rest are options.
      filter_options = header.FilterOptions(self._PLATFORM)[1:]
      filter_name = header.FilterName(self._PLATFORM)

      if filter_name not in good_filters:
        logging.warn('Filter is generating a non-standard chain that will not '
                     'apply to traffic unless linked from INPUT, OUTPUT or '
                     'FORWARD filters. New chain name is: %s', filter_name)

      # ensure all options after the filter name are expected
      for opt in filter_options:
        if opt not in good_default_actions + good_afs + good_options:
          raise UnsupportedTargetOption('%s %s %s %s' % (
              '\nUnsupported option found in', self._PLATFORM,
              'target definition:', opt))

      # disable stateful?
      if 'nostate' in filter_options:
        all_protocols_stateful = False

      # Check for matching af
      for address_family in good_afs:
        if address_family in filter_options:
          # should not specify more than one AF in options
          if filter_type is not None:
            raise UnsupportedFilterError('%s %s %s %s' % (
                '\nMay only specify one of', good_afs, 'in filter options:',
                filter_options))
          filter_type = address_family
      if filter_type is None:
        filter_type = 'inet'

      # FORWARD chains on plain iptables default to DROP unless overridden.
      if self._PLATFORM == 'iptables' and filter_name == 'FORWARD':
        default_action = 'DROP'

      # does this policy override the default filter actions?
      for next_target in header.target:
        if next_target.platform == self._PLATFORM:
          # NOTE(review): options[0] appears to be the filter name itself,
          # hence the > 1 check -- verify against policy.Target.
          if len(next_target.options) > 1:
            for arg in next_target.options:
              if arg in good_default_actions:
                default_action = arg
      if default_action and default_action not in good_default_actions:
        raise UnsupportedDefaultAction('%s %s %s %s %s' % (
            '\nOnly', ', '.join(good_default_actions),
            'default filter action allowed;', default_action, 'used.'))

      # add the terms
      new_terms = []
      term_names = set()
      for term in terms:
        term.name = self.FixTermLength(term.name,
                                       'abbreviateterms' in filter_options,
                                       'truncateterms' in filter_options)
        if term.name in term_names:
          raise aclgenerator.DuplicateTermError(
              'You have a duplicate term: %s' % term.name)
        term_names.add(term.name)

        term = self.FixHighPorts(term, af=filter_type,
                                 all_protocols_stateful=all_protocols_stateful)
        if not term:
          continue

        if term.expiration:
          if term.expiration <= exp_info_date:
            logging.info('INFO: Term %s in policy %s expires '
                         'in less than two weeks.', term.name, filter_name)
          if term.expiration <= current_date:
            # Expired terms are dropped entirely from the output.
            logging.warn('WARNING: Term %s in policy %s is expired and '
                         'will not be rendered.', term.name, filter_name)
            continue

        new_terms.append(self._TERM(term, filter_name, all_protocols_stateful,
                                    default_action, filter_type))

      self.iptables_policies.append((header, filter_name, filter_type,
                                     default_action, new_terms))

  def __str__(self):
    """Render all translated policies as iptables rule text."""
    target = []
    pretty_platform = '%s%s' % (self._PLATFORM[0].upper(), self._PLATFORM[1:])

    if self._RENDER_PREFIX:
      target.append(self._RENDER_PREFIX)

    for (header, filter_name, filter_type, default_action, terms
        ) in self.iptables_policies:
      # Add comments for this filter
      target.append('# %s %s Policy' % (pretty_platform,
                                        header.FilterName(self._PLATFORM)))

      # reformat long text comments, if needed
      comments = aclgenerator.WrapWords(header.comment, 70)
      if comments and comments[0]:
        for line in comments:
          target.append('# %s' % line)
        target.append('#')
      # add the p4 tags
      target.extend(aclgenerator.AddRepositoryTags('# '))
      target.append('# ' + filter_type)

      # always specify the default filter states for speedway,
      # if default action policy not specified for iptables, do nothing.
      if self._PLATFORM == 'speedway':
        if not default_action:
          target.append(self._DEFAULTACTION_FORMAT % (filter_name,
                                                      self._DEFAULT_ACTION))
      if default_action:
        target.append(self._DEFAULTACTION_FORMAT % (filter_name,
                                                    default_action))
      # add the terms
      for term in terms:
        term_str = str(term)
        if term_str:
          target.append(term_str)

    if self._RENDER_SUFFIX:
      target.append(self._RENDER_SUFFIX)

    # Trailing newline on the rendered output.
    target.append('')
    return '\n'.join(target)
+
+
class Error(Exception):
  """Base error class."""


class BadPortsError(Error):
  """Too many ports for a single iptables statement."""


class UnsupportedFilterError(Error):
  """Raised when we see an inappropriate filter."""


class NoIptablesPolicyError(Error):
  """Raised when a policy is received that doesn't support iptables."""


class TcpEstablishedError(Error):
  """Raised when a term has tcp-established option but not proto tcp only."""


class EstablishedError(Error):
  """Raised when a term has established option with inappropriate protocol."""


class UnsupportedDefaultAction(Error):
  """Raised when a filter has an impermissible default action specified."""


class UnsupportedTargetOption(Error):
  """Raised when a filter target definition contains an unsupported option."""
diff --git a/lib/juniper.py b/lib/juniper.py
new file mode 100644
index 0000000..f793f34
--- /dev/null
+++ b/lib/juniper.py
@@ -0,0 +1,727 @@
+#!/usr/bin/python
+#
+# Copyright 2011 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+__author__ = ['pmoody@google.com (Peter Moody)',
+ 'watson@google.com (Tony Watson)']
+
+
+import datetime
+import logging
+
+import aclgenerator
+import nacaddr
+
+
# generic error class
class Error(Exception):
  """Base error class for the juniper generator."""
  pass


class JuniperTermPortProtocolError(Error):
  """Raised for port/protocol mismatches within a term."""
  pass


class TcpEstablishedWithNonTcp(Error):
  """Raised when tcp-established is used with protocols other than tcp."""
  pass


class JuniperDuplicateTermError(Error):
  """Raised when a policy contains a duplicate term name."""
  pass


class UnsupportedFilterError(Error):
  """Raised when we see an inappropriate filter."""
  pass


class PrecedenceError(Error):
  """Raised when a precedence value is outside the valid 0-7 range."""
  pass


class JuniperIndentationError(Error):
  """Raised when rendered config indentation would become inconsistent."""
  pass
+
+
class Config(object):
  """Config allows a configuration to be assembled easily.

  Configurations are automatically indented following Juniper's style.
  A textual representation of the config can be extracted with str().

  Attributes:
    indent: The number of leading spaces on the current line.
    tabstop: The number of spaces to indent for a new level.
  """

  def __init__(self, indent=0, tabstop=4):
    self.indent = indent
    self._initial_indent = indent
    self.tabstop = tabstop
    self.lines = []

  def __str__(self):
    # A finished config must have closed every brace it opened.
    if self.indent == self._initial_indent:
      return '\n'.join(self.lines)
    raise JuniperIndentationError(
        'Expected indent %d but got %d' % (self._initial_indent, self.indent))

  def Append(self, line, verbatim=False):
    """Append one line to the configuration.

    Args:
      line: The string to append to the config.
      verbatim: append line without adjusting indentation. Default False.
    Raises:
      JuniperIndentationError: If the indentation would be further left
        than the initial indent.  e.g. too many close braces.
    """
    if verbatim:
      self.lines.append(line)
      return

    # A closing brace dedents before its own line is written.
    if line.endswith('}'):
      self.indent -= self.tabstop
      if self.indent < self._initial_indent:
        raise JuniperIndentationError('Too many close braces.')
    self.lines.append('%s%s' % (' ' * self.indent, line.strip()))
    # An opening brace indents everything that follows it.
    if line.endswith(' {'):
      self.indent += self.tabstop
+
+
class Term(aclgenerator.Term):
  """Representation of an individual Juniper term.

  This is mostly useful for the __str__() method.

  Args:
    term: policy.Term object
    term_type: the address family for the term, one of "inet", "inet6",
      or "bridge"
  """
  # Default indentation (in spaces) applied to rendered term output.
  _DEFAULT_INDENT = 12
  # Map policy actions to their Juniper filter keywords.
  _ACTIONS = {'accept': 'accept',
              'deny': 'discard',
              'reject': 'reject',
              'next': 'next term',
              'reject-with-tcp-rst': 'reject tcp-reset'}

  # the following lookup table is used to map between the various types of
  # filters the juniper generator can render.  As new differences are
  # encountered, they should be added to this table.  Accessing members
  # of this table looks like:
  #   self._TERM_TYPE('inet').get('saddr') -> 'source-address'
  #
  # it's critical that the members of each filter type be the same, that is
  # to say that if _TERM_TYPE.get('inet').get('foo') returns something,
  # _TERM_TYPE.get('inet6').get('foo') must return the inet6 equivalent.
  _TERM_TYPE = {'inet': {'addr': 'address',
                         'saddr': 'source-address',
                         'daddr': 'destination-address',
                         'protocol': 'protocol',
                         'protocol-except': 'protocol-except',
                         'tcp-est': 'tcp-established'},
                'inet6': {'addr': 'address',
                          'saddr': 'source-address',
                          'daddr': 'destination-address',
                          'protocol': 'next-header',
                          'protocol-except': 'next-header-except',
                          'tcp-est': 'tcp-established'},
                'bridge': {'addr': 'ip-address',
                           'saddr': 'ip-source-address',
                           'daddr': 'ip-destination-address',
                           'protocol': 'ip-protocol',
                           'protocol-except': 'ip-protocol-except',
                           'tcp-est': 'tcp-flags "(ack|rst)"'}}

  def __init__(self, term, term_type):
    """Initialize the term renderer.

    Args:
      term: policy.Term object to render.
      term_type: address family; must be a key of _TERM_TYPE.

    Raises:
      ValueError: if term_type is not a known filter type.
    """
    self.term = term
    self.term_type = term_type

    if term_type not in self._TERM_TYPE:
      raise ValueError('Unknown Filter Type: %s' % term_type)

    # some options need to modify the actions
    self.extra_actions = []
+
# TODO(pmoody): get rid of all of the default string concatenation here.
# eg, indent(8) + 'foo;' -> '%s%s;' % (indent(8), 'foo'). pyglint likes this
# more.
def __str__(self):
  """Render this term as a block of Juniper filter configuration.

  Returns:
    A string of JCL for this term, or '' when the term should not be
    rendered (platform mismatch, or addresses present but none of this
    term's address family).

  Raises:
    TcpEstablishedWithNonTcp: if the tcp-established option is used with
      protocols other than tcp.
    PrecedenceError: if a precedence value is outside 0..7.
  """
  # Verify platform specific terms. Skip whole term if platform does not
  # match.
  if self.term.platform:
    if 'juniper' not in self.term.platform:
      return ''
  if self.term.platform_exclude:
    if 'juniper' in self.term.platform_exclude:
      return ''

  config = Config(indent=self._DEFAULT_INDENT)
  from_str = []

  # Don't render icmpv6 protocol terms under inet, or icmp under inet6
  if ((self.term_type == 'inet6' and 'icmp' in self.term.protocol) or
      (self.term_type == 'inet' and 'icmpv6' in self.term.protocol)):
    config.Append('/* Term %s' % self.term.name)
    config.Append('** not rendered due to protocol/AF mismatch.')
    config.Append('*/')
    return str(config)

  # comment
  # this deals just fine with multi line comments, but we could probably
  # output them a little cleaner; do things like make sure the
  # len(output) < 80, etc.
  if self.term.owner:
    self.term.comment.append('Owner: %s' % self.term.owner)
  if self.term.comment:
    config.Append('/*')
    for comment in self.term.comment:
      for line in comment.split('\n'):
        config.Append('** ' + line)
    config.Append('*/')

  # Term verbatim output - this will skip over normal term creation
  # code. Warning generated from policy.py if appropriate.
  if self.term.verbatim:
    for next_term in self.term.verbatim:
      if next_term.value[0] == 'juniper':
        config.Append(str(next_term.value[1]), verbatim=True)
    return str(config)

  # Helper for per-address-family keywords.
  family_keywords = self._TERM_TYPE.get(self.term_type)

  # option
  # this is going to be a little ugly b/c there are a few little messed
  # up options we can deal with.
  if self.term.option:
    for opt in [str(x) for x in self.term.option]:
      # there should be a better way to search the array of protocols
      if opt.startswith('sample'):
        self.extra_actions.append('sample')

      # only append tcp-established for option established when
      # tcp is the only protocol, otherwise other protos break on juniper
      elif opt.startswith('established'):
        if self.term.protocol == ['tcp']:
          # Check for duplicates using the per-family keyword so the test
          # also holds for the bridge family, whose tcp-established
          # keyword is not the literal string 'tcp-established'.
          if family_keywords['tcp-est'] + ';' not in from_str:
            from_str.append(family_keywords['tcp-est'] + ';')

      # if tcp-established specified, but more than just tcp is included
      # in the protocols, raise an error
      elif opt.startswith('tcp-established'):
        flag = family_keywords['tcp-est'] + ';'
        if self.term.protocol == ['tcp']:
          if flag not in from_str:
            from_str.append(flag)
        else:
          raise TcpEstablishedWithNonTcp(
              'tcp-established can only be used with tcp protocol in term %s'
              % self.term.name)
      elif opt.startswith('rst'):
        from_str.append('tcp-flags "rst";')
      elif opt.startswith('initial') and 'tcp' in self.term.protocol:
        from_str.append('tcp-initial;')
      elif opt.startswith('first-fragment'):
        from_str.append('first-fragment;')

      # we don't have a special way of dealing with this, so we output it and
      # hope the user knows what they're doing.
      else:
        from_str.append('%s;' % opt)

  # term name
  config.Append('term %s {' % self.term.name)

  # a default action term doesn't have any from { clause
  has_match_criteria = (self.term.address or
                        self.term.destination_address or
                        self.term.destination_prefix or
                        self.term.destination_port or
                        self.term.precedence or
                        self.term.protocol or
                        self.term.protocol_except or
                        self.term.port or
                        self.term.source_address or
                        self.term.source_prefix or
                        self.term.source_port or
                        self.term.ether_type or
                        self.term.traffic_type)

  if has_match_criteria:
    config.Append('from {')

    term_af = self.AF_MAP.get(self.term_type)

    # address
    address = self.term.GetAddressOfVersion('address', term_af)
    if address:
      config.Append('%s {' % family_keywords['addr'])
      for addr in address:
        config.Append('%s;%s' % (addr, self._Comment(addr)))
      config.Append('}')
    elif self.term.address:
      # Term has addresses, but none of this family: skip the whole term.
      # NOTE(review): this template call omits the 'direction' key that
      # the source/destination branches below supply -- confirm the
      # template tolerates that.
      logging.warn(self.NO_AF_LOG_FORMAT.substitute(term=self.term.name,
                                                    af=self.term_type))
      return ''

    # source address
    source_address, source_address_exclude = self._MinimizePrefixes(
        self.term.GetAddressOfVersion('source_address', term_af),
        self.term.GetAddressOfVersion('source_address_exclude', term_af))

    if source_address:
      config.Append('%s {' % family_keywords['saddr'])
      for addr in source_address:
        config.Append('%s;%s' % (addr, self._Comment(addr)))
      for addr in source_address_exclude:
        config.Append('%s except;%s' % (
            addr, self._Comment(addr, exclude=True)))
      config.Append('}')
    elif self.term.source_address:
      logging.warn(self.NO_AF_LOG_FORMAT.substitute(term=self.term.name,
                                                    direction='source',
                                                    af=self.term_type))
      return ''

    # destination address
    destination_address, destination_address_exclude = self._MinimizePrefixes(
        self.term.GetAddressOfVersion('destination_address', term_af),
        self.term.GetAddressOfVersion('destination_address_exclude', term_af))

    if destination_address:
      config.Append('%s {' % family_keywords['daddr'])
      for addr in destination_address:
        config.Append('%s;%s' % (addr, self._Comment(addr)))
      for addr in destination_address_exclude:
        config.Append('%s except;%s' % (
            addr, self._Comment(addr, exclude=True)))
      config.Append('}')
    elif self.term.destination_address:
      logging.warn(self.NO_AF_LOG_FORMAT.substitute(term=self.term.name,
                                                    direction='destination',
                                                    af=self.term_type))
      return ''

    # source prefix list
    if self.term.source_prefix:
      config.Append('source-prefix-list {')
      for pfx in self.term.source_prefix:
        config.Append(pfx + ';')
      config.Append('}')

    # destination prefix list
    if self.term.destination_prefix:
      config.Append('destination-prefix-list {')
      for pfx in self.term.destination_prefix:
        config.Append(pfx + ';')
      config.Append('}')

    # protocol
    if self.term.protocol:
      config.Append(family_keywords['protocol'] +
                    ' ' + self._Group(self.term.protocol))

    # protocol-except
    if self.term.protocol_except:
      config.Append(family_keywords['protocol-except'] + ' '
                    + self._Group(self.term.protocol_except))

    # port
    if self.term.port:
      config.Append('port %s' % self._Group(self.term.port))

    # source port
    if self.term.source_port:
      config.Append('source-port %s' % self._Group(self.term.source_port))

    # destination port
    if self.term.destination_port:
      config.Append('destination-port %s' %
                    self._Group(self.term.destination_port))

    # append any options belonging in the from {} section
    for next_str in from_str:
      config.Append(next_str)

    # packet length
    if self.term.packet_length:
      config.Append('packet-length %s;' % self.term.packet_length)

    # fragment offset
    if self.term.fragment_offset:
      config.Append('fragment-offset %s;' % self.term.fragment_offset)

    # icmp-types
    icmp_types = ['']
    if self.term.icmp_type:
      icmp_types = self.NormalizeIcmpTypes(self.term.icmp_type,
                                           self.term.protocol, self.term_type)
    if icmp_types != ['']:
      config.Append('icmp-type %s' % self._Group(icmp_types))

    if self.term.ether_type:
      config.Append('ether-type %s' %
                    self._Group(self.term.ether_type))

    if self.term.traffic_type:
      config.Append('traffic-type %s' %
                    self._Group(self.term.traffic_type))

    if self.term.precedence:
      # precedence may be a single integer, or a space separated list
      policy_precedences = set()
      # precedence values may only be 0 through 7
      for precedence in self.term.precedence:
        if int(precedence) in range(0, 8):
          policy_precedences.add(precedence)
        else:
          raise PrecedenceError('Precedence value %s is out of bounds in %s' %
                                (precedence, self.term.name))
      config.Append('precedence %s' % self._Group(sorted(policy_precedences)))

    config.Append('}')  # end from { ... }

  ####
  # ACTIONS go below here
  ####
  config.Append('then {')
  # logging
  if self.term.logging:
    for log_target in self.term.logging:
      if str(log_target) == 'local':
        config.Append('log;')
      else:
        config.Append('syslog;')

  if self.term.routing_instance:
    config.Append('routing-instance %s;' % self.term.routing_instance)

  if self.term.counter:
    config.Append('count %s;' % self.term.counter)

  if self.term.policer:
    config.Append('policer %s;' % self.term.policer)

  if self.term.qos:
    config.Append('forwarding-class %s;' % self.term.qos)

  if self.term.loss_priority:
    config.Append('loss-priority %s;' % self.term.loss_priority)

  for action in self.extra_actions:
    config.Append(action + ';')

  # If there is a routing-instance defined, skip reject/accept/etc actions.
  if not self.term.routing_instance:
    for action in self.term.action:
      config.Append(self._ACTIONS.get(action) + ';')

  config.Append('}')  # end then{...}
  config.Append('}')  # end term accept-foo-to-bar { ... }

  return str(config)
+
+ def _MinimizePrefixes(self, include, exclude):
+ """Calculate a minimal set of prefixes for Juniper match conditions.
+
+ Args:
+ include: Iterable of nacaddr objects, prefixes to match.
+ exclude: Iterable of nacaddr objects, prefixes to exclude.
+ Returns:
+ A tuple (I,E) where I and E are lists containing the minimized
+ versions of include and exclude, respectively. The order
+ of each input list is preserved.
+ """
+ # Remove any included prefixes that have EXACT matches in the
+ # excluded list. Excluded prefixes take precedence on the router
+ # regardless of the order in which the include/exclude are applied.
+ exclude_set = set(exclude)
+ include_result = [ip for ip in include if ip not in exclude_set]
+
+ # Every address match condition on a Juniper firewall filter
+ # contains an implicit "0/0 except" or "0::0/0 except". If an
+ # excluded prefix is not contained within any less-specific prefix
+ # in the included set, we can elide it. In other words, if the
+ # next-less-specific prefix is the implicit "default except",
+ # there is no need to configure the more specific "except".
+ #
+ # TODO(kbrint): this could be made more efficient with a Patricia trie.
+ exclude_result = []
+ for exclude_prefix in exclude:
+ for include_prefix in include_result:
+ if exclude_prefix in include_prefix:
+ exclude_result.append(exclude_prefix)
+ break
+
+ return include_result, exclude_result
+
def _Comment(self, addr, exclude=False, line_length=132):
  """Returns address comment field if it exists.

  Args:
    addr: nacaddr.IPv4 object (?)
    exclude: bool - address excludes have different indentations
    line_length: integer - this is the length to which a comment will be
      truncated, no matter what.  ie, a 1000 character comment will be
      truncated to line_length, and then split.  if 0, the whole comment
      is kept.  the current default of 132 is somewhat arbitrary.

  Returns:
    string

  Notes:
    This method tries to intelligently split long comments up.  If we've
    managed to summarize 4 /32's into a /30, each with a nacaddr text field
    of something like 'foobar N', normal concatenation would make the
    resulting rendered comment look in mondrian like

      source-address {
          ...
          1.1.1.0/30; /* foobar1, foobar2, foobar3, foo
          bar4 */

    b/c of the line splitting at 80 chars.  This method will split the
    comments at word breaks and make the previous example look like

      source-address {
          ....
          1.1.1.0/30; /* foobar1, foobar2, foobar3,
                      ** foobar4 */
    much cleaner.
  """
  rval = []
  # indentation, for multi-line comments, ensures that subsequent lines
  # are correctly aligned with the first line of the comment.
  indentation = 0
  if exclude:
    # fixed column overhead for an '<addr> except;' line; the address
    # text width itself is added separately via len(str(addr)).
    indentation = 21 + self._DEFAULT_INDENT + len(str(addr))
  else:
    # fixed column overhead for a plain '<addr>;' line.
    indentation = 14 + self._DEFAULT_INDENT + len(str(addr))

  # length_eol is the width of the line; b/c of the addition of the space
  # and the /* characters, it needs to be a little less than the actual width
  # to keep from wrapping
  length_eol = 77 - indentation

  if isinstance(addr, (nacaddr.IPv4, nacaddr.IPv6)):
    if addr.text:

      if line_length == 0:
        # line_length of 0 means that we don't want to truncate the comment.
        line_length = len(addr.text)

      # There should never be a /* or */, but be safe and ignore those
      # comments
      if addr.text.find('/*') >= 0 or addr.text.find('*/') >= 0:
        logging.debug('Malformed comment [%s] ignoring', addr.text)
      else:

        text = addr.text[:line_length]

        comment = ' /*'
        while text:
          # split the line at the last word break that still fits
          if len(text) > length_eol:
            new_length_eol = text[:length_eol].rfind(' ')
            if new_length_eol <= 0:
              # no word break available; hard-split at the line width
              new_length_eol = length_eol
          else:
            new_length_eol = length_eol

          # what line am I going to output?
          line = comment + ' ' + text[:new_length_eol].strip()
          # truncate what's left
          text = text[new_length_eol:]
          # setup the comment and indentation for the next go-round
          comment = ' ' * indentation + '**'

          rval.append(line)

        rval[-1] += ' */'
  else:
    # should we be paying attention to any other addr type?
    logging.debug('Ignoring non IPv4 or IPv6 address: %s', addr)
  return '\n'.join(rval)
+
+ def _Group(self, group):
+ """If 1 item return it, else return [ item1 item2 ].
+
+ Args:
+ group: a list. could be a list of strings (protocols) or a list of
+ tuples (ports)
+
+ Returns:
+ rval: a string surrounded by '[' and '];' if len(group) > 1
+ or with just ';' appended if len(group) == 1
+ """
+
+ def _FormattedGroup(el):
+ """Return the actual formatting of an individual element.
+
+ Args:
+ el: either a string (protocol) or a tuple (ports)
+
+ Returns:
+ string: either the lower()'ed string or the ports, hyphenated
+ if they're a range, or by itself if it's not.
+ """
+ if isinstance(el, str):
+ return el.lower()
+ elif isinstance(el, int):
+ return str(el)
+ # type is a tuple below here
+ elif el[0] == el[1]:
+ return '%d' % el[0]
+ else:
+ return '%d-%d' % (el[0], el[1])
+
+ if len(group) > 1:
+ rval = '[ ' + ' '.join([_FormattedGroup(x) for x in group]) + ' ];'
+ else:
+ rval = _FormattedGroup(group[0]) + ';'
+ return rval
+
+
class Juniper(aclgenerator.ACLGenerator):
  """JCL rendering class.

  This class takes a policy object and renders the output into a syntax
  which is understood by juniper routers.

  Args:
    pol: policy.Policy object
  """

  _PLATFORM = 'juniper'
  _DEFAULT_PROTOCOL = 'ip'
  _SUPPORTED_AF = set(('inet', 'inet6', 'bridge'))
  _SUFFIX = '.jcl'

  # Term keywords this generator accepts beyond the mandatory set.
  _OPTIONAL_SUPPORTED_KEYWORDS = set(['address',
                                      'counter',
                                      'destination_prefix',
                                      'ether_type',
                                      'expiration',
                                      'fragment_offset',
                                      'logging',
                                      'loss_priority',
                                      'owner',
                                      'packet_length',
                                      'policer',
                                      'port',
                                      'precedence',
                                      'protocol_except',
                                      'qos',
                                      'routing_instance',
                                      'source_prefix',
                                      'traffic_type',
                                     ])

  def _TranslatePolicy(self, pol, exp_info):
    """Transform a policy object into self.juniper_policies.

    Args:
      pol: policy.Policy object.
      exp_info: print an info message when a term is set to expire
        in that many weeks.

    Raises:
      JuniperDuplicateTermError: two terms in one filter share a name.
    """
    self.juniper_policies = []
    current_date = datetime.date.today()
    exp_info_date = current_date + datetime.timedelta(weeks=exp_info)

    for header, terms in pol.filters:
      if self._PLATFORM not in header.platforms:
        continue

      filter_options = header.FilterOptions(self._PLATFORM)
      filter_name = header.FilterName(self._PLATFORM)

      # Checks if the non-interface-specific option was specified.
      # I'm assuming that it will be specified as maximum one time, and
      # don't check for more appearances of the word in the options.
      interface_specific = 'not-interface-specific' not in filter_options[1:]

      # Remove the option so that it is not confused with a filter type
      if not interface_specific:
        filter_options.remove('not-interface-specific')

      # default to inet4 filters
      filter_type = 'inet'
      if len(filter_options) > 1:
        filter_type = filter_options[1]

      term_names = set()
      new_terms = []
      for term in terms:
        term.name = self.FixTermLength(term.name)
        if term.name in term_names:
          raise JuniperDuplicateTermError('You have multiple terms named: %s' %
                                          term.name)
        term_names.add(term.name)

        # Drop terms whose high ports cannot be represented in this
        # address family.
        term = self.FixHighPorts(term, af=filter_type)
        if not term:
          continue

        if term.expiration:
          if term.expiration <= exp_info_date:
            logging.info('INFO: Term %s in policy %s expires '
                         'in less than two weeks.', term.name, filter_name)
          if term.expiration <= current_date:
            # Fixed: logging.warn is a deprecated alias of logging.warning.
            logging.warning('WARNING: Term %s in policy %s is expired and '
                            'will not be rendered.', term.name, filter_name)
            continue

        new_terms.append(Term(term, filter_type))

      self.juniper_policies.append((header, filter_name, filter_type,
                                    interface_specific, new_terms))

  def __str__(self):
    """Render the translated policy as a JCL configuration string."""
    config = Config()

    for (header, filter_name, filter_type, interface_specific, terms
        ) in self.juniper_policies:
      # add the header information
      config.Append('firewall {')
      config.Append('family %s {' % filter_type)
      config.Append('replace:')
      config.Append('/*')

      # we want the acl to contain id and date tags, but p4 will expand
      # the tags here when we submit the generator, so we have to trick
      # p4 into not knowing these words. like taking c-a-n-d-y from a
      # baby.
      for line in aclgenerator.AddRepositoryTags('** '):
        config.Append(line)
      config.Append('**')

      for comment in header.comment:
        for line in comment.split('\n'):
          config.Append('** ' + line)
      config.Append('*/')

      config.Append('filter %s {' % filter_name)
      if interface_specific:
        config.Append('interface-specific;')

      for term in terms:
        term_str = str(term)
        if term_str:
          config.Append(term_str, verbatim=True)

      config.Append('}')  # filter { ... }
      config.Append('}')  # family inet { ... }
      config.Append('}')  # firewall { ... }

    return str(config) + '\n'
diff --git a/lib/junipersrx.py b/lib/junipersrx.py
new file mode 100644
index 0000000..c2e0676
--- /dev/null
+++ b/lib/junipersrx.py
@@ -0,0 +1,448 @@
+#!/usr/bin/python
+#
+# Copyright 2011 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""SRX generator."""
+# pylint: disable-msg=W0231
+
+__author__ = 'robankeny@google.com (Robert Ankeny)'
+
+import collections
+import datetime
+import logging
+
+import aclgenerator
+import nacaddr
+
+
class Error(Exception):
  """Base error class for the SRX generator; all module errors inherit it."""
+
+
class UnsupportedFilterError(Error):
  """Raised when filter arguments lack the required from-zone/to-zone."""
  pass
+
+
class UnsupportedHeader(Error):
  """Raised when a header requests an address family this generator lacks."""
  pass
+
+
class SRXDuplicateTermError(Error):
  """Raised when two terms in the same filter share a name."""
  pass
+
+
class SRXVerbatimError(Error):
  """Raised for terms using 'verbatim', which SRX output does not support."""
  pass
+
+
class SRXOptionError(Error):
  """Raised for terms using 'option', which is not implemented for SRX."""
  pass
+
+
class Term(aclgenerator.Term):
  """Representation of an individual SRX term.

  This is mostly useful for the __str__() method.

  Args:
    obj: a policy.Term object
    term_type: type of filter to generate, e.g. inet or inet6
    filter_options: list of remaining target options (zones)
  """

  # Maps policy action names onto the keywords emitted in the 'then' block.
  _ACTIONS = {'accept': 'permit',
              'deny': 'deny',
              'reject': 'reject',
              'count': 'count',
              'log': 'log'}

  def __init__(self, term, term_type, zones):
    # NOTE(review): the base-class __init__ is intentionally not called
    # (module header carries pylint disable-msg=W0231) -- confirm the base
    # class needs no initialization here.
    self.term = term            # the policy.Term being rendered
    self.term_type = term_type  # address family, e.g. 'inet'
    # zones is the filter_options list: ['from-zone', NAME, 'to-zone', NAME]
    self.from_zone = zones[1]
    self.to_zone = zones[3]
    self.extra_actions = []

  def __str__(self):
    """Render config output from this term object."""
    # Verify platform specific terms. Skip whole term if platform does not
    # match.
    if self.term.platform:
      if 'srx' not in self.term.platform:
        return ''
    if self.term.platform_exclude:
      if 'srx' in self.term.platform_exclude:
        return ''
    ret_str = []

    # COMMENTS
    comment_max_width = 68
    if self.term.owner:
      # Owner is surfaced as an ordinary comment line.
      self.term.comment.append('Owner: %s' % self.term.owner)
    comments = aclgenerator.WrapWords(self.term.comment, comment_max_width)
    if comments and comments[0]:
      ret_str.append(JuniperSRX.INDENT * 3 + '/*')
      for line in comments:
        ret_str.append(JuniperSRX.INDENT * 3 + line)
      ret_str.append(JuniperSRX.INDENT * 3 + '*/')

    ret_str.append(JuniperSRX.INDENT * 3 + 'policy ' + self.term.name + ' {')
    ret_str.append(JuniperSRX.INDENT * 4 + 'match {')

    # SOURCE-ADDRESS
    # Addresses are referenced by their parent token (address-set name),
    # deduplicated and sorted.
    if self.term.source_address:
      saddr_check = set()
      for saddr in self.term.source_address:
        saddr_check.add(saddr.parent_token)
      saddr_check = sorted(saddr_check)
      source_address_string = ''
      for addr in saddr_check:
        source_address_string += addr + ' '
      ret_str.append(JuniperSRX.INDENT * 5 + 'source-address [ ' +
                     source_address_string + '];')
    else:
      ret_str.append(JuniperSRX.INDENT * 5 + 'source-address any;')

    # DESTINATION-ADDRESS
    # Same dedup-and-sort as source addresses, via list -> set -> list.
    if self.term.destination_address:
      daddr_check = []
      for daddr in self.term.destination_address:
        daddr_check.append(daddr.parent_token)
      daddr_check = set(daddr_check)
      daddr_check = list(daddr_check)
      daddr_check.sort()
      destination_address_string = ''
      for addr in daddr_check:
        destination_address_string += addr + ' '
      ret_str.append(JuniperSRX.INDENT * 5 + 'destination-address [ ' +
                     destination_address_string + '];')
    else:
      ret_str.append(JuniperSRX.INDENT * 5 + 'destination-address any;')

    # APPLICATION
    # A matching '<name>-app' application (built by JuniperSRX) is referenced
    # only when the term constrains ports, protocol or icmp-type.
    if (not self.term.source_port and not self.term.destination_port and not
        self.term.icmp_type and not self.term.protocol):
      ret_str.append(JuniperSRX.INDENT * 5 + 'application any;')
    else:
      ret_str.append(JuniperSRX.INDENT * 5 + 'application ' + self.term.name +
                     '-app;')

    ret_str.append(JuniperSRX.INDENT * 4 + '}')

    # ACTIONS
    # One 'then { ... }' block is emitted per action.
    for action in self.term.action:
      ret_str.append(JuniperSRX.INDENT * 4 + 'then {')
      ret_str.append(JuniperSRX.INDENT * 5 + self._ACTIONS.get(
          str(action)) + ';')

      # LOGGING
      if self.term.logging:
        ret_str.append(JuniperSRX.INDENT * 5 + 'log {')
        ret_str.append(JuniperSRX.INDENT * 6 + 'session-init;')
        ret_str.append(JuniperSRX.INDENT * 5 + '}')
      ret_str.append(JuniperSRX.INDENT * 4 + '}')

    ret_str.append(JuniperSRX.INDENT * 3 + '}')

    # OPTIONS
    if self.term.option:
      raise SRXOptionError('Options are not implemented yet, please remove ' +
                           'from term %s' % self.term.name)

    # VERBATIM
    if self.term.verbatim:
      raise SRXVerbatimError('Verbatim is not implemented, please remove ' +
                             'the offending term %s.' % self.term.name)
    return '\n'.join(ret_str)

  # NOTE(review): _Group appears unused inside this class and mirrors
  # juniper.Term._Group -- confirm before removing.
  def _Group(self, group):
    """If 1 item return it, else return [ item1 item2 ].

    Args:
      group: a list. could be a list of strings (protocols) or a list of
             tuples (ports)

    Returns:
      rval: a string surrounded by '[' and '];' if len(group) > 1
            or with just ';' appended if len(group) == 1
    """

    def _FormattedGroup(el):
      """Return the actual formatting of an individual element.

      Args:
        el: either a string (protocol) or a tuple (ports)

      Returns:
        string: either the lower()'ed string or the ports, hyphenated
                if they're a range, or by itself if it's not.
      """
      if isinstance(el, str):
        return el.lower()
      elif isinstance(el, int):
        return str(el)
      # type is a tuple below here
      elif el[0] == el[1]:
        return '%d' % el[0]
      else:
        return '%d-%d' % (el[0], el[1])

    if len(group) > 1:
      rval = '[ ' + ' '.join([_FormattedGroup(x) for x in group]) + ' ];'
    else:
      rval = _FormattedGroup(group[0]) + ';'
    return rval
+
+
class JuniperSRX(aclgenerator.ACLGenerator):
  """SRX rendering class.

  This class takes a policy object and renders the output into a syntax
  which is understood by SRX firewalls.

  Args:
    pol: policy.Policy object
  """

  _PLATFORM = 'srx'
  _SUFFIX = '.srx'
  _SUPPORTED_AF = set(('inet',))
  _OPTIONAL_SUPPORTED_KEYWORDS = set(['expiration',
                                      'logging',
                                      'owner',
                                      'routing_instance',  # safe to skip
                                      'timeout'
                                     ])
  INDENT = '  '

  def _TranslatePolicy(self, pol, exp_info):
    """Transform a policy object into a JuniperSRX object.

    Args:
      pol: policy.Policy object
      exp_info: print a info message when a term is set to expire
                in that many weeks

    Raises:
      UnsupportedFilterError: An unsupported filter was specified
      UnsupportedHeader: A header option exists that is not understood/usable
      SRXDuplicateTermError: Two terms were found with same name in same filter
    """
    self.srx_policies = []
    self.addressbook = collections.OrderedDict()
    self.applications = []
    self.ports = []
    self.from_zone = ''
    self.to_zone = ''

    current_date = datetime.date.today()
    exp_info_date = current_date + datetime.timedelta(weeks=exp_info)

    for header, terms in pol.filters:
      if self._PLATFORM not in header.platforms:
        continue

      filter_options = header.FilterOptions(self._PLATFORM)

      # SRX policies are zone-pair based; the header must name both zones.
      if (len(filter_options) < 4 or filter_options[0] != 'from-zone' or
          filter_options[2] != 'to-zone'):
        raise UnsupportedFilterError(
            'SRX filter arguments must specify from-zone and to-zone.')
      self.from_zone = filter_options[1]
      self.to_zone = filter_options[3]

      # Optional fifth option selects the address family; default inet.
      if len(filter_options) > 4:
        filter_type = filter_options[4]
      else:
        filter_type = 'inet'
      if filter_type not in self._SUPPORTED_AF:
        raise UnsupportedHeader(
            'SRX Generator currently does not support %s as a header option' %
            (filter_type))

      term_dup_check = set()
      new_terms = []
      for term in terms:
        term.name = self.FixTermLength(term.name)
        if term.name in term_dup_check:
          raise SRXDuplicateTermError('You have a duplicate term: %s'
                                      % term.name)
        term_dup_check.add(term.name)

        if term.expiration:
          if term.expiration <= exp_info_date:
            logging.info('INFO: Term %s in policy %s>%s expires '
                         'in less than two weeks.', term.name, self.from_zone,
                         self.to_zone)
          if term.expiration <= current_date:
            # Fixed: logging.warn is a deprecated alias of logging.warning.
            # NOTE(review): expired terms are still rendered here (no
            # 'continue'), unlike juniper.py -- confirm this is intentional.
            logging.warning('WARNING: Term %s in policy %s>%s is expired.',
                            term.name, self.from_zone, self.to_zone)

        # Apply address exclusions up front; SRX has no 'except' syntax.
        for i in term.source_address_exclude:
          term.source_address = nacaddr.RemoveAddressFromList(
              term.source_address, i)
        for i in term.destination_address_exclude:
          term.destination_address = nacaddr.RemoveAddressFromList(
              term.destination_address, i)

        # Register every referenced address in the per-zone address book.
        for addr in term.source_address:
          self._BuildAddressBook(self.from_zone, addr)
        for addr in term.destination_address:
          self._BuildAddressBook(self.to_zone, addr)

        new_term = Term(term, filter_type, filter_options)
        new_terms.append(new_term)
        tmp_icmptype = new_term.NormalizeIcmpTypes(
            term.icmp_type, term.protocol, filter_type)
        # NormalizeIcmpTypes returns [''] for empty, convert to [] for eval
        normalized_icmptype = tmp_icmptype if tmp_icmptype != [''] else []
        # rewrites the protocol icmpv6 to icmp6
        if 'icmpv6' in term.protocol:
          protocol = list(term.protocol)
          protocol[protocol.index('icmpv6')] = 'icmp6'
        else:
          protocol = term.protocol
        self.applications.append({'sport': self._BuildPort(term.source_port),
                                  'dport': self._BuildPort(
                                      term.destination_port),
                                  'name': term.name,
                                  'protocol': protocol,
                                  'icmp-type': normalized_icmptype,
                                  'timeout': term.timeout})
      self.srx_policies.append((header, new_terms, filter_options))

  def _BuildAddressBook(self, zone, address):
    """Create the address book configuration entries.

    Args:
      zone: the zone these objects will reside in
      address: a naming library address object
    """
    if zone not in self.addressbook:
      self.addressbook[zone] = collections.OrderedDict()
    if address.parent_token not in self.addressbook[zone]:
      self.addressbook[zone][address.parent_token] = []
    name = address.parent_token
    # Skip addresses already recorded under this token.
    for ip in self.addressbook[zone][name]:
      if str(address) == str(ip[0]):
        return
    # Entries are named <token>_<index> in insertion order.
    counter = len(self.addressbook[zone][address.parent_token])
    name = '%s_%s' % (name, str(counter))
    self.addressbook[zone][address.parent_token].append((address, name))

  def _BuildPort(self, ports):
    """Transform specified ports into list and ranges.

    Args:
      ports: a policy terms list of ports (tuples of (low, high))

    Returns:
      port_list: list of port strings and hyphenated port-range strings
    """
    port_list = []
    for i in ports:
      if i[0] == i[1]:
        port_list.append(str(i[0]))
      else:
        port_list.append('%s-%s' % (str(i[0]), str(i[1])))
    return port_list

  def __str__(self):
    """Render the output of the JuniperSRX policy into config."""
    target = []
    target.append('security {')
    target.append(self.INDENT + 'zones {')
    for zone in self.addressbook:
      target.append(self.INDENT * 2 + 'security-zone ' + zone + ' {')
      target.append(self.INDENT * 3 + 'replace: address-book {')
      # First all individual addresses, then the address-sets grouping them.
      for group in self.addressbook[zone]:
        for address, name in self.addressbook[zone][group]:
          target.append(self.INDENT * 4 + 'address ' + name + ' ' +
                        str(address) + ';')
      for group in self.addressbook[zone]:
        target.append(self.INDENT * 4 + 'address-set ' + group + ' {')
        for address, name in self.addressbook[zone][group]:
          target.append(self.INDENT * 5 + 'address ' + name + ';')

        target.append(self.INDENT * 4 + '}')
      target.append(self.INDENT * 3 + '}')
      target.append(self.INDENT * 2 + '}')
    target.append(self.INDENT + '}')

    target.append(self.INDENT + 'replace: policies {')

    target.append(self.INDENT * 2 + '/*')
    target.extend(aclgenerator.AddRepositoryTags(self.INDENT * 2))
    target.append(self.INDENT * 2 + '*/')

    for (_, terms, filter_options) in self.srx_policies:
      target.append(self.INDENT * 2 + 'from-zone ' + filter_options[1] +
                    ' to-zone ' + filter_options[3] + ' {')
      for term in terms:
        target.append(str(term))
      # Fixed spacing: was "self.INDENT * 2 +'}'".
      target.append(self.INDENT * 2 + '}')
    target.append(self.INDENT + '}')
    target.append('}')

    # APPLICATIONS
    target.append('replace: applications {')
    done_apps = []
    for app in self.applications:
      app_list = []
      if app in done_apps:
        continue
      if app['protocol'] or app['sport'] or app['dport'] or app['icmp-type']:
        if app['icmp-type']:
          # icmp terms render as a single application with one term per type.
          target.append(self.INDENT + 'application ' + app['name'] + '-app {')
          if app['timeout']:
            timeout = app['timeout']
          else:
            timeout = 60
          for i, code in enumerate(app['icmp-type']):
            target.append(
                self.INDENT * 2 +
                'term t%d protocol icmp icmp-type %s inactivity-timeout %d;' %
                (i+1, str(code), int(timeout)))
        else:
          # Port/protocol terms render as an application-set referencing one
          # application per protocol/sport/dport combination.
          i = 1
          target.append(self.INDENT +
                        'application-set ' + app['name'] + '-app {')

          for proto in (app['protocol'] or ['']):
            for sport in (app['sport'] or ['']):
              for dport in (app['dport'] or ['']):
                chunks = []
                if proto:
                  chunks.append(' protocol %s' % proto)
                if sport:
                  chunks.append(' source-port %s' % sport)
                if dport:
                  chunks.append(' destination-port %s' % dport)
                if app['timeout']:
                  chunks.append(' inactivity-timeout %d' % int(app['timeout']))
                if chunks:
                  target.append(self.INDENT * 2 +
                                'application ' + app['name'] + '-app%d;' % i)
                  app_list.append(self.INDENT + 'application ' + app['name'] +
                                  '-app%d {' % i)
                  app_list.append(self.INDENT * 2 + 'term t%d' % i +
                                  ''.join(chunks) + ';')
                  app_list.append(self.INDENT + '}')
                  i += 1
        target.append(self.INDENT + '}')
        done_apps.append(app)
        if app_list:
          target.extend(app_list)

    target.append('}')
    return '\n'.join(target)
diff --git a/lib/nacaddr.py b/lib/nacaddr.py
new file mode 100644
index 0000000..fc06f17
--- /dev/null
+++ b/lib/nacaddr.py
@@ -0,0 +1,250 @@
+#!/usr/bin/python
+#
+# Copyright 2011 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""A subclass of the ipaddr library that includes comments for ipaddr objects."""
+
+__author__ = 'watson@google.com (Tony Watson)'
+
+from third_party import ipaddr
+
def IP(ipaddress, comment='', token=''):
  """Take an ip string and return an object of the correct type.

  Args:
    ipaddress: the ip address string.
    comment: optional comment field.
    token: optional token name where this address was extracted from.

  Returns:
    An IPv4 or IPv6 object, or raises ValueError.

  Raises:
    ValueError: if the string passed isn't either a v4 or a v6 address.

  Notes:
    This is sort of a poor-man's factory method.
  """
  parsed = ipaddr.IPNetwork(ipaddress)
  if parsed.version == 4:
    return IPv4(ipaddress, comment, token)
  if parsed.version == 6:
    return IPv6(ipaddress, comment, token)
+
class IPv4(ipaddr.IPv4Network):
  """An IPv4 network that also carries a text comment and a token name."""

  def __init__(self, ip_string, comment='', token=''):
    ipaddr.IPv4Network.__init__(self, ip_string)
    self.text = comment
    self.token = token
    self.parent_token = token

  def AddComment(self, comment=''):
    """Append comment to self.text, comma separated.

    The comment is not added when it is empty or already contained in
    self.text.

    Args: comment
    """
    if not self.text:
      self.text = comment
    elif comment and comment not in self.text:
      self.text += ', ' + comment

  def supernet(self, prefixlen_diff=1):
    """Override ipaddr.IPv4 supernet so we can maintain comments.

    See ipaddr.IPv4.Supernet for complete documentation.
    """
    if self.prefixlen == 0:
      return self
    if self.prefixlen - prefixlen_diff < 0:
      raise PrefixlenDiffInvalidError(
          'current prefixlen is %d, cannot have a prefixlen_diff of %d' % (
              self.prefixlen, prefixlen_diff))
    # Carry the comment and token over to the wider network.
    return IPv4(ipaddr.IPv4Network.supernet(self, prefixlen_diff),
                comment=self.text, token=self.token)

  # Backwards compatibility name from v1.
  Supernet = supernet
+
+
class IPv6(ipaddr.IPv6Network):
  """An IPv6 network that also carries a text comment and a token name."""

  def __init__(self, ip_string, comment='', token=''):
    ipaddr.IPv6Network.__init__(self, ip_string)
    self.text = comment
    self.token = token
    self.parent_token = token

  def supernet(self, prefixlen_diff=1):
    """Override ipaddr.IPv6Network supernet so we can maintain comments.

    See ipaddr.IPv6Network.Supernet for complete documentation.
    """
    if self.prefixlen == 0:
      return self
    if self.prefixlen - prefixlen_diff < 0:
      raise PrefixlenDiffInvalidError(
          'current prefixlen is %d, cannot have a prefixlen_diff of %d' % (
              self.prefixlen, prefixlen_diff))
    # Carry the comment and token over to the wider network.
    return IPv6(ipaddr.IPv6Network.supernet(self, prefixlen_diff),
                comment=self.text, token=self.token)

  # Backwards compatibility name from v1.
  Supernet = supernet

  def AddComment(self, comment=''):
    """Append comment to self.text, comma separated.

    The comment is not added when it is empty or already contained in
    self.text.

    Args: comment
    """
    if not self.text:
      self.text = comment
    elif comment and comment not in self.text:
      self.text += ', ' + comment
+
+
def CollapseAddrListRecursive(addresses):
  """Recursively loop through the addresses, collapsing adjacent netblocks.

  Example:

    ip1 = ipaddr.IPv4Network('1.1.0.0/24')
    ip2 = ipaddr.IPv4Network('1.1.1.0/24')
    ip3 = ipaddr.IPv4Network('1.1.2.0/24')
    ip4 = ipaddr.IPv4Network('1.1.3.0/24')
    ip5 = ipaddr.IPv4Network('1.1.4.0/24')
    ip6 = ipaddr.IPv4Network('1.1.0.1/22')

    CollapseAddrListRecursive([ip1, ip2, ip3, ip4, ip5, ip6]) ->
      [IPv4Network('1.1.0.0/22'), IPv4Network('1.1.4.0/24')]

  Note, this shouldn't be called directly; it is called via
  CollapseAddrList([]), which pre-sorts the input.

  Args:
    addresses: List of IPv4 or IPv6 objects

  Returns:
    List of IPv4 or IPv6 objects (depending on what we were passed)
  """
  collapsed = []
  changed = False

  for addr in addresses:
    if not collapsed:
      collapsed.append(addr)
      continue
    if collapsed[-1].Contains(addr):
      # addr lies wholly inside the previous block: keep its comment.
      collapsed[-1].AddComment(addr.text)
      changed = True
    elif addr == collapsed[-1].Supernet().Subnet()[1]:
      # addr is the upper half of the previous block's supernet: merge.
      collapsed.append(collapsed.pop().Supernet())
      collapsed[-1].AddComment(addr.text)
      changed = True
    else:
      collapsed.append(addr)

  # Repeat until a full pass makes no change.
  if changed:
    return CollapseAddrListRecursive(collapsed)
  return collapsed
+
+
def CollapseAddrList(addresses):
  """Collapse an array of IP objects.

  Example: CollapseAddrList(
    [IPv4('1.1.0.0/24'), IPv4('1.1.1.0/24')]) -> [IPv4('1.1.0.0/23')]
  Note: this works just as well with IPv6 addresses too.

  Args:
    addresses: list of ipaddr.IPNetwork objects

  Returns:
    list of ipaddr.IPNetwork objects
  """
  # The recursive pass detects adjacency, so the input must be in
  # network order first.
  ordered = sorted(addresses, key=ipaddr._BaseNet._get_networks_key)
  return CollapseAddrListRecursive(ordered)
+
+
def SortAddrList(addresses):
  """Return the given nacaddr objects as a new list in network order."""
  # Uses the same (private) ipaddr sort key as CollapseAddrList.
  network_key = ipaddr._BaseNet._get_networks_key
  return sorted(addresses, key=network_key)
+
+
def RemoveAddressFromList(superset, exclude):
  """Remove a single address from a list of addresses.

  Args:
    superset: a List of nacaddr IPv4 or IPv6 addresses
    exclude: a single nacaddr IPv4 or IPv6 address

  Returns:
    a List of nacaddr IPv4 or IPv6 addresses
  """
  remaining = []
  for addr in superset:
    if exclude == addr or addr in exclude:
      # addr is entirely covered by exclude: drop it. Also works around a
      # bug in ipaddr v1 where IP('1.1.1.1').AddressExclude(IP('1.1.1.1'))
      # raises an error. Not tested in v2 yet.
      continue
    if exclude.version == addr.version and exclude in addr:
      # exclude lies inside addr: keep the pieces around it.
      remaining.extend(IP(part) for part in addr.AddressExclude(exclude))
    else:
      remaining.append(addr)
  return remaining
+
+
def AddressListExclude(superset, excludes):
  """Remove a list of addresses from another list of addresses.

  Args:
    superset: a List of nacaddr IPv4 or IPv6 addresses
    excludes: a List of nacaddr IPv4 or IPv6 addresses

  Returns:
    a List of nacaddr IPv4 or IPv6 addresses
  """
  # Collapse both lists first so each exclusion is applied against the
  # smallest possible set of netblocks.
  superset = CollapseAddrList(superset)
  excludes = CollapseAddrList(excludes)

  # Fixed: removed an unused 'ret_array' local that was assigned but
  # never read.
  for ex in excludes:
    superset = RemoveAddressFromList(superset, ex)
  return CollapseAddrList(superset)
+
+
# Backwards-compatibility alias for AddressListExclude.
ExcludeAddrs = AddressListExclude
+
+
class PrefixlenDiffInvalidError(ipaddr.NetmaskValueError):
  """Raised by supernet() when prefixlen_diff exceeds the current prefixlen.

  Holdover from ipaddr v1.
  """
+
+
# This module is import-only; there is no standalone behavior.
if __name__ == '__main__':
  pass
diff --git a/lib/naming.py b/lib/naming.py
new file mode 100644
index 0000000..40196bc
--- /dev/null
+++ b/lib/naming.py
@@ -0,0 +1,502 @@
+#!/usr/bin/python
+#
+# Copyright 2011 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Parse naming definition files.
+
+Network access control applications use definition files which contain
+information about networks and services. This naming class
+will provide an easy interface into using these definitions.
+
+Sample usage with definition files contained in ./acl/defs:
+ defs = Naming('acl/defs/')
+
+ services = defs.GetService('DNS')
+ returns ['53/tcp', '53/udp', ...]
+
+ networks = defs.GetNet('INTERNAL')
  returns a list of nacaddr.IPv4 objects
+
+The definition files are contained in a single directory and
+may consist of multiple files ending in .net or .svc extensions,
+indicating network or service definitions respectively. The
+format of the files consists of a 'token' value, followed by a
+list of values and optional comments, such as:
+
+INTERNAL = 10.0.0.0/8 # RFC-1918
+ 172.16.0.0/12 # RFC-1918
+ 192.168.0.0/16 # RFC-1918
+or
+
+DNS = 53/tcp
+ 53/udp
+
+"""
+
+__author__ = 'watson@google.com (Tony Watson)'
+
import glob
import os

import nacaddr
+
+
class Error(Exception):
  """Base error class; every error raised by this module inherits from it."""
+
+
# Raised by _ParseLine when the same token is defined more than once.
class NamespaceCollisionError(Error):
  """Used to report on duplicate symbol names found while parsing."""
+
+
class BadNetmaskTypeError(Error):
  """Raised when an unknown netmask type is specified (see GetNet)."""
+
+
# Raised by _Parse when no .svc/.net files exist or a file cannot be read.
class NoDefinitionsError(Error):
  """Raised if no definitions are found."""
+
+
# Raised by _ParseLine for definition lines that cannot be interpreted.
class ParseError(Error):
  """Raised if an error occurs during parsing."""
+
+
# Raised by GetNet and _CheckUnseen for unknown network tokens.
class UndefinedAddressError(Error):
  """Raised if an address is referenced but not defined."""
+
+
# Raised by GetService, GetServiceByProto and _CheckUnseen.
class UndefinedServiceError(Error):
  """Raised if a service is referenced but not defined."""
+
+
# NOTE(review): name lacks the 'Error' suffix used by its siblings; renaming
# would break existing except clauses, so it is kept as-is.
class UnexpectedDefinitionType(Error):
  """An unexpected/unknown definition type was used."""
+
+
class _ItemUnit(object):
  """Container binding one definition token to its list of values.

  An _ItemUnit holds the name of either a service or network group and
  every value string that was parsed for that token.

  Attributes:
    name: A string representing a unique token value.
    items: a list of strings containing values for the token.
  """

  def __init__(self, symbol):
    # Values are accumulated one line at a time by the parser.
    self.items = []
    self.name = symbol
+
+
class Naming(object):
  """Object to hold naming objects from NETWORK and SERVICES definition files.

  Attributes:
    current_symbol: The current token being handled while parsing data.
    services: A collection of all of the current service item tokens.
    networks: A collection of all the current network item tokens.
    unseen_services: Service tokens referenced as values but not (yet) defined.
    unseen_networks: Network tokens referenced as values but not (yet) defined.
  """

  def __init__(self, naming_dir=None, naming_file=None, naming_type=None):
    """Set the default values for a new Naming object.

    Args:
      naming_dir: optional directory containing definition files.
      naming_file: optional single definition file inside naming_dir.
      naming_type: definition type ('services' or 'networks') of naming_file.
    """
    self.current_symbol = None
    self.services = {}
    self.networks = {}
    self.unseen_services = {}
    self.unseen_networks = {}
    if naming_file and naming_type:
      filename = os.path.join(naming_dir, naming_file)
      # Fix: this used to call gfile.GFile(), a name never imported anywhere
      # in this module, which raised NameError at runtime.  A plain open()
      # (closed via 'with') provides the same line iteration.
      with open(filename, 'r') as file_handle:
        self._ParseFile(file_handle, naming_type)
    elif naming_dir:
      self._Parse(naming_dir, 'services')
      self._CheckUnseen('services')

      self._Parse(naming_dir, 'networks')
      self._CheckUnseen('networks')

  def _CheckUnseen(self, def_type):
    """Raise if tokens of def_type were referenced but never defined.

    Args:
      def_type: either 'services' or 'networks'.

    Raises:
      UndefinedServiceError: an undefined service token was referenced.
      UndefinedAddressError: an undefined network token was referenced.
    """
    if def_type == 'services':
      if self.unseen_services:
        raise UndefinedServiceError('%s %s' % (
            'The following tokens were nested as a values, but not defined',
            self.unseen_services))
    if def_type == 'networks':
      if self.unseen_networks:
        raise UndefinedAddressError('%s %s' % (
            'The following tokens were nested as a values, but not defined',
            self.unseen_networks))

  def GetIpParents(self, query):
    """Return network tokens that contain IP in query.

    Args:
      query: an ip string ('10.1.1.1') or nacaddr.IP object

    Returns:
      A sorted list of unique parent token names.
    """
    base_parents = []
    recursive_parents = []
    # convert string to nacaddr, if arg is ipaddr then convert str() to nacaddr
    if type(query) != nacaddr.IPv4 and type(query) != nacaddr.IPv6:
      if query[:1].isdigit():
        query = nacaddr.IP(query)
    # Get parent token for an IP
    if type(query) == nacaddr.IPv4 or type(query) == nacaddr.IPv6:
      for token in self.networks:
        for item in self.networks[token].items:
          item = item.split('#')[0].strip()
          if item[:1].isdigit() and nacaddr.IP(item).Contains(query):
            base_parents.append(token)
    # Get parent token for another token
    else:
      for token in self.networks:
        for item in self.networks[token].items:
          item = item.split('#')[0].strip()
          if item[:1].isalpha() and item == query:
            base_parents.append(token)
    # look for nested tokens
    for bp in base_parents:
      done = False
      for token in self.networks:
        if bp in self.networks[token].items:
          # ignore IPs, only look at token values
          if bp[:1].isalpha():
            if bp not in recursive_parents:
              recursive_parents.append(bp)
              recursive_parents.extend(self.GetIpParents(bp))
            done = True
      # if no nested tokens, just append value
      if not done:
        if bp[:1].isalpha() and bp not in recursive_parents:
          recursive_parents.append(bp)
    return sorted(list(set(recursive_parents)))

  def GetServiceParents(self, query):
    """Given a query token, return list of services definitions with that token.

    Args:
      query: a service token name.
    """
    return self._GetParents(query, self.services)

  def GetNetParents(self, query):
    """Given a query token, return list of network definitions with that token.

    Args:
      query: a network token name.
    """
    return self._GetParents(query, self.networks)

  def _GetParents(self, query, query_group):
    """Given a naming item dict, return any tokens containing the value.

    Args:
      query: a service or token name, such as 53/tcp or DNS
      query_group: either services or networks dict

    Returns:
      List of token names whose values (directly or transitively) include
      the query.
    """
    base_parents = []
    recursive_parents = []
    # collect list of tokens containing query
    for token in query_group:
      if query in query_group[token].items:
        base_parents.append(token)
    if not base_parents:
      return []
    # iterate through tokens containing query, doing recursion if necessary
    for bp in base_parents:
      for token in query_group:
        if bp in query_group[token].items and bp not in recursive_parents:
          recursive_parents.append(bp)
          recursive_parents.extend(self._GetParents(bp, query_group))
      if bp not in recursive_parents:
        recursive_parents.append(bp)
    return recursive_parents

  def GetService(self, query):
    """Given a service name, return a list of associated ports and protocols.

    Args:
      query: Service name symbol or token.

    Returns:
      A list of service values such as ['80/tcp', '443/tcp', '161/udp', ...]

    Raises:
      UndefinedServiceError: If the service name isn't defined.
    """
    expandset = set()
    already_done = set()
    data = query.split('#')  # Get the token keyword and remove any comment
    service_name = data[0].split()[0]  # strip and cast from list to string
    if service_name not in self.services:
      raise UndefinedServiceError('\nNo such service: %s' % query)

    already_done.add(service_name)

    for next_item in self.services[service_name].items:
      # Remove any trailing comment.
      service = next_item.split('#')[0].strip()
      # Recognized token, not a value.
      if '/' not in service:
        # Make sure we are not descending into recursion hell.
        if service not in already_done:
          already_done.add(service)
          try:
            expandset.update(self.GetService(service))
          except UndefinedServiceError as e:
            # One of the services in query is undefined, refine the error msg.
            raise UndefinedServiceError('%s (in %s)' % (e, query))
      else:
        expandset.add(service)
    return sorted(expandset)

  def GetServiceByProto(self, query, proto):
    """Given a service name, return list of ports in the service by protocol.

    Args:
      query: Service name to lookup.
      proto: A particular protocol to restrict results by, such as 'tcp'.

    Returns:
      A list of service values of type 'proto', such as ['80', '443', ...]

    Raises:
      UndefinedServiceError: If the service name isn't defined.
    """
    services_set = set()
    proto = proto.upper()
    data = query.split('#')  # Get the token keyword and remove any comment
    servicename = data[0].split()[0]  # strip and cast from list to string
    if servicename not in self.services:
      raise UndefinedServiceError('%s %s' % ('\nNo such service,', servicename))

    for service in self.GetService(servicename):
      if service and '/' in service:
        parts = service.split('/')
        if parts[1].upper() == proto:
          services_set.add(parts[0])
    return sorted(services_set)

  def GetNetAddr(self, token):
    """Given a network token, return a list of netaddr.IPv4 objects.

    Args:
      token: A name of a network definition, such as 'INTERNAL'

    Returns:
      A list of netaddr.IPv4 objects.

    Raises:
      UndefinedAddressError: if the network name isn't defined.
    """
    return self.GetNet(token)

  def GetNet(self, query):
    """Expand a network token into a list of nacaddr.IPv4 objects.

    Args:
      query: Network definition token which may include comment text

    Returns:
      List of nacaddr.IPv4 objects

    Raises:
      BadNetmaskTypeError: Results when an unknown netmask_type is
        specified. Acceptable values are 'cidr', 'netmask', and 'hostmask'.
      UndefinedAddressError: for an undefined token value
    """
    returnlist = []
    data = query.split('#')  # Get the token keyword and remove any comment
    token = data[0].split()[0]  # Remove whitespace and cast from list to string
    if token not in self.networks:
      raise UndefinedAddressError('%s %s' % ('\nUNDEFINED:', str(token)))

    # Loop variable renamed from 'next', which shadowed the builtin next().
    for item in self.networks[token].items:
      comment = ''
      if item.find('#') > -1:
        (net, comment) = item.split('#', 1)
      else:
        net = item
      try:
        net = net.strip()
        addr = nacaddr.IP(net)
        # we want to make sure that we're storing the network addresses
        # ie, FOO = 192.168.1.1/24 should actually return 192.168.1.0/24
        if addr.ip != addr.network:
          addr = nacaddr.IP('%s/%d' % (addr.network, addr.prefixlen))

        addr.text = comment.lstrip()
        addr.token = token
        returnlist.append(addr)
      except ValueError:
        # if net was something like 'FOO', or the name of another token which
        # needs to be dereferenced, nacaddr.IP() will return a ValueError
        returnlist.extend(self.GetNet(net))
    for addr in returnlist:
      addr.parent_token = token
    return returnlist

  def _Parse(self, defdirectory, def_type):
    """Parse files of a particular type for tokens and values.

    Given a directory name and the type (services|networks) to
    process, grab all the appropriate files in that directory
    and parse them for definitions.

    Args:
      defdirectory: Path to directory containing definition files.
      def_type: Type of definitions to parse

    Raises:
      NoDefinitionsError: if no definitions are found.
    """
    file_names = []
    get_files = {'services': lambda: glob.glob(defdirectory + '/*.svc'),
                 'networks': lambda: glob.glob(defdirectory + '/*.net')}

    if def_type in get_files:
      file_names = get_files[def_type]()
    else:
      raise NoDefinitionsError('Unknown definitions type.')
    if not file_names:
      raise NoDefinitionsError('No definition files for %s in %s found.' %
                               (def_type, defdirectory))

    for current_file in file_names:
      try:
        # Fix: the file handle was previously never closed; 'with' also
        # streams lines instead of materializing them via readlines().
        with open(current_file, 'r') as file_handle:
          for line in file_handle:
            self._ParseLine(line, def_type)
      except IOError as error_info:
        # Fix: the original passed ('%s', error_info) as two separate
        # exception arguments, leaving the message unformatted.
        raise NoDefinitionsError('%s' % error_info)

  def _ParseFile(self, file_handle, def_type):
    """Feed every line of an already-open definition file to _ParseLine."""
    for line in file_handle:
      self._ParseLine(line, def_type)

  def ParseServiceList(self, data):
    """Take an array of service data and import into class.

    This method allows us to pass an array of data that contains service
    definitions that are appended to any definitions read from files.

    Args:
      data: array of text lines containing service definitions.
    """
    for line in data:
      self._ParseLine(line, 'services')

  def ParseNetworkList(self, data):
    """Take an array of network data and import into class.

    This method allows us to pass an array of data that contains network
    definitions that are appended to any definitions read from files.

    Args:
      data: array of text lines containing net definitions.
    """
    for line in data:
      self._ParseLine(line, 'networks')

  def _ParseLine(self, line, definition_type):
    """Parse a single line of a service definition file.

    This routine is used to parse a single line of a service
    definition file, building a list of 'self.services' objects
    as each line of the file is iterated through.

    Args:
      line: A single line from a service definition files.
      definition_type: Either 'networks' or 'services'

    Raises:
      UnexpectedDefinitionType: when called with unexpected type of defintions
      NamespaceCollisionError: when overlapping tokens are found.
      ParseError: If errors occur
    """
    if definition_type not in ['services', 'networks']:
      raise UnexpectedDefinitionType('%s %s' % (
          'Received an unexpected defintion type:', definition_type))
    line = line.strip()
    if not line or line.startswith('#'):  # Skip comments and blanks.
      return
    comment = ''
    if line.find('#') > -1:  # if there is a comment, save it
      (line, comment) = line.split('#', 1)
    line_parts = line.split('=')  # Split on var = val lines.
    # the value field still has the comment at this point
    # If there was '=', then do var and value
    if len(line_parts) > 1:
      self.current_symbol = line_parts[0].strip()  # varname left of '='
      if definition_type == 'services':
        if self.current_symbol in self.services:
          raise NamespaceCollisionError('%s %s' % (
              '\nMultiple definitions found for service: ',
              self.current_symbol))
      elif definition_type == 'networks':
        if self.current_symbol in self.networks:
          # Fix: this message previously said "service" (copy-paste error).
          raise NamespaceCollisionError('%s %s' % (
              '\nMultiple definitions found for network: ',
              self.current_symbol))

      self.unit = _ItemUnit(self.current_symbol)
      if definition_type == 'services':
        self.services[self.current_symbol] = self.unit
        # unseen_services is a list of service TOKENS found in the values
        # of newly defined services, but not previously defined themselves.
        # When we define a new service, we should remove it (if it exists)
        # from the list of unseen_services.
        if self.current_symbol in self.unseen_services:
          self.unseen_services.pop(self.current_symbol)
      elif definition_type == 'networks':
        self.networks[self.current_symbol] = self.unit
        if self.current_symbol in self.unseen_networks:
          self.unseen_networks.pop(self.current_symbol)
      else:
        raise ParseError('Unknown definitions type.')
      values = line_parts[1]
    # No '=', so this is a value only line
    else:
      values = line_parts[0]  # values for previous var are continued this line
    for value_piece in values.split():
      if not value_piece:
        continue
      if not self.current_symbol:
        break
      if comment:
        self.unit.items.append(value_piece + ' # ' + comment)
      else:
        self.unit.items.append(value_piece)
        # token?
        if value_piece[0].isalpha() and ':' not in value_piece:
          if definition_type == 'services':
            # already in top definitions list?
            if value_piece not in self.services:
              # already have it as an unused value?
              if value_piece not in self.unseen_services:
                self.unseen_services[value_piece] = True
          if definition_type == 'networks':
            if value_piece not in self.networks:
              if value_piece not in self.unseen_networks:
                self.unseen_networks[value_piece] = True
diff --git a/lib/packetfilter.py b/lib/packetfilter.py
new file mode 100644
index 0000000..c9742b9
--- /dev/null
+++ b/lib/packetfilter.py
@@ -0,0 +1,348 @@
+#!/usr/bin/python
+#
+# Copyright 2012 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""PacketFilter (PF) generator."""
+
+__author__ = 'msu@google.com (Martin Suess)'
+
+import aclgenerator
+import datetime
+import logging
+
+
class Error(Exception):
  """Base error class for the packetfilter generator."""
+
+
# NOTE(review): not raised anywhere in this module's visible code; Term
# raises aclgenerator.UnsupportedFilterError for bad actions instead.
class UnsupportedActionError(Error):
  """Raised when we see an unsupported action."""
+
+
# Raised by PacketFilter._TranslatePolicy for unknown header filter options.
class UnsupportedTargetOption(Error):
  """Raised when we see an unsupported option."""
+
+
class Term(aclgenerator.Term):
  """Generate PacketFilter policy terms."""

  # Validate that term does not contain any fields we do not
  # support. This prevents us from thinking that our output is
  # correct in cases where we've omitted fields from term.
  _PLATFORM = 'packetfilter'
  # Map generic policy actions onto their pf.conf keywords.
  _ACTION_TABLE = {
      'accept': 'pass',
      'deny': 'block drop',
      'reject': 'block return',
      }
  # Map tcp-flag option names onto pf.conf single-letter flag notation.
  _TCP_FLAGS_TABLE = {
      'syn': 'S',
      'ack': 'A',
      'fin': 'F',
      'rst': 'R',
      'urg': 'U',
      'psh': 'P',
      'all': 'ALL',
      'none': 'NONE',
      }

  def __init__(self, term, filter_name, af='inet'):
    """Setup a new term.

    Args:
      term: A policy.Term object to represent in packetfilter.
      filter_name: The name of the filter chain to attach the term to.
      af: Which address family ('inet' or 'inet6') to apply the term to.
    """
    # (The original docstring claimed this raised UnsupportedFilterError,
    # but the constructor performs no validation; errors surface in __str__.)
    self.term = term  # term object
    self.filter = filter_name  # actual name of filter
    self.options = []
    self.default_action = 'deny'
    self.af = af

  def __str__(self):
    """Render config output from this term object."""
    ret_str = []

    # Create a new term
    ret_str.append('\n# term %s' % self.term.name)
    # append comments to output
    for line in self.term.comment:
      if not line:
        continue
      ret_str.append('# %s' % str(line))

    # if terms does not specify action, use filter default action
    if not self.term.action:
      # NOTE(review): indexing [0] on an empty action list raises IndexError;
      # presumably policy.Term always supplies an action object -- confirm.
      self.term.action[0].value = self.default_action
    if str(self.term.action[0]) not in self._ACTION_TABLE:
      raise aclgenerator.UnsupportedFilterError('%s %s %s %s' % (
          '\n', self.term.name, self.term.action[0],
          'action not currently supported.'))

    # protocol
    if self.term.protocol:
      protocol = self.term.protocol
    else:
      protocol = []
    if self.term.protocol_except:
      raise aclgenerator.UnsupportedFilterError('%s %s %s' % (
          '\n', self.term.name,
          'protocol_except logic not currently supported.'))

    # source address
    term_saddrs = self._CheckAddressAf(self.term.source_address)
    if not term_saddrs:
      logging.warn(self.NO_AF_LOG_FORMAT.substitute(term=self.term.name,
                                                    direction='source',
                                                    af=self.af))
      return ''
    term_saddr = self._GenerateAddrStatement(
        term_saddrs, self.term.source_address_exclude)

    # destination address
    term_daddrs = self._CheckAddressAf(self.term.destination_address)
    if not term_daddrs:
      logging.warn(self.NO_AF_LOG_FORMAT.substitute(term=self.term.name,
                                                    direction='destination',
                                                    af=self.af))
      return ''
    term_daddr = self._GenerateAddrStatement(
        term_daddrs, self.term.destination_address_exclude)

    # ports
    source_port = []
    destination_port = []
    if self.term.source_port:
      source_port = self._GeneratePortStatement(self.term.source_port)
    if self.term.destination_port:
      destination_port = self._GeneratePortStatement(self.term.destination_port)

    # icmp-type
    icmp_types = ['']
    if self.term.icmp_type:
      if self.af != 'mixed':
        af = self.af
      elif protocol == ['icmp']:
        af = 'inet'
      elif protocol == ['icmp6']:
        af = 'inet6'
      else:
        raise aclgenerator.UnsupportedFilterError('%s %s %s' % (
            '\n', self.term.name,
            'icmp protocol is not defined or not supported.'))
      icmp_types = self.NormalizeIcmpTypes(
          self.term.icmp_type, protocol, af)

    # options
    opts = [str(x) for x in self.term.option]
    tcp_flags = []
    for next_opt in opts:
      # Iterate through flags table, and create list of tcp-flags to append
      for next_flag in self._TCP_FLAGS_TABLE:
        # startswith() replaces the original "find(...) == 0" idiom.
        if next_opt.startswith(next_flag):
          tcp_flags.append(self._TCP_FLAGS_TABLE.get(next_flag))

    ret_str.extend(self._FormatPart(
        self._ACTION_TABLE.get(str(self.term.action[0])),
        self.term.logging,
        self.af,
        protocol,
        term_saddr,
        source_port,
        term_daddr,
        destination_port,
        tcp_flags,
        icmp_types,
        self.options,
        ))

    # Fix: the original filtered with "v is not ''", an *identity* test on a
    # string that only works by accident of CPython interning (and emits a
    # SyntaxWarning on modern interpreters).
    return '\n'.join(str(v) for v in ret_str if v != '')

  def _CheckAddressAf(self, addrs):
    """Verify that the requested address-family matches the address's family."""
    if not addrs:
      return ['any']
    if self.af == 'mixed':
      return addrs
    af_addrs = []
    af = self.NormalizeAddressFamily(self.af)
    for addr in addrs:
      if addr.version == af:
        af_addrs.append(addr)
    return af_addrs

  def _FormatPart(self, action, log, af, proto, src_addr, src_port,
                  dst_addr, dst_port, tcp_flags, icmp_types, options):
    """Format the string which will become a single PF entry."""
    line = ['%s' % action]
    if log and 'true' in [str(l) for l in log]:
      line.append('log')

    line.append('quick')
    if af != 'mixed':
      line.append(af)

    if proto:
      line.append(self._GenerateProtoStatement(proto))

    line.append('from %s' % src_addr)
    if src_port:
      line.append('port %s' % src_port)

    line.append('to %s' % dst_addr)
    if dst_port:
      line.append('port %s' % dst_port)

    if 'tcp' in proto and tcp_flags:
      line.append('flags')
      line.append('/'.join(tcp_flags))

    if 'icmp' in proto and icmp_types:
      type_strs = [str(icmp_type) for icmp_type in icmp_types]
      type_strs = ', '.join(type_strs)
      if type_strs:
        line.append('icmp-type { %s }' % type_strs)

    if options:
      line.extend(options)

    return [' '.join(line)]

  def _GenerateProtoStatement(self, protocols):
    """Return a 'proto { ... }' clause, or '' when no protocols are given."""
    proto = ''
    if protocols:
      proto = 'proto { %s }' % ' '.join(protocols)
    return proto

  def _GenerateAddrStatement(self, addrs, exclude_addrs):
    """Return a '{ a, b, !c }' address clause with negated exclude entries."""
    addresses = [str(addr) for addr in addrs]
    for exclude_addr in exclude_addrs:
      addresses.append('!%s' % str(exclude_addr))
    return '{ %s }' % ', '.join(addresses)

  def _GeneratePortStatement(self, ports):
    """Return a '{ p1 p2 ... }' port clause with duplicates removed."""
    port_list = []
    for port_tuple in ports:
      for port in port_tuple:
        port_list.append(str(port))
    return '{ %s }' % ' '.join(list(set(port_list)))
+
+
class PacketFilter(aclgenerator.ACLGenerator):
  """Generates filters and terms from provided policy object."""

  _PLATFORM = 'packetfilter'
  _DEFAULT_PROTOCOL = 'all'
  _SUFFIX = '.pf'
  _TERM = Term
  _OPTIONAL_SUPPORTED_KEYWORDS = set(['expiration',
                                      'logging',
                                      'routing_instance',
                                     ])

  def _TranslatePolicy(self, pol, exp_info):
    """Translate a policy object into pf-specific internal structures.

    Args:
      pol: a policy.Policy object to translate.
      exp_info: number of weeks ahead of expiry to begin warning.

    Raises:
      UnsupportedTargetOption: an unknown option follows the filter name.
      aclgenerator.UnsupportedFilterError: more than one address family is
        specified for one filter.
      aclgenerator.DuplicateTermError: a term name appears twice.
    """
    self.pf_policies = []
    current_date = datetime.date.today()
    exp_info_date = current_date + datetime.timedelta(weeks=exp_info)

    good_afs = ['inet', 'inet6', 'mixed']
    good_options = []

    for header, terms in pol.filters:
      if self._PLATFORM not in header.platforms:
        continue

      # Fix: filter_type was previously initialized once *outside* this loop,
      # so a second filter that named an address family spuriously tripped
      # the "may only specify one" error against the prior filter's choice.
      filter_type = None

      filter_options = header.FilterOptions(self._PLATFORM)[1:]
      filter_name = header.FilterName(self._PLATFORM)

      # ensure all options after the filter name are expected
      for opt in filter_options:
        if opt not in good_afs + good_options:
          raise UnsupportedTargetOption('%s %s %s %s' % (
              '\nUnsupported option found in', self._PLATFORM,
              'target definition:', opt))

      # Check for matching af
      for address_family in good_afs:
        if address_family in filter_options:
          # should not specify more than one AF in options
          if filter_type is not None:
            raise aclgenerator.UnsupportedFilterError('%s %s %s %s' % (
                '\nMay only specify one of', good_afs, 'in filter options:',
                filter_options))
          filter_type = address_family
      if filter_type is None:
        filter_type = 'mixed'

      # add the terms
      new_terms = []
      term_names = set()
      for term in terms:
        term.name = self.FixTermLength(term.name)
        if term.name in term_names:
          raise aclgenerator.DuplicateTermError(
              'You have a duplicate term: %s' % term.name)
        term_names.add(term.name)

        if not term:
          continue

        if term.expiration:
          if term.expiration <= exp_info_date:
            logging.info('INFO: Term %s in policy %s expires '
                         'in less than two weeks.', term.name, filter_name)
          if term.expiration <= current_date:
            logging.warn('WARNING: Term %s in policy %s is expired and '
                         'will not be rendered.', term.name, filter_name)
            continue

        new_terms.append(self._TERM(term, filter_name, filter_type))

      self.pf_policies.append((header, filter_name, filter_type, new_terms))

  def __str__(self):
    """Render the output of the PF policy into config."""
    target = []
    pretty_platform = '%s%s' % (self._PLATFORM[0].upper(), self._PLATFORM[1:])

    for (header, filter_name, filter_type, terms) in self.pf_policies:
      # Add comments for this filter
      target.append('# %s %s Policy' % (pretty_platform,
                                        header.FilterName(self._PLATFORM)))

      # reformat long text comments, if needed
      comments = aclgenerator.WrapWords(header.comment, 70)
      if comments and comments[0]:
        for line in comments:
          target.append('# %s' % line)
        target.append('#')
      # add the p4 tags
      target.extend(aclgenerator.AddRepositoryTags('# '))
      target.append('# ' + filter_type)

      # add the terms
      for term in terms:
        term_str = str(term)
        if term_str:
          target.append(term_str)
      target.append('')

    return '\n'.join(target)
diff --git a/lib/policy.py b/lib/policy.py
new file mode 100644
index 0000000..a6b8ad6
--- /dev/null
+++ b/lib/policy.py
@@ -0,0 +1,1821 @@
+#!/usr/bin/python
+#
+# Copyright 2011 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Parses the generic policy files and return a policy object for acl rendering.
+"""
+
+import datetime
+import os
+import sys
+
+import logging
+import nacaddr
+import naming
+
+from third_party.ply import lex
+from third_party.ply import yacc
+
+
# Naming-definitions object; expected to be set by the caller before terms
# are translated (TranslatePorts and Term.AddObject dereference it).
DEFINITIONS = None
# Fallback directory for naming definition files.
DEFAULT_DEFINITIONS = './def'
# Actions a term may specify; anything else raises InvalidTermActionError.
_ACTIONS = set(('accept', 'deny', 'reject', 'next', 'reject-with-tcp-rst'))
# Recognized values for the logging:: keyword.
_LOGGING = set(('true', 'True', 'syslog', 'local', 'disable'))
# Whether AddressCleanup collapses adjacent networks; reset back to True
# after each filter is translated (see Policy._TranslateTerms).
_OPTIMIZE = True
# Whether Policy.AddFilter runs shading detection; reset inside
# Policy._DetectShading.
_SHADE_CHECK = False
+
+
class Error(Exception):
  """Generic error class."""


class FileNotFoundError(Error):
  """Policy file could not be found."""
  # NOTE(review): this name shadows the Python 3 builtin FileNotFoundError
  # if the module is ever run under Python 3.


class FileReadError(Error):
  """Policy file unable to be read."""


class RecursionTooDeepError(Error):
  """Included files exceed maximum recursion depth."""


class ParseError(Error):
  """ParseError in the input."""


class TermAddressExclusionError(Error):
  """Excluded address block is not contained in the accepted address block."""


class TermObjectTypeError(Error):
  """Error with an object passed to Term."""


class TermPortProtocolError(Error):
  """Error when a requested protocol doesn't have any of the requested ports."""


class TermProtocolEtherTypeError(Error):
  """Error when both ether-type & upper-layer protocol matches are requested."""


class TermNoActionError(Error):
  """Error when a term hasn't defined an action."""


class TermInvalidIcmpType(Error):
  """Error when a term has invalid icmp-types specified."""


class InvalidTermActionError(Error):
  """Error when an action is invalid."""


class InvalidTermLoggingError(Error):
  """Error when an unknown option is set for logging."""


class UndefinedAddressError(Error):
  """Error when an undefined address is referenced."""


class NoTermsError(Error):
  """Error when no terms were found."""


class ShadingError(Error):
  """Error when a term is shaded by a prior term."""
+
+
def TranslatePorts(ports, protocols, term_name):
  """Expand named services into numeric (low, high) port tuples.

  Args:
    ports: list of service names, eg ['SMTP', 'DNS', 'HIGH_PORTS']
    protocols: list of protocols, eg ['tcp', 'udp']
    term_name: name of the current term, used only for warning messages

  Returns:
    list of (low, high) port tuples such as [(25, 25), (53, 53),
    (1024, 65535)]; duplicates are left for Term.CollapsePortList.
  """
  results = []
  for protocol in protocols:
    for service in ports:
      port_list = DEFINITIONS.GetServiceByProto(service, protocol)
      if not port_list:
        # Lazy %-style args keep formatting cost off the happy path.
        logging.warn('%s %s %s %s %s %s%s %s', 'Term', term_name,
                     'has service', service,
                     'which is not defined with protocol', protocol,
                     ', but will be permitted. Unless intended, you should',
                     'consider splitting the protocols into separate terms!')
      for entry in port_list:
        # Entries are 'port' or 'low-high'.
        bounds = entry.split('-')
        low = int(bounds[0])
        high = int(bounds[1]) if len(bounds) > 1 else low
        results.append((low, high))
  return results
+
+
+# classes for storing the object types in the policy files.
class Policy(object):
  """The policy object contains everything found in a given policy file."""

  def __init__(self, header, terms):
    """Initiator for the Policy object.

    Args:
      header: __main__.Header object. contains comments which should be passed
        on to the rendered acls as well as the type of acls this policy file
        should render to.

      terms: list __main__.Term. an array of Term objects which must be rendered
        in each of the rendered acls.

    Attributes:
      filters: list of tuples containing (header, terms).
    """
    self.filters = []
    self.AddFilter(header, terms)

  def AddFilter(self, header, terms):
    """Add another header & filter.

    Translates the terms and, when the module-global _SHADE_CHECK is set,
    runs shading detection over them.
    """
    self.filters.append((header, terms))
    self._TranslateTerms(terms)
    if _SHADE_CHECK:
      self._DetectShading(terms)

  def _TranslateTerms(self, terms):
    """Resolve named services/addresses in each term and sanity check it.

    Args:
      terms: list of Term objects.

    Raises:
      NoTermsError: if terms is empty.
      TermPortProtocolError: if a term's named services yield no ports for
        its protocols.
    """
    if not terms:
      raise NoTermsError('no terms found')
    for term in terms:
      # TODO(pmoody): this probably belongs in Term.SanityCheck(),
      # or at the very least, in some method under class Term()
      if term.translated:
        continue
      if term.port:
        term.port = TranslatePorts(term.port, term.protocol, term.name)
        if not term.port:
          raise TermPortProtocolError(
              'no ports of the correct protocol for term %s' % (
                  term.name))
      if term.source_port:
        term.source_port = TranslatePorts(term.source_port, term.protocol,
                                          term.name)
        if not term.source_port:
          raise TermPortProtocolError(
              'no source ports of the correct protocol for term %s' % (
                  term.name))
      if term.destination_port:
        term.destination_port = TranslatePorts(term.destination_port,
                                               term.protocol, term.name)
        if not term.destination_port:
          raise TermPortProtocolError(
              'no destination ports of the correct protocol for term %s' % (
                  term.name))

      # If argument is true, we optimize, otherwise just sort addresses
      term.AddressCleanup(_OPTIMIZE)
      # Reset _OPTIMIZE global to default value.
      # NOTE(review): mutating module-global flags as a side effect makes
      # this order-sensitive; per-filter state would be safer.
      globals()['_OPTIMIZE'] = True
      term.SanityCheck()
      term.translated = True

  @property
  def headers(self):
    """Returns the headers from each of the configured filters.

    Returns:
      headers
    """
    return [x[0] for x in self.filters]

  def _DetectShading(self, terms):
    """Finds terms which are shaded (impossible to reach).

    Iterate through each term, looking at each prior term. If a prior term
    contains every component of the current term then the current term would
    never be hit and is thus shaded. This can be a mistake.

    Args:
      terms: list of Term objects.

    Raises:
      ShadingError: When a term is impossible to reach.
    """
    # Reset _SHADE_CHECK global to default value (the original comment said
    # _OPTIMIZE here — a copy/paste slip; the code resets _SHADE_CHECK).
    globals()['_SHADE_CHECK'] = False
    shading_errors = []
    for index, term in enumerate(terms):
      for prior_index in xrange(index):
        # Check each term that came before for shading. Terms with next as an
        # action do not terminate evaluation, so cannot shade.
        if (term in terms[prior_index]
            and 'next' not in terms[prior_index].action):
          shading_errors.append(
              ' %s is shaded by %s.' % (
                  term.name, terms[prior_index].name))
    if shading_errors:
      raise ShadingError('\n'.join(shading_errors))
+
+
+class Term(object):
+ """The Term object is used to store each of the terms.
+
+ Args:
+ obj: an object of type VarType or a list of objects of type VarType
+
+ members:
+ address/source_address/destination_address/: list of
+ VarType.(S|D)?ADDRESS's
+ address_exclude/source_address_exclude/destination_address_exclude: list of
+ VarType.(S|D)?ADDEXCLUDE's
+ port/source_port/destination_port: list of VarType.(S|D)?PORT's
+ options: list of VarType.OPTION's.
+ protocol: list of VarType.PROTOCOL's.
+ counter: VarType.COUNTER
+ action: list of VarType.ACTION's
+ comments: VarType.COMMENT
+ expiration: VarType.EXPIRATION
+ verbatim: VarType.VERBATIM
+ logging: VarType.LOGGING
+ qos: VarType.QOS
+ policer: VarType.POLICER
+ """
+ ICMP_TYPE = {4: {'echo-reply': 0,
+ 'unreachable': 3,
+ 'source-quench': 4,
+ 'redirect': 5,
+ 'alternate-address': 6,
+ 'echo-request': 8,
+ 'router-advertisement': 9,
+ 'router-solicitation': 10,
+ 'time-exceeded': 11,
+ 'parameter-problem': 12,
+ 'timestamp-request': 13,
+ 'timestamp-reply': 14,
+ 'information-request': 15,
+ 'information-reply': 16,
+ 'mask-request': 17,
+ 'mask-reply': 18,
+ 'conversion-error': 31,
+ 'mobile-redirect': 32,
+ },
+ 6: {'destination-unreachable': 1,
+ 'packet-too-big': 2,
+ 'time-exceeded': 3,
+ 'parameter-problem': 4,
+ 'echo-request': 128,
+ 'echo-reply': 129,
+ 'multicast-listener-query': 130,
+ 'multicast-listener-report': 131,
+ 'multicast-listener-done': 132,
+ 'router-solicit': 133,
+ 'router-advertisement': 134,
+ 'neighbor-solicit': 135,
+ 'neighbor-advertisement': 136,
+ 'redirect-message': 137,
+ 'router-renumbering': 138,
+ 'icmp-node-information-query': 139,
+ 'icmp-node-information-response': 140,
+ 'inverse-neighbor-discovery-solicitation': 141,
+ 'inverse-neighbor-discovery-advertisement': 142,
+ 'version-2-multicast-listener-report': 143,
+ 'home-agent-address-discovery-request': 144,
+ 'home-agent-address-discovery-reply': 145,
+ 'mobile-prefix-solicitation': 146,
+ 'mobile-prefix-advertisement': 147,
+ 'certification-path-solicitation': 148,
+ 'certification-path-advertisement': 149,
+ 'multicast-router-advertisement': 151,
+ 'multicast-router-solicitation': 152,
+ 'multicast-router-termination': 153,
+ },
+ }
+
  def __init__(self, obj):
    """Initialize every match/action field empty, then absorb obj.

    Args:
      obj: a VarType instance or a list of VarType instances parsed from the
        policy file; dispatched through AddObject.
    """
    self.name = None

    self.action = []
    self.address = []
    self.address_exclude = []
    self.comment = []
    self.counter = None
    self.expiration = None
    self.destination_address = []
    self.destination_address_exclude = []
    self.destination_port = []
    self.destination_prefix = []
    self.logging = []
    self.loss_priority = None
    self.option = []
    self.owner = None
    self.policer = None
    self.port = []
    self.precedence = []
    self.principals = []
    self.protocol = []
    self.protocol_except = []
    self.qos = None
    self.routing_instance = None
    self.source_address = []
    self.source_address_exclude = []
    self.source_port = []
    self.source_prefix = []
    self.verbatim = []
    # juniper specific.
    self.packet_length = None
    self.fragment_offset = None
    self.icmp_type = []
    self.ether_type = []
    self.traffic_type = []
    self.translated = False
    # iptables specific
    self.source_interface = None
    self.destination_interface = None
    self.platform = []
    self.platform_exclude = []
    self.timeout = None
    self.AddObject(obj)
    # Flattened (exclude-subtracted) address caches; filled lazily by
    # FlattenAll(), which is triggered from __contains__.
    self.flattened = False
    self.flattened_addr = None
    self.flattened_saddr = None
    self.flattened_daddr = None
+
+ def __contains__(self, other):
+ """Determine if other term is contained in this term."""
+ if self.verbatim or other.verbatim:
+ # short circuit these
+ if sorted(self.verbatim) != sorted(other.verbatim):
+ return False
+
+ # check prototols
+ # either protocol or protocol-except may be used, not both at the same time.
+ if self.protocol:
+ if other.protocol:
+ if not self.CheckProtocolIsContained(other.protocol, self.protocol):
+ return False
+ # this term has protocol, other has protocol_except.
+ elif other.protocol_except:
+ return False
+ else:
+ # other does not have protocol or protocol_except. since we do other
+ # cannot be contained in self.
+ return False
+ elif self.protocol_except:
+ if other.protocol_except:
+ if self.CheckProtocolIsContained(
+ self.protocol_except, other.protocol_except):
+ return False
+ elif other.protocol:
+ for proto in other.protocol:
+ if proto in self.protocol_except:
+ return False
+ else:
+ return False
+
+ # combine addresses with exclusions for proper contains comparisons.
+ if not self.flattened:
+ self.FlattenAll()
+ if not other.flattened:
+ other.FlattenAll()
+
+ # flat 'address' is compared against other flat (saddr|daddr).
+ # if NONE of these evaluate to True other is not contained.
+ if not (
+ self.CheckAddressIsContained(
+ self.flattened_addr, other.flattened_addr)
+ or self.CheckAddressIsContained(
+ self.flattened_addr, other.flattened_saddr)
+ or self.CheckAddressIsContained(
+ self.flattened_addr, other.flattened_daddr)):
+ return False
+
+ # compare flat address from other to flattened self (saddr|daddr).
+ if not (
+ # other's flat address needs both self saddr & daddr to contain in order
+ # for the term to be contained. We already compared the flattened_addr
+ # attributes of both above, which was not contained.
+ self.CheckAddressIsContained(
+ other.flattened_addr, self.flattened_saddr)
+ and self.CheckAddressIsContained(
+ other.flattened_addr, self.flattened_daddr)):
+ return False
+
+ # basic saddr/daddr check.
+ if not (
+ self.CheckAddressIsContained(
+ self.flattened_saddr, other.flattened_saddr)):
+ return False
+ if not (
+ self.CheckAddressIsContained(
+ self.flattened_daddr, other.flattened_daddr)):
+ return False
+
+ if not (
+ self.CheckPrincipalsContained(
+ self.principals, other.principals)):
+ return False
+
+ # check ports
+ # like the address directive, the port directive is special in that it can
+ # be either source or destination.
+ if self.port:
+ if not (self.CheckPortIsContained(self.port, other.port) or
+ self.CheckPortIsContained(self.port, other.sport) or
+ self.CheckPortIsContained(self.port, other.dport)):
+ return False
+ if not self.CheckPortIsContained(self.source_port, other.source_port):
+ return False
+ if not self.CheckPortIsContained(self.destination_port,
+ other.destination_port):
+ return False
+
+ # prefix lists
+ if self.source_prefix:
+ if sorted(self.source_prefix) != sorted(other.source_prefix):
+ return False
+ if self.destination_prefix:
+ if sorted(self.destination_prefix) != sorted(
+ other.destination_prefix):
+ return False
+
+ # check precedence
+ if self.precedence:
+ if not other.precedence:
+ return False
+ for precedence in other.precedence:
+ if precedence not in self.precedence:
+ return False
+ # check various options
+ if self.option:
+ if not other.option:
+ return False
+ for opt in other.option:
+ if opt not in self.option:
+ return False
+ if self.fragment_offset:
+ # fragment_offset looks like 'integer-integer' or just, 'integer'
+ sfo = [int(x) for x in self.fragment_offset.split('-')]
+ if other.fragment_offset:
+ ofo = [int(x) for x in other.fragment_offset.split('-')]
+ if sfo[0] < ofo[0] or sorted(sfo[1:]) > sorted(ofo[1:]):
+ return False
+ else:
+ return False
+ if self.packet_length:
+ # packet_length looks like 'integer-integer' or just, 'integer'
+ spl = [int(x) for x in self.packet_length.split('-')]
+ if other.packet_length:
+ opl = [int(x) for x in other.packet_length.split('-')]
+ if spl[0] < opl[0] or sorted(spl[1:]) > sorted(opl[1:]):
+ return False
+ else:
+ return False
+ if self.icmp_type:
+ if sorted(self.icmp_type) is not sorted(other.icmp_type):
+ return False
+
+ # check platform
+ if self.platform:
+ if sorted(self.platform) is not sorted(other.platform):
+ return False
+ if self.platform_exclude:
+ if sorted(self.platform_exclude) is not sorted(other.platform_exclude):
+ return False
+
+ # we have containment
+ return True
+
+ def __str__(self):
+ ret_str = []
+ ret_str.append(' name: %s' % self.name)
+ if self.address:
+ ret_str.append(' address: %s' % self.address)
+ if self.address_exclude:
+ ret_str.append(' address_exclude: %s' % self.address_exclude)
+ if self.source_address:
+ ret_str.append(' source_address: %s' % self.source_address)
+ if self.source_address_exclude:
+ ret_str.append(' source_address_exclude: %s' %
+ self.source_address_exclude)
+ if self.destination_address:
+ ret_str.append(' destination_address: %s' % self.destination_address)
+ if self.destination_address_exclude:
+ ret_str.append(' destination_address_exclude: %s' %
+ self.destination_address_exclude)
+ if self.source_prefix:
+ ret_str.append(' source_prefix: %s' % self.source_prefix)
+ if self.destination_prefix:
+ ret_str.append(' destination_prefix: %s' % self.destination_prefix)
+ if self.protocol:
+ ret_str.append(' protocol: %s' % self.protocol)
+ if self.protocol_except:
+ ret_str.append(' protocol-except: %s' % self.protocol_except)
+ if self.owner:
+ ret_str.append(' owner: %s' % self.owner)
+ if self.port:
+ ret_str.append(' port: %s' % self.port)
+ if self.source_port:
+ ret_str.append(' source_port: %s' % self.source_port)
+ if self.destination_port:
+ ret_str.append(' destination_port: %s' % self.destination_port)
+ if self.action:
+ ret_str.append(' action: %s' % self.action)
+ if self.option:
+ ret_str.append(' option: %s' % self.option)
+ if self.qos:
+ ret_str.append(' qos: %s' % self.qos)
+ if self.logging:
+ ret_str.append(' logging: %s' % self.logging)
+ if self.counter:
+ ret_str.append(' counter: %s' % self.counter)
+ if self.source_interface:
+ ret_str.append(' source_interface: %s' % self.source_interface)
+ if self.destination_interface:
+ ret_str.append(' destination_interface: %s' % self.destination_interface)
+ if self.expiration:
+ ret_str.append(' expiration: %s' % self.expiration)
+ if self.platform:
+ ret_str.append(' platform: %s' % self.platform)
+ if self.platform_exclude:
+ ret_str.append(' platform_exclude: %s' % self.platform_exclude)
+ if self.timeout:
+ ret_str.append(' timeout: %s' % self.timeout)
+ return '\n'.join(ret_str)
+
+ def __eq__(self, other):
+ # action
+ if sorted(self.action) != sorted(other.action):
+ return False
+
+ # addresses.
+ if not (sorted(self.address) == sorted(other.address) and
+ sorted(self.source_address) == sorted(other.source_address) and
+ sorted(self.source_address_exclude) ==
+ sorted(other.source_address_exclude) and
+ sorted(self.destination_address) ==
+ sorted(other.destination_address) and
+ sorted(self.destination_address_exclude) ==
+ sorted(other.destination_address_exclude)):
+ return False
+
+ # prefix lists
+ if not (sorted(self.source_prefix) == sorted(other.source_prefix) and
+ sorted(self.destination_prefix) ==
+ sorted(other.destination_prefix)):
+ return False
+
+ # ports
+ if not (sorted(self.port) == sorted(other.port) and
+ sorted(self.source_port) == sorted(other.source_port) and
+ sorted(self.destination_port) == sorted(other.destination_port)):
+ return False
+
+ # protocol
+ if not (sorted(self.protocol) == sorted(other.protocol) and
+ sorted(self.protocol_except) == sorted(other.protocol_except)):
+ return False
+
+ # option
+ if sorted(self.option) != sorted(other.option):
+ return False
+
+ # qos
+ if self.qos != other.qos:
+ return False
+
+ # verbatim
+ if self.verbatim != other.verbatim:
+ return False
+
+ # policer
+ if self.policer != other.policer:
+ return False
+
+ # interface
+ if self.source_interface != other.source_interface:
+ return False
+
+ if self.destination_interface != other.destination_interface:
+ return False
+
+ if sorted(self.logging) != sorted(other.logging):
+ return False
+ if self.qos != other.qos:
+ return False
+ if self.packet_length != other.packet_length:
+ return False
+ if self.fragment_offset != other.fragment_offset:
+ return False
+ if sorted(self.icmp_type) != sorted(other.icmp_type):
+ return False
+ if sorted(self.ether_type) != sorted(other.ether_type):
+ return False
+ if sorted(self.traffic_type) != sorted(other.traffic_type):
+ return False
+
+ # platform
+ if not (sorted(self.platform) == sorted(other.platform) and
+ sorted(self.platform_exclude) == sorted(other.platform_exclude)):
+ return False
+
+ # timeout
+ if self.timeout != other.timeout:
+ return False
+
+ return True
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def FlattenAll(self):
+ """Reduce source, dest, and address fields to their post-exclude state.
+
+ Populates the self.flattened_addr, self.flattened_saddr,
+ self.flattened_daddr by removing excludes from includes.
+ """
+ # No excludes, set flattened attributes and move along.
+ self.flattened = True
+ if not (self.source_address_exclude or self.destination_address_exclude or
+ self.address_exclude):
+ self.flattened_saddr = self.source_address
+ self.flattened_daddr = self.destination_address
+ self.flattened_addr = self.address
+ return
+
+ if self.source_address_exclude:
+ self.flattened_saddr = self._FlattenAddresses(
+ self.source_address, self.source_address_exclude)
+ if self.destination_address_exclude:
+ self.flattened_daddr = self._FlattenAddresses(
+ self.destination_address, self.destination_address_exclude)
+ if self.address_exclude:
+ self.flattened_addr = self._FlattenAddresses(
+ self.address, self.address_exclude)
+
+
  @staticmethod
  def _FlattenAddresses(include, exclude):
    """Reduce an include and exclude list to a single include list.

    Using recursion, whittle away exclude addresses from address include
    addresses which contain the exclusion.

    Args:
      include: list of include addresses.
      exclude: list of exclude addresses.
    Returns:
      a single flattened list of nacaddr objects.
    """
    if not exclude:
      return include

    for index, in_addr in enumerate(include):
      for ex_addr in exclude:
        # 'in' relies on the address objects implementing network containment
        # via __contains__, as ipaddr/nacaddr networks do.
        if ex_addr in in_addr:
          # Replace the covering network with itself minus the exclusion,
          # then recurse with the remaining excludes.
          # NOTE(review): include is mutated (pop/extend) while being
          # enumerated, so later elements may be skipped or revisited —
          # confirm behavior with multiple overlapping excludes.
          reduced_list = in_addr.address_exclude(ex_addr)
          include.pop(index)
          include.extend(
              Term._FlattenAddresses(reduced_list, exclude[1:]))
    return include
+
+ def GetAddressOfVersion(self, addr_type, af=None):
+ """Returns addresses of the appropriate Address Family.
+
+ Args:
+ addr_type: string, this will be either
+ 'source_address', 'source_address_exclude',
+ 'destination_address' or 'destination_address_exclude'
+ af: int or None, either Term.INET4 or Term.INET6
+
+ Returns:
+ list of addresses of the correct family.
+ """
+ if not af:
+ return eval('self.' + addr_type)
+
+ return filter(lambda x: x.version == af, eval('self.' + addr_type))
+
  def AddObject(self, obj):
    """Add an object of unknown type to this term.

    Args:
      obj: single or list of either
        [Address, Port, Option, Protocol, Counter, Action, Comment, Expiration]

    Raises:
      InvalidTermActionError: if the action defined isn't an accepted action.
        eg, action:: godofoobar
      TermObjectTypeError: if AddObject is called with an object it doesn't
        understand.
      InvalidTermLoggingError: when a option is set for logging not known.
    """
    if type(obj) is list:
      for x in obj:
        # do we have a list of addresses?
        # expanded address fields consolidate naked address fields with
        # saddr/daddr.
        if x.var_type is VarType.SADDRESS:
          saddr = DEFINITIONS.GetNetAddr(x.value)
          self.source_address.extend(saddr)
        elif x.var_type is VarType.DADDRESS:
          daddr = DEFINITIONS.GetNetAddr(x.value)
          self.destination_address.extend(daddr)
        elif x.var_type is VarType.ADDRESS:
          addr = DEFINITIONS.GetNetAddr(x.value)
          self.address.extend(addr)
        # do we have address excludes?
        elif x.var_type is VarType.SADDREXCLUDE:
          saddr_exclude = DEFINITIONS.GetNetAddr(x.value)
          self.source_address_exclude.extend(saddr_exclude)
        elif x.var_type is VarType.DADDREXCLUDE:
          daddr_exclude = DEFINITIONS.GetNetAddr(x.value)
          self.destination_address_exclude.extend(daddr_exclude)
        elif x.var_type is VarType.ADDREXCLUDE:
          addr_exclude = DEFINITIONS.GetNetAddr(x.value)
          self.address_exclude.extend(addr_exclude)
        # do we have a list of ports?
        elif x.var_type is VarType.PORT:
          self.port.append(x.value)
        elif x.var_type is VarType.SPORT:
          self.source_port.append(x.value)
        elif x.var_type is VarType.DPORT:
          self.destination_port.append(x.value)
        # do we have a list of protocols?
        elif x.var_type is VarType.PROTOCOL:
          self.protocol.append(x.value)
        # do we have a list of protocol-exceptions?
        elif x.var_type is VarType.PROTOCOL_EXCEPT:
          self.protocol_except.append(x.value)
        # do we have a list of options?
        elif x.var_type is VarType.OPTION:
          self.option.append(x.value)
        elif x.var_type is VarType.PRINCIPALS:
          self.principals.append(x.value)
        elif x.var_type is VarType.SPFX:
          self.source_prefix.append(x.value)
        elif x.var_type is VarType.DPFX:
          self.destination_prefix.append(x.value)
        elif x.var_type is VarType.ETHER_TYPE:
          self.ether_type.append(x.value)
        elif x.var_type is VarType.TRAFFIC_TYPE:
          self.traffic_type.append(x.value)
        elif x.var_type is VarType.PRECEDENCE:
          self.precedence.append(x.value)
        elif x.var_type is VarType.PLATFORM:
          self.platform.append(x.value)
        elif x.var_type is VarType.PLATFORMEXCLUDE:
          self.platform_exclude.append(x.value)
        else:
          raise TermObjectTypeError(
              '%s isn\'t a type I know how to deal with (contains \'%s\')' % (
                  type(x), x.value))
    else:
      # stupid no switch statement in python
      if obj.var_type is VarType.COMMENT:
        self.comment.append(str(obj))
      elif obj.var_type is VarType.OWNER:
        self.owner = obj.value
      elif obj.var_type is VarType.EXPIRATION:
        self.expiration = obj.value
      elif obj.var_type is VarType.LOSS_PRIORITY:
        self.loss_priority = obj.value
      elif obj.var_type is VarType.ROUTING_INSTANCE:
        self.routing_instance = obj.value
      elif obj.var_type is VarType.PRECEDENCE:
        # NOTE(review): the single-object path assigns (replaces) precedence
        # while the list path above appends — confirm this is intended.
        self.precedence = obj.value
      elif obj.var_type is VarType.VERBATIM:
        # Stored as the VarType wrapper itself (not .value).
        self.verbatim.append(obj)
      elif obj.var_type is VarType.ACTION:
        if str(obj) not in _ACTIONS:
          raise InvalidTermActionError('%s is not a valid action' % obj)
        self.action.append(obj.value)
      elif obj.var_type is VarType.COUNTER:
        # Keeps the VarType wrapper, unlike most fields which keep .value.
        self.counter = obj
      elif obj.var_type is VarType.ICMP_TYPE:
        self.icmp_type.extend(obj.value)
      elif obj.var_type is VarType.LOGGING:
        if str(obj) not in _LOGGING:
          raise InvalidTermLoggingError('%s is not a valid logging option' %
                                        obj)
        # Stored as the VarType object so generators can str() it later.
        self.logging.append(obj)
      # police man, tryin'a take you jail
      elif obj.var_type is VarType.POLICER:
        self.policer = obj.value
      # qos?
      elif obj.var_type is VarType.QOS:
        self.qos = obj.value
      elif obj.var_type is VarType.PACKET_LEN:
        self.packet_length = obj.value
      elif obj.var_type is VarType.FRAGMENT_OFFSET:
        self.fragment_offset = obj.value
      elif obj.var_type is VarType.SINTERFACE:
        self.source_interface = obj.value
      elif obj.var_type is VarType.DINTERFACE:
        self.destination_interface = obj.value
      elif obj.var_type is VarType.TIMEOUT:
        self.timeout = obj.value
      else:
        raise TermObjectTypeError(
            '%s isn\'t a type I know how to deal with' % (type(obj)))
+
+ def SanityCheck(self):
+ """Sanity check the definition of the term.
+
+ Raises:
+ ParseError: if term has both verbatim and non-verbatim tokens
+ TermInvalidIcmpType: if term has invalid icmp-types specified
+ TermNoActionError: if the term doesn't have an action defined.
+ TermPortProtocolError: if the term has a service/protocol definition pair
+ which don't match up, eg. SNMP and tcp
+ TermAddressExclusionError: if one of the *-exclude directives is defined,
+ but that address isn't contained in the non *-exclude directive. eg:
+ source-address::CORP_INTERNAL source-exclude:: LOCALHOST
+ TermProtocolEtherTypeError: if the term has both ether-type and
+ upper-layer protocol restrictions
+ InvalidTermActionError: action and routing-instance both defined
+
+ This should be called when the term is fully formed, and
+ all of the options are set.
+
+ """
+ if self.verbatim:
+ if (self.action or self.source_port or self.destination_port or
+ self.port or self.protocol or self.option):
+ raise ParseError(
+ 'term "%s" has both verbatim and non-verbatim tokens.' % self.name)
+ else:
+ if not self.action and not self.routing_instance:
+ raise TermNoActionError('no action specified for term %s' % self.name)
+ # have we specified a port with a protocol that doesn't support ports?
+ if self.source_port or self.destination_port or self.port:
+ if 'tcp' not in self.protocol and 'udp' not in self.protocol:
+ raise TermPortProtocolError(
+ 'ports specified with a protocol that doesn\'t support ports. '
+ 'Term: %s ' % self.name)
+ # TODO(pmoody): do we have mutually exclusive options?
+ # eg. tcp-established + tcp-initial?
+
+ if self.ether_type and (
+ self.protocol or
+ self.address or
+ self.destination_address or
+ self.destination_address_exclude or
+ self.destination_port or
+ self.destination_prefix or
+ self.source_address or
+ self.source_address_exclude or
+ self.source_port or
+ self.source_prefix):
+ raise TermProtocolEtherTypeError(
+ 'ether-type not supported when used with upper-layer protocol '
+ 'restrictions. Term: %s' % self.name)
+ # validate icmp-types if specified, but addr_family will have to be checked
+ # in the generators as policy module doesn't know about that at this point.
+ if self.icmp_type:
+ for icmptype in self.icmp_type:
+ if (icmptype not in self.ICMP_TYPE[4] and icmptype not in
+ self.ICMP_TYPE[6]):
+ raise TermInvalidIcmpType('Term %s contains an invalid icmp-type:'
+ '%s' % (self.name, icmptype))
+
+ def AddressCleanup(self, optimize=True):
+ """Do Address and Port collapsing.
+
+ Notes:
+ Collapses both the address definitions and the port definitions
+ to their smallest possible length.
+
+ Args:
+ optimize: boolean value indicating whether to optimize addresses
+ """
+ if optimize:
+ cleanup = nacaddr.CollapseAddrList
+ else:
+ cleanup = nacaddr.SortAddrList
+
+ # address collapsing.
+ if self.address:
+ self.address = cleanup(self.address)
+ if self.source_address:
+ self.source_address = cleanup(self.source_address)
+ if self.source_address_exclude:
+ self.source_address_exclude = cleanup(self.source_address_exclude)
+ if self.destination_address:
+ self.destination_address = cleanup(self.destination_address)
+ if self.destination_address_exclude:
+ self.destination_address_exclude = cleanup(
+ self.destination_address_exclude)
+
+ # port collapsing.
+ if self.port:
+ self.port = self.CollapsePortList(self.port)
+ if self.source_port:
+ self.source_port = self.CollapsePortList(self.source_port)
+ if self.destination_port:
+ self.destination_port = self.CollapsePortList(self.destination_port)
+
+ def CollapsePortListRecursive(self, ports):
+ """Given a sorted list of ports, collapse to the smallest required list.
+
+ Args:
+ ports: sorted list of port tuples
+
+ Returns:
+ ret_ports: collapsed list of ports
+ """
+ optimized = False
+ ret_ports = []
+ for port in ports:
+ if not ret_ports:
+ ret_ports.append(port)
+ # we should be able to count on ret_ports[-1][0] <= port[0]
+ elif ret_ports[-1][1] >= port[1]:
+ # (10, 20) and (12, 13) -> (10, 20)
+ optimized = True
+ elif port[0] < ret_ports[-1][1] < port[1]:
+ # (10, 20) and (15, 30) -> (10, 30)
+ ret_ports[-1] = (ret_ports[-1][0], port[1])
+ optimized = True
+ elif ret_ports[-1][1] + 1 == port[0]:
+ # (10, 20) and (21, 30) -> (10, 30)
+ ret_ports[-1] = (ret_ports[-1][0], port[1])
+ optimized = True
+ else:
+ # (10, 20) and (22, 30) -> (10, 20), (22, 30)
+ ret_ports.append(port)
+
+ if optimized:
+ return self.CollapsePortListRecursive(ret_ports)
+ return ret_ports
+
+ def CollapsePortList(self, ports):
+ """Given a list of ports, Collapse to the smallest required.
+
+ Args:
+ ports: a list of port strings eg: [(80,80), (53,53) (2000, 2009),
+ (1024,65535)]
+
+ Returns:
+ ret_array: the collapsed sorted list of ports, eg: [(53,53), (80,80),
+ (1024,65535)]
+ """
+ return self.CollapsePortListRecursive(sorted(ports))
+
+ def CheckPrincipalsContained(self, superset, subset):
+ """Check to if the given list of principals is wholly contained.
+
+ Args:
+ superset: list of principals
+ subset: list of principals
+
+ Returns:
+ bool: True if subset is contained in superset. false otherwise.
+ """
+ # Skip set comparison if neither term has principals.
+ if not superset and not subset:
+ return True
+
+ # Convert these lists to sets to use set comparison.
+ sup = set(superset)
+ sub = set(subset)
+ return sub.issubset(sup)
+
+ def CheckProtocolIsContained(self, superset, subset):
+ """Check if the given list of protocols is wholly contained.
+
+ Args:
+ superset: list of protocols
+ subset: list of protocols
+
+ Returns:
+ bool: True if subset is contained in superset. false otherwise.
+ """
+ if not superset:
+ return True
+ if not subset:
+ return False
+
+ # Convert these lists to sets to use set comparison.
+ sup = set(superset)
+ sub = set(subset)
+ return sub.issubset(sup)
+
+ def CheckPortIsContained(self, superset, subset):
+ """Check if the given list of ports is wholly contained.
+
+ Args:
+ superset: list of port tuples
+ subset: list of port tuples
+
+ Returns:
+ bool: True if subset is contained in superset, false otherwise
+ """
+ if not superset:
+ return True
+ if not subset:
+ return False
+
+ for sub_port in subset:
+ not_contains = True
+ for sup_port in superset:
+ if (int(sub_port[0]) >= int(sup_port[0])
+ and int(sub_port[1]) <= int(sup_port[1])):
+ not_contains = False
+ break
+ if not_contains:
+ return False
+ return True
+
+ def CheckAddressIsContained(self, superset, subset):
+ """Check if subset is wholey contained by superset.
+
+ Args:
+ superset: list of the superset addresses
+ subset: list of the subset addresses
+
+ Returns:
+ True or False.
+ """
+ if not superset:
+ return True
+ if not subset:
+ return False
+
+ for sub_addr in subset:
+ sub_contained = False
+ for sup_addr in superset:
+ # ipaddr ensures that version numbers match for inclusion.
+ if sub_addr in sup_addr:
+ sub_contained = True
+ break
+ if not sub_contained:
+ return False
+ return True
+
+
+class VarType(object):
+ """Generic object meant to store lots of basic policy types."""
+
+ COMMENT = 0
+ COUNTER = 1
+ ACTION = 2
+ SADDRESS = 3
+ DADDRESS = 4
+ ADDRESS = 5
+ SPORT = 6
+ DPORT = 7
+ PROTOCOL_EXCEPT = 8
+ OPTION = 9
+ PROTOCOL = 10
+ SADDREXCLUDE = 11
+ DADDREXCLUDE = 12
+ LOGGING = 13
+ QOS = 14
+ POLICER = 15
+ PACKET_LEN = 16
+ FRAGMENT_OFFSET = 17
+ ICMP_TYPE = 18
+ SPFX = 19
+ DPFX = 20
+ ETHER_TYPE = 21
+ TRAFFIC_TYPE = 22
+ VERBATIM = 23
+ LOSS_PRIORITY = 24
+ ROUTING_INSTANCE = 25
+ PRECEDENCE = 26
+ SINTERFACE = 27
+ EXPIRATION = 28
+ DINTERFACE = 29
+ PLATFORM = 30
+ PLATFORMEXCLUDE = 31
+ PORT = 32
+ TIMEOUT = 33
+ OWNER = 34
+ PRINCIPALS = 35
+ ADDREXCLUDE = 36
+
+ def __init__(self, var_type, value):
+ self.var_type = var_type
+ if self.var_type == self.COMMENT:
+ # remove the double quotes
+ comment = value.strip('"')
+ # make all of the lines start w/o leading whitespace.
+ self.value = '\n'.join([x.lstrip() for x in comment.splitlines()])
+ else:
+ self.value = value
+
+ def __str__(self):
+ return self.value
+
+ def __eq__(self, other):
+ return self.var_type == other.var_type and self.value == other.value
+
+
+class Header(object):
+ """The header of the policy file contains the targets and a global comment."""
+
+ def __init__(self):
+ self.target = []
+ self.comment = []
+
+ def AddObject(self, obj):
+ """Add and object to the Header.
+
+ Args:
+ obj: of type VarType.COMMENT or Target
+ """
+ if type(obj) == Target:
+ self.target.append(obj)
+ elif obj.var_type == VarType.COMMENT:
+ self.comment.append(str(obj))
+
+ @property
+ def platforms(self):
+ """The platform targets of this particular header."""
+ return map(lambda x: x.platform, self.target)
+
+ def FilterOptions(self, platform):
+ """Given a platform return the options.
+
+ Args:
+ platform: string
+
+ Returns:
+ list or None
+ """
+ for target in self.target:
+ if target.platform == platform:
+ return target.options
+ return []
+
+ def FilterName(self, platform):
+ """Given a filter_type, return the filter name.
+
+ Args:
+ platform: string
+
+ Returns:
+ filter_name: string or None
+
+ Notes:
+ !! Deprecated in favor of Header.FilterOptions(platform) !!
+ """
+ for target in self.target:
+ if target.platform == platform:
+ if target.options:
+ return target.options[0]
+ return None
+
+
+# This could be a VarType object, but I'm keeping it as it's class
+# b/c we're almost certainly going to have to do something more exotic with
+# it shortly to account for various rendering options like default iptables
+# policies or output file names, etc. etc.
+class Target(object):
+ """The type of acl to be rendered from this policy file."""
+
+ def __init__(self, target):
+ self.platform = target[0]
+ if len(target) > 1:
+ self.options = target[1:]
+ else:
+ self.options = None
+
+ def __str__(self):
+ return self.platform
+
+ def __eq__(self, other):
+ return self.platform == other.platform and self.options == other.options
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+
+# Lexing/Parsing starts here
+# Token names PLY's lexer is allowed to emit; must match the t_* rules and
+# the values of the 'reserved' keyword map below.
+tokens = (
+    'ACTION',
+    'ADDR',
+    'ADDREXCLUDE',
+    'COMMENT',
+    'COUNTER',
+    'DADDR',
+    'DADDREXCLUDE',
+    'DPFX',
+    'DPORT',
+    'DINTERFACE',
+    'DQUOTEDSTRING',
+    'ETHER_TYPE',
+    'EXPIRATION',
+    'FRAGMENT_OFFSET',
+    'HEADER',
+    'ICMP_TYPE',
+    'INTEGER',
+    'LOGGING',
+    'LOSS_PRIORITY',
+    'OPTION',
+    'OWNER',
+    'PACKET_LEN',
+    'PLATFORM',
+    'PLATFORMEXCLUDE',
+    'POLICER',
+    'PORT',
+    'PRECEDENCE',
+    'PRINCIPALS',
+    'PROTOCOL',
+    'PROTOCOL_EXCEPT',
+    'QOS',
+    'ROUTING_INSTANCE',
+    'SADDR',
+    'SADDREXCLUDE',
+    'SINTERFACE',
+    'SPFX',
+    'SPORT',
+    'STRING',
+    'TARGET',
+    'TERM',
+    'TIMEOUT',
+    'TRAFFIC_TYPE',
+    'VERBATIM',
+)
+
+# Single-character tokens PLY returns literally; whitespace is skipped.
+literals = r':{},-'
+t_ignore = ' \t'
+
+# Maps policy-language keywords to their token types; consulted by t_STRING
+# so keywords are not mistaken for plain strings.
+reserved = {
+    'action': 'ACTION',
+    'address': 'ADDR',
+    'address-exclude': 'ADDREXCLUDE',
+    'comment': 'COMMENT',
+    'counter': 'COUNTER',
+    'destination-address': 'DADDR',
+    'destination-exclude': 'DADDREXCLUDE',
+    'destination-interface': 'DINTERFACE',
+    'destination-prefix': 'DPFX',
+    'destination-port': 'DPORT',
+    'ether-type': 'ETHER_TYPE',
+    'expiration': 'EXPIRATION',
+    'fragment-offset': 'FRAGMENT_OFFSET',
+    'header': 'HEADER',
+    'icmp-type': 'ICMP_TYPE',
+    'logging': 'LOGGING',
+    'loss-priority': 'LOSS_PRIORITY',
+    'option': 'OPTION',
+    'owner': 'OWNER',
+    'packet-length': 'PACKET_LEN',
+    'platform': 'PLATFORM',
+    'platform-exclude': 'PLATFORMEXCLUDE',
+    'policer': 'POLICER',
+    'port': 'PORT',
+    'precedence': 'PRECEDENCE',
+    'principals': 'PRINCIPALS',
+    'protocol': 'PROTOCOL',
+    'protocol-except': 'PROTOCOL_EXCEPT',
+    'qos': 'QOS',
+    'routing-instance': 'ROUTING_INSTANCE',
+    'source-address': 'SADDR',
+    'source-exclude': 'SADDREXCLUDE',
+    'source-interface': 'SINTERFACE',
+    'source-prefix': 'SPFX',
+    'source-port': 'SPORT',
+    'target': 'TARGET',
+    'term': 'TERM',
+    'timeout': 'TIMEOUT',
+    'traffic-type': 'TRAFFIC_TYPE',
+    'verbatim': 'VERBATIM',
+}
+
+
+# disable linting warnings for lexx/yacc code
+# pylint: disable-msg=W0613,C6102,C6104,C6105,C6108,C6409
+
+
+def t_IGNORE_COMMENT(t):
+  r'\#.*'
+  # Returning None drops '#' comments from the token stream entirely.
+  pass
+
+
+def t_DQUOTEDSTRING(t):
+  r'"[^"]*?"'
+  # Count newlines inside multi-line quoted strings so subsequent error
+  # messages still report accurate line numbers.
+  t.lexer.lineno += str(t.value).count('\n')
+  return t
+
+
+def t_newline(t):
+  r'\n+'
+  # Newlines are not tokens; just advance the line counter.
+  t.lexer.lineno += len(t.value)
+
+
+def t_error(t):
+  # Lexer error hook: report the bad character and skip past it so lexing
+  # can continue. (Python 2 print statement; this module targets Python 2.)
+  print "Illegal character '%s' on line %s" % (t.value[0], t.lineno)
+  t.lexer.skip(1)
+
+
+def t_INTEGER(t):
+  r'\d+'
+  # Value stays a string; consumers convert with int() where needed.
+  return t
+
+
+def t_STRING(t):
+  r'\w+([-_+.@]\w*)*'
+  # we have an identifier; let's check if it's a keyword or just a string.
+  t.type = reserved.get(t.value, 'STRING')
+  return t
+
+
+###
+## parser starts here
+###
+def p_target(p):
+  """ target : target header terms
+      | """
+  # Top-level rule: accumulate (header, terms) filter pairs into one Policy.
+  if len(p) > 1:
+    if type(p[1]) is Policy:
+      p[1].AddFilter(p[2], p[3])
+      p[0] = p[1]
+    else:
+      # First filter seen: create the Policy object.
+      p[0] = Policy(p[2], p[3])
+
+
+def p_header(p):
+  """ header : HEADER '{' header_spec '}' """
+  # Pass the accumulated Header object up unchanged.
+  p[0] = p[3]
+
+
+def p_header_spec(p):
+  """ header_spec : header_spec target_spec
+      | header_spec comment_spec
+      | """
+  # Fold each target/comment into a single Header, creating it on first use.
+  if len(p) > 1:
+    if type(p[1]) == Header:
+      p[1].AddObject(p[2])
+      p[0] = p[1]
+    else:
+      p[0] = Header()
+      p[0].AddObject(p[2])
+
+
+# we may want to change this at some point if we want to be clever with things
+# like being able to set a default input/output policy for iptables policies.
+def p_target_spec(p):
+  """ target_spec : TARGET ':' ':' strings_or_ints """
+  # 'target:: platform [options...]' becomes a Target object.
+  p[0] = Target(p[4])
+
+
+def p_terms(p):
+  """ terms : terms TERM STRING '{' term_spec '}'
+      | """
+  # Name the freshly-built Term and append it to the running list.
+  if len(p) > 1:
+    p[5].name = p[3]
+    if type(p[1]) == list:
+      p[1].append(p[5])
+      p[0] = p[1]
+    else:
+      p[0] = [p[5]]
+
+
+def p_term_spec(p):
+  """ term_spec : term_spec action_spec
+      | term_spec addr_spec
+      | term_spec comment_spec
+      | term_spec counter_spec
+      | term_spec ether_type_spec
+      | term_spec exclude_spec
+      | term_spec expiration_spec
+      | term_spec fragment_offset_spec
+      | term_spec icmp_type_spec
+      | term_spec interface_spec
+      | term_spec logging_spec
+      | term_spec losspriority_spec
+      | term_spec option_spec
+      | term_spec owner_spec
+      | term_spec packet_length_spec
+      | term_spec platform_spec
+      | term_spec policer_spec
+      | term_spec port_spec
+      | term_spec precedence_spec
+      | term_spec principals_spec
+      | term_spec prefix_list_spec
+      | term_spec protocol_spec
+      | term_spec qos_spec
+      | term_spec routinginstance_spec
+      | term_spec timeout_spec
+      | term_spec traffic_type_spec
+      | term_spec verbatim_spec
+      | """
+  # Fold each keyword clause into one Term, creating it on the first clause.
+  if len(p) > 1:
+    if type(p[1]) == Term:
+      p[1].AddObject(p[2])
+      p[0] = p[1]
+    else:
+      p[0] = Term(p[2])
+
+
+def p_routinginstance_spec(p):
+  """ routinginstance_spec : ROUTING_INSTANCE ':' ':' STRING """
+  # Single string value wrapped as a ROUTING_INSTANCE VarType.
+  p[0] = VarType(VarType.ROUTING_INSTANCE, p[4])
+
+
+def p_losspriority_spec(p):
+  """ losspriority_spec : LOSS_PRIORITY ':' ':' STRING """
+  # Single string value wrapped as a LOSS_PRIORITY VarType.
+  p[0] = VarType(VarType.LOSS_PRIORITY, p[4])
+
+
+def p_precedence_spec(p):
+  """ precedence_spec : PRECEDENCE ':' ':' one_or_more_ints """
+  # The whole integer list is stored in one PRECEDENCE VarType.
+  p[0] = VarType(VarType.PRECEDENCE, p[4])
+
+
+def p_icmp_type_spec(p):
+  """ icmp_type_spec : ICMP_TYPE ':' ':' one_or_more_strings """
+  # The whole string list is stored in one ICMP_TYPE VarType.
+  p[0] = VarType(VarType.ICMP_TYPE, p[4])
+
+
+def p_packet_length_spec(p):
+  """ packet_length_spec : PACKET_LEN ':' ':' INTEGER
+      | PACKET_LEN ':' ':' INTEGER '-' INTEGER """
+  # Either a single length or a 'low-high' range, stored as a string.
+  if len(p) == 4:
+    p[0] = VarType(VarType.PACKET_LEN, str(p[4]))
+  else:
+    p[0] = VarType(VarType.PACKET_LEN, str(p[4]) + '-' + str(p[6]))
+
+
+def p_fragment_offset_spec(p):
+  """ fragment_offset_spec : FRAGMENT_OFFSET ':' ':' INTEGER
+      | FRAGMENT_OFFSET ':' ':' INTEGER '-' INTEGER """
+  # Either a single offset or a 'low-high' range, stored as a string.
+  if len(p) == 4:
+    p[0] = VarType(VarType.FRAGMENT_OFFSET, str(p[4]))
+  else:
+    p[0] = VarType(VarType.FRAGMENT_OFFSET, str(p[4]) + '-' + str(p[6]))
+
+
+def p_exclude_spec(p):
+  """ exclude_spec : SADDREXCLUDE ':' ':' one_or_more_strings
+      | DADDREXCLUDE ':' ':' one_or_more_strings
+      | ADDREXCLUDE ':' ':' one_or_more_strings
+      | PROTOCOL_EXCEPT ':' ':' one_or_more_strings """
+
+  # Dispatch on the keyword text (p[1]) to pick the matching VarType.
+  p[0] = []
+  for ex in p[4]:
+    if p[1].find('source-exclude') >= 0:
+      p[0].append(VarType(VarType.SADDREXCLUDE, ex))
+    elif p[1].find('destination-exclude') >= 0:
+      p[0].append(VarType(VarType.DADDREXCLUDE, ex))
+    elif p[1].find('address-exclude') >= 0:
+      p[0].append(VarType(VarType.ADDREXCLUDE, ex))
+    elif p[1].find('protocol-except') >= 0:
+      p[0].append(VarType(VarType.PROTOCOL_EXCEPT, ex))
+
+
+def p_prefix_list_spec(p):
+  """ prefix_list_spec : DPFX ':' ':' one_or_more_strings
+      | SPFX ':' ':' one_or_more_strings """
+  # Dispatch on the keyword text (p[1]) to pick source vs destination prefix.
+  p[0] = []
+  for pfx in p[4]:
+    if p[1].find('source-prefix') >= 0:
+      p[0].append(VarType(VarType.SPFX, pfx))
+    elif p[1].find('destination-prefix') >= 0:
+      p[0].append(VarType(VarType.DPFX, pfx))
+
+
+def p_addr_spec(p):
+  """ addr_spec : SADDR ':' ':' one_or_more_strings
+      | DADDR ':' ':' one_or_more_strings
+      | ADDR ':' ':' one_or_more_strings """
+  # Dispatch on the keyword text; bare 'address' falls through to ADDRESS.
+  p[0] = []
+  for addr in p[4]:
+    if p[1].find('source-address') >= 0:
+      p[0].append(VarType(VarType.SADDRESS, addr))
+    elif p[1].find('destination-address') >= 0:
+      p[0].append(VarType(VarType.DADDRESS, addr))
+    else:
+      p[0].append(VarType(VarType.ADDRESS, addr))
+
+
+def p_port_spec(p):
+  """ port_spec : SPORT ':' ':' one_or_more_strings
+      | DPORT ':' ':' one_or_more_strings
+      | PORT ':' ':' one_or_more_strings """
+  # Dispatch on the keyword text; bare 'port' falls through to PORT.
+  p[0] = []
+  for port in p[4]:
+    if p[1].find('source-port') >= 0:
+      p[0].append(VarType(VarType.SPORT, port))
+    elif p[1].find('destination-port') >= 0:
+      p[0].append(VarType(VarType.DPORT, port))
+    else:
+      p[0].append(VarType(VarType.PORT, port))
+
+
+def p_protocol_spec(p):
+  """ protocol_spec : PROTOCOL ':' ':' strings_or_ints """
+  # Protocols may be names ('tcp') or numbers; one VarType each.
+  p[0] = []
+  for proto in p[4]:
+    p[0].append(VarType(VarType.PROTOCOL, proto))
+
+
+def p_ether_type_spec(p):
+  """ ether_type_spec : ETHER_TYPE ':' ':' one_or_more_strings """
+  # One ETHER_TYPE VarType per listed value.
+  p[0] = []
+  for proto in p[4]:
+    p[0].append(VarType(VarType.ETHER_TYPE, proto))
+
+
+def p_traffic_type_spec(p):
+  """ traffic_type_spec : TRAFFIC_TYPE ':' ':' one_or_more_strings """
+  # One TRAFFIC_TYPE VarType per listed value.
+  p[0] = []
+  for proto in p[4]:
+    p[0].append(VarType(VarType.TRAFFIC_TYPE, proto))
+
+
+def p_policer_spec(p):
+  """ policer_spec : POLICER ':' ':' STRING """
+  # Single string value wrapped as a POLICER VarType.
+  p[0] = VarType(VarType.POLICER, p[4])
+
+
+def p_logging_spec(p):
+  """ logging_spec : LOGGING ':' ':' STRING """
+  # Single string value wrapped as a LOGGING VarType.
+  p[0] = VarType(VarType.LOGGING, p[4])
+
+
+def p_option_spec(p):
+  """ option_spec : OPTION ':' ':' one_or_more_strings """
+  # One OPTION VarType per listed value.
+  p[0] = []
+  for opt in p[4]:
+    p[0].append(VarType(VarType.OPTION, opt))
+
+def p_principals_spec(p):
+  """ principals_spec : PRINCIPALS ':' ':' one_or_more_strings """
+  # One PRINCIPALS VarType per listed value.
+  p[0] = []
+  for opt in p[4]:
+    p[0].append(VarType(VarType.PRINCIPALS, opt))
+
+def p_action_spec(p):
+  """ action_spec : ACTION ':' ':' STRING """
+  # Single string value wrapped as an ACTION VarType.
+  p[0] = VarType(VarType.ACTION, p[4])
+
+
+def p_counter_spec(p):
+  """ counter_spec : COUNTER ':' ':' STRING """
+  # Single string value wrapped as a COUNTER VarType.
+  p[0] = VarType(VarType.COUNTER, p[4])
+
+
+def p_expiration_spec(p):
+  """ expiration_spec : EXPIRATION ':' ':' INTEGER '-' INTEGER '-' INTEGER """
+  # YYYY-MM-DD converted to a datetime.date; invalid dates raise ValueError.
+  p[0] = VarType(VarType.EXPIRATION, datetime.date(int(p[4]),
+                                                   int(p[6]),
+                                                   int(p[8])))
+
+
+def p_comment_spec(p):
+  """ comment_spec : COMMENT ':' ':' DQUOTEDSTRING """
+  # VarType.__init__ strips the quotes and leading whitespace.
+  p[0] = VarType(VarType.COMMENT, p[4])
+
+
+def p_owner_spec(p):
+  """ owner_spec : OWNER ':' ':' STRING """
+  # Single string value wrapped as an OWNER VarType.
+  p[0] = VarType(VarType.OWNER, p[4])
+
+
+def p_verbatim_spec(p):
+  """ verbatim_spec : VERBATIM ':' ':' STRING DQUOTEDSTRING """
+  # Value is a [platform, literal-text] pair; quotes stripped from the text.
+  p[0] = VarType(VarType.VERBATIM, [p[4], p[5].strip('"')])
+
+
+def p_qos_spec(p):
+  """ qos_spec : QOS ':' ':' STRING """
+  # Single string value wrapped as a QOS VarType.
+  p[0] = VarType(VarType.QOS, p[4])
+
+
+def p_interface_spec(p):
+  """ interface_spec : SINTERFACE ':' ':' STRING
+      | DINTERFACE ':' ':' STRING """
+  # Dispatch on the keyword text to pick source vs destination interface.
+  if p[1].find('source-interface') >= 0:
+    p[0] = VarType(VarType.SINTERFACE, p[4])
+  elif p[1].find('destination-interface') >= 0:
+    p[0] = VarType(VarType.DINTERFACE, p[4])
+
+
+def p_platform_spec(p):
+  """ platform_spec : PLATFORM ':' ':' one_or_more_strings
+      | PLATFORMEXCLUDE ':' ':' one_or_more_strings """
+  # Test 'platform-exclude' first: 'platform' is a prefix of it, so the
+  # order of these find() checks matters.
+  p[0] = []
+  for platform in p[4]:
+    if p[1].find('platform-exclude') >= 0:
+      p[0].append(VarType(VarType.PLATFORMEXCLUDE, platform))
+    elif p[1].find('platform') >= 0:
+      p[0].append(VarType(VarType.PLATFORM, platform))
+
+
+def p_timeout_spec(p):
+  """ timeout_spec : TIMEOUT ':' ':' INTEGER """
+  # Value stays the lexer's string form; consumers convert as needed.
+  p[0] = VarType(VarType.TIMEOUT, p[4])
+
+
+def p_one_or_more_strings(p):
+  """ one_or_more_strings : one_or_more_strings STRING
+      | STRING
+      | """
+  # Accumulate STRING tokens into a flat list.
+  if len(p) > 1:
+    if type(p[1]) == type([]):
+      p[1].append(p[2])
+      p[0] = p[1]
+    else:
+      p[0] = [p[1]]
+
+
+def p_one_or_more_ints(p):
+  """ one_or_more_ints : one_or_more_ints INTEGER
+      | INTEGER
+      | """
+  # Accumulate INTEGER tokens into a flat list.
+  if len(p) > 1:
+    if type(p[1]) == type([]):
+      p[1].append(p[2])
+      p[0] = p[1]
+    else:
+      p[0] = [p[1]]
+
+
+def p_strings_or_ints(p):
+  """ strings_or_ints : strings_or_ints STRING
+      | strings_or_ints INTEGER
+      | STRING
+      | INTEGER
+      | """
+  # Accumulate mixed STRING/INTEGER tokens into a flat list.
+  if len(p) > 1:
+    if type(p[1]) is list:
+      p[1].append(p[2])
+      p[0] = p[1]
+    else:
+      p[0] = [p[1]]
+
+
+def p_error(p):
+ """."""
+ next_token = yacc.token()
+ if next_token is None:
+ use_token = 'EOF'
+ else:
+ use_token = repr(next_token.value)
+
+ if p:
+ raise ParseError(' ERROR on "%s" (type %s, line %d, Next %s)'
+ % (p.value, p.type, p.lineno, use_token))
+ else:
+ raise ParseError(' ERROR you likely have unablanaced "{"\'s')
+
+# pylint: enable-msg=W0613,C6102,C6104,C6105,C6108,C6409
+
+
+def _ReadFile(filename):
+ """Read data from a file if it exists.
+
+ Args:
+ filename: str - Filename
+
+ Returns:
+ data: str contents of file.
+
+ Raises:
+ FileNotFoundError: if requested file does not exist.
+ FileReadError: Any error resulting from trying to open/read file.
+ """
+ if os.path.exists(filename):
+ try:
+ data = open(filename, 'r').read()
+ return data
+ except IOError:
+ raise FileReadError('Unable to open or read file %s' % filename)
+ else:
+ raise FileNotFoundError('Unable to open policy file %s' % filename)
+
+
+def _Preprocess(data, max_depth=5, base_dir=''):
+  """Search input for include statements and import specified include file.
+
+  Search input for include statements and if found, import specified file
+  and recursively search included data for includes as well up to max_depth.
+
+  Args:
+    data: A string of Policy file data.
+    max_depth: Maximum depth of included files
+    base_dir: Base path string where to look for policy or include files
+
+  Returns:
+    A list of the processed input lines (callers join with '\\n').
+
+  Raises:
+    RecursionTooDeepError: nested include files exceed maximum
+  """
+  if not max_depth:
+    # NOTE(review): max_depth has counted down to 0 here, so the message
+    # reports 0 rather than the original limit — confirm if intended.
+    raise RecursionTooDeepError('%s' % (
+        'Included files exceed maximum recursion depth of %s.' % max_depth))
+  rval = []
+  lines = [x.rstrip() for x in data.splitlines()]
+  for index, line in enumerate(lines):
+    words = line.split()
+    if len(words) > 1 and words[0] == '#include':
+      # remove any quotes around included filename
+      include_file = words[1].strip('\'"')
+      data = _ReadFile(os.path.join(base_dir, include_file))
+      # recursively handle includes in included data
+      inc_data = _Preprocess(data, max_depth - 1, base_dir=base_dir)
+      rval.extend(inc_data)
+    else:
+      rval.append(line)
+  return rval
+
+
+def ParseFile(filename, definitions=None, optimize=True, base_dir='',
+              shade_check=False):
+  """Parse the policy contained in file, optionally provide a naming object.
+
+  Read specified policy file and parse into a policy object.
+
+  Args:
+    filename: Name of policy file to parse.
+    definitions: optional naming library definitions object.
+    optimize: bool - whether to summarize networks and services.
+    base_dir: base path string to look for acls or include files.
+    shade_check: bool - whether to raise an exception when a term is shaded.
+
+  Returns:
+    policy object.
+  """
+  # Thin convenience wrapper: read the file, then delegate to ParsePolicy.
+  data = _ReadFile(filename)
+  p = ParsePolicy(data, definitions, optimize, base_dir=base_dir,
+                  shade_check=shade_check)
+  return p
+
+
+def ParsePolicy(data, definitions=None, optimize=True, base_dir='',
+                shade_check=False):
+  """Parse the policy in 'data', optionally provide a naming object.
+
+  Parse a blob of policy text into a policy object.
+
+  Args:
+    data: a string blob of policy data to parse.
+    definitions: optional naming library definitions object.
+    optimize: bool - whether to summarize networks and services.
+    base_dir: base path string to look for acls or include files.
+    shade_check: bool - whether to raise an exception when a term is shaded.
+
+  Returns:
+    policy object, or False if parsing raised IndexError.
+  """
+  try:
+    # Parser state is communicated via module globals because the PLY
+    # grammar callbacks above have no other channel to receive options.
+    if definitions:
+      globals()['DEFINITIONS'] = definitions
+    else:
+      globals()['DEFINITIONS'] = naming.Naming(DEFAULT_DEFINITIONS)
+    if not optimize:
+      globals()['_OPTIMIZE'] = False
+    if shade_check:
+      globals()['_SHADE_CHECK'] = True
+
+    lexer = lex.lex()
+
+    # Expand #include directives before handing the text to the parser.
+    preprocessed_data = '\n'.join(_Preprocess(data, base_dir=base_dir))
+    p = yacc.yacc(write_tables=False, debug=0, errorlog=yacc.NullLogger())
+
+    return p.parse(preprocessed_data, lexer=lexer)
+
+  except IndexError:
+    # NOTE(review): deliberately swallows IndexError from the parse and
+    # signals failure with False — callers must check the return value.
+    return False
+
+
+# if you call this from the command line, you can specify a jcl file for it to
+# read.
+if __name__ == '__main__':
+  # Smoke-test entry point: parse the named policy file (or stdin) and exit.
+  ret = 0
+  if len(sys.argv) > 1:
+    try:
+      # NOTE(review): ret becomes the parsed policy object on success, which
+      # is then passed to sys.exit() below — confirm the intended exit code.
+      ret = ParsePolicy(open(sys.argv[1], 'r').read())
+    except IOError:
+      print('ERROR: \'%s\' either does not exist or is not readable' %
+            (sys.argv[1]))
+      ret = 1
+  else:
+    # default to reading stdin
+    ret = ParsePolicy(sys.stdin.read())
+  sys.exit(ret)
diff --git a/lib/policyreader.py b/lib/policyreader.py
new file mode 100644
index 0000000..8124221
--- /dev/null
+++ b/lib/policyreader.py
@@ -0,0 +1,245 @@
+#!/usr/bin/python2.4
+#
+# Copyright 2011 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Utility to provide exploration of policy definition files.
+
+Allows read only access of policy definition files. The library
+creates a Policy object, which has filters containing terms.
+
+This library does no expansion on the tokens directly, such as in policy.py.
+
+TODO: This library is currently incomplete, and does not allow access to
+ every argument of a policy term.
+"""
+
+__author__ = 'watson@google.com (Tony Watson)'
+
+from capirca import naming
+
+
+# Raised by Policy.__init__ below when the .pol file cannot be read.
+class FileOpenError(Exception):
+  """Trouble opening a file."""
+
+
+class Filter(object):
+ """Simple filter with a name a list of terms."""
+
+ def __init__(self, filtername=''):
+ self.name = filtername
+ self.term = []
+
+ def __str__(self):
+ rval = []
+ title = 'Filter: %s' % str(self.name)
+ rval.append('\n%s' % title)
+ rval.append('-' * len(title))
+ for term in self.term:
+ rval.append(str(term))
+ return '\n\n'.join(rval)
+
+
+class Term(object):
+ """Simple term with a name a list of attributes."""
+
+ def __init__(self, termname=''):
+ self.name = termname
+ self.source = []
+ self.destination = []
+ self.sport = []
+ self.dport = []
+ self.action = []
+ self.option = []
+ self.protocol = []
+
+ def __str__(self):
+ rval = []
+ rval.append(' Term: %s' % self.name)
+ rval.append(' Source-address:: %s' % ' '.join(self.source))
+ rval.append(' Destination-address:: %s' % ' '.join(self.destination))
+ rval.append(' Source-port:: %s' % ' '.join(self.sport))
+ rval.append(' Destination-port:: %s' % ' '.join(self.dport))
+ rval.append(' Protocol:: %s' % ' '.join(self.protocol))
+ rval.append(' Option:: %s' % ' '.join(self.option))
+ rval.append(' Action:: %s' % ' '.join(self.action))
+ return '\n'.join(rval)
+
+
+class Policy(object):
+ """Holds basic attributes of an unexpanded policy definition file."""
+
+ def __init__(self, filename, defs_data=None):
+ """Build policy object and naming definitions from provided filenames.
+
+ Args:
+ filename: location of a .pol file
+ defs_data: location of naming definitions directory, if any
+ """
+ self.defs = naming.Naming(defs_data)
+ self.filter = []
+ try:
+ self.data = open(filename, 'r').readlines()
+ except IOError, error_info:
+ info = str(filename) + ' cannot be opened'
+ raise FileOpenError('%s\n%s' % (info, error_info))
+
+ indent = 0
+ in_header = False
+ in_term = False
+ filt = Filter()
+ term = Term()
+ in_string = False
+
+ for line in self.data:
+ words = line.strip().split()
+ quotes = len(line.split('"')) + 1
+ if quotes % 2: # are we in or out of double quotes
+ in_string = not in_string # flip status of quote status
+ if not in_string:
+ if '{' in words:
+ indent += 1
+ if words:
+ if words[0] == 'header':
+ in_header = True
+ if words[0] == 'term':
+ in_term = True
+ term = Term(words[1])
+ if in_header and words[0] == 'target::':
+ if filt.name != words[2]: # avoid empty dupe filters due to
+ filt = Filter(words[2]) # multiple target header lines
+ if in_term:
+ if words[0] == 'source-address::':
+ term.source.extend(words[1:])
+ if words[0] == 'destination-address::':
+ term.destination.extend(words[1:])
+ if words[0] == 'source-port::':
+ term.sport.extend(words[1:])
+ if words[0] == 'destination-port::':
+ term.dport.extend(words[1:])
+ if words[0] == 'action::':
+ term.action.extend(words[1:])
+ if words[0] == 'protocol::':
+ term.protocol.extend(words[1:])
+ if words[0] == 'option::':
+ term.option.extend(words[1:])
+
+ if '}' in words:
+ indent -= 1
+ if in_header:
+ self.filter.append(filt)
+ in_header = False
+ if in_term:
+ filt.term.append(term)
+ in_term = False
+
+ def __str__(self):
+ return '\n'.join(str(next) for next in self.filter)
+
+ def Matches(self, src=None, dst=None, dport=None, sport=None,
+ filtername=None):
+ """Return list of term names that match specific attributes.
+
+ Args:
+ src: source ip address '12.1.1.1'
+ dst: destination ip address '10.1.1.1'
+ dport: any port/protocol combo, such as '80/tcp' or '53/udp'
+ sport: any port/protocol combo, such as '80/tcp' or '53/udp'
+ filtername: a filter name or None to search all filters
+
+ Returns:
+ results: list of lists, each list is index to filter & term in the policy
+
+ Example:
+ p=policyreader.Policy('policy_path', 'definitions_path')
+
+ p.Matches(dst='209.85.216.5', dport='25/tcp')
+ [[0, 26]]
+ print p.filter[0].term[26].name
+
+ for match in p.Matches(dst='209.85.216.5'):
+ print p.filter[match[0]].term[match[1]].name
+
+ """
+ rval = []
+ results = []
+ filter_list = []
+ dport_parents = None
+ sport_parents = None
+ destination_parents = None
+ source_parents = None
+ if dport:
+ dport_parents = self.defs.GetServiceParents(dport)
+ if sport:
+ sport_parents = self.defs.GetServiceParents(sport)
+ if dst:
+ destination_parents = self.defs.GetIpParents(dst)
+ try:
+ destination_parents.remove('ANY')
+ destination_parents.remove('RESERVED')
+ except ValueError:
+ pass # ignore and continue
+ if src:
+ source_parents = self.defs.GetIpParents(src)
+ try:
+ source_parents.remove('ANY')
+ source_parents.remove('RESERVED')
+ except ValueError:
+ pass # ignore and continue
+ if not filtername:
+ filter_list = self.filter
+ else:
+ for idx, next in enumerate(self.filter):
+ if filtername == next.name:
+ filter_list = [self.filter[idx]]
+ if not filter_list:
+ raise 'invalid filter name: %s' % filtername
+
+ for findex, xfilter in enumerate(filter_list):
+ mterms = []
+ mterms.append(set()) # dport
+ mterms.append(set()) # sport
+ mterms.append(set()) # dst
+ mterms.append(set()) # src
+ for tindex, term in enumerate(xfilter.term):
+ if dport_parents:
+ for token in dport_parents:
+ if token in term.dport:
+ mterms[0].add(tindex)
+ else:
+ mterms[0].add(tindex)
+ if sport_parents:
+ for token in sport_parents:
+ if token in term.sport:
+ mterms[1].add(tindex)
+ else:
+ mterms[1].add(tindex)
+ if destination_parents:
+ for token in destination_parents:
+ if token in term.destination:
+ mterms[2].add(tindex)
+ else:
+ mterms[2].add(tindex)
+ if source_parents:
+ for token in source_parents:
+ if token in term.source:
+ mterms[3].add(tindex)
+ else:
+ mterms[3].add(tindex)
+ rval.append(list(mterms[0] & mterms[1] & mterms[2] & mterms[3]))
+ for findex, fresult in enumerate(rval):
+ for next in list(fresult):
+ results.append([findex, next])
+ return results
diff --git a/lib/port.py b/lib/port.py
new file mode 100755
index 0000000..f28ac52
--- /dev/null
+++ b/lib/port.py
@@ -0,0 +1,55 @@
+#!/usr/bin/python
+#
+# Copyright 2011 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Common library for network ports and protocol handling."""
+
+__author__ = 'watson@google.com (Tony Watson)'
+
+
+# Root of this module's exception hierarchy.
+class Error(Exception):
+  """Base error class."""
+
+
+# Raised by Port() when the value is not an integer at all.
+class BadPortValue(Error):
+  """Invalid port format."""
+
+
+# Raised by Port() when the value is an integer outside 0-65535.
+class BadPortRange(Error):
+  """Invalid port range."""
+
+
+def Port(port):
+ """Sanitize a port value.
+
+ Args:
+ port: a port value
+
+ Returns:
+ port: a port value
+
+ Raises:
+ BadPortValue: port is not valid integer or string
+ BadPortRange: port is outside valid range
+ """
+ pval = -1
+ try:
+ pval = int(port)
+ except ValueError:
+ raise BadPortValue('port %s is not valid.' % port)
+ if pval < 0 or pval > 65535:
+ raise BadPortRange('port %s is out of range 0-65535.' % port)
+ return pval
diff --git a/lib/setup.py b/lib/setup.py
new file mode 100644
index 0000000..72ab5d0
--- /dev/null
+++ b/lib/setup.py
@@ -0,0 +1,39 @@
+#!/usr/bin/python
+#
+# Copyright 2011 Google Inc. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from distutils.core import setup
+
+import capirca
+
+setup(name='capirca',
+ maintainer='Google',
+ maintainer_email='capirca-dev@googlegroups.com',
+ version=ipaddr.__version__,
+ url='http://code.google.com/p/capirca/',
+ license='Apache License, Version 2.0',
+ classifiers=[
+ 'Development Status :: 5 - Production/Stable',
+ 'Intended Audience :: Developers',
+ 'License :: OSI Approved :: Apache Software License',
+ 'Operating System :: OS Independent',
+ 'Topic :: Internet',
+ 'Topic :: Software Development :: Libraries',
+ 'Topic :: System :: Networking',
+ 'Topic :: Security'],
+ py_modules=['naming', 'policy', 'nacaddr', 'cisco', 'ciscoasa', 'juniper',
+ 'junipersrx', 'iptables', 'policyreader', 'aclcheck',
+ 'aclgenerator', 'port', 'packetfilter', 'speedway', 'demo'])
diff --git a/lib/speedway.py b/lib/speedway.py
new file mode 100755
index 0000000..233bbe0
--- /dev/null
+++ b/lib/speedway.py
@@ -0,0 +1,50 @@
+#!/usr/bin/python2.4
+#
+# Copyright 2011 Google Inc. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Speedway iptables generator.
+
+ This is a subclass of Iptables library. The primary difference is
+ that this library produced 'iptable-restore' compatible output."""
+
+__author__ = 'watson@google.com (Tony Watson)'
+
+from string import Template
+import iptables
+
+
+# Base exception for the speedway generator.
+class Error(Exception):
+  pass
+
+
+class Term(iptables.Term):
+  """Generate Iptables policy terms."""
+  # Reuses iptables.Term rendering; only the platform tag and the jump
+  # templates differ so output suits 'iptables-restore'.
+  _PLATFORM = 'speedway'
+  _PREJUMP_FORMAT = None
+  _POSTJUMP_FORMAT = Template('-A $filter -j $term')
+
+
+class Speedway(iptables.Iptables):
+  """Generates filters and terms from provided policy object."""
+
+  _PLATFORM = 'speedway'
+  _DEFAULT_PROTOCOL = 'all'
+  _SUFFIX = '.ipt'
+
+  # Wrap output in '*filter' ... 'COMMIT' so it loads via iptables-restore.
+  _RENDER_PREFIX = '*filter'
+  _RENDER_SUFFIX = 'COMMIT'
+  _DEFAULTACTION_FORMAT = ':%s %s'
+
+  _TERM = Term