include vswitchd/automake.mk
include ovsdb/automake.mk
include xenserver/automake.mk
-
+include python/ovs/automake.mk
echo 'const char ovs_bindir[] = "$(bindir)";') > lib/dirs.c.tmp
mv lib/dirs.c.tmp lib/dirs.c
-install-data-local:
+install-data-local: lib-install-data-local
+lib-install-data-local:
$(MKDIR_P) $(DESTDIR)$(RUNDIR)
$(MKDIR_P) $(DESTDIR)$(PKIDIR)
$(MKDIR_P) $(DESTDIR)$(LOGDIR)
+++ /dev/null
-import re
-
-class Error(Exception):
- def __init__(self, msg):
- Exception.__init__(self)
- self.msg = msg
-
-def getMember(json, name, validTypes, description, default=None):
- if name in json:
- member = json[name]
- if len(validTypes) and type(member) not in validTypes:
- raise Error("%s: type mismatch for '%s' member"
- % (description, name))
- return member
- return default
-
-def mustGetMember(json, name, expectedType, description):
- member = getMember(json, name, expectedType, description)
- if member == None:
- raise Error("%s: missing '%s' member" % (description, name))
- return member
-
-class DbSchema:
- def __init__(self, name, tables):
- self.name = name
- self.tables = tables
-
- @staticmethod
- def fromJson(json):
- name = mustGetMember(json, 'name', [unicode], 'database')
- tablesJson = mustGetMember(json, 'tables', [dict], 'database')
- tables = {}
- for tableName, tableJson in tablesJson.iteritems():
- tables[tableName] = TableSchema.fromJson(tableJson,
- "%s table" % tableName)
- return DbSchema(name, tables)
-
-class IdlSchema(DbSchema):
- def __init__(self, name, tables, idlPrefix, idlHeader):
- DbSchema.__init__(self, name, tables)
- self.idlPrefix = idlPrefix
- self.idlHeader = idlHeader
-
- @staticmethod
- def fromJson(json):
- schema = DbSchema.fromJson(json)
- idlPrefix = mustGetMember(json, 'idlPrefix', [unicode], 'database')
- idlHeader = mustGetMember(json, 'idlHeader', [unicode], 'database')
- return IdlSchema(schema.name, schema.tables, idlPrefix, idlHeader)
-
-class TableSchema:
- def __init__(self, columns):
- self.columns = columns
-
- @staticmethod
- def fromJson(json, description):
- columnsJson = mustGetMember(json, 'columns', [dict], description)
- columns = {}
- for name, json in columnsJson.iteritems():
- columns[name] = ColumnSchema.fromJson(
- json, "column %s in %s" % (name, description))
- return TableSchema(columns)
-
-class ColumnSchema:
- def __init__(self, type, persistent):
- self.type = type
- self.persistent = persistent
-
- @staticmethod
- def fromJson(json, description):
- type = Type.fromJson(mustGetMember(json, 'type', [dict, unicode],
- description),
- 'type of %s' % description)
- ephemeral = getMember(json, 'ephemeral', [bool], description)
- persistent = ephemeral != True
- return ColumnSchema(type, persistent)
-
-def escapeCString(src):
- dst = ""
- for c in src:
- if c in "\\\"":
- dst += "\\" + c
- elif ord(c) < 32:
- if c == '\n':
- dst += '\\n'
- elif c == '\r':
- dst += '\\r'
- elif c == '\a':
- dst += '\\a'
- elif c == '\b':
- dst += '\\b'
- elif c == '\f':
- dst += '\\f'
- elif c == '\t':
- dst += '\\t'
- elif c == '\v':
- dst += '\\v'
- else:
- dst += '\\%03o' % ord(c)
- else:
- dst += c
- return dst
-
-def returnUnchanged(x):
- return x
-
-class UUID:
- x = "[0-9a-fA-f]"
- uuidRE = re.compile("^(%s{8})-(%s{4})-(%s{4})-(%s{4})-(%s{4})(%s{8})$"
- % (x, x, x, x, x, x))
-
- def __init__(self, value):
- self.value = value
-
- @staticmethod
- def fromString(s):
- if not uuidRE.match(s):
- raise Error("%s is not a valid UUID" % s)
- return UUID(s)
-
- @staticmethod
- def fromJson(json):
- if UUID.isValidJson(json):
- return UUID(json[1])
- else:
- raise Error("%s is not valid JSON for a UUID" % json)
-
- @staticmethod
- def isValidJson(json):
- return len(json) == 2 and json[0] == "uuid" and uuidRE.match(json[1])
-
- def toJson(self):
- return ["uuid", self.value]
-
- def cInitUUID(self, var):
- m = re.match(self.value)
- return ["%s.parts[0] = 0x%s;" % (var, m.group(1)),
- "%s.parts[1] = 0x%s%s;" % (var, m.group(2), m.group(3)),
- "%s.parts[2] = 0x%s%s;" % (var, m.group(4), m.group(5)),
- "%s.parts[3] = 0x%s;" % (var, m.group(6))]
-
-class Atom:
- def __init__(self, type, value):
- self.type = type
- self.value = value
-
- @staticmethod
- def fromJson(type_, json):
- if ((type_ == 'integer' and type(json) in [int, long])
- or (type_ == 'real' and type(json) in [int, long, float])
- or (type_ == 'boolean' and json in [True, False])
- or (type_ == 'string' and type(json) in [str, unicode])):
- return Atom(type_, json)
- elif type_ == 'uuid':
- return UUID.fromJson(json)
- else:
- raise Error("%s is not valid JSON for type %s" % (json, type_))
-
- def toJson(self):
- if self.type == 'uuid':
- return self.value.toString()
- else:
- return self.value
-
- def cInitAtom(self, var):
- if self.type == 'integer':
- return ['%s.integer = %d;' % (var, self.value)]
- elif self.type == 'real':
- return ['%s.real = %.15g;' % (var, self.value)]
- elif self.type == 'boolean':
- if self.value:
- return ['%s.boolean = true;']
- else:
- return ['%s.boolean = false;']
- elif self.type == 'string':
- return ['%s.string = xstrdup("%s");'
- % (var, escapeCString(self.value))]
- elif self.type == 'uuid':
- return self.value.cInitUUID(var)
-
- def toEnglish(self, escapeLiteral=returnUnchanged):
- if self.type == 'integer':
- return '%d' % self.value
- elif self.type == 'real':
- return '%.15g' % self.value
- elif self.type == 'boolean':
- if self.value:
- return 'true'
- else:
- return 'false'
- elif self.type == 'string':
- return escapeLiteral(self.value)
- elif self.type == 'uuid':
- return self.value.value
-
-# Returns integer x formatted in decimal with thousands set off by commas.
-def commafy(x):
- return _commafy("%d" % x)
-def _commafy(s):
- if s.startswith('-'):
- return '-' + _commafy(s[1:])
- elif len(s) <= 3:
- return s
- else:
- return _commafy(s[:-3]) + ',' + _commafy(s[-3:])
-
-class BaseType:
- def __init__(self, type,
- enum=None,
- refTable=None, refType="strong",
- minInteger=None, maxInteger=None,
- minReal=None, maxReal=None,
- minLength=None, maxLength=None):
- self.type = type
- self.enum = enum
- self.refTable = refTable
- self.refType = refType
- self.minInteger = minInteger
- self.maxInteger = maxInteger
- self.minReal = minReal
- self.maxReal = maxReal
- self.minLength = minLength
- self.maxLength = maxLength
-
- @staticmethod
- def fromJson(json, description):
- if type(json) == unicode:
- return BaseType(json)
- else:
- atomicType = mustGetMember(json, 'type', [unicode], description)
- enum = getMember(json, 'enum', [], description)
- if enum:
- enumType = Type(atomicType, None, 0, 'unlimited')
- enum = Datum.fromJson(enumType, enum)
- refTable = getMember(json, 'refTable', [unicode], description)
- refType = getMember(json, 'refType', [unicode], description)
- if refType == None:
- refType = "strong"
- minInteger = getMember(json, 'minInteger', [int, long], description)
- maxInteger = getMember(json, 'maxInteger', [int, long], description)
- minReal = getMember(json, 'minReal', [int, long, float], description)
- maxReal = getMember(json, 'maxReal', [int, long, float], description)
- minLength = getMember(json, 'minLength', [int], description)
- maxLength = getMember(json, 'minLength', [int], description)
- return BaseType(atomicType, enum, refTable, refType, minInteger, maxInteger, minReal, maxReal, minLength, maxLength)
-
- def toEnglish(self, escapeLiteral=returnUnchanged):
- if self.type == 'uuid' and self.refTable:
- s = escapeLiteral(self.refTable)
- if self.refType == 'weak':
- s = "weak reference to " + s
- return s
- else:
- return self.type
-
- def constraintsToEnglish(self, escapeLiteral=returnUnchanged):
- if self.enum:
- literals = [value.toEnglish(escapeLiteral)
- for value in self.enum.values]
- if len(literals) == 2:
- return 'either %s or %s' % (literals[0], literals[1])
- else:
- return 'one of %s, %s, or %s' % (literals[0],
- ', '.join(literals[1:-1]),
- literals[-1])
- elif self.minInteger != None and self.maxInteger != None:
- return 'in range %s to %s' % (commafy(self.minInteger),
- commafy(self.maxInteger))
- elif self.minInteger != None:
- return 'at least %s' % commafy(self.minInteger)
- elif self.maxInteger != None:
- return 'at most %s' % commafy(self.maxInteger)
- elif self.minReal != None and self.maxReal != None:
- return 'in range %g to %g' % (self.minReal, self.maxReal)
- elif self.minReal != None:
- return 'at least %g' % self.minReal
- elif self.maxReal != None:
- return 'at most %g' % self.maxReal
- elif self.minLength != None and self.maxLength != None:
- if self.minLength == self.maxLength:
- return 'exactly %d characters long' % (self.minLength)
- else:
- return 'between %d and %d characters long' % (self.minLength, self.maxLength)
- elif self.minLength != None:
- return 'at least %d characters long' % self.minLength
- elif self.maxLength != None:
- return 'at most %d characters long' % self.maxLength
- else:
- return ''
-
- def toCType(self, prefix):
- if self.refTable:
- return "struct %s%s *" % (prefix, self.refTable.lower())
- else:
- return {'integer': 'int64_t ',
- 'real': 'double ',
- 'uuid': 'struct uuid ',
- 'boolean': 'bool ',
- 'string': 'char *'}[self.type]
-
- def toAtomicType(self):
- return "OVSDB_TYPE_%s" % self.type.upper()
-
- def copyCValue(self, dst, src):
- args = {'dst': dst, 'src': src}
- if self.refTable:
- return ("%(dst)s = %(src)s->header_.uuid;") % args
- elif self.type == 'string':
- return "%(dst)s = xstrdup(%(src)s);" % args
- else:
- return "%(dst)s = %(src)s;" % args
-
- def initCDefault(self, var, isOptional):
- if self.refTable:
- return "%s = NULL;" % var
- elif self.type == 'string' and not isOptional:
- return "%s = \"\";" % var
- else:
- return {'integer': '%s = 0;',
- 'real': '%s = 0.0;',
- 'uuid': 'uuid_zero(&%s);',
- 'boolean': '%s = false;',
- 'string': '%s = NULL;'}[self.type] % var
-
- def cInitBaseType(self, indent, var):
- stmts = []
- stmts.append('ovsdb_base_type_init(&%s, OVSDB_TYPE_%s);' % (
- var, self.type.upper()),)
- if self.enum:
- stmts.append("%s.enum_ = xmalloc(sizeof *%s.enum_);"
- % (var, var))
- stmts += self.enum.cInitDatum("%s.enum_" % var)
- if self.type == 'integer':
- if self.minInteger != None:
- stmts.append('%s.u.integer.min = INT64_C(%d);' % (var, self.minInteger))
- if self.maxInteger != None:
- stmts.append('%s.u.integer.max = INT64_C(%d);' % (var, self.maxInteger))
- elif self.type == 'real':
- if self.minReal != None:
- stmts.append('%s.u.real.min = %d;' % (var, self.minReal))
- if self.maxReal != None:
- stmts.append('%s.u.real.max = %d;' % (var, self.maxReal))
- elif self.type == 'string':
- if self.minLength != None:
- stmts.append('%s.u.string.minLen = %d;' % (var, self.minLength))
- if self.maxLength != None:
- stmts.append('%s.u.string.maxLen = %d;' % (var, self.maxLength))
- elif self.type == 'uuid':
- if self.refTable != None:
- stmts.append('%s.u.uuid.refTableName = "%s";' % (var, escapeCString(self.refTable)))
- return '\n'.join([indent + stmt for stmt in stmts])
-
-class Type:
- def __init__(self, key, value=None, min=1, max=1):
- self.key = key
- self.value = value
- self.min = min
- self.max = max
-
- @staticmethod
- def fromJson(json, description):
- if type(json) == unicode:
- return Type(BaseType(json))
- else:
- keyJson = mustGetMember(json, 'key', [dict, unicode], description)
- key = BaseType.fromJson(keyJson, 'key in %s' % description)
-
- valueJson = getMember(json, 'value', [dict, unicode], description)
- if valueJson:
- value = BaseType.fromJson(valueJson,
- 'value in %s' % description)
- else:
- value = None
-
- min = getMember(json, 'min', [int], description, 1)
- max = getMember(json, 'max', [int, unicode], description, 1)
- return Type(key, value, min, max)
-
- def isScalar(self):
- return self.min == 1 and self.max == 1 and not self.value
-
- def isOptional(self):
- return self.min == 0 and self.max == 1
-
- def isOptionalPointer(self):
- return (self.min == 0 and self.max == 1 and not self.value
- and (self.key.type == 'string' or self.key.refTable))
-
- def toEnglish(self, escapeLiteral=returnUnchanged):
- keyName = self.key.toEnglish(escapeLiteral)
- if self.value:
- valueName = self.value.toEnglish(escapeLiteral)
-
- if self.isScalar():
- return keyName
- elif self.isOptional():
- if self.value:
- return "optional %s-%s pair" % (keyName, valueName)
- else:
- return "optional %s" % keyName
- else:
- if self.max == "unlimited":
- if self.min:
- quantity = "%d or more " % self.min
- else:
- quantity = ""
- elif self.min:
- quantity = "%d to %d " % (self.min, self.max)
- else:
- quantity = "up to %d " % self.max
-
- if self.value:
- return "map of %s%s-%s pairs" % (quantity, keyName, valueName)
- else:
- if keyName.endswith('s'):
- plural = keyName + "es"
- else:
- plural = keyName + "s"
- return "set of %s%s" % (quantity, plural)
-
- def constraintsToEnglish(self, escapeLiteral=returnUnchanged):
- s = ""
-
- constraints = []
- keyConstraints = self.key.constraintsToEnglish(escapeLiteral)
- if keyConstraints:
- if self.value:
- constraints += ['key ' + keyConstraints]
- else:
- constraints += [keyConstraints]
-
- if self.value:
- valueConstraints = self.value.constraintsToEnglish(escapeLiteral)
- if valueConstraints:
- constraints += ['value ' + valueConstraints]
-
- return ', '.join(constraints)
-
- def cDeclComment(self):
- if self.min == 1 and self.max == 1 and self.key.type == "string":
- return "\t/* Always nonnull. */"
- else:
- return ""
-
- def cInitType(self, indent, var):
- initKey = self.key.cInitBaseType(indent, "%s.key" % var)
- if self.value:
- initValue = self.value.cInitBaseType(indent, "%s.value" % var)
- else:
- initValue = ('%sovsdb_base_type_init(&%s.value, '
- 'OVSDB_TYPE_VOID);' % (indent, var))
- initMin = "%s%s.n_min = %s;" % (indent, var, self.min)
- if self.max == "unlimited":
- max = "UINT_MAX"
- else:
- max = self.max
- initMax = "%s%s.n_max = %s;" % (indent, var, max)
- return "\n".join((initKey, initValue, initMin, initMax))
-
-class Datum:
- def __init__(self, type, values):
- self.type = type
- self.values = values
-
- @staticmethod
- def fromJson(type_, json):
- if not type_.value:
- if len(json) == 2 and json[0] == "set":
- values = []
- for atomJson in json[1]:
- values += [Atom.fromJson(type_.key, atomJson)]
- else:
- values = [Atom.fromJson(type_.key, json)]
- else:
- if len(json) != 2 or json[0] != "map":
- raise Error("%s is not valid JSON for a map" % json)
- values = []
- for pairJson in json[1]:
- values += [(Atom.fromJson(type_.key, pairJson[0]),
- Atom.fromJson(type_.value, pairJson[1]))]
- return Datum(type_, values)
-
- def cInitDatum(self, var):
- if len(self.values) == 0:
- return ["ovsdb_datum_init_empty(%s);" % var]
-
- s = ["%s->n = %d;" % (var, len(self.values))]
- s += ["%s->keys = xmalloc(%d * sizeof *%s->keys);"
- % (var, len(self.values), var)]
-
- for i in range(len(self.values)):
- key = self.values[i]
- if self.type.value:
- key = key[0]
- s += key.cInitAtom("%s->keys[%d]" % (var, i))
-
- if self.type.value:
- s += ["%s->values = xmalloc(%d * sizeof *%s->values);"
- % (var, len(self.values), var)]
- for i in range(len(self.values)):
- value = self.values[i][1]
- s += key.cInitAtom("%s->values[%d]" % (var, i))
- else:
- s += ["%s->values = NULL;" % var]
-
- if len(self.values) > 1:
- s += ["ovsdb_datum_sort_assert(%s, OVSDB_TYPE_%s);"
- % (var, self.type.key.upper())]
-
- return s
# ovsdb-idlc
EXTRA_DIST += \
- ovsdb/OVSDB.py \
ovsdb/SPECS \
ovsdb/simplejson/__init__.py \
ovsdb/simplejson/_speedups.c \
ovsdb/ovsdb-idlc.1
DISTCLEANFILES += ovsdb/ovsdb-idlc
SUFFIXES += .ovsidl
-OVSDB_IDLC = $(PYTHON) $(srcdir)/ovsdb/ovsdb-idlc.in
+OVSDB_IDLC = $(run_python) $(srcdir)/ovsdb/ovsdb-idlc.in
.ovsidl.c:
$(OVSDB_IDLC) c-idl-source $< > $@.tmp
mv $@.tmp $@
EXTRA_DIST += ovsdb/ovsdb-doc.in
noinst_SCRIPTS += ovsdb/ovsdb-doc
DISTCLEANFILES += ovsdb/ovsdb-doc
-OVSDB_DOC = $(PYTHON) $(srcdir)/ovsdb/ovsdb-doc.in
+OVSDB_DOC = $(run_python) $(srcdir)/ovsdb/ovsdb-doc.in
# ovsdb-dot
EXTRA_DIST += ovsdb/ovsdb-dot.in
noinst_SCRIPTS += ovsdb/ovsdb-dot
DISTCLEANFILES += ovsdb/ovsdb-dot
-OVSDB_DOT = $(PYTHON) $(srcdir)/ovsdb/ovsdb-dot.in
+OVSDB_DOT = $(run_python) $(srcdir)/ovsdb/ovsdb-dot.in
include ovsdb/ovsdbmonitor/automake.mk
import sys
import xml.dom.minidom
-sys.path.insert(0, "@abs_top_srcdir@/ovsdb")
-import simplejson as json
-
-from OVSDB import *
+import ovs.json
+from ovs.db import error
+import ovs.db.schema
argv0 = sys.argv[0]
elif c == "'":
return r'\(cq'
else:
- raise Error("bad escape")
+ raise error.Error("bad escape")
# Escape - \ " ' as needed by nroff.
s = re.sub('([-"\'\\\\])', escape, s)
elif node.hasAttribute('group'):
s += node.attributes['group'].nodeValue
else:
- raise Error("'ref' lacks column and table attributes")
+ raise error.Error("'ref' lacks column and table attributes")
return s + font
elif node.tagName == 'var':
s = r'\fI'
s += inlineXmlToNroff(child, r'\fI')
return s + font
else:
- raise Error("element <%s> unknown or invalid here" % node.tagName)
+ raise error.Error("element <%s> unknown or invalid here" % node.tagName)
else:
- raise Error("unknown node %s in inline xml" % node)
+ raise error.Error("unknown node %s in inline xml" % node)
def blockXmlToNroff(nodes, para='.PP'):
s = ''
s += ".IP \\(bu\n" + blockXmlToNroff(liNode.childNodes, ".IP")
elif (liNode.nodeType != node.TEXT_NODE
or not liNode.data.isspace()):
- raise Error("<ul> element may only have <li> children")
+ raise error.Error("<ul> element may only have <li> children")
s += ".RE\n"
elif node.tagName == 'dl':
if s != "":
prev = 'dd'
elif (liNode.nodeType != node.TEXT_NODE
or not liNode.data.isspace()):
- raise Error("<dl> element may only have <dt> and <dd> children")
+ raise error.Error("<dl> element may only have <dt> and <dd> children")
s += blockXmlToNroff(liNode.childNodes, ".IP")
s += ".RE\n"
elif node.tagName == 'p':
else:
s += inlineXmlToNroff(node, r'\fR')
else:
- raise Error("unknown node %s in block xml" % node)
+ raise error.Error("unknown node %s in block xml" % node)
if s != "" and not s.endswith('\n'):
s += '\n'
return s
body += '.ST "%s:"\n' % textToNroff(title)
body += subIntro + subBody
else:
- raise Error("unknown element %s in <table>" % node.tagName)
+ raise error.Error("unknown element %s in <table>" % node.tagName)
return summary, intro, body
def tableSummaryToNroff(summary, level=0):
return s
def docsToNroff(schemaFile, xmlFile, erFile, title=None):
- schema = DbSchema.fromJson(json.load(open(schemaFile, "r")))
+ schema = ovs.db.schema.DbSchema.from_json(ovs.json.from_file(schemaFile))
doc = xml.dom.minidom.parse(xmlFile).documentElement
schemaDate = os.stat(schemaFile).st_mtime
if len(line):
print line
- except Error, e:
+ except error.Error, e:
sys.stderr.write("%s: %s\n" % (argv0, e.msg))
sys.exit(1)
import re
import sys
-sys.path.insert(0, "@abs_top_srcdir@/ovsdb")
-import simplejson as json
-
-from OVSDB import *
-
argv0 = sys.argv[0]
def printEdge(tableName, baseType, label):
', '.join(['%s=%s' % (k,v) for k,v in options.items()]))
def schemaToDot(schemaFile):
- schema = DbSchema.fromJson(json.load(open(schemaFile, "r")))
+ schema = DbSchema.fromJson(ovs.json.from_file(schemaFile))
print "digraph %s {" % schema.name
for tableName, table in schema.tables.iteritems():
import re
import sys
-sys.path.insert(0, "@abs_top_srcdir@/ovsdb")
-import simplejson as json
-
-from OVSDB import *
+import ovs.json
+import ovs.db.error
+import ovs.db.schema
argv0 = sys.argv[0]
-class Datum:
- def __init__(self, type, values):
- self.type = type
- self.values = values
-
- @staticmethod
- def fromJson(type_, json):
- if not type_.value:
- if len(json) == 2 and json[0] == "set":
- values = []
- for atomJson in json[1]:
- values += [Atom.fromJson(type_.key, atomJson)]
- else:
- values = [Atom.fromJson(type_.key, json)]
- else:
- if len(json) != 2 or json[0] != "map":
- raise Error("%s is not valid JSON for a map" % json)
- values = []
- for pairJson in json[1]:
- values += [(Atom.fromJson(type_.key, pairJson[0]),
- Atom.fromJson(type_.value, pairJson[1]))]
- return Datum(type_, values)
-
- def cInitDatum(self, var):
- if len(self.values) == 0:
- return ["ovsdb_datum_init_empty(%s);" % var]
-
- s = ["%s->n = %d;" % (var, len(self.values))]
- s += ["%s->keys = xmalloc(%d * sizeof *%s->keys);"
- % (var, len(self.values), var)]
-
- for i in range(len(self.values)):
- key = self.values[i]
- if self.type.value:
- key = key[0]
- s += key.cInitAtom("%s->keys[%d]" % (var, i))
-
- if self.type.value:
- s += ["%s->values = xmalloc(%d * sizeof *%s->values);"
- % (var, len(self.values), var)]
- for i in range(len(self.values)):
- value = self.values[i][1]
- s += key.cInitAtom("%s->values[%d]" % (var, i))
- else:
- s += ["%s->values = NULL;" % var]
-
- if len(self.values) > 1:
- s += ["ovsdb_datum_sort_assert(%s, OVSDB_TYPE_%s);"
- % (var, self.type.key.upper())]
-
- return s
-
def parseSchema(filename):
- return IdlSchema.fromJson(json.load(open(filename, "r")))
+ return ovs.db.schema.IdlSchema.from_json(ovs.json.from_file(filename))
def annotateSchema(schemaFile, annotationFile):
- schemaJson = json.load(open(schemaFile, "r"))
+ schemaJson = ovs.json.from_file(schemaFile)
execfile(annotationFile, globals(), {"s": schemaJson})
- json.dump(schemaJson, sys.stdout)
+ ovs.json.to_stream(schemaJson, sys.stdout)
def constify(cType, const):
if (const
def cMembers(prefix, columnName, column, const):
type = column.type
- if type.min == 1 and type.max == 1:
+ if type.n_min == 1 and type.n_max == 1:
singleton = True
pointer = ''
else:
singleton = False
- if type.isOptionalPointer():
+ if type.is_optional_pointer():
pointer = ''
else:
pointer = '*'
'comment': type.cDeclComment()}
members = [m]
- if not singleton and not type.isOptionalPointer():
+ if not singleton and not type.is_optional_pointer():
members.append({'name': 'n_%s' % columnName,
'type': 'size_t ',
'comment': ''})
keyVar = "row->%s" % columnName
valueVar = None
- if (type.min == 1 and type.max == 1) or type.isOptionalPointer():
+ if (type.n_min == 1 and type.n_max == 1) or type.is_optional_pointer():
print
print " assert(inited);"
print " if (datum->n >= 1) {"
- if not type.key.refTable:
- print " %s = datum->keys[0].%s;" % (keyVar, type.key.type)
+ if not type.key.ref_table:
+ print " %s = datum->keys[0].%s;" % (keyVar, type.key.type.to_string())
else:
- print " %s = %s%s_cast(ovsdb_idl_get_row_arc(row_, &%stable_classes[%sTABLE_%s], &datum->keys[0].uuid));" % (keyVar, prefix, type.key.refTable.lower(), prefix, prefix.upper(), type.key.refTable.upper())
+ print " %s = %s%s_cast(ovsdb_idl_get_row_arc(row_, &%stable_classes[%sTABLE_%s], &datum->keys[0].uuid));" % (keyVar, prefix, type.key.ref_table.lower(), prefix, prefix.upper(), type.key.ref_table.upper())
if valueVar:
- if type.value.refTable:
- print " %s = datum->values[0].%s;" % (valueVar, type.value.type)
+ if type.value.ref_table:
+ print " %s = datum->values[0].%s;" % (valueVar, type.value.type.to_string())
else:
- print " %s = %s%s_cast(ovsdb_idl_get_row_arc(row_, &%stable_classes[%sTABLE_%s], &datum->values[0].uuid));" % (valueVar, prefix, type.value.refTable.lower(), prefix, prefix.upper(), type.value.refTable.upper())
+ print " %s = %s%s_cast(ovsdb_idl_get_row_arc(row_, &%stable_classes[%sTABLE_%s], &datum->values[0].uuid));" % (valueVar, prefix, type.value.ref_table.lower(), prefix, prefix.upper(), type.value.ref_table.upper())
print " } else {"
- print " %s" % type.key.initCDefault(keyVar, type.min == 0)
+ print " %s" % type.key.initCDefault(keyVar, type.n_min == 0)
if valueVar:
- print " %s" % type.value.initCDefault(valueVar, type.min == 0)
+ print " %s" % type.value.initCDefault(valueVar, type.n_min == 0)
print " }"
else:
- if type.max != 'unlimited':
- print " size_t n = MIN(%d, datum->n);" % type.max
+ if type.n_max != sys.maxint:
+ print " size_t n = MIN(%d, datum->n);" % type.n_max
nMax = "n"
else:
nMax = "datum->n"
print " row->n_%s = 0;" % columnName
print " for (i = 0; i < %s; i++) {" % nMax
refs = []
- if type.key.refTable:
- print " struct %s%s *keyRow = %s%s_cast(ovsdb_idl_get_row_arc(row_, &%stable_classes[%sTABLE_%s], &datum->keys[i].uuid));" % (prefix, type.key.refTable.lower(), prefix, type.key.refTable.lower(), prefix, prefix.upper(), type.key.refTable.upper())
+ if type.key.ref_table:
+ print " struct %s%s *keyRow = %s%s_cast(ovsdb_idl_get_row_arc(row_, &%stable_classes[%sTABLE_%s], &datum->keys[i].uuid));" % (prefix, type.key.ref_table.lower(), prefix, type.key.ref_table.lower(), prefix, prefix.upper(), type.key.ref_table.upper())
keySrc = "keyRow"
refs.append('keyRow')
else:
- keySrc = "datum->keys[i].%s" % type.key.type
- if type.value and type.value.refTable:
- print " struct %s%s *valueRow = %s%s_cast(ovsdb_idl_get_row_arc(row_, &%stable_classes[%sTABLE_%s], &datum->values[i].uuid));" % (prefix, type.value.refTable.lower(), prefix, type.value.refTable.lower(), prefix, prefix.upper(), type.value.refTable.upper())
+ keySrc = "datum->keys[i].%s" % type.key.type.to_string()
+ if type.value and type.value.ref_table:
+ print " struct %s%s *valueRow = %s%s_cast(ovsdb_idl_get_row_arc(row_, &%stable_classes[%sTABLE_%s], &datum->values[i].uuid));" % (prefix, type.value.ref_table.lower(), prefix, type.value.ref_table.lower(), prefix, prefix.upper(), type.value.ref_table.upper())
valueSrc = "valueRow"
refs.append('valueRow')
elif valueVar:
- valueSrc = "datum->values[i].%s" % type.value.type
+ valueSrc = "datum->values[i].%s" % type.value.type.to_string()
if refs:
print " if (%s) {" % ' && '.join(refs)
indent = " "
# Unparse functions.
for columnName, column in sorted(table.columns.iteritems()):
type = column.type
- if (type.min != 1 or type.max != 1) and not type.isOptionalPointer():
+ if (type.n_min != 1 or type.n_max != 1) and not type.is_optional_pointer():
print '''
static void
%(s)s_unparse_%(c)s(struct ovsdb_idl_row *row_)
'args': ', '.join(['%(type)s%(name)s' % m for m in members])}
print "{"
print " struct ovsdb_datum datum;"
- if type.min == 1 and type.max == 1:
+ if type.n_min == 1 and type.n_max == 1:
print
print " assert(inited);"
print " datum.n = 1;"
print " datum.keys = xmalloc(sizeof *datum.keys);"
- print " " + type.key.copyCValue("datum.keys[0].%s" % type.key.type, keyVar)
+ print " " + type.key.copyCValue("datum.keys[0].%s" % type.key.type.to_string(), keyVar)
if type.value:
print " datum.values = xmalloc(sizeof *datum.values);"
- print " "+ type.value.copyCValue("datum.values[0].%s" % type.value.type, valueVar)
+ print " "+ type.value.copyCValue("datum.values[0].%s" % type.value.type.to_string(), valueVar)
else:
print " datum.values = NULL;"
- elif type.isOptionalPointer():
+ elif type.is_optional_pointer():
print
print " assert(inited);"
print " if (%s) {" % keyVar
print " datum.n = 1;"
print " datum.keys = xmalloc(sizeof *datum.keys);"
- print " " + type.key.copyCValue("datum.keys[0].%s" % type.key.type, keyVar)
+ print " " + type.key.copyCValue("datum.keys[0].%s" % type.key.type.to_string(), keyVar)
print " } else {"
print " datum.n = 0;"
print " datum.keys = NULL;"
else:
print " datum.values = NULL;"
print " for (i = 0; i < %s; i++) {" % nVar
- print " " + type.key.copyCValue("datum.keys[i].%s" % type.key.type, "%s[i]" % keyVar)
+ print " " + type.key.copyCValue("datum.keys[i].%s" % type.key.type.to_string(), "%s[i]" % keyVar)
if type.value:
- print " " + type.value.copyCValue("datum.values[i].%s" % type.value.type, "%s[i]" % valueVar)
+ print " " + type.value.copyCValue("datum.values[i].%s" % type.value.type.to_string(), "%s[i]" % valueVar)
print " }"
if type.value:
valueType = type.value.toAtomicType()
def escape(match):
c = match.group(0)
if c == '\0':
- raise Error("strings may not contain null bytes")
+ raise ovs.db.error.Error("strings may not contain null bytes")
elif c == '\\':
return '\\\\'
elif c == '\n':
sys.exit(1)
func(*args[1:])
- except Error, e:
- sys.stderr.write("%s: %s\n" % (argv0, e.msg))
+ except ovs.db.error.Error, e:
+ sys.stderr.write("%s: %s\n" % (argv0, e))
sys.exit(1)
# Local variables:
--- /dev/null
+# This file intentionally left blank.
--- /dev/null
+run_python = PYTHONPATH=$(top_srcdir)/python:$$PYTHONPATH $(PYTHON)
+
+ovs_pyfiles = \
+ python/ovs/__init__.py \
+ python/ovs/daemon.py \
+ python/ovs/db/__init__.py \
+ python/ovs/db/data.py \
+ python/ovs/db/error.py \
+ python/ovs/db/idl.py \
+ python/ovs/db/parser.py \
+ python/ovs/db/schema.py \
+ python/ovs/db/types.py \
+ python/ovs/fatal_signal.py \
+ python/ovs/json.py \
+ python/ovs/jsonrpc.py \
+ python/ovs/ovsuuid.py \
+ python/ovs/poller.py \
+ python/ovs/process.py \
+ python/ovs/reconnect.py \
+ python/ovs/socket_util.py \
+ python/ovs/stream.py \
+ python/ovs/timeval.py \
+ python/ovs/util.py
+EXTRA_DIST += $(ovs_pyfiles) python/ovs/dirs.py
+
+if HAVE_PYTHON
+nobase_pkgdata_DATA = $(ovs_pyfiles)
+ovs-install-data-local:
+ $(MKDIR_P) python/ovs
+ (echo 'PKGDATADIR = """$(pkgdatadir)"""' && \
+ echo 'RUNDIR = """@RUNDIR@"""' && \
+ echo 'LOGDIR = """@LOGDIR@"""' && \
+ echo 'BINDIR = """$(bindir)"""') > python/ovs/dirs.py.tmp
+ $(MKDIR_P) $(DESTDIR)$(pkgdatadir)/python/ovs
+ $(INSTALL_DATA) python/ovs/dirs.py.tmp $(DESTDIR)$(pkgdatadir)/python/ovs/dirs.py
+ rm python/ovs/dirs.py.tmp
+endif
+install-data-local: ovs-install-data-local
+
+uninstall-local: ovs-uninstall-local
+ovs-uninstall-local:
+ rm -f $(DESTDIR)$(pkgdatadir)/python/ovs/dirs.py
--- /dev/null
+# Copyright (c) 2010 Nicira Networks
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import errno
+import fcntl
+import logging
+import os
+import resource
+import signal
+import sys
+import time
+
+import ovs.dirs
+import ovs.fatal_signal
+#import ovs.lockfile
+import ovs.process
+import ovs.socket_util
+import ovs.timeval
+import ovs.util
+
+# --detach: Should we run in the background?
+_detach = False
+
+# --pidfile: Name of pidfile (null if none).
+_pidfile = None
+
+# --overwrite-pidfile: Create pidfile even if one already exists and is locked?
+_overwrite_pidfile = False
+
+# --no-chdir: Should we chdir to "/"?
+_chdir = True
+
+# --monitor: Should a supervisory process monitor the daemon and restart it if
+# it dies due to an error signal?
+_monitor = False
+
+# File descriptor used by daemonize_start() and daemonize_complete().
+_daemonize_fd = None
+
+def make_pidfile_name(name):
+ """Returns the file name that would be used for a pidfile if 'name' were
+ provided to set_pidfile()."""
+ if name is None or name == "":
+ return "%s/%s.pid" % (ovs.dirs.RUNDIR, ovs.util.PROGRAM_NAME)
+ else:
+ return ovs.util.abs_file_name(ovs.dirs.RUNDIR, name)
+
+def set_pidfile(name):
+ """Sets up a following call to daemonize() to create a pidfile named
+ 'name'. If 'name' begins with '/', then it is treated as an absolute path.
+ Otherwise, it is taken relative to ovs.util.RUNDIR, which is
+ $(prefix)/var/run by default.
+
+ If 'name' is null, then ovs.util.PROGRAM_NAME followed by ".pid" is
+ used."""
+ global _pidfile
+ _pidfile = make_pidfile_name(name)
+
+def get_pidfile():
+ """Returns an absolute path to the configured pidfile, or None if no
+ pidfile is configured. The caller must not modify or free the returned
+ string."""
+ return _pidfile
+
+def set_no_chdir():
+ """Sets that we do not chdir to "/"."""
+ global _chdir
+ _chdir = False
+
+def is_chdir_enabled():
+ """Will we chdir to "/" as part of daemonizing?"""
+ return _chdir
+
+def ignore_existing_pidfile():
+ """Normally, die_if_already_running() will terminate the program with a
+ message if a locked pidfile already exists. If this function is called,
+ die_if_already_running() will merely log a warning."""
+ global _overwrite_pidfile
+ _overwrite_pidfile = True
+
+def set_detach():
+ """Sets up a following call to daemonize() to detach from the foreground
+ session, running this process in the background."""
+ global _detach
+ _detach = True
+
+def get_detach():
+ """Will daemonize() really detach?"""
+ return _detach
+
+def set_monitor():
+ """Sets up a following call to daemonize() to fork a supervisory process to
+ monitor the daemon and restart it if it dies due to an error signal."""
+ global _monitor
+ _monitor = True
+
+def _already_running():
+ """If a pidfile has been configured and that pidfile already exists and is
+ locked by a running process, returns True. Otherwise, returns False."""
+ if _pidfile is not None:
+ try:
+ file = open(_pidfile, "r+")
+ try:
+ try:
+ fcntl.lockf(file, fcntl.LOCK_EX | fcntl.LOCK_NB)
+ except IOError, e:
+ if e.errno in [errno.EACCES, errno.EAGAIN]:
+ return True
+ logging.error("error locking %s (%s)"
+ % (_pidfile, os.strerror(e.errno)))
+ return False
+ finally:
+ # This releases the lock, which we don't really want.
+ file.close()
+ except IOError, e:
+ if e.errno == errno.ENOENT:
+ return False
+ logging.error("error opening %s (%s)"
+ % (_pidfile, os.strerror(e.errno)))
+ return False
+
+def die_if_already_running():
+ """If a locked pidfile exists, issue a warning message and, unless
+ ignore_existing_pidfile() has been called, terminate the program."""
+ if _already_running():
+ if not _overwrite_pidfile:
+ sys.stderr.write("%s: already running\n" % get_pidfile())
+ sys.exit(1)
+ else:
+ logging.warn("%s: %s already running"
+ % (get_pidfile(), ovs.util.PROGRAM_NAME))
+
+def _make_pidfile():
+ """If a pidfile has been configured, creates it and stores the running
+ process's pid in it. Ensures that the pidfile will be deleted when the
+ process exits."""
+ if _pidfile is not None:
+ # Create pidfile via temporary file, so that observers never see an
+ # empty pidfile or an unlocked pidfile.
+ pid = os.getpid()
+ tmpfile = "%s.tmp%d" % (_pidfile, pid)
+ ovs.fatal_signal.add_file_to_unlink(tmpfile)
+
+ try:
+ # This is global to keep Python from garbage-collecting and
+ # therefore closing our file after this function exits. That would
+ # unlock the lock for us, and we don't want that.
+ global file
+
+ file = open(tmpfile, "w")
+ except IOError, e:
+ logging.error("%s: create failed: %s"
+ % (tmpfile, os.strerror(e.errno)))
+ return
+
+ try:
+ fcntl.lockf(file, fcntl.LOCK_EX | fcntl.LOCK_NB)
+ except IOError, e:
+ logging.error("%s: fcntl failed: %s"
+ % (tmpfile, os.strerror(e.errno)))
+ file.close()
+ return
+
+ try:
+ file.write("%s\n" % pid)
+ file.flush()
+ ovs.fatal_signal.add_file_to_unlink(_pidfile)
+ except OSError, e:
+ logging.error("%s: write failed: %s"
+ % (tmpfile, os.strerror(e.errno)))
+ file.close()
+ return
+
+ try:
+ os.rename(tmpfile, _pidfile)
+ except OSError, e:
+ ovs.fatal_signal.remove_file_to_unlink(_pidfile)
+ logging.error("failed to rename \"%s\" to \"%s\": %s"
+ % (tmpfile, _pidfile, os.strerror(e.errno)))
+ file.close()
+ return
+
+def daemonize():
+ """If configured with set_pidfile() or set_detach(), creates the pid file
+ and detaches from the foreground session."""
+ daemonize_start()
+ daemonize_complete()
+
+def _waitpid(pid, options):
+ while True:
+ try:
+ return os.waitpid(pid, options)
+ except OSError, e:
+            if e.errno == errno.EINTR:
+                pass
+            else:
+                return -e.errno, 0
+
+def _fork_and_wait_for_startup():
+ try:
+ rfd, wfd = os.pipe()
+ except OSError, e:
+ sys.stderr.write("pipe failed: %s\n" % os.strerror(e.errno))
+ sys.exit(1)
+
+ try:
+ pid = os.fork()
+ except OSError, e:
+ sys.stderr.write("could not fork: %s\n" % os.strerror(e.errno))
+ sys.exit(1)
+
+ if pid > 0:
+ # Running in parent process.
+ os.close(wfd)
+ ovs.fatal_signal.fork()
+ try:
+ s = os.read(rfd, 1)
+ except OSError, e:
+ s = ""
+ if len(s) != 1:
+ retval, status = _waitpid(pid, 0)
+ if (retval == pid and
+ os.WIFEXITED(status) and os.WEXITSTATUS(status)):
+ # Child exited with an error. Convey the same error to
+ # our parent process as a courtesy.
+ sys.exit(os.WEXITSTATUS(status))
+ else:
+ sys.stderr.write("fork child failed to signal startup\n")
+ sys.exit(1)
+
+ os.close(rfd)
+ else:
+        # Running in child process.
+ os.close(rfd)
+ ovs.timeval.postfork()
+ #ovs.lockfile.postfork()
+
+ global _daemonize_fd
+ _daemonize_fd = wfd
+ return pid
+
+def _fork_notify_startup(fd):
+ if fd is not None:
+ error, bytes_written = ovs.socket_util.write_fully(fd, "0")
+ if error:
+ sys.stderr.write("could not write to pipe\n")
+ sys.exit(1)
+ os.close(fd)
+
+def _should_restart(status):
+ if os.WIFSIGNALED(status):
+ for signame in ("SIGABRT", "SIGALRM", "SIGBUS", "SIGFPE", "SIGILL",
+ "SIGPIPE", "SIGSEGV", "SIGXCPU", "SIGXFSZ"):
+ if (signame in signal.__dict__ and
+ os.WTERMSIG(status) == signal.__dict__[signame]):
+ return True
+ return False
+
+def _monitor_daemon(daemon_pid):
+ # XXX should log daemon's stderr output at startup time
+ # XXX should use setproctitle module if available
+ last_restart = None
+ while True:
+ retval, status = _waitpid(daemon_pid, 0)
+ if retval < 0:
+ sys.stderr.write("waitpid failed\n")
+ sys.exit(1)
+ elif retval == daemon_pid:
+ status_msg = ("pid %d died, %s"
+ % (daemon_pid, ovs.process.status_msg(status)))
+
+ if _should_restart(status):
+ if os.WCOREDUMP(status):
+ # Disable further core dumps to save disk space.
+ try:
+ resource.setrlimit(resource.RLIMIT_CORE, (0, 0))
+ except resource.error:
+ logging.warning("failed to disable core dumps")
+
+ # Throttle restarts to no more than once every 10 seconds.
+ if (last_restart is not None and
+ ovs.timeval.msec() < last_restart + 10000):
+ logging.warning("%s, waiting until 10 seconds since last "
+ "restart" % status_msg)
+ while True:
+ now = ovs.timeval.msec()
+ wakeup = last_restart + 10000
+ if now > wakeup:
+ break
+ print "sleep %f" % ((wakeup - now) / 1000.0)
+ time.sleep((wakeup - now) / 1000.0)
+ last_restart = ovs.timeval.msec()
+
+ logging.error("%s, restarting" % status_msg)
+ daemon_pid = _fork_and_wait_for_startup()
+ if not daemon_pid:
+ break
+ else:
+ logging.info("%s, exiting" % status_msg)
+ sys.exit(0)
+
+ # Running in new daemon process.
+
+def _close_standard_fds():
+ """Close stdin, stdout, stderr. If we're started from e.g. an SSH session,
+ then this keeps us from holding that session open artificially."""
+ null_fd = ovs.socket_util.get_null_fd()
+ if null_fd >= 0:
+ os.dup2(null_fd, 0)
+ os.dup2(null_fd, 1)
+ os.dup2(null_fd, 2)
+
+def daemonize_start():
+ """If daemonization is configured, then starts daemonization, by forking
+ and returning in the child process. The parent process hangs around until
+ the child lets it know either that it completed startup successfully (by
+ calling daemon_complete()) or that it failed to start up (by exiting with a
+ nonzero exit code)."""
+
+ if _detach:
+ if _fork_and_wait_for_startup() > 0:
+ # Running in parent process.
+ sys.exit(0)
+ # Running in daemon or monitor process.
+
+ if _monitor:
+ saved_daemonize_fd = _daemonize_fd
+ daemon_pid = _fork_and_wait_for_startup()
+ if daemon_pid > 0:
+ # Running in monitor process.
+ _fork_notify_startup(saved_daemonize_fd)
+ _close_standard_fds()
+ _monitor_daemon(daemon_pid)
+ # Running in daemon process
+
+ _make_pidfile()
+
+def daemonize_complete():
+ """If daemonization is configured, then this function notifies the parent
+ process that the child process has completed startup successfully."""
+ _fork_notify_startup(_daemonize_fd)
+
+ if _detach:
+ os.setsid()
+ if _chdir:
+ os.chdir("/")
+ _close_standard_fds()
+
+def usage():
+ sys.stdout.write("""
+Daemon options:
+ --detach run in background as daemon
+ --no-chdir do not chdir to '/'
+ --pidfile[=FILE] create pidfile (default: %s/%s.pid)
+ --overwrite-pidfile with --pidfile, start even if already running
+""" % (ovs.dirs.RUNDIR, ovs.util.PROGRAM_NAME))
+
+def read_pidfile(pidfile):
+ """Opens and reads a PID from 'pidfile'. Returns the nonnegative PID if
+ successful, otherwise a negative errno value."""
+ try:
+ file = open(pidfile, "r")
+ except IOError, e:
+ logging.warning("%s: open: %s" % (pidfile, os.strerror(e.errno)))
+ return -e.errno
+
+ # Python fcntl doesn't directly support F_GETLK so we have to just try
+ # to lock it. If we get a conflicting lock that's "success"; otherwise
+ # the file is not locked.
+ try:
+ fcntl.lockf(file, fcntl.LOCK_EX | fcntl.LOCK_NB)
+ # File isn't locked if we get here, so treat that as an error.
+ logging.warning("%s: pid file is not locked" % pidfile)
+ try:
+ # As a side effect, this drops the lock.
+ file.close()
+ except IOError:
+ pass
+ return -errno.ESRCH
+ except IOError, e:
+ if e.errno not in [errno.EACCES, errno.EAGAIN]:
+ logging.warn("%s: fcntl: %s" % (pidfile, os.strerror(e.errno)))
+ return -e.errno
+
+ try:
+ try:
+ return int(file.readline())
+ except IOError, e:
+ logging.warning("%s: read: %s" % (pidfile, e.strerror))
+ return -e.errno
+ except ValueError:
+ logging.warning("%s does not contain a pid" % pidfile)
+ return -errno.EINVAL
+ finally:
+ try:
+ file.close()
+ except IOError:
+ pass
+
+# XXX Python's getopt does not support options with optional arguments, so we
+# have to separate --pidfile (with no argument) from --pidfile-name (with an
+# argument). Need to write our own getopt I guess.
+LONG_OPTIONS = ["detach", "no-chdir", "pidfile", "pidfile-name=",
+ "overwrite-pidfile", "monitor"]
+
+def parse_opt(option, arg):
+ if option == '--detach':
+ set_detach()
+ elif option == '--no-chdir':
+ set_no_chdir()
+ elif option == '--pidfile':
+ set_pidfile(None)
+ elif option == '--pidfile-name':
+ set_pidfile(arg)
+ elif option == '--overwrite-pidfile':
+ ignore_existing_pidfile()
+ elif option == '--monitor':
+ set_monitor()
+ else:
+ return False
+ return True
--- /dev/null
+# This file intentionally left blank.
--- /dev/null
+# Copyright (c) 2009, 2010 Nicira Networks
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import errno
+import logging
+import os
+import re
+import select
+import sys
+import uuid
+
+import ovs.poller
+import ovs.socket_util
+import ovs.json
+import ovs.jsonrpc
+import ovs.ovsuuid
+
+import ovs.db.parser
+from ovs.db import error
+import ovs.db.types
+
+class ConstraintViolation(error.Error):
+ def __init__(self, msg, json=None):
+ error.Error.__init__(self, msg, json, tag="constraint violation")
+
+def escapeCString(src):
+ dst = ""
+ for c in src:
+ if c in "\\\"":
+ dst += "\\" + c
+ elif ord(c) < 32:
+ if c == '\n':
+ dst += '\\n'
+ elif c == '\r':
+ dst += '\\r'
+ elif c == '\a':
+ dst += '\\a'
+ elif c == '\b':
+ dst += '\\b'
+ elif c == '\f':
+ dst += '\\f'
+ elif c == '\t':
+ dst += '\\t'
+ elif c == '\v':
+ dst += '\\v'
+ else:
+ dst += '\\%03o' % ord(c)
+ else:
+ dst += c
+ return dst
+
+def returnUnchanged(x):
+ return x
+
+class Atom(object):
+ def __init__(self, type, value=None):
+ self.type = type
+ if value is not None:
+ self.value = value
+ else:
+ self.value = type.default_atom()
+
+ def __cmp__(self, other):
+ if not isinstance(other, Atom) or self.type != other.type:
+ return NotImplemented
+ elif self.value < other.value:
+ return -1
+ elif self.value > other.value:
+ return 1
+ else:
+ return 0
+
+ def __hash__(self):
+ return hash(self.value)
+
+ @staticmethod
+ def default(type):
+ return Atom(type)
+
+    def is_default(self):
+        return self == Atom.default(self.type)
+
+ @staticmethod
+ def from_json(base, json, symtab=None):
+ type_ = base.type
+ json = ovs.db.parser.float_to_int(json)
+ if ((type_ == ovs.db.types.IntegerType and type(json) in [int, long])
+ or (type_ == ovs.db.types.RealType and type(json) in [int, long, float])
+ or (type_ == ovs.db.types.BooleanType and type(json) == bool)
+ or (type_ == ovs.db.types.StringType and type(json) in [str, unicode])):
+ atom = Atom(type_, json)
+ elif type_ == ovs.db.types.UuidType:
+ atom = Atom(type_, ovs.ovsuuid.UUID.from_json(json, symtab))
+ else:
+ raise error.Error("expected %s" % type_.to_string(), json)
+ atom.check_constraints(base)
+ return atom
+
+ def check_constraints(self, base):
+ """Checks whether 'atom' meets the constraints (if any) defined in
+ 'base' and raises an ovs.db.error.Error if any constraint is violated.
+
+ 'base' and 'atom' must have the same type.
+
+ Checking UUID constraints is deferred to transaction commit time, so
+ this function does nothing for UUID constraints."""
+ assert base.type == self.type
+ if base.enum is not None and self not in base.enum:
+ raise ConstraintViolation(
+ "%s is not one of the allowed values (%s)"
+ % (self.to_string(), base.enum.to_string()))
+ elif base.type in [ovs.db.types.IntegerType, ovs.db.types.RealType]:
+ if ((base.min is None or self.value >= base.min) and
+ (base.max is None or self.value <= base.max)):
+ pass
+ elif base.min is not None and base.max is not None:
+ raise ConstraintViolation(
+ "%s is not in the valid range %.15g to %.15g (inclusive)"
+ % (self.to_string(), base.min, base.max))
+ elif base.min is not None:
+ raise ConstraintViolation(
+ "%s is less than minimum allowed value %.15g"
+ % (self.to_string(), base.min))
+ else:
+ raise ConstraintViolation(
+ "%s is greater than maximum allowed value %.15g"
+ % (self.to_string(), base.max))
+ elif base.type == ovs.db.types.StringType:
+ # XXX The C version validates that the string is valid UTF-8 here.
+ # Do we need to do that in Python too?
+ s = self.value
+ length = len(s)
+ if length < base.min_length:
+ raise ConstraintViolation(
+ "\"%s\" length %d is less than minimum allowed length %d"
+ % (s, length, base.min_length))
+ elif length > base.max_length:
+ raise ConstraintViolation(
+ "\"%s\" length %d is greater than maximum allowed "
+ "length %d" % (s, length, base.max_length))
+
+ def to_json(self):
+ if self.type == ovs.db.types.UuidType:
+ return self.value.to_json()
+ else:
+ return self.value
+
+ def cInitAtom(self, var):
+ if self.type == ovs.db.types.IntegerType:
+ return ['%s.integer = %d;' % (var, self.value)]
+ elif self.type == ovs.db.types.RealType:
+ return ['%s.real = %.15g;' % (var, self.value)]
+ elif self.type == ovs.db.types.BooleanType:
+            if self.value:
+                return ['%s.boolean = true;' % var]
+            else:
+                return ['%s.boolean = false;' % var]
+ elif self.type == ovs.db.types.StringType:
+ return ['%s.string = xstrdup("%s");'
+ % (var, escapeCString(self.value))]
+ elif self.type == ovs.db.types.UuidType:
+ return self.value.cInitUUID(var)
+
+ def toEnglish(self, escapeLiteral=returnUnchanged):
+ if self.type == ovs.db.types.IntegerType:
+ return '%d' % self.value
+ elif self.type == ovs.db.types.RealType:
+ return '%.15g' % self.value
+ elif self.type == ovs.db.types.BooleanType:
+ if self.value:
+ return 'true'
+ else:
+ return 'false'
+ elif self.type == ovs.db.types.StringType:
+ return escapeLiteral(self.value)
+ elif self.type == ovs.db.types.UuidType:
+ return self.value.value
+
+ __need_quotes_re = re.compile("$|true|false|[^_a-zA-Z]|.*[^-._a-zA-Z]")
+ @staticmethod
+ def __string_needs_quotes(s):
+ return Atom.__need_quotes_re.match(s)
+
+ def to_string(self):
+ if self.type == ovs.db.types.IntegerType:
+ return '%d' % self.value
+ elif self.type == ovs.db.types.RealType:
+ return '%.15g' % self.value
+ elif self.type == ovs.db.types.BooleanType:
+ if self.value:
+ return 'true'
+ else:
+ return 'false'
+ elif self.type == ovs.db.types.StringType:
+ if Atom.__string_needs_quotes(self.value):
+ return ovs.json.to_string(self.value)
+ else:
+ return self.value
+ elif self.type == ovs.db.types.UuidType:
+ return str(self.value)
+
+ @staticmethod
+ def new(x):
+ if type(x) in [int, long]:
+ t = ovs.db.types.IntegerType
+ elif type(x) == float:
+ t = ovs.db.types.RealType
+        elif x in [False, True]:
+            t = ovs.db.types.BooleanType
+ elif type(x) in [str, unicode]:
+ t = ovs.db.types.StringType
+        elif isinstance(x, uuid.UUID):
+ t = ovs.db.types.UuidType
+ else:
+ raise TypeError
+ return Atom(t, x)
+
+class Datum(object):
+ def __init__(self, type, values={}):
+ self.type = type
+ self.values = values
+
+ def __cmp__(self, other):
+ if not isinstance(other, Datum):
+ return NotImplemented
+ elif self.values < other.values:
+ return -1
+ elif self.values > other.values:
+ return 1
+ else:
+ return 0
+
+ __hash__ = None
+
+ def __contains__(self, item):
+ return item in self.values
+
+ def clone(self):
+ return Datum(self.type, dict(self.values))
+
+ @staticmethod
+ def default(type):
+ if type.n_min == 0:
+ values = {}
+ elif type.is_map():
+ values = {type.key.default(): type.value.default()}
+ else:
+ values = {type.key.default(): None}
+ return Datum(type, values)
+
+    def is_default(self):
+        return self == Datum.default(self.type)
+
+ def check_constraints(self):
+ """Checks that each of the atoms in 'datum' conforms to the constraints
+ specified by its 'type' and raises an ovs.db.error.Error.
+
+ This function is not commonly useful because the most ordinary way to
+ obtain a datum is ultimately via Datum.from_json() or Atom.from_json(),
+ which check constraints themselves."""
+        for keyAtom, valueAtom in self.values.iteritems():
+ keyAtom.check_constraints()
+ if valueAtom is not None:
+ valueAtom.check_constraints()
+
+ @staticmethod
+ def from_json(type_, json, symtab=None):
+ """Parses 'json' as a datum of the type described by 'type'. If
+ successful, returns a new datum. On failure, raises an
+ ovs.db.error.Error.
+
+ Violations of constraints expressed by 'type' are treated as errors.
+
+ If 'symtab' is nonnull, then named UUIDs in 'symtab' are accepted.
+ Refer to ovsdb/SPECS for information about this, and for the syntax
+ that this function accepts."""
+ is_map = type_.is_map()
+ if (is_map or
+ (type(json) == list and len(json) > 0 and json[0] == "set")):
+ if is_map:
+ class_ = "map"
+ else:
+ class_ = "set"
+
+ inner = ovs.db.parser.unwrap_json(json, class_, list)
+ n = len(inner)
+ if n < type_.n_min or n > type_.n_max:
+ raise error.Error("%s must have %d to %d members but %d are "
+ "present" % (class_, type_.n_min,
+ type_.n_max, n),
+ json)
+
+ values = {}
+ for element in inner:
+ if is_map:
+ key, value = ovs.db.parser.parse_json_pair(element)
+ keyAtom = Atom.from_json(type_.key, key, symtab)
+ valueAtom = Atom.from_json(type_.value, value, symtab)
+ else:
+ keyAtom = Atom.from_json(type_.key, element, symtab)
+ valueAtom = None
+
+ if keyAtom in values:
+ if is_map:
+ raise error.Error("map contains duplicate key")
+ else:
+ raise error.Error("set contains duplicate")
+
+ values[keyAtom] = valueAtom
+
+ return Datum(type_, values)
+ else:
+ keyAtom = Atom.from_json(type_.key, json, symtab)
+ return Datum(type_, {keyAtom: None})
+
+ def to_json(self):
+ if len(self.values) == 1 and not self.type.is_map():
+ key = self.values.keys()[0]
+ return key.to_json()
+ elif not self.type.is_map():
+ return ["set", [k.to_json() for k in sorted(self.values.keys())]]
+ else:
+ return ["map", [[k.to_json(), v.to_json()]
+ for k, v in sorted(self.values.items())]]
+
+ def to_string(self):
+ if self.type.n_max > 1 or len(self.values) == 0:
+ if self.type.is_map():
+ s = "{"
+ else:
+ s = "["
+ else:
+ s = ""
+
+ i = 0
+ for key in sorted(self.values):
+ if i > 0:
+ s += ", "
+ i += 1
+
+ if self.type.is_map():
+ s += "%s=%s" % (key.to_string(), self.values[key].to_string())
+ else:
+ s += key.to_string()
+
+ if self.type.n_max > 1 or len(self.values) == 0:
+ if self.type.is_map():
+ s += "}"
+ else:
+ s += "]"
+ return s
+
+ def as_list(self):
+ if self.type.is_map():
+ return [[k.value, v.value] for k, v in self.values.iteritems()]
+ else:
+ return [k.value for k in self.values.iterkeys()]
+
+ def as_scalar(self):
+ if len(self.values) == 1:
+ if self.type.is_map():
+                k, v = self.values.items()[0]
+ return [k.value, v.value]
+ else:
+ return self.values.keys()[0].value
+ else:
+ return None
+
+ def __getitem__(self, key):
+ if not isinstance(key, Atom):
+ key = Atom.new(key)
+ if not self.type.is_map():
+ raise IndexError
+ elif key not in self.values:
+ raise KeyError
+ else:
+ return self.values[key].value
+
+ def get(self, key, default=None):
+ if not isinstance(key, Atom):
+ key = Atom.new(key)
+ if key in self.values:
+ return self.values[key].value
+ else:
+ return default
+
+ def __str__(self):
+ return self.to_string()
+
+ def conforms_to_type(self):
+ n = len(self.values)
+ return n >= self.type.n_min and n <= self.type.n_max
+
+ def cInitDatum(self, var):
+ if len(self.values) == 0:
+ return ["ovsdb_datum_init_empty(%s);" % var]
+
+ s = ["%s->n = %d;" % (var, len(self.values))]
+ s += ["%s->keys = xmalloc(%d * sizeof *%s->keys);"
+ % (var, len(self.values), var)]
+
+ i = 0
+ for key, value in sorted(self.values.items()):
+ s += key.cInitAtom("%s->keys[%d]" % (var, i))
+ i += 1
+
+ if self.type.value:
+ s += ["%s->values = xmalloc(%d * sizeof *%s->values);"
+ % (var, len(self.values), var)]
+ i = 0
+ for key, value in sorted(self.values.items()):
+ s += value.cInitAtom("%s->values[%d]" % (var, i))
+ i += 1
+ else:
+ s += ["%s->values = NULL;" % var]
+
+ if len(self.values) > 1:
+ s += ["ovsdb_datum_sort_assert(%s, OVSDB_TYPE_%s);"
+ % (var, self.type.key.type.to_string().upper())]
+
+ return s
--- /dev/null
+# Copyright (c) 2009, 2010 Nicira Networks
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import ovs.json
+
+class Error(Exception):
+ def __init__(self, msg, json=None, tag=None):
+ Exception.__init__(self)
+ self.msg = msg
+ self.json = json
+ if tag is None:
+ if json is None:
+ self.tag = "ovsdb error"
+ else:
+ self.tag = "syntax error"
+ else:
+ self.tag = tag
+
+ def __str__(self):
+ syntax = ""
+ if self.json is not None:
+ syntax = "syntax \"%s\": " % ovs.json.to_string(self.json)
+ return "%s%s: %s" % (syntax, self.tag, self.msg)
--- /dev/null
+# Copyright (c) 2009, 2010 Nicira Networks
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+import ovs.jsonrpc
+import ovs.db.schema
+from ovs.db import error
+import ovs.ovsuuid
+
+class Idl:
+ """Open vSwitch Database Interface Definition Language (OVSDB IDL).
+
+ The OVSDB IDL maintains an in-memory replica of a database. It issues RPC
+ requests to an OVSDB database server and parses the responses, converting
+ raw JSON into data structures that are easier for clients to digest.
+
+ The IDL also assists with issuing database transactions. The client
+ creates a transaction, manipulates the IDL data structures, and commits or
+ aborts the transaction. The IDL then composes and issues the necessary
+ JSON-RPC requests and reports to the client whether the transaction
+ completed successfully.
+
+ If 'schema_cb' is provided, it should be a callback function that accepts
+ an ovs.db.schema.DbSchema as its argument. It should determine whether the
+ schema is acceptable and raise an ovs.db.error.Error if it is not. It may
+ also delete any tables or columns from the schema that the client has no
+ interest in monitoring, to save time and bandwidth during monitoring. Its
+ return value is ignored."""
+
+ def __init__(self, remote, db_name, schema_cb=None):
+ """Creates and returns a connection to the database named 'db_name' on
+ 'remote', which should be in a form acceptable to
+ ovs.jsonrpc.session.open(). The connection will maintain an in-memory
+ replica of the remote database."""
+ self.remote = remote
+ self.session = ovs.jsonrpc.Session.open(remote)
+ self.db_name = db_name
+ self.last_seqno = None
+ self.schema = None
+ self.state = None
+ self.change_seqno = 0
+ self.data = {}
+ self.schema_cb = schema_cb
+
+ def close(self):
+ self.session.close()
+
+ def run(self):
+ """Processes a batch of messages from the database server. Returns
+ True if the database as seen through the IDL changed, False if it did
+ not change. The initial fetch of the entire contents of the remote
+ database is considered to be one kind of change.
+
+ This function can return occasional false positives, that is, report
+ that the database changed even though it didn't. This happens if the
+ connection to the database drops and reconnects, which causes the
+ database contents to be reloaded even if they didn't change. (It could
+ also happen if the database server sends out a "change" that reflects
+ what we already thought was in the database, but the database server is
+ not supposed to do that.)
+
+ As an alternative to checking the return value, the client may check
+ for changes in the value returned by self.get_seqno()."""
+ initial_change_seqno = self.change_seqno
+ self.session.run()
+ if self.session.is_connected():
+ seqno = self.session.get_seqno()
+ if seqno != self.last_seqno:
+ self.last_seqno = seqno
+ self.state = (self.__send_schema_request, None)
+ if self.state:
+ self.state[0]()
+ return initial_change_seqno != self.change_seqno
+
+ def wait(self, poller):
+ """Arranges for poller.block() to wake up when self.run() has something
+ to do or when activity occurs on a transaction on 'self'."""
+ self.session.wait(poller)
+ if self.state and self.state[1]:
+ self.state[1](poller)
+
+ def get_seqno(self):
+ """Returns a number that represents the IDL's state. When the IDL
+ updated (by self.run()), the return value changes."""
+ return self.change_seqno
+
+ def __send_schema_request(self):
+ msg = ovs.jsonrpc.Message.create_request("get_schema", [self.db_name])
+ self.session.send(msg)
+ self.state = (lambda: self.__recv_schema(msg.id), self.__recv_wait)
+
+    def __recv_schema(self, id):
+        # Waits for the reply to the "get_schema" request with ID 'id',
+        # parses the schema, then advances to sending a "monitor" request.
+        msg = self.session.recv()
+        if msg and msg.type == ovs.jsonrpc.Message.T_REPLY and msg.id == id:
+            try:
+                self.schema = ovs.db.schema.DbSchema.from_json(msg.result)
+            except error.Error, e:
+                logging.error("%s: parse error in received schema: %s"
+                              % (self.remote, e))
+                self.__error()
+                return
+
+            # Give the client a chance to examine (and reject, by raising
+            # error.Error) the schema before monitoring starts.
+            if self.schema_cb:
+                try:
+                    self.schema_cb(self.schema)
+                except error.Error, e:
+                    logging.error("%s: error validating schema: %s"
+                                  % (self.remote, e))
+                    self.__error()
+                    return
+
+            self.__send_monitor_request()
+        elif msg:
+            logging.error("%s: unexpected message expecting schema: %s"
+                          % (self.remote, msg))
+            self.__error()
+
+    def __recv_wait(self, poller):
+        # Common wait callback: wake when the session has a message to read.
+        self.session.recv_wait(poller)
+
+    def __send_monitor_request(self):
+        # Requests the current contents of every column of every table in
+        # the schema, plus future change notifications.
+        monitor_requests = {}
+        for table in self.schema.tables.itervalues():
+            monitor_requests[table.name] = {"columns": table.columns.keys()}
+        msg = ovs.jsonrpc.Message.create_request(
+            "monitor", [self.db_name, None, monitor_requests])
+        self.session.send(msg)
+        self.state = (lambda: self.__recv_monitor_reply(msg.id),
+                      self.__recv_wait)
+
+ def __recv_monitor_reply(self, id):
+ msg = self.session.recv()
+ if msg and msg.type == ovs.jsonrpc.Message.T_REPLY and msg.id == id:
+ try:
+ self.change_seqno += 1
+ self.state = (self.__recv_update, self.__recv_wait)
+ self.__clear()
+ self.__parse_update(msg.result)
+ except error.Error, e:
+ logging.error("%s: parse error in received schema: %s"
+ % (self.remote, e))
+ self.__error()
+ elif msg:
+ logging.error("%s: unexpected message expecting schema: %s"
+ % (self.remote, msg))
+ self.__error()
+
+    def __recv_update(self):
+        # Steady-state handler: accepts notifications whose params are
+        # [None, <table-updates>], where None matches the <json-value> we
+        # passed in the monitor request.
+        msg = self.session.recv()
+        if (msg and msg.type == ovs.jsonrpc.Message.T_NOTIFY and
+            type(msg.params) == list and len(msg.params) == 2 and
+            msg.params[0] is None):
+            self.__parse_update(msg.params[1])
+        elif msg:
+            logging.error("%s: unexpected message expecting update: %s"
+                          % (self.remote, msg))
+            self.__error()
+
+    def __error(self):
+        # Recovery strategy for any protocol error: drop the connection;
+        # run() restarts the state machine from the schema request on
+        # reconnect.
+        self.session.force_reconnect()
+
+    def __parse_update(self, update):
+        # Wrapper that turns parse errors into log messages instead of
+        # propagating them to the caller.
+        try:
+            self.__do_parse_update(update)
+        except error.Error, e:
+            logging.error("%s: error parsing update: %s" % (self.remote, e))
+
+    def __do_parse_update(self, table_updates):
+        """Validates and applies a <table-updates> object (see the OVSDB
+        protocol) to the replicated data.  Raises error.Error on any
+        structural problem; increments self.change_seqno once per changed
+        row."""
+        if type(table_updates) != dict:
+            raise error.Error("<table-updates> is not an object",
+                              table_updates)
+
+        for table_name, table_update in table_updates.iteritems():
+            table = self.schema.tables.get(table_name)
+            if not table:
+                raise error.Error("<table-updates> includes unknown "
+                                  "table \"%s\"" % table_name)
+
+            if type(table_update) != dict:
+                raise error.Error("<table-update> for table \"%s\" is not "
+                                  "an object" % table_name, table_update)
+
+            for uuid_string, row_update in table_update.iteritems():
+                if not ovs.ovsuuid.UUID.is_valid_string(uuid_string):
+                    raise error.Error("<table-update> for table \"%s\" "
+                                      "contains bad UUID \"%s\" as member "
+                                      "name" % (table_name, uuid_string),
+                                      table_update)
+                uuid = ovs.ovsuuid.UUID.from_string(uuid_string)
+
+                if type(row_update) != dict:
+                    raise error.Error("<table-update> for table \"%s\" "
+                                      "contains <row-update> for %s that "
+                                      "is not an object"
+                                      % (table_name, uuid_string))
+
+                old = row_update.get("old", None)
+                new = row_update.get("new", None)
+
+                if old is not None and type(old) != dict:
+                    raise error.Error("\"old\" <row> is not object", old)
+                if new is not None and type(new) != dict:
+                    raise error.Error("\"new\" <row> is not object", new)
+                # "old" and "new" must be the only members present.
+                if (old is not None) + (new is not None) != len(row_update):
+                    raise error.Error("<row-update> contains unexpected "
+                                      "member", row_update)
+                if not old and not new:
+                    raise error.Error("<row-update> missing \"old\" and "
+                                      "\"new\" members", row_update)
+
+                if self.__parse_row_update(table, uuid, old, new):
+                    self.change_seqno += 1
+
+ def __parse_row_update(self, table, uuid, old, new):
+ """Returns True if a column changed, False otherwise."""
+ row = self.data[table.name].get(uuid)
+ if not new:
+ # Delete row.
+ if row:
+ del self.data[table.name][uuid]
+ else:
+ # XXX rate-limit
+ logging.warning("cannot delete missing row %s from table %s"
+ % (uuid, table.name))
+ return False
+ elif not old:
+ # Insert row.
+ if not row:
+ row = self.__create_row(table, uuid)
+ else:
+ # XXX rate-limit
+ logging.warning("cannot add existing row %s to table %s"
+ % (uuid, table.name))
+ self.__modify_row(table, row, new)
+ else:
+ if not row:
+ row = self.__create_row(table, uuid)
+ # XXX rate-limit
+ logging.warning("cannot modify missing row %s in table %s"
+ % (uuid, table_name))
+ self.__modify_row(table, row, new)
+ return True
+
+ def __modify_row(self, table, row, row_json):
+ changed = False
+ for column_name, datum_json in row_json.iteritems():
+ column = table.columns.get(column_name)
+ if not column:
+ # XXX rate-limit
+ logging.warning("unknown column %s updating table %s"
+ % (column_name, table.name))
+ continue
+
+ try:
+ datum = ovs.db.data.Datum.from_json(column.type, datum_json)
+ except error.Error, e:
+ # XXX rate-limit
+ logging.warning("error parsing column %s in table %s: %s"
+ % (column_name, table_name, e))
+ continue
+
+ if datum != row.__dict__[column_name]:
+ row.__dict__[column_name] = datum
+ changed = True
+ else:
+ # Didn't really change but the OVSDB monitor protocol always
+ # includes every value in a row.
+ pass
+ return changed
+
+    def __clear(self):
+        # Discards all replicated data, bumping change_seqno (once) if any
+        # table actually had rows.
+        if self.data != {}:
+            for table_name in self.schema.tables:
+                if self.data[table_name] != {}:
+                    self.change_seqno += 1
+                    break
+
+        # Reset to one empty row dict per table in the schema.
+        self.data = {}
+        for table_name in self.schema.tables:
+            self.data[table_name] = {}
+
+    def __create_row(self, table, uuid):
+        # Creates, stores, and returns a new row object for 'uuid' in
+        # 'table', with every column initialized to its type's default
+        # datum.
+        class Row(object):
+            pass
+        row = self.data[table.name][uuid] = Row()
+        for column in table.columns.itervalues():
+            row.__dict__[column.name] = ovs.db.data.Datum.default(column.type)
+        return row
+
+    def force_reconnect(self):
+        """Forces the IDL to drop its connection to the database and reconnect.
+        In the meantime, the contents of the IDL will not change."""
+        # run() restarts the state machine from the schema request once the
+        # session reconnects.
+        self.session.force_reconnect()
--- /dev/null
+# Copyright (c) 2010 Nicira Networks
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import re
+
+from ovs.db import error
+
+class Parser(object):
+ def __init__(self, json, name):
+ self.name = name
+ self.json = json
+ if type(json) != dict:
+ self.__raise_error("Object expected.")
+ self.used = set()
+
+ def __get(self, name, types, optional, default=None):
+ if name in self.json:
+ self.used.add(name)
+ member = float_to_int(self.json[name])
+ if is_identifier(member) and "id" in types:
+ return member
+ if len(types) and type(member) not in types:
+ self.__raise_error("Type mismatch for member '%s'." % name)
+ return member
+ else:
+ if not optional:
+ self.__raise_error("Required '%s' member is missing." % name)
+ return default
+
+ def get(self, name, types):
+ return self.__get(name, types, False)
+
+ def get_optional(self, name, types, default=None):
+ return self.__get(name, types, True, default)
+
+ def __raise_error(self, message):
+ raise error.Error("Parsing %s failed: %s" % (self.name, message),
+ self.json)
+
+ def finish(self):
+ missing = set(self.json) - set(self.used)
+ if missing:
+ name = missing.pop()
+ if len(missing) > 1:
+ self.__raise_error("Member '%s' and %d other members "
+ "are present but not allowed here"
+ % (name, len(missing)))
+ elif missing:
+ self.__raise_error("Member '%s' and 1 other member "
+ "are present but not allowed here" % name)
+ else:
+ self.__raise_error("Member '%s' is present but not "
+ "allowed here" % name)
+
+def float_to_int(x):
+    # XXX still needed?
+    # JSON numbers can arrive as floats; convert exactly integral values
+    # back to int.  2**53 bounds the range in which an IEEE 754 double
+    # represents every integer exactly.
+    if type(x) == float:
+        integer = int(x)
+        if integer == x and integer >= -2**53 and integer < 2**53:
+            return integer
+    return x
+
+# A C-like identifier: letter or underscore, then letters, digits, or
+# underscores; the trailing '$' anchors the match to the whole string.
+id_re = re.compile("[_a-zA-Z][_a-zA-Z0-9]*$")
+def is_identifier(s):
+    # Returns a truthy value only for strings that match id_re entirely.
+    return type(s) in [str, unicode] and id_re.match(s)
+
+def json_type_to_string(type):
+    """Maps a Python type object for a JSON value (e.g. dict, list) to the
+    corresponding JSON type name, for use in error messages."""
+    if type == None:
+        return "null"
+    elif type == bool:
+        return "boolean"
+    elif type == dict:
+        return "object"
+    elif type == list:
+        return "array"
+    elif type in [int, long, float]:
+        return "number"
+    elif type in [str, unicode]:
+        return "string"
+    else:
+        return "<invalid>"
+
+def unwrap_json(json, name, need_type):
+    """Given 'json' of the form [name, <value of need_type>], returns the
+    wrapped value; raises error.Error for any other shape."""
+    if (type(json) != list or len(json) != 2 or json[0] != name or
+        type(json[1]) != need_type):
+        raise error.Error("expected [\"%s\", <%s>]"
+                          % (name, json_type_to_string(need_type)), json)
+    return json[1]
+
+def parse_json_pair(json):
+    """Validates that 'json' is a 2-element array and returns it."""
+    if type(json) != list or len(json) != 2:
+        raise error.Error("expected 2-element array", json)
+    return json
+
--- /dev/null
+# Copyright (c) 2009, 2010 Nicira Networks
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+from ovs.db import error
+import ovs.db.parser
+from ovs.db import types
+
+class DbSchema(object):
+    """Schema for an OVSDB database."""
+
+    def __init__(self, name, tables):
+        # 'name': database name; 'tables': dict mapping table name to
+        # TableSchema.
+        self.name = name
+        self.tables = tables
+
+        # Validate that all ref_tables refer to the names of tables
+        # that exist.
+        for table in self.tables.itervalues():
+            for column in table.columns.itervalues():
+                self.__check_ref_table(column, column.type.key, "key")
+                self.__check_ref_table(column, column.type.value, "value")
+
+    @staticmethod
+    def from_json(json):
+        """Parses 'json' as a <database-schema> and returns a DbSchema.
+        Raises error.Error on invalid input."""
+        parser = ovs.db.parser.Parser(json, "database schema")
+        name = parser.get("name", ['id'])
+        tablesJson = parser.get("tables", [dict])
+        parser.finish()
+
+        tables = {}
+        for tableName, tableJson in tablesJson.iteritems():
+            if tableName.startswith('_'):
+                raise error.Error("names beginning with \"_\" are reserved",
+                                  json)
+            elif not ovs.db.parser.is_identifier(tableName):
+                raise error.Error("name must be a valid id", json)
+            tables[tableName] = TableSchema.from_json(tableJson, tableName)
+
+        return DbSchema(name, tables)
+
+    def to_json(self):
+        # Inverse of from_json(): serializes the schema back to JSON.
+        tables = {}
+        for table in self.tables.itervalues():
+            tables[table.name] = table.to_json()
+        return {"name": self.name, "tables": tables}
+
+    def __check_ref_table(self, column, base, base_name):
+        # 'base' may be None (e.g. a set type has no value BaseType).
+        if (base and base.type == types.UuidType and base.ref_table and
+            base.ref_table not in self.tables):
+            raise error.Error("column %s %s refers to undefined table %s"
+                              % (column.name, base_name, base.ref_table),
+                              tag="syntax error")
+
+class IdlSchema(DbSchema):
+    """A DbSchema extended with the IDL code-generation members
+    'idlPrefix' and 'idlHeader'."""
+
+    def __init__(self, name, tables, idlPrefix, idlHeader):
+        DbSchema.__init__(self, name, tables)
+        self.idlPrefix = idlPrefix
+        self.idlHeader = idlHeader
+
+    @staticmethod
+    def from_json(json):
+        parser = ovs.db.parser.Parser(json, "IDL schema")
+        idlPrefix = parser.get("idlPrefix", [unicode])
+        idlHeader = parser.get("idlHeader", [unicode])
+
+        # Strip the IDL-specific members so the remainder validates as an
+        # ordinary database schema (DbSchema.from_json's parser.finish()
+        # would reject unknown members).
+        subjson = dict(json)
+        del subjson["idlPrefix"]
+        del subjson["idlHeader"]
+        schema = DbSchema.from_json(subjson)
+
+        return IdlSchema(schema.name, schema.tables, idlPrefix, idlHeader)
+
+class TableSchema(object):
+    """Schema for one OVSDB table: its columns, whether it may be
+    modified, and the maximum number of rows it may contain."""
+
+    def __init__(self, name, columns, mutable=True, max_rows=sys.maxint):
+        self.name = name
+        self.columns = columns
+        self.mutable = mutable
+        self.max_rows = max_rows
+
+    @staticmethod
+    def from_json(json, name):
+        """Parses 'json' as a <table-schema> for a table named 'name'.
+        Raises error.Error on invalid input."""
+        parser = ovs.db.parser.Parser(json, "table schema for table %s" % name)
+        columnsJson = parser.get("columns", [dict])
+        mutable = parser.get_optional("mutable", [bool], True)
+        max_rows = parser.get_optional("maxRows", [int])
+        parser.finish()
+
+        # Absent maxRows means "unlimited" (represented by sys.maxint).
+        if max_rows == None:
+            max_rows = sys.maxint
+        elif max_rows <= 0:
+            raise error.Error("maxRows must be at least 1", json)
+
+        if not columnsJson:
+            raise error.Error("table must have at least one column", json)
+
+        columns = {}
+        for columnName, columnJson in columnsJson.iteritems():
+            if columnName.startswith('_'):
+                raise error.Error("names beginning with \"_\" are reserved",
+                                  json)
+            elif not ovs.db.parser.is_identifier(columnName):
+                raise error.Error("name must be a valid id", json)
+            columns[columnName] = ColumnSchema.from_json(columnJson,
+                                                         columnName)
+
+        return TableSchema(name, columns, mutable, max_rows)
+
+    def to_json(self):
+        # Defaults (mutable, unlimited rows) are omitted from the JSON, and
+        # internal "_"-prefixed columns are not emitted.
+        json = {}
+        if not self.mutable:
+            json["mutable"] = False
+
+        json["columns"] = columns = {}
+        for column in self.columns.itervalues():
+            if not column.name.startswith("_"):
+                columns[column.name] = column.to_json()
+
+        if self.max_rows != sys.maxint:
+            json["maxRows"] = self.max_rows
+
+        return json
+
+class ColumnSchema(object):
+    """Schema for one column: its type, mutability, and persistence.
+    'persistent' is the inverse of the JSON "ephemeral" member."""
+
+    def __init__(self, name, mutable, persistent, type):
+        self.name = name
+        self.mutable = mutable
+        self.persistent = persistent
+        self.type = type
+
+    @staticmethod
+    def from_json(json, name):
+        parser = ovs.db.parser.Parser(json, "schema for column %s" % name)
+        mutable = parser.get_optional("mutable", [bool], True)
+        ephemeral = parser.get_optional("ephemeral", [bool], False)
+        type = types.Type.from_json(parser.get("type", [dict, unicode]))
+        parser.finish()
+
+        return ColumnSchema(name, mutable, not ephemeral, type)
+
+    def to_json(self):
+        # Defaults (mutable, persistent) are omitted from the JSON.
+        json = {"type": self.type.to_json()}
+        if not self.mutable:
+            json["mutable"] = False
+        if not self.persistent:
+            json["ephemeral"] = True
+        return json
+
--- /dev/null
+# Copyright (c) 2009, 2010 Nicira Networks
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+from ovs.db import error
+import ovs.db.parser
+import ovs.db.data
+import ovs.ovsuuid
+
+class AtomicType(object):
+    """One of the fixed set of OVSDB atomic types.  'default' is the value
+    to which an atom of this type defaults."""
+
+    def __init__(self, name, default):
+        self.name = name
+        self.default = default
+
+    @staticmethod
+    def from_string(s):
+        # "void" is deliberately excluded even though VoidType is in
+        # ATOMIC_TYPES, so callers cannot name it explicitly.
+        if s != "void":
+            for atomic_type in ATOMIC_TYPES:
+                if s == atomic_type.name:
+                    return atomic_type
+        raise error.Error("\"%s\" is not an atomic type" % s)
+
+    @staticmethod
+    def from_json(json):
+        # Like from_string(), but validates the JSON type first and
+        # re-raises with the offending JSON value attached.
+        if type(json) not in [str, unicode]:
+            raise error.Error("atomic-type expected", json)
+        try:
+            return AtomicType.from_string(json)
+        except error.Error:
+            raise error.Error("\"%s\" is not an atomic-type" % json, json)
+
+    def __str__(self):
+        return self.name
+
+    def to_string(self):
+        return self.name
+
+    def to_json(self):
+        return self.name
+
+    def default_atom(self):
+        # Returns a fresh Atom holding this type's default value.
+        return ovs.db.data.Atom(self, self.default)
+
+# Singleton AtomicType instances, one per OVSDB atomic type, each with its
+# default value.
+VoidType = AtomicType("void", None)
+IntegerType = AtomicType("integer", 0)
+RealType = AtomicType("real", 0.0)
+BooleanType = AtomicType("boolean", False)
+StringType = AtomicType("string", "")
+UuidType = AtomicType("uuid", ovs.ovsuuid.UUID.zero())
+
+ATOMIC_TYPES = [VoidType, IntegerType, RealType, BooleanType, StringType,
+                UuidType]
+
+def escapeCString(src):
+    """Returns 'src' escaped for inclusion in a C string literal:
+    backslash and double quote are backslash-escaped; control characters
+    use their named C escapes or, failing that, octal (\\ooo) form."""
+    dst = ""
+    for c in src:
+        if c in "\\\"":
+            dst += "\\" + c
+        elif ord(c) < 32:
+            if c == '\n':
+                dst += '\\n'
+            elif c == '\r':
+                dst += '\\r'
+            elif c == '\a':
+                dst += '\\a'
+            elif c == '\b':
+                dst += '\\b'
+            elif c == '\f':
+                dst += '\\f'
+            elif c == '\t':
+                dst += '\\t'
+            elif c == '\v':
+                dst += '\\v'
+            else:
+                dst += '\\%03o' % ord(c)
+        else:
+            dst += c
+    return dst
+
+def commafy(x):
+    """Returns integer x formatted in decimal with thousands set off by
+    commas."""
+    # Format to a digit string first, then insert the commas.
+    return _commafy("%d" % x)
+def _commafy(s):
+    # Recursively inserts commas every three digits from the right, keeping
+    # a leading '-' intact.
+    if s.startswith('-'):
+        return '-' + _commafy(s[1:])
+    elif len(s) <= 3:
+        return s
+    else:
+        return _commafy(s[:-3]) + ',' + _commafy(s[-3:])
+
+def returnUnchanged(x):
+    # Identity function, used as the default 'escapeLiteral' callback in
+    # the *ToEnglish() methods below.
+    return x
+
+class BaseType(object):
+ def __init__(self, type_, enum=None, min=None, max=None,
+ min_length = 0, max_length=sys.maxint, ref_table=None):
+ assert isinstance(type_, AtomicType)
+ self.type = type_
+ self.enum = enum
+ self.min = min
+ self.max = max
+ self.min_length = min_length
+ self.max_length = max_length
+ self.ref_table = ref_table
+
+ def default(self):
+ return ovs.db.data.Atom.default(self.type)
+
+ def __eq__(self, other):
+ if not isinstance(other, BaseType):
+ return NotImplemented
+ return (self.type == other.type and self.enum == other.enum and
+ self.min == other.min and self.max == other.max and
+ self.min_length == other.min_length and
+ self.max_length == other.max_length and
+ self.ref_table == other.ref_table)
+
+ def __ne__(self, other):
+ if not isinstance(other, BaseType):
+ return NotImplemented
+ else:
+ return not (self == other)
+
+ @staticmethod
+ def __parse_uint(parser, name, default):
+ value = parser.get_optional(name, [int, long])
+ if value is None:
+ value = default
+ else:
+ max_value = 2**32 - 1
+ if value < 0 or value > max_value:
+ raise error.Error("%s out of valid range 0 to %d"
+ % (name, max_value), value)
+ return value
+
+ @staticmethod
+ def from_json(json):
+ if type(json) == unicode:
+ return BaseType(AtomicType.from_json(json))
+
+ parser = ovs.db.parser.Parser(json, "ovsdb type")
+ atomic_type = AtomicType.from_json(parser.get("type", [str, unicode]))
+
+ base = BaseType(atomic_type)
+
+ enum = parser.get_optional("enum", [])
+ if enum is not None:
+ base.enum = ovs.db.data.Datum.from_json(BaseType.get_enum_type(base.type), enum)
+ elif base.type == IntegerType:
+ base.min = parser.get_optional("minInteger", [int, long])
+ base.max = parser.get_optional("maxInteger", [int, long])
+ if base.min is not None and base.max is not None and base.min > base.max:
+ raise error.Error("minInteger exceeds maxInteger", json)
+ elif base.type == RealType:
+ base.min = parser.get_optional("minReal", [int, long, float])
+ base.max = parser.get_optional("maxReal", [int, long, float])
+ if base.min is not None and base.max is not None and base.min > base.max:
+ raise error.Error("minReal exceeds maxReal", json)
+ elif base.type == StringType:
+ base.min_length = BaseType.__parse_uint(parser, "minLength", 0)
+ base.max_length = BaseType.__parse_uint(parser, "maxLength",
+ sys.maxint)
+ if base.min_length > base.max_length:
+ raise error.Error("minLength exceeds maxLength", json)
+ elif base.type == UuidType:
+ base.ref_table = parser.get_optional("refTable", ['id'])
+ if base.ref_table:
+ base.ref_type = parser.get_optional("refType", [str, unicode],
+ "strong")
+ if base.ref_type not in ['strong', 'weak']:
+ raise error.Error("refType must be \"strong\" or \"weak\" "
+ "(not \"%s\")" % base.ref_type)
+ parser.finish()
+
+ return base
+
+ def to_json(self):
+ if not self.has_constraints():
+ return self.type.to_json()
+
+ json = {'type': self.type.to_json()}
+
+ if self.enum:
+ json['enum'] = self.enum.to_json()
+
+ if self.type == IntegerType:
+ if self.min is not None:
+ json['minInteger'] = self.min
+ if self.max is not None:
+ json['maxInteger'] = self.max
+ elif self.type == RealType:
+ if self.min is not None:
+ json['minReal'] = self.min
+ if self.max is not None:
+ json['maxReal'] = self.max
+ elif self.type == StringType:
+ if self.min_length != 0:
+ json['minLength'] = self.min_length
+ if self.max_length != sys.maxint:
+ json['maxLength'] = self.max_length
+ elif self.type == UuidType:
+ if self.ref_table:
+ json['refTable'] = self.ref_table
+ if self.ref_type != 'strong':
+ json['refType'] = self.ref_type
+ return json
+
+ def clone(self):
+ return BaseType(self.type, self.enum.clone(), self.min, self.max,
+ self.min_length, self.max_length, self.ref_table)
+
+ def is_valid(self):
+ if self.type in (VoidType, BooleanType, UuidType):
+ return True
+ elif self.type in (IntegerType, RealType):
+ return self.min is None or self.max is None or self.min <= self.max
+ elif self.type == StringType:
+ return self.min_length <= self.max_length
+ else:
+ return False
+
+ def has_constraints(self):
+ return (self.enum is not None or self.min is not None or self.max is not None or
+ self.min_length != 0 or self.max_length != sys.maxint or
+ self.ref_table is not None)
+
+ def without_constraints(self):
+ return BaseType(self.type)
+
+ @staticmethod
+ def get_enum_type(atomic_type):
+ """Returns the type of the 'enum' member for a BaseType whose
+ 'type' is 'atomic_type'."""
+ return Type(BaseType(atomic_type), None, 1, sys.maxint)
+
+ def is_ref(self):
+ return self.type == UuidType and self.ref_table is not None
+
+ def is_strong_ref(self):
+ return self.is_ref() and self.ref_type == 'strong'
+
+ def is_weak_ref(self):
+ return self.is_ref() and self.ref_type == 'weak'
+
+ def toEnglish(self, escapeLiteral=returnUnchanged):
+ if self.type == UuidType and self.ref_table:
+ s = escapeLiteral(self.ref_table)
+ if self.ref_type == 'weak':
+ s = "weak reference to " + s
+ return s
+ else:
+ return self.type.to_string()
+
+ def constraintsToEnglish(self, escapeLiteral=returnUnchanged):
+ if self.enum:
+ literals = [value.toEnglish(escapeLiteral)
+ for value in self.enum.values]
+ if len(literals) == 2:
+ return 'either %s or %s' % (literals[0], literals[1])
+ else:
+ return 'one of %s, %s, or %s' % (literals[0],
+ ', '.join(literals[1:-1]),
+ literals[-1])
+ elif self.min is not None and self.max is not None:
+ if self.type == IntegerType:
+ return 'in range %s to %s' % (commafy(self.min),
+ commafy(self.max))
+ else:
+ return 'in range %g to %g' % (self.min, self.max)
+ elif self.min is not None:
+ if self.type == IntegerType:
+ return 'at least %s' % commafy(self.min)
+ else:
+ return 'at least %g' % self.min
+ elif self.max is not None:
+ if self.type == IntegerType:
+ return 'at most %s' % commafy(self.max)
+ else:
+ return 'at most %g' % self.max
+ elif self.min_length is not None and self.max_length is not None:
+ if self.min_length == self.max_length:
+ return 'exactly %d characters long' % (self.min_length)
+ else:
+ return 'between %d and %d characters long' % (self.min_length, self.max_length)
+ elif self.min_length is not None:
+ return 'at least %d characters long' % self.min_length
+ elif self.max_length is not None:
+ return 'at most %d characters long' % self.max_length
+ else:
+ return ''
+
+ def toCType(self, prefix):
+ if self.ref_table:
+ return "struct %s%s *" % (prefix, self.ref_table.lower())
+ else:
+ return {IntegerType: 'int64_t ',
+ RealType: 'double ',
+ UuidType: 'struct uuid ',
+ BooleanType: 'bool ',
+ StringType: 'char *'}[self.type]
+
+ def toAtomicType(self):
+ return "OVSDB_TYPE_%s" % self.type.to_string().upper()
+
+ def copyCValue(self, dst, src):
+ args = {'dst': dst, 'src': src}
+ if self.ref_table:
+ return ("%(dst)s = %(src)s->header_.uuid;") % args
+ elif self.type == StringType:
+ return "%(dst)s = xstrdup(%(src)s);" % args
+ else:
+ return "%(dst)s = %(src)s;" % args
+
+ def initCDefault(self, var, is_optional):
+ if self.ref_table:
+ return "%s = NULL;" % var
+ elif self.type == StringType and not is_optional:
+ return "%s = \"\";" % var
+ else:
+ pattern = {IntegerType: '%s = 0;',
+ RealType: '%s = 0.0;',
+ UuidType: 'uuid_zero(&%s);',
+ BooleanType: '%s = false;',
+ StringType: '%s = NULL;'}[self.type]
+ return pattern % var
+
+ def cInitBaseType(self, indent, var):
+ stmts = []
+ stmts.append('ovsdb_base_type_init(&%s, OVSDB_TYPE_%s);' % (
+ var, self.type.to_string().upper()),)
+ if self.enum:
+ stmts.append("%s.enum_ = xmalloc(sizeof *%s.enum_);"
+ % (var, var))
+ stmts += self.enum.cInitDatum("%s.enum_" % var)
+ if self.type == IntegerType:
+ if self.min is not None:
+ stmts.append('%s.u.integer.min = INT64_C(%d);' % (var, self.min))
+ if self.max is not None:
+ stmts.append('%s.u.integer.max = INT64_C(%d);' % (var, self.max))
+ elif self.type == RealType:
+ if self.min is not None:
+ stmts.append('%s.u.real.min = %d;' % (var, self.min))
+ if self.max is not None:
+ stmts.append('%s.u.real.max = %d;' % (var, self.max))
+ elif self.type == StringType:
+ if self.min_length is not None:
+ stmts.append('%s.u.string.minLen = %d;' % (var, self.min_length))
+ if self.max_length is not None:
+ stmts.append('%s.u.string.maxLen = %d;' % (var, self.max_length))
+ elif self.type == UuidType:
+ if self.ref_table is not None:
+ stmts.append('%s.u.uuid.refTableName = "%s";' % (var, escapeCString(self.ref_table)))
+ return '\n'.join([indent + stmt for stmt in stmts])
+
+class Type(object):
+ def __init__(self, key, value=None, n_min=1, n_max=1):
+ self.key = key
+ self.value = value
+ self.n_min = n_min
+ self.n_max = n_max
+
+ def clone(self):
+ if self.value is None:
+ value = None
+ else:
+ value = self.value.clone()
+ return Type(self.key.clone(), value, self.n_min, self.n_max)
+
+ def __eq__(self, other):
+ if not isinstance(other, Type):
+ return NotImplemented
+ return (self.key == other.key and self.value == other.value and
+ self.n_min == other.n_min and self.n_max == other.n_max)
+
+ def __ne__(self, other):
+ if not isinstance(other, BaseType):
+ return NotImplemented
+ else:
+ return not (self == other)
+
+ def is_valid(self):
+ return (self.key.type != VoidType and self.key.is_valid() and
+ (self.value is None or
+ (self.value.type != VoidType and self.value.is_valid())) and
+ self.n_min <= 1 and
+ self.n_min <= self.n_max and
+ self.n_max >= 1)
+
+ def is_scalar(self):
+ return self.n_min == 1 and self.n_max == 1 and not self.value
+
+ def is_optional(self):
+ return self.n_min == 0 and self.n_max == 1
+
+ def is_composite(self):
+ return self.n_max > 1
+
+ def is_set(self):
+ return self.value is None and (self.n_min != 1 or self.n_max != 1)
+
+ def is_map(self):
+ return self.value is not None
+
+ def is_optional_pointer(self):
+ return (self.is_optional() and not self.value
+ and (self.key.type == StringType or self.key.ref_table))
+
+ @staticmethod
+ def __n_from_json(json, default):
+ if json is None:
+ return default
+ elif type(json) == int and json >= 0 and json <= sys.maxint:
+ return json
+ else:
+ raise error.Error("bad min or max value", json)
+
+ @staticmethod
+ def from_json(json):
+ if type(json) in [str, unicode]:
+ return Type(BaseType.from_json(json))
+
+ parser = ovs.db.parser.Parser(json, "ovsdb type")
+ key_json = parser.get("key", [dict, unicode])
+ value_json = parser.get_optional("value", [dict, unicode])
+ min_json = parser.get_optional("min", [int])
+ max_json = parser.get_optional("max", [int, str, unicode])
+ parser.finish()
+
+ key = BaseType.from_json(key_json)
+ if value_json:
+ value = BaseType.from_json(value_json)
+ else:
+ value = None
+
+ n_min = Type.__n_from_json(min_json, 1)
+
+ if max_json == 'unlimited':
+ n_max = sys.maxint
+ else:
+ n_max = Type.__n_from_json(max_json, 1)
+
+ type_ = Type(key, value, n_min, n_max)
+ if not type_.is_valid():
+ raise error.Error("ovsdb type fails constraint checks", json)
+ return type_
+
+ def to_json(self):
+ if self.is_scalar() and not self.key.has_constraints():
+ return self.key.to_json()
+
+ json = {"key": self.key.to_json()}
+ if self.value is not None:
+ json["value"] = self.value.to_json()
+ if self.n_min != 1:
+ json["min"] = self.n_min
+ if self.n_max == sys.maxint:
+ json["max"] = "unlimited"
+ elif self.n_max != 1:
+ json["max"] = self.n_max
+ return json
+
+ def toEnglish(self, escapeLiteral=returnUnchanged):
+ keyName = self.key.toEnglish(escapeLiteral)
+ if self.value:
+ valueName = self.value.toEnglish(escapeLiteral)
+
+ if self.is_scalar():
+ return keyName
+ elif self.is_optional():
+ if self.value:
+ return "optional %s-%s pair" % (keyName, valueName)
+ else:
+ return "optional %s" % keyName
+ else:
+ if self.n_max == sys.maxint:
+ if self.n_min:
+ quantity = "%d or more " % self.n_min
+ else:
+ quantity = ""
+ elif self.n_min:
+ quantity = "%d to %d " % (self.n_min, self.n_max)
+ else:
+ quantity = "up to %d " % self.n_max
+
+ if self.value:
+ return "map of %s%s-%s pairs" % (quantity, keyName, valueName)
+ else:
+ if keyName.endswith('s'):
+ plural = keyName + "es"
+ else:
+ plural = keyName + "s"
+ return "set of %s%s" % (quantity, plural)
+
+ def constraintsToEnglish(self, escapeLiteral=returnUnchanged):
+ s = ""
+
+ constraints = []
+ keyConstraints = self.key.constraintsToEnglish(escapeLiteral)
+ if keyConstraints:
+ if self.value:
+ constraints += ['key ' + keyConstraints]
+ else:
+ constraints += [keyConstraints]
+
+ if self.value:
+ valueConstraints = self.value.constraintsToEnglish(escapeLiteral)
+ if valueConstraints:
+ constraints += ['value ' + valueConstraints]
+
+ return ', '.join(constraints)
+
+ def cDeclComment(self):
+ if self.n_min == 1 and self.n_max == 1 and self.key.type == StringType:
+ return "\t/* Always nonnull. */"
+ else:
+ return ""
+
+ def cInitType(self, indent, var):
+ initKey = self.key.cInitBaseType(indent, "%s.key" % var)
+ if self.value:
+ initValue = self.value.cInitBaseType(indent, "%s.value" % var)
+ else:
+ initValue = ('%sovsdb_base_type_init(&%s.value, '
+ 'OVSDB_TYPE_VOID);' % (indent, var))
+ initMin = "%s%s.n_min = %s;" % (indent, var, self.n_min)
+ if self.n_max == sys.maxint:
+ max = "UINT_MAX"
+ else:
+ max = self.n_max
+ initMax = "%s%s.n_max = %s;" % (indent, var, max)
+ return "\n".join((initKey, initValue, initMin, initMax))
+
--- /dev/null
+# These are the default directories. They will be replaced by the
+# configured directories at install time.
+# NOTE(review): presumably a build rule substitutes these values, like the
+# lib/dirs.c generation in the Makefile -- TODO confirm.
+
+PKGDATADIR = "/usr/local/share/openvswitch"
+RUNDIR = "/var/run"
+LOGDIR = "/usr/local/var/log"
+BINDIR = "/usr/local/bin"
--- /dev/null
+# Copyright (c) 2010 Nicira Networks
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import atexit
+import logging
+import os
+import signal
+
+# Whether the signal/atexit handlers have been installed, and the list of
+# registered (hook, cancel, run_at_exit) tuples.
+_inited = False
+_hooks = []
+
+def _init():
+    # Lazily installs the fatal-signal and atexit handlers the first time a
+    # hook is registered.  Signals that already have a non-default handler
+    # are left alone.
+    global _inited
+    if not _inited:
+        _inited = True
+
+        for signr in (signal.SIGTERM, signal.SIGINT,
+                      signal.SIGHUP, signal.SIGALRM):
+            if signal.getsignal(signr) == signal.SIG_DFL:
+                signal.signal(signr, _signal_handler)
+        atexit.register(_atexit_handler)
+
+def add_hook(hook, cancel, run_at_exit):
+    """Registers 'hook' to run on a fatal signal and, if 'run_at_exit' is
+    true, at normal process exit too.  'cancel' (which may be None) is
+    invoked by fork() to release the hook's resources without running
+    it."""
+    _init()
+
+    global _hooks
+    _hooks.append((hook, cancel, run_at_exit))
+
+def fork():
+    """Clears all of the fatal signal hooks without executing them. If any of
+    the hooks passed a 'cancel' function to add_hook(), then those functions
+    will be called, allowing them to free resources, etc.
+
+    Following a fork, one of the resulting processes can call this function to
+    allow it to terminate without calling the hooks registered before calling
+    this function. New hooks registered after calling this function will take
+    effect normally."""
+    global _hooks
+    # Only the cancel callbacks run; the hooks themselves are dropped.
+    for hook, cancel, run_at_exit in _hooks:
+        if cancel:
+            cancel()
+
+    _hooks = []
+\f
+# State for the file-unlinking hook: _files maps each registered path to
+# None (the dict is used as a set); _added_hook tracks one-time hook
+# registration.
+_added_hook = False
+_files = {}
+
+def add_file_to_unlink(file):
+    """Registers 'file' to be unlinked when the program terminates via
+    sys.exit() or a fatal signal."""
+    global _added_hook
+    if not _added_hook:
+        _added_hook = True
+        add_hook(_unlink_files, _cancel_files, True)
+    _files[file] = None
+
+def remove_file_to_unlink(file):
+    """Unregisters 'file' from being unlinked when the program terminates via
+    sys.exit() or a fatal signal."""
+    # Silently ignores files that were never registered.
+    if file in _files:
+        del _files[file]
+
+def unlink_file_now(file):
+    """Like remove_file_to_unlink(), but also unlinks 'file'.
+    Returns 0 if successful, otherwise a positive errno value."""
+    # (The docstring previously named the C function
+    # fatal_signal_remove_file_to_unlink().)
+    error = _unlink(file)
+    if error:
+        logging.warning("could not unlink \"%s\" (%s)"
+                        % (file, os.strerror(error)))
+    remove_file_to_unlink(file)
+    return error
+
+def _unlink_files():
+    # Hook: unlink every registered file.  Errors are ignored (the process
+    # is terminating).
+    for file in _files:
+        _unlink(file)
+
+def _cancel_files():
+ global _added_hook
+ global _files
+ _added_hook = False
+ _files = {}
+
+def _unlink(file):
+    # Helper: unlink 'file', mapping success to 0 and failure to the
+    # positive errno value, so callers can treat the result like a C
+    # return code (see unlink_file_now()).
+    try:
+        os.unlink(file)
+        return 0
+    except OSError, e:
+        return e.errno
+\f
+def _signal_handler(signr, frame):
+ _call_hooks(signr)
+
+ # Re-raise the signal with the default handling so that the program
+ # termination status reflects that we were killed by this signal.
+ signal.signal(signr, signal.SIG_DFL)
+ os.kill(os.getpid(), signr)
+
+def _atexit_handler():
+ _call_hooks(0)
+
+recurse = False
+def _call_hooks(signr):
+ global recurse
+ if recurse:
+ return
+ recurse = True
+
+ for hook, cancel, run_at_exit in _hooks:
+ if signr != 0 or run_at_exit:
+ hook()
--- /dev/null
+# Copyright (c) 2010 Nicira Networks
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import re
+import StringIO
+import sys
+
+escapes = {ord('"'): u"\\\"",
+ ord("\\"): u"\\\\",
+ ord("\b"): u"\\b",
+ ord("\f"): u"\\f",
+ ord("\n"): u"\\n",
+ ord("\r"): u"\\r",
+ ord("\t"): u"\\t"}
+for i in range(32):
+ if i not in escapes:
+ escapes[i] = u"\\u%04x" % i
+
+def __dump_string(stream, s):
+ stream.write(u"\"")
+ for c in s:
+ x = ord(c)
+ escape = escapes.get(x)
+ if escape:
+ stream.write(escape)
+ else:
+ stream.write(c)
+ stream.write(u"\"")
+
+def to_stream(obj, stream, pretty=False, sort_keys=True):
+ if obj is None:
+ stream.write(u"null")
+ elif obj is False:
+ stream.write(u"false")
+ elif obj is True:
+ stream.write(u"true")
+ elif type(obj) in (int, long):
+ stream.write(u"%d" % obj)
+ elif type(obj) == float:
+ stream.write("%.15g" % obj)
+ elif type(obj) == unicode:
+ __dump_string(stream, obj)
+ elif type(obj) == str:
+ __dump_string(stream, unicode(obj))
+ elif type(obj) == dict:
+ stream.write(u"{")
+ if sort_keys:
+ items = sorted(obj.items())
+ else:
+ items = obj.iteritems()
+ i = 0
+ for key, value in items:
+ if i > 0:
+ stream.write(u",")
+ i += 1
+ __dump_string(stream, unicode(key))
+ stream.write(u":")
+ to_stream(value, stream, pretty, sort_keys)
+ stream.write(u"}")
+ elif type(obj) in (list, tuple):
+ stream.write(u"[")
+ i = 0
+ for value in obj:
+ if i > 0:
+ stream.write(u",")
+ i += 1
+ to_stream(value, stream, pretty, sort_keys)
+ stream.write(u"]")
+ else:
+ raise Error("can't serialize %s as JSON" % obj)
+
+def to_file(obj, name, pretty=False, sort_keys=True):
+ stream = open(name, "w")
+ try:
+ to_stream(obj, stream, pretty, sort_keys)
+ finally:
+ stream.close()
+
+def to_string(obj, pretty=False, sort_keys=True):
+ output = StringIO.StringIO()
+ to_stream(obj, output, pretty, sort_keys)
+ s = output.getvalue()
+ output.close()
+ return s
+
+def from_stream(stream):
+ p = Parser(check_trailer=True)
+ while True:
+ buf = stream.read(4096)
+ if buf == "" or p.feed(buf) != len(buf):
+ break
+ return p.finish()
+
+def from_file(name):
+ stream = open(name, "r")
+ try:
+ return from_stream(stream)
+ finally:
+ stream.close()
+
+def from_string(s):
+ try:
+ s = unicode(s, 'utf-8')
+ except UnicodeDecodeError, e:
+ seq = ' '.join(["0x%2x" % ord(c) for c in e.object[e.start:e.end]])
+ raise Error("\"%s\" is not a valid UTF-8 string: "
+ "invalid UTF-8 sequence %s" % (s, seq),
+ tag="constraint violation")
+ p = Parser(check_trailer=True)
+ p.feed(s)
+ return p.finish()
+
+class Parser(object):
+ ## Maximum height of parsing stack. ##
+ MAX_HEIGHT = 1000
+
+ def __init__(self, check_trailer=False):
+ self.check_trailer = check_trailer
+
+ # Lexical analysis.
+ self.lex_state = Parser.__lex_start
+ self.buffer = ""
+ self.line_number = 0
+ self.column_number = 0
+ self.byte_number = 0
+
+ # Parsing.
+ self.parse_state = Parser.__parse_start
+ self.stack = []
+ self.member_name = None
+
+ # Parse status.
+ self.done = False
+ self.error = None
+
+ def __lex_start_space(self, c):
+ pass
+ def __lex_start_alpha(self, c):
+ self.buffer = c
+ self.lex_state = Parser.__lex_keyword
+ def __lex_start_token(self, c):
+ self.__parser_input(c)
+ def __lex_start_number(self, c):
+ self.buffer = c
+ self.lex_state = Parser.__lex_number
+ def __lex_start_string(self, c):
+ self.lex_state = Parser.__lex_string
+ def __lex_start_error(self, c):
+ if ord(c) >= 32 and ord(c) < 128:
+ self.__error("invalid character '%s'" % c)
+ else:
+ self.__error("invalid character U+%04x" % ord(c))
+
+ __lex_start_actions = {}
+ for c in " \t\n\r":
+ __lex_start_actions[c] = __lex_start_space
+ for c in "abcdefghijklmnopqrstuvwxyz":
+ __lex_start_actions[c] = __lex_start_alpha
+ for c in "[{]}:,":
+ __lex_start_actions[c] = __lex_start_token
+ for c in "-0123456789":
+ __lex_start_actions[c] = __lex_start_number
+ __lex_start_actions['"'] = __lex_start_string
+ def __lex_start(self, c):
+ Parser.__lex_start_actions.get(
+ c, Parser.__lex_start_error)(self, c)
+ return True
+
+ __lex_alpha = {}
+ for c in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ":
+ __lex_alpha[c] = True
+ def __lex_finish_keyword(self):
+ if self.buffer == "false":
+ self.__parser_input(False)
+ elif self.buffer == "true":
+ self.__parser_input(True)
+ elif self.buffer == "null":
+ self.__parser_input(None)
+ else:
+ self.__error("invalid keyword '%s'" % self.buffer)
+ def __lex_keyword(self, c):
+ if c in Parser.__lex_alpha:
+ self.buffer += c
+ return True
+ else:
+ self.__lex_finish_keyword()
+ return False
+
+ __number_re = re.compile("(-)?(0|[1-9][0-9]*)(?:\.([0-9]+))?(?:[eE]([-+]?[0-9]+))?$")
+ def __lex_finish_number(self):
+ s = self.buffer
+ m = Parser.__number_re.match(s)
+ if m:
+ sign, integer, fraction, exp = m.groups()
+ if (exp is not None and
+ (long(exp) > sys.maxint or long(exp) < -sys.maxint - 1)):
+ self.__error("exponent outside valid range")
+ return
+
+ if fraction is not None and len(fraction.lstrip('0')) == 0:
+ fraction = None
+
+ sig_string = integer
+ if fraction is not None:
+ sig_string += fraction
+ significand = int(sig_string)
+
+ pow10 = 0
+ if fraction is not None:
+ pow10 -= len(fraction)
+ if exp is not None:
+ pow10 += long(exp)
+
+ if significand == 0:
+ self.__parser_input(0)
+ return
+ elif significand <= 2**63:
+ while pow10 > 0 and significand <= 2*63:
+ significand *= 10
+ pow10 -= 1
+ while pow10 < 0 and significand % 10 == 0:
+ significand /= 10
+ pow10 += 1
+ if (pow10 == 0 and
+ ((not sign and significand < 2**63) or
+ (sign and significand <= 2**63))):
+ if sign:
+ self.__parser_input(-significand)
+ else:
+ self.__parser_input(significand)
+ return
+
+ value = float(s)
+ if value == float("inf") or value == float("-inf"):
+ self.__error("number outside valid range")
+ return
+ if value == 0:
+ # Suppress negative zero.
+ value = 0
+ self.__parser_input(value)
+ elif re.match("-?0[0-9]", s):
+ self.__error("leading zeros not allowed")
+ elif re.match("-([^0-9]|$)", s):
+ self.__error("'-' must be followed by digit")
+ elif re.match("-?(0|[1-9][0-9]*)\.([^0-9]|$)", s):
+ self.__error("decimal point must be followed by digit")
+ elif re.search("e[-+]?([^0-9]|$)", s):
+ self.__error("exponent must contain at least one digit")
+ else:
+ self.__error("syntax error in number")
+
+ def __lex_number(self, c):
+ if c in ".0123456789eE-+":
+ self.buffer += c
+ return True
+ else:
+ self.__lex_finish_number()
+ return False
+
+ __4hex_re = re.compile("[0-9a-fA-F]{4}")
+ def __lex_4hex(self, s):
+ if len(s) < 4:
+ self.__error("quoted string ends within \\u escape")
+ elif not Parser.__4hex_re.match(s):
+ self.__error("malformed \\u escape")
+ elif s == "0000":
+ self.__error("null bytes not supported in quoted strings")
+ else:
+ return int(s, 16)
+ @staticmethod
+ def __is_leading_surrogate(c):
+ """Returns true if 'c' is a Unicode code point for a leading
+ surrogate."""
+ return c >= 0xd800 and c <= 0xdbff
+ @staticmethod
+ def __is_trailing_surrogate(c):
+ """Returns true if 'c' is a Unicode code point for a trailing
+ surrogate."""
+ return c >= 0xdc00 and c <= 0xdfff
+ @staticmethod
+ def __utf16_decode_surrogate_pair(leading, trailing):
+ """Returns the unicode code point corresponding to leading surrogate
+ 'leading' and trailing surrogate 'trailing'. The return value will not
+ make any sense if 'leading' or 'trailing' are not in the correct ranges
+ for leading or trailing surrogates."""
+ # Leading surrogate: 110110wwwwxxxxxx
+ # Trailing surrogate: 110111xxxxxxxxxx
+ # Code point: 000uuuuuxxxxxxxxxxxxxxxx
+ w = (leading >> 6) & 0xf
+ u = w + 1
+ x0 = leading & 0x3f
+ x1 = trailing & 0x3ff
+ return (u << 16) | (x0 << 10) | x1
+ __unescape = {'"': u'"',
+ "\\": u"\\",
+ "/": u"/",
+ "b": u"\b",
+ "f": u"\f",
+ "n": u"\n",
+ "r": u"\r",
+ "t": u"\t"}
+ def __lex_finish_string(self):
+ inp = self.buffer
+ out = u""
+ while len(inp):
+ backslash = inp.find('\\')
+ if backslash == -1:
+ out += inp
+ break
+ out += inp[:backslash]
+ inp = inp[backslash + 1:]
+ if inp == "":
+ self.__error("quoted string may not end with backslash")
+ return
+
+ replacement = Parser.__unescape.get(inp[0])
+ if replacement is not None:
+ out += replacement
+ inp = inp[1:]
+ continue
+ elif inp[0] != u'u':
+ self.__error("bad escape \\%s" % inp[0])
+ return
+
+ c0 = self.__lex_4hex(inp[1:5])
+ if c0 is None:
+ return
+ inp = inp[5:]
+
+ if Parser.__is_leading_surrogate(c0):
+ if inp[:2] != u'\\u':
+ self.__error("malformed escaped surrogate pair")
+ return
+ c1 = self.__lex_4hex(inp[2:6])
+ if c1 is None:
+ return
+ if not Parser.__is_trailing_surrogate(c1):
+ self.__error("second half of escaped surrogate pair is "
+ "not trailing surrogate")
+ return
+ code_point = Parser.__utf16_decode_surrogate_pair(c0, c1)
+ inp = inp[6:]
+ else:
+ code_point = c0
+ out += unichr(code_point)
+ self.__parser_input('string', out)
+
+ def __lex_string_escape(self, c):
+ self.buffer += c
+ self.lex_state = Parser.__lex_string
+ return True
+ def __lex_string(self, c):
+ if c == '\\':
+ self.buffer += c
+ self.lex_state = Parser.__lex_string_escape
+ elif c == '"':
+ self.__lex_finish_string()
+ elif ord(c) >= 0x20:
+ self.buffer += c
+ else:
+ self.__error("U+%04X must be escaped in quoted string" % ord(c))
+ return True
+
+ def __lex_input(self, c):
+ self.byte_number += 1
+ if c == '\n':
+ self.column_number = 0
+ self.line_number += 1
+ else:
+ self.column_number += 1
+
+ eat = self.lex_state(self, c)
+ assert eat is True or eat is False
+ return eat
+
+ def __parse_start(self, token, string):
+ if token == '{':
+ self.__push_object()
+ elif token == '[':
+ self.__push_array()
+ else:
+ self.__error("syntax error at beginning of input")
+ def __parse_end(self, token, string):
+ self.__error("trailing garbage at end of input")
+ def __parse_object_init(self, token, string):
+ if token == '}':
+ self.__parser_pop()
+ else:
+ self.__parse_object_name(token, string)
+ def __parse_object_name(self, token, string):
+ if token == 'string':
+ self.member_name = string
+ self.parse_state = Parser.__parse_object_colon
+ else:
+ self.__error("syntax error parsing object expecting string")
+ def __parse_object_colon(self, token, string):
+ if token == ":":
+ self.parse_state = Parser.__parse_object_value
+ else:
+ self.__error("syntax error parsing object expecting ':'")
+ def __parse_object_value(self, token, string):
+ self.__parse_value(token, string, Parser.__parse_object_next)
+ def __parse_object_next(self, token, string):
+ if token == ",":
+ self.parse_state = Parser.__parse_object_name
+ elif token == "}":
+ self.__parser_pop()
+ else:
+ self.__error("syntax error expecting '}' or ','")
+ def __parse_array_init(self, token, string):
+ if token == ']':
+ self.__parser_pop()
+ else:
+ self.__parse_array_value(token, string)
+ def __parse_array_value(self, token, string):
+ self.__parse_value(token, string, Parser.__parse_array_next)
+ def __parse_array_next(self, token, string):
+ if token == ",":
+ self.parse_state = Parser.__parse_array_value
+ elif token == "]":
+ self.__parser_pop()
+ else:
+ self.__error("syntax error expecting ']' or ','")
+ def __parser_input(self, token, string=None):
+ self.lex_state = Parser.__lex_start
+ self.buffer = ""
+ #old_state = self.parse_state
+ self.parse_state(self, token, string)
+ #print ("token=%s string=%s old_state=%s new_state=%s"
+ # % (token, string, old_state, self.parse_state))
+
+ def __put_value(self, value):
+ top = self.stack[-1]
+ if type(top) == dict:
+ top[self.member_name] = value
+ else:
+ top.append(value)
+
+ def __parser_push(self, new_json, next_state):
+ if len(self.stack) < Parser.MAX_HEIGHT:
+ if len(self.stack) > 0:
+ self.__put_value(new_json)
+ self.stack.append(new_json)
+ self.parse_state = next_state
+ else:
+ self.__error("input exceeds maximum nesting depth %d" %
+ Parser.MAX_HEIGHT)
+ def __push_object(self):
+ self.__parser_push({}, Parser.__parse_object_init)
+ def __push_array(self):
+ self.__parser_push([], Parser.__parse_array_init)
+
+ def __parser_pop(self):
+ if len(self.stack) == 1:
+ self.parse_state = Parser.__parse_end
+ if not self.check_trailer:
+ self.done = True
+ else:
+ self.stack.pop()
+ top = self.stack[-1]
+ if type(top) == list:
+ self.parse_state = Parser.__parse_array_next
+ else:
+ self.parse_state = Parser.__parse_object_next
+
+ def __parse_value(self, token, string, next_state):
+ if token in [False, None, True] or type(token) in [int, long, float]:
+ self.__put_value(token)
+ elif token == 'string':
+ self.__put_value(string)
+ else:
+ if token == '{':
+ self.__push_object()
+ elif token == '[':
+ self.__push_array()
+ else:
+ self.__error("syntax error expecting value")
+ return
+ self.parse_state = next_state
+
+ def __error(self, message):
+ if self.error is None:
+ self.error = ("line %d, column %d, byte %d: %s"
+ % (self.line_number, self.column_number,
+ self.byte_number, message))
+ self.done = True
+
+ def feed(self, s):
+ i = 0
+ while True:
+ if self.done or i >= len(s):
+ return i
+ if self.__lex_input(s[i]):
+ i += 1
+
+ def is_done(self):
+ return self.done
+
+ def finish(self):
+ if self.lex_state == Parser.__lex_start:
+ pass
+ elif self.lex_state in (Parser.__lex_string,
+ Parser.__lex_string_escape):
+ self.__error("unexpected end of input in quoted string")
+ else:
+ self.__lex_input(" ")
+
+ if self.parse_state == Parser.__parse_start:
+ self.__error("empty input stream")
+ elif self.parse_state != Parser.__parse_end:
+ self.__error("unexpected end of input")
+
+ if self.error == None:
+ assert len(self.stack) == 1
+ return self.stack.pop()
+ else:
+ return self.error
--- /dev/null
+# Copyright (c) 2010 Nicira Networks
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import errno
+import logging
+import os
+
+import ovs.json
+import ovs.poller
+import ovs.reconnect
+import ovs.stream
+import ovs.timeval
+
+EOF = -1
+
+class Message(object):
+ T_REQUEST = 0 # Request.
+ T_NOTIFY = 1 # Notification.
+ T_REPLY = 2 # Successful reply.
+ T_ERROR = 3 # Error reply.
+
+ __types = {T_REQUEST: "request",
+ T_NOTIFY: "notification",
+ T_REPLY: "reply",
+ T_ERROR: "error"}
+ __next_id = 0
+
+ def __init__(self, type, method, params, result, error, id):
+ self.type = type
+ self.method = method
+ self.params = params
+ self.result = result
+ self.error = error
+ self.id = id
+
+ _next_id = 0
+ @staticmethod
+ def _create_id():
+ this_id = Message._next_id
+ Message._next_id += 1
+ return this_id
+
+ @staticmethod
+ def create_request(method, params):
+ return Message(Message.T_REQUEST, method, params, None, None,
+ Message._create_id())
+
+ @staticmethod
+ def create_notify(method, params):
+ return Message(Message.T_NOTIFY, method, params, None, None,
+ None)
+
+ @staticmethod
+ def create_reply(result, id):
+ return Message(Message.T_REPLY, None, None, result, None, id)
+
+ @staticmethod
+ def create_error(error, id):
+ return Message(Message.T_ERROR, None, None, None, error, id)
+
+ @staticmethod
+ def type_to_string(type):
+ return Message.__types[type]
+
+ @staticmethod
+ def __validate_arg(value, name, must_have):
+ if (value is not None) == (must_have != 0):
+ return None
+ else:
+ type_name = Message.type_to_string(self.type)
+ if must_have:
+ verb = "must"
+ else:
+ verb = "must not"
+ return "%s %s have \"%s\"" % (type_name, verb, name)
+
+ def is_valid(self):
+ if self.params is not None and type(self.params) != list:
+ return "\"params\" must be JSON array"
+
+ pattern = {Message.T_REQUEST: 0x11001,
+ Message.T_NOTIFY: 0x11000,
+ Message.T_REPLY: 0x00101,
+ Message.T_ERROR: 0x00011}.get(self.type)
+ if pattern is None:
+ return "invalid JSON-RPC message type %s" % self.type
+
+ return (
+ Message.__validate_arg(self.method, "method", pattern & 0x10000) or
+ Message.__validate_arg(self.params, "params", pattern & 0x1000) or
+ Message.__validate_arg(self.result, "result", pattern & 0x100) or
+ Message.__validate_arg(self.error, "error", pattern & 0x10) or
+ Message.__validate_arg(self.id, "id", pattern & 0x1))
+
+ @staticmethod
+ def from_json(json):
+ if type(json) != dict:
+ return "message is not a JSON object"
+
+ # Make a copy to avoid modifying the caller's dict.
+ json = dict(json)
+
+ if "method" in json:
+ method = json.pop("method")
+ if type(method) not in [str, unicode]:
+ return "method is not a JSON string"
+ else:
+ method = None
+
+ params = json.pop("params", None)
+ result = json.pop("result", None)
+ error = json.pop("error", None)
+ id = json.pop("id", None)
+ if len(json):
+ return "message has unexpected member \"%s\"" % json.popitem()[0]
+
+ if result is not None:
+ msg_type = Message.T_REPLY
+ elif error is not None:
+ msg_type = Message.T_ERROR
+ elif id is not None:
+ msg_type = Message.T_REQUEST
+ else:
+ msg_type = Message.T_NOTIFY
+
+ msg = Message(msg_type, method, params, result, error, id)
+ validation_error = msg.is_valid()
+ if validation_error is not None:
+ return validation_error
+ else:
+ return msg
+
+    def to_json(self):
+        """Returns this message as a JSON-serializable dict.
+
+        Per JSON-RPC 1.0, some members must be present with a null value
+        even when unused: an error reply carries "result": null, a
+        successful reply carries "error": null, and a notification
+        carries "id": null.  The "or self.type == ..." conditions below
+        produce exactly that, so they are intentional, not typos."""
+        json = {}
+
+        if self.method is not None:
+            json["method"] = self.method
+
+        if self.params is not None:
+            json["params"] = self.params
+
+        if self.result is not None or self.type == Message.T_ERROR:
+            json["result"] = self.result
+
+        if self.error is not None or self.type == Message.T_REPLY:
+            json["error"] = self.error
+
+        if self.id is not None or self.type == Message.T_NOTIFY:
+            json["id"] = self.id
+
+        return json
+
+ def __str__(self):
+ s = [Message.type_to_string(self.type)]
+ if self.method is not None:
+ s.append("method=\"%s\"" % self.method)
+ if self.params is not None:
+ s.append("params=" + ovs.json.to_string(self.params))
+ if self.error is not None:
+ s.append("error=" + ovs.json.to_string(self.error))
+ if self.id is not None:
+ s.append("id=" + ovs.json.to_string(self.id))
+ return ", ".join(s)
+
+class Connection(object):
+ def __init__(self, stream):
+ self.name = stream.get_name()
+ self.stream = stream
+ self.status = 0
+ self.input = ""
+ self.output = ""
+ self.parser = None
+
+ def close(self):
+ self.stream.close()
+ self.stream = None
+
+ def run(self):
+ if self.status:
+ return
+
+ while len(self.output):
+ retval = self.stream.send(self.output)
+ if retval >= 0:
+ self.output = self.output[retval:]
+ else:
+ if retval != -errno.EAGAIN:
+ logging.warn("%s: send error: %s" % (self.name,
+ os.strerror(-retval)))
+ self.error(-retval)
+ break
+
+ def wait(self, poller):
+ if not self.status:
+ self.stream.run_wait(poller)
+ if len(self.output):
+ self.stream.send_wait()
+
+ def get_status(self):
+ return self.status
+
+ def get_backlog(self):
+ if self.status != 0:
+ return 0
+ else:
+ return len(self.output)
+
+ def get_name(self):
+ return self.name
+
+ def __log_msg(self, title, msg):
+ logging.debug("%s: %s %s" % (self.name, title, msg))
+
+ def send(self, msg):
+ if self.status:
+ return self.status
+
+ self.__log_msg("send", msg)
+
+ was_empty = len(self.output) == 0
+ self.output += ovs.json.to_string(msg.to_json())
+ if was_empty:
+ self.run()
+ return self.status
+
+ def send_block(self, msg):
+ error = self.send(msg)
+ if error:
+ return error
+
+ while True:
+ self.run()
+ if not self.get_backlog() or self.get_status():
+ return self.status
+
+ poller = ovs.poller.Poller()
+ self.wait(poller)
+ poller.block()
+
+ def recv(self):
+ if self.status:
+ return self.status, None
+
+ while True:
+ if len(self.input) == 0:
+ error, data = self.stream.recv(4096)
+ if error:
+ if error == errno.EAGAIN:
+ return error, None
+ else:
+ # XXX rate-limit
+ logging.warning("%s: receive error: %s"
+ % (self.name, os.strerror(error)))
+ self.error(error)
+ return self.status, None
+ elif len(data) == 0:
+ self.error(EOF)
+ return EOF, None
+ else:
+ self.input += data
+ else:
+ if self.parser is None:
+ self.parser = ovs.json.Parser()
+ self.input = self.input[self.parser.feed(self.input):]
+ if self.parser.is_done():
+ msg = self.__process_msg()
+ if msg:
+ return 0, msg
+ else:
+ return self.status, None
+
+ def recv_block(self):
+ while True:
+ error, msg = self.recv()
+ if error != errno.EAGAIN:
+ return error, msg
+
+ self.run()
+
+ poller = ovs.poller.Poller()
+ self.wait(poller)
+ self.recv_wait(poller)
+ poller.block()
+
+ def transact_block(self, request):
+ id = request.id
+
+ error = self.send(request)
+ reply = None
+ while not error:
+ error, reply = self.recv_block()
+ if reply and reply.type == Message.T_REPLY and reply.id == id:
+ break
+ return error, reply
+
+ def __process_msg(self):
+ json = self.parser.finish()
+ self.parser = None
+ if type(json) in [str, unicode]:
+ # XXX rate-limit
+ logging.warning("%s: error parsing stream: %s" % (self.name, json))
+ self.error(errno.EPROTO)
+ return
+
+ msg = Message.from_json(json)
+ if not isinstance(msg, Message):
+ # XXX rate-limit
+ logging.warning("%s: received bad JSON-RPC message: %s"
+ % (self.name, msg))
+ self.error(errno.EPROTO)
+ return
+
+ self.__log_msg("received", msg)
+ return msg
+
+ def recv_wait(self, poller):
+ if self.status or len(self.input) > 0:
+ poller.immediate_wake()
+ else:
+ self.stream.recv_wait(poller)
+
+ def error(self, error):
+ if self.status == 0:
+ self.status = error
+ self.stream.close()
+ self.output = ""
+
+class Session(object):
+ """A JSON-RPC session with reconnection."""
+
+ def __init__(self, reconnect, rpc):
+ self.reconnect = reconnect
+ self.rpc = rpc
+ self.stream = None
+ self.pstream = None
+ self.seqno = 0
+
+ @staticmethod
+ def open(name):
+ """Creates and returns a Session that maintains a JSON-RPC session to
+ 'name', which should be a string acceptable to ovs.stream.Stream or
+ ovs.stream.PassiveStream's initializer.
+
+ If 'name' is an active connection method, e.g. "tcp:127.1.2.3", the new
+ session connects and reconnects, with back-off, to 'name'.
+
+ If 'name' is a passive connection method, e.g. "ptcp:", the new session
+ listens for connections to 'name'. It maintains at most one connection
+ at any given time. Any new connection causes the previous one (if any)
+ to be dropped."""
+ reconnect = ovs.reconnect.Reconnect(ovs.timeval.msec())
+ reconnect.set_name(name)
+ reconnect.enable(ovs.timeval.msec())
+
+ if ovs.stream.PassiveStream.is_valid_name(name):
+ self.reconnect.set_passive(True, ovs.timeval.msec())
+
+ return Session(reconnect, None)
+
+ @staticmethod
+ def open_unreliably(jsonrpc):
+ reconnect = ovs.reconnect.Reconnect(ovs.timeval.msec())
+ reconnect.set_quiet(True)
+ reconnect.set_name(jsonrpc.get_name())
+ reconnect.set_max_tries(0)
+ reconnect.connected(ovs.timeval.msec())
+ return Session(reconnect, jsonrpc)
+
+ def close(self):
+ if self.rpc is not None:
+ self.rpc.close()
+ self.rpc = None
+ if self.stream is not None:
+ self.stream.close()
+ self.stream = None
+ if self.pstream is not None:
+ self.pstream.close()
+ self.pstream = None
+
+ def __disconnect(self):
+ if self.rpc is not None:
+ self.rpc.error(EOF)
+ self.rpc.close()
+ self.rpc = None
+ self.seqno += 1
+ elif self.stream is not None:
+ self.stream.close()
+ self.stream = None
+ self.seqno += 1
+
+ def __connect(self):
+ self.__disconnect()
+
+ name = self.reconnect.get_name()
+ if not self.reconnect.is_passive():
+ error, self.stream = ovs.stream.Stream.open(name)
+ if not error:
+ self.reconnect.connecting(ovs.timeval.msec())
+ else:
+ self.reconnect.connect_failed(ovs.timeval.msec(), error)
+ elif self.pstream is not None:
+ error, self.pstream = ovs.stream.PassiveStream.open(name)
+ if not error:
+ self.reconnect.listening(ovs.timeval.msec())
+ else:
+ self.reconnect.connect_failed(ovs.timeval.msec(), error)
+
+ self.seqno += 1
+
+ def run(self):
+ if self.pstream is not None:
+ error, stream = self.pstream.accept()
+ if error == 0:
+ if self.rpc or self.stream:
+ # XXX rate-limit
+ logging.info("%s: new connection replacing active "
+ "connection" % self.reconnect.get_name())
+ self.__disconnect()
+ self.reconnect.connected(ovs.timeval.msec())
+ self.rpc = Connection(stream)
+ elif error != errno.EAGAIN:
+ self.reconnect.listen_error(ovs.timeval.msec(), error)
+ self.pstream.close()
+ self.pstream = None
+
+ if self.rpc:
+ self.rpc.run()
+ error = self.rpc.get_status()
+ if error != 0:
+ self.reconnect.disconnected(ovs.timeval.msec(), error)
+ self.__disconnect()
+ elif self.stream is not None:
+ self.stream.run()
+ error = self.stream.connect()
+ if error == 0:
+ self.reconnect.connected(ovs.timeval.msec())
+ self.rpc = Connection(self.stream)
+ self.stream = None
+ elif error != errno.EAGAIN:
+ self.reconnect.connect_failed(ovs.timeval.msec(), error)
+ self.stream.close()
+ self.stream = None
+
+ action = self.reconnect.run(ovs.timeval.msec())
+ if action == ovs.reconnect.CONNECT:
+ self.__connect()
+ elif action == ovs.reconnect.DISCONNECT:
+ self.reconnect.disconnected(ovs.timeval.msec(), 0)
+ self.__disconnect()
+ elif action == ovs.reconnect.PROBE:
+ if self.rpc:
+ request = Message.create_request("echo", [])
+ request.id = "echo"
+ self.rpc.send(request)
+ else:
+ assert action == None
+
+ def wait(self, poller):
+ if self.rpc is not None:
+ self.rpc.wait(poller)
+ elif self.stream is not None:
+ self.stream.run_wait(poller)
+ self.stream.connect_wait(poller)
+ if self.pstream is not None:
+ self.pstream.wait(poller)
+ self.reconnect.wait(poller, ovs.timeval.msec())
+
+ def get_backlog(self):
+ if self.rpc is not None:
+ return self.rpc.get_backlog()
+ else:
+ return 0
+
+ def get_name(self):
+ return self.reconnect.get_name()
+
+ def send(self, msg):
+ if self.rpc is not None:
+ return self.rpc.send(msg)
+ else:
+ return errno.ENOTCONN
+
+ def recv(self):
+ if self.rpc is not None:
+ error, msg = self.rpc.recv()
+ if not error:
+ self.reconnect.received(ovs.timeval.msec())
+ if msg.type == Message.T_REQUEST and msg.method == "echo":
+ # Echo request. Send reply.
+ self.send(Message.create_reply(msg.params, msg.id))
+ elif msg.type == Message.T_REPLY and msg.id == "echo":
+ # It's a reply to our echo request. Suppress it.
+ pass
+ else:
+ return msg
+ return None
+
+ def recv_wait(self, poller):
+ if self.rpc is not None:
+ self.rpc.recv_wait(poller)
+
+ def is_alive(self):
+ if self.rpc is not None or self.stream is not None:
+ return True
+ else:
+ max_tries = self.reconnect.get_max_tries()
+ return max_tries is None or max_tries > 0
+
+ def is_connected(self):
+ return self.rpc is not None
+
+ def get_seqno(self):
+ return self.seqno
+
+ def force_reconnect(self):
+ self.reconnect.force_reconnect(ovs.timeval.msec())
--- /dev/null
+# Copyright (c) 2009, 2010 Nicira Networks
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import re
+import uuid
+
+from ovs.db import error
+import ovs.db.parser
+
+class UUID(uuid.UUID):
+ x = "[0-9a-fA-f]"
+ uuidRE = re.compile("^(%s{8})-(%s{4})-(%s{4})-(%s{4})-(%s{4})(%s{8})$"
+ % (x, x, x, x, x, x))
+
+ def __init__(self, s):
+ uuid.UUID.__init__(self, hex=s)
+
+ @staticmethod
+ def zero():
+ return UUID('00000000-0000-0000-0000-000000000000')
+
+ def is_zero(self):
+ return self.int == 0
+
+ @staticmethod
+ def is_valid_string(s):
+ return UUID.uuidRE.match(s) != None
+
+ @staticmethod
+ def from_string(s):
+ if not UUID.is_valid_string(s):
+ raise error.Error("%s is not a valid UUID" % s)
+ return UUID(s)
+
+ @staticmethod
+ def from_json(json, symtab=None):
+ try:
+ s = ovs.db.parser.unwrap_json(json, "uuid", unicode)
+ if not UUID.uuidRE.match(s):
+ raise error.Error("\"%s\" is not a valid UUID" % s, json)
+ return UUID(s)
+ except error.Error, e:
+ try:
+ name = ovs.db.parser.unwrap_json(json, "named-uuid", unicode)
+ except error.Error:
+ raise e
+
+ if name not in symtab:
+ symtab[name] = uuid4()
+ return symtab[name]
+
+ def to_json(self):
+ return ["uuid", str(self)]
+
+ def cInitUUID(self, var):
+ m = re.match(str(self))
+ return ["%s.parts[0] = 0x%s;" % (var, m.group(1)),
+ "%s.parts[1] = 0x%s%s;" % (var, m.group(2), m.group(3)),
+ "%s.parts[2] = 0x%s%s;" % (var, m.group(4), m.group(5)),
+ "%s.parts[3] = 0x%s;" % (var, m.group(6))]
+
--- /dev/null
+# Copyright (c) 2010 Nicira Networks
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import errno
+import logging
+import select
+
+class Poller(object):
+    """High-level wrapper around the "poll" system call.
+
+    Intended usage is for the program's main loop to go about its business
+    servicing whatever events it needs to.  Then, when it runs out of immediate
+    tasks, it calls each subordinate module or object's "wait" function, which
+    in turn calls one (or more) of the functions Poller.fd_wait(),
+    Poller.immediate_wake(), and Poller.timer_wait() to register to be awakened
+    when the appropriate event occurs.  Then the main loop calls
+    Poller.block(), which blocks until one of the registered events happens."""
+
+    def __init__(self):
+        # All registrations are one-shot, so a new Poller simply starts from
+        # a clean slate.
+        self.__reset()
+
+    def fd_wait(self, fd, events):
+        """Registers 'fd' as waiting for the specified 'events' (which should
+        be select.POLLIN or select.POLLOUT or their bitwise-OR).  The following
+        call to self.block() will wake up when 'fd' becomes ready for one or
+        more of the requested events.
+
+        The event registration is one-shot: only the following call to
+        self.block() is affected.  The event will need to be re-registered
+        after self.block() is called if it is to persist.
+
+        'fd' may be an integer file descriptor or an object with a fileno()
+        method that returns an integer file descriptor."""
+        self.poll.register(fd, events)
+
+    def __timer_wait(self, msec):
+        # Keep only the soonest of all requested timeouts.  self.timeout of
+        # -1 means "no timer registered yet" (poll() treats a negative
+        # timeout as infinite).
+        if self.timeout < 0 or msec < self.timeout:
+            self.timeout = msec
+
+    def timer_wait(self, msec):
+        """Causes the following call to self.block() to block for no more than
+        'msec' milliseconds.  If 'msec' is nonpositive, the following call to
+        self.block() will not block at all.
+
+        The timer registration is one-shot: only the following call to
+        self.block() is affected.  The timer will need to be re-registered
+        after self.block() is called if it is to persist."""
+        if msec <= 0:
+            self.immediate_wake()
+        else:
+            self.__timer_wait(msec)
+
+    def timer_wait_until(self, msec):
+        """Causes the following call to self.block() to wake up when the
+        current time, as returned by Time.msec(), reaches 'msec' or later.  If
+        'msec' is earlier than the current time, the following call to
+        self.block() will not block at all.
+
+        The timer registration is one-shot: only the following call to
+        self.block() is affected.  The timer will need to be re-registered
+        after self.block() is called if it is to persist."""
+        # NOTE(review): "Time" is neither defined nor imported in this
+        # module -- presumably this should use an ovs time/timeval helper;
+        # confirm before this method is called.
+        now = Time.msec()
+        if msec <= now:
+            self.immediate_wake()
+        else:
+            self.__timer_wait(msec - now)
+
+    def immediate_wake(self):
+        """Causes the following call to self.block() to wake up immediately,
+        without blocking."""
+        self.timeout = 0
+
+    def block(self):
+        """Blocks until one or more of the events registered with
+        self.fd_wait() occurs, or until the minimum duration registered with
+        self.timer_wait() elapses, or not at all if self.immediate_wake() has
+        been called."""
+        try:
+            try:
+                # self.timeout is -1 (block forever) unless a timer or an
+                # immediate wake was registered.
+                events = self.poll.poll(self.timeout)
+                self.__log_wakeup(events)
+            except select.error, e:
+                # XXX rate-limit
+                # select.error for an interrupted poll carries an
+                # (errno, message) tuple; EINTR is expected and silent.
+                error, msg = e
+                if error != errno.EINTR:
+                    logging.error("poll: %s" % e[1])
+        finally:
+            # Registrations are one-shot: discard them whether or not the
+            # poll succeeded.
+            self.__reset()
+
+    def __log_wakeup(self, events):
+        # Debug-level trace of why block() returned: either the timeout
+        # expired (empty event list) or one or more fds became ready.
+        if not events:
+            logging.debug("%d-ms timeout" % self.timeout)
+        else:
+            for fd, revents in events:
+                if revents != 0:
+                    s = ""
+                    if revents & select.POLLIN:
+                        s += "[POLLIN]"
+                    if revents & select.POLLOUT:
+                        s += "[POLLOUT]"
+                    if revents & select.POLLERR:
+                        s += "[POLLERR]"
+                    if revents & select.POLLHUP:
+                        s += "[POLLHUP]"
+                    if revents & select.POLLNVAL:
+                        s += "[POLLNVAL]"
+                    logging.debug("%s on fd %d" % (s, fd))
+
+    def __reset(self):
+        # Fresh poll set and "no timeout" (-1 = infinite for poll()).
+        self.poll = select.poll()
+        self.timeout = -1
+
--- /dev/null
+# Copyright (c) 2010 Nicira Networks
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import signal
+
+def _signal_status_msg(type, signr):
+ s = "%s by signal %d" % (type, signr)
+ for name in signal.__dict__:
+ if name.startswith("SIG") and signal.__dict__[name] == signr:
+ return "%s (%s)" % (s, name)
+ return s
+
+def status_msg(status):
+    """Given 'status', which is a process status in the form reported by
+    waitpid(2) and returned by process_status(), returns a string describing
+    how the process terminated."""
+    if os.WIFEXITED(status):
+        s = "exit status %d" % os.WEXITSTATUS(status)
+    elif os.WIFSIGNALED(status):
+        s = _signal_status_msg("killed", os.WTERMSIG(status))
+    elif os.WIFSTOPPED(status):
+        s = _signal_status_msg("stopped", os.WSTOPSIG(status))
+    else:
+        s = "terminated abnormally (%x)" % status
+    # The core-dump flag is checked regardless of which branch was taken.
+    if os.WCOREDUMP(status):
+        s += ", core dumped"
+    return s
--- /dev/null
+# Copyright (c) 2010 Nicira Networks
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import os
+
+# Values returned by Reconnect.run()
+CONNECT = 'connect'
+DISCONNECT = 'disconnect'
+PROBE = 'probe'
+
+EOF = -1
+
+class Reconnect(object):
+    """A finite-state machine for connecting and reconnecting to a network
+    resource with exponential backoff.  It also provides optional support for
+    detecting a connection on which the peer is no longer responding.
+
+    The library does not implement anything networking related, only an FSM for
+    networking code to use.
+
+    Many Reconnect methods take a "now" argument.  This makes testing easier
+    since there is no hidden state.  When not testing, just pass the return
+    value of ovs.time.msec().  (Perhaps this design should be revisited
+    later.)"""
+
+    # Each state below is a class with a 'name', an 'is_connected' flag, a
+    # deadline() returning the time at which run() should act (None means
+    # never), and a run() returning the action the client should take.
+
+    class Void(object):
+        name = "VOID"
+        is_connected = False
+
+        @staticmethod
+        def deadline(fsm):
+            return None
+
+        @staticmethod
+        def run(fsm, now):
+            return None
+
+    class Listening(object):
+        name = "LISTENING"
+        is_connected = False
+
+        @staticmethod
+        def deadline(fsm):
+            return None
+
+        @staticmethod
+        def run(fsm, now):
+            return None
+
+    class Backoff(object):
+        name = "BACKOFF"
+        is_connected = False
+
+        @staticmethod
+        def deadline(fsm):
+            # Wait out the current backoff interval before reconnecting.
+            return fsm.state_entered + fsm.backoff
+
+        @staticmethod
+        def run(fsm, now):
+            return CONNECT
+
+    class ConnectInProgress(object):
+        name = "CONNECT_IN_PROGRESS"
+        is_connected = False
+
+        @staticmethod
+        def deadline(fsm):
+            # Give a connection attempt at least 1 second before aborting.
+            return fsm.state_entered + max(1000, fsm.backoff)
+
+        @staticmethod
+        def run(fsm, now):
+            return DISCONNECT
+
+    class Active(object):
+        name = "ACTIVE"
+        is_connected = True
+
+        @staticmethod
+        def deadline(fsm):
+            # Probe when nothing has been received for a full probe
+            # interval; a probe_interval of 0 disables probing (None).
+            if fsm.probe_interval:
+                base = max(fsm.last_received, fsm.state_entered)
+                return base + fsm.probe_interval
+            return None
+
+        @staticmethod
+        def run(fsm, now):
+            logging.debug("%s: idle %d ms, sending inactivity probe"
+                          % (fsm.name,
+                             now - max(fsm.last_received, fsm.state_entered)))
+            fsm._transition(now, Reconnect.Idle)
+            return PROBE
+
+    class Idle(object):
+        name = "IDLE"
+        is_connected = True
+
+        @staticmethod
+        def deadline(fsm):
+            # A probe has been sent; allow one more interval for a reply.
+            return fsm.state_entered + fsm.probe_interval
+
+        @staticmethod
+        def run(fsm, now):
+            logging.error("%s: no response to inactivity probe after %.3g "
+                          "seconds, disconnecting"
+                          % (fsm.name, (now - fsm.state_entered) / 1000.0))
+            return DISCONNECT
+
+    class Reconnect:
+        name = "RECONNECT"
+        is_connected = False
+
+        @staticmethod
+        def deadline(fsm):
+            # Expire immediately: drop the connection on the next run().
+            return fsm.state_entered
+
+        @staticmethod
+        def run(fsm, now):
+            return DISCONNECT
+
+    def __init__(self, now):
+        """Creates and returns a new reconnect FSM with default settings.  The
+        FSM is initially disabled.  The caller will likely want to call
+        self.enable() and self.set_name() on the returned object."""
+
+        # Configuration.
+        self.name = "void"
+        self.min_backoff = 1000
+        self.max_backoff = 8000
+        self.probe_interval = 5000
+        self.passive = False
+        self.info_level = logging.info
+
+        # FSM state (all times in milliseconds).
+        self.state = Reconnect.Void
+        self.state_entered = now
+        self.backoff = 0
+        self.last_received = now
+        self.last_connected = now
+        self.max_tries = None
+
+        # Statistics reported by get_stats().
+        self.creation_time = now
+        self.n_attempted_connections = 0
+        self.n_successful_connections = 0
+        self.total_connected_duration = 0
+        self.seqno = 0
+
+    def set_quiet(self, quiet):
+        """If 'quiet' is true, this object will log informational messages at
+        debug level, by default keeping them out of log files.  This is
+        appropriate if the connection is one that is expected to be
+        short-lived, so that the log messages are merely distracting.
+
+        If 'quiet' is false, this object logs informational messages at info
+        level.  This is the default.
+
+        This setting has no effect on the log level of debugging, warning, or
+        error messages."""
+        if quiet:
+            self.info_level = logging.debug
+        else:
+            self.info_level = logging.info
+
+    def get_name(self):
+        """Returns this object's name, as used in log messages."""
+        return self.name
+
+    def set_name(self, name):
+        """Sets this object's name to 'name'.  If 'name' is None, then "void"
+        is used instead.
+
+        The name is used in log messages."""
+        if name is None:
+            self.name = "void"
+        else:
+            self.name = name
+
+    def get_min_backoff(self):
+        """Return the minimum number of milliseconds to back off between
+        consecutive connection attempts.  The default is 1000 ms."""
+        return self.min_backoff
+
+    def get_max_backoff(self):
+        """Return the maximum number of milliseconds to back off between
+        consecutive connection attempts.  The default is 8000 ms."""
+        return self.max_backoff
+
+    def get_probe_interval(self):
+        """Returns the "probe interval" in milliseconds.  If this is zero, it
+        disables the connection keepalive feature.  If it is nonzero, then if
+        the interval passes while the FSM is connected and without
+        self.received() being called, self.run() returns ovs.reconnect.PROBE.
+        If the interval passes again without self.received() being called,
+        self.run() returns ovs.reconnect.DISCONNECT."""
+        return self.probe_interval
+
+    def set_max_tries(self, max_tries):
+        """Limits the maximum number of times that this object will ask the
+        client to try to reconnect to 'max_tries'.  None (the default) means an
+        unlimited number of tries.
+
+        After the number of tries has expired, the FSM will disable itself
+        instead of backing off and retrying."""
+        self.max_tries = max_tries
+
+    def get_max_tries(self):
+        """Returns the current remaining number of connection attempts,
+        None if the number is unlimited."""
+        return self.max_tries
+
+ def set_backoff(self, min_backoff, max_backoff):
+ """Configures the backoff parameters for this FSM. 'min_backoff' is
+ the minimum number of milliseconds, and 'max_backoff' is the maximum,
+ between connection attempts.
+
+ 'min_backoff' must be at least 1000, and 'max_backoff' must be greater
+ than or equal to 'min_backoff'."""
+ self.min_backoff = max(min_backoff, 1000)
+ if self.max_backoff:
+ self.max_backoff = max(max_backoff, 1000)
+ else:
+ self.max_backoff = 8000
+ if self.min_backoff > self.max_backoff:
+ self.max_backoff = self.min_backoff
+
+ if (self.state == Reconnect.Backoff and
+ self.backoff > self.max_backoff):
+ self.backoff = self.max_backoff
+
+    def set_probe_interval(self, probe_interval):
+        """Sets the "probe interval" to 'probe_interval', in milliseconds.  If
+        this is zero, it disables the connection keepalive feature.  If it is
+        nonzero, then if the interval passes while this FSM is connected and
+        without self.received() being called, self.run() returns
+        ovs.reconnect.PROBE.  If the interval passes again without
+        self.received() being called, self.run() returns
+        ovs.reconnect.DISCONNECT.
+
+        If 'probe_interval' is nonzero, then it will be forced to a value of at
+        least 1000 ms."""
+        if probe_interval:
+            self.probe_interval = max(1000, probe_interval)
+        else:
+            self.probe_interval = 0
+
+    def is_passive(self):
+        """Returns true if 'fsm' is in passive mode, false if 'fsm' is in
+        active mode (the default)."""
+        return self.passive
+
+    def set_passive(self, passive, now):
+        """Configures this FSM for active or passive mode.  In active mode (the
+        default), the FSM is attempting to connect to a remote host.  In
+        passive mode, the FSM is listening for connections from a remote host."""
+        if self.passive != passive:
+            self.passive = passive
+
+            # If the mode change invalidates the current state (connecting
+            # while passive, or listening while active), restart from
+            # Backoff with no delay.
+            if ((passive and self.state in (Reconnect.ConnectInProgress,
+                                            Reconnect.Reconnect)) or
+                (not passive and self.state == Reconnect.Listening
+                 and self.__may_retry())):
+                self._transition(now, Reconnect.Backoff)
+                self.backoff = 0
+
+    def is_enabled(self):
+        """Returns true if this FSM has been enabled with self.enable().
+        Calling another function that indicates a change in connection state,
+        such as self.disconnected() or self.force_reconnect(), will also enable
+        a reconnect FSM."""
+        return self.state != Reconnect.Void
+
+    def enable(self, now):
+        """If this FSM is disabled (the default for newly created FSMs),
+        enables it, so that the next call to reconnect_run() for 'fsm' will
+        return ovs.reconnect.CONNECT.
+
+        If this FSM is not disabled, this function has no effect."""
+        if self.state == Reconnect.Void and self.__may_retry():
+            self._transition(now, Reconnect.Backoff)
+            self.backoff = 0
+
+    def disable(self, now):
+        """Disables this FSM.  Until 'fsm' is enabled again, self.run() will
+        always return 0."""
+        if self.state != Reconnect.Void:
+            self._transition(now, Reconnect.Void)
+
+    def force_reconnect(self, now):
+        """If this FSM is enabled and currently connected (or attempting to
+        connect), forces self.run() to return ovs.reconnect.DISCONNECT the next
+        time it is called, which should cause the client to drop the connection
+        (or attempt), back off, and then reconnect."""
+        if self.state in (Reconnect.ConnectInProgress,
+                          Reconnect.Active,
+                          Reconnect.Idle):
+            self._transition(now, Reconnect.Reconnect)
+
+    def disconnected(self, now, error):
+        """Tell this FSM that the connection dropped or that a connection
+        attempt failed.  'error' specifies the reason: a positive value
+        represents an errno value, EOF indicates that the connection was closed
+        by the peer (e.g. read() returned 0), and 0 indicates no specific
+        error.
+
+        The FSM will back off, then reconnect."""
+        if self.state not in (Reconnect.Backoff, Reconnect.Void):
+            # Report what happened
+            if self.state in (Reconnect.Active, Reconnect.Idle):
+                if error > 0:
+                    logging.warning("%s: connection dropped (%s)"
+                                    % (self.name, os.strerror(error)))
+                elif error == EOF:
+                    self.info_level("%s: connection closed by peer"
+                                    % self.name)
+                else:
+                    self.info_level("%s: connection dropped" % self.name)
+            elif self.state == Reconnect.Listening:
+                if error > 0:
+                    logging.warning("%s: error listening for connections (%s)"
+                                    % (self.name, os.strerror(error)))
+                else:
+                    self.info_level("%s: error listening for connections"
+                                    % self.name)
+            else:
+                if self.passive:
+                    type = "listen"
+                else:
+                    type = "connection"
+                if error > 0:
+                    logging.warning("%s: %s attempt failed (%s)"
+                                    % (self.name, type, os.strerror(error)))
+                else:
+                    self.info_level("%s: %s attempt timed out"
+                                    % (self.name, type))
+
+            # Back off
+            # A connection that survived at least one backoff interval
+            # resets the backoff; otherwise the backoff doubles, capped at
+            # max_backoff.
+            if (self.state in (Reconnect.Active, Reconnect.Idle) and
+                (self.last_received - self.last_connected >= self.backoff or
+                 self.passive)):
+                if self.passive:
+                    self.backoff = 0
+                else:
+                    self.backoff = self.min_backoff
+            else:
+                if self.backoff < self.min_backoff:
+                    self.backoff = self.min_backoff
+                elif self.backoff >= self.max_backoff / 2:
+                    self.backoff = self.max_backoff
+                else:
+                    self.backoff *= 2
+
+            if self.passive:
+                self.info_level("%s: waiting %.3g seconds before trying "
+                                "to listen again"
+                                % (self.name, self.backoff / 1000.0))
+            else:
+                self.info_level("%s: waiting %.3g seconds before reconnect"
+                                % (self.name, self.backoff / 1000.0))
+
+            # Disable the FSM entirely once the retry budget is spent.
+            if self.__may_retry():
+                self._transition(now, Reconnect.Backoff)
+            else:
+                self._transition(now, Reconnect.Void)
+
+    def connecting(self, now):
+        """Tell this FSM that a connection or listening attempt is in progress.
+
+        The FSM will start a timer, after which the connection or listening
+        attempt will be aborted (by returning ovs.reconnect.DISCONNECT from
+        self.run())."""
+        if self.state != Reconnect.ConnectInProgress:
+            if self.passive:
+                self.info_level("%s: listening..." % self.name)
+            else:
+                self.info_level("%s: connecting..." % self.name)
+            self._transition(now, Reconnect.ConnectInProgress)
+
+    def listening(self, now):
+        """Tell this FSM that the client is listening for connection attempts.
+        This state last indefinitely until the client reports some change.
+
+        The natural progression from this state is for the client to report
+        that a connection has been accepted or is in progress of being
+        accepted, by calling self.connecting() or self.connected().
+
+        The client may also report that listening failed (e.g. accept()
+        returned an unexpected error such as ENOMEM) by calling
+        self.listen_error(), in which case the FSM will back off and eventually
+        return ovs.reconnect.CONNECT from self.run() to tell the client to try
+        listening again."""
+        if self.state != Reconnect.Listening:
+            self.info_level("%s: listening..." % self.name)
+            self._transition(now, Reconnect.Listening)
+
+    def listen_error(self, now, error):
+        """Tell this FSM that the client's attempt to accept a connection
+        failed (e.g. accept() returned an unexpected error such as ENOMEM).
+
+        If the FSM is currently listening (self.listening() was called), it
+        will back off and eventually return ovs.reconnect.CONNECT from
+        self.run() to tell the client to try listening again.  If there is an
+        active connection, this will be delayed until that connection drops."""
+        if self.state == Reconnect.Listening:
+            self.disconnected(now, error)
+
+    def connected(self, now):
+        """Tell this FSM that the connection was successful.
+
+        The FSM will start the probe interval timer, which is reset by
+        self.received().  If the timer expires, a probe will be sent (by
+        returning ovs.reconnect.PROBE from self.run().  If the timer expires
+        again without being reset, the connection will be aborted (by returning
+        ovs.reconnect.DISCONNECT from self.run()."""
+        if not self.state.is_connected:
+            self.connecting(now)
+
+        self.info_level("%s: connected" % self.name)
+        self._transition(now, Reconnect.Active)
+        self.last_connected = now
+
+    def connect_failed(self, now, error):
+        """Tell this FSM that the connection attempt failed.
+
+        The FSM will back off and attempt to reconnect."""
+        self.connecting(now)
+        self.disconnected(now, error)
+
+    def received(self, now):
+        """Tell this FSM that some data was received.  This resets the probe
+        interval timer, so that the connection is known not to be idle."""
+        # Receiving data after a probe (Idle state) returns us to Active.
+        if self.state != Reconnect.Active:
+            self._transition(now, Reconnect.Active)
+        self.last_received = now
+
+    def _transition(self, now, state):
+        # Internal: enter 'state' at time 'now', updating the connection
+        # statistics reported by get_stats().
+        if self.state == Reconnect.ConnectInProgress:
+            self.n_attempted_connections += 1
+            if state == Reconnect.Active:
+                self.n_successful_connections += 1
+
+        # Bump the sequence number on every connected <-> disconnected edge.
+        connected_before = self.state.is_connected
+        connected_now = state.is_connected
+        if connected_before != connected_now:
+            if connected_before:
+                self.total_connected_duration += now - self.last_connected
+            self.seqno += 1
+
+        logging.debug("%s: entering %s" % (self.name, state.name))
+        self.state = state
+        self.state_entered = now
+
+ def run(self, now):
+ """Assesses whether any action should be taken on this FSM. The return
+ value is one of:
+
+ - None: The client need not take any action.
+
+ - Active client, ovs.reconnect.CONNECT: The client should start a
+ connection attempt and indicate this by calling
+ self.connecting(). If the connection attempt has definitely
+ succeeded, it should call self.connected(). If the connection
+ attempt has definitely failed, it should call
+ self.connect_failed().
+
+ The FSM is smart enough to back off correctly after successful
+ connections that quickly abort, so it is OK to call
+ self.connected() after a low-level successful connection
+ (e.g. connect()) even if the connection might soon abort due to a
+ failure at a high-level (e.g. SSL negotiation failure).
+
+ - Passive client, ovs.reconnect.CONNECT: The client should try to
+ listen for a connection, if it is not already listening. It
+ should call self.listening() if successful, otherwise
+ self.connecting() or reconnected_connect_failed() if the attempt
+ is in progress or definitely failed, respectively.
+
+ A listening passive client should constantly attempt to accept a
+ new connection and report an accepted connection with
+ self.connected().
+
+ - ovs.reconnect.DISCONNECT: The client should abort the current
+ connection or connection attempt or listen attempt and call
+ self.disconnected() or self.connect_failed() to indicate it.
+
+ - ovs.reconnect.PROBE: The client should send some kind of request
+ to the peer that will elicit a response, to ensure that the
+ connection is indeed in working order. (This will only be
+ returned if the "probe interval" is nonzero--see
+ self.set_probe_interval())."""
+ if now >= self.state.deadline(self):
+ return self.state.run(self, now)
+ else:
+ return None
+
+ def wait(self, poller, now):
+ """Causes the next call to poller.block() to wake up when self.run()
+ should be called."""
+ timeout = self.timeout(now)
+ if timeout >= 0:
+ poller.timer_wait(timeout)
+
+ def timeout(self, now):
+ """Returns the number of milliseconds after which self.run() should be
+ called if nothing else notable happens in the meantime, or a negative
+ number if this is currently unnecessary."""
+ deadline = self.state.deadline(self)
+ if deadline is not None:
+ remaining = deadline - now
+ return max(0, remaining)
+ else:
+ return None
+
+    def is_connected(self):
+        """Returns True if this FSM is currently believed to be connected, that
+        is, if self.connected() was called more recently than any call to
+        self.connect_failed() or self.disconnected() or self.disable(), and
+        False otherwise."""
+        return self.state.is_connected
+
+    def get_connection_duration(self, now):
+        """Returns the number of milliseconds for which this FSM has been
+        continuously connected to its peer.  (If this FSM is not currently
+        connected, this is 0.)"""
+        if self.is_connected():
+            return now - self.last_connected
+        else:
+            return 0
+
+    def get_stats(self, now):
+        """Returns an object whose attributes describe this FSM's current
+        status and cumulative connection statistics."""
+        class Stats(object):
+            pass
+        stats = Stats()
+        stats.creation_time = self.creation_time
+        stats.last_connected = self.last_connected
+        stats.last_received = self.last_received
+        stats.backoff = self.backoff
+        stats.seqno = self.seqno
+        stats.is_connected = self.is_connected()
+        stats.current_connection_duration = self.get_connection_duration(now)
+        stats.total_connected_duration = (stats.current_connection_duration +
+                                          self.total_connected_duration)
+        stats.n_attempted_connections = self.n_attempted_connections
+        stats.n_successful_connections = self.n_successful_connections
+        stats.state = self.state.name
+        stats.state_elapsed = now - self.state_entered
+        return stats
+
+    def __may_retry(self):
+        # Consumes one retry from the max_tries budget (if limited).
+        # Returns True if another connection attempt is allowed.
+        if self.max_tries is None:
+            return True
+        elif self.max_tries > 0:
+            self.max_tries -= 1
+            return True
+        else:
+            return False
--- /dev/null
+# Copyright (c) 2010 Nicira Networks
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import errno
+import logging
+import os
+import select
+import socket
+import sys
+
+import ovs.fatal_signal
+
+def make_unix_socket(style, nonblock, bind_path, connect_path):
+ """Creates a Unix domain socket in the given 'style' (either
+ socket.SOCK_DGRAM or socket.SOCK_STREAM) that is bound to 'bind_path' (if
+ 'bind_path' is not None) and connected to 'connect_path' (if 'connect_path'
+ is not None). If 'nonblock' is true, the socket is made non-blocking.
+
+ Returns (error, socket): on success 'error' is 0 and 'socket' is a new
+ socket object, on failure 'error' is a positive errno value and 'socket' is
+ None."""
+
+ try:
+ sock = socket.socket(socket.AF_UNIX, style)
+ except socket.error, e:
+ return get_exception_errno(e), None
+
+ try:
+ if nonblock:
+ set_nonblocking(sock)
+ if bind_path is not None:
+ # Delete bind_path but ignore ENOENT.
+ try:
+ os.unlink(bind_path)
+ except OSError, e:
+ if e.errno != errno.ENOENT:
+ return e.errno, None
+
+ ovs.fatal_signal.add_file_to_unlink(bind_path)
+ sock.bind(bind_path)
+
+ try:
+ if sys.hexversion >= 0x02060000:
+ os.fchmod(sock.fileno(), 0700)
+ else:
+ os.chmod("/dev/fd/%d" % sock.fileno(), 0700)
+ except OSError, e:
+ pass
+ if connect_path is not None:
+ try:
+ sock.connect(connect_path)
+ except socket.error, e:
+ if get_exception_errno(e) != errno.EINPROGRESS:
+ raise
+ return 0, sock
+ except socket.error, e:
+ sock.close()
+ try:
+ os.unlink(bind_path)
+ except OSError, e:
+ pass
+ if bind_path is not None:
+ ovs.fatal_signal.add_file_to_unlink(bind_path)
+ return get_exception_errno(e), None
+
+def check_connection_completion(sock):
+ p = select.poll()
+ p.register(sock, select.POLLOUT)
+ if len(p.poll(0)) == 1:
+ return get_socket_error(sock)
+ else:
+ return errno.EAGAIN
+
+def get_socket_error(sock):
+    """Returns the errno value associated with 'socket' (0 if no error) and
+    resets the socket's error status."""
+    return sock.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
+
+def get_exception_errno(e):
+    """A lot of methods on Python socket objects raise socket.error, but that
+    exception is documented as having two completely different forms of
+    arguments: either a string or a (errno, string) tuple.  We only want the
+    errno."""
+    # NOTE(review): e.args is a tuple even for the single-string form, in
+    # which case e.args[0] is a message string rather than an errno --
+    # confirm that callers tolerate a non-integer return in that case.
+    if type(e.args) == tuple:
+        return e.args[0]
+    else:
+        return errno.EPROTO
+
+# Cached file descriptor for /dev/null, shared by all callers of
+# get_null_fd().  -1 means "not opened yet".
+null_fd = -1
+def get_null_fd():
+    """Returns a readable and writable fd for /dev/null, if successful,
+    otherwise a negative errno value.  The caller must not close the returned
+    fd (because the same fd will be handed out to subsequent callers)."""
+    global null_fd
+    if null_fd < 0:
+        try:
+            null_fd = os.open("/dev/null", os.O_RDWR)
+        except OSError, e:
+            logging.error("could not open /dev/null: %s"
+                          % os.strerror(e.errno))
+            return -e.errno
+    return null_fd
+
+def write_fully(fd, buf):
+ """Returns an (error, bytes_written) tuple where 'error' is 0 on success,
+ otherwise a positive errno value, and 'bytes_written' is the number of
+ bytes that were written before the error occurred. 'error' is 0 if and
+ only if 'bytes_written' is len(buf)."""
+ bytes_written = 0
+ if len(buf) == 0:
+ return 0, 0
+ while True:
+ try:
+ retval = os.write(fd, buf)
+ assert retval >= 0
+ if retval == len(buf):
+ return 0, bytes_written + len(buf)
+ elif retval == 0:
+ logging.warning("write returned 0")
+ return errno.EPROTO, bytes_written
+ else:
+ bytes_written += retval
+ buf = buf[:retval]
+ except OSError, e:
+ return e.errno, bytes_written
+
+def set_nonblocking(sock):
+ try:
+ sock.setblocking(0)
+ except socket.error, e:
+ logging.error("could not set nonblocking mode on socket: %s"
+ % os.strerror(get_socket_error(e)))
--- /dev/null
+# Copyright (c) 2010 Nicira Networks
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import errno
+import logging
+import os
+import select
+import socket
+import sys
+
+import ovs.poller
+import ovs.socket_util
+
+class Stream(object):
+    """Bidirectional byte stream.  Currently only Unix domain sockets
+    are implemented."""
+    # Count of Unix sockets created by this process, used to generate a
+    # unique bind path per socket in open().
+    n_unix_sockets = 0
+
+    # States.
+    __S_CONNECTING = 0
+    __S_CONNECTED = 1
+    __S_DISCONNECTED = 2
+
+    # Kinds of events that one might wait for.
+    W_CONNECT = 0               # Connect complete (success or failure).
+    W_RECV = 1                  # Data received.
+    W_SEND = 2                  # Send buffer room available.
+
+    @staticmethod
+    def is_valid_name(name):
+        """Returns True if 'name' is a stream name in the form "TYPE:ARGS" and
+        TYPE is a supported stream type (currently only "unix:"), otherwise
+        False."""
+        return name.startswith("unix:")
+
+    def __init__(self, socket, name, bind_path, status):
+        # 'status' is the result of the initial connection attempt:
+        # errno.EAGAIN = still connecting, 0 = connected, anything else =
+        # failed.
+        self.socket = socket
+        self.name = name
+        self.bind_path = bind_path
+        if status == errno.EAGAIN:
+            self.state = Stream.__S_CONNECTING
+        elif status == 0:
+            self.state = Stream.__S_CONNECTED
+        else:
+            self.state = Stream.__S_DISCONNECTED
+
+        self.error = 0
+
+    @staticmethod
+    def open(name):
+        """Attempts to connect a stream to a remote peer.  'name' is a
+        connection name in the form "TYPE:ARGS", where TYPE is an active stream
+        class's name and ARGS are stream class-specific.  Currently the only
+        supported TYPE is "unix".
+
+        Returns (error, stream): on success 'error' is 0 and 'stream' is the
+        new Stream, on failure 'error' is a positive errno value and 'stream'
+        is None.
+
+        Never returns errno.EAGAIN or errno.EINPROGRESS.  Instead, returns 0
+        and a new Stream.  The connect() method can be used to check for
+        successful connection completion."""
+        if not Stream.is_valid_name(name):
+            return errno.EAFNOSUPPORT, None
+
+        # Bind to a unique path so the peer can reply; "%ld" is treated the
+        # same as "%d" by Python's % operator.
+        Stream.n_unix_sockets += 1
+        bind_path = "/tmp/stream-unix.%ld.%d" % (os.getpid(),
+                                                 Stream.n_unix_sockets)
+        connect_path = name[5:]
+        error, sock = ovs.socket_util.make_unix_socket(socket.SOCK_STREAM,
+                                                       True, bind_path,
+                                                       connect_path)
+        if error:
+            return error, None
+        else:
+            status = ovs.socket_util.check_connection_completion(sock)
+            return 0, Stream(sock, name, bind_path, status)
+
+ @staticmethod
+ def open_block(tuple):
+ """Blocks until a Stream completes its connection attempt, either
+ succeeding or failing. 'tuple' should be the tuple returned by
+ Stream.open(). Returns a tuple of the same form.
+
+ Typical usage:
+ error, stream = Stream.open_block(Stream.open("tcp:1.2.3.4:5"))"""
+
+ error, stream = tuple
+ if not error:
+ while True:
+ error = stream.connect()
+ if error != errno.EAGAIN:
+ break
+ stream.run()
+ poller = ovs.poller.Poller()
+ stream.run_wait()
+ stream.connect_wait(poller)
+ poller.block()
+ assert error != errno.EINPROGRESS
+
+ if error and stream:
+ stream.close()
+ stream = None
+ return error, stream
+
+    def close(self):
+        # Closes the underlying socket and removes any bound Unix socket
+        # file from the file system (and from the fatal-signal unlink list).
+        self.socket.close()
+        if self.bind_path is not None:
+            # NOTE(review): this module does not import ovs.fatal_signal
+            # directly; the name is only reachable because ovs.socket_util
+            # imports it.  An explicit import would be more robust.
+            ovs.fatal_signal.unlink_file_now(self.bind_path)
+        self.bind_path = None
+
+    def __scs_connecting(self):
+        # Polls the pending connect() and advances the state machine:
+        # 0 -> CONNECTED; EAGAIN -> still CONNECTING; any other errno ->
+        # DISCONNECTED with the errno recorded in self.error.
+        retval = ovs.socket_util.check_connection_completion(self.socket)
+        assert retval != errno.EINPROGRESS
+        if retval == 0:
+            self.state = Stream.__S_CONNECTED
+        elif retval != errno.EAGAIN:
+            self.state = Stream.__S_DISCONNECTED
+            self.error = retval
+
+ def connect(self):
+ """Tries to complete the connection on this stream. If the connection
+ is complete, returns 0 if the connection was successful or a positive
+ errno value if it failed. If the connection is still in progress,
+ returns errno.EAGAIN."""
+ last_state = -1 # Always differs from initial self.state
+ while self.state != last_state:
+ if self.state == Stream.__S_CONNECTING:
+ self.__scs_connecting()
+ elif self.state == Stream.__S_CONNECTED:
+ return 0
+ elif self.state == Stream.__S_DISCONNECTED:
+ return self.error
+
+ def recv(self, n):
+ """Tries to receive up to 'n' bytes from this stream. Returns a
+ (error, string) tuple:
+
+ - If successful, 'error' is zero and 'string' contains between 1
+ and 'n' bytes of data.
+
+ - On error, 'error' is a positive errno value.
+
+ - If the connection has been closed in the normal fashion or if 'n'
+ is 0, the tuple is (0, "").
+
+ The recv function will not block waiting for data to arrive. If no
+ data have been received, it returns (errno.EAGAIN, "") immediately."""
+
+ retval = self.connect()
+ if retval != 0:
+ return (retval, "")
+ elif n == 0:
+ return (0, "")
+
+ try:
+ return (0, self.socket.recv(n))
+ except socket.error, e:
+ return (ovs.socket_util.get_exception_errno(e), "")
+
+    def send(self, buf):
+        """Tries to send 'buf' on this stream.
+
+        If successful, returns the number of bytes sent, between 1 and
+        len(buf).  0 is only a valid return value if len(buf) is 0.
+
+        On error, returns a negative errno value.
+
+        Will not block.  If no bytes can be immediately accepted for
+        transmission, returns -errno.EAGAIN immediately."""
+        # Finish connecting first; errors are reported as negative errnos,
+        # unlike recv() which uses positive errnos in a tuple.
+        retval = self.connect()
+        if retval != 0:
+            return -retval
+        elif len(buf) == 0:
+            return 0
+
+        try:
+            # Nonblocking socket: send() may accept fewer bytes than len(buf).
+            return self.socket.send(buf)
+        except socket.error, e:
+            return -ovs.socket_util.get_exception_errno(e)
+
+    def run(self):
+        # Nothing to do: this stream type needs no periodic background work.
+        pass
+
+    def run_wait(self, poller):
+        # Nothing to wait for; see run().
+        pass
+
+    def wait(self, poller, wait):
+        # Registers this stream with 'poller' for the event class 'wait'
+        # (one of W_CONNECT, W_RECV, W_SEND).
+        assert wait in (Stream.W_CONNECT, Stream.W_RECV, Stream.W_SEND)
+
+        if self.state == Stream.__S_DISCONNECTED:
+            # Already failed: wake the caller immediately so it can observe
+            # the error instead of blocking forever.
+            poller.immediate_wake()
+            return
+
+        if self.state == Stream.__S_CONNECTING:
+            # Until the connection completes, the only meaningful event is
+            # connect completion, which shows up as writability.
+            wait = Stream.W_CONNECT
+        if wait in (Stream.W_CONNECT, Stream.W_SEND):
+            poller.fd_wait(self.socket, select.POLLOUT)
+        else:
+            poller.fd_wait(self.socket, select.POLLIN)
+
+    def connect_wait(self, poller):
+        # Convenience wrapper: wait for connection completion.
+        self.wait(poller, Stream.W_CONNECT)
+
+    def recv_wait(self, poller):
+        # Convenience wrapper: wait until data can be received.
+        self.wait(poller, Stream.W_RECV)
+
+    def send_wait(self, poller):
+        # Convenience wrapper: wait until data can be sent.
+        self.wait(poller, Stream.W_SEND)
+
+    def get_name(self):
+        # Returns this stream's name string as given at construction time.
+        return self.name
+
+    def __del__(self):
+        # Destructor closes only the socket, unlike close():
+        # Don't delete the file: we might have forked.
+        self.socket.close()
+
+class PassiveStream(object):
+ @staticmethod
+ def is_valid_name(name):
+ """Returns True if 'name' is a passive stream name in the form
+ "TYPE:ARGS" and TYPE is a supported passive stream type (currently only
+ "punix:"), otherwise False."""
+ return name.startswith("punix:")
+
+ def __init__(self, sock, name, bind_path):
+ self.name = name
+ self.socket = sock
+ self.bind_path = bind_path
+
+ @staticmethod
+ def open(name):
+ """Attempts to start listening for remote stream connections. 'name'
+ is a connection name in the form "TYPE:ARGS", where TYPE is an passive
+ stream class's name and ARGS are stream class-specific. Currently the
+ only supported TYPE is "punix".
+
+ Returns (error, pstream): on success 'error' is 0 and 'pstream' is the
+ new PassiveStream, on failure 'error' is a positive errno value and
+ 'pstream' is None."""
+ if not PassiveStream.is_valid_name(name):
+ return errno.EAFNOSUPPORT, None
+
+ bind_path = name[6:]
+ error, sock = ovs.socket_util.make_unix_socket(socket.SOCK_STREAM,
+ True, bind_path, None)
+ if error:
+ return error, None
+
+ try:
+ sock.listen(10)
+ except socket.error, e:
+ logging.error("%s: listen: %s" % (name, os.strerror(e.error)))
+ sock.close()
+ return e.error, None
+
+ return 0, PassiveStream(sock, name, bind_path)
+
+ def close(self):
+ """Closes this PassiveStream."""
+ self.socket.close()
+ if self.bind_path is not None:
+ ovs.fatal_signal.unlink_file_now(self.bind_path)
+ self.bind_path = None
+
+ def accept(self):
+ """Tries to accept a new connection on this passive stream. Returns
+ (error, stream): if successful, 'error' is 0 and 'stream' is the new
+ Stream object, and on failure 'error' is a positive errno value and
+ 'stream' is None.
+
+ Will not block waiting for a connection. If no connection is ready to
+ be accepted, returns (errno.EAGAIN, None) immediately."""
+
+ while True:
+ try:
+ sock, addr = self.socket.accept()
+ ovs.socket_util.set_nonblocking(sock)
+ return 0, Stream(sock, "unix:%s" % addr, None, 0)
+ except socket.error, e:
+ error = ovs.socket_util.get_exception_errno(e)
+ if error != errno.EAGAIN:
+ # XXX rate-limit
+ logging.debug("accept: %s" % os.strerror(error))
+ return error, None
+
+ def wait(self, poller):
+ poller.fd_wait(self.socket, select.POLLIN)
+
+ def __del__(self):
+ # Don't delete the file: we might have forked.
+ self.socket.close()
+
+def usage(name, active, passive, bootstrap):
+    """Prints a usage message describing the stream connection methods this
+    module supports.  'name' names the kind of connection being described;
+    'active' and 'passive' select which sections to print.
+
+    NOTE(review): 'bootstrap' is accepted but unused here -- presumably kept
+    for signature compatibility with other stream modules; confirm with
+    callers."""
+    print
+    if active:
+        print("Active %s connection methods:" % name)
+        print("  unix:FILE               "
+              "Unix domain socket named FILE");
+
+    if passive:
+        print("Passive %s connection methods:" % name)
+        print("  punix:FILE              "
+              "listen on Unix domain socket FILE")
--- /dev/null
+# Copyright (c) 2009, 2010 Nicira Networks
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import time
+
+def msec():
+    """Returns the current time, as the amount of time since the epoch, in
+    milliseconds, as a float."""
+    # time.time() returns seconds as a float; scale to milliseconds.
+    return time.time() * 1000.0
+
+def postfork():
+    # Just a stub for now.  NOTE(review): placeholder hook called after
+    # fork(); there is no time-related state to reset here yet.
+    pass
--- /dev/null
+# Copyright (c) 2010 Nicira Networks
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import sys
+
+_argv0 = sys.argv[0]
+# Basename of the running program.  rfind() returns -1 when there is no '/',
+# so the slice then starts at 0 and keeps the whole string.
+PROGRAM_NAME = _argv0[_argv0.rfind('/') + 1:]
+
+def abs_file_name(dir, file_name):
+    """If 'file_name' starts with '/', returns a copy of 'file_name'.
+    Otherwise, returns an absolute path to 'file_name' considering it relative
+    to 'dir', which itself must be absolute.  'dir' may be None or the empty
+    string, in which case the current working directory is used.
+
+    Returns None if 'dir' is null and getcwd() fails.
+
+    This differs from os.path.abspath() in that it will never change the
+    meaning of a file name."""
+    if file_name.startswith('/'):
+        # Already absolute: return it untouched (no normalization).
+        return file_name
+    else:
+        if dir is None or dir == "":
+            try:
+                dir = os.getcwd()
+            except OSError:
+                # Documented failure mode: cwd is unavailable.
+                return None
+
+        # Join without collapsing '..' or symlinks, unlike os.path.abspath().
+        if dir.endswith('/'):
+            return dir + file_name
+        else:
+            return "%s/%s" % (dir, file_name)
HAVE_PYTHON='@HAVE_PYTHON@'
PERL='@PERL@'
PYTHON='@PYTHON@'
+
+PYTHONPATH=$PYTHONPATH:$abs_top_srcdir/python
+export PYTHONPATH
tests/classifier.at \
tests/check-structs.at \
tests/daemon.at \
+ tests/daemon-py.at \
tests/vconn.at \
tests/dir_name.at \
tests/aes128.at \
tests/uuid.at \
tests/json.at \
tests/jsonrpc.at \
+ tests/jsonrpc-py.at \
tests/timeval.at \
tests/lockfile.at \
tests/reconnect.at \
tests/ovsdb-server.at \
tests/ovsdb-monitor.at \
tests/ovsdb-idl.at \
+ tests/ovsdb-idl-py.at \
tests/ovs-vsctl.at \
tests/interface-reconfigure.at
TESTSUITE = $(srcdir)/tests/testsuite
tests/testpki-privkey2.pem \
tests/testpki-req.pem \
tests/testpki-req2.pem
+
+# Python tests.
+EXTRA_DIST += \
+ tests/test-daemon.py \
+ tests/test-json.py \
+ tests/test-jsonrpc.py \
+ tests/test-ovsdb.py \
+ tests/test-reconnect.py
--- /dev/null
+AT_BANNER([daemon unit tests - Python])
+
+AT_SETUP([daemon - Python])
+AT_SKIP_IF([test $HAVE_PYTHON = no])
+AT_CAPTURE_FILE([pid])
+AT_CAPTURE_FILE([expected])
+# Start the daemon and wait for the pidfile to get created
+# and that its contents are the correct pid.
+AT_CHECK([$PYTHON $srcdir/test-daemon.py --pidfile-name=$PWD/pid& echo $! > expected], [0], [ignore], [ignore])
+OVS_WAIT_UNTIL([test -s pid], [kill `cat expected`])
+# Without --detach the shell's background pid and the pidfile contents
+# must name the same process.
+AT_CHECK(
+  [pid=`cat pid` && expected=`cat expected` && test "$pid" = "$expected"],
+  [0], [], [], [kill `cat expected`])
+AT_CHECK([kill -0 `cat pid`], [0], [], [], [kill `cat expected`])
+# Kill the daemon and make sure that the pidfile gets deleted.
+kill `cat expected`
+OVS_WAIT_WHILE([kill -0 `cat expected`])
+AT_CHECK([test ! -e pid])
+AT_CLEANUP
+
+AT_SETUP([daemon --monitor - Python])
+AT_SKIP_IF([test $HAVE_PYTHON = no])
+AT_CAPTURE_FILE([pid])
+AT_CAPTURE_FILE([parent])
+AT_CAPTURE_FILE([parentpid])
+AT_CAPTURE_FILE([newpid])
+# With --monitor, our direct child is the monitor process and the pidfile
+# names the daemon it supervises; the monitor must respawn the daemon if
+# it dies abnormally.
+# Start the daemon and wait for the pidfile to get created.
+AT_CHECK([$PYTHON $srcdir/test-daemon.py --pidfile-name=$PWD/pid --monitor& echo $! > parent], [0], [ignore], [ignore])
+OVS_WAIT_UNTIL([test -s pid], [kill `cat parent`])
+# Check that the pidfile names a running process,
+# and that the parent process of that process is our child process.
+AT_CHECK([kill -0 `cat pid`], [0], [], [], [kill `cat parent`])
+AT_CHECK([ps -o ppid= -p `cat pid` > parentpid],
+  [0], [], [], [kill `cat parent`])
+AT_CHECK(
+  [parentpid=`cat parentpid` &&
+   parent=`cat parent` &&
+   test $parentpid = $parent],
+  [0], [], [], [kill `cat parent`])
+# Kill the daemon process, making it look like a segfault,
+# and wait for a new child process to get spawned.
+AT_CHECK([cp pid oldpid], [0], [], [], [kill `cat parent`])
+AT_CHECK([kill -SEGV `cat pid`], [0], [], [ignore], [kill `cat parent`])
+OVS_WAIT_WHILE([kill -0 `cat oldpid`], [kill `cat parent`])
+OVS_WAIT_UNTIL([test -s pid && test `cat pid` != `cat oldpid`],
+  [kill `cat parent`])
+AT_CHECK([cp pid newpid], [0], [], [], [kill `cat parent`])
+# Check that the pidfile names a running process,
+# and that the parent process of that process is our child process.
+AT_CHECK([ps -o ppid= -p `cat pid` > parentpid],
+  [0], [], [], [kill `cat parent`])
+AT_CHECK(
+  [parentpid=`cat parentpid` &&
+   parent=`cat parent` &&
+   test $parentpid = $parent],
+  [0], [], [], [kill `cat parent`])
+# Kill the daemon process with SIGTERM, and wait for the daemon
+# and the monitor processes to go away and the pidfile to get deleted.
+AT_CHECK([kill `cat pid`], [0], [], [ignore], [kill `cat parent`])
+OVS_WAIT_WHILE([kill -0 `cat parent` || kill -0 `cat newpid` || test -e pid],
+  [kill `cat parent`])
+AT_CLEANUP
+
+AT_SETUP([daemon --detach - Python])
+AT_SKIP_IF([test $HAVE_PYTHON = no])
+AT_CAPTURE_FILE([pid])
+# Start the daemon and make sure that the pidfile exists immediately.
+# We don't wait for the pidfile to get created because the daemon is
+# supposed to do so before the parent exits.
+AT_CHECK([$PYTHON $srcdir/test-daemon.py --pidfile-name=$PWD/pid --detach], [0], [ignore], [ignore])
+AT_CHECK([test -s pid])
+AT_CHECK([kill -0 `cat pid`])
+# Kill the daemon and make sure that the pidfile gets deleted.
+# Copy the pidfile first: the daemon removes it as it exits, but we still
+# need the pid to poll for process termination.
+cp pid saved-pid
+kill `cat pid`
+OVS_WAIT_WHILE([kill -0 `cat saved-pid`])
+AT_CHECK([test ! -e pid])
+AT_CLEANUP
+
+AT_SETUP([daemon --detach --monitor - Python])
+AT_SKIP_IF([test $HAVE_PYTHON = no])
+# CHECK is a local shorthand for AT_CHECK that, on failure, always cleans
+# up both the daemon and the monitor processes.
+m4_define([CHECK],
+  [AT_CHECK([$1], [$2], [$3], [$4], [kill `cat daemon monitor`])])
+AT_CAPTURE_FILE([daemon])
+AT_CAPTURE_FILE([olddaemon])
+AT_CAPTURE_FILE([newdaemon])
+AT_CAPTURE_FILE([monitor])
+AT_CAPTURE_FILE([newmonitor])
+AT_CAPTURE_FILE([init])
+# Start the daemon and make sure that the pidfile exists immediately.
+# We don't wait for the pidfile to get created because the daemon is
+# supposed to do so before the parent exits.
+AT_CHECK([$PYTHON $srcdir/test-daemon.py --pidfile-name=$PWD/daemon --detach --monitor], [0], [ignore], [ignore])
+AT_CHECK([test -s daemon])
+# Check that the pidfile names a running process,
+# and that the parent process of that process is a running process,
+# and that the parent process of that process is init.
+CHECK([kill -0 `cat daemon`])
+CHECK([ps -o ppid= -p `cat daemon` > monitor])
+CHECK([kill -0 `cat monitor`])
+CHECK([ps -o ppid= -p `cat monitor` > init])
+CHECK([test `cat init` = 1])
+# Kill the daemon process, making it look like a segfault,
+# and wait for a new daemon process to get spawned.
+CHECK([cp daemon olddaemon])
+CHECK([kill -SEGV `cat daemon`], [0], [ignore], [ignore])
+OVS_WAIT_WHILE([kill -0 `cat olddaemon`], [kill `cat olddaemon daemon`])
+OVS_WAIT_UNTIL([test -s daemon && test `cat daemon` != `cat olddaemon`],
+  [kill `cat olddaemon daemon`])
+CHECK([cp daemon newdaemon])
+# Check that the pidfile names a running process,
+# and that the parent process of that process is our child process.
+CHECK([kill -0 `cat daemon`])
+CHECK([diff olddaemon newdaemon], [1], [ignore])
+CHECK([ps -o ppid= -p `cat daemon` > newmonitor])
+CHECK([diff monitor newmonitor])
+CHECK([kill -0 `cat newmonitor`])
+CHECK([ps -o ppid= -p `cat newmonitor` > init])
+CHECK([test `cat init` = 1])
+# Kill the daemon process with SIGTERM, and wait for the daemon
+# and the monitor processes to go away and the pidfile to get deleted.
+CHECK([kill `cat daemon`], [0], [], [ignore])
+OVS_WAIT_WHILE(
+  [kill -0 `cat monitor` || kill -0 `cat newdaemon` || test -e daemon],
+  [kill `cat monitor newdaemon`])
+m4_undefine([CHECK])
+AT_CLEANUP
+
+AT_SETUP([daemon --detach startup errors - Python])
+AT_SKIP_IF([test $HAVE_PYTHON = no])
+AT_CAPTURE_FILE([pid])
+# --bail makes the child report a startup failure after daemonize_start():
+# the parent must then exit with status 1 and leave no pidfile behind.
+AT_CHECK([$PYTHON $srcdir/test-daemon.py --pidfile-name=$PWD/pid --detach --bail], [1], [], [stderr])
+AT_CHECK([grep 'test-daemon.py: exiting after daemonize_start() as requested' stderr],
+  [0], [ignore], [])
+AT_CHECK([test ! -s pid])
+AT_CLEANUP
+
+AT_SETUP([daemon --detach --monitor startup errors - Python])
+AT_SKIP_IF([test $HAVE_PYTHON = no])
+AT_CAPTURE_FILE([pid])
+# Same as the previous test, but the startup error must also propagate
+# through the monitor process.
+AT_CHECK([$PYTHON $srcdir/test-daemon.py --pidfile-name=$PWD/pid --detach --monitor --bail], [1], [], [stderr])
+AT_CHECK([grep 'test-daemon.py: exiting after daemonize_start() as requested' stderr],
+  [0], [ignore], [])
+AT_CHECK([test ! -s pid])
+AT_CLEANUP
+
+AT_SETUP([daemon --detach closes standard fds - Python])
+AT_SKIP_IF([test $HAVE_PYTHON = no])
+AT_CAPTURE_FILE([pid])
+AT_CAPTURE_FILE([status])
+AT_CAPTURE_FILE([stderr])
+# The detached daemon must close its standard fds, so 'yes' writing into
+# the pipe should get EPIPE/SIGPIPE once the daemon detaches.
+AT_CHECK([(yes 2>stderr; echo $? > status) | $PYTHON $srcdir/test-daemon.py --pidfile-name=$PWD/pid --detach], [0], [], [])
+AT_CHECK([kill `cat pid`])
+AT_CHECK([test -s status])
+if grep '[[bB]]roken pipe' stderr >/dev/null 2>&1; then
+  # Something in the environment caused SIGPIPE to be ignored, but
+  # 'yes' at least told us that it got EPIPE.  Good enough; we know
+  # that stdout was closed.
+  :
+else
+  # Otherwise make sure that 'yes' died from SIGPIPE.
+  AT_CHECK([kill -l `cat status`], [0], [PIPE
+])
+fi
+AT_CLEANUP
+
+AT_SETUP([daemon --detach --monitor closes standard fds - Python])
+AT_SKIP_IF([test $HAVE_PYTHON = no])
+AT_CAPTURE_FILE([pid])
+AT_CAPTURE_FILE([status])
+AT_CAPTURE_FILE([stderr])
+# Same as the previous test, but with --monitor: both the monitor and the
+# daemon must close the inherited standard fds.  (This test previously
+# lacked the " - Python" title suffix and the HAVE_PYTHON skip, carried a
+# stray OVSDB_INIT from a copy-paste, and forgot the --monitor flag that
+# its title advertises.)
+AT_CHECK([(yes 2>stderr; echo $? > status) | $PYTHON $srcdir/test-daemon.py --pidfile-name=$PWD/pid --detach --monitor], [0], [], [])
+AT_CHECK([kill `cat pid`])
+AT_CHECK([test -s status])
+if grep '[[bB]]roken pipe' stderr >/dev/null 2>&1; then
+  # Something in the environment caused SIGPIPE to be ignored, but
+  # 'yes' at least told us that it got EPIPE.  Good enough; we know
+  # that stdout was closed.
+  :
+else
+  # Otherwise make sure that 'yes' died from SIGPIPE.
+  AT_CHECK([kill -l `cat status`], [0], [PIPE
+])
+fi
+AT_CLEANUP
-AT_BANNER([daemon unit tests])
+AT_BANNER([daemon unit tests - C])
AT_SETUP([daemon])
AT_SKIP_IF([test "$CHECK_LCOV" = true]) # lcov wrapper make pids differ
-m4_define([JSON_CHECK_POSITIVE],
+m4_define([JSON_CHECK_POSITIVE_C],
[AT_SETUP([$1])
AT_KEYWORDS([json positive])
AT_CHECK([printf %s "AS_ESCAPE([$2])" > input])
])
AT_CLEANUP])
-m4_define([JSON_CHECK_NEGATIVE],
+dnl JSON_CHECK_POSITIVE_PY(title, input, expected-output, [test-json options])
+dnl Runs the Python JSON parser on 'input' and requires exact output match.
+m4_define([JSON_CHECK_POSITIVE_PY],
+  [AT_SETUP([$1])
+   AT_KEYWORDS([json positive Python])
+   AT_SKIP_IF([test $HAVE_PYTHON = no])
+   AT_CHECK([printf %s "AS_ESCAPE([$2])" > input])
+   AT_CAPTURE_FILE([input])
+   AT_CHECK([$PYTHON $srcdir/test-json.py $4 input], [0], [stdout], [])
+   AT_CHECK([cat stdout], [0], [$3
+])
+   AT_CLEANUP])
+
+dnl JSON_CHECK_POSITIVE instantiates the same case for both implementations.
+m4_define([JSON_CHECK_POSITIVE],
+  [JSON_CHECK_POSITIVE_C([$1 - C], [$2], [$3], [$4])
+   JSON_CHECK_POSITIVE_PY([$1 - Python], [$2], [$3], [$4])])
+
+m4_define([JSON_CHECK_NEGATIVE_C],
[AT_SETUP([$1])
AT_KEYWORDS([json negative])
AT_CHECK([printf %s "AS_ESCAPE([$2])" > input])
])
AT_CLEANUP])
+dnl JSON_CHECK_NEGATIVE_PY(title, input, expected-error, [test-json options])
+dnl Runs the Python JSON parser on bad 'input' and requires exit status 1.
+m4_define([JSON_CHECK_NEGATIVE_PY],
+  [AT_SETUP([$1])
+   AT_KEYWORDS([json negative Python])
+   AT_SKIP_IF([test $HAVE_PYTHON = no])
+   AT_CHECK([printf %s "AS_ESCAPE([$2])" > input])
+   AT_CAPTURE_FILE([input])
+   AT_CHECK([$PYTHON $srcdir/test-json.py $4 input], [1], [stdout], [])
+   AT_CHECK([[sed 's/^error: [^:]*:/error:/' < stdout]], [0], [$3
+])
+   AT_CLEANUP])
+
+dnl JSON_CHECK_NEGATIVE instantiates the same case for both implementations.
+dnl The sed above strips the variable position prefix from the error line so
+dnl that C and Python can share one expected message.
+m4_define([JSON_CHECK_NEGATIVE],
+  [JSON_CHECK_NEGATIVE_C([$1 - C], [$2], [$3], [$4])
+   JSON_CHECK_NEGATIVE_PY([$1 - Python], [$2], [$3], [$4])])
+
AT_BANNER([JSON -- arrays])
JSON_CHECK_POSITIVE([empty array], [[ [ ] ]], [[[]]])
[[["\u0000"]]],
[error: null bytes not supported in quoted strings])
-AT_SETUP([end of input in quoted string])
+AT_SETUP([end of input in quoted string - C])
AT_KEYWORDS([json negative])
AT_CHECK([printf '"xxx' | test-json -], [1],
[error: line 0, column 4, byte 4: unexpected end of input in quoted string
])
AT_CLEANUP
+AT_SETUP([end of input in quoted string - Python])
+AT_KEYWORDS([json negative Python])
+AT_SKIP_IF([test $HAVE_PYTHON = no])
+# Written out longhand (not via JSON_CHECK_NEGATIVE) because the C twin of
+# this test feeds test-json from stdin rather than from a file.
+AT_CHECK([printf '"xxx' > input
+$PYTHON $srcdir/test-json.py input], [1],
+  [error: line 0, column 4, byte 4: unexpected end of input in quoted string
+])
+AT_CLEANUP
+
AT_BANNER([JSON -- objects])
JSON_CHECK_POSITIVE([empty object], [[{ }]], [[{}]])
--- /dev/null
+AT_BANNER([JSON-RPC - Python])
+
+AT_SETUP([JSON-RPC request and successful reply - Python])
+AT_SKIP_IF([test $HAVE_PYTHON = no])
+# Start a detached listener, then make one echo request against it.
+AT_CHECK([$PYTHON $srcdir/test-jsonrpc.py --detach --pidfile-name=$PWD/pid listen punix:socket])
+AT_CHECK([test -s pid])
+AT_CHECK([kill -0 `cat pid`])
+AT_CHECK(
+  [[$PYTHON $srcdir/test-jsonrpc.py request unix:socket echo '[{"a": "b", "x": null}]']], [0],
+  [[{"error":null,"id":0,"result":[{"a":"b","x":null}]}
+]], [ignore], [test ! -e pid || kill `cat pid`])
+AT_CHECK([kill `cat pid`])
+AT_CLEANUP
+
+AT_SETUP([JSON-RPC request and error reply - Python])
+AT_SKIP_IF([test $HAVE_PYTHON = no])
+# An unknown method must produce an error reply, not a crash.
+AT_CHECK([$PYTHON $srcdir/test-jsonrpc.py --detach --pidfile-name=$PWD/pid listen punix:socket])
+AT_CHECK([test -s pid])
+AT_CHECK([kill -0 `cat pid`])
+AT_CHECK(
+  [[$PYTHON $srcdir/test-jsonrpc.py request unix:socket bad-request '[]']], [0],
+  [[{"error":{"error":"unknown method"},"id":0,"result":null}
+]], [ignore], [test ! -e pid || kill `cat pid`])
+AT_CHECK([kill `cat pid`])
+AT_CLEANUP
+
+AT_SETUP([JSON-RPC notification - Python])
+AT_SKIP_IF([test $HAVE_PYTHON = no])
+AT_CHECK([$PYTHON $srcdir/test-jsonrpc.py --detach --pidfile-name=$PWD/pid listen punix:socket])
+AT_CHECK([test -s pid])
+# When a daemon dies it deletes its pidfile, so make a copy.
+AT_CHECK([cp pid pid2])
+AT_CHECK([kill -0 `cat pid2`])
+# The "shutdown" notification asks the server to exit on its own.
+AT_CHECK([[$PYTHON $srcdir/test-jsonrpc.py notify unix:socket shutdown '[]']], [0], [],
+  [ignore], [kill `cat pid2`])
+AT_CHECK(
+  [pid=`cat pid2`
+   # First try a quick sleep, so that the test completes very quickly
+   # in the normal case.  POSIX doesn't require fractional times to
+   # work, so this might not work.
+   sleep 0.1; if kill -0 $pid; then :; else echo success; exit 0; fi
+   # Then wait up to 2 seconds.
+   sleep 1; if kill -0 $pid; then :; else echo success; exit 0; fi
+   sleep 1; if kill -0 $pid; then :; else echo success; exit 0; fi
+   echo failure; exit 1], [0], [success
+], [ignore])
+AT_CHECK([test ! -e pid])
+AT_CLEANUP
-AT_BANNER([JSON-RPC])
+AT_BANNER([JSON-RPC - C])
AT_SETUP([JSON-RPC request and successful reply])
AT_CHECK([test-jsonrpc --detach --pidfile=$PWD/pid listen punix:socket])
AT_BANNER([OVSDB -- columns])
-OVSDB_CHECK_POSITIVE([ordinary column],
+OVSDB_CHECK_POSITIVE_CPY([ordinary column],
[[parse-column mycol '{"type": "integer"}']],
[[{"type":"integer"}]])
-OVSDB_CHECK_POSITIVE([immutable column],
+OVSDB_CHECK_POSITIVE_CPY([immutable column],
[[parse-column mycol '{"type": "real", "mutable": false}']],
[[{"mutable":false,"type":"real"}]])
-OVSDB_CHECK_POSITIVE([ephemeral column],
+OVSDB_CHECK_POSITIVE_CPY([ephemeral column],
[[parse-column mycol '{"type": "uuid", "ephemeral": true}']],
[[{"ephemeral":true,"type":"uuid"}]])
AT_BANNER([OVSDB -- default values])
-OVSDB_CHECK_POSITIVE([default atoms],
+OVSDB_CHECK_POSITIVE_CPY([default atoms],
[default-atoms],
[[integer: OK
real: OK
string: OK
uuid: OK]])
-OVSDB_CHECK_POSITIVE([default data],
+OVSDB_CHECK_POSITIVE_CPY([default data],
[default-data],
[[key integer, value void, n_min 0: OK
key integer, value integer, n_min 0: OK
AT_BANNER([OVSDB -- atoms without constraints])
-OVSDB_CHECK_POSITIVE([integer atom from JSON],
+OVSDB_CHECK_POSITIVE_CPY([integer atom from JSON],
[[parse-atoms '["integer"]' \
'[0]' \
'[-1]' \
9223372036854775807
-9223372036854775808])
-OVSDB_CHECK_POSITIVE([real atom from JSON],
+OVSDB_CHECK_POSITIVE_CPY([real atom from JSON],
[[parse-atoms '["real"]' \
'[0]' \
'[0.0]' \
1e+37
0.00390625])
-OVSDB_CHECK_POSITIVE([boolean atom from JSON],
+OVSDB_CHECK_POSITIVE_CPY([boolean atom from JSON],
[[parse-atoms '["boolean"]' '[true]' '[false]' ]],
[true
false])
[true
false])
-OVSDB_CHECK_POSITIVE([string atom from JSON],
+OVSDB_CHECK_POSITIVE_CPY([string atom from JSON],
[[parse-atoms '["string"]' '[""]' '["true"]' '["\"\\\/\b\f\n\r\t"]']],
[""
"true"
"true"
"\"\\/\b\f\n\r\t"])
-OVSDB_CHECK_POSITIVE([uuid atom from JSON],
+OVSDB_CHECK_POSITIVE_CPY([uuid atom from JSON],
[[parse-atoms '["uuid"]' '["uuid", "550e8400-e29b-41d4-a716-446655440000"]']],
[[["uuid","550e8400-e29b-41d4-a716-446655440000"]]])
[[parse-atom-strings '["uuid"]' '550e8400-e29b-41d4-a716-446655440000']],
[550e8400-e29b-41d4-a716-446655440000])
-OVSDB_CHECK_POSITIVE([integer atom sorting],
+OVSDB_CHECK_POSITIVE_CPY([integer atom sorting],
[[sort-atoms '["integer"]' '[55,0,-1,2,1]']],
[[[-1,0,1,2,55]]])
-OVSDB_CHECK_POSITIVE([real atom sorting],
+OVSDB_CHECK_POSITIVE_CPY([real atom sorting],
[[sort-atoms '["real"]' '[1.25,1.23,0.0,-0.0,-1e99]']],
[[[-1e+99,0,0,1.23,1.25]]])
-OVSDB_CHECK_POSITIVE([boolean atom sorting],
+OVSDB_CHECK_POSITIVE_CPY([boolean atom sorting],
[[sort-atoms '["boolean"]' '[true,false,true,false,false]']],
[[[false,false,false,true,true]]])
-OVSDB_CHECK_POSITIVE([string atom sorting],
+OVSDB_CHECK_POSITIVE_CPY([string atom sorting],
[[sort-atoms '["string"]' '["abd","abc","\b","xxx"]']],
[[["\b","abc","abd","xxx"]]])
-OVSDB_CHECK_POSITIVE([uuid atom sorting],
+OVSDB_CHECK_POSITIVE_CPY([uuid atom sorting],
[[sort-atoms '["uuid"]' '[
["uuid", "00000000-0000-0000-0000-000000000001"],
["uuid", "00000000-1000-0000-0000-000000000000"],
["uuid", "00001000-0000-0000-0000-000000000000"]]']],
[[[["uuid","00000000-0000-0000-0000-000000000000"],["uuid","00000000-0000-0000-0000-000000000001"],["uuid","00000000-0000-0000-0000-000000000010"],["uuid","00000000-0000-0000-0000-000000000100"],["uuid","00000000-0000-0000-0000-000000001000"],["uuid","00000000-0000-0000-0000-000000010000"],["uuid","00000000-0000-0000-0000-000000100000"],["uuid","00000000-0000-0000-0000-000001000000"],["uuid","00000000-0000-0000-0000-000010000000"],["uuid","00000000-0000-0000-0000-000100000000"],["uuid","00000000-0000-0000-0000-001000000000"],["uuid","00000000-0000-0000-0000-010000000000"],["uuid","00000000-0000-0000-0000-100000000000"],["uuid","00000000-0000-0000-0001-000000000000"],["uuid","00000000-0000-0000-0010-000000000000"],["uuid","00000000-0000-0000-0100-000000000000"],["uuid","00000000-0000-0000-1000-000000000000"],["uuid","00000000-0000-0001-0000-000000000000"],["uuid","00000000-0000-0010-0000-000000000000"],["uuid","00000000-0000-0100-0000-000000000000"],["uuid","00000000-0000-1000-0000-000000000000"],["uuid","00000000-0001-0000-0000-000000000000"],["uuid","00000000-0010-0000-0000-000000000000"],["uuid","00000000-0100-0000-0000-000000000000"],["uuid","00000000-1000-0000-0000-000000000000"],["uuid","00000001-0000-0000-0000-000000000000"],["uuid","00000010-0000-0000-0000-000000000000"],["uuid","00000100-0000-0000-0000-000000000000"],["uuid","00001000-0000-0000-0000-000000000000"],["uuid","00010000-0000-0000-0000-000000000000"],["uuid","00100000-0000-0000-0000-000000000000"],["uuid","01000000-0000-0000-0000-000000000000"],["uuid","10000000-0000-0000-0000-000000000000"]]]])
-OVSDB_CHECK_POSITIVE([real not acceptable integer JSON atom],
+OVSDB_CHECK_POSITIVE_CPY([real not acceptable integer JSON atom],
[[parse-atoms '["integer"]' '[0.5]' ]],
[syntax "0.5": syntax error: expected integer])
dnl <C0> is not allowed anywhere in a UTF-8 string.
dnl <ED A0 80> is a surrogate and not allowed in UTF-8.
-OVSDB_CHECK_POSITIVE([no invalid UTF-8 sequences in strings],
+OVSDB_CHECK_POSITIVE_CPY([no invalid UTF-8 sequences in strings],
[parse-atoms '[["string"]]' \
'@<:@"m4_esyscmd([printf "\300"])"@:>@' \
'@<:@"m4_esyscmd([printf "\355\240\200"])"@:>@' \
],
[constraint violation: "m4_esyscmd([printf "\300"])" is not a valid UTF-8 string: invalid UTF-8 sequence 0xc0
-constraint violation: "m4_esyscmd([printf "\355\240\200"])" is not a valid UTF-8 string: invalid UTF-8 sequence 0xed 0xa0])
+constraint violation: "m4_esyscmd([printf "\355\240\200"])" is not a valid UTF-8 string: invalid UTF-8 sequence 0xed 0xa0],
+ [], [], [xfail])
OVSDB_CHECK_NEGATIVE([real not acceptable integer string atom],
[[parse-atom-strings '["integer"]' '0.5' ]],
["0.5" is not a valid integer])
-OVSDB_CHECK_POSITIVE([string "true" not acceptable boolean JSON atom],
+OVSDB_CHECK_POSITIVE_CPY([string "true" not acceptable boolean JSON atom],
[[parse-atoms '["boolean"]' '["true"]' ]],
[syntax ""true"": syntax error: expected boolean])
[[parse-atom-strings '["boolean"]' '"true"' ]],
[""true"" is not a valid boolean (use "true" or "false")])
-OVSDB_CHECK_POSITIVE([integer not acceptable string JSON atom],
+OVSDB_CHECK_POSITIVE_CPY([integer not acceptable string JSON atom],
[[parse-atoms '["string"]' '[1]']],
[syntax "1": syntax error: expected string])
-OVSDB_CHECK_POSITIVE([uuid atom must be expressed as JSON array],
+OVSDB_CHECK_POSITIVE_CPY([uuid atom must be expressed as JSON array],
[[parse-atoms '["uuid"]' '["550e8400-e29b-41d4-a716-446655440000"]']],
[[syntax ""550e8400-e29b-41d4-a716-446655440000"": syntax error: expected ["uuid", <string>]]])
\f
AT_BANNER([OVSDB -- atoms with enum constraints])
-OVSDB_CHECK_POSITIVE([integer atom enum],
+OVSDB_CHECK_POSITIVE_CPY([integer atom enum],
[[parse-atoms '[{"type": "integer", "enum": ["set", [1, 6, 8, 10]]}]' \
'[0]' \
'[1]' \
10
constraint violation: 11 is not one of the allowed values ([1, 6, 8, 10])]])
-OVSDB_CHECK_POSITIVE([real atom enum],
+OVSDB_CHECK_POSITIVE_CPY([real atom enum],
[[parse-atoms '[{"type": "real", "enum": ["set", [-1.5, 1.5]]}]' \
'[-2]' \
'[-1]' \
1.5
constraint violation: 2 is not one of the allowed values ([-1.5, 1.5])]])
-OVSDB_CHECK_POSITIVE([boolean atom enum],
+OVSDB_CHECK_POSITIVE_CPY([boolean atom enum],
[[parse-atoms '[{"type": "boolean", "enum": false}]' \
'[false]' \
'[true]']],
[[false
constraint violation: true is not one of the allowed values ([false])]])
-OVSDB_CHECK_POSITIVE([string atom enum],
+OVSDB_CHECK_POSITIVE_CPY([string atom enum],
[[parse-atoms '[{"type": "string", "enum": ["set", ["abc", "def"]]}]' \
'[""]' \
'["ab"]' \
constraint violation: defg is not one of the allowed values ([abc, def])
constraint violation: DEF is not one of the allowed values ([abc, def])]])
-OVSDB_CHECK_POSITIVE([uuid atom enum],
+OVSDB_CHECK_POSITIVE_CPY([uuid atom enum],
[[parse-atoms '[{"type": "uuid", "enum": ["set", [["uuid", "6d53a6dd-2da7-4924-9927-97f613812382"], ["uuid", "52cbc842-137a-4db5-804f-9f34106a0ba3"]]]}]' \
'["uuid", "6d53a6dd-2da7-4924-9927-97f613812382"]' \
'["uuid", "52cbc842-137a-4db5-804f-9f34106a0ba3"]' \
\f
AT_BANNER([OVSDB -- atoms with other constraints])
-OVSDB_CHECK_POSITIVE([integers >= 5],
+OVSDB_CHECK_POSITIVE_CPY([integers >= 5],
[[parse-atoms '[{"type": "integer", "minInteger": 5}]' \
'[0]' \
'[4]' \
6
12345])
-OVSDB_CHECK_POSITIVE([integers <= -1],
+OVSDB_CHECK_POSITIVE_CPY([integers <= -1],
[[parse-atoms '[{"type": "integer", "maxInteger": -1}]' \
'[0]' \
'[-1]' \
-2
-123])
-OVSDB_CHECK_POSITIVE([integers in range -10 to 10],
+OVSDB_CHECK_POSITIVE_CPY([integers in range -10 to 10],
[[parse-atoms '[{"type": "integer", "minInteger": -10, "maxInteger": 10}]' \
'[-20]' \
'[-11]' \
constraint violation: 11 is not in the valid range -10 to 10 (inclusive)
constraint violation: 123576 is not in the valid range -10 to 10 (inclusive)])
-OVSDB_CHECK_POSITIVE([reals >= 5],
+OVSDB_CHECK_POSITIVE_CPY([reals >= 5],
[[parse-atoms '[{"type": "real", "minReal": 5}]' \
'[0]' \
'[4]' \
6
12345])
-OVSDB_CHECK_POSITIVE([reals <= -1],
+OVSDB_CHECK_POSITIVE_CPY([reals <= -1],
[[parse-atoms '[{"type": "real", "maxReal": -1}]' \
'[0]' \
'[-1]' \
-2
-123])
-OVSDB_CHECK_POSITIVE([reals in range -10 to 10],
+OVSDB_CHECK_POSITIVE_CPY([reals in range -10 to 10],
[[parse-atoms '[{"type": "real", "minReal": -10, "maxReal": 10}]' \
'[-20]' \
'[-11]' \
constraint violation: 11 is not in the valid range -10 to 10 (inclusive)
constraint violation: 123576 is not in the valid range -10 to 10 (inclusive)])
-OVSDB_CHECK_POSITIVE([strings at least 2 characters long],
+OVSDB_CHECK_POSITIVE_CPY([strings at least 2 characters long],
[[parse-atoms '{"type": "string", "minLength": 2}' \
'[""]' \
'["a"]' \
constraint violation: "a" length 1 is less than minimum allowed length 2
"ab"
"abc"
-constraint violation: "𝄞" length 1 is less than minimum allowed length 2]])
+constraint violation: "𝄞" length 1 is less than minimum allowed length 2]],
+ [], [], [xfail])
-OVSDB_CHECK_POSITIVE([strings no more than 2 characters long],
+OVSDB_CHECK_POSITIVE_CPY([strings no more than 2 characters long],
[[parse-atoms '{"type": "string", "maxLength": 2}' \
'[""]' \
'["a"]' \
AT_BANNER([OSVDB -- simple data])
-OVSDB_CHECK_POSITIVE([integer JSON datum],
+OVSDB_CHECK_POSITIVE_CPY([integer JSON datum],
[[parse-data '["integer"]' '[0]' '["set",[1]]' '[-1]']],
[0
1
-1
1])
-OVSDB_CHECK_POSITIVE([real JSON datum],
+OVSDB_CHECK_POSITIVE_CPY([real JSON datum],
[[parse-data '["real"]' '[0]' '["set",[1.0]]' '[-1.25]']],
[0
1
1
-1.25])
-OVSDB_CHECK_POSITIVE([boolean JSON datum],
+OVSDB_CHECK_POSITIVE_CPY([boolean JSON datum],
[[parse-data '["boolean"]' '["set", [true]]' '[false]' ]],
[true
false])
[true
false])
-OVSDB_CHECK_POSITIVE([string JSON datum],
+OVSDB_CHECK_POSITIVE_CPY([string JSON datum],
[[parse-data '["string"]' '["set",[""]]' '["true"]' '["\"\\\/\b\f\n\r\t"]']],
[""
"true"
\f
AT_BANNER([OVSDB -- set data])
-OVSDB_CHECK_POSITIVE([JSON optional boolean],
+OVSDB_CHECK_POSITIVE_CPY([JSON optional boolean],
[[parse-data '{"key": "boolean", "min": 0}' \
'[true]' \
'["set", [false]]' \
[]]],
[set])
-OVSDB_CHECK_POSITIVE([JSON set of 0 or more integers],
+OVSDB_CHECK_POSITIVE_CPY([JSON set of 0 or more integers],
[[parse-data '{"key": "integer", "min": 0, "max": "unlimited"}' \
'["set", [0]]' \
'[1]' \
[0, 1, 2, 3, 4, 5, 6, 7, 8]
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]])
-OVSDB_CHECK_POSITIVE([JSON set of 1 to 3 uuids],
+OVSDB_CHECK_POSITIVE_CPY([JSON set of 1 to 3 uuids],
[[parse-data '{"key": "uuid", "min": 1, "max": 3}' \
'["set", [["uuid", "550e8400-e29b-41d4-a716-446655440000"]]]' \
'["uuid", "b5078be0-7664-4299-b836-8bcc03ef941f"]' \
[[[550e8400-e29b-41d4-a716-446655440000]
[550e8400-e29b-41d4-a716-446655440000, 90558331-09af-4d2f-a572-509cad2e9088, c5051240-30ff-43ed-b4b9-93cf3f050813]]])
-OVSDB_CHECK_POSITIVE([JSON set of 0 to 3 strings],
+OVSDB_CHECK_POSITIVE_CPY([JSON set of 0 to 3 strings],
[[parse-data '{"key": "string", "min": 0, "max": 3}' \
'["set", []]' \
'["a longer string"]' \
["a relatively long string", "short string"]
["a relatively long string", "short string", zzz]]])
-OVSDB_CHECK_NEGATIVE([duplicate boolean not allowed in JSON set],
+OVSDB_CHECK_NEGATIVE_CPY([duplicate boolean not allowed in JSON set],
[[parse-data '{"key": "boolean", "max": 5}' '["set", [true, true]]']],
[ovsdb error: set contains duplicate])
[[parse-data-strings '{"key": "boolean", "max": 5}' 'true, true']],
[set contains duplicate value])
-OVSDB_CHECK_NEGATIVE([duplicate integer not allowed in JSON set],
+OVSDB_CHECK_NEGATIVE_CPY([duplicate integer not allowed in JSON set],
[[parse-data '{"key": "integer", "max": 5}' '["set", [1, 2, 3, 1]]']],
[ovsdb error: set contains duplicate])
[[parse-data-strings '{"key": "integer", "max": 5}' '[1, 2, 3, 1]']],
[set contains duplicate value])
-OVSDB_CHECK_NEGATIVE([duplicate real not allowed in JSON set],
+OVSDB_CHECK_NEGATIVE_CPY([duplicate real not allowed in JSON set],
[[parse-data '{"key": "real", "max": 5}' '["set", [0.0, -0.0]]']],
[ovsdb error: set contains duplicate])
[[parse-data-strings '{"key": "real", "max": 5}' '0.0, -0.0']],
[set contains duplicate value])
-OVSDB_CHECK_NEGATIVE([duplicate string not allowed in JSON set],
+OVSDB_CHECK_NEGATIVE_CPY([duplicate string not allowed in JSON set],
[[parse-data '{"key": "string", "max": 5}' '["set", ["asdf", "ASDF", "asdf"]]']],
[ovsdb error: set contains duplicate])
[[parse-data-strings '{"key": "string", "max": 5}' 'asdf, ASDF, "asdf"']],
[set contains duplicate value])
-OVSDB_CHECK_NEGATIVE([duplicate uuid not allowed in JSON set],
+OVSDB_CHECK_NEGATIVE_CPY([duplicate uuid not allowed in JSON set],
[[parse-data '{"key": "uuid", "max": 5}' \
'["set", [["uuid", "7ef21525-0088-4a28-a418-5518413e43ea"],
["uuid", "355ad037-f1da-40aa-b47c-ff9c7e8c6a38"],
\f
AT_BANNER([OVSDB -- map data])
-OVSDB_CHECK_POSITIVE([JSON map of 1 integer to boolean],
+OVSDB_CHECK_POSITIVE_CPY([JSON map of 1 integer to boolean],
[[parse-data '{"key": "integer", "value": "boolean"}' \
'["map", [[1, true]]]']],
[[["map",[[1,true]]]]])
'1=true']],
[[1=true]])
-OVSDB_CHECK_POSITIVE([JSON map of at least 1 integer to boolean],
+OVSDB_CHECK_POSITIVE_CPY([JSON map of at least 1 integer to boolean],
[[parse-data '{"key": "integer", "value": "boolean", "max": "unlimited"}' \
'["map", [[1, true]]]' \
'["map", [[0, true], [1, false], [2, true], [3, true], [4, true]]]' \
{0=true, 1=false, 2=true, 3=true, 4=true}
{0=true, 3=false, 4=false}]])
-OVSDB_CHECK_POSITIVE([JSON map of 1 boolean to integer],
+OVSDB_CHECK_POSITIVE_CPY([JSON map of 1 boolean to integer],
[[parse-data '{"key": "boolean", "value": "integer"}' \
'["map", [[true, 1]]]']],
[[["map",[[true,1]]]]])
'true=1']],
[[true=1]])
-OVSDB_CHECK_POSITIVE([JSON map of 1 uuid to real],
+OVSDB_CHECK_POSITIVE_CPY([JSON map of 1 uuid to real],
[[parse-data '{"key": "uuid", "value": "real", "min": 1, "max": 5}' \
'["map", [[["uuid", "cad8542b-6ee1-486b-971b-7dcbf6e14979"], 1.0],
[["uuid", "6b94b968-2702-4f64-9457-314a34d69b8c"], 2.0],
1c92b8ca-d5e4-4628-a85d-1dc2d099a99a=5.0']],
[[{1c92b8ca-d5e4-4628-a85d-1dc2d099a99a=5, 25bfa475-d072-4f60-8be1-00f48643e9cb=4, 6b94b968-2702-4f64-9457-314a34d69b8c=2, cad8542b-6ee1-486b-971b-7dcbf6e14979=1, d2c4a168-24de-47eb-a8a3-c1abfc814979=3}]])
-OVSDB_CHECK_POSITIVE([JSON map of 10 string to string],
+OVSDB_CHECK_POSITIVE_CPY([JSON map of 10 string to string],
[[parse-data '{"key": "string", "value": "string", "min": 1, "max": 10}' \
'["map", [["2 gills", "1 chopin"],
["2 chopins", "1 pint"],
"2 kilderkins"= "1 barrel"}']],
[[{"2 chopins"="1 pint", "2 demibushel"="1 firkin", "2 firkins"="1 kilderkin", "2 gallons"="1 peck", "2 gills"="1 chopin", "2 kilderkins"="1 barrel", "2 pecks"="1 demibushel", "2 pints"="1 quart", "2 pottles"="1 gallon", "2 quarts"="1 pottle"}]])
-OVSDB_CHECK_NEGATIVE([duplicate integer key not allowed in JSON map],
+OVSDB_CHECK_NEGATIVE_CPY([duplicate integer key not allowed in JSON map],
[[parse-data '{"key": "integer", "value": "boolean", "max": 5}' \
'["map", [[1, true], [2, false], [1, false]]]']],
[ovsdb error: map contains duplicate key])
--- /dev/null
+AT_BANNER([OVSDB -- interface description language (IDL) - Python])
+
+# OVSDB_CHECK_IDL_PY(TITLE, [PRE-IDL-TXN], TRANSACTIONS, OUTPUT, [KEYWORDS],
+#                    [FILTER])
+#
+# Creates a database with a schema derived from idltest.ovsidl, runs
+# each PRE-IDL-TXN (if any), starts an ovsdb-server on that database,
+# and runs "test-ovsdb.py idl" passing each of the TRANSACTIONS along.
+#
+# Checks that the overall output is OUTPUT. Before comparison, the
+# output is sorted (using "sort") and UUIDs in the output are replaced
+# by markers of the form <N> where N is a number. The first unique
+# UUID is replaced by <0>, the next by <1>, and so on. If a given
+# UUID appears more than once it is always replaced by the same
+# marker. If FILTER is supplied then the output is also filtered
+# through the specified program.
+#
+# TITLE is provided to AT_SETUP and KEYWORDS to AT_KEYWORDS.
+m4_define([OVSDB_CHECK_IDL_PY],
+ [AT_SETUP([$1])
+ AT_SKIP_IF([test $HAVE_PYTHON = no])
+ AT_KEYWORDS([ovsdb server idl positive Python $5])
+ AT_CHECK([ovsdb-tool create db $abs_srcdir/idltest.ovsschema],
+ [0], [stdout], [ignore])
+ AT_CHECK([ovsdb-server '-vPATTERN:console:ovsdb-server|%c|%m' --detach --pidfile=$PWD/pid --remote=punix:socket --unixctl=$PWD/unixctl db], [0], [ignore], [ignore])
+ m4_if([$2], [], [],
+ [AT_CHECK([ovsdb-client transact unix:socket $2], [0], [ignore], [ignore], [kill `cat pid`])])
+ AT_CHECK([$PYTHON $srcdir/test-ovsdb.py -t10 idl unix:socket $3],
+ [0], [stdout], [ignore], [kill `cat pid`])
+ AT_CHECK([sort stdout | perl $srcdir/uuidfilt.pl]m4_if([$6],,, [[| $6]]),
+ [0], [$4], [], [kill `cat pid`])
+ OVSDB_SERVER_SHUTDOWN
+ AT_CLEANUP])
+
+OVSDB_CHECK_IDL_PY([simple idl, initially empty, no ops - Python],
+ [],
+ [],
+ [000: empty
+001: done
+])
+
+OVSDB_CHECK_IDL_PY([simple idl, initially empty, various ops - Python],
+ [],
+ [['["idltest",
+ {"op": "insert",
+ "table": "simple",
+ "row": {"i": 1,
+ "r": 2.0,
+ "b": true,
+ "s": "mystring",
+ "u": ["uuid", "84f5c8f5-ac76-4dbc-a24f-8860eb407fc1"],
+ "ia": ["set", [1, 2, 3]],
+ "ra": ["set", [-0.5]],
+ "ba": ["set", [true, false]],
+ "sa": ["set", ["abc", "def"]],
+ "ua": ["set", [["uuid", "69443985-7806-45e2-b35f-574a04e720f9"],
+ ["uuid", "aad11ef0-816a-4b01-93e6-03b8b4256b98"]]]}},
+ {"op": "insert",
+ "table": "simple",
+ "row": {}}]' \
+ '["idltest",
+ {"op": "update",
+ "table": "simple",
+ "where": [],
+ "row": {"b": true}}]' \
+ '["idltest",
+ {"op": "update",
+ "table": "simple",
+ "where": [],
+ "row": {"r": 123.5}}]' \
+ '["idltest",
+ {"op": "insert",
+ "table": "simple",
+ "row": {"i": -1,
+ "r": 125,
+ "b": false,
+ "s": "",
+ "ia": ["set", [1]],
+ "ra": ["set", [1.5]],
+ "ba": ["set", [false]],
+ "sa": ["set", []],
+ "ua": ["set", []]}}]' \
+ '["idltest",
+ {"op": "update",
+ "table": "simple",
+ "where": [["i", "<", 1]],
+ "row": {"s": "newstring"}}]' \
+ '["idltest",
+ {"op": "delete",
+ "table": "simple",
+ "where": [["i", "==", 0]]}]' \
+ 'reconnect']],
+ [[000: empty
+001: {"error":null,"result":[{"uuid":["uuid","<0>"]},{"uuid":["uuid","<1>"]}]}
+002: i=0 r=0 b=false s= u=<2> ia=[] ra=[] ba=[] sa=[] ua=[] uuid=<1>
+002: i=1 r=2 b=true s=mystring u=<3> ia=[1 2 3] ra=[-0.5] ba=[false true] sa=[abc def] ua=[<4> <5>] uuid=<0>
+003: {"error":null,"result":[{"count":2}]}
+004: i=0 r=0 b=true s= u=<2> ia=[] ra=[] ba=[] sa=[] ua=[] uuid=<1>
+004: i=1 r=2 b=true s=mystring u=<3> ia=[1 2 3] ra=[-0.5] ba=[false true] sa=[abc def] ua=[<4> <5>] uuid=<0>
+005: {"error":null,"result":[{"count":2}]}
+006: i=0 r=123.5 b=true s= u=<2> ia=[] ra=[] ba=[] sa=[] ua=[] uuid=<1>
+006: i=1 r=123.5 b=true s=mystring u=<3> ia=[1 2 3] ra=[-0.5] ba=[false true] sa=[abc def] ua=[<4> <5>] uuid=<0>
+007: {"error":null,"result":[{"uuid":["uuid","<6>"]}]}
+008: i=-1 r=125 b=false s= u=<2> ia=[1] ra=[1.5] ba=[false] sa=[] ua=[] uuid=<6>
+008: i=0 r=123.5 b=true s= u=<2> ia=[] ra=[] ba=[] sa=[] ua=[] uuid=<1>
+008: i=1 r=123.5 b=true s=mystring u=<3> ia=[1 2 3] ra=[-0.5] ba=[false true] sa=[abc def] ua=[<4> <5>] uuid=<0>
+009: {"error":null,"result":[{"count":2}]}
+010: i=-1 r=125 b=false s=newstring u=<2> ia=[1] ra=[1.5] ba=[false] sa=[] ua=[] uuid=<6>
+010: i=0 r=123.5 b=true s=newstring u=<2> ia=[] ra=[] ba=[] sa=[] ua=[] uuid=<1>
+010: i=1 r=123.5 b=true s=mystring u=<3> ia=[1 2 3] ra=[-0.5] ba=[false true] sa=[abc def] ua=[<4> <5>] uuid=<0>
+011: {"error":null,"result":[{"count":1}]}
+012: i=-1 r=125 b=false s=newstring u=<2> ia=[1] ra=[1.5] ba=[false] sa=[] ua=[] uuid=<6>
+012: i=1 r=123.5 b=true s=mystring u=<3> ia=[1 2 3] ra=[-0.5] ba=[false true] sa=[abc def] ua=[<4> <5>] uuid=<0>
+013: reconnect
+014: i=-1 r=125 b=false s=newstring u=<2> ia=[1] ra=[1.5] ba=[false] sa=[] ua=[] uuid=<6>
+014: i=1 r=123.5 b=true s=mystring u=<3> ia=[1 2 3] ra=[-0.5] ba=[false true] sa=[abc def] ua=[<4> <5>] uuid=<0>
+015: done
+]])
+
+OVSDB_CHECK_IDL_PY([simple idl, initially populated - Python],
+ [['["idltest",
+ {"op": "insert",
+ "table": "simple",
+ "row": {"i": 1,
+ "r": 2.0,
+ "b": true,
+ "s": "mystring",
+ "u": ["uuid", "84f5c8f5-ac76-4dbc-a24f-8860eb407fc1"],
+ "ia": ["set", [1, 2, 3]],
+ "ra": ["set", [-0.5]],
+ "ba": ["set", [true, false]],
+ "sa": ["set", ["abc", "def"]],
+ "ua": ["set", [["uuid", "69443985-7806-45e2-b35f-574a04e720f9"],
+ ["uuid", "aad11ef0-816a-4b01-93e6-03b8b4256b98"]]]}},
+ {"op": "insert",
+ "table": "simple",
+ "row": {}}]']],
+ [['["idltest",
+ {"op": "update",
+ "table": "simple",
+ "where": [],
+ "row": {"b": true}}]']],
+ [[000: i=0 r=0 b=false s= u=<0> ia=[] ra=[] ba=[] sa=[] ua=[] uuid=<1>
+000: i=1 r=2 b=true s=mystring u=<2> ia=[1 2 3] ra=[-0.5] ba=[false true] sa=[abc def] ua=[<3> <4>] uuid=<5>
+001: {"error":null,"result":[{"count":2}]}
+002: i=0 r=0 b=true s= u=<0> ia=[] ra=[] ba=[] sa=[] ua=[] uuid=<1>
+002: i=1 r=2 b=true s=mystring u=<2> ia=[1 2 3] ra=[-0.5] ba=[false true] sa=[abc def] ua=[<3> <4>] uuid=<5>
+003: done
+]])
AT_BANNER([OVSDB -- schemas])
-OVSDB_CHECK_POSITIVE([schema with valid refTables],
+OVSDB_CHECK_POSITIVE_CPY([schema with valid refTables],
[[parse-schema \
'{"name": "mydb",
"tables": {
"refTable": "a"}}}}}}}']],
[[{"name":"mydb","tables":{"a":{"columns":{"map":{"type":{"key":{"refTable":"b","type":"uuid"},"value":{"refTable":"a","type":"uuid"}}}}},"b":{"columns":{"aRef":{"type":{"key":{"refTable":"a","type":"uuid"}}}}}}}]])
-OVSDB_CHECK_NEGATIVE([schema with invalid refTables],
+OVSDB_CHECK_NEGATIVE_CPY([schema with invalid refTables],
[[parse-schema \
'{"name": "mydb",
"tables": {
"key": {
"type": "uuid",
"refTable": "a"}}}}}}}']],
- [[test-ovsdb: syntax error: column map key refers to undefined table c]])
+ [[syntax error: column map key refers to undefined table c]])
AT_BANNER([OVSDB -- tables])
-OVSDB_CHECK_POSITIVE([table with one column],
+OVSDB_CHECK_POSITIVE_CPY([table with one column],
[[parse-table mytable '{"columns": {"name": {"type": "string"}}}']],
[[{"columns":{"name":{"type":"string"}}}]])
-OVSDB_CHECK_POSITIVE([immutable table with one column],
+OVSDB_CHECK_POSITIVE_CPY([immutable table with one column],
[[parse-table mytable \
'{"columns": {"name": {"type": "string"}},
"mutable": false}']],
[[{"columns":{"name":{"type":"string"}},"mutable":false}]])
-OVSDB_CHECK_POSITIVE([table with maxRows of 2],
+OVSDB_CHECK_POSITIVE_CPY([table with maxRows of 2],
[[parse-table mytable '{"columns": {"name": {"type": "string"}},
"maxRows": 2}']],
[[{"columns":{"name":{"type":"string"}},"maxRows":2}]])
-OVSDB_CHECK_NEGATIVE([column names may not begin with _],
+OVSDB_CHECK_NEGATIVE_CPY([column names may not begin with _],
[[parse-table mytable \
'{"columns": {"_column": {"type": "integer"}}}']],
[[names beginning with "_" are reserved]],
[table])
-OVSDB_CHECK_NEGATIVE([table must have at least one column (1)],
+OVSDB_CHECK_NEGATIVE_CPY([table must have at least one column (1)],
[[parse-table mytable '{}']],
[[Parsing table schema for table mytable failed: Required 'columns' member is missing.]])
-OVSDB_CHECK_NEGATIVE([table must have at least one column (2)],
+OVSDB_CHECK_NEGATIVE_CPY([table must have at least one column (2)],
[[parse-table mytable '{"columns": {}}']],
[[table must have at least one column]])
-OVSDB_CHECK_NEGATIVE([table maxRows must be positive],
+OVSDB_CHECK_NEGATIVE_CPY([table maxRows must be positive],
[[parse-table mytable '{"columns": {"name": {"type": "string"}},
"maxRows": 0}']],
[[syntax "{"columns":{"name":{"type":"string"}},"maxRows":0}": syntax error: maxRows must be at least 1]])
AT_BANNER([OVSDB -- atomic types])
-OVSDB_CHECK_POSITIVE([integer],
+OVSDB_CHECK_POSITIVE_CPY([integer],
[[parse-atomic-type '["integer"]' ]], ["integer"])
-OVSDB_CHECK_POSITIVE([real],
+OVSDB_CHECK_POSITIVE_CPY([real],
[[parse-atomic-type '["real"]' ]], ["real"])
-OVSDB_CHECK_POSITIVE([boolean],
+OVSDB_CHECK_POSITIVE_CPY([boolean],
[[parse-atomic-type '["boolean"]' ]], ["boolean"])
-OVSDB_CHECK_POSITIVE([string],
+OVSDB_CHECK_POSITIVE_CPY([string],
[[parse-atomic-type '["string"]' ]], ["string"])
-OVSDB_CHECK_POSITIVE([uuid],
+OVSDB_CHECK_POSITIVE_CPY([uuid],
[[parse-atomic-type '["uuid"]' ]], ["uuid"])
-OVSDB_CHECK_NEGATIVE([void is not a valid atomic-type],
+OVSDB_CHECK_NEGATIVE_CPY([void is not a valid atomic-type],
[[parse-atomic-type '["void"]' ]], ["void" is not an atomic-type])
AT_BANNER([OVSDB -- base types])
-OVSDB_CHECK_POSITIVE([integer enum],
+OVSDB_CHECK_POSITIVE_CPY([integer enum],
[[parse-base-type '{"type": "integer", "enum": ["set", [-1, 4, 5]]}' ]],
[[{"enum":["set",[-1,4,5]],"type":"integer"}]])
-OVSDB_CHECK_POSITIVE([integer >= 5],
+OVSDB_CHECK_POSITIVE_CPY([integer >= 5],
[[parse-base-type '{"type": "integer", "minInteger": 5}' ]],
[{"minInteger":5,"type":"integer"}])
-OVSDB_CHECK_POSITIVE([integer <= 7],
+OVSDB_CHECK_POSITIVE_CPY([integer <= 7],
[[parse-base-type '{"type": "integer", "maxInteger": 7}' ]],
[{"maxInteger":7,"type":"integer"}])
-OVSDB_CHECK_POSITIVE([integer between -5 and 10],
+OVSDB_CHECK_POSITIVE_CPY([integer between -5 and 10],
[[parse-base-type '{"type": "integer", "minInteger": -5, "maxInteger": 10}']],
[{"maxInteger":10,"minInteger":-5,"type":"integer"}])
-OVSDB_CHECK_NEGATIVE([integer max may not be less than min],
+OVSDB_CHECK_NEGATIVE_CPY([integer max may not be less than min],
[[parse-base-type '{"type": "integer", "minInteger": 5, "maxInteger": 3}']],
[minInteger exceeds maxInteger])
-OVSDB_CHECK_POSITIVE([real enum],
+OVSDB_CHECK_POSITIVE_CPY([real enum],
[[parse-base-type '{"type": "real", "enum": ["set", [1.5, 0, 2.75]]}' ]],
[[{"enum":["set",[0,1.5,2.75]],"type":"real"}]])
-OVSDB_CHECK_POSITIVE([real >= -1.5],
+OVSDB_CHECK_POSITIVE_CPY([real >= -1.5],
[[parse-base-type '{"type": "real", "minReal": -1.5}']],
[{"minReal":-1.5,"type":"real"}])
-OVSDB_CHECK_POSITIVE([real <= 1e5],
+OVSDB_CHECK_POSITIVE_CPY([real <= 1e5],
[[parse-base-type '{"type": "real", "maxReal": 1e5}']],
[{"maxReal":100000,"type":"real"}])
-OVSDB_CHECK_POSITIVE([real between -2.5 and 3.75],
+OVSDB_CHECK_POSITIVE_CPY([real between -2.5 and 3.75],
[[parse-base-type '{"type": "real", "minReal": -2.5, "maxReal": 3.75}']],
[{"maxReal":3.75,"minReal":-2.5,"type":"real"}])
-OVSDB_CHECK_NEGATIVE([real max may not be less than min],
+OVSDB_CHECK_NEGATIVE_CPY([real max may not be less than min],
[[parse-base-type '{"type": "real", "minReal": 555, "maxReal": 444}']],
[minReal exceeds maxReal])
-OVSDB_CHECK_POSITIVE([boolean],
+OVSDB_CHECK_POSITIVE_CPY([boolean],
[[parse-base-type '[{"type": "boolean"}]' ]], ["boolean"])
-OVSDB_CHECK_POSITIVE([boolean enum],
+OVSDB_CHECK_POSITIVE_CPY([boolean enum],
[[parse-base-type '{"type": "boolean", "enum": true}' ]],
[[{"enum":true,"type":"boolean"}]])
-OVSDB_CHECK_POSITIVE([string enum],
+OVSDB_CHECK_POSITIVE_CPY([string enum],
[[parse-base-type '{"type": "string", "enum": ["set", ["def", "abc"]]}']],
[[{"enum":["set",["abc","def"]],"type":"string"}]])
-OVSDB_CHECK_POSITIVE([string minLength],
+OVSDB_CHECK_POSITIVE_CPY([string minLength],
[[parse-base-type '{"type": "string", "minLength": 1}']],
[{"minLength":1,"type":"string"}])
-OVSDB_CHECK_POSITIVE([string maxLength],
+OVSDB_CHECK_POSITIVE_CPY([string maxLength],
[[parse-base-type '{"type": "string", "maxLength": 5}']],
[{"maxLength":5,"type":"string"}])
-OVSDB_CHECK_POSITIVE([string minLength and maxLength],
+OVSDB_CHECK_POSITIVE_CPY([string minLength and maxLength],
[[parse-base-type '{"type": "string", "minLength": 1, "maxLength": 5}']],
[{"maxLength":5,"minLength":1,"type":"string"}])
-OVSDB_CHECK_NEGATIVE([maxLength must not be less than minLength],
+OVSDB_CHECK_NEGATIVE_CPY([maxLength must not be less than minLength],
[[parse-base-type '{"type": "string", "minLength": 5, "maxLength": 3}']],
[minLength exceeds maxLength])
-OVSDB_CHECK_NEGATIVE([maxLength must not be negative],
+OVSDB_CHECK_NEGATIVE_CPY([maxLength must not be negative],
[[parse-base-type '{"type": "string", "maxLength": -1}']],
[maxLength out of valid range 0 to 4294967295])
-OVSDB_CHECK_POSITIVE([uuid enum],
+OVSDB_CHECK_POSITIVE_CPY([uuid enum],
[[parse-base-type '{"type": "uuid", "enum": ["uuid", "36bf19c0-ad9d-4232-bb85-b3d73dfe2123"]}' ]],
[[{"enum":["uuid","36bf19c0-ad9d-4232-bb85-b3d73dfe2123"],"type":"uuid"}]])
-OVSDB_CHECK_POSITIVE([uuid refTable],
+OVSDB_CHECK_POSITIVE_CPY([uuid refTable],
[[parse-base-type '{"type": "uuid", "refTable": "myTable"}' ]],
[{"refTable":"myTable","type":"uuid"}])
-OVSDB_CHECK_NEGATIVE([uuid refTable must be valid id],
+OVSDB_CHECK_NEGATIVE_CPY([uuid refTable must be valid id],
[[parse-base-type '{"type": "uuid", "refTable": "a-b-c"}' ]],
[Type mismatch for member 'refTable'])
-OVSDB_CHECK_NEGATIVE([void is not a valid base-type],
+OVSDB_CHECK_NEGATIVE_CPY([void is not a valid base-type],
[[parse-base-type '["void"]' ]], ["void" is not an atomic-type])
-OVSDB_CHECK_NEGATIVE(["type" member must be present],
+OVSDB_CHECK_NEGATIVE_CPY(["type" member must be present],
[[parse-base-type '{}']], [Parsing ovsdb type failed: Required 'type' member is missing.])
AT_BANNER([OVSDB -- simple types])
-OVSDB_CHECK_POSITIVE([simple integer],
+OVSDB_CHECK_POSITIVE_CPY([simple integer],
[[parse-type '["integer"]' ]], ["integer"])
-OVSDB_CHECK_POSITIVE([simple real],
+OVSDB_CHECK_POSITIVE_CPY([simple real],
[[parse-type '["real"]' ]], ["real"])
-OVSDB_CHECK_POSITIVE([simple boolean],
+OVSDB_CHECK_POSITIVE_CPY([simple boolean],
[[parse-type '["boolean"]' ]], ["boolean"])
-OVSDB_CHECK_POSITIVE([simple string],
+OVSDB_CHECK_POSITIVE_CPY([simple string],
[[parse-type '["string"]' ]], ["string"])
-OVSDB_CHECK_POSITIVE([simple uuid],
+OVSDB_CHECK_POSITIVE_CPY([simple uuid],
[[parse-type '["uuid"]' ]], ["uuid"])
-OVSDB_CHECK_POSITIVE([integer in object],
+OVSDB_CHECK_POSITIVE_CPY([integer in object],
[[parse-type '{"key": "integer"}' ]], ["integer"])
-OVSDB_CHECK_POSITIVE([real in object with explicit min and max],
+OVSDB_CHECK_POSITIVE_CPY([real in object with explicit min and max],
[[parse-type '{"key": "real", "min": 1, "max": 1}' ]], ["real"])
-OVSDB_CHECK_NEGATIVE([key type is required],
+OVSDB_CHECK_NEGATIVE_CPY([key type is required],
[[parse-type '{}' ]], [Required 'key' member is missing.])
-OVSDB_CHECK_NEGATIVE([void is not a valid type],
+OVSDB_CHECK_NEGATIVE_CPY([void is not a valid type],
[[parse-type '["void"]' ]], ["void" is not an atomic-type])
AT_BANNER([OVSDB -- set types])
-OVSDB_CHECK_POSITIVE([optional boolean],
+OVSDB_CHECK_POSITIVE_CPY([optional boolean],
[[parse-type '{"key": "boolean", "min": 0}' ]],
[[{"key":"boolean","min":0}]],
[set])
-OVSDB_CHECK_POSITIVE([set of 1 to 3 uuids],
+OVSDB_CHECK_POSITIVE_CPY([set of 1 to 3 uuids],
[[parse-type '{"key": "uuid", "min": 1, "max": 3}' ]],
[[{"key":"uuid","max":3}]])
-OVSDB_CHECK_POSITIVE([set of 0 to 3 strings],
+OVSDB_CHECK_POSITIVE_CPY([set of 0 to 3 strings],
[[parse-type '{"key": "string", "min": 0, "max": 3}' ]],
[[{"key":"string","max":3,"min":0}]])
-OVSDB_CHECK_POSITIVE([set of 0 or more integers],
+OVSDB_CHECK_POSITIVE_CPY([set of 0 or more integers],
[[parse-type '{"key": "integer", "min": 0, "max": "unlimited"}']],
[[{"key":"integer","max":"unlimited","min":0}]])
-OVSDB_CHECK_POSITIVE([set of 1 or more reals],
+OVSDB_CHECK_POSITIVE_CPY([set of 1 or more reals],
[[parse-type '{"key": "real", "min": 1, "max": "unlimited"}']],
[[{"key":"real","max":"unlimited"}]])
-OVSDB_CHECK_NEGATIVE([set max cannot be less than min],
+OVSDB_CHECK_NEGATIVE_CPY([set max cannot be less than min],
[[parse-type '{"key": "real", "min": 5, "max": 3}' ]],
[ovsdb type fails constraint checks])
-OVSDB_CHECK_NEGATIVE([set max cannot be negative],
+OVSDB_CHECK_NEGATIVE_CPY([set max cannot be negative],
[[parse-type '{"key": "real", "max": -1}' ]],
[bad min or max value])
-OVSDB_CHECK_NEGATIVE([set min cannot be negative],
+OVSDB_CHECK_NEGATIVE_CPY([set min cannot be negative],
[[parse-type '{"key": "real", "min": -1}' ]],
[bad min or max value])
-OVSDB_CHECK_NEGATIVE([set min cannot be greater than one],
+OVSDB_CHECK_NEGATIVE_CPY([set min cannot be greater than one],
[[parse-type '{"key": "real", "min": 10, "max": "unlimited"}']],
[ovsdb type fails constraint checks])
AT_BANNER([OVSDB -- map types])
-OVSDB_CHECK_POSITIVE([map of 1 integer to boolean],
+OVSDB_CHECK_POSITIVE_CPY([map of 1 integer to boolean],
[[parse-type '{"key": "integer", "value": "boolean"}' ]],
[[{"key":"integer","value":"boolean"}]])
-OVSDB_CHECK_POSITIVE([map of 1 boolean to integer, explicit min and max],
+OVSDB_CHECK_POSITIVE_CPY([map of 1 boolean to integer, explicit min and max],
[[parse-type '{"key": "boolean", "value": "integer", "min": 1, "max": 1}' ]],
[[{"key":"boolean","value":"integer"}]])
-OVSDB_CHECK_POSITIVE([map of 1 to 5 uuid to real],
+OVSDB_CHECK_POSITIVE_CPY([map of 1 to 5 uuid to real],
[[parse-type '{"key": "uuid", "value": "real", "min": 1, "max": 5}' ]],
[[{"key":"uuid","max":5,"value":"real"}]])
-OVSDB_CHECK_POSITIVE([map of 0 to 10 string to uuid],
+OVSDB_CHECK_POSITIVE_CPY([map of 0 to 10 string to uuid],
[[parse-type '{"key": "string", "value": "uuid", "min": 0, "max": 10}' ]],
[[{"key":"string","max":10,"min":0,"value":"uuid"}]])
-OVSDB_CHECK_POSITIVE([map of 1 to 20 real to string],
+OVSDB_CHECK_POSITIVE_CPY([map of 1 to 20 real to string],
[[parse-type '{"key": "real", "value": "string", "min": 1, "max": 20}' ]],
[[{"key":"real","max":20,"value":"string"}]])
-OVSDB_CHECK_POSITIVE([map of 0 or more string to real],
+OVSDB_CHECK_POSITIVE_CPY([map of 0 or more string to real],
[[parse-type '{"key": "string", "value": "real", "min": 0, "max": "unlimited"}' ]],
[[{"key":"string","max":"unlimited","min":0,"value":"real"}]])
-OVSDB_CHECK_NEGATIVE([map key type is required],
+OVSDB_CHECK_NEGATIVE_CPY([map key type is required],
[[parse-type '{"value": "integer"}' ]],
[Required 'key' member is missing.])
], [])
AT_CLEANUP])
+# OVSDB_CHECK_POSITIVE_PY(TITLE, TEST-OVSDB-ARGS, OUTPUT, [KEYWORDS], [PREREQ],
+# [XFAIL])
+#
+# Runs "test-ovsdb.py TEST-OVSDB-ARGS" and checks that it exits with
+# status 0 and prints OUTPUT on stdout.
+#
+# If XFAIL is nonempty then the test is expected to fail (presumably because
+# this test works in the C implementation but does not work in Python yet)
+#
+# TITLE is provided to AT_SETUP and KEYWORDS to AT_KEYWORDS.
+m4_define([OVSDB_CHECK_POSITIVE_PY],
+ [AT_SETUP([$1])
+ AT_SKIP_IF([test $HAVE_PYTHON = no])
+ m4_if([$6], [], [], [AT_XFAIL_IF([:])])
+ AT_KEYWORDS([ovsdb positive Python $4])
+ AT_CHECK([$PYTHON $srcdir/test-ovsdb.py $2], [0], [$3
+], [])
+ AT_CLEANUP])
+
+# OVSDB_CHECK_POSITIVE_CPY(TITLE, TEST-OVSDB-ARGS, OUTPUT, [KEYWORDS],
+# [PREREQ], [PY-XFAIL])
+#
+# Runs identical C and Python tests, as specified.
+m4_define([OVSDB_CHECK_POSITIVE_CPY],
+ [OVSDB_CHECK_POSITIVE([$1 - C], [$2], [$3], [$4], [$5])
+ OVSDB_CHECK_POSITIVE_PY([$1 - Python], [$2], [$3], [$4], [$5], [$6])])
+
# OVSDB_CHECK_NEGATIVE(TITLE, TEST-OVSDB-ARGS, OUTPUT, [KEYWORDS], [PREREQ])
#
# Runs "test-ovsdb TEST-OVSDB-ARGS" and checks that it exits with
[0], [ignore], [ignore])
AT_CLEANUP])
+# OVSDB_CHECK_NEGATIVE_PY(TITLE, TEST-OVSDB-ARGS, OUTPUT, [KEYWORDS], [PREREQ])
+#
+# Runs "test-ovsdb.py TEST-OVSDB-ARGS" and checks that it exits with
+# status 1 and that its output on stderr contains substring OUTPUT.
+# TITLE is provided to AT_SETUP and KEYWORDS to AT_KEYWORDS.
+m4_define([OVSDB_CHECK_NEGATIVE_PY],
+  [AT_SETUP([$1])
+   AT_SKIP_IF([test $HAVE_PYTHON = no])
+   AT_KEYWORDS([ovsdb negative Python $4])
+   AT_CHECK([$PYTHON $srcdir/test-ovsdb.py $2], [1], [], [stderr])
+   m4_assert(m4_len([$3]))
+   AT_CHECK(
+     [if grep -F -e "AS_ESCAPE([$3])" stderr
+      then
+        :
+      else
+        exit 99
+      fi],
+     [0], [ignore], [ignore])
+   AT_CLEANUP])
+
+# OVSDB_CHECK_NEGATIVE_CPY(TITLE, TEST-OVSDB-ARGS, OUTPUT, [KEYWORDS],
+# [PREREQ])
+#
+# Runs identical C and Python tests, as specified.
+m4_define([OVSDB_CHECK_NEGATIVE_CPY],
+ [OVSDB_CHECK_NEGATIVE([$1 - C], [$2], [$3], [$4], [$5])
+ OVSDB_CHECK_NEGATIVE_PY([$1 - Python], [$2], [$3], [$4], [$5])])
+
m4_include([tests/ovsdb-log.at])
m4_include([tests/ovsdb-types.at])
m4_include([tests/ovsdb-data.at])
m4_include([tests/ovsdb-server.at])
m4_include([tests/ovsdb-monitor.at])
m4_include([tests/ovsdb-idl.at])
+m4_include([tests/ovsdb-idl-py.at])
m4_define([__RECONNECT_CHECK],
[AT_SETUP([$1])
+ $2
AT_KEYWORDS([reconnect])
- AT_DATA([input], [$2])
- AT_CHECK([$3], [0], [$4])
+ AT_DATA([input], [$3])
+ AT_CHECK([$4], [0], [$5])
AT_CLEANUP])
m4_define([RECONNECT_CHECK],
- [__RECONNECT_CHECK([$1], [$2], [test-reconnect < input], [$3])])
+ [__RECONNECT_CHECK(
+ [$1 - C],
+ [],
+ [$2],
+ [test-reconnect < input],
+ [$3])
+ __RECONNECT_CHECK(
+ [$1 - Python],
+ [AT_SKIP_IF([test $HAVE_PYTHON = no])],
+ [$2],
+ [$PYTHON $srcdir/test-reconnect.py < input],
+ [$3])])
######################################################################
RECONNECT_CHECK([nothing happens if not enabled],
--- /dev/null
+# Copyright (c) 2010 Nicira Networks.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import getopt
+import sys
+import time
+
+import ovs.daemon
+import ovs.util
+
+def main(argv):
+    try:
+        options, args = getopt.gnu_getopt(
+            argv[1:], 'bh', ["bail", "help"] + ovs.daemon.LONG_OPTIONS)
+    except getopt.GetoptError, geo:
+        sys.stderr.write("%s: %s\n" % (ovs.util.PROGRAM_NAME, geo.msg))
+        sys.exit(1)
+
+    bail = False
+    for key, value in options:
+        if key in ['-h', '--help']:
+            usage()
+        elif key in ['-b', '--bail']:
+            bail = True
+        elif not ovs.daemon.parse_opt(key, value):
+            sys.stderr.write("%s: unhandled option %s\n"
+                             % (ovs.util.PROGRAM_NAME, key))
+            sys.exit(1)
+
+    ovs.daemon.die_if_already_running()
+    ovs.daemon.daemonize_start()
+    if bail:
+        sys.stderr.write("%s: exiting after daemonize_start() as requested\n"
+                         % ovs.util.PROGRAM_NAME)
+        sys.exit(1)
+    ovs.daemon.daemonize_complete()
+
+    while True:
+        time.sleep(1)
+
+def usage():
+    sys.stdout.write("""\
+%s: Open vSwitch daemonization test program for Python
+usage: %s [OPTIONS]
+""" % (ovs.util.PROGRAM_NAME, ovs.util.PROGRAM_NAME))
+    ovs.daemon.usage()
+    sys.stdout.write("""
+Other options:
+  -h, --help display this help message
+  -b, --bail exit with an error after daemonize_start()
+""")
+    sys.exit(0)
+
+if __name__ == '__main__':
+ main(sys.argv)
--- /dev/null
+# Copyright (c) 2009, 2010 Nicira Networks.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import codecs
+import getopt
+import sys
+
+import ovs.json
+
+def print_json(json):
+ if type(json) in [str, unicode]:
+ print "error: %s" % json
+ return False
+ else:
+ ovs.json.to_stream(json, sys.stdout)
+ sys.stdout.write("\n")
+ return True
+
+def parse_multiple(stream):
+ buf = stream.read(4096)
+ ok = True
+ parser = None
+ while len(buf):
+ if parser is None and buf[0] in " \t\r\n":
+ buf = buf[1:]
+ else:
+ if parser is None:
+ parser = ovs.json.Parser()
+ n = parser.feed(buf)
+ buf = buf[n:]
+ if len(buf):
+ if not print_json(parser.finish()):
+ ok = False
+ parser = None
+ if len(buf) == 0:
+ buf = stream.read(4096)
+ if parser and not print_json(parser.finish()):
+ ok = False
+ return ok
+
+def main(argv):
+ argv0 = argv[0]
+
+ # Make stdout and stderr UTF-8, even if they are redirected to a file.
+ sys.stdout = codecs.getwriter("utf-8")(sys.stdout)
+ sys.stderr = codecs.getwriter("utf-8")(sys.stderr)
+
+ try:
+ options, args = getopt.gnu_getopt(argv[1:], '', ['multiple'])
+ except getopt.GetoptError, geo:
+ sys.stderr.write("%s: %s\n" % (argv0, geo.msg))
+ sys.exit(1)
+
+ multiple = False
+ for key, value in options:
+ if key == '--multiple':
+ multiple = True
+ else:
+ sys.stderr.write("%s: unhandled option %s\n" % (argv0, key))
+ sys.exit(1)
+
+ if len(args) != 1:
+ sys.stderr.write("usage: %s [--multiple] INPUT.json\n" % argv0)
+ sys.exit(1)
+
+ input_file = args[0]
+ if input_file == "-":
+ stream = sys.stdin
+ else:
+ stream = open(input_file, "r")
+
+ if multiple:
+ ok = parse_multiple(stream)
+ else:
+ ok = print_json(ovs.json.from_stream(stream))
+
+ if not ok:
+ sys.exit(1)
+
+if __name__ == '__main__':
+ main(sys.argv)
error = jsonrpc_send_block(rpc, msg);
if (error) {
- ovs_fatal(error, "could not send request");
+ ovs_fatal(error, "could not send notification");
}
jsonrpc_close(rpc);
}
--- /dev/null
+# Copyright (c) 2009, 2010 Nicira Networks
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import errno
+import getopt
+import os
+import sys
+
+import ovs.daemon
+import ovs.json
+import ovs.jsonrpc
+import ovs.poller
+import ovs.stream
+import ovs.util
+
+def handle_rpc(rpc, msg):
+    done = False
+    reply = None
+
+    if msg.type == ovs.jsonrpc.Message.T_REQUEST:
+        if msg.method == "echo":
+            reply = ovs.jsonrpc.Message.create_reply(msg.params, msg.id)
+        else:
+            reply = ovs.jsonrpc.Message.create_error(
+                {"error": "unknown method"}, msg.id)
+            sys.stderr.write("unknown request %s\n" % msg.method)
+    elif msg.type == ovs.jsonrpc.Message.T_NOTIFY:
+        if msg.method == "shutdown":
+            done = True
+        else:
+            rpc.error(errno.ENOTTY)
+            sys.stderr.write("unknown notification %s\n" % msg.method)
+    else:
+        rpc.error(errno.EPROTO)
+        sys.stderr.write("unsolicited JSON-RPC reply or error\n")
+
+    if reply:
+        rpc.send(reply)
+    return done
+
+def do_listen(name):
+ ovs.daemon.die_if_already_running()
+
+ error, pstream = ovs.stream.PassiveStream.open(name)
+ if error:
+ sys.stderr.write("could not listen on \"%s\": %s\n"
+ % (name, os.strerror(error)))
+ sys.exit(1)
+
+ ovs.daemon.daemonize()
+
+ rpcs = []
+ done = False
+ while True:
+ # Accept new connections.
+ error, stream = pstream.accept()
+ if stream:
+ rpcs.append(ovs.jsonrpc.Connection(stream))
+ elif error != errno.EAGAIN:
+ sys.stderr.write("PassiveStream.accept() failed\n")
+ sys.exit(1)
+
+ # Service existing connections.
+ dead_rpcs = []
+ for rpc in rpcs:
+ rpc.run()
+
+ error = 0
+ if not rpc.get_backlog():
+ error, msg = rpc.recv()
+ if not error:
+ if handle_rpc(rpc, msg):
+ done = True
+
+ error = rpc.get_status()
+ if error:
+ rpc.close()
+ dead_rpcs.append(rpc)
+ rpcs = [rpc for rpc in rpcs if not rpc in dead_rpcs]
+
+ if done and not rpcs:
+ break
+
+ poller = ovs.poller.Poller()
+ pstream.wait(poller)
+ for rpc in rpcs:
+ rpc.wait(poller)
+ if not rpc.get_backlog():
+ rpc.recv_wait(poller)
+ poller.block()
+ pstream.close()
+
+def do_request(name, method, params_string):
+ params = ovs.json.from_string(params_string)
+ msg = ovs.jsonrpc.Message.create_request(method, params)
+ s = msg.is_valid()
+ if s:
+ sys.stderr.write("not a valid JSON-RPC request: %s\n" % s)
+ sys.exit(1)
+
+ error, stream = ovs.stream.Stream.open_block(ovs.stream.Stream.open(name))
+ if error:
+ sys.stderr.write("could not open \"%s\": %s\n"
+ % (name, os.strerror(error)))
+ sys.exit(1)
+
+ rpc = ovs.jsonrpc.Connection(stream)
+
+ error = rpc.send(msg)
+ if error:
+ sys.stderr.write("could not send request: %s\n" % os.strerror(error))
+ sys.exit(1)
+
+ error, msg = rpc.recv_block()
+ if error:
+ sys.stderr.write("error waiting for reply: %s\n" % os.strerror(error))
+ sys.exit(1)
+
+ print ovs.json.to_string(msg.to_json())
+
+ rpc.close()
+
+def do_notify(name, method, params_string):
+ params = ovs.json.from_string(params_string)
+ msg = ovs.jsonrpc.Message.create_notify(method, params)
+ s = msg.is_valid()
+ if s:
+ sys.stderr.write("not a valid JSON-RPC notification: %s\n" % s)
+ sys.exit(1)
+
+ error, stream = ovs.stream.Stream.open_block(ovs.stream.Stream.open(name))
+ if error:
+ sys.stderr.write("could not open \"%s\": %s\n"
+ % (name, os.strerror(error)))
+ sys.exit(1)
+
+ rpc = ovs.jsonrpc.Connection(stream)
+
+ error = rpc.send_block(msg)
+ if error:
+ sys.stderr.write("could not send notification: %s\n"
+ % os.strerror(error))
+ sys.exit(1)
+
+ rpc.close()
+
+def main(argv):
+ try:
+ options, args = getopt.gnu_getopt(
+ argv[1:], 'h', ["help"] + ovs.daemon.LONG_OPTIONS)
+ except getopt.GetoptError, geo:
+ sys.stderr.write("%s: %s\n" % (ovs.util.PROGRAM_NAME, geo.msg))
+ sys.exit(1)
+
+ for key, value in options:
+        if key in ['-h', '--help']:
+ usage()
+ elif not ovs.daemon.parse_opt(key, value):
+ sys.stderr.write("%s: unhandled option %s\n"
+ % (ovs.util.PROGRAM_NAME, key))
+ sys.exit(1)
+
+ commands = {"listen": (do_listen, 1),
+ "request": (do_request, 3),
+ "notify": (do_notify, 3),
+ "help": (usage, (0,))}
+
+ command_name = args[0]
+ args = args[1:]
+ if not command_name in commands:
+        sys.stderr.write("%s: unknown command \"%s\" (use --help for "
+                         "help)\n" % (ovs.util.PROGRAM_NAME, command_name))
+ sys.exit(1)
+
+ func, n_args = commands[command_name]
+ if type(n_args) == tuple:
+ if len(args) < n_args[0]:
+            sys.stderr.write("%s: \"%s\" requires at least %d arguments but "
+                             "only %d provided\n" % (ovs.util.PROGRAM_NAME,
+                                 command_name, n_args[0], len(args)))
+ sys.exit(1)
+ elif type(n_args) == int:
+ if len(args) != n_args:
+            sys.stderr.write("%s: \"%s\" requires %d arguments but %d "
+                             "provided\n" % (ovs.util.PROGRAM_NAME,
+                                 command_name, n_args, len(args)))
+ sys.exit(1)
+ else:
+ assert False
+
+ func(*args)
+
+def usage():
+ sys.stdout.write("""\
+%s: JSON-RPC test utility for Python
+usage: %s [OPTIONS] COMMAND [ARG...]
+ listen LOCAL listen for connections on LOCAL
+ request REMOTE METHOD PARAMS send request, print reply
+ notify REMOTE METHOD PARAMS send notification and exit
+""" % (ovs.util.PROGRAM_NAME, ovs.util.PROGRAM_NAME))
+ ovs.stream.usage("JSON-RPC", True, True, True)
+ ovs.daemon.usage()
+ sys.stdout.write("""
+Other options:
+ -h, --help display this help message
+""")
+ sys.exit(0)
+
+if __name__ == '__main__':
+ main(sys.argv)
+
--- /dev/null
+# Copyright (c) 2009, 2010 Nicira Networks
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import codecs
+import getopt
+import re
+import os
+import signal
+import sys
+
+from ovs.db import error
+import ovs.db.idl
+import ovs.db.schema
+from ovs.db import data
+from ovs.db import types
+import ovs.json
+import ovs.jsonrpc
+import ovs.ovsuuid
+import ovs.poller
+import ovs.stream
+import ovs.util
+
+def unbox_json(json):
+ if type(json) == list and len(json) == 1:
+ return json[0]
+ else:
+ return json
+
+def do_default_atoms():
+ for type in types.ATOMIC_TYPES:
+ if type == types.VoidType:
+ continue
+
+ sys.stdout.write("%s: " % type.to_string())
+
+ atom = data.Atom.default(type)
+ if atom != data.Atom.default(type):
+ sys.stdout.write("wrong\n")
+ sys.exit(1)
+
+ sys.stdout.write("OK\n")
+
+def do_default_data():
+ any_errors = False
+ for n_min in 0, 1:
+ for key in types.ATOMIC_TYPES:
+ if key == types.VoidType:
+ continue
+ for value in types.ATOMIC_TYPES:
+ if value == types.VoidType:
+ valueBase = None
+ else:
+ valueBase = types.BaseType(value)
+ type = types.Type(types.BaseType(key), valueBase, n_min, 1)
+ assert type.is_valid()
+
+ sys.stdout.write("key %s, value %s, n_min %d: "
+ % (key.to_string(), value.to_string(), n_min))
+
+ datum = data.Datum.default(type)
+ if datum != data.Datum.default(type):
+ sys.stdout.write("wrong\n")
+ any_errors = True
+ else:
+ sys.stdout.write("OK\n")
+ if any_errors:
+ sys.exit(1)
+
+def do_parse_atomic_type(type_string):
+ type_json = unbox_json(ovs.json.from_string(type_string))
+ atomic_type = types.AtomicType.from_json(type_json)
+ print ovs.json.to_string(atomic_type.to_json(), sort_keys=True)
+
+def do_parse_base_type(type_string):
+ type_json = unbox_json(ovs.json.from_string(type_string))
+ base_type = types.BaseType.from_json(type_json)
+ print ovs.json.to_string(base_type.to_json(), sort_keys=True)
+
+def do_parse_type(type_string):
+ type_json = unbox_json(ovs.json.from_string(type_string))
+ type = types.Type.from_json(type_json)
+ print ovs.json.to_string(type.to_json(), sort_keys=True)
+
+def do_parse_atoms(type_string, *atom_strings):
+ type_json = unbox_json(ovs.json.from_string(type_string))
+ base = types.BaseType.from_json(type_json)
+ for atom_string in atom_strings:
+ atom_json = unbox_json(ovs.json.from_string(atom_string))
+ try:
+ atom = data.Atom.from_json(base, atom_json)
+ print ovs.json.to_string(atom.to_json())
+ except error.Error, e:
+ print e
+
+def do_parse_data(type_string, *data_strings):
+ type_json = unbox_json(ovs.json.from_string(type_string))
+ type = types.Type.from_json(type_json)
+ for datum_string in data_strings:
+ datum_json = unbox_json(ovs.json.from_string(datum_string))
+ datum = data.Datum.from_json(type, datum_json)
+ print ovs.json.to_string(datum.to_json())
+
+def do_sort_atoms(type_string, atom_strings):
+ type_json = unbox_json(ovs.json.from_string(type_string))
+ base = types.BaseType.from_json(type_json)
+ atoms = [data.Atom.from_json(base, atom_json)
+ for atom_json in unbox_json(ovs.json.from_string(atom_strings))]
+ print ovs.json.to_string([data.Atom.to_json(atom)
+ for atom in sorted(atoms)])
+
+def do_parse_column(name, column_string):
+ column_json = unbox_json(ovs.json.from_string(column_string))
+ column = ovs.db.schema.ColumnSchema.from_json(column_json, name)
+ print ovs.json.to_string(column.to_json(), sort_keys=True)
+
+def do_parse_table(name, table_string):
+ table_json = unbox_json(ovs.json.from_string(table_string))
+ table = ovs.db.schema.TableSchema.from_json(table_json, name)
+ print ovs.json.to_string(table.to_json(), sort_keys=True)
+
+def do_parse_rows(table_string, *rows):
+ table_json = unbox_json(ovs.json.from_string(table_string))
+    table = ovs.db.schema.TableSchema.from_json(table_json, "mytable")
+
+def do_parse_schema(schema_string):
+ schema_json = unbox_json(ovs.json.from_string(schema_string))
+ schema = ovs.db.schema.DbSchema.from_json(schema_json)
+ print ovs.json.to_string(schema.to_json(), sort_keys=True)
+
+def print_idl(idl, step):
+ n = 0
+ for uuid, row in idl.data["simple"].iteritems():
+ s = ("%03d: i=%s r=%s b=%s s=%s u=%s "
+ "ia=%s ra=%s ba=%s sa=%s ua=%s uuid=%s"
+ % (step, row.i, row.r, row.b, row.s, row.u,
+ row.ia, row.ra, row.ba, row.sa, row.ua, uuid))
+ print(re.sub('""|,', "", s))
+ n += 1
+ if not n:
+ print("%03d: empty" % step)
+
+def substitute_uuids(json, symtab):
+ if type(json) in [str, unicode]:
+ symbol = symtab.get(json)
+ if symbol:
+ return str(symbol)
+ elif type(json) == list:
+ return [substitute_uuids(element, symtab) for element in json]
+ elif type(json) == dict:
+ d = {}
+ for key, value in json.iteritems():
+ d[key] = substitute_uuids(value, symtab)
+ return d
+ return json
+
+def parse_uuids(json, symtab):
+ if type(json) in [str, unicode] and ovs.ovsuuid.UUID.is_valid_string(json):
+ name = "#%d#" % len(symtab)
+ sys.stderr.write("%s = %s\n" % (name, json))
+ symtab[name] = json
+ elif type(json) == list:
+ for element in json:
+ parse_uuids(element, symtab)
+ elif type(json) == dict:
+ for value in json.itervalues():
+ parse_uuids(value, symtab)
+
+def do_idl(remote, *commands):
+ idl = ovs.db.idl.Idl(remote, "idltest")
+
+ if commands:
+ error, stream = ovs.stream.Stream.open_block(
+ ovs.stream.Stream.open(remote))
+ if error:
+            sys.stderr.write("failed to connect to \"%s\"\n" % remote)
+ sys.exit(1)
+ rpc = ovs.jsonrpc.Connection(stream)
+ else:
+ rpc = None
+
+ symtab = {}
+ seqno = 0
+ step = 0
+ for command in commands:
+ if command.startswith("+"):
+ # The previous transaction didn't change anything.
+ command = command[1:]
+ else:
+ # Wait for update.
+ while idl.get_seqno() == seqno and not idl.run():
+ rpc.run()
+
+ poller = ovs.poller.Poller()
+ idl.wait(poller)
+ rpc.wait(poller)
+ poller.block()
+
+ print_idl(idl, step)
+ step += 1
+
+ seqno = idl.get_seqno()
+
+ if command == "reconnect":
+ print("%03d: reconnect" % step)
+ step += 1
+ idl.force_reconnect()
+ elif not command.startswith("["):
+ idl_set(idl, command, step)
+ step += 1
+ else:
+ json = ovs.json.from_string(command)
+ if type(json) in [str, unicode]:
+ sys.stderr.write("\"%s\": %s\n" % (command, json))
+ sys.exit(1)
+ json = substitute_uuids(json, symtab)
+ request = ovs.jsonrpc.Message.create_request("transact", json)
+ error, reply = rpc.transact_block(request)
+ if error:
+ sys.stderr.write("jsonrpc transaction failed: %s"
+ % os.strerror(error))
+ sys.exit(1)
+ sys.stdout.write("%03d: " % step)
+ sys.stdout.flush()
+ step += 1
+ if reply.result is not None:
+ parse_uuids(reply.result, symtab)
+ reply.id = None
+ sys.stdout.write("%s\n" % ovs.json.to_string(reply.to_json()))
+
+ if rpc:
+ rpc.close()
+ while idl.get_seqno() == seqno and not idl.run():
+ poller = ovs.poller.Poller()
+ idl.wait(poller)
+ poller.block()
+ print_idl(idl, step)
+ step += 1
+ idl.close()
+ print("%03d: done" % step)
+
+def usage():
+ print """\
+%(program_name)s: test utility for Open vSwitch database Python bindings
+usage: %(program_name)s [OPTIONS] COMMAND ARG...
+
+The following commands are supported:
+default-atoms
+ test ovsdb_atom_default()
+default-data
+ test ovsdb_datum_default()
+parse-atomic-type TYPE
+ parse TYPE as OVSDB atomic type, and re-serialize
+parse-base-type TYPE
+ parse TYPE as OVSDB base type, and re-serialize
+parse-type JSON
+ parse JSON as OVSDB type, and re-serialize
+parse-atoms TYPE ATOM...
+ parse JSON ATOMs as atoms of TYPE, and re-serialize
+parse-atom-strings TYPE ATOM...
+ parse string ATOMs as atoms of given TYPE, and re-serialize
+sort-atoms TYPE ATOM...
+ print JSON ATOMs in sorted order
+parse-data TYPE DATUM...
+ parse JSON DATUMs as data of given TYPE, and re-serialize
+parse-column NAME OBJECT
+ parse column NAME with info OBJECT, and re-serialize
+parse-table NAME OBJECT
+ parse table NAME with info OBJECT
+parse-schema JSON
+ parse JSON as an OVSDB schema, and re-serialize
+idl SERVER [TRANSACTION...]
+ connect to SERVER and dump the contents of the database
+ as seen initially by the IDL implementation and after
+ executing each TRANSACTION. (Each TRANSACTION must modify
+ the database or this command will hang.)
+
+The following options are also available:
+ -t, --timeout=SECS give up after SECS seconds
+ -h, --help display this help message\
+""" % {'program_name': ovs.util.PROGRAM_NAME}
+ sys.exit(0)
+
+def main(argv):
+ # Make stdout and stderr UTF-8, even if they are redirected to a file.
+ sys.stdout = codecs.getwriter("utf-8")(sys.stdout)
+ sys.stderr = codecs.getwriter("utf-8")(sys.stderr)
+
+ try:
+ options, args = getopt.gnu_getopt(argv[1:], 't:h',
+ ['timeout',
+ 'help'])
+ except getopt.GetoptError, geo:
+ sys.stderr.write("%s: %s\n" % (ovs.util.PROGRAM_NAME, geo.msg))
+ sys.exit(1)
+
+ for key, value in options:
+ if key in ['-h', '--help']:
+ usage()
+ elif key in ['-t', '--timeout']:
+ try:
+ timeout = int(value)
+ if timeout < 1:
+                    raise ValueError
+            except ValueError:
+ raise error.Error("value %s on -t or --timeout is not at "
+ "least 1" % value)
+ signal.alarm(timeout)
+ else:
+ sys.exit(0)
+
+ optKeys = [key for key, value in options]
+
+ if not args:
+ sys.stderr.write("%s: missing command argument "
+ "(use --help for help)\n" % ovs.util.PROGRAM_NAME)
+ sys.exit(1)
+
+ commands = {"default-atoms": (do_default_atoms, 0),
+ "default-data": (do_default_data, 0),
+ "parse-atomic-type": (do_parse_atomic_type, 1),
+ "parse-base-type": (do_parse_base_type, 1),
+ "parse-type": (do_parse_type, 1),
+ "parse-atoms": (do_parse_atoms, (2,)),
+ "parse-data": (do_parse_data, (2,)),
+ "sort-atoms": (do_sort_atoms, 2),
+ "parse-column": (do_parse_column, 2),
+ "parse-table": (do_parse_table, 2),
+ "parse-schema": (do_parse_schema, 1),
+ "idl": (do_idl, (1,))}
+
+ command_name = args[0]
+ args = args[1:]
+ if not command_name in commands:
+ sys.stderr.write("%s: unknown command \"%s\" "
+ "(use --help for help)\n" % (ovs.util.PROGRAM_NAME,
+ command_name))
+ sys.exit(1)
+
+ func, n_args = commands[command_name]
+ if type(n_args) == tuple:
+ if len(args) < n_args[0]:
+ sys.stderr.write("%s: \"%s\" requires at least %d arguments but "
+ "only %d provided\n"
+ % (ovs.util.PROGRAM_NAME, command_name,
+ n_args, len(args)))
+ sys.exit(1)
+ elif type(n_args) == int:
+ if len(args) != n_args:
+ sys.stderr.write("%s: \"%s\" requires %d arguments but %d "
+ "provided\n"
+ % (ovs.util.PROGRAM_NAME, command_name,
+ n_args, len(args)))
+ sys.exit(1)
+ else:
+ assert False
+
+ func(*args)
+
+if __name__ == '__main__':
+ try:
+ main(sys.argv)
+ except error.Error, e:
+ sys.stderr.write("%s\n" % e)
+ sys.exit(1)
--- /dev/null
+# Copyright (c) 2009, 2010 Nicira Networks.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import errno
+import logging
+import sys
+
+import ovs.reconnect
+
+def do_enable(arg):
+ r.enable(now)
+
+def do_disable(arg):
+ r.disable(now)
+
+def do_force_reconnect(arg):
+ r.force_reconnect(now)
+
+def error_from_string(s):
+ if not s:
+ return 0
+ elif s == "ECONNREFUSED":
+ return errno.ECONNREFUSED
+ elif s == "EOF":
+        return ovs.reconnect.EOF
+ else:
+ sys.stderr.write("unknown error '%s'\n" % s)
+ sys.exit(1)
+
+def do_disconnected(arg):
+ r.disconnected(now, error_from_string(arg))
+
+def do_connecting(arg):
+ r.connecting(now)
+
+def do_connect_failed(arg):
+ r.connect_failed(now, error_from_string(arg))
+
+def do_connected(arg):
+ r.connected(now)
+
+def do_received(arg):
+ r.received(now)
+
+def do_run(arg):
+ global now
+ if arg is not None:
+ now += int(arg)
+
+ action = r.run(now)
+ if action is None:
+ pass
+ elif action == ovs.reconnect.CONNECT:
+ print " should connect"
+ elif action == ovs.reconnect.DISCONNECT:
+ print " should disconnect"
+ elif action == ovs.reconnect.PROBE:
+ print " should send probe"
+ else:
+ assert False
+
+def do_advance(arg):
+ global now
+ now += int(arg)
+
+def do_timeout(arg):
+ global now
+ timeout = r.timeout(now)
+ if timeout >= 0:
+ print " advance %d ms" % timeout
+ now += timeout
+ else:
+ print " no timeout"
+
+def do_set_max_tries(arg):
+ r.set_max_tries(int(arg))
+
+def diff_stats(old, new):
+ if (old.state != new.state or
+ old.state_elapsed != new.state_elapsed or
+ old.backoff != new.backoff):
+ print(" in %s for %d ms (%d ms backoff)"
+ % (new.state, new.state_elapsed, new.backoff))
+
+ if (old.creation_time != new.creation_time or
+ old.last_received != new.last_received or
+ old.last_connected != new.last_connected):
+ print(" created %d, last received %d, last connected %d"
+ % (new.creation_time, new.last_received, new.last_connected))
+
+ if (old.n_successful_connections != new.n_successful_connections or
+ old.n_attempted_connections != new.n_attempted_connections or
+ old.seqno != new.seqno):
+ print(" %d successful connections out of %d attempts, seqno %d"
+ % (new.n_successful_connections, new.n_attempted_connections,
+ new.seqno))
+
+ if (old.is_connected != new.is_connected or
+ old.current_connection_duration != new.current_connection_duration or
+ old.total_connected_duration != new.total_connected_duration):
+ if new.is_connected:
+ negate = ""
+ else:
+ negate = "not "
+ print(" %sconnected (%d ms), total %d ms connected"
+ % (negate, new.current_connection_duration,
+ new.total_connected_duration))
+
+def do_set_passive(arg):
+ r.set_passive(True, now)
+
+def do_listening(arg):
+ r.listening(now)
+
+def do_listen_error(arg):
+ r.listen_error(now, int(arg))
+
+def main():
+ commands = {
+ "enable": do_enable,
+ "disable": do_disable,
+ "force-reconnect": do_force_reconnect,
+ "disconnected": do_disconnected,
+ "connecting": do_connecting,
+ "connect-failed": do_connect_failed,
+ "connected": do_connected,
+ "received": do_received,
+ "run": do_run,
+ "advance": do_advance,
+ "timeout": do_timeout,
+ "set-max-tries": do_set_max_tries,
+ "passive": do_set_passive,
+ "listening": do_listening,
+ "listen-error": do_listen_error
+ }
+
+ logging.basicConfig(level=logging.CRITICAL)
+
+ global now
+ global r
+
+ now = 1000
+ r = ovs.reconnect.Reconnect(now)
+ r.set_name("remote")
+ prev = r.get_stats(now)
+ print "### t=%d ###" % now
+ old_time = now
+ old_max_tries = r.get_max_tries()
+ while True:
+ line = sys.stdin.readline()
+ if line == "":
+ break
+
+ print line[:-1]
+ if line[0] == "#":
+ continue
+
+ args = line.split()
+ if len(args) == 0:
+ continue
+
+ command = args[0]
+ if len(args) > 1:
+ op = args[1]
+ else:
+ op = None
+ commands[command](op)
+
+ if old_time != now:
+ print
+ print "### t=%d ###" % now
+ old_time = now
+
+ cur = r.get_stats(now)
+ diff_stats(prev, cur)
+ prev = cur
+ if r.get_max_tries() != old_max_tries:
+ old_max_tries = r.get_max_tries()
+ print " %d tries left" % old_max_tries
+
+if __name__ == '__main__':
+ main()
+
+
m4_include([tests/classifier.at])
m4_include([tests/check-structs.at])
m4_include([tests/daemon.at])
+m4_include([tests/daemon-py.at])
m4_include([tests/vconn.at])
m4_include([tests/dir_name.at])
m4_include([tests/aes128.at])
m4_include([tests/uuid.at])
m4_include([tests/json.at])
m4_include([tests/jsonrpc.at])
+m4_include([tests/jsonrpc-py.at])
m4_include([tests/timeval.at])
m4_include([tests/lockfile.at])
m4_include([tests/reconnect.at])
Open vSwitch-aware replacement for Citrix script of the same name.
+ uuid.py
+
+ This is uuid.py from Python 2.5. It is installed into the
+ Open vSwitch RPM because XenServer 5.5 and 5.6 use Python 2.4,
+ which do not have uuid.py.
+
To install, build the Open vSwitch RPM with a command like this:
rpmbuild -D "openvswitch_version $full_version" \
xenserver/usr_sbin_brctl \
xenserver/usr_sbin_xen-bugtool \
xenserver/usr_share_openvswitch_scripts_refresh-network-uuids \
- xenserver/usr_share_openvswitch_scripts_sysconfig.template
+ xenserver/usr_share_openvswitch_scripts_sysconfig.template \
+ xenserver/uuid.py
install -d -m 755 $RPM_BUILD_ROOT/lib/modules/%{xen_version}/kernel/extra/openvswitch
find datapath/linux-2.6 -name *.ko -exec install -m 755 \{\} $RPM_BUILD_ROOT/lib/modules/%{xen_version}/kernel/extra/openvswitch \;
+install xenserver/uuid.py $RPM_BUILD_ROOT/usr/share/openvswitch/python
# Get rid of stuff we don't want to make RPM happy.
rm \
/etc/profile.d/openvswitch.sh
/lib/modules/%{xen_version}/kernel/extra/openvswitch/openvswitch_mod.ko
/lib/modules/%{xen_version}/kernel/extra/openvswitch/brcompat_mod.ko
+/usr/share/openvswitch/python/ovs/__init__.py
+/usr/share/openvswitch/python/ovs/daemon.py
+/usr/share/openvswitch/python/ovs/db/__init__.py
+/usr/share/openvswitch/python/ovs/db/data.py
+/usr/share/openvswitch/python/ovs/db/error.py
+/usr/share/openvswitch/python/ovs/db/idl.py
+/usr/share/openvswitch/python/ovs/db/parser.py
+/usr/share/openvswitch/python/ovs/db/schema.py
+/usr/share/openvswitch/python/ovs/db/types.py
+/usr/share/openvswitch/python/ovs/dirs.py
+/usr/share/openvswitch/python/ovs/fatal_signal.py
+/usr/share/openvswitch/python/ovs/json.py
+/usr/share/openvswitch/python/ovs/jsonrpc.py
+/usr/share/openvswitch/python/ovs/ovsuuid.py
+/usr/share/openvswitch/python/ovs/poller.py
+/usr/share/openvswitch/python/ovs/process.py
+/usr/share/openvswitch/python/ovs/reconnect.py
+/usr/share/openvswitch/python/ovs/socket_util.py
+/usr/share/openvswitch/python/ovs/stream.py
+/usr/share/openvswitch/python/ovs/timeval.py
+/usr/share/openvswitch/python/ovs/util.py
+/usr/share/openvswitch/python/uuid.py
/usr/share/openvswitch/scripts/refresh-network-uuids
/usr/share/openvswitch/scripts/interface-reconfigure
/usr/share/openvswitch/scripts/InterfaceReconfigure.py
/usr/share/man/man8/ovs-vsctl.8.gz
/usr/share/man/man8/ovs-vswitchd.8.gz
/var/lib/openvswitch
-%exclude /usr/lib/xsconsole/plugins-base/*.pyc
-%exclude /usr/lib/xsconsole/plugins-base/*.pyo
-%exclude /usr/share/openvswitch/scripts/*.pyc
-%exclude /usr/share/openvswitch/scripts/*.pyo
+%exclude /usr/lib/xsconsole/plugins-base/*.py[co]
+%exclude /usr/share/openvswitch/scripts/*.py[co]
+%exclude /usr/share/openvswitch/python/*.py[co]
+%exclude /usr/share/openvswitch/python/ovs/*.py[co]
+%exclude /usr/share/openvswitch/python/ovs/db/*.py[co]
--- /dev/null
+r"""UUID objects (universally unique identifiers) according to RFC 4122.
+
+This module provides immutable UUID objects (class UUID) and the functions
+uuid1(), uuid3(), uuid4(), uuid5() for generating version 1, 3, 4, and 5
+UUIDs as specified in RFC 4122.
+
+If all you want is a unique ID, you should probably call uuid1() or uuid4().
+Note that uuid1() may compromise privacy since it creates a UUID containing
+the computer's network address. uuid4() creates a random UUID.
+
+Typical usage:
+
+ >>> import uuid
+
+ # make a UUID based on the host ID and current time
+ >>> uuid.uuid1()
+ UUID('a8098c1a-f86e-11da-bd1a-00112444be1e')
+
+ # make a UUID using an MD5 hash of a namespace UUID and a name
+ >>> uuid.uuid3(uuid.NAMESPACE_DNS, 'python.org')
+ UUID('6fa459ea-ee8a-3ca4-894e-db77e160355e')
+
+ # make a random UUID
+ >>> uuid.uuid4()
+ UUID('16fd2706-8baf-433b-82eb-8c7fada847da')
+
+ # make a UUID using a SHA-1 hash of a namespace UUID and a name
+ >>> uuid.uuid5(uuid.NAMESPACE_DNS, 'python.org')
+ UUID('886313e1-3b8a-5372-9b90-0c9aee199e5d')
+
+ # make a UUID from a string of hex digits (braces and hyphens ignored)
+ >>> x = uuid.UUID('{00010203-0405-0607-0809-0a0b0c0d0e0f}')
+
+ # convert a UUID to a string of hex digits in standard form
+ >>> str(x)
+ '00010203-0405-0607-0809-0a0b0c0d0e0f'
+
+ # get the raw 16 bytes of the UUID
+ >>> x.bytes
+ '\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f'
+
+ # make a UUID from a 16-byte string
+ >>> uuid.UUID(bytes=x.bytes)
+ UUID('00010203-0405-0607-0809-0a0b0c0d0e0f')
+"""
+
+__author__ = 'Ka-Ping Yee <ping@zesty.ca>'
+
+RESERVED_NCS, RFC_4122, RESERVED_MICROSOFT, RESERVED_FUTURE = [
+ 'reserved for NCS compatibility', 'specified in RFC 4122',
+ 'reserved for Microsoft compatibility', 'reserved for future definition']
+
+class UUID(object):
+ """Instances of the UUID class represent UUIDs as specified in RFC 4122.
+ UUID objects are immutable, hashable, and usable as dictionary keys.
+ Converting a UUID to a string with str() yields something in the form
+ '12345678-1234-1234-1234-123456789abc'. The UUID constructor accepts
+ five possible forms: a similar string of hexadecimal digits, or a tuple
+ of six integer fields (with 32-bit, 16-bit, 16-bit, 8-bit, 8-bit, and
+ 48-bit values respectively) as an argument named 'fields', or a string
+ of 16 bytes (with all the integer fields in big-endian order) as an
+ argument named 'bytes', or a string of 16 bytes (with the first three
+ fields in little-endian order) as an argument named 'bytes_le', or a
+ single 128-bit integer as an argument named 'int'.
+
+ UUIDs have these read-only attributes:
+
+ bytes the UUID as a 16-byte string (containing the six
+ integer fields in big-endian byte order)
+
+ bytes_le the UUID as a 16-byte string (with time_low, time_mid,
+ and time_hi_version in little-endian byte order)
+
+ fields a tuple of the six integer fields of the UUID,
+ which are also available as six individual attributes
+ and two derived attributes:
+
+ time_low the first 32 bits of the UUID
+ time_mid the next 16 bits of the UUID
+ time_hi_version the next 16 bits of the UUID
+ clock_seq_hi_variant the next 8 bits of the UUID
+ clock_seq_low the next 8 bits of the UUID
+ node the last 48 bits of the UUID
+
+ time the 60-bit timestamp
+ clock_seq the 14-bit sequence number
+
+ hex the UUID as a 32-character hexadecimal string
+
+ int the UUID as a 128-bit integer
+
+ urn the UUID as a URN as specified in RFC 4122
+
+ variant the UUID variant (one of the constants RESERVED_NCS,
+ RFC_4122, RESERVED_MICROSOFT, or RESERVED_FUTURE)
+
+ version the UUID version number (1 through 5, meaningful only
+ when the variant is RFC_4122)
+ """
+
+ def __init__(self, hex=None, bytes=None, bytes_le=None, fields=None,
+ int=None, version=None):
+ r"""Create a UUID from either a string of 32 hexadecimal digits,
+ a string of 16 bytes as the 'bytes' argument, a string of 16 bytes
+ in little-endian order as the 'bytes_le' argument, a tuple of six
+ integers (32-bit time_low, 16-bit time_mid, 16-bit time_hi_version,
+ 8-bit clock_seq_hi_variant, 8-bit clock_seq_low, 48-bit node) as
+ the 'fields' argument, or a single 128-bit integer as the 'int'
+ argument. When a string of hex digits is given, curly braces,
+ hyphens, and a URN prefix are all optional. For example, these
+ expressions all yield the same UUID:
+
+ UUID('{12345678-1234-5678-1234-567812345678}')
+ UUID('12345678123456781234567812345678')
+ UUID('urn:uuid:12345678-1234-5678-1234-567812345678')
+ UUID(bytes='\x12\x34\x56\x78'*4)
+ UUID(bytes_le='\x78\x56\x34\x12\x34\x12\x78\x56' +
+ '\x12\x34\x56\x78\x12\x34\x56\x78')
+ UUID(fields=(0x12345678, 0x1234, 0x5678, 0x12, 0x34, 0x567812345678))
+ UUID(int=0x12345678123456781234567812345678)
+
+ Exactly one of 'hex', 'bytes', 'bytes_le', 'fields', or 'int' must
+ be given. The 'version' argument is optional; if given, the resulting
+ UUID will have its variant and version set according to RFC 4122,
+ overriding the given 'hex', 'bytes', 'bytes_le', 'fields', or 'int'.
+ """
+
+ if [hex, bytes, bytes_le, fields, int].count(None) != 4:
+ raise TypeError('need one of hex, bytes, bytes_le, fields, or int')
+ if hex is not None:
+ hex = hex.replace('urn:', '').replace('uuid:', '')
+ hex = hex.strip('{}').replace('-', '')
+ if len(hex) != 32:
+ raise ValueError('badly formed hexadecimal UUID string')
+ int = long(hex, 16)
+ if bytes_le is not None:
+ if len(bytes_le) != 16:
+ raise ValueError('bytes_le is not a 16-char string')
+ bytes = (bytes_le[3] + bytes_le[2] + bytes_le[1] + bytes_le[0] +
+ bytes_le[5] + bytes_le[4] + bytes_le[7] + bytes_le[6] +
+ bytes_le[8:])
+ if bytes is not None:
+ if len(bytes) != 16:
+ raise ValueError('bytes is not a 16-char string')
+ int = long(('%02x'*16) % tuple(map(ord, bytes)), 16)
+ if fields is not None:
+ if len(fields) != 6:
+ raise ValueError('fields is not a 6-tuple')
+ (time_low, time_mid, time_hi_version,
+ clock_seq_hi_variant, clock_seq_low, node) = fields
+ if not 0 <= time_low < 1<<32L:
+ raise ValueError('field 1 out of range (need a 32-bit value)')
+ if not 0 <= time_mid < 1<<16L:
+ raise ValueError('field 2 out of range (need a 16-bit value)')
+ if not 0 <= time_hi_version < 1<<16L:
+ raise ValueError('field 3 out of range (need a 16-bit value)')
+ if not 0 <= clock_seq_hi_variant < 1<<8L:
+ raise ValueError('field 4 out of range (need an 8-bit value)')
+ if not 0 <= clock_seq_low < 1<<8L:
+ raise ValueError('field 5 out of range (need an 8-bit value)')
+ if not 0 <= node < 1<<48L:
+ raise ValueError('field 6 out of range (need a 48-bit value)')
+ clock_seq = (clock_seq_hi_variant << 8L) | clock_seq_low
+ int = ((time_low << 96L) | (time_mid << 80L) |
+ (time_hi_version << 64L) | (clock_seq << 48L) | node)
+ if int is not None:
+ if not 0 <= int < 1<<128L:
+ raise ValueError('int is out of range (need a 128-bit value)')
+ if version is not None:
+ if not 1 <= version <= 5:
+ raise ValueError('illegal version number')
+ # Set the variant to RFC 4122.
+ int &= ~(0xc000 << 48L)
+ int |= 0x8000 << 48L
+ # Set the version number.
+ int &= ~(0xf000 << 64L)
+ int |= version << 76L
+ self.__dict__['int'] = int
+
+ def __cmp__(self, other):
+ if isinstance(other, UUID):
+ return cmp(self.int, other.int)
+ return NotImplemented
+
+ def __hash__(self):
+ return hash(self.int)
+
+ def __int__(self):
+ return self.int
+
+ def __repr__(self):
+ return 'UUID(%r)' % str(self)
+
+ def __setattr__(self, name, value):
+ raise TypeError('UUID objects are immutable')
+
+ def __str__(self):
+ hex = '%032x' % self.int
+ return '%s-%s-%s-%s-%s' % (
+ hex[:8], hex[8:12], hex[12:16], hex[16:20], hex[20:])
+
+ def get_bytes(self):
+ bytes = ''
+ for shift in range(0, 128, 8):
+ bytes = chr((self.int >> shift) & 0xff) + bytes
+ return bytes
+
+ bytes = property(get_bytes)
+
+ def get_bytes_le(self):
+ bytes = self.bytes
+ return (bytes[3] + bytes[2] + bytes[1] + bytes[0] +
+ bytes[5] + bytes[4] + bytes[7] + bytes[6] + bytes[8:])
+
+ bytes_le = property(get_bytes_le)
+
+ def get_fields(self):
+ return (self.time_low, self.time_mid, self.time_hi_version,
+ self.clock_seq_hi_variant, self.clock_seq_low, self.node)
+
+ fields = property(get_fields)
+
+ def get_time_low(self):
+ return self.int >> 96L
+
+ time_low = property(get_time_low)
+
+ def get_time_mid(self):
+ return (self.int >> 80L) & 0xffff
+
+ time_mid = property(get_time_mid)
+
+ def get_time_hi_version(self):
+ return (self.int >> 64L) & 0xffff
+
+ time_hi_version = property(get_time_hi_version)
+
+ def get_clock_seq_hi_variant(self):
+ return (self.int >> 56L) & 0xff
+
+ clock_seq_hi_variant = property(get_clock_seq_hi_variant)
+
+ def get_clock_seq_low(self):
+ return (self.int >> 48L) & 0xff
+
+ clock_seq_low = property(get_clock_seq_low)
+
+ def get_time(self):
+ return (((self.time_hi_version & 0x0fffL) << 48L) |
+ (self.time_mid << 32L) | self.time_low)
+
+ time = property(get_time)
+
+ def get_clock_seq(self):
+ return (((self.clock_seq_hi_variant & 0x3fL) << 8L) |
+ self.clock_seq_low)
+
+ clock_seq = property(get_clock_seq)
+
+ def get_node(self):
+ return self.int & 0xffffffffffff
+
+ node = property(get_node)
+
+ def get_hex(self):
+ return '%032x' % self.int
+
+ hex = property(get_hex)
+
+ def get_urn(self):
+ return 'urn:uuid:' + str(self)
+
+ urn = property(get_urn)
+
+ def get_variant(self):
+ if not self.int & (0x8000 << 48L):
+ return RESERVED_NCS
+ elif not self.int & (0x4000 << 48L):
+ return RFC_4122
+ elif not self.int & (0x2000 << 48L):
+ return RESERVED_MICROSOFT
+ else:
+ return RESERVED_FUTURE
+
+ variant = property(get_variant)
+
+ def get_version(self):
+ # The version bits are only meaningful for RFC 4122 UUIDs.
+ if self.variant == RFC_4122:
+ return int((self.int >> 76L) & 0xf)
+
+ version = property(get_version)
+
+def _find_mac(command, args, hw_identifiers, get_index):  # scan command output for a MAC address
+    import os
+    for dir in ['', '/sbin/', '/usr/sbin']:  # os.path.join tolerates the missing trailing slash
+        executable = os.path.join(dir, command)
+        if not os.path.exists(executable):
+            continue
+
+        try:
+            # LC_ALL to get English output, 2>/dev/null to
+            # prevent output on stderr
+            cmd = 'LC_ALL=C %s %s 2>/dev/null' % (executable, args)
+            pipe = os.popen(cmd)
+        except IOError:
+            continue
+
+        for line in pipe:
+            words = line.lower().split()
+            for i in range(len(words)):
+                if words[i] in hw_identifiers:  # keyword found; get_index says where the MAC is
+                    return int(words[get_index(i)].replace(':', ''), 16)
+    return None
+
+def _ifconfig_getnode():
+    """Get the hardware address on Unix by running ifconfig."""
+
+    # This works on Linux ('' or '-a'), Tru64 ('-av'), but not all Unixes.
+    for args in ('', '-a', '-av'):
+        mac = _find_mac('ifconfig', args, ['hwaddr', 'ether'], lambda i: i+1)  # MAC follows the keyword
+        if mac:
+            return mac
+
+    import socket
+    ip_addr = socket.gethostbyname(socket.gethostname())
+
+    # Try getting the MAC addr from arp based on our IP address (Solaris).
+    mac = _find_mac('arp', '-an', [ip_addr], lambda i: -1)  # MAC is the last word on the line
+    if mac:
+        return mac
+
+    # This might work on HP-UX.
+    mac = _find_mac('lanscan', '-ai', ['lan0'], lambda i: 0)  # MAC is the first word
+    if mac:
+        return mac
+
+    return None
+
+def _ipconfig_getnode():
+    """Get the hardware address on Windows by running ipconfig.exe."""
+    import os, re
+    dirs = ['', r'c:\windows\system32', r'c:\winnt\system32']
+    try:
+        import ctypes
+        buffer = ctypes.create_string_buffer(300)
+        ctypes.windll.kernel32.GetSystemDirectoryA(buffer, 300)
+        dirs.insert(0, buffer.value.decode('mbcs'))  # prefer the real system directory
+    except:
+        pass  # best-effort: fall back to the hard-coded candidates above
+    for dir in dirs:
+        try:
+            pipe = os.popen(os.path.join(dir, 'ipconfig') + ' /all')
+        except IOError:
+            continue
+        for line in pipe:
+            value = line.split(':')[-1].strip().lower()
+            if re.match('([0-9a-f][0-9a-f]-){5}[0-9a-f][0-9a-f]', value):  # dash-separated MAC
+                return int(value.replace('-', ''), 16)
+
+def _netbios_getnode():
+    """Get the hardware address on Windows using NetBIOS calls.
+    See http://support.microsoft.com/kb/118623 for details."""
+    import win32wnet, netbios
+    ncb = netbios.NCB()
+    ncb.Command = netbios.NCBENUM  # enumerate LAN adapter numbers
+    ncb.Buffer = adapters = netbios.LANA_ENUM()
+    adapters._pack()
+    if win32wnet.Netbios(ncb) != 0:  # nonzero return == failure
+        return
+    adapters._unpack()
+    for i in range(adapters.length):
+        ncb.Reset()
+        ncb.Command = netbios.NCBRESET  # reset the adapter before querying it
+        ncb.Lana_num = ord(adapters.lana[i])
+        if win32wnet.Netbios(ncb) != 0:
+            continue
+        ncb.Reset()
+        ncb.Command = netbios.NCBASTAT  # adapter status query
+        ncb.Lana_num = ord(adapters.lana[i])
+        ncb.Callname = '*'.ljust(16)  # '*' queries the local adapter (per the KB article)
+        ncb.Buffer = status = netbios.ADAPTER_STATUS()
+        if win32wnet.Netbios(ncb) != 0:
+            continue
+        status._unpack()
+        bytes = map(ord, status.adapter_address)
+        return ((bytes[0]<<40L) + (bytes[1]<<32L) + (bytes[2]<<24L) +
+                (bytes[3]<<16L) + (bytes[4]<<8L) + bytes[5])
+
+# Thanks to Thomas Heller for ctypes and for his help with its use here.
+
+# If ctypes is available, use it to find system routines for UUID generation.
+_uuid_generate_random = _uuid_generate_time = _UuidCreate = None
+try:
+    import ctypes, ctypes.util
+    _buffer = ctypes.create_string_buffer(16)  # shared 16-byte output buffer for the C routines
+
+    # The uuid_generate_* routines are provided by libuuid on at least
+    # Linux and FreeBSD, and provided by libc on Mac OS X.
+    for libname in ['uuid', 'c']:
+        try:
+            lib = ctypes.CDLL(ctypes.util.find_library(libname))
+        except:
+            continue  # library missing or unloadable; try the next candidate
+        if hasattr(lib, 'uuid_generate_random'):
+            _uuid_generate_random = lib.uuid_generate_random
+        if hasattr(lib, 'uuid_generate_time'):
+            _uuid_generate_time = lib.uuid_generate_time
+
+    # On Windows prior to 2000, UuidCreate gives a UUID containing the
+    # hardware address. On Windows 2000 and later, UuidCreate makes a
+    # random UUID and UuidCreateSequential gives a UUID containing the
+    # hardware address. These routines are provided by the RPC runtime.
+    # NOTE: at least on Tim's WinXP Pro SP2 desktop box, while the last
+    # 6 bytes returned by UuidCreateSequential are fixed, they don't appear
+    # to bear any relationship to the MAC address of any network device
+    # on the box.
+    try:
+        lib = ctypes.windll.rpcrt4  # raises on non-Windows; leaves lib = None below
+    except:
+        lib = None
+    _UuidCreate = getattr(lib, 'UuidCreateSequential',  # getattr on lib == None safely yields None
+                          getattr(lib, 'UuidCreate', None))
+except:
+    pass  # no usable ctypes; the popen/random strategies are used instead
+
+def _unixdll_getnode():
+    """Get the hardware address on Unix using ctypes."""
+    _uuid_generate_time(_buffer)  # fills the module-level _buffer with a version-1 UUID
+    return UUID(bytes=_buffer.raw).node
+
+def _windll_getnode():
+    """Get the hardware address on Windows using ctypes."""
+    if _UuidCreate(_buffer) == 0:  # 0 == RPC_S_OK; returns None on failure
+        return UUID(bytes=_buffer.raw).node
+
+def _random_getnode():
+    """Get a random node ID, with eighth bit set as suggested by RFC 4122."""
+    import random
+    return random.randrange(0, 1<<48L) | 0x010000000000L  # OR sets the multicast bit
+
+_node = None  # cache: computed once by getnode(), reused on later calls
+
+def getnode():
+    """Get the hardware address as a 48-bit positive integer.
+
+    The first time this runs, it may launch a separate program, which could
+    be quite slow. If all attempts to obtain the hardware address fail, we
+    choose a random 48-bit number with its eighth bit set to 1 as recommended
+    in RFC 4122.
+    """
+
+    global _node
+    if _node is not None:
+        return _node
+
+    import sys
+    if sys.platform == 'win32':
+        getters = [_windll_getnode, _netbios_getnode, _ipconfig_getnode]
+    else:
+        getters = [_unixdll_getnode, _ifconfig_getnode]
+
+    for getter in getters + [_random_getnode]:  # _random_getnode always yields a value
+        try:
+            _node = getter()
+        except:
+            continue  # strategy unavailable on this platform; try the next
+        if _node is not None:
+            return _node
+
+_last_timestamp = None  # last 100-ns timestamp handed out by uuid1()
+
+def uuid1(node=None, clock_seq=None):
+    """Generate a UUID from a host ID, sequence number, and the current time.
+    If 'node' is not given, getnode() is used to obtain the hardware
+    address. If 'clock_seq' is given, it is used as the sequence number;
+    otherwise a random 14-bit sequence number is chosen."""
+
+    # When the system provides a version-1 UUID generator, use it (but don't
+    # use UuidCreate here because its UUIDs don't conform to RFC 4122).
+    if _uuid_generate_time and node is clock_seq is None:
+        _uuid_generate_time(_buffer)
+        return UUID(bytes=_buffer.raw)
+
+    global _last_timestamp
+    import time
+    nanoseconds = int(time.time() * 1e9)
+    # 0x01b21dd213814000 is the number of 100-ns intervals between the
+    # UUID epoch 1582-10-15 00:00:00 and the Unix epoch 1970-01-01 00:00:00.
+    timestamp = int(nanoseconds/100) + 0x01b21dd213814000L
+    if timestamp <= _last_timestamp:  # None < int in Python 2, so the first call passes
+        timestamp = _last_timestamp + 1  # keep timestamps strictly increasing
+    _last_timestamp = timestamp
+    if clock_seq is None:
+        import random
+        clock_seq = random.randrange(1<<14L) # instead of stable storage
+    time_low = timestamp & 0xffffffffL
+    time_mid = (timestamp >> 32L) & 0xffffL
+    time_hi_version = (timestamp >> 48L) & 0x0fffL
+    clock_seq_low = clock_seq & 0xffL
+    clock_seq_hi_variant = (clock_seq >> 8L) & 0x3fL
+    if node is None:
+        node = getnode()
+    return UUID(fields=(time_low, time_mid, time_hi_version,
+                        clock_seq_hi_variant, clock_seq_low, node), version=1)
+
+def uuid3(namespace, name):
+    """Generate a UUID from the MD5 hash of a namespace UUID and a name."""
+    import md5  # Python 2 pre-hashlib module
+    hash = md5.md5(namespace.bytes + name).digest()  # 16-byte digest
+    return UUID(bytes=hash[:16], version=3)
+
+def uuid4():
+    """Generate a random UUID."""
+
+    # When the system provides a version-4 UUID generator, use it.
+    if _uuid_generate_random:
+        _uuid_generate_random(_buffer)
+        return UUID(bytes=_buffer.raw)
+
+    # Otherwise, get randomness from urandom or the 'random' module.
+    try:
+        import os
+        return UUID(bytes=os.urandom(16), version=4)
+    except:
+        import random  # urandom unavailable: fall back to the non-cryptographic PRNG
+        bytes = [chr(random.randrange(256)) for i in range(16)]
+        return UUID(bytes=bytes, version=4)
+
+def uuid5(namespace, name):
+    """Generate a UUID from the SHA-1 hash of a namespace UUID and a name."""
+    import sha  # Python 2 pre-hashlib module
+    hash = sha.sha(namespace.bytes + name).digest()
+    return UUID(bytes=hash[:16], version=5)  # SHA-1 digest is 20 bytes; keep the first 16
+
+# The following standard UUIDs are for use with uuid3() or uuid5().
+
+NAMESPACE_DNS = UUID('6ba7b810-9dad-11d1-80b4-00c04fd430c8')  # for fully-qualified domain names
+NAMESPACE_URL = UUID('6ba7b811-9dad-11d1-80b4-00c04fd430c8')  # for URLs
+NAMESPACE_OID = UUID('6ba7b812-9dad-11d1-80b4-00c04fd430c8')  # for ISO OIDs
+NAMESPACE_X500 = UUID('6ba7b814-9dad-11d1-80b4-00c04fd430c8')  # for X.500 distinguished names