Files under the xenserver directory are licensed on a file-by-file
basis. Some files are under an uncertain license that may not be
DFSG-compliant or GPL-compatible. Refer to each file for details.
+
+The files under ovsdb/simplejson are covered by the following license:
+
+ Copyright (c) 2006 Bob Ippolito
+
+ Permission is hereby granted, free of charge, to any person
+ obtaining a copy of this software and associated documentation
+ files (the "Software"), to deal in the Software without
+ restriction, including without limitation the rights to use, copy,
+ modify, merge, publish, distribute, sublicense, and/or sell copies
+ of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
AM_LDFLAGS = -export-dynamic
endif
+BUILT_SOURCES =
CLEANFILES =
DISTCLEANFILES =
EXTRA_DIST = INSTALL.bridge \
-e 's,[@]localstatedir[@],$(localstatedir),g' \
-e 's,[@]pkgdatadir[@],$(pkgdatadir),g' \
-e 's,[@]sysconfdir[@],$(sysconfdir),g' \
+ -e 's,[@]abs_top_srcdir[@],$(abs_top_srcdir),g' \
> $@.tmp
@if head -n 1 $@.tmp | grep -q '#!'; then \
echo chmod +x $@.tmp; \
VLOG_MODULE(openflowd)
VLOG_MODULE(ovsdb_client)
VLOG_MODULE(ovsdb_file)
+VLOG_MODULE(ovsdb_idl)
VLOG_MODULE(ovsdb_log)
VLOG_MODULE(ovsdb_jsonrpc_server)
VLOG_MODULE(ovsdb_server)
man_MANS += ovsdb/ovsdb-server.1
DISTCLEANFILES += ovsdb/ovsdb-server.1
EXTRA_DIST += ovsdb/ovsdb-server.1.in
+
+# ovsdb-idlc
+EXTRA_DIST += \
+ ovsdb/simplejson/__init__.py \
+ ovsdb/simplejson/_speedups.c \
+ ovsdb/simplejson/decoder.py \
+ ovsdb/simplejson/encoder.py \
+ ovsdb/simplejson/scanner.py \
+ ovsdb/simplejson/tests/__init__.py \
+ ovsdb/simplejson/tests/test_check_circular.py \
+ ovsdb/simplejson/tests/test_decode.py \
+ ovsdb/simplejson/tests/test_default.py \
+ ovsdb/simplejson/tests/test_dump.py \
+ ovsdb/simplejson/tests/test_encode_basestring_ascii.py \
+ ovsdb/simplejson/tests/test_fail.py \
+ ovsdb/simplejson/tests/test_float.py \
+ ovsdb/simplejson/tests/test_indent.py \
+ ovsdb/simplejson/tests/test_pass1.py \
+ ovsdb/simplejson/tests/test_pass2.py \
+ ovsdb/simplejson/tests/test_pass3.py \
+ ovsdb/simplejson/tests/test_recursion.py \
+ ovsdb/simplejson/tests/test_scanstring.py \
+ ovsdb/simplejson/tests/test_separators.py \
+ ovsdb/simplejson/tests/test_unicode.py \
+ ovsdb/simplejson/tool.py
+noinst_SCRIPTS += ovsdb/ovsdb-idlc
+EXTRA_DIST += \
+ ovsdb/ovsdb-idlc.in \
+ ovsdb/ovsdb-idlc.1
+DISTCLEANFILES += ovsdb/ovsdb-idlc
--- /dev/null
+.\" -*- nroff -*-
+.TH ovsdb\-idlc 1 "November 2009" "Open vSwitch" "Open vSwitch Manual"
+.ds PN ovsdb\-idlc
+.
+.SH NAME
+ovsdb\-idlc \- Open vSwitch IDL (Interface Definition Language) compiler
+.
+.SH SYNOPSIS
+\fBovsdb\-idlc \fBvalidate\fI schema\fR
+.br
+\fBovsdb\-idlc \fBovsdb\-schema\fI schema\fR
+.br
+\fBovsdb\-idlc \fBc\-idl\-header\fI schema\fR
+.br
+\fBovsdb\-idlc \fBc\-idl\-source\fI schema\fR
+.br
+\fBovsdb\-idlc --help\fR
+.br
+\fBovsdb\-idlc --version\fR
+.
+.SH DESCRIPTION
+The \fBovsdb\-idlc\fR program is a command-line tool for translating
+Open vSwitch database interface definition language (IDL) schemas into
+other formats. It is used while building Open vSwitch, not at
+installation or configuration time. Thus, it is not normally
+installed as part of Open vSwitch.
+.
+.PP
+The \fIschema\fR files that \fBovsdb\-idlc\fR takes as input use the
+same format as OVSDB schemas, as specified in the OVSDB specification,
+with a few additions:
+.
+.IP "\fB//\fR comments"
+Lines that begin with \fB//\fR (two forward slashes) are ignored and
+thus can be used for comments.
+.
+.IP "\fB""\fBkeyRefTable\fR"" member of <type>"
+A <type> whose \fBkey\fR is \fB"uuid"\fR may have an additional member
+named \fB"keyRefTable"\fR, whose value is a table name. This
+expresses the constraint that keys of the given <type> are UUIDs that
+reference rows in the named table. This allows the IDL to supply a
+structure pointer in place of a raw UUID in its marshalled version of
+the given type.
+.
+.IP "\fB""valueRefTable""\fR member of <type>"
+Analogous to \fB"keyRefTable"\fR in meaning and effect, except that it
+applies to the \fB"value"\fR member of the <type>.
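+.PP
+For example (the \fBBridge\fR table name below is purely
+illustrative), a <type> written as
+\fB{"key": "uuid", "keyRefTable": "Bridge", "min": 0, "max": "unlimited"}\fR
+declares a set of UUIDs that reference rows in the \fBBridge\fR table.
+The generated C bindings represent such a column as an array of
+pointers to the corresponding table structure, rather than as an array
+of raw UUIDs.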
+.SS "Commands"
+.
+.IP "\fBvalidate\fI schema\fR"
+Reads \fIschema\fR and checks its format, without producing any output.
+.
+.IP "\fBovsdb\-schema\fI schema\fR"
+Reads \fIschema\fR and prints it on standard output with the parts
+that are not part of the OVSDB schema specification stripped out.
+.
+.IP "\fBc\-idl\-header\fI schema\fR"
+Reads \fIschema\fR and prints on standard output a C header file that
+defines a structure for each table defined by the schema.
+.
+.IP "\fBc\-idl\-source\fI schema\fR"
+Reads \fIschema\fR and prints on standard output a C source file that
+implements C bindings for the database defined by the schema.
+.
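+.PP
+For example (the file names here are purely illustrative), a build
+step might generate the C bindings for a schema with:
+.br
+\fBovsdb\-idlc c\-idl\-header my\-schema.ovsschema > my\-idl.h\fR
+.br
+\fBovsdb\-idlc c\-idl\-source my\-schema.ovsschema > my\-idl.c\fR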
+.SS "Options"
+.so lib/common.man
+.
+.SH "BUGS"
+\fBovsdb\-idlc\fR is more lenient about the format of OVSDB schemas
+than other OVSDB tools, so the \fBovsdb\-schema\fR command may output
+schemas that other programs refuse to read.
+.
+.SH "SEE ALSO"
+The OVSDB specification.
--- /dev/null
+#! @PYTHON@
+
+import getopt
+import re
+import sha
+import sys
+
+sys.path.insert(0, "@abs_top_srcdir@/ovsdb")
+import simplejson as json
+
+argv0 = sys.argv[0]
+
+class Error(Exception):
+ def __init__(self, msg):
+ Exception.__init__(self)
+ self.msg = msg
+
+def getMember(json, name, validTypes, description, default=None):
+ if name in json:
+ member = json[name]
+ if type(member) not in validTypes:
+ raise Error("%s: type mismatch for '%s' member"
+ % (description, name))
+ return member
+ return default
+
+def mustGetMember(json, name, expectedType, description):
+ member = getMember(json, name, expectedType, description)
+ if member == None:
+ raise Error("%s: missing '%s' member" % (description, name))
+ return member
+
+class DbSchema:
+ def __init__(self, name, comment, tables):
+ self.name = name
+ self.comment = comment
+ self.tables = tables
+
+ @staticmethod
+ def fromJson(json):
+ name = mustGetMember(json, 'name', [unicode], 'database')
+ comment = getMember(json, 'comment', [unicode], 'database')
+ tablesJson = mustGetMember(json, 'tables', [dict], 'database')
+ tables = {}
+ for tableName, tableJson in tablesJson.iteritems():
+ tables[tableName] = TableSchema.fromJson(tableJson, "%s table" % tableName)
+ return DbSchema(name, comment, tables)
+
+ def toJson(self):
+ d = {"name": self.name,
+ "tables": {}}
+ for name, table in self.tables.iteritems():
+ d["tables"][name] = table.toJson()
+ if self.comment != None:
+ d["comment"] = self.comment
+ return d
+
+class TableSchema:
+ def __init__(self, comment, columns):
+ self.comment = comment
+ self.columns = columns
+
+ @staticmethod
+ def fromJson(json, description):
+ comment = getMember(json, 'comment', [unicode], description)
+ columnsJson = mustGetMember(json, 'columns', [dict], description)
+ columns = {}
+ for columnName, columnJson in columnsJson.iteritems():
+ columns[columnName] = ColumnSchema.fromJson(
+ columnJson, "column %s in %s" % (columnName, description))
+ return TableSchema(comment, columns)
+
+ def toJson(self):
+ d = {"columns": {}}
+ for name, column in self.columns.iteritems():
+ d["columns"][name] = column.toJson()
+ if self.comment != None:
+ d["comment"] = self.comment
+ return d
+
+class ColumnSchema:
+ def __init__(self, comment, type, persistent):
+ self.comment = comment
+ self.type = type
+ self.persistent = persistent
+
+ @staticmethod
+ def fromJson(json, description):
+ comment = getMember(json, 'comment', [unicode], description)
+ type = Type.fromJson(mustGetMember(json, 'type', [dict, unicode],
+ description),
+ 'type of %s' % description)
+ ephemeral = getMember(json, 'ephemeral', [bool], description)
+ persistent = ephemeral != True
+ return ColumnSchema(comment, type, persistent)
+
+ def toJson(self):
+ d = {"type": self.type.toJson()}
+ if self.persistent == False:
+ d["ephemeral"] = True
+ if self.comment != None:
+ d["comment"] = self.comment
+ return d
+
+class Type:
+ def __init__(self, key, keyRefTable=None, value=None, valueRefTable=None,
+ min=1, max=1):
+ self.key = key
+ self.keyRefTable = keyRefTable
+ self.value = value
+ self.valueRefTable = valueRefTable
+ self.min = min
+ self.max = max
+
+ @staticmethod
+ def fromJson(json, description):
+ if type(json) == unicode:
+ return Type(json)
+ else:
+ key = mustGetMember(json, 'key', [unicode], description)
+ keyRefTable = getMember(json, 'keyRefTable', [unicode], description)
+ value = getMember(json, 'value', [unicode], description)
+ valueRefTable = getMember(json, 'valueRefTable', [unicode], description)
+ min = getMember(json, 'min', [int], description, 1)
+ max = getMember(json, 'max', [int, unicode], description, 1)
+ return Type(key, keyRefTable, value, valueRefTable, min, max)
+
+ def toJson(self):
+ if self.value == None and self.min == 1 and self.max == 1:
+ return self.key
+ else:
+ d = {"key": self.key}
+ if self.value != None:
+ d["value"] = self.value
+ if self.min != 1:
+ d["min"] = self.min
+ if self.max != 1:
+ d["max"] = self.max
+ return d
+
+def parseSchema(filename):
+ file = open(filename, "r")
+ s = ""
+ for line in file:
+ if not line.startswith('//'):
+ s += line
+ return DbSchema.fromJson(json.loads(s))
+
+def cBaseType(prefix, type, refTable=None):
+ if type == 'uuid' and refTable:
+ return "struct %s%s *" % (prefix, refTable.lower())
+ else:
+ return {'integer': 'int64_t ',
+ 'real': 'double ',
+ 'uuid': 'struct uuid ',
+ 'boolean': 'bool ',
+ 'string': 'char *'}[type]
+
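+# Illustrative note (not emitted anywhere): for a hypothetical "Bridge" table
+# with a column "ports" whose type is {"key": "uuid", "keyRefTable": "Port",
+# "min": 0, "max": "unlimited"}, cBaseType() above yields "struct ovsrec_port *",
+# so printCIDLHeader() below declares the column roughly as:
+#
+# struct ovsrec_port **ports;
+# size_t n_ports;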
+def printCIDLHeader(schema):
+ prefix = 'ovsrec_'
+ print '''\
+/* Generated automatically -- do not modify! -*- buffer-read-only: t -*- */
+
+#ifndef %(prefix)sIDL_HEADER
+#define %(prefix)sIDL_HEADER 1
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+#include "uuid.h"''' % {'prefix': prefix.upper()}
+ for tableName, table in schema.tables.iteritems():
+ print
+ if table.comment != None:
+ print "/* %s table (%s). */" % (tableName, table.comment)
+ else:
+ print "/* %s table. */" % (tableName)
+ print "struct %s%s {" % (prefix, tableName.lower())
+ print "\t/* Columns automatically included in every table. */"
+ print "\tstruct uuid uuid_;"
+ print "\tstruct uuid version_;"
+ for columnName, column in table.columns.iteritems():
+ print "\n\t/* %s column. */" % columnName
+ type = column.type
+ if type.min == 1 and type.max == 1:
+ singleton = True
+ pointer = ''
+ else:
+ singleton = False
+ pointer = '*'
+ if type.value:
+ print "\tkey_%s%s%s;" % (cBaseType(prefix, type.key, type.keyRefTable), pointer, columnName)
+ print "\tvalue_%s%s%s;" % (cBaseType(prefix, type.value, type.valueRefTable), pointer, columnName)
+ else:
+ print "\t%s%s%s;" % (cBaseType(prefix, type.key, type.keyRefTable), pointer, columnName)
+ if not singleton:
+ print "\tsize_t n_%s;" % columnName
+ print "};"
+ print
+ print "#endif /* %(prefix)sIDL_HEADER */" % {'prefix': prefix.upper()}
+
+def ovsdb_escape(string):
+ def escape(match):
+ c = match.group(0)
+ if c == '\0':
+ raise Error("strings may not contain null bytes")
+ elif c == '\\':
+ return '\\\\'
+ elif c == '\n':
+ return '\\n'
+ elif c == '\r':
+ return '\\r'
+ elif c == '\t':
+ return '\\t'
+ elif c == '\b':
+ return '\\b'
+ elif c == '\a':
+ return '\\a'
+ else:
+ return '\\x%02x' % ord(c)
+ return re.sub(r'["\\\000-\037]', escape, string)
+
+def printOVSDBSchema(schema):
+ json.dump(schema.toJson(), sys.stdout, sort_keys=True, indent=2)
+
+def usage():
+ print """\
+%(argv0)s: ovsdb schema compiler
+usage: %(argv0)s [OPTIONS] ACTION SCHEMA
+where SCHEMA is the ovsdb schema to read (in JSON format).
+
+One of the following actions must be specified:
+ validate validate schema without taking any other action
+ c-idl-header print C header file for IDL
+ c-idl-source print C source file for IDL implementation
+ ovsdb-schema print ovsdb parseable schema
+
+The following options are also available:
+ -h, --help display this help message
+ -V, --version display version information\
+""" % {'argv0': argv0}
+ sys.exit(0)
+
+if __name__ == "__main__":
+ try:
+ try:
+ options, args = getopt.gnu_getopt(sys.argv[1:], 'hV',
+ ['help',
+ 'version'])
+ except getopt.GetoptError, geo:
+ sys.stderr.write("%s: %s\n" % (argv0, geo.msg))
+ sys.exit(1)
+
+ optKeys = [key for key, value in options]
+ if '-h' in optKeys or '--help' in optKeys:
+ usage()
+ elif '-V' in optKeys or '--version' in optKeys:
+ print "ovsdb-idlc (Open vSwitch) @VERSION@"
+ sys.exit(0)
+
+ if len(args) != 2:
+ sys.stderr.write("%s: exactly two non-option arguments are "
+ "required (use --help for help)\n" % argv0)
+ sys.exit(1)
+
+ action, inputFile = args
+ schema = parseSchema(inputFile)
+ if action == 'validate':
+ pass
+ elif action == 'ovsdb-schema':
+ printOVSDBSchema(schema)
+ elif action == 'c-idl-header':
+ printCIDLHeader(schema)
+ elif action == 'c-idl-source':
+ printCIDLSource(schema)
+ else:
+ sys.stderr.write(
+ "%s: unknown action '%s' (use --help for help)\n" %
+ (argv0, action))
+ sys.exit(1)
+ except Error, e:
+ sys.stderr.write("%s: %s\n" % (argv0, e.msg))
+ sys.exit(1)
+
+# Local variables:
+# mode: python
+# End:
--- /dev/null
+r"""JSON (JavaScript Object Notation) <http://json.org> is a subset of
+JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
+interchange format.
+
+:mod:`simplejson` exposes an API familiar to users of the standard library
+:mod:`marshal` and :mod:`pickle` modules. It is the externally maintained
+version of the :mod:`json` library contained in Python 2.6, but maintains
+compatibility with Python 2.4 and Python 2.5 and (currently) has
+significant performance advantages, even without using the optional C
+extension for speedups.
+
+Encoding basic Python object hierarchies::
+
+ >>> import simplejson as json
+ >>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
+ '["foo", {"bar": ["baz", null, 1.0, 2]}]'
+ >>> print json.dumps("\"foo\bar")
+ "\"foo\bar"
+ >>> print json.dumps(u'\u1234')
+ "\u1234"
+ >>> print json.dumps('\\')
+ "\\"
+ >>> print json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)
+ {"a": 0, "b": 0, "c": 0}
+ >>> from StringIO import StringIO
+ >>> io = StringIO()
+ >>> json.dump(['streaming API'], io)
+ >>> io.getvalue()
+ '["streaming API"]'
+
+Compact encoding::
+
+ >>> import simplejson as json
+ >>> json.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))
+ '[1,2,3,{"4":5,"6":7}]'
+
+Pretty printing::
+
+ >>> import simplejson as json
+ >>> s = json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4)
+ >>> print '\n'.join([l.rstrip() for l in s.splitlines()])
+ {
+ "4": 5,
+ "6": 7
+ }
+
+Decoding JSON::
+
+ >>> import simplejson as json
+ >>> obj = [u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
+ >>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj
+ True
+ >>> json.loads('"\\"foo\\bar"') == u'"foo\x08ar'
+ True
+ >>> from StringIO import StringIO
+ >>> io = StringIO('["streaming API"]')
+ >>> json.load(io)[0] == 'streaming API'
+ True
+
+Specializing JSON object decoding::
+
+ >>> import simplejson as json
+ >>> def as_complex(dct):
+ ... if '__complex__' in dct:
+ ... return complex(dct['real'], dct['imag'])
+ ... return dct
+ ...
+ >>> json.loads('{"__complex__": true, "real": 1, "imag": 2}',
+ ... object_hook=as_complex)
+ (1+2j)
+ >>> import decimal
+ >>> json.loads('1.1', parse_float=decimal.Decimal) == decimal.Decimal('1.1')
+ True
+
+Specializing JSON object encoding::
+
+ >>> import simplejson as json
+ >>> def encode_complex(obj):
+ ... if isinstance(obj, complex):
+ ... return [obj.real, obj.imag]
+ ... raise TypeError(repr(obj) + " is not JSON serializable")
+ ...
+ >>> json.dumps(2 + 1j, default=encode_complex)
+ '[2.0, 1.0]'
+ >>> json.JSONEncoder(default=encode_complex).encode(2 + 1j)
+ '[2.0, 1.0]'
+ >>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j))
+ '[2.0, 1.0]'
+
+
+Using simplejson.tool from the shell to validate and pretty-print::
+
+ $ echo '{"json":"obj"}' | python -m simplejson.tool
+ {
+ "json": "obj"
+ }
+ $ echo '{ 1.2:3.4}' | python -m simplejson.tool
+ Expecting property name: line 1 column 2 (char 2)
+"""
+__version__ = '2.0.9'
+__all__ = [
+ 'dump', 'dumps', 'load', 'loads',
+ 'JSONDecoder', 'JSONEncoder',
+]
+
+__author__ = 'Bob Ippolito <bob@redivi.com>'
+
+from decoder import JSONDecoder
+from encoder import JSONEncoder
+
+_default_encoder = JSONEncoder(
+ skipkeys=False,
+ ensure_ascii=True,
+ check_circular=True,
+ allow_nan=True,
+ indent=None,
+ separators=None,
+ encoding='utf-8',
+ default=None,
+)
+
+def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
+ allow_nan=True, cls=None, indent=None, separators=None,
+ encoding='utf-8', default=None, **kw):
+ """Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
+ ``.write()``-supporting file-like object).
+
+ If ``skipkeys`` is true then ``dict`` keys that are not basic types
+ (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
+ will be skipped instead of raising a ``TypeError``.
+
+ If ``ensure_ascii`` is false, then some chunks written to ``fp``
+ may be ``unicode`` instances, subject to normal Python ``str`` to
+ ``unicode`` coercion rules. Unless ``fp.write()`` explicitly
+ understands ``unicode`` (as in ``codecs.getwriter()``) this is likely
+ to cause an error.
+
+ If ``check_circular`` is false, then the circular reference check
+ for container types will be skipped and a circular reference will
+ result in an ``OverflowError`` (or worse).
+
+ If ``allow_nan`` is false, then it will be a ``ValueError`` to
+ serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
+ in strict compliance of the JSON specification, instead of using the
+ JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
+
+ If ``indent`` is a non-negative integer, then JSON array elements and object
+ members will be pretty-printed with that indent level. An indent level
+ of 0 will only insert newlines. ``None`` is the most compact representation.
+
+ If ``separators`` is an ``(item_separator, dict_separator)`` tuple
+ then it will be used instead of the default ``(', ', ': ')`` separators.
+ ``(',', ':')`` is the most compact JSON representation.
+
+ ``encoding`` is the character encoding for str instances, default is UTF-8.
+
+ ``default(obj)`` is a function that should return a serializable version
+ of obj or raise TypeError. The default simply raises TypeError.
+
+ To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
+ ``.default()`` method to serialize additional types), specify it with
+ the ``cls`` kwarg.
+
+ """
+ # cached encoder
+ if (not skipkeys and ensure_ascii and
+ check_circular and allow_nan and
+ cls is None and indent is None and separators is None and
+ encoding == 'utf-8' and default is None and not kw):
+ iterable = _default_encoder.iterencode(obj)
+ else:
+ if cls is None:
+ cls = JSONEncoder
+ iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
+ check_circular=check_circular, allow_nan=allow_nan, indent=indent,
+ separators=separators, encoding=encoding,
+ default=default, **kw).iterencode(obj)
+ # could accelerate with writelines in some versions of Python, at
+ # a debuggability cost
+ for chunk in iterable:
+ fp.write(chunk)
+
+
+def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
+ allow_nan=True, cls=None, indent=None, separators=None,
+ encoding='utf-8', default=None, **kw):
+ """Serialize ``obj`` to a JSON formatted ``str``.
+
+ If ``skipkeys`` is true then ``dict`` keys that are not basic types
+ (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
+ will be skipped instead of raising a ``TypeError``.
+
+ If ``ensure_ascii`` is false, then the return value will be a
+ ``unicode`` instance subject to normal Python ``str`` to ``unicode``
+ coercion rules instead of being escaped to an ASCII ``str``.
+
+ If ``check_circular`` is false, then the circular reference check
+ for container types will be skipped and a circular reference will
+ result in an ``OverflowError`` (or worse).
+
+ If ``allow_nan`` is false, then it will be a ``ValueError`` to
+ serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
+ strict compliance of the JSON specification, instead of using the
+ JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
+
+ If ``indent`` is a non-negative integer, then JSON array elements and
+ object members will be pretty-printed with that indent level. An indent
+ level of 0 will only insert newlines. ``None`` is the most compact
+ representation.
+
+ If ``separators`` is an ``(item_separator, dict_separator)`` tuple
+ then it will be used instead of the default ``(', ', ': ')`` separators.
+ ``(',', ':')`` is the most compact JSON representation.
+
+ ``encoding`` is the character encoding for str instances, default is UTF-8.
+
+ ``default(obj)`` is a function that should return a serializable version
+ of obj or raise TypeError. The default simply raises TypeError.
+
+ To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
+ ``.default()`` method to serialize additional types), specify it with
+ the ``cls`` kwarg.
+
+ """
+ # cached encoder
+ if (not skipkeys and ensure_ascii and
+ check_circular and allow_nan and
+ cls is None and indent is None and separators is None and
+ encoding == 'utf-8' and default is None and not kw):
+ return _default_encoder.encode(obj)
+ if cls is None:
+ cls = JSONEncoder
+ return cls(
+ skipkeys=skipkeys, ensure_ascii=ensure_ascii,
+ check_circular=check_circular, allow_nan=allow_nan, indent=indent,
+ separators=separators, encoding=encoding, default=default,
+ **kw).encode(obj)
+
+
+_default_decoder = JSONDecoder(encoding=None, object_hook=None)
+
+
+def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
+ parse_int=None, parse_constant=None, **kw):
+ """Deserialize ``fp`` (a ``.read()``-supporting file-like object containing
+ a JSON document) to a Python object.
+
+ If the contents of ``fp`` are encoded with an ASCII based encoding other
+ than utf-8 (e.g. latin-1), then an appropriate ``encoding`` name must
+ be specified. Encodings that are not ASCII based (such as UCS-2) are
+ not allowed, and should be wrapped with
+ ``codecs.getreader(encoding)(fp)``, or simply decoded to a ``unicode``
+ object and passed to ``loads()``.
+
+ ``object_hook`` is an optional function that will be called with the
+ result of any object literal decode (a ``dict``). The return value of
+ ``object_hook`` will be used instead of the ``dict``. This feature
+ can be used to implement custom decoders (e.g. JSON-RPC class hinting).
+
+ To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
+ kwarg.
+
+ """
+ return loads(fp.read(),
+ encoding=encoding, cls=cls, object_hook=object_hook,
+ parse_float=parse_float, parse_int=parse_int,
+ parse_constant=parse_constant, **kw)
+
+
+def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
+ parse_int=None, parse_constant=None, **kw):
+ """Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
+ document) to a Python object.
+
+ If ``s`` is a ``str`` instance and is encoded with an ASCII based encoding
+ other than utf-8 (e.g. latin-1) then an appropriate ``encoding`` name
+ must be specified. Encodings that are not ASCII based (such as UCS-2)
+ are not allowed and should be decoded to ``unicode`` first.
+
+ ``object_hook`` is an optional function that will be called with the
+ result of any object literal decode (a ``dict``). The return value of
+ ``object_hook`` will be used instead of the ``dict``. This feature
+ can be used to implement custom decoders (e.g. JSON-RPC class hinting).
+
+ ``parse_float``, if specified, will be called with the string
+ of every JSON float to be decoded. By default this is equivalent to
+ float(num_str). This can be used to use another datatype or parser
+ for JSON floats (e.g. decimal.Decimal).
+
+ ``parse_int``, if specified, will be called with the string
+ of every JSON int to be decoded. By default this is equivalent to
+ int(num_str). This can be used to use another datatype or parser
+ for JSON integers (e.g. float).
+
+ ``parse_constant``, if specified, will be called with one of the
+ following strings: -Infinity, Infinity, NaN.
+ This can be used to raise an exception if invalid JSON numbers
+ are encountered.
+
+ To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
+ kwarg.
+
+ """
+ if (cls is None and encoding is None and object_hook is None and
+ parse_int is None and parse_float is None and
+ parse_constant is None and not kw):
+ return _default_decoder.decode(s)
+ if cls is None:
+ cls = JSONDecoder
+ if object_hook is not None:
+ kw['object_hook'] = object_hook
+ if parse_float is not None:
+ kw['parse_float'] = parse_float
+ if parse_int is not None:
+ kw['parse_int'] = parse_int
+ if parse_constant is not None:
+ kw['parse_constant'] = parse_constant
+ return cls(encoding=encoding, **kw).decode(s)
--- /dev/null
+#include "Python.h"
+#include "structmember.h"
+#if PY_VERSION_HEX < 0x02060000 && !defined(Py_TYPE)
+#define Py_TYPE(ob) (((PyObject*)(ob))->ob_type)
+#endif
+#if PY_VERSION_HEX < 0x02050000 && !defined(PY_SSIZE_T_MIN)
+typedef int Py_ssize_t;
+#define PY_SSIZE_T_MAX INT_MAX
+#define PY_SSIZE_T_MIN INT_MIN
+#define PyInt_FromSsize_t PyInt_FromLong
+#define PyInt_AsSsize_t PyInt_AsLong
+#endif
+#ifndef Py_IS_FINITE
+#define Py_IS_FINITE(X) (!Py_IS_INFINITY(X) && !Py_IS_NAN(X))
+#endif
+
+#ifdef __GNUC__
+#define UNUSED __attribute__((__unused__))
+#else
+#define UNUSED
+#endif
+
+#define DEFAULT_ENCODING "utf-8"
+
+#define PyScanner_Check(op) PyObject_TypeCheck(op, &PyScannerType)
+#define PyScanner_CheckExact(op) (Py_TYPE(op) == &PyScannerType)
+#define PyEncoder_Check(op) PyObject_TypeCheck(op, &PyEncoderType)
+#define PyEncoder_CheckExact(op) (Py_TYPE(op) == &PyEncoderType)
+
+static PyTypeObject PyScannerType;
+static PyTypeObject PyEncoderType;
+
+typedef struct _PyScannerObject {
+ PyObject_HEAD
+ PyObject *encoding;
+ PyObject *strict;
+ PyObject *object_hook;
+ PyObject *parse_float;
+ PyObject *parse_int;
+ PyObject *parse_constant;
+} PyScannerObject;
+
+static PyMemberDef scanner_members[] = {
+ {"encoding", T_OBJECT, offsetof(PyScannerObject, encoding), READONLY, "encoding"},
+ {"strict", T_OBJECT, offsetof(PyScannerObject, strict), READONLY, "strict"},
+ {"object_hook", T_OBJECT, offsetof(PyScannerObject, object_hook), READONLY, "object_hook"},
+ {"parse_float", T_OBJECT, offsetof(PyScannerObject, parse_float), READONLY, "parse_float"},
+ {"parse_int", T_OBJECT, offsetof(PyScannerObject, parse_int), READONLY, "parse_int"},
+ {"parse_constant", T_OBJECT, offsetof(PyScannerObject, parse_constant), READONLY, "parse_constant"},
+ {NULL}
+};
+
+typedef struct _PyEncoderObject {
+ PyObject_HEAD
+ PyObject *markers;
+ PyObject *defaultfn;
+ PyObject *encoder;
+ PyObject *indent;
+ PyObject *key_separator;
+ PyObject *item_separator;
+ PyObject *sort_keys;
+ PyObject *skipkeys;
+ int fast_encode;
+ int allow_nan;
+} PyEncoderObject;
+
+static PyMemberDef encoder_members[] = {
+ {"markers", T_OBJECT, offsetof(PyEncoderObject, markers), READONLY, "markers"},
+ {"default", T_OBJECT, offsetof(PyEncoderObject, defaultfn), READONLY, "default"},
+ {"encoder", T_OBJECT, offsetof(PyEncoderObject, encoder), READONLY, "encoder"},
+ {"indent", T_OBJECT, offsetof(PyEncoderObject, indent), READONLY, "indent"},
+ {"key_separator", T_OBJECT, offsetof(PyEncoderObject, key_separator), READONLY, "key_separator"},
+ {"item_separator", T_OBJECT, offsetof(PyEncoderObject, item_separator), READONLY, "item_separator"},
+ {"sort_keys", T_OBJECT, offsetof(PyEncoderObject, sort_keys), READONLY, "sort_keys"},
+ {"skipkeys", T_OBJECT, offsetof(PyEncoderObject, skipkeys), READONLY, "skipkeys"},
+ {NULL}
+};
+
+static Py_ssize_t
+ascii_escape_char(Py_UNICODE c, char *output, Py_ssize_t chars);
+static PyObject *
+ascii_escape_unicode(PyObject *pystr);
+static PyObject *
+ascii_escape_str(PyObject *pystr);
+static PyObject *
+py_encode_basestring_ascii(PyObject* self UNUSED, PyObject *pystr);
+void init_speedups(void);
+static PyObject *
+scan_once_str(PyScannerObject *s, PyObject *pystr, Py_ssize_t idx, Py_ssize_t *next_idx_ptr);
+static PyObject *
+scan_once_unicode(PyScannerObject *s, PyObject *pystr, Py_ssize_t idx, Py_ssize_t *next_idx_ptr);
+static PyObject *
+_build_rval_index_tuple(PyObject *rval, Py_ssize_t idx);
+static PyObject *
+scanner_new(PyTypeObject *type, PyObject *args, PyObject *kwds);
+static int
+scanner_init(PyObject *self, PyObject *args, PyObject *kwds);
+static void
+scanner_dealloc(PyObject *self);
+static int
+scanner_clear(PyObject *self);
+static PyObject *
+encoder_new(PyTypeObject *type, PyObject *args, PyObject *kwds);
+static int
+encoder_init(PyObject *self, PyObject *args, PyObject *kwds);
+static void
+encoder_dealloc(PyObject *self);
+static int
+encoder_clear(PyObject *self);
+static int
+encoder_listencode_list(PyEncoderObject *s, PyObject *rval, PyObject *seq, Py_ssize_t indent_level);
+static int
+encoder_listencode_obj(PyEncoderObject *s, PyObject *rval, PyObject *obj, Py_ssize_t indent_level);
+static int
+encoder_listencode_dict(PyEncoderObject *s, PyObject *rval, PyObject *dct, Py_ssize_t indent_level);
+static PyObject *
+_encoded_const(PyObject *const);
+static void
+raise_errmsg(char *msg, PyObject *s, Py_ssize_t end);
+static PyObject *
+encoder_encode_string(PyEncoderObject *s, PyObject *obj);
+static int
+_convertPyInt_AsSsize_t(PyObject *o, Py_ssize_t *size_ptr);
+static PyObject *
+_convertPyInt_FromSsize_t(Py_ssize_t *size_ptr);
+static PyObject *
+encoder_encode_float(PyEncoderObject *s, PyObject *obj);
+
+#define S_CHAR(c) (c >= ' ' && c <= '~' && c != '\\' && c != '"')
+#define IS_WHITESPACE(c) (((c) == ' ') || ((c) == '\t') || ((c) == '\n') || ((c) == '\r'))
+
+#define MIN_EXPANSION 6
+#ifdef Py_UNICODE_WIDE
+#define MAX_EXPANSION (2 * MIN_EXPANSION)
+#else
+#define MAX_EXPANSION MIN_EXPANSION
+#endif
+
+static int
+_convertPyInt_AsSsize_t(PyObject *o, Py_ssize_t *size_ptr)
+{
+ /* PyObject to Py_ssize_t converter */
+ *size_ptr = PyInt_AsSsize_t(o);
+ if (*size_ptr == -1 && PyErr_Occurred())
+ return 0;
+ return 1;
+}
+
+static PyObject *
+_convertPyInt_FromSsize_t(Py_ssize_t *size_ptr)
+{
+ /* Py_ssize_t to PyObject converter */
+ return PyInt_FromSsize_t(*size_ptr);
+}
+
+static Py_ssize_t
+ascii_escape_char(Py_UNICODE c, char *output, Py_ssize_t chars)
+{
+ /* Escape unicode code point c to ASCII escape sequences
+ in char *output. output must have at least 12 bytes unused to
+ accommodate an escaped surrogate pair "\uXXXX\uXXXX" */
+ output[chars++] = '\\';
+ switch (c) {
+ case '\\': output[chars++] = (char)c; break;
+ case '"': output[chars++] = (char)c; break;
+ case '\b': output[chars++] = 'b'; break;
+ case '\f': output[chars++] = 'f'; break;
+ case '\n': output[chars++] = 'n'; break;
+ case '\r': output[chars++] = 'r'; break;
+ case '\t': output[chars++] = 't'; break;
+ default:
+#ifdef Py_UNICODE_WIDE
+ if (c >= 0x10000) {
+ /* UTF-16 surrogate pair */
+ Py_UNICODE v = c - 0x10000;
+ c = 0xd800 | ((v >> 10) & 0x3ff);
+ output[chars++] = 'u';
+ output[chars++] = "0123456789abcdef"[(c >> 12) & 0xf];
+ output[chars++] = "0123456789abcdef"[(c >> 8) & 0xf];
+ output[chars++] = "0123456789abcdef"[(c >> 4) & 0xf];
+ output[chars++] = "0123456789abcdef"[(c ) & 0xf];
+ c = 0xdc00 | (v & 0x3ff);
+ output[chars++] = '\\';
+ }
+#endif
+ output[chars++] = 'u';
+ output[chars++] = "0123456789abcdef"[(c >> 12) & 0xf];
+ output[chars++] = "0123456789abcdef"[(c >> 8) & 0xf];
+ output[chars++] = "0123456789abcdef"[(c >> 4) & 0xf];
+ output[chars++] = "0123456789abcdef"[(c ) & 0xf];
+ }
+ return chars;
+}
+
+static PyObject *
+ascii_escape_unicode(PyObject *pystr)
+{
+ /* Take a PyUnicode pystr and return a new ASCII-only escaped PyString */
+ Py_ssize_t i;
+ Py_ssize_t input_chars;
+ Py_ssize_t output_size;
+ Py_ssize_t max_output_size;
+ Py_ssize_t chars;
+ PyObject *rval;
+ char *output;
+ Py_UNICODE *input_unicode;
+
+ input_chars = PyUnicode_GET_SIZE(pystr);
+ input_unicode = PyUnicode_AS_UNICODE(pystr);
+
+ /* One char input can be up to 6 chars output, estimate 4 of these */
+ output_size = 2 + (MIN_EXPANSION * 4) + input_chars;
+ max_output_size = 2 + (input_chars * MAX_EXPANSION);
+ rval = PyString_FromStringAndSize(NULL, output_size);
+ if (rval == NULL) {
+ return NULL;
+ }
+ output = PyString_AS_STRING(rval);
+ chars = 0;
+ output[chars++] = '"';
+ for (i = 0; i < input_chars; i++) {
+ Py_UNICODE c = input_unicode[i];
+ if (S_CHAR(c)) {
+ output[chars++] = (char)c;
+ }
+ else {
+ chars = ascii_escape_char(c, output, chars);
+ }
+ if (output_size - chars < (1 + MAX_EXPANSION)) {
+ /* There's more than four, so let's resize by a lot */
+ Py_ssize_t new_output_size = output_size * 2;
+ /* This is an upper bound */
+ if (new_output_size > max_output_size) {
+ new_output_size = max_output_size;
+ }
+ /* Make sure that the output size changed before resizing */
+ if (new_output_size != output_size) {
+ output_size = new_output_size;
+ if (_PyString_Resize(&rval, output_size) == -1) {
+ return NULL;
+ }
+ output = PyString_AS_STRING(rval);
+ }
+ }
+ }
+ output[chars++] = '"';
+ if (_PyString_Resize(&rval, chars) == -1) {
+ return NULL;
+ }
+ return rval;
+}
+
+static PyObject *
+ascii_escape_str(PyObject *pystr)
+{
+ /* Take a PyString pystr and return a new ASCII-only escaped PyString */
+ Py_ssize_t i;
+ Py_ssize_t input_chars;
+ Py_ssize_t output_size;
+ Py_ssize_t chars;
+ PyObject *rval;
+ char *output;
+ char *input_str;
+
+ input_chars = PyString_GET_SIZE(pystr);
+ input_str = PyString_AS_STRING(pystr);
+
+ /* Fast path for a string that's already ASCII */
+ for (i = 0; i < input_chars; i++) {
+ Py_UNICODE c = (Py_UNICODE)(unsigned char)input_str[i];
+ if (!S_CHAR(c)) {
+ /* If we have to escape something, scan the string for unicode */
+ Py_ssize_t j;
+ for (j = i; j < input_chars; j++) {
+ c = (Py_UNICODE)(unsigned char)input_str[j];
+ if (c > 0x7f) {
+ /* We hit a non-ASCII character, bail to unicode mode */
+ PyObject *uni;
+ uni = PyUnicode_DecodeUTF8(input_str, input_chars, "strict");
+ if (uni == NULL) {
+ return NULL;
+ }
+ rval = ascii_escape_unicode(uni);
+ Py_DECREF(uni);
+ return rval;
+ }
+ }
+ break;
+ }
+ }
+
+ if (i == input_chars) {
+ /* Input is already ASCII */
+ output_size = 2 + input_chars;
+ }
+ else {
+ /* One char input can be up to 6 chars output, estimate 4 of these */
+ output_size = 2 + (MIN_EXPANSION * 4) + input_chars;
+ }
+ rval = PyString_FromStringAndSize(NULL, output_size);
+ if (rval == NULL) {
+ return NULL;
+ }
+ output = PyString_AS_STRING(rval);
+ output[0] = '"';
+
+ /* We know that everything up to i is ASCII already */
+ chars = i + 1;
+ memcpy(&output[1], input_str, i);
+
+ for (; i < input_chars; i++) {
+ Py_UNICODE c = (Py_UNICODE)(unsigned char)input_str[i];
+ if (S_CHAR(c)) {
+ output[chars++] = (char)c;
+ }
+ else {
+ chars = ascii_escape_char(c, output, chars);
+ }
+ /* An ASCII char can't possibly expand to a surrogate! */
+ if (output_size - chars < (1 + MIN_EXPANSION)) {
+ /* There's more than four, so let's resize by a lot */
+ output_size *= 2;
+ if (output_size > 2 + (input_chars * MIN_EXPANSION)) {
+ output_size = 2 + (input_chars * MIN_EXPANSION);
+ }
+ if (_PyString_Resize(&rval, output_size) == -1) {
+ return NULL;
+ }
+ output = PyString_AS_STRING(rval);
+ }
+ }
+ output[chars++] = '"';
+ if (_PyString_Resize(&rval, chars) == -1) {
+ return NULL;
+ }
+ return rval;
+}
+
+static void
+raise_errmsg(char *msg, PyObject *s, Py_ssize_t end)
+{
+ /* Use the Python function simplejson.decoder.errmsg to raise a nice
+ looking ValueError exception */
+ static PyObject *errmsg_fn = NULL;
+ PyObject *pymsg;
+ if (errmsg_fn == NULL) {
+ PyObject *decoder = PyImport_ImportModule("simplejson.decoder");
+ if (decoder == NULL)
+ return;
+ errmsg_fn = PyObject_GetAttrString(decoder, "errmsg");
+ Py_DECREF(decoder);
+ if (errmsg_fn == NULL)
+ return;
+ }
+ pymsg = PyObject_CallFunction(errmsg_fn, "(zOO&)", msg, s, _convertPyInt_FromSsize_t, &end);
+ if (pymsg) {
+ PyErr_SetObject(PyExc_ValueError, pymsg);
+ Py_DECREF(pymsg);
+ }
+}
+
+static PyObject *
+join_list_unicode(PyObject *lst)
+{
+ /* return u''.join(lst) */
+ static PyObject *joinfn = NULL;
+ if (joinfn == NULL) {
+ PyObject *ustr = PyUnicode_FromUnicode(NULL, 0);
+ if (ustr == NULL)
+ return NULL;
+
+ joinfn = PyObject_GetAttrString(ustr, "join");
+ Py_DECREF(ustr);
+ if (joinfn == NULL)
+ return NULL;
+ }
+ return PyObject_CallFunctionObjArgs(joinfn, lst, NULL);
+}
+
+static PyObject *
+join_list_string(PyObject *lst)
+{
+ /* return ''.join(lst) */
+ static PyObject *joinfn = NULL;
+ if (joinfn == NULL) {
+ PyObject *ustr = PyString_FromStringAndSize(NULL, 0);
+ if (ustr == NULL)
+ return NULL;
+
+ joinfn = PyObject_GetAttrString(ustr, "join");
+ Py_DECREF(ustr);
+ if (joinfn == NULL)
+ return NULL;
+ }
+ return PyObject_CallFunctionObjArgs(joinfn, lst, NULL);
+}
+
+static PyObject *
+_build_rval_index_tuple(PyObject *rval, Py_ssize_t idx) {
+ /* return (rval, idx) tuple, stealing reference to rval */
+ PyObject *tpl;
+ PyObject *pyidx;
+ /*
+ steal a reference to rval, returns (rval, idx)
+ */
+ if (rval == NULL) {
+ return NULL;
+ }
+ pyidx = PyInt_FromSsize_t(idx);
+ if (pyidx == NULL) {
+ Py_DECREF(rval);
+ return NULL;
+ }
+ tpl = PyTuple_New(2);
+ if (tpl == NULL) {
+ Py_DECREF(pyidx);
+ Py_DECREF(rval);
+ return NULL;
+ }
+ PyTuple_SET_ITEM(tpl, 0, rval);
+ PyTuple_SET_ITEM(tpl, 1, pyidx);
+ return tpl;
+}
+
+static PyObject *
+scanstring_str(PyObject *pystr, Py_ssize_t end, char *encoding, int strict, Py_ssize_t *next_end_ptr)
+{
+ /* Read the JSON string from PyString pystr.
+ end is the index of the first character after the quote.
+ encoding is the encoding of pystr (must be an ASCII superset)
+ if strict is zero then literal control characters are allowed
+ *next_end_ptr is a return-by-reference index of the character
+ after the end quote
+
+ Return value is a new PyString (if ASCII-only) or PyUnicode
+ */
+ PyObject *rval;
+ Py_ssize_t len = PyString_GET_SIZE(pystr);
+ Py_ssize_t begin = end - 1;
+ Py_ssize_t next = begin;
+ int has_unicode = 0;
+ char *buf = PyString_AS_STRING(pystr);
+ PyObject *chunks = PyList_New(0);
+ if (chunks == NULL) {
+ goto bail;
+ }
+ if (end < 0 || len <= end) {
+ PyErr_SetString(PyExc_ValueError, "end is out of bounds");
+ goto bail;
+ }
+ while (1) {
+ /* Find the end of the string or the next escape */
+ Py_UNICODE c = 0;
+ PyObject *chunk = NULL;
+ for (next = end; next < len; next++) {
+ c = (unsigned char)buf[next];
+ if (c == '"' || c == '\\') {
+ break;
+ }
+ else if (strict && c <= 0x1f) {
+ raise_errmsg("Invalid control character at", pystr, next);
+ goto bail;
+ }
+ else if (c > 0x7f) {
+ has_unicode = 1;
+ }
+ }
+ if (!(c == '"' || c == '\\')) {
+ raise_errmsg("Unterminated string starting at", pystr, begin);
+ goto bail;
+ }
+ /* Pick up this chunk if it's not zero length */
+ if (next != end) {
+ PyObject *strchunk = PyString_FromStringAndSize(&buf[end], next - end);
+ if (strchunk == NULL) {
+ goto bail;
+ }
+ if (has_unicode) {
+ chunk = PyUnicode_FromEncodedObject(strchunk, encoding, NULL);
+ Py_DECREF(strchunk);
+ if (chunk == NULL) {
+ goto bail;
+ }
+ }
+ else {
+ chunk = strchunk;
+ }
+ if (PyList_Append(chunks, chunk)) {
+ Py_DECREF(chunk);
+ goto bail;
+ }
+ Py_DECREF(chunk);
+ }
+ next++;
+ if (c == '"') {
+ end = next;
+ break;
+ }
+ if (next == len) {
+ raise_errmsg("Unterminated string starting at", pystr, begin);
+ goto bail;
+ }
+ c = buf[next];
+ if (c != 'u') {
+ /* Non-unicode backslash escapes */
+ end = next + 1;
+ switch (c) {
+ case '"': break;
+ case '\\': break;
+ case '/': break;
+ case 'b': c = '\b'; break;
+ case 'f': c = '\f'; break;
+ case 'n': c = '\n'; break;
+ case 'r': c = '\r'; break;
+ case 't': c = '\t'; break;
+ default: c = 0;
+ }
+ if (c == 0) {
+ raise_errmsg("Invalid \\escape", pystr, end - 2);
+ goto bail;
+ }
+ }
+ else {
+ c = 0;
+ next++;
+ end = next + 4;
+ if (end >= len) {
+ raise_errmsg("Invalid \\uXXXX escape", pystr, next - 1);
+ goto bail;
+ }
+ /* Decode 4 hex digits */
+ for (; next < end; next++) {
+ Py_UNICODE digit = buf[next];
+ c <<= 4;
+ switch (digit) {
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ c |= (digit - '0'); break;
+ case 'a': case 'b': case 'c': case 'd': case 'e':
+ case 'f':
+ c |= (digit - 'a' + 10); break;
+ case 'A': case 'B': case 'C': case 'D': case 'E':
+ case 'F':
+ c |= (digit - 'A' + 10); break;
+ default:
+ raise_errmsg("Invalid \\uXXXX escape", pystr, end - 5);
+ goto bail;
+ }
+ }
+#ifdef Py_UNICODE_WIDE
+ /* Surrogate pair */
+ if ((c & 0xfc00) == 0xd800) {
+ Py_UNICODE c2 = 0;
+ if (end + 6 >= len) {
+ raise_errmsg("Unpaired high surrogate", pystr, end - 5);
+ goto bail;
+ }
+ if (buf[next++] != '\\' || buf[next++] != 'u') {
+ raise_errmsg("Unpaired high surrogate", pystr, end - 5);
+ goto bail;
+ }
+ end += 6;
+ /* Decode 4 hex digits */
+ for (; next < end; next++) {
+ c2 <<= 4;
+ Py_UNICODE digit = buf[next];
+ switch (digit) {
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ c2 |= (digit - '0'); break;
+ case 'a': case 'b': case 'c': case 'd': case 'e':
+ case 'f':
+ c2 |= (digit - 'a' + 10); break;
+ case 'A': case 'B': case 'C': case 'D': case 'E':
+ case 'F':
+ c2 |= (digit - 'A' + 10); break;
+ default:
+ raise_errmsg("Invalid \\uXXXX escape", pystr, end - 5);
+ goto bail;
+ }
+ }
+ if ((c2 & 0xfc00) != 0xdc00) {
+ raise_errmsg("Unpaired high surrogate", pystr, end - 5);
+ goto bail;
+ }
+ c = 0x10000 + (((c - 0xd800) << 10) | (c2 - 0xdc00));
+ }
+ else if ((c & 0xfc00) == 0xdc00) {
+ raise_errmsg("Unpaired low surrogate", pystr, end - 5);
+ goto bail;
+ }
+#endif
+ }
+ if (c > 0x7f) {
+ has_unicode = 1;
+ }
+ if (has_unicode) {
+ chunk = PyUnicode_FromUnicode(&c, 1);
+ if (chunk == NULL) {
+ goto bail;
+ }
+ }
+ else {
+ char c_char = Py_CHARMASK(c);
+ chunk = PyString_FromStringAndSize(&c_char, 1);
+ if (chunk == NULL) {
+ goto bail;
+ }
+ }
+ if (PyList_Append(chunks, chunk)) {
+ Py_DECREF(chunk);
+ goto bail;
+ }
+ Py_DECREF(chunk);
+ }
+
+ rval = join_list_string(chunks);
+ if (rval == NULL) {
+ goto bail;
+ }
+ Py_CLEAR(chunks);
+ *next_end_ptr = end;
+ return rval;
+bail:
+ *next_end_ptr = -1;
+ Py_XDECREF(chunks);
+ return NULL;
+}
+
+
+static PyObject *
+scanstring_unicode(PyObject *pystr, Py_ssize_t end, int strict, Py_ssize_t *next_end_ptr)
+{
+ /* Read the JSON string from PyUnicode pystr.
+ end is the index of the first character after the quote.
+ if strict is zero then literal control characters are allowed
+ *next_end_ptr is a return-by-reference index of the character
+ after the end quote
+
+ Return value is a new PyUnicode
+ */
+ PyObject *rval;
+ Py_ssize_t len = PyUnicode_GET_SIZE(pystr);
+ Py_ssize_t begin = end - 1;
+ Py_ssize_t next = begin;
+ const Py_UNICODE *buf = PyUnicode_AS_UNICODE(pystr);
+ PyObject *chunks = PyList_New(0);
+ if (chunks == NULL) {
+ goto bail;
+ }
+ if (end < 0 || len <= end) {
+ PyErr_SetString(PyExc_ValueError, "end is out of bounds");
+ goto bail;
+ }
+ while (1) {
+ /* Find the end of the string or the next escape */
+ Py_UNICODE c = 0;
+ PyObject *chunk = NULL;
+ for (next = end; next < len; next++) {
+ c = buf[next];
+ if (c == '"' || c == '\\') {
+ break;
+ }
+ else if (strict && c <= 0x1f) {
+ raise_errmsg("Invalid control character at", pystr, next);
+ goto bail;
+ }
+ }
+ if (!(c == '"' || c == '\\')) {
+ raise_errmsg("Unterminated string starting at", pystr, begin);
+ goto bail;
+ }
+ /* Pick up this chunk if it's not zero length */
+ if (next != end) {
+ chunk = PyUnicode_FromUnicode(&buf[end], next - end);
+ if (chunk == NULL) {
+ goto bail;
+ }
+ if (PyList_Append(chunks, chunk)) {
+ Py_DECREF(chunk);
+ goto bail;
+ }
+ Py_DECREF(chunk);
+ }
+ next++;
+ if (c == '"') {
+ end = next;
+ break;
+ }
+ if (next == len) {
+ raise_errmsg("Unterminated string starting at", pystr, begin);
+ goto bail;
+ }
+ c = buf[next];
+ if (c != 'u') {
+ /* Non-unicode backslash escapes */
+ end = next + 1;
+ switch (c) {
+ case '"': break;
+ case '\\': break;
+ case '/': break;
+ case 'b': c = '\b'; break;
+ case 'f': c = '\f'; break;
+ case 'n': c = '\n'; break;
+ case 'r': c = '\r'; break;
+ case 't': c = '\t'; break;
+ default: c = 0;
+ }
+ if (c == 0) {
+ raise_errmsg("Invalid \\escape", pystr, end - 2);
+ goto bail;
+ }
+ }
+ else {
+ c = 0;
+ next++;
+ end = next + 4;
+ if (end >= len) {
+ raise_errmsg("Invalid \\uXXXX escape", pystr, next - 1);
+ goto bail;
+ }
+ /* Decode 4 hex digits */
+ for (; next < end; next++) {
+ Py_UNICODE digit = buf[next];
+ c <<= 4;
+ switch (digit) {
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ c |= (digit - '0'); break;
+ case 'a': case 'b': case 'c': case 'd': case 'e':
+ case 'f':
+ c |= (digit - 'a' + 10); break;
+ case 'A': case 'B': case 'C': case 'D': case 'E':
+ case 'F':
+ c |= (digit - 'A' + 10); break;
+ default:
+ raise_errmsg("Invalid \\uXXXX escape", pystr, end - 5);
+ goto bail;
+ }
+ }
+#ifdef Py_UNICODE_WIDE
+ /* Surrogate pair */
+ if ((c & 0xfc00) == 0xd800) {
+ Py_UNICODE c2 = 0;
+ if (end + 6 >= len) {
+ raise_errmsg("Unpaired high surrogate", pystr, end - 5);
+ goto bail;
+ }
+ if (buf[next++] != '\\' || buf[next++] != 'u') {
+ raise_errmsg("Unpaired high surrogate", pystr, end - 5);
+ goto bail;
+ }
+ end += 6;
+ /* Decode 4 hex digits */
+ for (; next < end; next++) {
+ c2 <<= 4;
+ Py_UNICODE digit = buf[next];
+ switch (digit) {
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ c2 |= (digit - '0'); break;
+ case 'a': case 'b': case 'c': case 'd': case 'e':
+ case 'f':
+ c2 |= (digit - 'a' + 10); break;
+ case 'A': case 'B': case 'C': case 'D': case 'E':
+ case 'F':
+ c2 |= (digit - 'A' + 10); break;
+ default:
+ raise_errmsg("Invalid \\uXXXX escape", pystr, end - 5);
+ goto bail;
+ }
+ }
+ if ((c2 & 0xfc00) != 0xdc00) {
+ raise_errmsg("Unpaired high surrogate", pystr, end - 5);
+ goto bail;
+ }
+ c = 0x10000 + (((c - 0xd800) << 10) | (c2 - 0xdc00));
+ }
+ else if ((c & 0xfc00) == 0xdc00) {
+ raise_errmsg("Unpaired low surrogate", pystr, end - 5);
+ goto bail;
+ }
+#endif
+ }
+ chunk = PyUnicode_FromUnicode(&c, 1);
+ if (chunk == NULL) {
+ goto bail;
+ }
+ if (PyList_Append(chunks, chunk)) {
+ Py_DECREF(chunk);
+ goto bail;
+ }
+ Py_DECREF(chunk);
+ }
+
+ rval = join_list_unicode(chunks);
+ if (rval == NULL) {
+ goto bail;
+ }
+ Py_DECREF(chunks);
+ *next_end_ptr = end;
+ return rval;
+bail:
+ *next_end_ptr = -1;
+ Py_XDECREF(chunks);
+ return NULL;
+}
+
+PyDoc_STRVAR(pydoc_scanstring,
+ "scanstring(basestring, end, encoding, strict=True) -> (str, end)\n"
+ "\n"
+ "Scan the string s for a JSON string. End is the index of the\n"
+ "character in s after the quote that started the JSON string.\n"
+ "Unescapes all valid JSON string escape sequences and raises ValueError\n"
+ "on attempt to decode an invalid string. If strict is False then literal\n"
+ "control characters are allowed in the string.\n"
+ "\n"
+ "Returns a tuple of the decoded string and the index of the character in s\n"
+ "after the end quote."
+);
+
+static PyObject *
+py_scanstring(PyObject* self UNUSED, PyObject *args)
+{
+ PyObject *pystr;
+ PyObject *rval;
+ Py_ssize_t end;
+ Py_ssize_t next_end = -1;
+ char *encoding = NULL;
+ int strict = 1;
+ if (!PyArg_ParseTuple(args, "OO&|zi:scanstring", &pystr, _convertPyInt_AsSsize_t, &end, &encoding, &strict)) {
+ return NULL;
+ }
+ if (encoding == NULL) {
+ encoding = DEFAULT_ENCODING;
+ }
+ if (PyString_Check(pystr)) {
+ rval = scanstring_str(pystr, end, encoding, strict, &next_end);
+ }
+ else if (PyUnicode_Check(pystr)) {
+ rval = scanstring_unicode(pystr, end, strict, &next_end);
+ }
+ else {
+ PyErr_Format(PyExc_TypeError,
+ "first argument must be a string, not %.80s",
+ Py_TYPE(pystr)->tp_name);
+ return NULL;
+ }
+ return _build_rval_index_tuple(rval, next_end);
+}
+
+PyDoc_STRVAR(pydoc_encode_basestring_ascii,
+ "encode_basestring_ascii(basestring) -> str\n"
+ "\n"
+ "Return an ASCII-only JSON representation of a Python string"
+);
+
+static PyObject *
+py_encode_basestring_ascii(PyObject* self UNUSED, PyObject *pystr)
+{
+ /* Return an ASCII-only JSON representation of a Python string */
+ /* METH_O */
+ if (PyString_Check(pystr)) {
+ return ascii_escape_str(pystr);
+ }
+ else if (PyUnicode_Check(pystr)) {
+ return ascii_escape_unicode(pystr);
+ }
+ else {
+ PyErr_Format(PyExc_TypeError,
+ "first argument must be a string, not %.80s",
+ Py_TYPE(pystr)->tp_name);
+ return NULL;
+ }
+}
+
+static void
+scanner_dealloc(PyObject *self)
+{
+ /* Deallocate scanner object */
+ scanner_clear(self);
+ Py_TYPE(self)->tp_free(self);
+}
+
+static int
+scanner_traverse(PyObject *self, visitproc visit, void *arg)
+{
+ PyScannerObject *s;
+ assert(PyScanner_Check(self));
+ s = (PyScannerObject *)self;
+ Py_VISIT(s->encoding);
+ Py_VISIT(s->strict);
+ Py_VISIT(s->object_hook);
+ Py_VISIT(s->parse_float);
+ Py_VISIT(s->parse_int);
+ Py_VISIT(s->parse_constant);
+ return 0;
+}
+
+static int
+scanner_clear(PyObject *self)
+{
+ PyScannerObject *s;
+ assert(PyScanner_Check(self));
+ s = (PyScannerObject *)self;
+ Py_CLEAR(s->encoding);
+ Py_CLEAR(s->strict);
+ Py_CLEAR(s->object_hook);
+ Py_CLEAR(s->parse_float);
+ Py_CLEAR(s->parse_int);
+ Py_CLEAR(s->parse_constant);
+ return 0;
+}
+
+static PyObject *
+_parse_object_str(PyScannerObject *s, PyObject *pystr, Py_ssize_t idx, Py_ssize_t *next_idx_ptr) {
+ /* Read a JSON object from PyString pystr.
+ idx is the index of the first character after the opening curly brace.
+ *next_idx_ptr is a return-by-reference index to the first character after
+ the closing curly brace.
+
+ Returns a new PyObject (usually a dict, but object_hook can change that)
+ */
+ char *str = PyString_AS_STRING(pystr);
+ Py_ssize_t end_idx = PyString_GET_SIZE(pystr) - 1;
+ PyObject *rval = PyDict_New();
+ PyObject *key = NULL;
+ PyObject *val = NULL;
+ char *encoding = PyString_AS_STRING(s->encoding);
+ int strict = PyObject_IsTrue(s->strict);
+ Py_ssize_t next_idx;
+ if (rval == NULL)
+ return NULL;
+
+ /* skip whitespace after { */
+ while (idx <= end_idx && IS_WHITESPACE(str[idx])) idx++;
+
+ /* only loop if the object is non-empty */
+ if (idx <= end_idx && str[idx] != '}') {
+ while (idx <= end_idx) {
+ /* read key */
+ if (str[idx] != '"') {
+ raise_errmsg("Expecting property name", pystr, idx);
+ goto bail;
+ }
+ key = scanstring_str(pystr, idx + 1, encoding, strict, &next_idx);
+ if (key == NULL)
+ goto bail;
+ idx = next_idx;
+
+ /* skip whitespace between key and : delimiter, read :, skip whitespace */
+ while (idx <= end_idx && IS_WHITESPACE(str[idx])) idx++;
+ if (idx > end_idx || str[idx] != ':') {
+ raise_errmsg("Expecting : delimiter", pystr, idx);
+ goto bail;
+ }
+ idx++;
+ while (idx <= end_idx && IS_WHITESPACE(str[idx])) idx++;
+
+ /* read any JSON data type */
+ val = scan_once_str(s, pystr, idx, &next_idx);
+ if (val == NULL)
+ goto bail;
+
+ if (PyDict_SetItem(rval, key, val) == -1)
+ goto bail;
+
+ Py_CLEAR(key);
+ Py_CLEAR(val);
+ idx = next_idx;
+
+ /* skip whitespace before } or , */
+ while (idx <= end_idx && IS_WHITESPACE(str[idx])) idx++;
+
+ /* bail if the object is closed or we didn't get the , delimiter */
+ if (idx > end_idx) break;
+ if (str[idx] == '}') {
+ break;
+ }
+ else if (str[idx] != ',') {
+ raise_errmsg("Expecting , delimiter", pystr, idx);
+ goto bail;
+ }
+ idx++;
+
+ /* skip whitespace after , delimiter */
+ while (idx <= end_idx && IS_WHITESPACE(str[idx])) idx++;
+ }
+ }
+ /* verify that idx < end_idx, str[idx] should be '}' */
+ if (idx > end_idx || str[idx] != '}') {
+ raise_errmsg("Expecting object", pystr, end_idx);
+ goto bail;
+ }
+ /* if object_hook is not None: rval = object_hook(rval) */
+ if (s->object_hook != Py_None) {
+ val = PyObject_CallFunctionObjArgs(s->object_hook, rval, NULL);
+ if (val == NULL)
+ goto bail;
+ Py_DECREF(rval);
+ rval = val;
+ val = NULL;
+ }
+ *next_idx_ptr = idx + 1;
+ return rval;
+bail:
+ Py_XDECREF(key);
+ Py_XDECREF(val);
+ Py_DECREF(rval);
+ return NULL;
+}
+
+static PyObject *
+_parse_object_unicode(PyScannerObject *s, PyObject *pystr, Py_ssize_t idx, Py_ssize_t *next_idx_ptr) {
+ /* Read a JSON object from PyUnicode pystr.
+ idx is the index of the first character after the opening curly brace.
+ *next_idx_ptr is a return-by-reference index to the first character after
+ the closing curly brace.
+
+ Returns a new PyObject (usually a dict, but object_hook can change that)
+ */
+ Py_UNICODE *str = PyUnicode_AS_UNICODE(pystr);
+ Py_ssize_t end_idx = PyUnicode_GET_SIZE(pystr) - 1;
+ PyObject *val = NULL;
+ PyObject *rval = PyDict_New();
+ PyObject *key = NULL;
+ int strict = PyObject_IsTrue(s->strict);
+ Py_ssize_t next_idx;
+ if (rval == NULL)
+ return NULL;
+
+ /* skip whitespace after { */
+ while (idx <= end_idx && IS_WHITESPACE(str[idx])) idx++;
+
+ /* only loop if the object is non-empty */
+ if (idx <= end_idx && str[idx] != '}') {
+ while (idx <= end_idx) {
+ /* read key */
+ if (str[idx] != '"') {
+ raise_errmsg("Expecting property name", pystr, idx);
+ goto bail;
+ }
+ key = scanstring_unicode(pystr, idx + 1, strict, &next_idx);
+ if (key == NULL)
+ goto bail;
+ idx = next_idx;
+
+ /* skip whitespace between key and : delimiter, read :, skip whitespace */
+ while (idx <= end_idx && IS_WHITESPACE(str[idx])) idx++;
+ if (idx > end_idx || str[idx] != ':') {
+ raise_errmsg("Expecting : delimiter", pystr, idx);
+ goto bail;
+ }
+ idx++;
+ while (idx <= end_idx && IS_WHITESPACE(str[idx])) idx++;
+
+ /* read any JSON term */
+ val = scan_once_unicode(s, pystr, idx, &next_idx);
+ if (val == NULL)
+ goto bail;
+
+ if (PyDict_SetItem(rval, key, val) == -1)
+ goto bail;
+
+ Py_CLEAR(key);
+ Py_CLEAR(val);
+ idx = next_idx;
+
+ /* skip whitespace before } or , */
+ while (idx <= end_idx && IS_WHITESPACE(str[idx])) idx++;
+
+ /* bail if the object is closed or we didn't get the , delimiter */
+ if (idx > end_idx) break;
+ if (str[idx] == '}') {
+ break;
+ }
+ else if (str[idx] != ',') {
+ raise_errmsg("Expecting , delimiter", pystr, idx);
+ goto bail;
+ }
+ idx++;
+
+ /* skip whitespace after , delimiter */
+ while (idx <= end_idx && IS_WHITESPACE(str[idx])) idx++;
+ }
+ }
+
+ /* verify that idx < end_idx, str[idx] should be '}' */
+ if (idx > end_idx || str[idx] != '}') {
+ raise_errmsg("Expecting object", pystr, end_idx);
+ goto bail;
+ }
+
+ /* if object_hook is not None: rval = object_hook(rval) */
+ if (s->object_hook != Py_None) {
+ val = PyObject_CallFunctionObjArgs(s->object_hook, rval, NULL);
+ if (val == NULL)
+ goto bail;
+ Py_DECREF(rval);
+ rval = val;
+ val = NULL;
+ }
+ *next_idx_ptr = idx + 1;
+ return rval;
+bail:
+ Py_XDECREF(key);
+ Py_XDECREF(val);
+ Py_DECREF(rval);
+ return NULL;
+}
+
+static PyObject *
+_parse_array_str(PyScannerObject *s, PyObject *pystr, Py_ssize_t idx, Py_ssize_t *next_idx_ptr) {
+ /* Read a JSON array from PyString pystr.
+ idx is the index of the first character after the opening brace.
+ *next_idx_ptr is a return-by-reference index to the first character after
+ the closing brace.
+
+ Returns a new PyList
+ */
+ char *str = PyString_AS_STRING(pystr);
+ Py_ssize_t end_idx = PyString_GET_SIZE(pystr) - 1;
+ PyObject *val = NULL;
+ PyObject *rval = PyList_New(0);
+ Py_ssize_t next_idx;
+ if (rval == NULL)
+ return NULL;
+
+ /* skip whitespace after [ */
+ while (idx <= end_idx && IS_WHITESPACE(str[idx])) idx++;
+
+ /* only loop if the array is non-empty */
+ if (idx <= end_idx && str[idx] != ']') {
+ while (idx <= end_idx) {
+
+ /* read any JSON term and de-tuplefy the (rval, idx) */
+ val = scan_once_str(s, pystr, idx, &next_idx);
+ if (val == NULL)
+ goto bail;
+
+ if (PyList_Append(rval, val) == -1)
+ goto bail;
+
+ Py_CLEAR(val);
+ idx = next_idx;
+
+ /* skip whitespace between term and , */
+ while (idx <= end_idx && IS_WHITESPACE(str[idx])) idx++;
+
+ /* bail if the array is closed or we didn't get the , delimiter */
+ if (idx > end_idx) break;
+ if (str[idx] == ']') {
+ break;
+ }
+ else if (str[idx] != ',') {
+ raise_errmsg("Expecting , delimiter", pystr, idx);
+ goto bail;
+ }
+ idx++;
+
+ /* skip whitespace after , */
+ while (idx <= end_idx && IS_WHITESPACE(str[idx])) idx++;
+ }
+ }
+
+ /* verify that idx < end_idx, str[idx] should be ']' */
+ if (idx > end_idx || str[idx] != ']') {
+ raise_errmsg("Expecting object", pystr, end_idx);
+ goto bail;
+ }
+ *next_idx_ptr = idx + 1;
+ return rval;
+bail:
+ Py_XDECREF(val);
+ Py_DECREF(rval);
+ return NULL;
+}
+
+static PyObject *
+_parse_array_unicode(PyScannerObject *s, PyObject *pystr, Py_ssize_t idx, Py_ssize_t *next_idx_ptr) {
+    /* Read a JSON array from PyUnicode pystr.
+    idx is the index of the first character after the opening bracket.
+    *next_idx_ptr is a return-by-reference index to the first character after
+    the closing bracket.
+
+ Returns a new PyList
+ */
+ Py_UNICODE *str = PyUnicode_AS_UNICODE(pystr);
+ Py_ssize_t end_idx = PyUnicode_GET_SIZE(pystr) - 1;
+ PyObject *val = NULL;
+ PyObject *rval = PyList_New(0);
+ Py_ssize_t next_idx;
+ if (rval == NULL)
+ return NULL;
+
+ /* skip whitespace after [ */
+ while (idx <= end_idx && IS_WHITESPACE(str[idx])) idx++;
+
+ /* only loop if the array is non-empty */
+ if (idx <= end_idx && str[idx] != ']') {
+ while (idx <= end_idx) {
+
+ /* read any JSON term */
+ val = scan_once_unicode(s, pystr, idx, &next_idx);
+ if (val == NULL)
+ goto bail;
+
+ if (PyList_Append(rval, val) == -1)
+ goto bail;
+
+ Py_CLEAR(val);
+ idx = next_idx;
+
+ /* skip whitespace between term and , */
+ while (idx <= end_idx && IS_WHITESPACE(str[idx])) idx++;
+
+ /* bail if the array is closed or we didn't get the , delimiter */
+ if (idx > end_idx) break;
+ if (str[idx] == ']') {
+ break;
+ }
+ else if (str[idx] != ',') {
+ raise_errmsg("Expecting , delimiter", pystr, idx);
+ goto bail;
+ }
+ idx++;
+
+ /* skip whitespace after , */
+ while (idx <= end_idx && IS_WHITESPACE(str[idx])) idx++;
+ }
+ }
+
+ /* verify that idx < end_idx, str[idx] should be ']' */
+ if (idx > end_idx || str[idx] != ']') {
+ raise_errmsg("Expecting object", pystr, end_idx);
+ goto bail;
+ }
+ *next_idx_ptr = idx + 1;
+ return rval;
+bail:
+ Py_XDECREF(val);
+ Py_DECREF(rval);
+ return NULL;
+}
+
+static PyObject *
+_parse_constant(PyScannerObject *s, char *constant, Py_ssize_t idx, Py_ssize_t *next_idx_ptr) {
+    /* Read a JSON constant.
+ constant is the constant string that was found
+ ("NaN", "Infinity", "-Infinity").
+ idx is the index of the first character of the constant
+ *next_idx_ptr is a return-by-reference index to the first character after
+ the constant.
+
+ Returns the result of parse_constant
+ */
+ PyObject *cstr;
+ PyObject *rval;
+ /* constant is "NaN", "Infinity", or "-Infinity" */
+ cstr = PyString_InternFromString(constant);
+ if (cstr == NULL)
+ return NULL;
+
+ /* rval = parse_constant(constant) */
+ rval = PyObject_CallFunctionObjArgs(s->parse_constant, cstr, NULL);
+ idx += PyString_GET_SIZE(cstr);
+ Py_DECREF(cstr);
+ *next_idx_ptr = idx;
+ return rval;
+}
+
+static PyObject *
+_match_number_str(PyScannerObject *s, PyObject *pystr, Py_ssize_t start, Py_ssize_t *next_idx_ptr) {
+ /* Read a JSON number from PyString pystr.
+ idx is the index of the first character of the number
+ *next_idx_ptr is a return-by-reference index to the first character after
+ the number.
+
+ Returns a new PyObject representation of that number:
+ PyInt, PyLong, or PyFloat.
+ May return other types if parse_int or parse_float are set
+ */
+ char *str = PyString_AS_STRING(pystr);
+ Py_ssize_t end_idx = PyString_GET_SIZE(pystr) - 1;
+ Py_ssize_t idx = start;
+ int is_float = 0;
+ PyObject *rval;
+ PyObject *numstr;
+
+ /* read a sign if it's there, make sure it's not the end of the string */
+ if (str[idx] == '-') {
+ idx++;
+ if (idx > end_idx) {
+ PyErr_SetNone(PyExc_StopIteration);
+ return NULL;
+ }
+ }
+
+ /* read as many integer digits as we find as long as it doesn't start with 0 */
+ if (str[idx] >= '1' && str[idx] <= '9') {
+ idx++;
+ while (idx <= end_idx && str[idx] >= '0' && str[idx] <= '9') idx++;
+ }
+ /* if it starts with 0 we only expect one integer digit */
+ else if (str[idx] == '0') {
+ idx++;
+ }
+ /* no integer digits, error */
+ else {
+ PyErr_SetNone(PyExc_StopIteration);
+ return NULL;
+ }
+
+ /* if the next char is '.' followed by a digit then read all float digits */
+ if (idx < end_idx && str[idx] == '.' && str[idx + 1] >= '0' && str[idx + 1] <= '9') {
+ is_float = 1;
+ idx += 2;
+ while (idx <= end_idx && str[idx] >= '0' && str[idx] <= '9') idx++;
+ }
+
+ /* if the next char is 'e' or 'E' then maybe read the exponent (or backtrack) */
+ if (idx < end_idx && (str[idx] == 'e' || str[idx] == 'E')) {
+
+ /* save the index of the 'e' or 'E' just in case we need to backtrack */
+ Py_ssize_t e_start = idx;
+ idx++;
+
+ /* read an exponent sign if present */
+ if (idx < end_idx && (str[idx] == '-' || str[idx] == '+')) idx++;
+
+ /* read all digits */
+ while (idx <= end_idx && str[idx] >= '0' && str[idx] <= '9') idx++;
+
+ /* if we got a digit, then parse as float. if not, backtrack */
+ if (str[idx - 1] >= '0' && str[idx - 1] <= '9') {
+ is_float = 1;
+ }
+ else {
+ idx = e_start;
+ }
+ }
+
+ /* copy the section we determined to be a number */
+ numstr = PyString_FromStringAndSize(&str[start], idx - start);
+ if (numstr == NULL)
+ return NULL;
+ if (is_float) {
+ /* parse as a float using a fast path if available, otherwise call user defined method */
+ if (s->parse_float != (PyObject *)&PyFloat_Type) {
+ rval = PyObject_CallFunctionObjArgs(s->parse_float, numstr, NULL);
+ }
+ else {
+ rval = PyFloat_FromDouble(PyOS_ascii_atof(PyString_AS_STRING(numstr)));
+ }
+ }
+ else {
+ /* parse as an int using a fast path if available, otherwise call user defined method */
+ if (s->parse_int != (PyObject *)&PyInt_Type) {
+ rval = PyObject_CallFunctionObjArgs(s->parse_int, numstr, NULL);
+ }
+ else {
+ rval = PyInt_FromString(PyString_AS_STRING(numstr), NULL, 10);
+ }
+ }
+ Py_DECREF(numstr);
+ *next_idx_ptr = idx;
+ return rval;
+}
+
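+/* Hand-worked illustration of the backtracking above (an example, not captured
+ * output): for the input "12e+x" the exponent scan finds no digit after the
+ * sign, so idx is reset to e_start and only "12" is consumed, as an integer. */
+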
+static PyObject *
+_match_number_unicode(PyScannerObject *s, PyObject *pystr, Py_ssize_t start, Py_ssize_t *next_idx_ptr) {
+ /* Read a JSON number from PyUnicode pystr.
+ idx is the index of the first character of the number
+ *next_idx_ptr is a return-by-reference index to the first character after
+ the number.
+
+ Returns a new PyObject representation of that number:
+ PyInt, PyLong, or PyFloat.
+ May return other types if parse_int or parse_float are set
+ */
+ Py_UNICODE *str = PyUnicode_AS_UNICODE(pystr);
+ Py_ssize_t end_idx = PyUnicode_GET_SIZE(pystr) - 1;
+ Py_ssize_t idx = start;
+ int is_float = 0;
+ PyObject *rval;
+ PyObject *numstr;
+
+ /* read a sign if it's there, make sure it's not the end of the string */
+ if (str[idx] == '-') {
+ idx++;
+ if (idx > end_idx) {
+ PyErr_SetNone(PyExc_StopIteration);
+ return NULL;
+ }
+ }
+
+ /* read as many integer digits as we find as long as it doesn't start with 0 */
+ if (str[idx] >= '1' && str[idx] <= '9') {
+ idx++;
+ while (idx <= end_idx && str[idx] >= '0' && str[idx] <= '9') idx++;
+ }
+ /* if it starts with 0 we only expect one integer digit */
+ else if (str[idx] == '0') {
+ idx++;
+ }
+ /* no integer digits, error */
+ else {
+ PyErr_SetNone(PyExc_StopIteration);
+ return NULL;
+ }
+
+ /* if the next char is '.' followed by a digit then read all float digits */
+ if (idx < end_idx && str[idx] == '.' && str[idx + 1] >= '0' && str[idx + 1] <= '9') {
+ is_float = 1;
+ idx += 2;
+        while (idx <= end_idx && str[idx] >= '0' && str[idx] <= '9') idx++;
+ }
+
+ /* if the next char is 'e' or 'E' then maybe read the exponent (or backtrack) */
+ if (idx < end_idx && (str[idx] == 'e' || str[idx] == 'E')) {
+ Py_ssize_t e_start = idx;
+ idx++;
+
+ /* read an exponent sign if present */
+ if (idx < end_idx && (str[idx] == '-' || str[idx] == '+')) idx++;
+
+ /* read all digits */
+ while (idx <= end_idx && str[idx] >= '0' && str[idx] <= '9') idx++;
+
+ /* if we got a digit, then parse as float. if not, backtrack */
+ if (str[idx - 1] >= '0' && str[idx - 1] <= '9') {
+ is_float = 1;
+ }
+ else {
+ idx = e_start;
+ }
+ }
+
+ /* copy the section we determined to be a number */
+ numstr = PyUnicode_FromUnicode(&str[start], idx - start);
+ if (numstr == NULL)
+ return NULL;
+ if (is_float) {
+ /* parse as a float using a fast path if available, otherwise call user defined method */
+ if (s->parse_float != (PyObject *)&PyFloat_Type) {
+ rval = PyObject_CallFunctionObjArgs(s->parse_float, numstr, NULL);
+ }
+ else {
+ rval = PyFloat_FromString(numstr, NULL);
+ }
+ }
+ else {
+ /* no fast path for unicode -> int, just call */
+ rval = PyObject_CallFunctionObjArgs(s->parse_int, numstr, NULL);
+ }
+ Py_DECREF(numstr);
+ *next_idx_ptr = idx;
+ return rval;
+}
+
+static PyObject *
+scan_once_str(PyScannerObject *s, PyObject *pystr, Py_ssize_t idx, Py_ssize_t *next_idx_ptr)
+{
+ /* Read one JSON term (of any kind) from PyString pystr.
+ idx is the index of the first character of the term
+ *next_idx_ptr is a return-by-reference index to the first character after
+    the term.
+
+ Returns a new PyObject representation of the term.
+ */
+ char *str = PyString_AS_STRING(pystr);
+ Py_ssize_t length = PyString_GET_SIZE(pystr);
+ if (idx >= length) {
+ PyErr_SetNone(PyExc_StopIteration);
+ return NULL;
+ }
+ switch (str[idx]) {
+ case '"':
+ /* string */
+ return scanstring_str(pystr, idx + 1,
+ PyString_AS_STRING(s->encoding),
+ PyObject_IsTrue(s->strict),
+ next_idx_ptr);
+ case '{':
+ /* object */
+ return _parse_object_str(s, pystr, idx + 1, next_idx_ptr);
+ case '[':
+ /* array */
+ return _parse_array_str(s, pystr, idx + 1, next_idx_ptr);
+ case 'n':
+ /* null */
+ if ((idx + 3 < length) && str[idx + 1] == 'u' && str[idx + 2] == 'l' && str[idx + 3] == 'l') {
+ Py_INCREF(Py_None);
+ *next_idx_ptr = idx + 4;
+ return Py_None;
+ }
+ break;
+ case 't':
+ /* true */
+ if ((idx + 3 < length) && str[idx + 1] == 'r' && str[idx + 2] == 'u' && str[idx + 3] == 'e') {
+ Py_INCREF(Py_True);
+ *next_idx_ptr = idx + 4;
+ return Py_True;
+ }
+ break;
+ case 'f':
+ /* false */
+ if ((idx + 4 < length) && str[idx + 1] == 'a' && str[idx + 2] == 'l' && str[idx + 3] == 's' && str[idx + 4] == 'e') {
+ Py_INCREF(Py_False);
+ *next_idx_ptr = idx + 5;
+ return Py_False;
+ }
+ break;
+ case 'N':
+ /* NaN */
+ if ((idx + 2 < length) && str[idx + 1] == 'a' && str[idx + 2] == 'N') {
+ return _parse_constant(s, "NaN", idx, next_idx_ptr);
+ }
+ break;
+ case 'I':
+ /* Infinity */
+ if ((idx + 7 < length) && str[idx + 1] == 'n' && str[idx + 2] == 'f' && str[idx + 3] == 'i' && str[idx + 4] == 'n' && str[idx + 5] == 'i' && str[idx + 6] == 't' && str[idx + 7] == 'y') {
+ return _parse_constant(s, "Infinity", idx, next_idx_ptr);
+ }
+ break;
+ case '-':
+ /* -Infinity */
+ if ((idx + 8 < length) && str[idx + 1] == 'I' && str[idx + 2] == 'n' && str[idx + 3] == 'f' && str[idx + 4] == 'i' && str[idx + 5] == 'n' && str[idx + 6] == 'i' && str[idx + 7] == 't' && str[idx + 8] == 'y') {
+ return _parse_constant(s, "-Infinity", idx, next_idx_ptr);
+ }
+ break;
+ }
+ /* Didn't find a string, object, array, or named constant. Look for a number. */
+ return _match_number_str(s, pystr, idx, next_idx_ptr);
+}
+
+static PyObject *
+scan_once_unicode(PyScannerObject *s, PyObject *pystr, Py_ssize_t idx, Py_ssize_t *next_idx_ptr)
+{
+ /* Read one JSON term (of any kind) from PyUnicode pystr.
+ idx is the index of the first character of the term
+ *next_idx_ptr is a return-by-reference index to the first character after
+    the term.
+
+ Returns a new PyObject representation of the term.
+ */
+ Py_UNICODE *str = PyUnicode_AS_UNICODE(pystr);
+ Py_ssize_t length = PyUnicode_GET_SIZE(pystr);
+ if (idx >= length) {
+ PyErr_SetNone(PyExc_StopIteration);
+ return NULL;
+ }
+ switch (str[idx]) {
+ case '"':
+ /* string */
+ return scanstring_unicode(pystr, idx + 1,
+ PyObject_IsTrue(s->strict),
+ next_idx_ptr);
+ case '{':
+ /* object */
+ return _parse_object_unicode(s, pystr, idx + 1, next_idx_ptr);
+ case '[':
+ /* array */
+ return _parse_array_unicode(s, pystr, idx + 1, next_idx_ptr);
+ case 'n':
+ /* null */
+ if ((idx + 3 < length) && str[idx + 1] == 'u' && str[idx + 2] == 'l' && str[idx + 3] == 'l') {
+ Py_INCREF(Py_None);
+ *next_idx_ptr = idx + 4;
+ return Py_None;
+ }
+ break;
+ case 't':
+ /* true */
+ if ((idx + 3 < length) && str[idx + 1] == 'r' && str[idx + 2] == 'u' && str[idx + 3] == 'e') {
+ Py_INCREF(Py_True);
+ *next_idx_ptr = idx + 4;
+ return Py_True;
+ }
+ break;
+ case 'f':
+ /* false */
+ if ((idx + 4 < length) && str[idx + 1] == 'a' && str[idx + 2] == 'l' && str[idx + 3] == 's' && str[idx + 4] == 'e') {
+ Py_INCREF(Py_False);
+ *next_idx_ptr = idx + 5;
+ return Py_False;
+ }
+ break;
+ case 'N':
+ /* NaN */
+ if ((idx + 2 < length) && str[idx + 1] == 'a' && str[idx + 2] == 'N') {
+ return _parse_constant(s, "NaN", idx, next_idx_ptr);
+ }
+ break;
+ case 'I':
+ /* Infinity */
+ if ((idx + 7 < length) && str[idx + 1] == 'n' && str[idx + 2] == 'f' && str[idx + 3] == 'i' && str[idx + 4] == 'n' && str[idx + 5] == 'i' && str[idx + 6] == 't' && str[idx + 7] == 'y') {
+ return _parse_constant(s, "Infinity", idx, next_idx_ptr);
+ }
+ break;
+ case '-':
+ /* -Infinity */
+ if ((idx + 8 < length) && str[idx + 1] == 'I' && str[idx + 2] == 'n' && str[idx + 3] == 'f' && str[idx + 4] == 'i' && str[idx + 5] == 'n' && str[idx + 6] == 'i' && str[idx + 7] == 't' && str[idx + 8] == 'y') {
+ return _parse_constant(s, "-Infinity", idx, next_idx_ptr);
+ }
+ break;
+ }
+ /* Didn't find a string, object, array, or named constant. Look for a number. */
+ return _match_number_unicode(s, pystr, idx, next_idx_ptr);
+}
+
+static PyObject *
+scanner_call(PyObject *self, PyObject *args, PyObject *kwds)
+{
+ /* Python callable interface to scan_once_{str,unicode} */
+ PyObject *pystr;
+ PyObject *rval;
+ Py_ssize_t idx;
+ Py_ssize_t next_idx = -1;
+ static char *kwlist[] = {"string", "idx", NULL};
+ PyScannerObject *s;
+ assert(PyScanner_Check(self));
+ s = (PyScannerObject *)self;
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO&:scan_once", kwlist, &pystr, _convertPyInt_AsSsize_t, &idx))
+ return NULL;
+
+ if (PyString_Check(pystr)) {
+ rval = scan_once_str(s, pystr, idx, &next_idx);
+ }
+ else if (PyUnicode_Check(pystr)) {
+ rval = scan_once_unicode(s, pystr, idx, &next_idx);
+ }
+ else {
+ PyErr_Format(PyExc_TypeError,
+ "first argument must be a string, not %.80s",
+ Py_TYPE(pystr)->tp_name);
+ return NULL;
+ }
+ return _build_rval_index_tuple(rval, next_idx);
+}
+
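+/* Usage sketch (values worked out by hand, not captured output): from Python,
+ * a Scanner instance behaves as scan_once(string, idx) -> (value, end_index),
+ * so scanner('[1, 2]', 0) would be expected to return ([1, 2], 6). */
+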
+static PyObject *
+scanner_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
+{
+ PyScannerObject *s;
+ s = (PyScannerObject *)type->tp_alloc(type, 0);
+ if (s != NULL) {
+ s->encoding = NULL;
+ s->strict = NULL;
+ s->object_hook = NULL;
+ s->parse_float = NULL;
+ s->parse_int = NULL;
+ s->parse_constant = NULL;
+ }
+ return (PyObject *)s;
+}
+
+static int
+scanner_init(PyObject *self, PyObject *args, PyObject *kwds)
+{
+ /* Initialize Scanner object */
+ PyObject *ctx;
+ static char *kwlist[] = {"context", NULL};
+ PyScannerObject *s;
+
+ assert(PyScanner_Check(self));
+ s = (PyScannerObject *)self;
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O:make_scanner", kwlist, &ctx))
+ return -1;
+
+ /* PyString_AS_STRING is used on encoding */
+ s->encoding = PyObject_GetAttrString(ctx, "encoding");
+ if (s->encoding == Py_None) {
+ Py_DECREF(Py_None);
+ s->encoding = PyString_InternFromString(DEFAULT_ENCODING);
+ }
+ else if (PyUnicode_Check(s->encoding)) {
+ PyObject *tmp = PyUnicode_AsEncodedString(s->encoding, NULL, NULL);
+ Py_DECREF(s->encoding);
+ s->encoding = tmp;
+ }
+ if (s->encoding == NULL || !PyString_Check(s->encoding))
+ goto bail;
+
+ /* All of these will fail "gracefully" so we don't need to verify them */
+ s->strict = PyObject_GetAttrString(ctx, "strict");
+ if (s->strict == NULL)
+ goto bail;
+ s->object_hook = PyObject_GetAttrString(ctx, "object_hook");
+ if (s->object_hook == NULL)
+ goto bail;
+ s->parse_float = PyObject_GetAttrString(ctx, "parse_float");
+ if (s->parse_float == NULL)
+ goto bail;
+ s->parse_int = PyObject_GetAttrString(ctx, "parse_int");
+ if (s->parse_int == NULL)
+ goto bail;
+ s->parse_constant = PyObject_GetAttrString(ctx, "parse_constant");
+ if (s->parse_constant == NULL)
+ goto bail;
+
+ return 0;
+
+bail:
+ Py_CLEAR(s->encoding);
+ Py_CLEAR(s->strict);
+ Py_CLEAR(s->object_hook);
+ Py_CLEAR(s->parse_float);
+ Py_CLEAR(s->parse_int);
+ Py_CLEAR(s->parse_constant);
+ return -1;
+}
+
+PyDoc_STRVAR(scanner_doc, "JSON scanner object");
+
+static
+PyTypeObject PyScannerType = {
+ PyObject_HEAD_INIT(NULL)
+ 0, /* tp_internal */
+ "simplejson._speedups.Scanner", /* tp_name */
+ sizeof(PyScannerObject), /* tp_basicsize */
+ 0, /* tp_itemsize */
+ scanner_dealloc, /* tp_dealloc */
+ 0, /* tp_print */
+ 0, /* tp_getattr */
+ 0, /* tp_setattr */
+ 0, /* tp_compare */
+ 0, /* tp_repr */
+ 0, /* tp_as_number */
+ 0, /* tp_as_sequence */
+ 0, /* tp_as_mapping */
+ 0, /* tp_hash */
+ scanner_call, /* tp_call */
+ 0, /* tp_str */
+ 0,/* PyObject_GenericGetAttr, */ /* tp_getattro */
+ 0,/* PyObject_GenericSetAttr, */ /* tp_setattro */
+ 0, /* tp_as_buffer */
+ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, /* tp_flags */
+ scanner_doc, /* tp_doc */
+ scanner_traverse, /* tp_traverse */
+ scanner_clear, /* tp_clear */
+ 0, /* tp_richcompare */
+ 0, /* tp_weaklistoffset */
+ 0, /* tp_iter */
+ 0, /* tp_iternext */
+ 0, /* tp_methods */
+ scanner_members, /* tp_members */
+ 0, /* tp_getset */
+ 0, /* tp_base */
+ 0, /* tp_dict */
+ 0, /* tp_descr_get */
+ 0, /* tp_descr_set */
+ 0, /* tp_dictoffset */
+ scanner_init, /* tp_init */
+ 0,/* PyType_GenericAlloc, */ /* tp_alloc */
+ scanner_new, /* tp_new */
+ 0,/* PyObject_GC_Del, */ /* tp_free */
+};
+
+static PyObject *
+encoder_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
+{
+ PyEncoderObject *s;
+ s = (PyEncoderObject *)type->tp_alloc(type, 0);
+ if (s != NULL) {
+ s->markers = NULL;
+ s->defaultfn = NULL;
+ s->encoder = NULL;
+ s->indent = NULL;
+ s->key_separator = NULL;
+ s->item_separator = NULL;
+ s->sort_keys = NULL;
+ s->skipkeys = NULL;
+ }
+ return (PyObject *)s;
+}
+
+static int
+encoder_init(PyObject *self, PyObject *args, PyObject *kwds)
+{
+ /* initialize Encoder object */
+ static char *kwlist[] = {"markers", "default", "encoder", "indent", "key_separator", "item_separator", "sort_keys", "skipkeys", "allow_nan", NULL};
+
+ PyEncoderObject *s;
+ PyObject *allow_nan;
+
+ assert(PyEncoder_Check(self));
+ s = (PyEncoderObject *)self;
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "OOOOOOOOO:make_encoder", kwlist,
+ &s->markers, &s->defaultfn, &s->encoder, &s->indent, &s->key_separator, &s->item_separator, &s->sort_keys, &s->skipkeys, &allow_nan))
+ return -1;
+
+ Py_INCREF(s->markers);
+ Py_INCREF(s->defaultfn);
+ Py_INCREF(s->encoder);
+ Py_INCREF(s->indent);
+ Py_INCREF(s->key_separator);
+ Py_INCREF(s->item_separator);
+ Py_INCREF(s->sort_keys);
+ Py_INCREF(s->skipkeys);
+ s->fast_encode = (PyCFunction_Check(s->encoder) && PyCFunction_GetFunction(s->encoder) == (PyCFunction)py_encode_basestring_ascii);
+ s->allow_nan = PyObject_IsTrue(allow_nan);
+ return 0;
+}
+
+static PyObject *
+encoder_call(PyObject *self, PyObject *args, PyObject *kwds)
+{
+ /* Python callable interface to encode_listencode_obj */
+ static char *kwlist[] = {"obj", "_current_indent_level", NULL};
+ PyObject *obj;
+ PyObject *rval;
+ Py_ssize_t indent_level;
+ PyEncoderObject *s;
+ assert(PyEncoder_Check(self));
+ s = (PyEncoderObject *)self;
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO&:_iterencode", kwlist,
+ &obj, _convertPyInt_AsSsize_t, &indent_level))
+ return NULL;
+ rval = PyList_New(0);
+ if (rval == NULL)
+ return NULL;
+ if (encoder_listencode_obj(s, rval, obj, indent_level)) {
+ Py_DECREF(rval);
+ return NULL;
+ }
+ return rval;
+}
+
+static PyObject *
+_encoded_const(PyObject *obj)
+{
+ /* Return the JSON string representation of None, True, False */
+ if (obj == Py_None) {
+ static PyObject *s_null = NULL;
+ if (s_null == NULL) {
+ s_null = PyString_InternFromString("null");
+ }
+ Py_INCREF(s_null);
+ return s_null;
+ }
+ else if (obj == Py_True) {
+ static PyObject *s_true = NULL;
+ if (s_true == NULL) {
+ s_true = PyString_InternFromString("true");
+ }
+ Py_INCREF(s_true);
+ return s_true;
+ }
+ else if (obj == Py_False) {
+ static PyObject *s_false = NULL;
+ if (s_false == NULL) {
+ s_false = PyString_InternFromString("false");
+ }
+ Py_INCREF(s_false);
+ return s_false;
+ }
+ else {
+ PyErr_SetString(PyExc_ValueError, "not a const");
+ return NULL;
+ }
+}
+
+static PyObject *
+encoder_encode_float(PyEncoderObject *s, PyObject *obj)
+{
+ /* Return the JSON representation of a PyFloat */
+ double i = PyFloat_AS_DOUBLE(obj);
+ if (!Py_IS_FINITE(i)) {
+ if (!s->allow_nan) {
+ PyErr_SetString(PyExc_ValueError, "Out of range float values are not JSON compliant");
+ return NULL;
+ }
+ if (i > 0) {
+ return PyString_FromString("Infinity");
+ }
+ else if (i < 0) {
+ return PyString_FromString("-Infinity");
+ }
+ else {
+ return PyString_FromString("NaN");
+ }
+ }
+ /* Use a better float format here? */
+ return PyObject_Repr(obj);
+}
+
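+/* Hand-worked sketch of the special cases above: with allow_nan set,
+ * float('inf') encodes as the bare token Infinity and float('nan') as NaN;
+ * with allow_nan unset, the same inputs raise ValueError instead. */
+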
+static PyObject *
+encoder_encode_string(PyEncoderObject *s, PyObject *obj)
+{
+ /* Return the JSON representation of a string */
+ if (s->fast_encode)
+ return py_encode_basestring_ascii(NULL, obj);
+ else
+ return PyObject_CallFunctionObjArgs(s->encoder, obj, NULL);
+}
+
+static int
+_steal_list_append(PyObject *lst, PyObject *stolen)
+{
+ /* Append stolen and then decrement its reference count */
+ int rval = PyList_Append(lst, stolen);
+ Py_DECREF(stolen);
+ return rval;
+}
+
+static int
+encoder_listencode_obj(PyEncoderObject *s, PyObject *rval, PyObject *obj, Py_ssize_t indent_level)
+{
+ /* Encode Python object obj to a JSON term, rval is a PyList */
+ PyObject *newobj;
+ int rv;
+
+ if (obj == Py_None || obj == Py_True || obj == Py_False) {
+ PyObject *cstr = _encoded_const(obj);
+ if (cstr == NULL)
+ return -1;
+ return _steal_list_append(rval, cstr);
+ }
+ else if (PyString_Check(obj) || PyUnicode_Check(obj))
+ {
+ PyObject *encoded = encoder_encode_string(s, obj);
+ if (encoded == NULL)
+ return -1;
+ return _steal_list_append(rval, encoded);
+ }
+ else if (PyInt_Check(obj) || PyLong_Check(obj)) {
+ PyObject *encoded = PyObject_Str(obj);
+ if (encoded == NULL)
+ return -1;
+ return _steal_list_append(rval, encoded);
+ }
+ else if (PyFloat_Check(obj)) {
+ PyObject *encoded = encoder_encode_float(s, obj);
+ if (encoded == NULL)
+ return -1;
+ return _steal_list_append(rval, encoded);
+ }
+ else if (PyList_Check(obj) || PyTuple_Check(obj)) {
+ return encoder_listencode_list(s, rval, obj, indent_level);
+ }
+ else if (PyDict_Check(obj)) {
+ return encoder_listencode_dict(s, rval, obj, indent_level);
+ }
+ else {
+ PyObject *ident = NULL;
+ if (s->markers != Py_None) {
+ int has_key;
+ ident = PyLong_FromVoidPtr(obj);
+ if (ident == NULL)
+ return -1;
+ has_key = PyDict_Contains(s->markers, ident);
+ if (has_key) {
+ if (has_key != -1)
+ PyErr_SetString(PyExc_ValueError, "Circular reference detected");
+ Py_DECREF(ident);
+ return -1;
+ }
+ if (PyDict_SetItem(s->markers, ident, obj)) {
+ Py_DECREF(ident);
+ return -1;
+ }
+ }
+ newobj = PyObject_CallFunctionObjArgs(s->defaultfn, obj, NULL);
+ if (newobj == NULL) {
+ Py_XDECREF(ident);
+ return -1;
+ }
+ rv = encoder_listencode_obj(s, rval, newobj, indent_level);
+ Py_DECREF(newobj);
+ if (rv) {
+ Py_XDECREF(ident);
+ return -1;
+ }
+ if (ident != NULL) {
+ if (PyDict_DelItem(s->markers, ident)) {
+ Py_XDECREF(ident);
+ return -1;
+ }
+ Py_XDECREF(ident);
+ }
+ return rv;
+ }
+}
+
+static int
+encoder_listencode_dict(PyEncoderObject *s, PyObject *rval, PyObject *dct, Py_ssize_t indent_level)
+{
+    /* Encode Python dict dct to a JSON term, rval is a PyList */
+ static PyObject *open_dict = NULL;
+ static PyObject *close_dict = NULL;
+ static PyObject *empty_dict = NULL;
+ PyObject *kstr = NULL;
+ PyObject *ident = NULL;
+ PyObject *key, *value;
+ Py_ssize_t pos;
+ int skipkeys;
+ Py_ssize_t idx;
+
+ if (open_dict == NULL || close_dict == NULL || empty_dict == NULL) {
+ open_dict = PyString_InternFromString("{");
+ close_dict = PyString_InternFromString("}");
+ empty_dict = PyString_InternFromString("{}");
+ if (open_dict == NULL || close_dict == NULL || empty_dict == NULL)
+ return -1;
+ }
+ if (PyDict_Size(dct) == 0)
+ return PyList_Append(rval, empty_dict);
+
+ if (s->markers != Py_None) {
+ int has_key;
+ ident = PyLong_FromVoidPtr(dct);
+ if (ident == NULL)
+ goto bail;
+ has_key = PyDict_Contains(s->markers, ident);
+ if (has_key) {
+ if (has_key != -1)
+ PyErr_SetString(PyExc_ValueError, "Circular reference detected");
+ goto bail;
+ }
+ if (PyDict_SetItem(s->markers, ident, dct)) {
+ goto bail;
+ }
+ }
+
+ if (PyList_Append(rval, open_dict))
+ goto bail;
+
+ if (s->indent != Py_None) {
+ /* TODO: DOES NOT RUN */
+ indent_level += 1;
+ /*
+ newline_indent = '\n' + (' ' * (_indent * _current_indent_level))
+ separator = _item_separator + newline_indent
+ buf += newline_indent
+ */
+ }
+
+ /* TODO: C speedup not implemented for sort_keys */
+
+ pos = 0;
+ skipkeys = PyObject_IsTrue(s->skipkeys);
+ idx = 0;
+ while (PyDict_Next(dct, &pos, &key, &value)) {
+ PyObject *encoded;
+
+ if (PyString_Check(key) || PyUnicode_Check(key)) {
+ Py_INCREF(key);
+ kstr = key;
+ }
+ else if (PyFloat_Check(key)) {
+ kstr = encoder_encode_float(s, key);
+ if (kstr == NULL)
+ goto bail;
+ }
+ else if (PyInt_Check(key) || PyLong_Check(key)) {
+ kstr = PyObject_Str(key);
+ if (kstr == NULL)
+ goto bail;
+ }
+ else if (key == Py_True || key == Py_False || key == Py_None) {
+ kstr = _encoded_const(key);
+ if (kstr == NULL)
+ goto bail;
+ }
+ else if (skipkeys) {
+ continue;
+ }
+ else {
+ /* TODO: include repr of key */
+ PyErr_SetString(PyExc_ValueError, "keys must be a string");
+ goto bail;
+ }
+
+ if (idx) {
+ if (PyList_Append(rval, s->item_separator))
+ goto bail;
+ }
+
+ encoded = encoder_encode_string(s, kstr);
+ Py_CLEAR(kstr);
+ if (encoded == NULL)
+ goto bail;
+ if (PyList_Append(rval, encoded)) {
+ Py_DECREF(encoded);
+ goto bail;
+ }
+ Py_DECREF(encoded);
+ if (PyList_Append(rval, s->key_separator))
+ goto bail;
+ if (encoder_listencode_obj(s, rval, value, indent_level))
+ goto bail;
+ idx += 1;
+ }
+ if (ident != NULL) {
+ if (PyDict_DelItem(s->markers, ident))
+ goto bail;
+ Py_CLEAR(ident);
+ }
+ if (s->indent != Py_None) {
+ /* TODO: DOES NOT RUN */
+ indent_level -= 1;
+ /*
+ yield '\n' + (' ' * (_indent * _current_indent_level))
+ */
+ }
+ if (PyList_Append(rval, close_dict))
+ goto bail;
+ return 0;
+
+bail:
+ Py_XDECREF(kstr);
+ Py_XDECREF(ident);
+ return -1;
+}
+
+
+static int
+encoder_listencode_list(PyEncoderObject *s, PyObject *rval, PyObject *seq, Py_ssize_t indent_level)
+{
+ /* Encode Python list seq to a JSON term, rval is a PyList */
+ static PyObject *open_array = NULL;
+ static PyObject *close_array = NULL;
+ static PyObject *empty_array = NULL;
+ PyObject *ident = NULL;
+ PyObject *s_fast = NULL;
+ Py_ssize_t num_items;
+ PyObject **seq_items;
+ Py_ssize_t i;
+
+ if (open_array == NULL || close_array == NULL || empty_array == NULL) {
+ open_array = PyString_InternFromString("[");
+ close_array = PyString_InternFromString("]");
+ empty_array = PyString_InternFromString("[]");
+ if (open_array == NULL || close_array == NULL || empty_array == NULL)
+ return -1;
+ }
+ ident = NULL;
+ s_fast = PySequence_Fast(seq, "_iterencode_list needs a sequence");
+ if (s_fast == NULL)
+ return -1;
+ num_items = PySequence_Fast_GET_SIZE(s_fast);
+ if (num_items == 0) {
+ Py_DECREF(s_fast);
+ return PyList_Append(rval, empty_array);
+ }
+
+ if (s->markers != Py_None) {
+ int has_key;
+ ident = PyLong_FromVoidPtr(seq);
+ if (ident == NULL)
+ goto bail;
+ has_key = PyDict_Contains(s->markers, ident);
+ if (has_key) {
+ if (has_key != -1)
+ PyErr_SetString(PyExc_ValueError, "Circular reference detected");
+ goto bail;
+ }
+ if (PyDict_SetItem(s->markers, ident, seq)) {
+ goto bail;
+ }
+ }
+
+ seq_items = PySequence_Fast_ITEMS(s_fast);
+ if (PyList_Append(rval, open_array))
+ goto bail;
+ if (s->indent != Py_None) {
+ /* TODO: DOES NOT RUN */
+ indent_level += 1;
+ /*
+ newline_indent = '\n' + (' ' * (_indent * _current_indent_level))
+ separator = _item_separator + newline_indent
+ buf += newline_indent
+ */
+ }
+ for (i = 0; i < num_items; i++) {
+ PyObject *obj = seq_items[i];
+ if (i) {
+ if (PyList_Append(rval, s->item_separator))
+ goto bail;
+ }
+ if (encoder_listencode_obj(s, rval, obj, indent_level))
+ goto bail;
+ }
+ if (ident != NULL) {
+ if (PyDict_DelItem(s->markers, ident))
+ goto bail;
+ Py_CLEAR(ident);
+ }
+ if (s->indent != Py_None) {
+ /* TODO: DOES NOT RUN */
+ indent_level -= 1;
+ /*
+ yield '\n' + (' ' * (_indent * _current_indent_level))
+ */
+ }
+ if (PyList_Append(rval, close_array))
+ goto bail;
+ Py_DECREF(s_fast);
+ return 0;
+
+bail:
+ Py_XDECREF(ident);
+ Py_DECREF(s_fast);
+ return -1;
+}
+
+static void
+encoder_dealloc(PyObject *self)
+{
+ /* Deallocate Encoder */
+ encoder_clear(self);
+ Py_TYPE(self)->tp_free(self);
+}
+
+static int
+encoder_traverse(PyObject *self, visitproc visit, void *arg)
+{
+ PyEncoderObject *s;
+ assert(PyEncoder_Check(self));
+ s = (PyEncoderObject *)self;
+ Py_VISIT(s->markers);
+ Py_VISIT(s->defaultfn);
+ Py_VISIT(s->encoder);
+ Py_VISIT(s->indent);
+ Py_VISIT(s->key_separator);
+ Py_VISIT(s->item_separator);
+ Py_VISIT(s->sort_keys);
+ Py_VISIT(s->skipkeys);
+ return 0;
+}
+
+static int
+encoder_clear(PyObject *self)
+{
+    /* Clear the Encoder's references */
+ PyEncoderObject *s;
+ assert(PyEncoder_Check(self));
+ s = (PyEncoderObject *)self;
+ Py_CLEAR(s->markers);
+ Py_CLEAR(s->defaultfn);
+ Py_CLEAR(s->encoder);
+ Py_CLEAR(s->indent);
+ Py_CLEAR(s->key_separator);
+ Py_CLEAR(s->item_separator);
+ Py_CLEAR(s->sort_keys);
+ Py_CLEAR(s->skipkeys);
+ return 0;
+}
+
+PyDoc_STRVAR(encoder_doc, "_iterencode(obj, _current_indent_level) -> iterable");
+
+static
+PyTypeObject PyEncoderType = {
+ PyObject_HEAD_INIT(NULL)
+ 0, /* tp_internal */
+ "simplejson._speedups.Encoder", /* tp_name */
+ sizeof(PyEncoderObject), /* tp_basicsize */
+ 0, /* tp_itemsize */
+ encoder_dealloc, /* tp_dealloc */
+ 0, /* tp_print */
+ 0, /* tp_getattr */
+ 0, /* tp_setattr */
+ 0, /* tp_compare */
+ 0, /* tp_repr */
+ 0, /* tp_as_number */
+ 0, /* tp_as_sequence */
+ 0, /* tp_as_mapping */
+ 0, /* tp_hash */
+ encoder_call, /* tp_call */
+ 0, /* tp_str */
+ 0, /* tp_getattro */
+ 0, /* tp_setattro */
+ 0, /* tp_as_buffer */
+ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, /* tp_flags */
+ encoder_doc, /* tp_doc */
+ encoder_traverse, /* tp_traverse */
+ encoder_clear, /* tp_clear */
+ 0, /* tp_richcompare */
+ 0, /* tp_weaklistoffset */
+ 0, /* tp_iter */
+ 0, /* tp_iternext */
+ 0, /* tp_methods */
+ encoder_members, /* tp_members */
+ 0, /* tp_getset */
+ 0, /* tp_base */
+ 0, /* tp_dict */
+ 0, /* tp_descr_get */
+ 0, /* tp_descr_set */
+ 0, /* tp_dictoffset */
+ encoder_init, /* tp_init */
+ 0, /* tp_alloc */
+ encoder_new, /* tp_new */
+ 0, /* tp_free */
+};
+
+static PyMethodDef speedups_methods[] = {
+ {"encode_basestring_ascii",
+ (PyCFunction)py_encode_basestring_ascii,
+ METH_O,
+ pydoc_encode_basestring_ascii},
+ {"scanstring",
+ (PyCFunction)py_scanstring,
+ METH_VARARGS,
+ pydoc_scanstring},
+ {NULL, NULL, 0, NULL}
+};
+
+PyDoc_STRVAR(module_doc,
+"simplejson speedups\n");
+
+void
+init_speedups(void)
+{
+ PyObject *m;
+ PyScannerType.tp_new = PyType_GenericNew;
+ if (PyType_Ready(&PyScannerType) < 0)
+ return;
+ PyEncoderType.tp_new = PyType_GenericNew;
+ if (PyType_Ready(&PyEncoderType) < 0)
+ return;
+ m = Py_InitModule3("_speedups", speedups_methods, module_doc);
+ Py_INCREF((PyObject*)&PyScannerType);
+ PyModule_AddObject(m, "make_scanner", (PyObject*)&PyScannerType);
+ Py_INCREF((PyObject*)&PyEncoderType);
+ PyModule_AddObject(m, "make_encoder", (PyObject*)&PyEncoderType);
+}
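+
+/* Note on the exports above (an observation, not upstream documentation): the
+ * Scanner and Encoder types themselves are published under the factory-style
+ * names make_scanner and make_encoder, so _speedups.make_scanner(context)
+ * simply instantiates Scanner, running scanner_init() on the context. */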
--- /dev/null
+"""Implementation of JSONDecoder
+"""
+import re
+import sys
+import struct
+
+from simplejson.scanner import make_scanner
+try:
+ from simplejson._speedups import scanstring as c_scanstring
+except ImportError:
+ c_scanstring = None
+
+__all__ = ['JSONDecoder']
+
+FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
+
+def _floatconstants():
+ _BYTES = '7FF80000000000007FF0000000000000'.decode('hex')
+ if sys.byteorder != 'big':
+ _BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1]
+ nan, inf = struct.unpack('dd', _BYTES)
+ return nan, inf, -inf
+
+NaN, PosInf, NegInf = _floatconstants()
+
+
+def linecol(doc, pos):
+ lineno = doc.count('\n', 0, pos) + 1
+ if lineno == 1:
+ colno = pos
+ else:
+ colno = pos - doc.rindex('\n', 0, pos)
+ return lineno, colno
+
+
+def errmsg(msg, doc, pos, end=None):
+ # Note that this function is called from _speedups
+ lineno, colno = linecol(doc, pos)
+ if end is None:
+ #fmt = '{0}: line {1} column {2} (char {3})'
+ #return fmt.format(msg, lineno, colno, pos)
+ fmt = '%s: line %d column %d (char %d)'
+ return fmt % (msg, lineno, colno, pos)
+ endlineno, endcolno = linecol(doc, end)
+ #fmt = '{0}: line {1} column {2} - line {3} column {4} (char {5} - {6})'
+ #return fmt.format(msg, lineno, colno, endlineno, endcolno, pos, end)
+ fmt = '%s: line %d column %d - line %d column %d (char %d - %d)'
+ return fmt % (msg, lineno, colno, endlineno, endcolno, pos, end)
+
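+# Hedged example of the message format built above (worked out by hand, not
+# captured output):
+#     errmsg("Expecting , delimiter", '{"a" 1}', 5)
+#     -> 'Expecting , delimiter: line 1 column 5 (char 5)'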
+
+_CONSTANTS = {
+ '-Infinity': NegInf,
+ 'Infinity': PosInf,
+ 'NaN': NaN,
+}
+
+STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS)
+BACKSLASH = {
+ '"': u'"', '\\': u'\\', '/': u'/',
+ 'b': u'\b', 'f': u'\f', 'n': u'\n', 'r': u'\r', 't': u'\t',
+}
+
+DEFAULT_ENCODING = "utf-8"
+
+def py_scanstring(s, end, encoding=None, strict=True, _b=BACKSLASH, _m=STRINGCHUNK.match):
+ """Scan the string s for a JSON string. End is the index of the
+ character in s after the quote that started the JSON string.
+ Unescapes all valid JSON string escape sequences and raises ValueError
+ on attempt to decode an invalid string. If strict is False then literal
+ control characters are allowed in the string.
+
+ Returns a tuple of the decoded string and the index of the character in s
+ after the end quote."""
+ if encoding is None:
+ encoding = DEFAULT_ENCODING
+ chunks = []
+ _append = chunks.append
+ begin = end - 1
+ while 1:
+ chunk = _m(s, end)
+ if chunk is None:
+ raise ValueError(
+ errmsg("Unterminated string starting at", s, begin))
+ end = chunk.end()
+ content, terminator = chunk.groups()
+        # Content contains zero or more unescaped string characters
+ if content:
+ if not isinstance(content, unicode):
+ content = unicode(content, encoding)
+ _append(content)
+ # Terminator is the end of string, a literal control character,
+ # or a backslash denoting that an escape sequence follows
+ if terminator == '"':
+ break
+ elif terminator != '\\':
+ if strict:
+ msg = "Invalid control character %r at" % (terminator,)
+ #msg = "Invalid control character {0!r} at".format(terminator)
+ raise ValueError(errmsg(msg, s, end))
+ else:
+ _append(terminator)
+ continue
+ try:
+ esc = s[end]
+ except IndexError:
+ raise ValueError(
+ errmsg("Unterminated string starting at", s, begin))
+ # If not a unicode escape sequence, must be in the lookup table
+ if esc != 'u':
+ try:
+ char = _b[esc]
+ except KeyError:
+ msg = "Invalid \\escape: " + repr(esc)
+ raise ValueError(errmsg(msg, s, end))
+ end += 1
+ else:
+ # Unicode escape sequence
+ esc = s[end + 1:end + 5]
+ next_end = end + 5
+ if len(esc) != 4:
+ msg = "Invalid \\uXXXX escape"
+ raise ValueError(errmsg(msg, s, end))
+ uni = int(esc, 16)
+ # Check for surrogate pair on UCS-4 systems
+ if 0xd800 <= uni <= 0xdbff and sys.maxunicode > 65535:
+ msg = "Invalid \\uXXXX\\uXXXX surrogate pair"
+ if not s[end + 5:end + 7] == '\\u':
+ raise ValueError(errmsg(msg, s, end))
+ esc2 = s[end + 7:end + 11]
+ if len(esc2) != 4:
+ raise ValueError(errmsg(msg, s, end))
+ uni2 = int(esc2, 16)
+ uni = 0x10000 + (((uni - 0xd800) << 10) | (uni2 - 0xdc00))
+ next_end += 6
+ char = unichr(uni)
+ end = next_end
+ # Append the unescaped character
+ _append(char)
+ return u''.join(chunks), end
+
+
+# Use speedup if available
+scanstring = c_scanstring or py_scanstring
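+# Rough usage sketch (hand-worked, illustrative only): scanstring takes the
+# document and the index just past the opening quote and returns the decoded
+# text plus the index just past the closing quote, e.g.
+#     scanstring(u'"abc" more', 1)  ->  (u'abc', 5)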
+
+WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS)
+WHITESPACE_STR = ' \t\n\r'
+
+def JSONObject((s, end), encoding, strict, scan_once, object_hook, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
+ pairs = {}
+    # Use a slice to prevent IndexError from being raised; the following
+    # check will raise a more specific ValueError if the string is empty
+ nextchar = s[end:end + 1]
+ # Normally we expect nextchar == '"'
+ if nextchar != '"':
+ if nextchar in _ws:
+ end = _w(s, end).end()
+ nextchar = s[end:end + 1]
+ # Trivial empty object
+ if nextchar == '}':
+ return pairs, end + 1
+ elif nextchar != '"':
+ raise ValueError(errmsg("Expecting property name", s, end))
+ end += 1
+ while True:
+ key, end = scanstring(s, end, encoding, strict)
+
+ # To skip some function call overhead we optimize the fast paths where
+ # the JSON key separator is ": " or just ":".
+ if s[end:end + 1] != ':':
+ end = _w(s, end).end()
+ if s[end:end + 1] != ':':
+ raise ValueError(errmsg("Expecting : delimiter", s, end))
+
+ end += 1
+
+ try:
+ if s[end] in _ws:
+ end += 1
+ if s[end] in _ws:
+ end = _w(s, end + 1).end()
+ except IndexError:
+ pass
+
+ try:
+ value, end = scan_once(s, end)
+ except StopIteration:
+ raise ValueError(errmsg("Expecting object", s, end))
+ pairs[key] = value
+
+ try:
+ nextchar = s[end]
+ if nextchar in _ws:
+ end = _w(s, end + 1).end()
+ nextchar = s[end]
+ except IndexError:
+ nextchar = ''
+ end += 1
+
+ if nextchar == '}':
+ break
+ elif nextchar != ',':
+ raise ValueError(errmsg("Expecting , delimiter", s, end - 1))
+
+ try:
+ nextchar = s[end]
+ if nextchar in _ws:
+ end += 1
+ nextchar = s[end]
+ if nextchar in _ws:
+ end = _w(s, end + 1).end()
+ nextchar = s[end]
+ except IndexError:
+ nextchar = ''
+
+ end += 1
+ if nextchar != '"':
+ raise ValueError(errmsg("Expecting property name", s, end - 1))
+
+ if object_hook is not None:
+ pairs = object_hook(pairs)
+ return pairs, end
+
+def JSONArray((s, end), scan_once, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
+ values = []
+ nextchar = s[end:end + 1]
+ if nextchar in _ws:
+ end = _w(s, end + 1).end()
+ nextchar = s[end:end + 1]
+ # Look-ahead for trivial empty array
+ if nextchar == ']':
+ return values, end + 1
+ _append = values.append
+ while True:
+ try:
+ value, end = scan_once(s, end)
+ except StopIteration:
+ raise ValueError(errmsg("Expecting object", s, end))
+ _append(value)
+ nextchar = s[end:end + 1]
+ if nextchar in _ws:
+ end = _w(s, end + 1).end()
+ nextchar = s[end:end + 1]
+ end += 1
+ if nextchar == ']':
+ break
+ elif nextchar != ',':
+ raise ValueError(errmsg("Expecting , delimiter", s, end))
+
+ try:
+ if s[end] in _ws:
+ end += 1
+ if s[end] in _ws:
+ end = _w(s, end + 1).end()
+ except IndexError:
+ pass
+
+ return values, end
+
+class JSONDecoder(object):
+ """Simple JSON <http://json.org> decoder
+
+ Performs the following translations in decoding by default:
+
+ +---------------+-------------------+
+ | JSON | Python |
+ +===============+===================+
+ | object | dict |
+ +---------------+-------------------+
+ | array | list |
+ +---------------+-------------------+
+ | string | unicode |
+ +---------------+-------------------+
+ | number (int) | int, long |
+ +---------------+-------------------+
+ | number (real) | float |
+ +---------------+-------------------+
+ | true | True |
+ +---------------+-------------------+
+ | false | False |
+ +---------------+-------------------+
+ | null | None |
+ +---------------+-------------------+
+
+ It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as
+ their corresponding ``float`` values, which is outside the JSON spec.
+
+ """
+
+ def __init__(self, encoding=None, object_hook=None, parse_float=None,
+ parse_int=None, parse_constant=None, strict=True):
+ """``encoding`` determines the encoding used to interpret any ``str``
+ objects decoded by this instance (utf-8 by default). It has no
+ effect when decoding ``unicode`` objects.
+
+        Note that currently only encodings that are a superset of ASCII work;
+        strings of other encodings should be passed in as ``unicode``.
+
+ ``object_hook``, if specified, will be called with the result
+ of every JSON object decoded and its return value will be used in
+ place of the given ``dict``. This can be used to provide custom
+ deserializations (e.g. to support JSON-RPC class hinting).
+
+ ``parse_float``, if specified, will be called with the string
+ of every JSON float to be decoded. By default this is equivalent to
+ float(num_str). This can be used to use another datatype or parser
+ for JSON floats (e.g. decimal.Decimal).
+
+ ``parse_int``, if specified, will be called with the string
+ of every JSON int to be decoded. By default this is equivalent to
+ int(num_str). This can be used to use another datatype or parser
+ for JSON integers (e.g. float).
+
+ ``parse_constant``, if specified, will be called with one of the
+ following strings: -Infinity, Infinity, NaN.
+ This can be used to raise an exception if invalid JSON numbers
+ are encountered.
+
+ """
+ self.encoding = encoding
+ self.object_hook = object_hook
+ self.parse_float = parse_float or float
+ self.parse_int = parse_int or int
+ self.parse_constant = parse_constant or _CONSTANTS.__getitem__
+ self.strict = strict
+ self.parse_object = JSONObject
+ self.parse_array = JSONArray
+ self.parse_string = scanstring
+ self.scan_once = make_scanner(self)
+
+ def decode(self, s, _w=WHITESPACE.match):
+ """Return the Python representation of ``s`` (a ``str`` or ``unicode``
+ instance containing a JSON document)
+
+ """
+ obj, end = self.raw_decode(s, idx=_w(s, 0).end())
+ end = _w(s, end).end()
+ if end != len(s):
+ raise ValueError(errmsg("Extra data", s, end, len(s)))
+ return obj
+
+ def raw_decode(self, s, idx=0):
+ """Decode a JSON document from ``s`` (a ``str`` or ``unicode`` beginning
+ with a JSON document) and return a 2-tuple of the Python
+ representation and the index in ``s`` where the document ended.
+
+ This can be used to decode a JSON document from a string that may
+ have extraneous data at the end.
+
+ """
+ try:
+ obj, end = self.scan_once(s, idx)
+ except StopIteration:
+ raise ValueError("No JSON object could be decoded")
+ return obj, end
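+# A minimal decoding sketch (the expected value is worked out by hand, not
+# captured output):
+#     JSONDecoder().decode('{"a": [1, 2.5, null]}')
+#     -> {u'a': [1, 2.5, None]}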
--- /dev/null
+"""Implementation of JSONEncoder
+"""
+import re
+
+try:
+ from simplejson._speedups import encode_basestring_ascii as c_encode_basestring_ascii
+except ImportError:
+ c_encode_basestring_ascii = None
+try:
+ from simplejson._speedups import make_encoder as c_make_encoder
+except ImportError:
+ c_make_encoder = None
+
+ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]')
+ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
+HAS_UTF8 = re.compile(r'[\x80-\xff]')
+ESCAPE_DCT = {
+ '\\': '\\\\',
+ '"': '\\"',
+ '\b': '\\b',
+ '\f': '\\f',
+ '\n': '\\n',
+ '\r': '\\r',
+ '\t': '\\t',
+}
+for i in range(0x20):
+ #ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i))
+ ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))
+
+# Assume this produces an infinity on all machines (probably not guaranteed)
+INFINITY = float('1e66666')
+FLOAT_REPR = repr
+
+def encode_basestring(s):
+ """Return a JSON representation of a Python string
+
+ """
+ def replace(match):
+ return ESCAPE_DCT[match.group(0)]
+ return '"' + ESCAPE.sub(replace, s) + '"'
+
+
+def py_encode_basestring_ascii(s):
+ """Return an ASCII-only JSON representation of a Python string
+
+ """
+ if isinstance(s, str) and HAS_UTF8.search(s) is not None:
+ s = s.decode('utf-8')
+ def replace(match):
+ s = match.group(0)
+ try:
+ return ESCAPE_DCT[s]
+ except KeyError:
+ n = ord(s)
+ if n < 0x10000:
+ #return '\\u{0:04x}'.format(n)
+ return '\\u%04x' % (n,)
+ else:
+ # surrogate pair
+ n -= 0x10000
+ s1 = 0xd800 | ((n >> 10) & 0x3ff)
+ s2 = 0xdc00 | (n & 0x3ff)
+ #return '\\u{0:04x}\\u{1:04x}'.format(s1, s2)
+ return '\\u%04x\\u%04x' % (s1, s2)
+ return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'
+
+
+encode_basestring_ascii = c_encode_basestring_ascii or py_encode_basestring_ascii
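+# Illustration (hand-worked, not captured output): characters outside printable
+# ASCII come out as \uXXXX escapes, e.g.
+#     encode_basestring_ascii(u'caf\xe9')  ->  '"caf\\u00e9"'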
+
+class JSONEncoder(object):
+ """Extensible JSON <http://json.org> encoder for Python data structures.
+
+ Supports the following objects and types by default:
+
+ +-------------------+---------------+
+ | Python | JSON |
+ +===================+===============+
+ | dict | object |
+ +-------------------+---------------+
+ | list, tuple | array |
+ +-------------------+---------------+
+ | str, unicode | string |
+ +-------------------+---------------+
+ | int, long, float | number |
+ +-------------------+---------------+
+ | True | true |
+ +-------------------+---------------+
+ | False | false |
+ +-------------------+---------------+
+ | None | null |
+ +-------------------+---------------+
+
+    To extend this to recognize other objects, subclass and implement a
+    ``.default()`` method that returns a serializable object for ``o`` if
+    possible; otherwise it should call the superclass implementation (to
+    raise ``TypeError``).
+
+ """
+ item_separator = ', '
+ key_separator = ': '
+ def __init__(self, skipkeys=False, ensure_ascii=True,
+ check_circular=True, allow_nan=True, sort_keys=False,
+ indent=None, separators=None, encoding='utf-8', default=None):
+ """Constructor for JSONEncoder, with sensible defaults.
+
+ If skipkeys is false, then it is a TypeError to attempt
+ encoding of keys that are not str, int, long, float or None. If
+ skipkeys is True, such items are simply skipped.
+
+ If ensure_ascii is true, the output is guaranteed to be str
+ objects with all incoming unicode characters escaped. If
+        ensure_ascii is false, the output will be a unicode object.
+
+ If check_circular is true, then lists, dicts, and custom encoded
+ objects will be checked for circular references during encoding to
+ prevent an infinite recursion (which would cause an OverflowError).
+ Otherwise, no such check takes place.
+
+ If allow_nan is true, then NaN, Infinity, and -Infinity will be
+ encoded as such. This behavior is not JSON specification compliant,
+ but is consistent with most JavaScript based encoders and decoders.
+ Otherwise, it will be a ValueError to encode such floats.
+
+ If sort_keys is true, then the output of dictionaries will be
+ sorted by key; this is useful for regression tests to ensure
+ that JSON serializations can be compared on a day-to-day basis.
+
+ If indent is a non-negative integer, then JSON array
+ elements and object members will be pretty-printed with that
+ indent level. An indent level of 0 will only insert newlines.
+ None is the most compact representation.
+
+ If specified, separators should be a (item_separator, key_separator)
+ tuple. The default is (', ', ': '). To get the most compact JSON
+ representation you should specify (',', ':') to eliminate whitespace.
+
+ If specified, default is a function that gets called for objects
+ that can't otherwise be serialized. It should return a JSON encodable
+ version of the object or raise a ``TypeError``.
+
+ If encoding is not None, then all input strings will be
+ transformed into unicode using that encoding prior to JSON-encoding.
+ The default is UTF-8.
+
+ """
+
+ self.skipkeys = skipkeys
+ self.ensure_ascii = ensure_ascii
+ self.check_circular = check_circular
+ self.allow_nan = allow_nan
+ self.sort_keys = sort_keys
+ self.indent = indent
+ if separators is not None:
+ self.item_separator, self.key_separator = separators
+ if default is not None:
+ self.default = default
+ self.encoding = encoding
+
+ def default(self, o):
+ """Implement this method in a subclass such that it returns
+ a serializable object for ``o``, or calls the base implementation
+ (to raise a ``TypeError``).
+
+ For example, to support arbitrary iterators, you could
+ implement default like this::
+
+ def default(self, o):
+ try:
+ iterable = iter(o)
+ except TypeError:
+ pass
+ else:
+ return list(iterable)
+ return JSONEncoder.default(self, o)
+
+ """
+ raise TypeError(repr(o) + " is not JSON serializable")
+
+ def encode(self, o):
+ """Return a JSON string representation of a Python data structure.
+
+ >>> JSONEncoder().encode({"foo": ["bar", "baz"]})
+ '{"foo": ["bar", "baz"]}'
+
+ """
+ # This is for extremely simple cases and benchmarks.
+ if isinstance(o, basestring):
+ if isinstance(o, str):
+ _encoding = self.encoding
+ if (_encoding is not None
+ and not (_encoding == 'utf-8')):
+ o = o.decode(_encoding)
+ if self.ensure_ascii:
+ return encode_basestring_ascii(o)
+ else:
+ return encode_basestring(o)
+ # This doesn't pass the iterator directly to ''.join() because the
+ # exceptions aren't as detailed. The list call should be roughly
+ # equivalent to the PySequence_Fast that ''.join() would do.
+ chunks = self.iterencode(o, _one_shot=True)
+ if not isinstance(chunks, (list, tuple)):
+ chunks = list(chunks)
+ return ''.join(chunks)
+
+ def iterencode(self, o, _one_shot=False):
+ """Encode the given object and yield each string
+ representation as available.
+
+ For example::
+
+ for chunk in JSONEncoder().iterencode(bigobject):
+ mysocket.write(chunk)
+
+ """
+ if self.check_circular:
+ markers = {}
+ else:
+ markers = None
+ if self.ensure_ascii:
+ _encoder = encode_basestring_ascii
+ else:
+ _encoder = encode_basestring
+ if self.encoding != 'utf-8':
+ def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding):
+ if isinstance(o, str):
+ o = o.decode(_encoding)
+ return _orig_encoder(o)
+
+ def floatstr(o, allow_nan=self.allow_nan, _repr=FLOAT_REPR, _inf=INFINITY, _neginf=-INFINITY):
+ # Check for specials. Note that this type of test is processor- and/or
+ # platform-specific, so do tests which don't depend on the internals.
+
+ if o != o:
+ text = 'NaN'
+ elif o == _inf:
+ text = 'Infinity'
+ elif o == _neginf:
+ text = '-Infinity'
+ else:
+ return _repr(o)
+
+ if not allow_nan:
+ raise ValueError(
+ "Out of range float values are not JSON compliant: " +
+ repr(o))
+
+ return text
+
+
+ if _one_shot and c_make_encoder is not None and not self.indent and not self.sort_keys:
+ _iterencode = c_make_encoder(
+ markers, self.default, _encoder, self.indent,
+ self.key_separator, self.item_separator, self.sort_keys,
+ self.skipkeys, self.allow_nan)
+ else:
+ _iterencode = _make_iterencode(
+ markers, self.default, _encoder, self.indent, floatstr,
+ self.key_separator, self.item_separator, self.sort_keys,
+ self.skipkeys, _one_shot)
+ return _iterencode(o, 0)
+
+def _make_iterencode(markers, _default, _encoder, _indent, _floatstr, _key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot,
+ ## HACK: hand-optimized bytecode; turn globals into locals
+ False=False,
+ True=True,
+ ValueError=ValueError,
+ basestring=basestring,
+ dict=dict,
+ float=float,
+ id=id,
+ int=int,
+ isinstance=isinstance,
+ list=list,
+ long=long,
+ str=str,
+ tuple=tuple,
+ ):
+
+ def _iterencode_list(lst, _current_indent_level):
+ if not lst:
+ yield '[]'
+ return
+ if markers is not None:
+ markerid = id(lst)
+ if markerid in markers:
+ raise ValueError("Circular reference detected")
+ markers[markerid] = lst
+ buf = '['
+ if _indent is not None:
+ _current_indent_level += 1
+ newline_indent = '\n' + (' ' * (_indent * _current_indent_level))
+ separator = _item_separator + newline_indent
+ buf += newline_indent
+ else:
+ newline_indent = None
+ separator = _item_separator
+ first = True
+ for value in lst:
+ if first:
+ first = False
+ else:
+ buf = separator
+ if isinstance(value, basestring):
+ yield buf + _encoder(value)
+ elif value is None:
+ yield buf + 'null'
+ elif value is True:
+ yield buf + 'true'
+ elif value is False:
+ yield buf + 'false'
+ elif isinstance(value, (int, long)):
+ yield buf + str(value)
+ elif isinstance(value, float):
+ yield buf + _floatstr(value)
+ else:
+ yield buf
+ if isinstance(value, (list, tuple)):
+ chunks = _iterencode_list(value, _current_indent_level)
+ elif isinstance(value, dict):
+ chunks = _iterencode_dict(value, _current_indent_level)
+ else:
+ chunks = _iterencode(value, _current_indent_level)
+ for chunk in chunks:
+ yield chunk
+ if newline_indent is not None:
+ _current_indent_level -= 1
+ yield '\n' + (' ' * (_indent * _current_indent_level))
+ yield ']'
+ if markers is not None:
+ del markers[markerid]
+
+ def _iterencode_dict(dct, _current_indent_level):
+ if not dct:
+ yield '{}'
+ return
+ if markers is not None:
+ markerid = id(dct)
+ if markerid in markers:
+ raise ValueError("Circular reference detected")
+ markers[markerid] = dct
+ yield '{'
+ if _indent is not None:
+ _current_indent_level += 1
+ newline_indent = '\n' + (' ' * (_indent * _current_indent_level))
+ item_separator = _item_separator + newline_indent
+ yield newline_indent
+ else:
+ newline_indent = None
+ item_separator = _item_separator
+ first = True
+ if _sort_keys:
+ items = dct.items()
+ items.sort(key=lambda kv: kv[0])
+ else:
+ items = dct.iteritems()
+ for key, value in items:
+ if isinstance(key, basestring):
+ pass
+ # JavaScript is weakly typed for these, so it makes sense to
+ # also allow them. Many encoders seem to do something like this.
+ elif isinstance(key, float):
+ key = _floatstr(key)
+ elif key is True:
+ key = 'true'
+ elif key is False:
+ key = 'false'
+ elif key is None:
+ key = 'null'
+ elif isinstance(key, (int, long)):
+ key = str(key)
+ elif _skipkeys:
+ continue
+ else:
+ raise TypeError("key " + repr(key) + " is not a string")
+ if first:
+ first = False
+ else:
+ yield item_separator
+ yield _encoder(key)
+ yield _key_separator
+ if isinstance(value, basestring):
+ yield _encoder(value)
+ elif value is None:
+ yield 'null'
+ elif value is True:
+ yield 'true'
+ elif value is False:
+ yield 'false'
+ elif isinstance(value, (int, long)):
+ yield str(value)
+ elif isinstance(value, float):
+ yield _floatstr(value)
+ else:
+ if isinstance(value, (list, tuple)):
+ chunks = _iterencode_list(value, _current_indent_level)
+ elif isinstance(value, dict):
+ chunks = _iterencode_dict(value, _current_indent_level)
+ else:
+ chunks = _iterencode(value, _current_indent_level)
+ for chunk in chunks:
+ yield chunk
+ if newline_indent is not None:
+ _current_indent_level -= 1
+ yield '\n' + (' ' * (_indent * _current_indent_level))
+ yield '}'
+ if markers is not None:
+ del markers[markerid]
+
+ def _iterencode(o, _current_indent_level):
+ if isinstance(o, basestring):
+ yield _encoder(o)
+ elif o is None:
+ yield 'null'
+ elif o is True:
+ yield 'true'
+ elif o is False:
+ yield 'false'
+ elif isinstance(o, (int, long)):
+ yield str(o)
+ elif isinstance(o, float):
+ yield _floatstr(o)
+ elif isinstance(o, (list, tuple)):
+ for chunk in _iterencode_list(o, _current_indent_level):
+ yield chunk
+ elif isinstance(o, dict):
+ for chunk in _iterencode_dict(o, _current_indent_level):
+ yield chunk
+ else:
+ if markers is not None:
+ markerid = id(o)
+ if markerid in markers:
+ raise ValueError("Circular reference detected")
+ markers[markerid] = o
+ o = _default(o)
+ for chunk in _iterencode(o, _current_indent_level):
+ yield chunk
+ if markers is not None:
+ del markers[markerid]
+
+ return _iterencode
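+
+# A short, hand-worked sketch of the public entry point (values assumed, not
+# captured output):
+#     JSONEncoder(sort_keys=True).encode({'b': 1, 'a': [True, None]})
+#     -> '{"a": [true, null], "b": 1}'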
--- /dev/null
+"""JSON token scanner
+"""
+import re
+try:
+ from simplejson._speedups import make_scanner as c_make_scanner
+except ImportError:
+ c_make_scanner = None
+
+__all__ = ['make_scanner']
+
+NUMBER_RE = re.compile(
+ r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?',
+ (re.VERBOSE | re.MULTILINE | re.DOTALL))
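+# Illustrative match (worked out by hand): NUMBER_RE.match('-12.5e3').groups()
+# would give ('-12', '.5', 'e3'); _scan_once below recombines the integer,
+# fraction and exponent pieces.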
+
+def py_make_scanner(context):
+ parse_object = context.parse_object
+ parse_array = context.parse_array
+ parse_string = context.parse_string
+ match_number = NUMBER_RE.match
+ encoding = context.encoding
+ strict = context.strict
+ parse_float = context.parse_float
+ parse_int = context.parse_int
+ parse_constant = context.parse_constant
+ object_hook = context.object_hook
+
+ def _scan_once(string, idx):
+ try:
+ nextchar = string[idx]
+ except IndexError:
+ raise StopIteration
+
+ if nextchar == '"':
+ return parse_string(string, idx + 1, encoding, strict)
+ elif nextchar == '{':
+ return parse_object((string, idx + 1), encoding, strict, _scan_once, object_hook)
+ elif nextchar == '[':
+ return parse_array((string, idx + 1), _scan_once)
+ elif nextchar == 'n' and string[idx:idx + 4] == 'null':
+ return None, idx + 4
+ elif nextchar == 't' and string[idx:idx + 4] == 'true':
+ return True, idx + 4
+ elif nextchar == 'f' and string[idx:idx + 5] == 'false':
+ return False, idx + 5
+
+ m = match_number(string, idx)
+ if m is not None:
+ integer, frac, exp = m.groups()
+ if frac or exp:
+ res = parse_float(integer + (frac or '') + (exp or ''))
+ else:
+ res = parse_int(integer)
+ return res, m.end()
+ elif nextchar == 'N' and string[idx:idx + 3] == 'NaN':
+ return parse_constant('NaN'), idx + 3
+ elif nextchar == 'I' and string[idx:idx + 8] == 'Infinity':
+ return parse_constant('Infinity'), idx + 8
+ elif nextchar == '-' and string[idx:idx + 9] == '-Infinity':
+ return parse_constant('-Infinity'), idx + 9
+ else:
+ raise StopIteration
+
+ return _scan_once
+
+make_scanner = c_make_scanner or py_make_scanner
--- /dev/null
+import unittest
+import doctest
+
+def additional_tests():
+ import simplejson
+ import simplejson.encoder
+ import simplejson.decoder
+ suite = unittest.TestSuite()
+ for mod in (simplejson, simplejson.encoder, simplejson.decoder):
+ suite.addTest(doctest.DocTestSuite(mod))
+ suite.addTest(doctest.DocFileSuite('../../index.rst'))
+ return suite
+
+def main():
+ suite = additional_tests()
+ runner = unittest.TextTestRunner()
+ runner.run(suite)
+
+if __name__ == '__main__':
+ import os
+ import sys
+ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
+ main()
--- /dev/null
+from unittest import TestCase
+import simplejson as json
+
+def default_iterable(obj):
+ return list(obj)
+
+class TestCheckCircular(TestCase):
+ def test_circular_dict(self):
+ dct = {}
+ dct['a'] = dct
+ self.assertRaises(ValueError, json.dumps, dct)
+
+ def test_circular_list(self):
+ lst = []
+ lst.append(lst)
+ self.assertRaises(ValueError, json.dumps, lst)
+
+ def test_circular_composite(self):
+ dct2 = {}
+ dct2['a'] = []
+ dct2['a'].append(dct2)
+ self.assertRaises(ValueError, json.dumps, dct2)
+
+ def test_circular_default(self):
+ json.dumps([set()], default=default_iterable)
+ self.assertRaises(TypeError, json.dumps, [set()])
+
+ def test_circular_off_default(self):
+ json.dumps([set()], default=default_iterable, check_circular=False)
+ self.assertRaises(TypeError, json.dumps, [set()], check_circular=False)
--- /dev/null
+import decimal
+from unittest import TestCase
+
+import simplejson as json
+
+class TestDecode(TestCase):
+ def test_decimal(self):
+ rval = json.loads('1.1', parse_float=decimal.Decimal)
+ self.assert_(isinstance(rval, decimal.Decimal))
+ self.assertEquals(rval, decimal.Decimal('1.1'))
+
+ def test_float(self):
+ rval = json.loads('1', parse_int=float)
+ self.assert_(isinstance(rval, float))
+ self.assertEquals(rval, 1.0)
+
+ def test_decoder_optimizations(self):
+ # Several optimizations were made that skip over calls to
+ # the whitespace regex, so this test is designed to try and
+ # exercise the uncommon cases. The array cases are already covered.
+ rval = json.loads('{ "key" : "value" , "k":"v" }')
+ self.assertEquals(rval, {"key":"value", "k":"v"})
--- /dev/null
+from unittest import TestCase
+
+import simplejson as json
+
+class TestDefault(TestCase):
+ def test_default(self):
+ self.assertEquals(
+ json.dumps(type, default=repr),
+ json.dumps(repr(type)))
--- /dev/null
+from unittest import TestCase
+from cStringIO import StringIO
+
+import simplejson as json
+
+class TestDump(TestCase):
+ def test_dump(self):
+ sio = StringIO()
+ json.dump({}, sio)
+ self.assertEquals(sio.getvalue(), '{}')
+
+ def test_dumps(self):
+ self.assertEquals(json.dumps({}), '{}')
+
+ def test_encode_truefalse(self):
+ self.assertEquals(json.dumps(
+ {True: False, False: True}, sort_keys=True),
+ '{"false": true, "true": false}')
+ self.assertEquals(json.dumps(
+ {2: 3.0, 4.0: 5L, False: 1, 6L: True, "7": 0}, sort_keys=True),
+ '{"false": 1, "2": 3.0, "4.0": 5, "6": true, "7": 0}')
--- /dev/null
+from unittest import TestCase
+
+import simplejson.encoder
+
+CASES = [
+ (u'/\\"\ucafe\ubabe\uab98\ufcde\ubcda\uef4a\x08\x0c\n\r\t`1~!@#$%^&*()_+-=[]{}|;:\',./<>?', '"/\\\\\\"\\ucafe\\ubabe\\uab98\\ufcde\\ubcda\\uef4a\\b\\f\\n\\r\\t`1~!@#$%^&*()_+-=[]{}|;:\',./<>?"'),
+ (u'\u0123\u4567\u89ab\ucdef\uabcd\uef4a', '"\\u0123\\u4567\\u89ab\\ucdef\\uabcd\\uef4a"'),
+ (u'controls', '"controls"'),
+ (u'\x08\x0c\n\r\t', '"\\b\\f\\n\\r\\t"'),
+ (u'{"object with 1 member":["array with 1 element"]}', '"{\\"object with 1 member\\":[\\"array with 1 element\\"]}"'),
+ (u' s p a c e d ', '" s p a c e d "'),
+ (u'\U0001d120', '"\\ud834\\udd20"'),
+ (u'\u03b1\u03a9', '"\\u03b1\\u03a9"'),
+ ('\xce\xb1\xce\xa9', '"\\u03b1\\u03a9"'),
+ (u'\u03b1\u03a9', '"\\u03b1\\u03a9"'),
+ ('\xce\xb1\xce\xa9', '"\\u03b1\\u03a9"'),
+ (u'\u03b1\u03a9', '"\\u03b1\\u03a9"'),
+ (u'\u03b1\u03a9', '"\\u03b1\\u03a9"'),
+ (u"`1~!@#$%^&*()_+-={':[,]}|;.</>?", '"`1~!@#$%^&*()_+-={\':[,]}|;.</>?"'),
+ (u'\x08\x0c\n\r\t', '"\\b\\f\\n\\r\\t"'),
+ (u'\u0123\u4567\u89ab\ucdef\uabcd\uef4a', '"\\u0123\\u4567\\u89ab\\ucdef\\uabcd\\uef4a"'),
+]
+
+class TestEncodeBaseStringAscii(TestCase):
+ def test_py_encode_basestring_ascii(self):
+ self._test_encode_basestring_ascii(simplejson.encoder.py_encode_basestring_ascii)
+
+ def test_c_encode_basestring_ascii(self):
+ if not simplejson.encoder.c_encode_basestring_ascii:
+ return
+ self._test_encode_basestring_ascii(simplejson.encoder.c_encode_basestring_ascii)
+
+ def _test_encode_basestring_ascii(self, encode_basestring_ascii):
+ fname = encode_basestring_ascii.__name__
+ for input_string, expect in CASES:
+ result = encode_basestring_ascii(input_string)
+ self.assertEquals(result, expect,
+ '%r != %r for %s(%r)' % (result, expect, fname, input_string))
--- /dev/null
+from unittest import TestCase
+
+import simplejson as json
+
+# Fri Dec 30 18:57:26 2005
+JSONDOCS = [
+ # http://json.org/JSON_checker/test/fail1.json
+ '"A JSON payload should be an object or array, not a string."',
+ # http://json.org/JSON_checker/test/fail2.json
+ '["Unclosed array"',
+ # http://json.org/JSON_checker/test/fail3.json
+ '{unquoted_key: "keys must be quoted}',
+ # http://json.org/JSON_checker/test/fail4.json
+ '["extra comma",]',
+ # http://json.org/JSON_checker/test/fail5.json
+ '["double extra comma",,]',
+ # http://json.org/JSON_checker/test/fail6.json
+ '[ , "<-- missing value"]',
+ # http://json.org/JSON_checker/test/fail7.json
+ '["Comma after the close"],',
+ # http://json.org/JSON_checker/test/fail8.json
+ '["Extra close"]]',
+ # http://json.org/JSON_checker/test/fail9.json
+ '{"Extra comma": true,}',
+ # http://json.org/JSON_checker/test/fail10.json
+ '{"Extra value after close": true} "misplaced quoted value"',
+ # http://json.org/JSON_checker/test/fail11.json
+ '{"Illegal expression": 1 + 2}',
+ # http://json.org/JSON_checker/test/fail12.json
+ '{"Illegal invocation": alert()}',
+ # http://json.org/JSON_checker/test/fail13.json
+ '{"Numbers cannot have leading zeroes": 013}',
+ # http://json.org/JSON_checker/test/fail14.json
+ '{"Numbers cannot be hex": 0x14}',
+ # http://json.org/JSON_checker/test/fail15.json
+ '["Illegal backslash escape: \\x15"]',
+ # http://json.org/JSON_checker/test/fail16.json
+ '["Illegal backslash escape: \\\'"]',
+ # http://json.org/JSON_checker/test/fail17.json
+ '["Illegal backslash escape: \\017"]',
+ # http://json.org/JSON_checker/test/fail18.json
+ '[[[[[[[[[[[[[[[[[[[["Too deep"]]]]]]]]]]]]]]]]]]]]',
+ # http://json.org/JSON_checker/test/fail19.json
+ '{"Missing colon" null}',
+ # http://json.org/JSON_checker/test/fail20.json
+ '{"Double colon":: null}',
+ # http://json.org/JSON_checker/test/fail21.json
+ '{"Comma instead of colon", null}',
+ # http://json.org/JSON_checker/test/fail22.json
+ '["Colon instead of comma": false]',
+ # http://json.org/JSON_checker/test/fail23.json
+ '["Bad value", truth]',
+ # http://json.org/JSON_checker/test/fail24.json
+ "['single quote']",
+ # http://code.google.com/p/simplejson/issues/detail?id=3
+ u'["A\u001FZ control characters in string"]',
+]
+
+SKIPS = {
+ 1: "why not have a string payload?",
+ 18: "spec doesn't specify any nesting limitations",
+}
+
+class TestFail(TestCase):
+ def test_failures(self):
+ for idx, doc in enumerate(JSONDOCS):
+ idx = idx + 1
+ if idx in SKIPS:
+ json.loads(doc)
+ continue
+ try:
+ json.loads(doc)
+ except ValueError:
+ pass
+ else:
+ self.fail("Expected failure for fail%d.json: %r" % (idx, doc))
--- /dev/null
+import math
+from unittest import TestCase
+
+import simplejson as json
+
+class TestFloat(TestCase):
+ def test_floats(self):
+ for num in [1617161771.7650001, math.pi, math.pi**100, math.pi**-100, 3.1]:
+ self.assertEquals(float(json.dumps(num)), num)
+ self.assertEquals(json.loads(json.dumps(num)), num)
+
+ def test_ints(self):
+ for num in [1, 1L, 1<<32, 1<<64]:
+ self.assertEquals(json.dumps(num), str(num))
+ self.assertEquals(int(json.dumps(num)), num)
--- /dev/null
+from unittest import TestCase
+
+import simplejson as json
+import textwrap
+
+class TestIndent(TestCase):
+ def test_indent(self):
+ h = [['blorpie'], ['whoops'], [], 'd-shtaeou', 'd-nthiouh', 'i-vhbjkhnth',
+ {'nifty': 87}, {'field': 'yes', 'morefield': False} ]
+
+ expect = textwrap.dedent("""\
+ [
+ [
+ "blorpie"
+ ],
+ [
+ "whoops"
+ ],
+ [],
+ "d-shtaeou",
+ "d-nthiouh",
+ "i-vhbjkhnth",
+ {
+ "nifty": 87
+ },
+ {
+ "field": "yes",
+ "morefield": false
+ }
+ ]""")
+
+
+ d1 = json.dumps(h)
+ d2 = json.dumps(h, indent=2, sort_keys=True, separators=(',', ': '))
+
+ h1 = json.loads(d1)
+ h2 = json.loads(d2)
+
+ self.assertEquals(h1, h)
+ self.assertEquals(h2, h)
+ self.assertEquals(d2, expect)
--- /dev/null
+from unittest import TestCase
+
+import simplejson as json
+
+# from http://json.org/JSON_checker/test/pass1.json
+JSON = r'''
+[
+ "JSON Test Pattern pass1",
+ {"object with 1 member":["array with 1 element"]},
+ {},
+ [],
+ -42,
+ true,
+ false,
+ null,
+ {
+ "integer": 1234567890,
+ "real": -9876.543210,
+ "e": 0.123456789e-12,
+ "E": 1.234567890E+34,
+ "": 23456789012E666,
+ "zero": 0,
+ "one": 1,
+ "space": " ",
+ "quote": "\"",
+ "backslash": "\\",
+ "controls": "\b\f\n\r\t",
+ "slash": "/ & \/",
+ "alpha": "abcdefghijklmnopqrstuvwyz",
+ "ALPHA": "ABCDEFGHIJKLMNOPQRSTUVWYZ",
+ "digit": "0123456789",
+ "special": "`1~!@#$%^&*()_+-={':[,]}|;.</>?",
+ "hex": "\u0123\u4567\u89AB\uCDEF\uabcd\uef4A",
+ "true": true,
+ "false": false,
+ "null": null,
+ "array":[ ],
+ "object":{ },
+ "address": "50 St. James Street",
+ "url": "http://www.JSON.org/",
+ "comment": "// /* <!-- --",
+ "# -- --> */": " ",
+ " s p a c e d " :[1,2 , 3
+
+,
+
+4 , 5 , 6 ,7 ],
+ "compact": [1,2,3,4,5,6,7],
+ "jsontext": "{\"object with 1 member\":[\"array with 1 element\"]}",
+ "quotes": "" \u0022 %22 0x22 034 "",
+ "\/\\\"\uCAFE\uBABE\uAB98\uFCDE\ubcda\uef4A\b\f\n\r\t`1~!@#$%^&*()_+-=[]{}|;:',./<>?"
+: "A key can be any string"
+ },
+ 0.5 ,98.6
+,
+99.44
+,
+
+1066
+
+
+,"rosebud"]
+'''
+
+class TestPass1(TestCase):
+ def test_parse(self):
+ # test in/out equivalence and parsing
+ res = json.loads(JSON)
+ out = json.dumps(res)
+ self.assertEquals(res, json.loads(out))
+ try:
+ json.dumps(res, allow_nan=False)
+ except ValueError:
+ pass
+ else:
+ self.fail("23456789012E666 should be out of range")
--- /dev/null
+from unittest import TestCase
+import simplejson as json
+
+# from http://json.org/JSON_checker/test/pass2.json
+JSON = r'''
+[[[[[[[[[[[[[[[[[[["Not too deep"]]]]]]]]]]]]]]]]]]]
+'''
+
+class TestPass2(TestCase):
+ def test_parse(self):
+ # test in/out equivalence and parsing
+ res = json.loads(JSON)
+ out = json.dumps(res)
+ self.assertEquals(res, json.loads(out))
--- /dev/null
+from unittest import TestCase
+
+import simplejson as json
+
+# from http://json.org/JSON_checker/test/pass3.json
+JSON = r'''
+{
+ "JSON Test Pattern pass3": {
+ "The outermost value": "must be an object or array.",
+ "In this test": "It is an object."
+ }
+}
+'''
+
+class TestPass3(TestCase):
+ def test_parse(self):
+ # test in/out equivalence and parsing
+ res = json.loads(JSON)
+ out = json.dumps(res)
+ self.assertEquals(res, json.loads(out))
--- /dev/null
+from unittest import TestCase
+
+import simplejson as json
+
+class JSONTestObject:
+ pass
+
+
+class RecursiveJSONEncoder(json.JSONEncoder):
+ recurse = False
+ def default(self, o):
+ if o is JSONTestObject:
+ if self.recurse:
+ return [JSONTestObject]
+ else:
+ return 'JSONTestObject'
+ return json.JSONEncoder.default(o)
+
+
+class TestRecursion(TestCase):
+ def test_listrecursion(self):
+ x = []
+ x.append(x)
+ try:
+ json.dumps(x)
+ except ValueError:
+ pass
+ else:
+ self.fail("didn't raise ValueError on list recursion")
+ x = []
+ y = [x]
+ x.append(y)
+ try:
+ json.dumps(x)
+ except ValueError:
+ pass
+ else:
+ self.fail("didn't raise ValueError on alternating list recursion")
+ y = []
+ x = [y, y]
+ # ensure that the marker is cleared
+ json.dumps(x)
+
+ def test_dictrecursion(self):
+ x = {}
+ x["test"] = x
+ try:
+ json.dumps(x)
+ except ValueError:
+ pass
+ else:
+ self.fail("didn't raise ValueError on dict recursion")
+ x = {}
+ y = {"a": x, "b": x}
+ # ensure that the marker is cleared
+ json.dumps(x)
+
+ def test_defaultrecursion(self):
+ enc = RecursiveJSONEncoder()
+ self.assertEquals(enc.encode(JSONTestObject), '"JSONTestObject"')
+ enc.recurse = True
+ try:
+ enc.encode(JSONTestObject)
+ except ValueError:
+ pass
+ else:
+ self.fail("didn't raise ValueError on default recursion")
--- /dev/null
+import sys
+import decimal
+from unittest import TestCase
+
+import simplejson as json
+import simplejson.decoder
+
+class TestScanString(TestCase):
+ def test_py_scanstring(self):
+ self._test_scanstring(simplejson.decoder.py_scanstring)
+
+ def test_c_scanstring(self):
+ if not simplejson.decoder.c_scanstring:
+ return
+ self._test_scanstring(simplejson.decoder.c_scanstring)
+
+ def _test_scanstring(self, scanstring):
+ self.assertEquals(
+ scanstring('"z\\ud834\\udd20x"', 1, None, True),
+ (u'z\U0001d120x', 16))
+
+ if sys.maxunicode == 65535:
+ self.assertEquals(
+ scanstring(u'"z\U0001d120x"', 1, None, True),
+ (u'z\U0001d120x', 6))
+ else:
+ self.assertEquals(
+ scanstring(u'"z\U0001d120x"', 1, None, True),
+ (u'z\U0001d120x', 5))
+
+ self.assertEquals(
+ scanstring('"\\u007b"', 1, None, True),
+ (u'{', 8))
+
+ self.assertEquals(
+ scanstring('"A JSON payload should be an object or array, not a string."', 1, None, True),
+ (u'A JSON payload should be an object or array, not a string.', 60))
+
+ self.assertEquals(
+ scanstring('["Unclosed array"', 2, None, True),
+ (u'Unclosed array', 17))
+
+ self.assertEquals(
+ scanstring('["extra comma",]', 2, None, True),
+ (u'extra comma', 14))
+
+ self.assertEquals(
+ scanstring('["double extra comma",,]', 2, None, True),
+ (u'double extra comma', 21))
+
+ self.assertEquals(
+ scanstring('["Comma after the close"],', 2, None, True),
+ (u'Comma after the close', 24))
+
+ self.assertEquals(
+ scanstring('["Extra close"]]', 2, None, True),
+ (u'Extra close', 14))
+
+ self.assertEquals(
+ scanstring('{"Extra comma": true,}', 2, None, True),
+ (u'Extra comma', 14))
+
+ self.assertEquals(
+ scanstring('{"Extra value after close": true} "misplaced quoted value"', 2, None, True),
+ (u'Extra value after close', 26))
+
+ self.assertEquals(
+ scanstring('{"Illegal expression": 1 + 2}', 2, None, True),
+ (u'Illegal expression', 21))
+
+ self.assertEquals(
+ scanstring('{"Illegal invocation": alert()}', 2, None, True),
+ (u'Illegal invocation', 21))
+
+ self.assertEquals(
+ scanstring('{"Numbers cannot have leading zeroes": 013}', 2, None, True),
+ (u'Numbers cannot have leading zeroes', 37))
+
+ self.assertEquals(
+ scanstring('{"Numbers cannot be hex": 0x14}', 2, None, True),
+ (u'Numbers cannot be hex', 24))
+
+ self.assertEquals(
+ scanstring('[[[[[[[[[[[[[[[[[[[["Too deep"]]]]]]]]]]]]]]]]]]]]', 21, None, True),
+ (u'Too deep', 30))
+
+ self.assertEquals(
+ scanstring('{"Missing colon" null}', 2, None, True),
+ (u'Missing colon', 16))
+
+ self.assertEquals(
+ scanstring('{"Double colon":: null}', 2, None, True),
+ (u'Double colon', 15))
+
+ self.assertEquals(
+ scanstring('{"Comma instead of colon", null}', 2, None, True),
+ (u'Comma instead of colon', 25))
+
+ self.assertEquals(
+ scanstring('["Colon instead of comma": false]', 2, None, True),
+ (u'Colon instead of comma', 25))
+
+ self.assertEquals(
+ scanstring('["Bad value", truth]', 2, None, True),
+ (u'Bad value', 12))
+
+ def test_issue3623(self):
+ self.assertRaises(ValueError, json.decoder.scanstring, "xxx", 1,
+ "xxx")
+ self.assertRaises(UnicodeDecodeError,
+ json.encoder.encode_basestring_ascii, "xx\xff")
--- /dev/null
+import textwrap
+from unittest import TestCase
+
+import simplejson as json
+
+
+class TestSeparators(TestCase):
+ def test_separators(self):
+ h = [['blorpie'], ['whoops'], [], 'd-shtaeou', 'd-nthiouh', 'i-vhbjkhnth',
+ {'nifty': 87}, {'field': 'yes', 'morefield': False} ]
+
+ expect = textwrap.dedent("""\
+ [
+ [
+ "blorpie"
+ ] ,
+ [
+ "whoops"
+ ] ,
+ [] ,
+ "d-shtaeou" ,
+ "d-nthiouh" ,
+ "i-vhbjkhnth" ,
+ {
+ "nifty" : 87
+ } ,
+ {
+ "field" : "yes" ,
+ "morefield" : false
+ }
+ ]""")
+
+
+ d1 = json.dumps(h)
+ d2 = json.dumps(h, indent=2, sort_keys=True, separators=(' ,', ' : '))
+
+ h1 = json.loads(d1)
+ h2 = json.loads(d2)
+
+ self.assertEquals(h1, h)
+ self.assertEquals(h2, h)
+ self.assertEquals(d2, expect)
--- /dev/null
+from unittest import TestCase
+
+import simplejson as json
+
+class TestUnicode(TestCase):
+ def test_encoding1(self):
+ encoder = json.JSONEncoder(encoding='utf-8')
+ u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
+ s = u.encode('utf-8')
+ ju = encoder.encode(u)
+ js = encoder.encode(s)
+ self.assertEquals(ju, js)
+
+ def test_encoding2(self):
+ u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
+ s = u.encode('utf-8')
+ ju = json.dumps(u, encoding='utf-8')
+ js = json.dumps(s, encoding='utf-8')
+ self.assertEquals(ju, js)
+
+ def test_encoding3(self):
+ u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
+ j = json.dumps(u)
+ self.assertEquals(j, '"\\u03b1\\u03a9"')
+
+ def test_encoding4(self):
+ u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
+ j = json.dumps([u])
+ self.assertEquals(j, '["\\u03b1\\u03a9"]')
+
+ def test_encoding5(self):
+ u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
+ j = json.dumps(u, ensure_ascii=False)
+ self.assertEquals(j, u'"%s"' % (u,))
+
+ def test_encoding6(self):
+ u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
+ j = json.dumps([u], ensure_ascii=False)
+ self.assertEquals(j, u'["%s"]' % (u,))
+
+ def test_big_unicode_encode(self):
+ u = u'\U0001d120'
+ self.assertEquals(json.dumps(u), '"\\ud834\\udd20"')
+ self.assertEquals(json.dumps(u, ensure_ascii=False), u'"\U0001d120"')
+
+ def test_big_unicode_decode(self):
+ u = u'z\U0001d120x'
+ self.assertEquals(json.loads('"' + u + '"'), u)
+ self.assertEquals(json.loads('"z\\ud834\\udd20x"'), u)
+
+ def test_unicode_decode(self):
+ for i in range(0, 0xd7ff):
+ u = unichr(i)
+ s = '"\\u%04x"' % (i,)
+ self.assertEquals(json.loads(s), u)
+
+ def test_default_encoding(self):
+ self.assertEquals(json.loads(u'{"a": "\xe9"}'.encode('utf-8')),
+ {'a': u'\xe9'})
+
+ def test_unicode_preservation(self):
+ self.assertEquals(type(json.loads(u'""')), unicode)
+ self.assertEquals(type(json.loads(u'"a"')), unicode)
+ self.assertEquals(type(json.loads(u'["a"]')[0]), unicode)
\ No newline at end of file
--- /dev/null
+r"""Command-line tool to validate and pretty-print JSON
+
+Usage::
+
+ $ echo '{"json":"obj"}' | python -m simplejson.tool
+ {
+ "json": "obj"
+ }
+ $ echo '{ 1.2:3.4}' | python -m simplejson.tool
+ Expecting property name: line 1 column 2 (char 2)
+
+"""
+import sys
+import simplejson
+
+def main():
+ if len(sys.argv) == 1:
+ infile = sys.stdin
+ outfile = sys.stdout
+ elif len(sys.argv) == 2:
+ infile = open(sys.argv[1], 'rb')
+ outfile = sys.stdout
+ elif len(sys.argv) == 3:
+ infile = open(sys.argv[1], 'rb')
+ outfile = open(sys.argv[2], 'wb')
+ else:
+ raise SystemExit(sys.argv[0] + " [infile [outfile]]")
+ try:
+ obj = simplejson.load(infile)
+ except ValueError, e:
+ raise SystemExit(e)
+ simplejson.dump(obj, outfile, sort_keys=True, indent=4)
+ outfile.write('\n')
+
+
+if __name__ == '__main__':
+ main()
vswitchd/proc-net-compat.h \
vswitchd/ovs-vswitchd.c \
vswitchd/ovs-vswitchd.h \
+ vswitchd/vswitch-idl.h \
vswitchd/xenserver.c \
vswitchd/xenserver.h
vswitchd_ovs_vswitchd_LDADD = \
vswitchd/ovs-vswitchd.conf.5.in \
vswitchd/ovs-vswitchd.8.in \
vswitchd/ovs-brcompatd.8.in
+
+EXTRA_DIST += vswitchd/vswitch.ovsidl
+BUILT_SOURCES += vswitchd/vswitch-idl.h
+DISTCLEANFILES += \
+ vswitchd/vswitch.ovsidl \
+ vswitchd/vswitch-idl.h
+vswitchd/vswitch-idl.h: vswitchd/vswitch.ovsidl ovsdb/ovsdb-idlc
+ ovsdb/ovsdb-idlc c-idl-header $(srcdir)/vswitchd/vswitch.ovsidl > $@
#include "unixctl.h"
#include "vconn.h"
#include "vconn-ssl.h"
+#include "vswitch-idl.h"
#include "xenserver.h"
#include "xtoxll.h"
--- /dev/null
+//
+// This is an ovsdb-idl schema. The OVSDB IDL compiler, ovsdb-idlc,
+// can translate it into an OVSDB schema (which simply entails
+// deleting some members from the schema) or C headers or source for
+// use with the IDL at runtime.
+//
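+//
+// As a rough illustration (the c-idl-header command mirrors the
+// Makefile rule added elsewhere in this patch; the ovsdb-schema
+// output file name is only a guess):
+//
+//   ovsdb/ovsdb-idlc c-idl-header vswitchd/vswitch.ovsidl > vswitchd/vswitch-idl.h
+//   ovsdb/ovsdb-idlc ovsdb-schema vswitchd/vswitch.ovsidl > vswitchd/vswitch.ovsschema
+//
+// Judging from the schema below, the IDL-only members that the
+// ovsdb-schema translation strips appear to be the "keyRefTable"
+// annotations on column types.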
+
+{"name": "ovs_vswitchd_db",
+ "comment": "Configuration for one Open vSwitch daemon.",
+ "tables": {
+ "OpenVSwitch": {
+ "comment": "Configuration for an Open vSwitch daemon.",
+ "columns": {
+ "bridges": {
+ "comment": "Set of bridges managed by the daemon.",
+ "type": {"key": "uuid", "keyRefTable": "Bridge",
+ "min": 0, "max": "unlimited"}},
+ "management_id": {
+ "comment": "Exactly 12 hex digits that identify the daemon.",
+ "type": "string"},
+ "controller": {
+ "comment": "Default Controller used by bridges.",
+ "type": {"key": "uuid", "keyRefTable": "Controller", "min": 0, "max": 1}},
+ "ssl": {
+ "comment": "SSL used globally by the daemon.",
+ "type": {"key": "uuid", "keyRefTable": "SSL", "min": 0, "max": 1}}}},
+ "Bridge": {
+ "comment": "Configuration for a bridge within an OpenVSwitch.",
+ "columns": {
+ "name": {
+ "comment": "Bridge identifier. Should be alphanumeric and no more than about 8 bytes long. Must be unique among the names of ports, interfaces, and bridges on a host.",
+ "type": "string"},
+ "datapath_id": {
+ "comment": "OpenFlow datapath ID. Exactly 12 hex digits.",
+ "type": {"key": "string", "min": 0, "max": 1}},
+ "hwaddr": {
+ "comment": "Ethernet address to use for bridge. Exactly 12 hex digits in the form XX:XX:XX:XX:XX:XX.",
+ "type": {"key": "string", "min": 0, "max": 1}},
+ "ports": {
+ "comment": "Ports included in the bridge.",
+ "type": {"key": "uuid", "keyRefTable": "Port", "min": 0, "max": "unlimited"}},
+ "mirrors": {
+ "comment": "Port mirroring configuration.",
+ "type": {"key": "uuid", "keyRefTable": "Mirror", "min": 0, "max": "unlimited"}},
+ "netflow": {
+ "comment": "NetFlow configuration.",
+ "type": {"key": "uuid", "keyRefTable": "Netflow", "min": 0, "max": "unlimited"}},
+ "controller": {
+ "comment": "OpenFlow controller. If unset, defaults to that specified by the parent OpenVSwitch.",
+ "type": {"key": "uuid", "keyRefTable": "Controller", "min": 0, "max": 1}}}},
+ "Port": {
+ "comment": "A port within a Bridge. May contain a single Interface or multiple (bonded) Interfaces.",
+ "columns": {
+ "name": {
+ "comment": "Port name. Should be alphanumeric and no more than about 8 bytes long. May be the same as the interface name, for non-bonded ports. Must otherwise be unique among the names of ports, interfaces, and bridges on a host.",
+ "type": "string"},
+ "interfaces": {
+ "comment": "The Port's Interfaces. If there is more than one, this is a bonded Port.",
+ "type": {"key": "uuid", "keyRefTable": "Interface", "min": 1, "max": "unlimited"}},
+ "trunks": {
+ "comment": "The 802.1Q VLAN(s) that this port trunks. Should be empty if this port trunks all VLAN(s) or if this is not a trunk port.",
+ "type": {"key": "integer", "min": 0, "max": 4096}},
+ "tag": {
+ "comment": "This port's implicitly tagged VLAN. Should be empty if this is a trunk port.",
+ "type": {"key": "integer", "min": 0, "max": 1}},
+ "updelay": {
+ "comment": "For a bonded port, the number of milliseconds for which carrier must stay up on an interface before the interface is considered to be up. Ignored for non-bonded ports.",
+ "type": "integer"},
+ "downdelay": {
+ "comment": "For a bonded port, the number of milliseconds for which carrier must stay down on an interface before the interface is considered to be down. Ignored for non-bonded ports.",
+ "type": "integer"}}},
+ "Interface": {
+ "comment": "An interface within a Port.",
+ "columns": {
+ "name": {
+ "comment": "Interface name. Should be alphanumeric and no more than about 8 bytes long. May be the same as the port name, for non-bonded ports. Must otherwise be unique among the names of ports, interfaces, and bridges on a host.",
+ "type": "string"},
+ "internal": {
+ "comment": "An \"internal\" port is one that is implemented in software as a logical device.",
+ "type": "boolean"},
+ "ingress_policing_rate": {
+ "comment": "Maximum rate for data received on this interface, in kbps. Set to 0 to disable policing.",
+ "type": "integer"},
+ "ingress_policing_burst": {
+ "comment": "Maximum burst size for data received on this interface, in kb. The default burst size if set to 0 is 10 kb.",
+ "type": "integer"}}},
+ "Mirror": {
+ "comment": "A port mirror within a Bridge.",
+ "columns": {
+ "name": {
+ "comment": "Arbitrary identifier for the Mirror.",
+ "type": "string"},
+ "select_src_port": {
+ "comment": "Ports on which arriving packets are selected for mirroring.",
+ "type": {"key": "uuid", "keyRefTable": "Port", "min": 0, "max": "unlimited"}},
+ "select_dst_port": {
+ "comment": "Ports on which departing packets are selected for mirroring.",
+ "type": {"key": "uuid", "keyRefTable": "Port", "min": 0, "max": "unlimited"}},
+ "select_vlan": {
+ "comment": "VLANs on which packets are selected for mirroring.",
+ "type": {"key": "integer", "min": 0, "max": 4096}},
+ "output_port": {
+ "comment": "Output port for selected packets. Mutually exclusive with output_vlan.",
+ "type": {"key": "uuid", "keyRefTable": "Port", "min": 0, "max": 1}},
+ "output_vlan": {
+ "comment": "Output VLAN for selected packets. Mutually exclusive with output_vlan.",
+ "type": {"key": "integer", "min": 0, "max": 1}}}},
+ "Netflow": {
+ "comment": "A Netflow target.",
+ "columns": {
+ "target": {
+ "comment": "Netflow target in the form \"IP:PORT\".",
+ "type": "string"},
+ "engine_type": {
+ "comment": "Engine type to use in NetFlow messages. Defaults to datapath ID if not specified.",
+ "type": "integer"},
+ "engine_id": {
+ "comment": "Engine ID to use in NetFlow messages. Defaults to datapath ID if not specified.",
+ "type": "integer"},
+ "add_id_to_interface": {
+ "comment": "Place least-significant 7 bits of engine ID into most significant bits of ingress and egress interface fields of NetFlow records?",
+ "type": "boolean"}}},
+ "Controller": {
+ "comment": "An OpenFlow controller.",
+ "columns": {
+ "target": {
+ "comment": "Connection method for controller, e.g. \"ssl:...\", \"tcp:...\". The special string \"discover\" enables controller discovery.",
+ "type": "string"},
+ "max_backoff": {
+ "comment": "Maximum number of milliseconds to wait between connection attempts. Default is implementation-specific.",
+ "type": {"key": "integer", "min": 0, "max": 1}},
+ "inactivity_probe": {
+ "comment": "Maximum number of milliseconds of idle time on connection to controller before sending an inactivity probe message. Default is implementation-specific.",
+ "type": {"key": "integer", "min": 0, "max": 1}},
+ "fail_mode": {
+ "comment": "Either \"standalone\" or \"secure\", or empty to use the implementation's default.",
+ "type": {"key": "string", "min": 0, "max": 1}},
+ "discovery_accept_regex": {
+ "comment": "If \"target\" is \"discovery\", a POSIX extended regular expression against which the discovered controller location is validated. If not specified, the default is implementation-specific.",
+ "type": {"key": "string", "min": 0, "max": 1}},
+ "discovery_update_resolv_conf": {
+ "comment": "If \"target\" is \"discovery\", whether to update /etc/resolv.conf when the controller is discovered. If not specified, the default is implementation-specific.",
+ "type": {"key": "boolean", "min": 0, "max": 1}},
+ "connection_mode": {
+ "comment": "Either \"in-band\" or \"out-of-band\". If not specified, the default is implementation-specific.",
+ "type": {"key": "string", "min": 0, "max": 1}},
+ "local_ip": {
+ "comment": "If \"target\" is not \"discovery\", the IP address to configure on the local port.",
+ "type": {"key": "string", "min": 0, "max": 1}},
+ "local_netmask": {
+ "comment": "If \"target\" is not \"discovery\", the IP netmask to configure on the local port.",
+ "type": {"key": "string", "min": 0, "max": 1}},
+ "local_gateway": {
+ "comment": "If \"target\" is not \"discovery\", the IP gateway to configure on the local port.",
+ "type": {"key": "string", "min": 0, "max": 1}},
+ "controller_rate_limit": {
+ "comment": "The maximum rate at which packets will be forwarded to the OpenFlow controller, in packets per second. If not specified, the default is implementation-specific.",
+ "type": {"key": "integer", "min": 0, "max": 1}},
+ "controller_burst_limit": {
+ "comment": "The maximum number of unused packet credits that the bridge will allow to accumulate, in packets. If not specified, the default is implementation-specific.",
+ "type": {"key": "integer", "min": 0, "max": 1}}}},
+ "SSL": {
+ "comment": "SSL configuration for an OpenVSwitch.",
+ "columns": {
+ "private_key": {
+ "comment": "Name of a PEM file containing the private key used as the switch's identity for SSL connections to the controller.",
+ "type": "string"},
+ "certificate": {
+ "comment": "Name of a PEM file containing a certificate, signed by the certificate authority (CA) used by the controller and manager, that certifies the switch's private key, identifying a trustworthy switch.",
+ "type": "string"},
+ "ca_cert": {
+ "comment": "Name of a PEM file containing the CA certificate used to verify that the switch is connected to a trustworthy controller.",
+ "type": "string"}}}}}
+++ /dev/null
-{"name": "ovs_vswitchd_db",
- "comment": "Configuration for one Open vSwitch daemon.",
- "tables": {
- "OpenVSwitch": {
- "comment": "Configuration for an Open vSwitch daemon.",
- "columns": {
- "bridges": {
- "comment": "Set of bridges managed by the daemon.",
- "type": {"key": "uuid", "min": 0, "max": "unlimited"}},
- "management_id": {
- "comment": "Exactly 12 hex digits that identify the daemon.",
- "type": "string"},
- "controller": {
- "comment": "Default Controller used by bridges.",
- "type": {"key": "uuid", "min": 0, "max": 1}},
- "ssl": {
- "comment": "SSL used globally by the daemon.",
- "type": {"key": "uuid", "min": 0, "max": 1}}}},
- "Bridge": {
- "comment": "Configuration for a bridge within an OpenVSwitch.",
- "columns": {
- "name": {
- "comment": "Bridge identifier. Should be alphanumeric and no more than about 8 bytes long. Must be unique among the names of ports, interfaces, and bridges on a host.",
- "type": "string"},
- "datapath_id": {
- "comment": "OpenFlow datapath ID. Exactly 12 hex digits.",
- "type": {"key": "string", "min": 0, "max": 1}},
- "hwaddr": {
- "comment": "Ethernet address to use for bridge. Exactly 12 hex digits in the form XX:XX:XX:XX:XX:XX.",
- "type": {"key": "string", "min": 0, "max": 1}},
- "ports": {
- "comment": "Ports included in the bridge.",
- "type": {"key": "uuid", "min": 0, "max": "unlimited"}},
- "mirrors": {
- "comment": "Port mirroring configuration.",
- "type": {"key": "uuid", "min": 0, "max": "unlimited"}},
- "netflow": {
- "comment": "NetFlow configuration.",
- "type": {"key": "uuid", "min": 0, "max": "unlimited"}},
- "controller": {
- "comment": "OpenFlow controller. If unset, defaults to that specified by the parent OpenVSwitch.",
- "type": {"key": "uuid", "min": 0, "max": 1}}}},
- "Port": {
- "comment": "A port within a Bridge. May contain a single Interface or multiple (bonded) Interfaces.",
- "columns": {
- "name": {
- "comment": "Port name. Should be alphanumeric and no more than about 8 bytes long. May be the same as the interface name, for non-bonded ports. Must otherwise be unique among the names of ports, interfaces, and bridges on a host.",
- "type": "string"},
- "interfaces": {
- "comment": "The Port's Interfaces. If there is more than one, this is a bonded Port.",
- "type": {"key": "uuid", "min": 1, "max": "unlimited"}},
- "trunks": {
- "comment": "The 802.1Q VLAN(s) that this port trunks. Should be empty if this port trunks all VLAN(s) or if this is not a trunk port.",
- "type": {"key": "integer", "min": 0, "max": 4096}},
- "tag": {
- "comment": "This port's implicitly tagged VLAN. Should be empty if this is a trunk port.",
- "type": {"key": "integer", "min": 0, "max": 1}},
- "updelay": {
- "comment": "For a bonded port, the number of milliseconds for which carrier must stay up on an interface before the interface is considered to be up. Ignored for non-bonded ports.",
- "type": "integer"},
- "downdelay": {
- "comment": "For a bonded port, the number of milliseconds for which carrier must stay down on an interface before the interface is considered to be down. Ignored for non-bonded ports.",
- "type": "integer"}}},
- "Interface": {
- "comment": "An interface within a Port.",
- "columns": {
- "name": {
- "comment": "Interface name. Should be alphanumeric and no more than about 8 bytes long. May be the same as the port name, for non-bonded ports. Must otherwise be unique among the names of ports, interfaces, and bridges on a host.",
- "type": "string"},
- "internal": {
- "comment": "An \"internal\" port is one that is implemented in software as a logical device.",
- "type": "boolean"},
- "ingress_policing_rate": {
- "comment": "Maximum rate for data received on this interface, in kbps. Set to 0 to disable policing.",
- "type": "integer"},
- "ingress_policing_burst": {
- "comment": "Maximum burst size for data received on this interface, in kb. The default burst size if set to 0 is 10 kb.",
- "type": "integer"}}},
- "Mirror": {
- "comment": "A port mirror within a Bridge.",
- "columns": {
- "name": {
- "comment": "Arbitrary identifier for the Mirror.",
- "type": "string"},
- "select_src_port": {
- "comment": "Ports on which arriving packets are selected for mirroring.",
- "type": {"key": "uuid", "min": 0, "max": "unlimited"}},
- "select_dst_port": {
- "comment": "Ports on which departing packets are selected for mirroring.",
- "type": {"key": "uuid", "min": 0, "max": "unlimited"}},
- "select_vlan": {
- "comment": "VLANs on which packets are selected for mirroring.",
- "type": {"key": "integer", "min": 0, "max": 4096}},
- "output_port": {
- "comment": "Output port for selected packets. Mutually exclusive with output_vlan.",
- "type": {"key": "uuid", "min": 0, "max": 1}},
- "output_vlan": {
- "comment": "Output VLAN for selected packets. Mutually exclusive with output_vlan.",
- "type": {"key": "integer", "min": 0, "max": 1}}}},
- "Netflow": {
- "comment": "A Netflow target.",
- "columns": {
- "target": {
- "comment": "Netflow target in the form \"IP:PORT\".",
- "type": "string"},
- "engine_type": {
- "comment": "Engine type to use in NetFlow messages. Defaults to datapath ID if not specified.",
- "type": "integer"},
- "engine_id": {
- "comment": "Engine ID to use in NetFlow messages. Defaults to datapath ID if not specified.",
- "type": "integer"},
- "add_id_to_interface": {
- "comment": "Place least-significant 7 bits of engine ID into most significant bits of ingress and egress interface fields of NetFlow records?",
- "type": "boolean"}}},
- "Controller": {
- "comment": "An OpenFlow controller.",
- "columns": {
- "target": {
- "comment": "Connection method for controller, e.g. \"ssl:...\", \"tcp:...\". The special string \"discover\" enables controller discovery.",
- "type": "string"},
- "max_backoff": {
- "comment": "Maximum number of milliseconds to wait between connection attempts. Default is implementation-specific.",
- "type": {"key": "integer", "min": 0, "max": 1}},
- "inactivity_probe": {
- "comment": "Maximum number of milliseconds of idle time on connection to controller before sending an inactivity probe message. Default is implementation-specific.",
- "type": {"key": "integer", "min": 0, "max": 1}},
- "fail_mode": {
- "comment": "Either \"standalone\" or \"secure\", or empty to use the implementation's default.",
- "type": {"key": "string", "min": 0, "max": 1}},
- "discovery_accept_regex": {
- "comment": "If \"target\" is \"discovery\", a POSIX extended regular expression against which the discovered controller location is validated. If not specified, the default is implementation-specific.",
- "type": {"key": "string", "min": 0, "max": 1}},
- "discovery_update_resolv_conf": {
- "comment": "If \"target\" is \"discovery\", whether to update /etc/resolv.conf when the controller is discovered. If not specified, the default is implementation-specific.",
- "type": {"key": "boolean", "min": 0, "max": 1}},
- "connection_mode": {
- "comment": "Either \"in-band\" or \"out-of-band\". If not specified, the default is implementation-specific.",
- "type": {"key": "string", "min": 0, "max": 1}},
- "local_ip": {
- "comment": "If \"target\" is not \"discovery\", the IP address to configure on the local port.",
- "type": {"key": "string", "min": 0, "max": 1}},
- "local_netmask": {
- "comment": "If \"target\" is not \"discovery\", the IP netmask to configure on the local port.",
- "type": {"key": "string", "min": 0, "max": 1}},
- "local_gateway": {
- "comment": "If \"target\" is not \"discovery\", the IP gateway to configure on the local port.",
- "type": {"key": "string", "min": 0, "max": 1}},
- "controller_rate_limit": {
- "comment": "The maximum rate at which packets will be forwarded to the OpenFlow controller, in packets per second. If not specified, the default is implementation-specific.",
- "type": {"key": "integer", "min": 0, "max": 1}},
- "controller_burst_limit": {
- "comment": "The maximum number of unused packet credits that the bridge will allow to accumulate, in packets. If not specified, the default is implementation-specific.",
- "type": {"key": "integer", "min": 0, "max": 1}}}},
- "SSL": {
- "comment": "SSL configuration for an OpenVSwitch.",
- "columns": {
- "private_key": {
- "comment": "Name of a PEM file containing the private key used as the switch's identity for SSL connections to the controller.",
- "type": "string"},
- "certificate": {
- "comment": "Name of a PEM file containing a certificate, signed by the certificate authority (CA) used by the controller and manager, that certifies the switch's private key, identifying a trustworthy switch.",
- "type": "string"},
- "ca_cert": {
- "comment": "Name of a PEM file containing the CA certificate used to verify that the switch is connected to a trustworthy controller.",
- "type": "string"}}}}}