Bug 920372 - Import Chromium seccomp-bpf compiler, rev 4c08f442d2588a2c7cfaa117a55bd87d2ac32f9a. r=kang
author Jed Davis <jld@mozilla.com>
Tue, 20 May 2014 18:37:45 -0700
changeset 184041 1541c3c6e894c3162edff99f6a0114c23620d4be
parent 184040 94e7e63ae385c87cbb1de52a1641b628b72e8401
child 184042 b56d5602d0cdcc7f06a82538e52fde16aa0d84a3
push id 26810
push user cbook@mozilla.com
push date Wed, 21 May 2014 11:46:36 +0000
reviewers kang
bugs 920372
milestone 32.0a1
Bug 920372 - Import Chromium seccomp-bpf compiler, rev 4c08f442d2588a2c7cfaa117a55bd87d2ac32f9a. r=kang

Newly imported:
* sandbox/linux/seccomp-bpf/
* sandbox/linux/sandbox_export.h
* base/posix/eintr_wrapper.h

Updated:
* base/basictypes.h
* base/macros.h

At the time of this writing (see future patches for this bug) the only
things we're using from sandbox/linux/seccomp-bpf/ are codegen.cc and
basicblock.cc, and the header files they require. However, we may use
more of this code in the future, and it seems cleaner in general to
import the entire subtree.
security/sandbox/chromium/LICENSE
security/sandbox/chromium/base/basictypes.h
security/sandbox/chromium/base/macros.h
security/sandbox/chromium/base/posix/eintr_wrapper.h
security/sandbox/chromium/sandbox/linux/sandbox_export.h
security/sandbox/chromium/sandbox/linux/seccomp-bpf/basicblock.cc
security/sandbox/chromium/sandbox/linux/seccomp-bpf/basicblock.h
security/sandbox/chromium/sandbox/linux/seccomp-bpf/bpf_tests.h
security/sandbox/chromium/sandbox/linux/seccomp-bpf/codegen.cc
security/sandbox/chromium/sandbox/linux/seccomp-bpf/codegen.h
security/sandbox/chromium/sandbox/linux/seccomp-bpf/codegen_unittest.cc
security/sandbox/chromium/sandbox/linux/seccomp-bpf/demo.cc
security/sandbox/chromium/sandbox/linux/seccomp-bpf/die.cc
security/sandbox/chromium/sandbox/linux/seccomp-bpf/die.h
security/sandbox/chromium/sandbox/linux/seccomp-bpf/errorcode.cc
security/sandbox/chromium/sandbox/linux/seccomp-bpf/errorcode.h
security/sandbox/chromium/sandbox/linux/seccomp-bpf/errorcode_unittest.cc
security/sandbox/chromium/sandbox/linux/seccomp-bpf/instruction.h
security/sandbox/chromium/sandbox/linux/seccomp-bpf/linux_seccomp.h
security/sandbox/chromium/sandbox/linux/seccomp-bpf/sandbox_bpf.cc
security/sandbox/chromium/sandbox/linux/seccomp-bpf/sandbox_bpf.h
security/sandbox/chromium/sandbox/linux/seccomp-bpf/sandbox_bpf_policy.h
security/sandbox/chromium/sandbox/linux/seccomp-bpf/sandbox_bpf_unittest.cc
security/sandbox/chromium/sandbox/linux/seccomp-bpf/syscall.cc
security/sandbox/chromium/sandbox/linux/seccomp-bpf/syscall.h
security/sandbox/chromium/sandbox/linux/seccomp-bpf/syscall_iterator.cc
security/sandbox/chromium/sandbox/linux/seccomp-bpf/syscall_iterator.h
security/sandbox/chromium/sandbox/linux/seccomp-bpf/syscall_iterator_unittest.cc
security/sandbox/chromium/sandbox/linux/seccomp-bpf/syscall_unittest.cc
security/sandbox/chromium/sandbox/linux/seccomp-bpf/trap.cc
security/sandbox/chromium/sandbox/linux/seccomp-bpf/trap.h
security/sandbox/chromium/sandbox/linux/seccomp-bpf/verifier.cc
security/sandbox/chromium/sandbox/linux/seccomp-bpf/verifier.h
new file mode 100644
--- /dev/null
+++ b/security/sandbox/chromium/LICENSE
@@ -0,0 +1,27 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//    * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//    * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//    * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--- a/security/sandbox/chromium/base/basictypes.h
+++ b/security/sandbox/chromium/base/basictypes.h
@@ -1,369 +1,58 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Copyright 2013 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+// This file contains definitions of our old basic integral types
+// ((u)int{8,16,32,64}) and further includes. I recommend that you use the C99
+// standard types instead, and include <stdint.h>/<stddef.h>/etc. as needed.
+// Note that the macros and macro-like constructs that were formerly defined in
+// this file are now available separately in base/macros.h.
+
 #ifndef BASE_BASICTYPES_H_
 #define BASE_BASICTYPES_H_
 
-#include <limits.h>         // So we can set the bounds of our types
-#include <stddef.h>         // For size_t
-#include <string.h>         // for memcpy
+#include <limits.h>  // So we can set the bounds of our types.
+#include <stddef.h>  // For size_t.
+#include <stdint.h>  // For intptr_t.
 
-#include "base/port.h"    // Types that only need exist on certain systems
+#include "base/macros.h"
+#include "base/port.h"  // Types that only need exist on certain systems.
 
-#ifndef COMPILER_MSVC
-// stdint.h is part of C99 but MSVC doesn't have it.
-#include <stdint.h>         // For intptr_t.
-#endif
+// DEPRECATED: Please use (u)int{8,16,32,64}_t instead (and include <stdint.h>).
+typedef int8_t int8;
+typedef uint8_t uint8;
+typedef int16_t int16;
+typedef int32_t int32;
+typedef uint16_t uint16;
+typedef uint32_t uint32;
 
-typedef signed char         schar;
-typedef signed char         int8;
-typedef short               int16;
-typedef int                 int32;
-
+// TODO(vtl): Figure out what's up with the 64-bit types. Can we just define
+// them as |int64_t|/|uint64_t|?
 // The NSPR system headers define 64-bit as |long| when possible, except on
 // Mac OS X.  In order to not have typedef mismatches, we do the same on LP64.
 //
 // On Mac OS X, |long long| is used for 64-bit types for compatibility with
 // <inttypes.h> format macros even in the LP64 model.
 #if defined(__LP64__) && !defined(OS_MACOSX) && !defined(OS_OPENBSD)
-typedef long                int64;
-#else
-typedef long long           int64;
-#endif
-
-// NOTE: It is DANGEROUS to compare signed with unsigned types in loop
-// conditions and other conditional expressions, and it is DANGEROUS to
-// compute object/allocation sizes, indices, and offsets with signed types.
-// Integer overflow behavior for signed types is UNDEFINED in the C/C++
-// standards, but is defined for unsigned types.
-//
-// Use the unsigned types if your variable represents a bit pattern (e.g. a
-// hash value), object or allocation size, object count, offset,
-// array/vector index, etc.
-//
-// Do NOT use 'unsigned' to express "this value should always be positive";
-// use assertions for this.
-//
-// See the Chromium style guide for more information.
-// https://sites.google.com/a/chromium.org/dev/developers/coding-style
-
-typedef unsigned char      uint8;
-typedef unsigned short     uint16;
-typedef unsigned int       uint32;
-
-// See the comment above about NSPR and 64-bit.
-#if defined(__LP64__) && !defined(OS_MACOSX) && !defined(OS_OPENBSD)
+typedef long int64;
 typedef unsigned long uint64;
 #else
+typedef long long int64;
 typedef unsigned long long uint64;
 #endif
 
-// A type to represent a Unicode code-point value. As of Unicode 4.0,
-// such values require up to 21 bits.
-// (For type-checking on pointers, make this explicitly signed,
-// and it should always be the signed version of whatever int32 is.)
-typedef signed int         char32;
-
+// DEPRECATED: Please use std::numeric_limits (from <limits>) instead.
 const uint8  kuint8max  = (( uint8) 0xFF);
 const uint16 kuint16max = ((uint16) 0xFFFF);
 const uint32 kuint32max = ((uint32) 0xFFFFFFFF);
 const uint64 kuint64max = ((uint64) GG_LONGLONG(0xFFFFFFFFFFFFFFFF));
 const  int8  kint8min   = ((  int8) 0x80);
 const  int8  kint8max   = ((  int8) 0x7F);
 const  int16 kint16min  = (( int16) 0x8000);
 const  int16 kint16max  = (( int16) 0x7FFF);
 const  int32 kint32min  = (( int32) 0x80000000);
 const  int32 kint32max  = (( int32) 0x7FFFFFFF);
 const  int64 kint64min  = (( int64) GG_LONGLONG(0x8000000000000000));
 const  int64 kint64max  = (( int64) GG_LONGLONG(0x7FFFFFFFFFFFFFFF));
 
-// Put this in the private: declarations for a class to be uncopyable.
-#define DISALLOW_COPY(TypeName) \
-  TypeName(const TypeName&)
-
-// Put this in the private: declarations for a class to be unassignable.
-#define DISALLOW_ASSIGN(TypeName) \
-  void operator=(const TypeName&)
-
-// A macro to disallow the copy constructor and operator= functions
-// This should be used in the private: declarations for a class
-#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
-  TypeName(const TypeName&);               \
-  void operator=(const TypeName&)
-
-// An older, deprecated, politically incorrect name for the above.
-// NOTE: The usage of this macro was banned from our code base, but some
-// third_party libraries are yet using it.
-// TODO(tfarina): Figure out how to fix the usage of this macro in the
-// third_party libraries and get rid of it.
-#define DISALLOW_EVIL_CONSTRUCTORS(TypeName) DISALLOW_COPY_AND_ASSIGN(TypeName)
-
-// A macro to disallow all the implicit constructors, namely the
-// default constructor, copy constructor and operator= functions.
-//
-// This should be used in the private: declarations for a class
-// that wants to prevent anyone from instantiating it. This is
-// especially useful for classes containing only static methods.
-#define DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \
-  TypeName();                                    \
-  DISALLOW_COPY_AND_ASSIGN(TypeName)
-
-// The arraysize(arr) macro returns the # of elements in an array arr.
-// The expression is a compile-time constant, and therefore can be
-// used in defining new arrays, for example.  If you use arraysize on
-// a pointer by mistake, you will get a compile-time error.
-//
-// One caveat is that arraysize() doesn't accept any array of an
-// anonymous type or a type defined inside a function.  In these rare
-// cases, you have to use the unsafe ARRAYSIZE_UNSAFE() macro below.  This is
-// due to a limitation in C++'s template system.  The limitation might
-// eventually be removed, but it hasn't happened yet.
-
-// This template function declaration is used in defining arraysize.
-// Note that the function doesn't need an implementation, as we only
-// use its type.
-template <typename T, size_t N>
-char (&ArraySizeHelper(T (&array)[N]))[N];
-
-// That gcc wants both of these prototypes seems mysterious. VC, for
-// its part, can't decide which to use (another mystery). Matching of
-// template overloads: the final frontier.
-#ifndef _MSC_VER
-template <typename T, size_t N>
-char (&ArraySizeHelper(const T (&array)[N]))[N];
-#endif
-
-#define arraysize(array) (sizeof(ArraySizeHelper(array)))
-
-// ARRAYSIZE_UNSAFE performs essentially the same calculation as arraysize,
-// but can be used on anonymous types or types defined inside
-// functions.  It's less safe than arraysize as it accepts some
-// (although not all) pointers.  Therefore, you should use arraysize
-// whenever possible.
-//
-// The expression ARRAYSIZE_UNSAFE(a) is a compile-time constant of type
-// size_t.
-//
-// ARRAYSIZE_UNSAFE catches a few type errors.  If you see a compiler error
-//
-//   "warning: division by zero in ..."
-//
-// when using ARRAYSIZE_UNSAFE, you are (wrongfully) giving it a pointer.
-// You should only use ARRAYSIZE_UNSAFE on statically allocated arrays.
-//
-// The following comments are on the implementation details, and can
-// be ignored by the users.
-//
-// ARRAYSIZE_UNSAFE(arr) works by inspecting sizeof(arr) (the # of bytes in
-// the array) and sizeof(*(arr)) (the # of bytes in one array
-// element).  If the former is divisible by the latter, perhaps arr is
-// indeed an array, in which case the division result is the # of
-// elements in the array.  Otherwise, arr cannot possibly be an array,
-// and we generate a compiler error to prevent the code from
-// compiling.
-//
-// Since the size of bool is implementation-defined, we need to cast
-// !(sizeof(a) & sizeof(*(a))) to size_t in order to ensure the final
-// result has type size_t.
-//
-// This macro is not perfect as it wrongfully accepts certain
-// pointers, namely where the pointer size is divisible by the pointee
-// size.  Since all our code has to go through a 32-bit compiler,
-// where a pointer is 4 bytes, this means all pointers to a type whose
-// size is 3 or greater than 4 will be (righteously) rejected.
-
-#define ARRAYSIZE_UNSAFE(a) \
-  ((sizeof(a) / sizeof(*(a))) / \
-   static_cast<size_t>(!(sizeof(a) % sizeof(*(a)))))
-
-
-// Use implicit_cast as a safe version of static_cast or const_cast
-// for upcasting in the type hierarchy (i.e. casting a pointer to Foo
-// to a pointer to SuperclassOfFoo or casting a pointer to Foo to
-// a const pointer to Foo).
-// When you use implicit_cast, the compiler checks that the cast is safe.
-// Such explicit implicit_casts are necessary in surprisingly many
-// situations where C++ demands an exact type match instead of an
-// argument type convertible to a target type.
-//
-// The From type can be inferred, so the preferred syntax for using
-// implicit_cast is the same as for static_cast etc.:
-//
-//   implicit_cast<ToType>(expr)
-//
-// implicit_cast would have been part of the C++ standard library,
-// but the proposal was submitted too late.  It will probably make
-// its way into the language in the future.
-template<typename To, typename From>
-inline To implicit_cast(From const &f) {
-  return f;
-}
-
-// The COMPILE_ASSERT macro can be used to verify that a compile time
-// expression is true. For example, you could use it to verify the
-// size of a static array:
-//
-//   COMPILE_ASSERT(ARRAYSIZE_UNSAFE(content_type_names) == CONTENT_NUM_TYPES,
-//                  content_type_names_incorrect_size);
-//
-// or to make sure a struct is smaller than a certain size:
-//
-//   COMPILE_ASSERT(sizeof(foo) < 128, foo_too_large);
-//
-// The second argument to the macro is the name of the variable. If
-// the expression is false, most compilers will issue a warning/error
-// containing the name of the variable.
-
-template <bool>
-struct CompileAssert {
-};
-
-#undef COMPILE_ASSERT
-#define COMPILE_ASSERT(expr, msg) \
-  typedef CompileAssert<(bool(expr))> msg[bool(expr) ? 1 : -1]
-
-// Implementation details of COMPILE_ASSERT:
-//
-// - COMPILE_ASSERT works by defining an array type that has -1
-//   elements (and thus is invalid) when the expression is false.
-//
-// - The simpler definition
-//
-//     #define COMPILE_ASSERT(expr, msg) typedef char msg[(expr) ? 1 : -1]
-//
-//   does not work, as gcc supports variable-length arrays whose sizes
-//   are determined at run-time (this is gcc's extension and not part
-//   of the C++ standard).  As a result, gcc fails to reject the
-//   following code with the simple definition:
-//
-//     int foo;
-//     COMPILE_ASSERT(foo, msg); // not supposed to compile as foo is
-//                               // not a compile-time constant.
-//
-// - By using the type CompileAssert<(bool(expr))>, we ensures that
-//   expr is a compile-time constant.  (Template arguments must be
-//   determined at compile-time.)
-//
-// - The outer parentheses in CompileAssert<(bool(expr))> are necessary
-//   to work around a bug in gcc 3.4.4 and 4.0.1.  If we had written
-//
-//     CompileAssert<bool(expr)>
-//
-//   instead, these compilers will refuse to compile
-//
-//     COMPILE_ASSERT(5 > 0, some_message);
-//
-//   (They seem to think the ">" in "5 > 0" marks the end of the
-//   template argument list.)
-//
-// - The array size is (bool(expr) ? 1 : -1), instead of simply
-//
-//     ((expr) ? 1 : -1).
-//
-//   This is to avoid running into a bug in MS VC 7.1, which
-//   causes ((0.0) ? 1 : -1) to incorrectly evaluate to 1.
-
-
-// bit_cast<Dest,Source> is a template function that implements the
-// equivalent of "*reinterpret_cast<Dest*>(&source)".  We need this in
-// very low-level functions like the protobuf library and fast math
-// support.
-//
-//   float f = 3.14159265358979;
-//   int i = bit_cast<int32>(f);
-//   // i = 0x40490fdb
-//
-// The classical address-casting method is:
-//
-//   // WRONG
-//   float f = 3.14159265358979;            // WRONG
-//   int i = * reinterpret_cast<int*>(&f);  // WRONG
-//
-// The address-casting method actually produces undefined behavior
-// according to ISO C++ specification section 3.10 -15 -.  Roughly, this
-// section says: if an object in memory has one type, and a program
-// accesses it with a different type, then the result is undefined
-// behavior for most values of "different type".
-//
-// This is true for any cast syntax, either *(int*)&f or
-// *reinterpret_cast<int*>(&f).  And it is particularly true for
-// conversions between integral lvalues and floating-point lvalues.
-//
-// The purpose of 3.10 -15- is to allow optimizing compilers to assume
-// that expressions with different types refer to different memory.  gcc
-// 4.0.1 has an optimizer that takes advantage of this.  So a
-// non-conforming program quietly produces wildly incorrect output.
-//
-// The problem is not the use of reinterpret_cast.  The problem is type
-// punning: holding an object in memory of one type and reading its bits
-// back using a different type.
-//
-// The C++ standard is more subtle and complex than this, but that
-// is the basic idea.
-//
-// Anyways ...
-//
-// bit_cast<> calls memcpy() which is blessed by the standard,
-// especially by the example in section 3.9 .  Also, of course,
-// bit_cast<> wraps up the nasty logic in one place.
-//
-// Fortunately memcpy() is very fast.  In optimized mode, with a
-// constant size, gcc 2.95.3, gcc 4.0.1, and msvc 7.1 produce inline
-// code with the minimal amount of data movement.  On a 32-bit system,
-// memcpy(d,s,4) compiles to one load and one store, and memcpy(d,s,8)
-// compiles to two loads and two stores.
-//
-// I tested this code with gcc 2.95.3, gcc 4.0.1, icc 8.1, and msvc 7.1.
-//
-// WARNING: if Dest or Source is a non-POD type, the result of the memcpy
-// is likely to surprise you.
-
-template <class Dest, class Source>
-inline Dest bit_cast(const Source& source) {
-  // Compile time assertion: sizeof(Dest) == sizeof(Source)
-  // A compile error here means your Dest and Source have different sizes.
-  typedef char VerifySizesAreEqual [sizeof(Dest) == sizeof(Source) ? 1 : -1];
-
-  Dest dest;
-  memcpy(&dest, &source, sizeof(dest));
-  return dest;
-}
-
-// Used to explicitly mark the return value of a function as unused. If you are
-// really sure you don't want to do anything with the return value of a function
-// that has been marked WARN_UNUSED_RESULT, wrap it with this. Example:
-//
-//   scoped_ptr<MyType> my_var = ...;
-//   if (take(my_var.get()) == SUCCESS)
-//     ignore_result(my_var.release());
-//
-template<typename T>
-inline void ignore_result(const T&) {
-}
-
-// The following enum should be used only as a constructor argument to indicate
-// that the variable has static storage class, and that the constructor should
-// do nothing to its state.  It indicates to the reader that it is legal to
-// declare a static instance of the class, provided the constructor is given
-// the base::LINKER_INITIALIZED argument.  Normally, it is unsafe to declare a
-// static variable that has a constructor or a destructor because invocation
-// order is undefined.  However, IF the type can be initialized by filling with
-// zeroes (which the loader does for static variables), AND the destructor also
-// does nothing to the storage, AND there are no virtual methods, then a
-// constructor declared as
-//       explicit MyClass(base::LinkerInitialized x) {}
-// and invoked as
-//       static MyClass my_variable_name(base::LINKER_INITIALIZED);
-namespace base {
-enum LinkerInitialized { LINKER_INITIALIZED };
-
-// Use these to declare and define a static local variable (static T;) so that
-// it is leaked so that its destructors are not called at exit. If you need
-// thread-safe initialization, use base/lazy_instance.h instead.
-#define CR_DEFINE_STATIC_LOCAL(type, name, arguments) \
-  static type& name = *new type arguments
-
-}  // base
-
 #endif  // BASE_BASICTYPES_H_
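The deprecation notes in the trimmed header point at the C99 types and
std::numeric_limits. A minimal self-contained sketch of the recommended
style, using only standard headers and no Chromium dependencies:

    #include <stdint.h>
    #include <limits>

    int main() {
      // Preferred over the deprecated int32/kint32max spellings above.
      int32_t max32 = std::numeric_limits<int32_t>::max();
      // Preferred over uint64/kuint64max.
      uint64_t max64 = std::numeric_limits<uint64_t>::max();
      return (max32 == 0x7FFFFFFF && max64 > 0) ? 0 : 1;
    }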
new file mode 100644
--- /dev/null
+++ b/security/sandbox/chromium/base/macros.h
@@ -0,0 +1,313 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains macros and macro-like constructs (e.g., templates) that
+// are commonly used throughout Chromium source. (It may also contain things
+// that are closely related to things that are commonly used that belong in this
+// file.)
+
+#ifndef BASE_MACROS_H_
+#define BASE_MACROS_H_
+
+#include <stddef.h>  // For size_t.
+#include <string.h>  // For memcpy.
+
+#include "base/compiler_specific.h"  // For ALLOW_UNUSED.
+
+// Put this in the private: declarations for a class to be uncopyable.
+#define DISALLOW_COPY(TypeName) \
+  TypeName(const TypeName&)
+
+// Put this in the private: declarations for a class to be unassignable.
+#define DISALLOW_ASSIGN(TypeName) \
+  void operator=(const TypeName&)
+
+// A macro to disallow the copy constructor and operator= functions
+// This should be used in the private: declarations for a class
+#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
+  TypeName(const TypeName&);               \
+  void operator=(const TypeName&)
+
+// An older, deprecated, politically incorrect name for the above.
+// NOTE: The usage of this macro was banned from our code base, but some
+// third_party libraries are still using it.
+// TODO(tfarina): Figure out how to fix the usage of this macro in the
+// third_party libraries and get rid of it.
+#define DISALLOW_EVIL_CONSTRUCTORS(TypeName) DISALLOW_COPY_AND_ASSIGN(TypeName)
+
+// A macro to disallow all the implicit constructors, namely the
+// default constructor, copy constructor and operator= functions.
+//
+// This should be used in the private: declarations for a class
+// that wants to prevent anyone from instantiating it. This is
+// especially useful for classes containing only static methods.
+#define DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \
+  TypeName();                                    \
+  DISALLOW_COPY_AND_ASSIGN(TypeName)
+
+// The arraysize(arr) macro returns the # of elements in an array arr.
+// The expression is a compile-time constant, and therefore can be
+// used in defining new arrays, for example.  If you use arraysize on
+// a pointer by mistake, you will get a compile-time error.
+//
+// One caveat is that arraysize() doesn't accept any array of an
+// anonymous type or a type defined inside a function.  In these rare
+// cases, you have to use the unsafe ARRAYSIZE_UNSAFE() macro below.  This is
+// due to a limitation in C++'s template system.  The limitation might
+// eventually be removed, but it hasn't happened yet.
+
+// This template function declaration is used in defining arraysize.
+// Note that the function doesn't need an implementation, as we only
+// use its type.
+template <typename T, size_t N>
+char (&ArraySizeHelper(T (&array)[N]))[N];
+
+// That gcc wants both of these prototypes seems mysterious. VC, for
+// its part, can't decide which to use (another mystery). Matching of
+// template overloads: the final frontier.
+#ifndef _MSC_VER
+template <typename T, size_t N>
+char (&ArraySizeHelper(const T (&array)[N]))[N];
+#endif
+
+#define arraysize(array) (sizeof(ArraySizeHelper(array)))
+
+// ARRAYSIZE_UNSAFE performs essentially the same calculation as arraysize,
+// but can be used on anonymous types or types defined inside
+// functions.  It's less safe than arraysize as it accepts some
+// (although not all) pointers.  Therefore, you should use arraysize
+// whenever possible.
+//
+// The expression ARRAYSIZE_UNSAFE(a) is a compile-time constant of type
+// size_t.
+//
+// ARRAYSIZE_UNSAFE catches a few type errors.  If you see a compiler error
+//
+//   "warning: division by zero in ..."
+//
+// when using ARRAYSIZE_UNSAFE, you are (wrongfully) giving it a pointer.
+// You should only use ARRAYSIZE_UNSAFE on statically allocated arrays.
+//
+// The following comments are on the implementation details, and can
+// be ignored by the users.
+//
+// ARRAYSIZE_UNSAFE(arr) works by inspecting sizeof(arr) (the # of bytes in
+// the array) and sizeof(*(arr)) (the # of bytes in one array
+// element).  If the former is divisible by the latter, perhaps arr is
+// indeed an array, in which case the division result is the # of
+// elements in the array.  Otherwise, arr cannot possibly be an array,
+// and we generate a compiler error to prevent the code from
+// compiling.
+//
+// Since the size of bool is implementation-defined, we need to cast
+// !(sizeof(a) & sizeof(*(a))) to size_t in order to ensure the final
+// result has type size_t.
+//
+// This macro is not perfect as it wrongfully accepts certain
+// pointers, namely where the pointer size is divisible by the pointee
+// size.  Since all our code has to go through a 32-bit compiler,
+// where a pointer is 4 bytes, this means all pointers to a type whose
+// size is 3 or greater than 4 will be (righteously) rejected.
+
+#define ARRAYSIZE_UNSAFE(a) \
+  ((sizeof(a) / sizeof(*(a))) / \
+   static_cast<size_t>(!(sizeof(a) % sizeof(*(a)))))
+
+
+// Use implicit_cast as a safe version of static_cast or const_cast
+// for upcasting in the type hierarchy (i.e. casting a pointer to Foo
+// to a pointer to SuperclassOfFoo or casting a pointer to Foo to
+// a const pointer to Foo).
+// When you use implicit_cast, the compiler checks that the cast is safe.
+// Such explicit implicit_casts are necessary in surprisingly many
+// situations where C++ demands an exact type match instead of an
+// argument type convertible to a target type.
+//
+// The From type can be inferred, so the preferred syntax for using
+// implicit_cast is the same as for static_cast etc.:
+//
+//   implicit_cast<ToType>(expr)
+//
+// implicit_cast would have been part of the C++ standard library,
+// but the proposal was submitted too late.  It will probably make
+// its way into the language in the future.
+template<typename To, typename From>
+inline To implicit_cast(From const &f) {
+  return f;
+}
+
+// The COMPILE_ASSERT macro can be used to verify that a compile time
+// expression is true. For example, you could use it to verify the
+// size of a static array:
+//
+//   COMPILE_ASSERT(ARRAYSIZE_UNSAFE(content_type_names) == CONTENT_NUM_TYPES,
+//                  content_type_names_incorrect_size);
+//
+// or to make sure a struct is smaller than a certain size:
+//
+//   COMPILE_ASSERT(sizeof(foo) < 128, foo_too_large);
+//
+// The second argument to the macro is the name of the variable. If
+// the expression is false, most compilers will issue a warning/error
+// containing the name of the variable.
+
+#undef COMPILE_ASSERT
+
+#if __cplusplus >= 201103L
+
+// Under C++11, just use static_assert.
+#define COMPILE_ASSERT(expr, msg) static_assert(expr, #msg)
+
+#else
+
+template <bool>
+struct CompileAssert {
+};
+
+#define COMPILE_ASSERT(expr, msg) \
+  typedef CompileAssert<(bool(expr))> msg[bool(expr) ? 1 : -1] ALLOW_UNUSED
+
+// Implementation details of COMPILE_ASSERT:
+//
+// - COMPILE_ASSERT works by defining an array type that has -1
+//   elements (and thus is invalid) when the expression is false.
+//
+// - The simpler definition
+//
+//     #define COMPILE_ASSERT(expr, msg) typedef char msg[(expr) ? 1 : -1]
+//
+//   does not work, as gcc supports variable-length arrays whose sizes
+//   are determined at run-time (this is gcc's extension and not part
+//   of the C++ standard).  As a result, gcc fails to reject the
+//   following code with the simple definition:
+//
+//     int foo;
+//     COMPILE_ASSERT(foo, msg); // not supposed to compile as foo is
+//                               // not a compile-time constant.
+//
+// - By using the type CompileAssert<(bool(expr))>, we ensure that
+//   expr is a compile-time constant.  (Template arguments must be
+//   determined at compile-time.)
+//
+// - The outer parentheses in CompileAssert<(bool(expr))> are necessary
+//   to work around a bug in gcc 3.4.4 and 4.0.1.  If we had written
+//
+//     CompileAssert<bool(expr)>
+//
+//   instead, these compilers would refuse to compile
+//
+//     COMPILE_ASSERT(5 > 0, some_message);
+//
+//   (They seem to think the ">" in "5 > 0" marks the end of the
+//   template argument list.)
+//
+// - The array size is (bool(expr) ? 1 : -1), instead of simply
+//
+//     ((expr) ? 1 : -1).
+//
+//   This is to avoid running into a bug in MS VC 7.1, which
+//   causes ((0.0) ? 1 : -1) to incorrectly evaluate to 1.
+
+#endif
+
+// bit_cast<Dest,Source> is a template function that implements the
+// equivalent of "*reinterpret_cast<Dest*>(&source)".  We need this in
+// very low-level functions like the protobuf library and fast math
+// support.
+//
+//   float f = 3.14159265358979;
+//   int i = bit_cast<int32>(f);
+//   // i = 0x40490fdb
+//
+// The classical address-casting method is:
+//
+//   // WRONG
+//   float f = 3.14159265358979;            // WRONG
+//   int i = * reinterpret_cast<int*>(&f);  // WRONG
+//
+// The address-casting method actually produces undefined behavior
+// according to ISO C++ specification section 3.10 -15 -.  Roughly, this
+// section says: if an object in memory has one type, and a program
+// accesses it with a different type, then the result is undefined
+// behavior for most values of "different type".
+//
+// This is true for any cast syntax, either *(int*)&f or
+// *reinterpret_cast<int*>(&f).  And it is particularly true for
+// conversions between integral lvalues and floating-point lvalues.
+//
+// The purpose of 3.10 -15- is to allow optimizing compilers to assume
+// that expressions with different types refer to different memory.  gcc
+// 4.0.1 has an optimizer that takes advantage of this.  So a
+// non-conforming program quietly produces wildly incorrect output.
+//
+// The problem is not the use of reinterpret_cast.  The problem is type
+// punning: holding an object in memory of one type and reading its bits
+// back using a different type.
+//
+// The C++ standard is more subtle and complex than this, but that
+// is the basic idea.
+//
+// Anyways ...
+//
+// bit_cast<> calls memcpy() which is blessed by the standard,
+// especially by the example in section 3.9 .  Also, of course,
+// bit_cast<> wraps up the nasty logic in one place.
+//
+// Fortunately memcpy() is very fast.  In optimized mode, with a
+// constant size, gcc 2.95.3, gcc 4.0.1, and msvc 7.1 produce inline
+// code with the minimal amount of data movement.  On a 32-bit system,
+// memcpy(d,s,4) compiles to one load and one store, and memcpy(d,s,8)
+// compiles to two loads and two stores.
+//
+// I tested this code with gcc 2.95.3, gcc 4.0.1, icc 8.1, and msvc 7.1.
+//
+// WARNING: if Dest or Source is a non-POD type, the result of the memcpy
+// is likely to surprise you.
+
+template <class Dest, class Source>
+inline Dest bit_cast(const Source& source) {
+  COMPILE_ASSERT(sizeof(Dest) == sizeof(Source), VerifySizesAreEqual);
+
+  Dest dest;
+  memcpy(&dest, &source, sizeof(dest));
+  return dest;
+}
+
+// Used to explicitly mark the return value of a function as unused. If you are
+// really sure you don't want to do anything with the return value of a function
+// that has been marked WARN_UNUSED_RESULT, wrap it with this. Example:
+//
+//   scoped_ptr<MyType> my_var = ...;
+//   if (TakeOwnership(my_var.get()) == SUCCESS)
+//     ignore_result(my_var.release());
+//
+template<typename T>
+inline void ignore_result(const T&) {
+}
+
+// The following enum should be used only as a constructor argument to indicate
+// that the variable has static storage class, and that the constructor should
+// do nothing to its state.  It indicates to the reader that it is legal to
+// declare a static instance of the class, provided the constructor is given
+// the base::LINKER_INITIALIZED argument.  Normally, it is unsafe to declare a
+// static variable that has a constructor or a destructor because invocation
+// order is undefined.  However, IF the type can be initialized by filling with
+// zeroes (which the loader does for static variables), AND the destructor also
+// does nothing to the storage, AND there are no virtual methods, then a
+// constructor declared as
+//       explicit MyClass(base::LinkerInitialized x) {}
+// and invoked as
+//       static MyClass my_variable_name(base::LINKER_INITIALIZED);
+namespace base {
+enum LinkerInitialized { LINKER_INITIALIZED };
+
+// Use these to declare and define a static local variable (static T;) so that
+// it is leaked so that its destructors are not called at exit. If you need
+// thread-safe initialization, use base/lazy_instance.h instead.
+#define CR_DEFINE_STATIC_LOCAL(type, name, arguments) \
+  static type& name = *new type arguments
+
+}  // base
+
+#endif  // BASE_MACROS_H_
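A short sketch of typical use of the macros above, assuming a translation
unit with "base/macros.h" (and its base/compiler_specific.h dependency) on
the include path; the class, array, and function names are illustrative:

    #include <stdint.h>

    #include "base/macros.h"

    class Widget {
     public:
      Widget() {}

     private:
      DISALLOW_COPY_AND_ASSIGN(Widget);  // copying a Widget fails to compile
    };

    static const int kTable[] = {1, 2, 3};
    COMPILE_ASSERT(arraysize(kTable) == 3, kTable_has_unexpected_size);

    uint32_t FloatBits(float f) {
      return bit_cast<uint32_t>(f);  // type punning via memcpy, not pointer casts
    }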
new file mode 100644
--- /dev/null
+++ b/security/sandbox/chromium/base/posix/eintr_wrapper.h
@@ -0,0 +1,67 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This provides a wrapper around system calls which may be interrupted by a
+// signal and return EINTR. See man 7 signal.
+// To prevent long-lasting loops (which would likely be a bug, such as a signal
+// that should be masked) from going unnoticed, there is a limit after which the
+// caller will nonetheless see an EINTR in Debug builds.
+//
+// On Windows, this wrapper macro does nothing.
+//
+// Don't wrap close calls in HANDLE_EINTR. Use IGNORE_EINTR if the return
+// value of close is significant. See http://crbug.com/269623.
+
+#ifndef BASE_POSIX_EINTR_WRAPPER_H_
+#define BASE_POSIX_EINTR_WRAPPER_H_
+
+#include "build/build_config.h"
+
+#if defined(OS_POSIX)
+
+#include <errno.h>
+
+#if defined(NDEBUG)
+
+#define HANDLE_EINTR(x) ({ \
+  typeof(x) eintr_wrapper_result; \
+  do { \
+    eintr_wrapper_result = (x); \
+  } while (eintr_wrapper_result == -1 && errno == EINTR); \
+  eintr_wrapper_result; \
+})
+
+#else
+
+#define HANDLE_EINTR(x) ({ \
+  int eintr_wrapper_counter = 0; \
+  typeof(x) eintr_wrapper_result; \
+  do { \
+    eintr_wrapper_result = (x); \
+  } while (eintr_wrapper_result == -1 && errno == EINTR && \
+           eintr_wrapper_counter++ < 100); \
+  eintr_wrapper_result; \
+})
+
+#endif  // NDEBUG
+
+#define IGNORE_EINTR(x) ({ \
+  typeof(x) eintr_wrapper_result; \
+  do { \
+    eintr_wrapper_result = (x); \
+    if (eintr_wrapper_result == -1 && errno == EINTR) { \
+      eintr_wrapper_result = 0; \
+    } \
+  } while (0); \
+  eintr_wrapper_result; \
+})
+
+#else
+
+#define HANDLE_EINTR(x) (x)
+#define IGNORE_EINTR(x) (x)
+
+#endif  // OS_POSIX
+
+#endif  // BASE_POSIX_EINTR_WRAPPER_H_
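A sketch of the intended use on POSIX, assuming "base/posix/eintr_wrapper.h"
and its build/build_config.h dependency are on the include path:

    #include <unistd.h>

    #include "base/posix/eintr_wrapper.h"

    ssize_t ReadSome(int fd, char* buf, size_t len) {
      // Transparently restarts read() when it fails with EINTR.
      return HANDLE_EINTR(read(fd, buf, len));
    }

    int CloseFd(int fd) {
      // Per the header comment, close() must not be retried; IGNORE_EINTR
      // converts an EINTR "failure" into success instead.
      return IGNORE_EINTR(close(fd));
    }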
new file mode 100644
--- /dev/null
+++ b/security/sandbox/chromium/sandbox/linux/sandbox_export.h
@@ -0,0 +1,23 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_SANDBOX_EXPORT_H_
+#define SANDBOX_LINUX_SANDBOX_EXPORT_H_
+
+#if defined(COMPONENT_BUILD)
+
+#if defined(SANDBOX_IMPLEMENTATION)
+#define SANDBOX_EXPORT __attribute__((visibility("default")))
+#define SANDBOX_EXPORT_PRIVATE __attribute__((visibility("default")))
+#else
+#define SANDBOX_EXPORT
+#define SANDBOX_EXPORT_PRIVATE
+#endif  // defined(SANDBOX_IMPLEMENTATION)
+
+#else  // defined(COMPONENT_BUILD)
+#define SANDBOX_EXPORT
+#define SANDBOX_EXPORT_PRIVATE
+#endif  // defined(COMPONENT_BUILD)
+
+#endif  // SANDBOX_LINUX_SANDBOX_EXPORT_H_
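A sketch of how the annotation is applied; the class name is illustrative:

    #include "sandbox/linux/sandbox_export.h"

    namespace sandbox {

    // In a component build of the sandbox (SANDBOX_IMPLEMENTATION defined),
    // SANDBOX_EXPORT gives this class default ELF visibility; in all other
    // configurations it expands to nothing.
    class SANDBOX_EXPORT PolicyHolder {
     public:
      PolicyHolder() {}
    };

    }  // namespace sandbox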
new file mode 100644
--- /dev/null
+++ b/security/sandbox/chromium/sandbox/linux/seccomp-bpf/basicblock.cc
@@ -0,0 +1,13 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sandbox/linux/seccomp-bpf/basicblock.h"
+
+namespace sandbox {
+
+BasicBlock::BasicBlock() {}
+
+BasicBlock::~BasicBlock() {}
+
+}  // namespace sandbox
new file mode 100644
--- /dev/null
+++ b/security/sandbox/chromium/sandbox/linux/seccomp-bpf/basicblock.h
@@ -0,0 +1,49 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_SECCOMP_BPF_BASICBLOCK_H__
+#define SANDBOX_LINUX_SECCOMP_BPF_BASICBLOCK_H__
+
+#include <vector>
+
+#include "sandbox/linux/seccomp-bpf/instruction.h"
+
+namespace sandbox {
+
+struct BasicBlock {
+  BasicBlock();
+  ~BasicBlock();
+
+  // Our implementation of the code generator uses a "Less" operator to
+  // identify common sequences of basic blocks. This would normally be
+  // really easy to do, but STL requires us to wrap the comparator into
+  // a class. We begrudgingly add some code here that provides this wrapping.
+  template <class T>
+  class Less {
+   public:
+    Less(const T& data,
+         int (*cmp)(const BasicBlock*, const BasicBlock*, const T& data))
+        : data_(data), cmp_(cmp) {}
+
+    bool operator()(const BasicBlock* a, const BasicBlock* b) const {
+      return cmp_(a, b, data_) < 0;
+    }
+
+   private:
+    const T& data_;
+    int (*cmp_)(const BasicBlock*, const BasicBlock*, const T&);
+  };
+
+  // Basic blocks are essentially nothing more than a set of instructions.
+  std::vector<Instruction*> instructions;
+
+  // In order to compute relative branch offsets we need to keep track of
+  // how far our block is away from the very last basic block. The "offset"
+  // is measured in number of BPF instructions.
+  int offset;
+};
+
+}  // namespace sandbox
+
+#endif  // SANDBOX_LINUX_SECCOMP_BPF_BASICBLOCK_H__
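A sketch of the comparator wrapping described above, used the way the code
generator uses it: as the ordering of an STL map keyed on basic blocks. The
comparison function and aux type are illustrative; the real compiler
compares instruction sequences.

    #include <map>

    #include "sandbox/linux/seccomp-bpf/basicblock.h"

    namespace sandbox {

    // Three-way comparison threaded through BasicBlock::Less.
    static int CompareByOffset(const BasicBlock* a, const BasicBlock* b,
                               const int& /* aux */) {
      return a->offset - b->offset;
    }

    static void Example() {
      int aux = 0;  // extra data handed to every comparison
      typedef std::map<const BasicBlock*, int, BasicBlock::Less<int> > BlockIds;
      BlockIds ids(BasicBlock::Less<int>(aux, CompareByOffset));
      // Insertions into "ids" are now ordered (and deduplicated) by offset.
    }

    }  // namespace sandbox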
new file mode 100644
--- /dev/null
+++ b/security/sandbox/chromium/sandbox/linux/seccomp-bpf/bpf_tests.h
@@ -0,0 +1,116 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_SECCOMP_BPF_BPF_TESTS_H__
+#define SANDBOX_LINUX_SECCOMP_BPF_BPF_TESTS_H__
+
+#include <fcntl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include "build/build_config.h"
+#include "sandbox/linux/tests/unit_tests.h"
+#include "sandbox/linux/seccomp-bpf/sandbox_bpf.h"
+
+namespace sandbox {
+
+// A BPF_DEATH_TEST is just the same as a BPF_TEST, but it assumes that the
+// test will fail with a particular known error condition. Use the DEATH_XXX()
+// macros from unit_tests.h to specify the expected error condition.
+// A BPF_DEATH_TEST is always disabled under ThreadSanitizer, see
+// crbug.com/243968.
+#define BPF_DEATH_TEST(test_case_name, test_name, death, policy, aux...) \
+  void BPF_TEST_##test_name(sandbox::BPFTests<aux>::AuxType& BPF_AUX);   \
+  TEST(test_case_name, DISABLE_ON_TSAN(test_name)) {                     \
+    sandbox::BPFTests<aux>::TestArgs arg(BPF_TEST_##test_name, policy);  \
+    sandbox::BPFTests<aux>::RunTestInProcess(                            \
+        sandbox::BPFTests<aux>::TestWrapper, &arg, death);               \
+  }                                                                      \
+  void BPF_TEST_##test_name(sandbox::BPFTests<aux>::AuxType& BPF_AUX)
+
+// BPF_TEST() is a special version of SANDBOX_TEST(). It turns into a no-op
+// if the host does not have kernel support for running BPF filters.
+// Also, it takes advantage of the Die class to avoid calling LOG(FATAL) from
+// inside our tests, as we don't need or even want all the error handling that
+// LOG(FATAL) would do.
+// BPF_TEST() takes a C++ data type as an optional fourth parameter. If
+// present, this sets up a variable that can be accessed as "BPF_AUX". This
+// variable will be passed as an argument to the "policy" function. Policies
+// would typically use it as an argument to SandboxBPF::Trap(), if they want to
+// communicate data between the BPF_TEST() and a Trap() function.
+#define BPF_TEST(test_case_name, test_name, policy, aux...) \
+  BPF_DEATH_TEST(test_case_name, test_name, DEATH_SUCCESS(), policy, aux)
+
+// Assertions are handled exactly the same as with a normal SANDBOX_TEST()
+#define BPF_ASSERT SANDBOX_ASSERT
+
+// The "Aux" type is optional. We use an "empty" type by default, so that if
+// the caller doesn't provide any type, all the BPF_AUX related data compiles
+// to nothing.
+template <class Aux = int[0]>
+class BPFTests : public UnitTests {
+ public:
+  typedef Aux AuxType;
+
+  class TestArgs {
+   public:
+    TestArgs(void (*t)(AuxType&), sandbox::SandboxBPF::EvaluateSyscall p)
+        : test_(t), policy_(p), aux_() {}
+
+    void (*test() const)(AuxType&) { return test_; }
+    sandbox::SandboxBPF::EvaluateSyscall policy() const { return policy_; }
+
+   private:
+    friend class BPFTests;
+
+    void (*test_)(AuxType&);
+    sandbox::SandboxBPF::EvaluateSyscall policy_;
+    AuxType aux_;
+  };
+
+  static void TestWrapper(void* void_arg) {
+    TestArgs* arg = reinterpret_cast<TestArgs*>(void_arg);
+    sandbox::Die::EnableSimpleExit();
+    if (sandbox::SandboxBPF::SupportsSeccompSandbox(-1) ==
+        sandbox::SandboxBPF::STATUS_AVAILABLE) {
+      // Ensure that the sandbox is actually available at this time.
+      int proc_fd;
+      BPF_ASSERT((proc_fd = open("/proc", O_RDONLY | O_DIRECTORY)) >= 0);
+      BPF_ASSERT(sandbox::SandboxBPF::SupportsSeccompSandbox(proc_fd) ==
+                 sandbox::SandboxBPF::STATUS_AVAILABLE);
+
+      // Initialize and then start the sandbox with our custom policy
+      sandbox::SandboxBPF sandbox;
+      sandbox.set_proc_fd(proc_fd);
+      sandbox.SetSandboxPolicyDeprecated(arg->policy(), &arg->aux_);
+      BPF_ASSERT(sandbox.StartSandbox(
+          sandbox::SandboxBPF::PROCESS_SINGLE_THREADED));
+
+      arg->test()(arg->aux_);
+    } else {
+      printf("This BPF test is not fully running in this configuration!\n");
+      // Android and Valgrind are the only configurations where we accept not
+      // having kernel BPF support.
+      if (!IsAndroid() && !IsRunningOnValgrind()) {
+        const bool seccomp_bpf_is_supported = false;
+        BPF_ASSERT(seccomp_bpf_is_supported);
+      }
+      // Call the compiler and verify the policy. That's the least we can do,
+      // if we don't have kernel support.
+      sandbox::SandboxBPF sandbox;
+      sandbox.SetSandboxPolicyDeprecated(arg->policy(), &arg->aux_);
+      sandbox::SandboxBPF::Program* program =
+          sandbox.AssembleFilter(true /* force_verification */);
+      delete program;
+      sandbox::UnitTests::IgnoreThisTest();
+    }
+  }
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(BPFTests);
+};
+
+}  // namespace sandbox
+
+#endif  // SANDBOX_LINUX_SECCOMP_BPF_BPF_TESTS_H__
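A sketch of a minimal BPF_TEST, assuming the deprecated EvaluateSyscall
policy-function signature that this header's TestArgs expects; the policy
and test names are illustrative:

    #include <errno.h>
    #include <unistd.h>

    #include "sandbox/linux/seccomp-bpf/bpf_tests.h"

    namespace sandbox {

    // Allows every valid system call. Policies must never allow invalid
    // syscall numbers; returning an errno is the conventional response.
    ErrorCode AllowAllPolicy(SandboxBPF* sandbox, int sysno, void* aux) {
      if (!SandboxBPF::IsValidSyscallNumber(sysno)) {
        return ErrorCode(ENOSYS);
      }
      return ErrorCode(ErrorCode::ERR_ALLOWED);
    }

    BPF_TEST(BPFTestExample, TrivialSyscallsStillWork, AllowAllPolicy) {
      // Runs inside the freshly started sandbox; BPF_ASSERT reports failures.
      BPF_ASSERT(getpid() > 0);
    }

    }  // namespace sandbox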
new file mode 100644
--- /dev/null
+++ b/security/sandbox/chromium/sandbox/linux/seccomp-bpf/codegen.cc
@@ -0,0 +1,774 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdio.h>
+
+#include "base/logging.h"
+#include "sandbox/linux/seccomp-bpf/codegen.h"
+
+namespace {
+
+// Helper function for Traverse().
+void TraverseRecursively(std::set<sandbox::Instruction*>* visited,
+                         sandbox::Instruction* instruction) {
+  if (visited->find(instruction) == visited->end()) {
+    visited->insert(instruction);
+    switch (BPF_CLASS(instruction->code)) {
+      case BPF_JMP:
+        if (BPF_OP(instruction->code) != BPF_JA) {
+          TraverseRecursively(visited, instruction->jf_ptr);
+        }
+        TraverseRecursively(visited, instruction->jt_ptr);
+        break;
+      case BPF_RET:
+        break;
+      default:
+        TraverseRecursively(visited, instruction->next);
+        break;
+    }
+  }
+}
+
+}  // namespace
+
+namespace sandbox {
+
+CodeGen::CodeGen() : compiled_(false) {}
+
+CodeGen::~CodeGen() {
+  for (Instructions::iterator iter = instructions_.begin();
+       iter != instructions_.end();
+       ++iter) {
+    delete *iter;
+  }
+  for (BasicBlocks::iterator iter = basic_blocks_.begin();
+       iter != basic_blocks_.end();
+       ++iter) {
+    delete *iter;
+  }
+}
+
+void CodeGen::PrintProgram(const SandboxBPF::Program& program) {
+  for (SandboxBPF::Program::const_iterator iter = program.begin();
+       iter != program.end();
+       ++iter) {
+    int ip = (int)(iter - program.begin());
+    fprintf(stderr, "%3d) ", ip);
+    switch (BPF_CLASS(iter->code)) {
+      case BPF_LD:
+        if (iter->code == BPF_LD + BPF_W + BPF_ABS) {
+          fprintf(stderr, "LOAD %d  // ", (int)iter->k);
+          if (iter->k == offsetof(struct arch_seccomp_data, nr)) {
+            fprintf(stderr, "System call number\n");
+          } else if (iter->k == offsetof(struct arch_seccomp_data, arch)) {
+            fprintf(stderr, "Architecture\n");
+          } else if (iter->k ==
+                     offsetof(struct arch_seccomp_data, instruction_pointer)) {
+            fprintf(stderr, "Instruction pointer (LSB)\n");
+          } else if (iter->k ==
+                     offsetof(struct arch_seccomp_data, instruction_pointer) +
+                         4) {
+            fprintf(stderr, "Instruction pointer (MSB)\n");
+          } else if (iter->k >= offsetof(struct arch_seccomp_data, args) &&
+                     iter->k < offsetof(struct arch_seccomp_data, args) + 48 &&
+                     (iter->k - offsetof(struct arch_seccomp_data, args)) % 4 ==
+                         0) {
+            fprintf(
+                stderr,
+                "Argument %d (%cSB)\n",
+                (int)(iter->k - offsetof(struct arch_seccomp_data, args)) / 8,
+                (iter->k - offsetof(struct arch_seccomp_data, args)) % 8 ? 'M'
+                                                                         : 'L');
+          } else {
+            fprintf(stderr, "???\n");
+          }
+        } else {
+          fprintf(stderr, "LOAD ???\n");
+        }
+        break;
+      case BPF_JMP:
+        if (BPF_OP(iter->code) == BPF_JA) {
+          fprintf(stderr, "JMP %d\n", ip + iter->k + 1);
+        } else {
+          fprintf(stderr, "if A %s 0x%x; then JMP %d else JMP %d\n",
+              BPF_OP(iter->code) == BPF_JSET ? "&" :
+              BPF_OP(iter->code) == BPF_JEQ ? "==" :
+              BPF_OP(iter->code) == BPF_JGE ? ">=" :
+              BPF_OP(iter->code) == BPF_JGT ? ">"  : "???",
+              (int)iter->k,
+              ip + iter->jt + 1, ip + iter->jf + 1);
+        }
+        break;
+      case BPF_RET:
+        fprintf(stderr, "RET 0x%x  // ", iter->k);
+        if ((iter->k & SECCOMP_RET_ACTION) == SECCOMP_RET_TRAP) {
+          fprintf(stderr, "Trap #%d\n", iter->k & SECCOMP_RET_DATA);
+        } else if ((iter->k & SECCOMP_RET_ACTION) == SECCOMP_RET_ERRNO) {
+          fprintf(stderr, "errno = %d\n", iter->k & SECCOMP_RET_DATA);
+        } else if (iter->k == SECCOMP_RET_ALLOW) {
+          fprintf(stderr, "Allowed\n");
+        } else {
+          fprintf(stderr, "???\n");
+        }
+        break;
+      case BPF_ALU:
+        fprintf(stderr, BPF_OP(iter->code) == BPF_NEG
+            ? "A := -A\n" : "A := A %s 0x%x\n",
+            BPF_OP(iter->code) == BPF_ADD ? "+"  :
+            BPF_OP(iter->code) == BPF_SUB ? "-"  :
+            BPF_OP(iter->code) == BPF_MUL ? "*"  :
+            BPF_OP(iter->code) == BPF_DIV ? "/"  :
+            BPF_OP(iter->code) == BPF_MOD ? "%"  :
+            BPF_OP(iter->code) == BPF_OR  ? "|"  :
+            BPF_OP(iter->code) == BPF_XOR ? "^"  :
+            BPF_OP(iter->code) == BPF_AND ? "&"  :
+            BPF_OP(iter->code) == BPF_LSH ? "<<" :
+            BPF_OP(iter->code) == BPF_RSH ? ">>" : "???",
+            (int)iter->k);
+        break;
+      default:
+        fprintf(stderr, "???\n");
+        break;
+    }
+  }
+  return;
+}
+
+Instruction* CodeGen::MakeInstruction(uint16_t code,
+                                      uint32_t k,
+                                      Instruction* next) {
+  // We can handle non-jumping instructions and "always" jumps. Both of
+  // them are followed by exactly one "next" instruction.
+  // We allow callers to defer specifying "next", but then they must call
+  // "joinInstructions" later.
+  if (BPF_CLASS(code) == BPF_JMP && BPF_OP(code) != BPF_JA) {
+    SANDBOX_DIE(
+        "Must provide both \"true\" and \"false\" branch "
+        "for a BPF_JMP");
+  }
+  if (next && BPF_CLASS(code) == BPF_RET) {
+    SANDBOX_DIE("Cannot append instructions after a return statement");
+  }
+  if (BPF_CLASS(code) == BPF_JMP) {
+    // "Always" jumps use the "true" branch target, only.
+    Instruction* insn = new Instruction(code, 0, next, NULL);
+    instructions_.push_back(insn);
+    return insn;
+  } else {
+    // Non-jumping instructions do not use any of the branch targets.
+    Instruction* insn = new Instruction(code, k, next);
+    instructions_.push_back(insn);
+    return insn;
+  }
+}
+
+Instruction* CodeGen::MakeInstruction(uint16_t code, const ErrorCode& err) {
+  if (BPF_CLASS(code) != BPF_RET) {
+    SANDBOX_DIE("ErrorCodes can only be used in return expressions");
+  }
+  if (err.error_type_ != ErrorCode::ET_SIMPLE &&
+      err.error_type_ != ErrorCode::ET_TRAP) {
+    SANDBOX_DIE("ErrorCode is not suitable for returning from a BPF program");
+  }
+  return MakeInstruction(code, err.err_);
+}
+
+Instruction* CodeGen::MakeInstruction(uint16_t code,
+                                      uint32_t k,
+                                      Instruction* jt,
+                                      Instruction* jf) {
+  // We can handle all conditional jumps. They are followed by both a
+  // "true" and a "false" branch.
+  if (BPF_CLASS(code) != BPF_JMP || BPF_OP(code) == BPF_JA) {
+    SANDBOX_DIE("Expected a BPF_JMP instruction");
+  }
+  if (!jt && !jf) {
+    // We allow callers to defer specifying exactly one of the branch
+    // targets. It must then be set later by calling "JoinInstructions".
+    SANDBOX_DIE("Branches must jump to a valid instruction");
+  }
+  Instruction* insn = new Instruction(code, k, jt, jf);
+  instructions_.push_back(insn);
+  return insn;
+}
+
+void CodeGen::JoinInstructions(Instruction* head, Instruction* tail) {
+  // Merge two instructions, or set the branch target for an "always" jump.
+  // This function should be called if the caller didn't initially provide
+  // a value for "next" when creating the instruction.
+  if (BPF_CLASS(head->code) == BPF_JMP) {
+    if (BPF_OP(head->code) == BPF_JA) {
+      if (head->jt_ptr) {
+        SANDBOX_DIE("Cannot append instructions in the middle of a sequence");
+      }
+      head->jt_ptr = tail;
+    } else {
+      if (!head->jt_ptr && head->jf_ptr) {
+        head->jt_ptr = tail;
+      } else if (!head->jf_ptr && head->jt_ptr) {
+        head->jf_ptr = tail;
+      } else {
+        SANDBOX_DIE("Cannot append instructions after a jump");
+      }
+    }
+  } else if (BPF_CLASS(head->code) == BPF_RET) {
+    SANDBOX_DIE("Cannot append instructions after a return statement");
+  } else if (head->next) {
+    SANDBOX_DIE("Cannot append instructions in the middle of a sequence");
+  } else {
+    head->next = tail;
+  }
+  return;
+}
+
+void CodeGen::Traverse(Instruction* instruction,
+                       void (*fnc)(Instruction*, void*),
+                       void* aux) {
+  std::set<Instruction*> visited;
+  TraverseRecursively(&visited, instruction);
+  for (std::set<Instruction*>::const_iterator iter = visited.begin();
+       iter != visited.end();
+       ++iter) {
+    fnc(*iter, aux);
+  }
+}
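As a sketch of the MakeInstruction()/JoinInstructions() contract described
above: a tiny "allow getpid, kill everything else" fragment that defers the
load instruction's "next" pointer and joins it afterwards. Illustrative
only; the real caller of this API is sandbox_bpf.cc.

    #include <asm/unistd.h>
    #include <linux/filter.h>
    #include <stddef.h>

    #include "sandbox/linux/seccomp-bpf/codegen.h"
    #include "sandbox/linux/seccomp-bpf/linux_seccomp.h"

    sandbox::Instruction* BuildTinyFilter(sandbox::CodeGen* gen) {
      using sandbox::Instruction;
      Instruction* allow =
          gen->MakeInstruction(BPF_RET + BPF_K, SECCOMP_RET_ALLOW, NULL);
      Instruction* kill =
          gen->MakeInstruction(BPF_RET + BPF_K, SECCOMP_RET_KILL, NULL);
      // A conditional jump must name both branch targets up front.
      Instruction* check = gen->MakeInstruction(
          BPF_JMP + BPF_JEQ + BPF_K, __NR_getpid, allow, kill);
      // "next" is deferred here (NULL)...
      Instruction* head = gen->MakeInstruction(
          BPF_LD + BPF_W + BPF_ABS, offsetof(struct arch_seccomp_data, nr),
          NULL);
      // ...so it must be joined explicitly before the graph is compiled.
      gen->JoinInstructions(head, check);
      return head;
    }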
+
+void CodeGen::FindBranchTargets(const Instruction& instructions,
+                                BranchTargets* branch_targets) {
+  // Follow all possible paths through the "instructions" graph and compute
+  // a list of branch targets. This will later be needed to compute the
+  // boundaries of basic blocks.
+  // We maintain a set of all instructions that we have previously seen. This
+  // set ultimately converges on all instructions in the program.
+  std::set<const Instruction*> seen_instructions;
+  Instructions stack;
+  for (const Instruction* insn = &instructions; insn;) {
+    seen_instructions.insert(insn);
+    if (BPF_CLASS(insn->code) == BPF_JMP) {
+      // Found a jump. Increase count of incoming edges for each of the jump
+      // targets.
+      ++(*branch_targets)[insn->jt_ptr];
+      if (BPF_OP(insn->code) != BPF_JA) {
+        ++(*branch_targets)[insn->jf_ptr];
+        stack.push_back(const_cast<Instruction*>(insn));
+      }
+      // Start a recursive descent for depth-first traversal.
+      if (seen_instructions.find(insn->jt_ptr) == seen_instructions.end()) {
+        // We haven't seen the "true" branch yet. Traverse it now. We have
+        // already remembered the "false" branch on the stack and will
+        // traverse it later.
+        insn = insn->jt_ptr;
+        continue;
+      } else {
+        // Now try traversing the "false" branch.
+        insn = NULL;
+      }
+    } else {
+      // This is a non-jump instruction, just continue to the next instruction
+      // (if any). It's OK if "insn" becomes NULL when reaching a return
+      // instruction.
+      if (!insn->next != (BPF_CLASS(insn->code) == BPF_RET)) {
+        SANDBOX_DIE(
+            "Internal compiler error; return instruction must be at "
+            "the end of the BPF program");
+      }
+      if (seen_instructions.find(insn->next) == seen_instructions.end()) {
+        insn = insn->next;
+      } else {
+        // We have seen this instruction before. That could happen if it is
+        // a branch target. No need to continue processing.
+        insn = NULL;
+      }
+    }
+    while (!insn && !stack.empty()) {
+      // We are done processing all the way to a leaf node, backtrack up the
+      // stack to any branches that we haven't processed yet. By definition,
+      // this has to be a "false" branch, as we always process the "true"
+      // branches right away.
+      insn = stack.back();
+      stack.pop_back();
+      if (seen_instructions.find(insn->jf_ptr) == seen_instructions.end()) {
+        // We haven't seen the "false" branch yet. So, that's where we'll
+        // go now.
+        insn = insn->jf_ptr;
+      } else {
+        // We have seen both the "true" and the "false" branch, continue
+        // up the stack.
+        if (seen_instructions.find(insn->jt_ptr) == seen_instructions.end()) {
+          SANDBOX_DIE(
+              "Internal compiler error; cannot find all "
+              "branch targets");
+        }
+        insn = NULL;
+      }
+    }
+  }
+  return;
+}
+
+BasicBlock* CodeGen::MakeBasicBlock(Instruction* head, Instruction* tail) {
+  // Iterate over all the instructions between "head" and "tail" and
+  // insert them into a new basic block.
+  BasicBlock* bb = new BasicBlock;
+  for (;; head = head->next) {
+    bb->instructions.push_back(head);
+    if (head == tail) {
+      break;
+    }
+    if (BPF_CLASS(head->code) == BPF_JMP) {
+      SANDBOX_DIE("Found a jump inside of a basic block");
+    }
+  }
+  basic_blocks_.push_back(bb);
+  return bb;
+}
+
+void CodeGen::AddBasicBlock(Instruction* head,
+                            Instruction* tail,
+                            const BranchTargets& branch_targets,
+                            TargetsToBlocks* basic_blocks,
+                            BasicBlock** first_block) {
+  // Add a new basic block to "basic_blocks". Also set "first_block", if it
+  // has not been set before.
+  BranchTargets::const_iterator iter = branch_targets.find(head);
+  if ((iter == branch_targets.end()) != !*first_block ||
+      !*first_block != basic_blocks->empty()) {
+    SANDBOX_DIE(
+        "Only the very first basic block should have no "
+        "incoming jumps");
+  }
+  BasicBlock* bb = MakeBasicBlock(head, tail);
+  if (!*first_block) {
+    *first_block = bb;
+  }
+  (*basic_blocks)[head] = bb;
+  return;
+}
+
+BasicBlock* CodeGen::CutGraphIntoBasicBlocks(
+    Instruction* instructions,
+    const BranchTargets& branch_targets,
+    TargetsToBlocks* basic_blocks) {
+  // Textbook implementation of a basic block generator. All basic blocks
+  // start with a branch target and end with either a return statement or
+  // a jump (or are followed by an instruction that forms the beginning of a
+  // new block). Both conditional and "always" jumps are supported.
+  BasicBlock* first_block = NULL;
+  std::set<const Instruction*> seen_instructions;
+  Instructions stack;
+  Instruction* tail = NULL;
+  Instruction* head = instructions;
+  for (Instruction* insn = head; insn;) {
+    if (seen_instructions.find(insn) != seen_instructions.end()) {
+      // We somehow went in a circle. This should never be possible. Not even
+      // cyclic graphs are supposed to confuse us this much.
+      SANDBOX_DIE("Internal compiler error; cannot compute basic blocks");
+    }
+    seen_instructions.insert(insn);
+    if (tail && branch_targets.find(insn) != branch_targets.end()) {
+      // We reached a branch target. Start a new basic block (this means,
+      // flushing the previous basic block first).
+      AddBasicBlock(head, tail, branch_targets, basic_blocks, &first_block);
+      head = insn;
+    }
+    if (BPF_CLASS(insn->code) == BPF_JMP) {
+      // We reached a jump instruction, this completes our current basic
+      // block. Flush it and continue by traversing both the true and the
+      // false branch of the jump. We need to maintain a stack to do so.
+      AddBasicBlock(head, insn, branch_targets, basic_blocks, &first_block);
+      if (BPF_OP(insn->code) != BPF_JA) {
+        stack.push_back(insn->jf_ptr);
+      }
+      insn = insn->jt_ptr;
+
+      // If we are jumping to an instruction that we have previously
+      // processed, we are done with this branch. Continue by backtracking
+      // up the stack.
+      while (seen_instructions.find(insn) != seen_instructions.end()) {
+      backtracking:
+        if (stack.empty()) {
+          // We successfully traversed all reachable instructions.
+          return first_block;
+        } else {
+          // Going up the stack.
+          insn = stack.back();
+          stack.pop_back();
+        }
+      }
+      // Starting a new basic block.
+      tail = NULL;
+      head = insn;
+    } else {
+      // We found a non-jumping instruction, append it to current basic
+      // block.
+      tail = insn;
+      insn = insn->next;
+      if (!insn) {
+        // We reached a return statement, flush the current basic block and
+        // backtrack up the stack.
+        AddBasicBlock(head, tail, branch_targets, basic_blocks, &first_block);
+        goto backtracking;
+      }
+    }
+  }
+  return first_block;
+}
+
+// We define a comparator that inspects the sequence of instructions in our
+// basic block and any blocks referenced by this block. This function can be
+// used in a "less" comparator for the purpose of storing pointers to basic
+// blocks in STL containers; this gives an easy option to use STL to find
+// shared tail sequences of basic blocks.
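+//
+// For example (illustrative): two blocks that each consist of "LD 42; RET 42"
+// compare equal (comparison stops at the RET), so MergeTails() below can
+// coalesce them; two blocks that merely share the "RET 42" suffix compare
+// unequal, because comparison always starts at the head of the block.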
+static int PointerCompare(const BasicBlock* block1,
+                          const BasicBlock* block2,
+                          const TargetsToBlocks& blocks) {
+  // Return <0, 0, or >0 depending on the ordering of "block1" and "block2".
+  // If we are looking at the exact same block, this is trivial and we don't
+  // need to do a full comparison.
+  if (block1 == block2) {
+    return 0;
+  }
+
+  // We compare the sequence of instructions in both basic blocks.
+  const Instructions& insns1 = block1->instructions;
+  const Instructions& insns2 = block2->instructions;
+  // Basic blocks should never be empty.
+  CHECK(!insns1.empty());
+  CHECK(!insns2.empty());
+
+  Instructions::const_iterator iter1 = insns1.begin();
+  Instructions::const_iterator iter2 = insns2.begin();
+  for (;; ++iter1, ++iter2) {
+    // If we have reached the end of the sequence of instructions in one or
+    // both basic blocks, we know the relative ordering between the two blocks
+    // and can return.
+    if (iter1 == insns1.end()) {
+      if (iter2 == insns2.end()) {
+        // If the two blocks are the same length (and have elementwise-equal
+        // code and k fields, which is the only way we can reach this point),
+        // and the last instruction isn't a JMP or a RET, then we must compare
+        // their successors.
+        Instruction* const insns1_last = insns1.back();
+        Instruction* const insns2_last = insns2.back();
+        if (BPF_CLASS(insns1_last->code) != BPF_JMP &&
+            BPF_CLASS(insns1_last->code) != BPF_RET) {
+          // Non-jumping instructions will always have a valid next instruction.
+          CHECK(insns1_last->next);
+          CHECK(insns2_last->next);
+          return PointerCompare(blocks.find(insns1_last->next)->second,
+                                blocks.find(insns2_last->next)->second,
+                                blocks);
+        } else {
+          return 0;
+        }
+      }
+      return -1;
+    } else if (iter2 == insns2.end()) {
+      return 1;
+    }
+
+    // Compare the individual fields for both instructions.
+    const Instruction& insn1 = **iter1;
+    const Instruction& insn2 = **iter2;
+    if (insn1.code == insn2.code) {
+      if (insn1.k == insn2.k) {
+        // Only conditional jump instructions use the jt_ptr and jf_ptr
+        // fields.
+        if (BPF_CLASS(insn1.code) == BPF_JMP) {
+          if (BPF_OP(insn1.code) != BPF_JA) {
+            // Recursively compare the "true" and "false" branches.
+            // A well-formed BPF program can't have any cycles, so we know
+            // that our recursive algorithm will ultimately terminate.
+            // In the unlikely event that the programmer made a mistake and
+            // went out of the way to give us a cyclic program, we will crash
+            // with a stack overflow. We are OK with that.
+            int c = PointerCompare(blocks.find(insn1.jt_ptr)->second,
+                                   blocks.find(insn2.jt_ptr)->second,
+                                   blocks);
+            if (c == 0) {
+              c = PointerCompare(blocks.find(insn1.jf_ptr)->second,
+                                 blocks.find(insn2.jf_ptr)->second,
+                                 blocks);
+              if (c == 0) {
+                continue;
+              } else {
+                return c;
+              }
+            } else {
+              return c;
+            }
+          } else {
+            int c = PointerCompare(blocks.find(insn1.jt_ptr)->second,
+                                   blocks.find(insn2.jt_ptr)->second,
+                                   blocks);
+            if (c == 0) {
+              continue;
+            } else {
+              return c;
+            }
+          }
+        } else {
+          continue;
+        }
+      } else {
+        return insn1.k - insn2.k;
+      }
+    } else {
+      return insn1.code - insn2.code;
+    }
+  }
+}
+
+void CodeGen::MergeTails(TargetsToBlocks* blocks) {
+  // We enter all of our basic blocks into a set using the BasicBlock::Less()
+  // comparator. This naturally results in blocks with identical tails of
+  // instructions to map to the same entry in the set. Whenever we discover
+  // that a particular chain of instructions is already in the set, we merge
+  // the basic blocks and update the pointer in the "blocks" map.
+  // N.B. We don't merge instructions on a granularity that is finer than
+  //      a basic block. In practice, this is sufficiently rare that we don't
+  //      incur a big cost.
+  //      Similarly, we currently don't merge anything other than tails. In
+  //      the future, we might decide to revisit this decision and attempt to
+  //      merge arbitrary sub-sequences of instructions.
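+  // Illustrative effect: in SampleProgramComplex() in codegen_unittest.cc,
+  // two separately constructed "LD 42; RET 42" blocks end up mapping to the
+  // same BasicBlock* after this function runs, while the serialized
+  // instruction stream stays unchanged.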
+  BasicBlock::Less<TargetsToBlocks> less(*blocks, PointerCompare);
+  typedef std::set<BasicBlock*, BasicBlock::Less<TargetsToBlocks> > Set;
+  Set seen_basic_blocks(less);
+  for (TargetsToBlocks::iterator iter = blocks->begin(); iter != blocks->end();
+       ++iter) {
+    BasicBlock* bb = iter->second;
+    Set::const_iterator entry = seen_basic_blocks.find(bb);
+    if (entry == seen_basic_blocks.end()) {
+      // This is the first time we see this particular sequence of
+      // instructions. Enter the basic block into the set of known
+      // basic blocks.
+      seen_basic_blocks.insert(bb);
+    } else {
+      // We have previously seen another basic block that defines the same
+      // sequence of instructions. Merge the two blocks and update the
+      // pointer in the "blocks" map.
+      iter->second = *entry;
+    }
+  }
+}
+
+void CodeGen::ComputeIncomingBranches(BasicBlock* block,
+                                      const TargetsToBlocks& targets_to_blocks,
+                                      IncomingBranches* incoming_branches) {
+  // We increment the number of incoming branches each time we encounter a
+  // basic block. But we only traverse recursively the very first time we
+  // encounter a new block. This is necessary to make topological sorting
+  // work correctly.
+  if (++(*incoming_branches)[block] == 1) {
+    Instruction* last_insn = block->instructions.back();
+    if (BPF_CLASS(last_insn->code) == BPF_JMP) {
+      ComputeIncomingBranches(targets_to_blocks.find(last_insn->jt_ptr)->second,
+                              targets_to_blocks,
+                              incoming_branches);
+      if (BPF_OP(last_insn->code) != BPF_JA) {
+        ComputeIncomingBranches(
+            targets_to_blocks.find(last_insn->jf_ptr)->second,
+            targets_to_blocks,
+            incoming_branches);
+      }
+    } else if (BPF_CLASS(last_insn->code) != BPF_RET) {
+      ComputeIncomingBranches(targets_to_blocks.find(last_insn->next)->second,
+                              targets_to_blocks,
+                              incoming_branches);
+    }
+  }
+}
+
+void CodeGen::TopoSortBasicBlocks(BasicBlock* first_block,
+                                  const TargetsToBlocks& blocks,
+                                  BasicBlocks* basic_blocks) {
+  // Textbook implementation of a toposort. We keep looking for basic blocks
+  // that don't have any incoming branches (initially, this is just the
+  // "first_block") and add them to the topologically sorted list of
+  // "basic_blocks". As we do so, we remove outgoing branches. This potentially
+  // ends up making our descendants eligible for the sorted list. The
+  // sorting algorithm terminates when there are no more basic blocks that have
+  // no incoming branches. If we didn't move all blocks from the set of
+  // "unordered_blocks" to the sorted list of "basic_blocks", there must have
+  // been a cyclic dependency. This should never happen in a BPF program, as
+  // well-formed BPF programs only ever have forward branches.
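+  // Sketch of the invariant (illustrative): a block becomes eligible for
+  // emission only once its count in "unordered_blocks" has dropped to zero,
+  // i.e. once every block that jumps or falls through to it has already
+  // been emitted. This is exactly what guarantees forward-only jumps.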
+  IncomingBranches unordered_blocks;
+  ComputeIncomingBranches(first_block, blocks, &unordered_blocks);
+
+  std::set<BasicBlock*> heads;
+  for (;;) {
+    // Move block from "unordered_blocks" to "basic_blocks".
+    basic_blocks->push_back(first_block);
+
+    // Inspect last instruction in the basic block. This is typically either a
+    // jump or a return statement. But it could also be a "normal" instruction
+    // that is followed by a jump target.
+    Instruction* last_insn = first_block->instructions.back();
+    if (BPF_CLASS(last_insn->code) == BPF_JMP) {
+      // Remove outgoing branches. This might end up moving our descendants
+      // into the set of "head" nodes that no longer have any incoming
+      // branches.
+      TargetsToBlocks::const_iterator iter;
+      if (BPF_OP(last_insn->code) != BPF_JA) {
+        iter = blocks.find(last_insn->jf_ptr);
+        if (!--unordered_blocks[iter->second]) {
+          heads.insert(iter->second);
+        }
+      }
+      iter = blocks.find(last_insn->jt_ptr);
+      if (!--unordered_blocks[iter->second]) {
+        first_block = iter->second;
+        continue;
+      }
+    } else if (BPF_CLASS(last_insn->code) != BPF_RET) {
+      // We encountered an instruction that doesn't change code flow. Try to
+      // pick the next "first_block" from "last_insn->next", if possible.
+      TargetsToBlocks::const_iterator iter;
+      iter = blocks.find(last_insn->next);
+      if (!--unordered_blocks[iter->second]) {
+        first_block = iter->second;
+        continue;
+      } else {
+        // Our basic block is supposed to be followed by "last_insn->next",
+        // but dependencies prevent this from happening. Insert a BPF_JA
+        // instruction to correct the code flow.
+        Instruction* ja = MakeInstruction(BPF_JMP + BPF_JA, 0, last_insn->next);
+        first_block->instructions.push_back(ja);
+        last_insn->next = ja;
+      }
+    }
+    if (heads.empty()) {
+      if (unordered_blocks.size() != basic_blocks->size()) {
+        SANDBOX_DIE("Internal compiler error; cyclic graph detected");
+      }
+      return;
+    }
+    // Proceed by picking an arbitrary node from the set of basic blocks that
+    // do not have any incoming branches.
+    first_block = *heads.begin();
+    heads.erase(heads.begin());
+  }
+}
+
+void CodeGen::ComputeRelativeJumps(BasicBlocks* basic_blocks,
+                                   const TargetsToBlocks& targets_to_blocks) {
+  // While we previously used pointers in jt_ptr and jf_ptr to link jump
+  // instructions to their targets, we now convert these jumps to relative
+  // jumps that are suitable for loading the BPF program into the kernel.
+  int offset = 0;
+
+  // Since we just completed a toposort, all jump targets are guaranteed to
+  // go forward. This means, iterating over the basic blocks in reverse makes
+  // it trivial to compute the correct offsets.
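+  // Worked example (illustrative): for sorted blocks [B0 B1 B2] with sizes
+  // 2, 3 and 1, the reverse iteration sets offset(B2)=1, offset(B1)=4 and
+  // offset(B0)=6, counting instructions from the end of the program. A
+  // conditional jump at the end of B0 targeting B2 is encoded while the
+  // running offset is still 4, so jt = 4 - offset(B2) = 3, i.e. skip the
+  // three instructions of B1.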
+  BasicBlock* bb = NULL;
+  BasicBlock* last_bb = NULL;
+  for (BasicBlocks::reverse_iterator iter = basic_blocks->rbegin();
+       iter != basic_blocks->rend();
+       ++iter) {
+    last_bb = bb;
+    bb = *iter;
+    Instruction* insn = bb->instructions.back();
+    if (BPF_CLASS(insn->code) == BPF_JMP) {
+      // Basic block ended in a jump instruction. We can now compute the
+      // appropriate offsets.
+      if (BPF_OP(insn->code) == BPF_JA) {
+        // "Always" jumps use the 32bit "k" field for the offset, instead
+        // of the 8bit "jt" and "jf" fields.
+        int jmp = offset - targets_to_blocks.find(insn->jt_ptr)->second->offset;
+        insn->k = jmp;
+        insn->jt = insn->jf = 0;
+      } else {
+        // The offset computations for conditional jumps are just the same
+        // as for "always" jumps.
+        int jt = offset - targets_to_blocks.find(insn->jt_ptr)->second->offset;
+        int jf = offset - targets_to_blocks.find(insn->jf_ptr)->second->offset;
+
+        // There is an added complication, because conditional relative jumps
+        // can only jump at most 255 instructions forward. If we have to jump
+        // further, insert an extra "always" jump.
+        Instructions::size_type jmp = bb->instructions.size();
+        if (jt > 255 || (jt == 255 && jf > 255)) {
+          Instruction* ja = MakeInstruction(BPF_JMP + BPF_JA, 0, insn->jt_ptr);
+          bb->instructions.push_back(ja);
+          ja->k = jt;
+          ja->jt = ja->jf = 0;
+
+          // The newly inserted "always" jump, of course, requires us to adjust
+          // the jump targets in the original conditional jump.
+          jt = 0;
+          ++jf;
+        }
+        if (jf > 255) {
+          Instruction* ja = MakeInstruction(BPF_JMP + BPF_JA, 0, insn->jf_ptr);
+          bb->instructions.insert(bb->instructions.begin() + jmp, ja);
+          ja->k = jf;
+          ja->jt = ja->jf = 0;
+
+          // Again, we have to adjust the jump targets in the original
+          // conditional jump.
+          ++jt;
+          jf = 0;
+        }
+
+        // Now we can finally set the relative jump targets in the conditional
+        // jump instruction. Afterwards, we must no longer access the jt_ptr
+        // and jf_ptr fields.
+        insn->jt = jt;
+        insn->jf = jf;
+      }
+    } else if (BPF_CLASS(insn->code) != BPF_RET &&
+               targets_to_blocks.find(insn->next)->second != last_bb) {
+      SANDBOX_DIE("Internal compiler error; invalid basic block encountered");
+    }
+
+    // Proceed to next basic block.
+    offset += bb->instructions.size();
+    bb->offset = offset;
+  }
+  return;
+}
+
+void CodeGen::ConcatenateBasicBlocks(const BasicBlocks& basic_blocks,
+                                     SandboxBPF::Program* program) {
+  // Our basic blocks have been sorted and relative jump offsets have been
+  // computed. The last remaining step is for all the instructions in our
+  // basic blocks to be concatenated into a BPF program.
+  program->clear();
+  for (BasicBlocks::const_iterator bb_iter = basic_blocks.begin();
+       bb_iter != basic_blocks.end();
+       ++bb_iter) {
+    const BasicBlock& bb = **bb_iter;
+    for (Instructions::const_iterator insn_iter = bb.instructions.begin();
+         insn_iter != bb.instructions.end();
+         ++insn_iter) {
+      const Instruction& insn = **insn_iter;
+      program->push_back(
+          (struct sock_filter) {insn.code, insn.jt, insn.jf, insn.k});
+    }
+  }
+  return;
+}
+
+void CodeGen::Compile(Instruction* instructions, SandboxBPF::Program* program) {
+  if (compiled_) {
+    SANDBOX_DIE(
+        "Cannot call Compile() multiple times. Create a new code "
+        "generator instead");
+  }
+  compiled_ = true;
+
+  BranchTargets branch_targets;
+  FindBranchTargets(*instructions, &branch_targets);
+  TargetsToBlocks all_blocks;
+  BasicBlock* first_block =
+      CutGraphIntoBasicBlocks(instructions, branch_targets, &all_blocks);
+  MergeTails(&all_blocks);
+  BasicBlocks basic_blocks;
+  TopoSortBasicBlocks(first_block, all_blocks, &basic_blocks);
+  ComputeRelativeJumps(&basic_blocks, all_blocks);
+  ConcatenateBasicBlocks(basic_blocks, program);
+  return;
+}
+
+}  // namespace sandbox
new file mode 100644
--- /dev/null
+++ b/security/sandbox/chromium/sandbox/linux/seccomp-bpf/codegen.h
@@ -0,0 +1,160 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_SECCOMP_BPF_CODEGEN_H__
+#define SANDBOX_LINUX_SECCOMP_BPF_CODEGEN_H__
+
+#include <map>
+#include <set>
+#include <vector>
+
+#include "sandbox/linux/sandbox_export.h"
+#include "sandbox/linux/seccomp-bpf/basicblock.h"
+#include "sandbox/linux/seccomp-bpf/instruction.h"
+#include "sandbox/linux/seccomp-bpf/sandbox_bpf.h"
+
+namespace sandbox {
+
+typedef std::vector<Instruction*> Instructions;
+typedef std::vector<BasicBlock*> BasicBlocks;
+typedef std::map<const Instruction*, int> BranchTargets;
+typedef std::map<const Instruction*, BasicBlock*> TargetsToBlocks;
+typedef std::map<const BasicBlock*, int> IncomingBranches;
+
+// The code generator instantiates a basic compiler that can convert a
+// graph of BPF instructions into a well-formed stream of BPF instructions.
+// Most notably, it ensures that jumps are always forward and don't exceed
+// the limit of 255 instructions imposed by the instruction set.
+//
+// Callers would typically create a new CodeGen object and then use it to
+// build a DAG of Instructions. They'll eventually call Compile() to convert
+// this DAG to a SandboxBPF::Program.
+//
+// Instructions can be chained at the time when they are created, or they
+// can be joined later by calling JoinInstructions().
+//
+//   CodeGen gen;
+//   Instruction *dag, *branch;
+//   branch =
+//     gen.MakeInstruction(BPF_JMP+BPF_JEQ+BPF_K, __NR_getpid,
+//                         Trap(GetPidHandler, NULL), NULL);
+//   dag =
+//     gen.MakeInstruction(BPF_LD+BPF_W+BPF_ABS,
+//                         offsetof(struct arch_seccomp_data, nr), branch);
+//   gen.JoinInstructions(branch,
+//     gen.MakeInstruction(BPF_RET+BPF_K, ErrorCode(ErrorCode::ERR_ALLOWED)));
+//
+//   // Simplified code follows; in practice, it is important to avoid calling
+//   // any C++ destructors after starting the sandbox.
+//   SandboxBPF::Program program;
+//   gen.Compile(dag, &program);
+//   const struct sock_fprog prog = {
+//     static_cast<unsigned short>(program.size()), &program[0] };
+//   prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
+//
+class SANDBOX_EXPORT CodeGen {
+ public:
+  CodeGen();
+  ~CodeGen();
+
+  // This is a helper method that can be used for debugging purposes. It is
+  // not normally called.
+  static void PrintProgram(const SandboxBPF::Program& program);
+
+  // Create a new instruction. Instructions form a DAG. The instruction objects
+  // are owned by the CodeGen object. They do not need to be explicitly
+  // deleted.
+  // For details on the possible parameters, refer to <linux/filter.h>.
+  Instruction* MakeInstruction(uint16_t code,
+                               uint32_t k,
+                               Instruction* next = NULL);
+  Instruction* MakeInstruction(uint16_t code, const ErrorCode& err);
+  Instruction* MakeInstruction(uint16_t code,
+                               uint32_t k,
+                               Instruction* jt,
+                               Instruction* jf);
+
+  // Join two (sequences of) instructions. This is useful if the "next"
+  // parameter was not originally given in the call to MakeInstruction(),
+  // or if a (conditional) jump still has an unsatisfied target.
+  void JoinInstructions(Instruction* head, Instruction* tail);
+
+  // Traverse the graph of instructions and visit each instruction once.
+  // Traversal order is implementation-defined. It is acceptable to make
+  // changes to the graph from within the callback function. These changes
+  // do not affect traversal.
+  // The "fnc" function gets called with both the instruction and the opaque
+  // "aux" pointer.
+  void Traverse(Instruction*, void (*fnc)(Instruction*, void* aux), void* aux);
+
+  // Compiles the graph of instructions into a BPF program that can be passed
+  // to the kernel. Please note that this function modifies the graph in place
+  // and must therefore only be called once per graph.
+  void Compile(Instruction* instructions, SandboxBPF::Program* program);
+
+ private:
+  friend class CodeGenUnittestHelper;
+
+  // Find all the instructions that are the target of BPF_JMPs.
+  void FindBranchTargets(const Instruction& instructions,
+                         BranchTargets* branch_targets);
+
+  // Combine instructions between "head" and "tail" into a new basic block.
+  // Basic blocks are defined as sequences of instructions whose only branch
+  // target is the very first instruction; furthermore, any BPF_JMP or BPF_RET
+  // instruction must be at the very end of the basic block.
+  BasicBlock* MakeBasicBlock(Instruction* head, Instruction* tail);
+
+  // Creates a basic block and adds it to "basic_blocks"; sets "first_block"
+  // if it is still NULL.
+  void AddBasicBlock(Instruction* head,
+                     Instruction* tail,
+                     const BranchTargets& branch_targets,
+                     TargetsToBlocks* basic_blocks,
+                     BasicBlock** first_block);
+
+  // Cuts the DAG of instructions into basic blocks.
+  BasicBlock* CutGraphIntoBasicBlocks(Instruction* instructions,
+                                      const BranchTargets& branch_targets,
+                                      TargetsToBlocks* blocks);
+
+  // Find common tail sequences of basic blocks and coalesce them.
+  void MergeTails(TargetsToBlocks* blocks);
+
+  // For each basic block, compute the number of incoming branches.
+  void ComputeIncomingBranches(BasicBlock* block,
+                               const TargetsToBlocks& targets_to_blocks,
+                               IncomingBranches* incoming_branches);
+
+  // Topologically sort the basic blocks so that all jumps are forward jumps.
+  // This is a requirement for any well-formed BPF program.
+  void TopoSortBasicBlocks(BasicBlock* first_block,
+                           const TargetsToBlocks& blocks,
+                           BasicBlocks* basic_blocks);
+
+  // Convert jt_ptr and jf_ptr fields in BPF_JMP instructions to valid
+  // jt and jf jump offsets. This can result in BPF_JA instructions being
+  // inserted, if we need to jump over more than 255 instructions.
+  void ComputeRelativeJumps(BasicBlocks* basic_blocks,
+                            const TargetsToBlocks& targets_to_blocks);
+
+  // Concatenate instructions from all basic blocks into a BPF program that
+  // can be passed to the kernel.
+  void ConcatenateBasicBlocks(const BasicBlocks&, SandboxBPF::Program* program);
+
+  // We stick all instructions and basic blocks into pools that get destroyed
+  // when the CodeGen object is destroyed. This way, we neither need to worry
+  // about explicitly managing ownership, nor do we need to worry about using
+  // smart pointers in the presence of circular references.
+  Instructions instructions_;
+  BasicBlocks basic_blocks_;
+
+  // Compile() must only ever be called once as it makes destructive changes
+  // to the DAG.
+  bool compiled_;
+};
+
+}  // namespace sandbox
+
+#endif  // SANDBOX_LINUX_SECCOMP_BPF_CODEGEN_H__
new file mode 100644
--- /dev/null
+++ b/security/sandbox/chromium/sandbox/linux/seccomp-bpf/codegen_unittest.cc
@@ -0,0 +1,538 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <errno.h>
+
+#include <algorithm>
+#include <set>
+#include <vector>
+
+#include "sandbox/linux/seccomp-bpf/codegen.h"
+#include "sandbox/linux/seccomp-bpf/sandbox_bpf.h"
+#include "sandbox/linux/tests/unit_tests.h"
+
+namespace sandbox {
+
+class SandboxUnittestHelper : public SandboxBPF {
+ public:
+  typedef SandboxBPF::Program Program;
+};
+
+// We want to access some of the private methods in the code generator. We
+// do so by defining a "friend" that makes these methods public for us.
+class CodeGenUnittestHelper : public CodeGen {
+ public:
+  void FindBranchTargets(const Instruction& instructions,
+                         BranchTargets* branch_targets) {
+    CodeGen::FindBranchTargets(instructions, branch_targets);
+  }
+
+  BasicBlock* CutGraphIntoBasicBlocks(Instruction* insns,
+                                      const BranchTargets& branch_targets,
+                                      TargetsToBlocks* blocks) {
+    return CodeGen::CutGraphIntoBasicBlocks(insns, branch_targets, blocks);
+  }
+
+  void MergeTails(TargetsToBlocks* blocks) { CodeGen::MergeTails(blocks); }
+};
+
+enum {
+  NO_FLAGS = 0x0000,
+  HAS_MERGEABLE_TAILS = 0x0001,
+};
+
+Instruction* SampleProgramOneInstruction(CodeGen* codegen, int* flags) {
+  // Create the most basic valid BPF program:
+  //    RET ERR_ALLOWED
+  *flags = NO_FLAGS;
+  return codegen->MakeInstruction(BPF_RET + BPF_K,
+                                  ErrorCode(ErrorCode::ERR_ALLOWED));
+}
+
+Instruction* SampleProgramSimpleBranch(CodeGen* codegen, int* flags) {
+  // Create a program with a single branch:
+  //    JUMP if eq 42 then $0 else $1
+  // 0: RET EPERM
+  // 1: RET ERR_ALLOWED
+  *flags = NO_FLAGS;
+  return codegen->MakeInstruction(
+      BPF_JMP + BPF_JEQ + BPF_K,
+      42,
+      codegen->MakeInstruction(BPF_RET + BPF_K, ErrorCode(EPERM)),
+      codegen->MakeInstruction(BPF_RET + BPF_K,
+                               ErrorCode(ErrorCode::ERR_ALLOWED)));
+}
+
+Instruction* SampleProgramAtypicalBranch(CodeGen* codegen, int* flags) {
+  // Create a program with a single branch:
+  //    JUMP if eq 42 then $0 else $0
+  // 0: RET ERR_ALLOWED
+
+  // N.B.: As the instructions on both sides of the branch are already
+  //       the same object, we do not actually have any mergeable tails.
+  //       This needs to be reflected in our choice of "flags".
+  *flags = NO_FLAGS;
+
+  Instruction* ret = codegen->MakeInstruction(
+      BPF_RET + BPF_K, ErrorCode(ErrorCode::ERR_ALLOWED));
+  return codegen->MakeInstruction(BPF_JMP + BPF_JEQ + BPF_K, 42, ret, ret);
+}
+
+Instruction* SampleProgramComplex(CodeGen* codegen, int* flags) {
+  // Creates a basic BPF program that we'll use to test some of the code:
+  //    JUMP if eq 42 then $0 else $1    (insn6)
+  // 0: LD 23                            (insn5)
+  // 1: JUMP if eq 42 then $2 else $4    (insn4)
+  // 2: JUMP to $3                       (insn1)
+  // 3: LD 42                            (insn0)
+  //    RET ErrorCode(42)                (insn2)
+  // 4: LD 42                            (insn3)
+  //    RET ErrorCode(42)                (insn3+)
+  *flags = HAS_MERGEABLE_TAILS;
+
+  Instruction* insn0 = codegen->MakeInstruction(BPF_LD + BPF_W + BPF_ABS, 42);
+  SANDBOX_ASSERT(insn0);
+  SANDBOX_ASSERT(insn0->code == BPF_LD + BPF_W + BPF_ABS);
+  SANDBOX_ASSERT(insn0->k == 42);
+  SANDBOX_ASSERT(insn0->next == NULL);
+
+  Instruction* insn1 = codegen->MakeInstruction(BPF_JMP + BPF_JA, 0, insn0);
+  SANDBOX_ASSERT(insn1);
+  SANDBOX_ASSERT(insn1->code == BPF_JMP + BPF_JA);
+  SANDBOX_ASSERT(insn1->jt_ptr == insn0);
+
+  Instruction* insn2 = codegen->MakeInstruction(BPF_RET + BPF_K, ErrorCode(42));
+  SANDBOX_ASSERT(insn2);
+  SANDBOX_ASSERT(insn2->code == BPF_RET + BPF_K);
+  SANDBOX_ASSERT(insn2->next == NULL);
+
+  // We explicitly duplicate instructions so that MergeTails() can coalesce
+  // them later.
+  Instruction* insn3 = codegen->MakeInstruction(
+      BPF_LD + BPF_W + BPF_ABS,
+      42,
+      codegen->MakeInstruction(BPF_RET + BPF_K, ErrorCode(42)));
+
+  Instruction* insn4 =
+      codegen->MakeInstruction(BPF_JMP + BPF_JEQ + BPF_K, 42, insn1, insn3);
+  SANDBOX_ASSERT(insn4);
+  SANDBOX_ASSERT(insn4->code == BPF_JMP + BPF_JEQ + BPF_K);
+  SANDBOX_ASSERT(insn4->k == 42);
+  SANDBOX_ASSERT(insn4->jt_ptr == insn1);
+  SANDBOX_ASSERT(insn4->jf_ptr == insn3);
+
+  codegen->JoinInstructions(insn0, insn2);
+  SANDBOX_ASSERT(insn0->next == insn2);
+
+  Instruction* insn5 =
+      codegen->MakeInstruction(BPF_LD + BPF_W + BPF_ABS, 23, insn4);
+  SANDBOX_ASSERT(insn5);
+  SANDBOX_ASSERT(insn5->code == BPF_LD + BPF_W + BPF_ABS);
+  SANDBOX_ASSERT(insn5->k == 23);
+  SANDBOX_ASSERT(insn5->next == insn4);
+
+  // Force a basic block that ends in neither a jump instruction nor a return
+  // instruction. It only contains "insn5". This exercises one of the less
+  // common code paths in the topo-sort algorithm.
+  // This also gives us a diamond-shaped pattern in our graph, which stresses
+  // another aspect of the topo-sort algorithm (namely, the ability to
+  // correctly count the incoming branches for subtrees that are not disjoint).
+  Instruction* insn6 =
+      codegen->MakeInstruction(BPF_JMP + BPF_JEQ + BPF_K, 42, insn5, insn4);
+
+  return insn6;
+}
+
+Instruction* SampleProgramConfusingTails(CodeGen* codegen, int* flags) {
+  // This simple program demonstrates https://crbug.com/351103/
+  // The two "LOAD 0" instructions are blocks of their own. MergeTails() could
+  // be tempted to merge them since they are the same. However, they are
+  // not mergeable, because they fall through to blocks that are not
+  // semantically equivalent.
+  // Without the fix for this bug, this program should trigger the check in
+  // CompileAndCompare: the serialized graphs from the program and its compiled
+  // version will differ.
+  //
+  //  0) LOAD 1  // ???
+  //  1) if A == 0x1; then JMP 2 else JMP 3
+  //  2) LOAD 0  // System call number
+  //  3) if A == 0x2; then JMP 4 else JMP 5
+  //  4) LOAD 0  // System call number
+  //  5) if A == 0x1; then JMP 6 else JMP 7
+  //  6) RET 0x50000  // errno = 0
+  //  7) RET 0x50001  // errno = 1
+  *flags = NO_FLAGS;
+
+  Instruction* i7 = codegen->MakeInstruction(BPF_RET, ErrorCode(1));
+  Instruction* i6 = codegen->MakeInstruction(BPF_RET, ErrorCode(0));
+  Instruction* i5 =
+      codegen->MakeInstruction(BPF_JMP + BPF_JEQ + BPF_K, 1, i6, i7);
+  Instruction* i4 = codegen->MakeInstruction(BPF_LD + BPF_W + BPF_ABS, 0, i5);
+  Instruction* i3 =
+      codegen->MakeInstruction(BPF_JMP + BPF_JEQ + BPF_K, 2, i4, i5);
+  Instruction* i2 = codegen->MakeInstruction(BPF_LD + BPF_W + BPF_ABS, 0, i3);
+  Instruction* i1 =
+      codegen->MakeInstruction(BPF_JMP + BPF_JEQ + BPF_K, 1, i2, i3);
+  Instruction* i0 = codegen->MakeInstruction(BPF_LD + BPF_W + BPF_ABS, 1, i1);
+
+  return i0;
+}
+
+Instruction* SampleProgramConfusingTailsBasic(CodeGen* codegen, int* flags) {
+  // Without the fix for https://crbug.com/351103/, (see
+  // SampleProgramConfusingTails()), this would generate a cyclic graph and
+  // crash as the two "LOAD 0" instructions would get merged.
+  //
+  // 0) LOAD 1  // ???
+  // 1) if A == 0x1; then JMP 2 else JMP 3
+  // 2) LOAD 0  // System call number
+  // 3) if A == 0x2; then JMP 4 else JMP 5
+  // 4) LOAD 0  // System call number
+  // 5) RET 0x50001  // errno = 1
+  *flags = NO_FLAGS;
+
+  Instruction* i5 = codegen->MakeInstruction(BPF_RET, ErrorCode(1));
+  Instruction* i4 = codegen->MakeInstruction(BPF_LD + BPF_W + BPF_ABS, 0, i5);
+  Instruction* i3 =
+      codegen->MakeInstruction(BPF_JMP + BPF_JEQ + BPF_K, 2, i4, i5);
+  Instruction* i2 = codegen->MakeInstruction(BPF_LD + BPF_W + BPF_ABS, 0, i3);
+  Instruction* i1 =
+      codegen->MakeInstruction(BPF_JMP + BPF_JEQ + BPF_K, 1, i2, i3);
+  Instruction* i0 = codegen->MakeInstruction(BPF_LD + BPF_W + BPF_ABS, 1, i1);
+
+  return i0;
+}
+
+Instruction* SampleProgramConfusingTailsMergeable(CodeGen* codegen,
+                                                  int* flags) {
+  // This is similar to SampleProgramConfusingTails(), except that
+  // instructions 2 and 4 are now RET instructions.
+  // In PointerCompare(), this exercises the path where two blocks are of the
+  // same length and identical and the last instruction is a JMP or RET, so the
+  // following blocks don't need to be looked at and the blocks are mergeable.
+  //
+  // 0) LOAD 1  // ???
+  // 1) if A == 0x1; then JMP 2 else JMP 3
+  // 2) RET 0x5002a  // errno = 42
+  // 3) if A == 0x2; then JMP 4 else JMP 5
+  // 4) RET 0x5002a  // errno = 42
+  // 5) if A == 0x1; then JMP 6 else JMP 7
+  // 6) RET 0x50000  // errno = 0
+  // 7) RET 0x50001  // errno = 1
+  *flags = HAS_MERGEABLE_TAILS;
+
+  Instruction* i7 = codegen->MakeInstruction(BPF_RET, ErrorCode(1));
+  Instruction* i6 = codegen->MakeInstruction(BPF_RET, ErrorCode(0));
+  Instruction* i5 =
+      codegen->MakeInstruction(BPF_JMP + BPF_JEQ + BPF_K, 1, i6, i7);
+  Instruction* i4 = codegen->MakeInstruction(BPF_RET, ErrorCode(42));
+  Instruction* i3 =
+      codegen->MakeInstruction(BPF_JMP + BPF_JEQ + BPF_K, 2, i4, i5);
+  Instruction* i2 = codegen->MakeInstruction(BPF_RET, ErrorCode(42));
+  Instruction* i1 =
+      codegen->MakeInstruction(BPF_JMP + BPF_JEQ + BPF_K, 1, i2, i3);
+  Instruction* i0 = codegen->MakeInstruction(BPF_LD + BPF_W + BPF_ABS, 1, i1);
+
+  return i0;
+}
+
+void ForAllPrograms(void (*test)(CodeGenUnittestHelper*, Instruction*, int)) {
+  Instruction* (*function_table[])(CodeGen* codegen, int* flags) = {
+    SampleProgramOneInstruction,
+    SampleProgramSimpleBranch,
+    SampleProgramAtypicalBranch,
+    SampleProgramComplex,
+    SampleProgramConfusingTails,
+    SampleProgramConfusingTailsBasic,
+    SampleProgramConfusingTailsMergeable,
+  };
+
+  for (size_t i = 0; i < arraysize(function_table); ++i) {
+    CodeGenUnittestHelper codegen;
+    int flags = NO_FLAGS;
+    Instruction* prg = function_table[i](&codegen, &flags);
+    test(&codegen, prg, flags);
+  }
+}
+
+void MakeInstruction(CodeGenUnittestHelper* codegen,
+                     Instruction* program, int) {
+  // Nothing to do here
+}
+
+SANDBOX_TEST(CodeGen, MakeInstruction) {
+  ForAllPrograms(MakeInstruction);
+}
+
+void FindBranchTargets(CodeGenUnittestHelper* codegen, Instruction* prg, int) {
+  BranchTargets branch_targets;
+  codegen->FindBranchTargets(*prg, &branch_targets);
+
+  // Verify the general properties that should hold for every well-formed
+  // BPF program.
+  // Perform a depth-first traversal of the BPF program and verify that all
+  // targets of BPF_JMP instructions are represented in the "branch_targets".
+  // At the same time, compute a set of both the branch targets and all the
+  // instructions in the program.
+  std::vector<Instruction*> stack;
+  std::set<Instruction*> all_instructions;
+  std::set<Instruction*> target_instructions;
+  BranchTargets::const_iterator end = branch_targets.end();
+  for (Instruction* insn = prg;;) {
+    all_instructions.insert(insn);
+    if (BPF_CLASS(insn->code) == BPF_JMP) {
+      target_instructions.insert(insn->jt_ptr);
+      SANDBOX_ASSERT(insn->jt_ptr != NULL);
+      SANDBOX_ASSERT(branch_targets.find(insn->jt_ptr) != end);
+      if (BPF_OP(insn->code) != BPF_JA) {
+        target_instructions.insert(insn->jf_ptr);
+        SANDBOX_ASSERT(insn->jf_ptr != NULL);
+        SANDBOX_ASSERT(branch_targets.find(insn->jf_ptr) != end);
+        stack.push_back(insn->jf_ptr);
+      }
+      insn = insn->jt_ptr;
+    } else if (BPF_CLASS(insn->code) == BPF_RET) {
+      SANDBOX_ASSERT(insn->next == NULL);
+      if (stack.empty()) {
+        break;
+      }
+      insn = stack.back();
+      stack.pop_back();
+    } else {
+      SANDBOX_ASSERT(insn->next != NULL);
+      insn = insn->next;
+    }
+  }
+  SANDBOX_ASSERT(target_instructions.size() == branch_targets.size());
+
+  // We can now subtract the set of the branch targets from the set of all
+  // instructions. This gives us a set with the instructions that nobody
+  // ever jumps to. Verify that they are not included in the
+  // "branch_targets" that FindBranchTargets() computed for us.
+  Instructions non_target_instructions(all_instructions.size() -
+                                       target_instructions.size());
+  set_difference(all_instructions.begin(),
+                 all_instructions.end(),
+                 target_instructions.begin(),
+                 target_instructions.end(),
+                 non_target_instructions.begin());
+  for (Instructions::const_iterator iter = non_target_instructions.begin();
+       iter != non_target_instructions.end();
+       ++iter) {
+    SANDBOX_ASSERT(branch_targets.find(*iter) == end);
+  }
+}
+
+SANDBOX_TEST(CodeGen, FindBranchTargets) { ForAllPrograms(FindBranchTargets); }
+
+void CutGraphIntoBasicBlocks(CodeGenUnittestHelper* codegen,
+                             Instruction* prg,
+                             int) {
+  BranchTargets branch_targets;
+  codegen->FindBranchTargets(*prg, &branch_targets);
+  TargetsToBlocks all_blocks;
+  BasicBlock* first_block =
+      codegen->CutGraphIntoBasicBlocks(prg, branch_targets, &all_blocks);
+  SANDBOX_ASSERT(first_block != NULL);
+  SANDBOX_ASSERT(first_block->instructions.size() > 0);
+  Instruction* first_insn = first_block->instructions[0];
+
+  // Basic blocks are supposed to start with a branch target and end with
+  // either a jump or a return instruction. It can also end, if the next
+  // instruction forms the beginning of a new basic block. There should be
+  // no other jumps or return instructions in the middle of a basic block.
+  for (TargetsToBlocks::const_iterator bb_iter = all_blocks.begin();
+       bb_iter != all_blocks.end();
+       ++bb_iter) {
+    BasicBlock* bb = bb_iter->second;
+    SANDBOX_ASSERT(bb != NULL);
+    SANDBOX_ASSERT(bb->instructions.size() > 0);
+    Instruction* insn = bb->instructions[0];
+    SANDBOX_ASSERT(insn == first_insn ||
+                   branch_targets.find(insn) != branch_targets.end());
+    for (Instructions::const_iterator insn_iter = bb->instructions.begin();;) {
+      insn = *insn_iter;
+      if (++insn_iter != bb->instructions.end()) {
+        SANDBOX_ASSERT(BPF_CLASS(insn->code) != BPF_JMP);
+        SANDBOX_ASSERT(BPF_CLASS(insn->code) != BPF_RET);
+      } else {
+        SANDBOX_ASSERT(BPF_CLASS(insn->code) == BPF_JMP ||
+                       BPF_CLASS(insn->code) == BPF_RET ||
+                       branch_targets.find(insn->next) != branch_targets.end());
+        break;
+      }
+      SANDBOX_ASSERT(branch_targets.find(*insn_iter) == branch_targets.end());
+    }
+  }
+}
+
+SANDBOX_TEST(CodeGen, CutGraphIntoBasicBlocks) {
+  ForAllPrograms(CutGraphIntoBasicBlocks);
+}
+
+void MergeTails(CodeGenUnittestHelper* codegen, Instruction* prg, int flags) {
+  BranchTargets branch_targets;
+  codegen->FindBranchTargets(*prg, &branch_targets);
+  TargetsToBlocks all_blocks;
+  BasicBlock* first_block =
+      codegen->CutGraphIntoBasicBlocks(prg, branch_targets, &all_blocks);
+
+  // The shape of our graph and thus the function of our program should
+  // still be unchanged after we run MergeTails(). We verify this by
+  // serializing the graph and verifying that it is still the same.
+  // We also verify that at least some of the edges changed because of
+  // tail merging.
+  std::string graph[2];
+  std::string edges[2];
+
+  // The loop executes twice. After the first run, we call MergeTails() on
+  // our graph.
+  for (int i = 0;;) {
+    // Traverse the entire program in depth-first order.
+    std::vector<BasicBlock*> stack;
+    for (BasicBlock* bb = first_block;;) {
+      // Serialize the instructions in this basic block. In general, we only
+      // need to serialize "code" and "k", except for BPF_JA instructions,
+      // whose "k" field has not been set yet at this stage.
+      // The stream of instructions should be unchanged after MergeTails().
+      for (Instructions::const_iterator iter = bb->instructions.begin();
+           iter != bb->instructions.end();
+           ++iter) {
+        graph[i].append(reinterpret_cast<char*>(&(*iter)->code),
+                        sizeof((*iter)->code));
+        if (BPF_CLASS((*iter)->code) != BPF_JMP ||
+            BPF_OP((*iter)->code) != BPF_JA) {
+          graph[i].append(reinterpret_cast<char*>(&(*iter)->k),
+                          sizeof((*iter)->k));
+        }
+      }
+
+      // Also serialize the addresses of the basic blocks as we encounter
+      // them. These will change as basic blocks are coalesced by MergeTails().
+      edges[i].append(reinterpret_cast<char*>(&bb), sizeof(bb));
+
+      // Depth-first traversal of the graph. We only ever need to look at the
+      // very last instruction in the basic block, as that is the only one that
+      // can change code flow.
+      Instruction* insn = bb->instructions.back();
+      if (BPF_CLASS(insn->code) == BPF_JMP) {
+        // For jump instructions, we need to remember the "false" branch while
+        // traversing the "true" branch. This is not necessary for BPF_JA which
+        // only has a single branch.
+        if (BPF_OP(insn->code) != BPF_JA) {
+          stack.push_back(all_blocks[insn->jf_ptr]);
+        }
+        bb = all_blocks[insn->jt_ptr];
+      } else if (BPF_CLASS(insn->code) == BPF_RET) {
+        // After a BPF_RET, see if we need to backtrack.
+        if (stack.empty()) {
+          break;
+        }
+        bb = stack.back();
+        stack.pop_back();
+      } else {
+        // For "normal" instructions, just follow to the next basic block.
+        bb = all_blocks[insn->next];
+      }
+    }
+
+    // Our loop runs exactly two times.
+    if (++i > 1) {
+      break;
+    }
+    codegen->MergeTails(&all_blocks);
+  }
+  SANDBOX_ASSERT(graph[0] == graph[1]);
+  if (flags & HAS_MERGEABLE_TAILS) {
+    SANDBOX_ASSERT(edges[0] != edges[1]);
+  } else {
+    SANDBOX_ASSERT(edges[0] == edges[1]);
+  }
+}
+
+SANDBOX_TEST(CodeGen, MergeTails) {
+  ForAllPrograms(MergeTails);
+}
+
+void CompileAndCompare(CodeGenUnittestHelper* codegen, Instruction* prg, int) {
+  // TopoSortBasicBlocks() has internal checks that cause it to fail if it
+  // detects a problem. Typically, if anything goes wrong, this looks to the
+  // TopoSort algorithm as if there had been cycles in the input data.
+  // This provides a pretty good unittest.
+  // We hand-crafted the programs returned by the SampleProgram*() functions
+  // to exercise several of the more interesting code-paths. See comments in
+  // those functions for details.
+  // In addition to relying on the internal consistency checks in the compiler,
+  // we also serialize the graph and the resulting BPF program and compare
+  // them. With the exception of BPF_JA instructions that might have been
+  // inserted, both instruction streams should be equivalent.
+  // As Compile() modifies the instructions, we have to serialize the graph
+  // before calling Compile().
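+  // Sketch (illustrative): for SampleProgramSimpleBranch(), both the graph
+  // walk below and the walk over the compiled program yield the sequence
+  // "JEQ/42, RET/EPERM, RET/ERR_ALLOWED" (code and k pairs), so "source"
+  // and "assembly" compare equal.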
+  std::string source;
+  Instructions source_stack;
+  for (const Instruction* insn = prg, *next; insn; insn = next) {
+    if (BPF_CLASS(insn->code) == BPF_JMP) {
+      if (BPF_OP(insn->code) == BPF_JA) {
+        // Do not serialize BPF_JA instructions (see above).
+        next = insn->jt_ptr;
+        continue;
+      } else {
+        source_stack.push_back(insn->jf_ptr);
+        next = insn->jt_ptr;
+      }
+    } else if (BPF_CLASS(insn->code) == BPF_RET) {
+      if (source_stack.empty()) {
+        next = NULL;
+      } else {
+        next = source_stack.back();
+        source_stack.pop_back();
+      }
+    } else {
+      next = insn->next;
+    }
+    // Only serialize "code" and "k". That's all the information we need to
+    // compare. The rest of the information is encoded in the order of
+    // instructions.
+    source.append(reinterpret_cast<const char*>(&insn->code),
+                  sizeof(insn->code));
+    source.append(reinterpret_cast<const char*>(&insn->k), sizeof(insn->k));
+  }
+
+  // Compile the program
+  SandboxUnittestHelper::Program bpf;
+  codegen->Compile(prg, &bpf);
+
+  // Serialize the resulting BPF instructions.
+  std::string assembly;
+  std::vector<int> assembly_stack;
+  for (int idx = 0; idx >= 0;) {
+    SANDBOX_ASSERT(idx < static_cast<int>(bpf.size()));
+    struct sock_filter& insn = bpf[idx];
+    if (BPF_CLASS(insn.code) == BPF_JMP) {
+      if (BPF_OP(insn.code) == BPF_JA) {
+        // Do not serialize BPF_JA instructions (see above).
+        idx += insn.k + 1;
+        continue;
+      } else {
+        assembly_stack.push_back(idx + insn.jf + 1);
+        idx += insn.jt + 1;
+      }
+    } else if (BPF_CLASS(insn.code) == BPF_RET) {
+      if (assembly_stack.empty()) {
+        idx = -1;
+      } else {
+        idx = assembly_stack.back();
+        assembly_stack.pop_back();
+      }
+    } else {
+      ++idx;
+    }
+    // Serialize the same information that we serialized before compilation.
+    assembly.append(reinterpret_cast<char*>(&insn.code), sizeof(insn.code));
+    assembly.append(reinterpret_cast<char*>(&insn.k), sizeof(insn.k));
+  }
+  SANDBOX_ASSERT(source == assembly);
+}
+
+SANDBOX_TEST(CodeGen, All) {
+  ForAllPrograms(CompileAndCompare);
+}
+
+}  // namespace sandbox
new file mode 100644
--- /dev/null
+++ b/security/sandbox/chromium/sandbox/linux/seccomp-bpf/demo.cc
@@ -0,0 +1,529 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <errno.h>
+#include <fcntl.h>
+#include <linux/unistd.h>
+#include <netinet/in.h>
+#include <netinet/tcp.h>
+#include <netinet/udp.h>
+#include <pthread.h>
+#include <signal.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <sys/ipc.h>
+#include <sys/mman.h>
+#include <sys/prctl.h>
+#include <sys/resource.h>
+#include <sys/shm.h>
+#include <sys/socket.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <time.h>
+#include <unistd.h>
+
+#include "base/posix/eintr_wrapper.h"
+#include "sandbox/linux/seccomp-bpf/sandbox_bpf.h"
+#include "sandbox/linux/services/linux_syscalls.h"
+
+using sandbox::ErrorCode;
+using sandbox::SandboxBPF;
+using sandbox::arch_seccomp_data;
+
+#define ERR EPERM
+
+// We don't expect our sandbox to do anything useful yet. So, we will fail
+// almost immediately. For now, force the code to continue running. The
+// following line should be removed as soon as the sandbox is starting to
+// actually enforce restrictions in a meaningful way:
+#define _exit(x) do { } while (0)
+
+namespace {
+
+bool SendFds(int transport, const void *buf, size_t len, ...) {
+  int count = 0;
+  va_list ap;
+  va_start(ap, len);
+  while (va_arg(ap, int) >= 0) {
+    ++count;
+  }
+  va_end(ap);
+  if (!count) {
+    return false;
+  }
+  char cmsg_buf[CMSG_SPACE(count*sizeof(int))];
+  memset(cmsg_buf, 0, sizeof(cmsg_buf));
+  struct iovec  iov[2] = { { 0 } };
+  struct msghdr msg    = { 0 };
+  int dummy            = 0;
+  iov[0].iov_base      = &dummy;
+  iov[0].iov_len       = sizeof(dummy);
+  if (buf && len > 0) {
+    iov[1].iov_base    = const_cast<void *>(buf);
+    iov[1].iov_len     = len;
+  }
+  msg.msg_iov          = iov;
+  msg.msg_iovlen       = (buf && len > 0) ? 2 : 1;
+  msg.msg_control      = cmsg_buf;
+  msg.msg_controllen   = CMSG_LEN(count*sizeof(int));
+  struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
+  cmsg->cmsg_level     = SOL_SOCKET;
+  cmsg->cmsg_type      = SCM_RIGHTS;
+  cmsg->cmsg_len       = CMSG_LEN(count*sizeof(int));
+  va_start(ap, len);
+  for (int i = 0, fd; (fd = va_arg(ap, int)) >= 0; ++i) {
+    (reinterpret_cast<int *>(CMSG_DATA(cmsg)))[i] = fd;
+  }
+  va_end(ap);
+  return sendmsg(transport, &msg, 0) ==
+      static_cast<ssize_t>(sizeof(dummy) + ((buf && len > 0) ? len : 0));
+}
+
+bool GetFds(int transport, void *buf, size_t *len, ...) {
+  int count = 0;
+  va_list ap;
+  va_start(ap, len);
+  for (int *fd; (fd = va_arg(ap, int *)) != NULL; ++count) {
+    *fd = -1;
+  }
+  va_end(ap);
+  if (!count) {
+    return false;
+  }
+  char cmsg_buf[CMSG_SPACE(count*sizeof(int))];
+  memset(cmsg_buf, 0, sizeof(cmsg_buf));
+  struct iovec iov[2] = { { 0 } };
+  struct msghdr msg   = { 0 };
+  int err;
+  iov[0].iov_base     = &err;
+  iov[0].iov_len      = sizeof(int);
+  if (buf && len && *len > 0) {
+    iov[1].iov_base   = buf;
+    iov[1].iov_len    = *len;
+  }
+  msg.msg_iov         = iov;
+  msg.msg_iovlen      = (buf && len && *len > 0) ? 2 : 1;
+  msg.msg_control     = cmsg_buf;
+  msg.msg_controllen  = CMSG_LEN(count*sizeof(int));
+  ssize_t bytes = recvmsg(transport, &msg, 0);
+  if (len) {
+    *len = bytes > static_cast<int>(sizeof(int)) ? bytes - sizeof(int) : 0;
+  }
+  if (bytes != static_cast<ssize_t>(sizeof(int) + iov[1].iov_len)) {
+    if (bytes >= 0) {
+      errno = 0;
+    }
+    return false;
+  }
+  if (err) {
+    // "err" is the first four bytes of the payload. If these are non-zero,
+    // the sender on the other side of the socketpair sent us an errno value.
+    // We don't expect to get any file handles in this case.
+    errno = err;
+    return false;
+  }
+  struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
+  if ((msg.msg_flags & (MSG_TRUNC|MSG_CTRUNC)) ||
+      !cmsg                                    ||
+      cmsg->cmsg_level != SOL_SOCKET           ||
+      cmsg->cmsg_type  != SCM_RIGHTS           ||
+      cmsg->cmsg_len   != CMSG_LEN(count*sizeof(int))) {
+    errno = EBADF;
+    return false;
+  }
+  va_start(ap, len);
+  for (int *fd, i = 0; (fd = va_arg(ap, int *)) != NULL; ++i) {
+    *fd = (reinterpret_cast<int *>(CMSG_DATA(cmsg)))[i];
+  }
+  va_end(ap);
+  return true;
+}
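+// Illustrative usage of SendFds()/GetFds() (mirrors SendmsgStressThreadFnc()
+// below): the sender calls SendFds(transport, "test", 4, fd, -1) and the
+// receiver calls GetFds(transport, buf, &len, &fd, NULL). The variadic fd
+// lists are terminated by -1 when sending and by NULL when receiving.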
+
+
+// POSIX doesn't define any async-signal-safe function for converting
+// an integer to ASCII. We'll have to define our own version.
+// itoa_r() converts a (signed) integer to ASCII. It returns "buf" if the
+// conversion was successful, or NULL otherwise. It never writes more than
+// "sz" bytes. Output will be truncated as needed, and a NUL character is
+// always appended.
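+// Illustrative usage (hypothetical values):
+//   char buf[16];
+//   if (itoa_r(-42, buf, sizeof(buf))) { /* buf now holds "-42" */ }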
+char *itoa_r(int i, char *buf, size_t sz) {
+  // Make sure we can write at least one NUL byte.
+  size_t n = 1;
+  if (n > sz) {
+    return NULL;
+  }
+
+  // Handle negative numbers.
+  char *start = buf;
+  int minint = 0;
+  if (i < 0) {
+    // Make sure we can write the '-' character.
+    if (++n > sz) {
+      *start = '\000';
+      return NULL;
+    }
+    *start++ = '-';
+
+    // Turn our number positive.
+    if (i == -i) {
+      // The most negative integer (INT_MIN) needs special treatment, as
+      // its positive counterpart does not fit into an int.
+      minint = 1;
+      i = -(i + 1);
+    } else {
+      // "Normal" negative numbers are easy.
+      i = -i;
+    }
+  }
+
+  // Loop until we have converted the entire number. Output at least one
+  // character (i.e. '0').
+  char *ptr = start;
+  do {
+    // Make sure there is still enough space left in our output buffer.
+    if (++n > sz) {
+      buf = NULL;
+      goto truncate;
+    }
+
+    // Output the next digit and (if necessary) compensate for the most
+    // negative integer needing special treatment. This works because, no
+    // matter the bit width of the integer, the most negative integer always
+    // ends in 2, 4, 6, or 8.
+    *ptr++ = i%10 + '0' + minint;
+    minint = 0;
+    i /= 10;
+  } while (i);
+ truncate:  // Terminate the output with a NUL character.
+  *ptr = '\000';
+
+  // Conversion to ASCII actually resulted in the digits being in reverse
+  // order. We can't easily generate them in forward order, as we can't tell
+  // the number of characters needed until we are done converting.
+  // So, now, we reverse the string (except for the possible "-" sign).
+  while (--ptr > start) {
+    char ch = *ptr;
+    *ptr = *start;
+    *start++ = ch;
+  }
+  return buf;
+}
+
+// This handler gets called whenever we encounter a system call that we
+// don't recognize explicitly. For the purposes of this program, we just
+// log the system call and then deny it. More elaborate sandbox policies
+// might try to evaluate the system call in user-space, instead.
+// The only notable complication is that this function must be async-signal
+// safe. This restricts the library functions that we can call.
+intptr_t DefaultHandler(const struct arch_seccomp_data& data, void *) {
+  static const char msg0[] = "Disallowed system call #";
+  static const char msg1[] = "\n";
+  char buf[sizeof(msg0) - 1 + 25 + sizeof(msg1)];
+
+  *buf = '\000';
+  strncat(buf, msg0, sizeof(buf) - 1);
+
+  char *ptr = strrchr(buf, '\000');
+  itoa_r(data.nr, ptr, sizeof(buf) - (ptr - buf));
+
+  ptr = strrchr(ptr, '\000');
+  strncat(ptr, msg1, sizeof(buf) - (ptr - buf));
+
+  ptr = strrchr(ptr, '\000');
+  if (HANDLE_EINTR(write(2, buf, ptr - buf))) { }
+
+  return -ERR;
+}
+
+ErrorCode Evaluator(SandboxBPF* sandbox, int sysno, void *) {
+  switch (sysno) {
+#if defined(__NR_accept)
+  case __NR_accept: case __NR_accept4:
+#endif
+  case __NR_alarm:
+  case __NR_brk:
+  case __NR_clock_gettime:
+  case __NR_close:
+  case __NR_dup: case __NR_dup2:
+  case __NR_epoll_create: case __NR_epoll_ctl: case __NR_epoll_wait:
+  case __NR_exit: case __NR_exit_group:
+  case __NR_fcntl:
+#if defined(__NR_fcntl64)
+  case __NR_fcntl64:
+#endif
+  case __NR_fdatasync:
+  case __NR_fstat:
+#if defined(__NR_fstat64)
+  case __NR_fstat64:
+#endif
+  case __NR_ftruncate:
+  case __NR_futex:
+  case __NR_getdents: case __NR_getdents64:
+  case __NR_getegid:
+#if defined(__NR_getegid32)
+  case __NR_getegid32:
+#endif
+  case __NR_geteuid:
+#if defined(__NR_geteuid32)
+  case __NR_geteuid32:
+#endif
+  case __NR_getgid:
+#if defined(__NR_getgid32)
+  case __NR_getgid32:
+#endif
+  case __NR_getitimer: case __NR_setitimer:
+#if defined(__NR_getpeername)
+  case __NR_getpeername:
+#endif
+  case __NR_getpid: case __NR_gettid:
+#if defined(__NR_getsockname)
+  case __NR_getsockname:
+#endif
+  case __NR_gettimeofday:
+  case __NR_getuid:
+#if defined(__NR_getuid32)
+  case __NR_getuid32:
+#endif
+#if defined(__NR__llseek)
+  case __NR__llseek:
+#endif
+  case __NR_lseek:
+  case __NR_nanosleep:
+  case __NR_pipe: case __NR_pipe2:
+  case __NR_poll:
+  case __NR_pread64: case __NR_preadv:
+  case __NR_pwrite64: case __NR_pwritev:
+  case __NR_read: case __NR_readv:
+  case __NR_restart_syscall:
+  case __NR_set_robust_list:
+  case __NR_rt_sigaction:
+#if defined(__NR_sigaction)
+  case __NR_sigaction:
+#endif
+#if defined(__NR_signal)
+  case __NR_signal:
+#endif
+  case __NR_rt_sigprocmask:
+#if defined(__NR_sigprocmask)
+  case __NR_sigprocmask:
+#endif
+#if defined(__NR_shutdown)
+  case __NR_shutdown:
+#endif
+  case __NR_rt_sigreturn:
+#if defined(__NR_sigreturn)
+  case __NR_sigreturn:
+#endif
+#if defined(__NR_socketpair)
+  case __NR_socketpair:
+#endif
+  case __NR_time:
+  case __NR_uname:
+  case __NR_write: case __NR_writev:
+    return ErrorCode(ErrorCode::ERR_ALLOWED);
+
+  case __NR_prctl:
+    // Allow PR_SET_DUMPABLE and PR_GET_DUMPABLE. Do not allow anything else.
+    return sandbox->Cond(1, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL,
+                         PR_SET_DUMPABLE,
+                         ErrorCode(ErrorCode::ERR_ALLOWED),
+           sandbox->Cond(1, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL,
+                         PR_GET_DUMPABLE,
+                         ErrorCode(ErrorCode::ERR_ALLOWED),
+           sandbox->Trap(DefaultHandler, NULL)));
+
+  // The following system calls are temporarily permitted. This must be
+  // tightened later, but we currently don't implement enough of the
+  // sandboxing API to do so.
+  // As is, this sandbox isn't exactly safe :-/
+#if defined(__NR_sendmsg)
+  case __NR_sendmsg: case __NR_sendto:
+  case __NR_recvmsg: case __NR_recvfrom:
+  case __NR_getsockopt: case __NR_setsockopt:
+#elif defined(__NR_socketcall)
+  case __NR_socketcall:
+#endif
+#if defined(__NR_shmat)
+  case __NR_shmat: case __NR_shmctl: case __NR_shmdt: case __NR_shmget:
+#elif defined(__NR_ipc)
+  case __NR_ipc:
+#endif
+#if defined(__NR_mmap2)
+  case __NR_mmap2:
+#else
+  case __NR_mmap:
+#endif
+#if defined(__NR_ugetrlimit)
+  case __NR_ugetrlimit:
+#endif
+  case __NR_getrlimit:
+  case __NR_ioctl:
+  case __NR_clone:
+  case __NR_munmap: case __NR_mprotect: case __NR_madvise:
+  case __NR_remap_file_pages:
+    return ErrorCode(ErrorCode::ERR_ALLOWED);
+
+  // Everything that isn't explicitly allowed is denied.
+  default:
+    return sandbox->Trap(DefaultHandler, NULL);
+  }
+}
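+
+// A stripped-down policy in the same style, shown only as a sketch; the
+// syscall selection here is hypothetical and far too small for a real
+// program:
+//
+//   ErrorCode TinyEvaluator(SandboxBPF* sandbox, int sysno, void*) {
+//     switch (sysno) {
+//       case __NR_exit: case __NR_exit_group:
+//         return ErrorCode(ErrorCode::ERR_ALLOWED);
+//       case __NR_open:
+//         return ErrorCode(EACCES);  // Deny with a specific errno.
+//       default:
+//         return sandbox->Trap(DefaultHandler, NULL);
+//     }
+//   }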
+
+void *ThreadFnc(void *arg) {
+  return arg;
+}
+
+void *SendmsgStressThreadFnc(void *arg) {
+  if (arg) { }
+  static const int repetitions = 100;
+  static const int kNumFds = 3;
+  for (int rep = 0; rep < repetitions; ++rep) {
+    int fds[2 + kNumFds];
+    if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds)) {
+      perror("socketpair()");
+      _exit(1);
+    }
+    size_t len = 4;
+    char buf[4];
+    if (!SendFds(fds[0], "test", 4, fds[1], fds[1], fds[1], -1) ||
+        !GetFds(fds[1], buf, &len, fds+2, fds+3, fds+4, NULL) ||
+        len != 4 ||
+        memcmp(buf, "test", len) ||
+        write(fds[2], "demo", 4) != 4 ||
+        read(fds[0], buf, 4) != 4 ||
+        memcmp(buf, "demo", 4)) {
+      perror("sending/receiving of fds");
+      _exit(1);
+    }
+    for (int i = 0; i < 2+kNumFds; ++i) {
+      if (close(fds[i])) {
+        perror("close");
+        _exit(1);
+      }
+    }
+  }
+  return NULL;
+}
+
+}  // namespace
+
+int main(int argc, char *argv[]) {
+  if (argc) { }
+  if (argv) { }
+  int proc_fd = open("/proc", O_RDONLY|O_DIRECTORY);
+  if (SandboxBPF::SupportsSeccompSandbox(proc_fd) !=
+      SandboxBPF::STATUS_AVAILABLE) {
+    perror("sandbox");
+    _exit(1);
+  }
+  SandboxBPF sandbox;
+  sandbox.set_proc_fd(proc_fd);
+  sandbox.SetSandboxPolicyDeprecated(Evaluator, NULL);
+  if (!sandbox.StartSandbox(SandboxBPF::PROCESS_SINGLE_THREADED)) {
+    fprintf(stderr, "StartSandbox() failed");
+    _exit(1);
+  }
+
+  // Check that we can create threads
+  pthread_t thr;
+  if (!pthread_create(&thr, NULL, ThreadFnc,
+                      reinterpret_cast<void *>(0x1234))) {
+    void *ret;
+    pthread_join(thr, &ret);
+    if (ret != reinterpret_cast<void *>(0x1234)) {
+      perror("clone() failed");
+      _exit(1);
+    }
+  } else {
+    perror("clone() failed");
+    _exit(1);
+  }
+
+  // Check that we handle restart_syscall() without dying. This is a little
+  // tricky to trigger, and I can't think of a good way to verify whether it
+  // actually executed.
+  signal(SIGALRM, SIG_IGN);
+  const struct itimerval tv = { { 0, 0 }, { 0, 5*1000 } };
+  const struct timespec tmo = { 0, 100*1000*1000 };
+  setitimer(ITIMER_REAL, &tv, NULL);
+  nanosleep(&tmo, NULL);
+
+  // Check that we can query the size of the stack, but that all other
+  // calls to getrlimit() fail.
+  if (((errno = 0), !getrlimit(RLIMIT_STACK, NULL)) || errno != EFAULT ||
+      ((errno = 0), !getrlimit(RLIMIT_CORE,  NULL)) || errno != ERR) {
+    perror("getrlimit()");
+    _exit(1);
+  }
+
+  // Check that we can query TCGETS and TIOCGWINSZ, but no other ioctls().
+  if (((errno = 0), !ioctl(2, TCGETS,     NULL)) || errno != EFAULT ||
+      ((errno = 0), !ioctl(2, TIOCGWINSZ, NULL)) || errno != EFAULT ||
+      ((errno = 0), !ioctl(2, TCSETS,     NULL)) || errno != ERR) {
+    perror("ioctl()");
+    _exit(1);
+  }
+
+  // Check that prctl() can manipulate the dumpable flag, but nothing else.
+  if (((errno = 0), !prctl(PR_GET_DUMPABLE))    || errno ||
+      ((errno = 0),  prctl(PR_SET_DUMPABLE, 1)) || errno ||
+      ((errno = 0), !prctl(PR_SET_SECCOMP,  0)) || errno != ERR) {
+    perror("prctl()");
+    _exit(1);
+  }
+
+  // Check that we can send and receive file handles.
+  int fds[3];
+  if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds)) {
+    perror("socketpair()");
+    _exit(1);
+  }
+  size_t len = 4;
+  char buf[4];
+  if (!SendFds(fds[0], "test", 4, fds[1], -1) ||
+      !GetFds(fds[1], buf, &len, fds+2, NULL) ||
+      len != 4 ||
+      memcmp(buf, "test", len) ||
+      write(fds[2], "demo", 4) != 4 ||
+      read(fds[0], buf, 4) != 4 ||
+      memcmp(buf, "demo", 4) ||
+      close(fds[0]) ||
+      close(fds[1]) ||
+      close(fds[2])) {
+    perror("sending/receiving of fds");
+    _exit(1);
+  }
+
+  // Check whether SysV IPC works.
+  int shmid;
+  void *addr;
+  if ((shmid = shmget(IPC_PRIVATE, 4096, IPC_CREAT|0600)) < 0 ||
+      (addr = shmat(shmid, NULL, 0)) == reinterpret_cast<void *>(-1) ||
+      shmdt(addr) ||
+      shmctl(shmid, IPC_RMID, NULL)) {
+    perror("sysv IPC");
+    _exit(1);
+  }
+
+  // Print a message so that the user can see the sandbox is activated.
+  time_t tm = time(NULL);
+  printf("Sandbox has been started at %s", ctime(&tm));
+
+  // Stress-test the sendmsg() code
+  static const int kSendmsgStressNumThreads = 10;
+  pthread_t sendmsgStressThreads[kSendmsgStressNumThreads];
+  for (int i = 0; i < kSendmsgStressNumThreads; ++i) {
+    if (pthread_create(sendmsgStressThreads + i, NULL,
+                       SendmsgStressThreadFnc, NULL)) {
+      perror("pthread_create");
+      _exit(1);
+    }
+  }
+  for (int i = 0; i < kSendmsgStressNumThreads; ++i) {
+    pthread_join(sendmsgStressThreads[i], NULL);
+  }
+
+  return 0;
+}
new file mode 100644
--- /dev/null
+++ b/security/sandbox/chromium/sandbox/linux/seccomp-bpf/die.cc
@@ -0,0 +1,85 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <errno.h>
+#include <linux/unistd.h>
+#include <stdio.h>
+#include <sys/prctl.h>
+
+#include <string>
+
+#include "base/logging.h"
+#include "base/posix/eintr_wrapper.h"
+#include "sandbox/linux/seccomp-bpf/sandbox_bpf.h"
+#include "sandbox/linux/seccomp-bpf/syscall.h"
+
+namespace sandbox {
+
+void Die::ExitGroup() {
+  // exit_group() should exit our program. After all, it is defined as a
+  // function that doesn't return. But things can theoretically go wrong,
+  // especially since we are dealing with system call filters. Continuing
+  // execution would be very bad in most cases where ExitGroup() gets called.
+  // So, we'll try a few other strategies too.
+  SandboxSyscall(__NR_exit_group, 1);
+
+  // We have no idea what our run-time environment looks like. So, signal
+  // handlers might or might not do the right thing. Try to reset settings
+  // to a defined state; but we have no way to verify whether we actually
+  // succeeded in doing so. Nonetheless, triggering a fatal signal could help
+  // us terminate.
+  signal(SIGSEGV, SIG_DFL);
+  SandboxSyscall(__NR_prctl, PR_SET_DUMPABLE, (void*)0, (void*)0, (void*)0);
+  if (*(volatile char*)0) {
+  }
+
+  // If there is no way for us to make the program exit, the next best
+  // thing we can do is to loop indefinitely. Maybe somebody will notice
+  // and file a bug...
+  // We in fact retry the system call inside our loop so that it will
+  // stand out when somebody tries to diagnose the problem with "strace".
+  for (;;) {
+    SandboxSyscall(__NR_exit_group, 1);
+  }
+}
+
+void Die::SandboxDie(const char* msg, const char* file, int line) {
+  if (simple_exit_) {
+    LogToStderr(msg, file, line);
+  } else {
+    logging::LogMessage(file, line, logging::LOG_FATAL).stream() << msg;
+  }
+  ExitGroup();
+}
+
+void Die::RawSandboxDie(const char* msg) {
+  if (!msg)
+    msg = "";
+  RAW_LOG(FATAL, msg);
+  ExitGroup();
+}
+
+void Die::SandboxInfo(const char* msg, const char* file, int line) {
+  if (!suppress_info_) {
+    logging::LogMessage(file, line, logging::LOG_INFO).stream() << msg;
+  }
+}
+
+void Die::LogToStderr(const char* msg, const char* file, int line) {
+  if (msg) {
+    char buf[40];
+    snprintf(buf, sizeof(buf), "%d", line);
+    std::string s = std::string(file) + ":" + buf + ":" + msg + "\n";
+
+    // No need to loop. Short write()s are unlikely and if they happen we
+    // probably prefer them over a loop that blocks.
+    ignore_result(
+        HANDLE_EINTR(SandboxSyscall(__NR_write, 2, s.c_str(), s.length())));
+  }
+}
+
+bool Die::simple_exit_ = false;
+bool Die::suppress_info_ = false;
+
+}  // namespace sandbox
new file mode 100644
--- /dev/null
+++ b/security/sandbox/chromium/sandbox/linux/seccomp-bpf/die.h
@@ -0,0 +1,68 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_SECCOMP_BPF_DIE_H__
+#define SANDBOX_LINUX_SECCOMP_BPF_DIE_H__
+
+#include "base/basictypes.h"
+#include "sandbox/linux/sandbox_export.h"
+
+namespace sandbox {
+
+// This is the main API for using this file. Prints an error message and
+// exits with a fatal error. This is not async-signal safe.
+#define SANDBOX_DIE(m) sandbox::Die::SandboxDie(m, __FILE__, __LINE__)
+
+// An async-signal-safe version of the same API. It won't print the filename
+// and line number.
+#define RAW_SANDBOX_DIE(m) sandbox::Die::RawSandboxDie(m)
+
+// Adds an informational message to the log file or stderr as appropriate.
+#define SANDBOX_INFO(m) sandbox::Die::SandboxInfo(m, __FILE__, __LINE__)
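+
+// A minimal usage sketch of these macros (the prctl() call is just an
+// arbitrary example of a fatal condition, mirroring how the sandbox code
+// itself uses SANDBOX_DIE):
+//
+//   if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
+//     SANDBOX_DIE("Kernel refuses to enable no-new-privs");
+//   }
+//   SANDBOX_INFO("no-new-privs is now enabled");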
+
+class SANDBOX_EXPORT Die {
+ public:
+  // Terminate the program, even if the current sandbox policy prevents some
+  // of the more commonly used functions used for exiting.
+  // Most users would want to call SANDBOX_DIE() instead, as it logs extra
+  // information. But calling ExitGroup() is correct and in some rare cases
+  // preferable. So, we make it part of the public API.
+  static void ExitGroup() __attribute__((noreturn));
+
+  // This method gets called by SANDBOX_DIE(). There is normally no reason
+  // to call it directly unless you are defining your own exiting macro.
+  static void SandboxDie(const char* msg, const char* file, int line)
+      __attribute__((noreturn));
+
+  static void RawSandboxDie(const char* msg) __attribute__((noreturn));
+
+  // This method gets called by SANDBOX_INFO(). There is normally no reason
+  // to call it directly unless you are defining your own logging macro.
+  static void SandboxInfo(const char* msg, const char* file, int line);
+
+  // Writes a message to stderr. Used as a fall-back choice, if we don't have
+  // any other way to report an error.
+  static void LogToStderr(const char* msg, const char* file, int line);
+
+  // We generally want to run all exit handlers. This means that on
+  // SANDBOX_DIE() we should be calling LOG(FATAL). But there are some
+  // situations where we just need to print a message and then terminate.
+  // This would typically happen in cases where we consume the error message
+  // internally (e.g. in unit tests or in the SupportsSeccompSandbox() method).
+  static void EnableSimpleExit() { simple_exit_ = true; }
+
+  // Sometimes we need to disable all informational messages (e.g. from within
+  // unittests).
+  static void SuppressInfoMessages(bool flag) { suppress_info_ = flag; }
+
+ private:
+  static bool simple_exit_;
+  static bool suppress_info_;
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(Die);
+};
+
+}  // namespace sandbox
+
+#endif  // SANDBOX_LINUX_SECCOMP_BPF_DIE_H__
new file mode 100644
--- /dev/null
+++ b/security/sandbox/chromium/sandbox/linux/seccomp-bpf/errorcode.cc
@@ -0,0 +1,104 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sandbox/linux/seccomp-bpf/die.h"
+#include "sandbox/linux/seccomp-bpf/errorcode.h"
+
+namespace sandbox {
+
+ErrorCode::ErrorCode(int err) {
+  switch (err) {
+    case ERR_ALLOWED:
+      err_ = SECCOMP_RET_ALLOW;
+      error_type_ = ET_SIMPLE;
+      break;
+    case ERR_MIN_ERRNO ... ERR_MAX_ERRNO:
+      err_ = SECCOMP_RET_ERRNO + err;
+      error_type_ = ET_SIMPLE;
+      break;
+    default:
+      SANDBOX_DIE("Invalid use of ErrorCode object");
+  }
+}
+
+ErrorCode::ErrorCode(Trap::TrapFnc fnc, const void* aux, bool safe, uint16_t id)
+    : error_type_(ET_TRAP),
+      fnc_(fnc),
+      aux_(const_cast<void*>(aux)),
+      safe_(safe),
+      err_(SECCOMP_RET_TRAP + id) {}
+
+ErrorCode::ErrorCode(int argno,
+                     ArgType width,
+                     Operation op,
+                     uint64_t value,
+                     const ErrorCode* passed,
+                     const ErrorCode* failed)
+    : error_type_(ET_COND),
+      value_(value),
+      argno_(argno),
+      width_(width),
+      op_(op),
+      passed_(passed),
+      failed_(failed),
+      err_(SECCOMP_RET_INVALID) {
+  if (op < 0 || op >= OP_NUM_OPS) {
+    SANDBOX_DIE("Invalid opcode in BPF sandbox rules");
+  }
+}
+
+bool ErrorCode::Equals(const ErrorCode& err) const {
+  if (error_type_ == ET_INVALID || err.error_type_ == ET_INVALID) {
+    SANDBOX_DIE("Dereferencing invalid ErrorCode");
+  }
+  if (error_type_ != err.error_type_) {
+    return false;
+  }
+  if (error_type_ == ET_SIMPLE || error_type_ == ET_TRAP) {
+    return err_ == err.err_;
+  } else if (error_type_ == ET_COND) {
+    return value_ == err.value_ && argno_ == err.argno_ &&
+           width_ == err.width_ && op_ == err.op_ &&
+           passed_->Equals(*err.passed_) && failed_->Equals(*err.failed_);
+  } else {
+    SANDBOX_DIE("Corrupted ErrorCode");
+  }
+}
+
+bool ErrorCode::LessThan(const ErrorCode& err) const {
+  // Implementing a "LessThan()" operator allows us to use ErrorCode objects
+  // as keys in STL containers; most notably, it also allows us to put them
+  // into std::set<>. Actual ordering is not important as long as it is
+  // deterministic.
+  if (error_type_ == ET_INVALID || err.error_type_ == ET_INVALID) {
+    SANDBOX_DIE("Dereferencing invalid ErrorCode");
+  }
+  if (error_type_ != err.error_type_) {
+    return error_type_ < err.error_type_;
+  } else {
+    if (error_type_ == ET_SIMPLE || error_type_ == ET_TRAP) {
+      return err_ < err.err_;
+    } else if (error_type_ == ET_COND) {
+      if (value_ != err.value_) {
+        return value_ < err.value_;
+      } else if (argno_ != err.argno_) {
+        return argno_ < err.argno_;
+      } else if (width_ != err.width_) {
+        return width_ < err.width_;
+      } else if (op_ != err.op_) {
+        return op_ < err.op_;
+      } else if (!passed_->Equals(*err.passed_)) {
+        return passed_->LessThan(*err.passed_);
+      } else if (!failed_->Equals(*err.failed_)) {
+        return failed_->LessThan(*err.failed_);
+      } else {
+        return false;
+      }
+    } else {
+      SANDBOX_DIE("Corrupted ErrorCode");
+    }
+  }
+}
+
+}  // namespace sandbox
new file mode 100644
--- /dev/null
+++ b/security/sandbox/chromium/sandbox/linux/seccomp-bpf/errorcode.h
@@ -0,0 +1,198 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_SECCOMP_BPF_ERRORCODE_H__
+#define SANDBOX_LINUX_SECCOMP_BPF_ERRORCODE_H__
+
+#include "sandbox/linux/sandbox_export.h"
+#include "sandbox/linux/seccomp-bpf/linux_seccomp.h"
+#include "sandbox/linux/seccomp-bpf/trap.h"
+
+namespace sandbox {
+
+struct arch_seccomp_data;
+
+// This class holds all the possible values that can be returned by a sandbox
+// policy.
+// It can wrap either a symbolic ErrorCode (i.e. an ERR_XXX enum value), an
+// errno value (in the range 0..4095), a pointer to a TrapFnc callback
+// handling a SECCOMP_RET_TRAP trap, or a complex constraint.
+// All of the commonly used values are stored in the "err_" field, so code
+// that is using the ErrorCode class typically operates on a single 32bit
+// field.
+class SANDBOX_EXPORT ErrorCode {
+ public:
+  enum {
+    // Allow this system call. The value of ERR_ALLOWED is pretty much
+    // completely arbitrary. But we want to pick it so that it is unlikely
+    // to be passed in accidentally, when the user intended to return an
+    // "errno" (see below) value instead.
+    ERR_ALLOWED = 0x04000000,
+
+    // Deny the system call with a particular "errno" value.
+    // N.B.: It is also possible to return "0" here. That would normally
+    //       indicate success, but it won't actually run the system call.
+    //       This is very different from returning ERR_ALLOWED.
+    ERR_MIN_ERRNO = 0,
+    // TODO(markus): Android only supports errno up to 255
+    // (crbug.com/181647).
+    ERR_MAX_ERRNO = 4095,
+  };
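+
+  // A sketch of how these values are typically wrapped (see the explicit
+  // constructor below):
+  //
+  //   ErrorCode allow(ErrorCode::ERR_ALLOWED);  // Run the system call.
+  //   ErrorCode eperm(EPERM);                   // Fail with errno == EPERM.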
+
+  // While BPF filter programs always operate on 32bit quantities, the kernel
+  // always sees system call arguments as 64bit values. This is true no matter
+  // whether the host system natively operates in 32bit or 64bit mode.
+  // The BPF compiler hides the fact that BPF instructions cannot directly
+  // access 64bit quantities. But policies are still advised to specify whether
+  // a system call expects a 32bit or a 64bit quantity.
+  enum ArgType {
+    // When passed as an argument to SandboxBPF::Cond(), TP_32BIT requests that
+    // the conditional test should operate on the 32bit part of the system call
+    // argument.
+    // On 64bit architectures, this verifies that user space did not pass
+    // a 64bit value as an argument to the system call. If it did, that will be
+    // interpreted as an attempt at breaking the sandbox and results in the
+    // program getting terminated.
+    // In other words, only perform a 32bit test, if you are sure this
+    // particular system call would never legitimately take a 64bit
+    // argument.
+    // Implementation detail: TP_32BIT does two things. 1) it restricts the
+    // conditional test to operating on the LSB only, and 2) it adds code to
+    // the BPF filter program verifying that the MSB that the kernel received
+    // from user space is either 0 or 0xFFFFFFFF; the latter is acceptable iff
+    // bit 31 was set in the system call argument. This deals with 32bit
+    // arguments having been sign extended.
+    TP_32BIT,
+
+    // When passed as an argument to SandboxBPF::Cond(), TP_64BIT requests that
+    // the conditional test should operate on the full 64bit argument. It is
+    // generally harmless to perform a 64bit test on 32bit systems, as the
+    // kernel will always see the top 32 bits of all arguments as zero'd out.
+    // This approach has the desirable property that for tests of pointer
+    // values, we can always use TP_64BIT no matter the host architecture.
+    // But of course, that also means it is possible to write conditional
+    // policies that turn into no-ops on 32bit systems; this is by design.
+    TP_64BIT,
+  };
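+
+  // A sketch of a 32bit conditional test, modeled on the Cond() calls used
+  // elsewhere in this import; the F_GETFL policy shown here is hypothetical:
+  //
+  //   // Allow fcntl(fd, F_GETFL), fail everything else with EPERM.
+  //   return sandbox->Cond(1, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL,
+  //                        F_GETFL,
+  //                        ErrorCode(ErrorCode::ERR_ALLOWED),
+  //                        ErrorCode(EPERM));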
+
+  enum Operation {
+    // Test whether the system call argument is equal to the operand.
+    OP_EQUAL,
+
+    // Test whether the system call argument is greater than (or equal to)
+    // the operand. Please note that all tests always operate on unsigned
+    // values. You can generally emulate signed tests, if that's what you
+    // need.
+    // TODO(markus): Check whether we should automatically emulate signed
+    //               operations.
+    OP_GREATER_UNSIGNED,
+    OP_GREATER_EQUAL_UNSIGNED,
+
+    // Tests a system call argument against a bit mask.
+    // The "ALL_BITS" variant performs this test: "arg & mask == mask"
+    // This implies that a mask of zero always results in a passing test.
+    // The "ANY_BITS" variant performs this test: "arg & mask != 0"
+    // This implies that a mask of zero always results in a failing test.
+    OP_HAS_ALL_BITS,
+    OP_HAS_ANY_BITS,
+
+    // Total number of operations.
+    OP_NUM_OPS,
+  };
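+
+  // A worked example for the bit mask tests, with mask = O_RDWR|O_CREAT
+  // (the flag values are only for illustration):
+  //   OP_HAS_ALL_BITS: passes for arg == O_RDWR|O_CREAT|O_TRUNC, since all
+  //                    mask bits are set; fails for arg == O_RDWR.
+  //   OP_HAS_ANY_BITS: passes for arg == O_CREAT; fails for arg == 0.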
+
+  enum ErrorType {
+    ET_INVALID,
+    ET_SIMPLE,
+    ET_TRAP,
+    ET_COND,
+  };
+
+  // We allow the default constructor, as it makes the ErrorCode class
+  // much easier to use. But if we ever encounter an invalid ErrorCode
+  // when compiling a BPF filter, we deliberately generate an invalid
+  // program that will get flagged both by our Verifier class and by
+  // the Linux kernel.
+  ErrorCode() : error_type_(ET_INVALID), err_(SECCOMP_RET_INVALID) {}
+  explicit ErrorCode(int err);
+
+  // For all practical purposes, ErrorCodes are treated as if they were
+  // structs. The copy constructor and assignment operator are trivial and
+  // we do not need to explicitly specify them.
+  // Most notably, it is in fact perfectly OK to directly copy the passed_ and
+  // failed_ field. They only ever get set by our private constructor, and the
+  // callers handle life-cycle management for these objects.
+
+  // Destructor
+  ~ErrorCode() {}
+
+  bool Equals(const ErrorCode& err) const;
+  bool LessThan(const ErrorCode& err) const;
+
+  uint32_t err() const { return err_; }
+  ErrorType error_type() const { return error_type_; }
+
+  bool safe() const { return safe_; }
+
+  uint64_t value() const { return value_; }
+  int argno() const { return argno_; }
+  ArgType width() const { return width_; }
+  Operation op() const { return op_; }
+  const ErrorCode* passed() const { return passed_; }
+  const ErrorCode* failed() const { return failed_; }
+
+  struct LessThan {
+    bool operator()(const ErrorCode& a, const ErrorCode& b) const {
+      return a.LessThan(b);
+    }
+  };
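+
+  // This functor makes ErrorCode usable as a key in ordered STL containers,
+  // e.g. (sketch only):
+  //
+  //   std::set<ErrorCode, ErrorCode::LessThan> seen;
+  //   seen.insert(ErrorCode(EPERM));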
+
+ private:
+  friend class CodeGen;
+  friend class SandboxBPF;
+  friend class Trap;
+
+  // If we are wrapping a callback, we must assign a unique id. This id is
+  // how the kernel tells us which one of our different SECCOMP_RET_TRAP
+  // cases has been triggered.
+  ErrorCode(Trap::TrapFnc fnc, const void* aux, bool safe, uint16_t id);
+
+  // Some system calls require inspection of arguments. This constructor
+  // allows us to specify additional constraints.
+  ErrorCode(int argno,
+            ArgType width,
+            Operation op,
+            uint64_t value,
+            const ErrorCode* passed,
+            const ErrorCode* failed);
+
+  ErrorType error_type_;
+
+  union {
+    // Fields needed for SECCOMP_RET_TRAP callbacks
+    struct {
+      Trap::TrapFnc fnc_;  // Callback function and arg, if trap was
+      void* aux_;          //   triggered by the kernel's BPF filter.
+      bool safe_;          // Keep sandbox active while calling fnc_()
+    };
+
+    // Fields needed when inspecting additional arguments.
+    struct {
+      uint64_t value_;           // Value that we are comparing with.
+      int argno_;                // Syscall arg number that we are inspecting.
+      ArgType width_;            // Whether we are looking at a 32/64bit value.
+      Operation op_;             // Comparison operation.
+      const ErrorCode* passed_;  // Value to be returned if comparison passed,
+      const ErrorCode* failed_;  //   or if it failed.
+    };
+  };
+
+  // 32bit field used for all possible types of ErrorCode values. This is
+  // the value that uniquely identifies any ErrorCode and it (typically) can
+  // be emitted directly into a BPF filter program.
+  uint32_t err_;
+};
+
+}  // namespace sandbox
+
+#endif  // SANDBOX_LINUX_SECCOMP_BPF_ERRORCODE_H__
new file mode 100644
--- /dev/null
+++ b/security/sandbox/chromium/sandbox/linux/seccomp-bpf/errorcode_unittest.cc
@@ -0,0 +1,85 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <errno.h>
+
+#include "sandbox/linux/seccomp-bpf/sandbox_bpf.h"
+#include "sandbox/linux/tests/unit_tests.h"
+
+namespace sandbox {
+
+namespace {
+
+SANDBOX_TEST(ErrorCode, ErrnoConstructor) {
+  ErrorCode e0;
+  SANDBOX_ASSERT(e0.err() == SECCOMP_RET_INVALID);
+
+  ErrorCode e1(ErrorCode::ERR_ALLOWED);
+  SANDBOX_ASSERT(e1.err() == SECCOMP_RET_ALLOW);
+
+  ErrorCode e2(EPERM);
+  SANDBOX_ASSERT(e2.err() == SECCOMP_RET_ERRNO + EPERM);
+
+  SandboxBPF sandbox;
+  ErrorCode e3 = sandbox.Trap(NULL, NULL);
+  SANDBOX_ASSERT((e3.err() & SECCOMP_RET_ACTION)  == SECCOMP_RET_TRAP);
+}
+
+SANDBOX_TEST(ErrorCode, Trap) {
+  SandboxBPF sandbox;
+  ErrorCode e0 = sandbox.Trap(NULL, "a");
+  ErrorCode e1 = sandbox.Trap(NULL, "b");
+  SANDBOX_ASSERT((e0.err() & SECCOMP_RET_DATA) + 1 ==
+                 (e1.err() & SECCOMP_RET_DATA));
+
+  ErrorCode e2 = sandbox.Trap(NULL, "a");
+  SANDBOX_ASSERT((e0.err() & SECCOMP_RET_DATA) ==
+                 (e2.err() & SECCOMP_RET_DATA));
+}
+
+SANDBOX_TEST(ErrorCode, Equals) {
+  ErrorCode e1(ErrorCode::ERR_ALLOWED);
+  ErrorCode e2(ErrorCode::ERR_ALLOWED);
+  SANDBOX_ASSERT(e1.Equals(e1));
+  SANDBOX_ASSERT(e1.Equals(e2));
+  SANDBOX_ASSERT(e2.Equals(e1));
+
+  ErrorCode e3(EPERM);
+  SANDBOX_ASSERT(!e1.Equals(e3));
+
+  SandboxBPF sandbox;
+  ErrorCode e4 = sandbox.Trap(NULL, "a");
+  ErrorCode e5 = sandbox.Trap(NULL, "b");
+  ErrorCode e6 = sandbox.Trap(NULL, "a");
+  SANDBOX_ASSERT(!e1.Equals(e4));
+  SANDBOX_ASSERT(!e3.Equals(e4));
+  SANDBOX_ASSERT(!e5.Equals(e4));
+  SANDBOX_ASSERT( e6.Equals(e4));
+}
+
+SANDBOX_TEST(ErrorCode, LessThan) {
+  ErrorCode e1(ErrorCode::ERR_ALLOWED);
+  ErrorCode e2(ErrorCode::ERR_ALLOWED);
+  SANDBOX_ASSERT(!e1.LessThan(e1));
+  SANDBOX_ASSERT(!e1.LessThan(e2));
+  SANDBOX_ASSERT(!e2.LessThan(e1));
+
+  ErrorCode e3(EPERM);
+  SANDBOX_ASSERT(!e1.LessThan(e3));
+  SANDBOX_ASSERT( e3.LessThan(e1));
+
+  SandboxBPF sandbox;
+  ErrorCode e4 = sandbox.Trap(NULL, "a");
+  ErrorCode e5 = sandbox.Trap(NULL, "b");
+  ErrorCode e6 = sandbox.Trap(NULL, "a");
+  SANDBOX_ASSERT(e1.LessThan(e4));
+  SANDBOX_ASSERT(e3.LessThan(e4));
+  SANDBOX_ASSERT(e4.LessThan(e5));
+  SANDBOX_ASSERT(!e4.LessThan(e6));
+  SANDBOX_ASSERT(!e6.LessThan(e4));
+}
+
+}  // namespace
+
+}  // namespace sandbox
new file mode 100644
--- /dev/null
+++ b/security/sandbox/chromium/sandbox/linux/seccomp-bpf/instruction.h
@@ -0,0 +1,62 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_SECCOMP_BPF_INSTRUCTION_H__
+#define SANDBOX_LINUX_SECCOMP_BPF_INSTRUCTION_H__
+
+#include <stdint.h>
+
+namespace sandbox {
+
+// The fields in this structure have the same meaning as the corresponding
+// fields in "struct sock_filter". See <linux/filter.h> for a lot more
+// detail.
+// code     -- Opcode of the instruction. This is typically a bitwise
+//             combination of BPF_XXX values.
+// k        -- Operand; BPF instructions take zero or one operands. Operands
+//             are 32bit-wide constants, if present. They can be immediate
+//             values (if BPF_K is present in "code"), addresses (if BPF_ABS
+//             is present in "code"), or relative jump offsets (if BPF_JMP
+//             and BPF_JA are present in "code").
+// jt, jf   -- all conditional jumps have an 8bit-wide jump offset that
+//             allows jumps of up to 255 instructions forward. Conditional
+//             jumps are identified by BPF_JMP in "code" combined with the
+//             absence of BPF_JA. Conditional jumps have a "t"rue and
+//             "f"alse branch.
+struct Instruction {
+  // Constructor for a non-jumping instruction or for an unconditional
+  // "always" jump.
+  Instruction(uint16_t c, uint32_t parm, Instruction* n)
+      : code(c), next(n), k(parm) {}
+
+  // Constructor for a conditional jump instruction.
+  Instruction(uint16_t c, uint32_t parm, Instruction* jt, Instruction* jf)
+      : code(c), jt_ptr(jt), jf_ptr(jf), k(parm) {}
+
+  uint16_t code;
+  union {
+    // When code generation is complete, we will have computed relative
+    // branch targets that are in the range 0..255.
+    struct {
+      uint8_t jt, jf;
+    };
+
+    // While assembling the BPF program, we use pointers for branch targets.
+    // Once we have computed basic blocks, these pointers will be entered as
+    // keys in a TargetsToBlocks map and should no longer be dereferenced
+    // directly.
+    struct {
+      Instruction* jt_ptr, *jf_ptr;
+    };
+
+    // While assembling the BPF program, non-jumping instructions are linked
+    // by the "next_" pointer. This field is no longer needed when we have
+    // computed basic blocks.
+    Instruction* next;
+  };
+  uint32_t k;
+};
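+
+// A sketch of how two instructions chain together via "next". Instructions
+// are normally created through the CodeGen class rather than with "new"
+// directly, so this is for illustration only; SECCOMP_NR_IDX and
+// SECCOMP_RET_ALLOW come from linux_seccomp.h:
+//
+//   Instruction* ret  = new Instruction(BPF_RET + BPF_K, SECCOMP_RET_ALLOW,
+//                                       static_cast<Instruction*>(NULL));
+//   Instruction* load = new Instruction(BPF_LD + BPF_W + BPF_ABS,
+//                                       SECCOMP_NR_IDX, ret);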
+
+}  // namespace sandbox
+
+#endif  // SANDBOX_LINUX_SECCOMP_BPF_INSTRUCTION_H__
new file mode 100644
--- /dev/null
+++ b/security/sandbox/chromium/sandbox/linux/seccomp-bpf/linux_seccomp.h
@@ -0,0 +1,197 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_SECCOMP_BPF_LINUX_SECCOMP_H__
+#define SANDBOX_LINUX_SECCOMP_BPF_LINUX_SECCOMP_H__
+
+// The Seccomp2 kernel ABI is not part of older versions of glibc.
+// As we can't break compilation with these versions of the library,
+// we explicitly define all missing symbols.
+// If we ever decide that we can now rely on system headers, the following
+// include files should be enabled:
+// #include <linux/audit.h>
+// #include <linux/seccomp.h>
+
+#include <asm/unistd.h>
+#include <linux/filter.h>
+
+// For audit.h
+#ifndef EM_ARM
+#define EM_ARM    40
+#endif
+#ifndef EM_386
+#define EM_386    3
+#endif
+#ifndef EM_X86_64
+#define EM_X86_64 62
+#endif
+
+#ifndef __AUDIT_ARCH_64BIT
+#define __AUDIT_ARCH_64BIT 0x80000000
+#endif
+#ifndef __AUDIT_ARCH_LE
+#define __AUDIT_ARCH_LE    0x40000000
+#endif
+#ifndef AUDIT_ARCH_ARM
+#define AUDIT_ARCH_ARM    (EM_ARM|__AUDIT_ARCH_LE)
+#endif
+#ifndef AUDIT_ARCH_I386
+#define AUDIT_ARCH_I386   (EM_386|__AUDIT_ARCH_LE)
+#endif
+#ifndef AUDIT_ARCH_X86_64
+#define AUDIT_ARCH_X86_64 (EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
+#endif
+
+// For prctl.h
+#ifndef PR_SET_SECCOMP
+#define PR_SET_SECCOMP               22
+#define PR_GET_SECCOMP               21
+#endif
+#ifndef PR_SET_NO_NEW_PRIVS
+#define PR_SET_NO_NEW_PRIVS          38
+#define PR_GET_NO_NEW_PRIVS          39
+#endif
+#ifndef IPC_64
+#define IPC_64                   0x0100
+#endif
+
+#ifndef BPF_MOD
+#define BPF_MOD                    0x90
+#endif
+#ifndef BPF_XOR
+#define BPF_XOR                    0xA0
+#endif
+
+// In order to build with older tool chains, we currently have to avoid
+// including <linux/seccomp.h>. Until that can be fixed (if ever), we rely
+// on our own definitions of the seccomp kernel ABI.
+#ifndef SECCOMP_MODE_FILTER
+#define SECCOMP_MODE_DISABLED         0
+#define SECCOMP_MODE_STRICT           1
+#define SECCOMP_MODE_FILTER           2  // Use user-supplied filter
+#endif
+
+#ifndef SECCOMP_RET_KILL
+// Return values supported for BPF filter programs. Please note that the
+// "illegal" SECCOMP_RET_INVALID is not supported by the kernel, should only
+// ever be used internally, and would result in the kernel killing our process.
+#define SECCOMP_RET_KILL    0x00000000U  // Kill the task immediately
+#define SECCOMP_RET_INVALID 0x00010000U  // Illegal return value
+#define SECCOMP_RET_TRAP    0x00030000U  // Disallow and force a SIGSYS
+#define SECCOMP_RET_ERRNO   0x00050000U  // Returns an errno
+#define SECCOMP_RET_TRACE   0x7ff00000U  // Pass to a tracer or disallow
+#define SECCOMP_RET_ALLOW   0x7fff0000U  // Allow
+#define SECCOMP_RET_ACTION  0xffff0000U  // Masks for the return value
+#define SECCOMP_RET_DATA    0x0000ffffU  //   sections
+#else
+#define SECCOMP_RET_INVALID 0x00010000U  // Illegal return value
+#endif
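+
+// A sketch of decoding a filter return value with these masks: the top 16
+// bits select the action, the bottom 16 bits carry auxiliary data.
+//
+//   uint32_t ret    = SECCOMP_RET_ERRNO + EPERM;
+//   uint32_t action = ret & SECCOMP_RET_ACTION;  // SECCOMP_RET_ERRNO
+//   uint32_t data   = ret & SECCOMP_RET_DATA;    // EPERM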
+
+#ifndef SYS_SECCOMP
+#define SYS_SECCOMP                   1
+#endif
+
+// Impose some reasonable maximum BPF program size. Realistically, the
+// kernel probably has much lower limits. But by limiting to less than
+// 30 bits, we can ease requirements on some of our data types.
+#define SECCOMP_MAX_PROGRAM_SIZE (1<<30)
+
+#if defined(__i386__)
+#define MIN_SYSCALL         0u
+#define MAX_PUBLIC_SYSCALL  1024u
+#define MAX_SYSCALL         MAX_PUBLIC_SYSCALL
+#define SECCOMP_ARCH        AUDIT_ARCH_I386
+
+#define SECCOMP_REG(_ctx, _reg) ((_ctx)->uc_mcontext.gregs[(_reg)])
+#define SECCOMP_RESULT(_ctx)    SECCOMP_REG(_ctx, REG_EAX)
+#define SECCOMP_SYSCALL(_ctx)   SECCOMP_REG(_ctx, REG_EAX)
+#define SECCOMP_IP(_ctx)        SECCOMP_REG(_ctx, REG_EIP)
+#define SECCOMP_PARM1(_ctx)     SECCOMP_REG(_ctx, REG_EBX)
+#define SECCOMP_PARM2(_ctx)     SECCOMP_REG(_ctx, REG_ECX)
+#define SECCOMP_PARM3(_ctx)     SECCOMP_REG(_ctx, REG_EDX)
+#define SECCOMP_PARM4(_ctx)     SECCOMP_REG(_ctx, REG_ESI)
+#define SECCOMP_PARM5(_ctx)     SECCOMP_REG(_ctx, REG_EDI)
+#define SECCOMP_PARM6(_ctx)     SECCOMP_REG(_ctx, REG_EBP)
+#define SECCOMP_NR_IDX          (offsetof(struct arch_seccomp_data, nr))
+#define SECCOMP_ARCH_IDX        (offsetof(struct arch_seccomp_data, arch))
+#define SECCOMP_IP_MSB_IDX      (offsetof(struct arch_seccomp_data,           \
+                                          instruction_pointer) + 4)
+#define SECCOMP_IP_LSB_IDX      (offsetof(struct arch_seccomp_data,           \
+                                          instruction_pointer) + 0)
+#define SECCOMP_ARG_MSB_IDX(nr) (offsetof(struct arch_seccomp_data, args) +   \
+                                 8*(nr) + 4)
+#define SECCOMP_ARG_LSB_IDX(nr) (offsetof(struct arch_seccomp_data, args) +   \
+                                 8*(nr) + 0)
+
+#elif defined(__x86_64__)
+#define MIN_SYSCALL         0u
+#define MAX_PUBLIC_SYSCALL  1024u
+#define MAX_SYSCALL         MAX_PUBLIC_SYSCALL
+#define SECCOMP_ARCH        AUDIT_ARCH_X86_64
+
+#define SECCOMP_REG(_ctx, _reg) ((_ctx)->uc_mcontext.gregs[(_reg)])
+#define SECCOMP_RESULT(_ctx)    SECCOMP_REG(_ctx, REG_RAX)
+#define SECCOMP_SYSCALL(_ctx)   SECCOMP_REG(_ctx, REG_RAX)
+#define SECCOMP_IP(_ctx)        SECCOMP_REG(_ctx, REG_RIP)
+#define SECCOMP_PARM1(_ctx)     SECCOMP_REG(_ctx, REG_RDI)
+#define SECCOMP_PARM2(_ctx)     SECCOMP_REG(_ctx, REG_RSI)
+#define SECCOMP_PARM3(_ctx)     SECCOMP_REG(_ctx, REG_RDX)
+#define SECCOMP_PARM4(_ctx)     SECCOMP_REG(_ctx, REG_R10)
+#define SECCOMP_PARM5(_ctx)     SECCOMP_REG(_ctx, REG_R8)
+#define SECCOMP_PARM6(_ctx)     SECCOMP_REG(_ctx, REG_R9)
+#define SECCOMP_NR_IDX          (offsetof(struct arch_seccomp_data, nr))
+#define SECCOMP_ARCH_IDX        (offsetof(struct arch_seccomp_data, arch))
+#define SECCOMP_IP_MSB_IDX      (offsetof(struct arch_seccomp_data,           \
+                                          instruction_pointer) + 4)
+#define SECCOMP_IP_LSB_IDX      (offsetof(struct arch_seccomp_data,           \
+                                          instruction_pointer) + 0)
+#define SECCOMP_ARG_MSB_IDX(nr) (offsetof(struct arch_seccomp_data, args) +   \
+                                 8*(nr) + 4)
+#define SECCOMP_ARG_LSB_IDX(nr) (offsetof(struct arch_seccomp_data, args) +   \
+                                 8*(nr) + 0)
+
+#elif defined(__arm__) && (defined(__thumb__) || defined(__ARM_EABI__))
+// ARM EABI includes "ARM private" system calls starting at |__ARM_NR_BASE|,
+// and a "ghost syscall private to the kernel", cmpxchg,
+// at |__ARM_NR_BASE+0x00fff0|.
+// See </arch/arm/include/asm/unistd.h> in the Linux kernel.
+#define MIN_SYSCALL         ((unsigned int)__NR_SYSCALL_BASE)
+#define MAX_PUBLIC_SYSCALL  (MIN_SYSCALL + 1024u)
+#define MIN_PRIVATE_SYSCALL ((unsigned int)__ARM_NR_BASE)
+#define MAX_PRIVATE_SYSCALL (MIN_PRIVATE_SYSCALL + 16u)
+#define MIN_GHOST_SYSCALL   ((unsigned int)__ARM_NR_BASE + 0xfff0u)
+#define MAX_SYSCALL         (MIN_GHOST_SYSCALL + 4u)
+
+#define SECCOMP_ARCH AUDIT_ARCH_ARM
+
+// ARM sigcontext_t is different from i386/x86_64.
+// See </arch/arm/include/asm/sigcontext.h> in the Linux kernel.
+#define SECCOMP_REG(_ctx, _reg) ((_ctx)->uc_mcontext.arm_##_reg)
+// ARM EABI syscall convention.
+#define SECCOMP_RESULT(_ctx)    SECCOMP_REG(_ctx, r0)
+#define SECCOMP_SYSCALL(_ctx)   SECCOMP_REG(_ctx, r7)
+#define SECCOMP_IP(_ctx)        SECCOMP_REG(_ctx, pc)
+#define SECCOMP_PARM1(_ctx)     SECCOMP_REG(_ctx, r0)
+#define SECCOMP_PARM2(_ctx)     SECCOMP_REG(_ctx, r1)
+#define SECCOMP_PARM3(_ctx)     SECCOMP_REG(_ctx, r2)
+#define SECCOMP_PARM4(_ctx)     SECCOMP_REG(_ctx, r3)
+#define SECCOMP_PARM5(_ctx)     SECCOMP_REG(_ctx, r4)
+#define SECCOMP_PARM6(_ctx)     SECCOMP_REG(_ctx, r5)
+#define SECCOMP_NR_IDX          (offsetof(struct arch_seccomp_data, nr))
+#define SECCOMP_ARCH_IDX        (offsetof(struct arch_seccomp_data, arch))
+#define SECCOMP_IP_MSB_IDX      (offsetof(struct arch_seccomp_data,           \
+                                          instruction_pointer) + 4)
+#define SECCOMP_IP_LSB_IDX      (offsetof(struct arch_seccomp_data,           \
+                                          instruction_pointer) + 0)
+#define SECCOMP_ARG_MSB_IDX(nr) (offsetof(struct arch_seccomp_data, args) +   \
+                                 8*(nr) + 4)
+#define SECCOMP_ARG_LSB_IDX(nr) (offsetof(struct arch_seccomp_data, args) +   \
+                                 8*(nr) + 0)
+
+#else
+#error Unsupported target platform
+
+#endif
+
+#endif  // SANDBOX_LINUX_SECCOMP_BPF_LINUX_SECCOMP_H__
new file mode 100644
--- /dev/null
+++ b/security/sandbox/chromium/sandbox/linux/seccomp-bpf/sandbox_bpf.cc
@@ -0,0 +1,1048 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sandbox/linux/seccomp-bpf/sandbox_bpf.h"
+
+// Some headers on Android are missing cdefs: crbug.com/172337.
+// (We can't use OS_ANDROID here since build_config.h is not included).
+#if defined(ANDROID)
+#include <sys/cdefs.h>
+#endif
+
+#include <errno.h>
+#include <fcntl.h>
+#include <string.h>
+#include <sys/prctl.h>
+#include <sys/stat.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <time.h>
+#include <unistd.h>
+
+#include "base/compiler_specific.h"
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/posix/eintr_wrapper.h"
+#include "sandbox/linux/seccomp-bpf/codegen.h"
+#include "sandbox/linux/seccomp-bpf/sandbox_bpf_policy.h"
+#include "sandbox/linux/seccomp-bpf/syscall.h"
+#include "sandbox/linux/seccomp-bpf/syscall_iterator.h"
+#include "sandbox/linux/seccomp-bpf/verifier.h"
+
+namespace sandbox {
+
+namespace {
+
+const int kExpectedExitCode = 100;
+
+int popcount(uint32_t x) {
+  return __builtin_popcount(x);
+}
+
+#if !defined(NDEBUG)
+void WriteFailedStderrSetupMessage(int out_fd) {
+  const char* error_string = strerror(errno);
+  static const char msg[] =
+      "You have reproduced a puzzling issue.\n"
+      "Please, report to crbug.com/152530!\n"
+      "Failed to set up stderr: ";
+  if (HANDLE_EINTR(write(out_fd, msg, sizeof(msg) - 1)) > 0 && error_string &&
+      HANDLE_EINTR(write(out_fd, error_string, strlen(error_string))) > 0 &&
+      HANDLE_EINTR(write(out_fd, "\n", 1))) {
+  }
+}
+#endif  // !defined(NDEBUG)
+
+// We define a really simple sandbox policy. It is just good enough for us
+// to tell that the sandbox has actually been activated.
+ErrorCode ProbeEvaluator(SandboxBPF*, int sysnum, void*) __attribute__((const));
+ErrorCode ProbeEvaluator(SandboxBPF*, int sysnum, void*) {
+  switch (sysnum) {
+    case __NR_getpid:
+      // Return EPERM so that we can check that the filter actually ran.
+      return ErrorCode(EPERM);
+    case __NR_exit_group:
+      // Allow exit() with a non-default return code.
+      return ErrorCode(ErrorCode::ERR_ALLOWED);
+    default:
+      // Make everything else fail in an easily recognizable way.
+      return ErrorCode(EINVAL);
+  }
+}
+
+void ProbeProcess(void) {
+  if (syscall(__NR_getpid) < 0 && errno == EPERM) {
+    syscall(__NR_exit_group, static_cast<intptr_t>(kExpectedExitCode));
+  }
+}
+
+ErrorCode AllowAllEvaluator(SandboxBPF*, int sysnum, void*) {
+  if (!SandboxBPF::IsValidSyscallNumber(sysnum)) {
+    return ErrorCode(ENOSYS);
+  }
+  return ErrorCode(ErrorCode::ERR_ALLOWED);
+}
+
+void TryVsyscallProcess(void) {
+  time_t current_time;
+  // time() is implemented as a vsyscall. With an older glibc, with
+  // vsyscall=emulate and some versions of the seccomp BPF patch
+  // we may get SIGKILL-ed. Detect this!
+  if (time(&current_time) != static_cast<time_t>(-1)) {
+    syscall(__NR_exit_group, static_cast<intptr_t>(kExpectedExitCode));
+  }
+}
+
+bool IsSingleThreaded(int proc_fd) {
+  if (proc_fd < 0) {
+    // Cannot determine whether the program is single-threaded. Hope for
+    // the best...
+    return true;
+  }
+
+  struct stat sb;
+  int task = -1;
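+  // A directory's link count is two ("." plus the entry in its parent)
+  // plus one per subdirectory. /proc/self/task contains one subdirectory
+  // per thread, so st_nlink == 3 means there is exactly one thread.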
+  if ((task = openat(proc_fd, "self/task", O_RDONLY | O_DIRECTORY)) < 0 ||
+      fstat(task, &sb) != 0 || sb.st_nlink != 3 || IGNORE_EINTR(close(task))) {
+    if (task >= 0) {
+      if (IGNORE_EINTR(close(task))) {
+      }
+    }
+    return false;
+  }
+  return true;
+}
+
+bool IsDenied(const ErrorCode& code) {
+  return (code.err() & SECCOMP_RET_ACTION) == SECCOMP_RET_TRAP ||
+         (code.err() >= (SECCOMP_RET_ERRNO + ErrorCode::ERR_MIN_ERRNO) &&
+          code.err() <= (SECCOMP_RET_ERRNO + ErrorCode::ERR_MAX_ERRNO));
+}
+
+// Function that can be passed as a callback function to CodeGen::Traverse().
+// Checks whether the "insn" returns an UnsafeTrap() ErrorCode. If so, it
+// sets the "bool" variable pointed to by "aux".
+void CheckForUnsafeErrorCodes(Instruction* insn, void* aux) {
+  bool* is_unsafe = static_cast<bool*>(aux);
+  if (!*is_unsafe) {
+    if (BPF_CLASS(insn->code) == BPF_RET && insn->k > SECCOMP_RET_TRAP &&
+        insn->k - SECCOMP_RET_TRAP <= SECCOMP_RET_DATA) {
+      const ErrorCode& err =
+          Trap::ErrorCodeFromTrapId(insn->k & SECCOMP_RET_DATA);
+      if (err.error_type() != ErrorCode::ET_INVALID && !err.safe()) {
+        *is_unsafe = true;
+      }
+    }
+  }
+}
+
+// A Trap() handler that returns an "errno" value. The value is encoded
+// in the "aux" parameter.
+intptr_t ReturnErrno(const struct arch_seccomp_data&, void* aux) {
+  // TrapFnc functions report error by following the native kernel convention
+  // of returning an exit code in the range of -1..-4096. They do not try to
+  // set errno themselves. The glibc wrapper that triggered the SIGSYS will
+  // ultimately do so for us.
+  int err = reinterpret_cast<intptr_t>(aux) & SECCOMP_RET_DATA;
+  return -err;
+}
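+
+// A sketch of how such a handler gets registered (mirroring the Trap() call
+// in RedirectToUserspace() below):
+//
+//   ErrorCode err = sandbox->Trap(ReturnErrno,
+//                                 reinterpret_cast<void*>(EPERM));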
+
+// Function that can be passed as a callback function to CodeGen::Traverse().
+// Checks whether the "insn" returns an errno value from a BPF filter. If so,
+// it rewrites the instruction to instead call a Trap() handler that does
+// the same thing. "aux" is ignored.
+void RedirectToUserspace(Instruction* insn, void* aux) {
+  // When inside an UnsafeTrap() callback, we want to allow all system calls.
+  // This means we must conditionally disable the sandbox -- and that's not
+  // something that kernel-side BPF filters can do, as they cannot inspect
+  // any state other than the syscall arguments.
+  // But if we redirect all error handlers to user-space, then we can easily
+  // make this decision.
+  // The performance penalty for this extra round-trip to user-space is not
+  // actually that bad, as we only ever pay it for denied system calls; and a
+  // typical program has very few of these.
+  SandboxBPF* sandbox = static_cast<SandboxBPF*>(aux);
+  if (BPF_CLASS(insn->code) == BPF_RET &&
+      (insn->k & SECCOMP_RET_ACTION) == SECCOMP_RET_ERRNO) {
+    insn->k = sandbox->Trap(ReturnErrno,
+        reinterpret_cast<void*>(insn->k & SECCOMP_RET_DATA)).err();
+  }
+}
+
+// This wraps an existing policy and changes its behavior to match the changes
+// made by RedirectToUserspace(). This is part of the framework that allows BPF
+// evaluation in userland.
+// TODO(markus): document the code inside better.
+class RedirectToUserSpacePolicyWrapper : public SandboxBPFPolicy {
+ public:
+  explicit RedirectToUserSpacePolicyWrapper(
+      const SandboxBPFPolicy* wrapped_policy)
+      : wrapped_policy_(wrapped_policy) {
+    DCHECK(wrapped_policy_);
+  }
+
+  virtual ErrorCode EvaluateSyscall(SandboxBPF* sandbox_compiler,
+                                    int system_call_number) const OVERRIDE {
+    ErrorCode err =
+        wrapped_policy_->EvaluateSyscall(sandbox_compiler, system_call_number);
+    if ((err.err() & SECCOMP_RET_ACTION) == SECCOMP_RET_ERRNO) {
+      return sandbox_compiler->Trap(
+          ReturnErrno, reinterpret_cast<void*>(err.err() & SECCOMP_RET_DATA));
+    }
+    return err;
+  }
+
+ private:
+  const SandboxBPFPolicy* wrapped_policy_;
+  DISALLOW_COPY_AND_ASSIGN(RedirectToUserSpacePolicyWrapper);
+};
+
+intptr_t BPFFailure(const struct arch_seccomp_data&, void* aux) {
+  SANDBOX_DIE(static_cast<char*>(aux));
+}
+
+// This class allows compatibility with the old, deprecated SetSandboxPolicy.
+class CompatibilityPolicy : public SandboxBPFPolicy {
+ public:
+  CompatibilityPolicy(SandboxBPF::EvaluateSyscall syscall_evaluator, void* aux)
+      : syscall_evaluator_(syscall_evaluator), aux_(aux) {
+    DCHECK(syscall_evaluator_);
+  }
+
+  virtual ErrorCode EvaluateSyscall(SandboxBPF* sandbox_compiler,
+                                    int system_call_number) const OVERRIDE {
+    return syscall_evaluator_(sandbox_compiler, system_call_number, aux_);
+  }
+
+ private:
+  SandboxBPF::EvaluateSyscall syscall_evaluator_;
+  void* aux_;
+  DISALLOW_COPY_AND_ASSIGN(CompatibilityPolicy);
+};
+
+}  // namespace
+
+SandboxBPF::SandboxBPF()
+    : quiet_(false),
+      proc_fd_(-1),
+      conds_(new Conds),
+      sandbox_has_started_(false) {}
+
+SandboxBPF::~SandboxBPF() {
+  // It is generally unsafe to call any memory allocator operations or to even
+  // call arbitrary destructors after having installed a new policy. We just
+  // have no way to tell whether this policy would allow the system calls that
+  // they can trigger.
+  // So, we normally destroy all of our complex state prior to starting the
+  // sandbox. But this won't happen if the Sandbox object was created and
+  // never actually used to set up a sandbox. So, just in case, we are
+  // destroying any remaining state.
+  // The "if ()" statements are technically superfluous. But let's be explicit
+  // that we really don't want to run any code when we have already destroyed
+  // objects before setting up the sandbox.
+  if (conds_) {
+    delete conds_;
+  }
+}
+
+bool SandboxBPF::IsValidSyscallNumber(int sysnum) {
+  return SyscallIterator::IsValid(sysnum);
+}
+
+bool SandboxBPF::RunFunctionInPolicy(void (*code_in_sandbox)(),
+                                     EvaluateSyscall syscall_evaluator,
+                                     void* aux) {
+  // Block all signals before forking a child process. This prevents an
+  // attacker from manipulating our test by sending us an unexpected signal.
+  sigset_t old_mask, new_mask;
+  if (sigfillset(&new_mask) || sigprocmask(SIG_BLOCK, &new_mask, &old_mask)) {
+    SANDBOX_DIE("sigprocmask() failed");
+  }
+  int fds[2];
+  if (pipe2(fds, O_NONBLOCK | O_CLOEXEC)) {
+    SANDBOX_DIE("pipe() failed");
+  }
+
+  if (fds[0] <= 2 || fds[1] <= 2) {
+    SANDBOX_DIE("Process started without standard file descriptors");
+  }
+
+  // This code is using fork() and should only ever run single-threaded.
+  // Most of the code below is "async-signal-safe" and only minor changes
+  // would be needed to support threads.
+  DCHECK(IsSingleThreaded(proc_fd_));
+  pid_t pid = fork();
+  if (pid < 0) {
+    // Die if we cannot fork(). We would probably fail a little later
+    // anyway, as the machine is likely very close to running out of
+    // memory.
+    // But what we don't want to do is return "false", as a crafty
+    // attacker might cause fork() to fail at will and could trick us
+    // into running without a sandbox.
+    sigprocmask(SIG_SETMASK, &old_mask, NULL);  // OK, if it fails
+    SANDBOX_DIE("fork() failed unexpectedly");
+  }
+
+  // In the child process
+  if (!pid) {
+    // Test a very simple sandbox policy to verify that we can
+    // successfully turn on sandboxing.
+    Die::EnableSimpleExit();
+
+    errno = 0;
+    if (IGNORE_EINTR(close(fds[0]))) {
+      // This call to close() has been failing in strange ways. See
+      // crbug.com/152530. So we only fail in debug mode now.
+#if !defined(NDEBUG)
+      WriteFailedStderrSetupMessage(fds[1]);
+      SANDBOX_DIE(NULL);
+#endif
+    }
+    if (HANDLE_EINTR(dup2(fds[1], 2)) != 2) {
+      // Stderr could very well be a file descriptor to .xsession-errors, or
+      // another file, which could be backed by a file system that could cause
+      // dup2 to fail while trying to close stderr. It's important that we do
+      // not fail on trying to close stderr.
+      // If dup2 fails here, we will continue normally, this means that our
+      // parent won't cause a fatal failure if something writes to stderr in
+      // this child.
+#if !defined(NDEBUG)
+      // In DEBUG builds, we still want to get a report.
+      WriteFailedStderrSetupMessage(fds[1]);
+      SANDBOX_DIE(NULL);
+#endif
+    }
+    if (IGNORE_EINTR(close(fds[1]))) {
+      // This call to close() has been failing in strange ways. See
+      // crbug.com/152530. So we only fail in debug mode now.
+#if !defined(NDEBUG)
+      WriteFailedStderrSetupMessage(fds[1]);
+      SANDBOX_DIE(NULL);
+#endif
+    }
+
+    SetSandboxPolicyDeprecated(syscall_evaluator, aux);
+    if (!StartSandbox(PROCESS_SINGLE_THREADED)) {
+      SANDBOX_DIE(NULL);
+    }
+
+    // Run our code in the sandbox.
+    code_in_sandbox();
+
+    // code_in_sandbox() is not supposed to return here.
+    SANDBOX_DIE(NULL);
+  }
+
+  // In the parent process.
+  if (IGNORE_EINTR(close(fds[1]))) {
+    SANDBOX_DIE("close() failed");
+  }
+  if (sigprocmask(SIG_SETMASK, &old_mask, NULL)) {
+    SANDBOX_DIE("sigprocmask() failed");
+  }
+  int status;
+  if (HANDLE_EINTR(waitpid(pid, &status, 0)) != pid) {
+    SANDBOX_DIE("waitpid() failed unexpectedly");
+  }
+  bool rc = WIFEXITED(status) && WEXITSTATUS(status) == kExpectedExitCode;
+
+  // If we fail to support sandboxing, there might be an additional
+  // error message. If so, this was an entirely unexpected and fatal
+  // failure. We should report the failure and somebody must fix
+  // things. This is probably a security-critical bug in the sandboxing
+  // code.
+  if (!rc) {
+    char buf[4096];
+    ssize_t len = HANDLE_EINTR(read(fds[0], buf, sizeof(buf) - 1));
+    if (len > 0) {
+      while (len > 1 && buf[len - 1] == '\n') {
+        --len;
+      }
+      buf[len] = '\000';
+      SANDBOX_DIE(buf);
+    }
+  }
+  if (IGNORE_EINTR(close(fds[0]))) {
+    SANDBOX_DIE("close() failed");
+  }
+
+  return rc;
+}
+
+bool SandboxBPF::KernelSupportSeccompBPF() {
+  return RunFunctionInPolicy(ProbeProcess, ProbeEvaluator, 0) &&
+         RunFunctionInPolicy(TryVsyscallProcess, AllowAllEvaluator, 0);
+}
+
+SandboxBPF::SandboxStatus SandboxBPF::SupportsSeccompSandbox(int proc_fd) {
+  // If the sandbox is currently active, we clearly must have support for
+  // sandboxing.
+  if (status_ == STATUS_ENABLED) {
+    return status_;
+  }
+
+  // Even if the sandbox was previously available, something might have
+  // changed in our run-time environment. Check one more time.
+  if (status_ == STATUS_AVAILABLE) {
+    if (!IsSingleThreaded(proc_fd)) {
+      status_ = STATUS_UNAVAILABLE;
+    }
+    return status_;
+  }
+
+  if (status_ == STATUS_UNAVAILABLE && IsSingleThreaded(proc_fd)) {
+    // All state transitions resulting in STATUS_UNAVAILABLE are immediately
+    // preceded by STATUS_AVAILABLE. Furthermore, these transitions all
+    // happen if and only if they are triggered by the process being multi-
+    // threaded.
+    // In other words, if a single-threaded process is currently in the
+    // STATUS_UNAVAILABLE state, it is safe to assume that sandboxing is
+    // actually available.
+    status_ = STATUS_AVAILABLE;
+    return status_;
+  }
+
+  // If we have not previously checked for availability of the sandbox, or if
+  // we otherwise don't believe we have a good cached value, we have to
+  // perform a thorough check now.
+  if (status_ == STATUS_UNKNOWN) {
+    // We create our own private copy of a "Sandbox" object. This ensures that
+    // the object does not have any policies configured, that might interfere
+    // with the tests done by "KernelSupportSeccompBPF()".
+    SandboxBPF sandbox;
+
+    // By setting "quiet_ = true" we suppress messages for expected and benign
+    // failures (e.g. if the current kernel lacks support for BPF filters).
+    sandbox.quiet_ = true;
+    sandbox.set_proc_fd(proc_fd);
+    status_ = sandbox.KernelSupportSeccompBPF() ? STATUS_AVAILABLE
+                                                : STATUS_UNSUPPORTED;
+
+    // As we are performing our tests from a child process, the run-time
+    // environment that is visible to the sandbox is always guaranteed to be
+    // single-threaded. Let's check here whether the caller is single-
+    // threaded. Otherwise, we mark the sandbox as temporarily unavailable.
+    if (status_ == STATUS_AVAILABLE && !IsSingleThreaded(proc_fd)) {
+      status_ = STATUS_UNAVAILABLE;
+    }
+  }
+  return status_;
+}
+
+void SandboxBPF::set_proc_fd(int proc_fd) { proc_fd_ = proc_fd; }
+
+bool SandboxBPF::StartSandbox(SandboxThreadState thread_state) {
+  CHECK(thread_state == PROCESS_SINGLE_THREADED ||
+        thread_state == PROCESS_MULTI_THREADED);
+
+  if (status_ == STATUS_UNSUPPORTED || status_ == STATUS_UNAVAILABLE) {
+    SANDBOX_DIE(
+        "Trying to start sandbox, even though it is known to be "
+        "unavailable");
+    return false;
+  } else if (sandbox_has_started_ || !conds_) {
+    SANDBOX_DIE(
+        "Cannot repeatedly start sandbox. Create a separate Sandbox "
+        "object instead.");
+    return false;
+  }
+  if (proc_fd_ < 0) {
+    proc_fd_ = open("/proc", O_RDONLY | O_DIRECTORY);
+  }
+  if (proc_fd_ < 0) {
+    // For now, continue in degraded mode if we can't access /proc.
+    // In the future, we might want to tighten this requirement.
+  }
+
+  if (thread_state == PROCESS_SINGLE_THREADED && !IsSingleThreaded(proc_fd_)) {
+    SANDBOX_DIE("Cannot start sandbox, if process is already multi-threaded");
+    return false;
+  }
+
+  // We no longer need access to any files in /proc. We want to do this
+  // before installing the filters, just in case that our policy denies
+  // close().
+  if (proc_fd_ >= 0) {
+    if (IGNORE_EINTR(close(proc_fd_))) {
+      SANDBOX_DIE("Failed to close file descriptor for /proc");
+      return false;
+    }
+    proc_fd_ = -1;
+  }
+
+  // Install the filters.
+  InstallFilter(thread_state);
+
+  // We are now inside the sandbox.
+  status_ = STATUS_ENABLED;
+
+  return true;
+}
+
+void SandboxBPF::PolicySanityChecks(SandboxBPFPolicy* policy) {
+  for (SyscallIterator iter(true); !iter.Done();) {
+    uint32_t sysnum = iter.Next();
+    if (!IsDenied(policy->EvaluateSyscall(this, sysnum))) {
+      SANDBOX_DIE(
+          "Policies should deny system calls that are outside the "
+          "expected range (typically MIN_SYSCALL..MAX_SYSCALL)");
+    }
+  }
+  return;
+}
+
+// Deprecated API, supported with a wrapper to the new API.
+void SandboxBPF::SetSandboxPolicyDeprecated(EvaluateSyscall syscall_evaluator,
+                                            void* aux) {
+  if (sandbox_has_started_ || !conds_) {
+    SANDBOX_DIE("Cannot change policy after sandbox has started");
+  }
+  SetSandboxPolicy(new CompatibilityPolicy(syscall_evaluator, aux));
+}
+
+// Don't take a scoped_ptr here; polymorphism makes its use awkward.
+void SandboxBPF::SetSandboxPolicy(SandboxBPFPolicy* policy) {
+  DCHECK(!policy_);
+  if (sandbox_has_started_ || !conds_) {
+    SANDBOX_DIE("Cannot change policy after sandbox has started");
+  }
+  PolicySanityChecks(policy);
+  policy_.reset(policy);
+}
+
+void SandboxBPF::InstallFilter(SandboxThreadState thread_state) {
+  // We want to be very careful not to impose any requirements on the
+  // policies that are set with SetSandboxPolicy(). This means that as soon
+  // as the sandbox is active, we shouldn't be relying on libraries that
+  // could be making system calls; for example, we should avoid using the
+  // heap and STL functions.
+  // Temporarily copy the contents of the "program" vector into a
+  // stack-allocated array, and then explicitly destroy that object.
+  // This makes sure we don't explicitly or implicitly call new/delete after
+  // we have installed the BPF filter program in the kernel. Depending on
+  // the system memory allocator that is in effect, these operators can
+  // result in system calls to things like munmap() or brk().
+  Program* program = AssembleFilter(false /* force_verification */);
+
+  struct sock_filter bpf[program->size()];
+  const struct sock_fprog prog = {static_cast<unsigned short>(program->size()),
+                                  bpf};
+  memcpy(bpf, &(*program)[0], sizeof(bpf));
+  delete program;
+
+  // Make an attempt to release memory that is no longer needed here, rather
+  // than in the destructor. Try to avoid, as much as possible, making
+  // assumptions about what will still be possible in the new (sandboxed)
+  // execution environment.
+  delete conds_;
+  conds_ = NULL;
+  policy_.reset();
+
+  // Install the BPF filter program.
+  if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
+    SANDBOX_DIE(quiet_ ? NULL : "Kernel refuses to enable no-new-privs");
+  } else {
+    if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog)) {
+      SANDBOX_DIE(quiet_ ? NULL : "Kernel refuses to turn on BPF filters");
+    }
+  }
+
+  // TODO(rsesek): Always try to engage the sandbox with the
+  // PROCESS_MULTI_THREADED path first, and if that fails, assert that the
+  // process IsSingleThreaded() or SANDBOX_DIE.
+
+  if (thread_state == PROCESS_MULTI_THREADED) {
+    // TODO(rsesek): Move these to a more reasonable place once the kernel
+    // patch has landed upstream and these values are formalized.
+    #define PR_SECCOMP_EXT 41
+    #define SECCOMP_EXT_ACT 1
+    #define SECCOMP_EXT_ACT_TSYNC 1
+    if (prctl(PR_SECCOMP_EXT, SECCOMP_EXT_ACT, SECCOMP_EXT_ACT_TSYNC, 0, 0)) {
+      SANDBOX_DIE(quiet_ ? NULL : "Kernel refuses to synchronize threadgroup "
+                                  "BPF filters.");
+    }
+  }
+
+  sandbox_has_started_ = true;
+}
+
+SandboxBPF::Program* SandboxBPF::AssembleFilter(bool force_verification) {
+#if !defined(NDEBUG)
+  force_verification = true;
+#endif
+
+  // Verify that the user pushed a policy.
+  DCHECK(policy_);
+
+  // Assemble the BPF filter program.
+  CodeGen* gen = new CodeGen();
+  if (!gen) {
+    SANDBOX_DIE("Out of memory");
+  }
+
+  // If the architecture doesn't match SECCOMP_ARCH, disallow the
+  // system call.
+  Instruction* tail;
+  Instruction* head = gen->MakeInstruction(
+      BPF_LD + BPF_W + BPF_ABS,
+      SECCOMP_ARCH_IDX,
+      tail = gen->MakeInstruction(
+          BPF_JMP + BPF_JEQ + BPF_K,
+          SECCOMP_ARCH,
+          NULL,
+          gen->MakeInstruction(
+              BPF_RET + BPF_K,
+              Kill("Invalid audit architecture in BPF filter"))));
+
+  bool has_unsafe_traps = false;
+  {
+    // Evaluate all possible system calls and group their ErrorCodes into
+    // ranges of identical codes.
+    Ranges ranges;
+    FindRanges(&ranges);
+
+    // Compile the system call ranges to an optimized BPF jumptable
+    Instruction* jumptable =
+        AssembleJumpTable(gen, ranges.begin(), ranges.end());
+
+    // If there is at least one UnsafeTrap() in our program, the entire sandbox
+    // is unsafe. We need to modify the program so that all non-
+    // SECCOMP_RET_ALLOW ErrorCodes are handled in user-space. This will then
+    // allow us to temporarily disable sandboxing rules inside of callbacks to
+    // UnsafeTrap().
+    gen->Traverse(jumptable, CheckForUnsafeErrorCodes, &has_unsafe_traps);
+
+    // Grab the system call number, so that we can implement jump tables.
+    Instruction* load_nr =
+        gen->MakeInstruction(BPF_LD + BPF_W + BPF_ABS, SECCOMP_NR_IDX);
+
+    // If our BPF program has unsafe jumps, enable support for them. This
+    // test happens very early in the BPF filter program, even before we
+    // consider looking at system call numbers.
+    // As support for unsafe jumps essentially defeats all the security
+    // measures that the sandbox provides, we print a big warning message --
+    // and of course, we make sure to only ever enable this feature if it
+    // is actually requested by the sandbox policy.
+    if (has_unsafe_traps) {
+      if (SandboxSyscall(-1) == -1 && errno == ENOSYS) {
+        SANDBOX_DIE(
+            "Support for UnsafeTrap() has not yet been ported to this "
+            "architecture");
+      }
+
+      if (!policy_->EvaluateSyscall(this, __NR_rt_sigprocmask)
+               .Equals(ErrorCode(ErrorCode::ERR_ALLOWED)) ||
+          !policy_->EvaluateSyscall(this, __NR_rt_sigreturn)
+               .Equals(ErrorCode(ErrorCode::ERR_ALLOWED))
+#if defined(__NR_sigprocmask)
+          ||
+          !policy_->EvaluateSyscall(this, __NR_sigprocmask)
+               .Equals(ErrorCode(ErrorCode::ERR_ALLOWED))
+#endif
+#if defined(__NR_sigreturn)
+          ||
+          !policy_->EvaluateSyscall(this, __NR_sigreturn)
+               .Equals(ErrorCode(ErrorCode::ERR_ALLOWED))
+#endif
+          ) {
+        SANDBOX_DIE(
+            "Invalid seccomp policy; if using UnsafeTrap(), you must "
+            "unconditionally allow sigreturn() and sigprocmask()");
+      }
+
+      if (!Trap::EnableUnsafeTrapsInSigSysHandler()) {
+        // We should never be able to get here, as UnsafeTrap() should never
+        // actually return a valid ErrorCode object unless the user set the
+        // CHROME_SANDBOX_DEBUGGING environment variable; and therefore,
+        // "has_unsafe_traps" would always be false. But better double-check
+        // than enabling dangerous code.
+        SANDBOX_DIE("We'd rather die than enable unsafe traps");
+      }
+      gen->Traverse(jumptable, RedirectToUserspace, this);
+
+      // Allow system calls, if they originate from our magic return address
+      // (which we can query by calling SandboxSyscall(-1)).
+      uintptr_t syscall_entry_point =
+          static_cast<uintptr_t>(SandboxSyscall(-1));
+      uint32_t low = static_cast<uint32_t>(syscall_entry_point);
+#if __SIZEOF_POINTER__ > 4
+      uint32_t hi = static_cast<uint32_t>(syscall_entry_point >> 32);
+#endif
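+      // For example (illustrative values): if syscall_entry_point were
+      // 0x00007F00DEADBEEF, then low == 0xDEADBEEF and hi == 0x00007F00;
+      // the escape hatch below must match both halves before allowing the
+      // system call.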
+
+      // BPF cannot do native 64bit comparisons. On 64bit architectures, we
+      // have to compare both 32bit halves of the instruction pointer. If they
+      // match what we expect, we return ERR_ALLOWED. If either or both don't
+      // match, we continue evaluating the rest of the sandbox policy.
+      Instruction* escape_hatch = gen->MakeInstruction(
+          BPF_LD + BPF_W + BPF_ABS,
+          SECCOMP_IP_LSB_IDX,
+          gen->MakeInstruction(
+              BPF_JMP + BPF_JEQ + BPF_K,
+              low,
+#if __SIZEOF_POINTER__ > 4
+              gen->MakeInstruction(
+                  BPF_LD + BPF_W + BPF_ABS,
+                  SECCOMP_IP_MSB_IDX,
+                  gen->MakeInstruction(
+                      BPF_JMP + BPF_JEQ + BPF_K,
+                      hi,
+#endif
+                      gen->MakeInstruction(BPF_RET + BPF_K,
+                                           ErrorCode(ErrorCode::ERR_ALLOWED)),
+#if __SIZEOF_POINTER__ > 4
+                      load_nr)),
+#endif
+              load_nr));
+      gen->JoinInstructions(tail, escape_hatch);
+    } else {
+      gen->JoinInstructions(tail, load_nr);
+    }
+    tail = load_nr;
+
+// On Intel architectures, verify that system call numbers are in the
+// expected number range. The older i386 and x86-64 APIs clear bit 30
+// on all system calls. The newer x32 API always sets bit 30.
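+// For example (illustrative): in the x86-64 ABI, __NR_read is 0, while the
+// x32 ABI encodes the same call as 0x40000000 | 0, so a set bit 30 on a
+// non-x32 build (or a clear bit 30 on x32) indicates ABI mixing.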
+#if defined(__i386__) || defined(__x86_64__)
+    Instruction* invalidX32 = gen->MakeInstruction(
+        BPF_RET + BPF_K, Kill("Illegal mixing of system call ABIs").err_);
+    Instruction* checkX32 =
+#if defined(__x86_64__) && defined(__ILP32__)
+        gen->MakeInstruction(
+            BPF_JMP + BPF_JSET + BPF_K, 0x40000000, 0, invalidX32);
+#else
+        gen->MakeInstruction(
+            BPF_JMP + BPF_JSET + BPF_K, 0x40000000, invalidX32, 0);
+#endif
+    gen->JoinInstructions(tail, checkX32);
+    tail = checkX32;
+#endif
+
+    // Append the jump table to our preamble.
+    gen->JoinInstructions(tail, jumptable);
+  }
+
+  // Turn the DAG into a vector of instructions.
+  Program* program = new Program();
+  gen->Compile(head, program);
+  delete gen;
+
+  // Make sure compilation resulted in a BPF program that executes
+  // correctly. Otherwise, there is an internal error in our BPF compiler.
+  // There is really nothing the caller can do until the bug is fixed.
+  if (force_verification) {
+    // Verification is expensive. We only perform this step if we are
+    // compiled in debug mode, or if the caller explicitly requested
+    // verification.
+    VerifyProgram(*program, has_unsafe_traps);
+  }
+
+  return program;
+}
+
+void SandboxBPF::VerifyProgram(const Program& program, bool has_unsafe_traps) {
+  // If we previously rewrote the BPF program so that it calls user-space
+  // whenever we return an "errno" value from the filter, then we have to
+  // wrap our system call evaluator to perform the same operation. Otherwise,
+  // the verifier would also report a mismatch in return codes.
+  scoped_ptr<const RedirectToUserSpacePolicyWrapper> redirected_policy(
+      new RedirectToUserSpacePolicyWrapper(policy_.get()));
+
+  const char* err = NULL;
+  if (!Verifier::VerifyBPF(this,
+                           program,
+                           has_unsafe_traps ? *redirected_policy : *policy_,
+                           &err)) {
+    CodeGen::PrintProgram(program);
+    SANDBOX_DIE(err);
+  }
+}
+
+void SandboxBPF::FindRanges(Ranges* ranges) {
+  // Please note that "struct seccomp_data" defines system calls as a signed
+  // int32_t, but BPF instructions always operate on unsigned quantities. We
+  // deal with this disparity by enumerating from MIN_SYSCALL to MAX_SYSCALL,
+  // and then verifying that the rest of the number range (both positive and
+  // negative) all return the same ErrorCode.
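+  // Illustrative example (not from the original source): a policy that
+  // denies only nanosleep() with EACCES and allows everything else yields
+  // a range below __NR_nanosleep, the single-call range itself, a range
+  // above it, and separate range(s) covering the invalid syscall numbers.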
+  uint32_t old_sysnum = 0;
+  ErrorCode old_err = policy_->EvaluateSyscall(this, old_sysnum);
+  ErrorCode invalid_err = policy_->EvaluateSyscall(this, MIN_SYSCALL - 1);
+
+  for (SyscallIterator iter(false); !iter.Done();) {
+    uint32_t sysnum = iter.Next();
+    ErrorCode err = policy_->EvaluateSyscall(this, static_cast<int>(sysnum));
+    if (!iter.IsValid(sysnum) && !invalid_err.Equals(err)) {
+      // A proper sandbox policy should always treat system calls outside of
+      // the range MIN_SYSCALL..MAX_SYSCALL (i.e. anything that returns
+      // "false" for SyscallIterator::IsValid()) identically. Typically, all
+      // of these system calls would be denied with the same ErrorCode.
+      SANDBOX_DIE("Invalid seccomp policy");
+    }
+    if (!err.Equals(old_err) || iter.Done()) {
+      ranges->push_back(Range(old_sysnum, sysnum - 1, old_err));
+      old_sysnum = sysnum;
+      old_err = err;
+    }
+  }
+}
+
+Instruction* SandboxBPF::AssembleJumpTable(CodeGen* gen,
+                                           Ranges::const_iterator start,
+                                           Ranges::const_iterator stop) {
+  // We convert the list of system call ranges into a jump table that
+  // performs a binary search over the ranges.
+  // As a sanity check, we need at least one distinct range for us to be
+  // able to build a jump table.
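+  // Illustrative example (not from the original source): with two ranges,
+  // [0, 99] -> EPERM and [100, 0xFFFFFFFF] -> ERR_ALLOWED, the recursion
+  // emits a single BPF_JMP+BPF_JGE+BPF_K instruction that compares the
+  // system call number against 100 and returns ERR_ALLOWED on the true
+  // branch and EPERM on the false branch.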
+  if (stop - start <= 0) {
+    SANDBOX_DIE("Invalid set of system call ranges");
+  } else if (stop - start == 1) {
+    // If we have narrowed things down to a single range object, we can
+    // return from the BPF filter program.
+    return RetExpression(gen, start->err);
+  }
+
+  // Pick the range object that is located at the midpoint of our list.
+  // We compare our system call number against the lowest valid system call
+  // number in this range object. If our number is lower, it is outside of
+  // this range object. If it is greater or equal, it might be inside.
+  Ranges::const_iterator mid = start + (stop - start) / 2;
+
+  // Sub-divide the list of ranges and continue recursively.
+  Instruction* jf = AssembleJumpTable(gen, start, mid);
+  Instruction* jt = AssembleJumpTable(gen, mid, stop);
+  return gen->MakeInstruction(BPF_JMP + BPF_JGE + BPF_K, mid->from, jt, jf);
+}
+
+Instruction* SandboxBPF::RetExpression(CodeGen* gen, const ErrorCode& err) {
+  if (err.error_type_ == ErrorCode::ET_COND) {
+    return CondExpression(gen, err);
+  } else {
+    return gen->MakeInstruction(BPF_RET + BPF_K, err);
+  }
+}
+
+Instruction* SandboxBPF::CondExpression(CodeGen* gen, const ErrorCode& cond) {
+  // We can only inspect the six system call arguments that are passed in
+  // CPU registers.
+  if (cond.argno_ < 0 || cond.argno_ >= 6) {
+    SANDBOX_DIE(
+        "Internal compiler error; invalid argument number "
+        "encountered");
+  }
+
+  // BPF programs operate on 32bit entities. Load both halves of the 64bit
+  // system call argument and then generate suitable conditional statements.
+  Instruction* msb_head = gen->MakeInstruction(
+      BPF_LD + BPF_W + BPF_ABS, SECCOMP_ARG_MSB_IDX(cond.argno_));
+  Instruction* msb_tail = msb_head;
+  Instruction* lsb_head = gen->MakeInstruction(
+      BPF_LD + BPF_W + BPF_ABS, SECCOMP_ARG_LSB_IDX(cond.argno_));
+  Instruction* lsb_tail = lsb_head;
+
+  // Emit a suitable comparison statement.
+  switch (cond.op_) {
+    case ErrorCode::OP_EQUAL:
+      // Compare the least significant bits for equality
+      lsb_tail = gen->MakeInstruction(BPF_JMP + BPF_JEQ + BPF_K,
+                                      static_cast<uint32_t>(cond.value_),
+                                      RetExpression(gen, *cond.passed_),
+                                      RetExpression(gen, *cond.failed_));
+      gen->JoinInstructions(lsb_head, lsb_tail);
+
+      // If we are looking at a 64bit argument, we need to also compare the
+      // most significant bits.
+      if (cond.width_ == ErrorCode::TP_64BIT) {
+        msb_tail =
+            gen->MakeInstruction(BPF_JMP + BPF_JEQ + BPF_K,
+                                 static_cast<uint32_t>(cond.value_ >> 32),
+                                 lsb_head,
+                                 RetExpression(gen, *cond.failed_));
+        gen->JoinInstructions(msb_head, msb_tail);
+      }
+      break;
+    case ErrorCode::OP_HAS_ALL_BITS:
+      // Check the bits in the LSB half of the system call argument. Our
+      // OP_HAS_ALL_BITS operator passes iff all of the bits are set. This is
+      // different from the kernel's BPF_JSET operation, which passes if any
+      // of the bits are set.
+      // Of course, if there is only a single set bit (or none at all), then
+      // things get easier.
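+      // For example (illustrative): with lsb_bits == 0x5, the BPF_AND +
+      // BPF_JEQ pair below passes only if bits 0 and 2 are both set in the
+      // argument, whereas a plain BPF_JSET 0x5 would already pass when just
+      // one of them is set.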
+      {
+        uint32_t lsb_bits = static_cast<uint32_t>(cond.value_);
+        int lsb_bit_count = popcount(lsb_bits);
+        if (lsb_bit_count == 0) {
+          // No bits are set in the LSB half. The test will always pass.
+          lsb_head = RetExpression(gen, *cond.passed_);
+          lsb_tail = NULL;
+        } else if (lsb_bit_count == 1) {
+          // Exactly one bit is set in the LSB half. We can use the BPF_JSET
+          // operator.
+          lsb_tail = gen->MakeInstruction(BPF_JMP + BPF_JSET + BPF_K,
+                                          lsb_bits,
+                                          RetExpression(gen, *cond.passed_),
+                                          RetExpression(gen, *cond.failed_));
+          gen->JoinInstructions(lsb_head, lsb_tail);
+        } else {
+          // More than one bit is set in the LSB half. We need to combine
+          // BPF_AND and BPF_JEQ to test whether all of these bits are in fact
+          // set in the system call argument.
+          gen->JoinInstructions(
+              lsb_head,
+              gen->MakeInstruction(BPF_ALU + BPF_AND + BPF_K,
+                                   lsb_bits,
+                                   lsb_tail = gen->MakeInstruction(
+                                       BPF_JMP + BPF_JEQ + BPF_K,
+                                       lsb_bits,
+                                       RetExpression(gen, *cond.passed_),
+                                       RetExpression(gen, *cond.failed_))));
+        }
+      }
+
+      // If we are looking at a 64bit argument, we need to also check the bits
+      // in the MSB half of the system call argument.
+      if (cond.width_ == ErrorCode::TP_64BIT) {
+        uint32_t msb_bits = static_cast<uint32_t>(cond.value_ >> 32);
+        int msb_bit_count = popcount(msb_bits);
+        if (msb_bit_count == 0) {
+          // No bits are set in the MSB half. The test will always pass.
+          msb_head = lsb_head;
+        } else if (msb_bit_count == 1) {
+          // Exactly one bit is set in the MSB half. We can use the BPF_JSET
+          // operator.
+          msb_tail = gen->MakeInstruction(BPF_JMP + BPF_JSET + BPF_K,
+                                          msb_bits,
+                                          lsb_head,
+                                          RetExpression(gen, *cond.failed_));
+          gen->JoinInstructions(msb_head, msb_tail);
+        } else {
+          // More than one bit is set in the MSB half. We need to combine
+          // BPF_AND and BPF_JEQ to test whether all of these bits are in fact
+          // set in the system call argument.
+          gen->JoinInstructions(
+              msb_head,
+              gen->MakeInstruction(
+                  BPF_ALU + BPF_AND + BPF_K,
+                  msb_bits,
+                  gen->MakeInstruction(BPF_JMP + BPF_JEQ + BPF_K,
+                                       msb_bits,
+                                       lsb_head,
+                                       RetExpression(gen, *cond.failed_))));
+        }
+      }
+      break;
+    case ErrorCode::OP_HAS_ANY_BITS:
+      // Check the bits in the LSB half of the system call argument. Our
+      // OP_HAS_ANY_BITS operator passes iff any of the bits are set. This maps
+      // nicely to the kernel's BPF_JSET operation.
+      {
+        uint32_t lsb_bits = static_cast<uint32_t>(cond.value_);
+        if (!lsb_bits) {
+          // No bits are set in the LSB half. The test will always fail.
+          lsb_head = RetExpression(gen, *cond.failed_);
+          lsb_tail = NULL;
+        } else {
+          lsb_tail = gen->MakeInstruction(BPF_JMP + BPF_JSET + BPF_K,
+                                          lsb_bits,
+                                          RetExpression(gen, *cond.passed_),
+                                          RetExpression(gen, *cond.failed_));
+          gen->JoinInstructions(lsb_head, lsb_tail);
+        }
+      }
+
+      // If we are looking at a 64bit argument, we need to also check the bits
+      // in the MSB half of the system call argument.
+      if (cond.width_ == ErrorCode::TP_64BIT) {
+        uint32_t msb_bits = static_cast<uint32_t>(cond.value_ >> 32);
+        if (!msb_bits) {
+          // No bits are set in the MSB half. The test will always fail.
+          msb_head = lsb_head;
+        } else {
+          msb_tail = gen->MakeInstruction(BPF_JMP + BPF_JSET + BPF_K,
+                                          msb_bits,
+                                          RetExpression(gen, *cond.passed_),
+                                          lsb_head);
+          gen->JoinInstructions(msb_head, msb_tail);
+        }
+      }
+      break;
+    default:
+      // TODO(markus): Need to add support for OP_GREATER
+      SANDBOX_DIE("Not implemented");
+      break;
+  }
+
+  // Ensure that we never pass a 64bit value when we only expect a 32bit
+  // value. This is somewhat complicated by the fact that, on 64bit systems,
+  // callers could legitimately pass in a non-zero value in the MSB, iff the
+  // LSB has been sign-extended into the MSB.
+  if (cond.width_ == ErrorCode::TP_32BIT) {
+    if (cond.value_ >> 32) {
+      SANDBOX_DIE(
+          "Invalid comparison of a 32bit system call argument "
+          "against a 64bit constant; this test is always false.");
+    }
+
+    Instruction* invalid_64bit = RetExpression(gen, Unexpected64bitArgument());
+#if __SIZEOF_POINTER__ > 4
+    invalid_64bit = gen->MakeInstruction(
+        BPF_JMP + BPF_JEQ + BPF_K,
+        0xFFFFFFFF,
+        gen->MakeInstruction(BPF_LD + BPF_W + BPF_ABS,
+                             SECCOMP_ARG_LSB_IDX(cond.argno_),
+                             gen->MakeInstruction(BPF_JMP + BPF_JGE + BPF_K,
+                                                  0x80000000,
+                                                  lsb_head,
+                                                  invalid_64bit)),
+        invalid_64bit);
+#endif
+    gen->JoinInstructions(
+        msb_tail,
+        gen->MakeInstruction(
+            BPF_JMP + BPF_JEQ + BPF_K, 0, lsb_head, invalid_64bit));
+  }
+
+  return msb_head;
+}
+
+ErrorCode SandboxBPF::Unexpected64bitArgument() {
+  return Kill("Unexpected 64bit argument detected");
+}
+
+ErrorCode SandboxBPF::Trap(Trap::TrapFnc fnc, const void* aux) {
+  return Trap::MakeTrap(fnc, aux, true /* Safe Trap */);
+}
+
+ErrorCode SandboxBPF::UnsafeTrap(Trap::TrapFnc fnc, const void* aux) {
+  return Trap::MakeTrap(fnc, aux, false /* Unsafe Trap */);
+}
+
+intptr_t SandboxBPF::ForwardSyscall(const struct arch_seccomp_data& args) {
+  return SandboxSyscall(args.nr,
+                        static_cast<intptr_t>(args.args[0]),
+                        static_cast<intptr_t>(args.args[1]),
+                        static_cast<intptr_t>(args.args[2]),
+                        static_cast<intptr_t>(args.args[3]),
+                        static_cast<intptr_t>(args.args[4]),
+                        static_cast<intptr_t>(args.args[5]));
+}
+
+ErrorCode SandboxBPF::Cond(int argno,
+                           ErrorCode::ArgType width,
+                           ErrorCode::Operation op,
+                           uint64_t value,
+                           const ErrorCode& passed,
+                           const ErrorCode& failed) {
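+  // Note: inserting copies of |passed| and |failed| into |conds_| gives them
+  // storage that outlives this call, so the pointers embedded in the
+  // returned ErrorCode stay valid until the sandbox deletes |conds_| (e.g.
+  // in InstallFilter()).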
+  return ErrorCode(argno,
+                   width,
+                   op,
+                   value,
+                   &*conds_->insert(passed).first,
+                   &*conds_->insert(failed).first);
+}
+
+ErrorCode SandboxBPF::Kill(const char* msg) {
+  return Trap(BPFFailure, const_cast<char*>(msg));
+}
+
+SandboxBPF::SandboxStatus SandboxBPF::status_ = STATUS_UNKNOWN;
+
+}  // namespace sandbox
new file mode 100644
--- /dev/null
+++ b/security/sandbox/chromium/sandbox/linux/seccomp-bpf/sandbox_bpf.h
@@ -0,0 +1,292 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_SECCOMP_BPF_SANDBOX_BPF_H__
+#define SANDBOX_LINUX_SECCOMP_BPF_SANDBOX_BPF_H__
+
+#include <stddef.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+
+#include <algorithm>
+#include <limits>
+#include <map>
+#include <set>
+#include <utility>
+#include <vector>
+
+#include "base/compiler_specific.h"
+#include "base/memory/scoped_ptr.h"
+#include "sandbox/linux/sandbox_export.h"
+#include "sandbox/linux/seccomp-bpf/die.h"
+#include "sandbox/linux/seccomp-bpf/errorcode.h"
+#include "sandbox/linux/seccomp-bpf/linux_seccomp.h"
+
+namespace sandbox {
+
+struct arch_seccomp_data {
+  int nr;
+  uint32_t arch;
+  uint64_t instruction_pointer;
+  uint64_t args[6];
+};
+
+struct arch_sigsys {
+  void* ip;
+  int nr;
+  unsigned int arch;
+};
+
+class CodeGen;
+class SandboxBPFPolicy;
+class SandboxUnittestHelper;
+struct Instruction;
+
+class SANDBOX_EXPORT SandboxBPF {
+ public:
+  enum SandboxStatus {
+    STATUS_UNKNOWN,      // Status prior to calling SupportsSeccompSandbox()
+    STATUS_UNSUPPORTED,  // The kernel does not appear to support sandboxing
+    STATUS_UNAVAILABLE,  // Currently unavailable but might work again later
+    STATUS_AVAILABLE,    // Sandboxing is available but not currently active
+    STATUS_ENABLED       // The sandbox is now active
+  };
+
+  // Depending on the level of kernel support, seccomp-bpf may require the
+  // process to be single-threaded in order to enable it. When calling
+  // StartSandbox(), the program should indicate whether or not the sandbox
+  // should try to engage multi-thread support.
+  enum SandboxThreadState {
+    PROCESS_INVALID,
+    PROCESS_SINGLE_THREADED,  // The program is currently single-threaded.
+    // Note: PROCESS_MULTI_THREADED requires experimental kernel support that
+    // has not been contributed to upstream Linux.
+    PROCESS_MULTI_THREADED,   // The program may be multi-threaded.
+  };
+
+  // When calling SetSandboxPolicyDeprecated(), the caller can provide an
+  // arbitrary pointer in |aux|. This pointer will then be forwarded to the
+  // sandbox policy each time a call is made through an EvaluateSyscall
+  // function pointer. One common use case would be to pass the "aux"
+  // pointer as an argument to Trap() functions.
+  typedef ErrorCode (*EvaluateSyscall)(SandboxBPF* sandbox_compiler,
+                                       int system_call_number,
+                                       void* aux);
+  typedef std::vector<std::pair<EvaluateSyscall, void*> > Evaluators;
+  // A vector of BPF instructions that need to be installed as a filter
+  // program in the kernel.
+  typedef std::vector<struct sock_filter> Program;
+
+  // Constructors and destructors.
+  // NOTE: Setting a policy and starting the sandbox is a one-way operation.
+  //       The kernel does not provide any option for unloading a loaded
+  //       sandbox. Strictly speaking, that means we should disallow calling
+  //       the destructor if StartSandbox() has ever been called. In practice,
+  //       this makes it needlessly complicated to operate on "Sandbox"
+  //       objects. So, we instead opted to allow object destruction. But it
+  //       should be noted that during its lifetime, the object probably made
+  //       irreversible state changes to the runtime environment. These changes
+  //       stay in effect even after the destructor has been run.
+  SandboxBPF();
+  ~SandboxBPF();
+
+  // Checks whether a particular system call number is valid on the current
+  // architecture. E.g. on ARM there's a non-contiguous range of private
+  // system calls.
+  static bool IsValidSyscallNumber(int sysnum);
+
+  // There are a lot of reasons why the Seccomp sandbox might not be available.
+  // This could be because the kernel does not support Seccomp mode, or it
+  // could be because another sandbox is already active.
+  // "proc_fd" should be a file descriptor for "/proc", or -1 if not
+  // provided by the caller.
+  static SandboxStatus SupportsSeccompSandbox(int proc_fd);
+
+  // The sandbox needs to be able to access files in "/proc/self". If this
+  // directory is not accessible when "StartSandbox()" gets called, the caller
+  // can provide an already opened file descriptor by calling "set_proc_fd()".
+  // The sandbox becomes the new owner of this file descriptor and will
+  // eventually close it when "StartSandbox()" executes.
+  void set_proc_fd(int proc_fd);
+
+  // The system call evaluator function is called with the system
+  // call number. It can decide to allow the system call unconditionally
+  // by returning ERR_ALLOWED; it can deny the system call unconditionally by
+  // returning an appropriate "errno" value; or it can request inspection
+  // of system call argument(s) by returning a suitable ErrorCode.
+  // The "aux" parameter can be used to pass optional data to the system call
+  // evaluator. There are different possible uses for this data, but one of the
+  // use cases would be for the policy to then forward this pointer to a Trap()
+  // handler. In this case, of course, the data that is pointed to must remain
+  // valid for the entire time that Trap() handlers can be called; typically,
+  // this would be the lifetime of the program.
+  // DEPRECATED: use the policy interface below.
+  void SetSandboxPolicyDeprecated(EvaluateSyscall syscallEvaluator, void* aux);
+
+  // Set the BPF policy as |policy|. Ownership of |policy| is transferred here
+  // to the sandbox object.
+  void SetSandboxPolicy(SandboxBPFPolicy* policy);
+
+  // We can use ErrorCode to request calling of a trap handler. This method
+  // performs the required wrapping of the callback function into an
+  // ErrorCode object.
+  // The "aux" field can carry a pointer to arbitrary data. See EvaluateSyscall
+  // for a description of how to pass data from SetSandboxPolicy() to a Trap()
+  // handler.
+  ErrorCode Trap(Trap::TrapFnc fnc, const void* aux);
+
+  // Calls a user-space trap handler and disables all sandboxing for system
+  // calls made from this trap handler.
+  // This feature is available only if explicitly enabled by the user having
+  // set the CHROME_SANDBOX_DEBUGGING environment variable.
+  // Returns an ET_INVALID ErrorCode if called when not enabled.
+  // NOTE: This feature, by definition, disables all security features of
+  //   the sandbox. It should never be used in production, but it can be
+  //   very useful to diagnose code that is incompatible with the sandbox.
+  //   If even a single system call returns "UnsafeTrap", the security of
+  //   the entire sandbox should be considered compromised.
+  ErrorCode UnsafeTrap(Trap::TrapFnc fnc, const void* aux);
+
+  // From within an UnsafeTrap() it is often useful to be able to execute
+  // the system call that triggered the trap. The ForwardSyscall() method
+  // makes this easy. It is more efficient than calling glibc's syscall()
+  // function, as it avoids the extra round-trip to the signal handler. And
+  // it automatically does the correct thing to report kernel-style error
+  // conditions, rather than setting errno. See the comments for TrapFnc for
+  // details. In other words, the return value from ForwardSyscall() is
+  // directly suitable as a return value for a trap handler.
+  static intptr_t ForwardSyscall(const struct arch_seccomp_data& args);
+
+  // We can also use ErrorCode to request evaluation of a conditional
+  // statement based on inspection of system call parameters.
+  // This method wraps an ErrorCode object around the conditional statement.
+  // Argument "argno" (0..5) will be compared to "value" using comparator
+  // "op". If the condition is true, "passed" will be returned; otherwise,
+  // "failed".
+  // If "is_32bit" is set, the argument must be in the range
+  // 0x0..0xFFFFFFFF. If it is outside this range, the sandbox treats the
+  // system call just the same as any other ABI violation (i.e. it aborts
+  // with an error message).
+  ErrorCode Cond(int argno,
+                 ErrorCode::ArgType is_32bit,
+                 ErrorCode::Operation op,
+                 uint64_t value,
+                 const ErrorCode& passed,
+                 const ErrorCode& failed);
+
+  // Kill the program and print an error message.
+  ErrorCode Kill(const char* msg);
+
+  // This is the main public entry point. It finds all system calls that
+  // need rewriting, sets up the resources needed by the sandbox, and
+  // enters Seccomp mode.
+  // The calling process must specify its current SandboxThreadState, as a way
+  // to tell the sandbox which type of kernel support it should engage.
+  // It is possible to stack multiple sandboxes by creating separate "Sandbox"
+  // objects and calling "StartSandbox()" on each of them. Please note that
+  // this requires special care, as newly stacked sandboxes can never relax
+  // restrictions imposed by earlier sandboxes. Furthermore, installing a
+  // new policy requires making system calls that might already be
+  // disallowed.
+  // Finally, stacking adds more kernel overhead than having a single
+  // combined policy, so it should only be used if there are no alternatives.
+  bool StartSandbox(SandboxThreadState thread_state) WARN_UNUSED_RESULT;
+
+  // Assembles a BPF filter program from the current policy. After calling this
+  // function, you must not call any other sandboxing function.
+  // Typically, AssembleFilter() is only used by unit tests and by sandbox
+  // internals. It should not be used by production code.
+  // For performance reasons, we normally only run the assembled BPF program
+  // through the verifier if the program was built in debug mode.
+  // But by setting "force_verification", the caller can request that the
+  // verifier is run unconditionally. This is useful for unittests.
+  Program* AssembleFilter(bool force_verification);
+
+  // Returns the fatal ErrorCode that is used to indicate that somebody
+  // attempted to pass a 64bit value in a 32bit system call argument.
+  // This method is primarily needed for testing purposes.
+  ErrorCode Unexpected64bitArgument();
+
+ private:
+  friend class CodeGen;
+  friend class SandboxUnittestHelper;
+  friend class ErrorCode;
+
+  struct Range {
+    Range(uint32_t f, uint32_t t, const ErrorCode& e)
+        : from(f), to(t), err(e) {}
+    uint32_t from, to;
+    ErrorCode err;
+  };
+  typedef std::vector<Range> Ranges;
+  typedef std::map<uint32_t, ErrorCode> ErrMap;
+  typedef std::set<ErrorCode, struct ErrorCode::LessThan> Conds;
+
+  // Get a file descriptor pointing to "/proc", if currently available.
+  int proc_fd() { return proc_fd_; }
+
+  // Creates a subprocess and runs "code_in_sandbox" inside the specified
+  // policy. The caller has to make sure that "this" has not yet been
+  // initialized with any other policies.
+  bool RunFunctionInPolicy(void (*code_in_sandbox)(),
+                           EvaluateSyscall syscall_evaluator,
+                           void* aux);
+
+  // Performs a couple of sanity checks to verify that the kernel supports the
+  // features that we need for successful sandboxing.
+  // The caller has to make sure that "this" has not yet been initialized with
+  // any other policies.
+  bool KernelSupportSeccompBPF();
+
+  // Verify that the current policy passes some basic sanity checks.
+  void PolicySanityChecks(SandboxBPFPolicy* policy);
+
+  // Assembles and installs a filter based on the policy that has previously
+  // been configured with SetSandboxPolicy().
+  void InstallFilter(SandboxThreadState thread_state);
+
+  // Verify the correctness of a compiled program by comparing it against the
+  // current policy. This function should only ever be called by unit tests and
+  // by the sandbox internals. It should not be used by production code.
+  void VerifyProgram(const Program& program, bool has_unsafe_traps);
+
+  // Finds all the ranges of system calls that need to be handled. Ranges are
+  // sorted in ascending order of system call numbers. There are no gaps in the
+  // ranges. System calls with identical ErrorCodes are coalesced into a single
+  // range.
+  void FindRanges(Ranges* ranges);
+
+  // Returns a BPF program snippet that implements a jump table for the
+  // given range of system call numbers. This function runs recursively.
+  Instruction* AssembleJumpTable(CodeGen* gen,
+                                 Ranges::const_iterator start,
+                                 Ranges::const_iterator stop);
+
+  // Returns a BPF program snippet that makes the BPF filter program exit
+  // with the given ErrorCode "err". N.B. the ErrorCode may very well be a
+  // conditional expression; if so, this function will recursively call
+  // CondExpression() and possibly RetExpression() to build a complex set of
+  // instructions.
+  Instruction* RetExpression(CodeGen* gen, const ErrorCode& err);
+
+  // Returns a BPF program that evaluates the conditional expression in
+  // "cond" and returns the appropriate value from the BPF filter program.
+  // This function recursively calls RetExpression(); it should only ever be
+  // called from RetExpression().
+  Instruction* CondExpression(CodeGen* gen, const ErrorCode& cond);
+
+  static SandboxStatus status_;
+
+  bool quiet_;
+  int proc_fd_;
+  scoped_ptr<const SandboxBPFPolicy> policy_;
+  Conds* conds_;
+  bool sandbox_has_started_;
+
+  DISALLOW_COPY_AND_ASSIGN(SandboxBPF);
+};
+
+}  // namespace sandbox
+
+#endif  // SANDBOX_LINUX_SECCOMP_BPF_SANDBOX_BPF_H__
new file mode 100644
--- /dev/null
+++ b/security/sandbox/chromium/sandbox/linux/seccomp-bpf/sandbox_bpf_policy.h
@@ -0,0 +1,35 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_SECCOMP_BPF_SANDBOX_BPF_POLICY_H_
+#define SANDBOX_LINUX_SECCOMP_BPF_SANDBOX_BPF_POLICY_H_
+
+#include "base/basictypes.h"
+
+namespace sandbox {
+
+class ErrorCode;
+class SandboxBPF;
+
+// This is the interface to implement to define a BPF sandbox policy.
+class SandboxBPFPolicy {
+ public:
+  SandboxBPFPolicy() {}
+  virtual ~SandboxBPFPolicy() {}
+
+  // The EvaluateSyscall method is called with the system call number. It can
+  // decide to allow the system call unconditionally by returning ERR_ALLOWED;
+  // it can deny the system call unconditionally by returning an appropriate
+  // "errno" value; or it can request inspection of system call argument(s) by
+  // returning a suitable ErrorCode.
+  virtual ErrorCode EvaluateSyscall(SandboxBPF* sandbox_compiler,
+                                    int system_call_number) const = 0;
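+
+  // A minimal implementation sketch (illustrative only, modeled on the
+  // policies in sandbox_bpf_unittest.cc):
+  //   class DenyNanosleepPolicy : public SandboxBPFPolicy {
+  //    public:
+  //     virtual ErrorCode EvaluateSyscall(SandboxBPF* sandbox,
+  //                                       int sysno) const OVERRIDE {
+  //       if (!SandboxBPF::IsValidSyscallNumber(sysno))
+  //         return ErrorCode(ENOSYS);
+  //       return sysno == __NR_nanosleep ? ErrorCode(EACCES)
+  //                                      : ErrorCode(ErrorCode::ERR_ALLOWED);
+  //     }
+  //   };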
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(SandboxBPFPolicy);
+};
+
+}  // namespace sandbox
+
+#endif  // SANDBOX_LINUX_SECCOMP_BPF_SANDBOX_BPF_POLICY_H_
new file mode 100644
--- /dev/null
+++ b/security/sandbox/chromium/sandbox/linux/seccomp-bpf/sandbox_bpf_unittest.cc
@@ -0,0 +1,1786 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <errno.h>
+#include <pthread.h>
+#include <sched.h>
+#include <sys/prctl.h>
+#include <sys/syscall.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/utsname.h>
+#include <unistd.h>
+
+#if defined(ANDROID)
+// Work-around for buggy headers in Android's NDK
+#define __user
+#endif
+#include <linux/futex.h>
+
+#include <ostream>
+
+#include "base/bind.h"
+#include "base/memory/scoped_ptr.h"
+#include "build/build_config.h"
+#include "sandbox/linux/seccomp-bpf/bpf_tests.h"
+#include "sandbox/linux/seccomp-bpf/syscall.h"
+#include "sandbox/linux/seccomp-bpf/trap.h"
+#include "sandbox/linux/seccomp-bpf/verifier.h"
+#include "sandbox/linux/services/broker_process.h"
+#include "sandbox/linux/services/linux_syscalls.h"
+#include "sandbox/linux/tests/unit_tests.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+// Workaround for Android's prctl.h file.
+#ifndef PR_GET_ENDIAN
+#define PR_GET_ENDIAN 19
+#endif
+#ifndef PR_CAPBSET_READ
+#define PR_CAPBSET_READ 23
+#define PR_CAPBSET_DROP 24
+#endif
+
+namespace sandbox {
+
+namespace {
+
+const int kExpectedReturnValue = 42;
+const char kSandboxDebuggingEnv[] = "CHROME_SANDBOX_DEBUGGING";
+
+// This test should execute no matter whether we have kernel support. So,
+// we make it a TEST() instead of a BPF_TEST().
+TEST(SandboxBPF, DISABLE_ON_TSAN(CallSupports)) {
+  // We check that we don't crash, but it's ok if the kernel doesn't
+  // support it.
+  bool seccomp_bpf_supported =
+      SandboxBPF::SupportsSeccompSandbox(-1) == SandboxBPF::STATUS_AVAILABLE;
+  // We want to log whether or not seccomp BPF is actually supported
+  // since actual test coverage depends on it.
+  RecordProperty("SeccompBPFSupported",
+                 seccomp_bpf_supported ? "true." : "false.");
+  std::cout << "Seccomp BPF supported: "
+            << (seccomp_bpf_supported ? "true." : "false.") << "\n";
+  RecordProperty("PointerSize", sizeof(void*));
+  std::cout << "Pointer size: " << sizeof(void*) << "\n";
+}
+
+SANDBOX_TEST(SandboxBPF, DISABLE_ON_TSAN(CallSupportsTwice)) {
+  SandboxBPF::SupportsSeccompSandbox(-1);
+  SandboxBPF::SupportsSeccompSandbox(-1);
+}
+
+// BPF_TEST handles a lot of the boiler-plate code around setting up a
+// policy and optionally passing data between the caller, the policy, and
+// any Trap() handlers. This is great for writing short and concise tests,
+// and it helps us avoid accidentally forgetting any of the crucial steps
+// in setting up the sandbox. But it wouldn't hurt to have at least one
+// test that explicitly walks through all these steps.
+
+intptr_t FakeGetPid(const struct arch_seccomp_data& args, void* aux) {
+  BPF_ASSERT(aux);
+  pid_t* pid_ptr = static_cast<pid_t*>(aux);
+  return (*pid_ptr)++;
+}
+
+ErrorCode VerboseAPITestingPolicy(SandboxBPF* sandbox, int sysno, void* aux) {
+  if (!SandboxBPF::IsValidSyscallNumber(sysno)) {
+    return ErrorCode(ENOSYS);
+  } else if (sysno == __NR_getpid) {
+    return sandbox->Trap(FakeGetPid, aux);
+  } else {
+    return ErrorCode(ErrorCode::ERR_ALLOWED);
+  }
+}
+
+SANDBOX_TEST(SandboxBPF, DISABLE_ON_TSAN(VerboseAPITesting)) {
+  if (SandboxBPF::SupportsSeccompSandbox(-1) ==
+      sandbox::SandboxBPF::STATUS_AVAILABLE) {
+    pid_t test_var = 0;
+    SandboxBPF sandbox;
+    sandbox.SetSandboxPolicyDeprecated(VerboseAPITestingPolicy, &test_var);
+    BPF_ASSERT(sandbox.StartSandbox(SandboxBPF::PROCESS_SINGLE_THREADED));
+
+    BPF_ASSERT(test_var == 0);
+    BPF_ASSERT(syscall(__NR_getpid) == 0);
+    BPF_ASSERT(test_var == 1);
+    BPF_ASSERT(syscall(__NR_getpid) == 1);
+    BPF_ASSERT(test_var == 2);
+
+    // N.B.: Any future call to getpid() would corrupt the stack.
+    //       This is OK. The SANDBOX_TEST() macro is guaranteed to
+    //       only ever call _exit() after the test completes.
+  }
+}
+
+// A simple blacklist test
+
+ErrorCode BlacklistNanosleepPolicy(SandboxBPF*, int sysno, void*) {
+  if (!SandboxBPF::IsValidSyscallNumber(sysno)) {
+    // FIXME: we should really not have to do that in a trivial policy
+    return ErrorCode(ENOSYS);
+  }
+
+  switch (sysno) {
+    case __NR_nanosleep:
+      return ErrorCode(EACCES);
+    default:
+      return ErrorCode(ErrorCode::ERR_ALLOWED);
+  }
+}
+
+BPF_TEST(SandboxBPF, ApplyBasicBlacklistPolicy, BlacklistNanosleepPolicy) {
+  // nanosleep() should be denied
+  const struct timespec ts = {0, 0};
+  errno = 0;
+  BPF_ASSERT(syscall(__NR_nanosleep, &ts, NULL) == -1);
+  BPF_ASSERT(errno == EACCES);
+}
+
+// Now do a simple whitelist test
+
+ErrorCode WhitelistGetpidPolicy(SandboxBPF*, int sysno, void*) {
+  switch (sysno) {
+    case __NR_getpid:
+    case __NR_exit_group:
+      return ErrorCode(ErrorCode::ERR_ALLOWED);
+    default:
+      return ErrorCode(ENOMEM);
+  }
+}
+
+BPF_TEST(SandboxBPF, ApplyBasicWhitelistPolicy, WhitelistGetpidPolicy) {
+  // getpid() should be allowed
+  errno = 0;
+  BPF_ASSERT(syscall(__NR_getpid) > 0);
+  BPF_ASSERT(errno == 0);
+
+  // getpgid() should be denied
+  BPF_ASSERT(getpgid(0) == -1);
+  BPF_ASSERT(errno == ENOMEM);
+}
+
+// A simple blacklist policy, with a SIGSYS handler
+
+intptr_t EnomemHandler(const struct arch_seccomp_data& args, void* aux) {
+  // We also check that the auxiliary data is correct
+  SANDBOX_ASSERT(aux);
+  *(static_cast<int*>(aux)) = kExpectedReturnValue;
+  return -ENOMEM;
+}
+
+ErrorCode BlacklistNanosleepPolicySigsys(SandboxBPF* sandbox,
+                                         int sysno,
+                                         void* aux) {
+  if (!SandboxBPF::IsValidSyscallNumber(sysno)) {
+    // FIXME: we should really not have to do that in a trivial policy
+    return ErrorCode(ENOSYS);
+  }
+
+  switch (sysno) {
+    case __NR_nanosleep:
+      return sandbox->Trap(EnomemHandler, aux);
+    default:
+      return ErrorCode(ErrorCode::ERR_ALLOWED);
+  }
+}
+
+BPF_TEST(SandboxBPF,
+         BasicBlacklistWithSigsys,
+         BlacklistNanosleepPolicySigsys,
+         int /* BPF_AUX */) {
+  // getpid() should work properly
+  errno = 0;
+  BPF_ASSERT(syscall(__NR_getpid) > 0);
+  BPF_ASSERT(errno == 0);
+
+  // Our auxiliary data should be reset by the signal handler.
+  BPF_AUX = -1;
+  const struct timespec ts = {0, 0};
+  BPF_ASSERT(syscall(__NR_nanosleep, &ts, NULL) == -1);
+  BPF_ASSERT(errno == ENOMEM);
+
+  // We expect the signal handler to modify AuxData
+  BPF_ASSERT(BPF_AUX == kExpectedReturnValue);
+}
+
+// A simple test that verifies we can return arbitrary errno values.
+
+ErrorCode ErrnoTestPolicy(SandboxBPF*, int sysno, void*) {
+  if (!SandboxBPF::IsValidSyscallNumber(sysno)) {
+    // FIXME: we should really not have to do that in a trivial policy
+    return ErrorCode(ENOSYS);
+  }
+
+  switch (sysno) {
+    case __NR_dup2:
+      // Pretend that dup2() worked, but don't actually do anything.
+      return ErrorCode(0);
+    case __NR_setuid:
+#if defined(__NR_setuid32)
+    case __NR_setuid32:
+#endif
+      // Return errno = 1.
+      return ErrorCode(1);
+    case __NR_setgid:
+#if defined(__NR_setgid32)
+    case __NR_setgid32:
+#endif
+      // Return maximum errno value (typically 4095).
+      return ErrorCode(ErrorCode::ERR_MAX_ERRNO);
+    case __NR_uname:
+      // Return errno = 42;
+      return ErrorCode(42);
+    default:
+      return ErrorCode(ErrorCode::ERR_ALLOWED);
+  }
+}
+
+BPF_TEST(SandboxBPF, ErrnoTest, ErrnoTestPolicy) {
+  // Verify that dup2() returns success, but doesn't actually run.
+  int fds[4];
+  BPF_ASSERT(pipe(fds) == 0);
+  BPF_ASSERT(pipe(fds + 2) == 0);
+  BPF_ASSERT(dup2(fds[2], fds[0]) == 0);
+  char buf[1] = {};
+  BPF_ASSERT(write(fds[1], "\x55", 1) == 1);
+  BPF_ASSERT(write(fds[3], "\xAA", 1) == 1);
+  BPF_ASSERT(read(fds[0], buf, 1) == 1);
+
+  // If dup2() executed, we will read \xAA, but if dup2() has been turned
+  // into a no-op by our policy, then we will read \x55.
+  BPF_ASSERT(buf[0] == '\x55');
+
+  // Verify that we can return the minimum and maximum errno values.
+  errno = 0;
+  BPF_ASSERT(setuid(0) == -1);
+  BPF_ASSERT(errno == 1);
+
+  // On Android, errno is only supported up to 255; otherwise errno
+  // processing is skipped.
+  // We work around this (crbug.com/181647).
+  if (sandbox::IsAndroid() && setgid(0) != -1) {
+    errno = 0;
+    BPF_ASSERT(setgid(0) == -ErrorCode::ERR_MAX_ERRNO);
+    BPF_ASSERT(errno == 0);
+  } else {
+    errno = 0;
+    BPF_ASSERT(setgid(0) == -1);
+    BPF_ASSERT(errno == ErrorCode::ERR_MAX_ERRNO);
+  }
+
+  // Finally, test an errno in between the minimum and maximum.
+  errno = 0;
+  struct utsname uts_buf;
+  BPF_ASSERT(uname(&uts_buf) == -1);
+  BPF_ASSERT(errno == 42);
+}
+
+// Testing the stacking of two sandboxes
+
+ErrorCode StackingPolicyPartOne(SandboxBPF* sandbox, int sysno, void*) {
+  if (!SandboxBPF::IsValidSyscallNumber(sysno)) {
+    return ErrorCode(ENOSYS);
+  }
+
+  switch (sysno) {
+    case __NR_getppid:
+      return sandbox->Cond(0,
+                           ErrorCode::TP_32BIT,
+                           ErrorCode::OP_EQUAL,
+                           0,
+                           ErrorCode(ErrorCode::ERR_ALLOWED),
+                           ErrorCode(EPERM));
+    default:
+      return ErrorCode(ErrorCode::ERR_ALLOWED);
+  }
+}
+
+ErrorCode StackingPolicyPartTwo(SandboxBPF* sandbox, int sysno, void*) {
+  if (!SandboxBPF::IsValidSyscallNumber(sysno)) {
+    return ErrorCode(ENOSYS);
+  }
+
+  switch (sysno) {
+    case __NR_getppid:
+      return sandbox->Cond(0,
+                           ErrorCode::TP_32BIT,
+                           ErrorCode::OP_EQUAL,
+                           0,
+                           ErrorCode(EINVAL),
+                           ErrorCode(ErrorCode::ERR_ALLOWED));
+    default:
+      return ErrorCode(ErrorCode::ERR_ALLOWED);
+  }
+}
+
+BPF_TEST(SandboxBPF, StackingPolicy, StackingPolicyPartOne) {
+  errno = 0;
+  BPF_ASSERT(syscall(__NR_getppid, 0) > 0);
+  BPF_ASSERT(errno == 0);
+
+  BPF_ASSERT(syscall(__NR_getppid, 1) == -1);
+  BPF_ASSERT(errno == EPERM);
+
+  // Stack a second sandbox with its own policy. Verify that we can further
+  // restrict filters, but we cannot relax existing filters.
+  SandboxBPF sandbox;
+  sandbox.SetSandboxPolicyDeprecated(StackingPolicyPartTwo, NULL);
+  BPF_ASSERT(sandbox.StartSandbox(SandboxBPF::PROCESS_SINGLE_THREADED));
+
+  errno = 0;
+  BPF_ASSERT(syscall(__NR_getppid, 0) == -1);
+  BPF_ASSERT(errno == EINVAL);
+
+  BPF_ASSERT(syscall(__NR_getppid, 1) == -1);
+  BPF_ASSERT(errno == EPERM);
+}
+
+// A more complex, but synthetic policy. This tests the correctness of the BPF
+// program by iterating through all syscalls and checking for an errno that
+// depends on the syscall number. Unlike the Verifier, this exercises the BPF
+// interpreter in the kernel.
+
+// We try to exercise optimizations in the BPF compiler: we give the compiler
+// an opportunity to coalesce syscalls with contiguous numbers, and we also
+// make sure that disjoint sets can return the same errno.
+int SysnoToRandomErrno(int sysno) {
+  // Small contiguous sets of 4 system calls return an errno equal to the
+  // index of that set + 1 (so that we never return a NUL errno).
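+  // For example (illustrative): system calls 0..3 map to errno 1, 4..7 map
+  // to errno 2, and the mapping wraps after 29 sets, so system calls
+  // 116..119 map back to errno 1.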
+  return ((sysno & ~3) >> 2) % 29 + 1;
+}
+
+ErrorCode SyntheticPolicy(SandboxBPF*, int sysno, void*) {
+  if (!SandboxBPF::IsValidSyscallNumber(sysno)) {
+    // FIXME: we should really not have to do that in a trivial policy
+    return ErrorCode(ENOSYS);
+  }
+
+// TODO(jorgelo): remove this once the new code generator lands.
+#if defined(__arm__)
+  if (sysno > static_cast<int>(MAX_PUBLIC_SYSCALL)) {
+    return ErrorCode(ENOSYS);
+  }
+#endif
+
+  if (sysno == __NR_exit_group || sysno == __NR_write) {
+    // exit_group() is special, we really need it to work.
+    // write() is needed for BPF_ASSERT() to report a useful error message.
+    return ErrorCode(ErrorCode::ERR_ALLOWED);
+  } else {
+    return ErrorCode(SysnoToRandomErrno(sysno));
+  }
+}
+
+BPF_TEST(SandboxBPF, SyntheticPolicy, SyntheticPolicy) {
+  // Ensure that kExpectedReturnValue + syscall_number + 1 does not overflow
+  // an int.
+  BPF_ASSERT(std::numeric_limits<int>::max() - kExpectedReturnValue - 1 >=
+             static_cast<int>(MAX_PUBLIC_SYSCALL));
+
+  for (int syscall_number = static_cast<int>(MIN_SYSCALL);
+       syscall_number <= static_cast<int>(MAX_PUBLIC_SYSCALL);
+       ++syscall_number) {
+    if (syscall_number == __NR_exit_group || syscall_number == __NR_write) {
+      // exit_group() is special
+      continue;
+    }
+    errno = 0;
+    BPF_ASSERT(syscall(syscall_number) == -1);
+    BPF_ASSERT(errno == SysnoToRandomErrno(syscall_number));
+  }
+}
+
+#if defined(__arm__)
+// A simple policy that tests whether ARM private system calls are supported
+// by our BPF compiler and by the BPF interpreter in the kernel.
+
+// For ARM private system calls, return an errno equal to their offset from
+// MIN_PRIVATE_SYSCALL plus 1 (to avoid NUL errno).
+int ArmPrivateSysnoToErrno(int sysno) {
+  if (sysno >= static_cast<int>(MIN_PRIVATE_SYSCALL) &&
+      sysno <= static_cast<int>(MAX_PRIVATE_SYSCALL)) {
+    return (sysno - MIN_PRIVATE_SYSCALL) + 1;
+  } else {
+    return ENOSYS;
+  }
+}
+
+ErrorCode ArmPrivatePolicy(SandboxBPF*, int sysno, void*) {
+  if (!SandboxBPF::IsValidSyscallNumber(sysno)) {
+    // FIXME: we should really not have to do that in a trivial policy.
+    return ErrorCode(ENOSYS);
+  }
+
+  // Start from |__ARM_NR_set_tls + 1| so as not to mess with actual
+  // ARM private system calls.
+  if (sysno >= static_cast<int>(__ARM_NR_set_tls + 1) &&
+      sysno <= static_cast<int>(MAX_PRIVATE_SYSCALL)) {
+    return ErrorCode(ArmPrivateSysnoToErrno(sysno));
+  } else {
+    return ErrorCode(ErrorCode::ERR_ALLOWED);
+  }
+}
+
+BPF_TEST(SandboxBPF, ArmPrivatePolicy, ArmPrivatePolicy) {
+  for (int syscall_number = static_cast<int>(__ARM_NR_set_tls + 1);
+       syscall_number <= static_cast<int>(MAX_PRIVATE_SYSCALL);
+       ++syscall_number) {
+    errno = 0;
+    BPF_ASSERT(syscall(syscall_number) == -1);
+    BPF_ASSERT(errno == ArmPrivateSysnoToErrno(syscall_number));
+  }
+}
+#endif  // defined(__arm__)
+
+intptr_t CountSyscalls(const struct arch_seccomp_data& args, void* aux) {
+  // Count all invocations of our callback function.
+  ++*reinterpret_cast<int*>(aux);
+
+  // Verify that within the callback function all filtering is temporarily
+  // disabled.
+  BPF_ASSERT(syscall(__NR_getpid) > 1);
+
+  // Verify that we can now call the underlying system call without causing
+  // infinite recursion.
+  return SandboxBPF::ForwardSyscall(args);
+}
+
+ErrorCode GreyListedPolicy(SandboxBPF* sandbox, int sysno, void* aux) {
+  // The use of UnsafeTrap() causes us to print a warning message. This is
+  // generally desirable, but it results in the unittest failing, as it doesn't
+  // expect any messages on "stderr". So, temporarily disable messages. The
+  // BPF_TEST() is guaranteed to turn messages back on, after the policy
+  // function has completed.
+  setenv(kSandboxDebuggingEnv, "t", 0);
+  Die::SuppressInfoMessages(true);
+
+  // Some system calls must always be allowed if our policy wants to make
+  // use of UnsafeTrap().
+  if (sysno == __NR_rt_sigprocmask || sysno == __NR_rt_sigreturn
+#if defined(__NR_sigprocmask)
+      ||
+      sysno == __NR_sigprocmask
+#endif
+#if defined(__NR_sigreturn)
+      ||
+      sysno == __NR_sigreturn
+#endif
+      ) {
+    return ErrorCode(ErrorCode::ERR_ALLOWED);
+  } else if (sysno == __NR_getpid) {
+    // Disallow getpid()
+    return ErrorCode(EPERM);
+  } else if (SandboxBPF::IsValidSyscallNumber(sysno)) {
+    // Allow (and count) all other system calls.
+    return sandbox->UnsafeTrap(CountSyscalls, aux);
+  } else {
+    return ErrorCode(ENOSYS);
+  }
+}
+
+BPF_TEST(SandboxBPF, GreyListedPolicy, GreyListedPolicy, int /* BPF_AUX */) {
+  BPF_ASSERT(syscall(__NR_getpid) == -1);
+  BPF_ASSERT(errno == EPERM);
+  BPF_ASSERT(BPF_AUX == 0);
+  BPF_ASSERT(syscall(__NR_geteuid) == syscall(__NR_getuid));
+  BPF_ASSERT(BPF_AUX == 2);
+  char name[17] = {};
+  BPF_ASSERT(!syscall(__NR_prctl,
+                      PR_GET_NAME,
+                      name,
+                      (void*)NULL,
+                      (void*)NULL,
+                      (void*)NULL));
+  BPF_ASSERT(BPF_AUX == 3);
+  BPF_ASSERT(*name);
+}
+
+SANDBOX_TEST(SandboxBPF, EnableUnsafeTrapsInSigSysHandler) {
+  // Disable warning messages that could confuse our test framework.
+  setenv(kSandboxDebuggingEnv, "t", 0);
+  Die::SuppressInfoMessages(true);
+
+  unsetenv(kSandboxDebuggingEnv);
+  SANDBOX_ASSERT(Trap::EnableUnsafeTrapsInSigSysHandler() == false);
+  setenv(kSandboxDebuggingEnv, "", 1);
+  SANDBOX_ASSERT(Trap::EnableUnsafeTrapsInSigSysHandler() == false);
+  setenv(kSandboxDebuggingEnv, "t", 1);
+  SANDBOX_ASSERT(Trap::EnableUnsafeTrapsInSigSysHandler() == true);
+}
+
+intptr_t PrctlHandler(const struct arch_seccomp_data& args, void*) {
+  if (args.args[0] == PR_CAPBSET_DROP && static_cast<int>(args.args[1]) == -1) {
+    // prctl(PR_CAPBSET_DROP, -1) is never valid. The kernel will always
+    // return an error. But our handler allows this call.
+    return 0;
+  } else {
+    return SandboxBPF::ForwardSyscall(args);
+  }
+}
+
+ErrorCode PrctlPolicy(SandboxBPF* sandbox, int sysno, void* aux) {
+  setenv(kSandboxDebuggingEnv, "t", 0);
+  Die::SuppressInfoMessages(true);
+
+  if (sysno == __NR_prctl) {
+    // Handle prctl() inside an UnsafeTrap()
+    return sandbox->UnsafeTrap(PrctlHandler, NULL);
+  } else if (SandboxBPF::IsValidSyscallNumber(sysno)) {
+    // Allow all other system calls.
+    return ErrorCode(ErrorCode::ERR_ALLOWED);
+  } else {
+    return ErrorCode(ENOSYS);
+  }
+}
+
+BPF_TEST(SandboxBPF, ForwardSyscall, PrctlPolicy) {
+  // This call should never be allowed. But our policy will intercept it and
+  // let it pass successfully.
+  BPF_ASSERT(
+      !prctl(PR_CAPBSET_DROP, -1, (void*)NULL, (void*)NULL, (void*)NULL));
+
+  // Verify that the call will fail if it makes it all the way to the kernel.
+  BPF_ASSERT(
+      prctl(PR_CAPBSET_DROP, -2, (void*)NULL, (void*)NULL, (void*)NULL) == -1);
+
+  // And verify that other uses of prctl() work just fine.
+  char name[17] = {};
+  BPF_ASSERT(!syscall(__NR_prctl,
+                      PR_GET_NAME,
+                      name,
+                      (void*)NULL,
+                      (void*)NULL,
+                      (void*)NULL));
+  BPF_ASSERT(*name);
+
+  // Finally, verify that system calls other than prctl() are completely
+  // unaffected by our policy.
+  struct utsname uts = {};
+  BPF_ASSERT(!uname(&uts));
+  BPF_ASSERT(!strcmp(uts.sysname, "Linux"));
+}
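+
+// Editor's note: an illustrative sketch (not part of the imported code) of
+// what ForwardSyscall() amounts to. Assuming the SandboxSyscall() helper from
+// syscall.h, it simply replays the trapped system call with its original
+// arguments:
+static intptr_t ExampleForwardSyscall(const struct arch_seccomp_data& args) {
+  return SandboxSyscall(static_cast<int>(args.nr),
+                        static_cast<intptr_t>(args.args[0]),
+                        static_cast<intptr_t>(args.args[1]),
+                        static_cast<intptr_t>(args.args[2]),
+                        static_cast<intptr_t>(args.args[3]),
+                        static_cast<intptr_t>(args.args[4]),
+                        static_cast<intptr_t>(args.args[5]));
+}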
+
+intptr_t AllowRedirectedSyscall(const struct arch_seccomp_data& args, void*) {
+  return SandboxBPF::ForwardSyscall(args);
+}
+
+ErrorCode RedirectAllSyscallsPolicy(SandboxBPF* sandbox, int sysno, void* aux) {
+  setenv(kSandboxDebuggingEnv, "t", 0);
+  Die::SuppressInfoMessages(true);
+
+  // Some system calls must always be allowed, if our policy wants to make
+  // use of UnsafeTrap()
+  if (sysno == __NR_rt_sigprocmask || sysno == __NR_rt_sigreturn
+#if defined(__NR_sigprocmask)
+      ||
+      sysno == __NR_sigprocmask
+#endif
+#if defined(__NR_sigreturn)
+      ||
+      sysno == __NR_sigreturn
+#endif
+      ) {
+    return ErrorCode(ErrorCode::ERR_ALLOWED);
+  } else if (SandboxBPF::IsValidSyscallNumber(sysno)) {
+    return sandbox->UnsafeTrap(AllowRedirectedSyscall, aux);
+  } else {
+    return ErrorCode(ENOSYS);
+  }
+}
+
+int bus_handler_fd_ = -1;
+
+void SigBusHandler(int, siginfo_t* info, void* void_context) {
+  BPF_ASSERT(write(bus_handler_fd_, "\x55", 1) == 1);
+}
+
+BPF_TEST(SandboxBPF, SigBus, RedirectAllSyscallsPolicy) {
+  // We use the SIGBUS bit in the signal mask as a thread-local boolean
+  // value in the implementation of UnsafeTrap(). This is obviously a bit
+  // of a hack that could conceivably interfere with code that uses SIGBUS
+  // in more traditional ways. This test verifies that basic functionality
+  // of SIGBUS is not impacted, but it is certainly possible to construct
+  // more complex uses of signals where our use of the SIGBUS mask is not
+  // 100% transparent. This is expected behavior.
+  int fds[2];
+  BPF_ASSERT(pipe(fds) == 0);
+  bus_handler_fd_ = fds[1];
+  struct sigaction sa = {};
+  sa.sa_sigaction = SigBusHandler;
+  sa.sa_flags = SA_SIGINFO;
+  BPF_ASSERT(sigaction(SIGBUS, &sa, NULL) == 0);
+  raise(SIGBUS);
+  char c = '\000';
+  BPF_ASSERT(read(fds[0], &c, 1) == 1);
+  BPF_ASSERT(close(fds[0]) == 0);
+  BPF_ASSERT(close(fds[1]) == 0);
+  BPF_ASSERT(c == 0x55);
+}
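+
+// Editor's note: a minimal sketch of the trick described in the test above,
+// reading the SIGBUS bit of the thread's signal mask as a cheap thread-local
+// boolean (illustrative only, not part of the imported code):
+static bool ExampleSigBusBitIsSet() {
+  sigset_t mask;
+  sigemptyset(&mask);
+  // Passing a NULL set queries the current mask without changing it.
+  if (sigprocmask(SIG_BLOCK, NULL, &mask)) {
+    return false;
+  }
+  return sigismember(&mask, SIGBUS) == 1;
+}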
+
+BPF_TEST(SandboxBPF, SigMask, RedirectAllSyscallsPolicy) {
+  // Signal masks are potentially tricky to handle. For instance, if we
+  // ever tried to update them from inside a Trap() or UnsafeTrap() handler,
+  // the call to sigreturn() at the end of the signal handler would undo
+  // all of our efforts. So, it makes sense to test that sigprocmask()
+  // works, even if we have a policy in place that makes use of UnsafeTrap().
+  // In practice, this works because we force sigprocmask() to be handled
+  // entirely in the kernel.
+  sigset_t mask0, mask1, mask2;
+
+  // Call sigprocmask() without actually changing the mask, and verify that
+  // SIGUSR2 isn't blocked (it shouldn't be, as it isn't blocked by default
+  // in POSIX).
+  //
+  // Use SIGUSR2 because Android seems to use SIGUSR1 for some purpose.
+  sigemptyset(&mask0);
+  BPF_ASSERT(!sigprocmask(SIG_BLOCK, &mask0, &mask1));
+  BPF_ASSERT(!sigismember(&mask1, SIGUSR2));
+
+  // Try again, and this time we verify that we can block it. This
+  // requires a second call to sigprocmask().
+  sigaddset(&mask0, SIGUSR2);
+  BPF_ASSERT(!sigprocmask(SIG_BLOCK, &mask0, NULL));
+  BPF_ASSERT(!sigprocmask(SIG_BLOCK, NULL, &mask2));
+  BPF_ASSERT(sigismember(&mask2, SIGUSR2));
+}
+
+BPF_TEST(SandboxBPF, UnsafeTrapWithErrno, RedirectAllSyscallsPolicy) {
+  // An UnsafeTrap() (or for that matter, a Trap()) has to report error
+  // conditions by returning a negative errno value in the range -1..-4095.
+  // This should happen automatically when using ForwardSyscall(). If the
+  // TrapFnc() uses some other method to make system calls, then it is
+  // responsible for computing the correct return code.
+  // This test verifies that ForwardSyscall() does the correct thing.
+
+  // The glibc system wrapper will ultimately set errno for us. So, from normal
+  // userspace, all of this should be completely transparent.
+  errno = 0;
+  BPF_ASSERT(close(-1) == -1);
+  BPF_ASSERT(errno == EBADF);
+
+  // Explicitly avoid the glibc wrapper. This is not normally the way anybody
+  // would make system calls, but it allows us to verify that we don't
+  // accidentally mess with errno, when we shouldn't.
+  errno = 0;
+  struct arch_seccomp_data args = {};
+  args.nr = __NR_close;
+  args.args[0] = -1;
+  BPF_ASSERT(SandboxBPF::ForwardSyscall(args) == -EBADF);
+  BPF_ASSERT(errno == 0);
+}
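+
+// Editor's note: an illustrative sketch (not imported code) of the kernel
+// return-value convention discussed above. Raw system call results in the
+// range -1..-4095 denote errors; a libc-style wrapper turns them into errno
+// plus a -1 return value, and leaves every other result untouched:
+static intptr_t ExampleDecodeRawSyscallResult(intptr_t raw) {
+  if (raw < 0 && raw >= -4095) {
+    errno = -raw;
+    return -1;
+  }
+  return raw;
+}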
+
+bool NoOpCallback() { return true; }
+
+// Test a trap handler that makes use of a broker process to handle open().
+
+class InitializedOpenBroker {
+ public:
+  InitializedOpenBroker() : initialized_(false) {
+    std::vector<std::string> allowed_files;
+    allowed_files.push_back("/proc/allowed");
+    allowed_files.push_back("/proc/cpuinfo");
+
+    broker_process_.reset(
+        new BrokerProcess(EPERM, allowed_files, std::vector<std::string>()));
+    BPF_ASSERT(broker_process() != NULL);
+    BPF_ASSERT(broker_process_->Init(base::Bind(&NoOpCallback)));
+
+    initialized_ = true;
+  }
+  bool initialized() { return initialized_; }
+  class BrokerProcess* broker_process() { return broker_process_.get(); }
+
+ private:
+  bool initialized_;
+  scoped_ptr<class BrokerProcess> broker_process_;
+  DISALLOW_COPY_AND_ASSIGN(InitializedOpenBroker);
+};
+
+intptr_t BrokerOpenTrapHandler(const struct arch_seccomp_data& args,
+                               void* aux) {
+  BPF_ASSERT(aux);
+  BrokerProcess* broker_process = static_cast<BrokerProcess*>(aux);
+  switch (args.nr) {
+    case __NR_access:
+      return broker_process->Access(reinterpret_cast<const char*>(args.args[0]),
+                                    static_cast<int>(args.args[1]));
+    case __NR_open:
+      return broker_process->Open(reinterpret_cast<const char*>(args.args[0]),
+                                  static_cast<int>(args.args[1]));
+    case __NR_openat:
+      // We only call open(), so if we arrive here it's because glibc
+      // implements open() via the openat() system call.
+      BPF_ASSERT(static_cast<int>(args.args[0]) == AT_FDCWD);
+      return broker_process->Open(reinterpret_cast<const char*>(args.args[1]),
+                                  static_cast<int>(args.args[2]));
+    default:
+      BPF_ASSERT(false);
+      return -ENOSYS;
+  }
+}
+
+ErrorCode DenyOpenPolicy(SandboxBPF* sandbox, int sysno, void* aux) {
+  InitializedOpenBroker* iob = static_cast<InitializedOpenBroker*>(aux);
+  if (!SandboxBPF::IsValidSyscallNumber(sysno)) {
+    return ErrorCode(ENOSYS);
+  }
+
+  switch (sysno) {
+    case __NR_access:
+    case __NR_open:
+    case __NR_openat:
+      // We get an InitializedOpenBroker object, but our trap handler wants
+      // the BrokerProcess object.
+      return ErrorCode(
+          sandbox->Trap(BrokerOpenTrapHandler, iob->broker_process()));
+    default:
+      return ErrorCode(ErrorCode::ERR_ALLOWED);
+  }
+}
+
+// We use an InitializedOpenBroker class, so that we can run unsandboxed
+// code in its constructor, which is the only way to do so in a BPF_TEST.
+BPF_TEST(SandboxBPF,
+         UseOpenBroker,
+         DenyOpenPolicy,
+         InitializedOpenBroker /* BPF_AUX */) {
+  BPF_ASSERT(BPF_AUX.initialized());
+  BrokerProcess* broker_process = BPF_AUX.broker_process();
+  BPF_ASSERT(broker_process != NULL);
+
+  // First, use the broker "manually"
+  BPF_ASSERT(broker_process->Open("/proc/denied", O_RDONLY) == -EPERM);
+  BPF_ASSERT(broker_process->Access("/proc/denied", R_OK) == -EPERM);
+  BPF_ASSERT(broker_process->Open("/proc/allowed", O_RDONLY) == -ENOENT);
+  BPF_ASSERT(broker_process->Access("/proc/allowed", R_OK) == -ENOENT);
+
+  // Now use glibc's open() as an external library would.
+  BPF_ASSERT(open("/proc/denied", O_RDONLY) == -1);
+  BPF_ASSERT(errno == EPERM);
+
+  BPF_ASSERT(open("/proc/allowed", O_RDONLY) == -1);
+  BPF_ASSERT(errno == ENOENT);
+
+  // Also test glibc's openat(), some versions of libc use it transparently
+  // instead of open().
+  BPF_ASSERT(openat(AT_FDCWD, "/proc/denied", O_RDONLY) == -1);
+  BPF_ASSERT(errno == EPERM);
+
+  BPF_ASSERT(openat(AT_FDCWD, "/proc/allowed", O_RDONLY) == -1);
+  BPF_ASSERT(errno == ENOENT);
+
+  // And test glibc's access().
+  BPF_ASSERT(access("/proc/denied", R_OK) == -1);
+  BPF_ASSERT(errno == EPERM);
+
+  BPF_ASSERT(access("/proc/allowed", R_OK) == -1);
+  BPF_ASSERT(errno == ENOENT);
+
+  // This file is also whitelisted and does exist.
+  int cpu_info_access = access("/proc/cpuinfo", R_OK);
+  BPF_ASSERT(cpu_info_access == 0);
+  int cpu_info_fd = open("/proc/cpuinfo", O_RDONLY);
+  BPF_ASSERT(cpu_info_fd >= 0);
+  char buf[1024];
+  BPF_ASSERT(read(cpu_info_fd, buf, sizeof(buf)) > 0);
+}
+
+// Simple test demonstrating how to use SandboxBPF::Cond()
+
+ErrorCode SimpleCondTestPolicy(SandboxBPF* sandbox, int sysno, void*) {
+  if (!SandboxBPF::IsValidSyscallNumber(sysno)) {
+    // FIXME: we should really not have to do that in a trivial policy
+    return ErrorCode(ENOSYS);
+  }
+
+  // We deliberately return unusual errno values upon failure, so that we
+  // can uniquely test for these values. In a "real" policy, you would want
+  // to return more traditional values.
+  switch (sysno) {
+    case __NR_open:
+      // Allow opening files for reading, but don't allow writing.
+      COMPILE_ASSERT(O_RDONLY == 0, O_RDONLY_must_be_all_zero_bits);
+      return sandbox->Cond(1,
+                           ErrorCode::TP_32BIT,
+                           ErrorCode::OP_HAS_ANY_BITS,
+                           O_ACCMODE /* 0x3 */,
+                           ErrorCode(EROFS),
+                           ErrorCode(ErrorCode::ERR_ALLOWED));
+    case __NR_prctl:
+      // Allow prctl(PR_SET_DUMPABLE) and prctl(PR_GET_DUMPABLE), but
+      // disallow everything else.
+      return sandbox->Cond(0,
+                           ErrorCode::TP_32BIT,
+                           ErrorCode::OP_EQUAL,
+                           PR_SET_DUMPABLE,
+                           ErrorCode(ErrorCode::ERR_ALLOWED),
+                           sandbox->Cond(0,
+                                         ErrorCode::TP_32BIT,
+                                         ErrorCode::OP_EQUAL,
+                                         PR_GET_DUMPABLE,
+                                         ErrorCode(ErrorCode::ERR_ALLOWED),
+                                         ErrorCode(ENOMEM)));
+    default:
+      return ErrorCode(ErrorCode::ERR_ALLOWED);
+  }
+}
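+
+// Editor's note: the nested Cond() expressions above encode the same decision
+// tree as this plain C++ sketch, where a return value of 0 stands in for
+// ErrorCode::ERR_ALLOWED (illustrative only, not imported code):
+static int ExampleSimpleCondAsPlainCode(int sysno, const uint64_t* args) {
+  if (sysno == __NR_open) {
+    // OP_HAS_ANY_BITS against O_ACCMODE: any access bit set means "writing".
+    return (args[1] & O_ACCMODE) ? EROFS : 0;
+  }
+  if (sysno == __NR_prctl) {
+    return (args[0] == PR_SET_DUMPABLE || args[0] == PR_GET_DUMPABLE)
+               ? 0
+               : ENOMEM;
+  }
+  return 0;
+}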
+
+BPF_TEST(SandboxBPF, SimpleCondTest, SimpleCondTestPolicy) {
+  int fd;
+  BPF_ASSERT((fd = open("/proc/self/comm", O_RDWR)) == -1);
+  BPF_ASSERT(errno == EROFS);
+  BPF_ASSERT((fd = open("/proc/self/comm", O_RDONLY)) >= 0);
+  close(fd);
+
+  int ret;
+  BPF_ASSERT((ret = prctl(PR_GET_DUMPABLE)) >= 0);
+  BPF_ASSERT(prctl(PR_SET_DUMPABLE, 1 - ret) == 0);
+  BPF_ASSERT(prctl(PR_GET_ENDIAN, &ret) == -1);
+  BPF_ASSERT(errno == ENOMEM);
+}
+
+// This test exercises the SandboxBPF::Cond() method by building a complex
+// tree of conditional equality operations. It then makes system calls and
+// verifies that they return the values that we expected from our BPF
+// program.
+class EqualityStressTest {
+ public:
+  EqualityStressTest() {
+    // We want a deterministic test
+    srand(0);
+
+    // Iterates over system call numbers and builds a random tree of
+    // equality tests.
+    // We are actually constructing a graph of ArgValue objects. This
+    // graph will later be used to a) compute our sandbox policy, and
+    // b) drive the code that verifies the output from the BPF program.
+    COMPILE_ASSERT(
+        kNumTestCases < (int)(MAX_PUBLIC_SYSCALL - MIN_SYSCALL - 10),
+        num_test_cases_must_be_significantly_smaller_than_num_system_calls);
+    for (int sysno = MIN_SYSCALL, end = kNumTestCases; sysno < end; ++sysno) {
+      if (IsReservedSyscall(sysno)) {
+        // Skip reserved system calls. This ensures that our test framework
+        // isn't impacted by the fact that we are overriding a lot of
+        // different system calls.
+        ++end;
+        arg_values_.push_back(NULL);
+      } else {
+        arg_values_.push_back(
+            RandomArgValue(rand() % kMaxArgs, 0, rand() % kMaxArgs));
+      }
+    }
+  }
+
+  ~EqualityStressTest() {
+    for (std::vector<ArgValue*>::iterator iter = arg_values_.begin();
+         iter != arg_values_.end();
+         ++iter) {
+      DeleteArgValue(*iter);
+    }
+  }
+
+  ErrorCode Policy(SandboxBPF* sandbox, int sysno) {
+    if (!SandboxBPF::IsValidSyscallNumber(sysno)) {
+      // FIXME: we should really not have to do that in a trivial policy
+      return ErrorCode(ENOSYS);
+    } else if (sysno < 0 || sysno >= (int)arg_values_.size() ||
+               IsReservedSyscall(sysno)) {
+      // We only return ErrorCode values for the system calls that
+      // are part of our test data. Every other system call remains
+      // allowed.
+      return ErrorCode(ErrorCode::ERR_ALLOWED);
+    } else {
+      // ToErrorCode() turns an ArgValue object into an ErrorCode that is
+      // suitable for use by a sandbox policy.
+      return ToErrorCode(sandbox, arg_values_[sysno]);
+    }
+  }
+
+  void VerifyFilter() {
+    // Iterate over all system calls. Skip the system calls that have
+    // previously been determined as being reserved.
+    for (int sysno = 0; sysno < (int)arg_values_.size(); ++sysno) {
+      if (!arg_values_[sysno]) {
+        // Skip reserved system calls.
+        continue;
+      }
+      // Verify that system calls return the values that we expect them to
+      // return. This involves passing different combinations of system call
+      // parameters in order to exercise all possible code paths through the
+      // BPF filter program.
+      // We arbitrarily start by setting all six system call arguments to
+      // zero. We then recursively traverse our tree of ArgValues to
+      // determine the necessary combinations of parameters.
+      intptr_t args[6] = {};
+      Verify(sysno, args, *arg_values_[sysno]);
+    }
+  }
+
+ private:
+  struct ArgValue {
+    int argno;  // Argument number to inspect.
+    int size;   // Number of test cases (must be > 0).
+    struct Tests {
+      uint32_t k_value;            // Value to compare syscall arg against.
+      int err;                     // If non-zero, errno value to return.
+      struct ArgValue* arg_value;  // Otherwise, more args needs inspecting.
+    }* tests;
+    int err;                     // If none of the tests passed, this is what
+    struct ArgValue* arg_value;  // we'll return (this is the "else" branch).
+  };
+
+  bool IsReservedSyscall(int sysno) {
+    // There are a handful of system calls that we should never use in our
+    // test cases. These system calls are needed to allow the test framework
+    // to run properly.
+    // If we wanted to write fully generic code, there are more system calls
+    // that could be listed here, and it is quite difficult to come up with a
+    // truly comprehensive list. After all, we are deliberately making system
+    // calls unavailable. In practice, we have a pretty good idea of the system
+    // calls that will be made by this particular test. So, this small list is
+    // sufficient. But anybody copying this code for other uses would have
+    // to review whether the list is still sufficient.
+    return sysno == __NR_read || sysno == __NR_write || sysno == __NR_exit ||
+           sysno == __NR_exit_group || sysno == __NR_restart_syscall;
+  }
+
+  ArgValue* RandomArgValue(int argno, int args_mask, int remaining_args) {
+    // Create a new ArgValue and fill it with random data. We use a bit mask
+    // to keep track of the system call parameters that have previously been
+    // set; this ensures that we won't accidentally define a contradictory
+    // set of equality tests.
+    struct ArgValue* arg_value = new ArgValue();
+    args_mask |= 1 << argno;
+    arg_value->argno = argno;
+
+    // Apply some restrictions on just how complex our tests can be.
+    // Otherwise, we end up with a BPF program that is too complicated for
+    // the kernel to load.
+    int fan_out = kMaxFanOut;
+    if (remaining_args > 3) {
+      fan_out = 1;
+    } else if (remaining_args > 2) {
+      fan_out = 2;
+    }
+
+    // Create a couple of different test cases with randomized values that
+    // we want to use when comparing system call parameter number "argno".
+    arg_value->size = rand() % fan_out + 1;
+    arg_value->tests = new ArgValue::Tests[arg_value->size];
+
+    uint32_t k_value = rand();
+    for (int n = 0; n < arg_value->size; ++n) {
+      // Ensure that we have unique values
+      k_value += rand() % (RAND_MAX / (kMaxFanOut + 1)) + 1;
+
+      // There are two possible types of nodes. Either this is a leaf node;
+      // in that case, we have completed all the equality tests that we
+      // wanted to perform, and we can now compute a random "errno" value that
+      // we should return. Or this is part of a more complex boolean
+      // expression; in that case, we have to recursively add tests for some
+      // of the system call parameters that we have not yet included in our
+      // tests.
+      arg_value->tests[n].k_value = k_value;
+      if (!remaining_args || (rand() & 1)) {
+        arg_value->tests[n].err = (rand() % 1000) + 1;
+        arg_value->tests[n].arg_value = NULL;
+      } else {
+        arg_value->tests[n].err = 0;
+        arg_value->tests[n].arg_value =
+            RandomArgValue(RandomArg(args_mask), args_mask, remaining_args - 1);
+      }
+    }
+    // Finally, we have to define what we should return if none of the
+    // previous equality tests pass. Again, we can either deal with a leaf
+    // node, or we can randomly add another couple of tests.
+    if (!remaining_args || (rand() & 1)) {
+      arg_value->err = (rand() % 1000) + 1;
+      arg_value->arg_value = NULL;
+    } else {
+      arg_value->err = 0;
+      arg_value->arg_value =
+          RandomArgValue(RandomArg(args_mask), args_mask, remaining_args - 1);
+    }
+    // We have now built a new (sub-)tree of ArgValues defining a set of
+    // boolean expressions for testing random system call arguments against
+    // random values. Return this tree to our caller.
+    return arg_value;
+  }
+
+  int RandomArg(int args_mask) {
+    // Compute a random system call parameter number.
+    int argno = rand() % kMaxArgs;
+
+    // Make sure that this same parameter number has not previously been
+    // used. Otherwise, we could end up with a test that is impossible to
+    // satisfy (e.g. args[0] == 1 && args[0] == 2).
+    while (args_mask & (1 << argno)) {
+      argno = (argno + 1) % kMaxArgs;
+    }
+    return argno;
+  }
+
+  void DeleteArgValue(ArgValue* arg_value) {
+    // Delete an ArgValue and all of its child nodes. This requires
+    // recursively descending into the tree.
+    if (arg_value) {
+      if (arg_value->size) {
+        for (int n = 0; n < arg_value->size; ++n) {
+          if (!arg_value->tests[n].err) {
+            DeleteArgValue(arg_value->tests[n].arg_value);
+          }
+        }
+        delete[] arg_value->tests;
+      }
+      if (!arg_value->err) {
+        DeleteArgValue(arg_value->arg_value);
+      }
+      delete arg_value;
+    }
+  }
+
+  ErrorCode ToErrorCode(SandboxBPF* sandbox, ArgValue* arg_value) {
+    // Compute the ErrorCode that should be returned, if none of our
+    // tests succeed (i.e. the system call parameter doesn't match any
+    // of the values in arg_value->tests[].k_value).
+    ErrorCode err;
+    if (arg_value->err) {
+      // If this was a leaf node, return the errno value that we expect to
+      // return from the BPF filter program.
+      err = ErrorCode(arg_value->err);
+    } else {
+      // If this wasn't a leaf node yet, recursively descend into the rest
+      // of the tree. This will end up adding a few more SandboxBPF::Cond()
+      // tests to our ErrorCode.
+      err = ToErrorCode(sandbox, arg_value->arg_value);
+    }
+
+    // Now, iterate over all the test cases that we want to compare against.
+    // This builds a chain of SandboxBPF::Cond() tests
+    // (aka "if ... elif ... elif ... elif ... fi")
+    for (int n = arg_value->size; n-- > 0;) {
+      ErrorCode matched;
+      // Again, we distinguish between leaf nodes and subtrees.
+      if (arg_value->tests[n].err) {
+        matched = ErrorCode(arg_value->tests[n].err);
+      } else {
+        matched = ToErrorCode(sandbox, arg_value->tests[n].arg_value);
+      }
+      // For now, all of our tests are limited to 32bit.
+      // We have separate tests that check the behavior of 32bit vs. 64bit
+      // conditional expressions.
+      err = sandbox->Cond(arg_value->argno,
+                          ErrorCode::TP_32BIT,
+                          ErrorCode::OP_EQUAL,
+                          arg_value->tests[n].k_value,
+                          matched,
+                          err);
+    }
+    return err;
+  }
+
+  void Verify(int sysno, intptr_t* args, const ArgValue& arg_value) {
+    uint32_t mismatched = 0;
+    // Iterate over all the k_values in arg_value.tests[] and verify that
+    // we see the expected return values from system calls, when we pass
+    // the k_value as a parameter in a system call.
+    for (int n = arg_value.size; n-- > 0;) {
+      mismatched += arg_value.tests[n].k_value;
+      args[arg_value.argno] = arg_value.tests[n].k_value;
+      if (arg_value.tests[n].err) {
+        VerifyErrno(sysno, args, arg_value.tests[n].err);
+      } else {
+        Verify(sysno, args, *arg_value.tests[n].arg_value);
+      }
+    }
+  // Find a k_value that doesn't match any of the k_values in
+  // arg_value.tests[]. In most cases, the current value of "mismatched"
+  // would fit this requirement. But on the off-chance that it happens
+  // to collide, we double-check.
+  try_again:
+    for (int n = arg_value.size; n-- > 0;) {
+      if (mismatched == arg_value.tests[n].k_value) {
+        ++mismatched;
+        goto try_again;
+      }
+    }
+    // Now verify that we see the expected return value from system calls,
+    // if we pass a value that doesn't match any of the conditions (i.e. this
+    // is testing the "else" clause of the conditions).
+    args[arg_value.argno] = mismatched;
+    if (arg_value.err) {
+      VerifyErrno(sysno, args, arg_value.err);
+    } else {
+      Verify(sysno, args, *arg_value.arg_value);
+    }
+    // Reset args[arg_value.argno]. This is not technically needed, but it
+    // makes it easier to reason about the correctness of our tests.
+    args[arg_value.argno] = 0;
+  }
+
+  void VerifyErrno(int sysno, intptr_t* args, int err) {
+    // We installed BPF filters that return different errno values
+    // based on the system call number and the parameters that we decided
+    // to pass in. Verify that this condition holds true.
+    BPF_ASSERT(
+        SandboxSyscall(
+            sysno, args[0], args[1], args[2], args[3], args[4], args[5]) ==
+        -err);
+  }
+
+  // Vector of ArgValue trees. These trees define all the possible boolean
+  // expressions that we want to turn into a BPF filter program.
+  std::vector<ArgValue*> arg_values_;
+
+  // Don't increase these values. We are pushing the limits of the maximum
+  // BPF program that the kernel will allow us to load. If the values are
+  // increased too much, the test will start failing.
+  static const int kNumTestCases = 40;
+  static const int kMaxFanOut = 3;
+  static const int kMaxArgs = 6;
+};
+
+ErrorCode EqualityStressTestPolicy(SandboxBPF* sandbox, int sysno, void* aux) {
+  return reinterpret_cast<EqualityStressTest*>(aux)->Policy(sandbox, sysno);
+}
+
+BPF_TEST(SandboxBPF,
+         EqualityTests,
+         EqualityStressTestPolicy,
+         EqualityStressTest /* BPF_AUX */) {
+  BPF_AUX.VerifyFilter();
+}
+
+ErrorCode EqualityArgumentWidthPolicy(SandboxBPF* sandbox, int sysno, void*) {
+  if (!SandboxBPF::IsValidSyscallNumber(sysno)) {
+    // FIXME: we should really not have to do that in a trivial policy
+    return ErrorCode(ENOSYS);
+  } else if (sysno == __NR_uname) {
+    return sandbox->Cond(
+        0,
+        ErrorCode::TP_32BIT,
+        ErrorCode::OP_EQUAL,
+        0,
+        sandbox->Cond(1,
+                      ErrorCode::TP_32BIT,
+                      ErrorCode::OP_EQUAL,
+                      0x55555555,
+                      ErrorCode(1),
+                      ErrorCode(2)),
+        // The BPF compiler and the BPF interpreter in the kernel are
+        // (mostly) agnostic of the host platform's word size. The compiler
+        // will happily generate code that tests a 64bit value, and the
+        // interpreter will happily perform this test.
+        // But unless there is a kernel bug, there is no way for us to pass
+        // in a 64bit quantity on a 32bit platform. The upper 32bits should
+        // always be zero. So, this test should always evaluate as false on
+        // 32bit systems.
+        sandbox->Cond(1,
+                      ErrorCode::TP_64BIT,
+                      ErrorCode::OP_EQUAL,
+                      0x55555555AAAAAAAAULL,
+                      ErrorCode(1),
+                      ErrorCode(2)));
+  } else {
+    return ErrorCode(ErrorCode::ERR_ALLOWED);
+  }
+}
+
+BPF_TEST(SandboxBPF, EqualityArgumentWidth, EqualityArgumentWidthPolicy) {
+  BPF_ASSERT(SandboxSyscall(__NR_uname, 0, 0x55555555) == -1);
+  BPF_ASSERT(SandboxSyscall(__NR_uname, 0, 0xAAAAAAAA) == -2);
+#if __SIZEOF_POINTER__ > 4
+  // On 32bit machines, there is no way to pass a 64bit argument through the
+  // syscall interface. So, we have to skip the part of the test that requires
+  // 64bit arguments.
+  BPF_ASSERT(SandboxSyscall(__NR_uname, 1, 0x55555555AAAAAAAAULL) == -1);
+  BPF_ASSERT(SandboxSyscall(__NR_uname, 1, 0x5555555500000000ULL) == -2);
+  BPF_ASSERT(SandboxSyscall(__NR_uname, 1, 0x5555555511111111ULL) == -2);
+  BPF_ASSERT(SandboxSyscall(__NR_uname, 1, 0x11111111AAAAAAAAULL) == -2);
+#else
+  BPF_ASSERT(SandboxSyscall(__NR_uname, 1, 0x55555555) == -2);
+#endif
+}
+
+#if __SIZEOF_POINTER__ > 4
+// On 32bit machines, there is no way to pass a 64bit argument through the
+// syscall interface. So, we have to skip the part of the test that requires
+// 64bit arguments.
+BPF_DEATH_TEST(SandboxBPF,
+               EqualityArgumentUnallowed64bit,
+               DEATH_MESSAGE("Unexpected 64bit argument detected"),
+               EqualityArgumentWidthPolicy) {
+  SandboxSyscall(__NR_uname, 0, 0x5555555555555555ULL);
+}
+#endif
+
+ErrorCode EqualityWithNegativeArgumentsPolicy(SandboxBPF* sandbox,
+                                              int sysno,
+                                              void*) {
+  if (!SandboxBPF::IsValidSyscallNumber(sysno)) {
+    // FIXME: we should really not have to do that in a trivial policy
+    return ErrorCode(ENOSYS);
+  } else if (sysno == __NR_uname) {
+    return sandbox->Cond(0,
+                         ErrorCode::TP_32BIT,
+                         ErrorCode::OP_EQUAL,
+                         0xFFFFFFFF,
+                         ErrorCode(1),
+                         ErrorCode(2));
+  } else {
+    return ErrorCode(ErrorCode::ERR_ALLOWED);
+  }
+}
+
+BPF_TEST(SandboxBPF,
+         EqualityWithNegativeArguments,
+         EqualityWithNegativeArgumentsPolicy) {
+  BPF_ASSERT(SandboxSyscall(__NR_uname, 0xFFFFFFFF) == -1);
+  BPF_ASSERT(SandboxSyscall(__NR_uname, -1) == -1);
+  BPF_ASSERT(SandboxSyscall(__NR_uname, -1LL) == -1);
+}
+
+#if __SIZEOF_POINTER__ > 4
+BPF_DEATH_TEST(SandboxBPF,
+               EqualityWithNegative64bitArguments,
+               DEATH_MESSAGE("Unexpected 64bit argument detected"),
+               EqualityWithNegativeArgumentsPolicy) {
+  // When expecting a 32bit system call argument, we look at the MSB of the
+  // 64bit value and allow both "0" and "-1". But the latter is allowed only
+  // if the LSB was negative. So, this death test should error out.
+  BPF_ASSERT(SandboxSyscall(__NR_uname, 0xFFFFFFFF00000000LL) == -1);
+}
+#endif
+
+ErrorCode AllBitTestPolicy(SandboxBPF* sandbox, int sysno, void*) {
+  // Test the OP_HAS_ALL_BITS conditional test operator with a couple of
+  // different bitmasks. We try to find bitmasks that could conceivably
+  // touch corner cases.
+  // For all of these tests, we override uname(). We can make do with a
+  // single system call number, as we use the first system call argument to
+  // select the different bit masks that we want to test against.
+  if (!SandboxBPF::IsValidSyscallNumber(sysno)) {
+    // FIXME: we should really not have to do that in a trivial policy
+    return ErrorCode(ENOSYS);
+  } else if (sysno == __NR_uname) {
+    return sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL, 0,
+           sandbox->Cond(1, ErrorCode::TP_32BIT, ErrorCode::OP_HAS_ALL_BITS,
+                         0x0,
+                         ErrorCode(1), ErrorCode(0)),
+
+           sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL, 1,
+           sandbox->Cond(1, ErrorCode::TP_32BIT, ErrorCode::OP_HAS_ALL_BITS,
+                         0x1,
+                         ErrorCode(1), ErrorCode(0)),
+
+           sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL, 2,
+           sandbox->Cond(1, ErrorCode::TP_32BIT, ErrorCode::OP_HAS_ALL_BITS,
+                         0x3,
+                         ErrorCode(1), ErrorCode(0)),
+
+           sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL, 3,
+           sandbox->Cond(1, ErrorCode::TP_32BIT, ErrorCode::OP_HAS_ALL_BITS,
+                         0x80000000,
+                         ErrorCode(1), ErrorCode(0)),
+           sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL, 4,
+           sandbox->Cond(1, ErrorCode::TP_64BIT, ErrorCode::OP_HAS_ALL_BITS,
+                         0x0,
+                         ErrorCode(1), ErrorCode(0)),
+
+           sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL, 5,
+           sandbox->Cond(1, ErrorCode::TP_64BIT, ErrorCode::OP_HAS_ALL_BITS,
+                         0x1,
+                         ErrorCode(1), ErrorCode(0)),
+
+           sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL, 6,
+           sandbox->Cond(1, ErrorCode::TP_64BIT, ErrorCode::OP_HAS_ALL_BITS,
+                         0x3,
+                         ErrorCode(1), ErrorCode(0)),
+
+           sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL, 7,
+           sandbox->Cond(1, ErrorCode::TP_64BIT, ErrorCode::OP_HAS_ALL_BITS,
+                         0x80000000,
+                         ErrorCode(1), ErrorCode(0)),
+
+           sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL, 8,
+           sandbox->Cond(1, ErrorCode::TP_64BIT, ErrorCode::OP_HAS_ALL_BITS,
+                         0x100000000ULL,
+                         ErrorCode(1), ErrorCode(0)),
+
+           sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL, 9,
+           sandbox->Cond(1, ErrorCode::TP_64BIT, ErrorCode::OP_HAS_ALL_BITS,
+                         0x300000000ULL,
+                         ErrorCode(1), ErrorCode(0)),
+
+           sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL, 10,
+           sandbox->Cond(1, ErrorCode::TP_64BIT, ErrorCode::OP_HAS_ALL_BITS,
+                         0x100000001ULL,
+                         ErrorCode(1), ErrorCode(0)),
+
+                         sandbox->Kill("Invalid test case number"))))))))))));
+  } else {
+    return ErrorCode(ErrorCode::ERR_ALLOWED);
+  }
+}
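+
+// Editor's note: a one-line sketch of the OP_HAS_ALL_BITS semantics being
+// exercised above (illustrative only, not imported code). The test succeeds
+// iff every bit of the mask is also set in the argument; in particular, an
+// empty mask matches any argument:
+static bool ExampleHasAllBits(uint64_t arg, uint64_t mask) {
+  return (arg & mask) == mask;
+}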
+
+// Define a macro that performs tests using our test policy.
+// NOTE: Not all of the arguments in this macro are actually used!
+//       They are here just to serve as documentation of the conditions
+//       implemented in the test policy.
+//       Most notably, "op" and "mask" are unused by the macro. If you want
+//       to make changes to these values, you will have to edit the
+//       test policy instead.
+#define BITMASK_TEST(testcase, arg, op, mask, expected_value) \
+  BPF_ASSERT(SandboxSyscall(__NR_uname, (testcase), (arg)) == (expected_value))
+
+// Our uname() system call returns ErrorCode(1) for success and
+// ErrorCode(0) for failure. SandboxSyscall() turns this into a
+// return value of -1 or 0.
+#define EXPECT_FAILURE 0
+#define EXPECT_SUCCESS -1
+
+// A couple of our tests behave differently on 32bit and 64bit systems, as
+// there is no way for a 32bit system call to pass in a 64bit system call
+// argument "arg".
+// We expect these tests to succeed on 64bit systems, but to fail on 32bit
+// systems.
+#define EXPT64_SUCCESS (sizeof(void*) > 4 ? EXPECT_SUCCESS : EXPECT_FAILURE)
+BPF_TEST(SandboxBPF, AllBitTests, AllBitTestPolicy) {
+  // 32bit test: all of 0x0 (should always be true)
+  BITMASK_TEST( 0,                   0, ALLBITS32,          0, EXPECT_SUCCESS);
+  BITMASK_TEST( 0,                   1, ALLBITS32,          0, EXPECT_SUCCESS);
+  BITMASK_TEST( 0,                   3, ALLBITS32,          0, EXPECT_SUCCESS);
+  BITMASK_TEST( 0,         0xFFFFFFFFU, ALLBITS32,          0, EXPECT_SUCCESS);
+  BITMASK_TEST( 0,                -1LL, ALLBITS32,          0, EXPECT_SUCCESS);
+
+  // 32bit test: all of 0x1
+  BITMASK_TEST( 1,                   0, ALLBITS32,        0x1, EXPECT_FAILURE);
+  BITMASK_TEST( 1,                   1, ALLBITS32,        0x1, EXPECT_SUCCESS);
+  BITMASK_TEST( 1,                   2, ALLBITS32,        0x1, EXPECT_FAILURE);
+  BITMASK_TEST( 1,                   3, ALLBITS32,        0x1, EXPECT_SUCCESS);
+
+  // 32bit test: all of 0x3
+  BITMASK_TEST( 2,                   0, ALLBITS32,        0x3, EXPECT_FAILURE);
+  BITMASK_TEST( 2,                   1, ALLBITS32,        0x3, EXPECT_FAILURE);
+  BITMASK_TEST( 2,                   2, ALLBITS32,        0x3, EXPECT_FAILURE);
+  BITMASK_TEST( 2,                   3, ALLBITS32,        0x3, EXPECT_SUCCESS);
+  BITMASK_TEST( 2,                   7, ALLBITS32,        0x3, EXPECT_SUCCESS);
+
+  // 32bit test: all of 0x80000000
+  BITMASK_TEST( 3,                   0, ALLBITS32, 0x80000000, EXPECT_FAILURE);
+  BITMASK_TEST( 3,         0x40000000U, ALLBITS32, 0x80000000, EXPECT_FAILURE);
+  BITMASK_TEST( 3,         0x80000000U, ALLBITS32, 0x80000000, EXPECT_SUCCESS);
+  BITMASK_TEST( 3,         0xC0000000U, ALLBITS32, 0x80000000, EXPECT_SUCCESS);
+  BITMASK_TEST( 3,       -0x80000000LL, ALLBITS32, 0x80000000, EXPECT_SUCCESS);
+
+  // 64bit test: all of 0x0 (should always be true)
+  BITMASK_TEST( 4,                   0, ALLBITS64,          0, EXPECT_SUCCESS);
+  BITMASK_TEST( 4,                   1, ALLBITS64,          0, EXPECT_SUCCESS);
+  BITMASK_TEST( 4,                   3, ALLBITS64,          0, EXPECT_SUCCESS);
+  BITMASK_TEST( 4,         0xFFFFFFFFU, ALLBITS64,          0, EXPECT_SUCCESS);
+  BITMASK_TEST( 4,       0x100000000LL, ALLBITS64,          0, EXPECT_SUCCESS);
+  BITMASK_TEST( 4,       0x300000000LL, ALLBITS64,          0, EXPECT_SUCCESS);
+  BITMASK_TEST( 4,0x8000000000000000LL, ALLBITS64,          0, EXPECT_SUCCESS);
+  BITMASK_TEST( 4,                -1LL, ALLBITS64,          0, EXPECT_SUCCESS);
+
+  // 64bit test: all of 0x1
+  BITMASK_TEST( 5,                   0, ALLBITS64,          1, EXPECT_FAILURE);
+  BITMASK_TEST( 5,                   1, ALLBITS64,          1, EXPECT_SUCCESS);
+  BITMASK_TEST( 5,                   2, ALLBITS64,          1, EXPECT_FAILURE);
+  BITMASK_TEST( 5,                   3, ALLBITS64,          1, EXPECT_SUCCESS);
+  BITMASK_TEST( 5,       0x100000000LL, ALLBITS64,          1, EXPECT_FAILURE);
+  BITMASK_TEST( 5,       0x100000001LL, ALLBITS64,          1, EXPECT_SUCCESS);
+  BITMASK_TEST( 5,       0x100000002LL, ALLBITS64,          1, EXPECT_FAILURE);
+  BITMASK_TEST( 5,       0x100000003LL, ALLBITS64,          1, EXPECT_SUCCESS);
+
+  // 64bit test: all of 0x3
+  BITMASK_TEST( 6,                   0, ALLBITS64,          3, EXPECT_FAILURE);
+  BITMASK_TEST( 6,                   1, ALLBITS64,          3, EXPECT_FAILURE);
+  BITMASK_TEST( 6,                   2, ALLBITS64,          3, EXPECT_FAILURE);
+  BITMASK_TEST( 6,                   3, ALLBITS64,          3, EXPECT_SUCCESS);
+  BITMASK_TEST( 6,                   7, ALLBITS64,          3, EXPECT_SUCCESS);
+  BITMASK_TEST( 6,       0x100000000LL, ALLBITS64,          3, EXPECT_FAILURE);
+  BITMASK_TEST( 6,       0x100000001LL, ALLBITS64,          3, EXPECT_FAILURE);
+  BITMASK_TEST( 6,       0x100000002LL, ALLBITS64,          3, EXPECT_FAILURE);
+  BITMASK_TEST( 6,       0x100000003LL, ALLBITS64,          3, EXPECT_SUCCESS);
+  BITMASK_TEST( 6,       0x100000007LL, ALLBITS64,          3, EXPECT_SUCCESS);
+
+  // 64bit test: all of 0x80000000
+  BITMASK_TEST( 7,                   0, ALLBITS64, 0x80000000, EXPECT_FAILURE);
+  BITMASK_TEST( 7,         0x40000000U, ALLBITS64, 0x80000000, EXPECT_FAILURE);
+  BITMASK_TEST( 7,         0x80000000U, ALLBITS64, 0x80000000, EXPECT_SUCCESS);
+  BITMASK_TEST( 7,         0xC0000000U, ALLBITS64, 0x80000000, EXPECT_SUCCESS);
+  BITMASK_TEST( 7,       -0x80000000LL, ALLBITS64, 0x80000000, EXPECT_SUCCESS);
+  BITMASK_TEST( 7,       0x100000000LL, ALLBITS64, 0x80000000, EXPECT_FAILURE);
+  BITMASK_TEST( 7,       0x140000000LL, ALLBITS64, 0x80000000, EXPECT_FAILURE);
+  BITMASK_TEST( 7,       0x180000000LL, ALLBITS64, 0x80000000, EXPECT_SUCCESS);
+  BITMASK_TEST( 7,       0x1C0000000LL, ALLBITS64, 0x80000000, EXPECT_SUCCESS);
+  BITMASK_TEST( 7,      -0x180000000LL, ALLBITS64, 0x80000000, EXPECT_SUCCESS);
+
+  // 64bit test: all of 0x100000000
+  BITMASK_TEST( 8,       0x000000000LL, ALLBITS64,0x100000000, EXPECT_FAILURE);
+  BITMASK_TEST( 8,       0x100000000LL, ALLBITS64,0x100000000, EXPT64_SUCCESS);
+  BITMASK_TEST( 8,       0x200000000LL, ALLBITS64,0x100000000, EXPECT_FAILURE);
+  BITMASK_TEST( 8,       0x300000000LL, ALLBITS64,0x100000000, EXPT64_SUCCESS);
+  BITMASK_TEST( 8,       0x000000001LL, ALLBITS64,0x100000000, EXPECT_FAILURE);
+  BITMASK_TEST( 8,       0x100000001LL, ALLBITS64,0x100000000, EXPT64_SUCCESS);
+  BITMASK_TEST( 8,       0x200000001LL, ALLBITS64,0x100000000, EXPECT_FAILURE);
+  BITMASK_TEST( 8,       0x300000001LL, ALLBITS64,0x100000000, EXPT64_SUCCESS);
+
+  // 64bit test: all of 0x300000000
+  BITMASK_TEST( 9,       0x000000000LL, ALLBITS64,0x300000000, EXPECT_FAILURE);
+  BITMASK_TEST( 9,       0x100000000LL, ALLBITS64,0x300000000, EXPECT_FAILURE);
+  BITMASK_TEST( 9,       0x200000000LL, ALLBITS64,0x300000000, EXPECT_FAILURE);
+  BITMASK_TEST( 9,       0x300000000LL, ALLBITS64,0x300000000, EXPT64_SUCCESS);
+  BITMASK_TEST( 9,       0x700000000LL, ALLBITS64,0x300000000, EXPT64_SUCCESS);
+  BITMASK_TEST( 9,       0x000000001LL, ALLBITS64,0x300000000, EXPECT_FAILURE);
+  BITMASK_TEST( 9,       0x100000001LL, ALLBITS64,0x300000000, EXPECT_FAILURE);
+  BITMASK_TEST( 9,       0x200000001LL, ALLBITS64,0x300000000, EXPECT_FAILURE);
+  BITMASK_TEST( 9,       0x300000001LL, ALLBITS64,0x300000000, EXPT64_SUCCESS);
+  BITMASK_TEST( 9,       0x700000001LL, ALLBITS64,0x300000000, EXPT64_SUCCESS);
+
+  // 64bit test: all of 0x100000001
+  BITMASK_TEST(10,       0x000000000LL, ALLBITS64,0x100000001, EXPECT_FAILURE);
+  BITMASK_TEST(10,       0x000000001LL, ALLBITS64,0x100000001, EXPECT_FAILURE);
+  BITMASK_TEST(10,       0x100000000LL, ALLBITS64,0x100000001, EXPECT_FAILURE);
+  BITMASK_TEST(10,       0x100000001LL, ALLBITS64,0x100000001, EXPT64_SUCCESS);
+  BITMASK_TEST(10,         0xFFFFFFFFU, ALLBITS64,0x100000001, EXPECT_FAILURE);
+  BITMASK_TEST(10,                 -1L, ALLBITS64,0x100000001, EXPT64_SUCCESS);
+}
+
+ErrorCode AnyBitTestPolicy(SandboxBPF* sandbox, int sysno, void*) {
+  // Test the OP_HAS_ANY_BITS conditional test operator with a couple of
+  // different bitmasks. We try to find bitmasks that could conceivably
+  // touch corner cases.
+  // For all of these tests, we override uname(). We can make do with a
+  // single system call number, as we use the first system call argument to
+  // select the different bit masks that we want to test against.
+  if (!SandboxBPF::IsValidSyscallNumber(sysno)) {
+    // FIXME: we should really not have to do that in a trivial policy
+    return ErrorCode(ENOSYS);
+  } else if (sysno == __NR_uname) {
+    return sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL, 0,
+           sandbox->Cond(1, ErrorCode::TP_32BIT, ErrorCode::OP_HAS_ANY_BITS,
+                         0x0,
+                         ErrorCode(1), ErrorCode(0)),
+
+           sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL, 1,
+           sandbox->Cond(1, ErrorCode::TP_32BIT, ErrorCode::OP_HAS_ANY_BITS,
+                         0x1,
+                         ErrorCode(1), ErrorCode(0)),
+
+           sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL, 2,
+           sandbox->Cond(1, ErrorCode::TP_32BIT, ErrorCode::OP_HAS_ANY_BITS,
+                         0x3,
+                         ErrorCode(1), ErrorCode(0)),
+
+           sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL, 3,
+           sandbox->Cond(1, ErrorCode::TP_32BIT, ErrorCode::OP_HAS_ANY_BITS,
+                         0x80000000,
+                         ErrorCode(1), ErrorCode(0)),
+
+           // None of the following tests really make sense on 32bit
+           // systems. They will always evaluate as false.
+           sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL, 4,
+           sandbox->Cond(1, ErrorCode::TP_64BIT, ErrorCode::OP_HAS_ANY_BITS,
+                         0x0,
+                         ErrorCode(1), ErrorCode(0)),
+
+           sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL, 5,
+           sandbox->Cond(1, ErrorCode::TP_64BIT, ErrorCode::OP_HAS_ANY_BITS,
+                         0x1,
+                         ErrorCode(1), ErrorCode(0)),
+
+           sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL, 6,
+           sandbox->Cond(1, ErrorCode::TP_64BIT, ErrorCode::OP_HAS_ANY_BITS,
+                         0x3,
+                         ErrorCode(1), ErrorCode(0)),
+
+           sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL, 7,
+           sandbox->Cond(1, ErrorCode::TP_64BIT, ErrorCode::OP_HAS_ANY_BITS,
+                         0x80000000,
+                         ErrorCode(1), ErrorCode(0)),
+
+           sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL, 8,
+           sandbox->Cond(1, ErrorCode::TP_64BIT, ErrorCode::OP_HAS_ANY_BITS,
+                         0x100000000ULL,
+                         ErrorCode(1), ErrorCode(0)),
+
+           sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL, 9,
+           sandbox->Cond(1, ErrorCode::TP_64BIT, ErrorCode::OP_HAS_ANY_BITS,
+                         0x300000000ULL,
+                         ErrorCode(1), ErrorCode(0)),
+
+           sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL, 10,
+           sandbox->Cond(1, ErrorCode::TP_64BIT, ErrorCode::OP_HAS_ANY_BITS,
+                         0x100000001ULL,
+                         ErrorCode(1), ErrorCode(0)),
+
+                         sandbox->Kill("Invalid test case number"))))))))))));
+  } else {
+    return ErrorCode(ErrorCode::ERR_ALLOWED);
+  }
+}
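+
+// Editor's note: the matching sketch for OP_HAS_ANY_BITS (illustrative only,
+// not imported code). The test succeeds iff the argument and the mask share
+// at least one set bit; an empty mask never matches:
+static bool ExampleHasAnyBits(uint64_t arg, uint64_t mask) {
+  return (arg & mask) != 0;
+}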
+
+BPF_TEST(SandboxBPF, AnyBitTests, AnyBitTestPolicy) {
+  // 32bit test: any of 0x0 (should always be false)
+  BITMASK_TEST( 0,                   0, ANYBITS32,        0x0, EXPECT_FAILURE);
+  BITMASK_TEST( 0,                   1, ANYBITS32,        0x0, EXPECT_FAILURE);
+  BITMASK_TEST( 0,                   3, ANYBITS32,        0x0, EXPECT_FAILURE);
+  BITMASK_TEST( 0,         0xFFFFFFFFU, ANYBITS32,        0x0, EXPECT_FAILURE);
+  BITMASK_TEST( 0,                -1LL, ANYBITS32,        0x0, EXPECT_FAILURE);
+
+  // 32bit test: any of 0x1
+  BITMASK_TEST( 1,                   0, ANYBITS32,        0x1, EXPECT_FAILURE);
+  BITMASK_TEST( 1,                   1, ANYBITS32,        0x1, EXPECT_SUCCESS);
+  BITMASK_TEST( 1,                   2, ANYBITS32,        0x1, EXPECT_FAILURE);
+  BITMASK_TEST( 1,                   3, ANYBITS32,        0x1, EXPECT_SUCCESS);
+
+  // 32bit test: any of 0x3
+  BITMASK_TEST( 2,                   0, ANYBITS32,        0x3, EXPECT_FAILURE);
+  BITMASK_TEST( 2,                   1, ANYBITS32,        0x3, EXPECT_SUCCESS);
+  BITMASK_TEST( 2,                   2, ANYBITS32,        0x3, EXPECT_SUCCESS);
+  BITMASK_TEST( 2,                   3, ANYBITS32,        0x3, EXPECT_SUCCESS);
+  BITMASK_TEST( 2,                   7, ANYBITS32,        0x3, EXPECT_SUCCESS);
+
+  // 32bit test: any of 0x80000000
+  BITMASK_TEST( 3,                   0, ANYBITS32, 0x80000000, EXPECT_FAILURE);
+  BITMASK_TEST( 3,         0x40000000U, ANYBITS32, 0x80000000, EXPECT_FAILURE);
+  BITMASK_TEST( 3,         0x80000000U, ANYBITS32, 0x80000000, EXPECT_SUCCESS);
+  BITMASK_TEST( 3,         0xC0000000U, ANYBITS32, 0x80000000, EXPECT_SUCCESS);
+  BITMASK_TEST( 3,       -0x80000000LL, ANYBITS32, 0x80000000, EXPECT_SUCCESS);
+
+  // 64bit test: any of 0x0 (should always be false)
+  BITMASK_TEST( 4,                   0, ANYBITS64,        0x0, EXPECT_FAILURE);
+  BITMASK_TEST( 4,                   1, ANYBITS64,        0x0, EXPECT_FAILURE);
+  BITMASK_TEST( 4,                   3, ANYBITS64,        0x0, EXPECT_FAILURE);
+  BITMASK_TEST( 4,         0xFFFFFFFFU, ANYBITS64,        0x0, EXPECT_FAILURE);
+  BITMASK_TEST( 4,       0x100000000LL, ANYBITS64,        0x0, EXPECT_FAILURE);
+  BITMASK_TEST( 4,       0x300000000LL, ANYBITS64,        0x0, EXPECT_FAILURE);
+  BITMASK_TEST( 4,0x8000000000000000LL, ANYBITS64,        0x0, EXPECT_FAILURE);
+  BITMASK_TEST( 4,                -1LL, ANYBITS64,        0x0, EXPECT_FAILURE);
+
+  // 64bit test: any of 0x1
+  BITMASK_TEST( 5,                   0, ANYBITS64,        0x1, EXPECT_FAILURE);
+  BITMASK_TEST( 5,                   1, ANYBITS64,        0x1, EXPECT_SUCCESS);
+  BITMASK_TEST( 5,                   2, ANYBITS64,        0x1, EXPECT_FAILURE);
+  BITMASK_TEST( 5,                   3, ANYBITS64,        0x1, EXPECT_SUCCESS);
+  BITMASK_TEST( 5,       0x100000001LL, ANYBITS64,        0x1, EXPECT_SUCCESS);
+  BITMASK_TEST( 5,       0x100000000LL, ANYBITS64,        0x1, EXPECT_FAILURE);
+  BITMASK_TEST( 5,       0x100000002LL, ANYBITS64,        0x1, EXPECT_FAILURE);
+  BITMASK_TEST( 5,       0x100000003LL, ANYBITS64,        0x1, EXPECT_SUCCESS);
+
+  // 64bit test: any of 0x3
+  BITMASK_TEST( 6,                   0, ANYBITS64,        0x3, EXPECT_FAILURE);
+  BITMASK_TEST( 6,                   1, ANYBITS64,        0x3, EXPECT_SUCCESS);
+  BITMASK_TEST( 6,                   2, ANYBITS64,        0x3, EXPECT_SUCCESS);
+  BITMASK_TEST( 6,                   3, ANYBITS64,        0x3, EXPECT_SUCCESS);
+  BITMASK_TEST( 6,                   7, ANYBITS64,        0x3, EXPECT_SUCCESS);
+  BITMASK_TEST( 6,       0x100000000LL, ANYBITS64,        0x3, EXPECT_FAILURE);
+  BITMASK_TEST( 6,       0x100000001LL, ANYBITS64,        0x3, EXPECT_SUCCESS);
+  BITMASK_TEST( 6,       0x100000002LL, ANYBITS64,        0x3, EXPECT_SUCCESS);
+  BITMASK_TEST( 6,       0x100000003LL, ANYBITS64,        0x3, EXPECT_SUCCESS);
+  BITMASK_TEST( 6,       0x100000007LL, ANYBITS64,        0x3, EXPECT_SUCCESS);
+
+  // 64bit test: any of 0x80000000
+  BITMASK_TEST( 7,                   0, ANYBITS64, 0x80000000, EXPECT_FAILURE);
+  BITMASK_TEST( 7,         0x40000000U, ANYBITS64, 0x80000000, EXPECT_FAILURE);
+  BITMASK_TEST( 7,         0x80000000U, ANYBITS64, 0x80000000, EXPECT_SUCCESS);
+  BITMASK_TEST( 7,         0xC0000000U, ANYBITS64, 0x80000000, EXPECT_SUCCESS);
+  BITMASK_TEST( 7,       -0x80000000LL, ANYBITS64, 0x80000000, EXPECT_SUCCESS);
+  BITMASK_TEST( 7,       0x100000000LL, ANYBITS64, 0x80000000, EXPECT_FAILURE);
+  BITMASK_TEST( 7,       0x140000000LL, ANYBITS64, 0x80000000, EXPECT_FAILURE);
+  BITMASK_TEST( 7,       0x180000000LL, ANYBITS64, 0x80000000, EXPECT_SUCCESS);
+  BITMASK_TEST( 7,       0x1C0000000LL, ANYBITS64, 0x80000000, EXPECT_SUCCESS);
+  BITMASK_TEST( 7,      -0x180000000LL, ANYBITS64, 0x80000000, EXPECT_SUCCESS);
+
+  // 64bit test: any of 0x100000000
+  BITMASK_TEST( 8,       0x000000000LL, ANYBITS64,0x100000000, EXPECT_FAILURE);
+  BITMASK_TEST( 8,       0x100000000LL, ANYBITS64,0x100000000, EXPT64_SUCCESS);
+  BITMASK_TEST( 8,       0x200000000LL, ANYBITS64,0x100000000, EXPECT_FAILURE);
+  BITMASK_TEST( 8,       0x300000000LL, ANYBITS64,0x100000000, EXPT64_SUCCESS);
+  BITMASK_TEST( 8,       0x000000001LL, ANYBITS64,0x100000000, EXPECT_FAILURE);
+  BITMASK_TEST( 8,       0x100000001LL, ANYBITS64,0x100000000, EXPT64_SUCCESS);
+  BITMASK_TEST( 8,       0x200000001LL, ANYBITS64,0x100000000, EXPECT_FAILURE);
+  BITMASK_TEST( 8,       0x300000001LL, ANYBITS64,0x100000000, EXPT64_SUCCESS);
+
+  // 64bit test: any of 0x300000000
+  BITMASK_TEST( 9,       0x000000000LL, ANYBITS64,0x300000000, EXPECT_FAILURE);
+  BITMASK_TEST( 9,       0x100000000LL, ANYBITS64,0x300000000, EXPT64_SUCCESS);
+  BITMASK_TEST( 9,       0x200000000LL, ANYBITS64,0x300000000, EXPT64_SUCCESS);
+  BITMASK_TEST( 9,       0x300000000LL, ANYBITS64,0x300000000, EXPT64_SUCCESS);
+  BITMASK_TEST( 9,       0x700000000LL, ANYBITS64,0x300000000, EXPT64_SUCCESS);
+  BITMASK_TEST( 9,       0x000000001LL, ANYBITS64,0x300000000, EXPECT_FAILURE);
+  BITMASK_TEST( 9,       0x100000001LL, ANYBITS64,0x300000000, EXPT64_SUCCESS);
+  BITMASK_TEST( 9,       0x200000001LL, ANYBITS64,0x300000000, EXPT64_SUCCESS);
+  BITMASK_TEST( 9,       0x300000001LL, ANYBITS64,0x300000000, EXPT64_SUCCESS);
+  BITMASK_TEST( 9,       0x700000001LL, ANYBITS64,0x300000000, EXPT64_SUCCESS);
+
+  // 64bit test: any of 0x100000001
+  BITMASK_TEST( 10,      0x000000000LL, ANYBITS64,0x100000001, EXPECT_FAILURE);
+  BITMASK_TEST( 10,      0x000000001LL, ANYBITS64,0x100000001, EXPECT_SUCCESS);
+  BITMASK_TEST( 10,      0x100000000LL, ANYBITS64,0x100000001, EXPT64_SUCCESS);
+  BITMASK_TEST( 10,      0x100000001LL, ANYBITS64,0x100000001, EXPECT_SUCCESS);
+  BITMASK_TEST( 10,        0xFFFFFFFFU, ANYBITS64,0x100000001, EXPECT_SUCCESS);
+  BITMASK_TEST( 10,                -1L, ANYBITS64,0x100000001, EXPECT_SUCCESS);
+}
+
+intptr_t PthreadTrapHandler(const struct arch_seccomp_data& args, void* aux) {
+  if (args.args[0] != (CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | SIGCHLD)) {
+    // We expect to get called for an attempt to fork(). No need to log that
+    // call. But if we ever get called for anything else, we want to verbosely
+    // print as much information as possible.
+    const char* msg = (const char*)aux;
+    printf(
+        "Clone() was called with unexpected arguments\n"
+        "  nr: %d\n"
+        "  1: 0x%llX\n"
+        "  2: 0x%llX\n"
+        "  3: 0x%llX\n"
+        "  4: 0x%llX\n"
+        "  5: 0x%llX\n"
+        "  6: 0x%llX\n"
+        "%s\n",
+        args.nr,
+        (long long)args.args[0],
+        (long long)args.args[1],
+        (long long)args.args[2],
+        (long long)args.args[3],
+        (long long)args.args[4],
+        (long long)args.args[5],
+        msg);
+  }
+  return -EPERM;
+}
+
+ErrorCode PthreadPolicyEquality(SandboxBPF* sandbox, int sysno, void* aux) {
+  // This policy allows creating threads with pthread_create(). But it
+  // doesn't allow any other uses of clone(). Most notably, it does not
+  // allow callers to implement fork() or vfork() by passing suitable flags
+  // to the clone() system call.
+  if (!SandboxBPF::IsValidSyscallNumber(sysno)) {
+    // FIXME: we should really not have to do that in a trivial policy
+    return ErrorCode(ENOSYS);
+  } else if (sysno == __NR_clone) {
+    // We have seen two different valid combinations of flags. Glibc
+    // uses the more modern flags, sets the TLS from the call to clone(), and
+    // uses futexes to monitor threads. Android's C run-time library doesn't
+    // do any of this, but it sets the obsolete (and no-op) CLONE_DETACHED.
+    // More recent versions of Android don't set CLONE_DETACHED anymore, so
+    // the last case accounts for that.
+    // The following policy is very strict. It only allows the exact masks
+    // that we have seen in known implementations. It is probably somewhat
+    // stricter than what we would want to do.
+    const uint64_t kGlibcCloneMask =
+        CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
+        CLONE_THREAD | CLONE_SYSVSEM | CLONE_SETTLS |
+        CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID;
+    const uint64_t kBaseAndroidCloneMask =
+        CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
+        CLONE_THREAD | CLONE_SYSVSEM;
+    return sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL,
+                         kGlibcCloneMask,
+                         ErrorCode(ErrorCode::ERR_ALLOWED),
+           sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL,
+                         kBaseAndroidCloneMask | CLONE_DETACHED,
+                         ErrorCode(ErrorCode::ERR_ALLOWED),
+           sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL,
+                         kBaseAndroidCloneMask,
+                         ErrorCode(ErrorCode::ERR_ALLOWED),
+                         sandbox->Trap(PthreadTrapHandler, "Unknown mask"))));
+  } else {
+    return ErrorCode(ErrorCode::ERR_ALLOWED);
+  }
+}
+
+ErrorCode PthreadPolicyBitMask(SandboxBPF* sandbox, int sysno, void* aux) {
+  // This policy allows creating threads with pthread_create(). But it
+  // doesn't allow any other uses of clone(). Most notably, it does not
+  // allow callers to implement fork() or vfork() by passing suitable flags
+  // to the clone() system call.
+  if (!SandboxBPF::IsValidSyscallNumber(sysno)) {
+    // FIXME: we should really not have to do that in a trivial policy
+    return ErrorCode(ENOSYS);
+  } else if (sysno == __NR_clone) {
+    // We have seen two different valid combinations of flags. Glibc
+    // uses the more modern flags, sets the TLS from the call to clone(), and
+    // uses futexes to monitor threads. Android's C run-time library doesn't
+    // do any of this, but it sets the obsolete (and no-op) CLONE_DETACHED.
+    // The following policy allows for either combination of flags, but it
+    // is generally a little more conservative than strictly necessary. We
+    // err on the side of being safe rather than sorry.
+    // Very noticeably though, we disallow fork() (which is often just a
+    // wrapper around clone()).
+    return sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_HAS_ANY_BITS,
+                         ~uint32(CLONE_VM|CLONE_FS|CLONE_FILES|CLONE_SIGHAND|
+                                 CLONE_THREAD|CLONE_SYSVSEM|CLONE_SETTLS|
+                                 CLONE_PARENT_SETTID|CLONE_CHILD_CLEARTID|
+                                 CLONE_DETACHED),
+                         sandbox->Trap(PthreadTrapHandler,
+                                       "Unexpected CLONE_XXX flag found"),
+           sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_HAS_ALL_BITS,
+                         CLONE_VM|CLONE_FS|CLONE_FILES|CLONE_SIGHAND|
+                         CLONE_THREAD|CLONE_SYSVSEM,
+           sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_HAS_ALL_BITS,
+                         CLONE_SETTLS|CLONE_PARENT_SETTID|CLONE_CHILD_CLEARTID,
+                         ErrorCode(ErrorCode::ERR_ALLOWED),
+           sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_HAS_ANY_BITS,
+                         CLONE_SETTLS|CLONE_PARENT_SETTID|CLONE_CHILD_CLEARTID,
+                         sandbox->Trap(PthreadTrapHandler,
+                                       "Must set either all or none of the TLS"
+                                       " and futex bits in call to clone()"),
+                         ErrorCode(ErrorCode::ERR_ALLOWED))),
+                         sandbox->Trap(PthreadTrapHandler,
+                                       "Missing mandatory CLONE_XXX flags "
+                                       "when creating new thread")));
+  } else {
+    return ErrorCode(ErrorCode::ERR_ALLOWED);
+  }
+}
+
+static void* ThreadFnc(void* arg) {
+  ++*reinterpret_cast<int*>(arg);
+  SandboxSyscall(__NR_futex, arg, FUTEX_WAKE, 1, 0, 0, 0);
+  return NULL;
+}
+
+static void PthreadTest() {
+  // Attempt to start a joinable thread. This should succeed.
+  pthread_t thread;
+  int thread_ran = 0;
+  BPF_ASSERT(!pthread_create(&thread, NULL, ThreadFnc, &thread_ran));
+  BPF_ASSERT(!pthread_join(thread, NULL));
+  BPF_ASSERT(thread_ran);
+
+  // Attempt to start a detached thread. This should succeed.
+  thread_ran = 0;
+  pthread_attr_t attr;
+  BPF_ASSERT(!pthread_attr_init(&attr));
+  BPF_ASSERT(!pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));
+  BPF_ASSERT(!pthread_create(&thread, &attr, ThreadFnc, &thread_ran));
+  BPF_ASSERT(!pthread_attr_destroy(&attr));
+  while (SandboxSyscall(__NR_futex, &thread_ran, FUTEX_WAIT, 0, 0, 0, 0) ==
+         -EINTR) {
+  }
+  BPF_ASSERT(thread_ran);
+
+  // Attempt to fork() a process using clone(). This should fail. We use the
+  // same flags that glibc uses when calling fork(). But we don't actually
+  // try calling the fork() implementation in the C run-time library, as
+  // run-time libraries other than glibc might call __NR_fork instead of
+  // __NR_clone, and that would introduce a bogus test failure.
+  int pid;
+  BPF_ASSERT(SandboxSyscall(__NR_clone,
+                            CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | SIGCHLD,
+                            0,
+                            0,
+                            &pid) == -EPERM);
+}
+
+BPF_TEST(SandboxBPF, PthreadEquality, PthreadPolicyEquality) { PthreadTest(); }
+
+BPF_TEST(SandboxBPF, PthreadBitMask, PthreadPolicyBitMask) { PthreadTest(); }
+
+}  // namespace
+
+}  // namespace sandbox
new file mode 100644
--- /dev/null
+++ b/security/sandbox/chromium/sandbox/linux/seccomp-bpf/syscall.cc
@@ -0,0 +1,243 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sandbox/linux/seccomp-bpf/syscall.h"
+
+#include <asm/unistd.h>
+#include <errno.h>
+
+#include "base/basictypes.h"
+
+namespace sandbox {
+
+  asm(      // We need to be able to tell the kernel exactly where we made a
+            // system call. The C++ compiler sometimes likes to clone or
+            // inline code, which would inadvertently end up duplicating
+            // the entry point.
+            // "gcc" can suppress code duplication with suitable function
+            // attributes, but "clang" doesn't have this ability.
+            // The "clang" developer mailing list suggested that the correct
+            // and portable solution is a file-scope assembly block.
+            // N.B. We do mark our code as a proper function so that backtraces
+            // work correctly. But we make absolutely no attempt to use the
+            // ABI's calling conventions for passing arguments. We will only
+            // ever be called from assembly code and thus can pick more
+            // suitable calling conventions.
+#if defined(__i386__)
+            ".text\n"
+            ".align 16, 0x90\n"
+            ".type SyscallAsm, @function\n"
+ "SyscallAsm:.cfi_startproc\n"
+            // Check if "%eax" is negative. If so, do not attempt to make a
+            // system call. Instead, compute the return address that is visible
+            // to the kernel after we execute "int $0x80". This address can be
+            // used as a marker that BPF code inspects.
+            "test %eax, %eax\n"
+            "jge  1f\n"
+            // Always make sure that our code is position-independent, or
+            // address space randomization might not work on i386. This means
+            // we can't use "lea", but instead have to rely on "call/pop".
+            "call 0f;   .cfi_adjust_cfa_offset  4\n"
+          "0:pop  %eax; .cfi_adjust_cfa_offset -4\n"
+            "addl $2f-0b, %eax\n"
+            "ret\n"
+            // Save registers that we don't want to clobber. On i386, we need
+            // to save relatively aggressively, as there are a couple of
+            // registers that are used internally (e.g. %ebx for
+            // position-independent code, and %ebp for the frame pointer), and
+            // as we need to keep at least a few registers available for the
+            // register allocator.
+          "1:push %esi; .cfi_adjust_cfa_offset 4\n"
+            "push %edi; .cfi_adjust_cfa_offset 4\n"
+            "push %ebx; .cfi_adjust_cfa_offset 4\n"
+            "push %ebp; .cfi_adjust_cfa_offset 4\n"
+            // Copy entries from the array holding the arguments into the
+            // correct CPU registers.
+            "movl  0(%edi), %ebx\n"
+            "movl  4(%edi), %ecx\n"
+            "movl  8(%edi), %edx\n"
+            "movl 12(%edi), %esi\n"
+            "movl 20(%edi), %ebp\n"
+            "movl 16(%edi), %edi\n"
+            // Enter the kernel.
+            "int  $0x80\n"
+            // This is our "magic" return address that the BPF filter sees.
+          "2:"
+            // Restore any clobbered registers that we didn't declare to the
+            // compiler.
+            "pop  %ebp; .cfi_adjust_cfa_offset -4\n"
+            "pop  %ebx; .cfi_adjust_cfa_offset -4\n"
+            "pop  %edi; .cfi_adjust_cfa_offset -4\n"
+            "pop  %esi; .cfi_adjust_cfa_offset -4\n"
+            "ret\n"
+            ".cfi_endproc\n"
+          "9:.size SyscallAsm, 9b-SyscallAsm\n"
+#elif defined(__x86_64__)
+            ".text\n"
+            ".align 16, 0x90\n"
+            ".type SyscallAsm, @function\n"
+ "SyscallAsm:.cfi_startproc\n"
+            // Check if "%rax" is negative. If so, do not attempt to make a
+            // system call. Instead, compute the return address that is visible
+            // to the kernel after we execute "syscall". This address can be
+            // used as a marker that BPF code inspects.
+            "test %rax, %rax\n"
+            "jge  1f\n"
+            // Always make sure that our code is position-independent, or the
+            // linker will throw a hissy fit on x86-64.
+            "call 0f;   .cfi_adjust_cfa_offset  8\n"
+          "0:pop  %rax; .cfi_adjust_cfa_offset -8\n"
+            "addq $2f-0b, %rax\n"
+            "ret\n"
+            // We declared all clobbered registers to the compiler. On x86-64,
+            // there really isn't much of a problem with register pressure. So,
+            // we can go ahead and directly copy the entries from the arguments
+            // array into the appropriate CPU registers.
+          "1:movq  0(%r12), %rdi\n"
+            "movq  8(%r12), %rsi\n"
+            "movq 16(%r12), %rdx\n"
+            "movq 24(%r12), %r10\n"
+            "movq 32(%r12), %r8\n"
+            "movq 40(%r12), %r9\n"
+            // Enter the kernel.
+            "syscall\n"
+            // This is our "magic" return address that the BPF filter sees.
+          "2:ret\n"
+            ".cfi_endproc\n"
+          "9:.size SyscallAsm, 9b-SyscallAsm\n"
+#elif defined(__arm__)
+            // Throughout this file, we use the same mode (ARM vs. thumb)
+            // that the C++ compiler uses. This means, when transferring control
+            // from C++ to assembly code, we do not need to switch modes (e.g.
+            // by using the "bx" instruction). It also means that our assembly
+            // code should not be invoked directly from code that lives in
+            // other compilation units, as we don't bother implementing thumb
+            // interworking. That's OK, as we don't make any of the assembly
+            // symbols public. They are all local to this file.
+            ".text\n"
+            ".align 2\n"
+            ".type SyscallAsm, %function\n"
+#if defined(__thumb__)
+            ".thumb_func\n"
+#else
+            ".arm\n"
+#endif
+ "SyscallAsm:.fnstart\n"
+            "@ args = 0, pretend = 0, frame = 8\n"
+            "@ frame_needed = 1, uses_anonymous_args = 0\n"
+#if defined(__thumb__)
+            ".cfi_startproc\n"
+            "push {r7, lr}\n"
+            ".cfi_offset 14, -4\n"
+            ".cfi_offset  7, -8\n"
+            "mov r7, sp\n"
+            ".cfi_def_cfa_register 7\n"
+            ".cfi_def_cfa_offset 8\n"
+#else
+            "stmfd sp!, {fp, lr}\n"
+            "add fp, sp, #4\n"
+#endif
+            // Check if "r0" is negative. If so, do not attempt to make a
+            // system call. Instead, compute the return address that is visible
+            // to the kernel after we execute "swi 0". This address can be
+            // used as a marker that BPF code inspects.
+            "cmp r0, #0\n"
+            "bge 1f\n"
+            "adr r0, 2f\n"
+            "b   2f\n"
+            // We declared (almost) all clobbered registers to the compiler. On
+            // ARM there is no particular register pressure. So, we can go
+            // ahead and directly copy the entries from the arguments array
+            // into the appropriate CPU registers.
+          "1:ldr r5, [r6, #20]\n"
+            "ldr r4, [r6, #16]\n"
+            "ldr r3, [r6, #12]\n"
+            "ldr r2, [r6, #8]\n"
+            "ldr r1, [r6, #4]\n"
+            "mov r7, r0\n"
+            "ldr r0, [r6, #0]\n"
+            // Enter the kernel.
+            "swi 0\n"
+            // Restore the frame pointer. Also restore the program counter from
+            // the link register; this makes us return to the caller.
+#if defined(__thumb__)
+          "2:pop {r7, pc}\n"
+            ".cfi_endproc\n"
+#else
+          "2:ldmfd sp!, {fp, pc}\n"
+#endif
+            ".fnend\n"
+          "9:.size SyscallAsm, 9b-SyscallAsm\n"
+#endif
+  );  // asm
+
+intptr_t SandboxSyscall(int nr,
+                        intptr_t p0, intptr_t p1, intptr_t p2,
+                        intptr_t p3, intptr_t p4, intptr_t p5) {
+  // We rely on "intptr_t" to be the exact size as a "void *". This is
+  // typically true, but just in case, we add a check. The language
+  // specification allows platforms some leeway in cases, where
+  // "sizeof(void *)" is not the same as "sizeof(void (*)())". We expect
+  // that this would only be an issue for IA64, which we are currently not
+  // planning on supporting. And it is even possible that this would work
+  // on IA64, but for lack of actual hardware, I cannot test.
+  COMPILE_ASSERT(sizeof(void *) == sizeof(intptr_t),
+                 pointer_types_and_intptr_must_be_exactly_the_same_size);
+
+  const intptr_t args[6] = { p0, p1, p2, p3, p4, p5 };
+
+  // Invoke our file-scope assembly code. The constraints have been picked
+  // carefully to match what the rest of the assembly code expects in input,
+  // output, and clobbered registers.
+#if defined(__i386__)
+  intptr_t ret = nr;
+  asm volatile(
+    "call SyscallAsm\n"
+    // N.B. These are not the calling conventions normally used by the ABI.
+    : "=a"(ret)
+    : "0"(ret), "D"(args)
+    : "cc", "esp", "memory", "ecx", "edx");
+#elif defined(__x86_64__)
+  intptr_t ret = nr;
+  {
+    register const intptr_t *data __asm__("r12") = args;
+    asm volatile(
+      "lea  -128(%%rsp), %%rsp\n"  // Avoid red zone.
+      "call SyscallAsm\n"
+      "lea  128(%%rsp), %%rsp\n"
+      // N.B. These are not the calling conventions normally used by the ABI.
+      : "=a"(ret)
+      : "0"(ret), "r"(data)
+      : "cc", "rsp", "memory",
+        "rcx", "rdi", "rsi", "rdx", "r8", "r9", "r10", "r11");
+  }
+#elif defined(__arm__)
+  intptr_t ret;
+  {
+    register intptr_t inout __asm__("r0") = nr;
+    register const intptr_t *data __asm__("r6") = args;
+    asm volatile(
+      "bl SyscallAsm\n"
+      // N.B. These are not the calling conventions normally used by the ABI.
+      : "=r"(inout)
+      : "0"(inout), "r"(data)
+      : "cc", "lr", "memory", "r1", "r2", "r3", "r4", "r5"
+#if !defined(__thumb__)
+      // In thumb mode, we cannot use "r7" as a general purpose register, as
+      // it is our frame pointer. We have to manually manage and preserve it.
+      // In ARM mode, we have a dedicated frame pointer register and "r7" is
+      // thus available as a general purpose register. We don't preserve it,
+      // but instead mark it as clobbered.
+        , "r7"
+#endif  // !defined(__thumb__)
+      );
+    ret = inout;
+  }
+#else
+  errno = ENOSYS;
+  intptr_t ret = -1;
+#endif
+  return ret;
+}
+
+}  // namespace sandbox
new file mode 100644
--- /dev/null
+++ b/security/sandbox/chromium/sandbox/linux/seccomp-bpf/syscall.h
@@ -0,0 +1,148 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_SECCOMP_BPF_SYSCALL_H__
+#define SANDBOX_LINUX_SECCOMP_BPF_SYSCALL_H__
+
+#include <stdint.h>
+
+#include "sandbox/linux/sandbox_export.h"
+
+namespace sandbox {
+
+// We have to make sure that we have a single "magic" return address for
+// our system calls, which we can check from within a BPF filter. This
+// works by writing a little bit of asm() code that a) enters the kernel, and
+// that also b) can be invoked in a way that computes this return address.
+// Passing "nr" as "-1" computes the "magic" return address. Passing any
+// other value invokes the appropriate system call.
+SANDBOX_EXPORT intptr_t SandboxSyscall(int nr,
+                                       intptr_t p0,
+                                       intptr_t p1,
+                                       intptr_t p2,
+                                       intptr_t p3,
+                                       intptr_t p4,
+                                       intptr_t p5);
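+//
+// A brief usage sketch (illustrative only; the wrappers below allow the
+// trailing zero arguments to be omitted):
+//
+//   intptr_t magic = SandboxSyscall(-1);          // compute magic address
+//   intptr_t pid = SandboxSyscall(__NR_getpid);   // make a real system call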
+
+// System calls can take up to six parameters. Traditionally, glibc
+// implements this property by using variadic argument lists. This works, but
+// confuses modern tools such as valgrind, because we are nominally passing
+// uninitialized data whenever we call through this function and pass less
+// than the full six arguments.
+// So, instead, we use C++'s template system to achieve a very similar
+// effect. C++ automatically sets the unused parameters to zero for us, and
+// it also does the correct type expansion (e.g. from 32bit to 64bit) where
+// necessary.
+// We have to use C-style cast operators as we want to be able to accept both
+// integer and pointer types.
+// We explicitly mark all functions as inline. This is not necessary in
+// optimized builds, where the compiler automatically figures out that it
+// can inline everything. But it makes stack traces of unoptimized builds
+// easier to read as it hides implementation details.
+#if __cplusplus >= 201103  // C++11
+
+template <class T0 = intptr_t,
+          class T1 = intptr_t,
+          class T2 = intptr_t,
+          class T3 = intptr_t,
+          class T4 = intptr_t,
+          class T5 = intptr_t>
+SANDBOX_EXPORT inline intptr_t SandboxSyscall(int nr,
+                                              T0 p0 = 0,
+                                              T1 p1 = 0,
+                                              T2 p2 = 0,
+                                              T3 p3 = 0,
+                                              T4 p4 = 0,
+                                              T5 p5 = 0)
+    __attribute__((always_inline));
+
+template <class T0, class T1, class T2, class T3, class T4, class T5>
+SANDBOX_EXPORT inline intptr_t
+SandboxSyscall(int nr, T0 p0, T1 p1, T2 p2, T3 p3, T4 p4, T5 p5) {
+  return SandboxSyscall(nr,
+                        (intptr_t)p0,
+                        (intptr_t)p1,
+                        (intptr_t)p2,
+                        (intptr_t)p3,
+                        (intptr_t)p4,
+                        (intptr_t)p5);
+}
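+
+// Usage sketch (illustrative): unused trailing arguments default to zero,
+// so callers only pass what the system call actually needs:
+//
+//   SandboxSyscall(__NR_close, fd);  // one argument; p1..p5 default to 0
+//   SandboxSyscall(__NR_getpid);     // no arguments at all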
+
+#else  // Pre-C++11
+
+// TODO(markus): C++11 has a much more concise and readable solution for
+//   expressing what we are doing here. Delete the fall-back code for older
+//   compilers as soon as we have fully switched to C++11.
+
+template <class T0, class T1, class T2, class T3, class T4, class T5>
+SANDBOX_EXPORT inline intptr_t
+    SandboxSyscall(int nr, T0 p0, T1 p1, T2 p2, T3 p3, T4 p4, T5 p5)
+    __attribute__((always_inline));
+template <class T0, class T1, class T2, class T3, class T4, class T5>
+SANDBOX_EXPORT inline intptr_t
+SandboxSyscall(int nr, T0 p0, T1 p1, T2 p2, T3 p3, T4 p4, T5 p5) {
+  return SandboxSyscall(nr,
+                        (intptr_t)p0,
+                        (intptr_t)p1,
+                        (intptr_t)p2,
+                        (intptr_t)p3,
+                        (intptr_t)p4,
+                        (intptr_t)p5);
+}
+
+template <class T0, class T1, class T2, class T3, class T4>
+SANDBOX_EXPORT inline intptr_t
+    SandboxSyscall(int nr, T0 p0, T1 p1, T2 p2, T3 p3, T4 p4)
+    __attribute__((always_inline));
+template <class T0, class T1, class T2, class T3, class T4>
+SANDBOX_EXPORT inline intptr_t
+SandboxSyscall(int nr, T0 p0, T1 p1, T2 p2, T3 p3, T4 p4) {
+  return SandboxSyscall(nr, p0, p1, p2, p3, p4, 0);
+}
+
+template <class T0, class T1, class T2, class T3>
+SANDBOX_EXPORT inline intptr_t
+    SandboxSyscall(int nr, T0 p0, T1 p1, T2 p2, T3 p3)
+    __attribute__((always_inline));
+template <class T0, class T1, class T2, class T3>
+SANDBOX_EXPORT inline intptr_t
+SandboxSyscall(int nr, T0 p0, T1 p1, T2 p2, T3 p3) {
+  return SandboxSyscall(nr, p0, p1, p2, p3, 0, 0);
+}
+
+template <class T0, class T1, class T2>
+SANDBOX_EXPORT inline intptr_t SandboxSyscall(int nr, T0 p0, T1 p1, T2 p2)
+    __attribute__((always_inline));
+template <class T0, class T1, class T2>
+SANDBOX_EXPORT inline intptr_t SandboxSyscall(int nr, T0 p0, T1 p1, T2 p2) {
+  return SandboxSyscall(nr, p0, p1, p2, 0, 0, 0);
+}
+
+template <class T0, class T1>
+SANDBOX_EXPORT inline intptr_t SandboxSyscall(int nr, T0 p0, T1 p1)
+    __attribute__((always_inline));
+template <class T0, class T1>
+SANDBOX_EXPORT inline intptr_t SandboxSyscall(int nr, T0 p0, T1 p1) {
+  return SandboxSyscall(nr, p0, p1, 0, 0, 0, 0);
+}
+
+template <class T0>
+SANDBOX_EXPORT inline intptr_t SandboxSyscall(int nr, T0 p0)
+    __attribute__((always_inline));
+template <class T0>
+SANDBOX_EXPORT inline intptr_t SandboxSyscall(int nr, T0 p0) {
+  return SandboxSyscall(nr, p0, 0, 0, 0, 0, 0);
+}
+
+SANDBOX_EXPORT inline intptr_t SandboxSyscall(int nr)
+    __attribute__((always_inline));
+SANDBOX_EXPORT inline intptr_t SandboxSyscall(int nr) {
+  return SandboxSyscall(nr, 0, 0, 0, 0, 0, 0);
+}
+
+#endif  // Pre-C++11
+
+}  // namespace sandbox
+
+#endif  // SANDBOX_LINUX_SECCOMP_BPF_SYSCALL_H__
new file mode 100644
--- /dev/null
+++ b/security/sandbox/chromium/sandbox/linux/seccomp-bpf/syscall_iterator.cc
@@ -0,0 +1,92 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sandbox/linux/seccomp-bpf/syscall_iterator.h"
+
+#include "base/basictypes.h"
+#include "sandbox/linux/seccomp-bpf/linux_seccomp.h"
+
+namespace sandbox {
+
+uint32_t SyscallIterator::Next() {
+  if (done_) {
+    return num_;
+  }
+
+  uint32_t val;
+  do {
+    // |num_| has been initialized to 0, which we assume is also MIN_SYSCALL.
+    // This is true for supported architectures (Intel and ARM EABI).
+    COMPILE_ASSERT(MIN_SYSCALL == 0u, min_syscall_should_always_be_zero);
+    val = num_;
+
+    // First we iterate up to MAX_PUBLIC_SYSCALL, which is equal to MAX_SYSCALL
+    // on Intel architectures, but leaves room for private syscalls on ARM.
+    if (num_ <= MAX_PUBLIC_SYSCALL) {
+      if (invalid_only_ && num_ < MAX_PUBLIC_SYSCALL) {
+        num_ = MAX_PUBLIC_SYSCALL;
+      } else {
+        ++num_;
+      }
+#if defined(__arm__)
+      // ARM EABI includes "ARM private" system calls starting at
+      // MIN_PRIVATE_SYSCALL, and a "ghost syscall private to the kernel" at
+      // MIN_GHOST_SYSCALL.
+    } else if (num_ < MIN_PRIVATE_SYSCALL - 1) {
+      num_ = MIN_PRIVATE_SYSCALL - 1;
+    } else if (num_ <= MAX_PRIVATE_SYSCALL) {
+      if (invalid_only_ && num_ < MAX_PRIVATE_SYSCALL) {
+        num_ = MAX_PRIVATE_SYSCALL;
+      } else {
+        ++num_;
+      }
+    } else if (num_ < MIN_GHOST_SYSCALL - 1) {
+      num_ = MIN_GHOST_SYSCALL - 1;
+    } else if (num_ <= MAX_SYSCALL) {
+      if (invalid_only_ && num_ < MAX_SYSCALL) {
+        num_ = MAX_SYSCALL;
+      } else {
+        ++num_;
+      }
+#endif
+      // BPF programs only ever operate on unsigned quantities. So, that's how
+      // we iterate; we return values from 0..0xFFFFFFFFu. But there are
+      // places where the kernel might interpret system call numbers as signed
+      // quantities, so the boundaries between signed and unsigned values are
+      // potential problem cases. We want to explicitly return these values
+      // from our iterator.
+    } else if (num_ < 0x7FFFFFFFu) {
+      num_ = 0x7FFFFFFFu;
+    } else if (num_ < 0x80000000u) {
+      num_ = 0x80000000u;
+    } else if (num_ < 0xFFFFFFFFu) {
+      num_ = 0xFFFFFFFFu;
+    }
+  } while (invalid_only_ && IsValid(val));
+
+  done_ |= val == 0xFFFFFFFFu;
+  return val;
+}
+
+bool SyscallIterator::IsValid(uint32_t num) {
+  uint32_t min_syscall = MIN_SYSCALL;
+  if (num >= min_syscall && num <= MAX_PUBLIC_SYSCALL) {
+    return true;
+  }
+  if (IsArmPrivate(num)) {
+    return true;
+  }
+  return false;
+}
+
+#if defined(__arm__) && (defined(__thumb__) || defined(__ARM_EABI__))
+bool SyscallIterator::IsArmPrivate(uint32_t num) {
+  return (num >= MIN_PRIVATE_SYSCALL && num <= MAX_PRIVATE_SYSCALL) ||
+         (num >= MIN_GHOST_SYSCALL && num <= MAX_SYSCALL);
+}
+#else
+bool SyscallIterator::IsArmPrivate(uint32_t) { return false; }
+#endif
+
+}  // namespace sandbox
new file mode 100644
--- /dev/null
+++ b/security/sandbox/chromium/sandbox/linux/seccomp-bpf/syscall_iterator.h
@@ -0,0 +1,56 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_SECCOMP_BPF_SYSCALL_ITERATOR_H__
+#define SANDBOX_LINUX_SECCOMP_BPF_SYSCALL_ITERATOR_H__
+
+#include <stdint.h>
+
+#include "base/basictypes.h"
+#include "sandbox/linux/sandbox_export.h"
+
+namespace sandbox {
+
+// Iterates over the entire system call range from 0..0xFFFFFFFFu. This
+// iterator is aware of what system calls look like and will skip quickly
+// over ranges that can't contain system calls. It iterates more slowly
+// whenever it reaches a range that is potentially problematic, returning
+// the last invalid value before a valid range of system calls, and the
+// first invalid value after a valid range of syscalls. It iterates over
+// individual values whenever it is in the normal range for system calls
+// (typically MIN_SYSCALL..MAX_SYSCALL).
+// If |invalid_only| is true, this iterator will only return invalid
+// syscall numbers, but will still skip quickly over invalid ranges,
+// returning the first invalid value in the range and then skipping
+// to the last invalid value in the range.
+//
+// Example usage:
+//   for (SyscallIterator iter(false); !iter.Done(); ) {
+//     uint32_t sysnum = iter.Next();
+//     // Do something with sysnum.
+//   }
+//
+// TODO(markus): Make this a classic C++ iterator.
+class SANDBOX_EXPORT SyscallIterator {
+ public:
+  explicit SyscallIterator(bool invalid_only)
+      : invalid_only_(invalid_only), done_(false), num_(0) {}
+
+  bool Done() const { return done_; }
+  uint32_t Next();
+  static bool IsValid(uint32_t num);
+
+ private:
+  static bool IsArmPrivate(uint32_t num);
+
+  bool invalid_only_;
+  bool done_;
+  uint32_t num_;
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(SyscallIterator);
+};
+
+}  // namespace sandbox
+
+#endif  // SANDBOX_LINUX_SECCOMP_BPF_SYSCALL_ITERATOR_H__
new file mode 100644
--- /dev/null
+++ b/security/sandbox/chromium/sandbox/linux/seccomp-bpf/syscall_iterator_unittest.cc
@@ -0,0 +1,136 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sandbox/linux/seccomp-bpf/sandbox_bpf.h"
+#include "sandbox/linux/seccomp-bpf/syscall_iterator.h"
+#include "sandbox/linux/tests/unit_tests.h"
+
+namespace sandbox {
+
+namespace {
+
+SANDBOX_TEST(SyscallIterator, Monotonous) {
+  for (int i = 0; i < 2; ++i) {
+    bool invalid_only = !i;  // Testing both |invalid_only| cases.
+    SyscallIterator iter(invalid_only);
+    uint32_t next = iter.Next();
+
+    if (!invalid_only) {
+      // The iterator should start at 0.
+      SANDBOX_ASSERT(next == 0);
+    }
+    for (uint32_t last = next; !iter.Done(); last = next) {
+      next = iter.Next();
+      SANDBOX_ASSERT(last < next);
+    }
+    // The iterator should always return 0xFFFFFFFFu as the last value.
+    SANDBOX_ASSERT(next == 0xFFFFFFFFu);
+  }
+}
+
+SANDBOX_TEST(SyscallIterator, PublicSyscallRange) {
+  SyscallIterator iter(false);
+  uint32_t next = iter.Next();
+
+  // The iterator should cover the public syscall range
+  // MIN_SYSCALL..MAX_PUBLIC_SYSCALL, without skipping syscalls.
+  // We're assuming MIN_SYSCALL == 0 for all architectures;
+  // this is currently valid for Intel and ARM EABI.
+  SANDBOX_ASSERT(MIN_SYSCALL == 0);
+  SANDBOX_ASSERT(next == MIN_SYSCALL);
+  for (uint32_t last = next; next < MAX_PUBLIC_SYSCALL + 1; last = next) {
+    SANDBOX_ASSERT((next = iter.Next()) == last + 1);
+  }
+  SANDBOX_ASSERT(next == MAX_PUBLIC_SYSCALL + 1);
+}
+
+#if defined(__arm__)
+SANDBOX_TEST(SyscallIterator, ARMPrivateSyscallRange) {
+  SyscallIterator iter(false);
+  uint32_t next = iter.Next();
+  while (next < MIN_PRIVATE_SYSCALL - 1) {
+    next = iter.Next();
+  }
+  // The iterator should cover the ARM private syscall range
+  // without skipping syscalls.
+  SANDBOX_ASSERT(next == MIN_PRIVATE_SYSCALL - 1);
+  for (uint32_t last = next; next < MAX_PRIVATE_SYSCALL + 1; last = next) {
+    SANDBOX_ASSERT((next = iter.Next()) == last + 1);
+  }
+  SANDBOX_ASSERT(next == MAX_PRIVATE_SYSCALL + 1);
+}
+
+SANDBOX_TEST(SyscallIterator, ARMHiddenSyscallRange) {
+  SyscallIterator iter(false);
+  uint32_t next = iter.Next();
+  while (next < MIN_GHOST_SYSCALL - 1) {
+    next = iter.Next();
+  }
+  // The iterator should cover the ARM hidden syscall range
+  // without skipping syscalls.
+  SANDBOX_ASSERT(next == MIN_GHOST_SYSCALL - 1);
+  for (uint32_t last = next; next < MAX_SYSCALL + 1; last = next) {
+    SANDBOX_ASSERT((next = iter.Next()) == last + 1);
+  }
+  SANDBOX_ASSERT(next == MAX_SYSCALL + 1);
+}
+#endif
+
+SANDBOX_TEST(SyscallIterator, Invalid) {
+  for (int i = 0; i < 2; ++i) {
+    bool invalid_only = !i;  // Testing both |invalid_only| cases.
+    SyscallIterator iter(invalid_only);
+    uint32_t next = iter.Next();
+
+    while (next < MAX_SYSCALL + 1) {
+      next = iter.Next();
+    }
+
+    SANDBOX_ASSERT(next == MAX_SYSCALL + 1);
+    while (next < 0x7FFFFFFFu) {
+      next = iter.Next();
+    }
+
+    // The iterator should return the signed/unsigned corner cases.
+    SANDBOX_ASSERT(next == 0x7FFFFFFFu);
+    next = iter.Next();
+    SANDBOX_ASSERT(next == 0x80000000u);
+    SANDBOX_ASSERT(!iter.Done());
+    next = iter.Next();
+    SANDBOX_ASSERT(iter.Done());
+    SANDBOX_ASSERT(next == 0xFFFFFFFFu);
+  }
+}
+
+SANDBOX_TEST(SyscallIterator, InvalidOnly) {
+  bool invalid_only = true;
+  SyscallIterator iter(invalid_only);
+  uint32_t next = iter.Next();
+  // We're assuming MIN_SYSCALL == 0 for all architectures;
+  // this is currently valid for Intel and ARM EABI.
+  // The first invalid syscall should then be |MAX_PUBLIC_SYSCALL + 1|.
+  SANDBOX_ASSERT(MIN_SYSCALL == 0);
+  SANDBOX_ASSERT(next == MAX_PUBLIC_SYSCALL + 1);
+
+#if defined(__arm__)
+  next = iter.Next();
+  // The iterator should skip until the last invalid syscall in this range.
+  SANDBOX_ASSERT(next == MIN_PRIVATE_SYSCALL - 1);
+  while (next <= MAX_PRIVATE_SYSCALL) {
+    next = iter.Next();
+  }
+
+  next = iter.Next();
+  // The iterator should skip until the last invalid syscall in this range.
+  SANDBOX_ASSERT(next == MIN_GHOST_SYSCALL - 1);
+  while (next <= MAX_SYSCALL) {
+    next = iter.Next();
+  }
+  SANDBOX_ASSERT(next == MAX_SYSCALL + 1);
+#endif
+}
+
+}  // namespace
+
+}  // namespace sandbox
new file mode 100644
--- /dev/null
+++ b/security/sandbox/chromium/sandbox/linux/seccomp-bpf/syscall_unittest.cc
@@ -0,0 +1,201 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <asm/unistd.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/posix/eintr_wrapper.h"
+#include "sandbox/linux/seccomp-bpf/bpf_tests.h"
+#include "sandbox/linux/seccomp-bpf/sandbox_bpf.h"
+#include "sandbox/linux/seccomp-bpf/syscall.h"
+#include "sandbox/linux/tests/unit_tests.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace sandbox {
+
+namespace {
+
+// Different platforms use different symbols for the six-argument version
+// of the mmap() system call. Test for the correct symbol at compile time.
+#ifdef __NR_mmap2
+const int kMMapNr = __NR_mmap2;
+#else
+const int kMMapNr = __NR_mmap;
+#endif
+
+TEST(Syscall, WellKnownEntryPoint) {
+// Test that SandboxSyscall(-1) is handled specially. Don't do this on ARM,
+// where syscall(-1) crashes with SIGILL. Not running the test is fine, as we
+// are still testing ARM code in the next set of tests.
+#if !defined(__arm__)
+  EXPECT_NE(SandboxSyscall(-1), syscall(-1));
+#endif
+
+// If possible, test that SandboxSyscall(-1) returns the address right after
+// a kernel entry point.
+#if defined(__i386__)
+  EXPECT_EQ(0x80CDu, ((uint16_t*)SandboxSyscall(-1))[-1]);  // INT 0x80
+#elif defined(__x86_64__)
+  EXPECT_EQ(0x050Fu, ((uint16_t*)SandboxSyscall(-1))[-1]);  // SYSCALL
+#elif defined(__arm__)
+#if defined(__thumb__)
+  EXPECT_EQ(0xDF00u, ((uint16_t*)SandboxSyscall(-1))[-1]);  // SWI 0
+#else
+  EXPECT_EQ(0xEF000000u, ((uint32_t*)SandboxSyscall(-1))[-1]);  // SVC 0
+#endif
+#else
+#warning Incomplete test case; need port for target platform
+#endif
+}
+
+TEST(Syscall, TrivialSyscallNoArgs) {
+  // Test that we can do basic system calls
+  EXPECT_EQ(SandboxSyscall(__NR_getpid), syscall(__NR_getpid));
+}
+
+TEST(Syscall, TrivialSyscallOneArg) {
+  int new_fd;
+  // Duplicate standard error and close it.
+  ASSERT_GE(new_fd = SandboxSyscall(__NR_dup, 2), 0);
+  int close_return_value = IGNORE_EINTR(SandboxSyscall(__NR_close, new_fd));
+  ASSERT_EQ(close_return_value, 0);
+}
+
+// SIGSYS trap handler that will be called on __NR_uname.
+intptr_t CopySyscallArgsToAux(const struct arch_seccomp_data& args, void* aux) {
+  // |aux| is a pointer to our BPF_AUX.
+  std::vector<uint64_t>* const seen_syscall_args =
+      static_cast<std::vector<uint64_t>*>(aux);
+  BPF_ASSERT(arraysize(args.args) == 6);
+  seen_syscall_args->assign(args.args, args.args + arraysize(args.args));
+  return -ENOMEM;
+}
+
+ErrorCode CopyAllArgsOnUnamePolicy(SandboxBPF* sandbox, int sysno, void* aux) {
+  if (!SandboxBPF::IsValidSyscallNumber(sysno)) {
+    return ErrorCode(ENOSYS);
+  }
+  if (sysno == __NR_uname) {
+    return sandbox->Trap(CopySyscallArgsToAux, aux);
+  } else {
+    return ErrorCode(ErrorCode::ERR_ALLOWED);
+  }
+}
+
+// We are testing SandboxSyscall() by making use of a BPF filter that allows us
+// to inspect the system call arguments that the kernel saw.
+BPF_TEST(Syscall,
+         SyntheticSixArgs,
+         CopyAllArgsOnUnamePolicy,
+         std::vector<uint64_t> /* BPF_AUX */) {
+  const int kExpectedValue = 42;
+  // In this test we only pass integers to the kernel. We might want to make
+  // additional tests to try other types. What we will see depends on
+  // implementation details of kernel BPF filters and we will need to document
+  // the expected behavior very clearly.
+  int syscall_args[6];
+  for (size_t i = 0; i < arraysize(syscall_args); ++i) {
+    syscall_args[i] = kExpectedValue + i;
+  }
+
+  // We could use pretty much any system call here. uname() is
+  // nice because it doesn't have any dangerous side effects.
+  BPF_ASSERT(SandboxSyscall(__NR_uname,
+                            syscall_args[0],
+                            syscall_args[1],
+                            syscall_args[2],
+                            syscall_args[3],
+                            syscall_args[4],
+                            syscall_args[5]) == -ENOMEM);
+
+  // We expect the trap handler to have copied the 6 arguments.
+  BPF_ASSERT(BPF_AUX.size() == 6);
+
+  // Don't loop here, so that we can easily tell from the failing line
+  // which argument caused the failure.
+  // uint64_t is the type passed to our SIGSYS handler.
+  BPF_ASSERT(BPF_AUX[0] == static_cast<uint64_t>(syscall_args[0]));
+  BPF_ASSERT(BPF_AUX[1] == static_cast<uint64_t>(syscall_args[1]));
+  BPF_ASSERT(BPF_AUX[2] == static_cast<uint64_t>(syscall_args[2]));
+  BPF_ASSERT(BPF_AUX[3] == static_cast<uint64_t>(syscall_args[3]));
+  BPF_ASSERT(BPF_AUX[4] == static_cast<uint64_t>(syscall_args[4]));
+  BPF_ASSERT(BPF_AUX[5] == static_cast<uint64_t>(syscall_args[5]));
+}
+
+TEST(Syscall, ComplexSyscallSixArgs) {
+  int fd;
+  ASSERT_LE(0, fd = SandboxSyscall(__NR_open, "/dev/null", O_RDWR, 0L));
+
+  // Use mmap() to allocate some read-only memory
+  char* addr0;
+  ASSERT_NE((char*)NULL,
+            addr0 = reinterpret_cast<char*>(
+                SandboxSyscall(kMMapNr,
+                               (void*)NULL,
+                               4096,
+                               PROT_READ,
+                               MAP_PRIVATE | MAP_ANONYMOUS,
+                               fd,
+                               0L)));
+
+  // Try to replace the existing mapping with a read-write mapping
+  char* addr1;
+  ASSERT_EQ(addr0,
+            addr1 = reinterpret_cast<char*>(
+                SandboxSyscall(kMMapNr,
+                               addr0,
+                               4096L,
+                               PROT_READ | PROT_WRITE,
+                               MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
+                               fd,
+                               0L)));
+  ++*addr1;  // This should not seg fault
+
+  // Clean up
+  EXPECT_EQ(0, SandboxSyscall(__NR_munmap, addr1, 4096L));
+  EXPECT_EQ(0, IGNORE_EINTR(SandboxSyscall(__NR_close, fd)));
+
+  // Check that the offset argument (i.e. the sixth argument) is processed
+  // correctly.
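+  // (Note: __NR_mmap2 takes its offset argument in 4096-byte units, whereas
+  // __NR_mmap takes it in bytes; hence the 1L vs. 4096L constants below.)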
+  ASSERT_GE(fd = SandboxSyscall(__NR_open, "/proc/self/exe", O_RDONLY, 0L), 0);
+  char* addr2, *addr3;
+  ASSERT_NE((char*)NULL,
+            addr2 = reinterpret_cast<char*>(SandboxSyscall(
+                kMMapNr, (void*)NULL, 8192L, PROT_READ, MAP_PRIVATE, fd, 0L)));
+  ASSERT_NE((char*)NULL,
+            addr3 = reinterpret_cast<char*>(SandboxSyscall(kMMapNr,
+                                                           (void*)NULL,
+                                                           4096L,
+                                                           PROT_READ,
+                                                           MAP_PRIVATE,
+                                                           fd,
+#if defined(__NR_mmap2)
+                                                           1L
+#else
+                                                           4096L
+#endif
+                                                           )));
+  EXPECT_EQ(0, memcmp(addr2 + 4096, addr3, 4096));
+
+  // Just to be absolutely on the safe side, also verify that the file
+  // contents matches what we are getting from a read() operation.
+  char buf[8192];
+  EXPECT_EQ(8192, SandboxSyscall(__NR_read, fd, buf, 8192L));
+  EXPECT_EQ(0, memcmp(addr2, buf, 8192));
+
+  // Clean up
+  EXPECT_EQ(0, SandboxSyscall(__NR_munmap, addr2, 8192L));
+  EXPECT_EQ(0, SandboxSyscall(__NR_munmap, addr3, 4096L));
+  EXPECT_EQ(0, IGNORE_EINTR(SandboxSyscall(__NR_close, fd)));
+}
+
+}  // namespace
+
+}  // namespace sandbox
new file mode 100644
--- /dev/null
+++ b/security/sandbox/chromium/sandbox/linux/seccomp-bpf/trap.cc
@@ -0,0 +1,357 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "sandbox/linux/seccomp-bpf/trap.h"
+
+#include <errno.h>
+#include <signal.h>
+#include <string.h>
+#include <sys/prctl.h>
+#include <sys/syscall.h>
+
+#include <limits>
+
+#include "base/logging.h"
+#include "sandbox/linux/seccomp-bpf/codegen.h"
+#include "sandbox/linux/seccomp-bpf/die.h"
+#include "sandbox/linux/seccomp-bpf/syscall.h"
+
+// Android's signal.h doesn't define ucontext etc.
+#if defined(OS_ANDROID)
+#include "sandbox/linux/services/android_ucontext.h"
+#endif
+
+namespace {
+
+const int kCapacityIncrement = 20;
+
+// Unsafe traps can only be turned on if the user explicitly allowed them
+// by setting the CHROME_SANDBOX_DEBUGGING environment variable.
+const char kSandboxDebuggingEnv[] = "CHROME_SANDBOX_DEBUGGING";
+
+// We need to tell whether we are performing a "normal" callback, or
+// whether we were called recursively from within a UnsafeTrap() callback.
+// This is a little tricky to do, because we need to somehow get access to
+// per-thread data from within a signal context. Normal TLS storage is not
+// safely accessible at this time. We could roll our own, but that involves
+// a lot of complexity. Instead, we co-opt one bit in the signal mask.
+// If SIGBUS is blocked, we assume that we have been called recursively.
+// There is a possibility for collision with other code that needs to do
+// this, but in practice the risks are low.
+// If SIGBUS turns out to be a problem, we could instead co-opt one of the
+// realtime signals. There are plenty of them. Unfortunately, there is no
+// way to mark a signal as allocated. So, the potential for collision is
+// possibly even worse.
+bool GetIsInSigHandler(const ucontext_t* ctx) {
+  // Note: on Android, sigismember does not take a pointer to const.
+  return sigismember(const_cast<sigset_t*>(&ctx->uc_sigmask), SIGBUS);
+}
+
+void SetIsInSigHandler() {
+  sigset_t mask;
+  if (sigemptyset(&mask) || sigaddset(&mask, SIGBUS) ||
+      sigprocmask(SIG_BLOCK, &mask, NULL)) {
+    SANDBOX_DIE("Failed to block SIGBUS");
+  }
+}
+
+bool IsDefaultSignalAction(const struct sigaction& sa) {
+  if (sa.sa_flags & SA_SIGINFO || sa.sa_handler != SIG_DFL) {
+    return false;
+  }
+  return true;
+}
+
+}  // namespace
+
+namespace sandbox {
+
+Trap::Trap()
+    : trap_array_(NULL),
+      trap_array_size_(0),
+      trap_array_capacity_(0),
+      has_unsafe_traps_(false) {
+  // Set new SIGSYS handler
+  struct sigaction sa = {};
+  sa.sa_sigaction = SigSysAction;
+  sa.sa_flags = SA_SIGINFO | SA_NODEFER;
+  struct sigaction old_sa;
+  if (sigaction(SIGSYS, &sa, &old_sa) < 0) {
+    SANDBOX_DIE("Failed to configure SIGSYS handler");
+  }
+
+  if (!IsDefaultSignalAction(old_sa)) {
+    static const char kExistingSIGSYSMsg[] =
+        "Existing signal handler when trying to install SIGSYS. SIGSYS needs "
+        "to be reserved for seccomp-bpf.";
+    DLOG(FATAL) << kExistingSIGSYSMsg;
+    LOG(ERROR) << kExistingSIGSYSMsg;
+  }
+
+  // Unmask SIGSYS
+  sigset_t mask;
+  if (sigemptyset(&mask) || sigaddset(&mask, SIGSYS) ||
+      sigprocmask(SIG_UNBLOCK, &mask, NULL)) {
+    SANDBOX_DIE("Failed to configure SIGSYS handler");
+  }
+}
+
+Trap* Trap::GetInstance() {
+  // Note: This class is not thread safe. It is the caller's responsibility
+  // to avoid race conditions. Normally, this is a non-issue as the sandbox
+  // can only be initialized if there are no other threads present.
+  // Also, this is not a normal singleton. Once created, the global trap
+  // object must never be destroyed again.
+  if (!global_trap_) {
+    global_trap_ = new Trap();
+    if (!global_trap_) {
+      SANDBOX_DIE("Failed to allocate global trap handler");
+    }
+  }
+  return global_trap_;
+}
+
+void Trap::SigSysAction(int nr, siginfo_t* info, void* void_context) {
+  if (!global_trap_) {
+    RAW_SANDBOX_DIE(
+        "This can't happen. Found no global singleton instance "
+        "for Trap() handling.");
+  }
+  global_trap_->SigSys(nr, info, void_context);
+}
+
+void Trap::SigSys(int nr, siginfo_t* info, void* void_context) {
+  // Signal handlers should always preserve "errno". Otherwise, we could
+  // trigger really subtle bugs.
+  const int old_errno = errno;
+
+  // Various sanity checks to make sure we actually received a signal
+  // triggered by a BPF filter. If something else triggered SIGSYS
+  // (e.g. kill()), there is really nothing we can do with this signal.
+  if (nr != SIGSYS || info->si_code != SYS_SECCOMP || !void_context ||
+      info->si_errno <= 0 ||
+      static_cast<size_t>(info->si_errno) > trap_array_size_) {
+    // ATI drivers seem to send SIGSYS, so this cannot be FATAL.
+    // See crbug.com/178166.
+    // TODO(jln): add a DCHECK or move back to FATAL.
+    RAW_LOG(ERROR, "Unexpected SIGSYS received.");
+    errno = old_errno;
+    return;
+  }
+
+  // Obtain the signal context. This, most notably, gives us access to
+  // all CPU registers at the time of the signal.
+  ucontext_t* ctx = reinterpret_cast<ucontext_t*>(void_context);
+
+  // Obtain the siginfo information that is specific to SIGSYS. Unfortunately,
+  // most versions of glibc don't include this information in siginfo_t. So,
+  // we need to explicitly copy it into an arch_sigsys structure.
+  struct arch_sigsys sigsys;
+  memcpy(&sigsys, &info->_sifields, sizeof(sigsys));
+
+  // Some more sanity checks.
+  if (sigsys.ip != reinterpret_cast<void*>(SECCOMP_IP(ctx)) ||
+      sigsys.nr != static_cast<int>(SECCOMP_SYSCALL(ctx)) ||
+      sigsys.arch != SECCOMP_ARCH) {
+    // TODO(markus):
+    // SANDBOX_DIE() can call LOG(FATAL). This is not normally async-signal
+    // safe and can lead to bugs. We should eventually implement a different
+    // logging and reporting mechanism that is safe to be called from
+    // the SigSys() handler.
+    RAW_SANDBOX_DIE("Sanity checks are failing after receiving SIGSYS.");
+  }
+
+  intptr_t rc;
+  if (has_unsafe_traps_ && GetIsInSigHandler(ctx)) {
+    errno = old_errno;
+    if (sigsys.nr == __NR_clone) {
+      RAW_SANDBOX_DIE("Cannot call clone() from an UnsafeTrap() handler.");
+    }
+    rc = SandboxSyscall(sigsys.nr,
+                        SECCOMP_PARM1(ctx),
+                        SECCOMP_PARM2(ctx),
+                        SECCOMP_PARM3(ctx),
+                        SECCOMP_PARM4(ctx),
+                        SECCOMP_PARM5(ctx),
+                        SECCOMP_PARM6(ctx));
+  } else {
+    const ErrorCode& err = trap_array_[info->si_errno - 1];
+    if (!err.safe_) {
+      SetIsInSigHandler();
+    }
+
+    // Copy the seccomp-specific data into an arch_seccomp_data structure. This
+    // is what we are showing to TrapFnc callbacks that the system call
+    // evaluator registered with the sandbox.
+    struct arch_seccomp_data data = {
+        sigsys.nr, SECCOMP_ARCH, reinterpret_cast<uint64_t>(sigsys.ip),
+        {static_cast<uint64_t>(SECCOMP_PARM1(ctx)),
+         static_cast<uint64_t>(SECCOMP_PARM2(ctx)),
+         static_cast<uint64_t>(SECCOMP_PARM3(ctx)),
+         static_cast<uint64_t>(SECCOMP_PARM4(ctx)),
+         static_cast<uint64_t>(SECCOMP_PARM5(ctx)),
+         static_cast<uint64_t>(SECCOMP_PARM6(ctx))}};
+
+    // Now call the TrapFnc callback associated with this particular instance
+    // of SECCOMP_RET_TRAP.
+    rc = err.fnc_(data, err.aux_);
+  }
+
+  // Update the CPU register that stores the return code of the system call
+  // that we just handled, and restore "errno" to the value that it had
+  // before entering the signal handler.
+  SECCOMP_RESULT(ctx) = static_cast<greg_t>(rc);
+  errno = old_errno;
+
+  return;
+}
+
+bool Trap::TrapKey::operator<(const TrapKey& o) const {
+  if (fnc != o.fnc) {
+    return fnc < o.fnc;
+  } else if (aux != o.aux) {
+    return aux < o.aux;
+  } else {
+    return safe < o.safe;
+  }
+}
+
+ErrorCode Trap::MakeTrap(TrapFnc fnc, const void* aux, bool safe) {
+  return GetInstance()->MakeTrapImpl(fnc, aux, safe);
+}
+
+ErrorCode Trap::MakeTrapImpl(TrapFnc fnc, const void* aux, bool safe) {
+  if (!safe && !SandboxDebuggingAllowedByUser()) {
+    // Unless the user set the CHROME_SANDBOX_DEBUGGING environment variable,
+    // we never return an ErrorCode that is marked as "unsafe". This also
+    // means the BPF compiler will never emit code that allows unsafe system
+    // calls to bypass the filter (because they use the magic return address
+    // from SandboxSyscall(-1)).
+
+    // This SANDBOX_DIE() can optionally be removed. It won't break security,
+    // but it might make error messages from the BPF compiler a little harder
+    // to understand. Removing the SANDBOX_DIE() allows callers to easily check
+    // whether unsafe traps are supported (by checking whether the returned
+    // ErrorCode is ET_INVALID).
+    SANDBOX_DIE(
+        "Cannot use unsafe traps unless CHROME_SANDBOX_DEBUGGING "
+        "is enabled");
+
+    return ErrorCode();
+  }
+
+  // Each unique pair of TrapFnc and auxiliary data make up a distinct instance
+  // of a SECCOMP_RET_TRAP.
+  TrapKey key(fnc, aux, safe);
+  TrapIds::const_iterator iter = trap_ids_.find(key);
+
+  // We return unique identifiers together with SECCOMP_RET_TRAP. This allows
+  // us to associate each trap with the appropriate handler. The kernel lets
+  // us use identifiers in the range from 0 to SECCOMP_RET_DATA (0xFFFF). We
+  // want to avoid 0, as it could be confused for a trap without any specific
+  // id.
+  // The nice thing about sequentially numbered identifiers is that we can also
+  // trivially look them up from our signal handler without making any system
+  // calls that might be async-signal-unsafe.
+  // In order to do so, we store all of our traps in a C-style trap_array_.
+  uint16_t id;
+  if (iter != trap_ids_.end()) {
+    // We have seen this pair before. Return the same id that we assigned
+    // earlier.
+    id = iter->second;
+  } else {
+    // This is a new pair. Remember it and assign a new id.
+    if (trap_array_size_ >= SECCOMP_RET_DATA /* 0xFFFF */ ||
+        trap_array_size_ >= std::numeric_limits<typeof(id)>::max()) {
+      // In practice, this is pretty much impossible to trigger, as there
+      // are other kernel limitations that restrict overall BPF program sizes.
+      SANDBOX_DIE("Too many SECCOMP_RET_TRAP callback instances");
+    }
+    id = trap_array_size_ + 1;
+
+    // Our callers ensure that there are no other threads accessing trap_array_
+    // concurrently (typically this is done by ensuring that we are single-
+    // threaded while the sandbox is being set up). But we nonetheless are
+    // modifying a live data structure that could be accessed any time a
+    // system call is made, as system calls could trigger SIGSYS.
+    // So, we have to be extra careful that we update trap_array_ atomically.
+    // In particular, this means we shouldn't be using realloc() to resize it.
+    // Instead, we allocate a new array, copy the values, and then switch the
+    // pointer. We only really care about the pointer being updated atomically
+    // and the data that is pointed to being valid, as these are the only
+    // values accessed from the signal handler. It is OK if trap_array_size_
+    // is inconsistent with the pointer, as it is monotonically increasing.
+    // Also, we only care about compiler barriers, as the signal handler is
+    // triggered synchronously from a system call. We don't have to protect
+    // against issues with the memory model or with completely asynchronous
+    // events.
+    if (trap_array_size_ >= trap_array_capacity_) {
+      trap_array_capacity_ += kCapacityIncrement;
+      ErrorCode* old_trap_array = trap_array_;
+      ErrorCode* new_trap_array = new ErrorCode[trap_array_capacity_];
+
+      // Language specs are unclear on whether the compiler is allowed to move
+      // the "delete[]" above our preceding assignments and/or memory moves,
+      // iff the compiler believes that "delete[]" doesn't have any other
+      // global side-effects.
+      // We insert optimization barriers to prevent this from happening.
+      // The first barrier is probably not needed, but better be explicit in
+      // what we want to tell the compiler.
+      // The clang developer mailing list couldn't answer whether this is a
+      // legitimate worry; but they at least thought that the barrier is
+      // sufficient to prevent the (so far hypothetical) problem of re-ordering
+      // of instructions by the compiler.
+      memcpy(new_trap_array, trap_array_, trap_array_size_ * sizeof(ErrorCode));
+      asm volatile("" : "=r"(new_trap_array) : "0"(new_trap_array) : "memory");
+      trap_array_ = new_trap_array;
+      asm volatile("" : "=r"(trap_array_) : "0"(trap_array_) : "memory");
+
+      delete[] old_trap_array;
+    }
+    trap_ids_[key] = id;
+    trap_array_[trap_array_size_] = ErrorCode(fnc, aux, safe, id);
+    return trap_array_[trap_array_size_++];
+  }
+
+  return ErrorCode(fnc, aux, safe, id);
+}
+
+bool Trap::SandboxDebuggingAllowedByUser() const {
+  const char* debug_flag = getenv(kSandboxDebuggingEnv);
+  return debug_flag && *debug_flag;
+}
+
+bool Trap::EnableUnsafeTrapsInSigSysHandler() {
+  Trap* trap = GetInstance();
+  if (!trap->has_unsafe_traps_) {
+    // Unsafe traps are a one-way fuse. Once enabled, they can never be turned
+    // off again.
+    // We only allow enabling unsafe traps, if the user explicitly set an
+    // appropriate environment variable. This prevents bugs that accidentally
+    // disable all sandboxing for all users.
+    if (trap->SandboxDebuggingAllowedByUser()) {
+      // We only ever print this message once, when we enable unsafe traps the
+      // first time.
+      SANDBOX_INFO("WARNING! Disabling sandbox for debugging purposes");
+      trap->has_unsafe_traps_ = true;
+    } else {
+      SANDBOX_INFO(
+          "Cannot disable sandbox and use unsafe traps unless "
+          "CHROME_SANDBOX_DEBUGGING is turned on first");
+    }
+  }
+  // Returns the (possibly updated) value of has_unsafe_traps_.
+  return trap->has_unsafe_traps_;
+}
+
+ErrorCode Trap::ErrorCodeFromTrapId(uint16_t id) {
+  if (global_trap_ && id > 0 && id <= global_trap_->trap_array_size_) {
+    return global_trap_->trap_array_[id - 1];
+  } else {
+    return ErrorCode();
+  }
+}
+
+Trap* Trap::global_trap_;
+
+}  // namespace sandbox
new file mode 100644
--- /dev/null
+++ b/security/sandbox/chromium/sandbox/linux/seccomp-bpf/trap.h
@@ -0,0 +1,117 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_SECCOMP_BPF_TRAP_H__
+#define SANDBOX_LINUX_SECCOMP_BPF_TRAP_H__
+
+#include <signal.h>
+#include <stdint.h>
+
+#include <map>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "sandbox/linux/sandbox_export.h"
+
+namespace sandbox {
+
+class ErrorCode;
+
+// The Trap class allows a BPF filter program to branch out to user space by
+// raising a SIGSYS signal.
+// N.B.: This class does not perform any synchronization operations. If
+//   modifications are made to any of the traps, it is the caller's
+//   responsibility to ensure that this happens in a thread-safe fashion.
+//   Preferably, that means that no other threads should be running at that
+//   time. For the purposes of our sandbox, this assertion should always be
+//   true. Threads are incompatible with the seccomp sandbox anyway.
+class SANDBOX_EXPORT Trap {
+ public:
+  // TrapFnc is a pointer to a function that handles Seccomp traps in
+  // user-space. The seccomp policy can request that a trap handler gets
+  // installed; it does so by returning a suitable ErrorCode() from the
+  // syscallEvaluator. See the ErrorCode() constructor for how to pass in
+  // the function pointer.
+  // Please note that TrapFnc is executed from signal context and must be
+  // async-signal safe:
+  // http://pubs.opengroup.org/onlinepubs/009695399/functions/xsh_chap02_04.html
+  // Also note that it follows the calling convention of native system calls.
+  // In other words, it reports an error by returning an exit code in the
+  // range -1..-4096. It should not set errno when reporting errors; on the
+  // other hand, accidentally modifying errno is harmless and the changes will
+  // be undone afterwards.
+  typedef intptr_t (*TrapFnc)(const struct arch_seccomp_data& args, void* aux);
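+  //
+  // For illustration, a minimal handler obeying these conventions (the name
+  // is hypothetical) could fail every intercepted system call with EPERM:
+  //
+  //   intptr_t DenyHandler(const struct arch_seccomp_data& args, void* aux) {
+  //     return -EPERM;  // kernel-style error return; errno is left alone
+  //   }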
+
+  // Registers a new trap handler and sets up the appropriate SIGSYS handler
+  // as needed.
+  // N.B.: This makes a permanent state change. Traps cannot be unregistered,
+  //   as that would break existing BPF filters that are still active.
+  static ErrorCode MakeTrap(TrapFnc fnc, const void* aux, bool safe);
+
+  // Enables support for unsafe traps in the SIGSYS signal handler. This is a
+  // one-way fuse. It works in conjunction with the BPF compiler emitting code
+  // that unconditionally allows system calls, if they have a magic return
+  // address (i.e. SandboxSyscall(-1)).
+  // Once unsafe traps are enabled, the sandbox is essentially compromised.
+  // But this is still a very useful feature for debugging purposes. Use with
+  // care. This feature is available only if enabled by the user (see above).
+  // Returns "true", if unsafe traps were turned on.
+  static bool EnableUnsafeTrapsInSigSysHandler();
+
+  // Returns the ErrorCode associated with a particular trap id.
+  static ErrorCode ErrorCodeFromTrapId(uint16_t id);
+
+ private:
+  // The destructor is unimplemented. Don't ever attempt to destruct this
+  // object. It'll break subsequent system calls that trigger a SIGSYS.
+  ~Trap();
+
+  struct TrapKey {
+    TrapKey(TrapFnc f, const void* a, bool s) : fnc(f), aux(a), safe(s) {}
+    TrapFnc fnc;
+    const void* aux;
+    bool safe;
+    bool operator<(const TrapKey&) const;
+  };
+  typedef std::map<TrapKey, uint16_t> TrapIds;
+
+  // We only have a very small number of methods. We opt to make them static
+  // and have them internally call GetInstance(). This is a little more
+  // convenient than having each caller obtain short-lived reference to the
+  // singleton.
+  // It also gracefully deals with methods that should check for the singleton,
+  // but avoid instantiating it if it doesn't exist yet
+  // (e.g. ErrorCodeFromTrapId()).
+  static Trap* GetInstance();
+  static void SigSysAction(int nr, siginfo_t* info, void* void_context);
+
+  // Make sure that SigSys is not inlined in order to get slightly better crash
+  // dumps.
+  void SigSys(int nr, siginfo_t* info, void* void_context)
+      __attribute__((noinline));
+  ErrorCode MakeTrapImpl(TrapFnc fnc, const void* aux, bool safe);
+  bool SandboxDebuggingAllowedByUser() const;
+
+  // We have a global singleton that handles all of our SIGSYS traps. This
+  // variable must never be deallocated after it has been set up initially, as
+  // there is no way to reset in-kernel BPF filters that generate SIGSYS
+  // events.
+  static Trap* global_trap_;
+
+  TrapIds trap_ids_;            // Maps from TrapKeys to numeric ids
+  ErrorCode* trap_array_;       // Array of ErrorCodes indexed by ids
+  size_t trap_array_size_;      // Currently used size of array
+  size_t trap_array_capacity_;  // Currently allocated capacity of array
+  bool has_unsafe_traps_;       // Whether unsafe traps have been enabled
+
+  // Our constructor is private. A shared global instance is created
+  // automatically as needed.
+  // Copying and assigning are unimplemented. They don't make sense for a
+  // singleton.
+  DISALLOW_IMPLICIT_CONSTRUCTORS(Trap);
+};
+
+}  // namespace sandbox
+
+#endif  // SANDBOX_LINUX_SECCOMP_BPF_TRAP_H__
new file mode 100644
--- /dev/null
+++ b/security/sandbox/chromium/sandbox/linux/seccomp-bpf/verifier.cc
@@ -0,0 +1,446 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <string.h>
+
+#include "sandbox/linux/seccomp-bpf/sandbox_bpf.h"
+#include "sandbox/linux/seccomp-bpf/sandbox_bpf_policy.h"
+#include "sandbox/linux/seccomp-bpf/syscall_iterator.h"
+#include "sandbox/linux/seccomp-bpf/verifier.h"
+
+
+namespace sandbox {
+
+namespace {
+
+struct State {
+  State(const std::vector<struct sock_filter>& p,
+        const struct arch_seccomp_data& d)
+      : program(p), data(d), ip(0), accumulator(0), acc_is_valid(false) {}
+  const std::vector<struct sock_filter>& program;
+  const struct arch_seccomp_data& data;
+  unsigned int ip;
+  uint32_t accumulator;
+  bool acc_is_valid;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(State);
+};
+
+uint32_t EvaluateErrorCode(SandboxBPF* sandbox,
+                           const ErrorCode& code,
+                           const struct arch_seccomp_data& data) {
+  if (code.error_type() == ErrorCode::ET_SIMPLE ||
+      code.error_type() == ErrorCode::ET_TRAP) {
+    return code.err();
+  } else if (code.error_type() == ErrorCode::ET_COND) {
+    if (code.width() == ErrorCode::TP_32BIT &&
+        (data.args[code.argno()] >> 32) &&
+        (data.args[code.argno()] & 0xFFFFFFFF80000000ull) !=
+            0xFFFFFFFF80000000ull) {
+      return sandbox->Unexpected64bitArgument().err();
+    }
+    switch (code.op()) {
+      case ErrorCode::OP_EQUAL:
+        return EvaluateErrorCode(sandbox,
+                                 (code.width() == ErrorCode::TP_32BIT
+                                      ? uint32_t(data.args[code.argno()])
+                                      : data.args[code.argno()]) == code.value()
+                                     ? *code.passed()
+                                     : *code.failed(),
+                                 data);
+      case ErrorCode::OP_HAS_ALL_BITS:
+        return EvaluateErrorCode(sandbox,
+                                 ((code.width() == ErrorCode::TP_32BIT
+                                       ? uint32_t(data.args[code.argno()])
+                                       : data.args[code.argno()]) &
+                                  code.value()) == code.value()
+                                     ? *code.passed()
+                                     : *code.failed(),
+                                 data);
+      case ErrorCode::OP_HAS_ANY_BITS:
+        return EvaluateErrorCode(sandbox,
+                                 (code.width() == ErrorCode::TP_32BIT
+                                      ? uint32_t(data.args[code.argno()])
+                                      : data.args[code.argno()]) &
+                                         code.value()
+                                     ? *code.passed()
+                                     : *code.failed(),
+                                 data);
+      default:
+        return SECCOMP_RET_INVALID;
+    }
+  } else {
+    return SECCOMP_RET_INVALID;
+  }
+}
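+
+// A concrete reading of the ET_COND case above: for OP_EQUAL on a TP_32BIT
+// argument with value() == 42, data.args[argno] == 42 recurses into
+// *code.passed(), data.args[argno] == 7 recurses into *code.failed(), and
+// data.args[argno] == 0x100000000ull short-circuits to
+// sandbox->Unexpected64bitArgument().err().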
+
+bool VerifyErrorCode(SandboxBPF* sandbox,
+                     const std::vector<struct sock_filter>& program,
+                     struct arch_seccomp_data* data,
+                     const ErrorCode& root_code,
+                     const ErrorCode& code,
+                     const char** err) {
+  if (code.error_type() == ErrorCode::ET_SIMPLE ||
+      code.error_type() == ErrorCode::ET_TRAP) {
+    uint32_t computed_ret = Verifier::EvaluateBPF(program, *data, err);
+    if (*err) {
+      return false;
+    } else if (computed_ret != EvaluateErrorCode(sandbox, root_code, *data)) {
+      // For efficiency's sake, we'd much rather compare "computed_ret"
+      // against "code.err()". This works most of the time, but it doesn't
+      // always work for nested conditional expressions. The test values
+      // that we generate on the fly to probe expressions can trigger
+      // code flow decisions in multiple nodes of the decision tree, and the
+      // only way to compute the correct error code in that situation is by
+      // calling EvaluateErrorCode().
+      *err = "Exit code from BPF program doesn't match";
+      return false;
+    }
+  } else if (code.error_type() == ErrorCode::ET_COND) {
+    if (code.argno() < 0 || code.argno() >= 6) {
+      *err = "Invalid argument number in error code";
+      return false;
+    }
+    switch (code.op()) {
+      case ErrorCode::OP_EQUAL:
+        // Verify that we can check a 32bit value (or the least significant
+        // half of a 64bit value) for equality.
+        data->args[code.argno()] = code.value();
+        if (!VerifyErrorCode(
+                 sandbox, program, data, root_code, *code.passed(), err)) {
+          return false;
+        }
+
+        // Change the value to no longer match and verify that this is detected
+        // as an inequality.
+        data->args[code.argno()] = code.value() ^ 0x55AA55AA;
+        if (!VerifyErrorCode(
+                 sandbox, program, data, root_code, *code.failed(), err)) {
+          return false;
+        }
+
+        // BPF programs can only ever operate on 32bit values. So, we have
+        // generated additional BPF instructions that inspect the most
+        // significant half. Verify that they behave as intended.
+        if (code.width() == ErrorCode::TP_32BIT) {
+          if (code.value() >> 32) {
+            SANDBOX_DIE(
+                "Invalid comparison of a 32bit system call argument "
+                "against a 64bit constant; this test is always false.");
+          }
+
+          // If the system call argument was intended to be a 32bit parameter,
+          // verify that it is a fatal error if a 64bit value is ever passed
+          // here.
+          data->args[code.argno()] = 0x100000000ull;
+          if (!VerifyErrorCode(sandbox,
+                               program,
+                               data,
+                               root_code,
+                               sandbox->Unexpected64bitArgument(),
+                               err)) {
+            return false;
+          }
+        } else {
+          // If the system call argument was intended to be a 64bit parameter,
+          // verify that we can handle (in-)equality for the most significant
+          // half. This is essentially the same test that we did earlier for
+          // the least significant half.
+          // We only need to verify the behavior of the inequality test; we
+          // know that the equality test already passed because, unlike the
+          // kernel, the Verifier operates on full 64bit quantities.
+          data->args[code.argno()] = code.value() ^ 0x55AA55AA00000000ull;
+          if (!VerifyErrorCode(
+                   sandbox, program, data, root_code, *code.failed(), err)) {
+            return false;
+          }
+        }
+        break;
+      case ErrorCode::OP_HAS_ALL_BITS:
+      case ErrorCode::OP_HAS_ANY_BITS:
+        // A comprehensive test of bit values is difficult and potentially
+        // rather time-expensive. We avoid doing so at run-time and instead
+        // rely on the unittest for full testing. The test that we have here
+        // covers just the common cases. We test against the bitmask itself,
+        // all zeros, and all ones.
+        {
+          // Testing "any" bits against a zero mask is always false. So, there
+          // are some cases, where we expect tests to take the "failed()" branch
+          // even though this is a test that normally should take "passed()".
+          const ErrorCode& passed =
+              (!code.value() && code.op() == ErrorCode::OP_HAS_ANY_BITS) ||
+
+                      // On a 32bit system, it is impossible to pass a 64bit
+                      // value as a
+                      // system call argument. So, some additional tests always
+                      // evaluate
+                      // as false.
+                      ((code.value() & ~uint64_t(uintptr_t(-1))) &&
+                       code.op() == ErrorCode::OP_HAS_ALL_BITS) ||
+                      (code.value() && !(code.value() & uintptr_t(-1)) &&
+                       code.op() == ErrorCode::OP_HAS_ANY_BITS)
+                  ? *code.failed()
+                  : *code.passed();
+
+          // Similarly, testing for "all" bits in a zero mask is always true.
+          // So, some cases pass even though they normally would fail.
+          const ErrorCode& failed =
+              !code.value() && code.op() == ErrorCode::OP_HAS_ALL_BITS
+                  ? *code.passed()
+                  : *code.failed();
+
+          data->args[code.argno()] = code.value() & uintptr_t(-1);
+          if (!VerifyErrorCode(
+                   sandbox, program, data, root_code, passed, err)) {
+            return false;
+          }
+          data->args[code.argno()] = uintptr_t(-1);
+          if (!VerifyErrorCode(
+                   sandbox, program, data, root_code, passed, err)) {
+            return false;
+          }
+          data->args[code.argno()] = 0;
+          if (!VerifyErrorCode(
+                   sandbox, program, data, root_code, failed, err)) {
+            return false;
+          }
+        }
+        break;
+      default:  // TODO(markus): Need to add support for OP_GREATER
+        *err = "Unsupported operation in conditional error code";
+        return false;
+    }
+  } else {
+    *err = "Attempting to return invalid error code from BPF program";
+    return false;
+  }
+  return true;
+}
+
+void Ld(State* state, const struct sock_filter& insn, const char** err) {
+  if (BPF_SIZE(insn.code) != BPF_W || BPF_MODE(insn.code) != BPF_ABS) {
+    *err = "Invalid BPF_LD instruction";
+    return;
+  }
+  if (insn.k < sizeof(struct arch_seccomp_data) && (insn.k & 3) == 0) {
+    // We only allow loading of properly aligned 32bit quantities.
+    memcpy(&state->accumulator,
+           reinterpret_cast<const char*>(&state->data) + insn.k,
+           4);
+  } else {
+    *err = "Invalid operand in BPF_LD instruction";
+    return;
+  }
+  state->acc_is_valid = true;
+  return;
+}
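+
+// The loads handled above index into struct arch_seccomp_data, which is
+// laid out as (int nr; uint32_t arch; uint64_t instruction_pointer;
+// uint64_t args[6]): insn.k == 0 reads the syscall number, insn.k == 4 the
+// architecture, and insn.k == 16 the low half of args[0] on little-endian
+// machines.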
+
+void Jmp(State* state, const struct sock_filter& insn, const char** err) {
+  if (BPF_OP(insn.code) == BPF_JA) {
+    if (state->ip + insn.k + 1 >= state->program.size() ||
+        state->ip + insn.k + 1 <= state->ip) {
+    compilation_failure:
+      *err = "Invalid BPF_JMP instruction";
+      return;
+    }
+    state->ip += insn.k;
+  } else {
+    if (BPF_SRC(insn.code) != BPF_K || !state->acc_is_valid ||
+        state->ip + insn.jt + 1 >= state->program.size() ||
+        state->ip + insn.jf + 1 >= state->program.size()) {
+      goto compilation_failure;
+    }
+    switch (BPF_OP(insn.code)) {
+      case BPF_JEQ:
+        if (state->accumulator == insn.k) {
+          state->ip += insn.jt;
+        } else {
+          state->ip += insn.jf;
+        }
+        break;
+      case BPF_JGT:
+        if (state->accumulator > insn.k) {
+          state->ip += insn.jt;
+        } else {
+          state->ip += insn.jf;
+        }
+        break;
+      case BPF_JGE:
+        if (state->accumulator >= insn.k) {
+          state->ip += insn.jt;
+        } else {
+          state->ip += insn.jf;
+        }
+        break;
+      case BPF_JSET:
+        if (state->accumulator & insn.k) {
+          state->ip += insn.jt;
+        } else {
+          state->ip += insn.jf;
+        }
+        break;
+      default:
+        goto compilation_failure;
+    }
+  }
+}
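+
+// For example, the classic-BPF instruction
+// BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, 42, jt, jf) takes the jt branch when
+// the accumulator equals 42 and the jf branch otherwise; the BPF_JEQ case
+// above implements exactly that behavior.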
+
+uint32_t Ret(State*, const struct sock_filter& insn, const char** err) {
+  if (BPF_SRC(insn.code) != BPF_K) {
+    *err = "Invalid BPF_RET instruction";
+    return 0;
+  }
+  return insn.k;
+}
+
+void Alu(State* state, const struct sock_filter& insn, const char** err) {
+  if (BPF_OP(insn.code) == BPF_NEG) {
+    state->accumulator = -state->accumulator;
+    return;
+  } else {
+    if (BPF_SRC(insn.code) != BPF_K) {
+      *err = "Unexpected source operand in arithmetic operation";
+      return;
+    }
+    switch (BPF_OP(insn.code)) {
+      case BPF_ADD:
+        state->accumulator += insn.k;
+        break;
+      case BPF_SUB:
+        state->accumulator -= insn.k;
+        break;
+      case BPF_MUL:
+        state->accumulator *= insn.k;
+        break;
+      case BPF_DIV:
+        if (!insn.k) {
+          *err = "Illegal division by zero";
+          break;
+        }
+        state->accumulator /= insn.k;
+        break;
+      case BPF_MOD:
+        if (!insn.k) {
+          *err = "Illegal division by zero";
+          break;
+        }
+        state->accumulator %= insn.k;
+        break;
+      case BPF_OR:
+        state->accumulator |= insn.k;
+        break;
+      case BPF_XOR:
+        state->accumulator ^= insn.k;
+        break;
+      case BPF_AND:
+        state->accumulator &= insn.k;
+        break;
+      case BPF_LSH:
+        // Shifts of a 32bit value by 32 or more bits are undefined behavior
+        // in C, so treat them as errors rather than evaluating them.
+        if (insn.k >= 32) {
+          *err = "Illegal shift operation";
+          break;
+        }
+        state->accumulator <<= insn.k;
+        break;
+      case BPF_RSH:
+        if (insn.k >= 32) {
+          *err = "Illegal shift operation";
+          break;
+        }
+        state->accumulator >>= insn.k;
+        break;
+      default:
+        *err = "Invalid operator in arithmetic operation";
+        break;
+    }
+  }
+}
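+
+// For example, BPF_STMT(BPF_ALU + BPF_AND + BPF_K, 0xff) masks the
+// accumulator down to its low byte; it is evaluated by the BPF_AND case
+// above.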
+
+}  // namespace
+
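+// A hedged usage sketch (the surrounding wiring is assumed rather than
+// shown in this file): after compiling a policy into "program", a caller
+// could check the result with
+//   const char* err = NULL;
+//   if (!Verifier::VerifyBPF(sandbox, program, policy, &err)) {
+//     SANDBOX_DIE(err);
+//   }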
+bool Verifier::VerifyBPF(SandboxBPF* sandbox,
+                         const std::vector<struct sock_filter>& program,
+                         const SandboxBPFPolicy& policy,
+                         const char** err) {
+  *err = NULL;
+  for (SyscallIterator iter(false); !iter.Done();) {
+    uint32_t sysnum = iter.Next();
+    // We ideally want to iterate over the full system call range and values
+    // just above and just below this range. This gives us the full result set
+    // of the "evaluators".
+    // On Intel systems, this can fail in a surprising way, as a cleared bit 30
+    // indicates either i386 or x86-64; and a set bit 30 indicates x32. And
+    // unless we pay attention to setting this bit correctly, an early check in
+    // our BPF program will make us fail with a misleading error code.
+    struct arch_seccomp_data data = {static_cast<int>(sysnum),
+                                     static_cast<uint32_t>(SECCOMP_ARCH)};
+#if defined(__i386__) || defined(__x86_64__)
+#if defined(__x86_64__) && defined(__ILP32__)
+    if (!(sysnum & 0x40000000u)) {
+      continue;
+    }
+#else
+    if (sysnum & 0x40000000u) {
+      continue;
+    }
+#endif
+#endif
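+    // (On x86-64, bit 30 is __X32_SYSCALL_BIT: x32 system call numbers have
+    // it set, while native x86-64 and i386 numbers do not, hence the
+    // skipping above.)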
+    ErrorCode code = policy.EvaluateSyscall(sandbox, sysnum);
+    if (!VerifyErrorCode(sandbox, program, &data, code, code, err)) {
+      return false;
+    }
+  }
+  return true;
+}
+
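+// For a minimal two-instruction program such as
+//   BPF_STMT(BPF_LD + BPF_W + BPF_ABS, 0),         // load the syscall nr
+//   BPF_STMT(BPF_RET + BPF_K, SECCOMP_RET_ALLOW),  // allow everything
+// EvaluateBPF() returns SECCOMP_RET_ALLOW for any input data.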
+uint32_t Verifier::EvaluateBPF(const std::vector<struct sock_filter>& program,
+                               const struct arch_seccomp_data& data,
+                               const char** err) {
+  *err = NULL;
+  if (program.size() < 1 || program.size() >= SECCOMP_MAX_PROGRAM_SIZE) {
+    *err = "Invalid program length";
+    return 0;
+  }
+  for (State state(program, data); !*err; ++state.ip) {
+    if (state.ip >= program.size()) {
+      *err = "Invalid instruction pointer in BPF program";
+      break;
+    }
+    const struct sock_filter& insn = program[state.ip];
+    switch (BPF_CLASS(insn.code)) {
+      case BPF_LD:
+        Ld(&state, insn, err);
+        break;
+      case BPF_JMP:
+        Jmp(&state, insn, err);
+        break;
+      case BPF_RET: {
+        uint32_t r = Ret(&state, insn, err);
+        switch (r & SECCOMP_RET_ACTION) {
+          case SECCOMP_RET_TRAP:
+          case SECCOMP_RET_ERRNO:
+          case SECCOMP_RET_ALLOW:
+            break;
+          case SECCOMP_RET_KILL:     // We don't ever generate this
+          case SECCOMP_RET_TRACE:    // We don't ever generate this
+          case SECCOMP_RET_INVALID:  // Should never show up in BPF program
+          default:
+            *err = "Unexpected return code found in BPF program";
+            return 0;
+        }
+        return r;
+      }
+      case BPF_ALU:
+        Alu(&state, insn, err);
+        break;
+      default:
+        *err = "Unexpected instruction in BPF program";
+        break;
+    }
+  }
+  return 0;
+}
+
+}  // namespace sandbox
new file mode 100644
--- /dev/null
+++ b/security/sandbox/chromium/sandbox/linux/seccomp-bpf/verifier.h
@@ -0,0 +1,49 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SANDBOX_LINUX_SECCOMP_BPF_VERIFIER_H__
+#define SANDBOX_LINUX_SECCOMP_BPF_VERIFIER_H__
+
+#include <linux/filter.h>
+#include <stdint.h>
+
+#include <utility>
+#include <vector>
+
+#include "base/basictypes.h"
+
+namespace sandbox {
+
+struct arch_seccomp_data;
+
+class SandboxBPF;
+class SandboxBPFPolicy;
+
+class Verifier {
+ public:
+  // Evaluate the BPF program for all possible inputs and verify that it
+  // computes the correct result. We use the "evaluators" to determine
+  // the full set of possible inputs that we have to iterate over.
+  // Returns true if the BPF filter accurately reflects the rules set by
+  // the "evaluators".
+  // Upon success, "err" is set to NULL. Upon failure, it contains a static
+  // error message that does not need to be free()'d.
+  static bool VerifyBPF(SandboxBPF* sandbox,
+                        const std::vector<struct sock_filter>& program,
+                        const SandboxBPFPolicy& policy,
+                        const char** err);
+
+  // Evaluate a given BPF program for a particular set of system call
+  // parameters. If evaluation fails for any reason, "err" will be set to
+  // a non-NULL error string. Otherwise, the BPF program's result will be
+  // returned by the function and "err" will be NULL.
+  // We do not actually implement the full BPF state machine, but only the
+  // parts that can actually be generated by our BPF compiler. If this code
+  // is used for purposes other than verifying the output of the sandbox's
+  // BPF compiler, we might have to extend this BPF interpreter.
+  static uint32_t EvaluateBPF(const std::vector<struct sock_filter>& program,
+                              const struct arch_seccomp_data& data,
+                              const char** err);
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(Verifier);
+};
+
+}  // namespace sandbox
+
+#endif  // SANDBOX_LINUX_SECCOMP_BPF_VERIFIER_H__