-rw-r--r--  .SRCINFO                                   49
-rw-r--r--  0001_kfreebsd.patch                        41
-rw-r--r--  0002_mips.patch                           935
-rw-r--r--  0002_mips_r15102_backport.patch            17
-rw-r--r--  0002_mips_r19121_backport.patch            71
-rw-r--r--  0003_armv4t_disable_vfp.patch              15
-rw-r--r--  0004_hurd.patch                           925
-rw-r--r--  0008_mksnapshot_stdout.patch               16
-rw-r--r--  0011_use_system_gyp.patch                  23
-rw-r--r--  0012_loongson_force_cache_flush.patch      22
-rw-r--r--  0013_gcc_48_compat.patch                   28
-rw-r--r--  0014_cve_2013_6639_6640.patch             303
-rw-r--r--  0015-Backport-Utils-ApiCheck.patch         29
-rw-r--r--  0016-remove-this-null.patch               202
-rw-r--r--  0017_increase_stack_size_for_test.patch    18
-rw-r--r--  PKGBUILD                                  133
-rw-r--r--  dont-assume-hardfloat-means-vfpv3.diff     13
-rw-r--r--  fix_CVE-2014-5256.patch                    25
-rw-r--r--  gcc7-fix.patch                            223
-rw-r--r--  nodejsREPLACE_INVALID_UTF8.patch           18
-rw-r--r--  series                                     19
-rw-r--r--  strict_overflow.patch                      15
22 files changed, 3115 insertions, 25 deletions
diff --git a/.SRCINFO b/.SRCINFO
index ac316edd9cb2..a65820675812 100644
--- a/.SRCINFO
+++ b/.SRCINFO
@@ -1,21 +1,58 @@
-# Generated by mksrcinfo v8
-# Tue Jun 21 19:10:55 UTC 2016
pkgbase = v8-3.14
- pkgdesc = A fast and modern javascript engine (old 3.14 version required for plv8)
+	pkgdesc = A fast and modern javascript engine (old 3.14 version required for R package V8)
pkgver = 3.14.5
- pkgrel = 2
+ pkgrel = 3
url = http://code.google.com/p/v8
arch = i686
arch = x86_64
license = BSD
- makedepends = subversion
makedepends = python2
+ makedepends = gyp-git
depends = gcc-libs
- depends = readline
provides = v8
conflicts = v8
source = http://commondatastorage.googleapis.com/chromium-browser-official/v8-3.14.5.tar.bz2
+ source = 0001_kfreebsd.patch
+ source = 0002_mips.patch
+ source = 0002_mips_r15102_backport.patch
+ source = 0002_mips_r19121_backport.patch
+ source = 0003_armv4t_disable_vfp.patch
+ source = 0004_hurd.patch
+ source = 0008_mksnapshot_stdout.patch
+ source = 0011_use_system_gyp.patch
+ source = 0012_loongson_force_cache_flush.patch
+ source = 0013_gcc_48_compat.patch
+ source = 0014_cve_2013_6639_6640.patch
+ source = 0015-Backport-Utils-ApiCheck.patch
+ source = 0016-remove-this-null.patch
+ source = 0017_increase_stack_size_for_test.patch
+ source = https://gist.github.com/pat-s/942e255ea38821e6ac3e82a36cb3c4bd
+ source = fix_CVE-2014-5256.patch
+ source = nodejsREPLACE_INVALID_UTF8.patch
+ source = strict_overflow.patch
+ source = dont-assume-hardfloat-means-vfpv3.diff
+ source = gcc7-fix.patch
sha256sums = 361ad3b63dc7c9d0943b72b1be592a8135e4ddb0e416b9bcf02b4d2df514fca7
+ sha256sums = 15af4bbb02ad510ed57f7c635f00f7163c45884e55acadb1d05510d2f3aaa494
+ sha256sums = 239170677f6dfcae285dfb719ae3ae8d698a9652dab69f54506fbdd1b2eac9e4
+ sha256sums = a1bd65547bad7113619f77ad442422944b7fa5afac7795868e653a2d0c38877f
+ sha256sums = 1d4e0f503100515dea4be5558f6080321f3117108745cd3a481c44d80ebe8fc9
+ sha256sums = 16fdb157a24a336bf2979b73cfba484314f2cfca2cdcfa9fe51fe2ac9970f202
+ sha256sums = 8b43ef8dfc001d138d25348cd3594d139bc88bb1d333d3908800adbc8c6e55ab
+ sha256sums = 73f75ce1fe02cfa51d8ee6410e000e96f07c21f1e42dd48ffc7d7970434e1677
+ sha256sums = 4dba0e7e1d5f7cad6907c76551e36ef616765de003f83f8989d46008cf53911a
+ sha256sums = 7d4dc3f2325f2b95c612e89904a07d9f3e8b050552be856910cb3ae0b41e04f8
+ sha256sums = 8c1aa4a99748f7a567a3961022c39b1f3666cc57bf63b3ebabe0c51068a65b9b
+ sha256sums = 76b7be145758e80af56429d46c23ce0942be6d13047b31b40855363ce9f88ce4
+ sha256sums = 69906640439c263fdeacaf14605e785294f1f3daf28f7633b40a5ac8d6977797
+ sha256sums = e90b54cf2e296c6d5c4bc41b7159015a6584191b5c2ab95a2f28861fb1c3bcb3
+ sha256sums = 71a600e3e502896d45076103201d35c30f778fa57a750bb3f2dfdbdcb3a708b8
+ sha256sums = 19f4708484c837d01b82ebd4667bbbcb73f65da3ee3f7a12fa38038fe730d733
+ sha256sums = d6d3eb0ef53ce501c6da5d756f7dc1adcf85361ad75b17253051bb3869b0b3dc
+ sha256sums = b76c02ca0d88e9818e58ef70592a216c6d969bde3b563c74244ee3687a39f672
+ sha256sums = 1b48a5714e9d89d419dac8969c005c56a0adc2599b558375ac9254a3168f55ae
+ sha256sums = 2e6a8f36c33e5e37956429eae2753944519f60a57fde81e0d72de1afa60a4103
+ sha256sums = c67da79111fa171a0900af0da9b151a1568b233f4929922e72d049d7490f98df
pkgname = v8-3.14
diff --git a/0001_kfreebsd.patch b/0001_kfreebsd.patch
new file mode 100644
index 000000000000..6e14a7ac11e0
--- /dev/null
+++ b/0001_kfreebsd.patch
@@ -0,0 +1,41 @@
+Description: Needed for kfreebsd-* arch
+ Fix link_settings.
+ Use python multiprocessing.dummy module to run tests.
+Forwarded: not-needed
+Author: Steven Chamberlain <steven@pyro.eu.org>
+Author: Jérémy Lal <kapouer@melix.org>
+Bug-Debian: http://bugs.debian.org/670836
+Last-Update: 2013-05-02
+--- a/tools/gyp/v8.gyp
++++ b/tools/gyp/v8.gyp
+@@ -676,7 +676,7 @@
+ ['OS=="freebsd"', {
+ 'link_settings': {
+ 'libraries': [
+- '-L/usr/local/lib -lexecinfo',
++ '-L/usr/local/lib -lpthread',
+ ]},
+ 'sources': [
+ '../../src/platform-freebsd.cc',
+--- a/tools/run-tests.py
++++ b/tools/run-tests.py
+@@ -28,7 +28,7 @@
+ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+-import multiprocessing
++import multiprocessing.dummy as multiprocessing
+ import optparse
+ import os
+ from os.path import join
+--- a/tools/testrunner/local/execution.py
++++ b/tools/testrunner/local/execution.py
+@@ -26,7 +26,7 @@
+ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+-import multiprocessing
++import multiprocessing.dummy as multiprocessing
+ import os
+ import threading
+ import time
diff --git a/0002_mips.patch b/0002_mips.patch
new file mode 100644
index 000000000000..c4777179922c
--- /dev/null
+++ b/0002_mips.patch
@@ -0,0 +1,935 @@
+Description: mips arch support backported to v8 3.14 branch
+Origin: https://github.com/paul99/v8m-rb/tree/dm-mipsbe-3.14
+Last-Update: 2014-04-09
+Acked-by: Jérémy Lal <kapouer@melix.org>
+
+--- a/Makefile
++++ b/Makefile
+@@ -133,7 +133,7 @@
+
+ # Architectures and modes to be compiled. Consider these to be internal
+ # variables, don't override them (use the targets instead).
+-ARCHES = ia32 x64 arm mipsel
++ARCHES = ia32 x64 arm mipsel mips
+ DEFAULT_ARCHES = ia32 x64 arm
+ MODES = release debug
+ ANDROID_ARCHES = android_ia32 android_arm
+@@ -168,10 +168,6 @@
+ $(MAKE) -C "$(OUTDIR)" BUILDTYPE=$(BUILDTYPE) \
+ builddir="$(abspath $(OUTDIR))/$(BUILDTYPE)"
+
+-mips mips.release mips.debug:
+- @echo "V8 does not support big-endian MIPS builds at the moment," \
+- "please use little-endian builds (mipsel)."
+-
+ # Compile targets. MODES and ARCHES are convenience targets.
+ .SECONDEXPANSION:
+ $(MODES): $(addsuffix .$$@,$(DEFAULT_ARCHES))
+--- a/build/common.gypi
++++ b/build/common.gypi
+@@ -176,7 +176,7 @@
+ 'V8_TARGET_ARCH_IA32',
+ ],
+ }], # v8_target_arch=="ia32"
+- ['v8_target_arch=="mipsel"', {
++ ['v8_target_arch=="mipsel" or v8_target_arch=="mips"', {
+ 'defines': [
+ 'V8_TARGET_ARCH_MIPS',
+ ],
+@@ -187,12 +187,17 @@
+ ['mipscompiler=="yes"', {
+ 'target_conditions': [
+ ['_toolset=="target"', {
+- 'cflags': ['-EL'],
+- 'ldflags': ['-EL'],
+ 'conditions': [
++ ['v8_target_arch=="mipsel"', {
++ 'cflags': ['-EL'],
++ 'ldflags': ['-EL'],
++ }],
++ ['v8_target_arch=="mips"', {
++ 'cflags': ['-EB'],
++ 'ldflags': ['-EB'],
++ }],
+ [ 'v8_use_mips_abi_hardfloat=="true"', {
+ 'cflags': ['-mhard-float'],
+- 'ldflags': ['-mhard-float'],
+ }, {
+ 'cflags': ['-msoft-float'],
+ 'ldflags': ['-msoft-float'],
+@@ -202,7 +207,8 @@
+ }],
+ ['mips_arch_variant=="loongson"', {
+ 'cflags': ['-mips3', '-Wa,-mips3'],
+- }, {
++ }],
++ ['mips_arch_variant=="mips32r1"', {
+ 'cflags': ['-mips32', '-Wa,-mips32'],
+ }],
+ ],
+@@ -290,7 +296,7 @@
+ ['(OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
+ or OS=="netbsd" or OS=="mac" or OS=="android") and \
+ (v8_target_arch=="arm" or v8_target_arch=="ia32" or \
+- v8_target_arch=="mipsel")', {
++ v8_target_arch=="mipsel" or v8_target_arch=="mips")', {
+ # Check whether the host compiler and target compiler support the
+ # '-m32' option and set it if so.
+ 'target_conditions': [
+--- a/build/standalone.gypi
++++ b/build/standalone.gypi
+@@ -68,6 +68,7 @@
+ 'conditions': [
+ ['(v8_target_arch=="arm" and host_arch!="arm") or \
+ (v8_target_arch=="mipsel" and host_arch!="mipsel") or \
++ (v8_target_arch=="mips" and host_arch!="mips") or \
+ (v8_target_arch=="x64" and host_arch!="x64") or \
+ (OS=="android")', {
+ 'want_separate_host_toolset': 1,
+--- a/src/conversions-inl.h
++++ b/src/conversions-inl.h
+@@ -75,7 +75,11 @@
+ if (x < k2Pow52) {
+ x += k2Pow52;
+ uint32_t result;
++#ifndef BIG_ENDIAN_FLOATING_POINT
+ Address mantissa_ptr = reinterpret_cast<Address>(&x);
++#else
++ Address mantissa_ptr = reinterpret_cast<Address>(&x) + 4;
++#endif
+ // Copy least significant 32 bits of mantissa.
+ memcpy(&result, mantissa_ptr, sizeof(result));
+ return negative ? ~result + 1 : result;
+--- a/src/globals.h
++++ b/src/globals.h
+@@ -83,7 +83,7 @@
+ #if CAN_USE_UNALIGNED_ACCESSES
+ #define V8_HOST_CAN_READ_UNALIGNED 1
+ #endif
+-#elif defined(__MIPSEL__)
++#elif defined(__MIPSEL__) || defined(__MIPSEB__)
+ #define V8_HOST_ARCH_MIPS 1
+ #define V8_HOST_ARCH_32_BIT 1
+ #else
+@@ -101,13 +101,17 @@
+ #define V8_TARGET_ARCH_IA32 1
+ #elif defined(__ARMEL__)
+ #define V8_TARGET_ARCH_ARM 1
+-#elif defined(__MIPSEL__)
++#elif defined(__MIPSEL__) || defined(__MIPSEB__)
+ #define V8_TARGET_ARCH_MIPS 1
+ #else
+ #error Target architecture was not detected as supported by v8
+ #endif
+ #endif
+
++#if defined(__MIPSEB__)
++#define BIG_ENDIAN_FLOATING_POINT 1
++#endif
++
+ // Check for supported combinations of host and target architectures.
+ #if defined(V8_TARGET_ARCH_IA32) && !defined(V8_HOST_ARCH_IA32)
+ #error Target architecture ia32 is only supported on ia32 host
+--- a/src/mips/assembler-mips.cc
++++ b/src/mips/assembler-mips.cc
+@@ -1631,10 +1631,17 @@
+ void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
+ // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
+ // load to two 32-bit loads.
++#ifndef BIG_ENDIAN_FLOATING_POINT
+ GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
+ FPURegister nextfpreg;
+ nextfpreg.setcode(fd.code() + 1);
+ GenInstrImmediate(LWC1, src.rm(), nextfpreg, src.offset_ + 4);
++#else
++ GenInstrImmediate(LWC1, src.rm(), fd, src.offset_ + 4);
++ FPURegister nextfpreg;
++ nextfpreg.setcode(fd.code() + 1);
++ GenInstrImmediate(LWC1, src.rm(), nextfpreg, src.offset_);
++#endif
+ }
+
+
+@@ -1646,10 +1653,17 @@
+ void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
+ // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
+ // store to two 32-bit stores.
++#ifndef BIG_ENDIAN_FLOATING_POINT
+ GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
+ FPURegister nextfpreg;
+ nextfpreg.setcode(fd.code() + 1);
+ GenInstrImmediate(SWC1, src.rm(), nextfpreg, src.offset_ + 4);
++#else
++ GenInstrImmediate(SWC1, src.rm(), fd, src.offset_ + 4);
++ FPURegister nextfpreg;
++ nextfpreg.setcode(fd.code() + 1);
++ GenInstrImmediate(SWC1, src.rm(), nextfpreg, src.offset_);
++#endif
+ }
+
+
+--- a/src/mips/assembler-mips.h
++++ b/src/mips/assembler-mips.h
+@@ -74,6 +74,13 @@
+ static const int kNumRegisters = v8::internal::kNumRegisters;
+ static const int kNumAllocatableRegisters = 14; // v0 through t7.
+ static const int kSizeInBytes = 4;
++#if __BYTE_ORDER == __LITTLE_ENDIAN
++ static const int kMantissaOffset = 0;
++ static const int kExponentOffset = 4;
++#else
++ static const int kMantissaOffset = 4;
++ static const int kExponentOffset = 0;
++#endif
+
+ static int ToAllocationIndex(Register reg) {
+ return reg.code() - 2; // zero_reg and 'at' are skipped.
+--- a/src/mips/builtins-mips.cc
++++ b/src/mips/builtins-mips.cc
+@@ -869,9 +869,7 @@
+ ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
+ __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
+ if (count_constructions) {
+- __ lw(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset));
+- __ Ext(a0, a0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
+- kBitsPerByte);
++ __ lbu(a0, FieldMemOperand(a2, Map::kPreAllocatedPropertyFieldsOffset));
+ __ sll(t0, a0, kPointerSizeLog2);
+ __ addu(a0, t5, t0);
+ // a0: offset of first field after pre-allocated fields
+@@ -899,14 +897,12 @@
+ __ lbu(a3, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
+ // The field instance sizes contains both pre-allocated property fields
+ // and in-object properties.
+- __ lw(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset));
+- __ Ext(t6, a0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
+- kBitsPerByte);
++ __ lbu(t6, FieldMemOperand(a2, Map::kPreAllocatedPropertyFieldsOffset));
+ __ Addu(a3, a3, Operand(t6));
+- __ Ext(t6, a0, Map::kInObjectPropertiesByte * kBitsPerByte,
+- kBitsPerByte);
++ __ lbu(t6, FieldMemOperand(a2, Map::kInObjectPropertiesOffset));
+ __ subu(a3, a3, t6);
+
++
+ // Done if no extra properties are to be allocated.
+ __ Branch(&allocated, eq, a3, Operand(zero_reg));
+ __ Assert(greater_equal, "Property allocation count failed.",
+--- a/src/mips/code-stubs-mips.cc
++++ b/src/mips/code-stubs-mips.cc
+@@ -536,13 +536,8 @@
+
+
+ void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
+-#ifndef BIG_ENDIAN_FLOATING_POINT
+ Register exponent = result1_;
+ Register mantissa = result2_;
+-#else
+- Register exponent = result2_;
+- Register mantissa = result1_;
+-#endif
+ Label not_special;
+ // Convert from Smi to integer.
+ __ sra(source_, source_, kSmiTagSize);
+@@ -679,9 +674,8 @@
+ } else {
+ ASSERT(destination == kCoreRegisters);
+ // Load the double from heap number to dst1 and dst2 in double format.
+- __ lw(dst1, FieldMemOperand(object, HeapNumber::kValueOffset));
+- __ lw(dst2, FieldMemOperand(object,
+- HeapNumber::kValueOffset + kPointerSize));
++ __ lw(dst1, FieldMemOperand(object, HeapNumber::kMantissaOffset));
++ __ lw(dst2, FieldMemOperand(object, HeapNumber::kExponentOffset));
+ }
+ __ Branch(&done);
+
+@@ -1075,6 +1069,11 @@
+ // a0-a3 registers to f12/f14 register pairs.
+ __ Move(f12, a0, a1);
+ __ Move(f14, a2, a3);
++ } else {
++#ifdef BIG_ENDIAN_FLOATING_POINT
++ __ Swap(a0, a1);
++ __ Swap(a2, a3);
++#endif
+ }
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+@@ -1088,8 +1087,13 @@
+ __ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
+ } else {
+ // Double returned in registers v0 and v1.
++#ifndef BIG_ENDIAN_FLOATING_POINT
+ __ sw(v1, FieldMemOperand(heap_number_result, HeapNumber::kExponentOffset));
+ __ sw(v0, FieldMemOperand(heap_number_result, HeapNumber::kMantissaOffset));
++#else
++ __ sw(v0, FieldMemOperand(heap_number_result, HeapNumber::kExponentOffset));
++ __ sw(v1, FieldMemOperand(heap_number_result, HeapNumber::kMantissaOffset));
++#endif
+ }
+ // Place heap_number_result in v0 and return to the pushed return address.
+ __ pop(ra);
+@@ -1320,8 +1324,8 @@
+ __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+ } else {
+ // Load lhs to a double in a2, a3.
+- __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4));
+- __ lw(a2, FieldMemOperand(lhs, HeapNumber::kValueOffset));
++ __ lw(a3, FieldMemOperand(lhs, HeapNumber::kExponentOffset));
++ __ lw(a2, FieldMemOperand(lhs, HeapNumber::kMantissaOffset));
+
+ // Write Smi from rhs to a1 and a0 in double format. t5 is scratch.
+ __ mov(t6, rhs);
+@@ -1366,11 +1370,11 @@
+ __ pop(ra);
+ // Load rhs to a double in a1, a0.
+ if (rhs.is(a0)) {
+- __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
+- __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
++ __ lw(a1, FieldMemOperand(rhs, HeapNumber::kExponentOffset));
++ __ lw(a0, FieldMemOperand(rhs, HeapNumber::kMantissaOffset));
+ } else {
+- __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+- __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
++ __ lw(a0, FieldMemOperand(rhs, HeapNumber::kMantissaOffset));
++ __ lw(a1, FieldMemOperand(rhs, HeapNumber::kExponentOffset));
+ }
+ }
+ // Fall through to both_loaded_as_doubles.
+@@ -1378,7 +1382,6 @@
+
+
+ void EmitNanCheck(MacroAssembler* masm, Condition cc) {
+- bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ // Lhs and rhs are already loaded to f12 and f14 register pairs.
+@@ -1391,10 +1394,10 @@
+ __ mov(t2, a2); // a2 has LS 32 bits of lhs.
+ __ mov(t3, a3); // a3 has MS 32 bits of lhs.
+ }
+- Register rhs_exponent = exp_first ? t0 : t1;
+- Register lhs_exponent = exp_first ? t2 : t3;
+- Register rhs_mantissa = exp_first ? t1 : t0;
+- Register lhs_mantissa = exp_first ? t3 : t2;
++ Register rhs_exponent = t1;
++ Register lhs_exponent = t3;
++ Register rhs_mantissa = t0;
++ Register lhs_mantissa = t2;
+ Label one_is_nan, neither_is_nan;
+ Label lhs_not_nan_exp_mask_is_loaded;
+
+@@ -1445,7 +1448,6 @@
+ if (cc == eq) {
+ // Doubles are not equal unless they have the same bit pattern.
+ // Exception: 0 and -0.
+- bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ // Lhs and rhs are already loaded to f12 and f14 register pairs.
+@@ -1458,10 +1460,10 @@
+ __ mov(t2, a2); // a2 has LS 32 bits of lhs.
+ __ mov(t3, a3); // a3 has MS 32 bits of lhs.
+ }
+- Register rhs_exponent = exp_first ? t0 : t1;
+- Register lhs_exponent = exp_first ? t2 : t3;
+- Register rhs_mantissa = exp_first ? t1 : t0;
+- Register lhs_mantissa = exp_first ? t3 : t2;
++ Register rhs_exponent = t1;
++ Register lhs_exponent = t3;
++ Register rhs_mantissa = t0;
++ Register lhs_mantissa = t2;
+
+ __ xor_(v0, rhs_mantissa, lhs_mantissa);
+ __ Branch(&return_result_not_equal, ne, v0, Operand(zero_reg));
+@@ -1495,6 +1497,11 @@
+ // a0-a3 registers to f12/f14 register pairs.
+ __ Move(f12, a0, a1);
+ __ Move(f14, a2, a3);
++ } else {
++#ifdef BIG_ENDIAN_FLOATING_POINT
++ __ Swap(a0, a1);
++ __ Swap(a2, a3);
++#endif
+ }
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+@@ -1582,14 +1589,14 @@
+ __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+ __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+ } else {
+- __ lw(a2, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+- __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4));
++ __ lw(a2, FieldMemOperand(lhs, HeapNumber::kMantissaOffset));
++ __ lw(a3, FieldMemOperand(lhs, HeapNumber::kExponentOffset));
+ if (rhs.is(a0)) {
+- __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
+- __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
++ __ lw(a1, FieldMemOperand(rhs, HeapNumber::kExponentOffset));
++ __ lw(a0, FieldMemOperand(rhs, HeapNumber::kMantissaOffset));
+ } else {
+- __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+- __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
++ __ lw(a0, FieldMemOperand(rhs, HeapNumber::kMantissaOffset));
++ __ lw(a1, FieldMemOperand(rhs, HeapNumber::kExponentOffset));
+ }
+ }
+ __ jmp(both_loaded_as_doubles);
+@@ -5902,14 +5909,18 @@
+ __ Branch(&simple_loop, eq, scratch4, Operand(zero_reg));
+
+ // Loop for src/dst that are not aligned the same way.
+- // This loop uses lwl and lwr instructions. These instructions
+- // depend on the endianness, and the implementation assumes little-endian.
+ {
+ Label loop;
+ __ bind(&loop);
++#if __BYTE_ORDER == __BIG_ENDIAN
++ __ lwl(scratch1, MemOperand(src));
++ __ Addu(src, src, Operand(kReadAlignment));
++ __ lwr(scratch1, MemOperand(src, -1));
++#else
+ __ lwr(scratch1, MemOperand(src));
+ __ Addu(src, src, Operand(kReadAlignment));
+ __ lwl(scratch1, MemOperand(src, -1));
++#endif
+ __ sw(scratch1, MemOperand(dest));
+ __ Addu(dest, dest, Operand(kReadAlignment));
+ __ Subu(scratch2, limit, dest);
+@@ -6616,6 +6627,11 @@
+ // in a little endian mode).
+ __ li(t2, Operand(2));
+ __ AllocateAsciiString(v0, t2, t0, t1, t5, &call_runtime);
++#if __BYTE_ORDER == __BIG_ENDIAN
++ __ sll(t0, a2, 8);
++ __ srl(t1, a2, 8);
++ __ or_(a2, t0, t1);
++#endif
+ __ sh(a2, FieldMemOperand(v0, SeqAsciiString::kHeaderSize));
+ __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
+ __ DropAndRet(2);
+--- a/src/mips/codegen-mips.cc
++++ b/src/mips/codegen-mips.cc
+@@ -210,8 +210,8 @@
+ a1,
+ t7,
+ f0);
+- __ sw(a0, MemOperand(t3)); // mantissa
+- __ sw(a1, MemOperand(t3, kIntSize)); // exponent
++ __ sw(a0, MemOperand(t3, Register::kMantissaOffset)); // mantissa
++ __ sw(a1, MemOperand(t3, Register::kExponentOffset)); // exponent
+ __ Addu(t3, t3, kDoubleSize);
+ }
+ __ Branch(&entry);
+@@ -225,8 +225,8 @@
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ __ Assert(eq, "object found in smi-only array", at, Operand(t5));
+ }
+- __ sw(t0, MemOperand(t3)); // mantissa
+- __ sw(t1, MemOperand(t3, kIntSize)); // exponent
++ __ sw(t0, MemOperand(t3, Register::kMantissaOffset)); // mantissa
++ __ sw(t1, MemOperand(t3, Register::kExponentOffset)); // exponent
+ __ Addu(t3, t3, kDoubleSize);
+
+ __ bind(&entry);
+@@ -273,7 +273,7 @@
+ __ sw(t5, MemOperand(t2, HeapObject::kMapOffset));
+
+ // Prepare for conversion loop.
+- __ Addu(t0, t0, Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
++ __ Addu(t0, t0, Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + Register::kExponentOffset));
+ __ Addu(a3, t2, Operand(FixedArray::kHeaderSize));
+ __ Addu(t2, t2, Operand(kHeapObjectTag));
+ __ sll(t1, t1, 1);
+@@ -282,7 +282,7 @@
+ __ LoadRoot(t5, Heap::kHeapNumberMapRootIndex);
+ // Using offsetted addresses.
+ // a3: begin of destination FixedArray element fields, not tagged
+- // t0: begin of source FixedDoubleArray element fields, not tagged, +4
++ // t0: begin of source FixedDoubleArray element fields, not tagged, points to the exponent
+ // t1: end of destination FixedArray, not tagged
+ // t2: destination FixedArray
+ // t3: the-hole pointer
+@@ -296,7 +296,7 @@
+ __ Branch(fail);
+
+ __ bind(&loop);
+- __ lw(a1, MemOperand(t0));
++ __ lw(a1, MemOperand(t0, 0)); // exponent
+ __ Addu(t0, t0, kDoubleSize);
+ // a1: current element's upper 32 bit
+ // t0: address of next element's upper 32 bit
+@@ -305,7 +305,8 @@
+ // Non-hole double, copy value into a heap number.
+ __ AllocateHeapNumber(a2, a0, t6, t5, &gc_required);
+ // a2: new heap number
+- __ lw(a0, MemOperand(t0, -12));
++ // Load mantissa of current element; t0 points to the exponent of the next element.
++ __ lw(a0, MemOperand(t0, (Register::kMantissaOffset - Register::kExponentOffset - kDoubleSize)));
+ __ sw(a0, FieldMemOperand(a2, HeapNumber::kMantissaOffset));
+ __ sw(a1, FieldMemOperand(a2, HeapNumber::kExponentOffset));
+ __ mov(a0, a3);
+--- a/src/mips/constants-mips.h
++++ b/src/mips/constants-mips.h
+@@ -69,6 +69,15 @@
+ #endif
+
+
++#if __BYTE_ORDER == __LITTLE_ENDIAN
++const uint32_t kHoleNanUpper32Offset = 4;
++const uint32_t kHoleNanLower32Offset = 0;
++#else
++const uint32_t kHoleNanUpper32Offset = 0;
++const uint32_t kHoleNanLower32Offset = 4;
++#endif
++
++
+ // Defines constants and accessor classes to assemble, disassemble and
+ // simulate MIPS32 instructions.
+ //
+--- a/src/mips/lithium-codegen-mips.cc
++++ b/src/mips/lithium-codegen-mips.cc
+@@ -2699,7 +2699,7 @@
+ }
+
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+- __ lw(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
++ __ lw(scratch, MemOperand(elements, kHoleNanUpper32Offset));
+ DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));
+ }
+
+@@ -4869,15 +4869,14 @@
+ Handle<FixedDoubleArray>::cast(elements);
+ for (int i = 0; i < elements_length; i++) {
+ int64_t value = double_array->get_representation(i);
+- // We only support little endian mode...
+ int32_t value_low = static_cast<int32_t>(value & 0xFFFFFFFF);
+ int32_t value_high = static_cast<int32_t>(value >> 32);
+ int total_offset =
+ elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
+ __ li(a2, Operand(value_low));
+- __ sw(a2, FieldMemOperand(result, total_offset));
++ __ sw(a2, FieldMemOperand(result, total_offset + Register::kMantissaOffset));
+ __ li(a2, Operand(value_high));
+- __ sw(a2, FieldMemOperand(result, total_offset + 4));
++ __ sw(a2, FieldMemOperand(result, total_offset + Register::kExponentOffset));
+ }
+ } else if (elements->IsFixedArray()) {
+ Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
+--- a/src/mips/macro-assembler-mips.cc
++++ b/src/mips/macro-assembler-mips.cc
+@@ -3300,6 +3300,7 @@
+
+ // TODO(kalmard) check if this can be optimized to use sw in most cases.
+ // Can't use unaligned access - copy byte by byte.
++#if __BYTE_ORDER == __LITTLE_ENDIAN
+ sb(scratch, MemOperand(dst, 0));
+ srl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 1));
+@@ -3307,6 +3308,16 @@
+ sb(scratch, MemOperand(dst, 2));
+ srl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 3));
++#else
++ sb(scratch, MemOperand(dst, 3));
++ srl(scratch, scratch, 8);
++ sb(scratch, MemOperand(dst, 2));
++ srl(scratch, scratch, 8);
++ sb(scratch, MemOperand(dst, 1));
++ srl(scratch, scratch, 8);
++ sb(scratch, MemOperand(dst, 0));
++#endif
++
+ Addu(dst, dst, 4);
+
+ Subu(length, length, Operand(kPointerSize));
+@@ -3412,9 +3423,8 @@
+ bind(&have_double_value);
+ sll(scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
+ Addu(scratch1, scratch1, elements_reg);
+- sw(mantissa_reg, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize));
+- uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
+- sw(exponent_reg, FieldMemOperand(scratch1, offset));
++ sw(mantissa_reg, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize + kHoleNanLower32Offset));
++ sw(exponent_reg, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize + kHoleNanUpper32Offset));
+ jmp(&done);
+
+ bind(&maybe_nan);
+@@ -3459,8 +3469,8 @@
+ CpuFeatures::Scope scope(FPU);
+ sdc1(f0, MemOperand(scratch1, 0));
+ } else {
+- sw(mantissa_reg, MemOperand(scratch1, 0));
+- sw(exponent_reg, MemOperand(scratch1, Register::kSizeInBytes));
++ sw(mantissa_reg, MemOperand(scratch1, Register::kMantissaOffset));
++ sw(exponent_reg, MemOperand(scratch1, Register::kExponentOffset));
+ }
+ bind(&done);
+ }
+--- a/src/mips/stub-cache-mips.cc
++++ b/src/mips/stub-cache-mips.cc
+@@ -2195,7 +2195,7 @@
+
+ // Start checking for special cases.
+ // Get the argument exponent and clear the sign bit.
+- __ lw(t1, FieldMemOperand(v0, HeapNumber::kValueOffset + kPointerSize));
++ __ lw(t1, FieldMemOperand(v0, HeapNumber::kExponentOffset));
+ __ And(t2, t1, Operand(~HeapNumber::kSignMask));
+ __ srl(t2, t2, HeapNumber::kMantissaBitsInTopWord);
+
+@@ -3768,8 +3768,8 @@
+ __ ldc1(f0, MemOperand(t3, 0));
+ } else {
+ // t3: pointer to the beginning of the double we want to load.
+- __ lw(a2, MemOperand(t3, 0));
+- __ lw(a3, MemOperand(t3, Register::kSizeInBytes));
++ __ lw(a2, MemOperand(t3, Register::kMantissaOffset));
++ __ lw(a3, MemOperand(t3, Register::kExponentOffset));
+ }
+ break;
+ case FAST_ELEMENTS:
+@@ -4132,8 +4132,8 @@
+ CpuFeatures::Scope scope(FPU);
+ __ sdc1(f0, MemOperand(a3, 0));
+ } else {
+- __ sw(t2, MemOperand(a3, 0));
+- __ sw(t3, MemOperand(a3, Register::kSizeInBytes));
++ __ sw(t2, MemOperand(a3, Register::kMantissaOffset));
++ __ sw(t3, MemOperand(a3, Register::kExponentOffset));
+ }
+ break;
+ case FAST_ELEMENTS:
+@@ -4296,8 +4296,8 @@
+ __ sll(t8, key, 2);
+ __ addu(t8, a3, t8);
+ // t8: effective address of destination element.
+- __ sw(t4, MemOperand(t8, 0));
+- __ sw(t3, MemOperand(t8, Register::kSizeInBytes));
++ __ sw(t4, MemOperand(t8, Register::kMantissaOffset));
++ __ sw(t3, MemOperand(t8, Register::kExponentOffset));
+ __ mov(v0, a0);
+ __ Ret();
+ } else {
+@@ -4497,11 +4497,11 @@
+ __ lw(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
+ __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch));
+
+- // Load the upper word of the double in the fixed array and test for NaN.
++ // Load the exponent in the fixed array and test for NaN.
+ __ sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize);
+ __ Addu(indexed_double_offset, elements_reg, Operand(scratch2));
+- uint32_t upper_32_offset = FixedArray::kHeaderSize + sizeof(kHoleNanLower32);
+- __ lw(scratch, FieldMemOperand(indexed_double_offset, upper_32_offset));
++ __ lw(scratch, FieldMemOperand(indexed_double_offset,
++ FixedArray::kHeaderSize + kHoleNanUpper32Offset));
+ __ Branch(&miss_force_generic, eq, scratch, Operand(kHoleNanUpper32));
+
+ // Non-NaN. Allocate a new heap number and copy the double value into it.
+@@ -4509,12 +4509,12 @@
+ __ AllocateHeapNumber(heap_number_reg, scratch2, scratch3,
+ heap_number_map, &slow_allocate_heapnumber);
+
+- // Don't need to reload the upper 32 bits of the double, it's already in
++ // Don't need to reload the exponent (the upper 32 bits of the double), it's already in
+ // scratch.
+ __ sw(scratch, FieldMemOperand(heap_number_reg,
+ HeapNumber::kExponentOffset));
+ __ lw(scratch, FieldMemOperand(indexed_double_offset,
+- FixedArray::kHeaderSize));
++ FixedArray::kHeaderSize + kHoleNanLower32Offset));
+ __ sw(scratch, FieldMemOperand(heap_number_reg,
+ HeapNumber::kMantissaOffset));
+
+--- a/src/objects.h
++++ b/src/objects.h
+@@ -1344,8 +1344,13 @@
+ // is a mixture of sign, exponent and mantissa. Our current platforms are all
+ // little endian apart from non-EABI arm which is little endian with big
+ // endian floating point word ordering!
++#ifndef BIG_ENDIAN_FLOATING_POINT
+ static const int kMantissaOffset = kValueOffset;
+ static const int kExponentOffset = kValueOffset + 4;
++#else
++ static const int kMantissaOffset = kValueOffset + 4;
++ static const int kExponentOffset = kValueOffset;
++#endif
+
+ static const int kSize = kValueOffset + kDoubleSize;
+ static const uint32_t kSignMask = 0x80000000u;
+--- a/src/profile-generator.cc
++++ b/src/profile-generator.cc
+@@ -1819,7 +1819,9 @@
+ Address field = obj->address() + offset;
+ ASSERT(!Memory::Object_at(field)->IsFailure());
+ ASSERT(Memory::Object_at(field)->IsHeapObject());
+- *field |= kFailureTag;
++ Object* untagged = *reinterpret_cast<Object**>(field);
++ intptr_t tagged = reinterpret_cast<intptr_t>(untagged) | kFailureTag;
++ *reinterpret_cast<Object**>(field) = reinterpret_cast<Object*>(tagged);
+ }
+
+ private:
+--- a/src/runtime.cc
++++ b/src/runtime.cc
+@@ -8553,8 +8553,15 @@
+ #else
+ typedef uint64_t ObjectPair;
+ static inline ObjectPair MakePair(MaybeObject* x, MaybeObject* y) {
++#if __BYTE_ORDER == __LITTLE_ENDIAN
+ return reinterpret_cast<uint32_t>(x) |
+ (reinterpret_cast<ObjectPair>(y) << 32);
++#elif __BYTE_ORDER == __BIG_ENDIAN
++ return reinterpret_cast<uint32_t>(y) |
++ (reinterpret_cast<ObjectPair>(x) << 32);
++#else
++#error Unknown endianness
++#endif
+ }
+ #endif
+
+--- a/test/cctest/cctest.gyp
++++ b/test/cctest/cctest.gyp
+@@ -118,7 +118,7 @@
+ 'test-disasm-arm.cc'
+ ],
+ }],
+- ['v8_target_arch=="mipsel"', {
++ ['v8_target_arch=="mipsel" or v8_target_arch=="mips"', {
+ 'sources': [
+ 'test-assembler-mips.cc',
+ 'test-disasm-mips.cc',
+--- a/test/cctest/test-assembler-mips.cc
++++ b/test/cctest/test-assembler-mips.cc
+@@ -537,11 +537,21 @@
+ USE(dummy);
+
+ CHECK_EQ(0x11223344, t.r1);
++#if __BYTE_ORDER == __LITTLE_ENDIAN
+ CHECK_EQ(0x3344, t.r2);
+ CHECK_EQ(0xffffbbcc, t.r3);
+ CHECK_EQ(0x0000bbcc, t.r4);
+ CHECK_EQ(0xffffffcc, t.r5);
+ CHECK_EQ(0x3333bbcc, t.r6);
++#elif __BYTE_ORDER == __BIG_ENDIAN
++ CHECK_EQ(0x1122, t.r2);
++ CHECK_EQ(0xffff99aa, t.r3);
++ CHECK_EQ(0x000099aa, t.r4);
++ CHECK_EQ(0xffffff99, t.r5);
++ CHECK_EQ(0x99aa3333, t.r6);
++#else
++#error Unknown endianness
++#endif
+ }
+
+
+@@ -955,6 +965,7 @@
+ Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ USE(dummy);
+
++#if __BYTE_ORDER == __LITTLE_ENDIAN
+ CHECK_EQ(0x44bbccdd, t.lwl_0);
+ CHECK_EQ(0x3344ccdd, t.lwl_1);
+ CHECK_EQ(0x223344dd, t.lwl_2);
+@@ -974,6 +985,29 @@
+ CHECK_EQ(0xbbccdd44, t.swr_1);
+ CHECK_EQ(0xccdd3344, t.swr_2);
+ CHECK_EQ(0xdd223344, t.swr_3);
++#elif __BYTE_ORDER == __BIG_ENDIAN
++ CHECK_EQ(0x11223344, t.lwl_0);
++ CHECK_EQ(0x223344dd, t.lwl_1);
++ CHECK_EQ(0x3344ccdd, t.lwl_2);
++ CHECK_EQ(0x44bbccdd, t.lwl_3);
++
++ CHECK_EQ(0xaabbcc11, t.lwr_0);
++ CHECK_EQ(0xaabb1122, t.lwr_1);
++ CHECK_EQ(0xaa112233, t.lwr_2);
++ CHECK_EQ(0x11223344, t.lwr_3);
++
++ CHECK_EQ(0xaabbccdd, t.swl_0);
++ CHECK_EQ(0x11aabbcc, t.swl_1);
++ CHECK_EQ(0x1122aabb, t.swl_2);
++ CHECK_EQ(0x112233aa, t.swl_3);
++
++ CHECK_EQ(0xdd223344, t.swr_0);
++ CHECK_EQ(0xccdd3344, t.swr_1);
++ CHECK_EQ(0xbbccdd44, t.swr_2);
++ CHECK_EQ(0xaabbccdd, t.swr_3);
++#else
++#error Unknown endianness
++#endif
+ }
+
+
+--- a/test/mjsunit/mjsunit.status
++++ b/test/mjsunit/mjsunit.status
+@@ -49,7 +49,7 @@
+ ##############################################################################
+ # These use a built-in that's only present in debug mode. They take
+ # too long to run in debug mode on ARM and MIPS.
+-fuzz-natives-part*: PASS, SKIP if ($mode == release || $arch == arm || $arch == android_arm || $arch == mipsel)
++fuzz-natives-part*: PASS, SKIP if ($mode == release || $arch == arm || $arch == android_arm || $arch == mipsel || $arch == mips)
+
+ big-object-literal: PASS, SKIP if ($arch == arm || $arch == android_arm)
+
+@@ -57,7 +57,7 @@
+ array-constructor: PASS || TIMEOUT
+
+ # Very slow on ARM and MIPS, contains no architecture dependent code.
+-unicode-case-overoptimization: PASS, TIMEOUT if ($arch == arm || $arch == android_arm || $arch == mipsel)
++unicode-case-overoptimization: PASS, TIMEOUT if ($arch == arm || $arch == android_arm || $arch == mipsel || $arch == mips)
+
+ ##############################################################################
+ # This test sets the umask on a per-process basis and hence cannot be
+@@ -127,7 +127,7 @@
+ math-floor-of-div-minus-zero: SKIP
+
+ ##############################################################################
+-[ $arch == mipsel ]
++[ $arch == mipsel || $arch == mips ]
+
+ # Slow tests which times out in debug mode.
+ try: PASS, SKIP if $mode == debug
+--- a/test/mozilla/mozilla.status
++++ b/test/mozilla/mozilla.status
+@@ -126,13 +126,13 @@
+ ecma/Date/15.9.2.2-6: PASS || FAIL
+
+ # 1026139: These date tests fail on arm and mips
+-ecma/Date/15.9.5.29-1: PASS || FAIL if ($arch == arm || $arch == mipsel)
+-ecma/Date/15.9.5.34-1: PASS || FAIL if ($arch == arm || $arch == mipsel)
+-ecma/Date/15.9.5.28-1: PASS || FAIL if ($arch == arm || $arch == mipsel)
++ecma/Date/15.9.5.29-1: PASS || FAIL if ($arch == arm || $arch == mipsel || $arch == mips)
++ecma/Date/15.9.5.34-1: PASS || FAIL if ($arch == arm || $arch == mipsel || $arch == mips)
++ecma/Date/15.9.5.28-1: PASS || FAIL if ($arch == arm || $arch == mipsel || $arch == mips)
+
+ # 1050186: Arm/MIPS vm is broken; probably unrelated to dates
+-ecma/Array/15.4.4.5-3: PASS || FAIL if ($arch == arm || $arch == mipsel)
+-ecma/Date/15.9.5.22-2: PASS || FAIL if ($arch == arm || $arch == mipsel)
++ecma/Array/15.4.4.5-3: PASS || FAIL if ($arch == arm || $arch == mipsel || $arch == mips)
++ecma/Date/15.9.5.22-2: PASS || FAIL if ($arch == arm || $arch == mipsel || $arch == mips)
+
+ # Flaky test that fails due to what appears to be a bug in the test.
+ # Occurs depending on current time
+@@ -854,6 +854,28 @@
+
+ # Times out and print so much output that we need to skip it to not
+ # hang the builder.
++js1_5/extensions/regress-342960: SKIP
++
++# BUG(3251229): Times out when running new crankshaft test script.
++ecma_3/RegExp/regress-311414: SKIP
++ecma/Date/15.9.5.8: SKIP
++ecma/Date/15.9.5.10-2: SKIP
++ecma/Date/15.9.5.11-2: SKIP
++ecma/Date/15.9.5.12-2: SKIP
++js1_5/Array/regress-99120-02: SKIP
++js1_5/extensions/regress-371636: SKIP
++js1_5/Regress/regress-203278-1: SKIP
++js1_5/Regress/regress-404755: SKIP
++js1_5/Regress/regress-451322: SKIP
++
++
++# BUG(1040): Allow this test to timeout.
++js1_5/GC/regress-203278-2: PASS || TIMEOUT
++
++[ $arch == mips ]
++
++# Times out and print so much output that we need to skip it to not
++# hang the builder.
+ js1_5/extensions/regress-342960: SKIP
+
+ # BUG(3251229): Times out when running new crankshaft test script.
+--- a/test/sputnik/sputnik.status
++++ b/test/sputnik/sputnik.status
+@@ -229,3 +229,17 @@
+ S15.1.3.4_A2.3_T1: SKIP
+ S15.1.3.1_A2.5_T1: SKIP
+ S15.1.3.2_A2.5_T1: SKIP
++
++[ $arch == mips ]
++
++# BUG(3251225): Tests that timeout with --nocrankshaft.
++S15.1.3.1_A2.5_T1: SKIP
++S15.1.3.2_A2.5_T1: SKIP
++S15.1.3.1_A2.4_T1: SKIP
++S15.1.3.1_A2.5_T1: SKIP
++S15.1.3.2_A2.4_T1: SKIP
++S15.1.3.2_A2.5_T1: SKIP
++S15.1.3.3_A2.3_T1: SKIP
++S15.1.3.4_A2.3_T1: SKIP
++S15.1.3.1_A2.5_T1: SKIP
++S15.1.3.2_A2.5_T1: SKIP
+--- a/test/test262/test262.status
++++ b/test/test262/test262.status
+@@ -74,7 +74,7 @@
+ S15.1.3.1_A2.5_T1: PASS, SKIP if $mode == debug
+ S15.1.3.2_A2.5_T1: PASS, SKIP if $mode == debug
+
+-[ $arch == arm || $arch == mipsel ]
++[ $arch == arm || $arch == mipsel || $arch == mips ]
+
+ # TODO(mstarzinger): Causes stack overflow on simulators due to eager
+ # compilation of parenthesized function literals. Needs investigation.
+--- a/tools/gyp/v8.gyp
++++ b/tools/gyp/v8.gyp
+@@ -564,7 +564,7 @@
+ '../../src/ia32/stub-cache-ia32.cc',
+ ],
+ }],
+- ['v8_target_arch=="mipsel"', {
++ ['v8_target_arch=="mipsel" or v8_target_arch=="mips"', {
+ 'sources': [
+ '../../src/mips/assembler-mips.cc',
+ '../../src/mips/assembler-mips.h',
+--- a/tools/run-tests.py
++++ b/tools/run-tests.py
+@@ -65,6 +65,7 @@
+ "arm",
+ "ia32",
+ "mipsel",
++ "mips",
+ "x64"]
+
+
+@@ -268,7 +269,7 @@
+ timeout = options.timeout
+ if timeout == -1:
+ # Simulators are slow, therefore allow a longer default timeout.
+- if arch in ["android", "arm", "mipsel"]:
++ if arch in ["android", "arm", "mipsel", "mips"]:
+ timeout = 2 * TIMEOUT_DEFAULT;
+ else:
+ timeout = TIMEOUT_DEFAULT;
+--- a/tools/test-wrapper-gypbuild.py
++++ b/tools/test-wrapper-gypbuild.py
+@@ -151,7 +151,7 @@
+ print "Unknown mode %s" % mode
+ return False
+ for arch in options.arch:
+- if not arch in ['ia32', 'x64', 'arm', 'mipsel', 'android_arm',
++ if not arch in ['ia32', 'x64', 'arm', 'mipsel', 'mips', 'android_arm',
+ 'android_ia32']:
+ print "Unknown architecture %s" % arch
+ return False
+--- a/tools/test.py
++++ b/tools/test.py
+@@ -1282,7 +1282,7 @@
+ options.scons_flags.append("arch=" + options.arch)
+ # Simulators are slow, therefore allow a longer default timeout.
+ if options.timeout == -1:
+- if options.arch in ['android', 'arm', 'mipsel']:
++ if options.arch in ['android', 'arm', 'mipsel', 'mips']:
+ options.timeout = 2 * TIMEOUT_DEFAULT;
+ else:
+ options.timeout = TIMEOUT_DEFAULT;
+--- a/tools/testrunner/local/statusfile.py
++++ b/tools/testrunner/local/statusfile.py
+@@ -59,7 +59,7 @@
+ # Support arches, modes to be written as keywords instead of strings.
+ VARIABLES = {ALWAYS: True}
+ for var in ["debug", "release", "android_arm", "android_ia32", "arm", "ia32",
+- "mipsel", "x64"]:
++ "mipsel", "mips", "x64"]:
+ VARIABLES[var] = var
+
+
diff --git a/0002_mips_r15102_backport.patch b/0002_mips_r15102_backport.patch
new file mode 100644
index 000000000000..23014d3a168d
--- /dev/null
+++ b/0002_mips_r15102_backport.patch
@@ -0,0 +1,17 @@
+Description: upstream fix needed by mips arch
+Origin: https://code.google.com/p/v8/source/detail?r=15102
+
+--- a/test/cctest/test-mark-compact.cc
++++ b/test/cctest/test-mark-compact.cc
+@@ -545,9 +545,9 @@
+ }
+ } else {
+ if (v8::internal::Snapshot::IsEnabled()) {
+- CHECK_LE(delta, 2500 * 1024); // 2400.
++ CHECK_LE(delta, 2942 * 1024); // 2400.
+ } else {
+- CHECK_LE(delta, 2860 * 1024); // 2760.
++ CHECK_LE(delta, 3400 * 1024); // 2760.
+ }
+ }
+ }
diff --git a/0002_mips_r19121_backport.patch b/0002_mips_r19121_backport.patch
new file mode 100644
index 000000000000..7d08790713cc
--- /dev/null
+++ b/0002_mips_r19121_backport.patch
@@ -0,0 +1,71 @@
+Description: upstream fix needed by mips arch
+Origin: https://code.google.com/p/v8/source/detail?r=19121
+
+--- a/src/mips/code-stubs-mips.cc
++++ b/src/mips/code-stubs-mips.cc
+@@ -7808,9 +7808,16 @@
+ const int32_t kReturnAddressDistanceFromFunctionStart =
+ Assembler::kCallTargetAddressOffset + (2 * Assembler::kInstrSize);
+
+- // Save live volatile registers.
+- __ Push(ra, t1, a1);
+- const int32_t kNumSavedRegs = 3;
++ // This should contain all kJSCallerSaved registers.
++ const RegList kSavedRegs =
++ kJSCallerSaved | // Caller saved registers.
++ s5.bit(); // Saved stack pointer.
++
++ // We also save ra, so the count here is one higher than the mask indicates.
++ const int32_t kNumSavedRegs = kNumJSCallerSaved + 2;
++
++ // Save all caller-save registers as this may be called from anywhere.
++ __ MultiPush(kSavedRegs | ra.bit());
+
+ // Compute the function's address for the first argument.
+ __ Subu(a0, ra, Operand(kReturnAddressDistanceFromFunctionStart));
+@@ -7822,32 +7829,36 @@
+ // Align the stack if necessary.
+ int frame_alignment = masm->ActivationFrameAlignment();
+ if (frame_alignment > kPointerSize) {
+- __ mov(t1, sp);
+ ASSERT(IsPowerOf2(frame_alignment));
++ __ mov(s5, sp);
+ __ And(sp, sp, Operand(-frame_alignment));
+ }
+-
++ // Allocate space for arg slots.
++ __ Subu(sp, sp, kCArgsSlotsSize);
+ #if defined(V8_HOST_ARCH_MIPS)
+- __ li(at, Operand(reinterpret_cast<int32_t>(&entry_hook_)));
+- __ lw(at, MemOperand(at));
++ __ li(t9, Operand(reinterpret_cast<int32_t>(&entry_hook_)));
++ __ lw(t9, MemOperand(t9));
+ #else
+ // Under the simulator we need to indirect the entry hook through a
+ // trampoline function at a known address.
+ Address trampoline_address = reinterpret_cast<Address>(
+ reinterpret_cast<intptr_t>(EntryHookTrampoline));
+ ApiFunction dispatcher(trampoline_address);
+- __ li(at, Operand(ExternalReference(&dispatcher,
++ __ li(t9, Operand(ExternalReference(&dispatcher,
+ ExternalReference::BUILTIN_CALL,
+ masm->isolate())));
+ #endif
+- __ Call(at);
+-
++ // Call C function through t9 to conform ABI for PIC.
++ __ Call(t9);
+ // Restore the stack pointer if needed.
+ if (frame_alignment > kPointerSize) {
+- __ mov(sp, t1);
++ __ mov(sp, s5);
++ } else {
++ __ Addu(sp, sp, kCArgsSlotsSize);
+ }
+
+- __ Pop(ra, t1, a1);
++ // Also pop ra to get Ret(0).
++ __ MultiPop(kSavedRegs | ra.bit());
+ __ Ret();
+ }
+
diff --git a/0003_armv4t_disable_vfp.patch b/0003_armv4t_disable_vfp.patch
new file mode 100644
index 000000000000..765a3bf642e3
--- /dev/null
+++ b/0003_armv4t_disable_vfp.patch
@@ -0,0 +1,15 @@
+Description: disable ARM vfpv3 detection at runtime - breaks when true
+Forwarded: not-needed, armv4t not supported upstream
+Last-Update: 2014-04-24
+Author: Jérémy Lal <kapouer@melix.org>
+--- a/src/platform-linux.cc
++++ b/src/platform-linux.cc
+@@ -143,7 +143,7 @@
+ search_string = "vfp";
+ break;
+ case VFP3:
+- search_string = "vfpv3";
++ search_string = "vfpv3-disabled";
+ break;
+ case ARMv7:
+ search_string = "ARMv7";
diff --git a/0004_hurd.patch b/0004_hurd.patch
new file mode 100644
index 000000000000..5a5519cec240
--- /dev/null
+++ b/0004_hurd.patch
@@ -0,0 +1,925 @@
+Description: hurd-i386 support
+ https://bugs.debian.org/747246
+Author: Svante Signell <svante.signell@gmail.com>
+Acked-By: Jérémy Lal <kapouer@melix.org>
+Last-Update: 2014-05-06
+
+---
+ src/platform-gnu.cc | 890 ++++++++++++++++++++++++++++++++++++++++++++++++++++
+ tools/gyp/v8.gyp | 11 +
+ 2 files changed, 901 insertions(+)
+ create mode 100644 src/platform-gnu.cc
+
+--- /dev/null
++++ b/src/platform-gnu.cc
+@@ -0,0 +1,890 @@
++// Copyright 2012 the V8 project authors. All rights reserved.
++// Redistribution and use in source and binary forms, with or without
++// modification, are permitted provided that the following conditions are
++// met:
++//
++// * Redistributions of source code must retain the above copyright
++// notice, this list of conditions and the following disclaimer.
++// * Redistributions in binary form must reproduce the above
++// copyright notice, this list of conditions and the following
++// disclaimer in the documentation and/or other materials provided
++// with the distribution.
++// * Neither the name of Google Inc. nor the names of its
++// contributors may be used to endorse or promote products derived
++// from this software without specific prior written permission.
++//
++// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++
++// Platform specific code for GNU/Hurd goes here. For the POSIX compatible parts
++// the implementation is in platform-posix.cc.
++
++#include <pthread.h>
++#include <semaphore.h>
++#include <signal.h>
++#include <sys/time.h>
++#include <sys/resource.h>
++#include <sys/types.h>
++#include <sys/ucontext.h>
++#include <stdlib.h>
++
++#include <sys/types.h> // mmap & munmap
++#include <sys/mman.h> // mmap & munmap
++#include <sys/stat.h> // open
++#include <sys/fcntl.h> // open
++#include <unistd.h> // getpagesize
++// If you don't have execinfo.h then you need devel/libexecinfo from ports.
++#include <execinfo.h> // backtrace, backtrace_symbols
++#include <strings.h> // index
++#include <errno.h>
++#include <stdarg.h>
++#include <limits.h>
++
++#undef MAP_TYPE
++
++#include "v8.h"
++#include "v8threads.h"
++
++#include "platform-posix.h"
++#include "platform.h"
++#include "vm-state-inl.h"
++
++#ifndef MAP_NORESERVE
++#define MAP_NORESERVE 0
++#endif
++
++namespace v8 {
++namespace internal {
++
++// 0 is never a valid thread id on GNU/Hurd
++static const pthread_t kNoThread = (pthread_t) 0;
++
++
++double ceiling(double x) {
++ return ceil(x);
++}
++
++
++static Mutex* limit_mutex = NULL;
++
++
++void OS::PostSetUp() {
++ POSIXPostSetUp();
++}
++
++
++void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
++ __asm__ __volatile__("" : : : "memory");
++ *ptr = value;
++}
++
++
++uint64_t OS::CpuFeaturesImpliedByPlatform() {
++ return 0; // GNU/Hurd runs on anything.
++}
++
++
++int OS::ActivationFrameAlignment() {
++ // 16 byte alignment on GNU/Hurd
++ return 16;
++}
++
++
++const char* OS::LocalTimezone(double time) {
++ if (isnan(time)) return "";
++ time_t tv = static_cast<time_t>(floor(time/msPerSecond));
++ struct tm* t = localtime(&tv);
++ if (NULL == t) return "";
++ return t->tm_zone;
++}
++
++
++double OS::LocalTimeOffset() {
++ time_t tv = time(NULL);
++ struct tm* t = localtime(&tv);
++ // tm_gmtoff includes any daylight savings offset, so subtract it.
++ return static_cast<double>(t->tm_gmtoff * msPerSecond -
++ (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
++}
++
++
++// We keep the lowest and highest addresses mapped as a quick way of
++// determining that pointers are outside the heap (used mostly in assertions
++// and verification). The estimate is conservative, i.e., not all addresses in
++// 'allocated' space are actually allocated to our heap. The range is
++// [lowest, highest), inclusive on the low end and exclusive on the high end.
++static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
++static void* highest_ever_allocated = reinterpret_cast<void*>(0);
++
++
++static void UpdateAllocatedSpaceLimits(void* address, int size) {
++ ASSERT(limit_mutex != NULL);
++ ScopedLock lock(limit_mutex);
++
++ lowest_ever_allocated = Min(lowest_ever_allocated, address);
++ highest_ever_allocated =
++ Max(highest_ever_allocated,
++ reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
++}
++
++
++bool OS::IsOutsideAllocatedSpace(void* address) {
++ return address < lowest_ever_allocated || address >= highest_ever_allocated;
++}
++
++
++size_t OS::AllocateAlignment() {
++ return getpagesize();
++}
++
++
++void* OS::Allocate(const size_t requested,
++ size_t* allocated,
++ bool executable) {
++ const size_t msize = RoundUp(requested, getpagesize());
++ int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
++ void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
++
++ if (mbase == MAP_FAILED) {
++ LOG(ISOLATE, StringEvent("OS::Allocate", "mmap failed"));
++ return NULL;
++ }
++ *allocated = msize;
++ UpdateAllocatedSpaceLimits(mbase, msize);
++ return mbase;
++}
++
++
++void OS::Free(void* buf, const size_t length) {
++ // TODO(1240712): munmap has a return value which is ignored here.
++ int result = munmap(buf, length);
++ USE(result);
++ ASSERT(result == 0);
++}
++
++
++void OS::Sleep(int milliseconds) {
++ unsigned int ms = static_cast<unsigned int>(milliseconds);
++ usleep(1000 * ms);
++}
++
++
++void OS::Abort() {
++ // Redirect to std abort to signal abnormal program termination.
++ abort();
++}
++
++
++void OS::DebugBreak() {
++#if (defined(__arm__) || defined(__thumb__))
++# if defined(CAN_USE_ARMV5_INSTRUCTIONS)
++ asm("bkpt 0");
++# endif
++#else
++ asm("int $3");
++#endif
++}
++
++
++class PosixMemoryMappedFile : public OS::MemoryMappedFile {
++ public:
++ PosixMemoryMappedFile(FILE* file, void* memory, int size)
++ : file_(file), memory_(memory), size_(size) { }
++ virtual ~PosixMemoryMappedFile();
++ virtual void* memory() { return memory_; }
++ virtual int size() { return size_; }
++ private:
++ FILE* file_;
++ void* memory_;
++ int size_;
++};
++
++
++OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
++ FILE* file = fopen(name, "r+");
++ if (file == NULL) return NULL;
++
++ fseek(file, 0, SEEK_END);
++ int size = ftell(file);
++
++ void* memory =
++ mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
++ return new PosixMemoryMappedFile(file, memory, size);
++}
++
++
++OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
++ void* initial) {
++ FILE* file = fopen(name, "w+");
++ if (file == NULL) return NULL;
++ int result = fwrite(initial, size, 1, file);
++ if (result < 1) {
++ fclose(file);
++ return NULL;
++ }
++ void* memory =
++ mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
++ return new PosixMemoryMappedFile(file, memory, size);
++}
++
++
++PosixMemoryMappedFile::~PosixMemoryMappedFile() {
++ if (memory_) munmap(memory_, size_);
++ fclose(file_);
++}
++
++
++static unsigned StringToLong(char* buffer) {
++ return static_cast<unsigned>(strtol(buffer, NULL, 16)); // NOLINT
++}
++
++
++void OS::LogSharedLibraryAddresses() {
++ static const int MAP_LENGTH = 1024;
++ int fd = open("/proc/self/maps", O_RDONLY);
++ if (fd < 0) return;
++ while (true) {
++ char addr_buffer[11];
++ addr_buffer[0] = '0';
++ addr_buffer[1] = 'x';
++ addr_buffer[10] = 0;
++ int result = read(fd, addr_buffer + 2, 8);
++ if (result < 8) break;
++ unsigned start = StringToLong(addr_buffer);
++ result = read(fd, addr_buffer + 2, 1);
++ if (result < 1) break;
++ if (addr_buffer[2] != '-') break;
++ result = read(fd, addr_buffer + 2, 8);
++ if (result < 8) break;
++ unsigned end = StringToLong(addr_buffer);
++ char buffer[MAP_LENGTH];
++ int bytes_read = -1;
++ do {
++ bytes_read++;
++ if (bytes_read >= MAP_LENGTH - 1)
++ break;
++ result = read(fd, buffer + bytes_read, 1);
++ if (result < 1) break;
++ } while (buffer[bytes_read] != '\n');
++ buffer[bytes_read] = 0;
++ // Ignore mappings that are not executable.
++ if (buffer[3] != 'x') continue;
++ char* start_of_path = index(buffer, '/');
++ // There may be no filename in this line. Skip to next.
++ if (start_of_path == NULL) continue;
++ buffer[bytes_read] = 0;
++ LOG(i::Isolate::Current(), SharedLibraryEvent(start_of_path, start, end));
++ }
++ close(fd);
++}
++
++
++void OS::SignalCodeMovingGC() {
++}
++
++
++int OS::StackWalk(Vector<OS::StackFrame> frames) {
++ int frames_size = frames.length();
++ ScopedVector<void*> addresses(frames_size);
++
++ int frames_count = backtrace(addresses.start(), frames_size);
++
++ char** symbols = backtrace_symbols(addresses.start(), frames_count);
++ if (symbols == NULL) {
++ return kStackWalkError;
++ }
++
++ for (int i = 0; i < frames_count; i++) {
++ frames[i].address = addresses[i];
++ // Format a text representation of the frame based on the information
++ // available.
++ SNPrintF(MutableCStrVector(frames[i].text, kStackWalkMaxTextLen),
++ "%s",
++ symbols[i]);
++ // Make sure line termination is in place.
++ frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
++ }
++
++ free(symbols);
++
++ return frames_count;
++}
++
++
++// Constants used for mmap.
++static const int kMmapFd = -1;
++static const int kMmapFdOffset = 0;
++
++VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
++
++VirtualMemory::VirtualMemory(size_t size) {
++ address_ = ReserveRegion(size);
++ size_ = size;
++}
++
++
++VirtualMemory::VirtualMemory(size_t size, size_t alignment)
++ : address_(NULL), size_(0) {
++ ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
++ size_t request_size = RoundUp(size + alignment,
++ static_cast<intptr_t>(OS::AllocateAlignment()));
++ void* reservation = mmap(OS::GetRandomMmapAddr(),
++ request_size,
++ PROT_NONE,
++ MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
++ kMmapFd,
++ kMmapFdOffset);
++ if (reservation == MAP_FAILED) return;
++
++ Address base = static_cast<Address>(reservation);
++ Address aligned_base = RoundUp(base, alignment);
++ ASSERT_LE(base, aligned_base);
++
++ // Unmap extra memory reserved before and after the desired block.
++ if (aligned_base != base) {
++ size_t prefix_size = static_cast<size_t>(aligned_base - base);
++ OS::Free(base, prefix_size);
++ request_size -= prefix_size;
++ }
++
++ size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
++ ASSERT_LE(aligned_size, request_size);
++
++ if (aligned_size != request_size) {
++ size_t suffix_size = request_size - aligned_size;
++ OS::Free(aligned_base + aligned_size, suffix_size);
++ request_size -= suffix_size;
++ }
++
++ ASSERT(aligned_size == request_size);
++
++ address_ = static_cast<void*>(aligned_base);
++ size_ = aligned_size;
++}
++
++
++VirtualMemory::~VirtualMemory() {
++ if (IsReserved()) {
++ bool result = ReleaseRegion(address(), size());
++ ASSERT(result);
++ USE(result);
++ }
++}
++
++
++bool VirtualMemory::IsReserved() {
++ return address_ != NULL;
++}
++
++
++void VirtualMemory::Reset() {
++ address_ = NULL;
++ size_ = 0;
++}
++
++
++bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
++ return CommitRegion(address, size, is_executable);
++}
++
++
++bool VirtualMemory::Uncommit(void* address, size_t size) {
++ return UncommitRegion(address, size);
++}
++
++
++bool VirtualMemory::Guard(void* address) {
++ OS::Guard(address, OS::CommitPageSize());
++ return true;
++}
++
++
++void* VirtualMemory::ReserveRegion(size_t size) {
++ void* result = mmap(OS::GetRandomMmapAddr(),
++ size,
++ PROT_NONE,
++ MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
++ kMmapFd,
++ kMmapFdOffset);
++
++ if (result == MAP_FAILED) return NULL;
++
++ return result;
++}
++
++
++bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
++ int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
++ if (MAP_FAILED == mmap(base,
++ size,
++ prot,
++ MAP_PRIVATE | MAP_ANON | MAP_FIXED,
++ kMmapFd,
++ kMmapFdOffset)) {
++ return false;
++ }
++
++ UpdateAllocatedSpaceLimits(base, size);
++ return true;
++}
++
++
++bool VirtualMemory::UncommitRegion(void* base, size_t size) {
++ return mmap(base,
++ size,
++ PROT_NONE,
++ MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
++ kMmapFd,
++ kMmapFdOffset) != MAP_FAILED;
++}
++
++
++bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
++ return munmap(base, size) == 0;
++}
++
++
++class Thread::PlatformData : public Malloced {
++ public:
++ pthread_t thread_; // Thread handle for pthread.
++};
++
++
++Thread::Thread(const Options& options)
++ : data_(new PlatformData),
++ stack_size_(options.stack_size()) {
++ set_name(options.name());
++}
++
++
++Thread::~Thread() {
++ delete data_;
++}
++
++
++static void* ThreadEntry(void* arg) {
++ Thread* thread = reinterpret_cast<Thread*>(arg);
++ // This is also initialized by the first argument to pthread_create() but we
++ // don't know which thread will run first (the original thread or the new
++ // one) so we initialize it here too.
++ thread->data()->thread_ = pthread_self();
++ ASSERT(thread->data()->thread_ != kNoThread);
++ thread->Run();
++ return NULL;
++}
++
++
++void Thread::set_name(const char* name) {
++ strncpy(name_, name, sizeof(name_));
++ name_[sizeof(name_) - 1] = '\0';
++}
++
++
++void Thread::Start() {
++ pthread_attr_t* attr_ptr = NULL;
++ pthread_attr_t attr;
++ if (stack_size_ > 0) {
++ pthread_attr_init(&attr);
++ pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
++ attr_ptr = &attr;
++ }
++ pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
++ ASSERT(data_->thread_ != kNoThread);
++}
++
++
++void Thread::Join() {
++ pthread_join(data_->thread_, NULL);
++}
++
++
++Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
++ pthread_key_t key;
++ int result = pthread_key_create(&key, NULL);
++ USE(result);
++ ASSERT(result == 0);
++ return static_cast<LocalStorageKey>(key);
++}
++
++
++void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
++ pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
++ int result = pthread_key_delete(pthread_key);
++ USE(result);
++ ASSERT(result == 0);
++}
++
++
++void* Thread::GetThreadLocal(LocalStorageKey key) {
++ pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
++ return pthread_getspecific(pthread_key);
++}
++
++
++void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
++ pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
++ pthread_setspecific(pthread_key, value);
++}
++
++
++void Thread::YieldCPU() {
++ sched_yield();
++}
++
++
++class GNUMutex : public Mutex {
++ public:
++ GNUMutex() {
++ pthread_mutexattr_t attrs;
++ int result = pthread_mutexattr_init(&attrs);
++ ASSERT(result == 0);
++ result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE);
++ ASSERT(result == 0);
++ result = pthread_mutex_init(&mutex_, &attrs);
++ ASSERT(result == 0);
++ USE(result);
++ }
++
++ virtual ~GNUMutex() { pthread_mutex_destroy(&mutex_); }
++
++ virtual int Lock() {
++ int result = pthread_mutex_lock(&mutex_);
++ return result;
++ }
++
++ virtual int Unlock() {
++ int result = pthread_mutex_unlock(&mutex_);
++ return result;
++ }
++
++ virtual bool TryLock() {
++ int result = pthread_mutex_trylock(&mutex_);
++ // Return false if the lock is busy and locking failed.
++ if (result == EBUSY) {
++ return false;
++ }
++ ASSERT(result == 0); // Verify no other errors.
++ return true;
++ }
++
++ private:
++ pthread_mutex_t mutex_; // Pthread mutex for POSIX platforms.
++};
++
++
++Mutex* OS::CreateMutex() {
++ return new GNUMutex();
++}
++
++
++class GNUSemaphore : public Semaphore {
++ public:
++ explicit GNUSemaphore(int count) { sem_init(&sem_, 0, count); }
++ virtual ~GNUSemaphore() { sem_destroy(&sem_); }
++
++ virtual void Wait();
++ virtual bool Wait(int timeout);
++ virtual void Signal() { sem_post(&sem_); }
++ private:
++ sem_t sem_;
++};
++
++
++void GNUSemaphore::Wait() {
++ while (true) {
++ int result = sem_wait(&sem_);
++ if (result == 0) return; // Successfully got semaphore.
++ CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
++ }
++}
++
++
++bool GNUSemaphore::Wait(int timeout) {
++ const long kOneSecondMicros = 1000000; // NOLINT
++
++ // Split timeout into second and nanosecond parts.
++ struct timeval delta;
++ delta.tv_usec = timeout % kOneSecondMicros;
++ delta.tv_sec = timeout / kOneSecondMicros;
++
++ struct timeval current_time;
++ // Get the current time.
++ if (gettimeofday(&current_time, NULL) == -1) {
++ return false;
++ }
++
++ // Calculate time for end of timeout.
++ struct timeval end_time;
++ timeradd(&current_time, &delta, &end_time);
++
++ struct timespec ts;
++ TIMEVAL_TO_TIMESPEC(&end_time, &ts);
++ while (true) {
++ int result = sem_timedwait(&sem_, &ts);
++ if (result == 0) return true; // Successfully got semaphore.
++ if (result == -1 && errno == ETIMEDOUT) return false; // Timeout.
++ CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
++ }
++}
++
++
++Semaphore* OS::CreateSemaphore(int count) {
++ return new GNUSemaphore(count);
++}
++
++
++static pthread_t GetThreadID() {
++ pthread_t thread_id = pthread_self();
++ return thread_id;
++}
++
++
++class Sampler::PlatformData : public Malloced {
++ public:
++ PlatformData() : vm_tid_(GetThreadID()) {}
++
++ pthread_t vm_tid() const { return vm_tid_; }
++
++ private:
++ pthread_t vm_tid_;
++};
++
++
++static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
++ USE(info);
++ if (signal != SIGPROF) return;
++ Isolate* isolate = Isolate::UncheckedCurrent();
++ if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) {
++ // We require a fully initialized and entered isolate.
++ return;
++ }
++ if (v8::Locker::IsActive() &&
++ !isolate->thread_manager()->IsLockedByCurrentThread()) {
++ return;
++ }
++
++ Sampler* sampler = isolate->logger()->sampler();
++ if (sampler == NULL || !sampler->IsActive()) return;
++
++ TickSample sample_obj;
++ TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
++ if (sample == NULL) sample = &sample_obj;
++
++ // Extracting the sample from the context is extremely machine dependent.
++ ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
++ mcontext_t& mcontext = ucontext->uc_mcontext;
++ sample->state = isolate->current_vm_state();
++#if V8_HOST_ARCH_IA32
++ sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_EIP]);
++ sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_ESP]);
++ sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_EBP]);
++#endif
++ sampler->SampleStack(sample);
++ sampler->Tick(sample);
++}
++
++
++class SignalSender : public Thread {
++ public:
++ enum SleepInterval {
++ HALF_INTERVAL,
++ FULL_INTERVAL
++ };
++
++ static const int kSignalSenderStackSize = 64 * KB;
++
++ explicit SignalSender(int interval)
++ : Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
++ interval_(interval) {}
++
++ static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
++ static void TearDown() { delete mutex_; }
++
++ static void AddActiveSampler(Sampler* sampler) {
++ ScopedLock lock(mutex_);
++ SamplerRegistry::AddActiveSampler(sampler);
++ if (instance_ == NULL) {
++ // Install a signal handler.
++ struct sigaction sa;
++ sa.sa_sigaction = ProfilerSignalHandler;
++ sigemptyset(&sa.sa_mask);
++ sa.sa_flags = SA_RESTART | SA_SIGINFO;
++ signal_handler_installed_ =
++ (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
++
++ // Start a thread that sends SIGPROF signal to VM threads.
++ instance_ = new SignalSender(sampler->interval());
++ instance_->Start();
++ } else {
++ ASSERT(instance_->interval_ == sampler->interval());
++ }
++ }
++
++ static void RemoveActiveSampler(Sampler* sampler) {
++ ScopedLock lock(mutex_);
++ SamplerRegistry::RemoveActiveSampler(sampler);
++ if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
++ RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
++ delete instance_;
++ instance_ = NULL;
++
++ // Restore the old signal handler.
++ if (signal_handler_installed_) {
++ sigaction(SIGPROF, &old_signal_handler_, 0);
++ signal_handler_installed_ = false;
++ }
++ }
++ }
++
++ // Implement Thread::Run().
++ virtual void Run() {
++ SamplerRegistry::State state;
++ while ((state = SamplerRegistry::GetState()) !=
++ SamplerRegistry::HAS_NO_SAMPLERS) {
++ bool cpu_profiling_enabled =
++ (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
++ bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
++ // When CPU profiling is enabled both JavaScript and C++ code is
++ // profiled. We must not suspend.
++ if (!cpu_profiling_enabled) {
++ if (rate_limiter_.SuspendIfNecessary()) continue;
++ }
++ if (cpu_profiling_enabled && runtime_profiler_enabled) {
++ if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
++ return;
++ }
++ Sleep(HALF_INTERVAL);
++ if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
++ return;
++ }
++ Sleep(HALF_INTERVAL);
++ } else {
++ if (cpu_profiling_enabled) {
++ if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile,
++ this)) {
++ return;
++ }
++ }
++ if (runtime_profiler_enabled) {
++ if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile,
++ NULL)) {
++ return;
++ }
++ }
++ Sleep(FULL_INTERVAL);
++ }
++ }
++ }
++
++ static void DoCpuProfile(Sampler* sampler, void* raw_sender) {
++ if (!sampler->IsProfiling()) return;
++ SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender);
++ sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
++ }
++
++ static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
++ if (!sampler->isolate()->IsInitialized()) return;
++ sampler->isolate()->runtime_profiler()->NotifyTick();
++ }
++
++ void SendProfilingSignal(pthread_t tid) {
++ if (!signal_handler_installed_) return;
++ pthread_kill(tid, SIGPROF);
++ }
++
++ void Sleep(SleepInterval full_or_half) {
++    // Convert ms to us and subtract 100 us to compensate for delays
++    // occurring during signal delivery.
++ useconds_t interval = interval_ * 1000 - 100;
++ if (full_or_half == HALF_INTERVAL) interval /= 2;
++ int result = usleep(interval);
++#ifdef DEBUG
++ if (result != 0 && errno != EINTR) {
++ fprintf(stderr,
++ "SignalSender usleep error; interval = %u, errno = %d\n",
++ interval,
++ errno);
++ ASSERT(result == 0 || errno == EINTR);
++ }
++#endif
++ USE(result);
++ }
++
++ const int interval_;
++ RuntimeProfilerRateLimiter rate_limiter_;
++
++ // Protects the process wide state below.
++ static Mutex* mutex_;
++ static SignalSender* instance_;
++ static bool signal_handler_installed_;
++ static struct sigaction old_signal_handler_;
++
++ private:
++ DISALLOW_COPY_AND_ASSIGN(SignalSender);
++};
++
++Mutex* SignalSender::mutex_ = NULL;
++SignalSender* SignalSender::instance_ = NULL;
++struct sigaction SignalSender::old_signal_handler_;
++bool SignalSender::signal_handler_installed_ = false;
++
++
++void OS::SetUp() {
++ // Seed the random number generator.
++ // Convert the current time to a 64-bit integer first, before converting it
++ // to an unsigned. Going directly can cause an overflow and the seed to be
++ // set to all ones. The seed will be identical for different instances that
++ // call this setup code within the same millisecond.
++ uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
++ srandom(static_cast<unsigned int>(seed));
++ limit_mutex = CreateMutex();
++ SignalSender::SetUp();
++}
++
++
++void OS::TearDown() {
++ SignalSender::TearDown();
++ delete limit_mutex;
++}
++
++
++Sampler::Sampler(Isolate* isolate, int interval)
++ : isolate_(isolate),
++ interval_(interval),
++ profiling_(false),
++ active_(false),
++ samples_taken_(0) {
++ data_ = new PlatformData;
++}
++
++
++Sampler::~Sampler() {
++ ASSERT(!IsActive());
++ delete data_;
++}
++
++
++void Sampler::Start() {
++ ASSERT(!IsActive());
++ SetActive(true);
++ SignalSender::AddActiveSampler(this);
++}
++
++
++void Sampler::Stop() {
++ ASSERT(IsActive());
++ SignalSender::RemoveActiveSampler(this);
++ SetActive(false);
++}
++
++
++} } // namespace v8::internal
+--- a/tools/gyp/v8.gyp
++++ b/tools/gyp/v8.gyp
+@@ -723,6 +723,17 @@
+ '../../src/platform-posix.cc'
+ ]},
+ ],
++ ['OS=="hurd"', {
++ 'link_settings': {
++ 'libraries': [
++ '-lpthread',
++ ]},
++ 'sources': [
++ '../../src/platform-gnu.cc',
++ '../../src/platform-posix.cc'
++ ],
++ }
++ ],
+ ['OS=="win"', {
+ 'sources': [
+ '../../src/platform-win32.cc',
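The VirtualMemory code the hurd patch adds above uses the usual POSIX reserve-then-trim idiom: over-reserve by the alignment, round the base up, then munmap the misaligned head and the surplus tail. A minimal standalone sketch of that idiom, assuming Linux-style mmap flags and a page-multiple power-of-two alignment (the helper name ReserveAligned is ours, not V8's):

    #include <stddef.h>
    #include <stdint.h>
    #include <sys/mman.h>

    // Reserve `size` bytes of PROT_NONE address space aligned to `alignment`,
    // which must be a power of two and a multiple of the page size.
    void* ReserveAligned(size_t size, size_t alignment) {
      size_t request = size + alignment;  // slack so we can slide upward
      void* raw = mmap(NULL, request, PROT_NONE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
      if (raw == MAP_FAILED) return NULL;
      uintptr_t base = reinterpret_cast<uintptr_t>(raw);
      uintptr_t mask = static_cast<uintptr_t>(alignment) - 1;
      uintptr_t aligned = (base + mask) & ~mask;  // RoundUp(base, alignment)
      size_t prefix = aligned - base;             // misaligned head, if any
      if (prefix != 0) munmap(raw, prefix);
      size_t suffix = request - prefix - size;    // surplus tail, if any
      if (suffix != 0)
        munmap(reinterpret_cast<void*>(aligned + size), suffix);
      return reinterpret_cast<void*>(aligned);
    }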
diff --git a/0008_mksnapshot_stdout.patch b/0008_mksnapshot_stdout.patch
new file mode 100644
index 000000000000..d825ee0f9c3b
--- /dev/null
+++ b/0008_mksnapshot_stdout.patch
@@ -0,0 +1,16 @@
+Description: Redirect mksnapshot log to stdout
+ armel builds typically fail at mksnapshot, so it is useful to be able to get the actual log.
+Forwarded: not-needed
+Author: Jérémy Lal <kapouer@melix.org>
+Last-Update: 2011-10-25
+--- a/tools/gyp/v8.gyp
++++ b/tools/gyp/v8.gyp
+@@ -136,7 +136,7 @@
+ 'variables': {
+ 'mksnapshot_flags': [
+ '--log-snapshot-positions',
+- '--logfile', '<(INTERMEDIATE_DIR)/snapshot.log',
++ '--logfile', '-',
+ ],
+ },
+ 'conditions': [
diff --git a/0011_use_system_gyp.patch b/0011_use_system_gyp.patch
new file mode 100644
index 000000000000..c1527d2b6aae
--- /dev/null
+++ b/0011_use_system_gyp.patch
@@ -0,0 +1,23 @@
+Description: Use gyp package, not build/gyp/gyp.
+Forwarded: not-needed
+Author: Jérémy Lal <kapouer@melix.org>
+Last-Update: 2011-10-22
+--- a/Makefile
++++ b/Makefile
+@@ -250,14 +250,14 @@
+ OUT_MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(ARCHES))
+ $(OUT_MAKEFILES): $(GYPFILES) $(ENVFILE)
+ GYP_GENERATORS=make \
+- build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
++ gyp --generator-output="$(OUTDIR)" build/all.gyp \
+ -Ibuild/standalone.gypi --depth=. \
+ -Dv8_target_arch=$(subst .,,$(suffix $@)) \
+ -S.$(subst .,,$(suffix $@)) $(GYPFLAGS)
+
+ $(OUTDIR)/Makefile.native: $(GYPFILES) $(ENVFILE)
+ GYP_GENERATORS=make \
+- build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
++ gyp --generator-output="$(OUTDIR)" build/all.gyp \
+ -Ibuild/standalone.gypi --depth=. -S.native $(GYPFLAGS)
+
+ must-set-ANDROID_NDK_ROOT_OR_TOOLCHAIN:
diff --git a/0012_loongson_force_cache_flush.patch b/0012_loongson_force_cache_flush.patch
new file mode 100644
index 000000000000..6211e6cc6e2c
--- /dev/null
+++ b/0012_loongson_force_cache_flush.patch
@@ -0,0 +1,22 @@
+Description: Forced whole instruction cache flushing on Loongson.
+ Workaround for an instruction cache flushing malfunction on Loongson
+ systems that occasionally causes failures under stress test conditions.
+Author: Dusan Milosavljevic <dusan.milosavljevic@rt-rk.com>
+Origin: upstream, https://github.com/paul99/v8m-rb/commit/ded6c2c2.patch
+Last-Update: 2012-06-13
+--- a/src/mips/cpu-mips.cc
++++ b/src/mips/cpu-mips.cc
+@@ -72,6 +72,13 @@
+ #else // ANDROID
+ int res;
+ // See http://www.linux-mips.org/wiki/Cacheflush_Syscall.
++ if (kArchVariant==kLoongson) {
++    // Force flushing of the whole instruction cache on Loongson. This is
++    // a workaround for a problem where, under stress tests, cache lines
++    // are not flushed through the syscall for some reason.
++ size_t iCacheSize = 64 * KB;
++ size = iCacheSize + 1;
++ }
+ res = syscall(__NR_cacheflush, start, size, ICACHE);
+ if (res) {
+ V8_Fatal(__FILE__, __LINE__, "Failed to flush the instruction cache");
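The workaround above is blunt: it relies on the kernel flushing the entire instruction cache when asked to flush a range larger than the 64 KB I-cache, instead of the individual (possibly missed) lines. Sketched as a standalone helper under that assumption (MIPS-only; __NR_cacheflush and ICACHE come from the Linux kernel headers):

    #include <stddef.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <asm/cachectl.h>  // ICACHE (MIPS only)

    // Request one byte more than the 64 KB I-cache holds so the kernel
    // flushes the whole cache rather than a subset of lines.
    void FlushWholeICache(void* start) {
      const size_t kICacheSize = 64 * 1024;
      syscall(__NR_cacheflush, start, kICacheSize + 1, ICACHE);
    }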
diff --git a/0013_gcc_48_compat.patch b/0013_gcc_48_compat.patch
new file mode 100644
index 000000000000..c7bc89ad9746
--- /dev/null
+++ b/0013_gcc_48_compat.patch
@@ -0,0 +1,28 @@
+Description: gcc 4.8 compatibility fixes
+Bug: https://code.google.com/p/v8/issues/detail?id=2149
+Bug: https://code.google.com/p/v8/issues/detail?id=2767
+Bug-Debian: http://bugs.debian.org/701312
+Author: Jérémy Lal <kapouer@melix.org>
+Last-Update: 2013-07-06
+--- a/src/checks.h
++++ b/src/checks.h
+@@ -248,7 +248,7 @@
+ #define STATIC_CHECK(test) \
+ typedef \
+ StaticAssertionHelper<sizeof(StaticAssertion<static_cast<bool>((test))>)> \
+- SEMI_STATIC_JOIN(__StaticAssertTypedef__, __LINE__)
++ SEMI_STATIC_JOIN(__StaticAssertTypedef__, __LINE__) __attribute__((unused))
+
+
+ extern bool FLAG_enable_slow_asserts;
+--- a/test/cctest/test-macro-assembler-x64.cc
++++ b/test/cctest/test-macro-assembler-x64.cc
+@@ -2185,7 +2185,7 @@
+ TEST(OperandOffset) {
+ v8::internal::V8::Initialize(NULL);
+ int data[256];
+- for (int i = 0; i < 256; i++) { data[i] = i * 0x01010101; }
++ for (int i = 0; i < 256; i++) { data[i] = (long)i * 0x01010101; }
+
+ // Allocate an executable page of memory.
+ size_t actual_size;
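The STATIC_CHECK change above is the classic pre-C++11 static assertion: a typedef whose validity depends on the tested expression. GCC 4.8 started warning about unused local typedefs, so the patch tags the typedef __attribute__((unused)). A rough standalone illustration of the pattern (simplified names, not V8's actual macros):

    // CompileAssert<true> is complete, so sizeof() works; CompileAssert<false>
    // is only declared, so sizeof() fails to compile when the check is false.
    template <bool> struct CompileAssert;
    template <> struct CompileAssert<true> { char dummy; };

    #define MY_STATIC_CHECK(cond) \
      typedef char StaticCheckTypedef \
          [sizeof(CompileAssert<static_cast<bool>(cond)>)] \
          __attribute__((unused))  // silences -Wunused-local-typedefs

    int main() {
      MY_STATIC_CHECK(sizeof(int) >= 2);    // compiles
      // MY_STATIC_CHECK(sizeof(int) == 1); // would fail to compile
      return 0;
    }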
diff --git a/0014_cve_2013_6639_6640.patch b/0014_cve_2013_6639_6640.patch
new file mode 100644
index 000000000000..3ba0b2c948da
--- /dev/null
+++ b/0014_cve_2013_6639_6640.patch
@@ -0,0 +1,303 @@
+From: "jkummerow@chromium.org" <jkummerow@chromium.org>
+Date: Fri, 13 Dec 2013 14:21:10 -0700
+Subject: [PATCH] v8: backport fix for CVE-2013-{6639|6640}
+Origin: https://github.com/joyent/node/commit/39e2426
+
+This is a backport of upstream commit r17801. Original commit log:
+
+ Limit size of dehoistable array indices
+
+ LOG=Y
+ BUG=chromium:319835,chromium:319860
+ R=dslomov@chromium.org
+
+ Review URL: https://codereview.chromium.org/74113002
+---
+ src/elements-kind.cc | 30 +++++++++++++
+ src/elements-kind.h | 2 +
+ src/hydrogen-instructions.h | 9 ++++
+ src/hydrogen.cc | 2 +-
+ src/lithium.cc | 30 -------------
+ src/lithium.h | 3 --
+ test/mjsunit/regress/regress-crbug-319835.js | 51 ++++++++++++++++++++++
+ test/mjsunit/regress/regress-crbug-319860.js | 47 ++++++++++++++++++++
+ 8 files changed, 140 insertions(+), 34 deletions(-)
+ create mode 100644 test/mjsunit/regress/regress-crbug-319835.js
+ create mode 100644 test/mjsunit/regress/regress-crbug-319860.js
+
+--- a/src/elements-kind.cc
++++ b/src/elements-kind.cc
+@@ -35,6 +35,36 @@
+ namespace internal {
+
+
++int ElementsKindToShiftSize(ElementsKind elements_kind) {
++ switch (elements_kind) {
++ case EXTERNAL_BYTE_ELEMENTS:
++ case EXTERNAL_PIXEL_ELEMENTS:
++ case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
++ return 0;
++ case EXTERNAL_SHORT_ELEMENTS:
++ case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
++ return 1;
++ case EXTERNAL_INT_ELEMENTS:
++ case EXTERNAL_UNSIGNED_INT_ELEMENTS:
++ case EXTERNAL_FLOAT_ELEMENTS:
++ return 2;
++ case EXTERNAL_DOUBLE_ELEMENTS:
++ case FAST_DOUBLE_ELEMENTS:
++ case FAST_HOLEY_DOUBLE_ELEMENTS:
++ return 3;
++ case FAST_SMI_ELEMENTS:
++ case FAST_ELEMENTS:
++ case FAST_HOLEY_SMI_ELEMENTS:
++ case FAST_HOLEY_ELEMENTS:
++ case DICTIONARY_ELEMENTS:
++ case NON_STRICT_ARGUMENTS_ELEMENTS:
++ return kPointerSizeLog2;
++ }
++ UNREACHABLE();
++ return 0;
++}
++
++
+ void PrintElementsKind(FILE* out, ElementsKind kind) {
+ ElementsAccessor* accessor = ElementsAccessor::ForKind(kind);
+ PrintF(out, "%s", accessor->name());
+--- a/src/elements-kind.h
++++ b/src/elements-kind.h
+@@ -77,6 +77,8 @@
+ const int kFastElementsKindCount = LAST_FAST_ELEMENTS_KIND -
+ FIRST_FAST_ELEMENTS_KIND + 1;
+
++int ElementsKindToShiftSize(ElementsKind elements_kind);
++
+ void PrintElementsKind(FILE* out, ElementsKind kind);
+
+ ElementsKind GetInitialFastElementsKind();
+--- a/src/hydrogen-instructions.h
++++ b/src/hydrogen-instructions.h
+@@ -4240,6 +4240,7 @@
+ virtual HValue* GetKey() = 0;
+ virtual void SetKey(HValue* key) = 0;
+ virtual void SetIndexOffset(uint32_t index_offset) = 0;
++ virtual int MaxIndexOffsetBits() = 0;
+ virtual bool IsDehoisted() = 0;
+ virtual void SetDehoisted(bool is_dehoisted) = 0;
+ virtual ~ArrayInstructionInterface() { };
+@@ -4274,6 +4275,7 @@
+ void SetIndexOffset(uint32_t index_offset) {
+ bit_field_ = IndexOffsetField::update(bit_field_, index_offset);
+ }
++ int MaxIndexOffsetBits() { return 25; }
+ HValue* GetKey() { return key(); }
+ void SetKey(HValue* key) { SetOperandAt(1, key); }
+ bool IsDehoisted() { return IsDehoistedField::decode(bit_field_); }
+@@ -4343,6 +4345,7 @@
+ HValue* dependency() { return OperandAt(2); }
+ uint32_t index_offset() { return index_offset_; }
+ void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
++ int MaxIndexOffsetBits() { return 25; }
+ HValue* GetKey() { return key(); }
+ void SetKey(HValue* key) { SetOperandAt(1, key); }
+ bool IsDehoisted() { return is_dehoisted_; }
+@@ -4420,6 +4423,7 @@
+ ElementsKind elements_kind() const { return elements_kind_; }
+ uint32_t index_offset() { return index_offset_; }
+ void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
++ int MaxIndexOffsetBits() { return 25; }
+ HValue* GetKey() { return key(); }
+ void SetKey(HValue* key) { SetOperandAt(1, key); }
+ bool IsDehoisted() { return is_dehoisted_; }
+@@ -4595,6 +4599,7 @@
+ }
+ uint32_t index_offset() { return index_offset_; }
+ void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
++ int MaxIndexOffsetBits() { return 25; }
+ HValue* GetKey() { return key(); }
+ void SetKey(HValue* key) { SetOperandAt(1, key); }
+ bool IsDehoisted() { return is_dehoisted_; }
+@@ -4648,6 +4653,7 @@
+ HValue* value() { return OperandAt(2); }
+ uint32_t index_offset() { return index_offset_; }
+ void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
++ int MaxIndexOffsetBits() { return 25; }
+ HValue* GetKey() { return key(); }
+ void SetKey(HValue* key) { SetOperandAt(1, key); }
+ bool IsDehoisted() { return is_dehoisted_; }
+@@ -4706,6 +4712,9 @@
+ ElementsKind elements_kind() const { return elements_kind_; }
+ uint32_t index_offset() { return index_offset_; }
+ void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
++ int MaxIndexOffsetBits() {
++ return 31 - ElementsKindToShiftSize(elements_kind_);
++ }
+ HValue* GetKey() { return key(); }
+ void SetKey(HValue* key) { SetOperandAt(1, key); }
+ bool IsDehoisted() { return is_dehoisted_; }
+--- a/src/hydrogen.cc
++++ b/src/hydrogen.cc
+@@ -3737,7 +3737,7 @@
+ int32_t value = constant->Integer32Value() * sign;
+ // We limit offset values to 30 bits because we want to avoid the risk of
+ // overflows when the offset is added to the object header size.
+- if (value >= 1 << 30 || value < 0) return;
++ if (value >= 1 << array_operation->MaxIndexOffsetBits() || value < 0) return;
+ array_operation->SetKey(subexpression);
+ if (index->HasNoUses()) {
+ index->DeleteAndReplaceWith(NULL);
+--- a/src/lithium.cc
++++ b/src/lithium.cc
+@@ -227,36 +227,6 @@
+ }
+
+
+-int ElementsKindToShiftSize(ElementsKind elements_kind) {
+- switch (elements_kind) {
+- case EXTERNAL_BYTE_ELEMENTS:
+- case EXTERNAL_PIXEL_ELEMENTS:
+- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+- return 0;
+- case EXTERNAL_SHORT_ELEMENTS:
+- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+- return 1;
+- case EXTERNAL_INT_ELEMENTS:
+- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+- case EXTERNAL_FLOAT_ELEMENTS:
+- return 2;
+- case EXTERNAL_DOUBLE_ELEMENTS:
+- case FAST_DOUBLE_ELEMENTS:
+- case FAST_HOLEY_DOUBLE_ELEMENTS:
+- return 3;
+- case FAST_SMI_ELEMENTS:
+- case FAST_ELEMENTS:
+- case FAST_HOLEY_SMI_ELEMENTS:
+- case FAST_HOLEY_ELEMENTS:
+- case DICTIONARY_ELEMENTS:
+- case NON_STRICT_ARGUMENTS_ELEMENTS:
+- return kPointerSizeLog2;
+- }
+- UNREACHABLE();
+- return 0;
+-}
+-
+-
+ LLabel* LChunk::GetLabel(int block_id) const {
+ HBasicBlock* block = graph_->blocks()->at(block_id);
+ int first_instruction = block->first_instruction_index();
+--- a/src/lithium.h
++++ b/src/lithium.h
+@@ -704,9 +704,6 @@
+ };
+
+
+-int ElementsKindToShiftSize(ElementsKind elements_kind);
+-
+-
+ } } // namespace v8::internal
+
+ #endif // V8_LITHIUM_H_
+--- /dev/null
++++ b/test/mjsunit/regress/regress-crbug-319835.js
+@@ -0,0 +1,51 @@
++// Copyright 2013 the V8 project authors. All rights reserved.
++// Redistribution and use in source and binary forms, with or without
++// modification, are permitted provided that the following conditions are
++// met:
++//
++// * Redistributions of source code must retain the above copyright
++// notice, this list of conditions and the following disclaimer.
++// * Redistributions in binary form must reproduce the above
++// copyright notice, this list of conditions and the following
++// disclaimer in the documentation and/or other materials provided
++// with the distribution.
++// * Neither the name of Google Inc. nor the names of its
++// contributors may be used to endorse or promote products derived
++// from this software without specific prior written permission.
++//
++// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++
++// Flags: --allow-natives-syntax
++
++try {} catch(e) {} // No need to optimize the top level.
++
++var size = 0x20000;
++var a = new Float64Array(size);
++var training = new Float64Array(10);
++function store(a, index) {
++ var offset = 0x20000000;
++ for (var i = 0; i < 1; i++) {
++ a[index + offset] = 0xcc;
++ }
++}
++
++store(training, -0x20000000);
++store(training, -0x20000000 + 1);
++store(training, -0x20000000);
++store(training, -0x20000000 + 1);
++%OptimizeFunctionOnNextCall(store);
++
++// Segfault maybe?
++for (var i = -0x20000000; i < -0x20000000 + size; i++) {
++ store(a, i);
++}
+--- /dev/null
++++ b/test/mjsunit/regress/regress-crbug-319860.js
+@@ -0,0 +1,47 @@
++// Copyright 2013 the V8 project authors. All rights reserved.
++// Redistribution and use in source and binary forms, with or without
++// modification, are permitted provided that the following conditions are
++// met:
++//
++// * Redistributions of source code must retain the above copyright
++// notice, this list of conditions and the following disclaimer.
++// * Redistributions in binary form must reproduce the above
++// copyright notice, this list of conditions and the following
++// disclaimer in the documentation and/or other materials provided
++// with the distribution.
++// * Neither the name of Google Inc. nor the names of its
++// contributors may be used to endorse or promote products derived
++// from this software without specific prior written permission.
++//
++// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++
++// Flags: --allow-natives-syntax
++
++function read(a, index) {
++ var offset = 0x2000000;
++ var result;
++ for (var i = 0; i < 1; i++) {
++ result = a[index + offset];
++ }
++ return result;
++}
++
++var a = new Int8Array(0x2000001);
++read(a, 0);
++read(a, 0);
++%OptimizeFunctionOnNextCall(read);
++
++// Segfault maybe?
++for (var i = 0; i > -1000000; --i) {
++ read(a, i);
++}
diff --git a/0015-Backport-Utils-ApiCheck.patch b/0015-Backport-Utils-ApiCheck.patch
new file mode 100644
index 000000000000..66425a59de2d
--- /dev/null
+++ b/0015-Backport-Utils-ApiCheck.patch
@@ -0,0 +1,29 @@
+From be3df35d659a9dd3c59eb29abdcc10d74b8fc90a Mon Sep 17 00:00:00 2001
+From: Balint Reczey <balint@balintreczey.hu>
+Date: Mon, 9 Jan 2017 18:12:23 +0100
+Subject: [PATCH 15/16] Backport Utils::ApiCheck()
+
+---
+ src/api.h | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/src/api.h b/src/api.h
+index 7197b6c..c7877aa 100644
+--- a/src/api.h
++++ b/src/api.h
+@@ -183,6 +183,12 @@ class RegisteredExtension {
+ class Utils {
+ public:
+ static bool ReportApiFailure(const char* location, const char* message);
++ static inline bool ApiCheck(bool condition,
++ const char* location,
++ const char* message) {
++ if (!condition) Utils::ReportApiFailure(location, message);
++ return condition;
++ }
+
+ static Local<FunctionTemplate> ToFunctionTemplate(NeanderObject obj);
+ static Local<ObjectTemplate> ToObjectTemplate(NeanderObject obj);
+--
+2.1.4
+
diff --git a/0016-remove-this-null.patch b/0016-remove-this-null.patch
new file mode 100644
index 000000000000..91224ffb205c
--- /dev/null
+++ b/0016-remove-this-null.patch
@@ -0,0 +1,202 @@
+From 94dcbe59a9f324089f9f5195635ee031063078cf Mon Sep 17 00:00:00 2001
+From: "dcarney@chromium.org"
+ <dcarney@chromium.org@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
+Date: Thu, 12 Jun 2014 12:01:01 +0000
+Subject: [PATCH 16/16] remove this == null
+
+R=danno@chromium.org
+
+BUG=chromium:381910
+
+Review URL: https://codereview.chromium.org/336483002
+
+git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@21807 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
+
+Conflicts:
+ src/api.cc
+ src/factory.cc
+ src/heap.cc
+ src/hydrogen-load-elimination.cc
+ src/spaces.cc
+ src/spaces.h
+ src/x64/assembler-x64-inl.h
+ test/cctest/test-spaces.cc
+
+Back-port to v3.14.5.8 by Balint Reczey
+---
+ src/api.cc | 27 +++++++++++++++------------
+ src/heap.cc | 15 +++++++++------
+ src/spaces.cc | 18 ++++++++++++------
+ src/spaces.h | 8 ++++++--
+ 4 files changed, 42 insertions(+), 26 deletions(-)
+
+diff --git a/src/api.cc b/src/api.cc
+index f168398..5308485 100644
+--- a/src/api.cc
++++ b/src/api.cc
+@@ -1120,12 +1120,15 @@ void FunctionTemplate::AddInstancePropertyAccessor(
+
+
+ Local<ObjectTemplate> FunctionTemplate::InstanceTemplate() {
+- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+- if (IsDeadCheck(isolate, "v8::FunctionTemplate::InstanceTemplate()")
+- || EmptyCheck("v8::FunctionTemplate::InstanceTemplate()", this))
++ i::Handle<i::FunctionTemplateInfo> handle = Utils::OpenHandle(this, true);
++ if (!Utils::ApiCheck(!handle.is_null(),
++ "v8::FunctionTemplate::InstanceTemplate()",
++ "Reading from empty handle")) {
+ return Local<ObjectTemplate>();
++ }
++ i::Isolate* isolate = handle->GetIsolate();
+ ENTER_V8(isolate);
+- if (Utils::OpenHandle(this)->instance_template()->IsUndefined()) {
++ if (handle->instance_template()->IsUndefined()) {
+ Local<ObjectTemplate> templ =
+ ObjectTemplate::New(v8::Handle<FunctionTemplate>(this));
+ Utils::OpenHandle(this)->set_instance_template(*Utils::OpenHandle(*templ));
+@@ -2683,14 +2686,14 @@ int32_t Value::Int32Value() const {
+
+ bool Value::Equals(Handle<Value> that) const {
+ i::Isolate* isolate = i::Isolate::Current();
+- if (IsDeadCheck(isolate, "v8::Value::Equals()")
+- || EmptyCheck("v8::Value::Equals()", this)
+- || EmptyCheck("v8::Value::Equals()", that)) {
++ i::Handle<i::Object> obj = Utils::OpenHandle(this, true);
++ if (!Utils::ApiCheck(!obj.is_null() && !that.IsEmpty(),
++ "v8::Value::Equals()",
++ "Reading from empty handle")) {
+ return false;
+ }
+ LOG_API(isolate, "Equals");
+ ENTER_V8(isolate);
+- i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ i::Handle<i::Object> other = Utils::OpenHandle(*that);
+ // If both obj and other are JSObjects, we'd better compare by identity
+ // immediately when going into JS builtin. The reason is Invoke
+@@ -2710,13 +2713,13 @@ bool Value::Equals(Handle<Value> that) const {
+
+ bool Value::StrictEquals(Handle<Value> that) const {
+ i::Isolate* isolate = i::Isolate::Current();
+- if (IsDeadCheck(isolate, "v8::Value::StrictEquals()")
+- || EmptyCheck("v8::Value::StrictEquals()", this)
+- || EmptyCheck("v8::Value::StrictEquals()", that)) {
++ i::Handle<i::Object> obj = Utils::OpenHandle(this, true);
++ if (!Utils::ApiCheck(!obj.is_null() && !that.IsEmpty(),
++ "v8::Value::StrictEquals()",
++ "Reading from empty handle")) {
+ return false;
+ }
+ LOG_API(isolate, "StrictEquals");
+- i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ i::Handle<i::Object> other = Utils::OpenHandle(*that);
+ // Must check HeapNumber first, since NaN !== NaN.
+ if (obj->IsHeapNumber()) {
+diff --git a/src/heap.cc b/src/heap.cc
+index e3fcb93..46e1e3d 100644
+--- a/src/heap.cc
++++ b/src/heap.cc
+@@ -3630,8 +3630,9 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
+ // Initialize the object
+ result->set_map_no_write_barrier(code_map());
+ Code* code = Code::cast(result);
+- ASSERT(!isolate_->code_range()->exists() ||
+- isolate_->code_range()->contains(code->address()));
++ ASSERT(isolate_->code_range() == NULL ||
++ !isolate_->code_range()->valid() ||
++ isolate_->code_range()->contains(code->address()));
+ code->set_instruction_size(desc.instr_size);
+ code->set_relocation_info(reloc_info);
+ code->set_flags(flags);
+@@ -3683,8 +3684,9 @@ MaybeObject* Heap::CopyCode(Code* code) {
+ CopyBlock(new_addr, old_addr, obj_size);
+ // Relocate the copy.
+ Code* new_code = Code::cast(result);
+- ASSERT(!isolate_->code_range()->exists() ||
+- isolate_->code_range()->contains(code->address()));
++ ASSERT(isolate_->code_range() == NULL ||
++ !isolate_->code_range()->valid() ||
++ isolate_->code_range()->contains(code->address()));
+ new_code->Relocate(new_addr - old_addr);
+ return new_code;
+ }
+@@ -3733,8 +3735,9 @@ MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
+ memcpy(new_code->relocation_start(), reloc_info.start(), reloc_info.length());
+
+ // Relocate the copy.
+- ASSERT(!isolate_->code_range()->exists() ||
+- isolate_->code_range()->contains(code->address()));
++ ASSERT(isolate_->code_range() == NULL ||
++ !isolate_->code_range()->valid() ||
++ isolate_->code_range()->contains(code->address()));
+ new_code->Relocate(new_addr - old_addr);
+
+ #ifdef VERIFY_HEAP
+diff --git a/src/spaces.cc b/src/spaces.cc
+index cc84180..c541f49 100644
+--- a/src/spaces.cc
++++ b/src/spaces.cc
+@@ -307,9 +307,12 @@ void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
+ size_executable_ -= size;
+ }
+ // Code which is part of the code-range does not have its own VirtualMemory.
+- ASSERT(!isolate_->code_range()->contains(
+- static_cast<Address>(reservation->address())));
+- ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
++ ASSERT(isolate_->code_range() == NULL ||
++ !isolate_->code_range()->contains(
++ static_cast<Address>(reservation->address())));
++ ASSERT(executable == NOT_EXECUTABLE ||
++ isolate_->code_range() == NULL ||
++ !isolate_->code_range()->valid());
+ reservation->Release();
+ }
+
+@@ -327,11 +330,14 @@ void MemoryAllocator::FreeMemory(Address base,
+ ASSERT(size_executable_ >= size);
+ size_executable_ -= size;
+ }
+- if (isolate_->code_range()->contains(static_cast<Address>(base))) {
++ if (isolate_->code_range() != NULL &&
++ isolate_->code_range()->contains(static_cast<Address>(base))) {
+ ASSERT(executable == EXECUTABLE);
+ isolate_->code_range()->FreeRawMemory(base, size);
+ } else {
+- ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
++ ASSERT(executable == NOT_EXECUTABLE ||
++ isolate_->code_range() == NULL ||
++ !isolate_->code_range()->valid());
+ bool result = VirtualMemory::ReleaseRegion(base, size);
+ USE(result);
+ ASSERT(result);
+@@ -512,7 +518,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
+
+ // Allocate executable memory either from code range or from the
+ // OS.
+- if (isolate_->code_range()->exists()) {
++ if (isolate_->code_range() != NULL && isolate_->code_range()->valid()) {
+ base = isolate_->code_range()->AllocateRawMemory(chunk_size, &chunk_size);
+ ASSERT(IsAligned(reinterpret_cast<intptr_t>(base),
+ MemoryChunk::kAlignment));
+diff --git a/src/spaces.h b/src/spaces.h
+index 95c63d6..d590d3b 100644
+--- a/src/spaces.h
++++ b/src/spaces.h
+@@ -834,9 +834,13 @@ class CodeRange {
+ // manage it.
+ void TearDown();
+
+- bool exists() { return this != NULL && code_range_ != NULL; }
++ bool valid() { return code_range_ != NULL; }
++ Address start() {
++ ASSERT(valid());
++ return static_cast<Address>(code_range_->address());
++ }
+ bool contains(Address address) {
+- if (this == NULL || code_range_ == NULL) return false;
++ if (!valid()) return false;
+ Address start = static_cast<Address>(code_range_->address());
+ return start <= address && address < start + code_range_->size();
+ }
+--
+2.1.4
+
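This backport exists because testing `this == NULL` inside a member function is undefined behaviour: a conforming compiler may assume `this` is non-null and delete the check, which newer GCC releases actually do. The patch moves the null test to the call site and adds an explicit valid() predicate. A condensed sketch of the before/after shape (assumed names, not the real V8 classes):

    #include <cstddef>

    class CodeRange {
     public:
      CodeRange() : base_(NULL), size_(0) {}
      // After the patch: validity is an explicit query on a live object.
      bool valid() const { return base_ != NULL; }
      bool contains(const char* p) const {
        if (!valid()) return false;  // no reliance on `this == NULL`
        return p >= base_ && p < base_ + size_;
      }
      // Before the patch (undefined behaviour; modern GCC deletes the test):
      //   bool exists() { return this != NULL && base_ != NULL; }
     private:
      const char* base_;
      size_t size_;
    };

    // Call sites now guard the pointer themselves:
    //   if (range != NULL && range->contains(addr)) { ... }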
diff --git a/0017_increase_stack_size_for_test.patch b/0017_increase_stack_size_for_test.patch
new file mode 100644
index 000000000000..bff664130cb4
--- /dev/null
+++ b/0017_increase_stack_size_for_test.patch
@@ -0,0 +1,18 @@
+Description: Increase stack size for specific tests
+ This makes those tests pass.
+
+Author: Balint Reczey <balint@balintreczey.hu>
+Forwarded: not-needed
+
+--- a/test/mjsunit/big-array-literal.js
++++ b/test/mjsunit/big-array-literal.js
+@@ -26,7 +26,8 @@
+ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ // On MacOS, this test needs a stack size of at least 538 kBytes.
+-// Flags: --stack-size=600
++// On Debian amd64 the minimum seems to be 763
++// Flags: --stack-size=800
+
+ // Test that we can make large object literals that work.
+ // Also test that we can attempt to make even larger object literals without
diff --git a/PKGBUILD b/PKGBUILD
index 6bae32b94b60..feab38eceb39 100644
--- a/PKGBUILD
+++ b/PKGBUILD
@@ -1,18 +1,60 @@
# $Id$
-# Maintainer: Kaiting Chen <kaitocracy@gmail.com>
+# Maintainer: Patrick Schratz <patrick.schratz@gmail.com>
+# Contributor: Kaiting Chen <kaitocracy@gmail.com>
# Contributor: tocer <tocer.deng@gmail.com>
pkgname=v8-3.14
pkgver=3.14.5
-pkgrel=2
-pkgdesc='A fast and modern javascript engine (old 3.14 version required for plv8)'
+pkgrel=3
+pkgdesc='A fast and modern javascript engine (old 3.14 version required by R package V8)'
arch=('i686' 'x86_64')
url='http://code.google.com/p/v8'
license=('BSD')
-depends=('gcc-libs' 'readline')
-makedepends=('subversion' 'python2')
-source=(http://commondatastorage.googleapis.com/chromium-browser-official/v8-$pkgver.tar.bz2)
-sha256sums=('361ad3b63dc7c9d0943b72b1be592a8135e4ddb0e416b9bcf02b4d2df514fca7')
+depends=('gcc-libs')
+makedepends=('python2' 'gyp-git')
+source=("http://commondatastorage.googleapis.com/chromium-browser-official/v8-$pkgver.tar.bz2"
+'0001_kfreebsd.patch'
+'0002_mips.patch'
+'0002_mips_r15102_backport.patch'
+'0002_mips_r19121_backport.patch'
+'0003_armv4t_disable_vfp.patch'
+'0004_hurd.patch'
+'0008_mksnapshot_stdout.patch'
+'0011_use_system_gyp.patch'
+'0012_loongson_force_cache_flush.patch'
+'0013_gcc_48_compat.patch'
+'0014_cve_2013_6639_6640.patch'
+'0015-Backport-Utils-ApiCheck.patch'
+'0016-remove-this-null.patch'
+'0017_increase_stack_size_for_test.patch'
+'https://gist.github.com/pat-s/942e255ea38821e6ac3e82a36cb3c4bd'
+'fix_CVE-2014-5256.patch'
+'nodejsREPLACE_INVALID_UTF8.patch'
+'strict_overflow.patch'
+'dont-assume-hardfloat-means-vfpv3.diff'
+'gcc7-fix.patch'
+)
+sha256sums=('361ad3b63dc7c9d0943b72b1be592a8135e4ddb0e416b9bcf02b4d2df514fca7'
+ '15af4bbb02ad510ed57f7c635f00f7163c45884e55acadb1d05510d2f3aaa494'
+ '239170677f6dfcae285dfb719ae3ae8d698a9652dab69f54506fbdd1b2eac9e4'
+ 'a1bd65547bad7113619f77ad442422944b7fa5afac7795868e653a2d0c38877f'
+ '1d4e0f503100515dea4be5558f6080321f3117108745cd3a481c44d80ebe8fc9'
+ '16fdb157a24a336bf2979b73cfba484314f2cfca2cdcfa9fe51fe2ac9970f202'
+ '8b43ef8dfc001d138d25348cd3594d139bc88bb1d333d3908800adbc8c6e55ab'
+ '73f75ce1fe02cfa51d8ee6410e000e96f07c21f1e42dd48ffc7d7970434e1677'
+ '4dba0e7e1d5f7cad6907c76551e36ef616765de003f83f8989d46008cf53911a'
+ '7d4dc3f2325f2b95c612e89904a07d9f3e8b050552be856910cb3ae0b41e04f8'
+ '8c1aa4a99748f7a567a3961022c39b1f3666cc57bf63b3ebabe0c51068a65b9b'
+ '76b7be145758e80af56429d46c23ce0942be6d13047b31b40855363ce9f88ce4'
+ '69906640439c263fdeacaf14605e785294f1f3daf28f7633b40a5ac8d6977797'
+ 'e90b54cf2e296c6d5c4bc41b7159015a6584191b5c2ab95a2f28861fb1c3bcb3'
+ '71a600e3e502896d45076103201d35c30f778fa57a750bb3f2dfdbdcb3a708b8'
+ '19f4708484c837d01b82ebd4667bbbcb73f65da3ee3f7a12fa38038fe730d733'
+ 'd6d3eb0ef53ce501c6da5d756f7dc1adcf85361ad75b17253051bb3869b0b3dc'
+ 'b76c02ca0d88e9818e58ef70592a216c6d969bde3b563c74244ee3687a39f672'
+ '1b48a5714e9d89d419dac8969c005c56a0adc2599b558375ac9254a3168f55ae'
+ '2e6a8f36c33e5e37956429eae2753944519f60a57fde81e0d72de1afa60a4103'
+ 'c67da79111fa171a0900af0da9b151a1568b233f4929922e72d049d7490f98df')
provides=('v8')
conflicts=('v8')
@@ -22,30 +64,83 @@ conflicts=('v8')
build() {
cd v8-$pkgver
- # The world isn't ready for python2
- export PYTHON=python2
- find build/ test/ tools/ src/ -type f \
- -exec sed -e 's_^#!/usr/bin/env python$_&2_' \
- -e 's_^\(#!/usr/bin/python2\).[45]$_\1_' \
- -e 's_^#!/usr/bin/python$_&2_' \
- -e "s_'python'_'python2'_" -i {} \;
- sed -i 's/python /python2 /' Makefile
+ # work-around crashes in tests as reported in #812304
+ # TODO fix the checks themselves instead
- # -Werror causes errors with newer versions of GCC
- sed -i 's/-Werror//' build/standalone.gypi build/common.gypi
+ # keep old ABI to prevent symbol changes due to GCC5 transition
+ # https://wiki.debian.org/GCC5
+ export CXXFLAGS="${CXXFLAGS} -fno-delete-null-pointer-checks -std=c++98"
- make $ARCH.release library=shared console=readline snapshot=off
+ export GYPFLAGS="-Dhost_arch=$ARCH -DOS=linux"
+
+ # The world isn't ready for python2
+ export PYTHON=python2
+ find build/ test/ tools/ src/ -type f \
+ -exec sed -e 's_^#!/usr/bin/env python$_&2_' \
+ -e 's_^\(#!/usr/bin/python2\).[45]$_\1_' \
+ -e 's_^#!/usr/bin/python$_&2_' \
+ -e "s_'python'_'python2'_" -i {} \;
+ sed -i 's/python /python2 /' Makefile
+
+ # debian patches
+ cd $srcdir/v8-$pkgver
+ msg "p1"
+ patch -p1 < $srcdir/0001_kfreebsd.patch
+ msg "p2"
+ patch -p1 < $srcdir/0002_mips.patch
+ msg "p3"
+ patch -p1 < $srcdir/0002_mips_r15102_backport.patch
+ msg "p4"
+ patch -p1 < $srcdir/0002_mips_r19121_backport.patch
+ msg "p5"
+ patch -p1 < $srcdir/0003_armv4t_disable_vfp.patch
+ msg "p6"
+ patch -p1 < $srcdir/0004_hurd.patch
+ msg "p7"
+ patch -p1 < $srcdir/0008_mksnapshot_stdout.patch
+ msg "p8"
+ patch -p1 < $srcdir/0011_use_system_gyp.patch
+ msg "p9"
+ patch -p1 < $srcdir/0012_loongson_force_cache_flush.patch
+ msg "p10"
+ patch -p1 < $srcdir/0013_gcc_48_compat.patch
+ msg "p11"
+ patch -p1 < $srcdir/0014_cve_2013_6639_6640.patch
+ msg "p12"
+ patch -p1 < $srcdir/0015-Backport-Utils-ApiCheck.patch
+ msg "p13"
+ patch -p1 < $srcdir/0016-remove-this-null.patch
+ msg "p14"
+ patch -p1 < $srcdir/0017_increase_stack_size_for_test.patch
+ # msg "p15"
+ # patch -p1 < $srcdir/0099_powerpc_support.patch
+ msg "p16"
+ patch -p1 < $srcdir/fix_CVE-2014-5256.patch
+ msg "p17"
+ patch -p1 < $srcdir/nodejsREPLACE_INVALID_UTF8.patch
+ msg "p8"
+ patch -p1 < $srcdir/strict_overflow.patch
+ msg "p19"
+ patch -p1 < $srcdir/dont-assume-hardfloat-means-vfpv3.diff
+ msg "p20"
+ patch -p1 < $srcdir/gcc7-fix.patch
+
+ make $ARCH.release library=shared snapshot=off soname_version=$pkgver OS=linux V=1
}
package() {
cd v8-$pkgver
install -Dm755 out/$ARCH.release/d8 $pkgdir/usr/bin/d8
- install -Dm755 out/$ARCH.release/lib.target/libv8.so $pkgdir/usr/lib/libv8.so.3.14.5
+ install -Dm755 out/$ARCH.release/lib.target/libv8.so.$pkgver $pkgdir/usr/lib/libv8.so.$pkgver
install -d $pkgdir/usr/include
install -Dm644 include/*.h $pkgdir/usr/include
install -d $pkgdir/usr/share/licenses/v8
install -m644 LICENSE* ${pkgdir}/usr/share/licenses/v8
+
+ # debian way
+ cd $pkgdir/usr/lib
+ ln -s -T libv8.so.$pkgver libv8.so
}
diff --git a/dont-assume-hardfloat-means-vfpv3.diff b/dont-assume-hardfloat-means-vfpv3.diff
new file mode 100644
index 000000000000..b34f5d9d8dcb
--- /dev/null
+++ b/dont-assume-hardfloat-means-vfpv3.diff
@@ -0,0 +1,13 @@
+Description: don't assume hardfloat means vfpv3
+Author: Peter Michael Green <plugwash@raspbian.org>
+
+--- a/build/common.gypi
++++ b/build/common.gypi
+@@ -170,7 +170,6 @@
+ [ 'v8_use_arm_eabi_hardfloat=="true"', {
+ 'defines': [
+ 'USE_EABI_HARDFLOAT=1',
+- 'CAN_USE_VFP3_INSTRUCTIONS',
+ ],
+ 'target_conditions': [
+ ['_toolset=="target"', {
diff --git a/fix_CVE-2014-5256.patch b/fix_CVE-2014-5256.patch
new file mode 100644
index 000000000000..a6a41479c545
--- /dev/null
+++ b/fix_CVE-2014-5256.patch
@@ -0,0 +1,25 @@
+Description: Fix for CVE-2014-5256
+Bug-Node: https://github.com/joyent/node/commit/530af9cb8e700e7596b3ec812bad123c9fa06356
+Author: Fedor Indutny <fedor@indutny.com>
+Acked-by: Jean Baptiste Favre <debian@jbfavre.org>
+Last-Update: 2014-11-15
+Applied-Upstream: https://github.com/joyent/node/commit/530af9cb8e700e7596b3ec812bad123c9fa06356
+--- a/src/isolate.h
++++ b/src/isolate.h
+@@ -1405,14 +1405,9 @@
+ public:
+ explicit StackLimitCheck(Isolate* isolate) : isolate_(isolate) { }
+
+- bool HasOverflowed() const {
++ inline bool HasOverflowed() const {
+ StackGuard* stack_guard = isolate_->stack_guard();
+- // Stack has overflowed in C++ code only if stack pointer exceeds the C++
+- // stack guard and the limits are not set to interrupt values.
+- // TODO(214): Stack overflows are ignored if a interrupt is pending. This
+- // code should probably always use the initial C++ limit.
+- return (reinterpret_cast<uintptr_t>(this) < stack_guard->climit()) &&
+- stack_guard->IsStackOverflow();
++ return reinterpret_cast<uintptr_t>(this) < stack_guard->real_climit();
+ }
+ private:
+ Isolate* isolate_;
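The patched HasOverflowed() above leans on a common trick: a StackLimitCheck instance lives on the current stack frame, so the numeric value of `this` approximates the stack pointer, and comparing it against the isolate's real C++ limit detects imminent overflow without any interrupt-dependent state. A standalone sketch of the idea (simplified types, not the V8 sources):

    #include <stdint.h>

    class StackLimitCheck {
     public:
      explicit StackLimitCheck(uintptr_t real_climit) : limit_(real_climit) {}
      bool HasOverflowed() const {
        // On a downward-growing stack, this object's address falls as the
        // call chain deepens; dropping below the limit means trouble.
        return reinterpret_cast<uintptr_t>(this) < limit_;
      }
     private:
      uintptr_t limit_;
    };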
diff --git a/gcc7-fix.patch b/gcc7-fix.patch
new file mode 100644
index 000000000000..f929b4aceab9
--- /dev/null
+++ b/gcc7-fix.patch
@@ -0,0 +1,223 @@
+diff -up v8-3.14.5.10/src/arm/assembler-arm.cc.gcc7 v8-3.14.5.10/src/arm/assembler-arm.cc
+diff -up v8-3.14.5.10/src/deoptimizer.cc.gcc7 v8-3.14.5.10/src/deoptimizer.cc
+--- v8-3.14.5.10/src/deoptimizer.cc.gcc7 2012-10-22 09:09:53.000000000 -0400
++++ v8-3.14.5.10/src/deoptimizer.cc 2017-02-28 16:55:25.553045035 -0500
+@@ -1141,7 +1141,7 @@ bool Deoptimizer::DoOsrTranslateCommand(
+ }
+ output->SetRegister(output_reg, static_cast<int32_t>(uint32_value));
+ }
+-
++ // intentional fallthrough
+
+ case Translation::DOUBLE_REGISTER: {
+ // Abort OSR if we don't have a number.
+diff -up v8-3.14.5.10/src/ia32/assembler-ia32.cc.gcc7 v8-3.14.5.10/src/ia32/assembler-ia32.cc
+--- v8-3.14.5.10/src/ia32/assembler-ia32.cc.gcc7 2017-03-01 10:21:11.271775490 -0500
++++ v8-3.14.5.10/src/ia32/assembler-ia32.cc 2017-03-01 10:21:44.242983779 -0500
+@@ -420,6 +420,7 @@ void Assembler::Nop(int bytes) {
+ switch (bytes) {
+ case 2:
+ EMIT(0x66);
++ // intentional fallthrough
+ case 1:
+ EMIT(0x90);
+ return;
+@@ -436,6 +437,7 @@ void Assembler::Nop(int bytes) {
+ return;
+ case 6:
+ EMIT(0x66);
++ // intentional fallthrough
+ case 5:
+ EMIT(0xf);
+ EMIT(0x1f);
+@@ -456,12 +458,15 @@ void Assembler::Nop(int bytes) {
+ case 11:
+ EMIT(0x66);
+ bytes--;
++ // intentional fallthrough
+ case 10:
+ EMIT(0x66);
+ bytes--;
++ // intentional fallthrough
+ case 9:
+ EMIT(0x66);
+ bytes--;
++ // intentional fallthrough
+ case 8:
+ EMIT(0xf);
+ EMIT(0x1f);
+diff -up v8-3.14.5.10/src/ic.cc.gcc7 v8-3.14.5.10/src/ic.cc
+--- v8-3.14.5.10/src/ic.cc.gcc7 2012-10-22 09:09:53.000000000 -0400
++++ v8-3.14.5.10/src/ic.cc 2017-02-28 16:55:25.554045011 -0500
+@@ -1989,8 +1989,8 @@ void KeyedStoreIC::UpdateCaches(LookupRe
+ name, receiver, field_index, transition, strict_mode);
+ break;
+ }
+- // fall through.
+ }
++ // intentional fallthrough
+ case NORMAL:
+ case CONSTANT_FUNCTION:
+ case CALLBACKS:
+diff -up v8-3.14.5.10/src/objects.cc.gcc7 v8-3.14.5.10/src/objects.cc
+--- v8-3.14.5.10/src/objects.cc.gcc7 2017-02-28 16:55:25.516045908 -0500
++++ v8-3.14.5.10/src/objects.cc 2017-02-28 16:55:25.555044988 -0500
+@@ -10302,7 +10302,7 @@ void JSObject::GetElementsCapacityAndUsa
+ *used = Smi::cast(JSArray::cast(this)->length())->value();
+ break;
+ }
+- // Fall through if packing is not guaranteed.
++ // intentional fallthrough
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ backing_store = FixedArray::cast(backing_store_base);
+@@ -10324,7 +10324,7 @@ void JSObject::GetElementsCapacityAndUsa
+ *used = Smi::cast(JSArray::cast(this)->length())->value();
+ break;
+ }
+- // Fall through if packing is not guaranteed.
++ // intentional fallthrough
+ case FAST_HOLEY_DOUBLE_ELEMENTS: {
+ FixedDoubleArray* elms = FixedDoubleArray::cast(elements());
+ *capacity = elms->length();
+diff -up v8-3.14.5.10/src/objects.h.gcc7 v8-3.14.5.10/src/objects.h
+--- v8-3.14.5.10/src/objects.h.gcc7 2017-02-28 16:55:25.517045885 -0500
++++ v8-3.14.5.10/src/objects.h 2017-02-28 16:55:25.556044964 -0500
+@@ -2785,24 +2785,10 @@ class HashTable: public FixedArray {
+ USE_CUSTOM_MINIMUM_CAPACITY
+ };
+
+- // Wrapper methods
+- inline uint32_t Hash(Key key) {
+- if (Shape::UsesSeed) {
+- return Shape::SeededHash(key,
+- GetHeap()->HashSeed());
+- } else {
+- return Shape::Hash(key);
+- }
+- }
+-
+- inline uint32_t HashForObject(Key key, Object* object) {
+- if (Shape::UsesSeed) {
+- return Shape::SeededHashForObject(key,
+- GetHeap()->HashSeed(), object);
+- } else {
+- return Shape::HashForObject(key, object);
+- }
+- }
++ // Wrapper methods. Defined in src/objects-inl.h
++ // to break a cycle with src/heap/heap.h.
++ inline uint32_t Hash(Key key);
++ inline uint32_t HashForObject(Key key, Object* object);
+
+ // Returns the number of elements in the hash table.
+ int NumberOfElements() {
+diff -up v8-3.14.5.10/src/objects-inl.h.gcc7 v8-3.14.5.10/src/objects-inl.h
+--- v8-3.14.5.10/src/objects-inl.h.gcc7 2017-02-28 16:55:25.517045885 -0500
++++ v8-3.14.5.10/src/objects-inl.h 2017-02-28 16:55:25.556044964 -0500
+@@ -52,6 +52,26 @@
+ namespace v8 {
+ namespace internal {
+
++template<typename Shape, typename Key>
++uint32_t HashTable<Shape, Key>::Hash(Key key) {
++ if (Shape::UsesSeed) {
++ return Shape::SeededHash(key,
++ GetHeap()->HashSeed());
++ } else {
++ return Shape::Hash(key);
++ }
++}
++
++template<typename Shape, typename Key>
++uint32_t HashTable<Shape, Key>::HashForObject(Key key, Object* object) {
++ if (Shape::UsesSeed) {
++ return Shape::SeededHashForObject(key,
++ GetHeap()->HashSeed(), object);
++ } else {
++ return Shape::HashForObject(key, object);
++ }
++}
++
+ PropertyDetails::PropertyDetails(Smi* smi) {
+ value_ = smi->value();
+ }
+diff -up v8-3.14.5.10/src/parser.cc.gcc7 v8-3.14.5.10/src/parser.cc
+--- v8-3.14.5.10/src/parser.cc.gcc7 2017-02-28 16:55:25.450047466 -0500
++++ v8-3.14.5.10/src/parser.cc 2017-02-28 16:55:25.557044941 -0500
+@@ -3649,8 +3649,7 @@ Expression* Parser::ParsePrimaryExpressi
+ result = ParseV8Intrinsic(CHECK_OK);
+ break;
+ }
+- // If we're not allowing special syntax we fall-through to the
+- // default case.
++ // intentional fallthrough
+
+ default: {
+ Token::Value tok = Next();
+@@ -5376,8 +5375,8 @@ RegExpTree* RegExpParser::ParseDisjuncti
+ if (ParseIntervalQuantifier(&dummy, &dummy)) {
+ ReportError(CStrVector("Nothing to repeat") CHECK_FAILED);
+ }
+- // fallthrough
+ }
++ // intentional fallthrough
+ default:
+ builder->AddCharacter(current());
+ Advance();
+diff -up v8-3.14.5.10/src/spaces.h.gcc7 v8-3.14.5.10/src/spaces.h
+--- v8-3.14.5.10/src/spaces.h.gcc7 2012-10-22 09:09:53.000000000 -0400
++++ v8-3.14.5.10/src/spaces.h 2017-02-28 16:55:25.557044941 -0500
+@@ -2613,15 +2613,15 @@ class PointerChunkIterator BASE_EMBEDDED
+ return old_pointer_iterator_.next();
+ }
+ state_ = kMapState;
+- // Fall through.
+ }
++ // intentional fallthrough
+ case kMapState: {
+ if (map_iterator_.has_next()) {
+ return map_iterator_.next();
+ }
+ state_ = kLargeObjectState;
+- // Fall through.
+ }
++ // intentional fallthrough
+ case kLargeObjectState: {
+ HeapObject* heap_object;
+ do {
+diff -up v8-3.14.5.10/src/x64/assembler-x64.cc.gcc7 v8-3.14.5.10/src/x64/assembler-x64.cc
+--- v8-3.14.5.10/src/x64/assembler-x64.cc.gcc7 2017-03-01 10:19:40.086088012 -0500
++++ v8-3.14.5.10/src/x64/assembler-x64.cc 2017-03-01 10:20:51.859241627 -0500
+@@ -1800,6 +1800,7 @@ void Assembler::Nop(int n) {
+ switch (n) {
+ case 2:
+ emit(0x66);
++ // intentional fallthrough
+ case 1:
+ emit(0x90);
+ return;
+@@ -1816,6 +1817,7 @@ void Assembler::Nop(int n) {
+ return;
+ case 6:
+ emit(0x66);
++ // intentional fallthrough
+ case 5:
+ emit(0x0f);
+ emit(0x1f);
+@@ -1836,12 +1838,15 @@ void Assembler::Nop(int n) {
+ case 11:
+ emit(0x66);
+ n--;
++ // intentional fallthrough
+ case 10:
+ emit(0x66);
+ n--;
++ // intentional fallthrough
+ case 9:
+ emit(0x66);
+ n--;
++ // intentional fallthrough
+ case 8:
+ emit(0x0f);
+ emit(0x1f);
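Nearly every hunk above inserts a fallthrough comment: GCC 7 enables -Wimplicit-fallthrough (with -Wextra) and suppresses the warning when a comment matching its recognized patterns, such as "intentional fallthrough", immediately precedes the next case label. A minimal reproduction of the pattern, loosely modeled on the Assembler::Nop() style patched above (hypothetical function, not V8 code):

    // Counts how many 0x66 prefixes a multi-byte nop of length n would emit.
    int CountNopPrefixes(int n) {
      int prefixes = 0;
      switch (n) {
        case 3:
          ++prefixes;
          // intentional fallthrough
        case 2:
          ++prefixes;
          // intentional fallthrough
        case 1:
          return prefixes;
        default:
          return -1;
      }
    }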
diff --git a/nodejsREPLACE_INVALID_UTF8.patch b/nodejsREPLACE_INVALID_UTF8.patch
new file mode 100644
index 000000000000..4f1337ca7b23
--- /dev/null
+++ b/nodejsREPLACE_INVALID_UTF8.patch
@@ -0,0 +1,18 @@
+Description: nodejs 0.10.29 adds this option, and its addons (node-nan)
+ expect REPLACE_INVALID_UTF8 to exist; otherwise they define it to be 0.
+ Simplify their job by just setting it to 0 here too.
+Author: Jérémy Lal <kapouer@melix.org>
+Last-Update: 2015-08-18
+Forwarded: not-needed
+--- a/include/v8.h
++++ b/include/v8.h
+@@ -1076,7 +1076,8 @@
+ NO_OPTIONS = 0,
+ HINT_MANY_WRITES_EXPECTED = 1,
+ NO_NULL_TERMINATION = 2,
+- PRESERVE_ASCII_NULL = 4
++ PRESERVE_ASCII_NULL = 4,
++ REPLACE_INVALID_UTF8 = 0
+ };
+
+ // 16-bit character codes.
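Defining REPLACE_INVALID_UTF8 as 0 keeps it inert: OR-ing it into a WriteUtf8 options mask changes nothing, so addons built against node-nan compile against this header and behave exactly as before. A hedged usage sketch, assuming the 3.14-era WriteUtf8 signature from include/v8.h:

    #include <v8.h>

    // NO_NULL_TERMINATION | REPLACE_INVALID_UTF8 == NO_NULL_TERMINATION here,
    // because the backported flag is defined to 0.
    int WriteCompat(v8::Handle<v8::String> s, char* buf, int len) {
      return s->WriteUtf8(buf, len, NULL,
          v8::String::NO_NULL_TERMINATION | v8::String::REPLACE_INVALID_UTF8);
    }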
diff --git a/series b/series
new file mode 100644
index 000000000000..4b2f97c65d04
--- /dev/null
+++ b/series
@@ -0,0 +1,19 @@
+0001_kfreebsd.patch
+0002_mips.patch
+0002_mips_r15102_backport.patch
+0002_mips_r19121_backport.patch
+0003_armv4t_disable_vfp.patch
+0004_hurd.patch
+0008_mksnapshot_stdout.patch
+0011_use_system_gyp.patch
+0012_loongson_force_cache_flush.patch
+0013_gcc_48_compat.patch
+0014_cve_2013_6639_6640.patch
+0015-Backport-Utils-ApiCheck.patch
+0016-remove-this-null.patch
+0017_increase_stack_size_for_test.patch
+0099_powerpc_support.patch
+fix_CVE-2014-5256.patch
+nodejsREPLACE_INVALID_UTF8.patch
+strict_overflow.patch
+dont-assume-hardfloat-means-vfpv3.diff
diff --git a/strict_overflow.patch b/strict_overflow.patch
new file mode 100644
index 000000000000..b50cce5c5637
--- /dev/null
+++ b/strict_overflow.patch
@@ -0,0 +1,15 @@
+Description: silence strict-overflow error when building with gcc5
+Forwarded: not-yet
+Author: Jérémy Lal <kapouer@melix.org>
+Last-Update: 2015-08-18
+--- a/src/bignum.cc
++++ b/src/bignum.cc
+@@ -105,7 +105,7 @@
+ const int kMaxUint64DecimalDigits = 19;
+ Zero();
+ int length = value.length();
+- int pos = 0;
++ uint pos = 0;
+ // Let's just say that each digit needs 4 bits.
+ while (length >= kMaxUint64DecimalDigits) {
+ uint64_t digits = ReadUInt64(value, pos, kMaxUint64DecimalDigits);