Description: hurd-i386 support
 https://bugs.debian.org/747246
Author: Svante Signell
Acked-By: Jérémy Lal
Last-Update: 2014-05-06
---
 src/platform-gnu.cc | 890 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 tools/gyp/v8.gyp    |  11 +
 2 files changed, 901 insertions(+)
 create mode 100644 src/platform-gnu.cc
--- /dev/null
+++ b/src/platform-gnu.cc
@@ -0,0 +1,890 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Platform specific code for GNU/Hurd goes here. For the POSIX compatible
+// parts the implementation is in platform-posix.cc.
+
+#include <pthread.h>
+#include <semaphore.h>
+#include <signal.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <sys/types.h>
+#include <ucontext.h>
+#include <stdlib.h>
+
+#include <sys/types.h>  // mmap & munmap
+#include <sys/mman.h>   // mmap & munmap
+#include <sys/stat.h>   // open
+#include <fcntl.h>      // open
+#include <unistd.h>     // getpagesize
+// If you don't have execinfo.h then you need devel/libexecinfo from ports.
+#include <execinfo.h>   // backtrace, backtrace_symbols
+#include <strings.h>    // index
+#include <errno.h>
+#include <stdarg.h>
+#include <limits.h>
+
+#undef MAP_TYPE
+
+#include "v8.h"
+#include "v8threads.h"
+
+#include "platform-posix.h"
+#include "platform.h"
+#include "vm-state-inl.h"
+
+#ifndef MAP_NORESERVE
+#define MAP_NORESERVE 0
+#endif
+
+namespace v8 {
+namespace internal {
+
+// 0 is never a valid thread id on GNU/Hurd
+static const pthread_t kNoThread = (pthread_t) 0;
+
+
+double ceiling(double x) {
+  return ceil(x);
+}
+
+
+static Mutex* limit_mutex = NULL;
+
+
+void OS::PostSetUp() {
+  POSIXPostSetUp();
+}
+
+
+void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
+  __asm__ __volatile__("" : : : "memory");
+  *ptr = value;
+}
+
+
+uint64_t OS::CpuFeaturesImpliedByPlatform() {
+  return 0;  // GNU/Hurd runs on anything.
+}
+
+
+int OS::ActivationFrameAlignment() {
+  // 16 byte alignment on GNU/Hurd
+  return 16;
+}
+
+
+const char* OS::LocalTimezone(double time) {
+  if (isnan(time)) return "";
+  time_t tv = static_cast<time_t>(floor(time/msPerSecond));
+  struct tm* t = localtime(&tv);
+  if (NULL == t) return "";
+  return t->tm_zone;
+}
+
+
+double OS::LocalTimeOffset() {
+  time_t tv = time(NULL);
+  struct tm* t = localtime(&tv);
+  // tm_gmtoff includes any daylight savings offset, so subtract it.
+  return static_cast<double>(t->tm_gmtoff * msPerSecond -
+                             (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
+}
+
+
+// We keep the lowest and highest addresses mapped as a quick way of
+// determining that pointers are outside the heap (used mostly in assertions
+// and verification). The estimate is conservative, i.e., not all addresses in
+// 'allocated' space are actually allocated to our heap. The range is
+// [lowest, highest), inclusive on the low end and exclusive on the high end.
+static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
+static void* highest_ever_allocated = reinterpret_cast<void*>(0);
+
+
+static void UpdateAllocatedSpaceLimits(void* address, int size) {
+  ASSERT(limit_mutex != NULL);
+  ScopedLock lock(limit_mutex);
+
+  lowest_ever_allocated = Min(lowest_ever_allocated, address);
+  highest_ever_allocated =
+      Max(highest_ever_allocated,
+          reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
+}
+
+
+bool OS::IsOutsideAllocatedSpace(void* address) {
+  return address < lowest_ever_allocated || address >= highest_ever_allocated;
+}
+
+
+size_t OS::AllocateAlignment() {
+  return getpagesize();
+}
+
+
+void* OS::Allocate(const size_t requested,
+                   size_t* allocated,
+                   bool executable) {
+  const size_t msize = RoundUp(requested, getpagesize());
+  int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
+  void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
+
+  if (mbase == MAP_FAILED) {
+    LOG(ISOLATE, StringEvent("OS::Allocate", "mmap failed"));
+    return NULL;
+  }
+  *allocated = msize;
+  UpdateAllocatedSpaceLimits(mbase, msize);
+  return mbase;
+}
+
+
+void OS::Free(void* buf, const size_t length) {
+  // TODO(1240712): munmap has a return value which is ignored here.
+  int result = munmap(buf, length);
+  USE(result);
+  ASSERT(result == 0);
+}
+
+
+void OS::Sleep(int milliseconds) {
+  unsigned int ms = static_cast<unsigned int>(milliseconds);
+  usleep(1000 * ms);
+}
+
+
+void OS::Abort() {
+  // Redirect to std abort to signal abnormal program termination.
+  abort();
+}
+
+
+void OS::DebugBreak() {
+#if (defined(__arm__) || defined(__thumb__))
+# if defined(CAN_USE_ARMV5_INSTRUCTIONS)
+  asm("bkpt 0");
+# endif
+#else
+  asm("int $3");
+#endif
+}
+
+
+class PosixMemoryMappedFile : public OS::MemoryMappedFile {
+ public:
+  PosixMemoryMappedFile(FILE* file, void* memory, int size)
+      : file_(file), memory_(memory), size_(size) { }
+  virtual ~PosixMemoryMappedFile();
+  virtual void* memory() { return memory_; }
+  virtual int size() { return size_; }
+ private:
+  FILE* file_;
+  void* memory_;
+  int size_;
+};
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
+  FILE* file = fopen(name, "r+");
+  if (file == NULL) return NULL;
+
+  fseek(file, 0, SEEK_END);
+  int size = ftell(file);
+
+  void* memory =
+      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+  return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
+    void* initial) {
+  FILE* file = fopen(name, "w+");
+  if (file == NULL) return NULL;
+  int result = fwrite(initial, size, 1, file);
+  if (result < 1) {
+    fclose(file);
+    return NULL;
+  }
+  void* memory =
+      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+  return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+PosixMemoryMappedFile::~PosixMemoryMappedFile() {
+  if (memory_) munmap(memory_, size_);
+  fclose(file_);
+}
+
+
+static unsigned StringToLong(char* buffer) {
+  return static_cast<unsigned>(strtol(buffer, NULL, 16));  // NOLINT
+}
+
+
+void OS::LogSharedLibraryAddresses() {
+  static const int MAP_LENGTH = 1024;
+  int fd = open("/proc/self/maps", O_RDONLY);
+  if (fd < 0) return;
+  while (true) {
+    char addr_buffer[11];
+    addr_buffer[0] = '0';
+    addr_buffer[1] = 'x';
+    addr_buffer[10] = 0;
+    int result = read(fd, addr_buffer + 2, 8);
+    if (result < 8) break;
+    unsigned start = StringToLong(addr_buffer);
+    result = read(fd, addr_buffer + 2, 1);
+    if (result < 1) break;
+    if (addr_buffer[2] != '-') break;
+    result = read(fd, addr_buffer + 2, 8);
+    if (result < 8) break;
+    unsigned end = StringToLong(addr_buffer);
+    char buffer[MAP_LENGTH];
+    int bytes_read = -1;
+    do {
+      bytes_read++;
+      if (bytes_read >= MAP_LENGTH - 1)
+        break;
+      result = read(fd, buffer + bytes_read, 1);
+      if (result < 1) break;
+    } while (buffer[bytes_read] != '\n');
+    buffer[bytes_read] = 0;
+    // Ignore mappings that are not executable.
+    if (buffer[3] != 'x') continue;
+    char* start_of_path = index(buffer, '/');
+    // There may be no filename in this line. Skip to next.
+    if (start_of_path == NULL) continue;
+    buffer[bytes_read] = 0;
+    LOG(i::Isolate::Current(), SharedLibraryEvent(start_of_path, start, end));
+  }
+  close(fd);
+}
+
+
+void OS::SignalCodeMovingGC() {
+}
+
+
+int OS::StackWalk(Vector<OS::StackFrame> frames) {
+  int frames_size = frames.length();
+  ScopedVector<void*> addresses(frames_size);
+
+  int frames_count = backtrace(addresses.start(), frames_size);
+
+  char** symbols = backtrace_symbols(addresses.start(), frames_count);
+  if (symbols == NULL) {
+    return kStackWalkError;
+  }
+
+  for (int i = 0; i < frames_count; i++) {
+    frames[i].address = addresses[i];
+    // Format a text representation of the frame based on the information
+    // available.
+    SNPrintF(MutableCStrVector(frames[i].text, kStackWalkMaxTextLen),
+             "%s",
+             symbols[i]);
+    // Make sure line termination is in place.
+    frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
+  }
+
+  free(symbols);
+
+  return frames_count;
+}
+
+
+// Constants used for mmap.
+static const int kMmapFd = -1;
+static const int kMmapFdOffset = 0;
+
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
+
+VirtualMemory::VirtualMemory(size_t size) {
+  address_ = ReserveRegion(size);
+  size_ = size;
+}
+
+
+VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+    : address_(NULL), size_(0) {
+  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
+  size_t request_size = RoundUp(size + alignment,
+                                static_cast<intptr_t>(OS::AllocateAlignment()));
+  void* reservation = mmap(OS::GetRandomMmapAddr(),
+                           request_size,
+                           PROT_NONE,
+                           MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+                           kMmapFd,
+                           kMmapFdOffset);
+  if (reservation == MAP_FAILED) return;
+
+  Address base = static_cast<Address>(reservation);
+  Address aligned_base = RoundUp(base, alignment);
+  ASSERT_LE(base, aligned_base);
+
+  // Unmap extra memory reserved before and after the desired block.
+  if (aligned_base != base) {
+    size_t prefix_size = static_cast<size_t>(aligned_base - base);
+    OS::Free(base, prefix_size);
+    request_size -= prefix_size;
+  }
+
+  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
+  ASSERT_LE(aligned_size, request_size);
+
+  if (aligned_size != request_size) {
+    size_t suffix_size = request_size - aligned_size;
+    OS::Free(aligned_base + aligned_size, suffix_size);
+    request_size -= suffix_size;
+  }
+
+  ASSERT(aligned_size == request_size);
+
+  address_ = static_cast<void*>(aligned_base);
+  size_ = aligned_size;
+}
+
+
+VirtualMemory::~VirtualMemory() {
+  if (IsReserved()) {
+    bool result = ReleaseRegion(address(), size());
+    ASSERT(result);
+    USE(result);
+  }
+}
+
+
+bool VirtualMemory::IsReserved() {
+  return address_ != NULL;
+}
+
+
+void VirtualMemory::Reset() {
+  address_ = NULL;
+  size_ = 0;
+}
+
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+  return CommitRegion(address, size, is_executable);
+}
+
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+  return UncommitRegion(address, size);
+}
+
+
+bool VirtualMemory::Guard(void* address) {
+  OS::Guard(address, OS::CommitPageSize());
+  return true;
+}
+
+
+void* VirtualMemory::ReserveRegion(size_t size) {
+  void* result = mmap(OS::GetRandomMmapAddr(),
+                      size,
+                      PROT_NONE,
+                      MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+                      kMmapFd,
+                      kMmapFdOffset);
+
+  if (result == MAP_FAILED) return NULL;
+
+  return result;
+}
+
+
+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+  if (MAP_FAILED == mmap(base,
+                         size,
+                         prot,
+                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,
+                         kMmapFd,
+                         kMmapFdOffset)) {
+    return false;
+  }
+
+  UpdateAllocatedSpaceLimits(base, size);
+  return true;
+}
+
+
+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
+  return mmap(base,
+              size,
+              PROT_NONE,
+              MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
+              kMmapFd,
+              kMmapFdOffset) != MAP_FAILED;
+}
+
+
+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+  return munmap(base, size) == 0;
+}
+
+
+class Thread::PlatformData : public Malloced {
+ public:
+  pthread_t thread_;  // Thread handle for pthread.
+};
+
+
+Thread::Thread(const Options& options)
+    : data_(new PlatformData),
+      stack_size_(options.stack_size()) {
+  set_name(options.name());
+}
+
+
+Thread::~Thread() {
+  delete data_;
+}
+
+
+static void* ThreadEntry(void* arg) {
+  Thread* thread = reinterpret_cast<Thread*>(arg);
+  // This is also initialized by the first argument to pthread_create() but we
+  // don't know which thread will run first (the original thread or the new
+  // one) so we initialize it here too.
+  thread->data()->thread_ = pthread_self();
+  ASSERT(thread->data()->thread_ != kNoThread);
+  thread->Run();
+  return NULL;
+}
+
+
+void Thread::set_name(const char* name) {
+  strncpy(name_, name, sizeof(name_));
+  name_[sizeof(name_) - 1] = '\0';
+}
+
+
+void Thread::Start() {
+  pthread_attr_t* attr_ptr = NULL;
+  pthread_attr_t attr;
+  if (stack_size_ > 0) {
+    pthread_attr_init(&attr);
+    pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
+    attr_ptr = &attr;
+  }
+  pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
+  ASSERT(data_->thread_ != kNoThread);
+}
+
+
+void Thread::Join() {
+  pthread_join(data_->thread_, NULL);
+}
+
+
+Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
+  pthread_key_t key;
+  int result = pthread_key_create(&key, NULL);
+  USE(result);
+  ASSERT(result == 0);
+  return static_cast<LocalStorageKey>(key);
+}
+
+
+void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
+  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
+  int result = pthread_key_delete(pthread_key);
+  USE(result);
+  ASSERT(result == 0);
+}
+
+
+void* Thread::GetThreadLocal(LocalStorageKey key) {
+  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
+  return pthread_getspecific(pthread_key);
+}
+
+
+void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
+  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
+  pthread_setspecific(pthread_key, value);
+}
+
+
+void Thread::YieldCPU() {
+  sched_yield();
+}
+
+
+class GNUMutex : public Mutex {
+ public:
+  GNUMutex() {
+    pthread_mutexattr_t attrs;
+    int result = pthread_mutexattr_init(&attrs);
+    ASSERT(result == 0);
+    result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE);
+    ASSERT(result == 0);
+    result = pthread_mutex_init(&mutex_, &attrs);
+    ASSERT(result == 0);
+    USE(result);
+  }
+
+  virtual ~GNUMutex() { pthread_mutex_destroy(&mutex_); }
+
+  virtual int Lock() {
+    int result = pthread_mutex_lock(&mutex_);
+    return result;
+  }
+
+  virtual int Unlock() {
+    int result = pthread_mutex_unlock(&mutex_);
+    return result;
+  }
+
+  virtual bool TryLock() {
+    int result = pthread_mutex_trylock(&mutex_);
+    // Return false if the lock is busy and locking failed.
+    if (result == EBUSY) {
+      return false;
+    }
+    ASSERT(result == 0);  // Verify no other errors.
+    return true;
+  }
+
+ private:
+  pthread_mutex_t mutex_;  // Pthread mutex for POSIX platforms.
+};
+
+
+Mutex* OS::CreateMutex() {
+  return new GNUMutex();
+}
+
+
+class GNUSemaphore : public Semaphore {
+ public:
+  explicit GNUSemaphore(int count) { sem_init(&sem_, 0, count); }
+  virtual ~GNUSemaphore() { sem_destroy(&sem_); }
+
+  virtual void Wait();
+  virtual bool Wait(int timeout);
+  virtual void Signal() { sem_post(&sem_); }
+ private:
+  sem_t sem_;
+};
+
+
+void GNUSemaphore::Wait() {
+  while (true) {
+    int result = sem_wait(&sem_);
+    if (result == 0) return;  // Successfully got semaphore.
+    CHECK(result == -1 && errno == EINTR);  // Signal caused spurious wakeup.
+  }
+}
+
+
+bool GNUSemaphore::Wait(int timeout) {
+  const long kOneSecondMicros = 1000000;  // NOLINT
+
+  // Split timeout into second and nanosecond parts.
+  struct timeval delta;
+  delta.tv_usec = timeout % kOneSecondMicros;
+  delta.tv_sec = timeout / kOneSecondMicros;
+
+  struct timeval current_time;
+  // Get the current time.
+  if (gettimeofday(&current_time, NULL) == -1) {
+    return false;
+  }
+
+  // Calculate time for end of timeout.
+  struct timeval end_time;
+  timeradd(&current_time, &delta, &end_time);
+
+  struct timespec ts;
+  TIMEVAL_TO_TIMESPEC(&end_time, &ts);
+  while (true) {
+    int result = sem_timedwait(&sem_, &ts);
+    if (result == 0) return true;  // Successfully got semaphore.
+    if (result == -1 && errno == ETIMEDOUT) return false;  // Timeout.
+    CHECK(result == -1 && errno == EINTR);  // Signal caused spurious wakeup.
+  }
+}
+
+
+Semaphore* OS::CreateSemaphore(int count) {
+  return new GNUSemaphore(count);
+}
+
+
+static pthread_t GetThreadID() {
+  pthread_t thread_id = pthread_self();
+  return thread_id;
+}
+
+
+class Sampler::PlatformData : public Malloced {
+ public:
+  PlatformData() : vm_tid_(GetThreadID()) {}
+
+  pthread_t vm_tid() const { return vm_tid_; }
+
+ private:
+  pthread_t vm_tid_;
+};
+
+
+static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
+  USE(info);
+  if (signal != SIGPROF) return;
+  Isolate* isolate = Isolate::UncheckedCurrent();
+  if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) {
+    // We require a fully initialized and entered isolate.
+    return;
+  }
+  if (v8::Locker::IsActive() &&
+      !isolate->thread_manager()->IsLockedByCurrentThread()) {
+    return;
+  }
+
+  Sampler* sampler = isolate->logger()->sampler();
+  if (sampler == NULL || !sampler->IsActive()) return;
+
+  TickSample sample_obj;
+  TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
+  if (sample == NULL) sample = &sample_obj;
+
+  // Extracting the sample from the context is extremely machine dependent.
+  ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
+  mcontext_t& mcontext = ucontext->uc_mcontext;
+  sample->state = isolate->current_vm_state();
+#if V8_HOST_ARCH_IA32
+  sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_EIP]);
+  sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_ESP]);
+  sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_EBP]);
+#endif
+  sampler->SampleStack(sample);
+  sampler->Tick(sample);
+}
+
+
+class SignalSender : public Thread {
+ public:
+  enum SleepInterval {
+    HALF_INTERVAL,
+    FULL_INTERVAL
+  };
+
+  static const int kSignalSenderStackSize = 64 * KB;
+
+  explicit SignalSender(int interval)
+      : Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
+        interval_(interval) {}
+
+  static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
+  static void TearDown() { delete mutex_; }
+
+  static void AddActiveSampler(Sampler* sampler) {
+    ScopedLock lock(mutex_);
+    SamplerRegistry::AddActiveSampler(sampler);
+    if (instance_ == NULL) {
+      // Install a signal handler.
+      struct sigaction sa;
+      sa.sa_sigaction = ProfilerSignalHandler;
+      sigemptyset(&sa.sa_mask);
+      sa.sa_flags = SA_RESTART | SA_SIGINFO;
+      signal_handler_installed_ =
+          (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
+
+      // Start a thread that sends SIGPROF signal to VM threads.
+      instance_ = new SignalSender(sampler->interval());
+      instance_->Start();
+    } else {
+      ASSERT(instance_->interval_ == sampler->interval());
+    }
+  }
+
+  static void RemoveActiveSampler(Sampler* sampler) {
+    ScopedLock lock(mutex_);
+    SamplerRegistry::RemoveActiveSampler(sampler);
+    if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
+      RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
+      delete instance_;
+      instance_ = NULL;
+
+      // Restore the old signal handler.
+      if (signal_handler_installed_) {
+        sigaction(SIGPROF, &old_signal_handler_, 0);
+        signal_handler_installed_ = false;
+      }
+    }
+  }
+
+  // Implement Thread::Run().
+  virtual void Run() {
+    SamplerRegistry::State state;
+    while ((state = SamplerRegistry::GetState()) !=
+           SamplerRegistry::HAS_NO_SAMPLERS) {
+      bool cpu_profiling_enabled =
+          (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
+      bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
+      // When CPU profiling is enabled both JavaScript and C++ code is
+      // profiled. We must not suspend.
+      if (!cpu_profiling_enabled) {
+        if (rate_limiter_.SuspendIfNecessary()) continue;
+      }
+      if (cpu_profiling_enabled && runtime_profiler_enabled) {
+        if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
+          return;
+        }
+        Sleep(HALF_INTERVAL);
+        if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
+          return;
+        }
+        Sleep(HALF_INTERVAL);
+      } else {
+        if (cpu_profiling_enabled) {
+          if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile,
+                                                      this)) {
+            return;
+          }
+        }
+        if (runtime_profiler_enabled) {
+          if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile,
+                                                      NULL)) {
+            return;
+          }
+        }
+        Sleep(FULL_INTERVAL);
+      }
+    }
+  }
+
+  static void DoCpuProfile(Sampler* sampler, void* raw_sender) {
+    if (!sampler->IsProfiling()) return;
+    SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender);
+    sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
+  }
+
+  static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
+    if (!sampler->isolate()->IsInitialized()) return;
+    sampler->isolate()->runtime_profiler()->NotifyTick();
+  }
+
+  void SendProfilingSignal(pthread_t tid) {
+    if (!signal_handler_installed_) return;
+    pthread_kill(tid, SIGPROF);
+  }
+
+  void Sleep(SleepInterval full_or_half) {
+    // Convert ms to us and subtract 100 us to compensate delays
+    // occurring during signal delivery.
+    useconds_t interval = interval_ * 1000 - 100;
+    if (full_or_half == HALF_INTERVAL) interval /= 2;
+    int result = usleep(interval);
+#ifdef DEBUG
+    if (result != 0 && errno != EINTR) {
+      fprintf(stderr,
+              "SignalSender usleep error; interval = %u, errno = %d\n",
+              interval,
+              errno);
+      ASSERT(result == 0 || errno == EINTR);
+    }
+#endif
+    USE(result);
+  }
+
+  const int interval_;
+  RuntimeProfilerRateLimiter rate_limiter_;
+
+  // Protects the process wide state below.
+  static Mutex* mutex_;
+  static SignalSender* instance_;
+  static bool signal_handler_installed_;
+  static struct sigaction old_signal_handler_;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(SignalSender);
+};
+
+Mutex* SignalSender::mutex_ = NULL;
+SignalSender* SignalSender::instance_ = NULL;
+struct sigaction SignalSender::old_signal_handler_;
+bool SignalSender::signal_handler_installed_ = false;
+
+
+void OS::SetUp() {
+  // Seed the random number generator.
+  // Convert the current time to a 64-bit integer first, before converting it
+  // to an unsigned. Going directly can cause an overflow and the seed to be
+  // set to all ones. The seed will be identical for different instances that
+  // call this setup code within the same millisecond.
+  uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
+  srandom(static_cast<unsigned int>(seed));
+  limit_mutex = CreateMutex();
+  SignalSender::SetUp();
+}
+
+
+void OS::TearDown() {
+  SignalSender::TearDown();
+  delete limit_mutex;
+}
+
+
+Sampler::Sampler(Isolate* isolate, int interval)
+    : isolate_(isolate),
+      interval_(interval),
+      profiling_(false),
+      active_(false),
+      samples_taken_(0) {
+  data_ = new PlatformData;
+}
+
+
+Sampler::~Sampler() {
+  ASSERT(!IsActive());
+  delete data_;
+}
+
+
+void Sampler::Start() {
+  ASSERT(!IsActive());
+  SetActive(true);
+  SignalSender::AddActiveSampler(this);
+}
+
+
+void Sampler::Stop() {
+  ASSERT(IsActive());
+  SignalSender::RemoveActiveSampler(this);
+  SetActive(false);
+}
+
+
+} }  // namespace v8::internal
--- a/tools/gyp/v8.gyp
+++ b/tools/gyp/v8.gyp
@@ -723,6 +723,17 @@
             '../../src/platform-posix.cc'
           ]},
         ],
+        ['OS=="hurd"', {
+          'link_settings': {
+            'libraries': [
+              '-lpthread',
+          ]},
+          'sources': [
+            '../../src/platform-gnu.cc',
+            '../../src/platform-posix.cc'
+          ],
+        }
+        ],
         ['OS=="win"', {
           'sources': [
             '../../src/platform-win32.cc',