os_solaris.cpp 216.1 KB
Newer Older
D
duke 已提交
1
/*
2
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
D
duke 已提交
3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
19 20 21
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
D
duke 已提交
22 23 24
 *
 */

25 26 27 28 29 30 31
// no precompiled headers
#include "classfile/classLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
32
#include "compiler/disassembler.hpp"
33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50
#include "interpreter/interpreter.hpp"
#include "jvm_solaris.h"
#include "memory/allocation.inline.hpp"
#include "memory/filemap.hpp"
#include "mutex_solaris.inline.hpp"
#include "oops/oop.inline.hpp"
#include "os_share_solaris.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm.h"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/globals.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
51
#include "runtime/orderAccess.inline.hpp"
52 53 54 55 56
#include "runtime/osThread.hpp"
#include "runtime/perfMemory.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/statSampler.hpp"
#include "runtime/stubRoutines.hpp"
57
#include "runtime/thread.inline.hpp"
58 59 60
#include "runtime/threadCritical.hpp"
#include "runtime/timer.hpp"
#include "services/attachListener.hpp"
61
#include "services/memTracker.hpp"
62
#include "services/runtimeService.hpp"
63
#include "utilities/decoder.hpp"
64 65 66 67
#include "utilities/defaultStream.hpp"
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/vmError.hpp"
D
duke 已提交
68 69 70 71

// put OS-includes here
# include <dlfcn.h>
# include <errno.h>
72
# include <exception>
D
duke 已提交
73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105
# include <link.h>
# include <poll.h>
# include <pthread.h>
# include <pwd.h>
# include <schedctl.h>
# include <setjmp.h>
# include <signal.h>
# include <stdio.h>
# include <alloca.h>
# include <sys/filio.h>
# include <sys/ipc.h>
# include <sys/lwp.h>
# include <sys/machelf.h>     // for elf Sym structure used by dladdr1
# include <sys/mman.h>
# include <sys/processor.h>
# include <sys/procset.h>
# include <sys/pset.h>
# include <sys/resource.h>
# include <sys/shm.h>
# include <sys/socket.h>
# include <sys/stat.h>
# include <sys/systeminfo.h>
# include <sys/time.h>
# include <sys/times.h>
# include <sys/types.h>
# include <sys/wait.h>
# include <sys/utsname.h>
# include <thread.h>
# include <unistd.h>
# include <sys/priocntl.h>
# include <sys/rtpriocntl.h>
# include <sys/tspriocntl.h>
# include <sys/iapriocntl.h>
106
# include <sys/fxpriocntl.h>
D
duke 已提交
107 108
# include <sys/loadavg.h>
# include <string.h>
109
# include <stdio.h>
D
duke 已提交
110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129

# define _STRUCTURED_PROC 1  //  this gets us the new structured proc interfaces of 5.6 & later
# include <sys/procfs.h>     //  see comment in <sys/procfs.h>

#define MAX_PATH (2 * K)

// for timer info max values which include all bits
#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)


// Here are some liblgrp types from sys/lgrp_user.h to be able to
// compile on older systems without this header file.

#ifndef MADV_ACCESS_LWP
# define  MADV_ACCESS_LWP         7       /* next LWP to access heavily */
#endif
#ifndef MADV_ACCESS_MANY
# define  MADV_ACCESS_MANY        8       /* many processes to access heavily */
#endif

130 131 132 133 134 135 136
#ifndef LGRP_RSRC_CPU
# define LGRP_RSRC_CPU           0       /* CPU resources */
#endif
#ifndef LGRP_RSRC_MEM
# define LGRP_RSRC_MEM           1       /* memory resources */
#endif

D
duke 已提交
137 138 139 140 141 142
// see thr_setprio(3T) for the basis of these numbers
#define MinimumPriority 0
#define NormalPriority  64
#define MaximumPriority 127

// Values for ThreadPriorityPolicy == 1
143 144 145
// Maps Java thread priorities (index 1..CriticalPriority) to Solaris
// thr_setprio() values when ThreadPriorityPolicy == 1.  Slot 0 holds a
// sentinel (priority 0 is not a valid Java priority).
int prio_policy1[CriticalPriority+1] = {
  -99999,  0, 16,  32,  48,  64,
          80, 96, 112, 124, 127, 127 };
D
duke 已提交
146 147 148 149

// System parameters used internally
static clock_t clock_tics_per_sec = 100;

150 151 152
// Track if we have called enable_extended_FILE_stdio (on Solaris 10u4+)
static bool enabled_extended_FILE_stdio = false;

D
duke 已提交
153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178
// For diagnostics to print a message once. see run_periodic_checks
static bool check_addr0_done = false;
static sigset_t check_signal_done;
static bool check_signals = true;

address os::Solaris::handler_start;  // start pc of thr_sighndlrinfo
address os::Solaris::handler_end;    // end pc of thr_sighndlrinfo

address os::Solaris::_main_stack_base = NULL;  // 4352906 workaround


// "default" initializers for missing libc APIs
extern "C" {
  // Fallbacks used when the real libc/liblwp symbols are missing (see the
  // comment above): zero-initialize the object and report success; there is
  // nothing to tear down on destroy.
  static int lwp_mutex_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
  static int lwp_mutex_destroy(mutex_t *mx)                 { return 0; }

  static int lwp_cond_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
  static int lwp_cond_destroy(cond_t *cv)                   { return 0; }
}

// "default" initializers for pthread-based synchronization
extern "C" {
  // Default initializers used with pthread-based synchronization:
  // zero-initializing the object is a sufficient "init".
  static int pthread_mutex_default_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
  static int pthread_cond_default_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
}

S
sla 已提交
179 180
static void unpackTime(timespec* absTime, bool isAbsolute, jlong time);

D
duke 已提交
181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241
// Sanitize a stack size reported by the OS for the stack based at 'base'.
static inline size_t adjust_stack_size(address base, size_t size) {
  // 4759953: a size that is negative when viewed as ssize_t is ridiculous;
  // clamp it to the largest sane value.
  size_t adjusted = ((ssize_t)size < 0) ? (size_t)max_intx : size;
  // 4812466: never allow the stack to extend past address 0, i.e. to wrap
  // around the address space.
  if (adjusted > (size_t)base) {
    adjusted = (size_t)base;
  }
  return adjusted;
}

// Query the current thread's stack segment via thr_stksegment() and return
// it with the size sanitized by adjust_stack_size().
static inline stack_t get_stack_info() {
  stack_t st;
  int retval = thr_stksegment(&st);
  // Clamp a bogus size before the sanity asserts below use it.
  st.ss_size = adjust_stack_size((address)st.ss_sp, st.ss_size);
  assert(retval == 0, "incorrect return value from thr_stksegment");
  // A local must lie within the returned segment: below the base (stacks
  // grow down) and above base - size.
  assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
  assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
  return st;
}

// Return the base (highest address) of the current thread's stack.
address os::current_stack_base() {
  int r = thr_main() ;
  guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ;
  bool is_primordial_thread = r;

  // Workaround 4352906: calling thr_stksegment more than once on the
  // primordial thread trashes data and yields a bad ss_sp, so serve the
  // cached value whenever we have one.
  if (is_primordial_thread && os::Solaris::_main_stack_base != NULL) {
    return os::Solaris::_main_stack_base;
  }

  stack_t st = get_stack_info();
  if (is_primordial_thread) {
    // First query on the primordial thread: remember the answer.
    os::Solaris::_main_stack_base = (address)st.ss_sp;
  }
  return (address)st.ss_sp;
}

// Return the usable size of the current thread's stack, rounded down to a
// whole number of pages.
size_t os::current_stack_size() {
  int r = thr_main() ;
  guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ;

  size_t size;
  if (r == 0) {
    // Ordinary thread: thr_stksegment (via get_stack_info) is exact.
    size = get_stack_info().ss_size;
  } else {
    // Primordial thread: the stack grows to the RLIMIT_STACK soft limit.
    struct rlimit limits;
    getrlimit(RLIMIT_STACK, &limits);
    size = adjust_stack_size(os::Solaris::_main_stack_base, (size_t)limits.rlim_cur);
  }

  // The base may not be page aligned; report only the whole pages between
  // the page-aligned bottom and the base.
  address base = current_stack_base();
  address bottom = (address)align_size_up((intptr_t)(base - size), os::vm_page_size());
  return (size_t)(base - bottom);
}

242 243 244 245
// Thread-safe localtime: delegate to localtime_r with the caller-supplied
// result buffer.
struct tm* os::localtime_pd(const time_t* clock, struct tm*  res) {
  return localtime_r(clock, res);
}

D
duke 已提交
246 247 248 249 250 251 252
// interruptible infrastructure

// setup_interruptible saves the thread state before going into an
// interruptible system call.
// The saved state is used to restore the thread to
// its former state whether or not an interrupt is received.
// Used by classloader os::read
253
// os::restartable_read calls skip this layer and stay in _thread_in_native
D
duke 已提交
254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353

// Transition 'thread' to _thread_blocked before an interruptible system
// call, saving the prior state so cleanup_interruptible() can restore it.
void os::Solaris::setup_interruptible(JavaThread* thread) {

  JavaThreadState thread_state = thread->thread_state();

  assert(thread_state != _thread_blocked, "Coming from the wrong thread");
  assert(thread_state != _thread_in_native, "Native threads skip setup_interruptible");
  // Remember the state to restore on cleanup.
  OSThread* osthread = thread->osthread();
  osthread->set_saved_interrupt_thread_state(thread_state);
  // Make the stack walkable while the thread is blocked.
  thread->frame_anchor()->make_walkable(thread);
  ThreadStateTransition::transition(thread, thread_state, _thread_blocked);
}

// Version of setup_interruptible() for threads that are already in
// _thread_blocked. Used by os_sleep().
void os::Solaris::setup_interruptible_already_blocked(JavaThread* thread) {
  // Thread is already _thread_blocked (see os_sleep()); only ensure the
  // stack is walkable.
  thread->frame_anchor()->make_walkable(thread);
}

// Convenience overload: set up the current JavaThread and return it.
JavaThread* os::Solaris::setup_interruptible() {
  JavaThread* thread = (JavaThread*)ThreadLocalStorage::thread();
  setup_interruptible(thread);
  return thread;
}

void os::Solaris::try_enable_extended_io() {
  typedef int (*enable_extended_FILE_stdio_t)(int, int);

  if (!UseExtendedFileIO) {
    return;
  }

  enable_extended_FILE_stdio_t enabler =
    (enable_extended_FILE_stdio_t) dlsym(RTLD_DEFAULT,
                                         "enable_extended_FILE_stdio");
  if (enabler) {
    enabler(-1, -1);
  }
}


#ifdef ASSERT

// Debug-only: interruptible I/O entered from native code must already be in
// _thread_in_native; returns the current JavaThread for the caller.
JavaThread* os::Solaris::setup_interruptible_native() {
  JavaThread* thread = (JavaThread*)ThreadLocalStorage::thread();
  JavaThreadState thread_state = thread->thread_state();
  assert(thread_state == _thread_in_native, "Assumed thread_in_native");
  return thread;
}

// Debug-only: verify the state is still _thread_in_native on the way out.
void os::Solaris::cleanup_interruptible_native(JavaThread* thread) {
  JavaThreadState thread_state = thread->thread_state();
  assert(thread_state == _thread_in_native, "Assumed thread_in_native");
}
#endif

// cleanup_interruptible reverses the effects of setup_interruptible
// setup_interruptible_already_blocked() does not need any cleanup.

void os::Solaris::cleanup_interruptible(JavaThread* thread) {
  OSThread* osthread = thread->osthread();

  // Transition back from _thread_blocked to the state saved by
  // setup_interruptible().
  ThreadStateTransition::transition(thread, _thread_blocked, osthread->saved_interrupt_thread_state());
}

// I/O interruption related counters called in _INTERRUPTIBLE

// Count an interrupt that was already pending before the I/O began.
void os::Solaris::bump_interrupted_before_count() {
  RuntimeService::record_interrupted_before_count();
}

// Count an interrupt that arrived while the I/O was in progress.
void os::Solaris::bump_interrupted_during_count() {
  RuntimeService::record_interrupted_during_count();
}

static int _processors_online = 0;

         jint os::Solaris::_os_thread_limit = 0;
volatile jint os::Solaris::_os_thread_count = 0;

julong os::available_memory() {
  return Solaris::available_memory();
}

// Currently-free physical memory: unallocated physical pages * page size.
julong os::Solaris::available_memory() {
  return (julong)sysconf(_SC_AVPHYS_PAGES) * os::vm_page_size();
}

julong os::Solaris::_physical_memory = 0;

// Total physical memory, as cached by initialize_system_info().
julong os::physical_memory() {
   return Solaris::physical_memory();
}

static hrtime_t first_hrtime = 0;
static const hrtime_t hrtime_hz = 1000*1000*1000;
static volatile hrtime_t max_hrtime = 0;


void os::Solaris::initialize_system_info() {
354
  set_processor_count(sysconf(_SC_NPROCESSORS_CONF));
D
duke 已提交
355 356 357 358 359 360 361 362
  _processors_online = sysconf (_SC_NPROCESSORS_ONLN);
  _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) * (julong)sysconf(_SC_PAGESIZE);
}

int os::active_processor_count() {
  int online_cpus = sysconf(_SC_NPROCESSORS_ONLN);
  pid_t pid = getpid();
  psetid_t pset = PS_NONE;
363
  // Are we running in a processor set or is there any processor set around?
D
duke 已提交
364
  if (pset_bind(PS_QUERY, P_PID, pid, &pset) == 0) {
365 366 367 368 369 370
    uint_t pset_cpus;
    // Query the number of cpus available to us.
    if (pset_info(pset, NULL, &pset_cpus, NULL) == 0) {
      assert(pset_cpus > 0 && pset_cpus <= online_cpus, "sanity check");
      _processors_online = pset_cpus;
      return pset_cpus;
D
duke 已提交
371 372 373 374 375 376 377 378 379 380 381 382 383
    }
  }
  // Otherwise return number of online cpus
  return online_cpus;
}

// Fill *id_array/*id_length with the processor ids belonging to 'pset'.
// The array is C-heap allocated; the caller owns and frees it.
static bool find_processors_in_pset(psetid_t        pset,
                                    processorid_t** id_array,
                                    uint_t*         id_length) {
  // First ask only for the member count.
  if (pset_info(pset, NULL, id_length, NULL) != 0) {
    return false;
  }
  // Then allocate an array of that size and fetch the ids into it.
  *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal);
  return pset_info(pset, NULL, id_length, *id_array) == 0;
}

// Callers of find_processors_online() must tolerate imprecise results --
// the system configuration can change asynchronously because of DR
// or explicit psradm operations.
//
// We also need to take care that the loop (below) terminates as the
// number of processors online can change between the _SC_NPROCESSORS_ONLN
// request and the loop that builds the list of processor ids.   Unfortunately
// there's no reliable way to determine the maximum valid processor id,
// so we use a manifest constant, MAX_PROCESSOR_ID, instead.  See p_online
// man pages, which claim the processor id set is "sparse, but
// not too sparse".  MAX_PROCESSOR_ID is used to ensure that we eventually
// exit the loop.
//
// In the future we'll be able to use sysconf(_SC_CPUID_MAX), but that's
// not available on S8.0.

static bool find_processors_online(processorid_t** id_array,
                                   uint*           id_length) {
  // Manifest bound on valid processor ids; see the comment above regarding
  // sparse id spaces and loop termination.
  const processorid_t MAX_PROCESSOR_ID = 100000 ;
  // Number of processors currently online.
  *id_length = sysconf(_SC_NPROCESSORS_ONLN);
  // Array to hold their ids; the caller owns and frees it.
  *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal);
  // Ids need not be consecutive: probe upward until we have collected the
  // expected count or hit the manifest limit.
  long found = 0;
  for (processorid_t next = 0;
       found < *id_length && next < MAX_PROCESSOR_ID;
       next += 1) {
    processor_info_t info;
    if (processor_info(next, &info) != 0) {
      continue;
    }
    // NB, PI_NOINTR processors are effectively online ...
    if (info.pi_state == P_ONLINE || info.pi_state == P_NOINTR) {
      (*id_array)[found] = next;
      found += 1;
    }
  }
  if (found < *id_length) {
    // The loop above didn't identify the expected number of processors.
    // We could retry sysconf(_SC_NPROCESSORS_ONLN) and re-run the loop, but
    // there's no guarantee of progress if the configuration is in flux.
    // Instead just return what we've got; in the worst case this is an
    // empty set.
    *id_length = found ;
  }

  return true;
}

// Pick 'distribution_length' processor ids from id_array[0..id_length) and
// write them into 'distribution', shuffling across "boards" of
// ProcessDistributionStride cpus so work is spread over boards.
static bool assign_distribution(processorid_t* id_array,
                                uint           id_length,
                                uint*          distribution,
                                uint           distribution_length) {
  // We assume we can assign processorid_t's to uint's.
  assert(sizeof(processorid_t) == sizeof(uint),
         "can't convert processorid_t to uint");
  // We cannot hand out more processors than we were given.
  if (id_length < distribution_length) {
    return false;
  }
  const uint per_board = ProcessDistributionStride;
  // Highest processor id present, to bound the marker array and the loops.
  processorid_t max_id = 0;
  for (uint m = 0; m < id_length; m += 1) {
    max_id = MAX2(max_id, id_array[m]);
  }
  const processorid_t limit_id = max_id + 1;
  // Markers for the ids that are still available for assignment.
  bool* is_available = NEW_C_HEAP_ARRAY(bool, limit_id, mtInternal);
  for (uint c = 0; c < limit_id; c += 1) {
    is_available[c] = false;
  }
  for (uint a = 0; a < id_length; a += 1) {
    is_available[id_array[a]] = true;
  }
  // NEEDS_CLEANUP: The assignment of processors should be stateful,
  //                remembering which processors were handed out by previous
  //                calls, so that several independent calls distribute
  //                nicely.  An API reporting how many processes are bound
  //                to a processor would help, but we don't have one.
  //                In the short term, "board" is static so subsequent
  //                distributions don't all start at board 0.
  static uint board = 0;
  uint assigned = 0;
  // Step by boards, taking the first free processor on each, until done.
  while (assigned < distribution_length) {
    for (uint slot = 0; slot < per_board; slot += 1) {
      uint candidate = board * per_board + slot;
      if ((candidate < limit_id) && is_available[candidate]) {
        distribution[assigned] = candidate;
        is_available[candidate] = false;
        assigned += 1;
        break;
      }
    }
    board += 1;
    if (board * per_board >= limit_id) {
      board = 0;
    }
  }
  if (is_available != NULL) {
    FREE_C_HEAP_ARRAY(bool, is_available, mtInternal);
  }
  return true;
}

D
dcubed 已提交
509 510 511 512 513
void os::set_native_thread_name(const char *name) {
  // Not yet implemented on Solaris; the request is silently ignored.
}

D
duke 已提交
514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535
// Fill 'distribution' with 'length' processor ids drawn from the cpus this
// process can run on.  Returns false if not enough cpus are available.
bool os::distribute_processes(uint length, uint* distribution) {
  processorid_t* id_array  = NULL;
  uint           id_length = 0;
  bool           result;
  // Note: processor sets can change dynamically, so there are races between
  // gathering this information and using it.
  psetid_t pset = PS_NONE;
  if ((pset_bind(PS_QUERY, P_PID, P_MYID, &pset) == 0) && pset != PS_NONE) {
    // Bound to a processor set: draw from its members.
    result = find_processors_in_pset(pset, &id_array, &id_length);
  } else {
    // Otherwise draw from all online processors.
    result = find_processors_online(&id_array, &id_length);
  }
  if (result) {
    // Need at least 'length' candidates; then shuffle them across boards.
    result = (id_length >= length) &&
             assign_distribution(id_array, id_length, distribution, length);
  }
  if (id_array != NULL) {
    FREE_C_HEAP_ARRAY(processorid_t, id_array, mtInternal);
  }
  return result;
}

// Bind the current LWP to the given processor; true on success.
bool os::bind_to_processor(uint processor_id) {
  // We assume that a processorid_t can be stored in a uint.
  assert(sizeof(uint) == sizeof(processorid_t),
         "can't convert uint to processorid_t");
  // Bind the current LWP; the previous binding is not of interest (NULL).
  return processor_bind(P_LWPID,
                        P_MYID,
                        (processorid_t) processor_id,
                        NULL) == 0;
}

// Copy the value of environment variable 'name' into buffer[0..len).
// On failure (unset variable or buffer too small) store an empty string
// (when len permits) and return false.
bool os::getenv(const char* name, char* buffer, int len) {
  char* val = ::getenv(name);
  bool fits = (val != NULL) && (strlen(val) + 1 <= (size_t)len);
  if (!fits) {
    if (len > 0) {
      buffer[0] = 0; // return a null string
    }
    return false;
  }
  strcpy(buffer, val);
  return true;
}


// Return true if the process runs with elevated privileges, i.e. its real
// and effective user or group ids differ (set-uid / set-gid execution).
bool os::have_special_privileges() {
  // Computed once and cached; the ids cannot change out from under us in a
  // way we care about here.
  static bool init = false;
  static bool privileges = false;
  if (!init) {
    privileges = (getuid() != geteuid()) || (getgid() != getegid());
    init = true;
  }
  return privileges;
}


void os::init_system_properties_values() {
  // The next steps are taken in the product version:
  //
581
  // Obtain the JAVA_HOME value from the location of libjvm.so.
D
duke 已提交
582
  // This library should be located at:
583
  // <JAVA_HOME>/jre/lib/<arch>/{client|server}/libjvm.so.
D
duke 已提交
584 585
  //
  // If "/jre/lib/" appears at the right place in the path, then we
586
  // assume libjvm.so is installed in a JDK and we use this path.
D
duke 已提交
587 588 589 590 591 592 593 594 595
  //
  // Otherwise exit with message: "Could not create the Java virtual machine."
  //
  // The following extra steps are taken in the debugging version:
  //
  // If "/jre/lib/" does NOT appear at the right place in the path
  // instead of exit check for $JAVA_HOME environment variable.
  //
  // If it is defined and we are able to locate $JAVA_HOME/jre/lib/<arch>,
596 597 598
  // then we append a fake suffix "hotspot/libjvm.so" to this path so
  // it looks like libjvm.so is installed there
  // <JAVA_HOME>/jre/lib/<arch>/hotspot/libjvm.so.
D
duke 已提交
599 600 601 602 603 604
  //
  // Otherwise exit.
  //
  // Important note: if the location of libjvm.so changes this
  // code needs to be changed accordingly.

605 606
// Base path of extensions installed on the system.
#define SYS_EXT_DIR     "/usr/jdk/packages"
D
duke 已提交
607 608 609
#define EXTENSIONS_DIR  "/lib/ext"
#define ENDORSED_DIR    "/lib/endorsed"

610 611 612 613 614 615 616 617 618 619 620 621
  char cpu_arch[12];
  // Buffer that fits several sprintfs.
  // Note that the space for the colon and the trailing null are provided
  // by the nulls included by the sizeof operator.
  const size_t bufsize =
    MAX4((size_t)MAXPATHLEN,  // For dll_dir & friends.
         sizeof(SYS_EXT_DIR) + sizeof("/lib/") + strlen(cpu_arch), // invariant ld_library_path
         (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR) + sizeof(SYS_EXT_DIR) + sizeof(EXTENSIONS_DIR), // extensions dir
         (size_t)MAXPATHLEN + sizeof(ENDORSED_DIR)); // endorsed dir
  char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);

  // sysclasspath, java_home, dll_dir
D
duke 已提交
622
  {
623 624 625 626 627 628 629 630 631 632 633
    char *pslash;
    os::jvm_path(buf, bufsize);

    // Found the full path to libjvm.so.
    // Now cut the path to <java_home>/jre if we can.
    *(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so.
    pslash = strrchr(buf, '/');
    if (pslash != NULL) {
      *pslash = '\0';            // Get rid of /{client|server|hotspot}.
    }
    Arguments::set_dll_dir(buf);
D
duke 已提交
634

635 636 637 638 639
    if (pslash != NULL) {
      pslash = strrchr(buf, '/');
      if (pslash != NULL) {
        *pslash = '\0';          // Get rid of /<arch>.
        pslash = strrchr(buf, '/');
D
duke 已提交
640
        if (pslash != NULL) {
641
          *pslash = '\0';        // Get rid of /lib.
D
duke 已提交
642
        }
643
      }
D
duke 已提交
644
    }
645 646 647
    Arguments::set_java_home(buf);
    set_boot_path('/', ':');
  }
D
duke 已提交
648

649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676
  // Where to look for native libraries.
  {
    // Use dlinfo() to determine the correct java.library.path.
    //
    // If we're launched by the Java launcher, and the user
    // does not set java.library.path explicitly on the commandline,
    // the Java launcher sets LD_LIBRARY_PATH for us and unsets
    // LD_LIBRARY_PATH_32 and LD_LIBRARY_PATH_64.  In this case
    // dlinfo returns LD_LIBRARY_PATH + crle settings (including
    // /usr/lib), which is exactly what we want.
    //
    // If the user does set java.library.path, it completely
    // overwrites this setting, and always has.
    //
    // If we're not launched by the Java launcher, we may
    // get here with any/all of the LD_LIBRARY_PATH[_32|64]
    // settings.  Again, dlinfo does exactly what we want.

    Dl_serinfo     info_sz, *info = &info_sz;
    Dl_serpath     *path;
    char           *library_path;
    char           *common_path = buf;

    // Determine search path count and required buffer size.
    if (dlinfo(RTLD_SELF, RTLD_DI_SERINFOSIZE, (void *)info) == -1) {
      FREE_C_HEAP_ARRAY(char, buf,  mtInternal);
      vm_exit_during_initialization("dlinfo SERINFOSIZE request", dlerror());
    }
D
duke 已提交
677

678 679 680 681
    // Allocate new buffer and initialize.
    info = (Dl_serinfo*)NEW_C_HEAP_ARRAY(char, info_sz.dls_size, mtInternal);
    info->dls_size = info_sz.dls_size;
    info->dls_cnt = info_sz.dls_cnt;
D
duke 已提交
682

683 684 685 686 687 688
    // Obtain search path information.
    if (dlinfo(RTLD_SELF, RTLD_DI_SERINFO, (void *)info) == -1) {
      FREE_C_HEAP_ARRAY(char, buf,  mtInternal);
      FREE_C_HEAP_ARRAY(char, info, mtInternal);
      vm_exit_during_initialization("dlinfo SERINFO request", dlerror());
    }
D
duke 已提交
689

690
    path = &info->dls_serpath[0];
D
duke 已提交
691

692 693 694 695 696 697 698
    // Note: Due to a legacy implementation, most of the library path
    // is set in the launcher. This was to accomodate linking restrictions
    // on legacy Solaris implementations (which are no longer supported).
    // Eventually, all the library path setting will be done here.
    //
    // However, to prevent the proliferation of improperly built native
    // libraries, the new path component /usr/jdk/packages is added here.
D
duke 已提交
699

700 701
    // Determine the actual CPU architecture.
    sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
D
duke 已提交
702
#ifdef _LP64
703 704 705 706 707 708 709 710
    // If we are a 64-bit vm, perform the following translations:
    //   sparc   -> sparcv9
    //   i386    -> amd64
    if (strcmp(cpu_arch, "sparc") == 0) {
      strcat(cpu_arch, "v9");
    } else if (strcmp(cpu_arch, "i386") == 0) {
      strcpy(cpu_arch, "amd64");
    }
D
duke 已提交
711 712
#endif

713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738
    // Construct the invariant part of ld_library_path.
    sprintf(common_path, SYS_EXT_DIR "/lib/%s", cpu_arch);

    // Struct size is more than sufficient for the path components obtained
    // through the dlinfo() call, so only add additional space for the path
    // components explicitly added here.
    size_t library_path_size = info->dls_size + strlen(common_path);
    library_path = (char *)NEW_C_HEAP_ARRAY(char, library_path_size, mtInternal);
    library_path[0] = '\0';

    // Construct the desired Java library path from the linker's library
    // search path.
    //
    // For compatibility, it is optimal that we insert the additional path
    // components specific to the Java VM after those components specified
    // in LD_LIBRARY_PATH (if any) but before those added by the ld.so
    // infrastructure.
    if (info->dls_cnt == 0) { // Not sure this can happen, but allow for it.
      strcpy(library_path, common_path);
    } else {
      int inserted = 0;
      int i;
      for (i = 0; i < info->dls_cnt; i++, path++) {
        uint_t flags = path->dls_flags & LA_SER_MASK;
        if (((flags & LA_SER_LIBPATH) == 0) && !inserted) {
          strcat(library_path, common_path);
D
duke 已提交
739
          strcat(library_path, os::path_separator());
740
          inserted = 1;
D
duke 已提交
741
        }
742 743
        strcat(library_path, path->dls_name);
        strcat(library_path, os::path_separator());
D
duke 已提交
744
      }
745 746 747
      // Eliminate trailing path separator.
      library_path[strlen(library_path)-1] = '\0';
    }
D
duke 已提交
748

749 750 751
    // happens before argument parsing - can't use a trace flag
    // tty->print_raw("init_system_properties_values: native lib path: ");
    // tty->print_raw_cr(library_path);
D
duke 已提交
752

753 754
    // Callee copies into its own buffer.
    Arguments::set_library_path(library_path);
D
duke 已提交
755

756 757 758
    FREE_C_HEAP_ARRAY(char, library_path, mtInternal);
    FREE_C_HEAP_ARRAY(char, info, mtInternal);
  }
D
duke 已提交
759

760 761 762
  // Extensions directories.
  sprintf(buf, "%s" EXTENSIONS_DIR ":" SYS_EXT_DIR EXTENSIONS_DIR, Arguments::get_java_home());
  Arguments::set_ext_dirs(buf);
D
duke 已提交
763

764 765 766 767 768
  // Endorsed standards default directory.
  sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
  Arguments::set_endorsed_dirs(buf);

  FREE_C_HEAP_ARRAY(char, buf, mtInternal);
D
duke 已提交
769

770
#undef SYS_EXT_DIR
D
duke 已提交
771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831
#undef EXTENSIONS_DIR
#undef ENDORSED_DIR
}

// Stop in the debugger; BREAKPOINT is a platform-defined trap macro.
void os::breakpoint() {
  BREAKPOINT;
}

// Return true if 'option' starts with one of the obsolete -X option names
// that are accepted for compatibility but no longer do anything.
bool os::obsolete_option(const JavaVMOption *option)
{
  // Note: "-Xtm" is shadowed by the "-Xt" prefix, exactly as in the
  // original chain of strncmp tests; kept for documentation value.
  static const char* const obsolete_prefixes[] = {
    "-Xt", "-Xtm", "-Xverifyheap", "-Xmaxjitcodesize"
  };
  const size_t count = sizeof(obsolete_prefixes) / sizeof(obsolete_prefixes[0]);
  for (size_t i = 0; i < count; i++) {
    const char* prefix = obsolete_prefixes[i];
    if (strncmp(option->optionString, prefix, strlen(prefix)) == 0) {
      return true;
    }
  }
  return false;
}

// Return true if 'sp' lies within 'thread's stack: below the base (stacks
// grow down) and at or above base - size.
bool os::Solaris::valid_stack_address(Thread* thread, address sp) {
  address stackStart = (address)thread->stack_base();
  // Plain pointer - integer arithmetic; the original cast the size_t to a
  // pointer before subtracting, which computed the same value only by way
  // of an unnecessary pointer-difference -> pointer round-trip.
  address stackEnd   = stackStart - thread->stack_size();
  return sp < stackStart && sp >= stackEnd;
}

// Intentionally empty: a convenient named symbol on which to set a
// debugger breakpoint.
extern "C" void breakpoint() {
  // use debugger to set breakpoint here
}

static thread_t main_thread;

// Thread start routine for all new Java threads
extern "C" void* java_start(void* thread_addr) {
  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads of the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.
  static int counter = 0;
  int pid = os::current_process_id();
  alloca(((pid ^ counter++) & 7) * 128);

  int prio;
  Thread* thread = (Thread*)thread_addr;
  OSThread* osthr = thread->osthread();

  osthr->set_lwp_id( _lwp_self() );  // Store lwp in case we are bound
  thread->_schedctl = (void *) schedctl_init () ;

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // If the creator called set priority before we started,
832 833 834 835 836 837 838 839 840
  // we need to call set_native_priority now that we have an lwp.
  // We used to get the priority from thr_getprio (we called
  // thr_setprio way back in create_thread) and pass it to
  // set_native_priority, but Solaris scales the priority
  // in java_to_os_priority, so when we read it back here,
  // we pass trash to set_native_priority instead of what's
  // in java_to_os_priority. So we save the native priority
  // in the osThread and recall it here.

D
duke 已提交
841 842
  if ( osthr->thread_id() != -1 ) {
    if ( UseThreadPriorities ) {
843
      int prio = osthr->native_priority();
D
duke 已提交
844
      if (ThreadPriorityVerbose) {
845 846 847
        tty->print_cr("Starting Thread " INTPTR_FORMAT ", LWP is "
                      INTPTR_FORMAT ", setting priority: %d\n",
                      osthr->thread_id(), osthr->lwp_id(), prio);
D
duke 已提交
848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143
      }
      os::set_native_priority(thread, prio);
    }
  } else if (ThreadPriorityVerbose) {
    warning("Can't set priority in _start routine, thread id hasn't been set\n");
  }

  assert(osthr->get_state() == RUNNABLE, "invalid os thread state");

  // initialize signal mask for this thread
  os::Solaris::hotspot_sigmask(thread);

  thread->run();

  // One less thread is executing
  // When the VMThread gets here, the main thread may have already exited
  // which frees the CodeHeap containing the Atomic::dec code
  if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
    Atomic::dec(&os::Solaris::_os_thread_count);
  }

  if (UseDetachedThreads) {
    thr_exit(NULL);
    ShouldNotReachHere();
  }
  return NULL;
}

// Build an OSThread for a Solaris thread that already exists (the
// primordial thread, or a native thread attaching through JNI).
// Returns NULL if the OSThread cannot be allocated.
static OSThread* create_os_thread(Thread* thread, thread_t thread_id) {
  OSThread* osthr = new OSThread(NULL, NULL);
  if (osthr == NULL) return NULL;

  // Record the identity of the underlying Solaris thread and its LWP.
  osthr->set_thread_id(thread_id);
  osthr->set_lwp_id(_lwp_self());
  thread->_schedctl = (void *) schedctl_init();

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  if (ThreadPriorityVerbose) {
    tty->print_cr("In create_os_thread, Thread " INTPTR_FORMAT ", LWP is " INTPTR_FORMAT "\n",
                  osthr->thread_id(), osthr->lwp_id());
  }

  // Initial thread state is INITIALIZED, not SUSPENDED
  osthr->set_state(INITIALIZED);

  return osthr;
}

// Install the HotSpot signal mask on the calling thread, first saving the
// mask the thread arrived with so os::free_thread() can restore it.
void os::Solaris::hotspot_sigmask(Thread* thread) {
  // Save the caller's signal mask before changing anything.
  sigset_t caller_mask;
  thr_sigsetmask(SIG_SETMASK, NULL, &caller_mask);
  OSThread* osthr = thread->osthread();
  osthr->set_caller_sigmask(caller_mask);

  // Every thread must be able to receive the signals the VM depends on.
  thr_sigsetmask(SIG_UNBLOCK, os::Solaris::unblocked_signals(), NULL);
  if (!ReduceSignalUsage) {
    if (thread->is_VM_thread()) {
      // Only the VM thread handles BREAK_SIGNAL ...
      thr_sigsetmask(SIG_UNBLOCK, vm_signals(), NULL);
    } else {
      // ... all other threads block BREAK_SIGNAL
      assert(!sigismember(vm_signals(), SIGINT), "SIGINT should not be blocked");
      thr_sigsetmask(SIG_BLOCK, vm_signals(), NULL);
    }
  }
}

// Wrap an already-running native thread (JNI AttachCurrentThread) in an
// OSThread and install the HotSpot signal mask.  Returns false only if
// the OSThread cannot be allocated.
bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif
  OSThread* osthr = create_os_thread(thread, thr_self());
  if (osthr == NULL) {
    return false;
  }

  // An attached thread is already executing, so it starts out RUNNABLE.
  osthr->set_state(RUNNABLE);
  thread->set_osthread(osthr);

  // Install the HotSpot signal mask; the caller's mask is saved for
  // restoration when the thread detaches.
  os::Solaris::hotspot_sigmask(thread);

  return true;
}

// Wrap the primordial (launcher) thread in an OSThread.  The OSThread for
// the primordial thread is created at most once and cached in
// _starting_thread.  Returns false only on allocation failure.
bool os::create_main_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif
  if (_starting_thread == NULL) {
    _starting_thread = create_os_thread(thread, main_thread);
    if (_starting_thread == NULL) {
      return false;
    }
  }

  // The primordial thread is runnable from the start.
  _starting_thread->set_state(RUNNABLE);

  thread->set_osthread(_starting_thread);

  // Install the HotSpot signal mask, saving the caller's mask for restore.
  os::Solaris::hotspot_sigmask(thread);

  return true;
}

// _T2_libthread is true if we believe we are running with the newer
// SunSoft lwp/libthread.so (2.8 patch, 2.9 default).  Set during startup;
// influences thread binding and signal-stack decisions in create_thread().
bool os::Solaris::_T2_libthread = false;

// Create a new VM thread of the given type, suspended.  Chooses a stack
// size (from the caller, flags, or a 1M/2M default), decides whether the
// thread must be bound to an LWP, creates it via thr_create(), and applies
// the default native priority.  The thread is started later by
// os::pd_start_thread().  Returns false on any allocation/creation failure.
// (Fix: removed interleaved VCS-annotation garbage inside the body; made
// the verbose-trace type name a const char* instead of casting literals.)
bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) {
    return false;
  }

  if ( ThreadPriorityVerbose ) {
    const char* thrtyp;
    switch ( thr_type ) {
      case vm_thread:
        thrtyp = "vm";
        break;
      case cgc_thread:
        thrtyp = "cgc";
        break;
      case pgc_thread:
        thrtyp = "pgc";
        break;
      case java_thread:
        thrtyp = "java";
        break;
      case compiler_thread:
        thrtyp = "compiler";
        break;
      case watcher_thread:
        thrtyp = "watcher";
        break;
      default:
        thrtyp = "unknown";
        break;
    }
    tty->print_cr("In create_thread, creating a %s thread\n", thrtyp);
  }

  // Calculate stack size if it's not specified by caller.
  if (stack_size == 0) {
    // The default stack size 1M (2M for LP64).
    stack_size = (BytesPerWord >> 2) * K * K;

    switch (thr_type) {
    case os::java_thread:
      // Java threads use ThreadStackSize which default value can be changed with the flag -Xss
      if (JavaThread::stack_size_at_create() > 0) stack_size = JavaThread::stack_size_at_create();
      break;
    case os::compiler_thread:
      if (CompilerThreadStackSize > 0) {
        stack_size = (size_t)(CompilerThreadStackSize * K);
        break;
      } // else fall through:
        // use VMThreadStackSize if CompilerThreadStackSize is not defined
    case os::vm_thread:
    case os::pgc_thread:
    case os::cgc_thread:
    case os::watcher_thread:
      if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
      break;
    }
  }
  stack_size = MAX2(stack_size, os::Solaris::min_stack_allowed);

  // Initial state is ALLOCATED but not INITIALIZED
  osthread->set_state(ALLOCATED);

  if (os::Solaris::_os_thread_count > os::Solaris::_os_thread_limit) {
    // We got lots of threads. Check if we still have some address space left.
    // Need to be at least 5Mb of unreserved address space. We do check by
    // trying to reserve some.
    const size_t VirtualMemoryBangSize = 20*K*K;
    char* mem = os::reserve_memory(VirtualMemoryBangSize);
    if (mem == NULL) {
      delete osthread;
      return false;
    } else {
      // Release the memory again
      os::release_memory(mem, VirtualMemoryBangSize);
    }
  }

  // Setup osthread because the child thread may need it.
  thread->set_osthread(osthread);

  // Create the Solaris thread
  // explicit THR_BOUND for T2_libthread case in case
  // that assumption is not accurate, but our alternate signal stack
  // handling is based on it which must have bound threads
  thread_t tid = 0;
  long     flags = (UseDetachedThreads ? THR_DETACHED : 0) | THR_SUSPENDED
                   | ((UseBoundThreads || os::Solaris::T2_libthread() ||
                       (thr_type == vm_thread) ||
                       (thr_type == cgc_thread) ||
                       (thr_type == pgc_thread) ||
                       (thr_type == compiler_thread && BackgroundCompilation)) ?
                      THR_BOUND : 0);
  int      status;

  // 4376845 -- libthread/kernel don't provide enough LWPs to utilize all CPUs.
  //
  // On multiprocessors systems, libthread sometimes under-provisions our
  // process with LWPs.  On a 30-way systems, for instance, we could have
  // 50 user-level threads in ready state and only 2 or 3 LWPs assigned
  // to our process.  This can result in under utilization of PEs.
  // I suspect the problem is related to libthread's LWP
  // pool management and to the kernel's SIGBLOCKING "last LWP parked"
  // upcall policy.
  //
  // The following code is palliative -- it attempts to ensure that our
  // process has sufficient LWPs to take advantage of multiple PEs.
  // Proper long-term cures include using user-level threads bound to LWPs
  // (THR_BOUND) or using LWP-based synchronization.  Note that there is a
  // slight timing window with respect to sampling _os_thread_count, but
  // the race is benign.  Also, we should periodically recompute
  // _processors_online as the min of SC_NPROCESSORS_ONLN and the
  // the number of PEs in our partition.  You might be tempted to use
  // THR_NEW_LWP here, but I'd recommend against it as that could
  // result in undesirable growth of the libthread's LWP pool.
  // The fix below isn't sufficient; for instance, it doesn't take into count
  // LWPs parked on IO.  It does, however, help certain CPU-bound benchmarks.
  //
  // Some pathologies this scheme doesn't handle:
  // *  Threads can block, releasing the LWPs.  The LWPs can age out.
  //    When a large number of threads become ready again there aren't
  //    enough LWPs available to service them.  This can occur when the
  //    number of ready threads oscillates.
  // *  LWPs/Threads park on IO, thus taking the LWP out of circulation.
  //
  // Finally, we should call thr_setconcurrency() periodically to refresh
  // the LWP pool and thwart the LWP age-out mechanism.
  // The "+3" term provides a little slop -- we want to slightly overprovision.

  if (AdjustConcurrency && os::Solaris::_os_thread_count < (_processors_online+3)) {
    if (!(flags & THR_BOUND)) {
      thr_setconcurrency (os::Solaris::_os_thread_count);       // avoid starvation
    }
  }
  // Although this doesn't hurt, we should warn of undefined behavior
  // when using unbound T1 threads with schedctl().  This should never
  // happen, as the compiler and VM threads are always created bound
  DEBUG_ONLY(
      if ((VMThreadHintNoPreempt || CompilerThreadHintNoPreempt) &&
          (!os::Solaris::T2_libthread() && (!(flags & THR_BOUND))) &&
          ((thr_type == vm_thread) || (thr_type == cgc_thread) ||
           (thr_type == pgc_thread) || (thr_type == compiler_thread && BackgroundCompilation))) {
         warning("schedctl behavior undefined when Compiler/VM/GC Threads are Unbound");
      }
  );


  // Mark that we don't have an lwp or thread id yet.
  // In case we attempt to set the priority before the thread starts.
  osthread->set_lwp_id(-1);
  osthread->set_thread_id(-1);

  status = thr_create(NULL, stack_size, java_start, thread, flags, &tid);
  if (status != 0) {
    if (PrintMiscellaneous && (Verbose || WizardMode)) {
      perror("os::create_thread");
    }
    thread->set_osthread(NULL);
    // Need to clean up stuff we've allocated so far
    delete osthread;
    return false;
  }

  Atomic::inc(&os::Solaris::_os_thread_count);

  // Store info on the Solaris thread into the OSThread
  osthread->set_thread_id(tid);

  // Remember that we created this thread so we can set priority on it
  osthread->set_vm_created();

  // Set the default thread priority.  If using bound threads, setting
  // lwp priority will be delayed until thread start.
  set_native_priority(thread,
                      DefaultThreadPriority == -1 ?
                        java_to_os_priority[NormPriority] :
                        DefaultThreadPriority);

  // Initial thread state is INITIALIZED, not SUSPENDED
  osthread->set_state(INITIALIZED);

  // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
  return true;
}

/* defined for >= Solaris 10. This allows builds on earlier versions
 *  of Solaris to take advantage of the newly reserved Solaris JVM signals
 *  With SIGJVM1, SIGJVM2, INTERRUPT_SIGNAL is SIGJVM1, ASYNC_SIGNAL is SIGJVM2
 *  and -XX:+UseAltSigs does nothing since these should have no conflict
 */
#if !defined(SIGJVM1)
#define SIGJVM1 39
#define SIGJVM2 40
#endif

debug_only(static bool signal_sets_initialized = false);
static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;
int os::Solaris::_SIGinterrupt = INTERRUPT_SIGNAL;
int os::Solaris::_SIGasync = ASYNC_SIGNAL;

// Return true if the currently installed disposition for sig is SIG_IGN.
bool os::Solaris::is_sig_ignored(int sig) {
  struct sigaction oact;
  sigaction(sig, (struct sigaction*)NULL, &oact);
  // Pick whichever handler field is populated (siginfo vs. classic style).
  void* handler = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
                                    : CAST_FROM_FN_PTR(void*, oact.sa_handler);
  return handler == CAST_FROM_FN_PTR(void*, SIG_IGN);
}

// Note: SIGRTMIN is a macro that calls sysconf() so it will
// dynamically detect SIGRTMIN value for the system at runtime, not buildtime.
// SIGJVM1/SIGJVM2 are only genuinely reserved when they fall below the
// real-time signal range, which is the case on Solaris 10 and later.
static bool isJVM1available() {
  return SIGJVM1 < SIGRTMIN;
}

// One-time initialization of the VM's signal sets, called while the
// process is still single-threaded.  Populates unblocked_sigs (signals
// every thread must receive), allowdebug_blocked_sigs, and vm_sigs (the
// set only the VM thread unblocks), and selects the interrupt/async
// signal pair.
//
// BUG FIX: the original code wrote "if (isJVM1available)" — testing the
// function's address, which is always non-NULL — instead of calling
// isJVM1available().  That made the UseAltSigs and legacy branches dead
// code.  The function is now actually invoked.
void os::Solaris::signal_sets_init() {
  // Should also have an assertion stating we are still single-threaded.
  assert(!signal_sets_initialized, "Already initialized");
  // Fill in signals that are necessarily unblocked for all threads in
  // the VM. Currently, we unblock the following signals:
  // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden
  //                         by -Xrs (=ReduceSignalUsage));
  // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
  // other threads. The "ReduceSignalUsage" boolean tells us not to alter
  // the dispositions or masks wrt these signals.
  // Programs embedding the VM that want to use the above signals for their
  // own purposes must, at this time, use the "-Xrs" option to prevent
  // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
  // (See bug 4345157, and other related bugs).
  // In reality, though, unblocking these signals is really a nop, since
  // these signals are not blocked by default.
  sigemptyset(&unblocked_sigs);
  sigemptyset(&allowdebug_blocked_sigs);
  sigaddset(&unblocked_sigs, SIGILL);
  sigaddset(&unblocked_sigs, SIGSEGV);
  sigaddset(&unblocked_sigs, SIGBUS);
  sigaddset(&unblocked_sigs, SIGFPE);

  // Prefer the reserved SIGJVM1/SIGJVM2 pair when the platform provides
  // it (Solaris 10+), then the -XX:+UseAltSigs alternates, then the
  // traditional defaults.
  if (isJVM1available()) {
    os::Solaris::set_SIGinterrupt(SIGJVM1);
    os::Solaris::set_SIGasync(SIGJVM2);
  } else if (UseAltSigs) {
    os::Solaris::set_SIGinterrupt(ALT_INTERRUPT_SIGNAL);
    os::Solaris::set_SIGasync(ALT_ASYNC_SIGNAL);
  } else {
    os::Solaris::set_SIGinterrupt(INTERRUPT_SIGNAL);
    os::Solaris::set_SIGasync(ASYNC_SIGNAL);
  }

  sigaddset(&unblocked_sigs, os::Solaris::SIGinterrupt());
  sigaddset(&unblocked_sigs, os::Solaris::SIGasync());

  if (!ReduceSignalUsage) {
    if (!os::Solaris::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
      sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
      sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
    }
    if (!os::Solaris::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
      sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
      sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
    }
    if (!os::Solaris::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
      sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
      sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
    }
  }
  // Fill in signals that are blocked by all but the VM thread.
  sigemptyset(&vm_sigs);
  if (!ReduceSignalUsage)
    sigaddset(&vm_sigs, BREAK_SIGNAL);
  debug_only(signal_sets_initialized = true);

  // For diagnostics only used in run_periodic_checks
  sigemptyset(&check_signal_done);
}

// These are signals that are unblocked while a thread is running Java.
// (For some reason, they get blocked by default.)
// Valid only after signal_sets_init() has run.
sigset_t* os::Solaris::unblocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &unblocked_sigs;
}

// These are the signals that are blocked while a (non-VM) thread is
// running Java. Only the VM thread handles these signals.
// Valid only after signal_sets_init() has run.
sigset_t* os::Solaris::vm_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &vm_sigs;
}

// These are signals that are blocked during cond_wait to allow debugger in.
// Valid only after signal_sets_init() has run.
sigset_t* os::Solaris::allowdebug_blocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &allowdebug_blocked_sigs;
}

1271 1272 1273 1274 1275 1276 1277

void _handle_uncaught_cxx_exception() {
  VMError err("An uncaught C++ exception");
  err.report_and_die();
}


D
duke 已提交
1278
// First crack at OS-specific initialization, from inside the new thread.
1279
void os::initialize_thread(Thread* thr) {
D
duke 已提交
1280 1281 1282
  int r = thr_main() ;
  guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ;
  if (r) {
1283
    JavaThread* jt = (JavaThread *)thr;
D
duke 已提交
1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366
    assert(jt != NULL,"Sanity check");
    size_t stack_size;
    address base = jt->stack_base();
    if (Arguments::created_by_java_launcher()) {
      // Use 2MB to allow for Solaris 7 64 bit mode.
      stack_size = JavaThread::stack_size_at_create() == 0
        ? 2048*K : JavaThread::stack_size_at_create();

      // There are rare cases when we may have already used more than
      // the basic stack size allotment before this method is invoked.
      // Attempt to allow for a normally sized java_stack.
      size_t current_stack_offset = (size_t)(base - (address)&stack_size);
      stack_size += ReservedSpace::page_align_size_down(current_stack_offset);
    } else {
      // 6269555: If we were not created by a Java launcher, i.e. if we are
      // running embedded in a native application, treat the primordial thread
      // as much like a native attached thread as possible.  This means using
      // the current stack size from thr_stksegment(), unless it is too large
      // to reliably setup guard pages.  A reasonable max size is 8MB.
      size_t current_size = current_stack_size();
      // This should never happen, but just in case....
      if (current_size == 0) current_size = 2 * K * K;
      stack_size = current_size > (8 * K * K) ? (8 * K * K) : current_size;
    }
    address bottom = (address)align_size_up((intptr_t)(base - stack_size), os::vm_page_size());;
    stack_size = (size_t)(base - bottom);

    assert(stack_size > 0, "Stack size calculation problem");

    if (stack_size > jt->stack_size()) {
      NOT_PRODUCT(
        struct rlimit limits;
        getrlimit(RLIMIT_STACK, &limits);
        size_t size = adjust_stack_size(base, (size_t)limits.rlim_cur);
        assert(size >= jt->stack_size(), "Stack size problem in main thread");
      )
      tty->print_cr(
        "Stack size of %d Kb exceeds current limit of %d Kb.\n"
        "(Stack sizes are rounded up to a multiple of the system page size.)\n"
        "See limit(1) to increase the stack size limit.",
        stack_size / K, jt->stack_size() / K);
      vm_exit(1);
    }
    assert(jt->stack_size() >= stack_size,
          "Attempt to map more stack than was allocated");
    jt->set_stack_size(stack_size);
  }

   // 5/22/01: Right now alternate signal stacks do not handle
   // throwing stack overflow exceptions, see bug 4463178
   // Until a fix is found for this, T2 will NOT imply alternate signal
   // stacks.
   // If using T2 libthread threads, install an alternate signal stack.
   // Because alternate stacks associate with LWPs on Solaris,
   // see sigaltstack(2), if using UNBOUND threads, or if UseBoundThreads
   // we prefer to explicitly stack bang.
   // If not using T2 libthread, but using UseBoundThreads any threads
   // (primordial thread, jni_attachCurrentThread) we do not create,
   // probably are not bound, therefore they can not have an alternate
   // signal stack. Since our stack banging code is generated and
   // is shared across threads, all threads must be bound to allow
   // using alternate signal stacks.  The alternative is to interpose
   // on _lwp_create to associate an alt sig stack with each LWP,
   // and this could be a problem when the JVM is embedded.
   // We would prefer to use alternate signal stacks with T2
   // Since there is currently no accurate way to detect T2
   // we do not. Assuming T2 when running T1 causes sig 11s or assertions
   // on installing alternate signal stacks


   // 05/09/03: removed alternate signal stack support for Solaris
   // The alternate signal stack mechanism is no longer needed to
   // handle stack overflow. This is now handled by allocating
   // guard pages (red zone) and stackbanging.
   // Initially the alternate signal stack mechanism was removed because
   // it did not work with T1 llibthread. Alternate
   // signal stacks MUST have all threads bound to lwps. Applications
   // can create their own threads and attach them without their being
   // bound under T1. This is frequently the case for the primordial thread.
   // If we were ever to reenable this mechanism we would need to
   // use the dynamic check for T2 libthread.

  os::Solaris::init_thread_fpu_state();
1367
  std::set_terminate(_handle_uncaught_cxx_exception);
D
duke 已提交
1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406
}



// Free Solaris resources related to the OSThread.
void os::free_thread(OSThread* osthread) {
  assert(osthread != NULL, "os::free_thread but osthread not set");

  // We are told to free resources of the argument thread, but we can only
  // really operate on the current thread.  The main thread must take the
  // VMThread down synchronously before the main thread exits and frees up
  // CodeHeap.
  bool is_current = (Thread::current()->osthread() == osthread);
  guarantee(is_current || osthread == VMThread::vm_thread()->osthread(),
            "os::free_thread but not current thread");
  if (is_current) {
    // Restore the signal mask the thread arrived with.
    sigset_t saved_mask = osthread->caller_sigmask();
    thr_sigsetmask(SIG_SETMASK, &saved_mask, NULL);
  }
  delete osthread;
}

// Release a thread that os::create_thread() left suspended.
void os::pd_start_thread(Thread* thread) {
  int rc = thr_continue(thread->osthread()->thread_id());
  assert_status(rc == 0, rc, "thr_continue failed");
}


// Return the Solaris thread id of the calling thread.
intx os::current_thread_id() {
  return (intx)thr_self();
}

static pid_t _initial_pid = 0;

// Return the process id, preferring the pid cached at startup in
// _initial_pid (when non-zero) over a fresh getpid() call.
int os::current_process_id() {
  if (_initial_pid != 0) {
    return (int)_initial_pid;
  }
  return (int)getpid();
}

1407 1408 1409 1410
// gethrtime() should be monotonic according to the documentation,
// but some virtualized platforms are known to break this guarantee.
// getTimeNanos() must be guaranteed not to move backwards, so we
// are forced to add a check here.
D
duke 已提交
1411
inline hrtime_t getTimeNanos() {
1412 1413 1414 1415
  const hrtime_t now = gethrtime();
  const hrtime_t prev = max_hrtime;
  if (now <= prev) {
    return prev;   // same or retrograde time;
D
duke 已提交
1416
  }
1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431
  const hrtime_t obsv = Atomic::cmpxchg(now, (volatile jlong*)&max_hrtime, prev);
  assert(obsv >= prev, "invariant");   // Monotonicity
  // If the CAS succeeded then we're done and return "now".
  // If the CAS failed and the observed value "obsv" is >= now then
  // we should return "obsv".  If the CAS failed and now > obsv > prv then
  // some other thread raced this thread and installed a new value, in which case
  // we could either (a) retry the entire operation, (b) retry trying to install now
  // or (c) just return obsv.  We use (c).   No loop is required although in some cases
  // we might discard a higher "now" value in deference to a slightly lower but freshly
  // installed obsv value.   That's entirely benign -- it admits no new orderings compared
  // to (a) or (b) -- and greatly reduces coherence traffic.
  // We might also condition (c) on the magnitude of the delta between obsv and now.
  // Avoiding excessive CAS operations to hot RW locations is critical.
  // See https://blogs.oracle.com/dave/entry/cas_and_cache_trivia_invalidate
  return (prev == obsv) ? now : obsv;
D
duke 已提交
1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 1462 1463 1464 1465 1466 1467 1468 1469
}

// Time since start-up in seconds to a fine granularity.
// Used by VMSelfDestructTimer and the MemProfiler.
double os::elapsedTime() {
  return (double)(getTimeNanos() - first_hrtime) / (double)hrtime_hz;
}

// Elapsed ticks (nanoseconds) since VM start, based on the monotonic
// getTimeNanos() clock.
jlong os::elapsed_counter() {
  return (jlong)(getTimeNanos() - first_hrtime);
}

// Frequency of the elapsed counter in ticks per second.
jlong os::elapsed_frequency() {
   return hrtime_hz;
}

// Return the real, user, and system times in seconds from an
// arbitrary fixed point in the past.  Returns false if times(2) fails.
bool os::getTimesSecs(double* process_real_time,
                  double* process_user_time,
                  double* process_system_time) {
  struct tms ticks;
  clock_t real_ticks = times(&ticks);
  if (real_ticks == (clock_t) (-1)) {
    return false;
  }

  const double ticks_per_second = (double) clock_tics_per_sec;
  *process_user_time   = ((double) ticks.tms_utime) / ticks_per_second;
  *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
  // For consistency return the real time from getTimeNanos()
  // converted to seconds.
  *process_real_time = ((double) getTimeNanos()) / ((double) NANOUNITS);
  return true;
}

1470 1471 1472
bool os::supports_vtime() { return true; }

// Enable micro-state accounting (PR_MSACCT) for the current process via
// /proc/self/ctl so gethrvtime() returns meaningful per-thread CPU time.
// Returns true on success.  (Fix: removed interleaved VCS-annotation
// garbage from the body.)
bool os::enable_vtime() {
  int fd = ::open("/proc/self/ctl", O_WRONLY);
  if (fd == -1)
    return false;

  long cmd[] = { PCSET, PR_MSACCT };
  int res = ::write(fd, cmd, sizeof(long) * 2);
  ::close(fd);  // close regardless of write outcome
  if (res != sizeof(long) * 2)
    return false;

  return true;
}

// Return true if micro-state accounting (PR_MSACCT) is currently enabled
// for this process, by inspecting /proc/self/status.  See enable_vtime().
// (Fix: removed interleaved VCS-annotation garbage from the body.)
bool os::vtime_enabled() {
  int fd = ::open("/proc/self/status", O_RDONLY);
  if (fd == -1)
    return false;

  pstatus_t status;
  int res = os::read(fd, (void*) &status, sizeof(pstatus_t));
  ::close(fd);  // close regardless of read outcome
  if (res != sizeof(pstatus_t))
    return false;

  return status.pr_flags & PR_MSACCT;
}

// Per-thread virtual (CPU) time in seconds; meaningful only when
// micro-state accounting is on (see enable_vtime()).
double os::elapsedVTime() {
  return (double)gethrvtime() / (double)hrtime_hz;
}

D
duke 已提交
1504 1505 1506 1507
// Used internally for comparisons only
// getTimeMillis guaranteed to not move backwards on Solaris
jlong getTimeMillis() {
  jlong nanotime = getTimeNanos();
1508
  return (jlong)(nanotime / NANOSECS_PER_MILLISEC);
D
duke 已提交
1509 1510
}

S
sbohne 已提交
1511 1512
// Must return millis since Jan 1 1970 for JVM_CurrentTimeMillis
jlong os::javaTimeMillis() {
D
duke 已提交
1513 1514
  timeval t;
  if (gettimeofday( &t, NULL) == -1)
1515
    fatal(err_msg("os::javaTimeMillis: gettimeofday (%s)", strerror(errno)));
D
duke 已提交
1516 1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 1547 1548 1549 1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 1575 1576 1577 1578 1579 1580 1581 1582 1583
  return jlong(t.tv_sec) * 1000  +  jlong(t.tv_usec) / 1000;
}

// Monotonic nanosecond clock for System.nanoTime(); never moves backwards
// (see getTimeNanos()).
jlong os::javaTimeNanos() {
  return (jlong)getTimeNanos();
}

// Describe the javaTimeNanos() clock for JVMTI consumers.
void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;      // gethrtime() uses all 64 bits
  info_ptr->may_skip_backward = false;    // not subject to resetting or drifting
  info_ptr->may_skip_forward = false;     // not subject to resetting or drifting
  info_ptr->kind = JVMTI_TIMER_ELAPSED;   // elapsed not CPU time
}

// Format the current local time into buf as "YYYY-MM-DD HH:MM:SS" and
// return buf.
char * os::local_time_string(char *buf, size_t buflen) {
  time_t epoch_secs;
  struct tm local_tm;
  time(&epoch_secs);
  localtime_r(&epoch_secs, &local_tm);
  jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
               local_tm.tm_year + 1900, local_tm.tm_mon + 1, local_tm.tm_mday,
               local_tm.tm_hour, local_tm.tm_min, local_tm.tm_sec);
  return buf;
}

// Note: os::shutdown() might be called very early during initialization, or
// called from signal handler. Before adding something to os::shutdown(), make
// sure it is async-safe and can handle partially initialized VM.
void os::shutdown() {

  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // needs to remove object in file system
  AttachListener::abort();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook; run it last so earlier cleanup has completed
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }
}

// Note: os::abort() might be called very early during initialization, or
// called from signal handler. Before adding something to os::abort(), make
// sure it is async-safe and can handle partially initialized VM.
// Runs the shutdown sequence, then either dumps core (dump_core) or exits
// with status 1.  Never returns.
void os::abort(bool dump_core) {
  os::shutdown();
  if (dump_core) {
#ifndef PRODUCT
    fdStream out(defaultStream::output_fd());
    out.print_raw("Current thread is ");
    char buf[16];
    jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
    out.print_raw_cr(buf);
    out.print_raw_cr("Dumping core ...");
#endif
    ::abort(); // dump core (for debugging)
  }

  ::exit(1);
}

// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
1584
  ::abort(); // dump core (for debugging)
D
duke 已提交
1585 1586 1587 1588 1589 1590
}

// DLL functions

// Shared libraries on Solaris use the ELF ".so" suffix.
const char* os::dll_file_extension() { return ".so"; }

1591 1592 1593
// This must be hard coded because it's the system's temporary
// directory not the java application's temp directory, ala java.io.tmpdir.
// Always "/tmp" on Solaris.
const char* os::get_temp_directory() { return "/tmp"; }
D
duke 已提交
1594

P
phh 已提交
1595 1596 1597 1598 1599 1600 1601 1602
// Returns true iff 'filename' names an existing file system object.
// NULL or empty names are rejected without touching the file system.
static bool file_exists(const char* filename) {
  if (filename == NULL || filename[0] == '\0') {
    return false;
  }
  struct stat statbuf;
  return os::stat(filename, &statbuf) == 0;
}

1603
bool os::dll_build_name(char* buffer, size_t buflen,
P
phh 已提交
1604
                        const char* pname, const char* fname) {
1605
  bool retval = false;
K
kamg 已提交
1606 1607
  const size_t pnamelen = pname ? strlen(pname) : 0;

1608
  // Return error on buffer overflow.
K
kamg 已提交
1609
  if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
1610
    return retval;
K
kamg 已提交
1611 1612 1613
  }

  if (pnamelen == 0) {
P
phh 已提交
1614
    snprintf(buffer, buflen, "lib%s.so", fname);
1615
    retval = true;
P
phh 已提交
1616 1617 1618
  } else if (strchr(pname, *os::path_separator()) != NULL) {
    int n;
    char** pelements = split_path(pname, &n);
1619
    if (pelements == NULL) {
D
Merge  
dcubed 已提交
1620
      return false;
1621
    }
P
phh 已提交
1622 1623 1624 1625 1626 1627 1628
    for (int i = 0 ; i < n ; i++) {
      // really shouldn't be NULL but what the heck, check can't hurt
      if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
        continue; // skip the empty path values
      }
      snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
      if (file_exists(buffer)) {
1629
        retval = true;
P
phh 已提交
1630 1631 1632 1633 1634 1635
        break;
      }
    }
    // release the storage
    for (int i = 0 ; i < n ; i++) {
      if (pelements[i] != NULL) {
Z
zgu 已提交
1636
        FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal);
P
phh 已提交
1637 1638 1639
      }
    }
    if (pelements != NULL) {
Z
zgu 已提交
1640
      FREE_C_HEAP_ARRAY(char*, pelements, mtInternal);
P
phh 已提交
1641
    }
K
kamg 已提交
1642
  } else {
P
phh 已提交
1643
    snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
1644
    retval = true;
K
kamg 已提交
1645
  }
1646
  return retval;
K
kamg 已提交
1647 1648
}

1649
// check if addr is inside libjvm.so
D
duke 已提交
1650 1651 1652 1653 1654
bool os::address_is_in_vm(address addr) {
  static address libjvm_base_addr;
  Dl_info dlinfo;

  if (libjvm_base_addr == NULL) {
1655 1656 1657
    if (dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo) != 0) {
      libjvm_base_addr = (address)dlinfo.dli_fbase;
    }
D
duke 已提交
1658 1659 1660
    assert(libjvm_base_addr !=NULL, "Cannot obtain base address for libjvm");
  }

1661
  if (dladdr((void *)addr, &dlinfo) != 0) {
D
duke 已提交
1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672
    if (libjvm_base_addr == (address)dlinfo.dli_fbase) return true;
  }

  return false;
}

typedef int (*dladdr1_func_type) (void *, Dl_info *, void **, int);
static dladdr1_func_type dladdr1_func = NULL;

bool os::dll_address_to_function_name(address addr, char *buf,
                                      int buflen, int * offset) {
1673 1674 1675
  // buf is not optional, but offset is optional
  assert(buf != NULL, "sanity check");

D
duke 已提交
1676 1677 1678
  Dl_info dlinfo;

  // dladdr1_func was initialized in os::init()
1679 1680 1681 1682 1683 1684 1685 1686 1687 1688
  if (dladdr1_func != NULL) {
    // yes, we have dladdr1

    // Support for dladdr1 is checked at runtime; it may be
    // available even if the vm is built on a machine that does
    // not have dladdr1 support.  Make sure there is a value for
    // RTLD_DL_SYMENT.
    #ifndef RTLD_DL_SYMENT
    #define RTLD_DL_SYMENT 1
    #endif
1689
#ifdef _LP64
1690
    Elf64_Sym * info;
1691
#else
1692
    Elf32_Sym * info;
1693
#endif
1694 1695 1696 1697 1698 1699 1700 1701 1702 1703 1704
    if (dladdr1_func((void *)addr, &dlinfo, (void **)&info,
                     RTLD_DL_SYMENT) != 0) {
      // see if we have a matching symbol that covers our address
      if (dlinfo.dli_saddr != NULL &&
          (char *)dlinfo.dli_saddr + info->st_size > (char *)addr) {
        if (dlinfo.dli_sname != NULL) {
          if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
            jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
          }
          if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
          return true;
1705 1706
        }
      }
1707 1708
      // no matching symbol so try for just file info
      if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
1709
        if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
1710
                            buf, buflen, offset, dlinfo.dli_fname)) {
1711 1712
          return true;
        }
D
duke 已提交
1713
      }
1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733
    }
    buf[0] = '\0';
    if (offset != NULL) *offset  = -1;
    return false;
  }

  // no, only dladdr is available
  if (dladdr((void *)addr, &dlinfo) != 0) {
    // see if we have a matching symbol
    if (dlinfo.dli_saddr != NULL && dlinfo.dli_sname != NULL) {
      if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
        jio_snprintf(buf, buflen, dlinfo.dli_sname);
      }
      if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
      return true;
    }
    // no matching symbol so try for just file info
    if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
      if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
                          buf, buflen, offset, dlinfo.dli_fname)) {
1734
        return true;
D
duke 已提交
1735
      }
1736
    }
D
duke 已提交
1737
  }
1738 1739 1740
  buf[0] = '\0';
  if (offset != NULL) *offset  = -1;
  return false;
D
duke 已提交
1741 1742 1743 1744
}

bool os::dll_address_to_library_name(address addr, char* buf,
                                     int buflen, int* offset) {
1745 1746 1747
  // buf is not optional, but offset is optional
  assert(buf != NULL, "sanity check");

D
duke 已提交
1748 1749
  Dl_info dlinfo;

1750 1751 1752 1753 1754 1755 1756 1757
  if (dladdr((void*)addr, &dlinfo) != 0) {
    if (dlinfo.dli_fname != NULL) {
      jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname);
    }
    if (dlinfo.dli_fbase != NULL && offset != NULL) {
      *offset = addr - (address)dlinfo.dli_fbase;
    }
    return true;
D
duke 已提交
1758
  }
1759 1760 1761 1762

  buf[0] = '\0';
  if (offset) *offset = -1;
  return false;
D
duke 已提交
1763 1764 1765 1766 1767
}

// Prints the names and full paths of all opened dynamic libraries
// for current process
void os::print_dll_info(outputStream * st) {
1768 1769 1770 1771
  Dl_info dli;
  void *handle;
  Link_map *map;
  Link_map *p;
D
duke 已提交
1772

1773
  st->print_cr("Dynamic libraries:"); st->flush();
D
duke 已提交
1774

1775 1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789
  if (dladdr(CAST_FROM_FN_PTR(void *, os::print_dll_info), &dli) == 0 ||
      dli.dli_fname == NULL) {
    st->print_cr("Error: Cannot print dynamic libraries.");
    return;
  }
  handle = dlopen(dli.dli_fname, RTLD_LAZY);
  if (handle == NULL) {
    st->print_cr("Error: Cannot print dynamic libraries.");
    return;
  }
  dlinfo(handle, RTLD_DI_LINKMAP, &map);
  if (map == NULL) {
    st->print_cr("Error: Cannot print dynamic libraries.");
    return;
  }
D
duke 已提交
1790

1791 1792
  while (map->l_prev != NULL)
    map = map->l_prev;
D
duke 已提交
1793

1794 1795 1796 1797
  while (map != NULL) {
    st->print_cr(PTR_FORMAT " \t%s", map->l_addr, map->l_name);
    map = map->l_next;
  }
D
duke 已提交
1798

1799
  dlclose(handle);
D
duke 已提交
1800 1801 1802 1803 1804 1805 1806 1807 1808 1809 1810 1811 1812 1813 1814 1815 1816 1817 1818 1819 1820 1821 1822 1823 1824 1825 1826 1827 1828 1829 1830 1831 1832 1833 1834 1835 1836 1837 1838 1839 1840 1841 1842 1843 1844 1845 1846 1847 1848 1849 1850 1851 1852 1853 1854 1855 1856 1857 1858 1859 1860 1861 1862
}

  // Loads .dll/.so and
  // in case of error it checks if .dll/.so was built for the
  // same architecture as Hotspot is running on

void * os::dll_load(const char *filename, char *ebuf, int ebuflen)
{
  void * result= ::dlopen(filename, RTLD_LAZY);
  if (result != NULL) {
    // Successful loading
    return result;
  }

  Elf32_Ehdr elf_head;

  // Read system error message into ebuf
  // It may or may not be overwritten below
  ::strncpy(ebuf, ::dlerror(), ebuflen-1);
  ebuf[ebuflen-1]='\0';
  int diag_msg_max_length=ebuflen-strlen(ebuf);
  char* diag_msg_buf=ebuf+strlen(ebuf);

  if (diag_msg_max_length==0) {
    // No more space in ebuf for additional diagnostics message
    return NULL;
  }


  int file_descriptor= ::open(filename, O_RDONLY | O_NONBLOCK);

  if (file_descriptor < 0) {
    // Can't open library, report dlerror() message
    return NULL;
  }

  bool failed_to_read_elf_head=
    (sizeof(elf_head)!=
        (::read(file_descriptor, &elf_head,sizeof(elf_head)))) ;

  ::close(file_descriptor);
  if (failed_to_read_elf_head) {
    // file i/o error - report dlerror() msg
    return NULL;
  }

  typedef struct {
    Elf32_Half  code;         // Actual value as defined in elf.h
    Elf32_Half  compat_class; // Compatibility of archs at VM's sense
    char        elf_class;    // 32 or 64 bit
    char        endianess;    // MSB or LSB
    char*       name;         // String representation
  } arch_t;

  static const arch_t arch_array[]={
    {EM_386,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
    {EM_486,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
    {EM_IA_64,       EM_IA_64,   ELFCLASS64, ELFDATA2LSB, (char*)"IA 64"},
    {EM_X86_64,      EM_X86_64,  ELFCLASS64, ELFDATA2LSB, (char*)"AMD 64"},
    {EM_SPARC,       EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
    {EM_SPARC32PLUS, EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
    {EM_SPARCV9,     EM_SPARCV9, ELFCLASS64, ELFDATA2MSB, (char*)"Sparc v9 64"},
    {EM_PPC,         EM_PPC,     ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"},
1863 1864
    {EM_PPC64,       EM_PPC64,   ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"},
    {EM_ARM,         EM_ARM,     ELFCLASS32, ELFDATA2LSB, (char*)"ARM 32"}
D
duke 已提交
1865 1866 1867 1868 1869 1870 1871 1872 1873 1874 1875 1876 1877 1878 1879 1880
  };

  #if  (defined IA32)
    static  Elf32_Half running_arch_code=EM_386;
  #elif   (defined AMD64)
    static  Elf32_Half running_arch_code=EM_X86_64;
  #elif  (defined IA64)
    static  Elf32_Half running_arch_code=EM_IA_64;
  #elif  (defined __sparc) && (defined _LP64)
    static  Elf32_Half running_arch_code=EM_SPARCV9;
  #elif  (defined __sparc) && (!defined _LP64)
    static  Elf32_Half running_arch_code=EM_SPARC;
  #elif  (defined __powerpc64__)
    static  Elf32_Half running_arch_code=EM_PPC64;
  #elif  (defined __powerpc__)
    static  Elf32_Half running_arch_code=EM_PPC;
1881 1882
  #elif (defined ARM)
    static  Elf32_Half running_arch_code=EM_ARM;
D
duke 已提交
1883 1884
  #else
    #error Method os::dll_load requires that one of following is defined:\
1885
         IA32, AMD64, IA64, __sparc, __powerpc__, ARM, ARM
D
duke 已提交
1886 1887 1888 1889 1890 1891 1892 1893 1894 1895 1896 1897 1898 1899 1900 1901 1902 1903 1904 1905 1906 1907 1908 1909 1910 1911 1912 1913 1914 1915 1916 1917 1918 1919 1920 1921 1922 1923 1924 1925 1926 1927 1928 1929 1930 1931 1932 1933 1934 1935 1936 1937
  #endif

  // Identify compatability class for VM's architecture and library's architecture
  // Obtain string descriptions for architectures

  arch_t lib_arch={elf_head.e_machine,0,elf_head.e_ident[EI_CLASS], elf_head.e_ident[EI_DATA], NULL};
  int running_arch_index=-1;

  for (unsigned int i=0 ; i < ARRAY_SIZE(arch_array) ; i++ ) {
    if (running_arch_code == arch_array[i].code) {
      running_arch_index    = i;
    }
    if (lib_arch.code == arch_array[i].code) {
      lib_arch.compat_class = arch_array[i].compat_class;
      lib_arch.name         = arch_array[i].name;
    }
  }

  assert(running_arch_index != -1,
    "Didn't find running architecture code (running_arch_code) in arch_array");
  if (running_arch_index == -1) {
    // Even though running architecture detection failed
    // we may still continue with reporting dlerror() message
    return NULL;
  }

  if (lib_arch.endianess != arch_array[running_arch_index].endianess) {
    ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: endianness mismatch)");
    return NULL;
  }

  if (lib_arch.elf_class != arch_array[running_arch_index].elf_class) {
    ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: architecture word width mismatch)");
    return NULL;
  }

  if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) {
    if ( lib_arch.name!=NULL ) {
      ::snprintf(diag_msg_buf, diag_msg_max_length-1,
        " (Possible cause: can't load %s-bit .so on a %s-bit platform)",
        lib_arch.name, arch_array[running_arch_index].name);
    } else {
      ::snprintf(diag_msg_buf, diag_msg_max_length-1,
      " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)",
        lib_arch.code,
        arch_array[running_arch_index].name);
    }
  }

  return NULL;
}

K
kamg 已提交
1938 1939 1940
// Resolves symbol 'name' in the shared object 'handle'; returns the
// symbol's address, or NULL if it is not found (thin dlsym wrapper).
void* os::dll_lookup(void* handle, const char* name) {
  return dlsym(handle, name);
}
D
duke 已提交
1941

1942 1943 1944 1945
// Returns a dlopen() handle for the whole process (dlopen(NULL)), usable
// with dlsym() to search the executable and its loaded dependencies.
void* os::get_default_process_handle() {
  return (void*)::dlopen(NULL, RTLD_LAZY);
}

1946 1947 1948 1949 1950 1951 1952 1953 1954
// Like ::stat(), but first normalizes 'path' with os::native_path().
// Rejects paths that would overflow the local buffer with ENAMETOOLONG.
int os::stat(const char *path, struct stat *sbuf) {
  char pathbuf[MAX_PATH];
  if (strlen(path) > MAX_PATH - 1) {
    errno = ENAMETOOLONG;
    return -1;
  }
  strcpy(pathbuf, path);
  os::native_path(pathbuf);
  return ::stat(pathbuf, sbuf);
}
D
duke 已提交
1955

1956 1957
static bool _print_ascii_file(const char* filename, outputStream* st) {
  int fd = ::open(filename, O_RDONLY);
D
duke 已提交
1958 1959 1960 1961 1962 1963
  if (fd == -1) {
     return false;
  }

  char buf[32];
  int bytes;
1964
  while ((bytes = ::read(fd, buf, sizeof(buf))) > 0) {
D
duke 已提交
1965 1966 1967
    st->print_raw(buf, bytes);
  }

1968
  ::close(fd);
D
duke 已提交
1969 1970 1971 1972

  return true;
}

1973 1974 1975 1976 1977 1978 1979 1980
// Compact OS description: distro banner, uname fields, and the libthread
// flavor, each printed by its dedicated helper.
void os::print_os_info_brief(outputStream* st) {
  os::Solaris::print_distro_info(st);

  os::Posix::print_uname_info(st);

  os::Solaris::print_libversion_info(st);
}

D
duke 已提交
1981 1982 1983
void os::print_os_info(outputStream* st) {
  st->print("OS:");

1984
  os::Solaris::print_distro_info(st);
D
duke 已提交
1985

1986
  os::Posix::print_uname_info(st);
D
duke 已提交
1987

1988
  os::Solaris::print_libversion_info(st);
D
duke 已提交
1989

1990 1991 1992 1993 1994 1995 1996 1997 1998 1999
  os::Posix::print_rlimit_info(st);

  os::Posix::print_load_average(st);
}

void os::Solaris::print_distro_info(outputStream* st) {
  if (!_print_ascii_file("/etc/release", st)) {
      st->print("Solaris");
    }
    st->cr();
D
duke 已提交
2000 2001
}

2002 2003 2004 2005 2006 2007 2008 2009 2010
// Reports which Solaris libthread flavor (T1 or T2) the VM detected.
void os::Solaris::print_libversion_info(outputStream* st) {
  const char* flavor = os::Solaris::T2_libthread() ? "  (T2 libthread)"
                                                   : "  (T1 libthread)";
  st->print(flavor);
  st->cr();
}
D
duke 已提交
2011 2012 2013

static bool check_addr0(outputStream* st) {
  jboolean status = false;
2014
  int fd = ::open("/proc/self/map",O_RDONLY);
D
duke 已提交
2015 2016
  if (fd >= 0) {
    prmap_t p;
2017
    while(::read(fd, &p, sizeof(p)) > 0) {
D
duke 已提交
2018 2019 2020 2021 2022 2023 2024 2025 2026 2027 2028
      if (p.pr_vaddr == 0x0) {
        st->print("Warning: Address: 0x%x, Size: %dK, ",p.pr_vaddr, p.pr_size/1024, p.pr_mapname);
        st->print("Mapped file: %s, ", p.pr_mapname[0] == '\0' ? "None" : p.pr_mapname);
        st->print("Access:");
        st->print("%s",(p.pr_mflags & MA_READ)  ? "r" : "-");
        st->print("%s",(p.pr_mflags & MA_WRITE) ? "w" : "-");
        st->print("%s",(p.pr_mflags & MA_EXEC)  ? "x" : "-");
        st->cr();
        status = true;
      }
    }
D
dcubed 已提交
2029
    ::close(fd);
D
duke 已提交
2030 2031 2032 2033
  }
  return status;
}

2034 2035 2036 2037
// Platform-dependent CPU details hook; currently prints nothing extra
// on Solaris.
void os::pd_print_cpu_info(outputStream* st) {
  // Nothing to do for now.
}

D
duke 已提交
2038 2039 2040 2041 2042 2043
void os::print_memory_info(outputStream* st) {
  st->print("Memory:");
  st->print(" %dk page", os::vm_page_size()>>10);
  st->print(", physical " UINT64_FORMAT "k", os::physical_memory()>>10);
  st->print("(" UINT64_FORMAT "k free)", os::available_memory() >> 10);
  st->cr();
2044 2045 2046
  if (VMError::fatal_error_in_progress()) {
     (void) check_addr0(st);
  }
D
duke 已提交
2047 2048
}

2049 2050
void os::print_siginfo(outputStream* st, void* siginfo) {
  const siginfo_t* si = (const siginfo_t*)siginfo;
D
duke 已提交
2051

2052
  os::Posix::print_siginfo_brief(st, si);
D
duke 已提交
2053

2054
  if (si && (si->si_signo == SIGBUS || si->si_signo == SIGSEGV) &&
D
duke 已提交
2055 2056 2057 2058 2059 2060 2061 2062 2063 2064 2065 2066 2067 2068 2069 2070 2071 2072 2073 2074 2075 2076 2077 2078 2079 2080 2081 2082 2083 2084 2085 2086 2087 2088 2089 2090 2091 2092 2093 2094 2095 2096 2097 2098 2099 2100 2101 2102 2103 2104 2105 2106 2107 2108 2109 2110 2111 2112 2113 2114 2115 2116 2117 2118 2119 2120 2121 2122 2123
      UseSharedSpaces) {
    FileMapInfo* mapinfo = FileMapInfo::current_info();
    if (mapinfo->is_in_shared_space(si->si_addr)) {
      st->print("\n\nError accessing class data sharing archive."   \
                " Mapped file inaccessible during execution, "      \
                " possible disk/network problem.");
    }
  }
  st->cr();
}

// Moved from whole group, because we need them here for diagnostic
// prints.
#define OLDMAXSIGNUM 32
static int Maxsignum = 0;
static int *ourSigFlags = NULL;

extern "C" void sigINTRHandler(int, siginfo_t*, void*);

// Returns the sa_flags the VM recorded for 'sig' via set_our_sigflags().
// Only valid after os::Solaris::init_signal_mem() has allocated ourSigFlags.
int os::Solaris::get_our_sigflags(int sig) {
  assert(ourSigFlags!=NULL, "signal data structure not initialized");
  assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
  return ourSigFlags[sig];
}

// Records the sa_flags the VM installed for 'sig'; later compared in
// print_signal_handler() to detect handlers changed behind the VM's back.
void os::Solaris::set_our_sigflags(int sig, int flags) {
  assert(ourSigFlags!=NULL, "signal data structure not initialized");
  assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
  ourSigFlags[sig] = flags;
}


// Formats a human-readable name for a signal handler address into 'buf':
// "libname+0xoffset" when the containing library can be identified,
// otherwise the raw address. Returns 'buf'.
static const char* get_signal_handler_name(address handler,
                                           char* buf, int buflen) {
  int offset;
  bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
  if (found) {
    // skip directory names
    const char *p1, *p2;
    p1 = buf;
    size_t len = strlen(os::file_separator());
    while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
    // p1 points into buf itself; slide the basename to the front first so
    // the snprintf below never reads from its own destination (overlapping
    // source and destination arguments are undefined behavior).
    size_t base_len = strlen(p1);
    memmove(buf, p1, base_len + 1);
    jio_snprintf(buf + base_len, buflen - (int)base_len, "+0x%x", offset);
  } else {
    jio_snprintf(buf, buflen, PTR_FORMAT, handler);
  }
  return buf;
}

static void print_signal_handler(outputStream* st, int sig,
                                  char* buf, size_t buflen) {
  struct sigaction sa;

  sigaction(sig, NULL, &sa);

  st->print("%s: ", os::exception_name(sig, buf, buflen));

  address handler = (sa.sa_flags & SA_SIGINFO)
                  ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
                  : CAST_FROM_FN_PTR(address, sa.sa_handler);

  if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
    st->print("SIG_DFL");
  } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
    st->print("SIG_IGN");
  } else {
    st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
  }

2124 2125
  st->print(", sa_mask[0]=");
  os::Posix::print_signal_set_short(st, &sa.sa_mask);
D
duke 已提交
2126 2127 2128 2129 2130 2131 2132 2133

  address rh = VMError::get_resetted_sighandler(sig);
  // May be, handler was resetted by VMError?
  if(rh != NULL) {
    handler = rh;
    sa.sa_flags = VMError::get_resetted_sigflags(sig);
  }

2134 2135
  st->print(", sa_flags=");
  os::Posix::print_sa_flags(st, sa.sa_flags);
D
duke 已提交
2136 2137 2138 2139 2140 2141 2142 2143 2144 2145 2146 2147 2148 2149 2150 2151 2152 2153 2154 2155 2156 2157 2158 2159 2160 2161 2162 2163 2164 2165 2166 2167 2168 2169 2170

  // Check: is it our handler?
  if(handler == CAST_FROM_FN_PTR(address, signalHandler) ||
     handler == CAST_FROM_FN_PTR(address, sigINTRHandler)) {
    // It is our signal handler
    // check for flags
    if(sa.sa_flags != os::Solaris::get_our_sigflags(sig)) {
      st->print(
        ", flags was changed from " PTR32_FORMAT ", consider using jsig library",
        os::Solaris::get_our_sigflags(sig));
    }
  }
  st->cr();
}

// Prints one line per signal of interest showing the installed handler;
// 'buf' is scratch space used to format handler names.
void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
  st->print_cr("Signal Handlers:");
  print_signal_handler(st, SIGSEGV, buf, buflen);
  print_signal_handler(st, SIGBUS , buf, buflen);
  print_signal_handler(st, SIGFPE , buf, buflen);
  print_signal_handler(st, SIGPIPE, buf, buflen);
  print_signal_handler(st, SIGXFSZ, buf, buflen);
  print_signal_handler(st, SIGILL , buf, buflen);
  print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen);
  print_signal_handler(st, ASYNC_SIGNAL, buf, buflen);
  print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
  print_signal_handler(st, SHUTDOWN1_SIGNAL , buf, buflen);
  print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
  print_signal_handler(st, SHUTDOWN3_SIGNAL, buf, buflen);
  print_signal_handler(st, os::Solaris::SIGinterrupt(), buf, buflen);
  print_signal_handler(st, os::Solaris::SIGasync(), buf, buflen);
}

static char saved_jvm_path[MAXPATHLEN] = { 0 };

2171
// Find the full path to the current module, libjvm.so
D
duke 已提交
2172 2173 2174 2175 2176 2177 2178 2179 2180 2181 2182 2183 2184 2185 2186 2187
void os::jvm_path(char *buf, jint buflen) {
  // Error checking.
  if (buflen < MAXPATHLEN) {
    assert(false, "must use a large-enough buffer");
    buf[0] = '\0';
    return;
  }
  // Lazy resolve the path to current module.
  if (saved_jvm_path[0] != 0) {
    strcpy(buf, saved_jvm_path);
    return;
  }

  Dl_info dlinfo;
  int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
  assert(ret != 0, "cannot locate libjvm");
2188 2189 2190 2191 2192 2193
  if (ret != 0 && dlinfo.dli_fname != NULL) {
    realpath((char *)dlinfo.dli_fname, buf);
  } else {
    buf[0] = '\0';
    return;
  }
D
duke 已提交
2194

2195
  if (Arguments::created_by_gamma_launcher()) {
D
duke 已提交
2196 2197 2198 2199 2200 2201 2202 2203 2204 2205 2206 2207 2208 2209 2210 2211 2212
    // Support for the gamma launcher.  Typical value for buf is
    // "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm.so".  If "/jre/lib/" appears at
    // the right place in the string, then assume we are installed in a JDK and
    // we're done.  Otherwise, check for a JAVA_HOME environment variable and fix
    // up the path so it looks like libjvm.so is installed there (append a
    // fake suffix hotspot/libjvm.so).
    const char *p = buf + strlen(buf) - 1;
    for (int count = 0; p > buf && count < 5; ++count) {
      for (--p; p > buf && *p != '/'; --p)
        /* empty */ ;
    }

    if (strncmp(p, "/jre/lib/", 9) != 0) {
      // Look for JAVA_HOME in the environment.
      char* java_home_var = ::getenv("JAVA_HOME");
      if (java_home_var != NULL && java_home_var[0] != 0) {
        char cpu_arch[12];
2213 2214
        char* jrelib_p;
        int   len;
D
duke 已提交
2215 2216 2217 2218 2219 2220 2221 2222 2223
        sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
#ifdef _LP64
        // If we are on sparc running a 64-bit vm, look in jre/lib/sparcv9.
        if (strcmp(cpu_arch, "sparc") == 0) {
          strcat(cpu_arch, "v9");
        } else if (strcmp(cpu_arch, "i386") == 0) {
          strcpy(cpu_arch, "amd64");
        }
#endif
2224
        // Check the current module name "libjvm.so".
D
duke 已提交
2225 2226 2227 2228
        p = strrchr(buf, '/');
        assert(strstr(p, "/libjvm") == p, "invalid library name");

        realpath(java_home_var, buf);
2229 2230 2231
        // determine if this is a legacy image or modules image
        // modules image doesn't have "jre" subdirectory
        len = strlen(buf);
2232
        assert(len < buflen, "Ran out of buffer space");
2233 2234 2235 2236 2237 2238
        jrelib_p = buf + len;
        snprintf(jrelib_p, buflen-len, "/jre/lib/%s", cpu_arch);
        if (0 != access(buf, F_OK)) {
          snprintf(jrelib_p, buflen-len, "/lib/%s", cpu_arch);
        }

D
duke 已提交
2239
        if (0 == access(buf, F_OK)) {
2240
          // Use current module name "libjvm.so"
2241
          len = strlen(buf);
2242
          snprintf(buf + len, buflen-len, "/hotspot/libjvm.so");
D
duke 已提交
2243 2244 2245 2246 2247 2248 2249 2250
        } else {
          // Go back to path of .so
          realpath((char *)dlinfo.dli_fname, buf);
        }
      }
    }
  }

2251
  strncpy(saved_jvm_path, buf, MAXPATHLEN);
D
duke 已提交
2252 2253 2254 2255 2256 2257 2258 2259 2260 2261 2262 2263
}


// JNI native-method name mangling: Solaris symbols carry no prefix.
void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
  // no prefix required, not even "_"
}


// JNI native-method name mangling: Solaris symbols carry no suffix.
void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
  // no suffix required
}

2264 2265 2266 2267 2268 2269 2270 2271 2272 2273 2274 2275 2276 2277 2278 2279 2280
// This method is a copy of JDK's sysGetLastErrorString
// from src/solaris/hpi/src/system_md.c

// Copies the strerror() text for the current errno into 'buf'
// (truncated to fit) and returns the number of characters stored,
// or 0 if errno is clear or the buffer cannot hold anything.
size_t os::lasterror(char *buf, size_t len) {

  // Guard len == 0 explicitly: previously n = len - 1 underflowed the
  // unsigned size_t, making strncpy copy a huge amount and buf[n] write
  // far out of bounds.
  if (errno == 0 || len == 0)  return 0;

  const char *s = ::strerror(errno);
  size_t n = ::strlen(s);
  if (n >= len) {
    n = len - 1;  // truncate, leaving room for the terminating NUL
  }
  ::strncpy(buf, s, n);
  buf[n] = '\0';
  return n;
}

D
duke 已提交
2281 2282 2283 2284 2285 2286 2287 2288 2289 2290 2291 2292 2293 2294 2295 2296 2297 2298 2299 2300

// sun.misc.Signal

extern "C" {
  // Handler (exposed through os::user_handler()) for signals forwarded to
  // Java-level sun.misc.Signal handlers: records the occurrence and lets
  // the dispatcher thread do the real work.
  static void UserHandler(int sig, void *siginfo, void *context) {
    // Ctrl-C is pressed during error reporting, likely because the error
    // handler fails to abort. Let VM die immediately.
    if (sig == SIGINT && is_error_reported()) {
       os::die();
    }

    os::signal_notify(sig);
    // We do not need to reinstate the signal handler each time...
  }
}

// Returns the address of UserHandler (above) so shared code can refer
// to it when installing or comparing signal handlers.
void* os::user_handler() {
  return CAST_FROM_FN_PTR(void*, UserHandler);
}

S
sla 已提交
2301 2302 2303 2304 2305 2306 2307 2308 2309 2310 2311 2312 2313 2314 2315 2316 2317 2318 2319 2320 2321 2322 2323 2324 2325 2326 2327 2328 2329 2330 2331 2332 2333 2334 2335 2336 2337 2338 2339 2340 2341 2342 2343 2344 2345 2346 2347 2348 2349 2350 2351
// Counting semaphore wrapping the Solaris sema_t primitive.
// StackObj: instances are stack-allocated only.
class Semaphore : public StackObj {
  public:
    Semaphore();
    ~Semaphore();
    void signal();     // increment the count, waking one waiter
    void wait();       // block until the count is positive, then decrement
    bool trywait();    // non-blocking wait; true iff the count was taken
    bool timedwait(unsigned int sec, int nsec);  // wait with a timeout
  private:
    sema_t _semaphore;
};


// Initial count is zero: the first wait() blocks until a signal().
Semaphore::Semaphore() {
  sema_init(&_semaphore, 0, NULL, NULL);
}

Semaphore::~Semaphore() {
  sema_destroy(&_semaphore);
}

void Semaphore::signal() {
  sema_post(&_semaphore);
}

void Semaphore::wait() {
  sema_wait(&_semaphore);
}

bool Semaphore::trywait() {
  return sema_trywait(&_semaphore) == 0;
}

// Waits at most sec seconds + nsec nanoseconds (packed into a timespec
// by unpackTime). Returns true if the semaphore was acquired, false on
// timeout or any other failure; retries on interruption.
bool Semaphore::timedwait(unsigned int sec, int nsec) {
  struct timespec ts;
  unpackTime(&ts, false, (sec * NANOSECS_PER_SEC) + nsec);

  while (1) {
    // NOTE(review): sema_timedwait() is documented to return the error
    // code rather than setting errno, so the errno tests below may be
    // examining a stale value -- verify against the sema_timedwait(3C)
    // man page before relying on the EINTR/ETIME distinction.
    int result = sema_timedwait(&_semaphore, &ts);
    if (result == 0) {
      return true;
    } else if (errno == EINTR) {
      continue;
    } else if (errno == ETIME) {
      return false;
    } else {
      return false;
    }
  }
}

D
duke 已提交
2352 2353 2354 2355 2356 2357 2358 2359 2360 2361 2362 2363 2364 2365 2366 2367 2368 2369 2370 2371 2372 2373 2374 2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 2385 2386 2387 2388 2389 2390 2391 2392 2393 2394 2395 2396 2397 2398 2399 2400 2401 2402 2403 2404
extern "C" {
  typedef void (*sa_handler_t)(int);
  typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
}

// Installs 'handler' for 'signal_number' with all signals masked during
// delivery and SA_RESTART semantics. Returns the previously installed
// handler, or (void *)-1 if sigaction() failed.
void* os::signal(int signal_number, void* handler) {
  struct sigaction new_action, old_action;
  sigfillset(&(new_action.sa_mask));  // block everything while the handler runs
  new_action.sa_flags = SA_RESTART & ~SA_RESETHAND;
  new_action.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);

  if (sigaction(signal_number, &new_action, &old_action) != 0) {
    return (void *)-1;  // registration failed
  }
  return CAST_FROM_FN_PTR(void*, old_action.sa_handler);
}

// Delivers 'signal_number' to the current process via raise().
void os::signal_raise(int signal_number) {
  raise(signal_number);
}

/*
 * The following code is moved from os.cpp for making this
 * code platform specific, which it is by its very nature.
 */

// a counter for each possible signal value
static int Sigexit = 0;
static int Maxlibjsigsigs;
static jint *pending_signals = NULL;
static int *preinstalled_sigs = NULL;
static struct sigaction *chainedsigactions = NULL;
static sema_t sig_sem;
typedef int (*version_getting_t)();
version_getting_t os::Solaris::get_libjsig_version = NULL;
static int libjsigversion = NULL;

// Returns the pseudo-signal number used to tell the signal dispatcher
// thread to exit (one past SIGRTMAX; computed in init_signal_mem()).
int os::sigexitnum_pd() {
  assert(Sigexit > 0, "signal memory not yet initialized");
  return Sigexit;
}

void os::Solaris::init_signal_mem() {
  // Initialize signal structures
  Maxsignum = SIGRTMAX;
  Sigexit = Maxsignum+1;
  assert(Maxsignum >0, "Unable to obtain max signal number");

  Maxlibjsigsigs = Maxsignum;

  // pending_signals has one int per signal
  // The additional signal is for SIGEXIT - exit signal to signal_thread
Z
zgu 已提交
2405
  pending_signals = (jint *)os::malloc(sizeof(jint) * (Sigexit+1), mtInternal);
D
duke 已提交
2406 2407 2408 2409
  memset(pending_signals, 0, (sizeof(jint) * (Sigexit+1)));

  if (UseSignalChaining) {
     chainedsigactions = (struct sigaction *)malloc(sizeof(struct sigaction)
Z
zgu 已提交
2410
       * (Maxsignum + 1), mtInternal);
D
duke 已提交
2411
     memset(chainedsigactions, 0, (sizeof(struct sigaction) * (Maxsignum + 1)));
Z
zgu 已提交
2412
     preinstalled_sigs = (int *)os::malloc(sizeof(int) * (Maxsignum + 1), mtInternal);
D
duke 已提交
2413 2414
     memset(preinstalled_sigs, 0, (sizeof(int) * (Maxsignum + 1)));
  }
Z
zgu 已提交
2415
  ourSigFlags = (int*)malloc(sizeof(int) * (Maxsignum + 1 ), mtInternal);
D
duke 已提交
2416 2417 2418 2419 2420 2421 2422 2423 2424 2425 2426 2427 2428 2429 2430 2431 2432 2433 2434 2435 2436 2437 2438 2439 2440 2441 2442 2443 2444 2445 2446 2447 2448 2449 2450 2451 2452 2453 2454 2455 2456 2457 2458 2459 2460 2461 2462 2463 2464 2465 2466 2467 2468 2469 2470 2471 2472 2473 2474 2475 2476 2477 2478 2479 2480 2481 2482 2483 2484 2485 2486 2487 2488 2489 2490 2491 2492 2493 2494 2495 2496 2497 2498 2499 2500 2501 2502
  memset(ourSigFlags, 0, sizeof(int) * (Maxsignum + 1));
}

// Creates the semaphore that hands signal notifications from handlers
// to the dispatcher (see signal_notify / check_pending_signals).
void os::signal_init_pd() {
  int ret;

  ret = ::sema_init(&sig_sem, 0, NULL, NULL);
  assert(ret == 0, "sema_init() failed");
}

// Records one pending occurrence of 'signal_number' (atomic increment)
// and posts sig_sem to wake a waiter in check_pending_signals().
void os::signal_notify(int signal_number) {
  int ret;

  Atomic::inc(&pending_signals[signal_number]);
  ret = ::sema_post(&sig_sem);
  assert(ret == 0, "sema_post() failed");
}

// Dequeue one pending signal number.  Returns -1 when nothing is pending
// and wait_for_signal is false; otherwise blocks on sig_sem (honoring the
// suspend-equivalent protocol) until os::signal_notify() posts.
static int check_pending_signals(bool wait_for_signal) {
  int ret;
  while (true) {
    // Claim one occurrence of a pending signal with a CAS decrement.
    for (int i = 0; i < Sigexit + 1; i++) {
      jint n = pending_signals[i];
      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
        return i;
      }
    }
    if (!wait_for_signal) {
      return -1;
    }
    JavaThread *thread = JavaThread::current();
    ThreadBlockInVM tbivm(thread);

    bool threadIsSuspended;
    do {
      thread->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
      // Retry sema_wait() on EINTR so spurious signal delivery does not
      // abort the wait.
      while((ret = ::sema_wait(&sig_sem)) == EINTR)
          ;
      assert(ret == 0, "sema_wait() failed");

      // were we externally suspended while we were waiting?
      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
      if (threadIsSuspended) {
        //
        // The semaphore has been incremented, but while we were waiting
        // another thread suspended us. We don't want to continue running
        // while suspended because that would surprise the thread that
        // suspended us.
        //
        ret = ::sema_post(&sig_sem);
        assert(ret == 0, "sema_post() failed");

        thread->java_suspend_self();
      }
    } while (threadIsSuspended);
  }
}

// Non-blocking probe for a pending signal; -1 when none is queued.
int os::signal_lookup() {
  const bool block = false;
  return check_pending_signals(block);
}

// Block until a signal is pending, then return its number.
int os::signal_wait() {
  const bool block = true;
  return check_pending_signals(block);
}

////////////////////////////////////////////////////////////////////////////////
// Virtual Memory

// Cached system page size; -1 until os::init() sets it (asserted in the
// accessors below).
static int page_size = -1;

// The mmap MAP_ALIGN flag is supported on Solaris 9 and later.  init_2() will
// clear this var if support is not available.
static bool has_map_align = true;

// Accessor for the cached system page size (set by os::init()).
int os::vm_page_size() {
  assert(page_size != -1, "must call os::init");
  const int cached = page_size;
  return cached;
}

// Solaris allocates memory by pages, so the allocation granularity is
// simply the page size.
int os::vm_allocation_granularity() {
  assert(page_size != -1, "must call os::init");
  const int granularity = page_size;
  return granularity;
}

// Return true if an mmap(2) errno value is one the caller can recover from
// (our reservation is still intact); anything else may have destroyed the
// reserved mapping and is treated as fatal by the commit path.
static bool recoverable_mmap_error(int err) {
  // See if the error is one we can let the caller handle. This
  // list of errno values comes from the Solaris mmap(2) man page.
  switch (err) {
  case EBADF:
  case EINVAL:
  case ENOTSUP:
    // let the caller deal with these errors
    return true;

  default:
    // Any remaining errors on this OS can cause our reserved mapping
    // to be lost. That can cause confusion where different data
    // structures think they have the same memory mapped. The worst
    // scenario is if both the VM and a library think they have the
    // same memory mapped.
    return false;
  }
}

// Emit a diagnostic for a failed os::commit_memory(addr, bytes, exec) call,
// including the errno text.
static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec,
                                    int err) {
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", %d) failed; error='%s' (errno=%d)", addr, bytes, exec,
          strerror(err), err);
}

// Overload of the above for the alignment_hint variant of commit_memory.
static void warn_fail_commit_memory(char* addr, size_t bytes,
                                    size_t alignment_hint, bool exec,
                                    int err) {
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", " SIZE_FORMAT ", %d) failed; error='%s' (errno=%d)", addr, bytes,
          alignment_hint, exec, strerror(err), err);
}

int os::Solaris::commit_memory_impl(char* addr, size_t bytes, bool exec) {
C
coleenp 已提交
2539
  int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
D
duke 已提交
2540
  size_t size = bytes;
I
iveresov 已提交
2541 2542 2543 2544 2545
  char *res = Solaris::mmap_chunk(addr, size, MAP_PRIVATE|MAP_FIXED, prot);
  if (res != NULL) {
    if (UseNUMAInterleaving) {
      numa_make_global(addr, bytes);
    }
2546
    return 0;
I
iveresov 已提交
2547
  }
2548 2549 2550 2551 2552 2553 2554 2555 2556

  int err = errno;  // save errno from mmap() call in mmap_chunk()

  if (!recoverable_mmap_error(err)) {
    warn_fail_commit_memory(addr, bytes, exec, err);
    vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, "committing reserved memory.");
  }

  return err;
D
duke 已提交
2557 2558
}

2559 2560 2561 2562 2563 2564 2565 2566 2567 2568 2569 2570 2571 2572 2573
bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
  return Solaris::commit_memory_impl(addr, bytes, exec) == 0;
}

// Commit memory or abort the VM with the supplied message on any failure.
void os::pd_commit_memory_or_exit(char* addr, size_t bytes, bool exec,
                                  const char* mesg) {
  assert(mesg != NULL, "mesg must be specified");
  const int err = os::Solaris::commit_memory_impl(addr, bytes, exec);
  if (err == 0) {
    return;
  }
  // the caller wants all commit errors to exit with the specified mesg:
  warn_fail_commit_memory(addr, bytes, exec, err);
  vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg);
}

2574 2575 2576 2577 2578 2579 2580 2581 2582 2583 2584 2585 2586 2587
size_t os::Solaris::page_size_for_alignment(size_t alignment) {
  assert(is_size_aligned(alignment, (size_t) vm_page_size()),
         err_msg(SIZE_FORMAT " is not aligned to " SIZE_FORMAT,
                 alignment, (size_t) vm_page_size()));

  for (int i = 0; _page_sizes[i] != 0; i++) {
    if (is_size_aligned(alignment, _page_sizes[i])) {
      return _page_sizes[i];
    }
  }

  return (size_t) vm_page_size();
}

2588 2589 2590
int os::Solaris::commit_memory_impl(char* addr, size_t bytes,
                                    size_t alignment_hint, bool exec) {
  int err = Solaris::commit_memory_impl(addr, bytes, exec);
2591 2592 2593 2594 2595 2596 2597
  if (err == 0 && UseLargePages && alignment_hint > 0) {
    assert(is_size_aligned(bytes, alignment_hint),
           err_msg(SIZE_FORMAT " is not aligned to " SIZE_FORMAT, bytes, alignment_hint));

    // The syscall memcntl requires an exact page size (see man memcntl for details).
    size_t page_size = page_size_for_alignment(alignment_hint);
    if (page_size > (size_t) vm_page_size()) {
2598
      (void)Solaris::setup_large_pages(addr, bytes, page_size);
D
duke 已提交
2599 2600
    }
  }
2601 2602 2603 2604 2605 2606 2607 2608 2609 2610 2611 2612 2613 2614 2615 2616 2617 2618
  return err;
}

// Platform hook: commit memory with a large-page alignment hint.
bool os::pd_commit_memory(char* addr, size_t bytes, size_t alignment_hint,
                          bool exec) {
  const int err = Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec);
  return err == 0;
}

// Commit memory (with alignment hint) or abort the VM with 'mesg' on failure.
void os::pd_commit_memory_or_exit(char* addr, size_t bytes,
                                  size_t alignment_hint, bool exec,
                                  const char* mesg) {
  assert(mesg != NULL, "mesg must be specified");
  int err = os::Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec);
  if (err != 0) {
    // the caller wants all commit errors to exit with the specified mesg:
    warn_fail_commit_memory(addr, bytes, alignment_hint, exec, err);
    vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg);
  }
}

// Uncommit the pages in a specified region.
Z
zgu 已提交
2622
void os::pd_free_memory(char* addr, size_t bytes, size_t alignment_hint) {
D
duke 已提交
2623 2624 2625 2626 2627 2628
  if (madvise(addr, bytes, MADV_FREE) < 0) {
    debug_only(warning("MADV_FREE failed."));
    return;
  }
}

Z
zgu 已提交
2629
bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
2630
  return os::commit_memory(addr, size, !ExecMem);
2631 2632 2633 2634 2635 2636
}

// Guard pages are removed by simply uncommitting the underlying memory.
bool os::remove_stack_guard_pages(char* addr, size_t size) {
  const bool uncommitted = os::uncommit_memory(addr, size);
  return uncommitted;
}

D
duke 已提交
2637
// Change the page size in a given range.
Z
zgu 已提交
2638
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
D
duke 已提交
2639 2640
  assert((intptr_t)addr % alignment_hint == 0, "Address should be aligned.");
  assert((intptr_t)(addr + bytes) % alignment_hint == 0, "End should be aligned.");
2641 2642
  if (UseLargePages) {
    Solaris::setup_large_pages(addr, bytes, alignment_hint);
2643
  }
D
duke 已提交
2644 2645 2646
}

// Tell the OS to make the range local to the first-touching LWP
2647
void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
D
duke 已提交
2648 2649 2650 2651 2652 2653 2654 2655 2656 2657 2658 2659 2660 2661 2662 2663 2664 2665 2666 2667 2668 2669 2670 2671 2672 2673 2674 2675 2676 2677 2678 2679 2680 2681 2682 2683 2684
  assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned.");
  if (madvise(addr, bytes, MADV_ACCESS_LWP) < 0) {
    debug_only(warning("MADV_ACCESS_LWP failed."));
  }
}

// Tell the OS that this range would be accessed from different LWPs.
void os::numa_make_global(char *addr, size_t bytes) {
  assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned.");
  const int rc = madvise(addr, bytes, MADV_ACCESS_MANY);
  if (rc < 0) {
    debug_only(warning("MADV_ACCESS_MANY failed."));
  }
}

// Get the number of the locality groups; report 1 (UMA) if the lgrp API
// signals failure.
size_t os::numa_get_groups_num() {
  size_t count = Solaris::lgrp_nlgrps(Solaris::lgrp_cookie());
  if (count == (size_t)-1) {
    return 1;
  }
  return count;
}

// Get a list of leaf locality groups. A leaf lgroup is group that
// doesn't have any children. Typical leaf group is a CPU or a CPU/memory
// board. An LWP is assigned to one of these groups upon creation.
// 'ids' is used both as a work queue (breadth-first traversal from the
// root) and as the result buffer; returns the number of leaves written.
size_t os::numa_get_leaf_groups(int *ids, size_t size) {
   if ((ids[0] = Solaris::lgrp_root(Solaris::lgrp_cookie())) == -1) {
     ids[0] = 0;
     return 1;
   }
   int top = 1, bottom = 0, cur = 0;
   for (int k = 0; k < size; k++) {
     int r = Solaris::lgrp_children(Solaris::lgrp_cookie(), ids[cur],
                                    (Solaris::lgrp_id_t*)&ids[top], size - top);
     if (r == -1) {
       ids[0] = 0;
       return 1;
     }
     if (!r) {
       // That's a leaf node.
       assert (bottom <= cur, "Sanity check");
       // Check if the node has memory
       if (Solaris::lgrp_resources(Solaris::lgrp_cookie(), ids[cur],
                                   NULL, 0, LGRP_RSRC_MEM) > 0) {
         ids[bottom++] = ids[cur];
       }
     }
     top += r;
     cur++;
   }
   if (bottom == 0) {
     // Handle a situation, when the OS reports no memory available.
     // Assume UMA architecture.
     ids[0] = 0;
     return 1;
   }
   return bottom;
}

2705
// Detect the topology change. Typically happens during CPU plugging-unplugging.
D
duke 已提交
2706 2707 2708 2709 2710 2711 2712 2713 2714 2715 2716 2717 2718 2719
bool os::numa_topology_changed() {
  int is_stale = Solaris::lgrp_cookie_stale(Solaris::lgrp_cookie());
  if (is_stale != -1 && is_stale) {
    Solaris::lgrp_fini(Solaris::lgrp_cookie());
    Solaris::lgrp_cookie_t c = Solaris::lgrp_init(Solaris::LGRP_VIEW_CALLER);
    assert(c != 0, "Failure to initialize LGRP API");
    Solaris::set_lgrp_cookie(c);
    return true;
  }
  return false;
}

// Get the group id of the current LWP.
int os::numa_get_group_id() {
2720
  int lgrp_id = Solaris::lgrp_home(P_LWPID, P_MYID);
D
duke 已提交
2721 2722 2723
  if (lgrp_id == -1) {
    return 0;
  }
2724 2725 2726 2727 2728 2729 2730 2731 2732 2733
  const int size = os::numa_get_groups_num();
  int *ids = (int*)alloca(size * sizeof(int));

  // Get the ids of all lgroups with memory; r is the count.
  int r = Solaris::lgrp_resources(Solaris::lgrp_cookie(), lgrp_id,
                                  (Solaris::lgrp_id_t*)ids, size, LGRP_RSRC_MEM);
  if (r <= 0) {
    return 0;
  }
  return ids[os::random() % r];
D
duke 已提交
2734 2735 2736 2737 2738 2739 2740 2741 2742 2743 2744 2745 2746 2747 2748 2749 2750 2751 2752 2753 2754 2755 2756 2757 2758 2759 2760 2761 2762 2763 2764 2765 2766
}

// Request information about the page.
// Queries meminfo(2) for the lgroup (MEMINFO_VLGRP) and the page size
// (MEMINFO_VPAGESIZE) of the page containing 'start'; fills *info and
// returns true only if the address itself was reported valid.
bool os::get_page_info(char *start, page_info* info) {
  const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
  uint64_t addr = (uintptr_t)start;
  uint64_t outdata[2];
  uint_t validity = 0;

  if (os::Solaris::meminfo(&addr, 1, info_types, 2, outdata, &validity) < 0) {
    return false;
  }

  // Defaults when a particular datum was not reported.
  info->size = 0;
  info->lgrp_id = -1;

  // validity bit 0 covers the address; bits 1 and 2 presumably cover the
  // two requested info types in order -- see meminfo(2).
  if ((validity & 1) != 0) {
    if ((validity & 2) != 0) {
      info->lgrp_id = outdata[0];
    }
    if ((validity & 4) != 0) {
      info->size = outdata[1];
    }
    return true;
  }
  return false;
}

// Scan the pages from start to end until a page different than
// the one described in the info parameter is encountered.
char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
  const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
  const size_t types = sizeof(info_types) / sizeof(info_types[0]);
D
dcubed 已提交
2767
  uint64_t addrs[MAX_MEMINFO_CNT], outdata[types * MAX_MEMINFO_CNT + 1];
D
duke 已提交
2768 2769 2770 2771 2772 2773 2774
  uint_t validity[MAX_MEMINFO_CNT];

  size_t page_size = MAX2((size_t)os::vm_page_size(), page_expected->size);
  uint64_t p = (uint64_t)start;
  while (p < (uint64_t)end) {
    addrs[0] = p;
    size_t addrs_count = 1;
2775
    while (addrs_count < MAX_MEMINFO_CNT && addrs[addrs_count - 1] + page_size < (uint64_t)end) {
D
duke 已提交
2776 2777 2778 2779 2780 2781 2782 2783 2784 2785 2786 2787 2788 2789 2790 2791 2792 2793 2794 2795 2796 2797 2798 2799 2800 2801 2802 2803 2804 2805
      addrs[addrs_count] = addrs[addrs_count - 1] + page_size;
      addrs_count++;
    }

    if (os::Solaris::meminfo(addrs, addrs_count, info_types, types, outdata, validity) < 0) {
      return NULL;
    }

    size_t i = 0;
    for (; i < addrs_count; i++) {
      if ((validity[i] & 1) != 0) {
        if ((validity[i] & 4) != 0) {
          if (outdata[types * i + 1] != page_expected->size) {
            break;
          }
        } else
          if (page_expected->size != 0) {
            break;
          }

        if ((validity[i] & 2) != 0 && page_expected->lgrp_id > 0) {
          if (outdata[types * i] != page_expected->lgrp_id) {
            break;
          }
        }
      } else {
        return NULL;
      }
    }

D
dcubed 已提交
2806
    if (i < addrs_count) {
D
duke 已提交
2807 2808 2809 2810 2811 2812 2813 2814 2815 2816 2817 2818 2819 2820 2821 2822 2823 2824
      if ((validity[i] & 2) != 0) {
        page_found->lgrp_id = outdata[types * i];
      } else {
        page_found->lgrp_id = -1;
      }
      if ((validity[i] & 4) != 0) {
        page_found->size = outdata[types * i + 1];
      } else {
        page_found->size = 0;
      }
      return (char*)addrs[i];
    }

    p = addrs[addrs_count - 1] + page_size;
  }
  return end;
}

Z
zgu 已提交
2825
bool os::pd_uncommit_memory(char* addr, size_t bytes) {
D
duke 已提交
2826 2827 2828 2829 2830 2831 2832 2833 2834 2835 2836 2837 2838 2839 2840 2841 2842 2843 2844
  size_t size = bytes;
  // Map uncommitted pages PROT_NONE so we fail early if we touch an
  // uncommitted page. Otherwise, the read/write might succeed if we
  // have enough swap space to back the physical page.
  return
    NULL != Solaris::mmap_chunk(addr, size,
                                MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE,
                                PROT_NONE);
}

// Thin wrapper over mmap(2) against the cached /dev/zero descriptor;
// normalizes MAP_FAILED to NULL for callers.
char* os::Solaris::mmap_chunk(char *addr, size_t size, int flags, int prot) {
  char* mapped = (char *)mmap(addr, size, prot, flags, os::Solaris::_dev_zero_fd, 0);
  return (mapped == MAP_FAILED) ? NULL : mapped;
}

2845 2846 2847 2848 2849 2850 2851 2852 2853
char* os::Solaris::anon_mmap(char* requested_addr, size_t bytes, size_t alignment_hint, bool fixed) {
  char* addr = requested_addr;
  int flags = MAP_PRIVATE | MAP_NORESERVE;

  assert(!(fixed && (alignment_hint > 0)), "alignment hint meaningless with fixed mmap");

  if (fixed) {
    flags |= MAP_FIXED;
  } else if (has_map_align && (alignment_hint > (size_t) vm_page_size())) {
D
duke 已提交
2854 2855 2856 2857 2858 2859 2860
    flags |= MAP_ALIGN;
    addr = (char*) alignment_hint;
  }

  // Map uncommitted pages PROT_NONE so we fail early if we touch an
  // uncommitted page. Otherwise, the read/write might succeed if we
  // have enough swap space to back the physical page.
2861 2862 2863
  return mmap_chunk(addr, bytes, flags, PROT_NONE);
}

Z
zgu 已提交
2864
char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2865
  char* addr = Solaris::anon_mmap(requested_addr, bytes, alignment_hint, (requested_addr != NULL));
D
duke 已提交
2866 2867 2868 2869 2870 2871 2872 2873 2874

  guarantee(requested_addr == NULL || requested_addr == addr,
            "OS failed to return requested mmap address.");
  return addr;
}

// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).

Z
zgu 已提交
2875
char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
D
duke 已提交
2876 2877 2878 2879 2880 2881 2882 2883 2884 2885 2886 2887 2888 2889 2890 2891 2892 2893
  const int max_tries = 10;
  char* base[max_tries];
  size_t size[max_tries];

  // Solaris adds a gap between mmap'ed regions.  The size of the gap
  // is dependent on the requested size and the MMU.  Our initial gap
  // value here is just a guess and will be corrected later.
  bool had_top_overlap = false;
  bool have_adjusted_gap = false;
  size_t gap = 0x400000;

  // Assert only that the size is a multiple of the page size, since
  // that's all that mmap requires, and since that's all we really know
  // about at this low abstraction level.  If we need higher alignment,
  // we can either pass an alignment to this method or verify alignment
  // in one of the methods further up the call chain.  See bug 5044738.
  assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");

2894 2895 2896
  // Since snv_84, Solaris attempts to honor the address hint - see 5003415.
  // Give it a try, if the kernel honors the hint we can return immediately.
  char* addr = Solaris::anon_mmap(requested_addr, bytes, 0, false);
2897

2898 2899 2900 2901
  volatile int err = errno;
  if (addr == requested_addr) {
    return addr;
  } else if (addr != NULL) {
2902
    pd_unmap_memory(addr, bytes);
2903 2904 2905 2906 2907 2908 2909 2910
  }

  if (PrintMiscellaneous && Verbose) {
    char buf[256];
    buf[0] = '\0';
    if (addr == NULL) {
      jio_snprintf(buf, sizeof(buf), ": %s", strerror(err));
    }
2911
    warning("attempt_reserve_memory_at: couldn't reserve " SIZE_FORMAT " bytes at "
2912 2913 2914 2915 2916 2917 2918 2919
            PTR_FORMAT ": reserve_memory_helper returned " PTR_FORMAT
            "%s", bytes, requested_addr, addr, buf);
  }

  // Address hint method didn't work.  Fall back to the old method.
  // In theory, once SNV becomes our oldest supported platform, this
  // code will no longer be needed.
  //
D
duke 已提交
2920 2921 2922 2923 2924 2925 2926 2927 2928 2929 2930 2931 2932 2933 2934 2935 2936 2937 2938 2939 2940 2941 2942 2943 2944 2945 2946 2947 2948 2949 2950 2951 2952 2953 2954 2955 2956 2957 2958 2959 2960 2961 2962 2963 2964 2965 2966 2967 2968 2969 2970 2971 2972 2973 2974 2975 2976 2977 2978 2979 2980 2981 2982 2983 2984 2985 2986 2987 2988 2989 2990 2991 2992 2993 2994 2995 2996 2997
  // Repeatedly allocate blocks until the block is allocated at the
  // right spot. Give up after max_tries.
  int i;
  for (i = 0; i < max_tries; ++i) {
    base[i] = reserve_memory(bytes);

    if (base[i] != NULL) {
      // Is this the block we wanted?
      if (base[i] == requested_addr) {
        size[i] = bytes;
        break;
      }

      // check that the gap value is right
      if (had_top_overlap && !have_adjusted_gap) {
        size_t actual_gap = base[i-1] - base[i] - bytes;
        if (gap != actual_gap) {
          // adjust the gap value and retry the last 2 allocations
          assert(i > 0, "gap adjustment code problem");
          have_adjusted_gap = true;  // adjust the gap only once, just in case
          gap = actual_gap;
          if (PrintMiscellaneous && Verbose) {
            warning("attempt_reserve_memory_at: adjusted gap to 0x%lx", gap);
          }
          unmap_memory(base[i], bytes);
          unmap_memory(base[i-1], size[i-1]);
          i-=2;
          continue;
        }
      }

      // Does this overlap the block we wanted? Give back the overlapped
      // parts and try again.
      //
      // There is still a bug in this code: if top_overlap == bytes,
      // the overlap is offset from requested region by the value of gap.
      // In this case giving back the overlapped part will not work,
      // because we'll give back the entire block at base[i] and
      // therefore the subsequent allocation will not generate a new gap.
      // This could be fixed with a new algorithm that used larger
      // or variable size chunks to find the requested region -
      // but such a change would introduce additional complications.
      // It's rare enough that the planets align for this bug,
      // so we'll just wait for a fix for 6204603/5003415 which
      // will provide a mmap flag to allow us to avoid this business.

      size_t top_overlap = requested_addr + (bytes + gap) - base[i];
      if (top_overlap >= 0 && top_overlap < bytes) {
        had_top_overlap = true;
        unmap_memory(base[i], top_overlap);
        base[i] += top_overlap;
        size[i] = bytes - top_overlap;
      } else {
        size_t bottom_overlap = base[i] + bytes - requested_addr;
        if (bottom_overlap >= 0 && bottom_overlap < bytes) {
          if (PrintMiscellaneous && Verbose && bottom_overlap == 0) {
            warning("attempt_reserve_memory_at: possible alignment bug");
          }
          unmap_memory(requested_addr, bottom_overlap);
          size[i] = bytes - bottom_overlap;
        } else {
          size[i] = bytes;
        }
      }
    }
  }

  // Give back the unused reserved pieces.

  for (int j = 0; j < i; ++j) {
    if (base[j] != NULL) {
      unmap_memory(base[j], size[j]);
    }
  }

  return (i < max_tries) ? requested_addr : NULL;
}

Z
zgu 已提交
2998
bool os::pd_release_memory(char* addr, size_t bytes) {
D
duke 已提交
2999 3000 3001 3002 3003 3004 3005 3006 3007 3008 3009
  size_t size = bytes;
  return munmap(addr, size) == 0;
}

// Apply mprotect(2) to a page-aligned range; true on success.
static bool solaris_mprotect(char* addr, size_t bytes, int prot) {
  assert(addr == (char*)align_size_down((uintptr_t)addr, os::vm_page_size()),
         "addr must be page aligned");
  const bool ok = (mprotect(addr, bytes, prot) == 0);
  return ok;
}

3010
// Protect memory (Used to pass readonly pages through
D
duke 已提交
3011
// JNI GetArray<type>Elements with empty arrays.)
3012 3013
// Also, used for serialization page and for compressed oops null pointer
// checking.
3014 3015 3016 3017 3018 3019 3020 3021 3022 3023 3024 3025 3026
bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
                        bool is_committed) {
  unsigned int p = 0;
  switch (prot) {
  case MEM_PROT_NONE: p = PROT_NONE; break;
  case MEM_PROT_READ: p = PROT_READ; break;
  case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
  case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
  default:
    ShouldNotReachHere();
  }
  // is_committed is unused.
  return solaris_mprotect(addr, bytes, p);
D
duke 已提交
3027 3028 3029 3030 3031 3032 3033 3034 3035 3036
}

// guard_memory and unguard_memory only happens within stack guard pages.
// Since ISM pertains only to the heap, guard and unguard memory should not
// happen with an ISM region.
// Guarding removes all access so any touch of the range faults.
bool os::guard_memory(char* addr, size_t bytes) {
  return solaris_mprotect(addr, bytes, PROT_NONE);
}

bool os::unguard_memory(char* addr, size_t bytes) {
3037
  return solaris_mprotect(addr, bytes, PROT_READ|PROT_WRITE);
D
duke 已提交
3038 3039 3040 3041 3042 3043 3044 3045 3046 3047 3048 3049 3050 3051 3052 3053 3054
}

// Large page support
// Selected large page size; 0 until os::large_page_init() fills it in
// via mpss_sanity_check().
static size_t _large_page_size = 0;

// Insertion sort for small arrays (descending order).
// Each element is bubbled toward the front while it exceeds its left
// neighbor, leaving the array sorted from largest to smallest.
static void insertion_sort_descending(size_t* array, int len) {
  for (int pos = 0; pos < len; pos++) {
    const size_t inserted = array[pos];
    size_t slot = pos;
    while (slot > 0 && array[slot - 1] < inserted) {
      const size_t displaced = array[slot];
      array[slot] = array[slot - 1];
      array[slot - 1] = displaced;
      --slot;
    }
  }
}

3055
bool os::Solaris::mpss_sanity_check(bool warn, size_t* page_size) {
D
duke 已提交
3056 3057 3058 3059 3060
  const unsigned int usable_count = VM_Version::page_size_count();
  if (usable_count == 1) {
    return false;
  }

3061 3062 3063 3064 3065 3066 3067 3068 3069 3070 3071 3072 3073 3074
  // Find the right getpagesizes interface.  When solaris 11 is the minimum
  // build platform, getpagesizes() (without the '2') can be called directly.
  typedef int (*gps_t)(size_t[], int);
  gps_t gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes2"));
  if (gps_func == NULL) {
    gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes"));
    if (gps_func == NULL) {
      if (warn) {
        warning("MPSS is not supported by the operating system.");
      }
      return false;
    }
  }

D
duke 已提交
3075
  // Fill the array of page sizes.
3076
  int n = (*gps_func)(_page_sizes, page_sizes_max);
D
duke 已提交
3077
  assert(n > 0, "Solaris bug?");
3078

D
duke 已提交
3079 3080 3081 3082 3083 3084 3085
  if (n == page_sizes_max) {
    // Add a sentinel value (necessary only if the array was completely filled
    // since it is static (zeroed at initialization)).
    _page_sizes[--n] = 0;
    DEBUG_ONLY(warning("increase the size of the os::_page_sizes array.");)
  }
  assert(_page_sizes[n] == 0, "missing sentinel");
3086
  trace_page_sizes("available page sizes", _page_sizes, n);
D
duke 已提交
3087 3088 3089 3090 3091 3092 3093 3094 3095 3096 3097 3098 3099 3100 3101 3102 3103 3104 3105 3106 3107 3108 3109 3110 3111 3112 3113 3114 3115

  if (n == 1) return false;     // Only one page size available.

  // Skip sizes larger than 4M (or LargePageSizeInBytes if it was set) and
  // select up to usable_count elements.  First sort the array, find the first
  // acceptable value, then copy the usable sizes to the top of the array and
  // trim the rest.  Make sure to include the default page size :-).
  //
  // A better policy could get rid of the 4M limit by taking the sizes of the
  // important VM memory regions (java heap and possibly the code cache) into
  // account.
  insertion_sort_descending(_page_sizes, n);
  const size_t size_limit =
    FLAG_IS_DEFAULT(LargePageSizeInBytes) ? 4 * M : LargePageSizeInBytes;
  int beg;
  for (beg = 0; beg < n && _page_sizes[beg] > size_limit; ++beg) /* empty */ ;
  const int end = MIN2((int)usable_count, n) - 1;
  for (int cur = 0; cur < end; ++cur, ++beg) {
    _page_sizes[cur] = _page_sizes[beg];
  }
  _page_sizes[end] = vm_page_size();
  _page_sizes[end + 1] = 0;

  if (_page_sizes[end] > _page_sizes[end - 1]) {
    // Default page size is not the smallest; sort again.
    insertion_sort_descending(_page_sizes, end + 1);
  }
  *page_size = _page_sizes[0];

3116
  trace_page_sizes("usable page sizes", _page_sizes, end + 1);
D
duke 已提交
3117 3118 3119
  return true;
}

3120
void os::large_page_init() {
3121 3122 3123 3124
  if (UseLargePages) {
    // print a warning if any large page related flag is specified on command line
    bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages)        ||
                           !FLAG_IS_DEFAULT(LargePageSizeInBytes);
D
duke 已提交
3125

3126
    UseLargePages = Solaris::mpss_sanity_check(warn_on_failure, &_large_page_size);
D
duke 已提交
3127 3128 3129
  }
}

3130 3131 3132 3133 3134 3135 3136 3137 3138
bool os::Solaris::is_valid_page_size(size_t bytes) {
  for (int i = 0; _page_sizes[i] != 0; i++) {
    if (_page_sizes[i] == bytes) {
      return true;
    }
  }
  return false;
}

3139
bool os::Solaris::setup_large_pages(caddr_t start, size_t bytes, size_t align) {
3140 3141 3142 3143 3144 3145
  assert(is_valid_page_size(align), err_msg(SIZE_FORMAT " is not a valid page size", align));
  assert(is_ptr_aligned((void*) start, align),
         err_msg(PTR_FORMAT " is not aligned to " SIZE_FORMAT, p2i((void*) start), align));
  assert(is_size_aligned(bytes, align),
         err_msg(SIZE_FORMAT " is not aligned to " SIZE_FORMAT, bytes, align));

D
duke 已提交
3146 3147 3148 3149 3150 3151
  // Signal to OS that we want large pages for addresses
  // from addr, addr + bytes
  struct memcntl_mha mpss_struct;
  mpss_struct.mha_cmd = MHA_MAPSIZE_VA;
  mpss_struct.mha_pagesize = align;
  mpss_struct.mha_flags = 0;
3152 3153
  // Upon successful completion, memcntl() returns 0
  if (memcntl(start, bytes, MC_HAT_ADVISE, (caddr_t) &mpss_struct, 0, 0)) {
D
duke 已提交
3154 3155 3156 3157 3158 3159
    debug_only(warning("Attempt to use MPSS failed."));
    return false;
  }
  return true;
}

3160
char* os::reserve_memory_special(size_t size, size_t alignment, char* addr, bool exec) {
3161 3162
  fatal("os::reserve_memory_special should not be called on Solaris.");
  return NULL;
D
duke 已提交
3163 3164 3165
}

bool os::release_memory_special(char* base, size_t bytes) {
3166 3167
  fatal("os::release_memory_special should not be called on Solaris.");
  return false;
D
duke 已提交
3168 3169 3170 3171 3172 3173 3174 3175 3176
}

// Accessor for the large page size chosen by os::large_page_init().
size_t os::large_page_size() {
  const size_t sz = _large_page_size;
  return sz;
}

// MPSS allows application to commit large page memory on demand; with ISM
// the entire memory region must be allocated as shared memory.
bool os::can_commit_large_page_memory() {
  return true;
}

3180
bool os::can_execute_large_page_memory() {
3181
  return true;
3182 3183
}

D
duke 已提交
3184 3185 3186 3187 3188 3189 3190 3191 3192 3193 3194 3195 3196 3197 3198 3199 3200 3201 3202 3203 3204 3205 3206 3207 3208 3209 3210 3211 3212 3213 3214 3215 3216
static int os_sleep(jlong millis, bool interruptible) {
  const jlong limit = INT_MAX;
  jlong prevtime;
  int res;

  while (millis > limit) {
    if ((res = os_sleep(limit, interruptible)) != OS_OK)
      return res;
    millis -= limit;
  }

  // Restart interrupted polls with new parameters until the proper delay
  // has been completed.

  prevtime = getTimeMillis();

  while (millis > 0) {
    jlong newtime;

    if (!interruptible) {
      // Following assert fails for os::yield_all:
      // assert(!thread->is_Java_thread(), "must not be java thread");
      res = poll(NULL, 0, millis);
    } else {
      JavaThread *jt = JavaThread::current();

      INTERRUPTIBLE_NORESTART_VM_ALWAYS(poll(NULL, 0, millis), res, jt,
        os::Solaris::clear_interrupted);
    }

    // INTERRUPTIBLE_NORESTART_VM_ALWAYS returns res == OS_INTRPT for
    // thread.Interrupt.

3217 3218 3219 3220 3221 3222
    // See c/r 6751923. Poll can return 0 before time
    // has elapsed if time is set via clock_settime (as NTP does).
    // res == 0 if poll timed out (see man poll RETURN VALUES)
    // using the logic below checks that we really did
    // sleep at least "millis" if not we'll sleep again.
    if( ( res == 0 ) || ((res == OS_ERR) && (errno == EINTR))) {
D
duke 已提交
3223 3224 3225 3226 3227 3228 3229 3230 3231 3232 3233 3234 3235 3236 3237 3238 3239 3240 3241 3242 3243
      newtime = getTimeMillis();
      assert(newtime >= prevtime, "time moving backwards");
    /* Doing prevtime and newtime in microseconds doesn't help precision,
       and trying to round up to avoid lost milliseconds can result in a
       too-short delay. */
      millis -= newtime - prevtime;
      if(millis <= 0)
        return OS_OK;
      prevtime = newtime;
    } else
      return res;
  }

  return OS_OK;
}

// Read calls from inside the vm need to perform state transitions
// (the INTERRUPTIBLE_RETURN_INT_VM wrapper handles interruption and --
// since this function has no explicit return -- presumably supplies the
// return statement itself; see the macro definition).
size_t os::read(int fd, void *buf, unsigned int nBytes) {
  INTERRUPTIBLE_RETURN_INT_VM(::read(fd, buf, nBytes), os::Solaris::clear_interrupted);
}

3244 3245 3246 3247
size_t os::restartable_read(int fd, void *buf, unsigned int nBytes) {
  INTERRUPTIBLE_RETURN_INT(::read(fd, buf, nBytes), os::Solaris::clear_interrupted);
}

// Sleep for 'millis' milliseconds on behalf of 'thread'.
// Returns the result of os_sleep() (0/OS_OK on normal completion; per the
// comments near os_sleep(), OS_INTRPT may be returned when 'interruptible').
int os::sleep(Thread* thread, jlong millis, bool interruptible) {
  assert(thread == Thread::current(),  "thread consistency check");

  // TODO-FIXME: this should be removed.
  // On Solaris machines (especially 2.5.1) we found that sometimes the VM gets into a live lock
  // situation with a JavaThread being starved out of a lwp. The kernel doesn't seem to generate
  // a SIGWAITING signal which would enable the threads library to create a new lwp for the starving
  // thread. We suspect that because the Watcher thread keeps waking up at periodic intervals the kernel
  // is fooled into believing that the system is making progress. In the code below we block the
  // the watcher thread while safepoint is in progress so that it would not appear as though the
  // system is making progress.
  if (!Solaris::T2_libthread() &&
      thread->is_Watcher_thread() && SafepointSynchronize::is_synchronizing() && !Arguments::has_profile()) {
    // We now try to acquire the threads lock. Since this lock is held by the VM thread during
    // the entire safepoint, the watcher thread will  line up here during the safepoint.
    Threads_lock->lock_without_safepoint_check();
    Threads_lock->unlock();
  }

  if (thread->is_Java_thread()) {
    // This is a JavaThread so we honor the _thread_blocked protocol
    // even for sleeps of 0 milliseconds. This was originally done
    // as a workaround for bug 4338139. However, now we also do it
    // to honor the suspend-equivalent protocol.

    JavaThread *jt = (JavaThread *) thread;
    ThreadBlockInVM tbivm(jt);

    jt->set_suspend_equivalent();
    // cleared by handle_special_suspend_equivalent_condition() or
    // java_suspend_self() via check_and_wait_while_suspended()

    int ret_code;
    if (millis <= 0) {
      thr_yield();
      ret_code = 0;
    } else {
      // The original sleep() implementation did not create an
      // OSThreadWaitState helper for sleeps of 0 milliseconds.
      // I'm preserving that decision for now.
      OSThreadWaitState osts(jt->osthread(), false /* not Object.wait() */);

      ret_code = os_sleep(millis, interruptible);
    }

    // were we externally suspended while we were waiting?
    jt->check_and_wait_while_suspended();

    return ret_code;
  }

  // non-JavaThread from this point on:

  if (millis <= 0) {
    // A non-positive sleep degenerates into a plain yield.
    thr_yield();
    return 0;
  }

  OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);

  return os_sleep(millis, interruptible);
}

// Un-interruptible short sleep (< 1 second); no thread-state transition.
void os::naked_short_sleep(jlong ms) {
  assert(ms < 1000, "Un-interruptable sleep, short time use only");

  // usleep is deprecated and removed from POSIX, in favour of nanosleep, but
  // Solaris requires -lrt for this.
  usleep((ms * 1000));   // milliseconds -> microseconds

  return;
}

// Sleep forever; naked call to OS-specific sleep; use with CAUTION
void os::infinite_sleep() {
  // Never returns: block in the OS sleep call 100 seconds at a time, forever.
  for (;;) {
    ::sleep(100);
  }
}

// Used to convert frequent JVM_Yield() to nops
bool os::dont_yield() {
  if (DontYieldALot) {
    static hrtime_t last_time = 0;
    hrtime_t diff = getTimeNanos() - last_time;

    if (diff < DontYieldALotInterval * 1000000)
      return true;

    last_time += diff;

    return false;
  }
  else {
    return false;
  }
}

// Caveat: Solaris os::yield() causes a thread-state transition whereas
// the linux and win32 implementations do not.  This should be checked.

void os::yield() {
  // Yields to all threads with same or greater priority
  // (implemented as a zero-millisecond, non-interruptible sleep).
  os::sleep(Thread::current(), 0, false);
}

// Note that yield semantics are defined by the scheduling class to which
// the thread currently belongs.  Typically, yield will _not yield to
// other equal or higher priority threads that reside on the dispatch queues
// of other CPUs.

// Bare thr_yield(); no state transition, result of the yield is unknowable.
os::YieldResult os::NakedYield() { thr_yield(); return os::YIELD_UNKNOWN; }


// On Solaris we found that yield_all doesn't always yield to all other threads.
// There have been cases where there is a thread ready to execute but it doesn't
// get an lwp as the VM thread continues to spin with sleeps of 1 millisecond.
// The 1 millisecond wait doesn't seem long enough for the kernel to issue a
// SIGWAITING signal which will cause a new lwp to be created. So we count the
// number of times yield_all is called in the one loop and increase the sleep
// time after 8 attempts. If this fails too we increase the concurrency level
// so that the starving thread would get an lwp

void os::yield_all(int attempts) {
  // Yields to all threads, including threads with lower priorities
  if (attempts == 0) {
    os::sleep(Thread::current(), 1, false);
  } else {
    int iterations = attempts % 30;
    if (iterations == 0 && !os::Solaris::T2_libthread()) {
      // thr_setconcurrency and _getconcurrency make sense only under T1.
      // Bump the LWP count if there are more threads than LWPs (+2 slack).
      int noofLWPS = thr_getconcurrency();
      if (noofLWPS < (Threads::number_of_threads() + 2)) {
        thr_setconcurrency(thr_getconcurrency() + 1);
      }
    } else if (iterations < 25) {
      os::sleep(Thread::current(), 1, false);
    } else {
      // Late iterations in the cycle sleep longer to let starved threads run.
      os::sleep(Thread::current(), 10, false);
    }
  }
}

// Called from the tight loops to possibly influence time-sharing heuristics
void os::loop_breaker(int attempts) {
  os::yield_all(attempts);
}


// Interface for setting lwp priorities.  If we are using T2 libthread,
// which forces the use of BoundThreads or we manually set UseBoundThreads,
// all of our threads will be assigned to real lwp's.  Using the thr_setprio
// function is meaningless in this mode so we must adjust the real lwp's priority
// The routines below implement the getting and setting of lwp priorities.
//
// Note: There are three priority scales used on Solaris.  Java priorities
//       which range from 1 to 10, libthread "thr_setprio" scale which range
//       from 0 to 127, and the current scheduling class of the process we
//       are running in.  This is typically from -60 to +60.
//       The setting of the lwp priorities in done after a call to thr_setprio
//       so Java priorities are mapped to libthread priorities and we map from
//       the latter to lwp priorities.  We don't keep priorities stored in
//       Java priorities since some of our worker threads want to set priorities
//       higher than all Java threads.
//
// For related information:
// (1)  man -s 2 priocntl
// (2)  man -s 4 priocntl
// (3)  man dispadmin
// =    librt.so
// =    libthread/common/rtsched.c - thrp_setlwpprio().
// =    ps -cL <pid> ... to validate priority.
// =    sched_get_priority_min and _max
//              pthread_create
//              sched_setparam
//              pthread_setschedparam
//
// Assumptions:
// +    We assume that all threads in the process belong to the same
//              scheduling class.   IE. an homogenous process.
// +    Must be root or in IA group to change the "interactive" attribute.
//              Priocntl() will fail silently.  The only indication of failure is when
//              we read-back the value and notice that it hasn't changed.
// +    Interactive threads enter the runq at the head, non-interactive at the tail.
// +    For RT, change timeslice as well.  Invariant:
//              constant "priority integral"
//              Konst == TimeSlice * (60-Priority)
//              Given a priority, compute appropriate timeslice.
// +    Higher numerical values have higher priority.

// sched class attributes
//
// Describes one Solaris scheduling class (TS, IA, RT or FX): its priocntl
// class id and the priority range it accepts.
typedef struct {
        int   schedPolicy;              // classID
        int   maxPrio;
        int   minPrio;
} SchedInfo;


// Per-class limits, populated by lwp_priocntl_init().
static SchedInfo tsLimits, iaLimits, rtLimits, fxLimits;
D
duke 已提交
3447 3448 3449 3450 3451 3452 3453 3454 3455 3456

#ifdef ASSERT
// Debug-only switch: when non-zero, set_lwp_class_and_priority() reads the
// priority back after setting it and compares against the requested value.
static int  ReadBackValidate = 1;
#endif
// Scheduling class and priority range of this process, discovered by
// lwp_priocntl_init().
static int  myClass     = 0;
static int  myMin       = 0;
static int  myMax       = 0;
static int  myCur       = 0;
static bool priocntl_enable = false;   // set true once lwp_priocntl_init() succeeds

static const int criticalPrio = 60; // FX/60 is critical thread class/priority on T4
static int java_MaxPriority_to_os_priority = 0; // Saved mapping
D
duke 已提交
3459 3460 3461 3462 3463 3464 3465 3466


// lwp_priocntl_init
//
// Try to determine the priority scale for our process.
//
// Return errno or 0 if OK.
//
static int lwp_priocntl_init () {
  int rslt;
  pcinfo_t ClassInfo;
  pcparms_t ParmInfo;
  int i;

  if (!UseThreadPriorities) return 0;

  // We are using Bound threads, we need to determine our priority ranges
  if (os::Solaris::T2_libthread() || UseBoundThreads) {
    // If ThreadPriorityPolicy is 1, switch tables
    if (ThreadPriorityPolicy == 1) {
      for (i = 0 ; i < CriticalPriority+1; i++)
        os::java_to_os_priority[i] = prio_policy1[i];
    }
    if (UseCriticalJavaThreadPriority) {
      // MaxPriority always maps to the FX scheduling class and criticalPrio.
      // See set_native_priority() and set_lwp_class_and_priority().
      // Save original MaxPriority mapping in case attempt to
      // use critical priority fails.
      java_MaxPriority_to_os_priority = os::java_to_os_priority[MaxPriority];
      // Set negative to distinguish from other priorities
      os::java_to_os_priority[MaxPriority] = -criticalPrio;
    }
  }
  // Not using Bound Threads, set to ThreadPolicy 1
  else {
    for ( i = 0 ; i < CriticalPriority+1; i++ ) {
      os::java_to_os_priority[i] = prio_policy1[i];
    }
    return 0;
  }

  // Get IDs for a set of well-known scheduling classes.
  // TODO-FIXME: GETCLINFO returns the current # of classes in the
  // the system.  We should have a loop that iterates over the
  // classID values, which are known to be "small" integers.

  strcpy(ClassInfo.pc_clname, "TS");
  ClassInfo.pc_cid = -1;
  rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
  if (rslt < 0) return errno;
  assert(ClassInfo.pc_cid != -1, "cid for TS class is -1");
  tsLimits.schedPolicy = ClassInfo.pc_cid;
  tsLimits.maxPrio = ((tsinfo_t*)ClassInfo.pc_clinfo)->ts_maxupri;
  tsLimits.minPrio = -tsLimits.maxPrio;   // TS priorities are symmetric about 0

  strcpy(ClassInfo.pc_clname, "IA");
  ClassInfo.pc_cid = -1;
  rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
  if (rslt < 0) return errno;
  assert(ClassInfo.pc_cid != -1, "cid for IA class is -1");
  iaLimits.schedPolicy = ClassInfo.pc_cid;
  iaLimits.maxPrio = ((iainfo_t*)ClassInfo.pc_clinfo)->ia_maxupri;
  iaLimits.minPrio = -iaLimits.maxPrio;

  strcpy(ClassInfo.pc_clname, "RT");
  ClassInfo.pc_cid = -1;
  rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
  if (rslt < 0) return errno;
  assert(ClassInfo.pc_cid != -1, "cid for RT class is -1");
  rtLimits.schedPolicy = ClassInfo.pc_cid;
  rtLimits.maxPrio = ((rtinfo_t*)ClassInfo.pc_clinfo)->rt_maxpri;
  rtLimits.minPrio = 0;

  strcpy(ClassInfo.pc_clname, "FX");
  ClassInfo.pc_cid = -1;
  rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
  if (rslt < 0) return errno;
  assert(ClassInfo.pc_cid != -1, "cid for FX class is -1");
  fxLimits.schedPolicy = ClassInfo.pc_cid;
  fxLimits.maxPrio = ((fxinfo_t*)ClassInfo.pc_clinfo)->fx_maxupri;
  fxLimits.minPrio = 0;

  // Query our "current" scheduling class.
  // This will normally be IA, TS or, rarely, FX or RT.
  memset(&ParmInfo, 0, sizeof(ParmInfo));
  ParmInfo.pc_cid = PC_CLNULL;
  rslt = priocntl(P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
  if (rslt < 0) return errno;
  myClass = ParmInfo.pc_cid;

  // We now know our scheduling classId, get specific information
  // about the class.
  ClassInfo.pc_cid = myClass;
  ClassInfo.pc_clname[0] = 0;
  rslt = priocntl((idtype)0, 0, PC_GETCLINFO, (caddr_t)&ClassInfo);
  if (rslt < 0) return errno;

  if (ThreadPriorityVerbose) {
    tty->print_cr("lwp_priocntl_init: Class=%d(%s)...", myClass, ClassInfo.pc_clname);
  }

  memset(&ParmInfo, 0, sizeof(pcparms_t));
  ParmInfo.pc_cid = PC_CLNULL;
  rslt = priocntl(P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
  if (rslt < 0) return errno;

  // Record the usable priority range [myMin..myMax] for our class, clamped
  // by the per-process user priority limit where the class defines one.
  if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
    myMin = rtLimits.minPrio;
    myMax = rtLimits.maxPrio;
  } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
    iaparms_t *iaInfo  = (iaparms_t*)ParmInfo.pc_clparms;
    myMin = iaLimits.minPrio;
    myMax = iaLimits.maxPrio;
    myMax = MIN2(myMax, (int)iaInfo->ia_uprilim);       // clamp - restrict
  } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
    tsparms_t *tsInfo  = (tsparms_t*)ParmInfo.pc_clparms;
    myMin = tsLimits.minPrio;
    myMax = tsLimits.maxPrio;
    myMax = MIN2(myMax, (int)tsInfo->ts_uprilim);       // clamp - restrict
  } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) {
    fxparms_t *fxInfo = (fxparms_t*)ParmInfo.pc_clparms;
    myMin = fxLimits.minPrio;
    myMax = fxLimits.maxPrio;
    myMax = MIN2(myMax, (int)fxInfo->fx_uprilim);       // clamp - restrict
  } else {
    // No clue - punt
    if (ThreadPriorityVerbose)
      tty->print_cr ("Unknown scheduling class: %s ... \n", ClassInfo.pc_clname);
    return EINVAL;      // no clue, punt
  }

  if (ThreadPriorityVerbose) {
    tty->print_cr ("Thread priority Range: [%d..%d]\n", myMin, myMax);
  }

  priocntl_enable = true;  // Enable changing priorities
  return 0;
}

// Accessors for the class-specific parameter area embedded in a pcparms_t.
#define IAPRI(x)        ((iaparms_t *)((x).pc_clparms))
#define RTPRI(x)        ((rtparms_t *)((x).pc_clparms))
#define TSPRI(x)        ((tsparms_t *)((x).pc_clparms))
#define FXPRI(x)        ((fxparms_t *)((x).pc_clparms))


// scale_to_lwp_priority
//
// Convert from the libthread "thr_setprio" scale (0..127) to our current
// lwp scheduling class scale [rMin..rMax].
//
static
int     scale_to_lwp_priority (int rMin, int rMax, int x)
{
  // 127 is the top of the thr_setprio scale; map it directly to rMax so
  // the integer division below cannot round the maximum down.
  if (x == 127) {
    return rMax;
  }
  return ((x * (rMax - rMin)) / 128) + rMin;
}


3620
// set_lwp_class_and_priority
//
// Set the class and priority of the lwp.  This call should only
// be made when using bound threads (T2 threads are bound by default).
//
// Returns 0 on success, or an errno/EINVAL value on failure.
int set_lwp_class_and_priority(int ThreadID, int lwpid,
                               int newPrio, int new_class, bool scale) {
  int rslt;
  int Actual, Expected, prv;
  pcparms_t ParmInfo;                   // for GET-SET
#ifdef ASSERT
  pcparms_t ReadBack;                   // for readback
#endif

  // Set priority via PC_GETPARMS, update, PC_SETPARMS
  // Query current values.
  // TODO: accelerate this by eliminating the PC_GETPARMS call.
  // Cache "pcparms_t" in global ParmCache.
  // TODO: elide set-to-same-value

  // If something went wrong on init, don't change priorities.
  if ( !priocntl_enable ) {
    if (ThreadPriorityVerbose)
      tty->print_cr("Trying to set priority but init failed, ignoring");
    return EINVAL;
  }

  // If lwp hasn't started yet, just return
  // the _start routine will call us again.
  if ( lwpid <= 0 ) {
    if (ThreadPriorityVerbose) {
      tty->print_cr ("deferring the set_lwp_class_and_priority of thread "
                     INTPTR_FORMAT " to %d, lwpid not set",
                     ThreadID, newPrio);
    }
    return 0;
  }

  if (ThreadPriorityVerbose) {
    tty->print_cr ("set_lwp_class_and_priority("
                   INTPTR_FORMAT "@" INTPTR_FORMAT " %d) ",
                   ThreadID, lwpid, newPrio);
  }

  // Fetch the lwp's current scheduling parameters as the starting point.
  memset(&ParmInfo, 0, sizeof(pcparms_t));
  ParmInfo.pc_cid = PC_CLNULL;
  rslt = priocntl(P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ParmInfo);
  if (rslt < 0) return errno;

  int cur_class = ParmInfo.pc_cid;
  ParmInfo.pc_cid = (id_t)new_class;

  // Fill in the class-specific parameter block.  When staying in the same
  // class we respect the existing user priority limit; when switching
  // classes we raise the limit to the class maximum.
  if (new_class == rtLimits.schedPolicy) {
    rtparms_t *rtInfo  = (rtparms_t*)ParmInfo.pc_clparms;
    rtInfo->rt_pri     = scale ? scale_to_lwp_priority(rtLimits.minPrio,
                                                       rtLimits.maxPrio, newPrio)
                               : newPrio;
    rtInfo->rt_tqsecs  = RT_NOCHANGE;
    rtInfo->rt_tqnsecs = RT_NOCHANGE;
    if (ThreadPriorityVerbose) {
      tty->print_cr("RT: %d->%d\n", newPrio, rtInfo->rt_pri);
    }
  } else if (new_class == iaLimits.schedPolicy) {
    iaparms_t* iaInfo  = (iaparms_t*)ParmInfo.pc_clparms;
    int maxClamped     = MIN2(iaLimits.maxPrio,
                              cur_class == new_class
                                ? (int)iaInfo->ia_uprilim : iaLimits.maxPrio);
    iaInfo->ia_upri    = scale ? scale_to_lwp_priority(iaLimits.minPrio,
                                                       maxClamped, newPrio)
                               : newPrio;
    iaInfo->ia_uprilim = cur_class == new_class
                           ? IA_NOCHANGE : (pri_t)iaLimits.maxPrio;
    iaInfo->ia_mode    = IA_NOCHANGE;
    if (ThreadPriorityVerbose) {
      tty->print_cr("IA: [%d...%d] %d->%d\n",
                    iaLimits.minPrio, maxClamped, newPrio, iaInfo->ia_upri);
    }
  } else if (new_class == tsLimits.schedPolicy) {
    tsparms_t* tsInfo  = (tsparms_t*)ParmInfo.pc_clparms;
    int maxClamped     = MIN2(tsLimits.maxPrio,
                              cur_class == new_class
                                ? (int)tsInfo->ts_uprilim : tsLimits.maxPrio);
    tsInfo->ts_upri    = scale ? scale_to_lwp_priority(tsLimits.minPrio,
                                                       maxClamped, newPrio)
                               : newPrio;
    tsInfo->ts_uprilim = cur_class == new_class
                           ? TS_NOCHANGE : (pri_t)tsLimits.maxPrio;
    if (ThreadPriorityVerbose) {
      tty->print_cr("TS: [%d...%d] %d->%d\n",
                    tsLimits.minPrio, maxClamped, newPrio, tsInfo->ts_upri);
    }
  } else if (new_class == fxLimits.schedPolicy) {
    fxparms_t* fxInfo  = (fxparms_t*)ParmInfo.pc_clparms;
    int maxClamped     = MIN2(fxLimits.maxPrio,
                              cur_class == new_class
                                ? (int)fxInfo->fx_uprilim : fxLimits.maxPrio);
    fxInfo->fx_upri    = scale ? scale_to_lwp_priority(fxLimits.minPrio,
                                                       maxClamped, newPrio)
                               : newPrio;
    fxInfo->fx_uprilim = cur_class == new_class
                           ? FX_NOCHANGE : (pri_t)fxLimits.maxPrio;
    fxInfo->fx_tqsecs  = FX_NOCHANGE;
    fxInfo->fx_tqnsecs = FX_NOCHANGE;
    if (ThreadPriorityVerbose) {
      tty->print_cr("FX: [%d...%d] %d->%d\n",
                    fxLimits.minPrio, maxClamped, newPrio, fxInfo->fx_upri);
    }
  } else {
    if (ThreadPriorityVerbose) {
      tty->print_cr("Unknown new scheduling class %d\n", new_class);
    }
    return EINVAL;    // no clue, punt
  }

  rslt = priocntl(P_LWPID, lwpid, PC_SETPARMS, (caddr_t)&ParmInfo);
  if (ThreadPriorityVerbose && rslt) {
    tty->print_cr ("PC_SETPARMS ->%d %d\n", rslt, errno);
  }
  if (rslt < 0) return errno;

#ifdef ASSERT
  // Sanity check: read back what we just attempted to set.
  // In theory it could have changed in the interim ...
  //
  // The priocntl system call is tricky.
  // Sometimes it'll validate the priority value argument and
  // return EINVAL if unhappy.  At other times it fails silently.
  // Readbacks are prudent.

  if (!ReadBackValidate) return 0;

  memset(&ReadBack, 0, sizeof(pcparms_t));
  ReadBack.pc_cid = PC_CLNULL;
  rslt = priocntl(P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ReadBack);
  assert(rslt >= 0, "priocntl failed");
  Actual = Expected = 0xBAD;
  assert(ParmInfo.pc_cid == ReadBack.pc_cid, "cid's don't match");
  if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
    Actual   = RTPRI(ReadBack)->rt_pri;
    Expected = RTPRI(ParmInfo)->rt_pri;
  } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
    Actual   = IAPRI(ReadBack)->ia_upri;
    Expected = IAPRI(ParmInfo)->ia_upri;
  } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
    Actual   = TSPRI(ReadBack)->ts_upri;
    Expected = TSPRI(ParmInfo)->ts_upri;
  } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) {
    Actual   = FXPRI(ReadBack)->fx_upri;
    Expected = FXPRI(ParmInfo)->fx_upri;
  } else {
    if (ThreadPriorityVerbose) {
      tty->print_cr("set_lwp_class_and_priority: unexpected class in readback: %d\n",
                    ParmInfo.pc_cid);
    }
  }

  // Mismatch is reported (verbose only) but not treated as an error.
  if (Actual != Expected) {
    if (ThreadPriorityVerbose) {
      tty->print_cr ("set_lwp_class_and_priority(%d %d) Class=%d: actual=%d vs expected=%d\n",
                     lwpid, newPrio, ReadBack.pc_cid, Actual, Expected);
    }
  }
#endif

  return 0;
}

// Solaris only gives access to 128 real priorities at a time,
// so we expand Java's ten to fill this range.  This would be better
// if we dynamically adjusted relative priorities.
//
// The ThreadPriorityPolicy option allows us to select 2 different
// priority scales.
//
// ThreadPriorityPolicy=0
// Since the Solaris' default priority is MaximumPriority, we do not
// set a priority lower than Max unless a priority lower than
// NormPriority is requested.
//
// ThreadPriorityPolicy=1
// This mode causes the priority table to get filled with
// linear values.  NormPriority gets mapped to 50% of the
// Maximum priority and so on.  This will cause VM threads
// to get unfair treatment against other Solaris processes
// which do not explicitly alter their thread priorities.
//

int os::java_to_os_priority[CriticalPriority + 1] = {
  -99999,         // 0 Entry should never be used

  0,              // 1 MinPriority
  32,             // 2
  64,             // 3

  96,             // 4
  127,            // 5 NormPriority
  127,            // 6

  127,            // 7
  127,            // 8
  127,            // 9 NearMaxPriority

  127,            // 10 MaxPriority

  -criticalPrio   // 11 CriticalPriority (negative marks the critical request)
};
D
duke 已提交
3826 3827

// Apply the mapped OS priority 'newpri' (an entry of java_to_os_priority)
// to 'thread'.  Returns OS_OK on success, OS_ERR otherwise.
OSReturn os::set_native_priority(Thread* thread, int newpri) {
  OSThread* osthread = thread->osthread();

  // Save requested priority in case the thread hasn't been started
  osthread->set_native_priority(newpri);

  // Check for critical priority request (marked by the negative sentinel
  // installed in java_to_os_priority by lwp_priocntl_init()).
  bool fxcritical = false;
  if (newpri == -criticalPrio) {
    fxcritical = true;
    newpri = criticalPrio;
  }

  assert(newpri >= MinimumPriority && newpri <= MaximumPriority, "bad priority mapping");
  if (!UseThreadPriorities) return OS_OK;

  int status = 0;

  if (!fxcritical) {
    // Use thr_setprio only if we have a priority that thr_setprio understands
    status = thr_setprio(thread->osthread()->thread_id(), newpri);
  }

  if (os::Solaris::T2_libthread() ||
      (UseBoundThreads && osthread->is_vm_created())) {
    int lwp_status =
      set_lwp_class_and_priority(osthread->thread_id(),
                                 osthread->lwp_id(),
                                 newpri,
                                 fxcritical ? fxLimits.schedPolicy : myClass,
                                 !fxcritical);
    if (lwp_status != 0 && fxcritical) {
      // Try again, this time without changing the scheduling class
      newpri = java_MaxPriority_to_os_priority;
      lwp_status = set_lwp_class_and_priority(osthread->thread_id(),
                                              osthread->lwp_id(),
                                              newpri, myClass, false);
    }
    status |= lwp_status;
  }
  return (status == 0) ? OS_OK : OS_ERR;
}


// Report the thread's libthread priority through *priority_ptr.
OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
  // With priorities disabled every thread reports the normal priority.
  if (!UseThreadPriorities) {
    *priority_ptr = NormalPriority;
    return OS_OK;
  }

  int prio = 0;
  const int rc = thr_getprio(thread->osthread()->thread_id(), &prio);
  if (rc != 0) {
    return OS_ERR;
  }

  *priority_ptr = prio;
  return OS_OK;
}


// Hint to the underlying OS that a task switch would not be good.
// Void return because it's a hint and can fail.
void os::hint_no_preempt() {
  // schedctl_init() returns this lwp's schedctl handle; schedctl_start()
  // begins a "don't preempt me" interval on it.
  schedctl_start(schedctl_init());
}

S
sla 已提交
3892 3893 3894 3895 3896 3897 3898 3899 3900 3901 3902 3903 3904 3905 3906 3907 3908 3909 3910 3911 3912 3913 3914 3915 3916 3917 3918 3919 3920 3921 3922 3923 3924 3925 3926 3927 3928 3929 3930 3931 3932 3933 3934 3935 3936 3937 3938 3939 3940 3941 3942 3943 3944 3945 3946 3947 3948 3949 3950 3951 3952 3953
// Drop the ucontext saved at suspend time once the thread is resumed.
static void resume_clear_context(OSThread *osthread) {
  osthread->set_ucontext(NULL);
}

// Stash the ucontext delivered to the suspend signal handler on the OSThread
// so the suspended thread's state can be inspected while it is stopped.
static void suspend_save_context(OSThread *osthread, ucontext_t* context) {
  osthread->set_ucontext(context);
}

// Serializes the suspend/resume handshake between the requesting thread
// (do_suspend/do_resume) and SR_handler.
static Semaphore sr_semaphore;

// Signal handler side of the suspend/resume protocol.  Runs in the context
// of the target thread when it receives the SR (SIGasync) signal; cooperates
// with do_suspend()/do_resume() via osthread->sr and sr_semaphore.
void os::Solaris::SR_handler(Thread* thread, ucontext_t* uc) {
  // Save and restore errno to avoid confusing native code with EINTR
  // after sigsuspend.
  int old_errno = errno;

  OSThread* osthread = thread->osthread();
  assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");

  os::SuspendResume::State current = osthread->sr.state();
  if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
    suspend_save_context(osthread, uc);

    // attempt to switch the state, we assume we had a SUSPEND_REQUEST
    os::SuspendResume::State state = osthread->sr.suspended();
    if (state == os::SuspendResume::SR_SUSPENDED) {
      sigset_t suspend_set;  // signals for sigsuspend()

      // get current set of blocked signals and unblock resume signal
      thr_sigsetmask(SIG_BLOCK, NULL, &suspend_set);
      sigdelset(&suspend_set, os::Solaris::SIGasync());

      // tell do_suspend() we are stopped, then park until resumed
      sr_semaphore.signal();
      // wait here until we are resumed
      while (1) {
        sigsuspend(&suspend_set);

        // loop until the wakeup request has actually switched us to RUNNING
        os::SuspendResume::State result = osthread->sr.running();
        if (result == os::SuspendResume::SR_RUNNING) {
          sr_semaphore.signal();
          break;
        }
      }

    } else if (state == os::SuspendResume::SR_RUNNING) {
      // request was cancelled, continue
    } else {
      ShouldNotReachHere();
    }

    resume_clear_context(osthread);
  } else if (current == os::SuspendResume::SR_RUNNING) {
    // request was cancelled, continue
  } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
    // ignore
  } else {
    // ignore
  }

  errno = old_errno;
}


// Interrupt 'thread': set its interrupted flag and wake it from any of the
// parking/sleeping mechanisms it may be blocked in.
void os::interrupt(Thread* thread) {
  assert(Thread::current() == thread || Threads_lock->owned_by_self(), "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();

  int isInterrupted = osthread->interrupted();
  if (!isInterrupted) {
      osthread->set_interrupted(true);
      // Make the flag visible before the wakeups below.
      OrderAccess::fence();
      // os::sleep() is implemented with either poll (NULL,0,timeout) or
      // by parking on _SleepEvent.  If the former, thr_kill will unwedge
      // the sleeper by SIGINTR, otherwise the unpark() will wake the sleeper.
      ParkEvent * const slp = thread->_SleepEvent ;
      if (slp != NULL) slp->unpark() ;
  }

  // For JSR166:  unpark after setting status but before thr_kill -dl
  if (thread->is_Java_thread()) {
    ((JavaThread*)thread)->parker()->unpark();
  }

  // Handle interruptible wait() ...
  ParkEvent * const ev = thread->_ParkEvent ;
  if (ev != NULL) ev->unpark() ;

  // When events are used everywhere for os::sleep, then this thr_kill
  // will only be needed if UseVMInterruptibleIO is true.

  if (!isInterrupted) {
    // Only signal on the first interrupt (flag was clear on entry).
    int status = thr_kill(osthread->thread_id(), os::Solaris::SIGinterrupt());
    assert_status(status == 0, status, "thr_kill");

    // Bump thread interruption counter
    RuntimeService::record_thread_interrupt_signaled_count();
  }
}


// Return the thread's interrupted status; when 'clear_interrupted' is true
// and the flag was set, reset it (Thread.interrupted() semantics).
bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
  assert(Thread::current() == thread || Threads_lock->owned_by_self(), "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();

  bool res = osthread->interrupted();

  // NOTE that since there is no "lock" around these two operations,
  // there is the possibility that the interrupted flag will be
  // "false" but that the interrupt event will be set. This is
  // intentional. The effect of this is that Object.wait() will appear
  // to have a spurious wakeup, which is not harmful, and the
  // possibility is so rare that it is not worth the added complexity
  // to add yet another lock. It has also been recommended not to put
  // the interrupted flag into the os::Solaris::Event structure,
  // because it hides the issue.
  if (res && clear_interrupted) {
    osthread->set_interrupted(false);
  }
  return res;
}


// No OS-level statistics are collected on Solaris; intentionally empty.
void os::print_statistics() {
}

// Print a framed message on the error stream and block until the user types
// a reply; returns true when the reply starts with 'y' or 'Y'.
int os::message_box(const char* title, const char* message) {
  fdStream err(defaultStream::error_fd());

  for (int i = 0; i < 78; i++) err.print_raw("=");
  err.cr();
  err.print_raw_cr(title);
  for (int i = 0; i < 78; i++) err.print_raw("-");
  err.cr();
  err.print_raw_cr(message);
  for (int i = 0; i < 78; i++) err.print_raw("=");
  err.cr();

  char buf[16];
  // Prevent process from exiting upon "read error" without consuming all CPU
  while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }

  return buf[0] == 'y' || buf[0] == 'Y';
}

// Deliver the suspend/resume (SIGasync) signal to the target lwp.
// Returns the thr_kill status (0 on success).
static int sr_notify(OSThread* osthread) {
  int status = thr_kill(osthread->thread_id(), os::Solaris::SIGasync());
  assert_status(status == 0, status, "thr_kill");
  return status;
}

// "Randomly" selected value for how long we want to spin
// before bailing out on suspending a thread, also how often
// we send a signal to a thread we want to resume
// (Tuning constants for the do_suspend/do_resume protocol below.)
static const int RANDOMLY_LARGE_INTEGER = 1000000;
static const int RANDOMLY_LARGE_INTEGER2 = 100;

// Suspend the target thread: move its SuspendResume state machine from
// RUNNING to SUSPEND_REQUEST, signal it, and wait (on sr_semaphore) until
// the signal handler reports SUSPENDED.  Returns true iff the thread is
// now suspended; false if the request had to be cancelled (e.g. timeout
// racing with the thread).  Statement order is part of the protocol —
// do not reorder.
static bool do_suspend(OSThread* osthread) {
  assert(osthread->sr.is_running(), "thread should be running");
  assert(!sr_semaphore.trywait(), "semaphore has invalid state");

  // mark as suspended and send signal
  if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
    // failed to switch, state wasn't running?
    ShouldNotReachHere();
    return false;
  }

  if (sr_notify(osthread) != 0) {
    ShouldNotReachHere();
  }

  // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED
  while (true) {
    if (sr_semaphore.timedwait(0, 2000 * NANOSECS_PER_MILLISEC)) {
      break;
    } else {
      // timeout
      os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
      if (cancelled == os::SuspendResume::SR_RUNNING) {
        // Cancel succeeded before the target acted on the request.
        return false;
      } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
        // make sure that we consume the signal on the semaphore as well
        sr_semaphore.wait();
        break;
      } else {
        ShouldNotReachHere();
        return false;
      }
    }
  }

  guarantee(osthread->sr.is_suspended(), "Must be suspended");
  return true;
}

// Resume a thread previously stopped by do_suspend(): move the state
// machine from SUSPENDED to WAKEUP_REQUEST, then keep signalling the
// thread until it confirms (via sr_semaphore) that it is RUNNING again.
// The loop exits only via the `return` inside it; the trailing guarantee
// is a belt-and-braces check should that ever change.
static void do_resume(OSThread* osthread) {
  assert(osthread->sr.is_suspended(), "thread should be suspended");
  assert(!sr_semaphore.trywait(), "invalid semaphore state");

  if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
    // failed to switch to WAKEUP_REQUEST
    ShouldNotReachHere();
    return;
  }

  while (true) {
    if (sr_notify(osthread) == 0) {
      // Short wait; re-signal on timeout in case the signal was lost.
      if (sr_semaphore.timedwait(0, 2 * NANOSECS_PER_MILLISEC)) {
        if (osthread->sr.is_running()) {
          return;
        }
      }
    } else {
      ShouldNotReachHere();
    }
  }

  guarantee(osthread->sr.is_running(), "Must be running!");
}

void os::SuspendedThreadTask::internal_do_task() {
  if (do_suspend(_thread->osthread())) {
    SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
    do_task(context);
    do_resume(_thread->osthread());
  }
}

// A SuspendedThreadTask that captures the program counter of the target
// thread while it is stopped.  Used by os::get_thread_pc() below.
class PcFetcher : public os::SuspendedThreadTask {
public:
  PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
  // Valid only after run() has completed (guarded by is_done()).
  ExtendedPC result();
protected:
  void do_task(const os::SuspendedThreadTaskContext& context);
private:
  ExtendedPC _epc;  // pc sampled from the suspended thread's ucontext
};

// Return the sampled pc; only legal once the task has run to completion.
ExtendedPC PcFetcher::result() {
  guarantee(is_done(), "task is not done yet.");
  return _epc;
}

// Runs while the target thread is suspended: extract the pc from the
// saved ucontext.  A NULL ucontext leaves _epc default-constructed.
void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
  Thread* thread = context.thread();
  OSThread* osthread = thread->osthread();
  if (osthread->ucontext() != NULL) {
    _epc = os::Solaris::ucontext_get_pc((ucontext_t *) context.ucontext());
  } else {
    // NULL context is unexpected, double-check this is the VMThread
    guarantee(thread->is_VM_thread(), "can only be called for VMThread");
  }
}

D
duke 已提交
4147 4148 4149 4150 4151 4152 4153
// A lightweight implementation that does not suspend the target thread and
// thus returns only a hint. Used for profiling only!
ExtendedPC os::get_thread_pc(Thread* thread) {
  // Make sure that it is called by the watcher and the Threads lock is owned.
  assert(Thread::current()->is_Watcher_thread(), "Must be watcher and own Threads_lock");
  // For now, is only used to profile the VM Thread
  assert(thread->is_VM_thread(), "Can only be called for VMThread");
S
sla 已提交
4154 4155 4156
  PcFetcher fetcher(thread);
  fetcher.run();
  return fetcher.result();
D
duke 已提交
4157 4158 4159 4160 4161 4162 4163 4164 4165 4166 4167 4168 4169 4170 4171 4172 4173 4174 4175 4176 4177 4178 4179 4180 4181 4182 4183 4184 4185 4186 4187 4188 4189 4190 4191 4192
}


// This does not do anything on Solaris. This is basically a hook for being
// able to use structured exception handling (thread-local exception filters) on, e.g., Win32.
void os::os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread) {
  // Invoke the Java call directly; no platform wrapping needed here.
  f(value, method, args, thread);
}

// This routine may be used by user applications as a "hook" to catch signals.
// The user-defined signal handler must pass unrecognized signals to this
// routine, and if it returns true (non-zero), then the signal handler must
// return immediately.  If the flag "abort_if_unrecognized" is true, then this
// routine will never return false (zero), but instead will execute a VM panic
// routine that kills the process.
//
// If this routine returns false, it is OK to call it again.  This allows
// the user-defined signal handler to perform checks either before or after
// the VM performs its own checks.  Naturally, the user code would be making
// a serious error if it tried to handle an exception (such as a null check
// or breakpoint) that the VM was generating for its own correct operation.
//
// This routine may recognize any of the following kinds of signals:
// SIGBUS, SIGSEGV, SIGILL, SIGFPE, BREAK_SIGNAL, SIGPIPE, SIGXFSZ,
// os::Solaris::SIGasync
// It should be consulted by handlers for any of those signals.
// It explicitly does not recognize os::Solaris::SIGinterrupt
//
// The caller of this routine must pass in the three arguments supplied
// to the function referred to in the "sa_sigaction" (not the "sa_handler")
// field of the structure passed to sigaction().  This routine assumes that
// the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
//
// Note that the VM will print warnings if it detects conflicting signal
// handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
//
4193 4194 4195
extern "C" JNIEXPORT int
JVM_handle_solaris_signal(int signo, siginfo_t* siginfo, void* ucontext,
                          int abort_if_unrecognized);
D
duke 已提交
4196 4197 4198


void signalHandler(int sig, siginfo_t* info, void* ucVoid) {
4199
  int orig_errno = errno;  // Preserve errno value over signal handler.
D
duke 已提交
4200
  JVM_handle_solaris_signal(sig, info, ucVoid, true);
4201
  errno = orig_errno;
D
duke 已提交
4202 4203 4204 4205 4206 4207 4208 4209 4210 4211 4212 4213 4214 4215 4216 4217 4218 4219 4220 4221 4222 4223 4224 4225 4226 4227 4228 4229 4230 4231 4232 4233 4234 4235 4236 4237 4238 4239 4240 4241 4242 4243 4244 4245 4246 4247 4248 4249 4250 4251 4252 4253 4254 4255 4256 4257 4258 4259 4260 4261 4262 4263 4264 4265 4266 4267 4268 4269 4270 4271 4272 4273 4274 4275 4276 4277 4278 4279 4280 4281 4282 4283 4284 4285 4286 4287 4288 4289 4290 4291 4292 4293 4294 4295 4296 4297 4298 4299 4300 4301 4302 4303 4304 4305 4306 4307 4308 4309 4310 4311 4312 4313 4314 4315 4316 4317 4318 4319 4320 4321 4322 4323 4324 4325 4326 4327 4328 4329 4330 4331 4332 4333 4334 4335 4336
}

/* Do not delete - if guarantee is ever removed,  a signal handler (even empty)
   is needed to provoke threads blocked on IO to return an EINTR
   Note: this explicitly does NOT call JVM_handle_solaris_signal and
   does NOT participate in signal chaining due to requirement for
   NOT setting SA_RESTART to make EINTR work. */
extern "C" void sigINTRHandler(int sig, siginfo_t* info, void* ucVoid) {
   if (!UseSignalChaining) {
     return;
   }
   // Chaining the interrupt signal is not supported: refuse to start.
   struct sigaction* actp = os::Solaris::get_chained_signal_action(sig);
   if (actp != NULL && actp->sa_handler != NULL) {
     vm_exit_during_initialization("Signal chaining detected for VM interrupt signal, try -XX:+UseAltSigs");
   }
}

// This boolean allows users to forward their own non-matching signals
// to JVM_handle_solaris_signal, harmlessly.
bool os::Solaris::signal_handlers_are_installed = false;

// For signal-chaining
// True once libjsig's JVM_* interposition symbols have been resolved.
bool os::Solaris::libjsig_is_loaded = false;
typedef struct sigaction *(*get_signal_t)(int);
// Points at libjsig's JVM_get_signal_action when libjsig is loaded.
get_signal_t os::Solaris::get_signal_action = NULL;

// Return the handler that was installed before the VM's own, either as
// recorded by libjsig or as saved by save_preinstalled_handler(); NULL if
// there is nothing to chain to.
struct sigaction* os::Solaris::get_chained_signal_action(int sig) {
  struct sigaction* actp = NULL;

  // First consult libjsig, which only covers signals up to Maxlibjsigsigs.
  if (libjsig_is_loaded && sig <= Maxlibjsigsigs) {
    actp = (*get_signal_action)(sig);
  }
  // Otherwise fall back to the handler the VM itself saved.
  if (actp == NULL) {
    actp = get_preinstalled_handler(sig);
  }
  return actp;
}

// Invoke a previously-installed (chained) handler for 'sig', emulating the
// kernel's sigaction semantics (SA_NODEFER, SA_SIGINFO, SA_RESETHAND, and
// the handler's signal mask).  Returns true if the signal was handled,
// false for SIG_DFL (caller should treat as unhandled).
static bool call_chained_handler(struct sigaction *actp, int sig,
                                 siginfo_t *siginfo, void *context) {
  // Call the old signal handler
  if (actp->sa_handler == SIG_DFL) {
    // It's more reasonable to let jvm treat it as an unexpected exception
    // instead of taking the default action.
    return false;
  } else if (actp->sa_handler != SIG_IGN) {
    if ((actp->sa_flags & SA_NODEFER) == 0) {
      // automatically block the signal
      sigaddset(&(actp->sa_mask), sig);
    }

    sa_handler_t hand;
    sa_sigaction_t sa;
    bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
    // retrieve the chained handler
    if (siginfo_flag_set) {
      sa = actp->sa_sigaction;
    } else {
      hand = actp->sa_handler;
    }

    // SA_RESETHAND: the handler is one-shot; revert to default afterwards.
    if ((actp->sa_flags & SA_RESETHAND) != 0) {
      actp->sa_handler = SIG_DFL;
    }

    // try to honor the signal mask
    sigset_t oset;
    thr_sigsetmask(SIG_SETMASK, &(actp->sa_mask), &oset);

    // call into the chained handler
    if (siginfo_flag_set) {
      (*sa)(sig, siginfo, context);
    } else {
      (*hand)(sig);
    }

    // restore the signal mask
    thr_sigsetmask(SIG_SETMASK, &oset, 0);
  }
  // Tell jvm's signal handler the signal is taken care of.
  return true;
}

// If signal chaining is enabled and a pre-existing handler is recorded for
// 'sig', invoke it.  Returns true iff a chained handler took the signal.
bool os::Solaris::chained_handler(int sig, siginfo_t* siginfo, void* context) {
  if (!UseSignalChaining) {
    return false;
  }
  struct sigaction* actp = get_chained_signal_action(sig);
  if (actp == NULL) {
    return false;
  }
  return call_chained_handler(actp, sig, siginfo, context);
}

// Return the handler the VM saved for 'sig' before overwriting it, or NULL
// if none was recorded.
struct sigaction* os::Solaris::get_preinstalled_handler(int sig) {
  assert((chainedsigactions != (struct sigaction *)NULL) && (preinstalled_sigs != (int *)NULL) , "signals not yet initialized");
  return (preinstalled_sigs[sig] != 0) ? &chainedsigactions[sig] : NULL;
}

// Record the handler that was installed for 'sig' before the VM replaced
// it, so chained_handler() can forward to it later.
void os::Solaris::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
  assert(sig > 0 && sig <= Maxsignum, "vm signal out of expected range");
  assert((chainedsigactions != (struct sigaction *)NULL) && (preinstalled_sigs != (int *)NULL) , "signals not yet initialized");
  chainedsigactions[sig] = oldAct;
  preinstalled_sigs[sig] = 1;  // mark the slot as occupied
}

void os::Solaris::set_signal_handler(int sig, bool set_installed, bool oktochain) {
  // Check for overwrite.
  struct sigaction oldAct;
  sigaction(sig, (struct sigaction*)NULL, &oldAct);
  void* oldhand = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*,  oldAct.sa_sigaction)
                                      : CAST_FROM_FN_PTR(void*,  oldAct.sa_handler);
  if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
      oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
      oldhand != CAST_FROM_FN_PTR(void*, signalHandler)) {
    if (AllowUserSignalHandlers || !set_installed) {
      // Do not overwrite; user takes responsibility to forward to us.
      return;
    } else if (UseSignalChaining) {
      if (oktochain) {
        // save the old handler in jvm
        save_preinstalled_handler(sig, oldAct);
      } else {
        vm_exit_during_initialization("Signal chaining not allowed for VM interrupt signal, try -XX:+UseAltSigs.");
      }
      // libjsig also interposes the sigaction() call below and saves the
      // old sigaction on it own.
    } else {
4337 4338
      fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
                    "%#lx for signal %d.", (long)oldhand, sig));
D
duke 已提交
4339 4340 4341 4342 4343 4344 4345 4346 4347 4348 4349 4350 4351 4352 4353 4354 4355 4356 4357 4358 4359 4360 4361 4362 4363 4364 4365 4366 4367 4368 4369 4370 4371 4372 4373 4374 4375 4376 4377 4378 4379 4380 4381 4382 4383 4384 4385 4386 4387 4388 4389 4390 4391 4392 4393 4394 4395 4396 4397 4398 4399 4400 4401 4402 4403 4404 4405 4406 4407 4408 4409 4410 4411 4412 4413 4414 4415 4416 4417 4418 4419 4420 4421 4422 4423 4424 4425 4426 4427 4428 4429 4430 4431 4432 4433 4434 4435 4436 4437 4438 4439 4440 4441 4442 4443 4444 4445 4446 4447 4448 4449 4450 4451 4452 4453 4454 4455 4456 4457 4458 4459 4460 4461 4462 4463 4464 4465 4466 4467 4468 4469 4470
    }
  }

  struct sigaction sigAct;
  sigfillset(&(sigAct.sa_mask));
  sigAct.sa_handler = SIG_DFL;

  sigAct.sa_sigaction = signalHandler;
  // Handle SIGSEGV on alternate signal stack if
  // not using stack banging
  if (!UseStackBanging && sig == SIGSEGV) {
    sigAct.sa_flags = SA_SIGINFO | SA_RESTART | SA_ONSTACK;
  // Interruptible i/o requires SA_RESTART cleared so EINTR
  // is returned instead of restarting system calls
  } else if (sig == os::Solaris::SIGinterrupt()) {
    sigemptyset(&sigAct.sa_mask);
    sigAct.sa_handler = NULL;
    sigAct.sa_flags = SA_SIGINFO;
    sigAct.sa_sigaction = sigINTRHandler;
  } else {
    sigAct.sa_flags = SA_SIGINFO | SA_RESTART;
  }
  os::Solaris::set_our_sigflags(sig, sigAct.sa_flags);

  sigaction(sig, &sigAct, &oldAct);

  void* oldhand2 = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
                                       : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  assert(oldhand2 == oldhand, "no concurrent signal handler installation");
}


// Check a signal's handler once: skip signals already flagged in
// check_signal_done.  Intended to be used as a statement (callers add ';').
#define DO_SIGNAL_CHECK(sig) \
  if (!sigismember(&check_signal_done, sig)) \
    os::Solaris::check_signal_handler(sig)

// This method is a periodic task to check for misbehaving JNI applications
// under CheckJNI, we can add any periodic checks here

void os::run_periodic_checks() {
  // A big source of grief is hijacking virt. addr 0x0 on Solaris,
  // thereby preventing a NULL checks.
  if(!check_addr0_done) check_addr0_done = check_addr0(tty);

  // check_signals is cleared when libjsig or user handlers are active.
  if (check_signals == false) return;

  // SEGV and BUS if overridden could potentially prevent
  // generation of hs*.log in the event of a crash, debugging
  // such a case can be very challenging, so we absolutely
  // check for the following for a good measure:
  DO_SIGNAL_CHECK(SIGSEGV);
  DO_SIGNAL_CHECK(SIGILL);
  DO_SIGNAL_CHECK(SIGFPE);
  DO_SIGNAL_CHECK(SIGBUS);
  DO_SIGNAL_CHECK(SIGPIPE);
  DO_SIGNAL_CHECK(SIGXFSZ);

  // ReduceSignalUsage allows the user to override these handlers
  // see comments at the very top and jvm_solaris.h
  if (!ReduceSignalUsage) {
    DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
    DO_SIGNAL_CHECK(BREAK_SIGNAL);
  }

  // See comments above for using JVM1/JVM2 and UseAltSigs
  DO_SIGNAL_CHECK(os::Solaris::SIGinterrupt());
  DO_SIGNAL_CHECK(os::Solaris::SIGasync());

}

typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);

// Lazily-resolved pointer to the "real" sigaction (bypassing interposers).
static os_sigaction_t os_sigaction = NULL;

// Verify that the handler currently installed for 'sig' is still the one
// the VM expects; warn (once per signal) if an application replaced it.
// (Removed VCS-viewer artifact lines that had corrupted this function.)
void os::Solaris::check_signal_handler(int sig) {
  char buf[O_BUFLEN];
  address jvmHandler = NULL;

  struct sigaction act;
  if (os_sigaction == NULL) {
    // only trust the default sigaction, in case it has been interposed
    os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
    if (os_sigaction == NULL) return;
  }

  os_sigaction(sig, (struct sigaction*)NULL, &act);

  address thisHandler = (act.sa_flags & SA_SIGINFO)
    ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
    : CAST_FROM_FN_PTR(address, act.sa_handler) ;

  // Determine which handler the VM expects for this signal.
  switch(sig) {
    case SIGSEGV:
    case SIGBUS:
    case SIGFPE:
    case SIGPIPE:
    case SIGXFSZ:
    case SIGILL:
      jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
      break;

    case SHUTDOWN1_SIGNAL:
    case SHUTDOWN2_SIGNAL:
    case SHUTDOWN3_SIGNAL:
    case BREAK_SIGNAL:
      jvmHandler = (address)user_handler();
      break;

    default:
      int intrsig = os::Solaris::SIGinterrupt();
      int asynsig = os::Solaris::SIGasync();

      if (sig == intrsig) {
        jvmHandler = CAST_FROM_FN_PTR(address, sigINTRHandler);
      } else if (sig == asynsig) {
        jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
      } else {
        // Not a signal the VM cares about.
        return;
      }
      break;
  }

  if (thisHandler != jvmHandler) {
    tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
    tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
    // Running under non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned SIG_IGN
    if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
      tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
                    exception_name(sig, buf, O_BUFLEN));
    }
  } else if(os::Solaris::get_our_sigflags(sig) != 0 && act.sa_flags != os::Solaris::get_our_sigflags(sig)) {
    tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:" PTR32_FORMAT, os::Solaris::get_our_sigflags(sig));
    tty->print_cr("  found:" PTR32_FORMAT, act.sa_flags);
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
  }

  // Print all the signal handler state
  if (sigismember(&check_signal_done, sig)) {
    print_signal_handlers(tty, buf, O_BUFLEN);
  }

}

void os::Solaris::install_signal_handlers() {
  bool libjsigdone = false;
  signal_handlers_are_installed = true;

  // signal-chaining
  typedef void (*signal_setting_t)();
  signal_setting_t begin_signal_setting = NULL;
  signal_setting_t end_signal_setting = NULL;
  begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                                        dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
  if (begin_signal_setting != NULL) {
    end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                                        dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
    get_signal_action = CAST_TO_FN_PTR(get_signal_t,
                                       dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
    get_libjsig_version = CAST_TO_FN_PTR(version_getting_t,
                                         dlsym(RTLD_DEFAULT, "JVM_get_libjsig_version"));
    libjsig_is_loaded = true;
    if (os::Solaris::get_libjsig_version != NULL) {
      libjsigversion =  (*os::Solaris::get_libjsig_version)();
    }
    assert(UseSignalChaining, "should enable signal-chaining");
  }
  if (libjsig_is_loaded) {
    // Tell libjsig jvm is setting signal handlers
    (*begin_signal_setting)();
  }

  set_signal_handler(SIGSEGV, true, true);
  set_signal_handler(SIGPIPE, true, true);
  set_signal_handler(SIGXFSZ, true, true);
  set_signal_handler(SIGBUS, true, true);
  set_signal_handler(SIGILL, true, true);
  set_signal_handler(SIGFPE, true, true);


  if (os::Solaris::SIGinterrupt() > OLDMAXSIGNUM || os::Solaris::SIGasync() > OLDMAXSIGNUM) {

    // Pre-1.4.1 Libjsig limited to signal chaining signals <= 32 so
    // can not register overridable signals which might be > 32
    if (libjsig_is_loaded && libjsigversion <= JSIG_VERSION_1_4_1) {
    // Tell libjsig jvm has finished setting signal handlers
      (*end_signal_setting)();
      libjsigdone = true;
    }
  }

  // Never ok to chain our SIGinterrupt
  set_signal_handler(os::Solaris::SIGinterrupt(), true, false);
  set_signal_handler(os::Solaris::SIGasync(), true, true);

  if (libjsig_is_loaded && !libjsigdone) {
    // Tell libjsig jvm finishes setting signal handlers
    (*end_signal_setting)();
  }

  // We don't activate signal checker if libjsig is in place, we trust ourselves
4548 4549
  // and if UserSignalHandler is installed all bets are off.
  // Log that signal checking is off only if -verbose:jni is specified.
D
duke 已提交
4550 4551
  if (CheckJNICalls) {
    if (libjsig_is_loaded) {
4552 4553 4554
      if (PrintJNIResolving) {
        tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
      }
D
duke 已提交
4555 4556 4557
      check_signals = false;
    }
    if (AllowUserSignalHandlers) {
4558 4559 4560
      if (PrintJNIResolving) {
        tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
      }
D
duke 已提交
4561 4562 4563 4564 4565 4566 4567 4568 4569 4570 4571 4572 4573 4574 4575 4576 4577 4578 4579 4580 4581 4582 4583 4584 4585 4586 4587 4588 4589 4590 4591 4592 4593 4594 4595 4596 4597 4598 4599 4600 4601
      check_signals = false;
    }
  }
}


void report_error(const char* file_name, int line_no, const char* title, const char* format, ...);

// Names of the Solaris signals, indexed by signal number (index 0 unused).
// Used by os::exception_name() below; signals past the end of this table
// are formatted as "SIG<n>".
const char * signames[] = {
  "SIG0",
  "SIGHUP", "SIGINT", "SIGQUIT", "SIGILL", "SIGTRAP",
  "SIGABRT", "SIGEMT", "SIGFPE", "SIGKILL", "SIGBUS",
  "SIGSEGV", "SIGSYS", "SIGPIPE", "SIGALRM", "SIGTERM",
  "SIGUSR1", "SIGUSR2", "SIGCLD", "SIGPWR", "SIGWINCH",
  "SIGURG", "SIGPOLL", "SIGSTOP", "SIGTSTP", "SIGCONT",
  "SIGTTIN", "SIGTTOU", "SIGVTALRM", "SIGPROF", "SIGXCPU",
  "SIGXFSZ", "SIGWAITING", "SIGLWP", "SIGFREEZE", "SIGTHAW",
  "SIGCANCEL", "SIGLOST"
};

// Format a signal number into 'buf' as its symbolic name ("SIGSEGV") or,
// for signals beyond the name table, as "SIG<n>".  Returns buf, or NULL
// if the code is not a valid signal number.
const char* os::exception_name(int exception_code, char* buf, size_t size) {
  if (exception_code <= 0 || exception_code > SIGRTMAX) {
    return NULL;  // not a signal
  }
  const size_t known = sizeof(signames) / sizeof(const char*);
  if ((size_t)exception_code < known) {
    jio_snprintf(buf, size, "%s", signames[exception_code]);
  } else {
    jio_snprintf(buf, size, "SIG%d", exception_code);
  }
  return buf;
}

// Definitions of the lazily-resolved libthread / getisax / liblgrp /
// meminfo function-pointer wrappers declared in os::Solaris.
// (Removed VCS-viewer artifact lines that had corrupted this region.)

// (Static) wrappers for the new libthread API
int_fnP_thread_t_iP_uP_stack_tP_gregset_t os::Solaris::_thr_getstate;
int_fnP_thread_t_i_gregset_t os::Solaris::_thr_setstate;
int_fnP_thread_t_i os::Solaris::_thr_setmutator;
int_fnP_thread_t os::Solaris::_thr_suspend_mutator;
int_fnP_thread_t os::Solaris::_thr_continue_mutator;

// (Static) wrapper for getisax(2) call.
os::Solaris::getisax_func_t os::Solaris::_getisax = 0;

// (Static) wrappers for the liblgrp API
os::Solaris::lgrp_home_func_t os::Solaris::_lgrp_home;
os::Solaris::lgrp_init_func_t os::Solaris::_lgrp_init;
os::Solaris::lgrp_fini_func_t os::Solaris::_lgrp_fini;
os::Solaris::lgrp_root_func_t os::Solaris::_lgrp_root;
os::Solaris::lgrp_children_func_t os::Solaris::_lgrp_children;
os::Solaris::lgrp_resources_func_t os::Solaris::_lgrp_resources;
os::Solaris::lgrp_nlgrps_func_t os::Solaris::_lgrp_nlgrps;
os::Solaris::lgrp_cookie_stale_func_t os::Solaris::_lgrp_cookie_stale;
os::Solaris::lgrp_cookie_t os::Solaris::_lgrp_cookie = 0;

// (Static) wrapper for meminfo() call.
os::Solaris::meminfo_func_t os::Solaris::_meminfo = 0;

4619 4620
static address resolve_symbol_lazy(const char* name) {
  address addr = (address) dlsym(RTLD_DEFAULT, name);
D
duke 已提交
4621 4622 4623
  if(addr == NULL) {
    // RTLD_DEFAULT was not defined on some early versions of 2.5.1
    addr = (address) dlsym(RTLD_NEXT, name);
4624 4625 4626 4627 4628 4629 4630 4631
  }
  return addr;
}

// Like resolve_symbol_lazy(), but a missing symbol is fatal.
// (Removed VCS-viewer artifact lines that had corrupted this function.)
static address resolve_symbol(const char* name) {
  address addr = resolve_symbol_lazy(name);
  if(addr == NULL) {
    fatal(dlerror());
  }
  return addr;
}



// isT2_libthread()
//
// Routine to determine if we are currently using the new T2 libthread.
//
// We determine if we are using T2 by reading /proc/self/lstatus and
// looking for a thread with the ASLWP bit set.  If we find this status
// bit set, we must assume that we are NOT using T2.  The T2 team
// has approved this algorithm.
//
// We need to determine if we are running with the new T2 libthread
// since setting native thread priorities is handled differently
// when using this library.  All threads created using T2 are bound
// threads. Calling thr_setprio is meaningless in this case.
//
bool isT2_libthread() {
  static prheader_t * lwpArray = NULL;
  static int lwpSize = 0;
  static int lwpFile = -1;
  lwpstatus_t * that;
  char lwpName [128];
  bool isT2 = false;

#define ADR(x)  ((uintptr_t)(x))
#define LWPINDEX(ary,ix)   ((lwpstatus_t *)(((ary)->pr_entsize * (ix)) + (ADR((ary) + 1))))

4663
  lwpFile = ::open("/proc/self/lstatus", O_RDONLY, 0);
D
duke 已提交
4664
  if (lwpFile < 0) {
4665 4666
      if (ThreadPriorityVerbose) warning ("Couldn't open /proc/self/lstatus\n");
      return false;
D
duke 已提交
4667
  }
4668
  lwpSize = 16*1024;
D
duke 已提交
4669
  for (;;) {
4670
    ::lseek64 (lwpFile, 0, SEEK_SET);
Z
zgu 已提交
4671
    lwpArray = (prheader_t *)NEW_C_HEAP_ARRAY(char, lwpSize, mtInternal);
4672
    if (::read(lwpFile, lwpArray, lwpSize) < 0) {
4673 4674 4675
      if (ThreadPriorityVerbose) warning("Error reading /proc/self/lstatus\n");
      break;
    }
D
duke 已提交
4676
    if ((lwpArray->pr_nent * lwpArray->pr_entsize) <= lwpSize) {
4677 4678 4679 4680 4681 4682 4683 4684 4685
       // We got a good snapshot - now iterate over the list.
      int aslwpcount = 0;
      for (int i = 0; i < lwpArray->pr_nent; i++ ) {
        that = LWPINDEX(lwpArray,i);
        if (that->pr_flags & PR_ASLWP) {
          aslwpcount++;
        }
      }
      if (aslwpcount == 0) isT2 = true;
D
duke 已提交
4686 4687 4688
      break;
    }
    lwpSize = lwpArray->pr_nent * lwpArray->pr_entsize;
Z
zgu 已提交
4689
    FREE_C_HEAP_ARRAY(char, lwpArray, mtInternal);  // retry.
D
duke 已提交
4690 4691
  }

Z
zgu 已提交
4692
  FREE_C_HEAP_ARRAY(char, lwpArray, mtInternal);
4693
  ::close (lwpFile);
4694 4695
  if (ThreadPriorityVerbose) {
    if (isT2) tty->print_cr("We are running with a T2 libthread\n");
D
duke 已提交
4696 4697
    else tty->print_cr("We are not running with a T2 libthread\n");
  }
4698
  return isT2;
D
duke 已提交
4699 4700 4701 4702 4703 4704 4705 4706 4707 4708 4709 4710 4711 4712 4713 4714 4715 4716 4717 4718 4719 4720 4721 4722 4723 4724 4725 4726 4727 4728 4729 4730 4731 4732 4733 4734 4735 4736 4737 4738 4739 4740 4741 4742 4743 4744 4745 4746 4747 4748 4749 4750 4751 4752 4753 4754 4755 4756 4757 4758 4759 4760 4761 4762 4763 4764 4765 4766 4767 4768 4769 4770 4771 4772 4773 4774 4775 4776 4777 4778 4779 4780 4781 4782 4783 4784 4785 4786 4787 4788 4789 4790 4791 4792 4793 4794 4795 4796 4797 4798 4799 4800 4801 4802 4803 4804 4805 4806 4807 4808
}


// Resolve and cache the libthread entry points the VM needs, verify the
// OS/libthread is new enough, and record the thread-library signal-handler
// code range (handler_start/handler_end).
void os::Solaris::libthread_init() {
  address func = (address)dlsym(RTLD_DEFAULT, "_thr_suspend_allmutators");

  // Determine if we are running with the new T2 libthread
  os::Solaris::set_T2_libthread(isT2_libthread());

  lwp_priocntl_init();

  // RTLD_DEFAULT was not defined on some early versions of 5.5.1
  if(func == NULL) {
    func = (address) dlsym(RTLD_NEXT, "_thr_suspend_allmutators");
    // Guarantee that this VM is running on an new enough OS (5.6 or
    // later) that it will have a new enough libthread.so.
    guarantee(func != NULL, "libthread.so is too old.");
  }

  // Initialize the new libthread getstate API wrappers
  func = resolve_symbol("thr_getstate");
  os::Solaris::set_thr_getstate(CAST_TO_FN_PTR(int_fnP_thread_t_iP_uP_stack_tP_gregset_t, func));

  func = resolve_symbol("thr_setstate");
  os::Solaris::set_thr_setstate(CAST_TO_FN_PTR(int_fnP_thread_t_i_gregset_t, func));

  func = resolve_symbol("thr_setmutator");
  os::Solaris::set_thr_setmutator(CAST_TO_FN_PTR(int_fnP_thread_t_i, func));

  func = resolve_symbol("thr_suspend_mutator");
  os::Solaris::set_thr_suspend_mutator(CAST_TO_FN_PTR(int_fnP_thread_t, func));

  func = resolve_symbol("thr_continue_mutator");
  os::Solaris::set_thr_continue_mutator(CAST_TO_FN_PTR(int_fnP_thread_t, func));

  // Record the address range of libthread's own signal handler code so the
  // VM can recognize frames belonging to it.
  int size;
  void (*handler_info_func)(address *, int *);
  handler_info_func = CAST_TO_FN_PTR(void (*)(address *, int *), resolve_symbol("thr_sighndlrinfo"));
  handler_info_func(&handler_start, &size);
  handler_end = handler_start + size;
}


// Function-pointer slots for the mutex/condvar primitives selected at
// startup by synchronization_init() (LWP, pthread, or default libthread).
int_fnP_mutex_tP os::Solaris::_mutex_lock;
int_fnP_mutex_tP os::Solaris::_mutex_trylock;
int_fnP_mutex_tP os::Solaris::_mutex_unlock;
int_fnP_mutex_tP_i_vP os::Solaris::_mutex_init;
int_fnP_mutex_tP os::Solaris::_mutex_destroy;
int os::Solaris::_mutex_scope = USYNC_THREAD;

int_fnP_cond_tP_mutex_tP_timestruc_tP os::Solaris::_cond_timedwait;
int_fnP_cond_tP_mutex_tP os::Solaris::_cond_wait;
int_fnP_cond_tP os::Solaris::_cond_signal;
int_fnP_cond_tP os::Solaris::_cond_broadcast;
int_fnP_cond_tP_i_vP os::Solaris::_cond_init;
int_fnP_cond_tP os::Solaris::_cond_destroy;
int os::Solaris::_cond_scope = USYNC_THREAD;

// Select the synchronization primitive implementation for the process:
// _lwp_* when UseLWPSynchronization, pthread_* when UsePthreads, otherwise
// the default libthread mutex_*/cond_* entry points.
void os::Solaris::synchronization_init() {
  if(UseLWPSynchronization) {
    os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_lock")));
    os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_trylock")));
    os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_unlock")));
    os::Solaris::set_mutex_init(lwp_mutex_init);
    os::Solaris::set_mutex_destroy(lwp_mutex_destroy);
    os::Solaris::set_mutex_scope(USYNC_THREAD);

    os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("_lwp_cond_timedwait")));
    os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("_lwp_cond_wait")));
    os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_signal")));
    os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_broadcast")));
    os::Solaris::set_cond_init(lwp_cond_init);
    os::Solaris::set_cond_destroy(lwp_cond_destroy);
    os::Solaris::set_cond_scope(USYNC_THREAD);
  }
  else {
    os::Solaris::set_mutex_scope(USYNC_THREAD);
    os::Solaris::set_cond_scope(USYNC_THREAD);

    if(UsePthreads) {
      os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_lock")));
      os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_trylock")));
      os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_unlock")));
      os::Solaris::set_mutex_init(pthread_mutex_default_init);
      os::Solaris::set_mutex_destroy(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_destroy")));

      os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("pthread_cond_timedwait")));
      os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("pthread_cond_wait")));
      os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_signal")));
      os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_broadcast")));
      os::Solaris::set_cond_init(pthread_cond_default_init);
      os::Solaris::set_cond_destroy(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_destroy")));
    }
    else {
      os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_lock")));
      os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_trylock")));
      os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_unlock")));
      os::Solaris::set_mutex_init(::mutex_init);
      os::Solaris::set_mutex_destroy(::mutex_destroy);

      os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("cond_timedwait")));
      os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("cond_wait")));
      os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_signal")));
      os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_broadcast")));
      os::Solaris::set_cond_init(::cond_init);
      os::Solaris::set_cond_destroy(::cond_destroy);
    }
  }
}

4809
bool os::Solaris::liblgrp_init() {
4810
  void *handle = dlopen("liblgrp.so.1", RTLD_LAZY);
D
duke 已提交
4811 4812 4813 4814 4815 4816
  if (handle != NULL) {
    os::Solaris::set_lgrp_home(CAST_TO_FN_PTR(lgrp_home_func_t, dlsym(handle, "lgrp_home")));
    os::Solaris::set_lgrp_init(CAST_TO_FN_PTR(lgrp_init_func_t, dlsym(handle, "lgrp_init")));
    os::Solaris::set_lgrp_fini(CAST_TO_FN_PTR(lgrp_fini_func_t, dlsym(handle, "lgrp_fini")));
    os::Solaris::set_lgrp_root(CAST_TO_FN_PTR(lgrp_root_func_t, dlsym(handle, "lgrp_root")));
    os::Solaris::set_lgrp_children(CAST_TO_FN_PTR(lgrp_children_func_t, dlsym(handle, "lgrp_children")));
4817
    os::Solaris::set_lgrp_resources(CAST_TO_FN_PTR(lgrp_resources_func_t, dlsym(handle, "lgrp_resources")));
D
duke 已提交
4818 4819 4820 4821 4822 4823
    os::Solaris::set_lgrp_nlgrps(CAST_TO_FN_PTR(lgrp_nlgrps_func_t, dlsym(handle, "lgrp_nlgrps")));
    os::Solaris::set_lgrp_cookie_stale(CAST_TO_FN_PTR(lgrp_cookie_stale_func_t,
                                       dlsym(handle, "lgrp_cookie_stale")));

    lgrp_cookie_t c = lgrp_init(LGRP_VIEW_CALLER);
    set_lgrp_cookie(c);
4824
    return true;
D
duke 已提交
4825
  }
4826
  return false;
D
duke 已提交
4827 4828 4829
}

void os::Solaris::misc_sym_init() {
4830 4831 4832 4833 4834 4835
  address func;

  // getisax
  func = resolve_symbol_lazy("getisax");
  if (func != NULL) {
    os::Solaris::_getisax = CAST_TO_FN_PTR(getisax_func_t, func);
D
duke 已提交
4836
  }
4837 4838 4839

  // meminfo
  func = resolve_symbol_lazy("meminfo");
D
duke 已提交
4840 4841 4842 4843 4844
  if (func != NULL) {
    os::Solaris::set_meminfo(CAST_TO_FN_PTR(meminfo_func_t, func));
  }
}

4845 4846 4847 4848 4849
uint_t os::Solaris::getisax(uint32_t* array, uint_t n) {
  assert(_getisax != NULL, "_getisax not set");
  return _getisax(array, n);
}

D
duke 已提交
4850 4851 4852 4853 4854 4855 4856 4857 4858 4859 4860 4861 4862 4863 4864 4865 4866 4867 4868 4869 4870 4871 4872 4873
// int pset_getloadavg(psetid_t pset, double loadavg[], int nelem);
typedef long (*pset_getloadavg_type)(psetid_t pset, double loadavg[], int nelem);
static pset_getloadavg_type pset_getloadavg_ptr = NULL;

void init_pset_getloadavg_ptr(void) {
  pset_getloadavg_ptr =
    (pset_getloadavg_type)dlsym(RTLD_DEFAULT, "pset_getloadavg");
  if (PrintMiscellaneous && Verbose && pset_getloadavg_ptr == NULL) {
    warning("pset_getloadavg function not found");
  }
}

int os::Solaris::_dev_zero_fd = -1;

// this is called _before_ the global arguments have been parsed
void os::init(void) {
  _initial_pid = getpid();

  max_hrtime = first_hrtime = gethrtime();

  init_random(1234567);

  page_size = sysconf(_SC_PAGESIZE);
  if (page_size == -1)
4874 4875
    fatal(err_msg("os_solaris.cpp: os::init: sysconf failed (%s)",
                  strerror(errno)));
D
duke 已提交
4876 4877 4878 4879
  init_page_sizes((size_t) page_size);

  Solaris::initialize_system_info();

4880 4881 4882 4883
  // Initialize misc. symbols as soon as possible, so we can use them
  // if we need them.
  Solaris::misc_sym_init();

4884
  int fd = ::open("/dev/zero", O_RDWR);
D
duke 已提交
4885
  if (fd < 0) {
4886
    fatal(err_msg("os::init: cannot open /dev/zero (%s)", strerror(errno)));
D
duke 已提交
4887 4888 4889 4890 4891 4892 4893 4894 4895 4896 4897 4898 4899 4900 4901 4902 4903 4904 4905 4906 4907 4908 4909 4910 4911 4912 4913 4914 4915 4916 4917 4918 4919 4920 4921 4922 4923 4924 4925 4926 4927 4928 4929 4930 4931 4932 4933 4934 4935 4936 4937 4938 4939 4940 4941 4942 4943 4944 4945 4946 4947 4948 4949 4950 4951 4952 4953 4954 4955 4956 4957 4958 4959 4960 4961 4962
  } else {
    Solaris::set_dev_zero_fd(fd);

    // Close on exec, child won't inherit.
    fcntl(fd, F_SETFD, FD_CLOEXEC);
  }

  clock_tics_per_sec = CLK_TCK;

  // check if dladdr1() exists; dladdr1 can provide more information than
  // dladdr for os::dll_address_to_function_name. It comes with SunOS 5.9
  // and is available on linker patches for 5.7 and 5.8.
  // libdl.so must have been loaded, this call is just an entry lookup
  void * hdl = dlopen("libdl.so", RTLD_NOW);
  if (hdl)
    dladdr1_func = CAST_TO_FN_PTR(dladdr1_func_type, dlsym(hdl, "dladdr1"));

  // (Solaris only) this switches to calls that actually do locking.
  ThreadCritical::initialize();

  main_thread = thr_self();

  // Constant minimum stack size allowed. It must be at least
  // the minimum of what the OS supports (thr_min_stack()), and
  // enough to allow the thread to get to user bytecode execution.
  Solaris::min_stack_allowed = MAX2(thr_min_stack(), Solaris::min_stack_allowed);
  // If the pagesize of the VM is greater than 8K determine the appropriate
  // number of initial guard pages.  The user can change this with the
  // command line arguments, if needed.
  if (vm_page_size() > 8*K) {
    StackYellowPages = 1;
    StackRedPages = 1;
    StackShadowPages = round_to((StackShadowPages*8*K), vm_page_size()) / vm_page_size();
  }
}

// To install functions for atexit system call
extern "C" {
  // atexit() requires a plain C function; this trampoline forwards to
  // perfMemory_exit() so perf-data resources are released on VM exit.
  static void perfMemory_exit_helper() {
    perfMemory_exit();
  }
}

// this is called _after_ the global arguments have been parsed
jint os::init_2(void) {
  // try to enable extended file IO ASAP, see 6431278
  os::Solaris::try_enable_extended_io();

  // Allocate a single page and mark it as readable for safepoint polling.  Also
  // use this first mmap call to check support for MAP_ALIGN.
  address polling_page = (address)Solaris::mmap_chunk((char*)page_size,
                                                      page_size,
                                                      MAP_PRIVATE | MAP_ALIGN,
                                                      PROT_READ);
  if (polling_page == NULL) {
    has_map_align = false;
    polling_page = (address)Solaris::mmap_chunk(NULL, page_size, MAP_PRIVATE,
                                                PROT_READ);
  }

  os::set_polling_page(polling_page);

#ifndef PRODUCT
  if( Verbose && PrintMiscellaneous )
    tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", (intptr_t)polling_page);
#endif

  if (!UseMembar) {
    address mem_serialize_page = (address)Solaris::mmap_chunk( NULL, page_size, MAP_PRIVATE, PROT_READ | PROT_WRITE );
    guarantee( mem_serialize_page != NULL, "mmap Failed for memory serialize page");
    os::set_memory_serialize_page( mem_serialize_page );

#ifndef PRODUCT
    if(Verbose && PrintMiscellaneous)
      tty->print("[Memory Serialize  Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
#endif
  }

  // Check minimum allowable stack size for thread creation and to initialize
  // the java system classes, including StackOverflowError - depends on page
  // size.  Add a page for compiler2 recursion in main thread.
  // Add in 2*BytesPerWord times page size to account for VM stack during
  // class initialization depending on 32 or 64 bit VM.
  os::Solaris::min_stack_allowed = MAX2(os::Solaris::min_stack_allowed,
            (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
                    2*BytesPerWord COMPILER2_PRESENT(+1)) * page_size);

  size_t threadStackSizeInBytes = ThreadStackSize * K;
  if (threadStackSizeInBytes != 0 &&
    threadStackSizeInBytes < os::Solaris::min_stack_allowed) {
    tty->print_cr("\nThe stack size specified is too small, Specify at least %dk",
                  os::Solaris::min_stack_allowed/K);
    return JNI_ERR;
  }

  // For 64kbps there will be a 64kb page size, which makes
  // the usable default stack size quite a bit less.  Increase the
  // stack for 64kb (or any > than 8kb) pages, this increases
  // virtual memory fragmentation (since we're not creating the
  // stack on a power of 2 boundary.  The real fix for this
  // should be to fix the guard page mechanism.

  if (vm_page_size() > 8*K) {
      threadStackSizeInBytes = (threadStackSizeInBytes != 0)
         ? threadStackSizeInBytes +
           ((StackYellowPages + StackRedPages) * vm_page_size())
         : 0;
      ThreadStackSize = threadStackSizeInBytes/K;
  }

  // Make the stack size a multiple of the page size so that
  // the yellow/red zones can be guarded.
  JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes,
        vm_page_size()));

  Solaris::libthread_init();

  if (UseNUMA) {
    if (!Solaris::liblgrp_init()) {
      UseNUMA = false;
    } else {
      size_t lgrp_limit = os::numa_get_groups_num();
      int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit, mtInternal);
      size_t lgrp_num = os::numa_get_leaf_groups(lgrp_ids, lgrp_limit);
      FREE_C_HEAP_ARRAY(int, lgrp_ids, mtInternal);
      if (lgrp_num < 2) {
        // There's only one locality group, disable NUMA.
        UseNUMA = false;
      }
    }
    if (!UseNUMA && ForceNUMA) {
      UseNUMA = true;
    }
  }

  Solaris::signal_sets_init();
  Solaris::init_signal_mem();
  Solaris::install_signal_handlers();

  if (libjsigversion < JSIG_VERSION_1_4_1) {
    Maxlibjsigsigs = OLDMAXSIGNUM;
  }

  // initialize synchronization primitives to use either thread or
  // lwp synchronization (controlled by UseLWPSynchronization)
  Solaris::synchronization_init();

  if (MaxFDLimit) {
    // set the number of file descriptors to max. print out error
    // if getrlimit/setrlimit fails but continue regardless.
    struct rlimit nbr_files;
    int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
    if (status != 0) {
      if (PrintMiscellaneous && (Verbose || WizardMode))
        perror("os::init_2 getrlimit failed");
    } else {
      nbr_files.rlim_cur = nbr_files.rlim_max;
      status = setrlimit(RLIMIT_NOFILE, &nbr_files);
      if (status != 0) {
        if (PrintMiscellaneous && (Verbose || WizardMode))
          perror("os::init_2 setrlimit failed");
      }
    }
  }

  // Calculate theoretical max. size of Threads to guard gainst
  // artifical out-of-memory situations, where all available address-
  // space has been reserved by thread stacks. Default stack size is 1Mb.
  size_t pre_thread_stack_size = (JavaThread::stack_size_at_create()) ?
    JavaThread::stack_size_at_create() : (1*K*K);
  assert(pre_thread_stack_size != 0, "Must have a stack");
  // Solaris has a maximum of 4Gb of user programs. Calculate the thread limit when
  // we should start doing Virtual Memory banging. Currently when the threads will
  // have used all but 200Mb of space.
  size_t max_address_space = ((unsigned int)4 * K * K * K) - (200 * K * K);
  Solaris::_os_thread_limit = max_address_space / pre_thread_stack_size;

  // at-exit methods are called in the reverse order of their registration.
  // In Solaris 7 and earlier, atexit functions are called on return from
  // main or as a result of a call to exit(3C). There can be only 32 of
  // these functions registered and atexit() does not set errno. In Solaris
  // 8 and later, there is no limit to the number of functions registered
  // and atexit() sets errno. In addition, in Solaris 8 and later, atexit
  // functions are called upon dlclose(3DL) in addition to return from main
  // and exit(3C).

  if (PerfAllowAtExitRegistration) {
    // only register atexit functions if PerfAllowAtExitRegistration is set.
    // atexit functions can be delayed until process exit time, which
    // can be problematic for embedded VM situations. Embedded VMs should
    // call DestroyJavaVM() to assure that VM resources are released.

    // note: perfMemory_exit_helper atexit function may be removed in
    // the future if the appropriate cleanup code can be added to the
    // VM_Exit VMOperation's doit method.
    if (atexit(perfMemory_exit_helper) != 0) {
      warning("os::init2 atexit(perfMemory_exit_helper) failed");
    }
  }

  // Init pset_loadavg function pointer
  init_pset_getloadavg_ptr();

  return JNI_OK;
}

// Mark the polling page as unreadable, so that a safepoint poll traps.
void os::make_polling_page_unreadable(void) {
  const int rc = mprotect((char *)_polling_page, page_size, PROT_NONE);
  if (rc != 0) {
    fatal("Could not disable polling page");
  }
};

// Mark the polling page as readable again, ending the safepoint.
void os::make_polling_page_readable(void) {
  const int rc = mprotect((char *)_polling_page, page_size, PROT_READ);
  if (rc != 0) {
    fatal("Could not enable polling page");
  }
};

// OS interface.

// No C-heap consistency check is implemented on Solaris; always report OK.
bool os::check_heap(bool force) { return true; }

typedef int (*vsnprintf_t)(char* buf, size_t count, const char* fmt, va_list argptr);
static vsnprintf_t sol_vsnprintf = NULL;

// vsnprintf replacement that resolves the libc implementation lazily.
// We first search the objects loaded after libjvm (RTLD_NEXT), then the
// ones loaded before it (RTLD_DEFAULT), preferring __vsnprintf over
// vsnprintf at each stage.  The result is cached in sol_vsnprintf.
int local_vsnprintf(char* buf, size_t count, const char* fmt, va_list argptr) {
  if (sol_vsnprintf == NULL) {
    void* search_order[] = { RTLD_NEXT, RTLD_DEFAULT };
    for (int i = 0; i < 2 && sol_vsnprintf == NULL; i++) {
      sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(search_order[i], "__vsnprintf"));
      if (sol_vsnprintf == NULL) {
        sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(search_order[i], "vsnprintf"));
      }
    }
    assert(sol_vsnprintf != NULL, "vsnprintf not found");
  }
  return (*sol_vsnprintf)(buf, count, fmt, argptr);
}


// Is a (classpath) directory empty?
bool os::dir_is_empty(const char* path) {
  DIR *dir = NULL;
  struct dirent *ptr;

  dir = opendir(path);
  if (dir == NULL) return true;

  /* Scan the directory */
  bool result = true;
  char buf[sizeof(struct dirent) + MAX_PATH];
  struct dirent *dbuf = (struct dirent *) buf;
  while (result && (ptr = readdir(dir, dbuf)) != NULL) {
    if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
      result = false;
    }
  }
  closedir(dir);
  return result;
}

5151 5152 5153 5154 5155 5156 5157 5158 5159 5160 5161 5162 5163 5164 5165 5166 5167 5168 5169 5170
// This code originates from JDK's sysOpen and open64_w
// from src/solaris/hpi/src/system_md.c

#ifndef O_DELETE
#define O_DELETE 0x10000
#endif

// Open a file. Unlink the file immediately after open returns
// if the specified oflag has the O_DELETE flag set.
// O_DELETE is used only in j2se/src/share/native/java/util/zip/ZipFile.c

int os::open(const char *path, int oflag, int mode) {
  if (strlen(path) > MAX_PATH - 1) {
    errno = ENAMETOOLONG;
    return -1;
  }
  int fd;
  int o_delete = (oflag & O_DELETE);
  oflag = oflag & ~O_DELETE;

5171
  fd = ::open64(path, oflag, mode);
5172 5173 5174 5175 5176 5177 5178 5179 5180 5181 5182 5183 5184 5185 5186 5187 5188 5189 5190 5191 5192 5193 5194 5195 5196 5197 5198 5199 5200 5201 5202 5203 5204 5205 5206 5207 5208 5209 5210 5211 5212 5213 5214 5215 5216 5217 5218 5219 5220 5221 5222 5223 5224 5225 5226 5227 5228 5229 5230 5231 5232 5233 5234 5235 5236 5237 5238 5239 5240 5241 5242 5243 5244 5245 5246 5247 5248 5249 5250 5251 5252 5253 5254 5255 5256 5257 5258 5259 5260 5261 5262 5263 5264 5265 5266 5267 5268 5269
  if (fd == -1) return -1;

  //If the open succeeded, the file might still be a directory
  {
    struct stat64 buf64;
    int ret = ::fstat64(fd, &buf64);
    int st_mode = buf64.st_mode;

    if (ret != -1) {
      if ((st_mode & S_IFMT) == S_IFDIR) {
        errno = EISDIR;
        ::close(fd);
        return -1;
      }
    } else {
      ::close(fd);
      return -1;
    }
  }
    /*
     * 32-bit Solaris systems suffer from:
     *
     * - an historical default soft limit of 256 per-process file
     *   descriptors that is too low for many Java programs.
     *
     * - a design flaw where file descriptors created using stdio
     *   fopen must be less than 256, _even_ when the first limit above
     *   has been raised.  This can cause calls to fopen (but not calls to
     *   open, for example) to fail mysteriously, perhaps in 3rd party
     *   native code (although the JDK itself uses fopen).  One can hardly
     *   criticize them for using this most standard of all functions.
     *
     * We attempt to make everything work anyways by:
     *
     * - raising the soft limit on per-process file descriptors beyond
     *   256
     *
     * - As of Solaris 10u4, we can request that Solaris raise the 256
     *   stdio fopen limit by calling function enable_extended_FILE_stdio.
     *   This is done in init_2 and recorded in enabled_extended_FILE_stdio
     *
     * - If we are stuck on an old (pre 10u4) Solaris system, we can
     *   workaround the bug by remapping non-stdio file descriptors below
     *   256 to ones beyond 256, which is done below.
     *
     * See:
     * 1085341: 32-bit stdio routines should support file descriptors >255
     * 6533291: Work around 32-bit Solaris stdio limit of 256 open files
     * 6431278: Netbeans crash on 32 bit Solaris: need to call
     *          enable_extended_FILE_stdio() in VM initialisation
     * Giri Mandalika's blog
     * http://technopark02.blogspot.com/2005_05_01_archive.html
     */
#ifndef  _LP64
     if ((!enabled_extended_FILE_stdio) && fd < 256) {
         int newfd = ::fcntl(fd, F_DUPFD, 256);
         if (newfd != -1) {
             ::close(fd);
             fd = newfd;
         }
     }
#endif // 32-bit Solaris
    /*
     * All file descriptors that are opened in the JVM and not
     * specifically destined for a subprocess should have the
     * close-on-exec flag set.  If we don't set it, then careless 3rd
     * party native code might fork and exec without closing all
     * appropriate file descriptors (e.g. as we do in closeDescriptors in
     * UNIXProcess.c), and this in turn might:
     *
     * - cause end-of-file to fail to be detected on some file
     *   descriptors, resulting in mysterious hangs, or
     *
     * - might cause an fopen in the subprocess to fail on a system
     *   suffering from bug 1085341.
     *
     * (Yes, the default setting of the close-on-exec flag is a Unix
     * design flaw)
     *
     * See:
     * 1085341: 32-bit stdio routines should support file descriptors >255
     * 4843136: (process) pipe file descriptor from Runtime.exec not being closed
     * 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
     */
#ifdef FD_CLOEXEC
    {
        int flags = ::fcntl(fd, F_GETFD);
        if (flags != -1)
            ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
    }
#endif

  if (o_delete != 0) {
    ::unlink(path);
  }
  return fd;
}

D
duke 已提交
5270 5271 5272 5273 5274 5275 5276 5277 5278 5279 5280 5281 5282 5283 5284 5285 5286 5287 5288
// create binary file, rewriting existing file if required
int os::create_binary_file(const char* path, bool rewrite_existing) {
  int oflags = O_WRONLY | O_CREAT;
  if (!rewrite_existing) {
    oflags |= O_EXCL;
  }
  return ::open64(path, oflags, S_IREAD | S_IWRITE);
}

// return current position of file pointer
jlong os::current_file_offset(int fd) {
  return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
}

// move file pointer to the specified offset
jlong os::seek_to_file_offset(int fd, jlong offset) {
  return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
}

5289 5290 5291 5292 5293 5294 5295 5296 5297 5298 5299 5300 5301 5302 5303 5304 5305 5306 5307 5308 5309 5310 5311 5312 5313 5314 5315 5316 5317 5318 5319 5320 5321 5322 5323 5324 5325 5326 5327 5328 5329 5330 5331 5332 5333 5334 5335 5336 5337
// Seek within a file via the 64-bit interface.
jlong os::lseek(int fd, jlong offset, int whence) {
  return (jlong) ::lseek64(fd, offset, whence);
}

// Solaris paths need no translation; return the path unchanged.
char * os::native_path(char *path) {
  return path;
}

int os::ftruncate(int fd, jlong length) {
  return ::ftruncate64(fd, length);
}

// Flush fd to stable storage, restarting if interrupted by a signal.
int os::fsync(int fd)  {
  RESTARTABLE_RETURN_INT(::fsync(fd));
}

// Store in *bytes the number of bytes readable from fd without blocking.
// Returns 1 on success, 0 on failure.  Character devices, FIFOs and
// sockets are queried with FIONREAD; regular files fall back to seeking.
int os::available(int fd, jlong *bytes) {
  jlong cur, end;
  int mode;
  struct stat64 buf64;

  if (::fstat64(fd, &buf64) >= 0) {
    mode = buf64.st_mode;
    if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
      /*
      * XXX: is the following call interruptible? If so, this might
      * need to go through the INTERRUPT_IO() wrapper as for other
      * blocking, interruptible calls in this file.
      */
      int n,ioctl_return;

      INTERRUPTIBLE(::ioctl(fd, FIONREAD, &n),ioctl_return,os::Solaris::clear_interrupted);
      if (ioctl_return>= 0) {
          *bytes = n;
        return 1;
      }
    }
  }
  // Seek-based fallback: available = end-of-file minus current position.
  if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
    return 0;
  } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
    return 0;
  } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
    return 0;
  }
  *bytes = end - cur;
  return 1;
}

D
duke 已提交
5338
// Map a block of memory.
Z
zgu 已提交
5339
char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
D
duke 已提交
5340 5341 5342 5343 5344 5345 5346 5347 5348 5349 5350 5351 5352 5353 5354 5355 5356 5357 5358 5359 5360 5361 5362 5363 5364 5365 5366 5367 5368 5369 5370
                     char *addr, size_t bytes, bool read_only,
                     bool allow_exec) {
  int prot;
  int flags;

  if (read_only) {
    prot = PROT_READ;
    flags = MAP_SHARED;
  } else {
    prot = PROT_READ | PROT_WRITE;
    flags = MAP_PRIVATE;
  }

  if (allow_exec) {
    prot |= PROT_EXEC;
  }

  if (addr != NULL) {
    flags |= MAP_FIXED;
  }

  char* mapped_address = (char*)mmap(addr, (size_t)bytes, prot, flags,
                                     fd, file_offset);
  if (mapped_address == MAP_FAILED) {
    return NULL;
  }
  return mapped_address;
}


// Remap a block of memory.
Z
zgu 已提交
5371
char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
D
duke 已提交
5372 5373 5374 5375 5376 5377 5378 5379 5380
                       char *addr, size_t bytes, bool read_only,
                       bool allow_exec) {
  // same as map_memory() on this OS
  return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
                        allow_exec);
}


// Unmap a block of memory.
Z
zgu 已提交
5381
bool os::pd_unmap_memory(char* addr, size_t bytes) {
D
duke 已提交
5382 5383 5384 5385 5386 5387 5388 5389 5390 5391 5392 5393 5394 5395
  return munmap(addr, bytes) == 0;
}

// Pause the VM at startup: create a marker file and spin (polling every
// 100ms) until someone deletes it.  Used with -XX:PauseAtStartupFile so a
// debugger can be attached before execution continues.
void os::pause() {
  char filename[MAX_PATH];
  if (PauseAtStartupFile && PauseAtStartupFile[0]) {
    jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);
  } else {
    jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
  }

  int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
  if (fd != -1) {
    struct stat buf;
    ::close(fd);
    while (::stat(filename, &buf) == 0) {
      (void)::poll(NULL, 0, 100);
    }
  } else {
    jio_fprintf(stderr,
      "Could not open pause file '%s', continuing immediately.\n", filename);
  }
}

#ifndef PRODUCT
#ifdef INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
// Turn this on if you need to trace synch operations.
// Set RECORD_SYNCH_LIMIT to a large-enough value,
// and call record_synch_enable and record_synch_disable
// around the computation of interest.

void record_synch(char* name, bool returning);  // defined below

// RAII helper: records entry in the constructor and return in the
// destructor of each interposed synchronization call.
class RecordSynch {
  char* _name;
 public:
  RecordSynch(char* name) :_name(name)
                 { record_synch(_name, false); }
  ~RecordSynch() { record_synch(_name,   true);  }
};

// Defines an extern "C" interposer for a libthread/liblwp entry point:
// it lazily resolves the real implementation via RTLD_NEXT, counts calls,
// records entry/exit, runs the 'inner' check, then forwards the call.
#define CHECK_SYNCH_OP(ret, name, params, args, inner)          \
extern "C" ret name params {                                    \
  typedef ret name##_t params;                                  \
  static name##_t* implem = NULL;                               \
  static int callcount = 0;                                     \
  if (implem == NULL) {                                         \
    implem = (name##_t*) dlsym(RTLD_NEXT, #name);               \
    if (implem == NULL)  fatal(dlerror());                      \
  }                                                             \
  ++callcount;                                                  \
  RecordSynch _rs(#name);                                       \
  inner;                                                        \
  return implem args;                                           \
}
// in dbx, examine callcounts this way:
// for n in $(eval whereis callcount | awk '{print $2}'); do print $n; done

#define CHECK_POINTER_OK(p) \
  (!Universe::is_fully_initialized() || !Universe::is_reserved_heap((oop)(p)))
// Sanity checks run inside each interposer: synchronization objects must
// not live in the Java heap.
#define CHECK_MU \
  if (!CHECK_POINTER_OK(mu)) fatal("Mutex must be in C heap only.");
#define CHECK_CV \
  if (!CHECK_POINTER_OK(cv)) fatal("Condvar must be in C heap only.");
#define CHECK_P(p) \
  if (!CHECK_POINTER_OK(p))  fatal(false,  "Pointer must be in C heap only.");

#define CHECK_MUTEX(mutex_op) \
CHECK_SYNCH_OP(int, mutex_op, (mutex_t *mu), (mu), CHECK_MU);

CHECK_MUTEX(   mutex_lock)
CHECK_MUTEX(  _mutex_lock)
CHECK_MUTEX( mutex_unlock)
CHECK_MUTEX(_mutex_unlock)
CHECK_MUTEX( mutex_trylock)
CHECK_MUTEX(_mutex_trylock)

#define CHECK_COND(cond_op) \
CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu), (cv, mu), CHECK_MU;CHECK_CV);

CHECK_COND( cond_wait);
CHECK_COND(_cond_wait);
CHECK_COND(_cond_wait_cancel);

#define CHECK_COND2(cond_op) \
CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu, timestruc_t* ts), (cv, mu, ts), CHECK_MU;CHECK_CV);

CHECK_COND2( cond_timedwait);
CHECK_COND2(_cond_timedwait);
CHECK_COND2(_cond_timedwait_cancel);

// do the _lwp_* versions too
#define mutex_t lwp_mutex_t
#define cond_t  lwp_cond_t
CHECK_MUTEX(  _lwp_mutex_lock)
CHECK_MUTEX(  _lwp_mutex_unlock)
CHECK_MUTEX(  _lwp_mutex_trylock)
CHECK_MUTEX( __lwp_mutex_lock)
CHECK_MUTEX( __lwp_mutex_unlock)
CHECK_MUTEX( __lwp_mutex_trylock)
CHECK_MUTEX(___lwp_mutex_lock)
CHECK_MUTEX(___lwp_mutex_unlock)

CHECK_COND(  _lwp_cond_wait);
CHECK_COND( __lwp_cond_wait);
CHECK_COND(___lwp_cond_wait);

CHECK_COND2(  _lwp_cond_timedwait);
CHECK_COND2( __lwp_cond_timedwait);
#undef mutex_t
#undef cond_t

CHECK_SYNCH_OP(int, _lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
CHECK_SYNCH_OP(int,__lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
CHECK_SYNCH_OP(int, _lwp_kill,           (int lwp, int n),  (lwp, n), 0);
CHECK_SYNCH_OP(int,__lwp_kill,           (int lwp, int n),  (lwp, n), 0);
CHECK_SYNCH_OP(int, _lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
CHECK_SYNCH_OP(int,__lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
CHECK_SYNCH_OP(int, _lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);
CHECK_SYNCH_OP(int,__lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);


// recording machinery:

enum { RECORD_SYNCH_LIMIT = 200 };
char* record_synch_name[RECORD_SYNCH_LIMIT];
void* record_synch_arg0ptr[RECORD_SYNCH_LIMIT];
bool record_synch_returning[RECORD_SYNCH_LIMIT];
thread_t record_synch_thread[RECORD_SYNCH_LIMIT];
int record_synch_count = 0;
bool record_synch_enabled = false;

// in dbx, examine recorded data this way:
// for n in name arg0ptr returning thread; do print record_synch_$n[0..record_synch_count-1]; done

// Append one trace record (call name, direction, thread) to the ring
// buffers above; silently stops once RECORD_SYNCH_LIMIT is reached.
void record_synch(char* name, bool returning) {
  if (record_synch_enabled) {
    if (record_synch_count < RECORD_SYNCH_LIMIT) {
      record_synch_name[record_synch_count] = name;
      record_synch_returning[record_synch_count] = returning;
      record_synch_thread[record_synch_count] = thr_self();
      record_synch_arg0ptr[record_synch_count] = &name;
      record_synch_count++;
    }
    // put more checking code here:
    // ...
  }
}

void record_synch_enable() {
  // start collecting trace data, if not already doing so
  if (!record_synch_enabled)  record_synch_count = 0;
  record_synch_enabled = true;
}

void record_synch_disable() {
  // stop collecting trace data
  record_synch_enabled = false;
}

#endif // INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
#endif // PRODUCT

// Byte offset of the user-time field within prusage_t, and the span that
// covers the user/system/trap time fields up to (but excluding) pr_ttime —
// used to read just those fields with one pread() in os::thread_cpu_time().
const intptr_t thr_time_off  = (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
const intptr_t thr_time_size = (intptr_t)(&((prusage_t *)(NULL))->pr_ttime) -
                               (intptr_t)(&((prusage_t *)(NULL))->pr_utime);


// JVMTI & JVM monitoring and management support
// The thread_cpu_time() and current_thread_cpu_time() are only
// supported if is_thread_cpu_time_supported() returns true.
// They are not supported on Solaris T1.

// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
// are used by JVM M&M and JVMTI to get user+sys or user CPU time
// of a thread.
//
// current_thread_cpu_time() and thread_cpu_time(Thread *)
// returns the fast estimate available on the platform.

// hrtime_t gethrvtime() return value includes
// user time but does not include system time
jlong os::current_thread_cpu_time() {
  return (jlong) gethrvtime();
}

// Fast per-thread CPU time estimate for an arbitrary thread.
jlong os::thread_cpu_time(Thread *thread) {
  // return user level CPU time only to be consistent with
  // what current_thread_cpu_time returns.
  // thread_cpu_time_info() must be changed if this changes
  return os::thread_cpu_time(thread, false /* user time only */);
}

// CPU time for the current thread.  When user+system time is requested we
// must take the /proc-based path; otherwise use the fast gethrvtime() path.
jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
  return user_sys_cpu_time
      ? os::thread_cpu_time(Thread::current(), user_sys_cpu_time)
      : os::current_thread_cpu_time();
}

// CPU time of an arbitrary thread, in nanoseconds, read from the LWP's
// /proc usage file.  Returns user+system time when user_sys_cpu_time is
// true, otherwise user time only; returns -1 on any failure.
jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
  char proc_name[64];
  int count;
  prusage_t prusage;
  jlong lwp_time;
  int fd;

  sprintf(proc_name, "/proc/%d/lwp/%d/lwpusage",
                     getpid(),
                     thread->osthread()->lwp_id());
  fd = ::open(proc_name, O_RDONLY);
  if ( fd == -1 ) return -1;

  // Read only the time fields of prusage_t, retrying on EINTR.
  do {
    count = ::pread(fd,
                  (void *)&prusage.pr_utime,
                  thr_time_size,
                  thr_time_off);
  } while (count < 0 && errno == EINTR);
  ::close(fd);
  if ( count < 0 ) return -1;

  if (user_sys_cpu_time) {
    // user + system CPU time
    lwp_time = (((jlong)prusage.pr_stime.tv_sec +
                 (jlong)prusage.pr_utime.tv_sec) * (jlong)1000000000) +
                 (jlong)prusage.pr_stime.tv_nsec +
                 (jlong)prusage.pr_utime.tv_nsec;
  } else {
    // user level CPU time only
    lwp_time = ((jlong)prusage.pr_utime.tv_sec * (jlong)1000000000) +
                (jlong)prusage.pr_utime.tv_nsec;
  }

  return(lwp_time);
}

// Describe the characteristics of the current-thread CPU timer for JVMTI.
void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->kind              = JVMTI_TIMER_USER_CPU;  // only user time is returned
  info_ptr->max_value         = ALL_64_BITS;           // timer will not wrap in under 64 bits
  info_ptr->may_skip_backward = false;                 // CPU time, never runs backwards
  info_ptr->may_skip_forward  = false;                 // CPU time, never jumps forward
}

// Describe the characteristics of the per-thread CPU timer for JVMTI.
void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->kind              = JVMTI_TIMER_USER_CPU;  // only user time is returned
  info_ptr->max_value         = ALL_64_BITS;           // timer will not wrap in under 64 bits
  info_ptr->may_skip_backward = false;                 // CPU time, never runs backwards
  info_ptr->may_skip_forward  = false;                 // CPU time, never jumps forward
}

// Per-thread CPU time is supported with the T2 libthread (1:1 model)
// or when Java threads are bound to LWPs.
bool os::is_thread_cpu_time_supported() {
  return os::Solaris::T2_libthread() || UseBoundThreads;
}

// System loadavg support.  Returns -1 if load average cannot be obtained.
// Return the load average for our processor set if the primitive exists
// (Solaris 9 and later).  Otherwise just return system wide loadavg.
int os::loadavg(double loadavg[], int nelem) {
  if (pset_getloadavg_ptr != NULL) {
    // pset_getloadavg() was looked up dynamically; scope to our pset.
    return (*pset_getloadavg_ptr)(PS_MYID, loadavg, nelem);
  } else {
    return ::getloadavg(loadavg, nelem);
  }
}

//---------------------------------------------------------------------------------

5655
bool os::find(address addr, outputStream* st) {
D
duke 已提交
5656 5657
  Dl_info dlinfo;
  memset(&dlinfo, 0, sizeof(dlinfo));
5658 5659 5660
  if (dladdr(addr, &dlinfo) != 0) {
    st->print(PTR_FORMAT ": ", addr);
    if (dlinfo.dli_sname != NULL && dlinfo.dli_saddr != NULL) {
5661
      st->print("%s+%#lx", dlinfo.dli_sname, addr-(intptr_t)dlinfo.dli_saddr);
5662
    } else if (dlinfo.dli_fbase != NULL)
5663
      st->print("<offset %#lx>", addr-(intptr_t)dlinfo.dli_fbase);
D
duke 已提交
5664
    else
5665
      st->print("<absolute address>");
5666 5667 5668 5669 5670 5671
    if (dlinfo.dli_fname != NULL) {
      st->print(" in %s", dlinfo.dli_fname);
    }
    if (dlinfo.dli_fbase != NULL) {
      st->print(" at " PTR_FORMAT, dlinfo.dli_fbase);
    }
5672
    st->cr();
D
duke 已提交
5673 5674 5675

    if (Verbose) {
      // decode some bytes around the PC
5676 5677
      address begin = clamp_address_in_page(addr-40, addr, os::vm_page_size());
      address end   = clamp_address_in_page(addr+40, addr, os::vm_page_size());
D
duke 已提交
5678 5679 5680 5681
      address       lowest = (address) dlinfo.dli_sname;
      if (!lowest)  lowest = (address) dlinfo.dli_fbase;
      if (begin < lowest)  begin = lowest;
      Dl_info dlinfo2;
5682
      if (dladdr(end, &dlinfo2) != 0 && dlinfo2.dli_saddr != dlinfo.dli_saddr
D
duke 已提交
5683 5684
          && end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin)
        end = (address) dlinfo2.dli_saddr;
5685
      Disassembler::decode(begin, end, st);
D
duke 已提交
5686 5687 5688 5689 5690 5691 5692 5693 5694 5695 5696 5697 5698 5699 5700 5701 5702 5703 5704 5705 5706 5707 5708 5709 5710 5711 5712 5713 5714 5715 5716 5717 5718 5719 5720 5721 5722 5723 5724 5725 5726 5727 5728 5729 5730 5731 5732 5733 5734 5735 5736 5737 5738 5739 5740 5741 5742 5743 5744 5745 5746 5747 5748 5749 5750 5751 5752 5753 5754 5755 5756 5757 5758 5759 5760 5761 5762 5763 5764 5765 5766 5767 5768 5769 5770 5771 5772 5773 5774 5775 5776 5777 5778 5779 5780 5781 5782 5783 5784 5785 5786 5787 5788 5789 5790 5791 5792 5793 5794 5795 5796 5797 5798 5799 5800 5801 5802 5803 5804 5805 5806 5807 5808 5809 5810 5811 5812 5813 5814 5815 5816 5817 5818 5819 5820 5821 5822 5823 5824 5825 5826 5827 5828 5829 5830 5831 5832 5833 5834 5835 5836 5837 5838 5839 5840 5841 5842 5843 5844 5845 5846 5847 5848 5849 5850 5851 5852 5853 5854 5855 5856 5857 5858 5859 5860 5861 5862 5863
    }
    return true;
  }
  return false;
}

// Following function has been added to support HotSparc's libjvm.so running
// under Solaris production JDK 1.2.2 / 1.3.0.  These came from
// src/solaris/hpi/native_threads in the EVM codebase.
//
// NOTE: This is no longer needed in the 1.3.1 and 1.4 production release
// libraries and should thus be removed. We will leave it behind for a while
// until we no longer want to able to run on top of 1.3.0 Solaris production
// JDK. See 4341971.

// Safety margin subtracted from the computed free stack space.
#define STACK_SLACK 0x800

extern "C" {
  // Returns the number of bytes of stack space still available to the
  // caller, minus STACK_SLACK.  Exported with C linkage for the legacy
  // 1.3.0 Solaris production JDK (see note above).
  intptr_t sysThreadAvailableStackWithSlack() {
    stack_t st;
    intptr_t retval, stack_top;
    retval = thr_stksegment(&st);
    assert(retval == 0, "incorrect return value from thr_stksegment");
    assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
    assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
    // ss_sp - ss_size is the low limit of the stack segment;
    // the variable name "stack_top" is historical.
    stack_top=(intptr_t)st.ss_sp-st.ss_size;
    return ((intptr_t)&stack_top - stack_top - STACK_SLACK);
  }
}

// ObjectMonitor park-unpark infrastructure ...
//
// We implement Solaris and Linux PlatformEvents with the
// obvious condvar-mutex-flag triple.
// Another alternative that works quite well is pipes:
// Each PlatformEvent consists of a pipe-pair.
// The thread associated with the PlatformEvent
// calls park(), which reads from the input end of the pipe.
// Unpark() writes into the other end of the pipe.
// The write-side of the pipe must be set NDELAY.
// Unfortunately pipes consume a large # of handles.
// Native solaris lwp_park() and lwp_unpark() work nicely, too.
// Using pipes for the 1st few threads might be workable, however.
//
// park() is permitted to return spuriously.
// Callers of park() should wrap the call to park() in
// an appropriate loop.  A litmus test for the correct
// usage of park is the following: if park() were modified
// to immediately return 0 your code should still work,
// albeit degenerating to a spin loop.
//
// An interesting optimization for park() is to use a trylock()
// to attempt to acquire the mutex.  If the trylock() fails
// then we know that a concurrent unpark() operation is in-progress.
// in that case the park() code could simply set _count to 0
// and return immediately.  The subsequent park() operation *might*
// return immediately.  That's harmless as the caller of park() is
// expected to loop.  By using trylock() we will have avoided a
// avoided a context switch caused by contention on the per-thread mutex.
//
// TODO-FIXME:
// 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the
//     objectmonitor implementation.
// 2.  Collapse the JSR166 parker event, and the
//     objectmonitor ParkEvent into a single "Event" construct.
// 3.  In park() and unpark() add:
//     assert (Thread::current() == AssociatedWith).
// 4.  add spurious wakeup injection on a -XX:EarlyParkReturn=N switch.
//     1-out-of-N park() operations will return immediately.
//
// _Event transitions in park()
//   -1 => -1 : illegal
//    1 =>  0 : pass - return immediately
//    0 => -1 : block
//
// _Event serves as a restricted-range semaphore.
//
// Another possible encoding of _Event would be with
// explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
//
// TODO-FIXME: add DTRACE probes for:
// 1.   Tx parks
// 2.   Ty unparks Tx
// 3.   Tx resumes from park


// value determined through experimentation
#define ROUNDINGFIX 11

// utility to compute the abstime argument to timedwait.
// TODO-FIXME: switch from compute_abstime() to unpackTime().

// Convert a relative timeout in milliseconds into the absolute deadline
// expected by cond_timedwait(), stored into *abstime.  Negative timeouts
// are treated as zero.  Returns abstime for caller convenience.
static timestruc_t* compute_abstime(timestruc_t* abstime, jlong millis) {
  if (millis < 0)  millis = 0;

  struct timeval now;
  int status = gettimeofday(&now, NULL);
  assert(status == 0, "gettimeofday");

  jlong seconds = millis / 1000;
  jlong max_wait_period;

  if (UseLWPSynchronization) {
    // Forward port of the fix for 4275818 (not sleeping long enough).
    // Solaris 6, 7 and pre-patch-5 Solaris 8 _lwp_cond_timedwait() rounded
    // sub-ROUNDINGFIX timeouts *down*, yielding 0 which violates the spec.
    // Round such small values up instead.  For larger values we may wake a
    // bit early, but since we cannot reliably detect the patch level and
    // the bug is fixed in Solaris 8 (patched) and 9, leave them alone.
    if (millis > 0 && millis < ROUNDINGFIX) {
      millis = ROUNDINGFIX;
    }
    // Going straight to the kernel via _lwp_cond_timedwait(), the acceptable
    // max time threshold is smaller than for libthread on 2.5.1 and 2.6.
    max_wait_period = 21000000;
  } else {
    max_wait_period = 50000000;
  }

  millis %= 1000;
  if (seconds > max_wait_period) {      // see man cond_timedwait(3T)
    seconds = max_wait_period;
  }

  abstime->tv_sec = now.tv_sec + seconds;
  long usec = now.tv_usec + millis * 1000;
  if (usec >= 1000000) {
    // Carry overflowed microseconds into the seconds field.
    abstime->tv_sec += 1;
    usec -= 1000000;
  }
  abstime->tv_nsec = usec * 1000;
  return abstime;
}

// Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
// Conceptually TryPark() should be equivalent to park(0).

// Atomically fetch _Event and reset it to 0, returning the prior value
// (1 if a permit was consumed, 0 if none was available).  Never blocks;
// conceptually equivalent to park(0).
int os::PlatformEvent::TryPark() {
  for (;;) {
    const int v = _Event ;
    guarantee ((v == 0) || (v == 1), "invariant") ;
    // CAS loop: retry until the swap to 0 succeeds against a stable value.
    if (Atomic::cmpxchg (0, &_Event, v) == v) return v  ;
  }
}

void os::PlatformEvent::park() {           // AKA: down()
  // Invariant: Only the thread associated with the Event/PlatformEvent
  // may call park().
  int v ;
  for (;;) {
      v = _Event ;
      if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
  }
  guarantee (v >= 0, "invariant") ;
  if (v == 0) {
     // Do this the hard way by blocking ...
     // See http://monaco.sfbay/detail.jsf?cr=5094058.
     // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
     // Only for SPARC >= V8PlusA
#if defined(__sparc) && defined(COMPILER2)
     if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
#endif
     int status = os::Solaris::mutex_lock(_mutex);
     assert_status(status == 0, status,  "mutex_lock");
     guarantee (_nParked == 0, "invariant") ;
     ++ _nParked ;
     while (_Event < 0) {
        // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
        // Treat this the same as if the wait was interrupted
        // With usr/lib/lwp going to kernel, always handle ETIME
        status = os::Solaris::cond_wait(_cond, _mutex);
        if (status == ETIME) status = EINTR ;
        assert_status(status == 0 || status == EINTR, status, "cond_wait");
     }
     -- _nParked ;
     _Event = 0 ;
     status = os::Solaris::mutex_unlock(_mutex);
     assert_status(status == 0, status, "mutex_unlock");
5864 5865 5866
    // Paranoia to ensure our locked and lock-free paths interact
    // correctly with each other.
    OrderAccess::fence();
D
duke 已提交
5867 5868 5869 5870 5871 5872 5873 5874 5875 5876 5877 5878 5879 5880 5881 5882 5883 5884 5885 5886 5887 5888 5889 5890 5891 5892 5893 5894 5895 5896 5897 5898 5899 5900 5901 5902 5903 5904 5905 5906 5907
  }
}

// Timed park: block the associated thread for at most millis milliseconds.
// Returns OS_OK if a permit was consumed or the event was signalled,
// OS_TIMEOUT if the wait timed out.
int os::PlatformEvent::park(jlong millis) {
  guarantee (_nParked == 0, "invariant") ;
  int v ;
  // Atomically decrement _Event; v holds the pre-decrement value.
  for (;;) {
      v = _Event ;
      if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
  }
  guarantee (v >= 0, "invariant") ;
  if (v != 0) return OS_OK ;   // permit was available - no need to block

  int ret = OS_TIMEOUT;
  timestruc_t abst;
  compute_abstime (&abst, millis);

  // See http://monaco.sfbay/detail.jsf?cr=5094058.
  // For Solaris SPARC set fprs.FEF=0 prior to parking.
  // Only for SPARC >= V8PlusA
#if defined(__sparc) && defined(COMPILER2)
 if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
#endif
  int status = os::Solaris::mutex_lock(_mutex);
  assert_status(status == 0, status, "mutex_lock");
  guarantee (_nParked == 0, "invariant") ;
  ++ _nParked ;
  while (_Event < 0) {
     int status = os::Solaris::cond_timedwait(_cond, _mutex, &abst);
     assert_status(status == 0 || status == EINTR ||
                   status == ETIME || status == ETIMEDOUT,
                   status, "cond_timedwait");
     if (!FilterSpuriousWakeups) break ;                // previous semantics
     if (status == ETIME || status == ETIMEDOUT) break ;
     // We consume and ignore EINTR and spurious wakeups.
  }
  -- _nParked ;
  if (_Event >= 0) ret = OS_OK ;
  _Event = 0 ;
  status = os::Solaris::mutex_unlock(_mutex);
  assert_status(status == 0, status, "mutex_unlock");
  // Paranoia to ensure our locked and lock-free paths interact
  // correctly with each other.
  OrderAccess::fence();
  return ret;
}

void os::PlatformEvent::unpark() {
5915 5916 5917 5918 5919 5920 5921 5922 5923 5924 5925 5926 5927 5928
  // Transitions for _Event:
  //    0 :=> 1
  //    1 :=> 1
  //   -1 :=> either 0 or 1; must signal target thread
  //          That is, we can safely transition _Event from -1 to either
  //          0 or 1. Forcing 1 is slightly more efficient for back-to-back
  //          unpark() calls.
  // See also: "Semaphores in Plan 9" by Mullender & Cox
  //
  // Note: Forcing a transition from "-1" to "1" on an unpark() means
  // that it will take two back-to-back park() calls for the owning
  // thread to block. This has the benefit of forcing a spurious return
  // from the first park() call after an unpark() call which will help
  // shake out uses of park() and unpark() without condition variables.
D
duke 已提交
5929

5930
  if (Atomic::xchg(1, &_Event) >= 0) return;
D
duke 已提交
5931 5932

  // If the thread associated with the event was parked, wake it.
5933 5934 5935 5936 5937 5938 5939 5940 5941 5942 5943 5944
  // Wait for the thread assoc with the PlatformEvent to vacate.
  int status = os::Solaris::mutex_lock(_mutex);
  assert_status(status == 0, status, "mutex_lock");
  int AnyWaiters = _nParked;
  status = os::Solaris::mutex_unlock(_mutex);
  assert_status(status == 0, status, "mutex_unlock");
  guarantee(AnyWaiters == 0 || AnyWaiters == 1, "invariant");
  if (AnyWaiters != 0) {
    // We intentional signal *after* dropping the lock
    // to avoid a common class of futile wakeups.
    status = os::Solaris::cond_signal(_cond);
    assert_status(status == 0, status, "cond_signal");
D
duke 已提交
5945 5946 5947 5948 5949 5950 5951 5952 5953 5954 5955 5956 5957 5958 5959 5960 5961 5962 5963 5964 5965 5966 5967 5968 5969 5970 5971 5972 5973 5974 5975 5976 5977 5978 5979 5980 5981 5982 5983 5984 5985 5986 5987 5988 5989 5990 5991 5992 5993 5994 5995 5996 5997 5998 5999 6000 6001 6002 6003 6004 6005 6006 6007 6008 6009 6010 6011 6012 6013 6014 6015 6016 6017 6018 6019 6020 6021
  }
}

// JSR166
// -------------------------------------------------------

/*
 * The solaris and linux implementations of park/unpark are fairly
 * conservative for now, but can be improved. They currently use a
 * mutex/condvar pair, plus _counter.
 * Park decrements _counter if > 0, else does a condvar wait.  Unpark
 * sets count to 1 and signals condvar.  Only one thread ever waits
 * on the condvar. Contention seen when trying to park implies that someone
 * is unparking you, so don't wait. And spurious returns are fine, so there
 * is no need to track notifications.
 */

#define MAX_SECS 100000000
/*
 * This code is common to linux and solaris and will be moved to a
 * common place in dolphin.
 *
 * The passed in time value is either a relative time in nanoseconds
 * or an absolute time in milliseconds. Either way it has to be unpacked
 * into suitable seconds and nanoseconds components and stored in the
 * given timespec structure.
 * Given time is a 64-bit value and the time_t used in the timespec is only
 * a signed-32-bit value (except on 64-bit Linux) we have to watch for
 * overflow if times way in the future are given. Further on Solaris versions
 * prior to 10 there is a restriction (see cond_timedwait) that the specified
 * number of seconds, in abstime, is less than current_time  + 100,000,000.
 * As it will be 28 years before "now + 100000000" will overflow we can
 * ignore overflow and just impose a hard-limit on seconds using the value
 * of "now + 100,000,000". This places a limit on the timeout of about 3.17
 * years from "now".
 */
// Unpack the JSR166 park() time argument into *absTime.  'time' is either
// an absolute time in milliseconds (isAbsolute) or a relative time in
// nanoseconds; either way the result is an absolute deadline clamped to
// now + MAX_SECS (see the restriction on cond_timedwait described above).
static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
  assert (time > 0, "convertTime");

  struct timeval now;
  int status = gettimeofday(&now, NULL);
  assert(status == 0, "gettimeofday");

  // Hard upper bound on the deadline's seconds component.
  time_t max_secs = now.tv_sec + MAX_SECS;

  if (isAbsolute) {
    jlong secs = time / 1000;
    if (secs > max_secs) {
      absTime->tv_sec = max_secs;
    }
    else {
      absTime->tv_sec = secs;
    }
    absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
  }
  else {
    jlong secs = time / NANOSECS_PER_SEC;
    if (secs >= MAX_SECS) {
      absTime->tv_sec = max_secs;
      absTime->tv_nsec = 0;
    }
    else {
      absTime->tv_sec = now.tv_sec + secs;
      absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
      if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
        // Carry overflowed nanoseconds into the seconds field.
        absTime->tv_nsec -= NANOSECS_PER_SEC;
        ++absTime->tv_sec; // note: this must be <= max_secs
      }
    }
  }
  assert(absTime->tv_sec >= 0, "tv_sec < 0");
  assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
  assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
  assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
}

void Parker::park(bool isAbsolute, jlong time) {
6022 6023
  // Ideally we'd do something useful while spinning, such
  // as calling unpackTime().
D
duke 已提交
6024 6025 6026

  // Optional fast-path check:
  // Return immediately if a permit is available.
6027 6028 6029
  // We depend on Atomic::xchg() having full barrier semantics
  // since we are doing a lock-free update to _counter.
  if (Atomic::xchg(0, &_counter) > 0) return;
D
duke 已提交
6030 6031 6032 6033 6034 6035 6036 6037 6038 6039 6040

  // Optional fast-exit: Check interrupt before trying to wait
  Thread* thread = Thread::current();
  assert(thread->is_Java_thread(), "Must be JavaThread");
  JavaThread *jt = (JavaThread *)thread;
  if (Thread::is_interrupted(thread, false)) {
    return;
  }

  // First, demultiplex/decode time arguments
  timespec absTime;
6041
  if (time < 0 || (isAbsolute && time == 0) ) { // don't wait at all
D
duke 已提交
6042 6043 6044 6045 6046 6047 6048 6049 6050 6051 6052 6053 6054 6055 6056 6057 6058 6059 6060 6061 6062 6063 6064 6065 6066 6067 6068 6069 6070
    return;
  }
  if (time > 0) {
    // Warning: this code might be exposed to the old Solaris time
    // round-down bugs.  Grep "roundingFix" for details.
    unpackTime(&absTime, isAbsolute, time);
  }

  // Enter safepoint region
  // Beware of deadlocks such as 6317397.
  // The per-thread Parker:: _mutex is a classic leaf-lock.
  // In particular a thread must never block on the Threads_lock while
  // holding the Parker:: mutex.  If safepoints are pending both the
  // the ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
  ThreadBlockInVM tbivm(jt);

  // Don't wait if cannot get lock since interference arises from
  // unblocking.  Also. check interrupt before trying wait
  if (Thread::is_interrupted(thread, false) ||
      os::Solaris::mutex_trylock(_mutex) != 0) {
    return;
  }

  int status ;

  if (_counter > 0)  { // no wait needed
    _counter = 0;
    status = os::Solaris::mutex_unlock(_mutex);
    assert (status == 0, "invariant") ;
6071 6072
    // Paranoia to ensure our locked and lock-free paths interact
    // correctly with each other and Java-level accesses.
6073
    OrderAccess::fence();
D
duke 已提交
6074 6075 6076 6077 6078 6079 6080 6081 6082 6083 6084 6085 6086 6087 6088 6089 6090 6091 6092 6093 6094 6095 6096 6097 6098 6099 6100 6101 6102 6103 6104 6105 6106 6107 6108 6109 6110 6111 6112 6113
    return;
  }

#ifdef ASSERT
  // Don't catch signals while blocked; let the running threads have the signals.
  // (This allows a debugger to break into the running thread.)
  sigset_t oldsigs;
  sigset_t* allowdebug_blocked = os::Solaris::allowdebug_blocked_signals();
  thr_sigsetmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
#endif

  OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
  jt->set_suspend_equivalent();
  // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()

  // Do this the hard way by blocking ...
  // See http://monaco.sfbay/detail.jsf?cr=5094058.
  // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
  // Only for SPARC >= V8PlusA
#if defined(__sparc) && defined(COMPILER2)
  if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
#endif

  if (time == 0) {
    status = os::Solaris::cond_wait (_cond, _mutex) ;
  } else {
    status = os::Solaris::cond_timedwait (_cond, _mutex, &absTime);
  }
  // Note that an untimed cond_wait() can sometimes return ETIME on older
  // versions of the Solaris.
  assert_status(status == 0 || status == EINTR ||
                status == ETIME || status == ETIMEDOUT,
                status, "cond_timedwait");

#ifdef ASSERT
  thr_sigsetmask(SIG_SETMASK, &oldsigs, NULL);
#endif
  _counter = 0 ;
  status = os::Solaris::mutex_unlock(_mutex);
  assert_status(status == 0, status, "mutex_unlock") ;
6114 6115 6116
  // Paranoia to ensure our locked and lock-free paths interact
  // correctly with each other and Java-level accesses.
  OrderAccess::fence();
D
duke 已提交
6117 6118 6119 6120 6121 6122 6123 6124 6125 6126 6127 6128 6129 6130 6131 6132 6133 6134 6135 6136 6137 6138 6139 6140 6141 6142 6143 6144 6145 6146 6147 6148 6149 6150 6151 6152 6153 6154 6155 6156 6157 6158 6159 6160 6161 6162 6163 6164 6165 6166 6167 6168 6169 6170 6171 6172 6173 6174 6175 6176 6177 6178 6179 6180 6181 6182 6183 6184 6185 6186 6187 6188 6189 6190 6191 6192 6193 6194 6195 6196 6197 6198 6199 6200 6201 6202 6203 6204 6205 6206

  // If externally suspended while waiting, re-suspend
  if (jt->handle_special_suspend_equivalent_condition()) {
    jt->java_suspend_self();
  }
}

// JSR166 unpark: grant a permit, and signal the condvar only when the
// prior counter value indicates a thread may be waiting.
void Parker::unpark() {
  int status = os::Solaris::mutex_lock (_mutex) ;
  assert (status == 0, "invariant") ;
  const int prev = _counter;
  _counter = 1;
  status = os::Solaris::mutex_unlock (_mutex) ;
  assert (status == 0, "invariant") ;

  if (prev < 1) {
    // Signal after releasing the mutex to avoid futile wakeups.
    status = os::Solaris::cond_signal (_cond) ;
    assert (status == 0, "invariant") ;
  }
}

extern char** environ;

// Run the specified command in a separate process. Return its exit value,
// or -1 on failure (e.g. can't fork a new process).
// Unlike system(), this function can be called from signal handler. It
// doesn't block SIGINT et al.
int os::fork_and_exec(char* cmd) {
  // Build "sh -c <cmd>" argument vector.
  char * argv[4];
  argv[0] = (char *)"sh";
  argv[1] = (char *)"-c";
  argv[2] = cmd;
  argv[3] = NULL;

  // fork is async-safe, fork1 is not so can't use in signal handler
  pid_t pid;
  Thread* t = ThreadLocalStorage::get_thread_slow();
  if (t != NULL && t->is_inside_signal_handler()) {
    pid = fork();
  } else {
    pid = fork1();
  }

  if (pid < 0) {
    // fork failed
    warning("fork failed: %s", strerror(errno));
    return -1;

  } else if (pid == 0) {
    // child process

    // try to be consistent with system(), which uses "/usr/bin/sh" on Solaris
    execve("/usr/bin/sh", argv, environ);

    // execve failed
    _exit(-1);

  } else  {
    // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
    // care about the actual exit code, for now.

    int status;

    // Wait for the child process to exit.  This returns immediately if
    // the child has already exited. */
    while (waitpid(pid, &status, 0) < 0) {
        switch (errno) {
        case ECHILD: return 0;
        case EINTR: break;   // interrupted - retry waitpid()
        default: return -1;
        }
    }

    if (WIFEXITED(status)) {
       // The child exited normally; get its exit code.
       return WEXITSTATUS(status);
    } else if (WIFSIGNALED(status)) {
       // The child exited because of a signal
       // The best value to return is 0x80 + signal number,
       // because that is what all Unix shells do, and because
       // it allows callers to distinguish between process exit and
       // process death by signal.
       return 0x80 + WTERMSIG(status);
    } else {
       // Unknown exit code; pass it through
       return status;
    }
  }
}
6207 6208 6209

// is_headless_jre()
//
6210
// Test for the existence of xawt/libmawt.so or libawt_xawt.so
6211 6212
// in order to report if we are running in a headless jre
//
6213 6214 6215
// Since JDK8 xawt/libmawt.so was moved into the same directory
// as libawt.so, and renamed libawt_xawt.so
//
6216 6217 6218 6219 6220
bool os::is_headless_jre() {
    struct stat statbuf;
    char buf[MAXPATHLEN];
    char libmawtpath[MAXPATHLEN];
    const char *xawtstr  = "/xawt/libmawt.so";
6221
    const char *new_xawtstr = "/libawt_xawt.so";
6222 6223 6224 6225 6226 6227 6228 6229 6230 6231 6232 6233 6234 6235 6236 6237 6238 6239 6240 6241
    char *p;

    // Get path to libjvm.so
    os::jvm_path(buf, sizeof(buf));

    // Get rid of libjvm.so
    p = strrchr(buf, '/');
    if (p == NULL) return false;
    else *p = '\0';

    // Get rid of client or server
    p = strrchr(buf, '/');
    if (p == NULL) return false;
    else *p = '\0';

    // check xawt/libmawt.so
    strcpy(libmawtpath, buf);
    strcat(libmawtpath, xawtstr);
    if (::stat(libmawtpath, &statbuf) == 0) return false;

6242
    // check libawt_xawt.so
6243
    strcpy(libmawtpath, buf);
6244
    strcat(libmawtpath, new_xawtstr);
6245 6246 6247 6248 6249
    if (::stat(libmawtpath, &statbuf) == 0) return false;

    return true;
}

6250
size_t os::write(int fd, const void *buf, unsigned int nBytes) {
6251
  INTERRUPTIBLE_RETURN_INT(::write(fd, buf, nBytes), os::Solaris::clear_interrupted);
6252 6253 6254
}

// Close a file descriptor.  Deliberately not retried on EINTR:
// the descriptor state after an interrupted close is unspecified.
int os::close(int fd) {
  return ::close(fd);
}

// Close a socket descriptor; on Solaris sockets are plain fds,
// so this is identical to os::close().
int os::socket_close(int fd) {
  return ::close(fd);
}

6262 6263
int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
  INTERRUPTIBLE_RETURN_INT((int)::recv(fd, buf, nBytes, flags), os::Solaris::clear_interrupted);
6264 6265
}

6266 6267
int os::send(int fd, char* buf, size_t nBytes, uint flags) {
  INTERRUPTIBLE_RETURN_INT((int)::send(fd, buf, nBytes, flags), os::Solaris::clear_interrupted);
6268 6269
}

6270 6271
int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
  RESTARTABLE_RETURN_INT((int)::send(fd, buf, nBytes, flags));
6272 6273 6274 6275 6276 6277 6278 6279 6280 6281 6282 6283 6284 6285 6286 6287 6288 6289 6290 6291 6292 6293 6294 6295 6296 6297 6298 6299 6300 6301 6302 6303 6304 6305
}

// As both poll and select can be interrupted by signals, we have to be
// prepared to restart the system call after updating the timeout, unless
// a poll() is done with timeout == -1, in which case we repeat with this
// "wait forever" value.

// Wait up to 'timeout' milliseconds for fd to become readable (POLLIN).
// On EINTR the poll is restarted with the remaining time, except when
// timeout == -1 ("wait forever"), which is reused unchanged.
int os::timeout(int fd, long timeout) {
  int res;
  struct timeval t;
  julong prevtime, newtime;
  static const char* aNull = 0;
  struct pollfd pfd;
  pfd.fd = fd;
  pfd.events = POLLIN;

  // Record the start time so elapsed time can be deducted after EINTR.
  gettimeofday(&t, &aNull);
  prevtime = ((julong)t.tv_sec * 1000)  +  t.tv_usec / 1000;

  for(;;) {
    INTERRUPTIBLE_NORESTART(::poll(&pfd, 1, timeout), res, os::Solaris::clear_interrupted);
    if(res == OS_ERR && errno == EINTR) {
        if(timeout != -1) {
          // Interrupted: charge the elapsed time against the timeout.
          gettimeofday(&t, &aNull);
          newtime = ((julong)t.tv_sec * 1000)  +  t.tv_usec /1000;
          timeout -= newtime - prevtime;
          if(timeout <= 0)
            return OS_OK;
          prevtime = newtime;
        }
    } else return res;
  }
}

6306
int os::connect(int fd, struct sockaddr *him, socklen_t len) {
6307
  int _result;
6308
  INTERRUPTIBLE_NORESTART(::connect(fd, him, len), _result,\
6309 6310 6311 6312 6313 6314
                          os::Solaris::clear_interrupted);

  // Depending on when thread interruption is reset, _result could be
  // one of two values when errno == EINTR

  if (((_result == OS_INTRPT) || (_result == OS_ERR))
6315
      && (errno == EINTR)) {
6316
     /* restarting a connect() changes its errno semantics */
6317 6318
     INTERRUPTIBLE(::connect(fd, him, len), _result,\
                   os::Solaris::clear_interrupted);
6319 6320 6321 6322 6323 6324 6325 6326 6327 6328 6329 6330 6331
     /* undo these changes */
     if (_result == OS_ERR) {
       if (errno == EALREADY) {
         errno = EINPROGRESS; /* fall through */
       } else if (errno == EISCONN) {
         errno = 0;
         return OS_OK;
       }
     }
   }
   return _result;
 }

6332 6333 6334 6335 6336 6337 6338
int os::accept(int fd, struct sockaddr* him, socklen_t* len) {
  if (fd < 0) {
    return OS_ERR;
  }
  INTERRUPTIBLE_RETURN_INT((int)::accept(fd, him, len),\
                           os::Solaris::clear_interrupted);
}
6339

6340 6341 6342 6343
int os::recvfrom(int fd, char* buf, size_t nBytes, uint flags,
                 sockaddr* from, socklen_t* fromlen) {
  INTERRUPTIBLE_RETURN_INT((int)::recvfrom(fd, buf, nBytes, flags, from, fromlen),\
                           os::Solaris::clear_interrupted);
6344 6345
}

6346 6347 6348 6349
int os::sendto(int fd, char* buf, size_t len, uint flags,
               struct sockaddr* to, socklen_t tolen) {
  INTERRUPTIBLE_RETURN_INT((int)::sendto(fd, buf, len, flags, to, tolen),\
                           os::Solaris::clear_interrupted);
6350 6351 6352
}

int os::socket_available(int fd, jint *pbytes) {
6353 6354 6355 6356 6357 6358 6359 6360
  if (fd < 0) {
    return OS_OK;
  }
  int ret;
  RESTARTABLE(::ioctl(fd, FIONREAD, pbytes), ret);
  // note: ioctl can return 0 when successful, JVM_SocketAvailable
  // is expected to return 0 on failure and 1 on success to the jdk.
  return (ret == OS_ERR) ? 0 : 1;
6361 6362
}

6363
int os::bind(int fd, struct sockaddr* him, socklen_t len) {
6364
   INTERRUPTIBLE_RETURN_INT_NORESTART(::bind(fd, him, len),\
6365
                                      os::Solaris::clear_interrupted);
6366
}
6367 6368 6369 6370 6371 6372 6373 6374 6375 6376 6377 6378 6379

// Get the default path to the core file (the current working directory,
// written into 'buffer').  Returns the length of the resulting string,
// or 0 if the directory could not be determined.
int os::get_core_path(char* buffer, size_t bufferSize) {
  const char* cwd = get_current_directory(buffer, bufferSize);

  if (cwd == NULL) {
    assert(cwd != NULL, "failed to get current directory");
    return 0;
  }

  return strlen(buffer);
}

// Hook for the internal VM memory-reservation tests (non-product builds).
#ifndef PRODUCT
void TestReserveMemorySpecial_test() {
  // No tests available for this platform
}
#endif