ruby-changes:2873
From: ko1@a...
Date: 20 Dec 2007 18:30:08 +0900
Subject: [ruby-changes:2873] ko1 - Ruby:r14364 (trunk): * common.mk, *.ci: renamed to *.c.
ko1 2007-12-20 18:29:46 +0900 (Thu, 20 Dec 2007)
New Revision: 14364
Added files:
trunk/eval_error.c
trunk/eval_jump.c
trunk/eval_method.c
trunk/eval_safe.c
trunk/load.c
trunk/thread_pthread.c
trunk/thread_win32.c
trunk/vm_evalbody.c
trunk/vm_insnhelper.c
Removed files:
trunk/eval_error.ci
trunk/eval_jump.ci
trunk/eval_load.c
trunk/eval_method.ci
trunk/eval_safe.ci
trunk/insnhelper.ci
trunk/thread_pthread.ci
trunk/thread_win32.ci
trunk/vm_evalbody.ci
Modified files:
trunk/ChangeLog
trunk/common.mk
trunk/eval.c
trunk/thread.c
trunk/vm.c
Log:
* common.mk, *.ci: renamed to *.c.
* eval_load.c: renamed to load.c.
http://svn.ruby-lang.org/cgi-bin/viewvc.cgi/trunk/thread_win32.c
http://svn.ruby-lang.org/cgi-bin/viewvc.cgi/trunk/insnhelper.ci
http://svn.ruby-lang.org/cgi-bin/viewvc.cgi/trunk/vm_evalbody.c
http://svn.ruby-lang.org/cgi-bin/viewvc.cgi/trunk/thread_win32.ci
http://svn.ruby-lang.org/cgi-bin/viewvc.cgi/trunk/eval_jump.c
http://svn.ruby-lang.org/cgi-bin/viewvc.cgi/trunk/eval_error.c
http://svn.ruby-lang.org/cgi-bin/viewvc.cgi/trunk/thread_pthread.ci
http://svn.ruby-lang.org/cgi-bin/viewvc.cgi/trunk/eval_error.ci
http://svn.ruby-lang.org/cgi-bin/viewvc.cgi/trunk/eval_load.c
http://svn.ruby-lang.org/cgi-bin/viewvc.cgi/trunk/ChangeLog?r1=14364&r2=14363
http://svn.ruby-lang.org/cgi-bin/viewvc.cgi/trunk/thread.c?r1=14364&r2=14363
http://svn.ruby-lang.org/cgi-bin/viewvc.cgi/trunk/eval_method.ci
http://svn.ruby-lang.org/cgi-bin/viewvc.cgi/trunk/load.c
http://svn.ruby-lang.org/cgi-bin/viewvc.cgi/trunk/eval_safe.c
http://svn.ruby-lang.org/cgi-bin/viewvc.cgi/trunk/eval_method.c
http://svn.ruby-lang.org/cgi-bin/viewvc.cgi/trunk/eval.c?r1=14364&r2=14363
http://svn.ruby-lang.org/cgi-bin/viewvc.cgi/trunk/vm_insnhelper.c
http://svn.ruby-lang.org/cgi-bin/viewvc.cgi/trunk/vm.c?r1=14364&r2=14363
http://svn.ruby-lang.org/cgi-bin/viewvc.cgi/trunk/thread_pthread.c
http://svn.ruby-lang.org/cgi-bin/viewvc.cgi/trunk/eval_jump.ci
http://svn.ruby-lang.org/cgi-bin/viewvc.cgi/trunk/vm_evalbody.ci
http://svn.ruby-lang.org/cgi-bin/viewvc.cgi/trunk/eval_safe.ci
http://svn.ruby-lang.org/cgi-bin/viewvc.cgi/trunk/common.mk?r1=14364&r2=14363
Index: vm_evalbody.ci
===================================================================
--- vm_evalbody.ci (revision 14363)
+++ vm_evalbody.ci (revision 14364)
@@ -1,143 +0,0 @@
-/* -*-c-*- */
-/**********************************************************************
-
- vm_evalbody.ci -
-
- $Author$
- $Date$
-
- Copyright (C) 2004-2007 Koichi Sasada
-
-**********************************************************************/
-
-#include <math.h>
-
-#if VMDEBUG > 0
-#define DECL_SC_REG(type, r, reg) register type reg_##r
-
-#elif __GNUC__ && __x86_64
-#define DECL_SC_REG(type, r, reg) register type reg_##r asm("r" reg)
-
-#elif __GNUC__ && __i386__
-#define DECL_SC_REG(type, r, reg) register type reg_##r asm("e" reg)
-
-#else
-#define DECL_SC_REG(type, r, reg) register type reg_##r
-#endif
-/* #define DECL_SC_REG(r, reg) VALUE reg_##r */
-
-#if !OPT_CALL_THREADED_CODE
-VALUE
-vm_eval(rb_thread_t *th, VALUE initial)
-{
-
-#if OPT_STACK_CACHING
-#if 0
-#elif __GNUC__ && __x86_64
- DECL_SC_REG(VALUE, a, "12");
- DECL_SC_REG(VALUE, b, "13");
-#else
- register VALUE reg_a;
- register VALUE reg_b;
-#endif
-#endif
-
-#if __GNUC__ && __i386__
- DECL_SC_REG(VALUE *, pc, "di");
- DECL_SC_REG(rb_control_frame_t *, cfp, "si");
-#define USE_MACHINE_REGS 1
-
-#elif __GNUC__ && __x86_64__
- DECL_SC_REG(VALUE *, pc, "14");
- DECL_SC_REG(rb_control_frame_t *, cfp, "15");
-#define USE_MACHINE_REGS 1
-
-#else
- register rb_control_frame_t *reg_cfp;
- VALUE *reg_pc;
-#endif
-
-#if USE_MACHINE_REGS
-
-#undef RESTORE_REGS
-#define RESTORE_REGS() \
-{ \
- REG_CFP = th->cfp; \
- reg_pc = reg_cfp->pc; \
-}
-
-#undef REG_PC
-#define REG_PC reg_pc
-#undef GET_PC
-#define GET_PC() (reg_pc)
-#undef SET_PC
-#define SET_PC(x) (reg_cfp->pc = REG_PC = (x))
-#endif
-
-#if OPT_TOKEN_THREADED_CODE || OPT_DIRECT_THREADED_CODE
-#include "vmtc.inc"
- if (th == 0) {
-#if OPT_STACK_CACHING
- finish_insn_seq[0] = (VALUE)&&LABEL (finish_SC_ax_ax);
-#else
- finish_insn_seq[0] = (VALUE)&&LABEL (finish);
-#endif
- return (VALUE)insns_address_table;
- }
-#endif
- reg_cfp = th->cfp;
- reg_pc = reg_cfp->pc;
-
-#if OPT_STACK_CACHING
- reg_a = initial;
- reg_b = 0;
-#endif
-
- first:
- INSN_DISPATCH();
-/*****************/
- #include "vm.inc"
-/*****************/
- END_INSNS_DISPATCH();
-
- /* unreachable */
- rb_bug("vm_eval: unreachable");
- goto first;
-}
-
-#else
-
-#include "vm.inc"
-#include "vmtc.inc"
-
-const void *const *
-get_insns_address_table()
-{
- return insns_address_table;
-}
-
-VALUE
-vm_eval(rb_thread_t *th, VALUE initial)
-{
- register rb_control_frame_t *reg_cfp = th->cfp;
- VALUE ret;
-
- while (*GET_PC()) {
- reg_cfp = ((rb_insn_func_t) (*GET_PC()))(th, reg_cfp);
-
- if (reg_cfp == 0) {
- VALUE err = th->errinfo;
- th->errinfo = Qnil;
- return err;
- }
- }
-
- if (VM_FRAME_TYPE(th->cfp) != FRAME_MAGIC_FINISH) {
- rb_bug("cfp consistency error");
- }
-
- ret = *(th->cfp->sp-1); /* pop */
- th->cfp++; /* pop cf */
- return ret;
-}
-#endif
Index: eval_load.c
===================================================================
--- eval_load.c (revision 14363)
+++ eval_load.c (revision 14364)
@@ -1,668 +0,0 @@
-/*
- * load methods from eval.c
- */
-
-#include "eval_intern.h"
-
-VALUE ruby_dln_librefs;
-
-#define IS_RBEXT(e) (strcmp(e, ".rb") == 0)
-#define IS_SOEXT(e) (strcmp(e, ".so") == 0 || strcmp(e, ".o") == 0)
-#ifdef DLEXT2
-#define IS_DLEXT(e) (strcmp(e, DLEXT) == 0 || strcmp(e, DLEXT2) == 0)
-#else
-#define IS_DLEXT(e) (strcmp(e, DLEXT) == 0)
-#endif
-
-
-static const char *const loadable_ext[] = {
- ".rb", DLEXT,
-#ifdef DLEXT2
- DLEXT2,
-#endif
- 0
-};
-
-VALUE rb_load_path; /* to be moved to VM */
-static VALUE
-get_load_path(void)
-{
- VALUE load_path = rb_load_path;
- VALUE ary = rb_ary_new2(RARRAY_LEN(load_path));
- long i;
-
- for (i = 0; i < RARRAY_LEN(load_path); ++i) {
- rb_ary_push(ary, rb_file_expand_path(RARRAY_PTR(load_path)[i], Qnil));
- }
- return ary;
-}
-
-static VALUE
-get_loaded_features(void)
-{
- return GET_VM()->loaded_features;
-}
-
-static st_table *
-get_loading_table(void)
-{
- return GET_VM()->loading_table;
-}
-
-static VALUE
-loaded_feature_path(const char *name, long vlen, const char *feature, long len,
- int type, VALUE load_path)
-{
- long i;
-
- for (i = 0; i < RARRAY_LEN(load_path); ++i) {
- VALUE p = RARRAY_PTR(load_path)[i];
- const char *s = StringValuePtr(p);
- long n = RSTRING_LEN(p);
-
- if (vlen < n + len + 1) continue;
- if (n && (strncmp(name, s, n) || name[n] != '/')) continue;
- if (strncmp(name + n + 1, feature, len)) continue;
- if (name[n+len+1] && name[n+len+1] != '.') continue;
- switch (type) {
- case 's':
- if (IS_DLEXT(&name[n+len+1])) return p;
- break;
- case 'r':
- if (IS_RBEXT(&name[n+len+1])) return p;
- break;
- default:
- return p;
- }
- }
- return 0;
-}
-
-struct loaded_feature_searching {
- const char *name;
- long len;
- int type;
- VALUE load_path;
- const char *result;
-};
-
-static int
-loaded_feature_path_i(st_data_t v, st_data_t b, st_data_t f)
-{
- const char *s = (const char *)v;
- struct loaded_feature_searching *fp = (struct loaded_feature_searching *)f;
- VALUE p = loaded_feature_path(s, strlen(s), fp->name, fp->len,
- fp->type, fp->load_path);
- if (!p) return ST_CONTINUE;
- fp->result = s;
- return ST_STOP;
-}
-
-static int
-rb_feature_p(const char *feature, const char *ext, int rb, int expanded)
-{
- VALUE v, features, p, load_path = 0;
- const char *f, *e;
- long i, len, elen, n;
- st_table *loading_tbl;
- int type;
-
- if (ext) {
- len = ext - feature;
- elen = strlen(ext);
- type = rb ? 'r' : 's';
- }
- else {
- len = strlen(feature);
- elen = 0;
- type = 0;
- }
- features = get_loaded_features();
- for (i = 0; i < RARRAY_LEN(features); ++i) {
- v = RARRAY_PTR(features)[i];
- f = StringValuePtr(v);
- if ((n = RSTRING_LEN(v)) < len) continue;
- if (strncmp(f, feature, len) != 0) {
- if (expanded) continue;
- if (!load_path) load_path = get_load_path();
- if (!(p = loaded_feature_path(f, n, feature, len, type, load_path)))
- continue;
- f += RSTRING_LEN(p) + 1;
- }
- if (!*(e = f + len)) {
- if (ext) continue;
- return 'u';
- }
- if (*e != '.') continue;
- if ((!rb || !ext) && (IS_SOEXT(e) || IS_DLEXT(e))) {
- return 's';
- }
- if ((rb || !ext) && (IS_RBEXT(e))) {
- return 'r';
- }
- }
- loading_tbl = get_loading_table();
- if (loading_tbl) {
- if (!expanded) {
- struct loaded_feature_searching fs;
- fs.name = feature;
- fs.len = len;
- fs.type = type;
- fs.load_path = load_path ? load_path : get_load_path();
- fs.result = 0;
- st_foreach(loading_tbl, loaded_feature_path_i, (st_data_t)&fs);
- if (fs.result) goto loading;
- }
- if (st_lookup(loading_tbl, (st_data_t)feature, 0)) {
- loading:
- if (!ext) return 'u';
- return !IS_RBEXT(ext) ? 's' : 'r';
- }
- else {
- char *buf;
-
- if (ext && *ext) return 0;
- buf = ALLOCA_N(char, len + DLEXT_MAXLEN + 1);
- MEMCPY(buf, feature, char, len);
- for (i = 0; (e = loadable_ext[i]) != 0; i++) {
- strncpy(buf + len, e, DLEXT_MAXLEN + 1);
- if (st_lookup(loading_tbl, (st_data_t)buf, 0)) {
- return i ? 's' : 'r';
- }
- }
- }
- }
- return 0;
-}
-
-int
-rb_provided(const char *feature)
-{
- const char *ext = strrchr(feature, '.');
-
- if (ext && !strchr(ext, '/')) {
- if (IS_RBEXT(ext)) {
- if (rb_feature_p(feature, ext, Qtrue, Qfalse)) return Qtrue;
- return Qfalse;
- }
- else if (IS_SOEXT(ext) || IS_DLEXT(ext)) {
- if (rb_feature_p(feature, ext, Qfalse, Qfalse)) return Qtrue;
- return Qfalse;
- }
- }
- if (rb_feature_p(feature, feature + strlen(feature), Qtrue, Qfalse))
- return Qtrue;
- return Qfalse;
-}
-
-static void
-rb_provide_feature(VALUE feature)
-{
- rb_ary_push(get_loaded_features(), feature);
-}
-
-void
-rb_provide(const char *feature)
-{
- rb_provide_feature(rb_str_new2(feature));
-}
-
-NORETURN(static void load_failed _((VALUE)));
-
-void
-rb_load(VALUE fname, int wrap)
-{
- VALUE tmp;
- int state;
- rb_thread_t *th = GET_THREAD();
- volatile VALUE wrapper = th->top_wrapper;
- volatile VALUE self = th->top_self;
- volatile int parse_in_eval;
- volatile int loaded = Qfalse;
-#ifndef __GNUC__
- rb_thread_t *volatile th0 = th;
-#endif
-
- FilePathValue(fname);
- fname = rb_str_new4(fname);
- tmp = rb_find_file(fname);
- if (!tmp) {
- load_failed(fname);
- }
- RB_GC_GUARD(fname) = rb_str_new4(tmp);
-
- th->errinfo = Qnil; /* ensure */
-
- if (!wrap) {
- rb_secure(4); /* should alter global state */
- th->top_wrapper = 0;
- }
- else {
- /* load in anonymous module as toplevel */
- th->top_self = rb_obj_clone(rb_vm_top_self());
- th->top_wrapper = rb_module_new();
- rb_extend_object(th->top_self, th->top_wrapper);
- }
-
- parse_in_eval = th->parse_in_eval;
- PUSH_TAG();
- state = EXEC_TAG();
- if (state == 0) {
- NODE *node;
- VALUE iseq;
-
- th->parse_in_eval++;
- node = (NODE *)rb_load_file(RSTRING_PTR(fname));
- th->parse_in_eval--;
- loaded = Qtrue;
- iseq = rb_iseq_new(node, rb_str_new2("<top (required)>"),
- fname, Qfalse, ISEQ_TYPE_TOP);
- rb_iseq_eval(iseq);
- }
- POP_TAG();
-
-#ifndef __GNUC__
- th = th0;
- fname = RB_GC_GUARD(fname);
-#endif
- th->parse_in_eval = parse_in_eval;
- th->top_self = self;
- th->top_wrapper = wrapper;
-
- if (!loaded) {
- rb_exc_raise(GET_THREAD()->errinfo);
- }
- if (state) {
- vm_jump_tag_but_local_jump(state, Qundef);
- }
-
- if (!NIL_P(GET_THREAD()->errinfo)) {
- /* exception during load */
- rb_exc_raise(th->errinfo);
- }
-}
-
-void
-rb_load_protect(VALUE fname, int wrap, int *state)
-{
- int status;
-
- PUSH_TAG();
- if ((status = EXEC_TAG()) == 0) {
- rb_load(fname, wrap);
- }
- POP_TAG();
- if (state)
- *state = status;
-}
-
-/*
- * call-seq:
- * load(filename, wrap=false) => true
- *
- * Loads and executes the Ruby
- * program in the file _filename_. If the filename does not
- * resolve to an absolute path, the file is searched for in the library
- * directories listed in <code>$:</code>. If the optional _wrap_
- * parameter is +true+, the loaded script will be executed
- * under an anonymous module, protecting the calling program's global
- * namespace. In no circumstance will any local variables in the loaded
- * file be propagated to the loading environment.
- */
-
-
-static VALUE
-rb_f_load(argc, argv)
- int argc;
- VALUE *argv;
-{
- VALUE fname, wrap;
-
- rb_scan_args(argc, argv, "11", &fname, &wrap);
- rb_load(fname, RTEST(wrap));
- return Qtrue;
-}
-
-static char *
-load_lock(const char *ftptr)
-{
- st_data_t data;
- st_table *loading_tbl = get_loading_table();
-
- if (!loading_tbl || !st_lookup(loading_tbl, (st_data_t)ftptr, &data)) {
- /* loading ruby library should be serialized. */
- if (!loading_tbl) {
- GET_VM()->loading_table = loading_tbl = st_init_strtable();
- }
- /* partial state */
- ftptr = ruby_strdup(ftptr);
- data = (st_data_t)rb_barrier_new();
- st_insert(loading_tbl, (st_data_t)ftptr, data);
- return (char *)ftptr;
- }
- return RTEST(rb_barrier_wait((VALUE)data)) ? (char *)ftptr : 0;
-}
-
-static void
-load_unlock(const char *ftptr)
-{
- if (ftptr) {
- st_data_t key = (st_data_t)ftptr;
- st_data_t data;
- st_table *loading_tbl = get_loading_table();
-
- if (st_delete(loading_tbl, &key, &data)) {
- free((char *)key);
- rb_barrier_release((VALUE)data);
- }
- }
-}
-
-
-/*
- * call-seq:
- * require(string) => true or false
- *
- * Ruby tries to load the library named _string_, returning
- * +true+ if successful. If the filename does not resolve to
- * an absolute path, it will be searched for in the directories listed
- * in <code>$:</code>. If the file has the extension ``.rb'', it is
- * loaded as a source file; if the extension is ``.so'', ``.o'', or
- * ``.dll'', or whatever the default shared library extension is on
- * the current platform, Ruby loads the shared library as a Ruby
- * extension. Otherwise, Ruby tries adding ``.rb'', ``.so'', and so on
- * to the name. The name of the loaded feature is added to the array in
- * <code>$"</code>. A feature will not be loaded if it's name already
- * appears in <code>$"</code>. However, the file name is not converted
- * to an absolute path, so that ``<code>require 'a';require
- * './a'</code>'' will load <code>a.rb</code> twice.
- *
- * require "my-library.rb"
- * require "db-driver"
- */
-
-VALUE
-rb_f_require(VALUE obj, VALUE fname)
-{
- return rb_require_safe(fname, rb_safe_level());
-}
-
-static int
-search_required(VALUE fname, volatile VALUE *path)
-{
- VALUE tmp;
- char *ext, *ftptr;
- int type, ft = 0;
-
- *path = 0;
- ext = strrchr(ftptr = RSTRING_PTR(fname), '.');
- if (ext && !strchr(ext, '/')) {
- if (IS_RBEXT(ext)) {
- if (rb_feature_p(ftptr, ext, Qtrue, Qfalse))
- return 'r';
- if ((tmp = rb_find_file(fname)) != 0) {
- tmp = rb_file_expand_path(tmp, Qnil);
- ext = strrchr(ftptr = RSTRING_PTR(tmp), '.');
- if (!rb_feature_p(ftptr, ext, Qtrue, Qtrue))
- *path = tmp;
- return 'r';
- }
- return 0;
- }
- else if (IS_SOEXT(ext)) {
- if (rb_feature_p(ftptr, ext, Qfalse, Qfalse))
- return 's';
- tmp = rb_str_new(RSTRING_PTR(fname), ext - RSTRING_PTR(fname));
-#ifdef DLEXT2
- OBJ_FREEZE(tmp);
- if (rb_find_file_ext(&tmp, loadable_ext + 1)) {
- tmp = rb_file_expand_path(tmp, Qnil);
- ext = strrchr(ftptr = RSTRING_PTR(tmp), '.');
- if (!rb_feature_p(ftptr, ext, Qfalse, Qtrue))
- *path = tmp;
- return 's';
- }
-#else
- rb_str_cat2(tmp, DLEXT);
- OBJ_FREEZE(tmp);
- if ((tmp = rb_find_file(tmp)) != 0) {
- tmp = rb_file_expand_path(tmp, Qnil);
- ext = strrchr(ftptr = RSTRING_PTR(tmp), '.');
- if (!rb_feature_p(ftptr, ext, Qfalse, Qtrue))
- *path = tmp;
- return 's';
- }
-#endif
- }
- else if (IS_DLEXT(ext)) {
- if (rb_feature_p(ftptr, ext, Qfalse, Qfalse))
- return 's';
- if ((tmp = rb_find_file(fname)) != 0) {
- tmp = rb_file_expand_path(tmp, Qnil);
- ext = strrchr(ftptr = RSTRING_PTR(tmp), '.');
- if (!rb_feature_p(ftptr, ext, Qfalse, Qtrue))
- *path = tmp;
- return 's';
- }
- }
- }
- else if ((ft = rb_feature_p(ftptr, 0, Qfalse, Qfalse)) == 'r') {
- return 'r';
- }
- tmp = fname;
- type = rb_find_file_ext(&tmp, loadable_ext);
- tmp = rb_file_expand_path(tmp, Qnil);
- switch (type) {
- case 0:
- ftptr = RSTRING_PTR(tmp);
- if (ft)
- break;
- return rb_feature_p(ftptr, 0, Qfalse, Qtrue);
-
- default:
- if (ft)
- break;
- case 1:
- ext = strrchr(ftptr = RSTRING_PTR(tmp), '.');
- if (rb_feature_p(ftptr, ext, !--type, Qtrue))
- break;
- *path = tmp;
- }
- return type ? 's' : 'r';
-}
-
-static void
-load_failed(VALUE fname)
-{
- rb_raise(rb_eLoadError, "no such file to load -- %s",
- RSTRING_PTR(fname));
-}
-
-static VALUE
-load_ext(VALUE path)
-{
- SCOPE_SET(NOEX_PUBLIC);
- return (VALUE)dln_load(RSTRING_PTR(path));
-}
-
-VALUE
-rb_require_safe(VALUE fname, int safe)
-{
- VALUE result = Qnil;
- rb_thread_t *th = GET_THREAD();
- volatile VALUE errinfo = th->errinfo;
- int state;
- struct {
- int safe;
- } volatile saved;
- char *volatile ftptr = 0;
-
- PUSH_TAG();
- saved.safe = rb_safe_level();
- if ((state = EXEC_TAG()) == 0) {
- VALUE path;
- long handle;
- int found;
-
- rb_set_safe_level_force(safe);
- FilePathValue(fname);
- RB_GC_GUARD(fname) = rb_str_new4(fname);
- found = search_required(fname, &path);
- if (found) {
- if (!path || !(ftptr = load_lock(RSTRING_PTR(path)))) {
- result = Qfalse;
- }
- else {
- rb_set_safe_level_force(0);
- switch (found) {
- case 'r':
- rb_load(path, 0);
- break;
-
- case 's':
- handle = (long)rb_vm_call_cfunc(rb_vm_top_self(), load_ext,
- path, 0, path);
- rb_ary_push(ruby_dln_librefs, LONG2NUM(handle));
- break;
- }
- rb_provide_feature(path);
- result = Qtrue;
- }
- }
- }
- POP_TAG();
- load_unlock(ftptr);
-
- rb_set_safe_level_force(saved.safe);
- if (state) {
- JUMP_TAG(state);
- }
-
- if (NIL_P(result)) {
- load_failed(fname);
- }
-
- th->errinfo = errinfo;
-
- return result;
-}
-
-VALUE
-rb_require(const char *fname)
-{
- VALUE fn = rb_str_new2(fname);
- OBJ_FREEZE(fn);
- return rb_require_safe(fn, rb_safe_level());
-}
-
-static VALUE
-init_ext_call(VALUE arg)
-{
- SCOPE_SET(NOEX_PUBLIC);
- (*(void (*)(void))arg)();
- return Qnil;
-}
-
-void
-ruby_init_ext(const char *name, void (*init)(void))
-{
- if (load_lock(name)) {
- rb_vm_call_cfunc(rb_vm_top_self(), init_ext_call, (VALUE)init,
- 0, rb_str_new2(name));
- rb_provide(name);
- load_unlock(name);
- }
-}
-
-/*
- * call-seq:
- * mod.autoload(name, filename) => nil
- *
- * Registers _filename_ to be loaded (using <code>Kernel::require</code>)
- * the first time that _module_ (which may be a <code>String</code> or
- * a symbol) is accessed in the namespace of _mod_.
- *
- * module A
- * end
- * A.autoload(:B, "b")
- * A::B.doit # autoloads "b"
- */
-
-static VALUE
-rb_mod_autoload(VALUE mod, VALUE sym, VALUE file)
-{
- ID id = rb_to_id(sym);
-
- Check_SafeStr(file);
- rb_autoload(mod, id, RSTRING_PTR(file));
- return Qnil;
-}
-
-/*
- * MISSING: documentation
- */
-
-static VALUE
-rb_mod_autoload_p(VALUE mod, VALUE sym)
-{
- return rb_autoload_p(mod, rb_to_id(sym));
-}
-
-/*
- * call-seq:
- * autoload(module, filename) => nil
- *
- * Registers _filename_ to be loaded (using <code>Kernel::require</code>)
- * the first time that _module_ (which may be a <code>String</code> or
- * a symbol) is accessed.
- *
- * autoload(:MyModule, "/usr/local/lib/modules/my_module.rb")
- */
-
-static VALUE
-rb_f_autoload(VALUE obj, VALUE sym, VALUE file)
-{
- VALUE klass = ruby_cbase();
- if (NIL_P(klass)) {
- rb_raise(rb_eTypeError, "Can not set autoload on singleton class");
- }
- return rb_mod_autoload(klass, sym, file);
-}
-
-/*
- * MISSING: documentation
- */
-
-static VALUE
-rb_f_autoload_p(VALUE obj, VALUE sym)
-{
- /* use ruby_cbase() as same as rb_f_autoload. */
- VALUE klass = ruby_cbase();
- if (NIL_P(klass)) {
- return Qnil;
- }
- return rb_mod_autoload_p(klass, sym);
-}
-
-void
-Init_load()
-{
- rb_define_readonly_variable("$:", &rb_load_path);
- rb_define_readonly_variable("$-I", &rb_load_path);
- rb_define_readonly_variable("$LOAD_PATH", &rb_load_path);
- rb_load_path = rb_ary_new();
-
- rb_define_virtual_variable("$\"", get_loaded_features, 0);
- rb_define_virtual_variable("$LOADED_FEATURES", get_loaded_features, 0);
- GET_VM()->loaded_features = rb_ary_new();
-
- rb_define_global_function("load", rb_f_load, -1);
- rb_define_global_function("require", rb_f_require, 1);
- rb_define_method(rb_cModule, "autoload", rb_mod_autoload, 2);
- rb_define_method(rb_cModule, "autoload?", rb_mod_autoload_p, 1);
- rb_define_global_function("autoload", rb_f_autoload, 2);
- rb_define_global_function("autoload?", rb_f_autoload_p, 1);
-
- ruby_dln_librefs = rb_ary_new();
- rb_register_mark_object(ruby_dln_librefs);
-}
Index: eval_method.ci
===================================================================
--- eval_method.ci (revision 14363)
+++ eval_method.ci (revision 14364)
@@ -1,641 +0,0 @@
-/* -*-c-*- */
-/*
- * This file is included by eval.c
- */
-
-#define CACHE_SIZE 0x800
-#define CACHE_MASK 0x7ff
-#define EXPR1(c,m) ((((c)>>3)^(m))&CACHE_MASK)
-
-struct cache_entry { /* method hash table. */
- ID mid; /* method's id */
- ID mid0; /* method's original id */
- VALUE klass; /* receiver's class */
- NODE *method;
-};
-
-static struct cache_entry cache[CACHE_SIZE];
-static int ruby_running = 0;
-
-void
-rb_clear_cache(void)
-{
- struct cache_entry *ent, *end;
-
- rb_vm_change_state();
-
- if (!ruby_running)
- return;
- ent = cache;
- end = ent + CACHE_SIZE;
- while (ent < end) {
- ent->mid = 0;
- ent++;
- }
-}
-
-static void
-rb_clear_cache_for_undef(VALUE klass, ID id)
-{
- struct cache_entry *ent, *end;
-
- rb_vm_change_state();
-
- if (!ruby_running)
- return;
- ent = cache;
- end = ent + CACHE_SIZE;
- while (ent < end) {
- if (ent->method && ent->method->nd_clss == klass && ent->mid == id) {
- ent->mid = 0;
- }
- ent++;
- }
-}
-
-static void
-rb_clear_cache_by_id(ID id)
-{
- struct cache_entry *ent, *end;
-
- rb_vm_change_state();
-
- if (!ruby_running)
- return;
- ent = cache;
- end = ent + CACHE_SIZE;
- while (ent < end) {
- if (ent->mid == id) {
- ent->mid = 0;
- }
- ent++;
- }
-}
-
-void
-rb_clear_cache_by_class(VALUE klass)
-{
- struct cache_entry *ent, *end;
-
- rb_vm_change_state();
-
- if (!ruby_running)
- return;
- ent = cache;
- end = ent + CACHE_SIZE;
- while (ent < end) {
- if ((ent->klass == klass) ||
- (ent->method && ent->method->nd_clss == klass)) {
- ent->mid = 0;
- }
- ent++;
- }
-}
-
-void
-rb_add_method(VALUE klass, ID mid, NODE * node, int noex)
-{
- NODE *body;
-
- if (NIL_P(klass)) {
- klass = rb_cObject;
- }
- if (rb_safe_level() >= 4 && (klass == rb_cObject || !OBJ_TAINTED(klass))) {
- rb_raise(rb_eSecurityError, "Insecure: can't define method");
- }
- if (!FL_TEST(klass, FL_SINGLETON) &&
- node && nd_type(node) != NODE_ZSUPER &&
- (mid == rb_intern("initialize") || mid == rb_intern("initialize_copy"))) {
- noex = NOEX_PRIVATE | noex;
- }
- else if (FL_TEST(klass, FL_SINGLETON) && node
- && nd_type(node) == NODE_CFUNC && mid == rb_intern("allocate")) {
- rb_warn
- ("defining %s.allocate is deprecated; use rb_define_alloc_func()",
- rb_class2name(rb_iv_get(klass, "__attached__")));
- mid = ID_ALLOCATOR;
- }
- if (OBJ_FROZEN(klass)) {
- rb_error_frozen("class/module");
- }
- rb_clear_cache_by_id(mid);
-
- /*
- * NODE_METHOD (NEW_METHOD(body, klass, vis)):
- * nd_body : method body // (2) // mark
- * nd_clss : klass // (1) // mark
- * nd_noex : visibility // (3)
- *
- * NODE_FBODY (NEW_FBODY(method, alias)):
- * nd_body : method (NODE_METHOD) // (2) // mark
- * nd_oid : original id // (1)
- * nd_cnt : alias count // (3)
- */
- if (node) {
- body = NEW_FBODY(NEW_METHOD(node, klass, NOEX_WITH_SAFE(noex)), 0);
- }
- else {
- body = 0;
- }
-
- {
- /* check re-definition */
- st_data_t data;
- NODE *old_node;
-
- if (st_lookup(RCLASS_M_TBL(klass), mid, &data)) {
- old_node = (NODE *)data;
- if (old_node) {
- if (nd_type(old_node->nd_body->nd_body) == NODE_CFUNC) {
- rb_vm_check_redefinition_opt_method(old_node);
- }
- if (RTEST(ruby_verbose) && node && old_node->nd_cnt == 0 && old_node->nd_body) {
- rb_warning("method redefined; discarding old %s", rb_id2name(mid));
- }
- }
- if (klass == rb_cObject && node && node->nd_mid == init) {
- rb_warn("redefining Object#initialize may cause infinite loop");
- }
- }
-
- if (mid == object_id || mid == __send__) {
- if (node && nd_type(node) == RUBY_VM_METHOD_NODE) {
- rb_warn("redefining `%s' may cause serious problem",
- rb_id2name(mid));
- }
- }
- }
-
- st_insert(RCLASS_M_TBL(klass), mid, (st_data_t) body);
-
- if (node && mid != ID_ALLOCATOR && ruby_running) {
- if (FL_TEST(klass, FL_SINGLETON)) {
- rb_funcall(rb_iv_get(klass, "__attached__"), singleton_added, 1,
- ID2SYM(mid));
- }
- else {
- rb_funcall(klass, added, 1, ID2SYM(mid));
- }
- }
-}
-
-void
-rb_define_alloc_func(VALUE klass, VALUE (*func) _((VALUE)))
-{
- Check_Type(klass, T_CLASS);
- rb_add_method(CLASS_OF(klass), ID_ALLOCATOR, NEW_CFUNC(func, 0),
- NOEX_PRIVATE);
-}
-
-void
-rb_undef_alloc_func(VALUE klass)
-{
- Check_Type(klass, T_CLASS);
- rb_add_method(CLASS_OF(klass), ID_ALLOCATOR, 0, NOEX_UNDEF);
-}
-
-rb_alloc_func_t
-rb_get_alloc_func(VALUE klass)
-{
- NODE *n;
- Check_Type(klass, T_CLASS);
- n = rb_method_node(CLASS_OF(klass), ID_ALLOCATOR);
- if (!n) return 0;
- if (nd_type(n) != NODE_METHOD) return 0;
- n = n->nd_body;
- if (nd_type(n) != NODE_CFUNC) return 0;
- return (rb_alloc_func_t)n->nd_cfnc;
-}
-
-static NODE *
-search_method(VALUE klass, ID id, VALUE *klassp)
-{
- st_data_t body;
-
- if (!klass) {
- return 0;
- }
-
- while (!st_lookup(RCLASS_M_TBL(klass), id, &body)) {
- klass = RCLASS_SUPER(klass);
- if (!klass)
- return 0;
- }
-
- if (klassp) {
- *klassp = klass;
- }
-
- return (NODE *)body;
-}
-
-/*
- * search method body (NODE_METHOD)
- * with : klass and id
- * without : method cache
- *
- * if you need method node with method cache, use
- * rb_method_node()
- */
-NODE *
-rb_get_method_body(VALUE klass, ID id, ID *idp)
-{
- NODE *volatile fbody, *body;
- NODE *method;
-
- if ((fbody = search_method(klass, id, 0)) == 0 || !fbody->nd_body) {
- /* store empty info in cache */
- struct cache_entry *ent;
- ent = cache + EXPR1(klass, id);
- ent->klass = klass;
- ent->mid = ent->mid0 = id;
- ent->method = 0;
- return 0;
- }
-
- method = fbody->nd_body;
-
- if (ruby_running) {
- /* store in cache */
- struct cache_entry *ent;
- ent = cache + EXPR1(klass, id);
- ent->klass = klass;
- ent->mid = id;
- ent->mid0 = fbody->nd_oid;
- ent->method = body = method;
- }
- else {
- body = method;
- }
-
- if (idp) {
- *idp = fbody->nd_oid;
- }
-
- return body;
-}
-
-NODE *
-rb_method_node(VALUE klass, ID id)
-{
- struct cache_entry *ent;
-
- ent = cache + EXPR1(klass, id);
- if (ent->mid == id && ent->klass == klass && ent->method) {
- return ent->method;
- }
-
- return rb_get_method_body(klass, id, 0);
-}
-
-static void
-remove_method(VALUE klass, ID mid)
-{
- st_data_t data;
- NODE *body = 0;
-
- if (klass == rb_cObject) {
- rb_secure(4);
- }
- if (rb_safe_level() >= 4 && !OBJ_TAINTED(klass)) {
- rb_raise(rb_eSecurityError, "Insecure: can't remove method");
- }
- if (OBJ_FROZEN(klass))
- rb_error_frozen("class/module");
- if (mid == object_id || mid == __send__ || mid == init) {
- rb_warn("removing `%s' may cause serious problem", rb_id2name(mid));
- }
- if (st_lookup(RCLASS_M_TBL(klass), mid, &data)) {
- body = (NODE *)data;
- if (!body || !body->nd_body) body = 0;
- else {
- st_delete(RCLASS_M_TBL(klass), &mid, &data);
- }
- }
- if (!body) {
- rb_name_error(mid, "method `%s' not defined in %s",
- rb_id2name(mid), rb_class2name(klass));
- }
-
- if (nd_type(body->nd_body->nd_body) == NODE_CFUNC) {
- rb_vm_check_redefinition_opt_method(body);
- }
-
- rb_clear_cache_for_undef(klass, mid);
- if (FL_TEST(klass, FL_SINGLETON)) {
- rb_funcall(rb_iv_get(klass, "__attached__"), singleton_removed, 1,
- ID2SYM(mid));
- }
- else {
- rb_funcall(klass, removed, 1, ID2SYM(mid));
- }
-}
-
-void
-rb_remove_method(VALUE klass, const char *name)
-{
- remove_method(klass, rb_intern(name));
-}
-
-/*
- * call-seq:
- * remove_method(symbol) => self
- *
- * Removes the method identified by _symbol_ from the current
- * class. For an example, see <code>Module.undef_method</code>.
- */
-
-static VALUE
-rb_mod_remove_method(int argc, VALUE *argv, VALUE mod)
-{
- int i;
-
- for (i = 0; i < argc; i++) {
- remove_method(mod, rb_to_id(argv[i]));
- }
- return mod;
-}
-
-#undef rb_disable_super
-#undef rb_enable_super
-
-void
-rb_disable_super(VALUE klass, const char *name)
-{
- /* obsolete - no use */
-}
-
-void
-rb_enable_super(VALUE klass, const char *name)
-{
- rb_warning("rb_enable_super() is obsolete");
-}
-
-static void
-rb_export_method(VALUE klass, ID name, ID noex)
-{
- NODE *fbody;
- VALUE origin;
-
- if (klass == rb_cObject) {
- rb_secure(4);
- }
- fbody = search_method(klass, name, &origin);
- if (!fbody && TYPE(klass) == T_MODULE) {
- fbody = search_method(rb_cObject, name, &origin);
- }
- if (!fbody || !fbody->nd_body) {
- rb_print_undef(klass, name, 0);
- }
- if (fbody->nd_body->nd_noex != noex) {
- if (klass == origin) {
- fbody->nd_body->nd_noex = noex;
- }
- else {
- rb_add_method(klass, name, NEW_ZSUPER(), noex);
- }
- }
-}
-
-int
-rb_method_boundp(VALUE klass, ID id, int ex)
-{
- NODE *method;
-
- if ((method = rb_method_node(klass, id)) != 0) {
- if (ex && (method->nd_noex & NOEX_PRIVATE)) {
- return Qfalse;
- }
- return Qtrue;
- }
- return Qfalse;
-}
-
-void
-rb_attr(VALUE klass, ID id, int read, int write, int ex)
-{
- const char *name;
- ID attriv;
- int noex;
-
- if (!ex) {
- noex = NOEX_PUBLIC;
- }
- else {
- if (SCOPE_TEST(NOEX_PRIVATE)) {
- noex = NOEX_PRIVATE;
- rb_warning((SCOPE_CHECK(NOEX_MODFUNC)) ?
- "attribute accessor as module_function" :
- "private attribute?");
- }
- else if (SCOPE_TEST(NOEX_PROTECTED)) {
- noex = NOEX_PROTECTED;
- }
- else {
- noex = NOEX_PUBLIC;
- }
- }
-
- if (!rb_is_local_id(id) && !rb_is_const_id(id)) {
- rb_name_error(id, "invalid attribute name `%s'", rb_id2name(id));
- }
- name = rb_id2name(id);
- if (!name) {
- rb_raise(rb_eArgError, "argument needs to be symbol or string");
- }
- attriv = rb_intern_str(rb_sprintf("@%s", name));
- if (read) {
- rb_add_method(klass, id, NEW_IVAR(attriv), noex);
- }
- if (write) {
- rb_add_method(klass, rb_id_attrset(id), NEW_ATTRSET(attriv), noex);
- }
-}
-
-void
-rb_undef(VALUE klass, ID id)
-{
- VALUE origin;
- NODE *body;
-
- if (ruby_cbase() == rb_cObject && klass == rb_cObject) {
- rb_secure(4);
- }
- if (rb_safe_level() >= 4 && !OBJ_TAINTED(klass)) {
- rb_raise(rb_eSecurityError, "Insecure: can't undef `%s'",
- rb_id2name(id));
- }
- rb_frozen_class_p(klass);
- if (id == object_id || id == __send__ || id == init) {
- rb_warn("undefining `%s' may cause serious problem", rb_id2name(id));
- }
- body = search_method(klass, id, &origin);
- if (!body || !body->nd_body) {
- char *s0 = " class";
- VALUE c = klass;
-
- if (FL_TEST(c, FL_SINGLETON)) {
- VALUE obj = rb_iv_get(klass, "__attached__");
-
- switch (TYPE(obj)) {
- case T_MODULE:
- case T_CLASS:
- c = obj;
- s0 = "";
- }
- }
- else if (TYPE(c) == T_MODULE) {
- s0 = " module";
- }
- rb_name_error(id, "undefined method `%s' for%s `%s'",
- rb_id2name(id), s0, rb_class2name(c));
- }
-
- rb_add_method(klass, id, 0, NOEX_PUBLIC);
-
- if (FL_TEST(klass, FL_SINGLETON)) {
- rb_funcall(rb_iv_get(klass, "__attached__"),
- singleton_undefined, 1, ID2SYM(id));
- }
- else {
- rb_funcall(klass, undefined, 1, ID2SYM(id));
- }
-}
-
-/*
- * call-seq:
- * undef_method(symbol) => self
- *
- * Prevents the current class from responding to calls to the named
- * method. Contrast this with <code>remove_method</code>, which deletes
- * the method from the particular class; Ruby will still search
- * superclasses and mixed-in modules for a possible receiver.
- *
- * class Parent
- * def hello
- * puts "In parent"
- * end
- * end
- * class Child < Parent
- * def hello
- * puts "In child"
- * end
- * end
- *
- *
- * c = Child.new
- * c.hello
- *
- *
- * class Child
- * remove_method :hello # remove from child, still in parent
- * end
- * c.hello
- *
- *
- * class Child
- * undef_method :hello # prevent any calls to 'hello'
- * end
- * c.hello
- *
- * <em>produces:</em>
- *
- * In child
- * In parent
- * prog.rb:23: undefined method `hello' for #<Child:0x401b3bb4> (NoMethodError)
- */
-
-static VALUE
-rb_mod_undef_method(int argc, VALUE *argv, VALUE mod)
-{
- int i;
- for (i = 0; i < argc; i++) {
- rb_undef(mod, rb_to_id(argv[i]));
- }
- return mod;
-}
-
-void
-rb_alias(VALUE klass, ID name, ID def)
-{
- NODE *orig_fbody, *node;
- VALUE singleton = 0;
- st_data_t data;
-
- rb_frozen_class_p(klass);
- if (klass == rb_cObject) {
- rb_secure(4);
- }
- orig_fbody = search_method(klass, def, 0);
- if (!orig_fbody || !orig_fbody->nd_body) {
- if (TYPE(klass) == T_MODULE) {
- orig_fbody = search_method(rb_cObject, def, 0);
- }
- }
- if (!orig_fbody || !orig_fbody->nd_body) {
- rb_print_undef(klass, def, 0);
- }
- if (FL_TEST(klass, FL_SINGLETON)) {
- singleton = rb_iv_get(klass, "__attached__");
- }
-
- orig_fbody->nd_cnt++;
-
- if (st_lookup(RCLASS_M_TBL(klass), name, &data)) {
- node = (NODE *)data;
- if (node) {
- if (RTEST(ruby_verbose) && node->nd_cnt == 0 && node->nd_body) {
- rb_warning("discarding old %s", rb_id2name(name));
- }
- if (nd_type(node->nd_body->nd_body) == NODE_CFUNC) {
- rb_vm_check_redefinition_opt_method(node);
- }
- }
- }
-
- st_insert(RCLASS_M_TBL(klass), name,
- (st_data_t) NEW_FBODY(
- NEW_METHOD(orig_fbody->nd_body->nd_body,
- orig_fbody->nd_body->nd_clss,
- NOEX_WITH_SAFE(orig_fbody->nd_body->nd_noex)), def));
-
- rb_clear_cache_by_id(name);
-
- if (!ruby_running) return;
-
- if (singleton) {
- rb_funcall(singleton, singleton_added, 1, ID2SYM(name));
- }
- else {
- rb_funcall(klass, added, 1, ID2SYM(name));
- }
-}
-
-/*
- * call-seq:
- * alias_method(new_name, old_name) => self
- *
- * Makes <i>new_name</i> a new copy of the method <i>old_name</i>. This can
- * be used to retain access to methods that are overridden.
- *
- * module Mod
- * alias_method :orig_exit, :exit
- * def exit(code=0)
- * puts "Exiting with code #{code}"
- * orig_exit(code)
- * end
- * end
- * include Mod
- * exit(99)
- *
- * <em>produces:</em>
- *
- * Exiting with code 99
- */
-
-static VALUE
-rb_mod_alias_method(VALUE mod, VALUE newname, VALUE oldname)
-{
- rb_alias(mod, rb_to_id(newname), rb_to_id(oldname));
- return mod;
-}
Index: eval_jump.ci
===================================================================
--- eval_jump.ci (revision 14363)
+++ eval_jump.ci (revision 14364)
@@ -1,305 +0,0 @@
-/* -*-c-*- */
-/*
- * from eval.c
- */
-
-#include "eval_intern.h"
-
-NORETURN(static VALUE rb_f_throw _((int, VALUE *)));
-
-/*
- * call-seq:
- * throw(symbol [, obj])
- *
- * Transfers control to the end of the active +catch+ block
- * waiting for _symbol_. Raises +NameError+ if there
- * is no +catch+ block for the symbol. The optional second
- * parameter supplies a return value for the +catch+ block,
- * which otherwise defaults to +nil+. For examples, see
- * <code>Kernel::catch</code>.
- */
-
-static VALUE
-rb_f_throw(int argc, VALUE *argv)
-{
- VALUE tag, value;
- rb_thread_t *th = GET_THREAD();
- struct rb_vm_tag *tt = th->tag;
-
- rb_scan_args(argc, argv, "11", &tag, &value);
- while (tt) {
- if (tt->tag == tag) {
- tt->retval = value;
- break;
- }
- tt = tt->prev;
- }
- if (!tt) {
- VALUE desc = rb_inspect(tag);
- rb_raise(rb_eArgError, "uncaught throw %s", RSTRING_PTR(desc));
- }
- rb_trap_restore_mask();
- th->errinfo = NEW_THROW_OBJECT(tag, 0, TAG_THROW);
-
- JUMP_TAG(TAG_THROW);
-#ifndef __GNUC__
- return Qnil; /* not reached */
-#endif
-}
-
-void
-rb_throw(const char *tag, VALUE val)
-{
- VALUE argv[2];
-
- argv[0] = ID2SYM(rb_intern(tag));
- argv[1] = val;
- rb_f_throw(2, argv);
-}
-
-void
-rb_throw_obj(VALUE tag, VALUE val)
-{
- VALUE argv[2];
-
- argv[0] = tag;
- argv[1] = val;
- rb_f_throw(2, argv);
-}
-
-/*
- * call-seq:
- *     catch(symbol) {| | block } => obj
- *
- * +catch+ executes its block. If a +throw+ is
- * executed, Ruby searches up its stack for a +catch+ block
- * with a tag corresponding to the +throw+'s
- * _symbol_. If found, that block is terminated, and
- * +catch+ returns the value given to +throw+. If
- * +throw+ is not called, the block terminates normally, and
- * the value of +catch+ is the value of the last expression
- * evaluated. +catch+ expressions may be nested, and the
- * +throw+ call need not be in lexical scope.
- *
- * def routine(n)
- * puts n
- * throw :done if n <= 0
- * routine(n-1)
- * end
- *
- *
- * catch(:done) { routine(3) }
- *
- * <em>produces:</em>
- *
- * 3
- * 2
- * 1
- * 0
- */
-
-static VALUE
-rb_f_catch(int argc, VALUE *argv)
-{
- VALUE tag;
- int state;
- VALUE val = Qnil; /* OK */
- rb_thread_t *th = GET_THREAD();
-
- rb_scan_args(argc, argv, "01", &tag);
- if (argc == 0) {
- tag = rb_obj_alloc(rb_cObject);
- }
- PUSH_TAG();
-
- th->tag->tag = tag;
-
- if ((state = EXEC_TAG()) == 0) {
- val = rb_yield_0(1, &tag);
- }
- else if (state == TAG_THROW && RNODE(th->errinfo)->u1.value == tag) {
- val = th->tag->retval;
- th->errinfo = Qnil;
- state = 0;
- }
- POP_TAG();
- if (state)
- JUMP_TAG(state);
-
- return val;
-}
-
-static VALUE
-catch_null_i(VALUE dmy)
-{
- return rb_funcall(Qnil, rb_intern("catch"), 0, 0);
-}
-
-static VALUE
-catch_i(VALUE tag)
-{
- return rb_funcall(Qnil, rb_intern("catch"), 1, tag);
-}
-
-VALUE
-rb_catch(const char *tag, VALUE (*func)(), VALUE data)
-{
- if (!tag) {
- return rb_iterate(catch_null_i, 0, func, data);
- }
- return rb_iterate(catch_i, ID2SYM(rb_intern(tag)), func, data);
-}
-
-VALUE
-rb_catch_obj(VALUE tag, VALUE (*func)(), VALUE data)
-{
- return rb_iterate((VALUE (*)_((VALUE)))catch_i, tag, func, data);
-}
-
-
-/* exit */
-
-void
-rb_call_end_proc(VALUE data)
-{
- rb_proc_call(data, rb_ary_new());
-}
-
-/*
- * call-seq:
- * at_exit { block } -> proc
- *
- * Converts _block_ to a +Proc+ object (and therefore
- * binds it at the point of call) and registers it for execution when
- * the program exits. If multiple handlers are registered, they are
- * executed in reverse order of registration.
- *
- * def do_at_exit(str1)
- * at_exit { print str1 }
- * end
- * at_exit { puts "cruel world" }
- * do_at_exit("goodbye ")
- * exit
- *
- * <em>produces:</em>
- *
- * goodbye cruel world
- */
-
-static VALUE
-rb_f_at_exit(void)
-{
- VALUE proc;
-
- if (!rb_block_given_p()) {
- rb_raise(rb_eArgError, "called without a block");
- }
- proc = rb_block_proc();
- rb_set_end_proc(rb_call_end_proc, proc);
- return proc;
-}
-
-struct end_proc_data {
- void (*func) ();
- VALUE data;
- int safe;
- struct end_proc_data *next;
-};
-
-static struct end_proc_data *end_procs, *ephemeral_end_procs, *tmp_end_procs;
-
-void
-rb_set_end_proc(void (*func)(VALUE), VALUE data)
-{
- struct end_proc_data *link = ALLOC(struct end_proc_data);
- struct end_proc_data **list;
- rb_thread_t *th = GET_THREAD();
-
- if (th->top_wrapper) {
- list = &ephemeral_end_procs;
- }
- else {
- list = &end_procs;
- }
- link->next = *list;
- link->func = func;
- link->data = data;
- link->safe = rb_safe_level();
- *list = link;
-}
-
-void
-rb_mark_end_proc(void)
-{
- struct end_proc_data *link;
-
- link = end_procs;
- while (link) {
- rb_gc_mark(link->data);
- link = link->next;
- }
- link = ephemeral_end_procs;
- while (link) {
- rb_gc_mark(link->data);
- link = link->next;
- }
- link = tmp_end_procs;
- while (link) {
- rb_gc_mark(link->data);
- link = link->next;
- }
-}
-
-void
-rb_exec_end_proc(void)
-{
- struct end_proc_data *link, *tmp;
- int status;
- volatile int safe = rb_safe_level();
-
- while (ephemeral_end_procs) {
- tmp_end_procs = link = ephemeral_end_procs;
- ephemeral_end_procs = 0;
- while (link) {
- PUSH_TAG();
- if ((status = EXEC_TAG()) == 0) {
- rb_set_safe_level_force(link->safe);
- (*link->func) (link->data);
- }
- POP_TAG();
- if (status) {
- error_handle(status);
- }
- tmp = link;
- tmp_end_procs = link = link->next;
- free(tmp);
- }
- }
- while (end_procs) {
- tmp_end_procs = link = end_procs;
- end_procs = 0;
- while (link) {
- PUSH_TAG();
- if ((status = EXEC_TAG()) == 0) {
- rb_set_safe_level_force(link->safe);
- (*link->func) (link->data);
- }
- POP_TAG();
- if (status) {
- error_handle(status);
- }
- tmp = link;
- tmp_end_procs = link = link->next;
- free(tmp);
- }
- }
- rb_set_safe_level_force(safe);
-}
-
-void
-Init_jump(void)
-{
- rb_define_global_function("catch", rb_f_catch, -1);
- rb_define_global_function("throw", rb_f_throw, -1);
- rb_define_global_function("at_exit", rb_f_at_exit, 0);
-}
Index: thread_pthread.ci
===================================================================
--- thread_pthread.ci (revision 14363)
+++ thread_pthread.ci (revision 14364)
@@ -1,581 +0,0 @@
-/* -*-c-*- */
-/**********************************************************************
-
- thread_pthread.ci -
-
- $Author$
- $Date$
-
- Copyright (C) 2004-2007 Koichi Sasada
-
-**********************************************************************/
-
-#ifdef THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
-
-void
-native_mutex_lock(pthread_mutex_t *lock)
-{
- int r;
- if ((r = pthread_mutex_lock(lock)) != 0) {
- rb_bug("pthread_mutex_lock: %d", r);
- }
-}
-
-void
-native_mutex_unlock(pthread_mutex_t *lock)
-{
- int r;
- if ((r = pthread_mutex_unlock(lock)) != 0) {
- rb_bug("native_mutex_unlock return non-zero: %d", r);
- }
-}
-
-inline int
-native_mutex_trylock(pthread_mutex_t *lock)
-{
- int r;
- if ((r = pthread_mutex_trylock(lock)) != 0) {
- if (r == EBUSY) {
- return EBUSY;
- }
- else {
- rb_bug("native_mutex_trylock return non-zero: %d", r);
- }
- }
- return 0;
-}
-
-void
-native_mutex_initialize(pthread_mutex_t *lock)
-{
- int r = pthread_mutex_init(lock, 0);
- if (r != 0) {
- rb_bug("native_mutex_initialize return non-zero: %d", r);
- }
-}
-
-void
-native_mutex_destroy(pthread_mutex_t *lock)
-{
- int r = pthread_mutex_destroy(lock);
- if (r != 0) {
- rb_bug("native_mutex_destroy return non-zero: %d", r);
- }
-}
-
-void
-native_cond_initialize(pthread_cond_t *cond)
-{
- int r = pthread_cond_init(cond, 0);
- if (r != 0) {
- rb_bug("native_cond_initialize return non-zero: %d", r);
- }
-}
-
-void
-native_cond_destroy(pthread_cond_t *cond)
-{
- int r = pthread_cond_destroy(cond);
- if (r != 0) {
- rb_bug("native_cond_destroy return non-zero: %d", r);
- }
-}
-
-void
-native_cond_signal(pthread_cond_t *cond)
-{
- pthread_cond_signal(cond);
-}
-
-void
-native_cond_broadcast(pthread_cond_t *cond)
-{
- pthread_cond_broadcast(cond);
-}
-
-void
-native_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
-{
- pthread_cond_wait(cond, mutex);
-}
-
-
-#define native_cleanup_push pthread_cleanup_push
-#define native_cleanup_pop pthread_cleanup_pop
-#define native_thread_yield() sched_yield()
-
-static void add_signal_thread_list(rb_thread_t *th);
-static void remove_signal_thread_list(rb_thread_t *th);
-
-static rb_thread_lock_t signal_thread_list_lock;
-
-static pthread_key_t ruby_native_thread_key;
-
-static void
-null_func()
-{
- /* null */
-}
-
-static rb_thread_t *
-ruby_thread_from_native(void)
-{
- return pthread_getspecific(ruby_native_thread_key);
-}
-
-static int
-ruby_thread_set_native(rb_thread_t *th)
-{
- return pthread_setspecific(ruby_native_thread_key, th) == 0;
-}
-
-static void
-Init_native_thread(void)
-{
- rb_thread_t *th = GET_THREAD();
-
- pthread_key_create(&ruby_native_thread_key, NULL);
- th->thread_id = pthread_self();
- ruby_thread_set_native(th);
- native_mutex_initialize(&signal_thread_list_lock);
- posix_signal(SIGVTALRM, null_func);
-}
-
-static void
-native_thread_destroy(rb_thread_t *th)
-{
- pthread_cond_destroy(&th->native_thread_data.sleep_cond);
-}
-
-#define USE_THREAD_CACHE 0
-
-static void *
-thread_start_func_1(void *th_ptr)
-{
-#if USE_THREAD_CACHE
- thread_start:
-#endif
- {
- rb_thread_t *th = th_ptr;
- VALUE stack_start;
-
- /* run */
- thread_start_func_2(th, &stack_start, rb_ia64_bsp());
- }
-#if USE_THREAD_CACHE
- if (1) {
- /* cache thread */
- rb_thread_t *th;
- static rb_thread_t *register_cached_thread_and_wait(void);
- if ((th = register_cached_thread_and_wait()) != 0) {
- th_ptr = (void *)th;
- th->thread_id = pthread_self();
- goto thread_start;
- }
- }
-#endif
- return 0;
-}
-
-void rb_thread_create_control_thread(void);
-
-struct cached_thread_entry {
- volatile rb_thread_t **th_area;
- pthread_cond_t *cond;
- struct cached_thread_entry *next;
-};
-
-
-#if USE_THREAD_CACHE
-static pthread_mutex_t thread_cache_lock = PTHREAD_MUTEX_INITIALIZER;
-struct cached_thread_entry *cached_thread_root;
-
-static rb_thread_t *
-register_cached_thread_and_wait(void)
-{
- pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
- volatile rb_thread_t *th_area = 0;
- struct cached_thread_entry *entry =
- (struct cached_thread_entry *)malloc(sizeof(struct cached_thread_entry));
-
- struct timeval tv;
- struct timespec ts;
- gettimeofday(&tv, 0);
- ts.tv_sec = tv.tv_sec + 60;
- ts.tv_nsec = tv.tv_usec * 1000;
-
- pthread_mutex_lock(&thread_cache_lock);
- {
- entry->th_area = &th_area;
- entry->cond = &cond;
- entry->next = cached_thread_root;
- cached_thread_root = entry;
-
- pthread_cond_timedwait(&cond, &thread_cache_lock, &ts);
-
- {
- struct cached_thread_entry *e = cached_thread_root;
- struct cached_thread_entry *prev = cached_thread_root;
-
- while (e) {
- if (e == entry) {
- if (prev == cached_thread_root) {
- cached_thread_root = e->next;
- }
- else {
- prev->next = e->next;
- }
- break;
- }
- prev = e;
- e = e->next;
- }
- }
-
- free(entry);
- pthread_cond_destroy(&cond);
- }
- pthread_mutex_unlock(&thread_cache_lock);
-
- return (rb_thread_t *)th_area;
-}
-#endif
-
-static int
-use_cached_thread(rb_thread_t *th)
-{
- int result = 0;
-#if USE_THREAD_CACHE
- struct cached_thread_entry *entry;
-
- if (cached_thread_root) {
- pthread_mutex_lock(&thread_cache_lock);
- entry = cached_thread_root;
- {
- if (cached_thread_root) {
- cached_thread_root = entry->next;
- *entry->th_area = th;
- result = 1;
- }
- }
- if (result) {
- pthread_cond_signal(entry->cond);
- }
- pthread_mutex_unlock(&thread_cache_lock);
- }
-#endif
- return result;
-}
-
-#define CHECK_ERR(expr) \
- { int err; if ((err = (expr)) != 0) { rb_bug("err: %d - %s", err, #expr); }}
-
-static int
-native_thread_create(rb_thread_t *th)
-{
- int err = 0;
-
- if (use_cached_thread(th)) {
- thread_debug("create (use cached thread): %p\n", th);
- }
- else {
- pthread_attr_t attr;
- size_t stack_size = 512 * 1024; /* 512KB */
- size_t space;
-
-#ifdef PTHREAD_STACK_MIN
- if (stack_size < PTHREAD_STACK_MIN) {
- stack_size = PTHREAD_STACK_MIN * 2;
- }
-#endif
- space = stack_size/5;
- if (space > 1024*1024) space = 1024*1024;
- th->machine_stack_maxsize = stack_size - space;
-#ifdef __ia64
- th->machine_stack_maxsize /= 2;
- th->machine_register_stack_maxsize = th->machine_stack_maxsize;
-#endif
-
- CHECK_ERR(pthread_attr_init(&attr));
-
-#ifdef PTHREAD_STACK_MIN
- thread_debug("create - stack size: %lu\n", (unsigned long)stack_size);
- CHECK_ERR(pthread_attr_setstacksize(&attr, stack_size));
-#endif
-
- CHECK_ERR(pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED));
- CHECK_ERR(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));
-
- err = pthread_create(&th->thread_id, &attr, thread_start_func_1, th);
- thread_debug("create: %p (%d)", th, err);
- CHECK_ERR(pthread_attr_destroy(&attr));
-
- if (!err) {
- pthread_cond_init(&th->native_thread_data.sleep_cond, 0);
- }
- else {
- st_delete_wrap(th->vm->living_threads, th->self);
- th->status = THREAD_KILLED;
- rb_raise(rb_eThreadError, "can't create Thread (%d)", err);
- }
- }
- return err;
-}
-
-static void
-native_thread_join(pthread_t th)
-{
- int err = pthread_join(th, 0);
- if (err) {
- rb_raise(rb_eThreadError, "native_thread_join() failed (%d)", err);
- }
-}
-
-static void
-native_thread_apply_priority(rb_thread_t *th)
-{
- struct sched_param sp;
- int policy;
- int priority = 0 - th->priority;
- int max, min;
- pthread_getschedparam(th->thread_id, &policy, &sp);
- max = sched_get_priority_max(policy);
- min = sched_get_priority_min(policy);
-
- if (min < priority) {
- priority = max;
- }
- else if (max > priority) {
- priority = min;
- }
-
- sp.sched_priority = priority;
- pthread_setschedparam(th->thread_id, policy, &sp);
-}
-
-static void
-ubf_pthread_cond_signal(void *ptr)
-{
- rb_thread_t *th = (rb_thread_t *)ptr;
- thread_debug("ubf_pthread_cond_signal (%p)\n", th);
- pthread_cond_signal(&th->native_thread_data.sleep_cond);
-}
-
-#ifndef __CYGWIN__
-static void
-ubf_select_each(rb_thread_t *th)
-{
- thread_debug("ubf_select_each (%p)\n", (void *)th->thread_id);
- if (th) {
- pthread_kill(th->thread_id, SIGVTALRM);
- }
-}
-
-static void
-ubf_select(void *ptr)
-{
- rb_thread_t *th = (rb_thread_t *)ptr;
- add_signal_thread_list(th);
- ubf_select_each(th);
-}
-#else
-#define ubf_select 0
-#endif
-
-static void
-native_sleep(rb_thread_t *th, struct timeval *tv)
-{
- int prev_status = th->status;
- struct timespec ts;
- struct timeval tvn;
-
- if (tv) {
- gettimeofday(&tvn, NULL);
- ts.tv_sec = tvn.tv_sec + tv->tv_sec;
- ts.tv_nsec = (tvn.tv_usec + tv->tv_usec) * 1000;
- if (ts.tv_nsec >= 1000000000){
- ts.tv_sec += 1;
- ts.tv_nsec -= 1000000000;
- }
- }
-
- th->status = THREAD_STOPPED;
- pthread_cond_init(&th->native_thread_data.sleep_cond, 0);
-
- thread_debug("native_sleep %ld\n", tv ? tv->tv_sec : -1);
- GVL_UNLOCK_BEGIN();
- {
- pthread_mutex_lock(&th->interrupt_lock);
-
- if (th->interrupt_flag) {
- /* interrupted. return immediate */
- thread_debug("native_sleep: interrupted before sleep\n");
- }
- else {
- th->unblock_function = ubf_pthread_cond_signal;
- th->unblock_function_arg = th;
-
- if (tv == 0) {
- thread_debug("native_sleep: pthread_cond_wait start\n");
- pthread_cond_wait(&th->native_thread_data.sleep_cond,
- &th->interrupt_lock);
- thread_debug("native_sleep: pthread_cond_wait end\n");
- }
- else {
- int r;
- thread_debug("native_sleep: pthread_cond_timedwait start (%ld, %ld)\n",
- (unsigned long)ts.tv_sec, ts.tv_nsec);
- r = pthread_cond_timedwait(&th->native_thread_data.sleep_cond,
- &th->interrupt_lock, &ts);
- thread_debug("native_sleep: pthread_cond_timedwait end (%d)\n", r);
- }
- th->unblock_function = 0;
- th->unblock_function_arg = 0;
- }
- pthread_mutex_unlock(&th->interrupt_lock);
-
- th->status = prev_status;
- }
- GVL_UNLOCK_END();
- thread_debug("native_sleep done\n");
-}
-
-struct signal_thread_list {
- rb_thread_t *th;
- struct signal_thread_list *prev;
- struct signal_thread_list *next;
-};
-
-static struct signal_thread_list signal_thread_list_anchor = {
- 0, 0, 0,
-};
-
-#define FGLOCK(lock, body) do { \
- native_mutex_lock(lock); \
- { \
- body; \
- } \
- native_mutex_unlock(lock); \
-} while (0)
-
-#if 0 /* for debug */
-static void
-print_signal_list(char *str)
-{
- struct signal_thread_list *list =
- signal_thread_list_anchor.next;
- thread_debug("list (%s)> ", str);
- while(list){
- thread_debug("%p (%p), ", list->th, list->th->thread_id);
- list = list->next;
- }
- thread_debug("\n");
-}
-#endif
-
-static void
-add_signal_thread_list(rb_thread_t *th)
-{
- if (!th->native_thread_data.signal_thread_list) {
- FGLOCK(&signal_thread_list_lock, {
- struct signal_thread_list *list =
- malloc(sizeof(struct signal_thread_list));
-
- if (list == 0) {
- fprintf(stderr, "[FATAL] failed to allocate memory\n");
- exit(1);
- }
-
- list->th = th;
-
- list->prev = &signal_thread_list_anchor;
- list->next = signal_thread_list_anchor.next;
- if (list->next) {
- list->next->prev = list;
- }
- signal_thread_list_anchor.next = list;
- th->native_thread_data.signal_thread_list = list;
- });
- }
-}
-
-static void
-remove_signal_thread_list(rb_thread_t *th)
-{
- if (th->native_thread_data.signal_thread_list) {
- FGLOCK(&signal_thread_list_lock, {
- struct signal_thread_list *list =
- (struct signal_thread_list *)
- th->native_thread_data.signal_thread_list;
-
- list->prev->next = list->next;
- if (list->next) {
- list->next->prev = list->prev;
- }
- th->native_thread_data.signal_thread_list = 0;
- list->th = 0;
- free(list);
- });
- }
- else {
- /* */
- }
-}
-
-static pthread_t timer_thread_id;
-static void timer_thread_function(void);
-
-static void *
-thread_timer(void *dummy)
-{
- while (system_working) {
-#ifdef HAVE_NANOSLEEP
- struct timespec req, rem;
- req.tv_sec = 0;
- req.tv_nsec = 10 * 1000 * 1000; /* 10 ms */
- nanosleep(&req, &rem);
-#else
- struct timeval tv;
- tv.tv_sec = 0;
- tv.tv_usec = 10000; /* 10 ms */
- select(0, NULL, NULL, NULL, &tv);
-#endif
-#ifndef __CYGWIN__
- if (signal_thread_list_anchor.next) {
- FGLOCK(&signal_thread_list_lock, {
- struct signal_thread_list *list;
- list = signal_thread_list_anchor.next;
- while (list) {
- ubf_select_each(list->th);
- list = list->next;
- }
- });
- }
-#endif
- timer_thread_function();
- }
- return NULL;
-}
-
-static void
-rb_thread_create_timer_thread(void)
-{
- rb_enable_interrupt();
-
- if (!timer_thread_id) {
- pthread_attr_t attr;
- int err;
-
- pthread_attr_init(&attr);
-#ifdef PTHREAD_STACK_MIN
- pthread_attr_setstacksize(&attr, PTHREAD_STACK_MIN);
-#endif
- err = pthread_create(&timer_thread_id, &attr, thread_timer, 0);
- if (err != 0) {
- rb_bug("rb_thread_create_timer_thread: return non-zero (%d)", err);
- }
- }
-    rb_disable_interrupt(); /* only timer thread receives signal */
-}
-
-#endif /* THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION */
Index: eval_safe.ci
===================================================================
--- eval_safe.ci (revision 14363)
+++ eval_safe.ci (revision 14364)
@@ -1,111 +0,0 @@
-/* -*-c-*- */
-/*
- * This file is included by eval.c
- */
-
-/* safe-level:
- 0 - strings from streams/environment/ARGV are tainted (default)
- 1 - no dangerous operation by tainted value
- 2 - process/file operations prohibited
- 3 - all generated objects are tainted
- 4 - no global (non-tainted) variable modification/no direct output
-*/
-
-#define SAFE_LEVEL_MAX 4
-
-/* $SAFE accessor */
-
-int
-rb_safe_level(void)
-{
- return GET_THREAD()->safe_level;
-}
-
-void
-rb_set_safe_level_force(int safe)
-{
- GET_THREAD()->safe_level = safe;
-}
-
-void
-rb_set_safe_level(int level)
-{
- rb_thread_t *th = GET_THREAD();
-
- if (level > th->safe_level) {
- if (level > SAFE_LEVEL_MAX) {
- level = SAFE_LEVEL_MAX;
- }
- th->safe_level = level;
- }
-}
-
-static VALUE
-safe_getter(void)
-{
- return INT2NUM(rb_safe_level());
-}
-
-static void
-safe_setter(VALUE val)
-{
- int level = NUM2INT(val);
- rb_thread_t *th = GET_THREAD();
-
- if (level < th->safe_level) {
- rb_raise(rb_eSecurityError,
- "tried to downgrade safe level from %d to %d",
- th->safe_level, level);
- }
- if (level > SAFE_LEVEL_MAX) {
- level = SAFE_LEVEL_MAX;
- }
- th->safe_level = level;
-}
-
-void
-rb_secure(int level)
-{
- if (level <= rb_safe_level()) {
- if (rb_frame_callee()) {
- rb_raise(rb_eSecurityError, "Insecure operation `%s' at level %d",
- rb_id2name(rb_frame_callee()), rb_safe_level());
- }
- else {
- rb_raise(rb_eSecurityError, "Insecure operation at level %d",
- rb_safe_level());
- }
- }
-}
-
-void
-rb_secure_update(VALUE obj)
-{
- if (!OBJ_TAINTED(obj))
- rb_secure(4);
-}
-
-void
-rb_check_safe_obj(VALUE x)
-{
- if (rb_safe_level() > 0 && OBJ_TAINTED(x)) {
- if (rb_frame_callee()) {
- rb_raise(rb_eSecurityError, "Insecure operation - %s",
- rb_id2name(rb_frame_callee()));
- }
- else {
- rb_raise(rb_eSecurityError, "Insecure operation: -r");
- }
- }
- rb_secure(4);
-}
-
-void
-rb_check_safe_str(VALUE x)
-{
- rb_check_safe_obj(x);
- if (TYPE(x) != T_STRING) {
- rb_raise(rb_eTypeError, "wrong argument type %s (expected String)",
- rb_obj_classname(x));
- }
-}
Index: thread_win32.ci
===================================================================
--- thread_win32.ci (revision 14363)
+++ thread_win32.ci (revision 14364)
@@ -1,508 +0,0 @@
-/* -*-c-*- */
-/**********************************************************************
-
- thread_win32.ci -
-
- $Author$
- $Date$
-
- Copyright (C) 2004-2007 Koichi Sasada
-
-**********************************************************************/
-
-#ifdef THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
-
-#include <process.h>
-
-#define WIN32_WAIT_TIMEOUT 10 /* 10 ms */
-#undef Sleep
-
-#define native_thread_yield() Sleep(0)
-#define remove_signal_thread_list(th)
-
-static volatile DWORD ruby_native_thread_key = TLS_OUT_OF_INDEXES;
-
-static rb_thread_t *
-ruby_thread_from_native(void)
-{
- return TlsGetValue(ruby_native_thread_key);
-}
-
-static int
-ruby_thread_set_native(rb_thread_t *th)
-{
- return TlsSetValue(ruby_native_thread_key, th);
-}
-
-static void
-Init_native_thread(void)
-{
- rb_thread_t *th = GET_THREAD();
-
- ruby_native_thread_key = TlsAlloc();
- DuplicateHandle(GetCurrentProcess(),
- GetCurrentThread(),
- GetCurrentProcess(),
- &th->thread_id, 0, FALSE, DUPLICATE_SAME_ACCESS);
-
- th->native_thread_data.interrupt_event = CreateEvent(0, TRUE, FALSE, 0);
-
- thread_debug("initial thread (th: %p, thid: %p, event: %p)\n",
- th, GET_THREAD()->thread_id,
- th->native_thread_data.interrupt_event);
-}
-
-static void
-w32_error(void)
-{
- LPVOID lpMsgBuf;
- FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
- FORMAT_MESSAGE_FROM_SYSTEM |
- FORMAT_MESSAGE_IGNORE_INSERTS,
- NULL,
- GetLastError(),
- MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
- (LPTSTR) & lpMsgBuf, 0, NULL);
- rb_bug("%s", lpMsgBuf);
-}
-
-static void
-w32_set_event(HANDLE handle)
-{
- if (SetEvent(handle) == 0) {
- w32_error();
- }
-}
-
-static void
-w32_reset_event(HANDLE handle)
-{
- if (ResetEvent(handle) == 0) {
- w32_error();
- }
-}
-
-static int
-w32_wait_events(HANDLE *events, int count, DWORD timeout, rb_thread_t *th)
-{
- HANDLE *targets = events;
- HANDLE intr;
- DWORD ret;
-
- thread_debug(" w32_wait_events events:%p, count:%d, timeout:%ld, th:%p\n",
- events, count, timeout, th);
- if (th && (intr = th->native_thread_data.interrupt_event)) {
- w32_reset_event(intr);
- if (th->interrupt_flag) {
- w32_set_event(intr);
- }
-
- targets = ALLOCA_N(HANDLE, count + 1);
- memcpy(targets, events, sizeof(HANDLE) * count);
-
- targets[count++] = intr;
- thread_debug(" * handle: %p (count: %d, intr)\n", intr, count);
- }
-
- thread_debug(" WaitForMultipleObjects start (count: %d)\n", count);
- ret = WaitForMultipleObjects(count, targets, FALSE, timeout);
- thread_debug(" WaitForMultipleObjects end (ret: %d)\n", ret);
-
- if (ret == WAIT_OBJECT_0 + count - 1 && th) {
- errno = EINTR;
- }
- if (ret == -1 && THREAD_DEBUG) {
- int i;
- DWORD dmy;
- for (i = 0; i < count; i++) {
- thread_debug(" * error handle %d - %s\n", i,
- GetHandleInformation(targets[i], &dmy) ? "OK" : "NG");
- }
- }
- return ret;
-}
-
-static void ubf_handle(void *ptr);
-#define ubf_select ubf_handle
-
-int
-rb_w32_wait_events_blocking(HANDLE *events, int num, DWORD timeout)
-{
- return w32_wait_events(events, num, timeout, GET_THREAD());
-}
-
-int
-rb_w32_wait_events(HANDLE *events, int num, DWORD timeout)
-{
- int ret;
-
- BLOCKING_REGION(ret = rb_w32_wait_events_blocking(events, num, timeout),
- ubf_handle, GET_THREAD());
- return ret;
-}
-
-static void
-w32_close_handle(HANDLE handle)
-{
- if (CloseHandle(handle) == 0) {
- w32_error();
- }
-}
-
-static void
-w32_resume_thread(HANDLE handle)
-{
- if (ResumeThread(handle) == -1) {
- w32_error();
- }
-}
-
-#ifdef _MSC_VER
-#define HAVE__BEGINTHREADEX 1
-#else
-#undef HAVE__BEGINTHREADEX
-#endif
-
-#ifdef HAVE__BEGINTHREADEX
-#define start_thread (HANDLE)_beginthreadex
-typedef unsigned long (_stdcall *w32_thread_start_func)(void*);
-#else
-#define start_thread CreateThread
-typedef LPTHREAD_START_ROUTINE w32_thread_start_func;
-#endif
-
-static HANDLE
-w32_create_thread(DWORD stack_size, w32_thread_start_func func, void *val)
-{
- return start_thread(0, stack_size, func, val, CREATE_SUSPENDED, 0);
-}
-
-int
-rb_w32_sleep(unsigned long msec)
-{
- return w32_wait_events(0, 0, msec, GET_THREAD());
-}
-
-int WINAPI
-rb_w32_Sleep(unsigned long msec)
-{
- int ret;
-
- BLOCKING_REGION(ret = rb_w32_sleep(msec),
- ubf_handle, GET_THREAD());
- return ret;
-}
-
-static void
-native_sleep(rb_thread_t *th, struct timeval *tv)
-{
- DWORD msec;
- if (tv) {
- msec = tv->tv_sec * 1000 + tv->tv_usec / 1000;
- }
- else {
- msec = INFINITE;
- }
-
- GVL_UNLOCK_BEGIN();
- {
- DWORD ret;
- int status = th->status;
- th->status = THREAD_STOPPED;
- th->unblock_function = ubf_handle;
- th->unblock_function_arg = th;
- thread_debug("native_sleep start (%d)\n", (int)msec);
- ret = w32_wait_events(0, 0, msec, th);
- thread_debug("native_sleep done (%d)\n", ret);
- th->unblock_function = 0;
- th->unblock_function_arg = 0;
- th->status = status;
- }
- GVL_UNLOCK_END();
-}
-
-int
-native_mutex_lock(rb_thread_lock_t *lock)
-{
-#if USE_WIN32_MUTEX
- DWORD result;
- while (1) {
- thread_debug("native_mutex_lock: %p\n", *lock);
- result = w32_wait_events(&*lock, 1, INFINITE, 0);
- switch (result) {
- case WAIT_OBJECT_0:
- /* get mutex object */
- thread_debug("acquire mutex: %p\n", *lock);
- return 0;
- case WAIT_OBJECT_0 + 1:
- /* interrupt */
- errno = EINTR;
- thread_debug("acquire mutex interrupted: %p\n", *lock);
- return 0;
- case WAIT_TIMEOUT:
- thread_debug("timeout mutex: %p\n", *lock);
- break;
- case WAIT_ABANDONED:
- rb_bug("win32_mutex_lock: WAIT_ABANDONED");
- break;
- default:
- rb_bug("win32_mutex_lock: unknown result (%d)", result);
- break;
- }
- }
- return 0;
-#else
- EnterCriticalSection(lock);
- return 0;
-#endif
-}
-
-int
-native_mutex_unlock(rb_thread_lock_t *lock)
-{
-#if USE_WIN32_MUTEX
- thread_debug("release mutex: %p\n", *lock);
- return ReleaseMutex(*lock);
-#else
- LeaveCriticalSection(lock);
- return 0;
-#endif
-}
-
-int
-native_mutex_trylock(rb_thread_lock_t *lock)
-{
-#if USE_WIN32_MUTEX
- int result;
- thread_debug("native_mutex_trylock: %p\n", *lock);
- result = w32_wait_events(&*lock, 1, 1, 0);
- thread_debug("native_mutex_trylock result: %d\n", result);
- switch (result) {
- case WAIT_OBJECT_0:
- return 0;
- case WAIT_TIMEOUT:
- return EBUSY;
- }
- return EINVAL;
-#else
- return TryEnterCriticalSection(lock) == 0;
-#endif
-}
-
-void
-native_mutex_initialize(rb_thread_lock_t *lock)
-{
-#if USE_WIN32_MUTEX
- *lock = CreateMutex(NULL, FALSE, NULL);
- if (*lock == NULL) {
- w32_error();
- }
- /* thread_debug("initialize mutex: %p\n", *lock); */
-#else
- InitializeCriticalSection(lock);
-#endif
-}
-
-void
-native_mutex_destroy(rb_thread_lock_t *lock)
-{
-#if USE_WIN32_MUTEX
- w32_close_handle(lock);
-#else
- DeleteCriticalSection(lock);
-#endif
-}
-
-struct cond_event_entry {
- struct cond_event_entry* next;
- HANDLE event;
-};
-
-struct rb_thread_cond_struct {
- struct cond_event_entry *next;
- struct cond_event_entry *last;
-};
-
-void
-native_cond_signal(rb_thread_cond_t *cond)
-{
- /* cond is guarded by mutex */
- struct cond_event_entry *e = cond->next;
-
- if (e) {
- cond->next = e->next;
- SetEvent(e->event);
- }
- else {
- rb_bug("native_cond_signal: no pending threads");
- }
-}
-
-void
-native_cond_broadcast(rb_thread_cond_t *cond)
-{
- /* cond is guarded by mutex */
- struct cond_event_entry *e = cond->next;
- cond->next = 0;
-
- while (e) {
- SetEvent(e->event);
- e = e->next;
- }
-}
-
-void
-native_cond_wait(rb_thread_cond_t *cond, rb_thread_lock_t *mutex)
-{
- DWORD r;
- struct cond_event_entry entry;
-
- entry.next = 0;
- entry.event = CreateEvent(0, FALSE, FALSE, 0);
-
- /* cond is guarded by mutex */
- if (cond->next) {
- cond->last->next = &entry;
- cond->last = &entry;
- }
- else {
- cond->next = &entry;
- cond->last = &entry;
- }
-
- native_mutex_unlock(mutex);
- {
- r = WaitForSingleObject(entry.event, INFINITE);
- if (r != WAIT_OBJECT_0) {
- rb_bug("native_cond_wait: WaitForSingleObject returns %d", r);
- }
- }
- native_mutex_lock(mutex);
-
- w32_close_handle(entry.event);
-}
-
-void
-native_cond_initialize(rb_thread_cond_t *cond)
-{
- cond->next = 0;
- cond->last = 0;
-}
-
-void
-native_cond_destroy(rb_thread_cond_t *cond)
-{
- /* */
-}
-
-static void
-native_thread_destroy(rb_thread_t *th)
-{
- HANDLE intr = th->native_thread_data.interrupt_event;
- thread_debug("close handle - intr: %p, thid: %p\n", intr, th->thread_id);
- th->native_thread_data.interrupt_event = 0;
- w32_close_handle(intr);
-}
-
-static unsigned long _stdcall
-thread_start_func_1(void *th_ptr)
-{
- rb_thread_t *th = th_ptr;
- VALUE stack_start;
- volatile HANDLE thread_id = th->thread_id;
-
- th->native_thread_data.interrupt_event = CreateEvent(0, TRUE, FALSE, 0);
-
- /* run */
- thread_debug("thread created (th: %p, thid: %p, event: %p)\n", th,
- th->thread_id, th->native_thread_data.interrupt_event);
- thread_start_func_2(th, &stack_start, 0);
-
- w32_close_handle(thread_id);
- thread_debug("thread deleted (th: %p)\n", th);
- return 0;
-}
-
-extern size_t rb_gc_stack_maxsize;
-
-static int
-native_thread_create(rb_thread_t *th)
-{
- size_t stack_size = 4 * 1024; /* 4KB */
- th->thread_id = w32_create_thread(stack_size, thread_start_func_1, th);
-
- th->machine_stack_maxsize = rb_gc_stack_maxsize; /* not tested. */
-
- if ((th->thread_id) == 0) {
- st_delete_wrap(th->vm->living_threads, th->self);
- rb_raise(rb_eThreadError, "can't create Thread (%d)", errno);
- }
-
- w32_resume_thread(th->thread_id);
-
- if (THREAD_DEBUG) {
- Sleep(0);
- thread_debug("create: (th: %p, thid: %p, intr: %p), stack size: %d\n",
- th, th->thread_id,
- th->native_thread_data.interrupt_event, stack_size);
- }
- return 0;
-}
-
-static void
-native_thread_join(HANDLE th)
-{
- w32_wait_events(&th, 1, 0, 0);
-}
-
-static void
-native_thread_apply_priority(rb_thread_t *th)
-{
- int priority = th->priority;
- if (th->priority > 0) {
- priority = THREAD_PRIORITY_ABOVE_NORMAL;
- }
- else if (th->priority < 0) {
- priority = THREAD_PRIORITY_BELOW_NORMAL;
- }
- else {
- priority = THREAD_PRIORITY_NORMAL;
- }
-
- SetThreadPriority(th->thread_id, priority);
-}
-
-static void
-ubf_handle(void *ptr)
-{
- rb_thread_t *th = (rb_thread_t *)ptr;
- thread_debug("ubf_handle: %p\n", th);
- w32_set_event(th->native_thread_data.interrupt_event);
-}
-
-static void timer_thread_function(void);
-
-static HANDLE timer_thread_id = 0;
-
-static unsigned long _stdcall
-timer_thread_func(void *dummy)
-{
- thread_debug("timer_thread\n");
- while (system_working) {
- Sleep(WIN32_WAIT_TIMEOUT);
- timer_thread_function();
- }
- thread_debug("timer killed\n");
- return 0;
-}
-
-void
-rb_thread_create_timer_thread(void)
-{
- if (timer_thread_id == 0) {
- timer_thread_id = w32_create_thread(1024, timer_thread_func, 0);
- w32_resume_thread(timer_thread_id);
- }
-}
-
-#endif /* THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION */
Index: eval_error.ci
===================================================================
--- eval_error.ci (revision 14363)
+++ eval_error.ci (revision 14364)
@@ -1,291 +0,0 @@
-/* -*-c-*- */
-/*
- * included by eval.c
- */
-
-const char *
-rb_sourcefile(void)
-{
- rb_thread_t *th = GET_THREAD();
- rb_control_frame_t *cfp = vm_get_ruby_level_cfp(th, th->cfp);
-
- if (cfp) {
- return RSTRING_PTR(cfp->iseq->filename);
- }
- else {
- return 0;
- }
-}
-
-int
-rb_sourceline(void)
-{
- rb_thread_t *th = GET_THREAD();
- rb_control_frame_t *cfp = vm_get_ruby_level_cfp(th, th->cfp);
-
- if (cfp) {
- return vm_get_sourceline(cfp);
- }
- else {
- return 0;
- }
-}
-
-static void
-warn_printf(const char *fmt, ...)
-{
- char buf[BUFSIZ];
- va_list args;
-
- va_init_list(args, fmt);
- vsnprintf(buf, BUFSIZ, fmt, args);
- va_end(args);
- rb_write_error(buf);
-}
-
-#define warn_print(x) rb_write_error(x)
-#define warn_print2(x,l) rb_write_error2(x,l)
-
-static void
-error_pos(void)
-{
- const char *sourcefile = rb_sourcefile();
- int sourceline = rb_sourceline();
-
- if (sourcefile) {
- if (sourceline == 0) {
- warn_printf("%s", sourcefile);
- }
- else if (rb_frame_callee()) {
- warn_printf("%s:%d:in `%s'", sourcefile, sourceline,
- rb_id2name(rb_frame_callee()));
- }
- else {
- warn_printf("%s:%d", sourcefile, sourceline);
- }
- }
-}
-
-VALUE rb_check_backtrace(VALUE);
-
-static VALUE
-get_backtrace(VALUE info)
-{
- if (NIL_P(info))
- return Qnil;
- info = rb_funcall(info, rb_intern("backtrace"), 0);
- if (NIL_P(info))
- return Qnil;
- return rb_check_backtrace(info);
-}
-
-static void
-set_backtrace(VALUE info, VALUE bt)
-{
- rb_funcall(info, rb_intern("set_backtrace"), 1, bt);
-}
-
-static void
-error_print(void)
-{
- VALUE errat = Qnil; /* OK */
- VALUE errinfo = GET_THREAD()->errinfo;
- volatile VALUE eclass, e;
- char *einfo;
- long elen;
-
- if (NIL_P(errinfo))
- return;
-
- PUSH_TAG();
- if (EXEC_TAG() == 0) {
- errat = get_backtrace(errinfo);
- }
- else {
- errat = Qnil;
- }
- if (EXEC_TAG())
- goto error;
- if (NIL_P(errat)) {
- const char *file = rb_sourcefile();
- int line = rb_sourceline();
- if (file)
- warn_printf("%s:%d", file, line);
- else
- warn_printf("%d", line);
- }
- else if (RARRAY_LEN(errat) == 0) {
- error_pos();
- }
- else {
- VALUE mesg = RARRAY_PTR(errat)[0];
-
- if (NIL_P(mesg))
- error_pos();
- else {
- warn_print2(RSTRING_PTR(mesg), RSTRING_LEN(mesg));
- }
- }
-
- eclass = CLASS_OF(errinfo);
- if (EXEC_TAG() == 0) {
- e = rb_funcall(errinfo, rb_intern("message"), 0, 0);
- StringValue(e);
- einfo = RSTRING_PTR(e);
- elen = RSTRING_LEN(e);
- }
- else {
- einfo = "";
- elen = 0;
- }
- if (EXEC_TAG())
- goto error;
- if (eclass == rb_eRuntimeError && elen == 0) {
- warn_print(": unhandled exception\n");
- }
- else {
- VALUE epath;
-
- epath = rb_class_name(eclass);
- if (elen == 0) {
- warn_print(": ");
- warn_print2(RSTRING_PTR(epath), RSTRING_LEN(epath));
- warn_print("\n");
- }
- else {
- char *tail = 0;
- long len = elen;
-
- if (RSTRING_PTR(epath)[0] == '#')
- epath = 0;
- if ((tail = memchr(einfo, '\n', elen)) != 0) {
- len = tail - einfo;
- tail++; /* skip newline */
- }
- warn_print(": ");
- warn_print2(einfo, len);
- if (epath) {
- warn_print(" (");
- warn_print2(RSTRING_PTR(epath), RSTRING_LEN(epath));
- warn_print(")\n");
- }
- if (tail) {
- warn_print2(tail, elen - len - 1);
- }
- }
- }
-
- if (!NIL_P(errat)) {
- long i;
- long len = RARRAY_LEN(errat);
- VALUE *ptr = RARRAY_PTR(errat);
- int skip = eclass == rb_eSysStackError;
-
-#define TRACE_MAX (TRACE_HEAD+TRACE_TAIL+5)
-#define TRACE_HEAD 8
-#define TRACE_TAIL 5
-
- for (i = 1; i < len; i++) {
- if (TYPE(ptr[i]) == T_STRING) {
- warn_printf("\tfrom %s\n", RSTRING_PTR(ptr[i]));
- }
- if (skip && i == TRACE_HEAD && len > TRACE_MAX) {
- warn_printf("\t ... %ld levels...\n",
- len - TRACE_HEAD - TRACE_TAIL);
- i = len - TRACE_TAIL;
- }
- }
- }
- error:
- POP_TAG();
-}
-
-void
-ruby_error_print(void)
-{
- error_print();
-}
-
-void
-rb_print_undef(VALUE klass, ID id, int scope)
-{
- char *v;
-
- switch (scope) {
- default:
- case NOEX_PUBLIC: v = ""; break;
- case NOEX_PRIVATE: v = " private"; break;
- case NOEX_PROTECTED: v = " protected"; break;
- }
- rb_name_error(id, "undefined%s method `%s' for %s `%s'", v,
- rb_id2name(id),
- (TYPE(klass) == T_MODULE) ? "module" : "class",
- rb_class2name(klass));
-}
-
-static int
-sysexit_status(VALUE err)
-{
- VALUE st = rb_iv_get(err, "status");
- return NUM2INT(st);
-}
-
-static int
-error_handle(int ex)
-{
- int status = EXIT_FAILURE;
- rb_thread_t *th = GET_THREAD();
-
- if (thread_set_raised(th))
- return EXIT_FAILURE;
- switch (ex & TAG_MASK) {
- case 0:
- status = EXIT_SUCCESS;
- break;
-
- case TAG_RETURN:
- error_pos();
- warn_print(": unexpected return\n");
- break;
- case TAG_NEXT:
- error_pos();
- warn_print(": unexpected next\n");
- break;
- case TAG_BREAK:
- error_pos();
- warn_print(": unexpected break\n");
- break;
- case TAG_REDO:
- error_pos();
- warn_print(": unexpected redo\n");
- break;
- case TAG_RETRY:
- error_pos();
- warn_print(": retry outside of rescue clause\n");
- break;
- case TAG_THROW:
- /* TODO: fix me */
- error_pos();
- warn_printf(": unexpected throw\n");
- break;
- case TAG_RAISE:
- case TAG_FATAL: {
- VALUE errinfo = GET_THREAD()->errinfo;
- if (rb_obj_is_kind_of(errinfo, rb_eSystemExit)) {
- status = sysexit_status(errinfo);
- }
- else if (rb_obj_is_instance_of(errinfo, rb_eSignal)) {
- /* no message when exiting by signal */
- }
- else {
- error_print();
- }
- break;
- }
- default:
- rb_bug("Unknown longjmp status %d", ex);
- break;
- }
- thread_reset_raised(th);
- return status;
-}
Index: insnhelper.ci
===================================================================
--- insnhelper.ci (revision 14363)
+++ insnhelper.ci (revision 14364)
@@ -1,1456 +0,0 @@
-/* -*-c-*- */
-/**********************************************************************
-
- insnhelper.ci - instruction helper functions.
-
- $Author$
- $Date$
-
- Copyright (C) 2007 Koichi Sasada
-
-**********************************************************************/
-
-/* finish iseq array */
-#include "insns.inc"
-
-/* control stack frame */
-
-
-#ifndef INLINE
-#define INLINE inline
-#endif
-
-static inline rb_control_frame_t *
-vm_push_frame(rb_thread_t *th, rb_iseq_t *iseq, VALUE type,
- VALUE self, VALUE specval, VALUE *pc,
- VALUE *sp, VALUE *lfp, int local_size)
-{
- VALUE *dfp;
- rb_control_frame_t *cfp;
- int i;
-
- /* nil initialize */
- for (i=0; i < local_size; i++) {
- *sp = Qnil;
- sp++;
- }
-
- /* set special val */
- *sp = GC_GUARDED_PTR(specval);
- dfp = sp;
-
- if (lfp == 0) {
- lfp = sp;
- }
-
- cfp = th->cfp = th->cfp - 1;
- cfp->pc = pc;
- cfp->sp = sp + 1;
- cfp->bp = sp + 1;
- cfp->iseq = iseq;
- cfp->flag = type;
- cfp->self = self;
- cfp->lfp = lfp;
- cfp->dfp = dfp;
- cfp->proc = 0;
-
-#define COLLECT_PROFILE 0
-#if COLLECT_PROFILE
- cfp->prof_time_self = clock();
- cfp->prof_time_chld = 0;
-#endif
-
- if (VMDEBUG == 2) {
- SDR();
- }
-
- return cfp;
-}
-
-static inline void
-vm_pop_frame(rb_thread_t *th)
-{
-#if COLLECT_PROFILE
- rb_control_frame_t *cfp = th->cfp;
-
- if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
- VALUE current_time = clock();
- rb_control_frame_t *cfp = th->cfp;
- cfp->prof_time_self = current_time - cfp->prof_time_self;
- (cfp+1)->prof_time_chld += cfp->prof_time_self;
-
- cfp->iseq->profile.count++;
- cfp->iseq->profile.time_cumu = cfp->prof_time_self;
- cfp->iseq->profile.time_self = cfp->prof_time_self - cfp->prof_time_chld;
- }
- else if (0 /* c method? */) {
-
- }
-#endif
- th->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(th->cfp);
-
- if (VMDEBUG == 2) {
- SDR();
- }
-}
-
-/* method dispatch */
-
-static inline int
-vm_callee_setup_arg(rb_thread_t *th, rb_iseq_t *iseq,
- int argc, VALUE *argv, rb_block_t **block)
-{
- const int m = iseq->argc;
- const int orig_argc = argc;
-
- if (LIKELY(iseq->arg_simple & 0x01)) {
- /* simple check */
- if (argc != m) {
- rb_raise(rb_eArgError, "wrong number of arguments (%d for %d)",
- argc, m);
- }
- return 0;
- }
- else {
- VALUE * const dst = argv;
- int opt_pc = 0;
- th->mark_stack_len = argc + iseq->arg_size;
-
- /* mandatory */
- if (argc < (m + iseq->arg_post_len)) { /* check with post arg */
- rb_raise(rb_eArgError, "wrong number of arguments (%d for %d)",
- argc, m + iseq->arg_post_len);
- }
-
- argv += m;
- argc -= m;
-
- /* post arguments */
- if (iseq->arg_post_len) {
- if (!(orig_argc < iseq->arg_post_start)) {
- VALUE *new_argv = ALLOCA_N(VALUE, argc);
- MEMCPY(new_argv, argv, VALUE, argc);
- argv = new_argv;
- }
-
- MEMCPY(&dst[iseq->arg_post_start], &argv[argc -= iseq->arg_post_len],
- VALUE, iseq->arg_post_len);
- }
-
- /* opt arguments */
- if (iseq->arg_opts) {
- const int opts = iseq->arg_opts - 1 /* no opt */;
-
- if (iseq->arg_rest == -1 && argc > opts) {
- rb_raise(rb_eArgError, "wrong number of arguments (%d for %d)",
- orig_argc, m + opts + iseq->arg_post_len);
- }
-
- if (argc > opts) {
- argc -= opts;
- argv += opts;
- opt_pc = iseq->arg_opt_table[opts]; /* no opt */
- }
- else {
- int i;
- for (i = argc; i<opts; i++) {
- dst[i + m] = Qnil;
- }
- opt_pc = iseq->arg_opt_table[argc];
- argc = 0;
- }
- }
-
- /* rest arguments */
- if (iseq->arg_rest != -1) {
- dst[iseq->arg_rest] = rb_ary_new4(argc, argv);
- argc = 0;
- }
-
- /* block arguments */
- if (block && iseq->arg_block != -1) {
- VALUE blockval = Qnil;
- rb_block_t * const blockptr = *block;
-
- if (argc != 0) {
- rb_raise(rb_eArgError, "wrong number of arguments (%d for %d)",
- orig_argc, m + iseq->arg_post_len);
- }
-
- if (blockptr) {
- /* make Proc object */
- if (blockptr->proc == 0) {
- rb_proc_t *proc;
-
- blockval = vm_make_proc(th, th->cfp, blockptr);
-
- GetProcPtr(blockval, proc);
- *block = &proc->block;
- }
- else {
- blockval = blockptr->proc;
- }
- }
-
- dst[iseq->arg_block] = blockval; /* Proc or nil */
- }
-
- th->mark_stack_len = 0;
- return opt_pc;
- }
-}
-
-static inline int
-caller_setup_args(rb_thread_t *th, rb_control_frame_t *cfp, VALUE flag,
- int argc, rb_iseq_t *blockiseq, rb_block_t **block)
-{
- rb_block_t *blockptr = 0;
-
- if (block) {
- if (flag & VM_CALL_ARGS_BLOCKARG_BIT) {
- rb_proc_t *po;
- VALUE proc;
-
- proc = *(--cfp->sp);
-
- if (proc != Qnil) {
- if (!rb_obj_is_proc(proc)) {
- VALUE b = rb_check_convert_type(proc, T_DATA, "Proc", "to_proc");
- if (NIL_P(b)) {
- rb_raise(rb_eTypeError,
- "wrong argument type %s (expected Proc)",
- rb_obj_classname(proc));
- }
- proc = b;
- }
- GetProcPtr(proc, po);
- blockptr = &po->block;
- RUBY_VM_GET_BLOCK_PTR_IN_CFP(cfp)->proc = proc;
- *block = blockptr;
- }
- }
- else if (blockiseq) {
- blockptr = RUBY_VM_GET_BLOCK_PTR_IN_CFP(cfp);
- blockptr->iseq = blockiseq;
- blockptr->proc = 0;
- *block = blockptr;
- }
- }
-
- /* expand top of stack? */
- if (flag & VM_CALL_ARGS_SPLAT_BIT) {
- VALUE ary = *(cfp->sp - 1);
- VALUE *ptr;
- int i;
- VALUE tmp = rb_check_convert_type(ary, T_ARRAY, "Array", "to_a");
-
- if (NIL_P(tmp)) {
- /* do nothing */
- }
- else {
- int len = RARRAY_LEN(tmp);
- ptr = RARRAY_PTR(tmp);
- cfp->sp -= 1;
-
- CHECK_STACK_OVERFLOW(cfp, len);
-
- for (i = 0; i < len; i++) {
- *cfp->sp++ = ptr[i];
- }
- argc += i-1;
- }
- }
-
- return argc;
-}
-
-static inline VALUE
-call_cfunc(VALUE (*func)(), VALUE recv, int len, int argc, const VALUE *argv)
-{
- /* printf("len: %d, argc: %d\n", len, argc); */
-
- if (len >= 0 && argc != len) {
- rb_raise(rb_eArgError, "wrong number of arguments(%d for %d)",
- argc, len);
- }
-
- switch (len) {
- case -2:
- return (*func) (recv, rb_ary_new4(argc, argv));
- break;
- case -1:
- return (*func) (argc, argv, recv);
- break;
- case 0:
- return (*func) (recv);
- break;
- case 1:
- return (*func) (recv, argv[0]);
- break;
- case 2:
- return (*func) (recv, argv[0], argv[1]);
- break;
- case 3:
- return (*func) (recv, argv[0], argv[1], argv[2]);
- break;
- case 4:
- return (*func) (recv, argv[0], argv[1], argv[2], argv[3]);
- break;
- case 5:
- return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4]);
- break;
- case 6:
- return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
- argv[5]);
- break;
- case 7:
- return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
- argv[5], argv[6]);
- break;
- case 8:
- return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
- argv[5], argv[6], argv[7]);
- break;
- case 9:
- return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
- argv[5], argv[6], argv[7], argv[8]);
- break;
- case 10:
- return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
- argv[5], argv[6], argv[7], argv[8], argv[9]);
- break;
- case 11:
- return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
- argv[5], argv[6], argv[7], argv[8], argv[9],
- argv[10]);
- break;
- case 12:
- return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
- argv[5], argv[6], argv[7], argv[8], argv[9],
- argv[10], argv[11]);
- break;
- case 13:
- return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
- argv[5], argv[6], argv[7], argv[8], argv[9], argv[10],
- argv[11], argv[12]);
- break;
- case 14:
- return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
- argv[5], argv[6], argv[7], argv[8], argv[9], argv[10],
- argv[11], argv[12], argv[13]);
- break;
- case 15:
- return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
- argv[5], argv[6], argv[7], argv[8], argv[9], argv[10],
- argv[11], argv[12], argv[13], argv[14]);
- break;
- default:
- rb_raise(rb_eArgError, "too many arguments(%d)", len);
- break;
- }
- return Qnil; /* not reached */
-}
-
-static inline VALUE
-vm_call_cfunc(rb_thread_t *th, rb_control_frame_t *reg_cfp, int num,
- ID id, VALUE recv, VALUE klass, VALUE flag,
- NODE *mn, rb_block_t *blockptr)
-{
- VALUE val;
-
- EXEC_EVENT_HOOK(th, RUBY_EVENT_C_CALL, recv, id, klass);
- {
- rb_control_frame_t *cfp =
- vm_push_frame(th, 0, FRAME_MAGIC_CFUNC | (flag << FRAME_MAGIC_MASK_BITS),
- recv, (VALUE) blockptr, 0, reg_cfp->sp, 0, 1);
-
- cfp->method_id = id;
- cfp->method_class = klass;
-
- reg_cfp->sp -= num + 1;
-
- val = call_cfunc(mn->nd_cfnc, recv, mn->nd_argc, num, reg_cfp->sp + 1);
-
- if (reg_cfp != th->cfp + 1) {
- rb_bug("cfp consistency error - send");
- }
- vm_pop_frame(th);
- }
- EXEC_EVENT_HOOK(th, RUBY_EVENT_C_RETURN, recv, id, klass);
-
- return val;
-}
-
-static int
-vm_cfunc_flags(rb_control_frame_t *cfp)
-{
- if (RUBYVM_CFUNC_FRAME_P(cfp))
- return cfp->flag >> FRAME_MAGIC_MASK_BITS;
- return 0;
-}
-
-static inline VALUE
-vm_call_bmethod(rb_thread_t *th, ID id, VALUE procval, VALUE recv,
- VALUE klass, int argc, VALUE *argv, rb_block_t *blockptr)
-{
- rb_control_frame_t *cfp = th->cfp;
- rb_proc_t *proc;
- VALUE val;
-
- /* control block frame */
- (cfp-2)->method_id = id;
- (cfp-2)->method_class = klass;
-
- GetProcPtr(procval, proc);
- val = vm_invoke_proc(th, proc, recv, argc, argv, blockptr);
- return val;
-}
-
-static inline VALUE
-vm_method_missing(rb_thread_t *th, ID id, VALUE recv, int num,
- rb_block_t *blockptr, int opt)
-{
- rb_control_frame_t *reg_cfp = th->cfp;
- VALUE *argv = STACK_ADDR_FROM_TOP(num + 1);
- VALUE val;
- argv[0] = ID2SYM(id);
- th->method_missing_reason = opt;
- th->passed_block = blockptr;
- val = rb_funcall2(recv, idMethodMissing, num + 1, argv);
- POPN(num + 1);
- return val;
-}
-
-static inline void
-vm_setup_method(rb_thread_t *th, rb_control_frame_t *cfp,
- int argc, rb_block_t *blockptr, VALUE flag,
- VALUE iseqval, VALUE recv, VALUE klass)
-{
- rb_iseq_t *iseq;
- int opt_pc, i;
- VALUE *sp, *rsp = cfp->sp - argc;
-
- /* TODO: eliminate it */
- GetISeqPtr(iseqval, iseq);
- opt_pc = vm_callee_setup_arg(th, iseq, argc, rsp, &blockptr);
-
- /* stack overflow check */
- CHECK_STACK_OVERFLOW(cfp, iseq->stack_max);
-
- sp = rsp + iseq->arg_size;
-
- if (LIKELY(!(flag & VM_CALL_TAILCALL_BIT))) {
- if (0) printf("local_size: %d, arg_size: %d\n",
- iseq->local_size, iseq->arg_size);
-
- /* clear local variables */
- for (i = 0; i < iseq->local_size - iseq->arg_size; i++) {
- *sp++ = Qnil;
- }
-
- vm_push_frame(th, iseq,
- FRAME_MAGIC_METHOD, recv, (VALUE) blockptr,
- iseq->iseq_encoded + opt_pc, sp, 0, 0);
-
- cfp->sp = rsp - 1 /* recv */;
- }
- else {
- VALUE *p_rsp;
- cfp = ++th->cfp; /* pop cf */
- p_rsp = th->cfp->sp;
-
- /* copy arguments */
- for (i=0; i < (sp - rsp); i++) {
- p_rsp[i] = rsp[i];
- }
-
- sp -= rsp - p_rsp;
-
- /* clear local variables */
- for (i = 0; i < iseq->local_size - iseq->arg_size; i++) {
- *sp++ = Qnil;
- }
-
- vm_push_frame(th, iseq,
- FRAME_MAGIC_METHOD, recv, (VALUE) blockptr,
- iseq->iseq_encoded + opt_pc, sp, 0, 0);
- }
-}
-
-static inline VALUE
-vm_call_method(rb_thread_t *th, rb_control_frame_t *cfp,
- int num, rb_block_t *blockptr, VALUE flag,
- ID id, NODE *mn, VALUE recv, VALUE klass)
-{
- VALUE val;
-
- start_method_dispatch:
-
- if ((mn != 0)) {
- if ((mn->nd_noex == 0)) {
- /* dispatch method */
- NODE *node;
-
- normal_method_dispatch:
-
- node = mn->nd_body;
-
- switch (nd_type(node)) {
- case RUBY_VM_METHOD_NODE:{
- vm_setup_method(th, cfp, num, blockptr, flag, (VALUE)node->nd_body, recv, klass);
- return Qundef;
- }
- case NODE_CFUNC:{
- val = vm_call_cfunc(th, cfp, num, id, recv, mn->nd_clss, flag, node, blockptr);
- break;
- }
- case NODE_ATTRSET:{
- val = rb_ivar_set(recv, node->nd_vid, *(cfp->sp - 1));
- cfp->sp -= 2;
- break;
- }
- case NODE_IVAR:{
- val = rb_ivar_get(recv, node->nd_vid);
- cfp->sp -= 1;
- break;
- }
- case NODE_BMETHOD:{
- VALUE *argv = cfp->sp - num;
- val = vm_call_bmethod(th, id, node->nd_cval, recv, klass, num, argv, blockptr);
- cfp->sp += - num - 1;
- break;
- }
- case NODE_ZSUPER:{
- klass = RCLASS_SUPER(mn->nd_clss);
- mn = rb_method_node(klass, id);
-
- if (mn != 0) {
- goto normal_method_dispatch;
- }
- else {
- goto start_method_dispatch;
- }
- }
- default:{
- printf("node: %s\n", ruby_node_name(nd_type(node)));
- rb_bug("eval_invoke_method: unreachable");
- /* unreachable */
- break;
- }
- }
- }
- else {
- int noex_safe;
-
- if (!(flag & VM_CALL_FCALL_BIT) &&
- (mn->nd_noex & NOEX_MASK) & NOEX_PRIVATE) {
- int stat = NOEX_PRIVATE;
-
- if (flag & VM_CALL_VCALL_BIT) {
- stat |= NOEX_VCALL;
- }
- val = vm_method_missing(th, id, recv, num, blockptr, stat);
- }
- else if (((mn->nd_noex & NOEX_MASK) & NOEX_PROTECTED) &&
- !(flag & VM_CALL_SEND_BIT)) {
- VALUE defined_class = mn->nd_clss;
-
- if (TYPE(defined_class) == T_ICLASS) {
- defined_class = RBASIC(defined_class)->klass;
- }
-
- if (!rb_obj_is_kind_of(cfp->self, rb_class_real(defined_class))) {
- val = vm_method_missing(th, id, recv, num, blockptr, NOEX_PROTECTED);
- }
- else {
- goto normal_method_dispatch;
- }
- }
- else if ((noex_safe = NOEX_SAFE(mn->nd_noex)) > th->safe_level &&
- (noex_safe > 2)) {
- rb_raise(rb_eSecurityError, "calling insecure method: %s", rb_id2name(id));
- }
- else {
- goto normal_method_dispatch;
- }
- }
- }
- else {
- /* method missing */
- if (id == idMethodMissing) {
- rb_bug("method missing");
- }
- else {
- int stat = 0;
- if (flag & VM_CALL_VCALL_BIT) {
- stat |= NOEX_VCALL;
- }
- if (flag & VM_CALL_SUPER_BIT) {
- stat |= NOEX_SUPER;
- }
- val = vm_method_missing(th, id, recv, num, blockptr, stat);
- }
- }
-
- RUBY_VM_CHECK_INTS();
- return val;
-}
-
-static inline void
-vm_send_optimize(rb_control_frame_t *reg_cfp,
- NODE **mn, rb_num_t *flag, rb_num_t *num, ID *id, VALUE klass)
-{
- if (*mn && nd_type((*mn)->nd_body) == NODE_CFUNC) {
- NODE *node = (*mn)->nd_body;
- extern VALUE rb_f_send(int argc, VALUE *argv, VALUE recv);
-
- if (node->nd_cfnc == rb_f_send) {
- int i = *num - 1;
- VALUE sym = TOPN(i);
- *id = SYMBOL_P(sym) ? SYM2ID(sym) : rb_to_id(sym);
-
- /* shift arguments */
- if (i > 0) {
- MEMMOVE(&TOPN(i), &TOPN(i-1), VALUE, i);
- }
-
- *mn = rb_method_node(klass, *id);
- *num -= 1;
- DEC_SP(1);
- *flag |= VM_CALL_FCALL_BIT;
- }
- }
-}
-
-/* yield */
-
-static inline int
-block_proc_is_lambda(VALUE procval)
-{
- rb_proc_t *proc;
-
- if (procval) {
- GetProcPtr(procval, proc);
- return proc->is_lambda;
- }
- else {
- return 0;
- }
-}
-
-static inline VALUE
-vm_yield_with_cfunc(rb_thread_t *th, rb_block_t *block,
- VALUE self, int argc, VALUE *argv)
-{
- NODE *ifunc = (NODE *) block->iseq;
- VALUE val;
- VALUE arg;
- int lambda = block_proc_is_lambda(block->proc);
-
- if (lambda) {
- arg = rb_ary_new4(argc, argv);
- }
- else if (argc == 0) {
- arg = Qnil;
- }
- else {
- arg = argv[0];
- }
-
- vm_push_frame(th, 0, FRAME_MAGIC_IFUNC,
- self, (VALUE)block->dfp,
- 0, th->cfp->sp, block->lfp, 1);
-
- val = (*ifunc->nd_cfnc) (arg, ifunc->nd_tval, argc, argv);
-
- th->cfp++;
- return val;
-}
-
-static inline int
-vm_yield_setup_args(rb_thread_t *th, rb_iseq_t *iseq,
- int argc, VALUE *argv, rb_block_t *blockptr, int lambda)
-{
- if (0) { /* for debug */
- printf(" argc: %d\n", argc);
- printf("iseq argc: %d\n", iseq->argc);
- printf("iseq opts: %d\n", iseq->arg_opts);
- printf("iseq rest: %d\n", iseq->arg_rest);
- printf("iseq post: %d\n", iseq->arg_post_len);
- printf("iseq blck: %d\n", iseq->arg_block);
- printf("iseq smpl: %d\n", iseq->arg_simple);
- printf(" lambda: %s\n", lambda ? "true" : "false");
- }
-
- if (lambda) {
- /* call as method */
- return vm_callee_setup_arg(th, iseq, argc, argv, &blockptr);
- }
- else {
- int i;
- const int m = iseq->argc;
-
- th->mark_stack_len = argc;
-
- /*
- * yield [1, 2]
- * => {|a|} => a = [1, 2]
- * => {|a, b|} => a, b = [1, 2]
- */
- if (!(iseq->arg_simple & 0x02) &&
- (m + iseq->arg_post_len) > 0 &&
- argc == 1 && TYPE(argv[0]) == T_ARRAY) {
- VALUE ary = argv[0];
- th->mark_stack_len = argc = RARRAY_LEN(ary);
-
- CHECK_STACK_OVERFLOW(th->cfp, argc);
-
- MEMCPY(argv, RARRAY_PTR(ary), VALUE, argc);
- }
-
- for (i=argc; i<m; i++) {
- argv[i] = Qnil;
- }
-
- if (iseq->arg_rest == -1) {
- if (m < argc) {
- /*
- * yield 1, 2
- * => {|a|} # truncate
- */
- th->mark_stack_len = argc = m;
- }
- }
- else {
- int r = iseq->arg_rest;
-
- if (iseq->arg_post_len) {
- int len = iseq->arg_post_len;
- int start = iseq->arg_post_start;
- int rsize = argc > m ? argc - m : 0;
- int psize = rsize;
- VALUE ary;
-
- if (psize > len) psize = len;
-
- ary = rb_ary_new4(rsize - psize, &argv[r]);
-
- if (0) {
- printf(" argc: %d\n", argc);
- printf(" len: %d\n", len);
- printf("start: %d\n", start);
- printf("rsize: %d\n", rsize);
- }
-
- /* copy post argument */
- MEMMOVE(&argv[start], &argv[r + rsize - psize], VALUE, psize);
-
- for (i=psize; i<len; i++) {
- argv[start + i] = Qnil;
- }
- argv[r] = ary;
- }
- else {
- if (argc < r) {
- /* yield 1
- * => {|a, b, *r|}
- */
- for (i=argc; i<r; i++) {
- argv[i] = Qnil;
- }
- argv[r] = rb_ary_new();
- }
- else {
- argv[r] = rb_ary_new4(argc-r, &argv[r]);
- }
- }
-
- th->mark_stack_len = iseq->arg_size;
- }
-
- /* {|&b|} */
- if (iseq->arg_block != -1) {
- VALUE procval = Qnil;
-
- if (blockptr) {
- procval = blockptr->proc;
- }
-
- argv[iseq->arg_block] = procval;
- }
-
- th->mark_stack_len = 0;
- return 0;
- }
-}
-
-static VALUE
-vm_invoke_block(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_num_t num, rb_num_t flag)
-{
- VALUE val;
- rb_block_t *block = GET_BLOCK_PTR();
- rb_iseq_t *iseq;
- int argc = num;
-
- if (GET_ISEQ()->local_iseq->type != ISEQ_TYPE_METHOD || block == 0) {
- vm_localjump_error("no block given (yield)", Qnil, 0);
- }
- iseq = block->iseq;
-
- argc = caller_setup_args(th, GET_CFP(), flag, argc, 0, 0);
-
- if (BUILTIN_TYPE(iseq) != T_NODE) {
- int opt_pc;
- const int arg_size = iseq->arg_size;
- VALUE *rsp = GET_SP() - argc;
- SET_SP(rsp);
-
- CHECK_STACK_OVERFLOW(GET_CFP(), iseq->stack_max);
- opt_pc = vm_yield_setup_args(th, iseq, argc, rsp, 0,
- block_proc_is_lambda(block->proc));
-
- vm_push_frame(th, iseq,
- FRAME_MAGIC_BLOCK, block->self, (VALUE) block->dfp,
- iseq->iseq_encoded + opt_pc, rsp + arg_size, block->lfp,
- iseq->local_size - arg_size);
-
- return Qundef;
- }
- else {
- val = vm_yield_with_cfunc(th, block, block->self, argc, STACK_ADDR_FROM_TOP(argc));
- POPN(argc); /* TODO: should put before C/yield? */
- return val;
- }
-}
-
-/* cref */
-
-static NODE *
-lfp_get_special_cref(VALUE *lfp)
-{
- struct RValues *values;
- if (((VALUE)(values = (void *)lfp[-1])) != Qnil && values->basic.klass) {
- return (NODE *)values->basic.klass;
- }
- else {
- return 0;
- }
-}
-
-static struct RValues *
-new_value(void)
-{
- struct RValues *val = RVALUES(rb_newobj());
- OBJSETUP(val, 0, T_VALUES);
- val->v1 = val->v2 = val->v3 = Qnil;
- return val;
-}
-
-static struct RValues *
-lfp_svar_place(rb_thread_t *th, VALUE *lfp)
-{
- struct RValues *svar;
-
- if (th->local_lfp != lfp) {
- svar = (struct RValues *)lfp[-1];
- if ((VALUE)svar == Qnil) {
- svar = new_value();
- lfp[-1] = (VALUE)svar;
- }
- }
- else {
- svar = (struct RValues *)th->local_svar;
- if ((VALUE)svar == Qnil) {
- svar = new_value();
- th->local_svar = (VALUE)svar;
- }
- }
- return svar;
-}
-
-static VALUE
-lfp_svar_get(rb_thread_t *th, VALUE *lfp, VALUE key)
-{
- struct RValues *svar = lfp_svar_place(th, lfp);
-
- switch (key) {
- case 0:
- return svar->v1;
- case 1:
- return svar->v2;
- case 2:
- return svar->basic.klass;
- default: {
- VALUE hash = svar->v3;
-
- if (hash == Qnil) {
- return Qnil;
- }
- else {
- return rb_hash_lookup(hash, key);
- }
- }
- }
-}
-
-static void
-lfp_svar_set(rb_thread_t *th, VALUE *lfp, VALUE key, VALUE val)
-{
- struct RValues *svar = lfp_svar_place(th, lfp);
-
- switch (key) {
- case 0:
- svar->v1 = val;
- return;
- case 1:
- svar->v2 = val;
- return;
- case 2:
- svar->basic.klass = val;
- return;
- default: {
- VALUE hash = svar->v3;
-
- if (hash == Qnil) {
- svar->v3 = hash = rb_hash_new();
- }
- rb_hash_aset(hash, key, val);
- }
- }
-}
-
-static NODE *
-get_cref(rb_iseq_t *iseq, VALUE *lfp)
-{
- NODE *cref;
- if ((cref = lfp_get_special_cref(lfp)) != 0) {
- /* */
- }
- else if ((cref = iseq->cref_stack) != 0) {
- /* */
- }
- else {
- rb_bug("get_cref: unreachable");
- }
- return cref;
-}
-
-static inline VALUE
-vm_getspecial(rb_thread_t *th, VALUE *lfp, VALUE key, rb_num_t type)
-{
- VALUE val;
-
- if (type == 0) {
- if (FIXNUM_P(key)) key = FIX2INT(key);
- val = lfp_svar_get(th, lfp, key);
- }
- else {
- VALUE backref = lfp_svar_get(th, lfp, 1);
-
- if (type & 0x01) {
- switch (type >> 1) {
- case '&':
- val = rb_reg_last_match(backref);
- break;
- case '`':
- val = rb_reg_match_pre(backref);
- break;
- case '\'':
- val = rb_reg_match_post(backref);
- break;
- case '+':
- val = rb_reg_match_last(backref);
- break;
- default:
- rb_bug("unexpected back-ref");
- }
- }
- else {
- val = rb_reg_nth_match(type >> 1, backref);
- }
- }
- return val;
-}
-
-static inline VALUE
-vm_get_ev_const(rb_thread_t *th, rb_iseq_t *iseq,
- VALUE klass, ID id, int is_defined)
-{
- VALUE val;
-
- if (klass == Qnil) {
- /* in current lexical scope */
- NODE *root_cref = get_cref(iseq, th->cfp->lfp);
- NODE *cref = root_cref;
-
- while (cref && cref->nd_next) {
- klass = cref->nd_clss;
- cref = cref->nd_next;
-
- if (klass == 0) {
- continue;
- }
- if (NIL_P(klass)) {
- if (is_defined) {
- /* TODO: check */
- return 1;
- }
- else {
- klass = CLASS_OF(th->cfp->self);
- return rb_const_get(klass, id);
- }
- }
- search_continue:
- if (RCLASS_IV_TBL(klass) &&
- st_lookup(RCLASS_IV_TBL(klass), id, &val)) {
- if (val == Qundef) {
- rb_autoload_load(klass, id);
- goto search_continue;
- }
- else {
- if (is_defined) {
- return 1;
- }
- else {
- return val;
- }
- }
- }
- }
- klass = root_cref->nd_clss;
- if (is_defined) {
- return rb_const_defined(klass, id);
- }
- else {
- return rb_const_get(klass, id);
- }
- }
- else {
- switch (TYPE(klass)) {
- case T_CLASS:
- case T_MODULE:
- break;
- default:
- rb_raise(rb_eTypeError, "%s is not a class/module",
- RSTRING_PTR(rb_obj_as_string(klass)));
- }
- if (is_defined) {
- return rb_const_defined(klass, id);
- }
- else {
- return rb_const_get(klass, id);
- }
- }
-}
-
-static inline VALUE
-vm_get_cvar_base(rb_thread_t *th, rb_iseq_t *iseq)
-{
- NODE *cref = get_cref(iseq, th->cfp->lfp);
- VALUE klass = Qnil;
-
- if (cref) {
- klass = cref->nd_clss;
- if (!cref->nd_next) {
- rb_warn("class variable access from toplevel");
- }
- }
- if (NIL_P(klass)) {
- rb_raise(rb_eTypeError, "no class variables available");
- }
- return klass;
-}
-
-static inline void
-vm_define_method(rb_thread_t *th, VALUE obj,
- ID id, rb_iseq_t *miseq, rb_num_t is_singleton, NODE *cref)
-{
- NODE *newbody;
- int noex = cref->nd_visi;
- VALUE klass = cref->nd_clss;
-
- if (is_singleton) {
- if (FIXNUM_P(obj) || SYMBOL_P(obj)) {
- rb_raise(rb_eTypeError,
- "can't define singleton method \"%s\" for %s",
- rb_id2name(id), rb_obj_classname(obj));
- }
-
- if (OBJ_FROZEN(obj)) {
- rb_error_frozen("object");
- }
-
- klass = rb_singleton_class(obj);
- noex = NOEX_PUBLIC;
- }
-
- /* dup */
- COPY_CREF(miseq->cref_stack, cref);
- miseq->klass = klass;
- miseq->defined_method_id = id;
- newbody = NEW_NODE(RUBY_VM_METHOD_NODE, 0, miseq->self, 0);
- rb_add_method(klass, id, newbody, noex);
-
- if (!is_singleton && noex == NOEX_MODFUNC) {
- rb_add_method(rb_singleton_class(klass), id, newbody, NOEX_PUBLIC);
- }
- INC_VM_STATE_VERSION();
-}
-
-static inline NODE *
-vm_method_search(VALUE id, VALUE klass, IC ic)
-{
- NODE *mn;
-
-#if OPT_INLINE_METHOD_CACHE
- {
- if (LIKELY(klass == ic->ic_class) &&
- LIKELY(GET_VM_STATE_VERSION() == ic->ic_vmstat)) {
- mn = ic->ic_method;
- }
- else {
- mn = rb_method_node(klass, id);
- ic->ic_class = klass;
- ic->ic_method = mn;
- ic->ic_vmstat = GET_VM_STATE_VERSION();
- }
- }
-#else
- mn = rb_method_node(klass, id);
-#endif
- return mn;
-}
-
-static inline VALUE
-vm_search_normal_superclass(VALUE klass, VALUE recv)
-{
- if (BUILTIN_TYPE(klass) == T_CLASS) {
- klass = RCLASS_SUPER(klass);
- }
- else if (BUILTIN_TYPE(klass) == T_MODULE) {
- VALUE k = CLASS_OF(recv);
- while (k) {
- if (BUILTIN_TYPE(k) == T_ICLASS && RBASIC(k)->klass == klass) {
- klass = RCLASS_SUPER(k);
- break;
- }
- k = RCLASS_SUPER(k);
- }
- }
- return klass;
-}
-
-static void
-vm_search_superclass(rb_control_frame_t *reg_cfp, rb_iseq_t *ip, VALUE recv, VALUE sigval, ID *idp, VALUE *klassp)
-{
- ID id;
- VALUE klass;
-
- while (ip && !ip->klass) {
- ip = ip->parent_iseq;
- }
-
- if (ip == 0) {
- rb_raise(rb_eNoMethodError, "super called outside of method");
- }
-
- id = ip->defined_method_id;
-
- if (ip != ip->local_iseq) {
- /* defined by Module#define_method() */
- rb_control_frame_t *lcfp = GET_CFP();
-
- while (lcfp->iseq != ip) {
- VALUE *tdfp = GET_PREV_DFP(lcfp->dfp);
- while (1) {
- lcfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(lcfp);
- if (lcfp->dfp == tdfp) {
- break;
- }
- }
- }
-
- id = lcfp->method_id;
- klass = vm_search_normal_superclass(lcfp->method_class, recv);
-
- if (sigval == Qfalse) {
- /* zsuper */
- rb_raise(rb_eRuntimeError, "implicit argument passing of super from method defined by define_method() is not supported. Specify all arguments explicitly.");
- }
- }
- else {
- klass = vm_search_normal_superclass(ip->klass, recv);
- }
-
- *idp = id;
- *klassp = klass;
-}
-
-static VALUE
-vm_throw(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_num_t throw_state, VALUE throwobj)
-{
- rb_num_t state = throw_state & 0xff;
- rb_num_t flag = throw_state & 0x8000;
- rb_num_t level = throw_state >> 16;
-
- if (state != 0) {
- VALUE *pt;
- int i;
- if (flag != 0) {
- if (throw_state & 0x4000) {
- pt = (void *)1;
- }
- else {
- pt = 0;
- }
- }
- else {
- if (state == TAG_BREAK) {
- rb_control_frame_t *cfp = GET_CFP();
- VALUE *dfp = GET_DFP();
- int is_orphan = 1;
- rb_iseq_t *base_iseq = GET_ISEQ();
-
- search_parent:
- if (cfp->iseq->type != ISEQ_TYPE_BLOCK) {
- dfp = GC_GUARDED_PTR_REF((VALUE *) *dfp);
- base_iseq = base_iseq->parent_iseq;
-
- while ((VALUE *) cfp < th->stack + th->stack_size) {
- if (cfp->dfp == dfp) {
- goto search_parent;
- }
- cfp++;
- }
- rb_bug("VM (throw): can't find break base.");
- }
-
- if (VM_FRAME_TYPE(cfp) == FRAME_MAGIC_LAMBDA) {
- /* lambda{... break ...} */
- is_orphan = 0;
- pt = dfp;
- }
- else {
- dfp = GC_GUARDED_PTR_REF((VALUE *) *dfp);
-
- while ((VALUE *)cfp < th->stack + th->stack_size) {
- if (cfp->dfp == dfp) {
- VALUE epc = epc = cfp->pc - cfp->iseq->iseq_encoded;
- rb_iseq_t *iseq = cfp->iseq;
- int i;
-
- for (i=0; i<iseq->catch_table_size; i++) {
- struct iseq_catch_table_entry *entry = &iseq->catch_table[i];
-
- if (entry->type == CATCH_TYPE_BREAK &&
- entry->start < epc && entry->end >= epc) {
- if (entry->cont == epc) {
- goto found;
- }
- else {
- break;
- }
- }
- }
- break;
-
- found:
- pt = dfp;
- is_orphan = 0;
- break;
- }
- cfp++;
- }
- }
-
- if (is_orphan) {
- vm_localjump_error("break from proc-closure", throwobj, TAG_BREAK);
- }
- }
- else if (state == TAG_RETRY) {
- pt = GC_GUARDED_PTR_REF((VALUE *) * GET_DFP());
- for (i = 0; i < level; i++) {
- pt = GC_GUARDED_PTR_REF((VALUE *) * pt);
- }
- }
- else if (state == TAG_RETURN) {
- rb_control_frame_t *cfp = GET_CFP();
- VALUE *dfp = GET_DFP();
- int is_orphan = 1;
-
- /**
- * check orphan:
- */
- while ((VALUE *) cfp < th->stack + th->stack_size) {
- if (GET_DFP() == dfp) {
- if (VM_FRAME_TYPE(cfp) == FRAME_MAGIC_LAMBDA) {
- /* in lambda */
- is_orphan = 0;
- break;
- }
- }
- if (GET_LFP() == cfp->lfp &&
- cfp->iseq->type == ISEQ_TYPE_METHOD) {
- is_orphan = 0;
- break;
- }
- cfp++;
- }
-
- if (is_orphan) {
- vm_localjump_error("unexpected return", throwobj, TAG_RETURN);
- }
-
- pt = GET_LFP();
- }
- else {
- rb_bug("isns(throw): unsupport throw type");
- }
- }
- th->state = state;
- return (VALUE)NEW_THROW_OBJECT(throwobj, (VALUE) pt, state);
- }
- else {
- /* continue throw */
- VALUE err = throwobj;
-
- if (FIXNUM_P(err)) {
- th->state = FIX2INT(err);
- }
- else if (SYMBOL_P(err)) {
- th->state = TAG_THROW;
- }
- else if (BUILTIN_TYPE(err) == T_NODE) {
- th->state = GET_THROWOBJ_STATE(err);
- }
- else {
- th->state = TAG_RAISE;
- /*th->state = FIX2INT(rb_ivar_get(err, idThrowState));*/
- }
- return err;
- }
-}
-
-static inline void
-vm_expandarray(rb_control_frame_t *cfp, VALUE ary, int num, int flag)
-{
- int is_splat = flag & 0x01;
- int space_size = num + is_splat;
- VALUE *base = cfp->sp, *ptr;
- volatile VALUE tmp_ary;
- int len;
-
- if (TYPE(ary) != T_ARRAY) {
- ary = rb_ary_to_ary(ary);
- }
-
- cfp->sp += space_size;
-
- tmp_ary = ary;
- ptr = RARRAY_PTR(ary);
- len = RARRAY_LEN(ary);
-
- if (flag & 0x02) {
- /* post: ..., nil ,ary[-1], ..., ary[0..-num] # top */
- int i = 0, j;
-
- if (len < num) {
- for (i=0; i<num-len; i++) {
- *base++ = Qnil;
- }
- }
- for (j=0; i<num; i++, j++) {
- VALUE v = ptr[len - j - 1];
- *base++ = v;
- }
- if (is_splat) {
- *base = rb_ary_new4(len - j, ptr);
- }
- }
- else {
- /* normal: ary[num..-1], ary[num-2], ary[num-3], ..., ary[0] # top */
- int i;
- VALUE *bptr = &base[space_size - 1];
-
- for (i=0; i<num; i++) {
- if (len <= i) {
- for (; i<num; i++) {
- *bptr-- = Qnil;
- }
- break;
- }
- *bptr-- = ptr[i];
- }
- if (is_splat) {
- if (num > len) {
- *bptr = rb_ary_new();
- }
- else {
- *bptr = rb_ary_new4(len - num, ptr + num);
- }
- }
- }
-}
-
-static inline int
-check_cfunc(NODE *mn, void *func)
-{
- if (mn && nd_type(mn->nd_body) == NODE_CFUNC &&
- mn->nd_body->nd_cfnc == func) {
- return 1;
- }
- else {
- return 0;
- }
-}
-
-static VALUE
-opt_eq_func(VALUE recv, VALUE obj, IC ic)
-{
- VALUE val = Qundef;
-
- if (FIXNUM_2_P(recv, obj) &&
- BASIC_OP_UNREDEFINED_P(BOP_EQ)) {
- long a = FIX2LONG(recv), b = FIX2LONG(obj);
-
- if (a == b) {
- val = Qtrue;
- }
- else {
- val = Qfalse;
- }
- }
- else if (!SPECIAL_CONST_P(recv) && !SPECIAL_CONST_P(obj)) {
- if (HEAP_CLASS_OF(recv) == rb_cFloat &&
- HEAP_CLASS_OF(obj) == rb_cFloat &&
- BASIC_OP_UNREDEFINED_P(BOP_EQ)) {
- double a = RFLOAT_VALUE(recv);
- double b = RFLOAT_VALUE(obj);
-
- if (isnan(a) || isnan(b)) {
- val = Qfalse;
- }
- else if (a == b) {
- val = Qtrue;
- }
- else {
- val = Qfalse;
- }
- }
- else if (HEAP_CLASS_OF(recv) == rb_cString &&
- HEAP_CLASS_OF(obj) == rb_cString &&
- BASIC_OP_UNREDEFINED_P(BOP_EQ)) {
- val = rb_str_equal(recv, obj);
- }
- else {
- NODE *mn = vm_method_search(idEq, CLASS_OF(recv), ic);
- extern VALUE rb_obj_equal(VALUE obj1, VALUE obj2);
-
- if (check_cfunc(mn, rb_obj_equal)) {
- return recv == obj ? Qtrue : Qfalse;
- }
- }
- }
-
- return val;
-}
Index: thread_win32.c
===================================================================
--- thread_win32.c (revision 0)
+++ thread_win32.c (revision 14364)
@@ -0,0 +1,508 @@
+/* -*-c-*- */
+/**********************************************************************
+
+ thread_win32.c -
+
+ $Author$
+ $Date$
+
+ Copyright (C) 2004-2007 Koichi Sasada
+
+**********************************************************************/
+
+#ifdef THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
+
+#include <process.h>
+
+#define WIN32_WAIT_TIMEOUT 10 /* 10 ms */
+#undef Sleep
+
+#define native_thread_yield() Sleep(0)
+#define remove_signal_thread_list(th)
+
+static volatile DWORD ruby_native_thread_key = TLS_OUT_OF_INDEXES;
+
+static rb_thread_t *
+ruby_thread_from_native(void)
+{
+ return TlsGetValue(ruby_native_thread_key);
+}
+
+static int
+ruby_thread_set_native(rb_thread_t *th)
+{
+ return TlsSetValue(ruby_native_thread_key, th);
+}
+
+static void
+Init_native_thread(void)
+{
+ rb_thread_t *th = GET_THREAD();
+
+ ruby_native_thread_key = TlsAlloc();
+ DuplicateHandle(GetCurrentProcess(),
+ GetCurrentThread(),
+ GetCurrentProcess(),
+ &th->thread_id, 0, FALSE, DUPLICATE_SAME_ACCESS);
+
+ th->native_thread_data.interrupt_event = CreateEvent(0, TRUE, FALSE, 0);
+
+ thread_debug("initial thread (th: %p, thid: %p, event: %p)\n",
+ th, GET_THREAD()->thread_id,
+ th->native_thread_data.interrupt_event);
+}
+
+static void
+w32_error(void)
+{
+ LPVOID lpMsgBuf;
+ FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
+ FORMAT_MESSAGE_FROM_SYSTEM |
+ FORMAT_MESSAGE_IGNORE_INSERTS,
+ NULL,
+ GetLastError(),
+ MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
+ (LPTSTR) & lpMsgBuf, 0, NULL);
+ rb_bug("%s", lpMsgBuf);
+}
+
+static void
+w32_set_event(HANDLE handle)
+{
+ if (SetEvent(handle) == 0) {
+ w32_error();
+ }
+}
+
+static void
+w32_reset_event(HANDLE handle)
+{
+ if (ResetEvent(handle) == 0) {
+ w32_error();
+ }
+}
+
+static int
+w32_wait_events(HANDLE *events, int count, DWORD timeout, rb_thread_t *th)
+{
+ HANDLE *targets = events;
+ HANDLE intr;
+ DWORD ret;
+
+ thread_debug(" w32_wait_events events:%p, count:%d, timeout:%ld, th:%p\n",
+ events, count, timeout, th);
+ if (th && (intr = th->native_thread_data.interrupt_event)) {
+ w32_reset_event(intr);
+ if (th->interrupt_flag) {
+ w32_set_event(intr);
+ }
+
+ targets = ALLOCA_N(HANDLE, count + 1);
+ memcpy(targets, events, sizeof(HANDLE) * count);
+
+ targets[count++] = intr;
+ thread_debug(" * handle: %p (count: %d, intr)\n", intr, count);
+ }
+
+ thread_debug(" WaitForMultipleObjects start (count: %d)\n", count);
+ ret = WaitForMultipleObjects(count, targets, FALSE, timeout);
+ thread_debug(" WaitForMultipleObjects end (ret: %d)\n", ret);
+
+ if (ret == WAIT_OBJECT_0 + count - 1 && th) {
+ errno = EINTR;
+ }
+ if (ret == -1 && THREAD_DEBUG) {
+ int i;
+ DWORD dmy;
+ for (i = 0; i < count; i++) {
+ thread_debug(" * error handle %d - %s\n", i,
+ GetHandleInformation(targets[i], &dmy) ? "OK" : "NG");
+ }
+ }
+ return ret;
+}
+
+static void ubf_handle(void *ptr);
+#define ubf_select ubf_handle
+
+int
+rb_w32_wait_events_blocking(HANDLE *events, int num, DWORD timeout)
+{
+ return w32_wait_events(events, num, timeout, GET_THREAD());
+}
+
+int
+rb_w32_wait_events(HANDLE *events, int num, DWORD timeout)
+{
+ int ret;
+
+ BLOCKING_REGION(ret = rb_w32_wait_events_blocking(events, num, timeout),
+ ubf_handle, GET_THREAD());
+ return ret;
+}
+
+static void
+w32_close_handle(HANDLE handle)
+{
+ if (CloseHandle(handle) == 0) {
+ w32_error();
+ }
+}
+
+static void
+w32_resume_thread(HANDLE handle)
+{
+ if (ResumeThread(handle) == -1) {
+ w32_error();
+ }
+}
+
+#ifdef _MSC_VER
+#define HAVE__BEGINTHREADEX 1
+#else
+#undef HAVE__BEGINTHREADEX
+#endif
+
+#ifdef HAVE__BEGINTHREADEX
+#define start_thread (HANDLE)_beginthreadex
+typedef unsigned long (_stdcall *w32_thread_start_func)(void*);
+#else
+#define start_thread CreateThread
+typedef LPTHREAD_START_ROUTINE w32_thread_start_func;
+#endif
+
+static HANDLE
+w32_create_thread(DWORD stack_size, w32_thread_start_func func, void *val)
+{
+ return start_thread(0, stack_size, func, val, CREATE_SUSPENDED, 0);
+}
+
+int
+rb_w32_sleep(unsigned long msec)
+{
+ return w32_wait_events(0, 0, msec, GET_THREAD());
+}
+
+int WINAPI
+rb_w32_Sleep(unsigned long msec)
+{
+ int ret;
+
+ BLOCKING_REGION(ret = rb_w32_sleep(msec),
+ ubf_handle, GET_THREAD());
+ return ret;
+}
+
+static void
+native_sleep(rb_thread_t *th, struct timeval *tv)
+{
+ DWORD msec;
+ if (tv) {
+ msec = tv->tv_sec * 1000 + tv->tv_usec / 1000;
+ }
+ else {
+ msec = INFINITE;
+ }
+
+ GVL_UNLOCK_BEGIN();
+ {
+ DWORD ret;
+ int status = th->status;
+ th->status = THREAD_STOPPED;
+ th->unblock_function = ubf_handle;
+ th->unblock_function_arg = th;
+ thread_debug("native_sleep start (%d)\n", (int)msec);
+ ret = w32_wait_events(0, 0, msec, th);
+ thread_debug("native_sleep done (%d)\n", ret);
+ th->unblock_function = 0;
+ th->unblock_function_arg = 0;
+ th->status = status;
+ }
+ GVL_UNLOCK_END();
+}
+
+int
+native_mutex_lock(rb_thread_lock_t *lock)
+{
+#if USE_WIN32_MUTEX
+ DWORD result;
+ while (1) {
+ thread_debug("native_mutex_lock: %p\n", *lock);
+ result = w32_wait_events(&*lock, 1, INFINITE, 0);
+ switch (result) {
+ case WAIT_OBJECT_0:
+ /* get mutex object */
+ thread_debug("acquire mutex: %p\n", *lock);
+ return 0;
+ case WAIT_OBJECT_0 + 1:
+ /* interrupt */
+ errno = EINTR;
+ thread_debug("acquire mutex interrupted: %p\n", *lock);
+ return 0;
+ case WAIT_TIMEOUT:
+ thread_debug("timeout mutex: %p\n", *lock);
+ break;
+ case WAIT_ABANDONED:
+ rb_bug("win32_mutex_lock: WAIT_ABANDONED");
+ break;
+ default:
+ rb_bug("win32_mutex_lock: unknown result (%d)", result);
+ break;
+ }
+ }
+ return 0;
+#else
+ EnterCriticalSection(lock);
+ return 0;
+#endif
+}
+
+int
+native_mutex_unlock(rb_thread_lock_t *lock)
+{
+#if USE_WIN32_MUTEX
+ thread_debug("release mutex: %p\n", *lock);
+ return ReleaseMutex(*lock);
+#else
+ LeaveCriticalSection(lock);
+ return 0;
+#endif
+}
+
+int
+native_mutex_trylock(rb_thread_lock_t *lock)
+{
+#if USE_WIN32_MUTEX
+ int result;
+ thread_debug("native_mutex_trylock: %p\n", *lock);
+ result = w32_wait_events(&*lock, 1, 1, 0);
+ thread_debug("native_mutex_trylock result: %d\n", result);
+ switch (result) {
+ case WAIT_OBJECT_0:
+ return 0;
+ case WAIT_TIMEOUT:
+ return EBUSY;
+ }
+ return EINVAL;
+#else
+ return TryEnterCriticalSection(lock) == 0;
+#endif
+}
+
+void
+native_mutex_initialize(rb_thread_lock_t *lock)
+{
+#if USE_WIN32_MUTEX
+ *lock = CreateMutex(NULL, FALSE, NULL);
+ if (*lock == NULL) {
+ w32_error();
+ }
+ /* thread_debug("initialize mutex: %p\n", *lock); */
+#else
+ InitializeCriticalSection(lock);
+#endif
+}
+
+void
+native_mutex_destroy(rb_thread_lock_t *lock)
+{
+#if USE_WIN32_MUTEX
+ w32_close_handle(lock);
+#else
+ DeleteCriticalSection(lock);
+#endif
+}
+
+struct cond_event_entry {
+ struct cond_event_entry* next;
+ HANDLE event;
+};
+
+struct rb_thread_cond_struct {
+ struct cond_event_entry *next;
+ struct cond_event_entry *last;
+};
+
+void
+native_cond_signal(rb_thread_cond_t *cond)
+{
+ /* cond is guarded by mutex */
+ struct cond_event_entry *e = cond->next;
+
+ if (e) {
+ cond->next = e->next;
+ SetEvent(e->event);
+ }
+ else {
+ rb_bug("native_cond_signal: no pending threads");
+ }
+}
+
+void
+native_cond_broadcast(rb_thread_cond_t *cond)
+{
+ /* cond is guarded by mutex */
+ struct cond_event_entry *e = cond->next;
+ cond->next = 0;
+
+ while (e) {
+ SetEvent(e->event);
+ e = e->next;
+ }
+}
+
+void
+native_cond_wait(rb_thread_cond_t *cond, rb_thread_lock_t *mutex)
+{
+ DWORD r;
+ struct cond_event_entry entry;
+
+ entry.next = 0;
+ entry.event = CreateEvent(0, FALSE, FALSE, 0);
+
+ /* cond is guarded by mutex */
+ if (cond->next) {
+ cond->last->next = &entry;
+ cond->last = &entry;
+ }
+ else {
+ cond->next = &entry;
+ cond->last = &entry;
+ }
+
+ native_mutex_unlock(mutex);
+ {
+ r = WaitForSingleObject(entry.event, INFINITE);
+ if (r != WAIT_OBJECT_0) {
+ rb_bug("native_cond_wait: WaitForSingleObject returns %d", r);
+ }
+ }
+ native_mutex_lock(mutex);
+
+ w32_close_handle(entry.event);
+}
+
+void
+native_cond_initialize(rb_thread_cond_t *cond)
+{
+ cond->next = 0;
+ cond->last = 0;
+}
+
+void
+native_cond_destroy(rb_thread_cond_t *cond)
+{
+ /* */
+}
+
+static void
+native_thread_destroy(rb_thread_t *th)
+{
+ HANDLE intr = th->native_thread_data.interrupt_event;
+ thread_debug("close handle - intr: %p, thid: %p\n", intr, th->thread_id);
+ th->native_thread_data.interrupt_event = 0;
+ w32_close_handle(intr);
+}
+
+static unsigned long _stdcall
+thread_start_func_1(void *th_ptr)
+{
+ rb_thread_t *th = th_ptr;
+ VALUE stack_start;
+ volatile HANDLE thread_id = th->thread_id;
+
+ th->native_thread_data.interrupt_event = CreateEvent(0, TRUE, FALSE, 0);
+
+ /* run */
+ thread_debug("thread created (th: %p, thid: %p, event: %p)\n", th,
+ th->thread_id, th->native_thread_data.interrupt_event);
+ thread_start_func_2(th, &stack_start, 0);
+
+ w32_close_handle(thread_id);
+ thread_debug("thread deleted (th: %p)\n", th);
+ return 0;
+}
+
+extern size_t rb_gc_stack_maxsize;
+
+static int
+native_thread_create(rb_thread_t *th)
+{
+ size_t stack_size = 4 * 1024; /* 4KB */
+ th->thread_id = w32_create_thread(stack_size, thread_start_func_1, th);
+
+ th->machine_stack_maxsize = rb_gc_stack_maxsize; /* not tested. */
+
+ if ((th->thread_id) == 0) {
+ st_delete_wrap(th->vm->living_threads, th->self);
+ rb_raise(rb_eThreadError, "can't create Thread (%d)", errno);
+ }
+
+ w32_resume_thread(th->thread_id);
+
+ if (THREAD_DEBUG) {
+ Sleep(0);
+ thread_debug("create: (th: %p, thid: %p, intr: %p), stack size: %d\n",
+ th, th->thread_id,
+ th->native_thread_data.interrupt_event, stack_size);
+ }
+ return 0;
+}
+
+static void
+native_thread_join(HANDLE th)
+{
+ w32_wait_events(&th, 1, 0, 0);
+}
+
+static void
+native_thread_apply_priority(rb_thread_t *th)
+{
+ int priority = th->priority;
+ if (th->priority > 0) {
+ priority = THREAD_PRIORITY_ABOVE_NORMAL;
+ }
+ else if (th->priority < 0) {
+ priority = THREAD_PRIORITY_BELOW_NORMAL;
+ }
+ else {
+ priority = THREAD_PRIORITY_NORMAL;
+ }
+
+ SetThreadPriority(th->thread_id, priority);
+}
+
+static void
+ubf_handle(void *ptr)
+{
+ rb_thread_t *th = (rb_thread_t *)ptr;
+ thread_debug("ubf_handle: %p\n", th);
+ w32_set_event(th->native_thread_data.interrupt_event);
+}
+
+static void timer_thread_function(void);
+
+static HANDLE timer_thread_id = 0;
+
+static unsigned long _stdcall
+timer_thread_func(void *dummy)
+{
+ thread_debug("timer_thread\n");
+ while (system_working) {
+ Sleep(WIN32_WAIT_TIMEOUT);
+ timer_thread_function();
+ }
+ thread_debug("timer killed\n");
+ return 0;
+}
+
+void
+rb_thread_create_timer_thread(void)
+{
+ if (timer_thread_id == 0) {
+ timer_thread_id = w32_create_thread(1024, timer_thread_func, 0);
+ w32_resume_thread(timer_thread_id);
+ }
+}
+
+#endif /* THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION */
Property changes on: thread_win32.c
___________________________________________________________________
Name: svn:keywords
+ Author Date Id Revision
Name: svn:eol-style
+ LF
Index: ChangeLog
===================================================================
--- ChangeLog (revision 14363)
+++ ChangeLog (revision 14364)
@@ -1,3 +1,9 @@
+Thu Dec 20 18:17:14 2007 Koichi Sasada <ko1@a...>
+
+ * common.mk, *.ci: renamed to *.c.
+
+ * eval_load.c: renamed to load.c.
+
Thu Dec 20 17:36:01 2007 Eric Hodel <drbrain@s...>
* lib/rubygems*: Import RubyGems 1.0.0, r1575.
Index: thread_pthread.c
===================================================================
--- thread_pthread.c (revision 0)
+++ thread_pthread.c (revision 14364)
@@ -0,0 +1,581 @@
+/* -*-c-*- */
+/**********************************************************************
+
+ thread_pthread.c -
+
+ $Author$
+ $Date$
+
+ Copyright (C) 2004-2007 Koichi Sasada
+
+**********************************************************************/
+
+#ifdef THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
+
+void
+native_mutex_lock(pthread_mutex_t *lock)
+{
+ int r;
+ if ((r = pthread_mutex_lock(lock)) != 0) {
+ rb_bug("pthread_mutex_lock: %d", r);
+ }
+}
+
+void
+native_mutex_unlock(pthread_mutex_t *lock)
+{
+ int r;
+ if ((r = pthread_mutex_unlock(lock)) != 0) {
+ rb_bug("native_mutex_unlock return non-zero: %d", r);
+ }
+}
+
+inline int
+native_mutex_trylock(pthread_mutex_t *lock)
+{
+ int r;
+ if ((r = pthread_mutex_trylock(lock)) != 0) {
+ if (r == EBUSY) {
+ return EBUSY;
+ }
+ else {
+ rb_bug("native_mutex_trylock return non-zero: %d", r);
+ }
+ }
+ return 0;
+}
+
+void
+native_mutex_initialize(pthread_mutex_t *lock)
+{
+ int r = pthread_mutex_init(lock, 0);
+ if (r != 0) {
+ rb_bug("native_mutex_initialize return non-zero: %d", r);
+ }
+}
+
+void
+native_mutex_destroy(pthread_mutex_t *lock)
+{
+ int r = pthread_mutex_destroy(lock);
+ if (r != 0) {
+ rb_bug("native_mutex_destroy return non-zero: %d", r);
+ }
+}
+
+void
+native_cond_initialize(pthread_cond_t *cond)
+{
+ int r = pthread_cond_init(cond, 0);
+ if (r != 0) {
+ rb_bug("native_cond_initialize return non-zero: %d", r);
+ }
+}
+
+void
+native_cond_destroy(pthread_cond_t *cond)
+{
+ int r = pthread_cond_destroy(cond);
+ if (r != 0) {
+ rb_bug("native_cond_destroy return non-zero: %d", r);
+ }
+}
+
+void
+native_cond_signal(pthread_cond_t *cond)
+{
+ pthread_cond_signal(cond);
+}
+
+void
+native_cond_broadcast(pthread_cond_t *cond)
+{
+ pthread_cond_broadcast(cond);
+}
+
+void
+native_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
+{
+ pthread_cond_wait(cond, mutex);
+}
+
+
+#define native_cleanup_push pthread_cleanup_push
+#define native_cleanup_pop pthread_cleanup_pop
+#define native_thread_yield() sched_yield()
+
+static void add_signal_thread_list(rb_thread_t *th);
+static void remove_signal_thread_list(rb_thread_t *th);
+
+static rb_thread_lock_t signal_thread_list_lock;
+
+static pthread_key_t ruby_native_thread_key;
+
+static void
+null_func()
+{
+ /* null */
+}
+
+static rb_thread_t *
+ruby_thread_from_native(void)
+{
+ return pthread_getspecific(ruby_native_thread_key);
+}
+
+static int
+ruby_thread_set_native(rb_thread_t *th)
+{
+ return pthread_setspecific(ruby_native_thread_key, th) == 0;
+}
+
+static void
+Init_native_thread(void)
+{
+ rb_thread_t *th = GET_THREAD();
+
+ pthread_key_create(&ruby_native_thread_key, NULL);
+ th->thread_id = pthread_self();
+ ruby_thread_set_native(th);
+ native_mutex_initialize(&signal_thread_list_lock);
+ posix_signal(SIGVTALRM, null_func);
+}
+
+static void
+native_thread_destroy(rb_thread_t *th)
+{
+ pthread_cond_destroy(&th->native_thread_data.sleep_cond);
+}
+
+#define USE_THREAD_CACHE 0
+
+static void *
+thread_start_func_1(void *th_ptr)
+{
+#if USE_THREAD_CACHE
+ thread_start:
+#endif
+ {
+ rb_thread_t *th = th_ptr;
+ VALUE stack_start;
+
+ /* run */
+ thread_start_func_2(th, &stack_start, rb_ia64_bsp());
+ }
+#if USE_THREAD_CACHE
+ if (1) {
+ /* cache thread */
+ rb_thread_t *th;
+ static rb_thread_t *register_cached_thread_and_wait(void);
+ if ((th = register_cached_thread_and_wait()) != 0) {
+ th_ptr = (void *)th;
+ th->thread_id = pthread_self();
+ goto thread_start;
+ }
+ }
+#endif
+ return 0;
+}
+
+void rb_thread_create_control_thread(void);
+
+struct cached_thread_entry {
+ volatile rb_thread_t **th_area;
+ pthread_cond_t *cond;
+ struct cached_thread_entry *next;
+};
+
+
+#if USE_THREAD_CACHE
+static pthread_mutex_t thread_cache_lock = PTHREAD_MUTEX_INITIALIZER;
+struct cached_thread_entry *cached_thread_root;
+
+static rb_thread_t *
+register_cached_thread_and_wait(void)
+{
+ pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
+ volatile rb_thread_t *th_area = 0;
+ struct cached_thread_entry *entry =
+ (struct cached_thread_entry *)malloc(sizeof(struct cached_thread_entry));
+
+ struct timeval tv;
+ struct timespec ts;
+ gettimeofday(&tv, 0);
+ ts.tv_sec = tv.tv_sec + 60;
+ ts.tv_nsec = tv.tv_usec * 1000;
+
+ pthread_mutex_lock(&thread_cache_lock);
+ {
+ entry->th_area = &th_area;
+ entry->cond = &cond;
+ entry->next = cached_thread_root;
+ cached_thread_root = entry;
+
+ pthread_cond_timedwait(&cond, &thread_cache_lock, &ts);
+
+ {
+ struct cached_thread_entry *e = cached_thread_root;
+ struct cached_thread_entry *prev = cached_thread_root;
+
+ while (e) {
+ if (e == entry) {
+ if (prev == cached_thread_root) {
+ cached_thread_root = e->next;
+ }
+ else {
+ prev->next = e->next;
+ }
+ break;
+ }
+ prev = e;
+ e = e->next;
+ }
+ }
+
+ free(entry);
+ pthread_cond_destroy(&cond);
+ }
+ pthread_mutex_unlock(&thread_cache_lock);
+
+ return (rb_thread_t *)th_area;
+}
+#endif
+
+static int
+use_cached_thread(rb_thread_t *th)
+{
+ int result = 0;
+#if USE_THREAD_CACHE
+ struct cached_thread_entry *entry;
+
+ if (cached_thread_root) {
+ pthread_mutex_lock(&thread_cache_lock);
+ entry = cached_thread_root;
+ {
+ if (cached_thread_root) {
+ cached_thread_root = entry->next;
+ *entry->th_area = th;
+ result = 1;
+ }
+ }
+ if (result) {
+ pthread_cond_signal(entry->cond);
+ }
+ pthread_mutex_unlock(&thread_cache_lock);
+ }
+#endif
+ return result;
+}
+
+#define CHECK_ERR(expr) \
+ { int err; if ((err = (expr)) != 0) { rb_bug("err: %d - %s", err, #expr); }}
+
+static int
+native_thread_create(rb_thread_t *th)
+{
+ int err = 0;
+
+ if (use_cached_thread(th)) {
+ thread_debug("create (use cached thread): %p\n", th);
+ }
+ else {
+ pthread_attr_t attr;
+ size_t stack_size = 512 * 1024; /* 512KB */
+ size_t space;
+
+#ifdef PTHREAD_STACK_MIN
+ if (stack_size < PTHREAD_STACK_MIN) {
+ stack_size = PTHREAD_STACK_MIN * 2;
+ }
+#endif
+ space = stack_size/5;
+ if (space > 1024*1024) space = 1024*1024;
+ th->machine_stack_maxsize = stack_size - space;
+#ifdef __ia64
+ th->machine_stack_maxsize /= 2;
+ th->machine_register_stack_maxsize = th->machine_stack_maxsize;
+#endif
+
+ CHECK_ERR(pthread_attr_init(&attr));
+
+#ifdef PTHREAD_STACK_MIN
+ thread_debug("create - stack size: %lu\n", (unsigned long)stack_size);
+ CHECK_ERR(pthread_attr_setstacksize(&attr, stack_size));
+#endif
+
+ CHECK_ERR(pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED));
+ CHECK_ERR(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));
+
+ err = pthread_create(&th->thread_id, &attr, thread_start_func_1, th);
+ thread_debug("create: %p (%d)", th, err);
+ CHECK_ERR(pthread_attr_destroy(&attr));
+
+ if (!err) {
+ pthread_cond_init(&th->native_thread_data.sleep_cond, 0);
+ }
+ else {
+ st_delete_wrap(th->vm->living_threads, th->self);
+ th->status = THREAD_KILLED;
+ rb_raise(rb_eThreadError, "can't create Thread (%d)", err);
+ }
+ }
+ return err;
+}
+
+static void
+native_thread_join(pthread_t th)
+{
+ int err = pthread_join(th, 0);
+ if (err) {
+ rb_raise(rb_eThreadError, "native_thread_join() failed (%d)", err);
+ }
+}
+
+static void
+native_thread_apply_priority(rb_thread_t *th)
+{
+ struct sched_param sp;
+ int policy;
+ int priority = 0 - th->priority;
+ int max, min;
+ pthread_getschedparam(th->thread_id, &policy, &sp);
+ max = sched_get_priority_max(policy);
+ min = sched_get_priority_min(policy);
+
+ if (min < priority) {
+ priority = max;
+ }
+ else if (max > priority) {
+ priority = min;
+ }
+
+ sp.sched_priority = priority;
+ pthread_setschedparam(th->thread_id, policy, &sp);
+}
+
+static void
+ubf_pthread_cond_signal(void *ptr)
+{
+ rb_thread_t *th = (rb_thread_t *)ptr;
+ thread_debug("ubf_pthread_cond_signal (%p)\n", th);
+ pthread_cond_signal(&th->native_thread_data.sleep_cond);
+}
+
+#ifndef __CYGWIN__
+static void
+ubf_select_each(rb_thread_t *th)
+{
+ thread_debug("ubf_select_each (%p)\n", (void *)th->thread_id);
+ if (th) {
+ pthread_kill(th->thread_id, SIGVTALRM);
+ }
+}
+
+static void
+ubf_select(void *ptr)
+{
+ rb_thread_t *th = (rb_thread_t *)ptr;
+ add_signal_thread_list(th);
+ ubf_select_each(th);
+}
+#else
+#define ubf_select 0
+#endif
+
+static void
+native_sleep(rb_thread_t *th, struct timeval *tv)
+{
+ int prev_status = th->status;
+ struct timespec ts;
+ struct timeval tvn;
+
+ if (tv) {
+ gettimeofday(&tvn, NULL);
+ ts.tv_sec = tvn.tv_sec + tv->tv_sec;
+ ts.tv_nsec = (tvn.tv_usec + tv->tv_usec) * 1000;
+ if (ts.tv_nsec >= 1000000000){
+ ts.tv_sec += 1;
+ ts.tv_nsec -= 1000000000;
+ }
+ }
+
+ th->status = THREAD_STOPPED;
+ pthread_cond_init(&th->native_thread_data.sleep_cond, 0);
+
+ thread_debug("native_sleep %ld\n", tv ? tv->tv_sec : -1);
+ GVL_UNLOCK_BEGIN();
+ {
+ pthread_mutex_lock(&th->interrupt_lock);
+
+ if (th->interrupt_flag) {
+ /* interrupted; return immediately */
+ thread_debug("native_sleep: interrupted before sleep\n");
+ }
+ else {
+ th->unblock_function = ubf_pthread_cond_signal;
+ th->unblock_function_arg = th;
+
+ if (tv == 0) {
+ thread_debug("native_sleep: pthread_cond_wait start\n");
+ pthread_cond_wait(&th->native_thread_data.sleep_cond,
+ &th->interrupt_lock);
+ thread_debug("native_sleep: pthread_cond_wait end\n");
+ }
+ else {
+ int r;
+ thread_debug("native_sleep: pthread_cond_timedwait start (%ld, %ld)\n",
+ (unsigned long)ts.tv_sec, ts.tv_nsec);
+ r = pthread_cond_timedwait(&th->native_thread_data.sleep_cond,
+ &th->interrupt_lock, &ts);
+ thread_debug("native_sleep: pthread_cond_timedwait end (%d)\n", r);
+ }
+ th->unblock_function = 0;
+ th->unblock_function_arg = 0;
+ }
+ pthread_mutex_unlock(&th->interrupt_lock);
+
+ th->status = prev_status;
+ }
+ GVL_UNLOCK_END();
+ thread_debug("native_sleep done\n");
+}
+
+struct signal_thread_list {
+ rb_thread_t *th;
+ struct signal_thread_list *prev;
+ struct signal_thread_list *next;
+};
+
+static struct signal_thread_list signal_thread_list_anchor = {
+ 0, 0, 0,
+};
+
+#define FGLOCK(lock, body) do { \
+ native_mutex_lock(lock); \
+ { \
+ body; \
+ } \
+ native_mutex_unlock(lock); \
+} while (0)
+
+#if 0 /* for debug */
+static void
+print_signal_list(char *str)
+{
+ struct signal_thread_list *list =
+ signal_thread_list_anchor.next;
+ thread_debug("list (%s)> ", str);
+ while(list){
+ thread_debug("%p (%p), ", list->th, list->th->thread_id);
+ list = list->next;
+ }
+ thread_debug("\n");
+}
+#endif
+
+static void
+add_signal_thread_list(rb_thread_t *th)
+{
+ if (!th->native_thread_data.signal_thread_list) {
+ FGLOCK(&signal_thread_list_lock, {
+ struct signal_thread_list *list =
+ malloc(sizeof(struct signal_thread_list));
+
+ if (list == 0) {
+ fprintf(stderr, "[FATAL] failed to allocate memory\n");
+ exit(1);
+ }
+
+ list->th = th;
+
+ list->prev = &signal_thread_list_anchor;
+ list->next = signal_thread_list_anchor.next;
+ if (list->next) {
+ list->next->prev = list;
+ }
+ signal_thread_list_anchor.next = list;
+ th->native_thread_data.signal_thread_list = list;
+ });
+ }
+}
+
+static void
+remove_signal_thread_list(rb_thread_t *th)
+{
+ if (th->native_thread_data.signal_thread_list) {
+ FGLOCK(&signal_thread_list_lock, {
+ struct signal_thread_list *list =
+ (struct signal_thread_list *)
+ th->native_thread_data.signal_thread_list;
+
+ list->prev->next = list->next;
+ if (list->next) {
+ list->next->prev = list->prev;
+ }
+ th->native_thread_data.signal_thread_list = 0;
+ list->th = 0;
+ free(list);
+ });
+ }
+ else {
+ /* */
+ }
+}
+
+static pthread_t timer_thread_id;
+static void timer_thread_function(void);
+
+static void *
+thread_timer(void *dummy)
+{
+ while (system_working) {
+#ifdef HAVE_NANOSLEEP
+ struct timespec req, rem;
+ req.tv_sec = 0;
+ req.tv_nsec = 10 * 1000 * 1000; /* 10 ms */
+ nanosleep(&req, &rem);
+#else
+ struct timeval tv;
+ tv.tv_sec = 0;
+ tv.tv_usec = 10000; /* 10 ms */
+ select(0, NULL, NULL, NULL, &tv);
+#endif
+#ifndef __CYGWIN__
+ if (signal_thread_list_anchor.next) {
+ FGLOCK(&signal_thread_list_lock, {
+ struct signal_thread_list *list;
+ list = signal_thread_list_anchor.next;
+ while (list) {
+ ubf_select_each(list->th);
+ list = list->next;
+ }
+ });
+ }
+#endif
+ timer_thread_function();
+ }
+ return NULL;
+}
+
+static void
+rb_thread_create_timer_thread(void)
+{
+ rb_enable_interrupt();
+
+ if (!timer_thread_id) {
+ pthread_attr_t attr;
+ int err;
+
+ pthread_attr_init(&attr);
+#ifdef PTHREAD_STACK_MIN
+ pthread_attr_setstacksize(&attr, PTHREAD_STACK_MIN);
+#endif
+ err = pthread_create(&timer_thread_id, &attr, thread_timer, 0);
+ if (err != 0) {
+ rb_bug("rb_thread_create_timer_thread: return non-zero (%d)", err);
+ }
+ }
+ rb_disable_interrupt(); /* only the timer thread receives signals */
+}
+
+#endif /* THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION */
Property changes on: thread_pthread.c
___________________________________________________________________
Name: svn:keywords
+ Author Date Id Revision
Name: svn:eol-style
+ LF
Index: eval_error.c
===================================================================
--- eval_error.c (revision 0)
+++ eval_error.c (revision 14364)
@@ -0,0 +1,291 @@
+/* -*-c-*- */
+/*
+ * included by eval.c
+ */
+
+const char *
+rb_sourcefile(void)
+{
+ rb_thread_t *th = GET_THREAD();
+ rb_control_frame_t *cfp = vm_get_ruby_level_cfp(th, th->cfp);
+
+ if (cfp) {
+ return RSTRING_PTR(cfp->iseq->filename);
+ }
+ else {
+ return 0;
+ }
+}
+
+int
+rb_sourceline(void)
+{
+ rb_thread_t *th = GET_THREAD();
+ rb_control_frame_t *cfp = vm_get_ruby_level_cfp(th, th->cfp);
+
+ if (cfp) {
+ return vm_get_sourceline(cfp);
+ }
+ else {
+ return 0;
+ }
+}
+
+static void
+warn_printf(const char *fmt, ...)
+{
+ char buf[BUFSIZ];
+ va_list args;
+
+ va_init_list(args, fmt);
+ vsnprintf(buf, BUFSIZ, fmt, args);
+ va_end(args);
+ rb_write_error(buf);
+}
+
+#define warn_print(x) rb_write_error(x)
+#define warn_print2(x,l) rb_write_error2(x,l)
+
+static void
+error_pos(void)
+{
+ const char *sourcefile = rb_sourcefile();
+ int sourceline = rb_sourceline();
+
+ if (sourcefile) {
+ if (sourceline == 0) {
+ warn_printf("%s", sourcefile);
+ }
+ else if (rb_frame_callee()) {
+ warn_printf("%s:%d:in `%s'", sourcefile, sourceline,
+ rb_id2name(rb_frame_callee()));
+ }
+ else {
+ warn_printf("%s:%d", sourcefile, sourceline);
+ }
+ }
+}
+
+VALUE rb_check_backtrace(VALUE);
+
+static VALUE
+get_backtrace(VALUE info)
+{
+ if (NIL_P(info))
+ return Qnil;
+ info = rb_funcall(info, rb_intern("backtrace"), 0);
+ if (NIL_P(info))
+ return Qnil;
+ return rb_check_backtrace(info);
+}
+
+static void
+set_backtrace(VALUE info, VALUE bt)
+{
+ rb_funcall(info, rb_intern("set_backtrace"), 1, bt);
+}
+
+static void
+error_print(void)
+{
+ VALUE errat = Qnil; /* OK */
+ VALUE errinfo = GET_THREAD()->errinfo;
+ volatile VALUE eclass, e;
+ char *einfo;
+ long elen;
+
+ if (NIL_P(errinfo))
+ return;
+
+ PUSH_TAG();
+ if (EXEC_TAG() == 0) {
+ errat = get_backtrace(errinfo);
+ }
+ else {
+ errat = Qnil;
+ }
+ if (EXEC_TAG())
+ goto error;
+ if (NIL_P(errat)) {
+ const char *file = rb_sourcefile();
+ int line = rb_sourceline();
+ if (file)
+ warn_printf("%s:%d", file, line);
+ else
+ warn_printf("%d", line);
+ }
+ else if (RARRAY_LEN(errat) == 0) {
+ error_pos();
+ }
+ else {
+ VALUE mesg = RARRAY_PTR(errat)[0];
+
+ if (NIL_P(mesg))
+ error_pos();
+ else {
+ warn_print2(RSTRING_PTR(mesg), RSTRING_LEN(mesg));
+ }
+ }
+
+ eclass = CLASS_OF(errinfo);
+ if (EXEC_TAG() == 0) {
+ e = rb_funcall(errinfo, rb_intern("message"), 0, 0);
+ StringValue(e);
+ einfo = RSTRING_PTR(e);
+ elen = RSTRING_LEN(e);
+ }
+ else {
+ einfo = "";
+ elen = 0;
+ }
+ if (EXEC_TAG())
+ goto error;
+ if (eclass == rb_eRuntimeError && elen == 0) {
+ warn_print(": unhandled exception\n");
+ }
+ else {
+ VALUE epath;
+
+ epath = rb_class_name(eclass);
+ if (elen == 0) {
+ warn_print(": ");
+ warn_print2(RSTRING_PTR(epath), RSTRING_LEN(epath));
+ warn_print("\n");
+ }
+ else {
+ char *tail = 0;
+ long len = elen;
+
+ if (RSTRING_PTR(epath)[0] == '#')
+ epath = 0;
+ if ((tail = memchr(einfo, '\n', elen)) != 0) {
+ len = tail - einfo;
+ tail++; /* skip newline */
+ }
+ warn_print(": ");
+ warn_print2(einfo, len);
+ if (epath) {
+ warn_print(" (");
+ warn_print2(RSTRING_PTR(epath), RSTRING_LEN(epath));
+ warn_print(")\n");
+ }
+ if (tail) {
+ warn_print2(tail, elen - len - 1);
+ }
+ }
+ }
+
+ if (!NIL_P(errat)) {
+ long i;
+ long len = RARRAY_LEN(errat);
+ VALUE *ptr = RARRAY_PTR(errat);
+ int skip = eclass == rb_eSysStackError;
+
+#define TRACE_MAX (TRACE_HEAD+TRACE_TAIL+5)
+#define TRACE_HEAD 8
+#define TRACE_TAIL 5
+
+ for (i = 1; i < len; i++) {
+ if (TYPE(ptr[i]) == T_STRING) {
+ warn_printf("\tfrom %s\n", RSTRING_PTR(ptr[i]));
+ }
+ if (skip && i == TRACE_HEAD && len > TRACE_MAX) {
+ warn_printf("\t ... %ld levels...\n",
+ len - TRACE_HEAD - TRACE_TAIL);
+ i = len - TRACE_TAIL;
+ }
+ }
+ }
+ error:
+ POP_TAG();
+}
+
+void
+ruby_error_print(void)
+{
+ error_print();
+}
+
+void
+rb_print_undef(VALUE klass, ID id, int scope)
+{
+ char *v;
+
+ switch (scope) {
+ default:
+ case NOEX_PUBLIC: v = ""; break;
+ case NOEX_PRIVATE: v = " private"; break;
+ case NOEX_PROTECTED: v = " protected"; break;
+ }
+ rb_name_error(id, "undefined%s method `%s' for %s `%s'", v,
+ rb_id2name(id),
+ (TYPE(klass) == T_MODULE) ? "module" : "class",
+ rb_class2name(klass));
+}
+
+static int
+sysexit_status(VALUE err)
+{
+ VALUE st = rb_iv_get(err, "status");
+ return NUM2INT(st);
+}
+
+static int
+error_handle(int ex)
+{
+ int status = EXIT_FAILURE;
+ rb_thread_t *th = GET_THREAD();
+
+ if (thread_set_raised(th))
+ return EXIT_FAILURE;
+ switch (ex & TAG_MASK) {
+ case 0:
+ status = EXIT_SUCCESS;
+ break;
+
+ case TAG_RETURN:
+ error_pos();
+ warn_print(": unexpected return\n");
+ break;
+ case TAG_NEXT:
+ error_pos();
+ warn_print(": unexpected next\n");
+ break;
+ case TAG_BREAK:
+ error_pos();
+ warn_print(": unexpected break\n");
+ break;
+ case TAG_REDO:
+ error_pos();
+ warn_print(": unexpected redo\n");
+ break;
+ case TAG_RETRY:
+ error_pos();
+ warn_print(": retry outside of rescue clause\n");
+ break;
+ case TAG_THROW:
+ /* TODO: fix me */
+ error_pos();
+ warn_printf(": unexpected throw\n");
+ break;
+ case TAG_RAISE:
+ case TAG_FATAL: {
+ VALUE errinfo = GET_THREAD()->errinfo;
+ if (rb_obj_is_kind_of(errinfo, rb_eSystemExit)) {
+ status = sysexit_status(errinfo);
+ }
+ else if (rb_obj_is_instance_of(errinfo, rb_eSignal)) {
+ /* no message when exiting by signal */
+ }
+ else {
+ error_print();
+ }
+ break;
+ }
+ default:
+ rb_bug("Unknown longjmp status %d", ex);
+ break;
+ }
+ thread_reset_raised(th);
+ return status;
+}
Property changes on: eval_error.c
___________________________________________________________________
Name: svn:keywords
+ Author Date Id Revision
Name: svn:eol-style
+ LF
Index: load.c
===================================================================
--- load.c (revision 0)
+++ load.c (revision 14364)
@@ -0,0 +1,668 @@
+/*
+ * load methods from eval.c
+ */
+
+#include "eval_intern.h"
+
+VALUE ruby_dln_librefs;
+
+#define IS_RBEXT(e) (strcmp(e, ".rb") == 0)
+#define IS_SOEXT(e) (strcmp(e, ".so") == 0 || strcmp(e, ".o") == 0)
+#ifdef DLEXT2
+#define IS_DLEXT(e) (strcmp(e, DLEXT) == 0 || strcmp(e, DLEXT2) == 0)
+#else
+#define IS_DLEXT(e) (strcmp(e, DLEXT) == 0)
+#endif
+
+
+static const char *const loadable_ext[] = {
+ ".rb", DLEXT,
+#ifdef DLEXT2
+ DLEXT2,
+#endif
+ 0
+};
+
+VALUE rb_load_path; /* to be moved to VM */
+static VALUE
+get_load_path(void)
+{
+ VALUE load_path = rb_load_path;
+ VALUE ary = rb_ary_new2(RARRAY_LEN(load_path));
+ long i;
+
+ for (i = 0; i < RARRAY_LEN(load_path); ++i) {
+ rb_ary_push(ary, rb_file_expand_path(RARRAY_PTR(load_path)[i], Qnil));
+ }
+ return ary;
+}
+
+static VALUE
+get_loaded_features(void)
+{
+ return GET_VM()->loaded_features;
+}
+
+static st_table *
+get_loading_table(void)
+{
+ return GET_VM()->loading_table;
+}
+
+static VALUE
+loaded_feature_path(const char *name, long vlen, const char *feature, long len,
+ int type, VALUE load_path)
+{
+ long i;
+
+ for (i = 0; i < RARRAY_LEN(load_path); ++i) {
+ VALUE p = RARRAY_PTR(load_path)[i];
+ const char *s = StringValuePtr(p);
+ long n = RSTRING_LEN(p);
+
+ if (vlen < n + len + 1) continue;
+ if (n && (strncmp(name, s, n) || name[n] != '/')) continue;
+ if (strncmp(name + n + 1, feature, len)) continue;
+ if (name[n+len+1] && name[n+len+1] != '.') continue;
+ switch (type) {
+ case 's':
+ if (IS_DLEXT(&name[n+len+1])) return p;
+ break;
+ case 'r':
+ if (IS_RBEXT(&name[n+len+1])) return p;
+ break;
+ default:
+ return p;
+ }
+ }
+ return 0;
+}
+
+struct loaded_feature_searching {
+ const char *name;
+ long len;
+ int type;
+ VALUE load_path;
+ const char *result;
+};
+
+static int
+loaded_feature_path_i(st_data_t v, st_data_t b, st_data_t f)
+{
+ const char *s = (const char *)v;
+ struct loaded_feature_searching *fp = (struct loaded_feature_searching *)f;
+ VALUE p = loaded_feature_path(s, strlen(s), fp->name, fp->len,
+ fp->type, fp->load_path);
+ if (!p) return ST_CONTINUE;
+ fp->result = s;
+ return ST_STOP;
+}
+
+static int
+rb_feature_p(const char *feature, const char *ext, int rb, int expanded)
+{
+ VALUE v, features, p, load_path = 0;
+ const char *f, *e;
+ long i, len, elen, n;
+ st_table *loading_tbl;
+ int type;
+
+ if (ext) {
+ len = ext - feature;
+ elen = strlen(ext);
+ type = rb ? 'r' : 's';
+ }
+ else {
+ len = strlen(feature);
+ elen = 0;
+ type = 0;
+ }
+ features = get_loaded_features();
+ for (i = 0; i < RARRAY_LEN(features); ++i) {
+ v = RARRAY_PTR(features)[i];
+ f = StringValuePtr(v);
+ if ((n = RSTRING_LEN(v)) < len) continue;
+ if (strncmp(f, feature, len) != 0) {
+ if (expanded) continue;
+ if (!load_path) load_path = get_load_path();
+ if (!(p = loaded_feature_path(f, n, feature, len, type, load_path)))
+ continue;
+ f += RSTRING_LEN(p) + 1;
+ }
+ if (!*(e = f + len)) {
+ if (ext) continue;
+ return 'u';
+ }
+ if (*e != '.') continue;
+ if ((!rb || !ext) && (IS_SOEXT(e) || IS_DLEXT(e))) {
+ return 's';
+ }
+ if ((rb || !ext) && (IS_RBEXT(e))) {
+ return 'r';
+ }
+ }
+ loading_tbl = get_loading_table();
+ if (loading_tbl) {
+ if (!expanded) {
+ struct loaded_feature_searching fs;
+ fs.name = feature;
+ fs.len = len;
+ fs.type = type;
+ fs.load_path = load_path ? load_path : get_load_path();
+ fs.result = 0;
+ st_foreach(loading_tbl, loaded_feature_path_i, (st_data_t)&fs);
+ if (fs.result) goto loading;
+ }
+ if (st_lookup(loading_tbl, (st_data_t)feature, 0)) {
+ loading:
+ if (!ext) return 'u';
+ return !IS_RBEXT(ext) ? 's' : 'r';
+ }
+ else {
+ char *buf;
+
+ if (ext && *ext) return 0;
+ buf = ALLOCA_N(char, len + DLEXT_MAXLEN + 1);
+ MEMCPY(buf, feature, char, len);
+ for (i = 0; (e = loadable_ext[i]) != 0; i++) {
+ strncpy(buf + len, e, DLEXT_MAXLEN + 1);
+ if (st_lookup(loading_tbl, (st_data_t)buf, 0)) {
+ return i ? 's' : 'r';
+ }
+ }
+ }
+ }
+ return 0;
+}
+
+int
+rb_provided(const char *feature)
+{
+ const char *ext = strrchr(feature, '.');
+
+ if (ext && !strchr(ext, '/')) {
+ if (IS_RBEXT(ext)) {
+ if (rb_feature_p(feature, ext, Qtrue, Qfalse)) return Qtrue;
+ return Qfalse;
+ }
+ else if (IS_SOEXT(ext) || IS_DLEXT(ext)) {
+ if (rb_feature_p(feature, ext, Qfalse, Qfalse)) return Qtrue;
+ return Qfalse;
+ }
+ }
+ if (rb_feature_p(feature, feature + strlen(feature), Qtrue, Qfalse))
+ return Qtrue;
+ return Qfalse;
+}
+
+static void
+rb_provide_feature(VALUE feature)
+{
+ rb_ary_push(get_loaded_features(), feature);
+}
+
+void
+rb_provide(const char *feature)
+{
+ rb_provide_feature(rb_str_new2(feature));
+}
+
+NORETURN(static void load_failed _((VALUE)));
+
+void
+rb_load(VALUE fname, int wrap)
+{
+ VALUE tmp;
+ int state;
+ rb_thread_t *th = GET_THREAD();
+ volatile VALUE wrapper = th->top_wrapper;
+ volatile VALUE self = th->top_self;
+ volatile int parse_in_eval;
+ volatile int loaded = Qfalse;
+#ifndef __GNUC__
+ rb_thread_t *volatile th0 = th;
+#endif
+
+ FilePathValue(fname);
+ fname = rb_str_new4(fname);
+ tmp = rb_find_file(fname);
+ if (!tmp) {
+ load_failed(fname);
+ }
+ RB_GC_GUARD(fname) = rb_str_new4(tmp);
+
+ th->errinfo = Qnil; /* ensure */
+
+ if (!wrap) {
+ rb_secure(4); /* should alter global state */
+ th->top_wrapper = 0;
+ }
+ else {
+ /* load in anonymous module as toplevel */
+ th->top_self = rb_obj_clone(rb_vm_top_self());
+ th->top_wrapper = rb_module_new();
+ rb_extend_object(th->top_self, th->top_wrapper);
+ }
+
+ parse_in_eval = th->parse_in_eval;
+ PUSH_TAG();
+ state = EXEC_TAG();
+ if (state == 0) {
+ NODE *node;
+ VALUE iseq;
+
+ th->parse_in_eval++;
+ node = (NODE *)rb_load_file(RSTRING_PTR(fname));
+ th->parse_in_eval--;
+ loaded = Qtrue;
+ iseq = rb_iseq_new(node, rb_str_new2("<top (required)>"),
+ fname, Qfalse, ISEQ_TYPE_TOP);
+ rb_iseq_eval(iseq);
+ }
+ POP_TAG();
+
+#ifndef __GNUC__
+ th = th0;
+ fname = RB_GC_GUARD(fname);
+#endif
+ th->parse_in_eval = parse_in_eval;
+ th->top_self = self;
+ th->top_wrapper = wrapper;
+
+ if (!loaded) {
+ rb_exc_raise(GET_THREAD()->errinfo);
+ }
+ if (state) {
+ vm_jump_tag_but_local_jump(state, Qundef);
+ }
+
+ if (!NIL_P(GET_THREAD()->errinfo)) {
+ /* exception during load */
+ rb_exc_raise(th->errinfo);
+ }
+}
+
+void
+rb_load_protect(VALUE fname, int wrap, int *state)
+{
+ int status;
+
+ PUSH_TAG();
+ if ((status = EXEC_TAG()) == 0) {
+ rb_load(fname, wrap);
+ }
+ POP_TAG();
+ if (state)
+ *state = status;
+}
+
+/*
+ * call-seq:
+ * load(filename, wrap=false) => true
+ *
+ * Loads and executes the Ruby
+ * program in the file _filename_. If the filename does not
+ * resolve to an absolute path, the file is searched for in the library
+ * directories listed in <code>$:</code>. If the optional _wrap_
+ * parameter is +true+, the loaded script will be executed
+ * under an anonymous module, protecting the calling program's global
+ * namespace. In no circumstance will any local variables in the loaded
+ * file be propagated to the loading environment.
+ */
+
+
+static VALUE
+rb_f_load(argc, argv)
+ int argc;
+ VALUE *argv;
+{
+ VALUE fname, wrap;
+
+ rb_scan_args(argc, argv, "11", &fname, &wrap);
+ rb_load(fname, RTEST(wrap));
+ return Qtrue;
+}
+
+static char *
+load_lock(const char *ftptr)
+{
+ st_data_t data;
+ st_table *loading_tbl = get_loading_table();
+
+ if (!loading_tbl || !st_lookup(loading_tbl, (st_data_t)ftptr, &data)) {
+ /* loading ruby library should be serialized. */
+ if (!loading_tbl) {
+ GET_VM()->loading_table = loading_tbl = st_init_strtable();
+ }
+ /* partial state */
+ ftptr = ruby_strdup(ftptr);
+ data = (st_data_t)rb_barrier_new();
+ st_insert(loading_tbl, (st_data_t)ftptr, data);
+ return (char *)ftptr;
+ }
+ return RTEST(rb_barrier_wait((VALUE)data)) ? (char *)ftptr : 0;
+}
+
+static void
+load_unlock(const char *ftptr)
+{
+ if (ftptr) {
+ st_data_t key = (st_data_t)ftptr;
+ st_data_t data;
+ st_table *loading_tbl = get_loading_table();
+
+ if (st_delete(loading_tbl, &key, &data)) {
+ free((char *)key);
+ rb_barrier_release((VALUE)data);
+ }
+ }
+}
+
+
+/*
+ * call-seq:
+ * require(string) => true or false
+ *
+ * Ruby tries to load the library named _string_, returning
+ * +true+ if successful. If the filename does not resolve to
+ * an absolute path, it will be searched for in the directories listed
+ * in <code>$:</code>. If the file has the extension ``.rb'', it is
+ * loaded as a source file; if the extension is ``.so'', ``.o'', or
+ * ``.dll'', or whatever the default shared library extension is on
+ * the current platform, Ruby loads the shared library as a Ruby
+ * extension. Otherwise, Ruby tries adding ``.rb'', ``.so'', and so on
+ * to the name. The name of the loaded feature is added to the array in
+ * <code>$"</code>. A feature will not be loaded if its name already
+ * appears in <code>$"</code>. However, the file name is not converted
+ * to an absolute path, so that ``<code>require 'a';require
+ * './a'</code>'' will load <code>a.rb</code> twice.
+ *
+ * require "my-library.rb"
+ * require "db-driver"
+ */
+
+VALUE
+rb_f_require(VALUE obj, VALUE fname)
+{
+ return rb_require_safe(fname, rb_safe_level());
+}
+
+static int
+search_required(VALUE fname, volatile VALUE *path)
+{
+ VALUE tmp;
+ char *ext, *ftptr;
+ int type, ft = 0;
+
+ *path = 0;
+ ext = strrchr(ftptr = RSTRING_PTR(fname), '.');
+ if (ext && !strchr(ext, '/')) {
+ if (IS_RBEXT(ext)) {
+ if (rb_feature_p(ftptr, ext, Qtrue, Qfalse))
+ return 'r';
+ if ((tmp = rb_find_file(fname)) != 0) {
+ tmp = rb_file_expand_path(tmp, Qnil);
+ ext = strrchr(ftptr = RSTRING_PTR(tmp), '.');
+ if (!rb_feature_p(ftptr, ext, Qtrue, Qtrue))
+ *path = tmp;
+ return 'r';
+ }
+ return 0;
+ }
+ else if (IS_SOEXT(ext)) {
+ if (rb_feature_p(ftptr, ext, Qfalse, Qfalse))
+ return 's';
+ tmp = rb_str_new(RSTRING_PTR(fname), ext - RSTRING_PTR(fname));
+#ifdef DLEXT2
+ OBJ_FREEZE(tmp);
+ if (rb_find_file_ext(&tmp, loadable_ext + 1)) {
+ tmp = rb_file_expand_path(tmp, Qnil);
+ ext = strrchr(ftptr = RSTRING_PTR(tmp), '.');
+ if (!rb_feature_p(ftptr, ext, Qfalse, Qtrue))
+ *path = tmp;
+ return 's';
+ }
+#else
+ rb_str_cat2(tmp, DLEXT);
+ OBJ_FREEZE(tmp);
+ if ((tmp = rb_find_file(tmp)) != 0) {
+ tmp = rb_file_expand_path(tmp, Qnil);
+ ext = strrchr(ftptr = RSTRING_PTR(tmp), '.');
+ if (!rb_feature_p(ftptr, ext, Qfalse, Qtrue))
+ *path = tmp;
+ return 's';
+ }
+#endif
+ }
+ else if (IS_DLEXT(ext)) {
+ if (rb_feature_p(ftptr, ext, Qfalse, Qfalse))
+ return 's';
+ if ((tmp = rb_find_file(fname)) != 0) {
+ tmp = rb_file_expand_path(tmp, Qnil);
+ ext = strrchr(ftptr = RSTRING_PTR(tmp), '.');
+ if (!rb_feature_p(ftptr, ext, Qfalse, Qtrue))
+ *path = tmp;
+ return 's';
+ }
+ }
+ }
+ else if ((ft = rb_feature_p(ftptr, 0, Qfalse, Qfalse)) == 'r') {
+ return 'r';
+ }
+ tmp = fname;
+ type = rb_find_file_ext(&tmp, loadable_ext);
+ tmp = rb_file_expand_path(tmp, Qnil);
+ switch (type) {
+ case 0:
+ ftptr = RSTRING_PTR(tmp);
+ if (ft)
+ break;
+ return rb_feature_p(ftptr, 0, Qfalse, Qtrue);
+
+ default:
+ if (ft)
+ break;
+ case 1:
+ ext = strrchr(ftptr = RSTRING_PTR(tmp), '.');
+ if (rb_feature_p(ftptr, ext, !--type, Qtrue))
+ break;
+ *path = tmp;
+ }
+ return type ? 's' : 'r';
+}
+
+static void
+load_failed(VALUE fname)
+{
+ rb_raise(rb_eLoadError, "no such file to load -- %s",
+ RSTRING_PTR(fname));
+}
+
+static VALUE
+load_ext(VALUE path)
+{
+ SCOPE_SET(NOEX_PUBLIC);
+ return (VALUE)dln_load(RSTRING_PTR(path));
+}
+
+VALUE
+rb_require_safe(VALUE fname, int safe)
+{
+ VALUE result = Qnil;
+ rb_thread_t *th = GET_THREAD();
+ volatile VALUE errinfo = th->errinfo;
+ int state;
+ struct {
+ int safe;
+ } volatile saved;
+ char *volatile ftptr = 0;
+
+ PUSH_TAG();
+ saved.safe = rb_safe_level();
+ if ((state = EXEC_TAG()) == 0) {
+ VALUE path;
+ long handle;
+ int found;
+
+ rb_set_safe_level_force(safe);
+ FilePathValue(fname);
+ RB_GC_GUARD(fname) = rb_str_new4(fname);
+ found = search_required(fname, &path);
+ if (found) {
+ if (!path || !(ftptr = load_lock(RSTRING_PTR(path)))) {
+ result = Qfalse;
+ }
+ else {
+ rb_set_safe_level_force(0);
+ switch (found) {
+ case 'r':
+ rb_load(path, 0);
+ break;
+
+ case 's':
+ handle = (long)rb_vm_call_cfunc(rb_vm_top_self(), load_ext,
+ path, 0, path);
+ rb_ary_push(ruby_dln_librefs, LONG2NUM(handle));
+ break;
+ }
+ rb_provide_feature(path);
+ result = Qtrue;
+ }
+ }
+ }
+ POP_TAG();
+ load_unlock(ftptr);
+
+ rb_set_safe_level_force(saved.safe);
+ if (state) {
+ JUMP_TAG(state);
+ }
+
+ if (NIL_P(result)) {
+ load_failed(fname);
+ }
+
+ th->errinfo = errinfo;
+
+ return result;
+}
+
+VALUE
+rb_require(const char *fname)
+{
+ VALUE fn = rb_str_new2(fname);
+ OBJ_FREEZE(fn);
+ return rb_require_safe(fn, rb_safe_level());
+}
+
+static VALUE
+init_ext_call(VALUE arg)
+{
+ SCOPE_SET(NOEX_PUBLIC);
+ (*(void (*)(void))arg)();
+ return Qnil;
+}
+
+void
+ruby_init_ext(const char *name, void (*init)(void))
+{
+ if (load_lock(name)) {
+ rb_vm_call_cfunc(rb_vm_top_self(), init_ext_call, (VALUE)init,
+ 0, rb_str_new2(name));
+ rb_provide(name);
+ load_unlock(name);
+ }
+}
+
+/*
+ * call-seq:
+ * mod.autoload(name, filename) => nil
+ *
+ * Registers _filename_ to be loaded (using <code>Kernel::require</code>)
+ * the first time that _module_ (which may be a <code>String</code> or
+ * a symbol) is accessed in the namespace of _mod_.
+ *
+ * module A
+ * end
+ * A.autoload(:B, "b")
+ * A::B.doit # autoloads "b"
+ */
+
+static VALUE
+rb_mod_autoload(VALUE mod, VALUE sym, VALUE file)
+{
+ ID id = rb_to_id(sym);
+
+ Check_SafeStr(file);
+ rb_autoload(mod, id, RSTRING_PTR(file));
+ return Qnil;
+}
+
+/*
+ * MISSING: documentation
+ */
+
+static VALUE
+rb_mod_autoload_p(VALUE mod, VALUE sym)
+{
+ return rb_autoload_p(mod, rb_to_id(sym));
+}
+
+/*
+ * call-seq:
+ * autoload(module, filename) => nil
+ *
+ * Registers _filename_ to be loaded (using <code>Kernel::require</code>)
+ * the first time that _module_ (which may be a <code>String</code> or
+ * a symbol) is accessed.
+ *
+ * autoload(:MyModule, "/usr/local/lib/modules/my_module.rb")
+ */
+
+static VALUE
+rb_f_autoload(VALUE obj, VALUE sym, VALUE file)
+{
+ VALUE klass = ruby_cbase();
+ if (NIL_P(klass)) {
+ rb_raise(rb_eTypeError, "Can not set autoload on singleton class");
+ }
+ return rb_mod_autoload(klass, sym, file);
+}
+
+/*
+ * MISSING: documentation
+ */
+
+static VALUE
+rb_f_autoload_p(VALUE obj, VALUE sym)
+{
+ /* use ruby_cbase() as same as rb_f_autoload. */
+ VALUE klass = ruby_cbase();
+ if (NIL_P(klass)) {
+ return Qnil;
+ }
+ return rb_mod_autoload_p(klass, sym);
+}
+
+void
+Init_load()
+{
+ rb_define_readonly_variable("$:", &rb_load_path);
+ rb_define_readonly_variable("$-I", &rb_load_path);
+ rb_define_readonly_variable("$LOAD_PATH", &rb_load_path);
+ rb_load_path = rb_ary_new();
+
+ rb_define_virtual_variable("$\"", get_loaded_features, 0);
+ rb_define_virtual_variable("$LOADED_FEATURES", get_loaded_features, 0);
+ GET_VM()->loaded_features = rb_ary_new();
+
+ rb_define_global_function("load", rb_f_load, -1);
+ rb_define_global_function("require", rb_f_require, 1);
+ rb_define_method(rb_cModule, "autoload", rb_mod_autoload, 2);
+ rb_define_method(rb_cModule, "autoload?", rb_mod_autoload_p, 1);
+ rb_define_global_function("autoload", rb_f_autoload, 2);
+ rb_define_global_function("autoload?", rb_f_autoload_p, 1);
+
+ ruby_dln_librefs = rb_ary_new();
+ rb_register_mark_object(ruby_dln_librefs);
+}
Property changes on: load.c
___________________________________________________________________
Name: svn:keywords
+ Author Date Id Revision
Name: svn:eol-style
+ LF
Index: thread.c
===================================================================
--- thread.c (revision 14363)
+++ thread.c (revision 14364)
@@ -147,7 +147,7 @@
VALUE *register_stack_start));
#if defined(_WIN32)
-#include "thread_win32.ci"
+#include "thread_win32.c"
#define DEBUG_OUT() \
WaitForSingleObject(&debug_mutex, INFINITE); \
@@ -156,7 +156,7 @@
ReleaseMutex(&debug_mutex);
#elif defined(HAVE_PTHREAD_H)
-#include "thread_pthread.ci"
+#include "thread_pthread.c"
#define DEBUG_OUT() \
pthread_mutex_lock(&debug_mutex); \
Index: common.mk
===================================================================
--- common.mk (revision 14363)
+++ common.mk (revision 14364)
@@ -28,7 +28,7 @@
enumerator.$(OBJEXT) \
error.$(OBJEXT) \
eval.$(OBJEXT) \
- eval_load.$(OBJEXT) \
+ load.$(OBJEXT) \
proc.$(OBJEXT) \
file.$(OBJEXT) \
gc.$(OBJEXT) \
@@ -427,14 +427,14 @@
{$(VPATH)}st.h {$(VPATH)}vm_opts.h {$(VPATH)}signal.h \
{$(VPATH)}vm_core.h {$(VPATH)}id.h {$(VPATH)}node.h {$(VPATH)}debug.h \
{$(VPATH)}thread_$(THREAD_MODEL).h
-eval.$(OBJEXT): {$(VPATH)}eval.c {$(VPATH)}eval_error.ci {$(VPATH)}eval_intern.h \
- {$(VPATH)}eval_method.ci {$(VPATH)}eval_safe.ci {$(VPATH)}eval_jump.ci \
+eval.$(OBJEXT): {$(VPATH)}eval.c {$(VPATH)}eval_error.c {$(VPATH)}eval_intern.h \
+ {$(VPATH)}eval_method.c {$(VPATH)}eval_safe.c {$(VPATH)}eval_jump.c \
{$(VPATH)}ruby.h {$(VPATH)}config.h {$(VPATH)}vm_core.h {$(VPATH)}id.h \
{$(VPATH)}defines.h {$(VPATH)}intern.h {$(VPATH)}missing.h \
{$(VPATH)}node.h {$(VPATH)}util.h {$(VPATH)}signal.h \
{$(VPATH)}st.h {$(VPATH)}dln.h {$(VPATH)}debug.h \
{$(VPATH)}vm_opts.h {$(VPATH)}thread_$(THREAD_MODEL).h
-eval_load.$(OBJEXT): {$(VPATH)}eval_load.c {$(VPATH)}eval_intern.h \
+load.$(OBJEXT): {$(VPATH)}load.c {$(VPATH)}eval_intern.h \
{$(VPATH)}ruby.h {$(VPATH)}config.h \
{$(VPATH)}defines.h {$(VPATH)}intern.h {$(VPATH)}missing.h \
{$(VPATH)}node.h {$(VPATH)}util.h {$(VPATH)}vm_core.h {$(VPATH)}id.h \
@@ -540,7 +540,7 @@
{$(VPATH)}defines.h {$(VPATH)}intern.h {$(VPATH)}missing.h
thread.$(OBJEXT): {$(VPATH)}thread.c {$(VPATH)}eval_intern.h \
{$(VPATH)}thread_win32.h {$(VPATH)}thread_pthread.h \
- {$(VPATH)}thread_win32.ci {$(VPATH)}thread_pthread.ci \
+ {$(VPATH)}thread_win32.c {$(VPATH)}thread_pthread.c \
{$(VPATH)}ruby.h {$(VPATH)}vm_core.h {$(VPATH)}id.h {$(VPATH)}config.h \
{$(VPATH)}defines.h {$(VPATH)}intern.h {$(VPATH)}missing.h \
{$(VPATH)}node.h {$(VPATH)}util.h \
@@ -581,7 +581,7 @@
vm.$(OBJEXT): {$(VPATH)}vm.c {$(VPATH)}vm.h {$(VPATH)}vm_core.h {$(VPATH)}id.h \
{$(VPATH)}debug.h {$(VPATH)}ruby.h {$(VPATH)}config.h {$(VPATH)}st.h \
{$(VPATH)}node.h {$(VPATH)}util.h {$(VPATH)}signal.h {$(VPATH)}dln.h \
- {$(VPATH)}insnhelper.h {$(VPATH)}insnhelper.ci {$(VPATH)}vm_evalbody.ci \
+ {$(VPATH)}insnhelper.h {$(VPATH)}vm_insnhelper.c {$(VPATH)}vm_evalbody.c \
{$(VPATH)}insns.inc {$(VPATH)}vm.inc {$(VPATH)}vmtc.inc \
{$(VPATH)}vm_opts.h {$(VPATH)}eval_intern.h \
{$(VPATH)}defines.h {$(VPATH)}missing.h {$(VPATH)}intern.h \
Index: eval_jump.c
===================================================================
--- eval_jump.c (revision 0)
+++ eval_jump.c (revision 14364)
@@ -0,0 +1,305 @@
+/* -*-c-*- */
+/*
+ * from eval.c
+ */
+
+#include "eval_intern.h"
+
+NORETURN(static VALUE rb_f_throw _((int, VALUE *)));
+
+/*
+ * call-seq:
+ * throw(symbol [, obj])
+ *
+ * Transfers control to the end of the active +catch+ block
+ * waiting for _symbol_. Raises +NameError+ if there
+ * is no +catch+ block for the symbol. The optional second
+ * parameter supplies a return value for the +catch+ block,
+ * which otherwise defaults to +nil+. For examples, see
+ * <code>Kernel::catch</code>.
+ */
+
+static VALUE
+rb_f_throw(int argc, VALUE *argv)
+{
+ VALUE tag, value;
+ rb_thread_t *th = GET_THREAD();
+ struct rb_vm_tag *tt = th->tag;
+
+ rb_scan_args(argc, argv, "11", &tag, &value);
+ while (tt) {
+ if (tt->tag == tag) {
+ tt->retval = value;
+ break;
+ }
+ tt = tt->prev;
+ }
+ if (!tt) {
+ VALUE desc = rb_inspect(tag);
+ rb_raise(rb_eArgError, "uncaught throw %s", RSTRING_PTR(desc));
+ }
+ rb_trap_restore_mask();
+ th->errinfo = NEW_THROW_OBJECT(tag, 0, TAG_THROW);
+
+ JUMP_TAG(TAG_THROW);
+#ifndef __GNUC__
+ return Qnil; /* not reached */
+#endif
+}
+
+void
+rb_throw(const char *tag, VALUE val)
+{
+ VALUE argv[2];
+
+ argv[0] = ID2SYM(rb_intern(tag));
+ argv[1] = val;
+ rb_f_throw(2, argv);
+}
+
+void
+rb_throw_obj(VALUE tag, VALUE val)
+{
+ VALUE argv[2];
+
+ argv[0] = tag;
+ argv[1] = val;
+ rb_f_throw(2, argv);
+}
+
+/*
+ * call-seq:
+ *   catch(symbol) {| | block } => obj
+ *
+ * +catch+ executes its block. If a +throw+ is
+ * executed, Ruby searches up its stack for a +catch+ block
+ * with a tag corresponding to the +throw+'s
+ * _symbol_. If found, that block is terminated, and
+ * +catch+ returns the value given to +throw+. If
+ * +throw+ is not called, the block terminates normally, and
+ * the value of +catch+ is the value of the last expression
+ * evaluated. +catch+ expressions may be nested, and the
+ * +throw+ call need not be in lexical scope.
+ *
+ * def routine(n)
+ * puts n
+ * throw :done if n <= 0
+ * routine(n-1)
+ * end
+ *
+ *
+ * catch(:done) { routine(3) }
+ *
+ * <em>produces:</em>
+ *
+ * 3
+ * 2
+ * 1
+ * 0
+ */
+
+static VALUE
+rb_f_catch(int argc, VALUE *argv)
+{
+ VALUE tag;
+ int state;
+ VALUE val = Qnil; /* OK */
+ rb_thread_t *th = GET_THREAD();
+
+ rb_scan_args(argc, argv, "01", &tag);
+ if (argc == 0) {
+ tag = rb_obj_alloc(rb_cObject);
+ }
+ PUSH_TAG();
+
+ th->tag->tag = tag;
+
+ if ((state = EXEC_TAG()) == 0) {
+ val = rb_yield_0(1, &tag);
+ }
+ else if (state == TAG_THROW && RNODE(th->errinfo)->u1.value == tag) {
+ val = th->tag->retval;
+ th->errinfo = Qnil;
+ state = 0;
+ }
+ POP_TAG();
+ if (state)
+ JUMP_TAG(state);
+
+ return val;
+}
+
+static VALUE
+catch_null_i(VALUE dmy)
+{
+ return rb_funcall(Qnil, rb_intern("catch"), 0, 0);
+}
+
+static VALUE
+catch_i(VALUE tag)
+{
+ return rb_funcall(Qnil, rb_intern("catch"), 1, tag);
+}
+
+VALUE
+rb_catch(const char *tag, VALUE (*func)(), VALUE data)
+{
+ if (!tag) {
+ return rb_iterate(catch_null_i, 0, func, data);
+ }
+ return rb_iterate(catch_i, ID2SYM(rb_intern(tag)), func, data);
+}
+
+VALUE
+rb_catch_obj(VALUE tag, VALUE (*func)(), VALUE data)
+{
+ return rb_iterate((VALUE (*)_((VALUE)))catch_i, tag, func, data);
+}
+
+
+/* exit */
+
+void
+rb_call_end_proc(VALUE data)
+{
+ rb_proc_call(data, rb_ary_new());
+}
+
+/*
+ * call-seq:
+ * at_exit { block } -> proc
+ *
+ * Converts _block_ to a +Proc+ object (and therefore
+ * binds it at the point of call) and registers it for execution when
+ * the program exits. If multiple handlers are registered, they are
+ * executed in reverse order of registration.
+ *
+ * def do_at_exit(str1)
+ * at_exit { print str1 }
+ * end
+ * at_exit { puts "cruel world" }
+ * do_at_exit("goodbye ")
+ * exit
+ *
+ * <em>produces:</em>
+ *
+ * goodbye cruel world
+ */
+
+static VALUE
+rb_f_at_exit(void)
+{
+ VALUE proc;
+
+ if (!rb_block_given_p()) {
+ rb_raise(rb_eArgError, "called without a block");
+ }
+ proc = rb_block_proc();
+ rb_set_end_proc(rb_call_end_proc, proc);
+ return proc;
+}
+
+struct end_proc_data {
+ void (*func) ();
+ VALUE data;
+ int safe;
+ struct end_proc_data *next;
+};
+
+static struct end_proc_data *end_procs, *ephemeral_end_procs, *tmp_end_procs;
+
+void
+rb_set_end_proc(void (*func)(VALUE), VALUE data)
+{
+ struct end_proc_data *link = ALLOC(struct end_proc_data);
+ struct end_proc_data **list;
+ rb_thread_t *th = GET_THREAD();
+
+ if (th->top_wrapper) {
+ list = &ephemeral_end_procs;
+ }
+ else {
+ list = &end_procs;
+ }
+ link->next = *list;
+ link->func = func;
+ link->data = data;
+ link->safe = rb_safe_level();
+ *list = link;
+}
+
+void
+rb_mark_end_proc(void)
+{
+ struct end_proc_data *link;
+
+ link = end_procs;
+ while (link) {
+ rb_gc_mark(link->data);
+ link = link->next;
+ }
+ link = ephemeral_end_procs;
+ while (link) {
+ rb_gc_mark(link->data);
+ link = link->next;
+ }
+ link = tmp_end_procs;
+ while (link) {
+ rb_gc_mark(link->data);
+ link = link->next;
+ }
+}
+
+void
+rb_exec_end_proc(void)
+{
+ struct end_proc_data *link, *tmp;
+ int status;
+ volatile int safe = rb_safe_level();
+
+ while (ephemeral_end_procs) {
+ tmp_end_procs = link = ephemeral_end_procs;
+ ephemeral_end_procs = 0;
+ while (link) {
+ PUSH_TAG();
+ if ((status = EXEC_TAG()) == 0) {
+ rb_set_safe_level_force(link->safe);
+ (*link->func) (link->data);
+ }
+ POP_TAG();
+ if (status) {
+ error_handle(status);
+ }
+ tmp = link;
+ tmp_end_procs = link = link->next;
+ free(tmp);
+ }
+ }
+ while (end_procs) {
+ tmp_end_procs = link = end_procs;
+ end_procs = 0;
+ while (link) {
+ PUSH_TAG();
+ if ((status = EXEC_TAG()) == 0) {
+ rb_set_safe_level_force(link->safe);
+ (*link->func) (link->data);
+ }
+ POP_TAG();
+ if (status) {
+ error_handle(status);
+ }
+ tmp = link;
+ tmp_end_procs = link = link->next;
+ free(tmp);
+ }
+ }
+ rb_set_safe_level_force(safe);
+}
+
+void
+Init_jump(void)
+{
+ rb_define_global_function("catch", rb_f_catch, -1);
+ rb_define_global_function("throw", rb_f_throw, -1);
+ rb_define_global_function("at_exit", rb_f_at_exit, 0);
+}
Property changes on: eval_jump.c
___________________________________________________________________
Name: svn:keywords
+ Author Date Id Revision
Name: svn:eol-style
+ LF
Index: eval.c
===================================================================
--- eval.c (revision 14363)
+++ eval.c (revision 14364)
@@ -38,10 +38,10 @@
static inline VALUE rb_yield_0(int argc, VALUE *argv);
static VALUE rb_call(VALUE, VALUE, ID, int, const VALUE *, int);
-#include "eval_error.ci"
-#include "eval_method.ci"
-#include "eval_safe.ci"
-#include "eval_jump.ci"
+#include "eval_error.c"
+#include "eval_method.c"
+#include "eval_safe.c"
+#include "eval_jump.c"
/* initialize ruby */
Index: vm_evalbody.c
===================================================================
--- vm_evalbody.c (revision 0)
+++ vm_evalbody.c (revision 14364)
@@ -0,0 +1,143 @@
+/* -*-c-*- */
+/**********************************************************************
+
+ vm_evalbody.c -
+
+ $Author$
+ $Date$
+
+ Copyright (C) 2004-2007 Koichi Sasada
+
+**********************************************************************/
+
+#include <math.h>
+
+#if VMDEBUG > 0
+#define DECL_SC_REG(type, r, reg) register type reg_##r
+
+#elif __GNUC__ && __x86_64
+#define DECL_SC_REG(type, r, reg) register type reg_##r asm("r" reg)
+
+#elif __GNUC__ && __i386__
+#define DECL_SC_REG(type, r, reg) register type reg_##r asm("e" reg)
+
+#else
+#define DECL_SC_REG(type, r, reg) register type reg_##r
+#endif
+/* #define DECL_SC_REG(r, reg) VALUE reg_##r */
+
+#if !OPT_CALL_THREADED_CODE
+VALUE
+vm_eval(rb_thread_t *th, VALUE initial)
+{
+
+#if OPT_STACK_CACHING
+#if 0
+#elif __GNUC__ && __x86_64
+ DECL_SC_REG(VALUE, a, "12");
+ DECL_SC_REG(VALUE, b, "13");
+#else
+ register VALUE reg_a;
+ register VALUE reg_b;
+#endif
+#endif
+
+#if __GNUC__ && __i386__
+ DECL_SC_REG(VALUE *, pc, "di");
+ DECL_SC_REG(rb_control_frame_t *, cfp, "si");
+#define USE_MACHINE_REGS 1
+
+#elif __GNUC__ && __x86_64__
+ DECL_SC_REG(VALUE *, pc, "14");
+ DECL_SC_REG(rb_control_frame_t *, cfp, "15");
+#define USE_MACHINE_REGS 1
+
+#else
+ register rb_control_frame_t *reg_cfp;
+ VALUE *reg_pc;
+#endif
+
+#if USE_MACHINE_REGS
+
+#undef RESTORE_REGS
+#define RESTORE_REGS() \
+{ \
+ REG_CFP = th->cfp; \
+ reg_pc = reg_cfp->pc; \
+}
+
+#undef REG_PC
+#define REG_PC reg_pc
+#undef GET_PC
+#define GET_PC() (reg_pc)
+#undef SET_PC
+#define SET_PC(x) (reg_cfp->pc = REG_PC = (x))
+#endif
+
+#if OPT_TOKEN_THREADED_CODE || OPT_DIRECT_THREADED_CODE
+#include "vmtc.inc"
+ if (th == 0) {
+#if OPT_STACK_CACHING
+ finish_insn_seq[0] = (VALUE)&&LABEL (finish_SC_ax_ax);
+#else
+ finish_insn_seq[0] = (VALUE)&&LABEL (finish);
+#endif
+ return (VALUE)insns_address_table;
+ }
+#endif
+ reg_cfp = th->cfp;
+ reg_pc = reg_cfp->pc;
+
+#if OPT_STACK_CACHING
+ reg_a = initial;
+ reg_b = 0;
+#endif
+
+ first:
+ INSN_DISPATCH();
+/*****************/
+ #include "vm.inc"
+/*****************/
+ END_INSNS_DISPATCH();
+
+ /* unreachable */
+ rb_bug("vm_eval: unreachable");
+ goto first;
+}
+
+#else
+
+#include "vm.inc"
+#include "vmtc.inc"
+
+const void *const *
+get_insns_address_table()
+{
+ return insns_address_table;
+}
+
+VALUE
+vm_eval(rb_thread_t *th, VALUE initial)
+{
+ register rb_control_frame_t *reg_cfp = th->cfp;
+ VALUE ret;
+
+ while (*GET_PC()) {
+ reg_cfp = ((rb_insn_func_t) (*GET_PC()))(th, reg_cfp);
+
+ if (reg_cfp == 0) {
+ VALUE err = th->errinfo;
+ th->errinfo = Qnil;
+ return err;
+ }
+ }
+
+ if (VM_FRAME_TYPE(th->cfp) != FRAME_MAGIC_FINISH) {
+ rb_bug("cfp consistency error");
+ }
+
+ ret = *(th->cfp->sp-1); /* pop */
+ th->cfp++; /* pop cf */
+ return ret;
+}
+#endif
Property changes on: vm_evalbody.c
___________________________________________________________________
Name: svn:keywords
+ Author Date Id Revision
Name: svn:eol-style
+ LF
Index: eval_safe.c
===================================================================
--- eval_safe.c (revision 0)
+++ eval_safe.c (revision 14364)
@@ -0,0 +1,111 @@
+/* -*-c-*- */
+/*
+ * This file is included by eval.c
+ */
+
+/* safe-level:
+ 0 - strings from streams/environment/ARGV are tainted (default)
+ 1 - no dangerous operation by tainted value
+ 2 - process/file operations prohibited
+ 3 - all generated objects are tainted
+ 4 - no global (non-tainted) variable modification/no direct output
+*/
+
+#define SAFE_LEVEL_MAX 4
+
+/* $SAFE accessor */
+
+int
+rb_safe_level(void)
+{
+ return GET_THREAD()->safe_level;
+}
+
+void
+rb_set_safe_level_force(int safe)
+{
+ GET_THREAD()->safe_level = safe;
+}
+
+void
+rb_set_safe_level(int level)
+{
+ rb_thread_t *th = GET_THREAD();
+
+ if (level > th->safe_level) {
+ if (level > SAFE_LEVEL_MAX) {
+ level = SAFE_LEVEL_MAX;
+ }
+ th->safe_level = level;
+ }
+}
+
+static VALUE
+safe_getter(void)
+{
+ return INT2NUM(rb_safe_level());
+}
+
+static void
+safe_setter(VALUE val)
+{
+ int level = NUM2INT(val);
+ rb_thread_t *th = GET_THREAD();
+
+ if (level < th->safe_level) {
+ rb_raise(rb_eSecurityError,
+ "tried to downgrade safe level from %d to %d",
+ th->safe_level, level);
+ }
+ if (level > SAFE_LEVEL_MAX) {
+ level = SAFE_LEVEL_MAX;
+ }
+ th->safe_level = level;
+}
+
+void
+rb_secure(int level)
+{
+ if (level <= rb_safe_level()) {
+ if (rb_frame_callee()) {
+ rb_raise(rb_eSecurityError, "Insecure operation `%s' at level %d",
+ rb_id2name(rb_frame_callee()), rb_safe_level());
+ }
+ else {
+ rb_raise(rb_eSecurityError, "Insecure operation at level %d",
+ rb_safe_level());
+ }
+ }
+}
+
+void
+rb_secure_update(VALUE obj)
+{
+ if (!OBJ_TAINTED(obj))
+ rb_secure(4);
+}
+
+void
+rb_check_safe_obj(VALUE x)
+{
+ if (rb_safe_level() > 0 && OBJ_TAINTED(x)) {
+ if (rb_frame_callee()) {
+ rb_raise(rb_eSecurityError, "Insecure operation - %s",
+ rb_id2name(rb_frame_callee()));
+ }
+ else {
+ rb_raise(rb_eSecurityError, "Insecure operation: -r");
+ }
+ }
+ rb_secure(4);
+}
+
+void
+rb_check_safe_str(VALUE x)
+{
+ rb_check_safe_obj(x);
+ if (TYPE(x) != T_STRING) {
+ rb_raise(rb_eTypeError, "wrong argument type %s (expected String)",
+ rb_obj_classname(x));
+ }
+}
Property changes on: eval_safe.c
___________________________________________________________________
Name: svn:keywords
+ Author Date Id Revision
Name: svn:eol-style
+ LF
Index: eval_method.c
===================================================================
--- eval_method.c (revision 0)
+++ eval_method.c (revision 14364)
@@ -0,0 +1,641 @@
+/* -*-c-*- */
+/*
+ * This file is included by eval.c
+ */
+
+/* Global method cache: CACHE_SIZE direct-mapped entries, indexed by
+ EXPR1(klass, mid). An entry with mid == 0 is empty/invalidated. */
+#define CACHE_SIZE 0x800
+#define CACHE_MASK 0x7ff
+/* Hash a (class, method id) pair into a cache slot index. */
+#define EXPR1(c,m) ((((c)>>3)^(m))&CACHE_MASK)
+
+struct cache_entry { /* method hash table. */
+ ID mid; /* method's id */
+ ID mid0; /* method's original id */
+ VALUE klass; /* receiver's class */
+ NODE *method;
+};
+
+static struct cache_entry cache[CACHE_SIZE];
+/* While 0 (interpreter still booting), cache maintenance and the
+ method_added/removed hooks below are skipped. Set elsewhere. */
+static int ruby_running = 0;
+
+/* Invalidate every entry in the global method cache and notify the VM
+ that method definitions changed. */
+void
+rb_clear_cache(void)
+{
+ struct cache_entry *ent, *end;
+
+ rb_vm_change_state();
+
+ if (!ruby_running)
+ return;
+ ent = cache;
+ end = ent + CACHE_SIZE;
+ while (ent < end) {
+ ent->mid = 0; /* mid == 0 marks the slot empty */
+ ent++;
+ }
+}
+
+/* Invalidate cache entries for method `id` whose body is defined in
+ `klass` (used when a method is removed/undefined). */
+static void
+rb_clear_cache_for_undef(VALUE klass, ID id)
+{
+ struct cache_entry *ent, *end;
+
+ rb_vm_change_state();
+
+ if (!ruby_running)
+ return;
+ ent = cache;
+ end = ent + CACHE_SIZE;
+ while (ent < end) {
+ if (ent->method && ent->method->nd_clss == klass && ent->mid == id) {
+ ent->mid = 0;
+ }
+ ent++;
+ }
+}
+
+/* Invalidate every cache entry whose method id is `id`, regardless of
+ which class it was cached for. */
+static void
+rb_clear_cache_by_id(ID id)
+{
+ struct cache_entry *ent, *end;
+
+ rb_vm_change_state();
+
+ if (!ruby_running)
+ return;
+ ent = cache;
+ end = ent + CACHE_SIZE;
+ while (ent < end) {
+ if (ent->mid == id) {
+ ent->mid = 0;
+ }
+ ent++;
+ }
+}
+
+/* Invalidate every cache entry keyed by `klass` as receiver class, or
+ whose cached method body is defined in `klass`. */
+void
+rb_clear_cache_by_class(VALUE klass)
+{
+ struct cache_entry *ent, *end;
+
+ rb_vm_change_state();
+
+ if (!ruby_running)
+ return;
+ ent = cache;
+ end = ent + CACHE_SIZE;
+ while (ent < end) {
+ if ((ent->klass == klass) ||
+ (ent->method && ent->method->nd_clss == klass)) {
+ ent->mid = 0;
+ }
+ ent++;
+ }
+}
+
+/*
+ * Register `node` as the body of method `mid` on `klass` with
+ * visibility `noex`. Performs $SAFE checks, forces initialize /
+ * initialize_copy private, redirects a deprecated singleton `allocate`
+ * definition to ID_ALLOCATOR, warns on redefinitions, and fires the
+ * (singleton_)method_added hooks once the interpreter is running.
+ * A NULL `node` stores an empty entry (used to undef a method).
+ */
+void
+rb_add_method(VALUE klass, ID mid, NODE * node, int noex)
+{
+ NODE *body;
+
+ if (NIL_P(klass)) {
+ klass = rb_cObject;
+ }
+ if (rb_safe_level() >= 4 && (klass == rb_cObject || !OBJ_TAINTED(klass))) {
+ rb_raise(rb_eSecurityError, "Insecure: can't define method");
+ }
+ if (!FL_TEST(klass, FL_SINGLETON) &&
+ node && nd_type(node) != NODE_ZSUPER &&
+ (mid == rb_intern("initialize") || mid == rb_intern("initialize_copy"))) {
+ noex = NOEX_PRIVATE | noex;
+ }
+ else if (FL_TEST(klass, FL_SINGLETON) && node
+ && nd_type(node) == NODE_CFUNC && mid == rb_intern("allocate")) {
+ rb_warn
+ ("defining %s.allocate is deprecated; use rb_define_alloc_func()",
+ rb_class2name(rb_iv_get(klass, "__attached__")));
+ mid = ID_ALLOCATOR;
+ }
+ if (OBJ_FROZEN(klass)) {
+ rb_error_frozen("class/module");
+ }
+ rb_clear_cache_by_id(mid);
+
+ /*
+ * NODE_METHOD (NEW_METHOD(body, klass, vis)):
+ * nd_body : method body // (2) // mark
+ * nd_clss : klass // (1) // mark
+ * nd_noex : visibility // (3)
+ *
+ * NODE_FBODY (NEW_FBODY(method, alias)):
+ * nd_body : method (NODE_METHOD) // (2) // mark
+ * nd_oid : original id // (1)
+ * nd_cnt : alias count // (3)
+ */
+ if (node) {
+ body = NEW_FBODY(NEW_METHOD(node, klass, NOEX_WITH_SAFE(noex)), 0);
+ }
+ else {
+ body = 0;
+ }
+
+ {
+ /* check re-definition */
+ st_data_t data;
+ NODE *old_node;
+
+ if (st_lookup(RCLASS_M_TBL(klass), mid, &data)) {
+ old_node = (NODE *)data;
+ if (old_node) {
+ if (nd_type(old_node->nd_body->nd_body) == NODE_CFUNC) {
+ rb_vm_check_redefinition_opt_method(old_node);
+ }
+ /* only warn when the old method has no aliases (nd_cnt == 0) */
+ if (RTEST(ruby_verbose) && node && old_node->nd_cnt == 0 && old_node->nd_body) {
+ rb_warning("method redefined; discarding old %s", rb_id2name(mid));
+ }
+ }
+ if (klass == rb_cObject && node && node->nd_mid == init) {
+ rb_warn("redefining Object#initialize may cause infinite loop");
+ }
+ }
+
+ if (mid == object_id || mid == __send__) {
+ if (node && nd_type(node) == RUBY_VM_METHOD_NODE) {
+ rb_warn("redefining `%s' may cause serious problem",
+ rb_id2name(mid));
+ }
+ }
+ }
+
+ st_insert(RCLASS_M_TBL(klass), mid, (st_data_t) body);
+
+ if (node && mid != ID_ALLOCATOR && ruby_running) {
+ if (FL_TEST(klass, FL_SINGLETON)) {
+ rb_funcall(rb_iv_get(klass, "__attached__"), singleton_added, 1,
+ ID2SYM(mid));
+ }
+ else {
+ rb_funcall(klass, added, 1, ID2SYM(mid));
+ }
+ }
+}
+
+/* Install `func` as the allocator of `klass`, stored as a private
+ ID_ALLOCATOR method on CLASS_OF(klass) (the meta class). */
+void
+rb_define_alloc_func(VALUE klass, VALUE (*func) _((VALUE)))
+{
+ Check_Type(klass, T_CLASS);
+ rb_add_method(CLASS_OF(klass), ID_ALLOCATOR, NEW_CFUNC(func, 0),
+ NOEX_PRIVATE);
+}
+
+/* Remove the allocator of `klass` by storing an empty ID_ALLOCATOR
+ entry (node == 0) with NOEX_UNDEF on its meta class. */
+void
+rb_undef_alloc_func(VALUE klass)
+{
+ Check_Type(klass, T_CLASS);
+ rb_add_method(CLASS_OF(klass), ID_ALLOCATOR, 0, NOEX_UNDEF);
+}
+
+/* Return the C allocator function registered for `klass`, or 0 when
+ none is installed or the stored entry is not a NODE_METHOD/NODE_CFUNC
+ pair. */
+rb_alloc_func_t
+rb_get_alloc_func(VALUE klass)
+{
+ NODE *n;
+ Check_Type(klass, T_CLASS);
+ n = rb_method_node(CLASS_OF(klass), ID_ALLOCATOR);
+ if (!n) return 0;
+ if (nd_type(n) != NODE_METHOD) return 0;
+ n = n->nd_body;
+ if (nd_type(n) != NODE_CFUNC) return 0;
+ return (rb_alloc_func_t)n->nd_cfnc;
+}
+
+/* Walk the superclass chain of `klass` looking up `id` in each method
+ table. Returns the stored entry (0 when not found anywhere) and, when
+ `klassp` is non-NULL, stores the defining class into *klassp. */
+static NODE *
+search_method(VALUE klass, ID id, VALUE *klassp)
+{
+ st_data_t body;
+
+ if (!klass) {
+ return 0;
+ }
+
+ while (!st_lookup(RCLASS_M_TBL(klass), id, &body)) {
+ klass = RCLASS_SUPER(klass);
+ if (!klass)
+ return 0;
+ }
+
+ if (klassp) {
+ *klassp = klass;
+ }
+
+ return (NODE *)body;
+}
+
+/*
+ * search method body (NODE_METHOD)
+ * with : klass and id
+ * without : method cache
+ *
+ * if you need method node with method cache, use
+ * rb_method_node()
+ *
+ * Side effect: fills the method cache slot for (klass, id) — with an
+ * empty entry when the method is missing or undef'd, otherwise with
+ * the found body. When idp is non-NULL, the original (pre-alias)
+ * method id is stored into *idp.
+ */
+NODE *
+rb_get_method_body(VALUE klass, ID id, ID *idp)
+{
+ NODE *volatile fbody, *body;
+ NODE *method;
+
+ if ((fbody = search_method(klass, id, 0)) == 0 || !fbody->nd_body) {
+ /* store empty info in cache */
+ struct cache_entry *ent;
+ ent = cache + EXPR1(klass, id);
+ ent->klass = klass;
+ ent->mid = ent->mid0 = id;
+ ent->method = 0;
+ return 0;
+ }
+
+ method = fbody->nd_body;
+
+ if (ruby_running) {
+ /* store in cache */
+ struct cache_entry *ent;
+ ent = cache + EXPR1(klass, id);
+ ent->klass = klass;
+ ent->mid = id;
+ ent->mid0 = fbody->nd_oid;
+ ent->method = body = method;
+ }
+ else {
+ body = method;
+ }
+
+ if (idp) {
+ *idp = fbody->nd_oid;
+ }
+
+ return body;
+}
+
+/* Cached method lookup: consult the global method cache first and fall
+ back to rb_get_method_body() (which refills the cache) on a miss. */
+NODE *
+rb_method_node(VALUE klass, ID id)
+{
+ struct cache_entry *ent;
+
+ ent = cache + EXPR1(klass, id);
+ if (ent->mid == id && ent->klass == klass && ent->method) {
+ return ent->method;
+ }
+
+ return rb_get_method_body(klass, id, 0);
+}
+
+/* Delete method `mid` from klass's own method table (not from
+ superclasses). Performs $SAFE / frozen checks, warns when removing
+ critical methods, raises NameError when the method is not defined
+ here, and fires the (singleton_)method_removed hooks. */
+static void
+remove_method(VALUE klass, ID mid)
+{
+ st_data_t data;
+ NODE *body = 0;
+
+ if (klass == rb_cObject) {
+ rb_secure(4);
+ }
+ if (rb_safe_level() >= 4 && !OBJ_TAINTED(klass)) {
+ rb_raise(rb_eSecurityError, "Insecure: can't remove method");
+ }
+ if (OBJ_FROZEN(klass))
+ rb_error_frozen("class/module");
+ if (mid == object_id || mid == __send__ || mid == init) {
+ rb_warn("removing `%s' may cause serious problem", rb_id2name(mid));
+ }
+ if (st_lookup(RCLASS_M_TBL(klass), mid, &data)) {
+ body = (NODE *)data;
+ /* an entry with no body is an undef marker, not a removable method */
+ if (!body || !body->nd_body) body = 0;
+ else {
+ st_delete(RCLASS_M_TBL(klass), &mid, &data);
+ }
+ }
+ if (!body) {
+ rb_name_error(mid, "method `%s' not defined in %s",
+ rb_id2name(mid), rb_class2name(klass));
+ }
+
+ if (nd_type(body->nd_body->nd_body) == NODE_CFUNC) {
+ rb_vm_check_redefinition_opt_method(body);
+ }
+
+ rb_clear_cache_for_undef(klass, mid);
+ if (FL_TEST(klass, FL_SINGLETON)) {
+ rb_funcall(rb_iv_get(klass, "__attached__"), singleton_removed, 1,
+ ID2SYM(mid));
+ }
+ else {
+ rb_funcall(klass, removed, 1, ID2SYM(mid));
+ }
+}
+
+/* Public C API wrapper: remove method `name` (a C string) from klass. */
+void
+rb_remove_method(VALUE klass, const char *name)
+{
+ remove_method(klass, rb_intern(name));
+}
+
+/*
+ * call-seq:
+ * remove_method(symbol) => self
+ *
+ * Removes the method identified by _symbol_ from the current
+ * class. For an example, see <code>Module.undef_method</code>.
+ */
+
+/* Module#remove_method: accepts multiple symbols/strings. */
+static VALUE
+rb_mod_remove_method(int argc, VALUE *argv, VALUE mod)
+{
+ int i;
+
+ for (i = 0; i < argc; i++) {
+ remove_method(mod, rb_to_id(argv[i]));
+ }
+ return mod;
+}
+
+#undef rb_disable_super
+#undef rb_enable_super
+
+/* Obsolete public API kept for binary compatibility; does nothing. */
+void
+rb_disable_super(VALUE klass, const char *name)
+{
+ /* obsolete - no use */
+}
+
+/* Obsolete public API kept for compatibility; only emits a warning. */
+void
+rb_enable_super(VALUE klass, const char *name)
+{
+ rb_warning("rb_enable_super() is obsolete");
+}
+
+/* Change the visibility of method `name` on `klass` to `noex`. When
+ the method is defined directly on klass (klass == origin) the flag is
+ updated in place; when it is inherited, a NODE_ZSUPER stub carrying
+ the new visibility is added to klass instead. */
+static void
+rb_export_method(VALUE klass, ID name, ID noex)
+{
+ NODE *fbody;
+ VALUE origin;
+
+ if (klass == rb_cObject) {
+ rb_secure(4);
+ }
+ fbody = search_method(klass, name, &origin);
+ if (!fbody && TYPE(klass) == T_MODULE) {
+ /* module methods may resolve through Object */
+ fbody = search_method(rb_cObject, name, &origin);
+ }
+ if (!fbody || !fbody->nd_body) {
+ rb_print_undef(klass, name, 0);
+ }
+ if (fbody->nd_body->nd_noex != noex) {
+ if (klass == origin) {
+ fbody->nd_body->nd_noex = noex;
+ }
+ else {
+ rb_add_method(klass, name, NEW_ZSUPER(), noex);
+ }
+ }
+}
+
+/* Return Qtrue when `klass` responds to `id`; when `ex` is nonzero,
+ private methods are reported as unbound (Qfalse). */
+int
+rb_method_boundp(VALUE klass, ID id, int ex)
+{
+ NODE *method;
+
+ if ((method = rb_method_node(klass, id)) != 0) {
+ if (ex && (method->nd_noex & NOEX_PRIVATE)) {
+ return Qfalse;
+ }
+ return Qtrue;
+ }
+ return Qfalse;
+}
+
+/* Define attribute accessor(s) for `id` on `klass`: a reader
+ (NEW_IVAR) when `read`, a writer `id=` (NEW_ATTRSET) when `write`.
+ With `ex` set, visibility is taken from the current scope (private /
+ protected / public); otherwise the accessors are public. The backing
+ instance variable is "@<id>". */
+void
+rb_attr(VALUE klass, ID id, int read, int write, int ex)
+{
+ const char *name;
+ ID attriv;
+ int noex;
+
+ if (!ex) {
+ noex = NOEX_PUBLIC;
+ }
+ else {
+ if (SCOPE_TEST(NOEX_PRIVATE)) {
+ noex = NOEX_PRIVATE;
+ rb_warning((SCOPE_CHECK(NOEX_MODFUNC)) ?
+ "attribute accessor as module_function" :
+ "private attribute?");
+ }
+ else if (SCOPE_TEST(NOEX_PROTECTED)) {
+ noex = NOEX_PROTECTED;
+ }
+ else {
+ noex = NOEX_PUBLIC;
+ }
+ }
+
+ if (!rb_is_local_id(id) && !rb_is_const_id(id)) {
+ rb_name_error(id, "invalid attribute name `%s'", rb_id2name(id));
+ }
+ name = rb_id2name(id);
+ if (!name) {
+ rb_raise(rb_eArgError, "argument needs to be symbol or string");
+ }
+ attriv = rb_intern_str(rb_sprintf("@%s", name));
+ if (read) {
+ rb_add_method(klass, id, NEW_IVAR(attriv), noex);
+ }
+ if (write) {
+ rb_add_method(klass, rb_id_attrset(id), NEW_ATTRSET(attriv), noex);
+ }
+}
+
+/* Undefine method `id` on `klass`: after $SAFE / frozen checks and a
+ NameError when the method is not found anywhere in the ancestry,
+ store an empty entry (rb_add_method with node == 0) so lookups stop
+ here, then fire the (singleton_)method_undefined hooks. */
+void
+rb_undef(VALUE klass, ID id)
+{
+ VALUE origin;
+ NODE *body;
+
+ if (ruby_cbase() == rb_cObject && klass == rb_cObject) {
+ rb_secure(4);
+ }
+ if (rb_safe_level() >= 4 && !OBJ_TAINTED(klass)) {
+ rb_raise(rb_eSecurityError, "Insecure: can't undef `%s'",
+ rb_id2name(id));
+ }
+ rb_frozen_class_p(klass);
+ if (id == object_id || id == __send__ || id == init) {
+ rb_warn("undefining `%s' may cause serious problem", rb_id2name(id));
+ }
+ body = search_method(klass, id, &origin);
+ if (!body || !body->nd_body) {
+ /* build a precise "undefined method" message naming the
+ class/module (or the singleton's attached object) */
+ char *s0 = " class";
+ VALUE c = klass;
+
+ if (FL_TEST(c, FL_SINGLETON)) {
+ VALUE obj = rb_iv_get(klass, "__attached__");
+
+ switch (TYPE(obj)) {
+ case T_MODULE:
+ case T_CLASS:
+ c = obj;
+ s0 = "";
+ }
+ }
+ else if (TYPE(c) == T_MODULE) {
+ s0 = " module";
+ }
+ rb_name_error(id, "undefined method `%s' for%s `%s'",
+ rb_id2name(id), s0, rb_class2name(c));
+ }
+
+ rb_add_method(klass, id, 0, NOEX_PUBLIC);
+
+ if (FL_TEST(klass, FL_SINGLETON)) {
+ rb_funcall(rb_iv_get(klass, "__attached__"),
+ singleton_undefined, 1, ID2SYM(id));
+ }
+ else {
+ rb_funcall(klass, undefined, 1, ID2SYM(id));
+ }
+}
+
+/*
+ * call-seq:
+ * undef_method(symbol) => self
+ *
+ * Prevents the current class from responding to calls to the named
+ * method. Contrast this with <code>remove_method</code>, which deletes
+ * the method from the particular class; Ruby will still search
+ * superclasses and mixed-in modules for a possible receiver.
+ *
+ * class Parent
+ * def hello
+ * puts "In parent"
+ * end
+ * end
+ * class Child < Parent
+ * def hello
+ * puts "In child"
+ * end
+ * end
+ *
+ *
+ * c = Child.new
+ * c.hello
+ *
+ *
+ * class Child
+ * remove_method :hello # remove from child, still in parent
+ * end
+ * c.hello
+ *
+ *
+ * class Child
+ * undef_method :hello # prevent any calls to 'hello'
+ * end
+ * c.hello
+ *
+ * <em>produces:</em>
+ *
+ * In child
+ * In parent
+ * prog.rb:23: undefined method `hello' for #<Child:0x401b3bb4> (NoMethodError)
+ */
+
+/* Module#undef_method: accepts multiple symbols/strings. */
+static VALUE
+rb_mod_undef_method(int argc, VALUE *argv, VALUE mod)
+{
+ int i;
+ for (i = 0; i < argc; i++) {
+ rb_undef(mod, rb_to_id(argv[i]));
+ }
+ return mod;
+}
+
+/* Make `name` an alias of `def` on `klass`. The alias shares the
+ original NODE_METHOD body (alias count nd_cnt is incremented on the
+ original entry), records `def` as the alias's original id, clears
+ the cache for `name`, and fires the method_added hooks. */
+void
+rb_alias(VALUE klass, ID name, ID def)
+{
+ NODE *orig_fbody, *node;
+ VALUE singleton = 0;
+ st_data_t data;
+
+ rb_frozen_class_p(klass);
+ if (klass == rb_cObject) {
+ rb_secure(4);
+ }
+ orig_fbody = search_method(klass, def, 0);
+ if (!orig_fbody || !orig_fbody->nd_body) {
+ if (TYPE(klass) == T_MODULE) {
+ /* module methods may resolve through Object */
+ orig_fbody = search_method(rb_cObject, def, 0);
+ }
+ }
+ if (!orig_fbody || !orig_fbody->nd_body) {
+ rb_print_undef(klass, def, 0);
+ }
+ if (FL_TEST(klass, FL_SINGLETON)) {
+ singleton = rb_iv_get(klass, "__attached__");
+ }
+
+ orig_fbody->nd_cnt++;
+
+ if (st_lookup(RCLASS_M_TBL(klass), name, &data)) {
+ /* `name` already exists here: warn / check optimized-method
+ redefinition before it is overwritten */
+ node = (NODE *)data;
+ if (node) {
+ if (RTEST(ruby_verbose) && node->nd_cnt == 0 && node->nd_body) {
+ rb_warning("discarding old %s", rb_id2name(name));
+ }
+ if (nd_type(node->nd_body->nd_body) == NODE_CFUNC) {
+ rb_vm_check_redefinition_opt_method(node);
+ }
+ }
+ }
+
+ st_insert(RCLASS_M_TBL(klass), name,
+ (st_data_t) NEW_FBODY(
+ NEW_METHOD(orig_fbody->nd_body->nd_body,
+ orig_fbody->nd_body->nd_clss,
+ NOEX_WITH_SAFE(orig_fbody->nd_body->nd_noex)), def));
+
+ rb_clear_cache_by_id(name);
+
+ if (!ruby_running) return;
+
+ if (singleton) {
+ rb_funcall(singleton, singleton_added, 1, ID2SYM(name));
+ }
+ else {
+ rb_funcall(klass, added, 1, ID2SYM(name));
+ }
+}
+
+/*
+ * call-seq:
+ * alias_method(new_name, old_name) => self
+ *
+ * Makes <i>new_name</i> a new copy of the method <i>old_name</i>. This can
+ * be used to retain access to methods that are overridden.
+ *
+ * module Mod
+ * alias_method :orig_exit, :exit
+ * def exit(code=0)
+ * puts "Exiting with code #{code}"
+ * orig_exit(code)
+ * end
+ * end
+ * include Mod
+ * exit(99)
+ *
+ * <em>produces:</em>
+ *
+ * Exiting with code 99
+ */
+
+/* Module#alias_method: thin wrapper over rb_alias(). */
+static VALUE
+rb_mod_alias_method(VALUE mod, VALUE newname, VALUE oldname)
+{
+ rb_alias(mod, rb_to_id(newname), rb_to_id(oldname));
+ return mod;
+}
Property changes on: eval_method.c
___________________________________________________________________
Name: svn:keywords
+ Author Date Id Revision
Name: svn:eol-style
+ LF
Index: vm.c
===================================================================
--- vm.c (revision 14363)
+++ vm.c (revision 14364)
@@ -16,7 +16,7 @@
#include "gc.h"
#include "insnhelper.h"
-#include "insnhelper.ci"
+#include "vm_insnhelper.c"
#define BUFSIZE 0x100
#define PROCDEBUG 0
@@ -1026,7 +1026,7 @@
/* evaluator body */
-#include "vm_evalbody.ci"
+#include "vm_evalbody.c"
/* finish
VMe (h1) finish
Index: vm_insnhelper.c
===================================================================
--- vm_insnhelper.c (revision 0)
+++ vm_insnhelper.c (revision 14364)
@@ -0,0 +1,1455 @@
+/**********************************************************************
+
+ insnhelper.c - instruction helper functions.
+
+ $Author$
+ $Date$
+
+ Copyright (C) 2007 Koichi Sasada
+
+**********************************************************************/
+
+/* finish iseq array */
+#include "insns.inc"
+
+/* control stack frame */
+
+
+#ifndef INLINE
+#define INLINE inline
+#endif
+
+/* Push a new control frame onto th's frame stack (frames grow
+ downward: th->cfp - 1). Nil-initializes `local_size` slots above
+ `sp`, stores the GC-guarded `specval` as the dfp slot, and defaults
+ lfp to that slot when no local frame pointer is supplied. Returns
+ the new frame. */
+static inline rb_control_frame_t *
+vm_push_frame(rb_thread_t *th, rb_iseq_t *iseq, VALUE type,
+ VALUE self, VALUE specval, VALUE *pc,
+ VALUE *sp, VALUE *lfp, int local_size)
+{
+ VALUE *dfp;
+ rb_control_frame_t *cfp;
+ int i;
+
+ /* nil initialize */
+ for (i=0; i < local_size; i++) {
+ *sp = Qnil;
+ sp++;
+ }
+
+ /* set special val */
+ *sp = GC_GUARDED_PTR(specval);
+ dfp = sp;
+
+ if (lfp == 0) {
+ lfp = sp;
+ }
+
+ cfp = th->cfp = th->cfp - 1;
+ cfp->pc = pc;
+ cfp->sp = sp + 1;
+ cfp->bp = sp + 1;
+ cfp->iseq = iseq;
+ cfp->flag = type;
+ cfp->self = self;
+ cfp->lfp = lfp;
+ cfp->dfp = dfp;
+ cfp->proc = 0;
+
+#define COLLECT_PROFILE 0
+#if COLLECT_PROFILE
+ cfp->prof_time_self = clock();
+ cfp->prof_time_chld = 0;
+#endif
+
+ if (VMDEBUG == 2) {
+ SDR();
+ }
+
+ return cfp;
+}
+
+/* Pop the current control frame (optionally folding per-frame timing
+ into the iseq's profile counters when COLLECT_PROFILE is enabled). */
+static inline void
+vm_pop_frame(rb_thread_t *th)
+{
+#if COLLECT_PROFILE
+ rb_control_frame_t *cfp = th->cfp;
+
+ if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
+ VALUE current_time = clock();
+ rb_control_frame_t *cfp = th->cfp;
+ cfp->prof_time_self = current_time - cfp->prof_time_self;
+ (cfp+1)->prof_time_chld += cfp->prof_time_self;
+
+ cfp->iseq->profile.count++;
+ cfp->iseq->profile.time_cumu = cfp->prof_time_self;
+ cfp->iseq->profile.time_self = cfp->prof_time_self - cfp->prof_time_chld;
+ }
+ else if (0 /* c method? */) {
+
+ }
+#endif
+ th->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(th->cfp);
+
+ if (VMDEBUG == 2) {
+ SDR();
+ }
+}
+
+/* method dispatch */
+
+/* Lay out `argc` incoming arguments (starting at argv) into the
+ callee iseq's local layout: mandatory, post, optional, rest and
+ block arguments, raising ArgumentError on arity mismatch. Returns
+ the pc offset selected from the optional-argument table (0 for the
+ simple fixed-arity fast path). May replace *block with a Proc's
+ block when the iseq declares a block parameter. */
+static inline int
+vm_callee_setup_arg(rb_thread_t *th, rb_iseq_t *iseq,
+ int argc, VALUE *argv, rb_block_t **block)
+{
+ const int m = iseq->argc;
+ const int orig_argc = argc;
+
+ if (LIKELY(iseq->arg_simple & 0x01)) {
+ /* simple check */
+ if (argc != m) {
+ rb_raise(rb_eArgError, "wrong number of arguments (%d for %d)",
+ argc, m);
+ }
+ return 0;
+ }
+ else {
+ VALUE * const dst = argv;
+ int opt_pc = 0;
+ th->mark_stack_len = argc + iseq->arg_size;
+
+ /* mandatory */
+ if (argc < (m + iseq->arg_post_len)) { /* check with post arg */
+ rb_raise(rb_eArgError, "wrong number of arguments (%d for %d)",
+ argc, m + iseq->arg_post_len);
+ }
+
+ argv += m;
+ argc -= m;
+
+ /* post arguments */
+ if (iseq->arg_post_len) {
+ if (!(orig_argc < iseq->arg_post_start)) {
+ /* copy remaining args aside so post args can be placed */
+ VALUE *new_argv = ALLOCA_N(VALUE, argc);
+ MEMCPY(new_argv, argv, VALUE, argc);
+ argv = new_argv;
+ }
+
+ MEMCPY(&dst[iseq->arg_post_start], &argv[argc -= iseq->arg_post_len],
+ VALUE, iseq->arg_post_len);
+ }
+
+ /* opt arguments */
+ if (iseq->arg_opts) {
+ const int opts = iseq->arg_opts - 1 /* no opt */;
+
+ if (iseq->arg_rest == -1 && argc > opts) {
+ rb_raise(rb_eArgError, "wrong number of arguments (%d for %d)",
+ orig_argc, m + opts + iseq->arg_post_len);
+ }
+
+ if (argc > opts) {
+ argc -= opts;
+ argv += opts;
+ opt_pc = iseq->arg_opt_table[opts]; /* no opt */
+ }
+ else {
+ int i;
+ for (i = argc; i<opts; i++) {
+ dst[i + m] = Qnil;
+ }
+ opt_pc = iseq->arg_opt_table[argc];
+ argc = 0;
+ }
+ }
+
+ /* rest arguments */
+ if (iseq->arg_rest != -1) {
+ dst[iseq->arg_rest] = rb_ary_new4(argc, argv);
+ argc = 0;
+ }
+
+ /* block arguments */
+ if (block && iseq->arg_block != -1) {
+ VALUE blockval = Qnil;
+ rb_block_t * const blockptr = *block;
+
+ if (argc != 0) {
+ rb_raise(rb_eArgError, "wrong number of arguments (%d for %d)",
+ orig_argc, m + iseq->arg_post_len);
+ }
+
+ if (blockptr) {
+ /* make Proc object */
+ if (blockptr->proc == 0) {
+ rb_proc_t *proc;
+
+ blockval = vm_make_proc(th, th->cfp, blockptr);
+
+ GetProcPtr(blockval, proc);
+ *block = &proc->block;
+ }
+ else {
+ blockval = blockptr->proc;
+ }
+ }
+
+ dst[iseq->arg_block] = blockval; /* Proc or nil */
+ }
+
+ th->mark_stack_len = 0;
+ return opt_pc;
+ }
+}
+
+/* Caller-side argument setup: resolve the block argument (an explicit
+ &blk converted via to_proc, or a literal block iseq) into *block,
+ and expand a trailing splat argument onto the stack. Returns the
+ adjusted argument count. */
+static inline int
+caller_setup_args(rb_thread_t *th, rb_control_frame_t *cfp, VALUE flag,
+ int argc, rb_iseq_t *blockiseq, rb_block_t **block)
+{
+ rb_block_t *blockptr = 0;
+
+ if (block) {
+ if (flag & VM_CALL_ARGS_BLOCKARG_BIT) {
+ rb_proc_t *po;
+ VALUE proc;
+
+ proc = *(--cfp->sp);
+
+ if (proc != Qnil) {
+ if (!rb_obj_is_proc(proc)) {
+ VALUE b = rb_check_convert_type(proc, T_DATA, "Proc", "to_proc");
+ if (NIL_P(b)) {
+ rb_raise(rb_eTypeError,
+ "wrong argument type %s (expected Proc)",
+ rb_obj_classname(proc));
+ }
+ proc = b;
+ }
+ GetProcPtr(proc, po);
+ blockptr = &po->block;
+ RUBY_VM_GET_BLOCK_PTR_IN_CFP(cfp)->proc = proc;
+ *block = blockptr;
+ }
+ }
+ else if (blockiseq) {
+ blockptr = RUBY_VM_GET_BLOCK_PTR_IN_CFP(cfp);
+ blockptr->iseq = blockiseq;
+ blockptr->proc = 0;
+ *block = blockptr;
+ }
+ }
+
+ /* expand top of stack? */
+ if (flag & VM_CALL_ARGS_SPLAT_BIT) {
+ VALUE ary = *(cfp->sp - 1);
+ VALUE *ptr;
+ int i;
+ VALUE tmp = rb_check_convert_type(ary, T_ARRAY, "Array", "to_a");
+
+ if (NIL_P(tmp)) {
+ /* do nothing */
+ }
+ else {
+ int len = RARRAY_LEN(tmp);
+ ptr = RARRAY_PTR(tmp);
+ cfp->sp -= 1;
+
+ CHECK_STACK_OVERFLOW(cfp, len);
+
+ for (i = 0; i < len; i++) {
+ *cfp->sp++ = ptr[i];
+ }
+ /* splat arg itself was consumed: net change is len - 1 */
+ argc += i-1;
+ }
+ }
+
+ return argc;
+}
+
+/* Invoke a C method function according to its declared arity `len`:
+ -2 => (*func)(recv, args_as_array)
+ -1 => (*func)(argc, argv, recv)
+ 0..15 => positional arguments spread from argv
+ Raises ArgumentError on arity mismatch or arity > 15. */
+static inline VALUE
+call_cfunc(VALUE (*func)(), VALUE recv, int len, int argc, const VALUE *argv)
+{
+ /* printf("len: %d, argc: %d\n", len, argc); */
+
+ if (len >= 0 && argc != len) {
+ rb_raise(rb_eArgError, "wrong number of arguments(%d for %d)",
+ argc, len);
+ }
+
+ switch (len) {
+ case -2:
+ return (*func) (recv, rb_ary_new4(argc, argv));
+ break;
+ case -1:
+ return (*func) (argc, argv, recv);
+ break;
+ case 0:
+ return (*func) (recv);
+ break;
+ case 1:
+ return (*func) (recv, argv[0]);
+ break;
+ case 2:
+ return (*func) (recv, argv[0], argv[1]);
+ break;
+ case 3:
+ return (*func) (recv, argv[0], argv[1], argv[2]);
+ break;
+ case 4:
+ return (*func) (recv, argv[0], argv[1], argv[2], argv[3]);
+ break;
+ case 5:
+ return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4]);
+ break;
+ case 6:
+ return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
+ argv[5]);
+ break;
+ case 7:
+ return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
+ argv[5], argv[6]);
+ break;
+ case 8:
+ return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
+ argv[5], argv[6], argv[7]);
+ break;
+ case 9:
+ return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
+ argv[5], argv[6], argv[7], argv[8]);
+ break;
+ case 10:
+ return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
+ argv[5], argv[6], argv[7], argv[8], argv[9]);
+ break;
+ case 11:
+ return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
+ argv[5], argv[6], argv[7], argv[8], argv[9],
+ argv[10]);
+ break;
+ case 12:
+ return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
+ argv[5], argv[6], argv[7], argv[8], argv[9],
+ argv[10], argv[11]);
+ break;
+ case 13:
+ return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
+ argv[5], argv[6], argv[7], argv[8], argv[9], argv[10],
+ argv[11], argv[12]);
+ break;
+ case 14:
+ return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
+ argv[5], argv[6], argv[7], argv[8], argv[9], argv[10],
+ argv[11], argv[12], argv[13]);
+ break;
+ case 15:
+ return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
+ argv[5], argv[6], argv[7], argv[8], argv[9], argv[10],
+ argv[11], argv[12], argv[13], argv[14]);
+ break;
+ default:
+ rb_raise(rb_eArgError, "too many arguments(%d)", len);
+ break;
+ }
+ return Qnil; /* not reached */
+}
+
+/* Call a C-implemented method: push a CFUNC control frame, fire
+ C_CALL / C_RETURN event hooks around the call, pop the receiver and
+ arguments from the caller's stack, and verify frame consistency
+ before popping. Returns the method's result. */
+static inline VALUE
+vm_call_cfunc(rb_thread_t *th, rb_control_frame_t *reg_cfp, int num,
+ ID id, VALUE recv, VALUE klass, VALUE flag,
+ NODE *mn, rb_block_t *blockptr)
+{
+ VALUE val;
+
+ EXEC_EVENT_HOOK(th, RUBY_EVENT_C_CALL, recv, id, klass);
+ {
+ rb_control_frame_t *cfp =
+ vm_push_frame(th, 0, FRAME_MAGIC_CFUNC | (flag << FRAME_MAGIC_MASK_BITS),
+ recv, (VALUE) blockptr, 0, reg_cfp->sp, 0, 1);
+
+ cfp->method_id = id;
+ cfp->method_class = klass;
+
+ /* drop receiver + num args from the caller's stack */
+ reg_cfp->sp -= num + 1;
+
+ val = call_cfunc(mn->nd_cfnc, recv, mn->nd_argc, num, reg_cfp->sp + 1);
+
+ if (reg_cfp != th->cfp + 1) {
+ rb_bug("cfp consistency error - send");
+ }
+ vm_pop_frame(th);
+ }
+ EXEC_EVENT_HOOK(th, RUBY_EVENT_C_RETURN, recv, id, klass);
+
+ return val;
+}
+
+/* Extract the call flags stashed in a CFUNC frame's flag word (stored
+ shifted by FRAME_MAGIC_MASK_BITS in vm_call_cfunc); 0 otherwise. */
+static int
+vm_cfunc_flags(rb_control_frame_t *cfp)
+{
+ if (RUBYVM_CFUNC_FRAME_P(cfp))
+ return cfp->flag >> FRAME_MAGIC_MASK_BITS;
+ return 0;
+}
+
+/* Invoke a method defined from a Proc (define_method): record the
+ method id/class on the enclosing block frame (cfp-2) and run the
+ proc with the given receiver and arguments. */
+static inline VALUE
+vm_call_bmethod(rb_thread_t *th, ID id, VALUE procval, VALUE recv,
+ VALUE klass, int argc, VALUE *argv, rb_block_t *blockptr)
+{
+ rb_control_frame_t *cfp = th->cfp;
+ rb_proc_t *proc;
+ VALUE val;
+
+ /* control block frame */
+ (cfp-2)->method_id = id;
+ (cfp-2)->method_class = klass;
+
+ GetProcPtr(procval, proc);
+ val = vm_invoke_proc(th, proc, recv, argc, argv, blockptr);
+ return val;
+}
+
+/* Dispatch to method_missing: replace the receiver slot on the stack
+ with the symbol for `id`, record why the call failed (`opt`, e.g.
+ NOEX_PRIVATE / NOEX_VCALL / NOEX_SUPER) and the pending block, then
+ funcall method_missing with the symbol prepended to the args. */
+static inline VALUE
+vm_method_missing(rb_thread_t *th, ID id, VALUE recv, int num,
+ rb_block_t *blockptr, int opt)
+{
+ rb_control_frame_t *reg_cfp = th->cfp;
+ VALUE *argv = STACK_ADDR_FROM_TOP(num + 1);
+ VALUE val;
+ argv[0] = ID2SYM(id);
+ th->method_missing_reason = opt;
+ th->passed_block = blockptr;
+ val = rb_funcall2(recv, idMethodMissing, num + 1, argv);
+ POPN(num + 1);
+ return val;
+}
+
+/* Set up a frame for a Ruby-defined (iseq) method call: arrange the
+ arguments via vm_callee_setup_arg, clear the remaining locals and
+ push a METHOD frame. The tailcall branch instead pops the caller's
+ frame first and copies the arguments down into its slot. */
+static inline void
+vm_setup_method(rb_thread_t *th, rb_control_frame_t *cfp,
+ int argc, rb_block_t *blockptr, VALUE flag,
+ VALUE iseqval, VALUE recv, VALUE klass)
+{
+ rb_iseq_t *iseq;
+ int opt_pc, i;
+ VALUE *sp, *rsp = cfp->sp - argc;
+
+ /* TODO: eliminate it */
+ GetISeqPtr(iseqval, iseq);
+ opt_pc = vm_callee_setup_arg(th, iseq, argc, rsp, &blockptr);
+
+ /* stack overflow check */
+ CHECK_STACK_OVERFLOW(cfp, iseq->stack_max);
+
+ sp = rsp + iseq->arg_size;
+
+ if (LIKELY(!(flag & VM_CALL_TAILCALL_BIT))) {
+ if (0) printf("local_size: %d, arg_size: %d\n",
+ iseq->local_size, iseq->arg_size);
+
+ /* clear local variables */
+ for (i = 0; i < iseq->local_size - iseq->arg_size; i++) {
+ *sp++ = Qnil;
+ }
+
+ vm_push_frame(th, iseq,
+ FRAME_MAGIC_METHOD, recv, (VALUE) blockptr,
+ iseq->iseq_encoded + opt_pc, sp, 0, 0);
+
+ cfp->sp = rsp - 1 /* recv */;
+ }
+ else {
+ VALUE *p_rsp;
+ cfp = ++th->cfp; /* pop cf */
+ p_rsp = th->cfp->sp;
+
+ /* copy arguments */
+ for (i=0; i < (sp - rsp); i++) {
+ p_rsp[i] = rsp[i];
+ }
+
+ sp -= rsp - p_rsp;
+
+ /* clear local variables */
+ for (i = 0; i < iseq->local_size - iseq->arg_size; i++) {
+ *sp++ = Qnil;
+ }
+
+ vm_push_frame(th, iseq,
+ FRAME_MAGIC_METHOD, recv, (VALUE) blockptr,
+ iseq->iseq_encoded + opt_pc, sp, 0, 0);
+ }
+}
+
+/* Core method dispatch. Given the resolved method node `mn`, handles:
+ public methods (direct dispatch by node type: iseq method, CFUNC,
+ attr reader/writer, bmethod, ZSUPER re-lookup), visibility checks
+ (private without fcall / protected outside the defining class go to
+ method_missing), $SAFE restrictions, and the missing-method path.
+ Returns Qundef when an iseq frame was pushed for the interpreter
+ loop to continue executing; otherwise the finished call's value. */
+static inline VALUE
+vm_call_method(rb_thread_t *th, rb_control_frame_t *cfp,
+ int num, rb_block_t *blockptr, VALUE flag,
+ ID id, NODE *mn, VALUE recv, VALUE klass)
+{
+ VALUE val;
+
+ start_method_dispatch:
+
+ if ((mn != 0)) {
+ if ((mn->nd_noex == 0)) {
+ /* dispatch method */
+ NODE *node;
+
+ normal_method_dispatch:
+
+ node = mn->nd_body;
+
+ switch (nd_type(node)) {
+ case RUBY_VM_METHOD_NODE:{
+ vm_setup_method(th, cfp, num, blockptr, flag, (VALUE)node->nd_body, recv, klass);
+ return Qundef;
+ }
+ case NODE_CFUNC:{
+ val = vm_call_cfunc(th, cfp, num, id, recv, mn->nd_clss, flag, node, blockptr);
+ break;
+ }
+ case NODE_ATTRSET:{
+ val = rb_ivar_set(recv, node->nd_vid, *(cfp->sp - 1));
+ cfp->sp -= 2;
+ break;
+ }
+ case NODE_IVAR:{
+ val = rb_ivar_get(recv, node->nd_vid);
+ cfp->sp -= 1;
+ break;
+ }
+ case NODE_BMETHOD:{
+ VALUE *argv = cfp->sp - num;
+ val = vm_call_bmethod(th, id, node->nd_cval, recv, klass, num, argv, blockptr);
+ cfp->sp += - num - 1;
+ break;
+ }
+ case NODE_ZSUPER:{
+ /* visibility-changing stub: re-resolve in the superclass */
+ klass = RCLASS_SUPER(mn->nd_clss);
+ mn = rb_method_node(klass, id);
+
+ if (mn != 0) {
+ goto normal_method_dispatch;
+ }
+ else {
+ goto start_method_dispatch;
+ }
+ }
+ default:{
+ printf("node: %s\n", ruby_node_name(nd_type(node)));
+ rb_bug("eval_invoke_method: unreachable");
+ /* unreachable */
+ break;
+ }
+ }
+ }
+ else {
+ int noex_safe;
+
+ if (!(flag & VM_CALL_FCALL_BIT) &&
+ (mn->nd_noex & NOEX_MASK) & NOEX_PRIVATE) {
+ int stat = NOEX_PRIVATE;
+
+ if (flag & VM_CALL_VCALL_BIT) {
+ stat |= NOEX_VCALL;
+ }
+ val = vm_method_missing(th, id, recv, num, blockptr, stat);
+ }
+ else if (((mn->nd_noex & NOEX_MASK) & NOEX_PROTECTED) &&
+ !(flag & VM_CALL_SEND_BIT)) {
+ VALUE defined_class = mn->nd_clss;
+
+ if (TYPE(defined_class) == T_ICLASS) {
+ defined_class = RBASIC(defined_class)->klass;
+ }
+
+ if (!rb_obj_is_kind_of(cfp->self, rb_class_real(defined_class))) {
+ val = vm_method_missing(th, id, recv, num, blockptr, NOEX_PROTECTED);
+ }
+ else {
+ goto normal_method_dispatch;
+ }
+ }
+ else if ((noex_safe = NOEX_SAFE(mn->nd_noex)) > th->safe_level &&
+ (noex_safe > 2)) {
+ rb_raise(rb_eSecurityError, "calling insecure method: %s", rb_id2name(id));
+ }
+ else {
+ goto normal_method_dispatch;
+ }
+ }
+ }
+ else {
+ /* method missing */
+ if (id == idMethodMissing) {
+ rb_bug("method missing");
+ }
+ else {
+ int stat = 0;
+ if (flag & VM_CALL_VCALL_BIT) {
+ stat |= NOEX_VCALL;
+ }
+ if (flag & VM_CALL_SUPER_BIT) {
+ stat |= NOEX_SUPER;
+ }
+ val = vm_method_missing(th, id, recv, num, blockptr, stat);
+ }
+ }
+
+ RUBY_VM_CHECK_INTS();
+ return val;
+}
+
+/* Optimize a call to Kernel#send (rb_f_send): take the symbol off the
+ stack, shift the remaining arguments down one slot, re-resolve the
+ target method node, and mark the call as an fcall so it may invoke
+ private methods, updating *mn/*flag/*num/*id in place. */
+static inline void
+vm_send_optimize(rb_control_frame_t *reg_cfp,
+ NODE **mn, rb_num_t *flag, rb_num_t *num, ID *id, VALUE klass)
+{
+ if (*mn && nd_type((*mn)->nd_body) == NODE_CFUNC) {
+ NODE *node = (*mn)->nd_body;
+ extern VALUE rb_f_send(int argc, VALUE *argv, VALUE recv);
+
+ if (node->nd_cfnc == rb_f_send) {
+ int i = *num - 1;
+ VALUE sym = TOPN(i);
+ *id = SYMBOL_P(sym) ? SYM2ID(sym) : rb_to_id(sym);
+
+ /* shift arguments */
+ if (i > 0) {
+ MEMMOVE(&TOPN(i), &TOPN(i-1), VALUE, i);
+ }
+
+ *mn = rb_method_node(klass, *id);
+ *num -= 1;
+ DEC_SP(1);
+ *flag |= VM_CALL_FCALL_BIT;
+ }
+ }
+}
+
+/* yield */
+
+/* Return nonzero when `procval` is a lambda (affects argument-passing
+ semantics in yield); 0 for plain blocks or no proc. */
+static inline int
+block_proc_is_lambda(VALUE procval)
+{
+ rb_proc_t *proc;
+
+ if (procval) {
+ GetProcPtr(procval, proc);
+ return proc->is_lambda;
+ }
+ else {
+ return 0;
+ }
+}
+
+/* Yield to a C-implemented block (NODE ifunc stored as block->iseq):
+ pack the arguments (array for lambdas, first arg or nil otherwise),
+ push an IFUNC frame, call the C function, and pop the frame. */
+static inline VALUE
+vm_yield_with_cfunc(rb_thread_t *th, rb_block_t *block,
+ VALUE self, int argc, VALUE *argv)
+{
+ NODE *ifunc = (NODE *) block->iseq;
+ VALUE val;
+ VALUE arg;
+ int lambda = block_proc_is_lambda(block->proc);
+
+ if (lambda) {
+ arg = rb_ary_new4(argc, argv);
+ }
+ else if (argc == 0) {
+ arg = Qnil;
+ }
+ else {
+ arg = argv[0];
+ }
+
+ vm_push_frame(th, 0, FRAME_MAGIC_IFUNC,
+ self, (VALUE)block->dfp,
+ 0, th->cfp->sp, block->lfp, 1);
+
+ val = (*ifunc->nd_cfnc) (arg, ifunc->nd_tval, argc, argv);
+
+ th->cfp++;
+ return val;
+}
+
+/* Arrange yield arguments into the block iseq's local layout. Lambdas
+ use strict method-call semantics (vm_callee_setup_arg); plain blocks
+ get Ruby's lenient yield semantics: a single array is auto-splatted
+ when the block takes multiple parameters, missing args become nil,
+ extra args are truncated or collected into the rest parameter, and
+ the block parameter (&b) receives the passed block's proc or nil.
+ Returns the opt-table pc offset (always 0 on the non-lambda path). */
+static inline int
+vm_yield_setup_args(rb_thread_t *th, rb_iseq_t *iseq,
+ int argc, VALUE *argv, rb_block_t *blockptr, int lambda)
+{
+ if (0) { /* for debug */
+ printf(" argc: %d\n", argc);
+ printf("iseq argc: %d\n", iseq->argc);
+ printf("iseq opts: %d\n", iseq->arg_opts);
+ printf("iseq rest: %d\n", iseq->arg_rest);
+ printf("iseq post: %d\n", iseq->arg_post_len);
+ printf("iseq blck: %d\n", iseq->arg_block);
+ printf("iseq smpl: %d\n", iseq->arg_simple);
+ printf(" lambda: %s\n", lambda ? "true" : "false");
+ }
+
+ if (lambda) {
+ /* call as method */
+ return vm_callee_setup_arg(th, iseq, argc, argv, &blockptr);
+ }
+ else {
+ int i;
+ const int m = iseq->argc;
+
+ th->mark_stack_len = argc;
+
+ /*
+ * yield [1, 2]
+ * => {|a|} => a = [1, 2]
+ * => {|a, b|} => a, b = [1, 2]
+ */
+ if (!(iseq->arg_simple & 0x02) &&
+ (m + iseq->arg_post_len) > 0 &&
+ argc == 1 && TYPE(argv[0]) == T_ARRAY) {
+ VALUE ary = argv[0];
+ th->mark_stack_len = argc = RARRAY_LEN(ary);
+
+ CHECK_STACK_OVERFLOW(th->cfp, argc);
+
+ MEMCPY(argv, RARRAY_PTR(ary), VALUE, argc);
+ }
+
+ /* pad missing mandatory args with nil */
+ for (i=argc; i<m; i++) {
+ argv[i] = Qnil;
+ }
+
+ if (iseq->arg_rest == -1) {
+ if (m < argc) {
+ /*
+ * yield 1, 2
+ * => {|a|} # truncate
+ */
+ th->mark_stack_len = argc = m;
+ }
+ }
+ else {
+ int r = iseq->arg_rest;
+
+ if (iseq->arg_post_len) {
+ int len = iseq->arg_post_len;
+ int start = iseq->arg_post_start;
+ int rsize = argc > m ? argc - m : 0;
+ int psize = rsize;
+ VALUE ary;
+
+ if (psize > len) psize = len;
+
+ ary = rb_ary_new4(rsize - psize, &argv[r]);
+
+ if (0) {
+ printf(" argc: %d\n", argc);
+ printf(" len: %d\n", len);
+ printf("start: %d\n", start);
+ printf("rsize: %d\n", rsize);
+ }
+
+ /* copy post argument */
+ MEMMOVE(&argv[start], &argv[r + rsize - psize], VALUE, psize);
+
+ for (i=psize; i<len; i++) {
+ argv[start + i] = Qnil;
+ }
+ argv[r] = ary;
+ }
+ else {
+ if (argc < r) {
+ /* yield 1
+ * => {|a, b, *r|}
+ */
+ for (i=argc; i<r; i++) {
+ argv[i] = Qnil;
+ }
+ argv[r] = rb_ary_new();
+ }
+ else {
+ argv[r] = rb_ary_new4(argc-r, &argv[r]);
+ }
+ }
+
+ th->mark_stack_len = iseq->arg_size;
+ }
+
+ /* {|&b|} */
+ if (iseq->arg_block != -1) {
+ VALUE procval = Qnil;
+
+ if (blockptr) {
+ procval = blockptr->proc;
+ }
+
+ argv[iseq->arg_block] = procval;
+ }
+
+ th->mark_stack_len = 0;
+ return 0;
+ }
+}
+
+/* Execute a yield: find the current block, raise LocalJumpError when
+ none is given, set up its arguments, and either push a BLOCK frame
+ (Ruby blocks — returns Qundef so the interpreter loop continues) or
+ call the C block directly (ifunc stored as a T_NODE iseq). */
+static VALUE
+vm_invoke_block(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_num_t num, rb_num_t flag)
+{
+ VALUE val;
+ rb_block_t *block = GET_BLOCK_PTR();
+ rb_iseq_t *iseq;
+ int argc = num;
+
+ if (GET_ISEQ()->local_iseq->type != ISEQ_TYPE_METHOD || block == 0) {
+ vm_localjump_error("no block given (yield)", Qnil, 0);
+ }
+ iseq = block->iseq;
+
+ argc = caller_setup_args(th, GET_CFP(), flag, argc, 0, 0);
+
+ if (BUILTIN_TYPE(iseq) != T_NODE) {
+ int opt_pc;
+ const int arg_size = iseq->arg_size;
+ VALUE *rsp = GET_SP() - argc;
+ SET_SP(rsp);
+
+ CHECK_STACK_OVERFLOW(GET_CFP(), iseq->stack_max);
+ opt_pc = vm_yield_setup_args(th, iseq, argc, rsp, 0,
+ block_proc_is_lambda(block->proc));
+
+ vm_push_frame(th, iseq,
+ FRAME_MAGIC_BLOCK, block->self, (VALUE) block->dfp,
+ iseq->iseq_encoded + opt_pc, rsp + arg_size, block->lfp,
+ iseq->local_size - arg_size);
+
+ return Qundef;
+ }
+ else {
+ val = vm_yield_with_cfunc(th, block, block->self, argc, STACK_ADDR_FROM_TOP(argc));
+ POPN(argc); /* TODO: should put before C/yield? */
+ return val;
+ }
+}
+
+/* cref */
+
+/*
+ * Return the special cref (lexical-scope NODE) stashed in the frame's
+ * svar slot lfp[-1], or 0 when the slot is empty.  The cref is kept in
+ * the basic.klass field of the frame's RValues object (the same object
+ * used by lfp_svar_place() for frame-special variables).
+ */
+static NODE *
+lfp_get_special_cref(VALUE *lfp)
+{
+ struct RValues *values;
+ if (((VALUE)(values = (void *)lfp[-1])) != Qnil && values->basic.klass) {
+ return (NODE *)values->basic.klass;
+ }
+ else {
+ return 0;
+ }
+}
+
+/* Allocate a fresh T_VALUES object with all three value slots set to nil.
+ * Used to lazily create the per-frame special-variable storage. */
+static struct RValues *
+new_value(void)
+{
+ struct RValues *val = RVALUES(rb_newobj());
+ OBJSETUP(val, 0, T_VALUES);
+ val->v1 = val->v2 = val->v3 = Qnil;
+ return val;
+}
+
+/*
+ * Locate (allocating on first use) the RValues object holding the
+ * frame-special variables for the frame whose local-frame pointer is
+ * `lfp`.  Ordinary frames keep it in lfp[-1]; the thread's root local
+ * frame uses th->local_svar instead.
+ */
+static struct RValues *
+lfp_svar_place(rb_thread_t *th, VALUE *lfp)
+{
+ struct RValues *svar;
+
+ if (th->local_lfp != lfp) {
+ svar = (struct RValues *)lfp[-1];
+ if ((VALUE)svar == Qnil) {
+ /* lazily allocate on first access */
+ svar = new_value();
+ lfp[-1] = (VALUE)svar;
+ }
+ }
+ else {
+ svar = (struct RValues *)th->local_svar;
+ if ((VALUE)svar == Qnil) {
+ svar = new_value();
+ th->local_svar = (VALUE)svar;
+ }
+ }
+ return svar;
+}
+
+/*
+ * Read a frame-special variable.  Keys 0..2 map to fixed slots of the
+ * svar object (key 1 holds the last regexp backref — see vm_getspecial;
+ * key 0 presumably holds $_ — TODO confirm against the compiler);
+ * any other key is looked up in the overflow hash kept in v3.
+ * Returns Qnil when the variable is unset.
+ */
+static VALUE
+lfp_svar_get(rb_thread_t *th, VALUE *lfp, VALUE key)
+{
+ struct RValues *svar = lfp_svar_place(th, lfp);
+
+ switch (key) {
+ case 0:
+ return svar->v1;
+ case 1:
+ return svar->v2;
+ case 2:
+ return svar->basic.klass;
+ default: {
+ VALUE hash = svar->v3;
+
+ if (hash == Qnil) {
+ return Qnil;
+ }
+ else {
+ return rb_hash_lookup(hash, key);
+ }
+ }
+ }
+}
+
+/*
+ * Write a frame-special variable; the mirror of lfp_svar_get().
+ * Keys 0..2 store into the fixed slots; other keys go into the
+ * overflow hash in v3, which is created on first use.
+ */
+static void
+lfp_svar_set(rb_thread_t *th, VALUE *lfp, VALUE key, VALUE val)
+{
+ struct RValues *svar = lfp_svar_place(th, lfp);
+
+ switch (key) {
+ case 0:
+ svar->v1 = val;
+ return;
+ case 1:
+ svar->v2 = val;
+ return;
+ case 2:
+ svar->basic.klass = val;
+ return;
+ default: {
+ VALUE hash = svar->v3;
+
+ if (hash == Qnil) {
+ svar->v3 = hash = rb_hash_new();
+ }
+ rb_hash_aset(hash, key, val);
+ }
+ }
+}
+
+/*
+ * Resolve the cref (lexical scope chain) for the current context:
+ * a special cref stored in the frame wins over the iseq's compile-time
+ * cref_stack.  One of the two must exist; otherwise it is a VM bug.
+ */
+static NODE *
+get_cref(rb_iseq_t *iseq, VALUE *lfp)
+{
+ NODE *cref;
+ if ((cref = lfp_get_special_cref(lfp)) != 0) {
+ /* */
+ }
+ else if ((cref = iseq->cref_stack) != 0) {
+ /* */
+ }
+ else {
+ rb_bug("get_cref: unreachable");
+ }
+ return cref;
+}
+
+/*
+ * Read a special variable for the getspecial instruction.
+ * type == 0      : plain svar lookup by `key`.
+ * type & 0x01    : named backref special; (type >> 1) is the character
+ *                  '&' ($&), '`' ($`), '\'' ($'), or '+' ($+), evaluated
+ *                  against the frame's last MatchData (svar key 1).
+ * otherwise      : numbered backref $N with N == type >> 1.
+ */
+static inline VALUE
+vm_getspecial(rb_thread_t *th, VALUE *lfp, VALUE key, rb_num_t type)
+{
+ VALUE val;
+
+ if (type == 0) {
+ /* fixnum keys index the overflow hash; unwrap to a C int first */
+ if (FIXNUM_P(key)) key = FIX2INT(key);
+ val = lfp_svar_get(th, lfp, key);
+ }
+ else {
+ VALUE backref = lfp_svar_get(th, lfp, 1);
+
+ if (type & 0x01) {
+ switch (type >> 1) {
+ case '&':
+ val = rb_reg_last_match(backref);
+ break;
+ case '`':
+ val = rb_reg_match_pre(backref);
+ break;
+ case '\'':
+ val = rb_reg_match_post(backref);
+ break;
+ case '+':
+ val = rb_reg_match_last(backref);
+ break;
+ default:
+ rb_bug("unexpected back-ref");
+ }
+ }
+ else {
+ val = rb_reg_nth_match(type >> 1, backref);
+ }
+ }
+ return val;
+}
+
+/*
+ * Constant lookup for the getconstant/defined instructions.
+ * klass == Qnil  : search the current lexical scope (cref chain), walking
+ *                  outward and honoring autoload entries, then fall back
+ *                  to an inheritance-based lookup from the outermost cref.
+ * otherwise      : `klass` is an explicit scope (Foo::BAR); it must be a
+ *                  class or module.
+ * When is_defined is nonzero, return a truthy value instead of the
+ * constant itself (for `defined?`).
+ */
+static inline VALUE
+vm_get_ev_const(rb_thread_t *th, rb_iseq_t *iseq,
+ VALUE klass, ID id, int is_defined)
+{
+ VALUE val;
+
+ if (klass == Qnil) {
+ /* in current lexical scope */
+ NODE *root_cref = get_cref(iseq, th->cfp->lfp);
+ NODE *cref = root_cref;
+
+ /* nd_next != 0 excludes the terminal (toplevel) cref entry */
+ while (cref && cref->nd_next) {
+ klass = cref->nd_clss;
+ cref = cref->nd_next;
+
+ if (klass == 0) {
+ continue;
+ }
+ if (NIL_P(klass)) {
+ /* scope entered via instance_eval etc.: look up from the
+ * receiver's class instead */
+ if (is_defined) {
+ /* TODO: check */
+ return 1;
+ }
+ else {
+ klass = CLASS_OF(th->cfp->self);
+ return rb_const_get(klass, id);
+ }
+ }
+ search_continue:
+ if (RCLASS_IV_TBL(klass) &&
+ st_lookup(RCLASS_IV_TBL(klass), id, &val)) {
+ if (val == Qundef) {
+ /* Qundef marks an autoload stub: load it, then re-check */
+ rb_autoload_load(klass, id);
+ goto search_continue;
+ }
+ else {
+ if (is_defined) {
+ return 1;
+ }
+ else {
+ return val;
+ }
+ }
+ }
+ }
+ /* not found lexically: fall back to the ancestry of the innermost
+ * lexical scope */
+ klass = root_cref->nd_clss;
+ if (is_defined) {
+ return rb_const_defined(klass, id);
+ }
+ else {
+ return rb_const_get(klass, id);
+ }
+ }
+ else {
+ switch (TYPE(klass)) {
+ case T_CLASS:
+ case T_MODULE:
+ break;
+ default:
+ rb_raise(rb_eTypeError, "%s is not a class/module",
+ RSTRING_PTR(rb_obj_as_string(klass)));
+ }
+ if (is_defined) {
+ return rb_const_defined(klass, id);
+ }
+ else {
+ return rb_const_get(klass, id);
+ }
+ }
+}
+
+/*
+ * Return the class/module that owns class variables in the current
+ * lexical scope (the innermost cref's class).  Warns when that scope is
+ * toplevel, and raises TypeError when no class is available.
+ */
+static inline VALUE
+vm_get_cvar_base(rb_thread_t *th, rb_iseq_t *iseq)
+{
+ NODE *cref = get_cref(iseq, th->cfp->lfp);
+ VALUE klass = Qnil;
+
+ if (cref) {
+ klass = cref->nd_clss;
+ if (!cref->nd_next) {
+ rb_warn("class variable access from toplevel");
+ }
+ }
+ if (NIL_P(klass)) {
+ rb_raise(rb_eTypeError, "no class variables available");
+ }
+ return klass;
+}
+
+/*
+ * Implements the definemethod instruction: register `miseq` as method
+ * `id` on the class given by `cref` (or on obj's singleton class when
+ * is_singleton is set).  Visibility comes from the cref; module_function
+ * additionally installs a public copy on the module's singleton class.
+ * Bumps the VM state version so inline method caches are invalidated.
+ */
+static inline void
+vm_define_method(rb_thread_t *th, VALUE obj,
+ ID id, rb_iseq_t *miseq, rb_num_t is_singleton, NODE *cref)
+{
+ NODE *newbody;
+ int noex = cref->nd_visi;
+ VALUE klass = cref->nd_clss;
+
+ if (is_singleton) {
+ /* immediates have no singleton class to attach to */
+ if (FIXNUM_P(obj) || SYMBOL_P(obj)) {
+ rb_raise(rb_eTypeError,
+ "can't define singleton method \"%s\" for %s",
+ rb_id2name(id), rb_obj_classname(obj));
+ }
+
+ if (OBJ_FROZEN(obj)) {
+ rb_error_frozen("object");
+ }
+
+ klass = rb_singleton_class(obj);
+ noex = NOEX_PUBLIC;
+ }
+
+ /* dup */
+ COPY_CREF(miseq->cref_stack, cref);
+ miseq->klass = klass;
+ miseq->defined_method_id = id;
+ newbody = NEW_NODE(RUBY_VM_METHOD_NODE, 0, miseq->self, 0);
+ rb_add_method(klass, id, newbody, noex);
+
+ if (!is_singleton && noex == NOEX_MODFUNC) {
+ /* module_function: also expose as a public singleton method */
+ rb_add_method(rb_singleton_class(klass), id, newbody, NOEX_PUBLIC);
+ }
+ INC_VM_STATE_VERSION();
+}
+
+/*
+ * Look up the method node for (klass, id), going through the per-call-site
+ * inline cache `ic` when OPT_INLINE_METHOD_CACHE is enabled.  The cache is
+ * valid while both the cached class and the global VM state version match;
+ * a miss refills it from rb_method_node().
+ */
+static inline NODE *
+vm_method_search(VALUE id, VALUE klass, IC ic)
+{
+ NODE *mn;
+
+#if OPT_INLINE_METHOD_CACHE
+ {
+ if (LIKELY(klass == ic->ic_class) &&
+ LIKELY(GET_VM_STATE_VERSION() == ic->ic_vmstat)) {
+ mn = ic->ic_method;
+ }
+ else {
+ mn = rb_method_node(klass, id);
+ ic->ic_class = klass;
+ ic->ic_method = mn;
+ ic->ic_vmstat = GET_VM_STATE_VERSION();
+ }
+ }
+#else
+ mn = rb_method_node(klass, id);
+#endif
+ return mn;
+}
+
+/*
+ * Find where `super` should continue the method search.  For a class this
+ * is simply its superclass.  For a module, walk the receiver's ancestor
+ * chain to the iclass that includes the module and continue from its
+ * superclass, so included-module methods resolve `super` correctly.
+ */
+static inline VALUE
+vm_search_normal_superclass(VALUE klass, VALUE recv)
+{
+ if (BUILTIN_TYPE(klass) == T_CLASS) {
+ klass = RCLASS_SUPER(klass);
+ }
+ else if (BUILTIN_TYPE(klass) == T_MODULE) {
+ VALUE k = CLASS_OF(recv);
+ while (k) {
+ if (BUILTIN_TYPE(k) == T_ICLASS && RBASIC(k)->klass == klass) {
+ klass = RCLASS_SUPER(k);
+ break;
+ }
+ k = RCLASS_SUPER(k);
+ }
+ }
+ return klass;
+}
+
+/*
+ * Compute the method id and starting class for a `super` call and return
+ * them through *idp / *klassp.  `ip` is the current iseq; blocks are
+ * skipped upward until the defining method iseq is found.  Methods
+ * created with Module#define_method need the enclosing control frame to
+ * recover the real method id/class, and zsuper (sigval == Qfalse) is
+ * rejected for them because the original argument list is unavailable.
+ */
+static void
+vm_search_superclass(rb_control_frame_t *reg_cfp, rb_iseq_t *ip, VALUE recv, VALUE sigval, ID *idp, VALUE *klassp)
+{
+ ID id;
+ VALUE klass;
+
+ /* hop out of blocks: only method iseqs carry a klass */
+ while (ip && !ip->klass) {
+ ip = ip->parent_iseq;
+ }
+
+ if (ip == 0) {
+ rb_raise(rb_eNoMethodError, "super called outside of method");
+ }
+
+ id = ip->defined_method_id;
+
+ if (ip != ip->local_iseq) {
+ /* defined by Module#define_method() */
+ rb_control_frame_t *lcfp = GET_CFP();
+
+ /* walk control frames until we reach the frame executing `ip`,
+ * following the dfp chain one level per outer iteration */
+ while (lcfp->iseq != ip) {
+ VALUE *tdfp = GET_PREV_DFP(lcfp->dfp);
+ while (1) {
+ lcfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(lcfp);
+ if (lcfp->dfp == tdfp) {
+ break;
+ }
+ }
+ }
+
+ id = lcfp->method_id;
+ klass = vm_search_normal_superclass(lcfp->method_class, recv);
+
+ if (sigval == Qfalse) {
+ /* zsuper */
+ rb_raise(rb_eRuntimeError, "implicit argument passing of super from method defined by define_method() is not supported. Specify all arguments explicitly.");
+ }
+ }
+ else {
+ klass = vm_search_normal_superclass(ip->klass, recv);
+ }
+
+ *idp = id;
+ *klassp = klass;
+}
+
+/*
+ * Implements the throw instruction, which carries non-local control flow
+ * (break/return/retry/exceptions) out of the VM loop.
+ *
+ * throw_state encoding: low byte = tag (TAG_BREAK etc.), bit 15 = "flag"
+ * fast path, bit 14 selects pt = 1 vs 0 on that path, bits 16+ = level
+ * count for retry.  When state != 0, build and return a throw object
+ * whose target pointer `pt` identifies the frame to unwind to; when
+ * state == 0 the instruction re-raises an in-flight object and `throwobj`
+ * is passed through with th->state set from its type.
+ */
+static VALUE
+vm_throw(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_num_t throw_state, VALUE throwobj)
+{
+ rb_num_t state = throw_state & 0xff;
+ rb_num_t flag = throw_state & 0x8000;
+ rb_num_t level = throw_state >> 16;
+
+ if (state != 0) {
+ VALUE *pt;
+ int i;
+ if (flag != 0) {
+ if (throw_state & 0x4000) {
+ pt = (void *)1;
+ }
+ else {
+ pt = 0;
+ }
+ }
+ else {
+ if (state == TAG_BREAK) {
+ rb_control_frame_t *cfp = GET_CFP();
+ VALUE *dfp = GET_DFP();
+ int is_orphan = 1;
+ rb_iseq_t *base_iseq = GET_ISEQ();
+
+ /* climb to the frame where the broken-out-of block was
+ * created, following the dfp chain up through nested blocks */
+ search_parent:
+ if (cfp->iseq->type != ISEQ_TYPE_BLOCK) {
+ dfp = GC_GUARDED_PTR_REF((VALUE *) *dfp);
+ base_iseq = base_iseq->parent_iseq;
+
+ while ((VALUE *) cfp < th->stack + th->stack_size) {
+ if (cfp->dfp == dfp) {
+ goto search_parent;
+ }
+ cfp++;
+ }
+ rb_bug("VM (throw): can't find break base.");
+ }
+
+ if (VM_FRAME_TYPE(cfp) == FRAME_MAGIC_LAMBDA) {
+ /* lambda{... break ...} */
+ is_orphan = 0;
+ pt = dfp;
+ }
+ else {
+ dfp = GC_GUARDED_PTR_REF((VALUE *) *dfp);
+
+ /* a plain block's break is valid only while the method that
+ * yielded is still on the stack with a live BREAK catch
+ * entry whose continuation matches our return pc */
+ while ((VALUE *)cfp < th->stack + th->stack_size) {
+ if (cfp->dfp == dfp) {
+ /* NOTE(review): duplicated assignment `epc = epc =`
+ * — harmless, but should be just `epc =` */
+ VALUE epc = epc = cfp->pc - cfp->iseq->iseq_encoded;
+ rb_iseq_t *iseq = cfp->iseq;
+ int i;
+
+ for (i=0; i<iseq->catch_table_size; i++) {
+ struct iseq_catch_table_entry *entry = &iseq->catch_table[i];
+
+ if (entry->type == CATCH_TYPE_BREAK &&
+ entry->start < epc && entry->end >= epc) {
+ if (entry->cont == epc) {
+ goto found;
+ }
+ else {
+ break;
+ }
+ }
+ }
+ break;
+
+ found:
+ pt = dfp;
+ is_orphan = 0;
+ break;
+ }
+ cfp++;
+ }
+ }
+
+ if (is_orphan) {
+ vm_localjump_error("break from proc-closure", throwobj, TAG_BREAK);
+ }
+ }
+ else if (state == TAG_RETRY) {
+ /* hop `level` dfp links outward to find the retry target */
+ pt = GC_GUARDED_PTR_REF((VALUE *) * GET_DFP());
+ for (i = 0; i < level; i++) {
+ pt = GC_GUARDED_PTR_REF((VALUE *) * pt);
+ }
+ }
+ else if (state == TAG_RETURN) {
+ rb_control_frame_t *cfp = GET_CFP();
+ VALUE *dfp = GET_DFP();
+ int is_orphan = 1;
+
+ /**
+ * check orphan:
+ */
+ while ((VALUE *) cfp < th->stack + th->stack_size) {
+ if (GET_DFP() == dfp) {
+ if (VM_FRAME_TYPE(cfp) == FRAME_MAGIC_LAMBDA) {
+ /* in lambda */
+ is_orphan = 0;
+ break;
+ }
+ }
+ if (GET_LFP() == cfp->lfp &&
+ cfp->iseq->type == ISEQ_TYPE_METHOD) {
+ /* the defining method is still running: return is valid */
+ is_orphan = 0;
+ break;
+ }
+ cfp++;
+ }
+
+ if (is_orphan) {
+ vm_localjump_error("unexpected return", throwobj, TAG_RETURN);
+ }
+
+ pt = GET_LFP();
+ }
+ else {
+ /* NOTE(review): message typo — should read
+ * "insns(throw): unsupported throw type" (left as-is: string
+ * literal inside a patch) */
+ rb_bug("isns(throw): unsupport throw type");
+ }
+ }
+ th->state = state;
+ return (VALUE)NEW_THROW_OBJECT(throwobj, (VALUE) pt, state);
+ }
+ else {
+ /* continue throw */
+ VALUE err = throwobj;
+
+ if (FIXNUM_P(err)) {
+ th->state = FIX2INT(err);
+ }
+ else if (SYMBOL_P(err)) {
+ th->state = TAG_THROW;
+ }
+ else if (BUILTIN_TYPE(err) == T_NODE) {
+ th->state = GET_THROWOBJ_STATE(err);
+ }
+ else {
+ th->state = TAG_RAISE;
+ /*th->state = FIX2INT(rb_ivar_get(err, idThrowState));*/
+ }
+ return err;
+ }
+}
+
+/*
+ * Implements the expandarray instruction: push `num` destructured
+ * elements of `ary` onto the VM stack, padding with nil when the array
+ * is short.  flag bit 0 additionally pushes a splat array of the
+ * leftover elements; flag bit 1 selects "post" mode, which takes the
+ * elements from the tail of the array and pushes them in reverse.
+ */
+static inline void
+vm_expandarray(rb_control_frame_t *cfp, VALUE ary, int num, int flag)
+{
+ int is_splat = flag & 0x01;
+ int space_size = num + is_splat;
+ VALUE *base = cfp->sp, *ptr;
+ volatile VALUE tmp_ary;
+ int len;
+
+ if (TYPE(ary) != T_ARRAY) {
+ ary = rb_ary_to_ary(ary);
+ }
+
+ cfp->sp += space_size;
+
+ /* volatile copy presumably guards a freshly converted ary from GC
+ * while we read through its raw pointer — TODO confirm */
+ tmp_ary = ary;
+ ptr = RARRAY_PTR(ary);
+ len = RARRAY_LEN(ary);
+
+ if (flag & 0x02) {
+ /* post: ..., nil ,ary[-1], ..., ary[0..-num] # top */
+ int i = 0, j;
+
+ if (len < num) {
+ /* not enough elements: pad the bottom with nil */
+ for (i=0; i<num-len; i++) {
+ *base++ = Qnil;
+ }
+ }
+ for (j=0; i<num; i++, j++) {
+ VALUE v = ptr[len - j - 1];
+ *base++ = v;
+ }
+ if (is_splat) {
+ /* splat receives the untouched leading elements */
+ *base = rb_ary_new4(len - j, ptr);
+ }
+ }
+ else {
+ /* normal: ary[num..-1], ary[num-2], ary[num-3], ..., ary[0] # top */
+ int i;
+ VALUE *bptr = &base[space_size - 1];
+
+ for (i=0; i<num; i++) {
+ if (len <= i) {
+ /* ran out of elements: nil-fill the rest and stop */
+ for (; i<num; i++) {
+ *bptr-- = Qnil;
+ }
+ break;
+ }
+ *bptr-- = ptr[i];
+ }
+ if (is_splat) {
+ if (num > len) {
+ *bptr = rb_ary_new();
+ }
+ else {
+ *bptr = rb_ary_new4(len - num, ptr + num);
+ }
+ }
+ }
+}
+
+/* Return 1 iff method node `mn` is a CFUNC whose C implementation is
+ * exactly `func` — i.e. the method has not been redefined in Ruby. */
+static inline int
+check_cfunc(NODE *mn, void *func)
+{
+ if (mn && nd_type(mn->nd_body) == NODE_CFUNC &&
+ mn->nd_body->nd_cfnc == func) {
+ return 1;
+ }
+ else {
+ return 0;
+ }
+}
+
+/*
+ * Fast path for the opt_eq instruction (`recv == obj`).  Handles the
+ * common Fixnum/Float/String cases inline when `==` has not been
+ * redefined, and short-circuits the default Object#== (identity).
+ * Returns Qundef when no fast path applies, signalling the caller to
+ * fall back to a full method dispatch.
+ */
+static VALUE
+opt_eq_func(VALUE recv, VALUE obj, IC ic)
+{
+ VALUE val = Qundef;
+
+ if (FIXNUM_2_P(recv, obj) &&
+ BASIC_OP_UNREDEFINED_P(BOP_EQ)) {
+ long a = FIX2LONG(recv), b = FIX2LONG(obj);
+
+ if (a == b) {
+ val = Qtrue;
+ }
+ else {
+ val = Qfalse;
+ }
+ }
+ else if (!SPECIAL_CONST_P(recv) && !SPECIAL_CONST_P(obj)) {
+ if (HEAP_CLASS_OF(recv) == rb_cFloat &&
+ HEAP_CLASS_OF(obj) == rb_cFloat &&
+ BASIC_OP_UNREDEFINED_P(BOP_EQ)) {
+ double a = RFLOAT_VALUE(recv);
+ double b = RFLOAT_VALUE(obj);
+
+ /* NaN is never equal to anything, including itself */
+ if (isnan(a) || isnan(b)) {
+ val = Qfalse;
+ }
+ else if (a == b) {
+ val = Qtrue;
+ }
+ else {
+ val = Qfalse;
+ }
+ }
+ else if (HEAP_CLASS_OF(recv) == rb_cString &&
+ HEAP_CLASS_OF(obj) == rb_cString &&
+ BASIC_OP_UNREDEFINED_P(BOP_EQ)) {
+ val = rb_str_equal(recv, obj);
+ }
+ else {
+ /* if the receiver still uses the default Object#==, equality is
+ * plain identity — avoid the dispatch entirely */
+ NODE *mn = vm_method_search(idEq, CLASS_OF(recv), ic);
+ extern VALUE rb_obj_equal(VALUE obj1, VALUE obj2);
+
+ if (check_cfunc(mn, rb_obj_equal)) {
+ return recv == obj ? Qtrue : Qfalse;
+ }
+ }
+ }
+
+ return val;
+}
Property changes on: vm_insnhelper.c
___________________________________________________________________
Name: svn:keywords
+ Author Date Id Revision
Name: svn:eol-style
+ LF
--
ML: ruby-changes@q...
Info: http://www.atdot.net/~ko1/quickml