
ruby-changes:25543

From: usa <ko1@a...>
Date: Sat, 10 Nov 2012 00:42:41 +0900 (JST)
Subject: [ruby-changes:25543] usa:r37600 (ruby_1_9_3): merge revision(s) 37585,37587,37591,37592,37597,37599:

usa	2012-11-10 00:38:38 +0900 (Sat, 10 Nov 2012)

  New Revision: 37600

  http://svn.ruby-lang.org/cgi-bin/viewvc.cgi?view=rev&revision=37600

  Log:
    merge revision(s) 37585,37587,37591,37592,37597,37599:
    
    * random.c (rb_memhash): use siphash.
    
    * siphash.c (sip_init_state): use a union to suppress warnings from
      gcc 4.7 (a brief sketch of the technique follows the file list
      below).
    
    * siphash.h: include inttypes.h only when HAVE_INTTYPES_H is defined.
    
    * siphash.h: check configure macros before including newer headers.

  Added files:
    branches/ruby_1_9_3/siphash.c
    branches/ruby_1_9_3/siphash.h
  Modified directories:
    branches/ruby_1_9_3/
  Modified files:
    branches/ruby_1_9_3/ChangeLog
    branches/ruby_1_9_3/common.mk
    branches/ruby_1_9_3/random.c
    branches/ruby_1_9_3/string.c
    branches/ruby_1_9_3/version.h
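
For context, a minimal self-contained sketch of the union-initializer
technique the sip_init_state log entry refers to (not part of the patch;
it assumes a C99 toolchain where uint64_t and inttypes.h are available).
The 32 constant bytes are written as string literals into the char
member and read back as four uint64_t words through the u64 member; gcc
sanctions this kind of type punning through a union, so it stays quiet
where a pointer cast from a char array to uint64_t * can draw alignment
or strict-aliasing warnings under gcc 4.7.

    #include <stdint.h>
    #include <inttypes.h>
    #include <stdio.h>

    /* Same trick as sip_init_state_bin in siphash.c below: initialize
     * through the char member, read through the uint64_t member. */
    static const union {
        char bin[32];
        uint64_t u64[4];
    } sip_init_state_bin = {"uespemos""modnarod""arenegyl""setybdet"};

    int main(void)
    {
        int i;
        /* Dump the four 64-bit words the hash XORs into its state. */
        for (i = 0; i < 4; i++)
            printf("v%d init: %016" PRIx64 "\n", i,
                   sip_init_state_bin.u64[i]);
        return 0;
    }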

Index: ruby_1_9_3/ChangeLog
===================================================================
--- ruby_1_9_3/ChangeLog	(revision 37599)
+++ ruby_1_9_3/ChangeLog	(revision 37600)
@@ -1,3 +1,16 @@
+Sat Nov 10 00:37:02 2012  NAKAMURA Usaku  <usa@r...>
+
+	* siphash.h: check configure macros before include newer headers.
+
+Sat Nov 10 00:37:02 2012  Nobuyoshi Nakada  <nobu@r...>
+
+	* siphash.c (sip_init_state): use union to suppress warnings by gcc
+	  4.7.
+
+Sat Nov 10 00:37:02 2012  Nobuyoshi Nakada  <nobu@r...>
+
+	* random.c (rb_memhash): use siphash.
+
 Fri Nov  9 16:17:09 2012  Nobuyoshi Nakada  <nobu@r...>
 
 	* file.c (append_fspath): revert a part of r37562.
Index: ruby_1_9_3/string.c
===================================================================
--- ruby_1_9_3/string.c	(revision 37599)
+++ ruby_1_9_3/string.c	(revision 37600)
@@ -2158,12 +2158,6 @@
 }
 
 st_index_t
-rb_memhash(const void *ptr, long len)
-{
-    return st_hash(ptr, len, rb_hash_start((st_index_t)len));
-}
-
-st_index_t
 rb_str_hash(VALUE str)
 {
     int e = ENCODING_GET(str);
Index: ruby_1_9_3/common.mk
===================================================================
--- ruby_1_9_3/common.mk	(revision 37599)
+++ ruby_1_9_3/common.mk	(revision 37600)
@@ -671,7 +671,8 @@
 process.$(OBJEXT): {$(VPATH)}process.c $(RUBY_H_INCLUDES) \
   {$(VPATH)}util.h {$(VPATH)}io.h $(ENCODING_H_INCLUDES) {$(VPATH)}dln.h \
   $(VM_CORE_H_INCLUDES) {$(VPATH)}debug.h {$(VPATH)}internal.h
-random.$(OBJEXT): {$(VPATH)}random.c $(RUBY_H_INCLUDES)
+random.$(OBJEXT): {$(VPATH)}random.c $(RUBY_H_INCLUDES) \
+  {$(VPATH)}siphash.c {$(VPATH)}siphash.h
 range.$(OBJEXT): {$(VPATH)}range.c $(RUBY_H_INCLUDES) \
   $(ENCODING_H_INCLUDES) {$(VPATH)}internal.h
 rational.$(OBJEXT): {$(VPATH)}rational.c $(RUBY_H_INCLUDES) {$(VPATH)}internal.h
Index: ruby_1_9_3/version.h
===================================================================
--- ruby_1_9_3/version.h	(revision 37599)
+++ ruby_1_9_3/version.h	(revision 37600)
@@ -1,5 +1,5 @@
 #define RUBY_VERSION "1.9.3"
-#define RUBY_PATCHLEVEL 325
+#define RUBY_PATCHLEVEL 326
 
 #define RUBY_RELEASE_DATE "2012-11-10"
 #define RUBY_RELEASE_YEAR 2012
Index: ruby_1_9_3/siphash.c
===================================================================
--- ruby_1_9_3/siphash.c	(revision 0)
+++ ruby_1_9_3/siphash.c	(revision 37600)
@@ -0,0 +1,483 @@
+#include <string.h>
+#include <stdio.h>
+#include "siphash.h"
+#ifndef SIP_HASH_STREAMING
+  #define SIP_HASH_STREAMING 1
+#endif
+
+#ifdef _WIN32
+  #define BYTE_ORDER __LITTLE_ENDIAN
+#elif !defined BYTE_ORDER
+  #include <endian.h>
+#endif
+#ifndef LITTLE_ENDIAN
+#define LITTLE_ENDIAN __LITTLE_ENDIAN
+#endif
+#ifndef BIG_ENDIAN
+#define BIG_ENDIAN __BIG_ENDIAN
+#endif
+
+#if BYTE_ORDER == LITTLE_ENDIAN
+  #define lo u32[0]
+  #define hi u32[1]
+#elif BYTE_ORDER == BIG_ENDIAN
+  #define hi u32[0]
+  #define lo u32[1]
+#else
+  #error "Only strictly little or big endian supported"
+#endif
+
+#ifndef UNALIGNED_WORD_ACCESS
+# if defined(__i386) || defined(__i386__) || defined(_M_IX86) || \
+     defined(__x86_64) || defined(__x86_64__) || defined(_M_AMD86) || \
+     defined(__mc68020__)
+#   define UNALIGNED_WORD_ACCESS 1
+# endif
+#endif
+#ifndef UNALIGNED_WORD_ACCESS
+# define UNALIGNED_WORD_ACCESS 0
+#endif
+
+#define U8TO32_LE(p)         						\
+    (((uint32_t)((p)[0])       ) | ((uint32_t)((p)[1]) <<  8) |  	\
+     ((uint32_t)((p)[2]) <<  16) | ((uint32_t)((p)[3]) << 24))		\
+
+#define U32TO8_LE(p, v)			\
+do {					\
+    (p)[0] = (uint8_t)((v)      );	\
+    (p)[1] = (uint8_t)((v) >>  8); 	\
+    (p)[2] = (uint8_t)((v) >> 16);	\
+    (p)[3] = (uint8_t)((v) >> 24);	\
+} while (0)
+
+#ifdef HAVE_UINT64_T
+#define U8TO64_LE(p) 							\
+    ((uint64_t)U8TO32_LE(p) | ((uint64_t)U8TO32_LE((p) + 4)) << 32 )
+
+#define U64TO8_LE(p, v) \
+do {						\
+    U32TO8_LE((p),     (uint32_t)((v)      )); 	\
+    U32TO8_LE((p) + 4, (uint32_t)((v) >> 32));	\
+} while (0)
+
+#define ROTL64(v, s)			\
+    ((v) << (s)) | ((v) >> (64 - (s)))
+
+#define ROTL64_TO(v, s) ((v) = ROTL64((v), (s)))
+
+#define ADD64_TO(v, s) ((v) += (s))
+#define XOR64_TO(v, s) ((v) ^= (s))
+#define XOR64_INT(v, x) ((v) ^= (x))
+#else
+#define U8TO64_LE(p) u8to64_le(p)
+static inline uint64_t
+u8to64_le(const uint8_t *p)
+{
+    uint64_t ret;
+    ret.lo = U8TO32_LE(p);
+    ret.hi = U8TO32_LE(p + 4);
+    return ret;
+}
+
+#define U64TO8_LE(p, v) u64to8_le(p, v)
+static inline void
+u64to8_le(uint8_t *p, uint64_t v)
+{
+    U32TO8_LE(p,     v.lo);
+    U32TO8_LE(p + 4, v.hi);
+}
+
+#define ROTL64_TO(v, s) ((s) > 32 ? rotl64_swap(rotl64_to(&(v), (s) - 32)) : \
+			 (s) == 32 ? rotl64_swap(&(v)) : rotl64_to(&(v), (s)))
+static inline uint64_t *
+rotl64_to(uint64_t *v, unsigned int s)
+{
+    uint32_t uhi = (v->hi << s) | (v->lo >> (32 - s));
+    uint32_t ulo = (v->lo << s) | (v->hi >> (32 - s));
+    v->hi = uhi;
+    v->lo = ulo;
+    return v;
+}
+
+static inline uint64_t *
+rotl64_swap(uint64_t *v)
+{
+    uint32_t t = v->lo;
+    v->lo = v->hi;
+    v->hi = t;
+    return v;
+}
+
+#define ADD64_TO(v, s) add64_to(&(v), (s))
+static inline uint64_t *
+add64_to(uint64_t *v, const uint64_t s)
+{
+    v->lo += s.lo;
+    v->hi += s.hi;
+    if (v->lo < s.lo) v->hi++;
+    return v;
+}
+
+#define XOR64_TO(v, s) xor64_to(&(v), (s))
+static inline uint64_t *
+xor64_to(uint64_t *v, const uint64_t s)
+{
+    v->lo ^= s.lo;
+    v->hi ^= s.hi;
+    return v;
+}
+
+#define XOR64_INT(v, x) ((v).lo ^= (x))
+#endif
+
+static const union {
+    char bin[32];
+    uint64_t u64[4];
+} sip_init_state_bin = {"uespemos""modnarod""arenegyl""setybdet"};
+#define sip_init_state sip_init_state_bin.u64
+
+#if SIP_HASH_STREAMING
+struct sip_interface_st {
+    void (*init)(sip_state *s, const uint8_t *key);
+    void (*update)(sip_state *s, const uint8_t *data, size_t len);
+    void (*final)(sip_state *s, uint64_t *digest);
+};
+
+static void int_sip_init(sip_state *state, const uint8_t *key);
+static void int_sip_update(sip_state *state, const uint8_t *data, size_t len);
+static void int_sip_final(sip_state *state, uint64_t *digest);
+
+static const sip_interface sip_methods = {
+    int_sip_init,
+    int_sip_update,
+    int_sip_final
+};
+#endif /* SIP_HASH_STREAMING */
+
+#define SIP_COMPRESS(v0, v1, v2, v3)	\
+do {					\
+    ADD64_TO((v0), (v1));		\
+    ADD64_TO((v2), (v3));		\
+    ROTL64_TO((v1), 13);		\
+    ROTL64_TO((v3), 16);		\
+    XOR64_TO((v1), (v0));		\
+    XOR64_TO((v3), (v2));		\
+    ROTL64_TO((v0), 32);		\
+    ADD64_TO((v2), (v1));		\
+    ADD64_TO((v0), (v3));		\
+    ROTL64_TO((v1), 17);		\
+    ROTL64_TO((v3), 21);		\
+    XOR64_TO((v1), (v2));		\
+    XOR64_TO((v3), (v0));		\
+    ROTL64_TO((v2), 32);		\
+} while(0)
+
+#if SIP_HASH_STREAMING
+static void
+int_sip_dump(sip_state *state)
+{
+    int v;
+
+    for (v = 0; v < 4; v++) {
+#if HAVE_UINT64_T
+	printf("v%d: %" PRIx64 "\n", v, state->v[v]);
+#else
+	printf("v%d: %" PRIx32 "%.8" PRIx32 "\n", v, state->v[v].hi, state->v[v].lo);
+#endif
+    }
+}
+
+static void
+int_sip_init(sip_state *state, const uint8_t key[16])
+{
+    uint64_t k0, k1;
+
+    k0 = U8TO64_LE(key);
+    k1 = U8TO64_LE(key + sizeof(uint64_t));
+
+    state->v[0] = k0; XOR64_TO(state->v[0], sip_init_state[0]);
+    state->v[1] = k1; XOR64_TO(state->v[1], sip_init_state[1]);
+    state->v[2] = k0; XOR64_TO(state->v[2], sip_init_state[2]);
+    state->v[3] = k1; XOR64_TO(state->v[3], sip_init_state[3]);
+}
+
+static inline void
+int_sip_round(sip_state *state, int n)
+{
+    int i;
+
+    for (i = 0; i < n; i++) {
+	SIP_COMPRESS(state->v[0], state->v[1], state->v[2], state->v[3]);
+    }
+}
+
+static inline void
+int_sip_update_block(sip_state *state, uint64_t m)
+{
+    XOR64_TO(state->v[3], m);
+    int_sip_round(state, state->c);
+    XOR64_TO(state->v[0], m);
+}
+
+static inline void
+int_sip_pre_update(sip_state *state, const uint8_t **pdata, size_t *plen)
+{
+    int to_read;
+    uint64_t m;
+
+    if (!state->buflen) return;
+
+    to_read = sizeof(uint64_t) - state->buflen;
+    memcpy(state->buf + state->buflen, *pdata, to_read);
+    m = U8TO64_LE(state->buf);
+    int_sip_update_block(state, m);
+    *pdata += to_read;
+    *plen -= to_read;
+    state->buflen = 0;
+}
+
+static inline void
+int_sip_post_update(sip_state *state, const uint8_t *data, size_t len)
+{
+    uint8_t r = len % sizeof(uint64_t);
+    if (r) {
+	memcpy(state->buf, data + len - r, r);
+	state->buflen = r;
+    }
+}
+
+static void
+int_sip_update(sip_state *state, const uint8_t *data, size_t len)
+{
+    uint64_t *end;
+    uint64_t *data64;
+
+    state->msglen_byte = state->msglen_byte + (len % 256);
+    data64 = (uint64_t *) data;
+
+    int_sip_pre_update(state, &data, &len);
+
+    end = data64 + (len / sizeof(uint64_t));
+
+#if BYTE_ORDER == LITTLE_ENDIAN
+    while (data64 != end) {
+	int_sip_update_block(state, *data64++);
+    }
+#elif BYTE_ORDER == BIG_ENDIAN
+    {
+	uint64_t m;
+	uint8_t *data8 = data;
+	for (; data8 != (uint8_t *) end; data8 += sizeof(uint64_t)) {
+	    m = U8TO64_LE(data8);
+	    int_sip_update_block(state, m);
+	}
+    }
+#endif
+
+    int_sip_post_update(state, data, len);
+}
+
+static inline void
+int_sip_pad_final_block(sip_state *state)
+{
+    int i;
+    /* pad with 0's and finalize with msg_len mod 256 */
+    for (i = state->buflen; i < sizeof(uint64_t); i++) {
+	state->buf[i] = 0x00;
+    }
+    state->buf[sizeof(uint64_t) - 1] = state->msglen_byte;
+}
+
+static void
+int_sip_final(sip_state *state, uint64_t *digest)
+{
+    uint64_t m;
+
+    int_sip_pad_final_block(state);
+
+    m = U8TO64_LE(state->buf);
+    int_sip_update_block(state, m);
+
+    XOR64_INT(state->v[2], 0xff);
+
+    int_sip_round(state, state->d);
+
+    *digest = state->v[0];
+    XOR64_TO(*digest, state->v[1]);
+    XOR64_TO(*digest, state->v[2]);
+    XOR64_TO(*digest, state->v[3]);
+}
+
+sip_hash *
+sip_hash_new(const uint8_t key[16], int c, int d)
+{
+    sip_hash *h = NULL;
+
+    if (!(h = (sip_hash *) malloc(sizeof(sip_hash)))) return NULL;
+    return sip_hash_init(h, key, c, d);
+}
+
+sip_hash *
+sip_hash_init(sip_hash *h, const uint8_t key[16], int c, int d)
+{
+    h->state->c = c;
+    h->state->d = d;
+    h->state->buflen = 0;
+    h->state->msglen_byte = 0;
+    h->methods = &sip_methods;
+    h->methods->init(h->state, key);
+    return h;
+}
+
+int
+sip_hash_update(sip_hash *h, const uint8_t *msg, size_t len)
+{
+    h->methods->update(h->state, msg, len);
+    return 1;
+}
+
+int
+sip_hash_final(sip_hash *h, uint8_t **digest, size_t* len)
+{
+    uint64_t digest64;
+    uint8_t *ret;
+
+    h->methods->final(h->state, &digest64);
+    if (!(ret = (uint8_t *)malloc(sizeof(uint64_t)))) return 0;
+    U64TO8_LE(ret, digest64);
+    *len = sizeof(uint64_t);
+    *digest = ret;
+
+    return 1;
+}
+
+int
+sip_hash_final_integer(sip_hash *h, uint64_t *digest)
+{
+    h->methods->final(h->state, digest);
+    return 1;
+}
+
+int
+sip_hash_digest(sip_hash *h, const uint8_t *data, size_t data_len, uint8_t **digest, size_t *digest_len)
+{
+    if (!sip_hash_update(h, data, data_len)) return 0;
+    return sip_hash_final(h, digest, digest_len);
+}
+
+int
+sip_hash_digest_integer(sip_hash *h, const uint8_t *data, size_t data_len, uint64_t *digest)
+{
+    if (!sip_hash_update(h, data, data_len)) return 0;
+    return sip_hash_final_integer(h, digest);
+}
+
+void
+sip_hash_free(sip_hash *h)
+{
+    free(h);
+}
+
+void
+sip_hash_dump(sip_hash *h)
+{
+    int_sip_dump(h->state);
+}
+#endif /* SIP_HASH_STREAMING */
+
+#define SIP_2_ROUND(m, v0, v1, v2, v3)	\
+do {					\
+    XOR64_TO((v3), (m));		\
+    SIP_COMPRESS(v0, v1, v2, v3);	\
+    SIP_COMPRESS(v0, v1, v2, v3);	\
+    XOR64_TO((v0), (m));		\
+} while (0)
+
+uint64_t
+sip_hash24(const uint8_t key[16], const uint8_t *data, size_t len)
+{
+    uint64_t k0, k1;
+    uint64_t v0, v1, v2, v3;
+    uint64_t m, last;
+    const uint8_t *end = data + len - (len % sizeof(uint64_t));
+
+    k0 = U8TO64_LE(key);
+    k1 = U8TO64_LE(key + sizeof(uint64_t));
+
+    v0 = k0; XOR64_TO(v0, sip_init_state[0]);
+    v1 = k1; XOR64_TO(v1, sip_init_state[1]);
+    v2 = k0; XOR64_TO(v2, sip_init_state[2]);
+    v3 = k1; XOR64_TO(v3, sip_init_state[3]);
+
+#if BYTE_ORDER == LITTLE_ENDIAN && UNALIGNED_WORD_ACCESS
+    {
+        uint64_t *data64 = (uint64_t *)data;
+        while (data64 != (uint64_t *) end) {
+	    m = *data64++;
+	    SIP_2_ROUND(m, v0, v1, v2, v3);
+        }
+    }
+#elif BYTE_ORDER == BIG_ENDIAN
+    for (; data != end; data += sizeof(uint64_t)) {
+	m = U8TO64_LE(data);
+	SIP_2_ROUND(m, v0, v1, v2, v3);
+    }
+#endif
+
+#ifdef HAVE_UINT64_T
+    last = (uint64_t)len << 56;
+#define OR_BYTE(n) (last |= ((uint64_t) end[n]) << ((n) * 8))
+#else
+    last.hi = len << 24;
+    last.lo = 0;
+#define OR_BYTE(n) do { \
+	if (n >= 4) \
+	    last.hi |= ((uint32_t) end[n]) << ((n) >= 4 ? (n) * 8 - 32 : 0); \
+	else \
+	    last.lo |= ((uint32_t) end[n]) << ((n) >= 4 ? 0 : (n) * 8); \
+    } while (0)
+#endif
+
+    switch (len % sizeof(uint64_t)) {
+	case 7:
+	    OR_BYTE(6);
+	case 6:
+	    OR_BYTE(5);
+	case 5:
+	    OR_BYTE(4);
+	case 4:
+#if BYTE_ORDER == LITTLE_ENDIAN && UNALIGNED_WORD_ACCESS
+  #if HAVE_UINT64_T
+	    last |= (uint64_t) ((uint32_t *) end)[0];
+  #else
+	    last.lo |= ((uint32_t *) end)[0];
+  #endif
+	    break;
+#elif BYTE_ORDER == BIG_ENDIAN
+	    OR_BYTE(3);
+#endif
+	case 3:
+	    OR_BYTE(2);
+	case 2:
+	    OR_BYTE(1);
+	case 1:
+	    OR_BYTE(0);
+	    break;
+	case 0:
+	    break;
+    }
+
+    SIP_2_ROUND(last, v0, v1, v2, v3);
+
+    XOR64_INT(v2, 0xff);
+
+    SIP_COMPRESS(v0, v1, v2, v3);
+    SIP_COMPRESS(v0, v1, v2, v3);
+    SIP_COMPRESS(v0, v1, v2, v3);
+    SIP_COMPRESS(v0, v1, v2, v3);
+
+    XOR64_TO(v0, v1);
+    XOR64_TO(v0, v2);
+    XOR64_TO(v0, v3);
+    return v0;
+}

Property changes on: ruby_1_9_3/siphash.c
___________________________________________________________________
Added: svn:eol-style
   + LF

Index: ruby_1_9_3/siphash.h
===================================================================
--- ruby_1_9_3/siphash.h	(revision 0)
+++ ruby_1_9_3/siphash.h	(revision 37600)
@@ -0,0 +1,48 @@
+#ifndef SIPHASH_H
+#define SIPHASH_H 1
+#include <stdlib.h>
+#ifdef HAVE_STDINT_H
+#include <stdint.h>
+#endif
+#ifdef HAVE_INTTYPES_H
+#include <inttypes.h>
+#endif
+
+#ifndef HAVE_UINT64_T
+typedef struct {
+    uint32_t u32[2];
+} sip_uint64_t;
+#define uint64_t sip_uint64_t
+#else
+typedef uint64_t sip_uint64_t;
+#endif
+
+typedef struct {
+    int c;
+    int d;
+    uint64_t v[4];
+    uint8_t buf[sizeof(uint64_t)];
+    uint8_t buflen;
+    uint8_t msglen_byte;
+} sip_state;
+
+typedef struct sip_interface_st sip_interface;
+
+typedef struct {
+    sip_state state[1];
+    const sip_interface *methods;
+} sip_hash;
+
+sip_hash *sip_hash_new(const uint8_t key[16], int c, int d);
+sip_hash *sip_hash_init(sip_hash *h, const uint8_t key[16], int c, int d);
+int sip_hash_update(sip_hash *h, const uint8_t *data, size_t len);
+int sip_hash_final(sip_hash *h, uint8_t **digest, size_t *len);
+int sip_hash_final_integer(sip_hash *h, uint64_t *digest);
+int sip_hash_digest(sip_hash *h, const uint8_t *data, size_t data_len, uint8_t **digest, size_t *digest_len);
+int sip_hash_digest_integer(sip_hash *h, const uint8_t *data, size_t data_len, uint64_t *digest);
+void sip_hash_free(sip_hash *h);
+void sip_hash_dump(sip_hash *h);
+
+uint64_t sip_hash24(const uint8_t key[16], const uint8_t *data, size_t len);
+
+#endif

Property changes on: ruby_1_9_3/siphash.h
___________________________________________________________________
Added: svn:eol-style
   + LF
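
To make the new public interface above concrete, a hypothetical
standalone caller of the streaming API could look like the sketch below.
It is not part of the patch: it assumes siphash.c is built with
SIP_HASH_STREAMING left at its default of 1 and with HAVE_STDINT_H
defined (as Ruby's configure would arrange), and the key and message are
illustrative only.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include "siphash.h"

    int main(void)
    {
        /* 16-byte key; in Ruby this comes from the random sipseed. */
        static const uint8_t key[16] = "0123456789abcdef";
        const char *msg = "hello, world";
        uint8_t *digest;
        size_t digest_len, i;

        /* SipHash-2-4, the same rounds the one-shot sip_hash24() uses. */
        sip_hash *h = sip_hash_new(key, 2, 4);
        if (!h) return EXIT_FAILURE;

        if (!sip_hash_digest(h, (const uint8_t *)msg, strlen(msg),
                             &digest, &digest_len)) {
            sip_hash_free(h);
            return EXIT_FAILURE;
        }

        for (i = 0; i < digest_len; i++)
            printf("%02x", digest[i]);
        putchar('\n');

        free(digest);
        sip_hash_free(h);
        return EXIT_SUCCESS;
    }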

Index: ruby_1_9_3/random.c
===================================================================
--- ruby_1_9_3/random.c	(revision 37599)
+++ ruby_1_9_3/random.c	(revision 37600)
@@ -1259,7 +1259,15 @@
     return random_rand(argc, argv, rb_Random_DEFAULT);
 }
 
+#define SIP_HASH_STREAMING 0
+#define sip_hash24 ruby_sip_hash24
+#include "siphash.c"
+
 static st_index_t hashseed;
+static union {
+    uint8_t key[16];
+    uint32_t u32[(16 * sizeof(uint8_t) - 1) / sizeof(uint32_t)];
+} sipseed;
 
 static VALUE
 init_randomseed(struct MT *mt, unsigned int initial[DEFAULT_SEED_CNT])
@@ -1279,6 +1287,7 @@
     unsigned int initial[DEFAULT_SEED_CNT];
     struct MT *mt = &r->mt;
     VALUE seed = init_randomseed(mt, initial);
+    int i;
 
     hashseed = genrand_int32(mt);
 #if SIZEOF_ST_INDEX_T*CHAR_BIT > 4*8
@@ -1294,6 +1303,9 @@
     hashseed |= genrand_int32(mt);
 #endif
 
+    for (i = 0; i < numberof(sipseed.u32); ++i)
+	sipseed.u32[i] = genrand_int32(mt);
+
     rb_global_variable(&r->seed);
     r->seed = seed;
 }
@@ -1304,6 +1316,17 @@
     return st_hash_start(hashseed + h);
 }
 
+st_index_t
+rb_memhash(const void *ptr, long len)
+{
+    sip_uint64_t h = sip_hash24(sipseed.key, ptr, len);
+#ifdef HAVE_UINT64_T
+    return (st_index_t)h;
+#else
+    return (st_index_t)(h.u32[0] ^ h.u32[1]);
+#endif
+}
+
 static void
 Init_RandomSeed2(void)
 {

Property changes on: ruby_1_9_3
___________________________________________________________________
Modified: svn:mergeinfo
   Merged /trunk:r37585,37587,37591-37592,37597,37599
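
As a rough illustration of what the new rb_memhash() above boils down
to, the sketch below hashes an arbitrary byte range with the one-shot
sip_hash24() and a process-wide 16-byte key.  It is not part of the
patch: the key here is a stand-in for Ruby's sipseed (which the patch
fills from the Mersenne Twister in Init_RandomSeed), rand() is only a
placeholder seed source, and the build is assumed to define
HAVE_UINT64_T and HAVE_STDINT_H as Ruby's configure would.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <time.h>
    #include "siphash.h"

    /* Stand-in for Ruby's sipseed: 16 random bytes chosen at startup. */
    static uint8_t seed_key[16];

    static void init_seed(void)
    {
        size_t i;
        srand((unsigned)time(NULL));  /* Ruby seeds from its MT instead */
        for (i = 0; i < sizeof(seed_key); i++)
            seed_key[i] = (uint8_t)(rand() & 0xff);
    }

    /* Rough equivalent of the new rb_memhash() when uint64_t exists. */
    static uint64_t memhash(const void *ptr, size_t len)
    {
        return sip_hash24(seed_key, (const uint8_t *)ptr, len);
    }

    int main(void)
    {
        const char *s = "hello";
        init_seed();
        printf("%016llx\n", (unsigned long long)memhash(s, strlen(s)));
        return 0;
    }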


--
ML: ruby-changes@q...
Info: http://www.atdot.net/~ko1/quickml/
