LEFT | RIGHT |
1 // Copyright 2009 The Go Authors. All rights reserved. | 1 // Copyright 2009 The Go Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style | 2 // Use of this source code is governed by a BSD-style |
3 // license that can be found in the LICENSE file. | 3 // license that can be found in the LICENSE file. |
4 | 4 |
5 #include "runtime.h" | 5 #include "runtime.h" |
6 #include "arch_GOARCH.h" | 6 #include "arch_GOARCH.h" |
7 #include "malloc.h" | 7 #include "malloc.h" |
8 #include "hashmap.h" | 8 #include "hashmap.h" |
9 #include "type.h" | 9 #include "type.h" |
10 #include "race.h" | 10 #include "race.h" |
(...skipping 241 matching lines...)
252 while(hint > BUCKETSIZE && hint > LOAD * ((uintptr)1 << B)) | 252 while(hint > BUCKETSIZE && hint > LOAD * ((uintptr)1 << B)) |
253 B++; | 253 B++; |
254 | 254 |
255 // allocate initial hash table | 255 // allocate initial hash table |
256 // If hint is large zeroing this memory could take a while. | 256 // If hint is large zeroing this memory could take a while. |
257 if(checkgc) mstats.next_gc = mstats.heap_alloc; | 257 if(checkgc) mstats.next_gc = mstats.heap_alloc; |
258 if(B == 0) { | 258 if(B == 0) { |
259 // done lazily later. | 259 // done lazily later. |
260 buckets = nil; | 260 buckets = nil; |
261 } else { | 261 } else { |
262 » » buckets = runtime·mallocgc(bucketsize << B, 0, 1, 0); | 262 » » buckets = runtime·mallocgc(bucketsize << B, 0, FlagNoZero); |
263 for(i = 0; i < (uintptr)1 << B; i++) { | 263 for(i = 0; i < (uintptr)1 << B; i++) { |
264 b = (Bucket*)(buckets + i * bucketsize); | 264 b = (Bucket*)(buckets + i * bucketsize); |
265 clearbucket(b); | 265 clearbucket(b); |
266 } | 266 } |
267 } | 267 } |
268 | 268 |
269 // initialize Hmap | 269 // initialize Hmap |
270 // Note: we save all these stores to the end so gciter doesn't see | 270 // Note: we save all these stores to the end so gciter doesn't see |
271 // a partially initialized map. | 271 // a partially initialized map. |
272 h->count = 0; | 272 h->count = 0; |
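
Aside, not part of the change: the loop at line 252 picks the smallest B such that 2^B buckets of BUCKETSIZE slots stay within the LOAD factor for the requested hint, and the new FlagNoZero argument at line 262 makes explicit that the allocation is not zeroed, which is why clearbucket is run on every bucket. A minimal standalone sketch of the sizing calculation, assuming BUCKETSIZE=8 and LOAD=6.5 (values I believe hashmap.c uses, but they are not shown in this diff):

/* Standalone sketch of the bucket-count calculation above (assumed constants). */
#include <stdio.h>
#include <stdint.h>

#define BUCKETSIZE 8     /* slots per bucket; assumption, not shown in this hunk */
#define LOAD 6.5         /* max average load; assumption, not shown in this hunk */

int main(void) {
	uintptr_t hint = 1000;   /* e.g. make(map[K]V, 1000) */
	uint8_t B = 0;
	while(hint > BUCKETSIZE && hint > LOAD * ((uintptr_t)1 << B))
		B++;
	printf("hint=%lu -> B=%d (%lu buckets)\n",
	       (unsigned long)hint, B, (unsigned long)1 << B);
	return 0;
}
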
(...skipping 50 matching lines...)
323 if(b->tophash[i] == 0) | 323 if(b->tophash[i] == 0) |
324 continue; | 324 continue; |
325 hash = h->hash0; | 325 hash = h->hash0; |
326 t->key->alg->hash(&hash, t->key->size, IK(h, k)); | 326 t->key->alg->hash(&hash, t->key->size, IK(h, k)); |
327 // NOTE: if key != key, then this hash could be (and probably will be) | 327 // NOTE: if key != key, then this hash could be (and probably will be) |
328 // entirely different from the old hash. We effectively only update | 328 // entirely different from the old hash. We effectively only update |
329 // the B'th bit of the hash in this case. | 329 // the B'th bit of the hash in this case. |
330 if((hash & newbit) == 0) { | 330 if((hash & newbit) == 0) { |
331 if(xi == BUCKETSIZE) { | 331 if(xi == BUCKETSIZE) { |
332 if(checkgc) mstats.next_gc = mstats.heap_alloc; | 332 if(checkgc) mstats.next_gc = mstats.heap_alloc; |
333 » » » » » » newx = runtime·mallocgc(h->bucketsize, 0, 1, 0); | 333 » » » » » » newx = runtime·mallocgc(h->bucketsize, 0, FlagNoZero); |
334 clearbucket(newx); | 334 clearbucket(newx); |
335 x->overflow = newx; | 335 x->overflow = newx; |
336 x = newx; | 336 x = newx; |
337 xi = 0; | 337 xi = 0; |
338 xk = x->data; | 338 xk = x->data; |
339 xv = xk + h->keysize * BUCKETSIZE; | 339 xv = xk + h->keysize * BUCKETSIZE; |
340 } | 340 } |
341 x->tophash[xi] = b->tophash[i]; | 341 x->tophash[xi] = b->tophash[i]; |
342 if((h->flags & IndirectKey) != 0) { | 342 if((h->flags & IndirectKey) != 0) { |
343 *(byte**)xk = *(byte**)k; // copy pointer | 343 *(byte**)xk = *(byte**)k; // copy pointer |
344 } else { | 344 } else { |
345 t->key->alg->copy(t->key->size, xk, k); // copy value | 345 t->key->alg->copy(t->key->size, xk, k); // copy value |
346 } | 346 } |
347 if((h->flags & IndirectValue) != 0) { | 347 if((h->flags & IndirectValue) != 0) { |
348 *(byte**)xv = *(byte**)v; | 348 *(byte**)xv = *(byte**)v; |
349 } else { | 349 } else { |
350 t->elem->alg->copy(t->elem->size, xv, v); | 350 t->elem->alg->copy(t->elem->size, xv, v); |
351 } | 351 } |
352 xi++; | 352 xi++; |
353 xk += h->keysize; | 353 xk += h->keysize; |
354 xv += h->valuesize; | 354 xv += h->valuesize; |
355 } else { | 355 } else { |
356 if(yi == BUCKETSIZE) { | 356 if(yi == BUCKETSIZE) { |
357 if(checkgc) mstats.next_gc = mstats.heap_alloc; | 357 if(checkgc) mstats.next_gc = mstats.heap_alloc; |
358 » » » » » » newy = runtime·mallocgc(h->bucketsize, 0, 1, 0); | 358 » » » » » » newy = runtime·mallocgc(h->bucketsize, 0, FlagNoZero); |
359 clearbucket(newy); | 359 clearbucket(newy); |
360 y->overflow = newy; | 360 y->overflow = newy; |
361 y = newy; | 361 y = newy; |
362 yi = 0; | 362 yi = 0; |
363 yk = y->data; | 363 yk = y->data; |
364 yv = yk + h->keysize * BUCKETSIZE; | 364 yv = yk + h->keysize * BUCKETSIZE; |
365 } | 365 } |
366 y->tophash[yi] = b->tophash[i]; | 366 y->tophash[yi] = b->tophash[i]; |
367 if((h->flags & IndirectKey) != 0) { | 367 if((h->flags & IndirectKey) != 0) { |
368 *(byte**)yk = *(byte**)k; | 368 *(byte**)yk = *(byte**)k; |
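
Aside, not part of the change: during evacuation each old bucket's entries are split between the two destination buckets x and y, and the deciding bit is the one the doubled bucket mask newly exposes (hash & newbit). That is why the NOTE at lines 327-329 says that for keys which are not equal to themselves, only that one bit of the recomputed hash effectively matters. A standalone sketch, assuming the usual layout where x sits at the old bucket index and y at old index + newbit (that initialization is outside this hunk):

/* Standalone sketch: which new bucket an entry moves to when the table doubles. */
#include <stdio.h>
#include <stdint.h>

int main(void) {
	uint8_t B = 3;                                 /* table just grew to 1<<3 = 8 buckets */
	uintptr_t newbit = (uintptr_t)1 << (B - 1);    /* 4: the bit the new mask exposes */
	uintptr_t hash = 0x2d6;                        /* example hash value */
	uintptr_t oldbucket = hash & (newbit - 1);     /* index in the old, smaller table */
	uintptr_t newbucket = (hash & newbit) == 0 ? oldbucket : oldbucket + newbit;
	printf("old bucket %lu -> new bucket %lu\n",
	       (unsigned long)oldbucket, (unsigned long)newbucket);
	return 0;
}
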
(...skipping 75 matching lines...)
444 byte *old_buckets; | 444 byte *old_buckets; |
445 byte *new_buckets; | 445 byte *new_buckets; |
446 uint8 flags; | 446 uint8 flags; |
447 | 447 |
448 // allocate a bigger hash table | 448 // allocate a bigger hash table |
449 if(h->oldbuckets != nil) | 449 if(h->oldbuckets != nil) |
450 runtime·throw("evacuation not done in time"); | 450 runtime·throw("evacuation not done in time"); |
451 old_buckets = h->buckets; | 451 old_buckets = h->buckets; |
452 // NOTE: this could be a big malloc, but since we don't need zeroing it is probably fast. | 452 // NOTE: this could be a big malloc, but since we don't need zeroing it is probably fast. |
453 if(checkgc) mstats.next_gc = mstats.heap_alloc; | 453 if(checkgc) mstats.next_gc = mstats.heap_alloc; |
454 » new_buckets = runtime·mallocgc((uintptr)h->bucketsize << (h->B + 1), 0, 1, 0); | 454 » new_buckets = runtime·mallocgc((uintptr)h->bucketsize << (h->B + 1), 0, FlagNoZero); |
455 flags = (h->flags & ~(Iterator | OldIterator)); | 455 flags = (h->flags & ~(Iterator | OldIterator)); |
456 if((h->flags & Iterator) != 0) { | 456 if((h->flags & Iterator) != 0) { |
457 flags |= OldIterator; | 457 flags |= OldIterator; |
458 // We can't free indirect keys any more, as | 458 // We can't free indirect keys any more, as |
459 // they are potentially aliased across buckets. | 459 // they are potentially aliased across buckets. |
460 flags &= ~CanFreeKey; | 460 flags &= ~CanFreeKey; |
461 } | 461 } |
462 | 462 |
463 // commit the grow (atomic wrt gc) | 463 // commit the grow (atomic wrt gc) |
464 h->B++; | 464 h->B++; |
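
Aside, not part of the change: the flag handling at lines 455-461 records that a live iterator now refers to what is about to become oldbuckets (Iterator is traded for OldIterator) and drops CanFreeKey, since the iterator may still alias indirect keys across buckets. A standalone sketch of that bit manipulation, with illustrative flag values (the real bit assignments live in hashmap.h and are not shown here):

/* Standalone sketch of the iterator-flag handoff during a grow. */
#include <stdio.h>
#include <stdint.h>

enum { Iterator = 1, OldIterator = 2, CanFreeKey = 4 };  /* illustrative bit values */

int main(void) {
	uint8_t hflags = Iterator | CanFreeKey;              /* map currently being iterated */
	uint8_t flags = hflags & ~(Iterator | OldIterator);
	if((hflags & Iterator) != 0) {
		flags |= OldIterator;     /* the iterator now walks oldbuckets */
		flags &= ~CanFreeKey;     /* indirect keys may be aliased across buckets */
	}
	printf("flags %#x -> %#x\n", hflags, flags);
	return 0;
}
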
(...skipping 125 matching lines...)
590 byte *insertk, *insertv; | 590 byte *insertk, *insertv; |
591 uint8 top; | 591 uint8 top; |
592 byte *k, *v; | 592 byte *k, *v; |
593 byte *kmem, *vmem; | 593 byte *kmem, *vmem; |
594 | 594 |
595 if(docheck) | 595 if(docheck) |
596 check(t, h); | 596 check(t, h); |
597 hash = h->hash0; | 597 hash = h->hash0; |
598 t->key->alg->hash(&hash, t->key->size, key); | 598 t->key->alg->hash(&hash, t->key->size, key); |
599 if(h->buckets == nil) { | 599 if(h->buckets == nil) { |
600 » » h->buckets = runtime·mallocgc(h->bucketsize, 0, 1, 0); | 600 » » h->buckets = runtime·mallocgc(h->bucketsize, 0, FlagNoZero); |
601 b = (Bucket*)(h->buckets); | 601 b = (Bucket*)(h->buckets); |
602 clearbucket(b); | 602 clearbucket(b); |
603 } | 603 } |
604 | 604 |
605 again: | 605 again: |
606 bucket = hash & (((uintptr)1 << h->B) - 1); | 606 bucket = hash & (((uintptr)1 << h->B) - 1); |
607 if(h->oldbuckets != nil) | 607 if(h->oldbuckets != nil) |
608 grow_work(t, h, bucket); | 608 grow_work(t, h, bucket); |
609 b = (Bucket*)(h->buckets + bucket * h->bucketsize); | 609 b = (Bucket*)(h->buckets + bucket * h->bucketsize); |
610 top = hash >> (sizeof(uintptr)*8 - 8); | 610 top = hash >> (sizeof(uintptr)*8 - 8); |
611 if(top == 0) | 611 if(top == 0) |
612 top = 1; | 612 top = 1; |
613 » inserti = 0; | 613 » inserti = nil; |
614 insertk = nil; | 614 insertk = nil; |
615 insertv = nil; | 615 insertv = nil; |
616 while(true) { | 616 while(true) { |
617 for(i = 0, k = b->data, v = k + h->keysize * BUCKETSIZE; i < BUCKETSIZE; i++, k += h->keysize, v += h->valuesize) { | 617 for(i = 0, k = b->data, v = k + h->keysize * BUCKETSIZE; i < BUCKETSIZE; i++, k += h->keysize, v += h->valuesize) { |
618 if(b->tophash[i] != top) { | 618 if(b->tophash[i] != top) { |
619 if(b->tophash[i] == 0 && inserti == nil) { | 619 if(b->tophash[i] == 0 && inserti == nil) { |
620 inserti = &b->tophash[i]; | 620 inserti = &b->tophash[i]; |
621 insertk = k; | 621 insertk = k; |
622 insertv = v; | 622 insertv = v; |
623 } | 623 } |
(...skipping 16 matching lines...)
640 | 640 |
641 // did not find mapping for key. Allocate new cell & add entry. | 641 // did not find mapping for key. Allocate new cell & add entry. |
642 if(h->count >= LOAD * ((uintptr)1 << h->B) && h->count >= BUCKETSIZE) { | 642 if(h->count >= LOAD * ((uintptr)1 << h->B) && h->count >= BUCKETSIZE) { |
643 hash_grow(t, h); | 643 hash_grow(t, h); |
644 goto again; // Growing the table invalidates everything, so try again | 644 goto again; // Growing the table invalidates everything, so try again |
645 } | 645 } |
646 | 646 |
647 if(inserti == nil) { | 647 if(inserti == nil) { |
648 // all current buckets are full, allocate a new one. | 648 // all current buckets are full, allocate a new one. |
649 if(checkgc) mstats.next_gc = mstats.heap_alloc; | 649 if(checkgc) mstats.next_gc = mstats.heap_alloc; |
650 » » newb = runtime·mallocgc(h->bucketsize, 0, 1, 0); | 650 » » newb = runtime·mallocgc(h->bucketsize, 0, FlagNoZero); |
651 clearbucket(newb); | 651 clearbucket(newb); |
652 b->overflow = newb; | 652 b->overflow = newb; |
653 inserti = newb->tophash; | 653 inserti = newb->tophash; |
654 insertk = newb->data; | 654 insertk = newb->data; |
655 insertv = insertk + h->keysize * BUCKETSIZE; | 655 insertv = insertk + h->keysize * BUCKETSIZE; |
656 } | 656 } |
657 | 657 |
658 // store new key/value at insert position | 658 // store new key/value at insert position |
659 if((h->flags & IndirectKey) != 0) { | 659 if((h->flags & IndirectKey) != 0) { |
660 if(checkgc) mstats.next_gc = mstats.heap_alloc; | 660 if(checkgc) mstats.next_gc = mstats.heap_alloc; |
661 » » kmem = runtime·mallocgc(t->key->size, 0, 1, 0); | 661 » » kmem = runtime·mallocgc(t->key->size, 0, FlagNoZero); |
662 *(byte**)insertk = kmem; | 662 *(byte**)insertk = kmem; |
663 insertk = kmem; | 663 insertk = kmem; |
664 } | 664 } |
665 if((h->flags & IndirectValue) != 0) { | 665 if((h->flags & IndirectValue) != 0) { |
666 if(checkgc) mstats.next_gc = mstats.heap_alloc; | 666 if(checkgc) mstats.next_gc = mstats.heap_alloc; |
667 » » vmem = runtime·mallocgc(t->elem->size, 0, 1, 0); | 667 » » vmem = runtime·mallocgc(t->elem->size, 0, FlagNoZero); |
668 *(byte**)insertv = vmem; | 668 *(byte**)insertv = vmem; |
669 insertv = vmem; | 669 insertv = vmem; |
670 } | 670 } |
671 t->key->alg->copy(t->key->size, insertk, key); | 671 t->key->alg->copy(t->key->size, insertk, key); |
672 t->elem->alg->copy(t->elem->size, insertv, value); | 672 t->elem->alg->copy(t->elem->size, insertv, value); |
673 *inserti = top; | 673 *inserti = top; |
674 h->count++; | 674 h->count++; |
675 if(docheck) | 675 if(docheck) |
676 check(t, h); | 676 check(t, h); |
677 } | 677 } |
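
Aside, not part of the change: the insert path at lines 610-612 stores the top byte of the hash as a per-slot tag so probes can reject slots without a full key comparison; 0 is reserved to mean "empty slot" (see the tophash[i] == 0 checks), so a computed tag of 0 is bumped to 1. A standalone sketch of the tag computation:

/* Standalone sketch of the tophash tag used to short-circuit slot comparisons. */
#include <stdio.h>
#include <stdint.h>

int main(void) {
	uintptr_t hash = (uintptr_t)0x9e3779b97f4a7c15ULL;  /* example hash value */
	uint8_t top = hash >> (sizeof(uintptr_t)*8 - 8);    /* top byte of the hash */
	if(top == 0)
		top = 1;   /* 0 is reserved for "empty slot" */
	printf("hash=%#lx -> tophash=%#x\n", (unsigned long)hash, top);
	return 0;
}
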
(...skipping 417 matching lines...)
1095 Type *key; | 1095 Type *key; |
1096 | 1096 |
1097 key = typ->key; | 1097 key = typ->key; |
1098 | 1098 |
1099 if(hint < 0 || (int32)hint != hint) | 1099 if(hint < 0 || (int32)hint != hint) |
1100 runtime·panicstring("makemap: size out of range"); | 1100 runtime·panicstring("makemap: size out of range"); |
1101 | 1101 |
1102 if(key->alg->hash == runtime·nohash) | 1102 if(key->alg->hash == runtime·nohash) |
1103 runtime·throw("runtime.makemap: unsupported map key type"); | 1103 runtime·throw("runtime.makemap: unsupported map key type"); |
1104 | 1104 |
1105 » m->locks++; // disable preemption to not expose inconsistent Hmap to GC | 1105 » h = runtime·mallocgc(sizeof(*h), (uintptr)typ | TypeInfo_Map, 0); |
1106 » h = runtime·mal(sizeof(*h)); | |
1107 » if(UseSpanType) { | |
1108 » » if(false) | |
1109 » » » runtime·printf("makemap %S: %p\n", *typ->string, h); | |
1110 » » runtime·settype(h, (uintptr)typ | TypeInfo_Map); | |
1111 » } | |
1112 hash_init(typ, h, hint); | 1106 hash_init(typ, h, hint); |
1113 m->locks--; | |
1114 | 1107 |
1115 // these calculations are compiler dependent. | 1108 // these calculations are compiler dependent. |
1116 // figure out offsets of map call arguments. | 1109 // figure out offsets of map call arguments. |
1117 | 1110 |
1118 if(debug) { | 1111 if(debug) { |
1119 runtime·printf("makemap: map=%p; keysize=%p; valsize=%p; keyalg=
%p; valalg=%p\n", | 1112 runtime·printf("makemap: map=%p; keysize=%p; valsize=%p; keyalg=
%p; valalg=%p\n", |
1120 h, key->size, typ->elem->size, key->alg, typ->ele
m->alg); | 1113 h, key->size, typ->elem->size, key->alg, typ->ele
m->alg); |
1121 } | 1114 } |
1122 | 1115 |
1123 return h; | 1116 return h; |
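
Aside, not part of the change: on the right, line 1105 folds the old runtime·mal + runtime·settype pair (and the m->locks guard around it) into a single runtime·mallocgc call whose second argument carries the map's type pointer tagged with TypeInfo_Map, so the allocation is typed from the start. A standalone sketch of that pointer-tagging idea; the tag value and the use of the pointer's low alignment bits are assumptions here, not taken from the CL:

/* Standalone sketch: packing a type pointer and a small kind tag into one word. */
#include <stdio.h>
#include <stdint.h>

enum { TypeInfo_Map = 2 };   /* assumed tag value; the real one is defined in malloc.h */

int main(void) {
	static int dummy_type;                          /* stand-in for the map's Type */
	uintptr_t packed = (uintptr_t)&dummy_type | TypeInfo_Map;
	void *typ = (void*)(packed & ~(uintptr_t)3);    /* tag rides in the low bits */
	printf("type=%p tag=%lu\n", typ, (unsigned long)(packed & 3));
	return 0;
}
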
(...skipping 353 matching lines...)
1477 // the actual data if it fits, or else a pointer to the data. | 1470 // the actual data if it fits, or else a pointer to the data. |
1478 void | 1471 void |
1479 reflect·mapiterkey(struct hash_iter *it, uintptr key, bool ok) | 1472 reflect·mapiterkey(struct hash_iter *it, uintptr key, bool ok) |
1480 { | 1473 { |
1481 byte *res; | 1474 byte *res; |
1482 Type *tkey; | 1475 Type *tkey; |
1483 | 1476 |
1484 key = 0; | 1477 key = 0; |
1485 ok = false; | 1478 ok = false; |
1486 res = it->key; | 1479 res = it->key; |
1487 » if(res == nil) { | 1480 » if(res != nil) { |
1488 » » key = 0; | |
1489 » » ok = false; | |
1490 » } else { | |
1491 tkey = it->t->key; | 1481 tkey = it->t->key; |
1492 key = 0; | |
1493 if(tkey->size <= sizeof(key)) | 1482 if(tkey->size <= sizeof(key)) |
1494 tkey->alg->copy(tkey->size, (byte*)&key, res); | 1483 tkey->alg->copy(tkey->size, (byte*)&key, res); |
1495 else | 1484 else |
1496 key = (uintptr)res; | 1485 key = (uintptr)res; |
1497 ok = true; | 1486 ok = true; |
1498 } | 1487 } |
1499 FLUSH(&key); | 1488 FLUSH(&key); |
1500 FLUSH(&ok); | 1489 FLUSH(&ok); |
1501 } | 1490 } |
1502 | 1491 |
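
Aside, not part of the change: reflect·mapiterkey returns the key in a single uintptr-sized word, either the key data itself when it fits (tkey->size <= sizeof(key)) or a pointer to the data when it does not; the rewrite above only collapses the redundant nil branch. A standalone sketch of that convention:

/* Standalone sketch: return a value inline in a word if it fits, else by pointer. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void) {
	uintptr_t word = 0;
	int32_t small = 42;               /* fits in a word: copied inline */
	double big[4] = {1, 2, 3, 4};     /* too large: returned as a pointer */

	if(sizeof(small) <= sizeof(word))
		memcpy(&word, &small, sizeof(small));
	printf("small key, inline word: %lu\n", (unsigned long)word);

	word = (sizeof(big) <= sizeof(word)) ? 0 : (uintptr_t)big;
	printf("big key, word holds pointer: %p\n", (void*)word);
	return 0;
}
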
(...skipping 33 matching lines...)
1536 t->elem->alg->copy(t->elem->size, av, it->value); | 1525 t->elem->alg->copy(t->elem->size, av, it->value); |
1537 | 1526 |
1538 if(debug) { | 1527 if(debug) { |
1539 runtime·prints("mapiter2: iter="); | 1528 runtime·prints("mapiter2: iter="); |
1540 runtime·printpointer(it); | 1529 runtime·printpointer(it); |
1541 runtime·prints("; map="); | 1530 runtime·prints("; map="); |
1542 runtime·printpointer(it->h); | 1531 runtime·printpointer(it->h); |
1543 runtime·prints("\n"); | 1532 runtime·prints("\n"); |
1544 } | 1533 } |
1545 } | 1534 } |