Left: | ||
Right: |
LEFT | RIGHT |
---|---|
1 // Copyright 2014 The Go Authors. All rights reserved. | 1 // Copyright 2014 The Go Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style | 2 // Use of this source code is governed by a BSD-style |
3 // license that can be found in the LICENSE file. | 3 // license that can be found in the LICENSE file. |
4 | 4 |
5 package server | 5 package server |
6 | 6 |
7 import ( | 7 import ( |
8 "bytes" | 8 "bytes" |
9 "fmt" | 9 "fmt" |
10 "math" | 10 "math" |
(...skipping 291 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
302 } | 302 } |
303 addr = uintptr(p.arch.Uintptr(p.tmp[:p.arch.PointerSize])) | 303 addr = uintptr(p.arch.Uintptr(p.tmp[:p.arch.PointerSize])) |
304 // Now read the struct. | 304 // Now read the struct. |
305 if !p.peek(addr, structType.ByteSize) { | 305 if !p.peek(addr, structType.ByteSize) { |
306 return | 306 return |
307 } | 307 } |
308 // From runtime/hashmap.*; We need to walk the map data structure. | 308 // From runtime/hashmap.*; We need to walk the map data structure. |
309 // Load the struct, then iterate over the buckets. | 309 // Load the struct, then iterate over the buckets. |
310 // uintgo count (occupancy). | 310 // uintgo count (occupancy). |
311 offset := int(structType.Field[0].ByteOffset) | 311 offset := int(structType.Field[0].ByteOffset) |
312 » count := int(p.arch.Uint(p.tmp[offset : offset+p.arch.PointerSize])) | 312 » count := int(p.arch.Uint(p.tmp[offset : offset+p.arch.IntSize])) |
nigeltao
2014/07/16 05:19:58
I think that sizeof(uintgo) isn't necessarily sizeof(uintptr).
| |
313 // uint8 Log2 of number of buckets. | 313 // uint8 Log2 of number of buckets. |
314 b := uint(p.tmp[structType.Field[3].ByteOffset]) | 314 b := uint(p.tmp[structType.Field[3].ByteOffset]) |
315 // uint8 key size in bytes. | 315 // uint8 key size in bytes. |
316 keySize := uintptr(p.tmp[structType.Field[4].ByteOffset]) | 316 keySize := uintptr(p.tmp[structType.Field[4].ByteOffset]) |
317 // uint8 element size in bytes. | 317 // uint8 element size in bytes. |
318 elemSize := uintptr(p.tmp[structType.Field[5].ByteOffset]) | 318 elemSize := uintptr(p.tmp[structType.Field[5].ByteOffset]) |
319 // uint16 bucket size in bytes. | 319 // uint16 bucket size in bytes. |
320 » bucketSize := uintptr(p.tmp[structType.Field[6].ByteOffset]) | 320 » bucketSize := uintptr(p.arch.Uint16(p.tmp[structType.Field[6].ByteOffset:])) |
nigeltao
2014/07/16 05:19:58
p.arch.Uint16(p.tmp[etc])
| |
321 // pointer to buckets | 321 // pointer to buckets |
322 offset = int(structType.Field[7].ByteOffset) | 322 offset = int(structType.Field[7].ByteOffset) |
323 bucketPtr := uintptr(p.arch.Uintptr(p.tmp[offset : offset+p.arch.PointerSize])) | 323 bucketPtr := uintptr(p.arch.Uintptr(p.tmp[offset : offset+p.arch.PointerSize])) |
324 // pointer to old buckets. | 324 // pointer to old buckets. |
325 offset = int(structType.Field[8].ByteOffset) | 325 offset = int(structType.Field[8].ByteOffset) |
326 oldBucketPtr := uintptr(p.arch.Uintptr(p.tmp[offset : offset+p.arch.PointerSize])) | 326 oldBucketPtr := uintptr(p.arch.Uintptr(p.tmp[offset : offset+p.arch.PointerSize])) |
327 // Ready to print. | 327 // Ready to print. |
328 p.printf("%s{", typ) | 328 p.printf("%s{", typ) |
329 desc := mapDesc{ | 329 desc := mapDesc{ |
330 typ: typ, | 330 typ: typ, |
(...skipping 25 matching lines...) Expand all Loading... | |
356 if !p.ok(p.peeker.peek(addr, desc.buf)) { | 356 if !p.ok(p.peeker.peek(addr, desc.buf)) { |
357 return | 357 return |
358 } | 358 } |
359 // TODO: We have loaded the data but printValueAt loads from remote addresses | 359 // TODO: We have loaded the data but printValueAt loads from remote addresses |
360 // so we need to keep track of addresses and read memory twice. It would | 360 // so we need to keep track of addresses and read memory twice. It would |
361 // be nice to avoid this overhead. | 361 // be nice to avoid this overhead. |
362 keyAddr := addr + bucketCnt + uintptr(p.arch.PointerSize) | 362 keyAddr := addr + bucketCnt + uintptr(p.arch.PointerSize) |
363 elemAddr := keyAddr + bucketCnt*desc.keySize | 363 elemAddr := keyAddr + bucketCnt*desc.keySize |
364 addr += desc.bucketSize // Advance to next bucket; buf, keyAddr and elemAddr are all we need now. | 364 addr += desc.bucketSize // Advance to next bucket; buf, keyAddr and elemAddr are all we need now. |
365 // tophash uint8 [bucketCnt] tells us which buckets are occupied. | 365 // tophash uint8 [bucketCnt] tells us which buckets are occupied. |
366 » » tophash := desc.buf[0:bucketCnt] | 366 » » tophash := desc.buf[:bucketCnt] |
nigeltao
2014/07/16 05:19:57
s/0//
| |
367 for j := 0; desc.count > 0 && j < bucketCnt; j++ { | 367 for j := 0; desc.count > 0 && j < bucketCnt; j++ { |
368 if tophash[j] >= minTopHash { | 368 if tophash[j] >= minTopHash { |
369 p.printValueAt(desc.typ.KeyType, keyAddr) | 369 p.printValueAt(desc.typ.KeyType, keyAddr) |
370 p.printf(":") | 370 p.printf(":") |
371 p.printValueAt(desc.typ.ElemType, elemAddr) | 371 p.printValueAt(desc.typ.ElemType, elemAddr) |
372 desc.count-- | 372 desc.count-- |
373 if desc.count > 0 { | 373 if desc.count > 0 { |
374 p.printf(", ") | 374 p.printf(", ") |
375 } | 375 } |
376 } | 376 } |
377 keyAddr += desc.keySize | 377 keyAddr += desc.keySize |
378 elemAddr += desc.elemSize | 378 elemAddr += desc.elemSize |
379 } | 379 } |
380 // pointer to overflow bucket, if any. | 380 // pointer to overflow bucket, if any. |
381 overflow := uintptr(p.arch.Uintptr(desc.buf[bucketCnt : bucketCnt+p.arch.PointerSize])) | 381 overflow := uintptr(p.arch.Uintptr(desc.buf[bucketCnt : bucketCnt+p.arch.PointerSize])) |
382 p.printMapBucketsAt(desc, overflow) | 382 p.printMapBucketsAt(desc, overflow) |
383 } | 383 } |
384 return | |
nigeltao
2014/07/16 05:19:57
Delete.
| |
385 } | 384 } |
386 | 385 |
387 func (p *Printer) printSliceAt(typ *dwarf.SliceType, addr uintptr) { | 386 func (p *Printer) printSliceAt(typ *dwarf.SliceType, addr uintptr) { |
388 // Slices look like a struct with fields array *elemtype, len uint32/64, cap uint32/64. | 387 // Slices look like a struct with fields array *elemtype, len uint32/64, cap uint32/64. |
389 // BUG: Slice header appears to have fields with ByteSize == 0 | 388 // BUG: Slice header appears to have fields with ByteSize == 0 |
390 if !p.peek(addr, typ.ByteSize) { | 389 if !p.peek(addr, typ.ByteSize) { |
391 p.errorf("slice header has no known size") | 390 p.errorf("slice header has no known size") |
392 return | 391 return |
393 } | 392 } |
394 lo := typ.Field[0].ByteOffset | 393 lo := typ.Field[0].ByteOffset |
(...skipping 51 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
446 return size | 445 return size |
447 } | 446 } |
448 switch typ.(type) { | 447 switch typ.(type) { |
449 case *dwarf.PtrType: | 448 case *dwarf.PtrType: |
450 // This is the only one we know of, but more may arise. | 449 // This is the only one we know of, but more may arise. |
451 return int64(p.arch.PointerSize) | 450 return int64(p.arch.PointerSize) |
452 } | 451 } |
453 p.errorf("unknown size for %s", typ) | 452 p.errorf("unknown size for %s", typ) |
454 return -1 | 453 return -1 |
455 } | 454 } |
LEFT | RIGHT |