LEFT | RIGHT |
1 // Copyright 2009 The Go Authors. All rights reserved. | 1 // Copyright 2009 The Go Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style | 2 // Use of this source code is governed by a BSD-style |
3 // license that can be found in the LICENSE file. | 3 // license that can be found in the LICENSE file. |
4 | 4 |
5 #include "amd64/asm.h" | 5 #include "amd64/asm.h" |
6 | 6 |
7 TEXT _rt0_amd64(SB),7,$-8 | 7 TEXT _rt0_amd64(SB),7,$-8 |
8 // copy arguments forward on an even stack | 8 // copy arguments forward on an even stack |
9 MOVQ 0(DI), AX // argc | 9 MOVQ 0(DI), AX // argc |
10 LEAQ 8(DI), BX // argv | 10 LEAQ 8(DI), BX // argv |
(...skipping 71 matching lines...)
82 RET | 82 RET |
83 | 83 |
84 TEXT runtime·breakpoint(SB),7,$0 | 84 TEXT runtime·breakpoint(SB),7,$0 |
85 BYTE $0xcc | 85 BYTE $0xcc |
86 RET | 86 RET |
87 | 87 |
88 /* | 88 /* |
89 * go-routine | 89 * go-routine |
90 */ | 90 */ |
91 | 91 |
92 // uintptr gosave(Gobuf*) | 92 // void gosave(Gobuf*) |
93 // save state in Gobuf; setjmp | 93 // save state in Gobuf; setjmp |
94 TEXT runtime·gosave(SB), 7, $0 | 94 TEXT runtime·gosave(SB), 7, $0 |
95 MOVQ 8(SP), AX // gobuf | 95 MOVQ 8(SP), AX // gobuf |
96 LEAQ 8(SP), BX // caller's SP | 96 LEAQ 8(SP), BX // caller's SP |
97 MOVQ BX, gobuf_sp(AX) | 97 MOVQ BX, gobuf_sp(AX) |
98 MOVQ 0(SP), BX // caller's PC | 98 MOVQ 0(SP), BX // caller's PC |
99 MOVQ BX, gobuf_pc(AX) | 99 MOVQ BX, gobuf_pc(AX) |
100 get_tls(CX) | 100 get_tls(CX) |
101 MOVQ g(CX), BX | 101 MOVQ g(CX), BX |
102 MOVQ BX, gobuf_g(AX) | 102 MOVQ BX, gobuf_g(AX) |
103 MOVL $0, AX // return 0 | |
104 RET | 103 RET |
105 | 104 |
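A rough C-style sketch of the Gobuf state that gosave records and gogo below restores; the field names mirror the gobuf_sp/gobuf_pc/gobuf_g offsets used in the assembly, and the struct is illustrative rather than the runtime's actual declaration:

    /* Illustrative only; layout inferred from the offsets above. */
    typedef struct Gobuf Gobuf;
    struct Gobuf {
    	void	*sp;	/* caller's stack pointer, saved by gosave */
    	void	*pc;	/* caller's return PC, saved by gosave */
    	void	*g;	/* the g that was current when gosave ran */
    };
    /* gosave fills in a Gobuf, like setjmp; gogo reloads SP, PC, and g
     * from it, like longjmp, and hands its second argument back in AX
     * as the "return value" seen at the saved PC. */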
106 // void gogo(Gobuf*, uintptr) | 105 // void gogo(Gobuf*, uintptr) |
107 // restore state from Gobuf; longjmp | 106 // restore state from Gobuf; longjmp |
108 TEXT runtime·gogo(SB), 7, $0 | 107 TEXT runtime·gogo(SB), 7, $0 |
109 MOVQ 16(SP), AX // return 2nd arg | 108 MOVQ 16(SP), AX // return 2nd arg |
110 MOVQ 8(SP), BX // gobuf | 109 MOVQ 8(SP), BX // gobuf |
111 MOVQ gobuf_g(BX), DX | 110 MOVQ gobuf_g(BX), DX |
112 MOVQ 0(DX), CX // make sure g != nil | 111 MOVQ 0(DX), CX // make sure g != nil |
113 get_tls(CX) | 112 get_tls(CX) |
(...skipping 244 matching lines...)
358 MOVQ 16(SP), AX | 357 MOVQ 16(SP), AX |
359 MOVQ 24(SP), CX | 358 MOVQ 24(SP), CX |
360 LOCK | 359 LOCK |
361 CMPXCHGQ CX, 0(BX) | 360 CMPXCHGQ CX, 0(BX) |
362 JZ 3(PC) | 361 JZ 3(PC) |
363 MOVL $0, AX | 362 MOVL $0, AX |
364 RET | 363 RET |
365 MOVL $1, AX | 364 MOVL $1, AX |
366 RET | 365 RET |
367 | 366 |
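The LOCK CMPXCHGQ tail above is a 64-bit compare-and-swap; its TEXT line falls in the elided region, so the name and signature in this C sketch are assumptions, and only the semantics are meant to match:

    /* Hedged sketch of the semantics, not the runtime's code.  The
     * pointer, old value, and new value come from 8(SP), 16(SP), and
     * 24(SP); CMPXCHGQ compares against AX and sets ZF on success. */
    int
    cas64_sketch(unsigned long long *p, unsigned long long old, unsigned long long new)
    {
    	if(*p == old) {		/* done atomically by LOCK CMPXCHGQ */
    		*p = new;
    		return 1;	/* JZ taken: MOVL $1, AX */
    	}
    	return 0;		/* fall through: MOVL $0, AX */
    }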
368 | |
369 // void jmpdefer(fn, sp); | 367 // void jmpdefer(fn, sp); |
370 // called from deferreturn. | 368 // called from deferreturn. |
371 // 1. pop the caller | 369 // 1. pop the caller |
372 	// 2. sub 5 bytes from the caller's return address (the length of the CALL instruction) | 370 	// 2. sub 5 bytes from the caller's return address (the length of the CALL instruction) |
373 // 3. jmp to the argument | 371 // 3. jmp to the argument |
374 TEXT runtime·jmpdefer(SB), 7, $0 | 372 TEXT runtime·jmpdefer(SB), 7, $0 |
375 MOVQ 8(SP), AX // fn | 373 MOVQ 8(SP), AX // fn |
376 MOVQ 16(SP), BX // caller sp | 374 MOVQ 16(SP), BX // caller sp |
377 LEAQ -8(BX), SP // caller sp after CALL | 375 LEAQ -8(BX), SP // caller sp after CALL |
378 SUBQ $5, (SP) // return to CALL again | 376 SUBQ $5, (SP) // return to CALL again |
(...skipping 11 matching lines...)
390 // aligned appropriately for the gcc ABI. | 388 // aligned appropriately for the gcc ABI. |
391 // See cgocall.c for more details. | 389 // See cgocall.c for more details. |
392 TEXT runtime·asmcgocall(SB),7,$0 | 390 TEXT runtime·asmcgocall(SB),7,$0 |
393 MOVQ fn+0(FP), AX | 391 MOVQ fn+0(FP), AX |
394 MOVQ arg+8(FP), BX | 392 MOVQ arg+8(FP), BX |
395 MOVQ SP, DX | 393 MOVQ SP, DX |
396 | 394 |
397 // Figure out if we need to switch to m->g0 stack. | 395 // Figure out if we need to switch to m->g0 stack. |
398 // We get called to create new OS threads too, and those | 396 // We get called to create new OS threads too, and those |
399 // come in on the m->g0 stack already. | 397 // come in on the m->g0 stack already. |
400 // TODO(rsc): Why would we not? | |
401 get_tls(CX) | 398 get_tls(CX) |
402 MOVQ m(CX), BP | 399 MOVQ m(CX), BP |
403 MOVQ m_g0(BP), SI | 400 MOVQ m_g0(BP), SI |
404 MOVQ g(CX), DI | 401 MOVQ g(CX), DI |
405 CMPQ SI, DI | 402 CMPQ SI, DI |
406 	JEQ	5(PC) | 403 	JEQ	6(PC) |
407 MOVQ SP, (g_sched+gobuf_sp)(DI) | 404 MOVQ SP, (g_sched+gobuf_sp)(DI) |
408 MOVQ $return<>(SB), (g_sched+gobuf_pc)(DI) | 405 MOVQ $return<>(SB), (g_sched+gobuf_pc)(DI) |
409 MOVQ DI, (g_sched+gobuf_g)(DI) | 406 MOVQ DI, (g_sched+gobuf_g)(DI) |
| 407 MOVQ SI, g(CX) |
410 MOVQ (g_sched+gobuf_sp)(SI), SP | 408 MOVQ (g_sched+gobuf_sp)(SI), SP |
411 | 409 |
412 // Now on a scheduling stack (a pthread-created stack). | 410 // Now on a scheduling stack (a pthread-created stack). |
413 SUBQ $32, SP | 411 SUBQ $32, SP |
414 ANDQ $~15, SP // alignment for gcc ABI | 412 ANDQ $~15, SP // alignment for gcc ABI |
415 MOVQ DI, 16(SP) // save g | 413 MOVQ DI, 16(SP) // save g |
416 MOVQ SI, g(CX) | |
417 MOVQ DX, 8(SP) // save SP | 414 MOVQ DX, 8(SP) // save SP |
418 MOVQ BX, DI // DI = first argument in AMD64 ABI | 415 MOVQ BX, DI // DI = first argument in AMD64 ABI |
419 CALL AX | 416 CALL AX |
420 | 417 |
421 // Restore registers, g, stack pointer. | 418 // Restore registers, g, stack pointer. |
422 get_tls(CX) | 419 get_tls(CX) |
423 MOVQ 16(SP), DI | 420 MOVQ 16(SP), DI |
424 MOVQ DI, g(CX) | 421 MOVQ DI, g(CX) |
425 MOVQ 8(SP), SP | 422 MOVQ 8(SP), SP |
426 RET | 423 RET |
427 | 424 |
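A rough C-style rendering of the control flow asmcgocall implements above; the types and helpers are illustrative stubs, the field names mirror the m_g0/g_sched/gobuf_* offsets in the assembly, and the raw stack switch is only really expressible in assembly, so treat this as a sketch rather than runtime source:

    /* Illustrative stubs for TLS access and the SP switch that the
     * assembly performs directly; none of these are real runtime symbols. */
    typedef struct Gobuf { void *sp, *pc, *g; } Gobuf;
    typedef struct G { Gobuf sched; } G;
    typedef struct M { G *g0; } M;
    extern G	*getg(void);	/* g(CX) */
    extern M	*getm(void);	/* m(CX) */
    extern void	setg(G*);
    extern void	*getsp(void);
    extern void	setsp(void*);

    void
    asmcgocall_sketch(void (*fn)(void*), void *arg)
    {
    	G *g = getg();
    	M *m = getm();
    	void *oldsp = getsp();

    	if(g != m->g0) {
    		/* Not on the scheduling stack yet: record where we were
    		 * (the assembly also stores $return<>(SB) as the saved PC),
    		 * then hop onto m->g0's pthread-created stack. */
    		g->sched.sp = oldsp;
    		g->sched.g = g;
    		setg(m->g0);
    		setsp(m->g0->sched.sp);
    	}

    	/* The assembly then reserves 32 bytes, rounds SP down to a
    	 * 16-byte boundary for the gcc ABI (ANDQ $~15, SP), and stashes
    	 * the old g and SP there before calling fn with its argument
    	 * in DI, the first-argument register of the AMD64 ABI. */
    	fn(arg);

    	/* On return, the saved g and SP are restored from the scratch area. */
    	setg(g);
    	setsp(oldsp);
    }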
428 // asmcgocallback(void (*fn)(void*), void *frame, uintptr framesize) | 425 // cgocallback(void (*fn)(void*), void *frame, uintptr framesize) |
429 // See cgocall.c for more details. | 426 // See cgocall.c for more details. |
430 // The frame size is | 427 TEXT runtime·cgocallback(SB),7,$24 |
431 TEXT runtime·asmcgocallback(SB),7,$24 | |
432 MOVQ fn+0(FP), AX | 428 MOVQ fn+0(FP), AX |
433 MOVQ frame+8(FP), BX | 429 MOVQ frame+8(FP), BX |
434 MOVQ framesize+16(FP), DX | 430 MOVQ framesize+16(FP), DX |
435 | 431 |
436 // Save current m->g0->sched.sp on stack and then set it to SP. | 432 // Save current m->g0->sched.sp on stack and then set it to SP. |
437 get_tls(CX) | 433 get_tls(CX) |
438 MOVQ m(CX), BP | 434 MOVQ m(CX), BP |
439 MOVQ m_g0(BP), SI | 435 MOVQ m_g0(BP), SI |
440 PUSHQ (g_sched+gobuf_sp)(SI) | 436 PUSHQ (g_sched+gobuf_sp)(SI) |
441 MOVQ SP, (g_sched+gobuf_sp)(SI) | 437 MOVQ SP, (g_sched+gobuf_sp)(SI) |
442 | 438 |
443 // Switch to m->curg stack and call runtime.cgocallback | 439 // Switch to m->curg stack and call runtime.cgocallback |
444 // with the three arguments. Because we are taking over | 440 // with the three arguments. Because we are taking over |
445 // the execution of m->curg but *not* resuming what had | 441 // the execution of m->curg but *not* resuming what had |
446 // been running, we need to save that information (m->curg->gobuf) | 442 // been running, we need to save that information (m->curg->gobuf) |
447 // so that we can restore it when we're done. | 443 // so that we can restore it when we're done. |
448 // We can restore m->curg->gobuf.sp easily, because calling | 444 // We can restore m->curg->gobuf.sp easily, because calling |
449 // runtime.cgocallback leaves SP unchanged upon return. | 445 // runtime.cgocallback leaves SP unchanged upon return. |
450 // To save m->curg->gobuf.pc, we push it onto the stack. | 446 // To save m->curg->gobuf.pc, we push it onto the stack. |
451 // This has the added benefit that it looks to the traceback | 447 // This has the added benefit that it looks to the traceback |
452 	// routine like asmcgocallback is going to return to that | 448 	// routine like cgocallback is going to return to that |
453 	// PC (because we defined asmcgocallback to have | 449 	// PC (because we defined cgocallback to have |
454 // a frame size of 24, the same amount that we use below), | 450 // a frame size of 24, the same amount that we use below), |
455 // so that the traceback will seamlessly trace back into | 451 // so that the traceback will seamlessly trace back into |
456 // the earlier calls. | 452 // the earlier calls. |
457 MOVQ m_curg(BP), SI | 453 MOVQ m_curg(BP), SI |
458 MOVQ SI, g(CX) | 454 MOVQ SI, g(CX) |
459 MOVQ (g_sched+gobuf_sp)(SI), DI // prepare stack as DI | 455 MOVQ (g_sched+gobuf_sp)(SI), DI // prepare stack as DI |
460 | 456 |
461 // Push gobuf.pc | 457 // Push gobuf.pc |
462 MOVQ (g_sched+gobuf_pc)(SI), BP | 458 MOVQ (g_sched+gobuf_pc)(SI), BP |
463 SUBQ $8, DI | 459 SUBQ $8, DI |
464 MOVQ BP, 0(DI) | 460 MOVQ BP, 0(DI) |
465 | 461 |
466 	// Push arguments to cgocallback. | 462 	// Push arguments to cgocallbackg. |
467 // Frame size here must match the frame size above | 463 // Frame size here must match the frame size above |
468 // to trick traceback routines into doing the right thing. | 464 // to trick traceback routines into doing the right thing. |
469 SUBQ $24, DI | 465 SUBQ $24, DI |
470 MOVQ AX, 0(DI) | 466 MOVQ AX, 0(DI) |
471 MOVQ BX, 8(DI) | 467 MOVQ BX, 8(DI) |
472 MOVQ DX, 16(DI) | 468 MOVQ DX, 16(DI) |
473 | 469 |
474 // Switch stack and make the call. | 470 // Switch stack and make the call. |
475 MOVQ DI, SP | 471 MOVQ DI, SP |
476 	CALL	runtime·cgocallback(SB) | 472 	CALL	runtime·cgocallbackg(SB) |
477 | 473 |
478 // Restore g->gobuf (== m->curg->gobuf) from saved values. | 474 // Restore g->gobuf (== m->curg->gobuf) from saved values. |
479 get_tls(CX) | 475 get_tls(CX) |
480 MOVQ g(CX), SI | 476 MOVQ g(CX), SI |
481 	MOVQ	SP, DI | 477 	MOVQ	24(SP), BP |
482 	ADDQ	$24, DI | |
483 	MOVQ	0(DI), BP | |
484 MOVQ BP, (g_sched+gobuf_pc)(SI) | 478 MOVQ BP, (g_sched+gobuf_pc)(SI) |
485 	ADDQ	$8, DI | 479 	LEAQ	(24+8)(SP), DI |
486 MOVQ DI, (g_sched+gobuf_sp)(SI) | 480 MOVQ DI, (g_sched+gobuf_sp)(SI) |
487 | 481 |
488 // Switch back to m->g0's stack and restore m->g0->sched.sp. | 482 // Switch back to m->g0's stack and restore m->g0->sched.sp. |
489 // (Unlike m->curg, the g0 goroutine never uses sched.pc, | 483 // (Unlike m->curg, the g0 goroutine never uses sched.pc, |
490 // so we do not have to restore it.) | 484 // so we do not have to restore it.) |
491 MOVQ m(CX), BP | 485 MOVQ m(CX), BP |
492 MOVQ m_g0(BP), SI | 486 MOVQ m_g0(BP), SI |
493 MOVQ SI, g(CX) | 487 MOVQ SI, g(CX) |
494 MOVQ (g_sched+gobuf_sp)(SI), SP | 488 MOVQ (g_sched+gobuf_sp)(SI), SP |
495 POPQ (g_sched+gobuf_sp)(SI) | 489 POPQ (g_sched+gobuf_sp)(SI) |
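To make the frame-size trick concrete, this is the layout the code above builds on m->curg's stack immediately before the CALL, reconstructed from the PUSHQ/SUBQ/MOVQ sequence (offsets are relative to the SP handed to runtime·cgocallbackg):

    0(SP)	fn		(from AX)
    8(SP)	frame		(from BX)
    16(SP)	framesize	(from DX)	<- the 24-byte argument frame matching cgocallback's $24 frame size
    24(SP)	m->curg's saved gobuf.pc	(pushed first; traceback treats it as cgocallback's return PC)

On the way out, MOVQ 24(SP), BP recovers that saved PC for g_sched+gobuf_pc, and LEAQ (24+8)(SP), DI recomputes the original m->curg stack pointer for g_sched+gobuf_sp.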
(...skipping 33 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
529 MOVQ x+0(FP),AX // addr of first arg | 523 MOVQ x+0(FP),AX // addr of first arg |
530 MOVQ x+8(FP), BX | 524 MOVQ x+8(FP), BX |
531 MOVQ BX, -8(AX) // set calling pc | 525 MOVQ BX, -8(AX) // set calling pc |
532 RET | 526 RET |
533 | 527 |
534 TEXT runtime·getcallersp(SB),7,$0 | 528 TEXT runtime·getcallersp(SB),7,$0 |
535 MOVQ sp+0(FP), AX | 529 MOVQ sp+0(FP), AX |
536 RET | 530 RET |
537 | 531 |
538 GLOBL runtime·tls0(SB), $64 | 532 GLOBL runtime·tls0(SB), $64 |