aboutsummaryrefslogtreecommitdiff
path: root/py/nlrx64.c
diff options
context:
space:
mode:
authorPaul Sokolovsky <pfalcon@users.sourceforge.net>2017-12-26 18:39:51 +0200
committerPaul Sokolovsky <pfalcon@users.sourceforge.net>2017-12-26 19:27:58 +0200
commit096e967aad6df760c16de4878b8b2eea02330011 (patch)
tree93d57cdf309d75aa8be84fe343d4b1448db6dcdf /py/nlrx64.c
parentd9977a8ad9c257005455ca8cbc7913584f2394a6 (diff)
Revert "py/nlr: Factor out common NLR code to generic functions."
This reverts commit 6a3a742a6c9caaa2be0fd0aac7a5df4ac816081c. The above commit has number of faults starting from the motivation down to the actual implementation. 1. Faulty implementation. The original code contained functions like: NORETURN void nlr_jump(void *val) { nlr_buf_t **top_ptr = &MP_STATE_THREAD(nlr_top); nlr_buf_t *top = *top_ptr; ... __asm volatile ( "mov %0, %%edx \n" // %edx points to nlr_buf "mov 28(%%edx), %%esi \n" // load saved %esi "mov 24(%%edx), %%edi \n" // load saved %edi "mov 20(%%edx), %%ebx \n" // load saved %ebx "mov 16(%%edx), %%esp \n" // load saved %esp "mov 12(%%edx), %%ebp \n" // load saved %ebp "mov 8(%%edx), %%eax \n" // load saved %eip "mov %%eax, (%%esp) \n" // store saved %eip to stack "xor %%eax, %%eax \n" // clear return register "inc %%al \n" // increase to make 1, non-local return "ret \n" // return : // output operands : "r"(top) // input operands : // clobbered registers ); } Which clearly stated that C-level variable should be a parameter of the assembly, whcih then moved it into correct register. Whereas now it's: NORETURN void nlr_jump_tail(nlr_buf_t *top) { (void)top; __asm volatile ( "mov 28(%edx), %esi \n" // load saved %esi "mov 24(%edx), %edi \n" // load saved %edi "mov 20(%edx), %ebx \n" // load saved %ebx "mov 16(%edx), %esp \n" // load saved %esp "mov 12(%edx), %ebp \n" // load saved %ebp "mov 8(%edx), %eax \n" // load saved %eip "mov %eax, (%esp) \n" // store saved %eip to stack "xor %eax, %eax \n" // clear return register "inc %al \n" // increase to make 1, non-local return "ret \n" // return ); for (;;); // needed to silence compiler warning } Which just tries to perform operations on a completely random register (edx in this case). The outcome is the expected: saving the pure random luck of the compiler putting the right value in the random register above, there's a crash. 2. Non-critical assessment. The original commit message says "There is a small overhead introduced (typically 1 machine instruction)". 
That machine instruction is a call if a compiler doesn't perform tail-call optimization (happens regularly), and it's 1 instruction only with the broken code shown above; fixing it requires adding more. With inefficiencies already present in the NLR code, the overhead becomes "considerable" (several times more than 1%), not "small". The commit message also says "This eliminates duplicated code.". An obvious way to eliminate duplication would be to factor out common code to macros, not introduce overhead and breakage like above. 3. Faulty motivation. All this started with a report of warnings/errors happening for a niche compiler. It could have been solved in one of the direct ways: a) fixing it just for affected compiler(s); b) rewriting it in proper assembly (like it was before BTW); c) by not doing anything at all, MICROPY_NLR_SETJMP exists exactly to address minor-impact cases like that (where a) or b) are not applicable). Instead, a backwards "solution" was put forward, leading to all the issues above. The best action thus appears to be to revert and rework, not trying to work around what went haywire in the first place.
Diffstat (limited to 'py/nlrx64.c')
-rw-r--r--py/nlrx64.c70
1 files changed, 44 insertions, 26 deletions
diff --git a/py/nlrx64.c b/py/nlrx64.c
index e7e2e1474..ddcd76166 100644
--- a/py/nlrx64.c
+++ b/py/nlrx64.c
@@ -26,7 +26,7 @@
#include "py/mpstate.h"
-#if MICROPY_NLR_X64
+#if !MICROPY_NLR_SETJMP && defined(__x86_64__)
#undef nlr_push
@@ -39,6 +39,8 @@
#define NLR_OS_WINDOWS 0
#endif
+__attribute__((used)) unsigned int nlr_push_tail(nlr_buf_t *nlr);
+
unsigned int nlr_push(nlr_buf_t *nlr) {
(void)nlr;
@@ -86,38 +88,54 @@ unsigned int nlr_push(nlr_buf_t *nlr) {
return 0; // needed to silence compiler warning
}
-NORETURN void nlr_jump_tail(nlr_buf_t *top) {
- (void)top;
+__attribute__((used)) unsigned int nlr_push_tail(nlr_buf_t *nlr) {
+ nlr_buf_t **top = &MP_STATE_THREAD(nlr_top);
+ nlr->prev = *top;
+ MP_NLR_SAVE_PYSTACK(nlr);
+ *top = nlr;
+ return 0; // normal return
+}
+
+void nlr_pop(void) {
+ nlr_buf_t **top = &MP_STATE_THREAD(nlr_top);
+ *top = (*top)->prev;
+}
+
+NORETURN void nlr_jump(void *val) {
+ nlr_buf_t **top_ptr = &MP_STATE_THREAD(nlr_top);
+ nlr_buf_t *top = *top_ptr;
+ if (top == NULL) {
+ nlr_jump_fail(val);
+ }
+
+ top->ret_val = val;
+ MP_NLR_RESTORE_PYSTACK(top);
+ *top_ptr = top->prev;
__asm volatile (
+ "movq %0, %%rcx \n" // %rcx points to nlr_buf
#if NLR_OS_WINDOWS
- "movq 88(%rcx), %rsi \n" // load saved %rsi
- "movq 80(%rcx), %rdi \n" // load saved %rdr
- "movq 72(%rcx), %r15 \n" // load saved %r15
- "movq 64(%rcx), %r14 \n" // load saved %r14
- "movq 56(%rcx), %r13 \n" // load saved %r13
- "movq 48(%rcx), %r12 \n" // load saved %r12
- "movq 40(%rcx), %rbx \n" // load saved %rbx
- "movq 32(%rcx), %rsp \n" // load saved %rsp
- "movq 24(%rcx), %rbp \n" // load saved %rbp
- "movq 16(%rcx), %rax \n" // load saved %rip
- #else
- "movq 72(%rdi), %r15 \n" // load saved %r15
- "movq 64(%rdi), %r14 \n" // load saved %r14
- "movq 56(%rdi), %r13 \n" // load saved %r13
- "movq 48(%rdi), %r12 \n" // load saved %r12
- "movq 40(%rdi), %rbx \n" // load saved %rbx
- "movq 32(%rdi), %rsp \n" // load saved %rsp
- "movq 24(%rdi), %rbp \n" // load saved %rbp
- "movq 16(%rdi), %rax \n" // load saved %rip
+ "movq 88(%%rcx), %%rsi \n" // load saved %rsi
+ "movq 80(%%rcx), %%rdi \n" // load saved %rdr
#endif
- "movq %rax, (%rsp) \n" // store saved %rip to stack
- "xorq %rax, %rax \n" // clear return register
- "inc %al \n" // increase to make 1, non-local return
+ "movq 72(%%rcx), %%r15 \n" // load saved %r15
+ "movq 64(%%rcx), %%r14 \n" // load saved %r14
+ "movq 56(%%rcx), %%r13 \n" // load saved %r13
+ "movq 48(%%rcx), %%r12 \n" // load saved %r12
+ "movq 40(%%rcx), %%rbx \n" // load saved %rbx
+ "movq 32(%%rcx), %%rsp \n" // load saved %rsp
+ "movq 24(%%rcx), %%rbp \n" // load saved %rbp
+ "movq 16(%%rcx), %%rax \n" // load saved %rip
+ "movq %%rax, (%%rsp) \n" // store saved %rip to stack
+ "xorq %%rax, %%rax \n" // clear return register
+ "inc %%al \n" // increase to make 1, non-local return
"ret \n" // return
+ : // output operands
+ : "r"(top) // input operands
+ : // clobbered registers
);
for (;;); // needed to silence compiler warning
}
-#endif // MICROPY_NLR_X64
+#endif // !MICROPY_NLR_SETJMP && defined(__x86_64__)