[Stackless-checkins] CVS: slpdev/src/2.3/dev/Stackless/platf mkswitch_stack.py, NONE, 1.1 slp_switch_stack.h, NONE, 1.1

Christian Tismer tismer at centera.de
Mon Jul 19 01:20:14 CEST 2004


Update of /home/cvs/slpdev/src/2.3/dev/Stackless/platf
In directory centera.de:/tmp/cvs-serv7578/platf

Added Files:
	mkswitch_stack.py slp_switch_stack.h 
Log Message:
started to add real stack switching.
I generated some similar slp_switch_stack
implementations from the existing includes.

Alas, I have no real clue yet how this will fit into the system.
There is just a wrong dummy which is not used, yet.

--- NEW FILE: mkswitch_stack.py ---
"""
  mkswitch_stack.py

  Purpose:
  Generate an include file from the platform-dependent
  include files mentioned in slp_platformselect.h .

  The existing slp_switch implementations are calling
  the macros slp_save_state and slp_restore_state.
  Now I want to support real stack switching, that is,
  the stack is not modified in place, but we jump to
  a different stack, without copying anything.
  This costs a lot of memory and should be used for
  a few high-speed tasklets, only.

  In order to keep things simple, I'm not special-casing
  the support macros, but use a different macro set.
  The machine code is the same, therefore the implementation
  can be generated from the existing include files.

  We generate a new include file called slp_switch_stack.h .
"""

def parse_platformselect():
    """Generate slp_switch_stack.h from slp_platformselect.h.

    Copies the #if/#elif/#endif skeleton of slp_platformselect.h to
    the output file and replaces each #include line with the
    transformed slp_switch_stack body produced by parse_switch().
    Stops at the first #endif.
    """
    fin_name = "slp_platformselect.h"
    fin = open(fin_name)
    fout_name = "slp_switch_stack.h"
    fout = open(fout_name, "w")
    try:
        print>>fout, "/* this file is generated by mkswitch_stack.py, don't edit */"
        print>>fout
        for line in fin:
            tokens = line.split()
            if not tokens: continue
            tok = tokens[0]
            if tok == "#endif":
                print>>fout, line
                break # done
            if tok in ("#if", "#elif"):
                print>>fout, line
            elif tok == "#include":
                # strip the quotes (or angle brackets) around the name
                finc_name = tokens[1][1:-1]
                txt = parse_switch(finc_name)
                print>>fout, txt
    finally:
        # make sure both handles are released even on error
        fout.close()
        fin.close()

# Substitutions applied to the extracted slp_switch body: rename the
# function and swap the save/restore macros for the stack-switch pair.
edits = (
    ("slp_switch", "slp_switch_stack"),
    ("SLP_SAVE_STATE", "SLP_STACK_BEGIN"),
    ("SLP_RESTORE_STATE", "SLP_STACK_END"),
)

def parse_switch(fname):
    """Extract the slp_switch implementation from include file fname
    and return it rewritten as a slp_switch_stack implementation.

    The body is taken to start at the line reading 'static int' and
    to end at the first line consisting of a single '}'.
    """
    f = open(fname)
    try:
        res = []
        # skip ahead to the 'static int' that opens the function
        for line in f:
            if line.strip() == "static int":
                res.append(line)
                break
        # collect up to and including the closing brace
        for line in f:
            res.append(line)
            if line.rstrip() == "}":
                break
    finally:
        # release the handle even if the file is malformed
        f.close()
    # end of procedure.
    # now substitute
    s = "".join(res)
    for txt, repl in edits:
        s = s.replace(txt, repl)
    return s

if __name__ == "__main__":
    # regenerate slp_switch_stack.h when run as a script
    parse_platformselect()
--- NEW FILE: slp_switch_stack.h ---
/* this file is generated by mkswitch_stack.py, don't edit */

#if   defined(MS_WIN32) && !defined(MS_WIN64) && defined(_M_IX86)

/* slp_switch_stack() for MSVC on 32-bit x86 (MS_WIN32 && _M_IX86).
 * Generated by mkswitch_stack.py from the slp_switch implementation;
 * edits belong in the generator / source include, not in this file. */
static int
slp_switch_stack(void)
{
	register int *stackref, stsizediff;
	/* capture the current stack pointer for SLP_STACK_BEGIN */
	__asm mov stackref, esp;
	/* modify EBX, ESI and EDI in order to get them preserved */
	__asm mov ebx, ebx;
	__asm xchg esi, edi;
	{
	    SLP_STACK_BEGIN(stackref, stsizediff);
	    /* relocate stack and frame pointer by stsizediff bytes */
	    __asm {
		mov     eax, stsizediff
		add     esp, eax
		add     ebp, eax
	    }
	    SLP_STACK_END();
	    return 0;
	}
}

#elif defined(__GNUC__) && defined(__i386__)

/* slp_switch_stack() for gcc on i386.
 * Generated by mkswitch_stack.py; edit the generator, not this file. */
static int
slp_switch_stack(void)
{
	register int *stackref, stsizediff;
	/* empty asm with a clobber list makes gcc save/restore ebx/esi/edi */
	__asm__ volatile ("" : : : "ebx", "esi", "edi");
	/* capture the current stack pointer for SLP_STACK_BEGIN */
	__asm__ ("movl %%esp, %0" : "=g" (stackref));
	{
		SLP_STACK_BEGIN(stackref, stsizediff);
		/* relocate stack and frame pointer by stsizediff bytes */
		__asm__ volatile (
		    "addl %0, %%esp\n"
		    "addl %0, %%ebp\n"
		    :
		    : "r" (stsizediff)
		    );
		SLP_STACK_END();
		return 0;
	}
	/* NOTE(review): unreachable — the block above returns; kept
	   exactly as generated from the source include. */
	__asm__ volatile ("" : : : "ebx", "esi", "edi");
}

#elif defined(__GNUC__) && defined(__PPC__) && defined(__linux__)

/* slp_switch_stack() for gcc on PowerPC / Linux.
 * Generated by mkswitch_stack.py; edit the generator, not this file.
 * REGS_TO_SAVE is a macro defined elsewhere (not visible here). */
static int
slp_switch_stack(void)
{
	register int *stackref, stsizediff;
	/* clobber list forces gcc to preserve REGS_TO_SAVE */
	__asm__ volatile ("" : : : REGS_TO_SAVE);
	/* copy r1 (the PowerPC ABI stack pointer) into stackref */
	__asm__ ("mr %0, 1" : "=g" (stackref) : );
	{
		SLP_STACK_BEGIN(stackref, stsizediff);
		/* shift r1 and r30 by stsizediff, staged through r11 */
		__asm__ volatile (
		    "mr 11, %0\n"
		    "add 1, 1, 11\n"
		    "add 30, 30, 11\n"
		    : /* no outputs */
		    : "g" (stsizediff)
		    : "11"
		    );
		SLP_STACK_END();
		return 0;
	}
	/* NOTE(review): unreachable — the block above returns; kept
	   exactly as generated. */
	__asm__ volatile ("" : : : REGS_TO_SAVE);
}

#elif defined(__GNUC__) && defined(__ppc__) && defined(__APPLE__)

/* slp_switch_stack() for gcc/Apple on ppc (Mac OS X).
 * Generated by mkswitch_stack.py; edit the generator, not this file. */
static int
slp_switch_stack(void)
{
	/* dummy value; only used as an input operand to the first asm */
	static int x = 0;
	register int *stackref, stsizediff;
	/* clobber list forces gcc to preserve REGS_TO_SAVE */
	__asm__ volatile (
	    "; asm block 1\n"
	    : /* no outputs */
	    : "r" (x)
	    : REGS_TO_SAVE
	);
	/* copy r1 (the PowerPC ABI stack pointer) into stackref */
	__asm__ ("; asm block 2\n\tmr %0, r1" : "=g" (stackref) : );
	{
		SLP_STACK_BEGIN(stackref, stsizediff);
		/* shift r1 and r30 by stsizediff, staged through r11 */
		__asm__ volatile (
		    "; asm block 3\n"
		    "\tmr r11, %0\n"
		    "\tadd r1, r1, r11\n"
		    "\tadd r30, r30, r11\n"
		    : /* no outputs */
		    : "g" (stsizediff)
		    : "r11"
		);
		SLP_STACK_END();
		return 0;
	}
}

#elif defined(__GNUC__) && defined(sparc) && defined(sun)

/* slp_switch_stack() for gcc on SPARC / Solaris.
 * Generated by mkswitch_stack.py; edit the generator, not this file. */
static int
slp_switch_stack(void)
{
	register int *stackref, stsizediff;

	/* Put the stack pointer into stackref */

	/* Sparc special: at first, flush register windows
	 */
	__asm__ volatile (
	    "ta %1\n\t"
	    "mov %%sp, %0"
	    : "=r" (stackref) :  "i" (ST_FLUSH_WINDOWS));

	{   /* You shalt put SLP_STACK_BEGIN into a local block */

		SLP_STACK_BEGIN(stackref, stsizediff);

		/* Increment stack and frame pointer by stsizediff */

		/* Sparc special: at first load new return address.
		   This cannot be done later, because the stack
		   might be overwritten again just after SLP_STACK_END
		   has finished. BTW: All other registers (l0-l7 and i0-i5)
		   might be clobbered too. 
		 */
		/* NOTE(review): _cst is not declared in this function;
		   presumably SLP_STACK_BEGIN brings it into scope —
		   verify against the macro definition. */
		__asm__ volatile (
		    "ld [%0+60], %%i7\n\t"
		    "add %1, %%sp, %%sp\n\t"
		    "add %1, %%fp, %%fp"
		    : : "r" (_cst->stack), "r" (stsizediff)
		    : "%l0", "%l1", "%l2", "%l3", "%l4", "%l5", "%l6", "%l7",
		      "%i0", "%i1", "%i2", "%i3", "%i4", "%i5");

		SLP_STACK_END();

		/* Run far away as fast as possible, don't look back at the sins.
		 * The LORD rained down burning sulfur on Sodom and Gomorra ...
		 */

		/* Sparc special: Must make it *very* clear to the CPU that
		   it shouldn't look back into the register windows
		 */
		__asm__ volatile ( "ta %0" : : "i" (ST_CLEAN_WINDOWS));
		return 0;
	} 
}

#elif defined(__GNUC__) && defined(__s390__) && defined(__linux__)

/* slp_switch_stack() for gcc on S/390 (31-bit) / Linux.
 * Generated by mkswitch_stack.py; edit the generator, not this file. */
static int
slp_switch_stack(void)
{
	register int *stackref, stsizediff;
	/* clobber list forces gcc to preserve REGS_TO_SAVE */
	__asm__ volatile ("" : : : REGS_TO_SAVE);
	/* copy r15 (the s390 ABI stack pointer) into stackref */
	__asm__ ("lr %0, 15" : "=g" (stackref) : );
	{
		SLP_STACK_BEGIN(stackref, stsizediff);
		/* shift the stack pointer by stsizediff bytes */
		__asm__ volatile (
		    "ar 15, %0"
		    : /* no outputs */
		    : "g" (stsizediff)
		    );
		SLP_STACK_END();
		return 0;
	}
	/* NOTE(review): unreachable — the block above returns; kept
	   exactly as generated. */
	__asm__ volatile ("" : : : REGS_TO_SAVE);
}

#elif defined(__GNUC__) && defined(__s390x__) && defined(__linux__)

/* slp_switch_stack() for gcc on s390x (64-bit) / Linux.
 * Generated by mkswitch_stack.py; edit the generator, not this file.
 * NOTE(review): body is identical to the 31-bit s390 variant and uses
 * the 32-bit 'lr'/'ar' instructions; on s390x the stack pointer is
 * 64-bit, so 'lgr'/'agr' would be expected — confirm against the
 * source include and fix there, not here. */
static int
slp_switch_stack(void)
{
	register int *stackref, stsizediff;
	/* clobber list forces gcc to preserve REGS_TO_SAVE */
	__asm__ volatile ("" : : : REGS_TO_SAVE);
	/* copy r15 (the s390 ABI stack pointer) into stackref */
	__asm__ ("lr %0, 15" : "=g" (stackref) : );
	{
		SLP_STACK_BEGIN(stackref, stsizediff);
		/* shift the stack pointer by stsizediff bytes */
		__asm__ volatile (
		    "ar 15, %0"
		    : /* no outputs */
		    : "g" (stsizediff)
		    );
		SLP_STACK_END();
		return 0;
	}
	/* NOTE(review): unreachable — the block above returns; kept
	   exactly as generated. */
	__asm__ volatile ("" : : : REGS_TO_SAVE);
}

#endif



_______________________________________________
Stackless-checkins mailing list
Stackless-checkins at stackless.com
http://www.stackless.com/mailman/listinfo/stackless-checkins



More information about the Stackless-checkins mailing list