/* (extraction residue: archive directory listing — kept for reference)
sima/autoconf/
sima/hosts/i386/
sima/mudlib/
sima/mudlib/kernel/
sima/mudlib/obj/
sima/mudlib/sys/
sima/synhash/mips/
*/
/* Copyright 1995, 1997 J"orn Rennecke */

#include <stdarg.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>

#define COMPILER_GLOBAL

#include "common.h"
#include "lex.h"
#include "compiler.h"
#include "exec.h"
#include "instrs.h"
#include "interpret.h"
#include "object.h"
#include "uid.h"
#include "lang.h"

/* Placeholder program object used where no real program is available. */
struct program nil_program;

/* Monotonically increasing id handed out to freshly compiled programs;
 * wraparound is handled via renumber_programs() in end_compile(). */
int32 current_id_number;

extern int pragma_optimize;

extern struct ident builtin_structs[];
/* Table of identifiers predefined by the compiler; entry 0 is "efun". */
struct ident builtin_identifiers[] = {
#define EFUN_IDENTIFIER builtin_identifiers[0]
  {
    "efun", 4, I_TYPE_GLOBAL, 0, 0,0, {{ -2, -2, -1, -1 }},
    &builtin_structs[0]
  }
};

/* Forward declaration; definition near the bottom of this file. */
static mp_int alloc_variable();

/* Allocate a temporary parse node.  Nodes returned via free_tmpnode()
 * are recycled from a free list; otherwise fall back to the node-block
 * allocator. */
void *alloc_tmpnode() {
    struct binode *head = free_tmpnodes;

    if (head == 0)
	return alloc_node();
    /* Unlink the head of the free list (chained through node[0].p). */
    free_tmpnodes = head->node[0].p;
    return head;
}

/* Return a node obtained from alloc_tmpnode() to the free list. */
void free_tmpnode(void *n) {
    struct binode *bn = n;

    /* Push onto the free list, chaining through node[0].p. */
    bn->node[0].p = free_tmpnodes;
    free_tmpnodes = bn;
}

/* Concatenate a linked list of lexed string fragments into one freshly
 * allocated string svalue, translating escape sequences on the way.
 * `str' is the first fragment to process, `length' the exact number of
 * output characters, and `next' the remaining struct string_concat
 * fragments (freed via free_tmpnode() as they are consumed).  Each
 * fragment is expected to end with an unescaped '"'.
 * Returns a zero svalue after reporting CE_NOMEM on allocation failure.
 */
union svalue concat_strings(register char *str, mp_int length,
	struct string_concat *next)
{
    struct string_concat *l;
    union svalue res;
    register char *dest;

    /* Pick the small- or large-string representation by length. */
    if (length > MAX_SMALL_STRING) {
	res = ALLOC_LSTRING(length);
	if (!res.p)
	    goto out_of_memory;
	dest = SV_LSTRING(res);
	SV_LSTRREF(res) = 0;
	SV_LSTRLEN(res) = length;
    } else {
	res = ALLOC_STRING(length);
	if (!res.p) {
  out_of_memory:
	    yyerrorn(CE_NOMEM);
	    return (union svalue)(p_int)0;
	}
	dest = SV_STRING(res);
	SV_STRREF(res) = 0;
	SV_STRLEN(res) = length;
    }
    /* The first fragment is given by the arguments, not by a list node,
     * so jump into the middle of the fragment loop. */
    goto first_string;

    do {
	register char c;

	next = l->next;
	str = l->start;
	free_tmpnode(l);
  first_string:
	c = *str;
	/* Copy characters up to the terminating '"', expanding escapes. */
	if (c != '\"') do {
	    if (c == '\\') {
		c = escchars[*(unsigned char *)++str];
		if (!c) {
		    /* '\\' \0' and '\\' '\r' are considered to
		     * have unspecified behaviour. We treat
		     * them like '\\' '\n' .
		     */
		    c = *str;
		    if (c <= '\r') {
			/* '\\' '\n' , '\\' \n' '\r' or '\\' \r' \n' */

			/* Skip the second char of a CRLF/LFCR pair:
			 * c ^ '\n' ^ '\r' is the "other" line ender. */
			if ((c ^ '\n' ^ '\r') == str[1])
			    str++;
			continue;
		    } else {
                        /* octal */

			unsigned char d;

			/* Accumulate up to three octal digits. */
			c -= '0';
			d = str[1] - '0';
			if (d <= 7) {
			    c <<= 3;
			    c += d;
			    str++;
			    d = str[1] - '0';
			    if (d <= 7) {
				c <<= 3;
				c += d;
				str++;
			    }
			}
			/* end octal */
		    }
		}
	    }
	    *dest++ = c;
	} while ((c = *++str) != '\"');
    } while(l = next);
    return res;
}

/* Store the constant `sv' into the leaf/inner node `*nodep', choosing
 * the most compact representation (inline small integer, empty-array
 * allocation, lfun-closure reference, or shared-constant table entry).
 * Returns the estimated code length in bytes for loading the constant.
 */
int constant_node(svalue sv, union node *nodep) {
    if (SV_IS_NUMBER(sv) && sv.i > -0x200 && sv.i < 0x200) {
	/* Small integers are encoded inline in the leaf. */
	nodep->leaf.type = LN_INT;
	nodep->leaf.n.s = sv.i >> 1;
	return ((unsigned)sv.i <= 2) ? 1 : 2;
    } else if (sv.p == NIL_ARRAY.p) {
	/* The empty array is represented as F_ALLOCATE_ARRAY 0 so the
	 * program does not keep a shared array constant alive. */
	struct binode *n;

	n = alloc_mnode();
	nodep->p = n;
	n->ntype = N_UNARY;
	n->opr = F_ALLOCATE_ARRAY;
	n->line = 0; /* don't store extra line number information for this */
	n->node[0].leaf.type = LN_INT;
	n->node[0].leaf.n.s = 0;
	return 2;
    } else if (! SV_IS_NUMBER(sv) && SV_TYPE(sv) == T_CLOSURE &&
	SV_CLOSURE(sv).g.closure_type == CLOSURE_PROTO_LFUN &&
	SV_CLOSURE(sv).lfun.index < CLOSURE_IDENTIFIER_OFFS) {
	/* Closures over lfuns of this very program are referenced by
	 * function index instead of being stored as shared values. */
	nodep->leaf.type = LN_LFUN_CLOSURE;
	nodep->leaf.n.u = SV_CLOSURE(sv).lfun.index;
	FREE_ALLOCED_SVALUE(sv);
	return 3;
    } else {
	/* Everything else goes into the shared-constant table. */
	if (num_shared == max_shared) {
	    union svalue *tmp;

	    /* Grow via a temporary so the old table stays valid (and
	     * reachable) if realloc fails; only commit max_shared on
	     * success.  The previous code overwrote `shared' directly,
	     * leaking the table and crashing on OOM. */
	    tmp = realloc(shared, sizeof(union svalue)*(max_shared << 1));
	    if (!tmp) {
		yyerrorn(CE_NOMEM);
		FREE_SVALUE(sv);
		/* Degrade to a harmless constant 0 so compilation can
		 * continue after the error has been reported. */
		nodep->leaf.type = LN_INT;
		nodep->leaf.n.s = 0;
		return 1;
	    }
	    shared = tmp;
	    max_shared <<= 1;
	}
	nodep->leaf.n.u = num_shared;
	shared[num_shared++] = sv;
	nodep->leaf.type = LN_CONST;
	return num_shared > cshared_threshold ? 3 : 2;
	/* When generating code, test if type is array or mapping.
	 * Reserve first shared values for shared variables, shift
	 * shared constants up accordingly.
	 */
    }
}

int comp_type(union svalue sv) {
    if (SV_IS_NUMBER(sv))
	return sv.i ? TYPE_NUMBER : TYPE_ANY;
    switch(SV_TYPE(sv)) {
      case T_STRING:
      case T_LSTRING:
      case T_GSTRING:
      case T_GLSTRING:
      case T_ISTRING:
      case T_ILSTRING:
	return TYPE_STRING;
      case T_MAPPING:
	return TYPE_MAPPING;
      case T_ARRAY:
      case T_LARRAY:
	/* fixme: could test if it's an array of particular type */
	return TYPE__ARRAY|TYPE_ANY;
      case T_OBJECT:
	return TYPE_OBJECT;
      case T_DESTRUCTED:
	return TYPE_ANY;
      case T_CLOSURE:
	return TYPE_CLOSURE;
      case T_QUOTED:
	sv = SV_QUOTED(sv);
	if (SV_IS_STRING(sv))
	    return TYPE_SYMBOL;
	if (SV_GEN_TYPE(sv) == T_ARRAY)
	    return TYPE_QUOTED_ARRAY;
	return TYPE_ANY;
      case T_FLOAT:
	return TYPE_FLOAT;
      case T_REGEXP:
	return TYPE_REGEXP;
      default:
	fatal("Unexpected initializer\n");
	return 0;
    }
}

/* Turn the parsed constant in *vp into an expression: derive its static
 * type and build the constant node in place. */
void constant_expression(YYSTYPE *vp) {
    svalue value = vp->constant;
    struct expression *exp = &vp->expression;

    exp->vtype = comp_type(value);
    exp->length = constant_node(value, &exp->node);
}

/* Convert every element of a multi-constant chain into a constant node,
 * recording each element's static type in opr and accumulating the
 * total code length into mc->length.  The chain must be non-empty. */
void multiconst_multival(struct statement *mc) {
    struct binode *cur = mc->node.p;
    p_int total = 0;

    do {
	/* Type first: constant_node() overwrites node[1] in place. */
	cur->opr = comp_type(cur->node[1].sv);
	total += constant_node(cur->node[1].sv, &cur->node[1]);
	cur = cur->node[0].p;
    } while (cur);
    mc->length = total;
}

/* Build an array value from a multi-constant chain of mc->length
 * elements, consuming (and freeing) the chain.  On allocation failure
 * the element svalues are released and SV_NULL is returned. */
svalue multiconst_array(struct statement *mc) {
    struct binode *np = mc->node.p, *next;
    svalue a =
      allocate_array(mc->length, SV_OBJECT(inter_fp->object).x.uid->self);
    if (a.p) {
	svalue *svp = SV_ARRAY(a).member;

	/* NOTE(review): svp starts at .member and is post-decremented,
	 * i.e. elements are stored at member[0], member[-1], ... .
	 * This is only correct if `member' points at the LAST slot of
	 * the array (the chain is linked in reverse source order) --
	 * confirm against the array layout in object.h. */
	do {
	    *svp-- = np->node[1].sv;
	    next = np->node[0].p;
	    free_tmpnode(np);
	} while (np = next);
	return a;
    } else {
	/* Allocation failed: drop all element values and the chain. */
	do {
	    FREE_SVALUE(np->node[1].sv);
	    next = np->node[0].p;
	    free_tmpnode(np);
	} while (np = next);
	return SV_NULL;
    }
}

/* Return the number of elements in a multival chain linked through
 * node[0]; an empty (null) chain counts as 0. */
static int count_multival(union node mv) {
    int count;

    for (count = 0; mv.p != 0; count++)
	mv.p = mv.p->node[0].p;
    return count;
}

/* Build a mapping from a chain of key/value-list pairs, consuming and
 * freeing all chain nodes.  mc->length is the number of values per key.
 * On mapping-allocation failure the element values are released and the
 * null svalue from allocate_mapping() is returned.  A key whose value
 * list is too long or too short is reported as CE_SYNTAX. */
svalue multiconst_mapping(struct statement *mc)
{
    p_int length = mc->length;
    struct binode *kv_list, *next_kv, *kv, *vlist, *next_v;

    svalue m = allocate_mapping(2, length, inter_fp->object);
    for (kv_list = mc->node.p; kv_list; kv_list = next_kv) {
	kv = kv_list->node[1].p;
	if (m.p) {
	    svalue key = kv->node[0].sv;
	    svalue *start = get_map_lvalue(m, key, 1);
	    /* Values are linked in reverse order, so fill backwards
	     * from start+length down towards start. */
	    svalue *svp = start + length;
	    FREE_SVALUE(key);
	    for (vlist = kv->node[1].p; vlist; vlist = next_v) {
		svalue sv = vlist->node[1].sv;
		if (svp == start) {
		    /* More values than the mapping is wide. */
		    FREE_SVALUE(sv);
		    yyerrorn(CE_SYNTAX);
		} else
		    *--svp = sv;
		next_v = vlist->node[0].p;
		free_tmpnode(vlist);
	    }
	    if (svp != start) {
		/* Too few values: zero the unfilled slots and complain. */
		bzero(start, (char*)svp - (char *)start);
		yyerrorn(CE_SYNTAX);
	    }
	} else {
	    /* No mapping: just release the value svalues and nodes. */
	    for (vlist = kv->node[1].p; vlist; vlist = next_v) {
		FREE_SVALUE(vlist->node[1].sv);
		next_v = vlist->node[0].p;
		free_tmpnode(vlist);
	    }
	}
	next_kv = kv_list->node[0].p;
	free_tmpnode(kv_list);
    }
    return m;
}

/* Build the parse node for a call to efun `n' with the argument chain
 * `args', storing the resulting expression in *rp.  Checks argument
 * count (inserting the efun's default-argument expression when one is
 * defined), checks argument types against efun_arg_types, and resolves
 * efun aliases to their real opcode. */
void efun_call(int n, struct statement args, struct expression *rp) {
    struct binode *nd;
    int narg = count_multival(args.node);
    union node *np;
    int32 *eargtp;

    nd = ALLOC_NNODE(1 + narg);
    nd->ntype = N_EFUN;
    nd->line = current_line;
    if (narg > instrs[n].max_arg)
	yyerrorn(CE_MANYEPAR);
    if (narg < instrs[n].min_arg) {
	if (instrs[n].Default > 0) {
	    /* Supply the trailing argument by compiling the efun's
	     * default expression (itself an argument-less efun call). */
	    struct binode *dn;
	    struct expression subexp;
	    struct statement subargs;

	    subargs.node.p = 0;
	    subargs.length = 0;
	    efun_call(instrs[n].Default, subargs, &subexp);
	    dn = alloc_tmpnode();
	    dn->node[0] = args.node;
	    dn->node[1] = subexp.node;
	    dn->ntype = N_MULTIVAL;
	    dn->opr = subexp.vtype;
	    args.node.p = dn;
	    args.length = args.length + subexp.length;
	    narg++;
	}
	if (narg < instrs[n].min_arg)
	    yyerrorn(CE_FEWEPAR);
    }
    nd->node[0].efun.narg = narg;
    np = &nd->node[1];
    /* Walk the reversed argument chain from the last argument down,
     * checking each against its entry in efun_arg_types. */
    eargtp = &efun_arg_types[instrs[n].arg_index+narg];
    while (--narg >= 0) {
	np[narg] = args.node.p->node[1];
	if (! (1 << args.node.p->opr & *--eargtp) && narg < instrs[n].check_arg)
	    bad_type(narg, args.node.p->opr);
	args.node = args.node.p->node[0];
    }
    /* Aliased efuns share type info but emit a different opcode. */
    if (n > LAST_INSTRUCTION_CODE)
	n = efun_aliases[n - LAST_INSTRUCTION_CODE - 1];
    nd->node[0].efun.code = n;
    rp->node.p = nd;
    /* Length: args + opcode (+ escape prefix) (+ narg byte for
     * variable-arity efuns). */
    rp->length =
      args.length + 1 + (n > 0xff) + (instrs[n].min_arg != instrs[n].max_arg);
    rp->vtype = instrs[n].ret_type;
}

/* Build the parse node for a call to lfun (local function) `n' with the
 * argument chain `args', storing the resulting expression in *rp. */
void lfun_call(int n, struct statement args, struct expression *rp) {
    struct binode *nd;
    int narg = count_multival(args.node);
    union node *np;

    nd = ALLOC_NNODE(1 + narg);
    nd->ntype = N_LFUN;
    nd->line = current_line;
    nd->node[0].lfun.lfun = n;
    nd->node[0].lfun.narg = narg;
    np = &nd->node[1];
    while (--narg >= 0) {
	np[narg] = args.node.p->node[1];
	/* NOTE(review): this condition is a comma expression whose value
	 * is the final constant 0, so bad_type() is NEVER called -- the
	 * argument-type check against FUNCTION(n)'s prototype looks like
	 * an unfinished stub.  Determine the intended comparison before
	 * enabling it. */
	if (args.node.p->opr, FUNCTION(n), 0)
	    bad_type(narg, args.node.p->opr);
	args.node = args.node.p->node[0];
    }
    rp->node.p = nd;
    /* F_CALL_FUNCTION_BY_INDEX + narg byte + 16-bit function index. */
    rp->length = args.length + 4;
    rp->vtype = FUNCTION(n)->type;
}

/* Build the parse node for ob->fun(args): compiled as an F_CALL_OTHER
 * efun call with the object expression and the function name constant
 * prepended to the argument list. */
void member_call(struct expression ob, svalue fun,
  struct statement args, struct expression *rp)
{
    struct binode *nd;
    int narg = count_multival(args.node);
    union node *np;

    nd = ALLOC_NNODE(2 + 2 + narg);
    nd->ntype = N_EFUN;
    nd->node[0].efun.code = F_CALL_OTHER;
    nd->node[0].efun.narg = 2 + narg;
    nd->node[1] = ob.node;
    args.length += ob.length + constant_node(fun, &nd->node[2]);
    np = &nd->node[3];
    /* Everything is allowed for the arguments, so there is no point in
       using the vanilla typechecking approach. */
    while (--narg >= 0) {
	np[narg] = args.node.p->node[1];
	args.node = args.node.p->node[0];
    }
    rp->node.p = nd;
    /* opcode + narg byte. */
    rp->length = args.length + 2;
    rp->vtype = instrs[F_CALL_OTHER].ret_type;
}

/* Execute efun `code' right now (at compile time) with num_arg
 * arguments already pushed on the interpreter stack, by wrapping it in
 * a temporary efun closure and running the interpreter on a new frame.
 * Returns the efun's result.  Not reentrant: uses one static closure. */
svalue immediate_efun_call(int code, int num_arg)
{
    static struct efun_closure cl = {T_CLOSURE, 1};
    struct control_ret cntret;

    cl.closure_type = code + CLOSURE_EFUN;
    cl.ob = inter_fp->object;
    cntret =
      closure_frame(TO_SVALUE(&cl), inter_sp, inter_fp, num_arg, 0, IR_EXTERN);
    return interpreter(cntret.fp, cntret.sp);
}

/* Record the declaration of lfun `id' with the given modifier, return
 * type and (currently unused) argument type list.  Reuses an existing
 * function slot for redeclarations, otherwise allocates a fresh one.
 * Returns the (possibly new) function record. */
struct function *
declare_lfun(int modifier, int type, struct ident *id, struct type_list *list) {
    struct function *fun;
    struct ident_global g = { -1, -1, -1, -1};

    id = NEW_IDENT(id, I_TYPE_GLOBAL, (union ident_u)g);
    if (id->u.global.function >= 0) {
	fun = FUNCTION(id->u.global.function);
    } else {
	/* NOTE(review): ALLOC_ANODE is not checked for failure here,
	 * while verify_declared_lfun() does check -- confirm whether
	 * ALLOC_ANODE can return 0. */
	fun = ALLOC_ANODE(*fun);
	id->u.global.function = store_function(fun);
	n_undefined_lfuns++;
	fun->inherited = 0;
    }
    fun->modifier = modifier;
    fun->type = type;
    fun->new_def= 0;
    fun->undeclared = 0;
    fun->num_arg = local_preallocated;
    fun->name.id = id;
    return fun;
}

/* Ensure `id' names a known lfun.  Under #pragma strong_types a call to
 * an undeclared function is an error; otherwise an implicit, undeclared
 * function record is created so the call can still be compiled.
 * Returns the (possibly re-interned) identifier. */
struct ident *verify_declared_lfun(struct ident *id) {
    struct ident_global g = { -1, -1, -1, -1};

    id = NEW_IDENT(id, I_TYPE_GLOBAL, (union ident_u)g);
    if (id->u.global.function < 0) {
	struct function *f;

	if (pragma_strong_types)
	    yyerrorn(CE_VARNDECL, make_string(id->name, id->namelen));
	f = ALLOC_ANODE(*f);
	if (f) {
	    id->u.global.function = store_function(f);
	    f->name.id = id;
	    f->new_def = 0;
	    f->undeclared = 1;
	    f->inherited = 0;
	    n_undefined_lfuns++;
	}
    }
    return id;
}

/* Declare global variable `id'.  Redeclaring a variable already defined
 * in this program is an error; shadowing an inherited variable chains
 * the new declaration in front of the inherited one. */
void declare_global_var(int modifier, int type, struct ident *id, svalue init) {
    struct var_decl *var, *inherited = 0;
    struct ident_global g = { -1, -1, -1, -1};
    int ix;

    id = NEW_IDENT(id, I_TYPE_GLOBAL, (union ident_u)g);
    ix = id->u.global.variable;
    if (ix >= 0) {
	inherited = GVARIABLE(ix);
	if (!inherited->inherited_from) {
	    /* Redefinition within this program: report and drop init. */
	    yyerrorn(CE_VARREDEF, make_string(id->name, id->namelen));
	    FREE_SVALUE(init);
	    return;
	}
    } else {
	/* NOTE(review): alloc_variable()'s failure (alloc_gen returning
	 * 0) is not detected here -- confirm the intended OOM policy. */
	id->u.global.variable = ix = alloc_variable();
    }
    var = ALLOC_ANODE(*var);
    GVARIABLE(ix) = var;
    var->next_inherited = inherited;
    var->inherited_from = 0;
    /* FIXME: handle virtual & shared variables */
    var->ix = n_globals++;
    var->modifier = modifier;
    var->type = type;
    var->name.id = id;
}

/* Bump allocator state for parse nodes: node_blocks[] holds the blocks,
 * current_node_block the index of the active block, and
 * current_node_block_offset the remaining bytes in it (allocation
 * proceeds downward from the end of each block). */
static int current_node_block;
static int current_node_block_offset;
struct binode *node_blocks[MAX_NODE_BLOCKS];

/* Allocate one struct binode from the node-block bump allocator.
 * Delegates to alloc_nnode() so the block-refill logic exists in
 * exactly one place (the two copies had drifted into duplication). */
struct binode *alloc_node() {
    return alloc_nnode(sizeof (struct binode));
}

/* Allocate `need' bytes from the node-block bump allocator, starting a
 * new block when the current one cannot satisfy the request.
 * NOTE(review): alloc_gen()'s result is not checked, `need' larger than
 * BYTES_PER_NODE_BLOCK would underflow the offset, and overrunning
 * MAX_NODE_BLOCKS is not detected -- confirm callers guarantee this. */
void *alloc_nnode(size_t need) {
    if (current_node_block_offset < need) {
	node_blocks[++current_node_block] =
	  alloc_gen(BYTES_PER_NODE_BLOCK);
	current_node_block_offset = BYTES_PER_NODE_BLOCK;
    }
    /* Carve from the end of the block downwards. */
    return &node_blocks[current_node_block][current_node_block_offset -= need];
}

/* Release every node block allocated since prepare_compile(), leaving
 * current_node_block at -1 (the "no block" state). */
static void free_node_blocks() {
    for (; current_node_block >= 0; current_node_block--)
	free_gen(node_blocks[current_node_block]);
}

/* Store function record `f' in the next free slot of the block-indexed
 * function table and return its index, or -1 if a new block could not
 * be allocated.
 * NOTE(review): on that failure path function_ix has already been
 * incremented, leaving a gap -- confirm callers tolerate this. */
mp_int store_function(struct function *f) {
    mp_int ix = function_ix++;
    /* Crossing a block boundary (and not at index 0): add a block. */
    if (! (ix & BLOCK_MASK) && ix) {
	struct function **new_block = alloc_gen(BYTES_PER_NODE_BLOCK);
	if (! new_block)
	    return -1;
	funblocks[ix >> BLOCK_BITS] = new_block;
    }
    FUNCTION(ix) = f;
    return ix;
}

/* Reserve the next slot in the global-variable table, doubling the
 * table when it is full, and return the slot index. */
static mp_int alloc_variable() {
    mp_int ix = variable_ix++;
    if (ix >= varblock_size) {
	struct var_decl **new;

	/* NOTE(review): alloc_gen() failure is not handled here; the
	 * memcpy below would then dereference 0. */
	new = alloc_gen(varblock_size * 2 * sizeof *new);
	memcpy(new, varblock, varblock_size * sizeof *new);
	/* BUGFIX: was `varblock_size >>= 1', which recorded HALF the
	 * old size after allocating double -- every subsequent
	 * allocation would then write past the recorded bound. */
	varblock_size <<= 1;
	free_gen(varblock);
	varblock = new;
    }
    return ix;
}

/* Bring the simulated stack depth back down to old_stack_use, emitting
 * one F_POP per excess slot when pc is non-null (pc == 0 means we are
 * only measuring code size).  Returns the advanced pc. */
uint8 *stack_adjust(uint8 *pc, int old_stack_use) {
    int emitting = (pc != 0);

    for (; stack_use > old_stack_use; stack_use--, pc++) {
	if (emitting)
	    *pc = F_POP;
    }
    /* Usually stack_use will be at least as large as
       old_stack_use, but it might not after we passed a return.  */
    stack_use = old_stack_use;
    return pc;
}

/* Recompute (and potentially improve) the code length of the node tree
 * at *npp.  Only leaf cases are implemented; every other shape hits
 * fatal() below -- this function is visibly unfinished. */
p_int optimize(union node *npp) {
    union node nd = *npp;

    /* Low tag bits set means `nd' is a leaf, not a node pointer. */
    if (nd.i & 3) {
	int n = nd.leaf.n.u;
	switch (nd.leaf.type) {
	  case LN_INT:
	    return 1 + (n > 1U);
	  case LN_LOCAL:
	    if (n >= -0xf) {
		/* NOTE(review): this writes the local copy `nd', not
		 * *npp, so the LN_PICK retag is lost -- presumably
		 * `npp->leaf.type = LN_PICK' was intended. */
		nd.leaf.type = LN_PICK;
		return 1;
	    }
	    return 2;
	}
    } else {
	int opr = nd.p->opr;
	/* Inner-node optimization not written yet (empty switch). */
	switch (nd.p->ntype) {
	}
    }
    fatal("unimplemented\n");
}

/* Reset all per-compilation state before parsing a new program and
 * make sure the first function and variable tables exist.
 * NOTE(review): malloc() and alloc_gen() results are not checked. */
void prepare_compile() {
    num_shared = 0;
    max_shared = 128;
    shared = malloc(sizeof(svalue)*max_shared);
    /* -1 / 0 puts the node bump allocator in its "no block" state. */
    current_node_block = -1;
    current_node_block_offset = 0;
    local_preallocated = 0;
    free_tmpnodes = 0;
    function_ix = 0;
    variable_ix = 0;
    n_fun_def = 0;
    n_shared_var = 0;
    n_param = 0;
    all_proto_closures = 0;
    cshared_threshold = 0x400;
    n_undefined_lfuns = 0;
    n_globals = 0;
    if (!funblocks[0])
	funblocks[0] = alloc_gen(BYTES_PER_NODE_BLOCK);
    if (!varblock) {
	varblock = alloc_gen(INIT_VARBLOCK_SIZE * sizeof *varblock);
	varblock_size = INIT_VARBLOCK_SIZE;
    }
}

static uint8 *compile_lvalue_node(uint8 *pc, union node nd);

/* void_accepted is only used to guide code generation.  If there is some
   actual optimization that can be performed (like leaving out something
   altogether) it should have been done earlier. */
/* Emit bytecode for the value-producing node tree `nd' at `pc',
 * returning the advanced pc.  Tracks the simulated stack depth in the
 * global stack_use.  Leaves (tag bits set in nd.i) are loads of
 * globals, parameters, locals, small ints, closures and shared
 * constants; inner nodes dispatch on ntype. */
static uint8 *compile_value_node(uint8 *pc, union node nd, int void_accepted) {
    if (nd.i & 3) {
	int n = nd.leaf.n.u;
	switch (nd.leaf.type) {
	  case LN_GLOBAL:
	  {
	    /* Load global variable: short or 16-bit indexed form. */
	    struct var_decl *var = GVARIABLE(n);
	    int ix = var->ix;
	    if (ix > 0xff) {
		*pc++ = F_V_GLOBAL16;
		STORE16(pc, ix);
		pc += 2;
	    } else {
		*pc++ = F_V_GLOBAL;
		*pc++ = ix;
	    }
	    stack_use++;
	    break;
	  }
	  case LN_PARAM:
	  {
	    /* Load function parameter; offset is frame-relative. */
	    int n2;

	    n -= n_param;
	    n2 = n - stack_use - local_preallocated -
		offsetof(struct frame, locals) / sizeof(p_int) +
		offsetof(struct frame, arguments[1]) / sizeof(p_int);
	    /* NOTE(review): the next two ifs are dead code -- the first
	     * has an empty body and the second is disabled with `0 &&'.
	     * Apparently a short-form encoding experiment left in. */
	    if (n2 >= -0xf) {
	    }
	    if (0 && n2 >= -0xff) {
		*pc++ = F_V_LOCAL;
		*pc++ = n + 0xff;
		break;
	    }
	    *pc++ = F_V_PARAM;
	    *pc++ = n + 0x100;
	    stack_use++;
	    break;
	  }
	  case LN_LOCAL:
	    /* Load local variable, addressed relative to stack top. */
	    n -= stack_use;
	    if (n < -0xff) {
		/* NOTE(review): this long form emits F_V_LOCAL like the
		 * short form; compile_lvalue_node() uses F_LV_LOCAL16
		 * for the analogous case -- F_V_LOCAL16 may have been
		 * intended here. */
		*pc++ = F_V_LOCAL;
		STORE16(pc, n + 0x100ff);
		pc += 2;
	    } else {
		*pc++ = F_V_LOCAL;
		*pc++ = n + 0xff;
	    }
	    stack_use++;
	    break;
	  case LN_INT:
	    /* Small integer constants: negated, 0/1 shorthand, or byte. */
	    if (n & 0x8000) {
		*pc++ = F_NCLIT;
		*pc++ = -n;
	    } else if (n <= 1) {
		*pc++ = F_CONST0 + n;
	    } else {
		*pc++ = F_CLIT;
		*pc++ = n;
	    }
	    stack_use++;
	    break;
	  case LN_LFUN_CLOSURE:
	    /* Closure over a local function, by final function index. */
	    *pc++ = F_CLOSURE;
	    n = FUNCTION(n)->ix;
	    STORE16(pc, n);
	    pc += 2;
	    stack_use++;
	    break;
	  case LN_CONST:
	    /* Shared constant; indices shifted up past shared vars. */
	    n += n_shared_var;
	    if (n < 0x400) {
		*pc++ = F_CSHARED0 + (n >> 8);
		*pc++ = n;
	    } else {
		*pc++ = F_SHARED;
		STORE16(pc, n);
		pc += 2;
	    }
	    stack_use++;
	    break;
	  case LN_UNSHARED:
	    /* Like LN_CONST but followed by F_UNSHARE to get a private
	     * copy of the (mutable) value. */
	    n += n_shared_var;
	    if (n < 0x400) {
		*pc++ = F_CSHARED0 + (n >> 8);
		*pc++ = n;
	    } else {
		*pc++ = F_SHARED;
		STORE16(pc, n);
		pc += 2;
	    }
	    *pc++ = F_UNSHARE;
	    stack_use++;
	    break;
	  /* NOTE(review): LN_SHARED is an empty trailing case (falls
	   * straight out of the switch, emitting nothing). */
	  case LN_SHARED:
	}
    } else {
	int opr = nd.p->opr;
	switch (nd.p->ntype) {
	  case N_RETURN:
	    /* return 0 has a dedicated one-byte opcode. */
	    if (nd.p->node[0].leaf.type == LN_INT &&
		nd.p->node[0].leaf.n.s == 0) {
		*pc++ = F_RETURN0;
	    } else {
		pc = compile_value_node(pc, nd.p->node[0], 0);
		*pc++ = F_RETURN;
	    }
	    stack_use = 0;
	    break;
	  case N_VOLATILE:
	    /* Evaluate for effect only; stack is considered cleared. */
	    pc = compile_value_node(pc, nd.p->node[0], 1);
	    stack_use = 0;
	    break;
	  case N_UNARY:
	    pc = compile_value_node(pc, nd.p->node[0], 0);
	    *pc++ = opr;
	    break;
	  case N_BINOP:
	    /* Two operands in, one result out. */
	    pc = compile_value_node(pc, nd.p->node[0], 0);
	    pc = compile_value_node(pc, nd.p->node[1], 0);
	    *pc++ = opr;
	    stack_use--;
	    break;
	  case N_LV_BINOP:
	    /* value OP lvalue (assignment family).  When the result is
	     * unused, opr+1 selects the void variant of the opcode. */
	    pc = compile_value_node(pc, nd.p->node[0], 0);
	    pc = compile_lvalue_node(pc, nd.p->node[1]);
	    switch (opr) {
	      case ULV_ASSIGN:
	      case ULV_ADD:
	      case ULV_SUB:
	      case ULV_AND:
	      case ULV_OR:
	      case ULV_XOR:
	      case ULV_MUL:
	      case ULV_DIV:
	      case ULV_MOD:
	      case ULV_RSH:
	      case ULV_LSH:
		if (void_accepted) {
		    opr++;
		    stack_use--;
		}
		/* fallthrough */
	      case ULV_INDEX:
	      case ULV_RINDEX:
		*pc++ = opr;
		break;
	      case ULV_MAP_CINDEX:
		*pc++ = opr;
		*pc++ = nd.p->node[2].leaf.n.s;
		break;
	    }
	    break;
	  case N_LV_UNARY_CST:
	    /* Index an lvalue with a constant (byte or 16-bit) index. */
	    pc = compile_lvalue_node(pc, nd.p->node[1]);
	    *pc++ = opr;
	    switch (opr) {
	      case ULV_CINDEX:
	      case ULV_CRINDEX:
		*pc++ = nd.p->node[0].leaf.n.u;
		stack_use++;
		break;
	      case ULV_SINDEX:
	      case ULV_SRINDEX:
		STORE16(pc, nd.p->node[0].leaf.n.u);
		pc += 2;
		stack_use++;
		break;
	    }
	    break;
	  case N_LV_UNARY:
	    /* ++/-- family; |3 selects the void variant. */
	    pc = compile_lvalue_node(pc, nd.p->node[0]);
	    switch (opr) {
	      case ULV_PRE_DEC:
	      case ULV_POST_DEC:
	      case ULV_PRE_INC:
	      case ULV_POST_INC:
		if (! void_accepted)
		    stack_use++;
		else
		    opr |= 3;
		/* fallthrough */
	      case ULV_DEC:
	      case ULV_INC:
		*pc++ = opr;
		break;
	    }
	    break;
	  case N_EFUN:
	  {
	    /* Arguments first, then (escaped) opcode, then narg byte
	     * for variable-arity efuns. */
	    int n, narg = nd.p->node[0].efun.narg;

	    for (n = 0; ++n <= narg; ) {
		pc = compile_value_node(pc, nd.p->node[n], 0);
	    }
	    n = nd.p->node[0].efun.code;
	    if (n > 0xff)
		*pc++ = n >> F_ESCAPE_BITS;
	    *pc++ = n;
	    stack_use -= narg - (instrs[n].ret_type != TYPE_NIL);
	    if (instrs[n].min_arg != instrs[n].max_arg)
		*pc++ = narg;
	    break;
	  }
	  case N_LFUN:
	  {
	    /* Call local function by its final 16-bit index. */
	    int n, narg = nd.p->node[0].lfun.narg;

	    for (n = 0; ++n <= narg; ) {
		pc = compile_value_node(pc, nd.p->node[n], 0);
	    }
	    n = nd.p->node[0].lfun.lfun;
	    *pc++ = F_CALL_FUNCTION_BY_INDEX;
	    *pc++ = narg;
	    STORE16(pc, FUNCTION(n)->ix);
	    pc += 2;
	    stack_use -= narg - 1;
	    break;
	  }
	  case N_SEQUENCE:
	  {
	    /* Statement list chained through node[1]. */
	    do {
		pc = compile_value_node(pc, nd.p->node[0], 1);
		nd = nd.p->node[1];
	    } while (nd.p);
	    break;
	  }
	  case N_IF:
	  {
	    /* if/else; opr packs the encoded lengths of both forward
	     * branches (3 bits each) and an inversion flag (bit 7).
	     * Branch opcodes are back-patched once targets are known. */
	    uint8 *branch1, *branch2;
	    int opr = nd.p->opr;
	    int len1 = opr >> 1 & 7;
	    int len2 = opr >> 4 & 7;
	    int inverted =
		opr & 128 ? F_BRANCH_ON_NON_ZERO - F_BRANCH_ON_ZERO : 0;
	    int save_stack_use;

	    pc = compile_value_node(pc, nd.p->node[0], 0);
	    branch1 = pc;
	    pc += len1;
	    save_stack_use = --stack_use;
	    pc = compile_value_node(pc, nd.p->node[1], opr & 1);
	    if (opr & 1)
		pc = stack_adjust(pc, save_stack_use);
	    branch2 = pc;
	    pc += len2;
	    if (len1 == 2) {
		branch1[0] = F_BRANCH_ON_ZERO + inverted;
		branch1[1] = pc - branch1 - 1;
	    } else if (len1 == 3) {
		branch1[0] = F_LBRANCH_ON_ZERO + inverted;
		STORE16(branch1+1, pc - branch1 - 1);
	    } else {
		branch1[0] = F_XLBRANCH_ON_ZERO + inverted;
		STORE24(branch1+1, pc - branch1 - 1);
	    }
	    if (len2) {
		/* else part; len2 == 0 means there is none. */
		stack_use = save_stack_use;
		pc = compile_value_node(pc, nd.p->node[2], opr & 1);
		if (opr & 1)
		    pc = stack_adjust(pc, save_stack_use);
		if (len2 == 2) {
		    branch2[0] = F_BRANCH;
		    branch2[1] = pc - branch2 - 1;
		} else if (len2 == 3) {
		    branch2[0] = F_LBRANCH;
		    STORE16(branch2+1, pc - branch2 - 1);
		} else {
		    branch2[0] = F_XLBRANCH;
		    STORE24(branch2+1, pc - branch2 - 1);
		}
	    }
	    break;
	  }
	  case N_FOR:
	  {
	    /* Loop: forward branch over body+increment to the
	     * condition, then a backward conditional branch to the
	     * body.  opr packs branch lengths and variant flags. */
	    int opr = nd.p->opr;
	    int len1 = opr & 7, len2 = opr >> 3 & 7;
	    int inverted =
		opr & 128 ? F_BRANCH_ON_ZERO - F_BRANCH_ON_NON_ZERO : 0;
	    int ulv =
		opr & 64 ? ULV_PRE_DEC_BBRANCH - F_BRANCH_ON_NON_ZERO : 0;
	    uint8 *branch1, *dest2;
	    int save_stack_use;

	    branch1 = pc;
	    pc += len1;
	    dest2 = pc;
	    save_stack_use = stack_use;
	    if (nd.p->node[0].p)
		pc = compile_value_node(pc, nd.p->node[0], 1);
	    if (nd.p->node[1].p)
		pc = compile_value_node(pc, nd.p->node[1], 1);
	    pc = stack_adjust(pc, save_stack_use);
	    if (len1 == 2) {
		branch1[0] = F_BRANCH;
		branch1[1] = pc - branch1 - 1;
	    } else if (len1 == 3) {
		branch1[0] = F_LBRANCH;
		STORE16(branch1+1, pc - branch1 - 1);
	    } else {
		branch1[0] = F_XLBRANCH;
		STORE24(branch1+1, pc - branch1 - 1);
	    }
	    pc = compile_value_node(pc, nd.p->node[2], 0);
	    if (len2 == 2) {
		pc[0] = F_BBRANCH_ON_NON_ZERO + inverted + ulv;
		pc[1] = - (dest2 - pc);
	    } else if (len2 == 3) {
		pc[0] = F_LBRANCH_ON_NON_ZERO + inverted;
		STORE16(pc+1, dest2 - pc - 1);
	    } else {
		pc[0] = F_XLBRANCH_ON_NON_ZERO + inverted;
		STORE24(pc+1, dest2 - pc - 1);
	    }
	    stack_use--;
	    pc += len2;
	    break;
	  }
	  case N_LOP:
	  {
	    /* Short-circuit logical op with a byte branch offset. */
	    uint8 *branch;

	    pc = compile_value_node(pc, nd.p->node[0], 0);
	    *pc = nd.p->opr;
	    branch = pc;
	    pc += 2;
	    stack_use--;
	    pc = compile_value_node(pc, nd.p->node[1], 0);
	    branch[1] = pc - branch - 1;
	    break;
	  }
	  case N_LLOP:
	  {
	    /* Long variant: duplicate the operand (F_PICK0), branch
	     * with a 24-bit offset, else pop and evaluate the rhs. */
	    uint8 *branch;

	    pc = compile_value_node(pc, nd.p->node[0], 0);
	    *pc++ = F_PICK0;
	    *pc = nd.p->opr;
	    branch = pc;
	    pc += 4;
	    *pc++ = F_POP;
	    stack_use--;
	    pc = compile_value_node(pc, nd.p->node[1], 0);
	    STORE24(branch+1, pc - branch - 1);
	    break;
	  }
	}
    }
    return pc;
}

/* Emit bytecode that pushes an LVALUE reference for node `nd' at `pc'
 * (mirror of compile_value_node() for assignment targets).  Returns the
 * advanced pc. */
static uint8 *compile_lvalue_node(uint8 *pc, union node nd) {
    if (nd.i & 3) {
	int n = nd.leaf.n.u;
	switch (nd.leaf.type) {
	  case LN_GLOBAL:
	  {
	    /* Lvalue of a global: short or 16-bit indexed form. */
	    struct var_decl *var = GVARIABLE(n);
	    int ix = var->ix;
	    if (ix > 0xff) {
		*pc++ = F_LV_GLOBAL16;
		STORE16(pc, ix);
		pc += 2;
	    } else {
		*pc++ = F_LV_GLOBAL;
		*pc++ = ix;
	    }
	    break;
	  }
	  case LN_PARAM:
	  {
	    /* Lvalue of a parameter; same dead short-form experiment as
	     * in compile_value_node() (empty if / `0 &&' if). */
	    int n2;

	    n -= n_param;
	    n2 = n - stack_use - local_preallocated -
		offsetof(struct frame, locals) / sizeof(p_int) +
		offsetof(struct frame, arguments[1]) / sizeof(p_int);
	    if (n2 >= -0xf) {
	    }
	    if (0 && n2 >= -0xff) {
		*pc++ = F_LV_LOCAL;
		*pc++ = n + 0xff;
		break;
	    }
	    *pc++ = F_LV_PARAM;
	    *pc++ = n + 0x100;
	    break;
	  }
	  case LN_LOCAL:
	    /* Lvalue of a local, stack-top relative. */
	    n -= stack_use;
	    if (n >= -0xff) {
		*pc++ = F_LV_LOCAL;
		*pc++ = n + 0xff;
	    } else {
		*pc++ = F_LV_LOCAL16;
		STORE16(pc, n + 0x100ff);
		pc += 2;
	    }
	    break;
	  /* NOTE(review): LN_SHARED/LN_CONST are empty trailing cases
	   * (no lvalue form exists; nothing is emitted). */
	  case LN_SHARED:
	  case LN_CONST:
	}
    } else {
	int opr = nd.p->opr;
	switch (nd.p->ntype) {
	  case N_LV_UNARY_CST:
	    /* Constant-indexed lvalue: shift opr into the LV_ range. */
	    pc = compile_lvalue_node(pc, nd.p->node[1]);
	    opr += ULV_LV_CINDEX - ULV_CINDEX;
	    *pc++ = opr;
	    switch (opr) {
	      case ULV_LV_CINDEX:
	      case ULV_LV_CRINDEX:
		*pc++ = nd.p->node[0].leaf.n.u;
		break;
	      case ULV_LV_SINDEX:
	      case ULV_LV_SRINDEX:
		STORE16(pc, nd.p->node[0].leaf.n.u);
		pc += 2;
		break;
	    }
	    break;
	  case N_LV_BINOP:
	    /* Computed-index lvalue: index value, then base lvalue. */
	    pc = compile_value_node(pc, nd.p->node[0], 0);
	    pc = compile_lvalue_node(pc, nd.p->node[1]);
	    opr += ULV_LV_INDEX - ULV_INDEX;
	    switch (opr) {
	      case ULV_LV_INDEX:
	      case ULV_LV_RINDEX:
		*pc++ = opr;
		break;
	      case ULV_LV_MAP_CINDEX:
		*pc++ = opr;
		*pc++ = nd.p->node[2].leaf.n.s;
		break;
	    }
	    stack_use--;
	    break;
	}
    }
    return pc;
}

/* qsort comparator for the function-index table: newly defined
 * functions (inherited == 0) sort after inherited ones, ties broken by
 * the interned name pointer value. */
static int cmp_fundef(const void *a, const void *b) {
    struct function *fa = FUNCTION(*(uint16*)a);
    struct function *fb = FUNCTION(*(uint16*)b);
    p_int diff;

    diff = fb->inherited - fa->inherited;
    if (diff == 0)
        diff = fa->name.sv.i - fb->name.sv.i;
    /* p_int may be wider than int; clamp to a sign in that case. */
    if (sizeof diff != sizeof (int))
        return diff < 0 ? -1 : diff > 0;
    return diff;
}

/* Finish a compilation: optionally run the optimizer, compute the final
 * program size, allocate the program block, generate bytecode for every
 * newly defined function (back to front), fix up proto closures, build
 * the sorted function-name table, and release all compile-time state.
 * Returns the finished program, or 0 on allocation failure. */
struct program *end_compile() {
    struct program *prog = 0;
    p_int size = sizeof *prog;
    svalue sv;
    int nnames;
    svalue *shared_start;

    /* Make space for narg, nlocal bytes at function start.  */
    total_pcode += 2 * n_fun_def;
    /* We don't share the F_UNDEF so that we can properly sort.
       But we don't need narg/nlocal bytes for these since the
       values don't matter.  */
    total_pcode += 2 * n_undefined_lfuns;
    if (pragma_optimize || num_shared > cshared_threshold) {
	/* Re-measure (and shrink) each affected function's code. */
	int ix;
	for (ix = function_ix; --ix >= 0; ) {
	    struct function *f = FUNCTION(ix);
	    if (pragma_optimize ||
		f->new_def && f->cshared_threshold > cshared_threshold)
	    {
		stack_use = 0;
		total_pcode -= f->block.length;
		total_pcode += f->block.length = optimize(&f->block.node);
	    }

	}
    }
    /* Layout: header, per-function byte, pcode, aligned shared values,
     * function table. */
    size += function_ix * 1;
    size += total_pcode;
    size = ALIGNI(size, p_int);
    size += num_shared * sizeof (p_int);
    size += n_fun_def * sizeof(struct new_function);
    size = ALIGNI(size, p_int);
    sv = ALLOC_TTS(T_INTERNAL, IT_PROGRAM, n_globals, size);
    if (sv.p) do {
	uint16 *fia, *fip;
	int ix;
	struct new_function *nfp;
	uint8 *pcode;

	prog = (struct program *)(sv.p - 1);
	prog->ref = 1;
	prog->load_time = current_time;
	/* Renumber all programs when the id counter wraps to 0. */
	prog->id_number =
	  ++current_id_number ? current_id_number : renumber_programs();
	prog->function.name = 0;
	pcode = &prog->virtual.function_8[function_ix];
	pcode += total_pcode;
	prog->shared = shared_start = (svalue*)ALIGN(pcode, p_int);
	memcpy(shared_start, shared, num_shared * sizeof *shared);
	/* Collect the indices of non-inherited functions, interning
	 * their names as global strings for sorting. */
	fia = (uint16 *)(shared_start + num_shared);
	for (fip = fia, ix = function_ix; --ix >= 0;) {
	    struct function *f = FUNCTION(ix);
	    if (!f->inherited) {
		f->name.sv
		  = make_global_string(f->name.id->name, f->name.id->namelen);
		*fip++ = ix;
	    }
	}
	qsort(fia, n_fun_def + n_undefined_lfuns, sizeof fia[0], cmp_fundef);
	/* Assign each function its final (sorted) index. */
	for (ix = function_ix; --ix >= 0; ) {
	    int ix2 = *--fip;
	    FUNCTION(ix2)->ix = ix;
	}
	nfp = (struct new_function *)fia + n_fun_def + n_undefined_lfuns;
	/* end of used space is nfp */
	/* Convert proto-lfun closures to real lfun closures now that
	 * final function indices are known. */
	while (all_proto_closures.p) {
	    svalue cl = all_proto_closures;
	    int n;

	    SV_CLOSURE(cl).lfun.closure_type -=
	      CLOSURE_PROTO_LFUN - CLOSURE_LFUN;
	    n = SV_CLOSURE(cl).lfun.index;
	    SV_CLOSURE(cl).lfun.index = FUNCTION(n)->ix;
	    all_proto_closures = SV_CLOSURE(cl).lfun.ob;
	    SV_CLOSURE(cl).lfun.ob = COPY_SVALUE(inter_fp->object);
	    FREE_ALLOCED_SVALUE (cl);
	}
	/* Generate code back to front, filling pcode downwards. */
	for (fip += function_ix; fip != fia; ) {
	    int ix;
	    struct function *f;

	    ix = *--fip;
	    f = FUNCTION(ix);
	    nfp--;
	    nfp->name = f->name.sv;
	    if (!f->new_def) {
		/* Declared but never defined: stub with F_UNDEF. */
		*--pcode = (uint8)F_UNDEF;
		*--pcode = F_ESCAPE;
		nfp->start = pcode - (uint8 *)prog;
	    } else {
		pcode[-1] = F_RETURN0;
		pcode -= f->block.length;
		nfp->start = pcode - (uint8 *)prog;
		/* TYPE__STATIC is inversed */
		nfp->flags = 0 ^ TYPE__STATIC;
		n_param = f->num_arg;
		stack_use = 0;
		compile_value_node(pcode, f->block.node, 1);
		*--pcode = f->num_local;
		*--pcode = f->num_arg;
	    }
	}
	prog->new_function = nfp;
	memset(&prog->virtual, 0, n_fun_def - n_fun_redef);
	nnames = function_ix;
	sv = ALLOC_TTS(T_INTERNAL, IT_NAMETABLE, nnames,
	  ALIGNI(sizeof(p_int) + nnames*sizeof prog->function.name[0], p_int));
	if (!sv.p)
	    break;
	prog->function.name = (uint16 *)(sv.p - 1 + sizeof(p_int));
	for (fip = prog->function.name, ix = nnames; --ix >= 0; )
	    fip[ix] = ix;
    } while (0);
    /* Tear down per-compilation state in all cases. */
    lex_close(0);
    free_node_blocks();
    if (varblock_size > INIT_VARBLOCK_SIZE) {
	free_gen(varblock);
	varblock = 0;
    }
    return prog;
}

/* Compile the source file named by namestart[0..namelen) plus the
 * language's suffix (temporarily written into the caller's buffer and
 * restored afterwards).  Returns the compiled program, or 0 if the
 * source file cannot be opened (CE_SRC_NF is reported). */
struct program *compile_file(uint8 *namestart, mp_int namelen, int language) {
    int fd;
    uint8 save[3];
    uint8 *suffixes[] = { ".c" };

    prepare_compile();
    /* Append the suffix in place; `save' preserves the overwritten
     * bytes (suffix + NUL fit in sizeof save). */
    memcpy(save, namestart+namelen, sizeof save);
    strcpy(namestart+namelen, suffixes[language]);
    fd = open(namestart, O_RDONLY);
    /* BUGFIX: restore the buffer on ALL paths -- previously the error
     * return below left the caller's name corrupted with the suffix. */
    memcpy(namestart+namelen, save, sizeof save);
    if (fd < 0) {
	yyerrorn(CE_SRC_NF);
	return 0;
    }
    lex_open(fd, make_string(namestart, namelen));
    stack_use = 0;
    yyparse();
    return end_compile();
}

/* Report compile error `ce_errno'.  The variadic arguments (their count
 * comes from ce_error_nargs[]) are svalues pushed onto the interpreter
 * stack, and the H_COMPILE_ERROR driver hook is invoked on master_ob
 * with the error number plus those arguments. */
void yyerrorn(int ce_errno, ...) {
    int nargs, i;
    va_list va;
    union svalue sv;

    nargs = ce_error_nargs[ce_errno];
    PUSH_NUMBER(ce_errno);
    va_start(va, ce_errno);
    for (i = 0; i < nargs; i++) {
        *++inter_sp = va_arg(va, union svalue);
    }
    va_end(va);
    sv = call_hook(driver_hook[H_COMPILE_ERROR], master_ob, nargs+1);
    FREE_SVALUE(sv);
}

void bad_type(int narg, int opr) {
    yyerrorn(CE_BADTYPE, (p_int)narg << 1, (p_int)opr << 1);
}

/* yacc error callback: translate the parser's fixed message strings to
 * the driver's compile-error codes; anything else is a driver bug. */
void yyerror(char *str) {
    int code;

    if (strcmp(str, "yacc stack overflow") == 0)
	code = CE_STACKOVERFLOW;
    else if (strcmp(str, "syntax error") == 0)
	code = CE_SYNTAX;
    else {
	fatal("yyerror(): unknown error %s\n", str);
	return;
    }
    yyerrorn(code);
}