On IPI usage when making pmap SMP-safe

In FreeBSD-current/mips, certain operations in pmap.c are written to use smp_rendezvous() so that all CPUs end up in a consistent state.
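For reference, the smp_rendezvous() pattern looks roughly like this (a minimal sketch, kernel context; apart from smp_rendezvous() itself, every name below is illustrative, not the actual FreeBSD/mips code):

/*
 * Sketch of a FreeBSD-style rendezvous TLB shootdown.  smp_rendezvous()
 * runs the action function on every CPU while the others spin at the
 * rendezvous point; the setup/teardown hooks may be NULL.
 */
struct shootdown_arg {
	pmap_t		pmap;
	vm_offset_t	va;
};

static void
shootdown_action(void *arg)	/* runs on every CPU */
{
	struct shootdown_arg *sa = arg;

	/* hypothetical per-CPU invalidation of a single mapping */
	if (pmap_active_on_this_cpu(sa->pmap))
		tlb_invalidate_page_local(sa->pmap, sa->va);
}

static void
shootdown_page(pmap_t pmap, vm_offset_t va)
{
	struct shootdown_arg sa = { .pmap = pmap, .va = va };

	smp_rendezvous(NULL, shootdown_action, NULL, &sa);
}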
However, as far as I can tell from NetBSD-current, neither powerpc nor i386 has any such implementation.
I was wondering whether this difference comes from the hardware architecture or from the OS architecture, when I found a similar construct in NetBSD-current/vax:

/*
 * Update the PCBs using this pmap after a change.
 */
static void
update_pcbs(struct pmap *pm)
{
	struct pcb *pcb;

	for (pcb = pm->pm_pcbs; pcb != NULL; pcb = pcb->pcb_pmnext) {
		KASSERT(pcb->pcb_pm == pm);
		pcb->P0BR = pm->pm_p0br;
		pcb->P0LR = pm->pm_p0lr|AST_PCB;
		pcb->P1BR = pm->pm_p1br;
		pcb->P1LR = pm->pm_p1lr;
	}

	/* If curlwp uses this pmap update the regs too */ 
	if (pm == curproc->p_vmspace->vm_map.pmap) {
		mtpr((uintptr_t)pm->pm_p0br, PR_P0BR);
		mtpr(pm->pm_p0lr|AST_PCB, PR_P0LR);
		mtpr((uintptr_t)pm->pm_p1br, PR_P1BR);
		mtpr(pm->pm_p1lr, PR_P1LR);
	}

#if defined(MULTIPROCESSOR) && defined(notyet)
	/* If someone else is using this pmap, be sure to reread */
	cpu_send_ipi(IPI_DEST_ALL, IPI_NEWPTE);
#endif
}

/*
 * New (real nice!) function that allocates memory in kernel space
 * without tracking it in the MD code.
 */
void
pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
{
	int *ptp, opte;

	ptp = (int *)kvtopte(va);
	PMDEBUG(("pmap_kenter_pa: va: %lx, pa %lx, prot %x ptp %p\n",
	    va, pa, prot, ptp));
	opte = ptp[0];
	ptp[0] = PG_V | ((prot & VM_PROT_WRITE)? PG_KW : PG_KR) |
	    PG_PFNUM(pa) | PG_SREF;
	ptp[1] = ptp[0] + 1;
	ptp[2] = ptp[0] + 2;
	ptp[3] = ptp[0] + 3;
	ptp[4] = ptp[0] + 4;
	ptp[5] = ptp[0] + 5;
	ptp[6] = ptp[0] + 6;
	ptp[7] = ptp[0] + 7;
	if (opte & PG_V) {
#if defined(MULTIPROCESSOR)
		cpu_send_ipi(IPI_DEST_ALL, IPI_TBIA);
#endif
		mtpr(0, PR_TBIA);
	}
}

void
pmap_kremove(vaddr_t va, vsize_t len)
{
	struct pte *pte;
#ifdef PMAPDEBUG
	int i;
#endif

	PMDEBUG(("pmap_kremove: va: %lx, len %lx, ptp %p\n",
		    va, len, kvtopte(va)));

	pte = kvtopte(va);

#ifdef PMAPDEBUG
	/*
	 * Check if any pages are on the pv list.
	 * This shouldn't happen anymore.
	 */
	len >>= PGSHIFT;
	for (i = 0; i < len; i++) {
		if (pte->pg_pfn == 0)
			continue;
		if (pte->pg_sref == 0)
			panic("pmap_kremove");
		bzero(pte, LTOHPN * sizeof(struct pte));
		pte += LTOHPN;
	}
#else
	len >>= VAX_PGSHIFT;
	bzero(pte, len * sizeof(struct pte));
#endif
#if defined(MULTIPROCESSOR)
	cpu_send_ipi(IPI_DEST_ALL, IPI_TBIA);
#endif
	mtpr(0, PR_TBIA);
}

/*
 * Sets protection for a given region to prot. If prot == none then
 * unmap region. pmap_remove is implemented as pmap_protect with
 * protection none.
 */
void
pmap_protect_long(pmap_t pmap, vaddr_t start, vaddr_t end, vm_prot_t prot)
{
	struct	pte *pt, *pts, *ptd;
	int	pr, lr;

	PMDEBUG(("pmap_protect: pmap %p, start %lx, end %lx, prot %x\n",
	    pmap, start, end,prot));

	RECURSESTART;

	switch (SEGTYPE(start)) {
	case SYSSEG:
		pt = Sysmap;
#ifdef DIAGNOSTIC
		if (((end & 0x3fffffff) >> VAX_PGSHIFT) > mfpr(PR_SLR))
			panic("pmap_protect: outside SLR: %lx", end);
#endif
		start &= ~KERNBASE;
		end &= ~KERNBASE;
		pr = (prot & VM_PROT_WRITE ? PROT_KW : PROT_KR);
		break;

	case P1SEG:
		if (vax_btop(end - 0x40000000) <= pmap->pm_p1lr) {
			RECURSEEND;
			return;
		}
		if (vax_btop(start - 0x40000000) < pmap->pm_p1lr)
			start = pmap->pm_p1lr * VAX_NBPG;
		pt = pmap->pm_p1br;
		start &= 0x3fffffff;
		end = (end == KERNBASE ? end >> 1 : end & 0x3fffffff);
		pr = (prot & VM_PROT_WRITE ? PROT_RW : PROT_RO);
		break;

	case P0SEG:
		lr = pmap->pm_p0lr;

		/* Anything to care about at all? */
		if (vax_btop(start) > lr) {
			RECURSEEND;
			return;
		}
		if (vax_btop(end) > lr)
			end = lr * VAX_NBPG;
		pt = pmap->pm_p0br;
		pr = (prot & VM_PROT_WRITE ? PROT_RW : PROT_RO);
		break;
	default:
		panic("unsupported segtype: %d", SEGTYPE(start));
	}

	pts = &pt[start >> VAX_PGSHIFT];
	ptd = &pt[end >> VAX_PGSHIFT];
#ifdef DEBUG
	if (((int)pts - (int)pt) & 7)
		panic("pmap_remove: pts not even");
	if (((int)ptd - (int)pt) & 7)
		panic("pmap_remove: ptd not even");
#endif

	while (pts < ptd) {
		if (kvtopte(pts)->pg_pfn && *(int *)pts) {
			if (prot == VM_PROT_NONE) {
				RECURSEEND;
				if ((*(int *)pts & PG_SREF) == 0)
					rmpage(pmap, (u_int *)pts);
#ifdef DEBUG
				else
					panic("pmap_remove PG_SREF page");
#endif
				RECURSESTART;
				bzero(pts, sizeof(struct pte) * LTOHPN);
				if (pt != Sysmap) {
					if (ptpinuse(pts) == 0)
						rmptep(pts);
				}
			} else {
				pts[0].pg_prot = pr;
				pts[1].pg_prot = pr;
				pts[2].pg_prot = pr;
				pts[3].pg_prot = pr;
				pts[4].pg_prot = pr;
				pts[5].pg_prot = pr;
				pts[6].pg_prot = pr;
				pts[7].pg_prot = pr;
			}
		}
		pts += LTOHPN;
	}
	RECURSEEND;
#ifdef MULTIPROCESSOR
	cpu_send_ipi(IPI_DEST_ALL, IPI_TBIA);
#endif
	mtpr(0, PR_TBIA);
}

/*
 * Clears valid bit in all ptes referenced to this physical page.
 */
bool
pmap_clear_reference_long(struct pv_entry *pv)
{
	struct pte *pte;
	int ref = 0;

	PMDEBUG(("pmap_clear_reference: pv_entry %p\n", pv));

	RECURSESTART;
	PVTABLE_LOCK;
	if (pv->pv_pmap != NULL) {
		pte = vaddrtopte(pv);
		if (pte->pg_w == 0) {
			pte[0].pg_v = 0; pte[1].pg_v = 0;
			pte[2].pg_v = 0; pte[3].pg_v = 0;
			pte[4].pg_v = 0; pte[5].pg_v = 0;
			pte[6].pg_v = 0; pte[7].pg_v = 0;
		}
	}

	while ((pv = pv->pv_next)) {
		pte = vaddrtopte(pv);
		if (pte[0].pg_w == 0) {
			pte[0].pg_v = 0; pte[1].pg_v = 0;
			pte[2].pg_v = 0; pte[3].pg_v = 0;
			pte[4].pg_v = 0; pte[5].pg_v = 0;
			pte[6].pg_v = 0; pte[7].pg_v = 0;
		}
	}
	PVTABLE_UNLOCK;
	RECURSEEND;
#ifdef MULTIPROCESSOR
	cpu_send_ipi(IPI_DEST_ALL, IPI_TBIA);
#endif	
	mtpr(0, PR_TBIA);
	return ref;
}

/*
 * Lower the permission for all mappings to a given page.
 * Lower permission can only mean setting protection to either read-only
 * or none; where none is unmapping of the page.
 */
void
pmap_page_protect_long(struct pv_entry *pv, vm_prot_t prot)
{
	struct	pte *pt;
	struct	pv_entry *opv, *pl;
	int	s, *g;

	PMDEBUG(("pmap_page_protect: pv %p, prot %x\n", pv, prot));


	if (prot == VM_PROT_ALL) /* 'cannot happen' */
		return;

	RECURSESTART;
	PVTABLE_LOCK;
	if (prot == VM_PROT_NONE) {
		s = splvm();
		g = (int *)vaddrtopte(pv);
		if (g) {
			simple_lock(&pv->pv_pmap->pm_lock);
			pv->pv_pmap->pm_stats.resident_count--;
			if (g[0] & PG_W) {
				pv->pv_pmap->pm_stats.wired_count--;
			}
			simple_unlock(&pv->pv_pmap->pm_lock);
			if ((pv->pv_attr & (PG_V|PG_M)) != (PG_V|PG_M))
				pv->pv_attr |= 
				    g[0]|g[1]|g[2]|g[3]|g[4]|g[5]|g[6]|g[7];
			bzero(g, sizeof(struct pte) * LTOHPN);
			if (pv->pv_pmap != pmap_kernel()) {
				if (ptpinuse(g) == 0)
					rmptep((void *)g);
			}
			pv->pv_vaddr = NOVADDR;
			pv->pv_pmap = NULL;
		}
		pl = pv->pv_next;
		pv->pv_pmap = 0;
		pv->pv_next = 0;
		while (pl) {
			g = (int *)vaddrtopte(pl);
			simple_lock(&pl->pv_pmap->pm_lock);
			pl->pv_pmap->pm_stats.resident_count--;
			if (g[0] & PG_W) {
				pl->pv_pmap->pm_stats.wired_count--;
			}
			simple_unlock(&pl->pv_pmap->pm_lock);
			if ((pv->pv_attr & (PG_V|PG_M)) != (PG_V|PG_M))
				pv->pv_attr |=
				    g[0]|g[1]|g[2]|g[3]|g[4]|g[5]|g[6]|g[7];
			bzero(g, sizeof(struct pte) * LTOHPN);
			if (pl->pv_pmap != pmap_kernel()) {
				if (ptpinuse(g) == 0)
					rmptep((void *)g);
			}
			opv = pl;
			pl = pl->pv_next;
			free_pventry(opv);
		} 
		splx(s);
	} else { /* read-only */
		do {
			int pr;
			pt = vaddrtopte(pv);
			if (pt == 0)
				continue;
			pr = ((vaddr_t)pt < ptemapstart ? PROT_KR : PROT_RO);
			pt[0].pg_prot = pr; pt[1].pg_prot = pr;
			pt[2].pg_prot = pr; pt[3].pg_prot = pr;
			pt[4].pg_prot = pr; pt[5].pg_prot = pr;
			pt[6].pg_prot = pr; pt[7].pg_prot = pr;
		} while ((pv = pv->pv_next));
	}
	PVTABLE_UNLOCK;
	RECURSEEND;
#ifdef MULTIPROCESSOR
	cpu_send_ipi(IPI_DEST_ALL, IPI_TBIA);
#endif
	mtpr(0, PR_TBIA);
}

I haven't verified this properly, but from the look of it, the idea is not to have every CPU execute the same function (as smp_rendezvous() does); instead, each CPU is made to reload or flush its own state so that everything ends up consistent.
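In other words, the sending side just posts a message, and each receiving CPU does the flush/reload itself in its IPI handler. A minimal sketch of what that handler side could look like, modeled on the update_pcbs()/mtpr() code above (the ci_ipimsgs field, the bit-number encoding of the IPI types, and the function name are my assumptions, not the actual NetBSD/vax code):

/*
 * Illustrative IPI receive handler: each CPU refreshes its own state
 * locally.  The mtpr() reloads mirror update_pcbs() above; the field
 * and handler names are assumptions.
 */
static void
handle_ipi_sketch(struct cpu_info *ci)
{
	unsigned int msgs;

	/* atomically fetch and clear this CPU's pending IPI bits */
	msgs = atomic_swap_uint(&ci->ci_ipimsgs, 0);

	if (msgs & (1 << IPI_TBIA))	/* assuming IPI types are bit numbers */
		mtpr(0, PR_TBIA);	/* flush the local TLB */

	if (msgs & (1 << IPI_NEWPTE)) {
		/* re-read the map registers from the current pcb */
		struct pcb *pcb = lwp_getpcb(curlwp);

		mtpr((uintptr_t)pcb->P0BR, PR_P0BR);
		mtpr(pcb->P0LR, PR_P0LR);	/* already includes AST_PCB */
		mtpr((uintptr_t)pcb->P1BR, PR_P1BR);
		mtpr(pcb->P1LR, PR_P1LR);
	}
}

If that reading is right, this is fire-and-forget: unlike smp_rendezvous(), the sender doesn't wait for the other CPUs to finish, it just trusts that each CPU will process the IPI and bring itself back in sync.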