/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * This file contains the power_save function for 970-family CPUs.
 *
 * power4_idle: enter NAP mode if the CPU supports it and the user/arch
 * has enabled it (powersave_nap != 0).  On return, interrupts may be
 * either enabled or disabled; the caller copes with both (see the
 * soft-enable comment below).
 */

#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/irqflags.h>
#include <asm/hw_irq.h>
#include <asm/feature-fixups.h>

#undef DEBUG

	.text

_GLOBAL(power4_idle)
BEGIN_FTR_SECTION
	blr				/* CPU cannot nap: nothing to do */
END_FTR_SECTION_IFCLR(CPU_FTR_CAN_NAP)
	/* Now check if user or arch enabled NAP mode */
	LOAD_REG_ADDRBASE(r3,powersave_nap)
	lwz	r4,ADDROFF(powersave_nap)(r3)
	cmpwi	0,r4,0
	beqlr				/* nap disabled: return to caller */

	/* This sequence is similar to prep_irq_for_idle() */

	/* Hard disable interrupts: clear MSR_EE while preserving the
	 * remaining MSR bits (rotate EE to bit 0, mask it off, rotate back).
	 */
	mfmsr	r7
	rldicl	r0,r7,48,1
	rotldi	r0,r0,16
	mtmsrd	r0,1

	/* Check if something happened while soft-disabled */
	lbz	r0,PACAIRQHAPPENED(r13)
	cmpwi	cr0,r0,0
	bne-	2f			/* pending irq: bail without napping */

	/*
	 * Soft-enable interrupts. This will make power4_fixup_nap return
	 * to our caller with interrupts enabled (soft and hard). The caller
	 * can cope with either interrupts disabled or enabled upon return.
	 */
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Tell the tracer interrupts are on, because idle responds to them. */
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-128(r1)		/* minimal frame for the C call */
	bl	trace_hardirqs_on
	addi	r1,r1,128
	ld	r0,16(r1)
	mtlr	r0
	mfmsr	r7			/* re-read MSR; the call may clobber r7 */
#endif /* CONFIG_TRACE_IRQFLAGS */

	li	r0,IRQS_ENABLED
	stb	r0,PACAIRQSOFTMASK(r13)	/* we'll hard-enable shortly */

BEGIN_FTR_SECTION
	DSSALL				/* stop all data streams before nap */
	sync
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
	ld	r9, PACA_THREAD_INFO(r13)
	ld	r8,TI_LOCAL_FLAGS(r9)	/* set napping bit */
	ori	r8,r8,_TLF_NAPPING	/* so when we take an exception */
	std	r8,TI_LOCAL_FLAGS(r9)	/* it will return to our caller */
	ori	r7,r7,MSR_EE		/* interrupts on ... */
	oris	r7,r7,MSR_POW@h		/* ... and power-save (nap) mode */
1:	sync
	isync
	mtmsrd	r7			/* enter nap; an interrupt wakes us */
	isync
	b	1b			/* re-nap if we fall through (spurious wake) */

2:	/* Return if an interrupt had happened while soft disabled */
	/* Set the HARD_DIS flag because interrupts are now hard disabled */
	ori	r0,r0,PACA_IRQ_HARD_DIS
	stb	r0,PACAIRQHAPPENED(r13)
	blr