/* smp.c: Sparc SMP support.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */

#include <asm/head.h>
#include <asm/ptrace.h>

#include <linux/kernel.h>
#include <linux/tasks.h>
#include <linux/smp.h>

#include <asm/delay.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>

extern ctxd_t *srmmu_ctx_table_phys;
extern int linux_num_cpus;

struct tlog {
	unsigned long pc;
	unsigned long psr;
};

struct tlog trap_log[4][256];
unsigned long trap_log_ent[4] = { 0, 0, 0, 0, };

extern void calibrate_delay(void);

volatile unsigned long stuck_pc = 0;
volatile int smp_processors_ready = 0;

int smp_found_config = 0;
unsigned long cpu_present_map = 0;
int smp_num_cpus = 1;
int smp_threads_ready=0;
unsigned char mid_xlate[NR_CPUS] = { 0, 0, 0, 0, };
volatile unsigned long cpu_callin_map[NR_CPUS] = {0,};
volatile unsigned long smp_invalidate_needed[NR_CPUS] = { 0, };
volatile unsigned long smp_spinning[NR_CPUS] = { 0, };
struct cpuinfo_sparc cpu_data[NR_CPUS];
unsigned char boot_cpu_id = 0;
static int smp_activated = 0;
static volatile unsigned char smp_cpu_in_msg[NR_CPUS];
static volatile unsigned long smp_msg_data;
static volatile int smp_src_cpu;
static volatile int smp_msg_id;
volatile int cpu_number_map[NR_CPUS];
volatile int cpu_logical_map[NR_CPUS];

/* The only guaranteed locking primitive available on all Sparc
 * processors is 'ldstub [%reg + immediate], %dest_reg' which atomically
 * places the current byte at the effective address into dest_reg and
 * places 0xff there afterwards.  Pretty lame locking primitive
 * compared to the Alpha and the Intel, no?  Most Sparcs have a 'swap'
 * instruction which is much better...
 */
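
/* A minimal sketch (illustration only, not part of the original file)
 * of a byte spinlock built on those 'ldstub' semantics: spin until the
 * atomic load-store returns zero, i.e. until we are the ones who wrote
 * the 0xff.
 */
#if 0
static inline void example_spin_lock(volatile unsigned char *lock)
{
	unsigned char got;

	do {
		__asm__ __volatile__("ldstub [%1], %0" :
				     "=&r" (got) :
				     "r" (lock) :
				     "memory");
	} while(got != 0);	/* non-zero: somebody else holds the byte */
}

static inline void example_spin_unlock(volatile unsigned char *lock)
{
	*lock = 0;	/* a plain byte store releases the lock */
}
#endif
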
klock_t kernel_flag = KLOCK_CLEAR;
volatile unsigned char active_kernel_processor = NO_PROC_ID;
volatile unsigned long kernel_counter = 0;
volatile unsigned long syscall_count = 0;
volatile unsigned long ipi_count;
#ifdef __SMP_PROF__
volatile unsigned long smp_spins[NR_CPUS]={0};
volatile unsigned long smp_spins_syscall[NR_CPUS]={0};
volatile unsigned long smp_spins_syscall_cur[NR_CPUS]={0};
volatile unsigned long smp_spins_sys_idle[NR_CPUS]={0};
volatile unsigned long smp_idle_count[1+NR_CPUS]={0,};
#endif
#if defined (__SMP_PROF__)
volatile unsigned long smp_idle_map=0;
#endif

volatile unsigned long smp_proc_in_lock[NR_CPUS] = {0,};
volatile int smp_process_available=0;

/*#define SMP_DEBUG*/

#ifdef SMP_DEBUG
#define SMP_PRINTK(x)	printk x
#else
#define SMP_PRINTK(x)
#endif

static volatile int smp_commenced = 0;

static char smp_buf[512];

char *smp_info(void)
{
	sprintf(smp_buf,
"\n        CPU0\t\tCPU1\t\tCPU2\t\tCPU3\n"
"State: %s\t\t%s\t\t%s\t\t%s\n"
"Lock:  %08lx\t\t%08lx\t%08lx\t%08lx\n"
"\n"
"klock: %x\n",
		(cpu_present_map & 1) ? ((active_kernel_processor == 0) ? "akp" : "online") : "offline",
		(cpu_present_map & 2) ? ((active_kernel_processor == 1) ? "akp" : "online") : "offline",
		(cpu_present_map & 4) ? ((active_kernel_processor == 2) ? "akp" : "online") : "offline",
		(cpu_present_map & 8) ? ((active_kernel_processor == 3) ? "akp" : "online") : "offline",
		smp_proc_in_lock[0], smp_proc_in_lock[1], smp_proc_in_lock[2],
		smp_proc_in_lock[3], 
		kernel_flag);
	return smp_buf;
}

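/* Atomically exchange 'val' with the word at 'ptr' using the Sparc
 * 'swap' instruction mentioned above.  Besides the atomic exchange
 * itself, this pushes the store out of the cpu's store buffer, which
 * is why it is used for the cross-cpu handshake flags below.
 */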
static inline unsigned long swap(volatile unsigned long *ptr, unsigned long val)
{
	__asm__ __volatile__("swap [%1], %0\n\t" :
			     "=&r" (val), "=&r" (ptr) :
			     "0" (val), "1" (ptr));
	return val;
}

/*
 *	The bootstrap kernel entry code has set these up. Save them for
 *	a given CPU
 */

void smp_store_cpu_info(int id)
{
	cpu_data[id].udelay_val = loops_per_sec; /* this is it on sparc. */
}

/*
 *	Architecture specific routine called by the kernel just before init is
 *	fired off. This allows the BP to have everything in order [we hope].
 *	At the end of this all the AP's will hit the system scheduling and off
 *	we go. Each AP does its low-level setup and jumps through the kernel
 *	init into idle(). At this point the scheduler will one day take over
 *	and give them jobs to do. smp_callin is a standard routine
 *	we use to track CPU's as they power up.
 */

void smp_commence(void)
{
	/*
	 *	Lets the callin's below out of their loop.
	 */
	local_flush_cache_all();
	local_flush_tlb_all();
	smp_commenced = 1;
	local_flush_cache_all();
	local_flush_tlb_all();
}

void smp_callin(void)
{
	int cpuid = smp_processor_id();

	sti();
	local_flush_cache_all();
	local_flush_tlb_all();
	calibrate_delay();
	smp_store_cpu_info(cpuid);
	local_flush_cache_all();
	local_flush_tlb_all();
	cli();

	/* Allow master to continue. */
	swap((unsigned long *)&cpu_callin_map[cpuid], 1);
	local_flush_cache_all();
	local_flush_tlb_all();
	while(!smp_commenced)
		barrier();
	local_flush_cache_all();
	local_flush_tlb_all();

	/* Fix idle thread fields. */
	current->mm->mmap->vm_page_prot = PAGE_SHARED;
	current->mm->mmap->vm_start = KERNBASE;
	current->mm->mmap->vm_end = init_task.mm->mmap->vm_end;

	local_flush_cache_all();
	local_flush_tlb_all();

	sti();
}

void cpu_panic(void)
{
	printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
	panic("SMP bolixed\n");
}

/*
 *	Cycle through the processors asking the PROM to start each one.
 */
 
extern struct prom_cpuinfo linux_cpus[NCPUS];
static struct linux_prom_registers penguin_ctable;

void smp_boot_cpus(void)
{
	int cpucount = 0;
	int i = 0;

	printk("Entering SparclinuxMultiPenguin(SMP) Mode...\n");

	penguin_ctable.which_io = 0;
	penguin_ctable.phys_addr = (char *) srmmu_ctx_table_phys;
	penguin_ctable.reg_size = 0;

	sti();
	cpu_present_map = 0;
	for(i=0; i < linux_num_cpus; i++)
		cpu_present_map |= (1<<i);
	for(i=0; i < NR_CPUS; i++)
		cpu_number_map[i] = -1;
	for(i=0; i < NR_CPUS; i++)
		cpu_logical_map[i] = -1;
	mid_xlate[boot_cpu_id] = (linux_cpus[boot_cpu_id].mid & ~8);
	cpu_number_map[boot_cpu_id] = 0;
	cpu_logical_map[0] = boot_cpu_id;
	active_kernel_processor = boot_cpu_id;
	smp_store_cpu_info(boot_cpu_id);
	set_irq_udt(0);
	local_flush_cache_all();
	if(linux_num_cpus == 1)
		return;  /* Not an MP box. */
	for(i = 0; i < NR_CPUS; i++) {
		if(i == boot_cpu_id)
			continue;

		if(cpu_present_map & (1 << i)) {
			extern unsigned long sparc_cpu_startup;
			unsigned long *entry = &sparc_cpu_startup;
			int timeout;

			/* See trampoline.S for details... */
			entry += ((i-1) * 6);

			/* whirrr, whirrr, whirrrrrrrrr... */
			printk("Starting CPU %d at %p\n", i, entry);
			mid_xlate[i] = (linux_cpus[i].mid & ~8);
			local_flush_cache_all();
			prom_startcpu(linux_cpus[i].prom_node,
				      &penguin_ctable, 0, (char *)entry);

			/* wheee... it's going... */
			for(timeout = 0; timeout < 5000000; timeout++) {
				if(cpu_callin_map[i])
					break;
				udelay(100);
			}
			if(cpu_callin_map[i]) {
				/* Another "Red Snapper". */
				cpucount++;
				cpu_number_map[i] = i;
				cpu_logical_map[i] = i;
			} else {
				printk("Penguin %d is stuck in the bottle.\n", i);
			}
		}
		if(!(cpu_callin_map[i])) {
			cpu_present_map &= ~(1 << i);
			cpu_number_map[i] = -1;
		}
	}
	local_flush_cache_all();
	if(cpucount == 0) {
		printk("Error: only one Penguin found.\n");
		cpu_present_map = (1 << smp_processor_id());
	} else {
		unsigned long bogosum = 0;
		for(i = 0; i < NR_CPUS; i++) {
			if(cpu_present_map & (1 << i))
				bogosum += cpu_data[i].udelay_val;
		}
		printk("Total of %d Penguins activated (%lu.%02lu PenguinMIPS).\n",
		       cpucount + 1,
		       (bogosum + 2500)/500000,
		       ((bogosum + 2500)/5000)%100);
		smp_activated = 1;
		smp_num_cpus = cpucount + 1;
	}
	smp_processors_ready = 1;
}

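/* Deliver 'irq' to every cpu whose bit is set in 'target_map',
 * translating each logical cpu number to its hardware module id
 * through mid_xlate[].
 */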
static inline void send_ipi(unsigned long target_map, int irq)
{
	int i;

	for(i = 0; i < 4; i++) {
		if((1<<i) & target_map)
			set_cpu_int(mid_xlate[i], irq);
	}
}

/*
 * A non-wait message cannot pass data or cpu source info. This current
 * setup is only safe because the kernel lock owner is the only person
 * who can send a message.
 *
 * Wrapping this whole block in a spinlock is not the safe answer either.
 * A processor may get stuck with irq's off waiting to send a message and
 * thus not replying to the person spinning for a reply....
 *
 * In the end invalidate ought to be the NMI and a very very short
 * function (to avoid the old IDE disk problems), and other messages sent
 * with IRQ's enabled in a civilised fashion. That will also boost
 * performance.
 */

static volatile int message_cpu = NO_PROC_ID;

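/* Send 'msg' to 'target' (a cpu number, MSG_ALL, or MSG_ALL_BUT_SELF).
 * The 'wait' argument picks the completion handshake used below:
 * 0 means fire and forget, 1 spins until every target checks in via
 * cpu_callin_map[], 2 spins until smp_invalidate_needed[] clears, and
 * 3 returns with message_cpu and smp_cpu_in_msg[] still held so the
 * caller (smp_cross_call) can release them once all processors
 * disperse.
 */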
void smp_message_pass(int target, int msg, unsigned long data, int wait)
{
	unsigned long target_map;
	int p = smp_processor_id();
	int irq = 15;
	int i;

	/* Before processors have been placed into their initial
	 * patterns do not send messages.
	 */
	if(!smp_processors_ready)
		return;

	/* Skip the reschedule if we are waiting to clear a
	 * message at this time. The reschedule cannot wait
	 * but is not critical.
	 */
	if(msg == MSG_RESCHEDULE) {
		irq = 13;
		if(smp_cpu_in_msg[p])
			return;
	}

	/* Sanity check we don't re-enter this across CPU's. Only the kernel
	 * lock holder may send messages. For a STOP_CPU we are bringing the
	 * entire box to the fastest halt we can.. A reschedule carries
	 * no data and can occur during a flush.. guess what panic
	 * I got to notice this bug...
	 */
	if(message_cpu != NO_PROC_ID && msg != MSG_STOP_CPU && msg != MSG_RESCHEDULE) {
		printk("CPU #%d: Message pass %d but pass in progress by %d of %d\n",
		      smp_processor_id(),msg,message_cpu, smp_msg_id);

		/* I don't know how to gracefully die so that debugging
		 * this doesn't completely eat up my filesystems...
		 * let's try this...
		 */
		smp_cpu_in_msg[p] = 0; /* In case we come back here... */
		intr_count = 0;        /* and so panic don't barf... */
		smp_swap(&message_cpu, NO_PROC_ID); /* push the store buffer */
		sti();
		printk("spinning, please L1-A, type ctrace and send output to davem\n");
		while(1)
			barrier();
	}
	smp_swap(&message_cpu, smp_processor_id()); /* store buffers... */

	/* We are busy. */
	smp_cpu_in_msg[p]++;

	/* Reschedule is currently special. */
	if(msg != MSG_RESCHEDULE) {
		smp_src_cpu = p;
		smp_msg_id = msg;
		smp_msg_data = data;
	}

#if 0
	printk("SMP message pass from cpu %d to cpu %d msg %d\n", p, target, msg);
#endif

	/* Set the target requirement. */
	for(i = 0; i < smp_num_cpus; i++)
		swap((unsigned long *) &cpu_callin_map[i], 0);
	if(target == MSG_ALL_BUT_SELF) {
		target_map = (cpu_present_map & ~(1<<p));
		swap((unsigned long *) &cpu_callin_map[p], 1);
	} else if(target == MSG_ALL) {
		target_map = cpu_present_map;
	} else {
		for(i = 0; i < smp_num_cpus; i++)
			if(i != target)
				swap((unsigned long *) &cpu_callin_map[i], 1);
		target_map = (1<<target);
	}

	/* Fire it off. */
	send_ipi(target_map, irq);

	switch(wait) {
	case 1:
		for(i = 0; i < smp_num_cpus; i++)
			while(!cpu_callin_map[i])
				barrier();
		break;
	case 2:
		for(i = 0; i < smp_num_cpus; i++)
			while(smp_invalidate_needed[i])
				barrier();
		break;
	case 3:
		/* For cross calls we hold message_cpu and smp_cpu_in_msg[]
		 * until all processors disperse.  Else we have _big_ problems.
		 */
		return;
	}
	smp_cpu_in_msg[p]--;
	smp_swap(&message_cpu, NO_PROC_ID);
}

struct smp_funcall {
	smpfunc_t func;
	unsigned long arg1;
	unsigned long arg2;
	unsigned long arg3;
	unsigned long arg4;
	unsigned long arg5;
	unsigned long processors_in[NR_CPUS];  /* Set when ipi entered. */
	unsigned long processors_out[NR_CPUS]; /* Set when ipi exited. */
} ccall_info;
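
/* Cross call protocol: the master fills in ccall_info and fires a
 * MSG_CROSS_CALL ipi at everyone else.  Each cpu flags itself in
 * processors_in[] on entry to the handler, runs func, then flags
 * processors_out[] on exit; the master spins on both arrays before
 * releasing the message lock (see smp_message_irq below).
 */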

/* Complains and gives up if any of the cpus fail to respond
 * within a certain timeout period.
 */

#define CCALL_TIMEOUT   5000000 /* enough for initial testing */

/* #define DEBUG_CCALL */

/* Some nice day when we really thread the kernel I'd like to synchronize
 * this with either a broadcast conditional variable, a resource adaptive
 * generic mutex, or a convoy semaphore scheme of some sort.  No reason
 * we can't let multiple processors in here if the appropriate locking
 * is done.  Note that such a scheme assumes we will have a
 * prioritized ipi scheme using different software level irq's.
 */
void smp_cross_call(smpfunc_t func, unsigned long arg1, unsigned long arg2,
		    unsigned long arg3, unsigned long arg4, unsigned long arg5)
{
	unsigned long me = smp_processor_id();
	unsigned long flags;
	int i, timeout;

#ifdef DEBUG_CCALL
	printk("xc%d<", me);
#endif
	if(smp_processors_ready) {
		save_flags(flags); cli();
		if(me != active_kernel_processor)
			goto cross_call_not_master;

		/* Init function glue. */
		ccall_info.func = func;
		ccall_info.arg1 = arg1;
		ccall_info.arg2 = arg2;
		ccall_info.arg3 = arg3;
		ccall_info.arg4 = arg4;
		ccall_info.arg5 = arg5;

		/* Init receive/complete mapping. */
		for(i = 0; i < smp_num_cpus; i++) {
			ccall_info.processors_in[i] = 0;
			ccall_info.processors_out[i] = 0;
		}
		ccall_info.processors_in[me] = 1;
		ccall_info.processors_out[me] = 1;

		/* Fire it off. */
		smp_message_pass(MSG_ALL_BUT_SELF, MSG_CROSS_CALL, 0, 3);

		/* For debugging purposes right now we can timeout
		 * on both callin and callexit.
		 */
		timeout = CCALL_TIMEOUT;
		for(i = 0; i < smp_num_cpus; i++) {
			while(!ccall_info.processors_in[i] && timeout-- > 0)
				barrier();
			if(!ccall_info.processors_in[i])
				goto procs_time_out;
		}
#ifdef DEBUG_CCALL
		printk("I");
#endif

		/* Run local copy. */
		func(arg1, arg2, arg3, arg4, arg5);

		/* Spin on proc dispersion. */
		timeout = CCALL_TIMEOUT;
		for(i = 0; i < smp_num_cpus; i++) {
			while(!ccall_info.processors_out[i] && timeout-- > 0)
				barrier();
			if(!ccall_info.processors_out[i])
				goto procs_time_out;
		}
#ifdef DEBUG_CCALL
		printk("O>");
#endif
		/* See wait case 3 in smp_message_pass()... */
		smp_cpu_in_msg[me]--;
		smp_swap(&message_cpu, NO_PROC_ID); /* store buffers... */
		restore_flags(flags);
		return; /* made it... */

procs_time_out:
		printk("smp: Wheee, penguin drops off the bus\n");
		smp_cpu_in_msg[me]--;
		message_cpu = NO_PROC_ID;
		restore_flags(flags);
		return; /* why me... why me... */
	}

	/* Just need to run local copy. */
	func(arg1, arg2, arg3, arg4, arg5);
	return;

cross_call_not_master:
	printk("Cross call initiated by non master cpu\n");
	printk("akp=%x me=%08lx\n", active_kernel_processor, me);
	restore_flags(flags);
	panic("penguin cross call");
}
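
/* The xcN() helpers used below come from a header (not shown here);
 * presumably each expands to smp_cross_call() with N real arguments
 * and the rest zeroed, roughly like this hypothetical sketch:
 */
#if 0
#define xc0(f)			smp_cross_call((f), 0, 0, 0, 0, 0)
#define xc1(f, a)		smp_cross_call((f), (a), 0, 0, 0, 0)
#define xc2(f, a, b)		smp_cross_call((f), (a), (b), 0, 0, 0)
#define xc3(f, a, b, c)		smp_cross_call((f), (a), (b), (c), 0, 0)
#endif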

void smp_flush_cache_all(void)
{ xc0((smpfunc_t) local_flush_cache_all); }

void smp_flush_tlb_all(void)
{ xc0((smpfunc_t) local_flush_tlb_all); }

void smp_flush_cache_mm(struct mm_struct *mm)
{ 
	if(mm->context != NO_CONTEXT)
		xc1((smpfunc_t) local_flush_cache_mm, (unsigned long) mm);
}

void smp_flush_tlb_mm(struct mm_struct *mm)
{
	if(mm->context != NO_CONTEXT)
		xc1((smpfunc_t) local_flush_tlb_mm, (unsigned long) mm);
}

void smp_flush_cache_range(struct mm_struct *mm, unsigned long start,
			   unsigned long end)
{
	if(mm->context != NO_CONTEXT)
		xc3((smpfunc_t) local_flush_cache_range, (unsigned long) mm,
		    start, end);
}

void smp_flush_tlb_range(struct mm_struct *mm, unsigned long start,
			 unsigned long end)
{
	if(mm->context != NO_CONTEXT)
		xc3((smpfunc_t) local_flush_tlb_range, (unsigned long) mm,
		    start, end);
}

void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{ xc2((smpfunc_t) local_flush_cache_page, (unsigned long) vma, page); }

void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{ xc2((smpfunc_t) local_flush_tlb_page, (unsigned long) vma, page); }

void smp_flush_page_to_ram(unsigned long page)
{ xc1((smpfunc_t) local_flush_page_to_ram, page); }

/* Reschedule call back. */
void smp_reschedule_irq(void)
{
	if(smp_processor_id() != active_kernel_processor)
		panic("SMP Reschedule on CPU #%d, but #%d is active.\n",
		      smp_processor_id(), active_kernel_processor);

	need_resched=1;
}

/* XXX FIXME: this still doesn't work right... XXX */

/* #define DEBUG_CAPTURE */

static volatile unsigned long release = 1;
static volatile int capture_level = 0;

void smp_capture(void)
{
	unsigned long flags;

	if(!smp_activated || !smp_commenced)
		return;
#ifdef DEBUG_CAPTURE
	printk("C<%d>", smp_processor_id());
#endif
	save_flags(flags); cli();
	if(!capture_level) {
		release = 0;
		smp_message_pass(MSG_ALL_BUT_SELF, MSG_CAPTURE, 0, 1);
	}
	capture_level++;
	restore_flags(flags);
}

void smp_release(void)
{
	unsigned long flags;
	int i;

	if(!smp_activated || !smp_commenced)
		return;
#ifdef DEBUG_CAPTURE
	printk("R<%d>", smp_processor_id());
#endif
	save_flags(flags); cli();
	if(!(capture_level - 1)) {
		release = 1;
		for(i = 0; i < smp_num_cpus; i++)
			while(cpu_callin_map[i])
				barrier();
	}
	capture_level -= 1;
	restore_flags(flags);
}
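
/* Hypothetical usage sketch: bracket work that must not race any other
 * processor.  Calls nest, so only the outermost smp_capture() sends
 * MSG_CAPTURE and only the outermost smp_release() waits for the other
 * cpus to report back in:
 *
 *	smp_capture();
 *	... touch state that every cpu can see ...
 *	smp_release();
 */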

/* Park a processor.  We must watch for more IPI's to invalidate
 * our caches and TLB's, but we can only wait for and process
 * "lock-less" IPI's; since those are non-maskable traps, having
 * traps enabled is enough to receive them.
 */

/* Message call back. */
void smp_message_irq(void)
{
	int i=smp_processor_id();

	switch(smp_msg_id) {
	case MSG_CROSS_CALL:
		/* Do it to it. */
		ccall_info.processors_in[i] = 1;
		ccall_info.func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3,
				ccall_info.arg4, ccall_info.arg5);
		ccall_info.processors_out[i] = 1;
		break;

		/*
		 *	Halt other CPU's for a panic or reboot
		 */
	case MSG_STOP_CPU:
		sti();
		while(1)
			barrier();

	default:
		printk("CPU #%d sent invalid cross CPU message to CPU #%d: %X(%lX).\n",
		       smp_src_cpu,smp_processor_id(),smp_msg_id,smp_msg_data);
		break;
	}
}