|
|
Linux (轉自CU) 原文鏈接: http://linux.chinaunix.net/bbs/viewthread.php?tid=904906
cpu.c
[CODE]
#define _GNU_SOURCE /* must precede every #include so glibc exposes sched_setaffinity and the CPU_* macros */
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <ctype.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/sysinfo.h>
#define __USE_GNU /* kept from the original; redundant now that _GNU_SOURCE is defined first, but harmless */
#include <sched.h>
/*
 * Pin the calling process to one CPU (given as argv[1]) and spin forever,
 * printing the CPU set the kernel reports for it.  The busy loop is
 * intentional: it loads the bound CPU so the effect is visible in `top`
 * (press '1' for the per-CPU view).  Never returns on the happy path.
 */
int main(int argc, char *argv[])
{
    int num = sysconf(_SC_NPROCESSORS_CONF); /* CPUs configured on this system */
    cpu_set_t mask;                          /* affinity set we request */
    cpu_set_t get;                           /* affinity set the kernel reports */
    char *end = NULL;
    long myid;
    int i;

    if (argc != 2)
    {
        printf("usage : ./cpu num\n");
        exit(1);
    }

    /* strtol instead of atoi: rejects non-numeric input.  The range check
     * matters for correctness: CPU_SET with a negative or oversized index
     * writes outside the cpu_set_t bit array (undefined behavior). */
    myid = strtol(argv[1], &end, 10);
    if (end == argv[1] || *end != '\0' || myid < 0 || myid >= num)
    {
        printf("error: cpu id must be an integer in [0, %d)\n", num);
        exit(1);
    }

    printf("system has %i processor(s). \n", num);

    CPU_ZERO(&mask);
    CPU_SET((int)myid, &mask);
    if (sched_setaffinity(0, sizeof(mask), &mask) == -1)
    {
        /* Best-effort: report and keep running on the default affinity. */
        printf("warning: could not set CPU affinity, continuing...\n");
    }

    while (1)
    {
        CPU_ZERO(&get);
        if (sched_getaffinity(0, sizeof(get), &get) == -1)
        {
            printf("warning: could not get cpu affinity, continuing...\n");
        }
        /* Print every CPU the process is currently allowed to run on. */
        for (i = 0; i < num; i++)
        {
            if (CPU_ISSET(i, &get))
            {
                printf("this process %d is running processor : %d\n", getpid(), i);
            }
        }
    }
    return 0;
}
[/CODE]
下面是在兩個終端分別執行了 ./cpu 0 和 ./cpu 2 后得到的結果. 效果比較明顯.
Cpu0 : 5.3%us, 5.3%sy, 0.0%ni, 87.4%id, 0.0%wa, 0.0%hi, 2.0%si, 0.0%st
Cpu1 : 0.0%us, 0.0%sy, 0.0%ni,100.0%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st
Cpu2 : 5.0%us, 12.2%sy, 0.0%ni, 82.8%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st
Cpu3 : 0.0%us, 0.0%sy, 0.0%ni,100.0%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st
Cpu4 : 0.0%us, 0.0%sy, 0.0%ni, 99.7%id, 0.3%wa, 0.0%hi, 0.0%si, 0.0%st
Cpu5 : 0.0%us, 0.0%sy, 0.0%ni,100.0%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st
Cpu6 : 0.0%us, 0.0%sy, 0.0%ni,100.0%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st
Cpu7 : 0.0%us, 0.0%sy, 0.0%ni,100.0%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st
#ifdef __USE_GNU
/* Access macros for `cpu_set'. */
#define CPU_SETSIZE __CPU_SETSIZE
#define CPU_SET(cpu, cpusetp) __CPU_SET (cpu, cpusetp)
#define CPU_CLR(cpu, cpusetp) __CPU_CLR (cpu, cpusetp)
#define CPU_ISSET(cpu, cpusetp) __CPU_ISSET (cpu, cpusetp)
#define CPU_ZERO(cpusetp) __CPU_ZERO (cpusetp)
/* Set the CPU affinity for a task */
extern int sched_setaffinity (__pid_t __pid, size_t __cpusetsize,
__const cpu_set_t *__cpuset) __THROW;
/* Get the CPU affinity for a task */
extern int sched_getaffinity (__pid_t __pid, size_t __cpusetsize,
cpu_set_t *__cpuset) __THROW;
#endif
#define __USE_GNU是為了使用CPU_SET()等宏. 具體在/usr/include/sched.h中有如下的定義.
CPU Affinity (CPU親合力)
CPU親合力就是指在Linux系統中能夠將一個或多個進程綁定到一個或多個處理器上運行.
一個進程的CPU親合力掩碼決定了該進程將在哪個或哪幾個CPU上運行.在一個多處理器系統中,設置CPU親合力的掩碼可能會獲得更好的性能.
一個CPU的親合力掩碼用一個cpu_set_t結構體來表示一個CPU集合,下面的幾個宏分別對這個掩碼集進行操作:
CPU_ZERO() 清空一個集合
CPU_SET()與CPU_CLR()分別將一個給定的CPU號加到一個集合或者從一個集合中去掉.
CPU_ISSET()檢查一個CPU號是否在這個集合中.
其實這幾個宏的用法與select()函數的那幾個調用差不多.
下面兩個函數就是最主要的了:
sched_setaffinity(pid_t pid, unsigned int cpusetsize, cpu_set_t *mask)
該函數設置進程號為pid的進程,讓它運行在mask所設定的CPU上.如果pid的值為0,則表示指定的是當前進程,使當前進程運行在mask所設定的那些CPU上.第二個參數cpusetsize是
mask所指定的數的長度,通常設定為sizeof(cpu_set_t).如果pid所指定的進程此時沒有運行在mask所指定的任意一個CPU上,則該指定的進程會從其它CPU上遷移到mask所指定的
一個CPU上運行.
sched_getaffinity(pid_t pid, unsigned int cpusetsize, cpu_set_t *mask)
該函數獲得pid所指示的進程的CPU位掩碼,并將該掩碼返回到mask所指向的結構中,即獲得指定pid當前可以運行在哪些CPU上.同樣,如果pid的值為0,也表示的是當前進程.
這幾個宏與函數的具體用法前面已經有講解.
關(guān)于cpu_set_t的定義
[CODE]
# define __CPU_SETSIZE 1024
# define __NCPUBITS (8 * sizeof (__cpu_mask))
typedef unsigned long int __cpu_mask;
# define __CPUELT(cpu) ((cpu) / __NCPUBITS)
# define __CPUMASK(cpu) ((__cpu_mask) 1 << ((cpu) % __NCPUBITS))
typedef struct
{
__cpu_mask __bits[__CPU_SETSIZE / __NCPUBITS];
} cpu_set_t;
# define __CPU_ZERO(cpusetp) \
do { \
unsigned int __i; \
cpu_set_t *__arr = (cpusetp); \
for (__i = 0; __i < sizeof (cpu_set_t) / sizeof (__cpu_mask); ++__i) \
__arr->__bits[__i] = 0; \
} while (0)
# define __CPU_SET(cpu, cpusetp) \
((cpusetp)->__bits[__CPUELT (cpu)] |= __CPUMASK (cpu))
# define __CPU_CLR(cpu, cpusetp) \
((cpusetp)->__bits[__CPUELT (cpu)] &= ~__CPUMASK (cpu))
# define __CPU_ISSET(cpu, cpusetp) \
(((cpusetp)->__bits[__CPUELT (cpu)] & __CPUMASK (cpu)) != 0)
[/CODE]
在我的機器上sizeof(cpu_set_t)的大小為128字節,即一共有1024位.每一位代表一個CPU號.某一位為1則表示某進程可以運行在該位所代表的CPU上.例如
CPU_SET(1, &mask);
則mask所對應的第2位被設置為1.
此時如果printf("%d\n", mask.__bits[0]);就打印出2,表示第2位被置為1了.
具體我是參考man sched_setaffinity文檔中的函數說明的.
然后再參考了一下IBM的 developerWorks上的一個講解.
http://www.ibm.com/developerworks/cn/linux/l-affinity.html
-----------------------------------------------------------------
windows
首先用API函數CreateThread創建一個線程,
HANDLE CreateThread(
    LPSECURITY_ATTRIBUTES lpThreadAttributes, // SD
    SIZE_T dwStackSize,                       // initial stack size
    LPTHREAD_START_ROUTINE lpStartAddress,    // thread function
    LPVOID lpParameter,                       // thread argument
    DWORD dwCreationFlags,                    // creation option
    LPDWORD lpThreadId                        // thread identifier
);
通過調用SetThreadAffinityMask,就能為各個線程設置親緣性屏蔽: DWORD_PTR SetThreadAffinityMask(HANDLE hThread, DWORD_PTR dwThreadAffinityMask ); 該函數中的hThread參數用于指明要限制哪個線程, dwThreadAffinityMask用于指明該線程能夠在哪些CPU上運行。 dwThreadAffinityMask必須是進程的親緣性屏蔽的相應子集。 返回值是線程的前一個親緣性屏蔽。因此,若要將線程限制到CPU 1、2和3上去運行,可以這樣操作: //Thread 0 can only run on CPU 0. SetThreadAffinityMask(hThread0, 0x00000001); //Thread 1 runs on CPUs 1, 2, 3 (mask 0x0E = bits 1-3). SetThreadAffinityMask(hThread1, 0x0000000E);
|