score: Add clustered/partitioned scheduling

Clustered/partitioned scheduling helps to control the worst-case
latencies in the system.  The goal is to reduce the amount of shared
state in the system and thus prevent lock contention.  Modern
multi-processor systems tend to have several layers of data and
instruction caches.  With clustered/partitioned scheduling it is
possible to honour the cache topology of a system and thus avoid
expensive cache synchronization traffic.

A system uses clustered scheduling when its set of processors is
partitioned into non-empty, pairwise-disjoint subsets.  These subsets
are called clusters.  A cluster with a cardinality of one is a partition.
Each cluster is owned by exactly one scheduler instance.
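To make the definition concrete, the sketch below (purely illustrative, not
part of this commit) partitions a four-processor system into the two clusters
{0, 1} and {2, 3} and checks the defining property that every processor
belongs to exactly one non-empty cluster.  It uses the glibc <sched.h>
cpu_set_t macros; RTEMS provides the same macros via <sys/cpuset.h>, as in
the test changes below.

  /* Illustrative only: cluster partitioning expressed with cpu_set_t. */
  #define _GNU_SOURCE
  #include <sched.h>
  #include <assert.h>

  int main(void)
  {
    enum { PROCESSOR_COUNT = 4, CLUSTER_COUNT = 2 };
    cpu_set_t cluster[CLUSTER_COUNT];
    int cpu;
    int i;

    /* Cluster 0 owns processors {0, 1}, cluster 1 owns {2, 3}.  A cluster
       containing a single processor would be a partition. */
    CPU_ZERO(&cluster[0]);
    CPU_SET(0, &cluster[0]);
    CPU_SET(1, &cluster[0]);
    CPU_ZERO(&cluster[1]);
    CPU_SET(2, &cluster[1]);
    CPU_SET(3, &cluster[1]);

    /* Non-empty and pairwise disjoint: every processor has exactly one owner. */
    for (cpu = 0; cpu < PROCESSOR_COUNT; ++cpu) {
      int owners = 0;

      for (i = 0; i < CLUSTER_COUNT; ++i) {
        if (CPU_ISSET(cpu, &cluster[i])) {
          ++owners;
        }
      }

      assert(owners == 1);
    }

    return 0;
  }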
Author: Sebastian Huber
Date:   2014-04-09 15:07:54 +02:00
Commit: c5831a3f9a
Parent: 27270b0d6c

49 changed files with 1589 additions and 114 deletions


@@ -35,6 +35,7 @@ static void test_task_get_set_affinity(void)
{
#if defined(__RTEMS_HAVE_SYS_CPUSET_H__)
  rtems_id self_id = rtems_task_self();
  rtems_id task_id;
  rtems_status_code sc;
  cpu_set_t cpusetone;
  cpu_set_t cpuset;
@@ -46,6 +47,16 @@ static void test_task_get_set_affinity(void)
  CPU_ZERO(&cpusetone);
  CPU_SET(0, &cpusetone);
  sc = rtems_task_create(
    rtems_build_name('T', 'A', 'S', 'K'),
    2,
    RTEMS_MINIMUM_STACK_SIZE,
    RTEMS_DEFAULT_MODES,
    RTEMS_DEFAULT_ATTRIBUTES,
    &task_id
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  sc = rtems_task_get_affinity(RTEMS_SELF, sizeof(cpuset), NULL);
  rtems_test_assert(sc == RTEMS_INVALID_ADDRESS);
@@ -70,16 +81,19 @@ static void test_task_get_set_affinity(void)
  rtems_test_assert(CPU_EQUAL(&cpuset, &cpusetone));
  sc = rtems_task_set_affinity(RTEMS_SELF, sizeof(cpuset), &cpuset);
  rtems_test_assert(sc == RTEMS_INVALID_NUMBER);
  sc = rtems_task_set_affinity(self_id, sizeof(cpuset), &cpuset);
  rtems_test_assert(sc == RTEMS_INVALID_NUMBER);
  sc = rtems_task_set_affinity(task_id, sizeof(cpuset), &cpuset);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  sc = rtems_task_get_affinity(self_id, sizeof(cpuset), &cpuset);
  sc = rtems_task_get_affinity(task_id, sizeof(cpuset), &cpuset);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  rtems_test_assert(CPU_EQUAL(&cpuset, &cpusetone));
  sc = rtems_task_set_affinity(self_id, sizeof(cpuset), &cpuset);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
cpusetbigone = CPU_ALLOC(big);
rtems_test_assert(cpusetbigone != NULL);
@@ -89,12 +103,15 @@ static void test_task_get_set_affinity(void)
  CPU_ZERO_S(cpusetbigsize, cpusetbigone);
  CPU_SET_S(0, cpusetbigsize, cpusetbigone);
  sc = rtems_task_get_affinity(RTEMS_SELF, cpusetbigsize, cpusetbig);
  sc = rtems_task_get_affinity(task_id, cpusetbigsize, cpusetbig);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  rtems_test_assert(CPU_EQUAL_S(cpusetbigsize, cpusetbig, cpusetbigone));
  sc = rtems_task_set_affinity(RTEMS_SELF, cpusetbigsize, cpusetbig);
  sc = rtems_task_set_affinity(task_id, cpusetbigsize, cpusetbig);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  sc = rtems_task_delete(task_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  CPU_FREE(cpusetbig);
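For reference, the affinity round trip the updated test performs on a separate
task can be sketched as follows.  This is an illustrative summary, not code
from the commit; it only uses calls visible in the diff above (task name
'TASK', priority 2 and processor 0 mirror the values in the diff), assumes an
RTEMS application context with <rtems.h> and <sys/cpuset.h> available, and
treats rtems_test_assert() as shorthand for "abort on unexpected status".

  rtems_id          task_id;
  rtems_status_code sc;
  cpu_set_t         cpuset;

  /* Create a worker task whose affinity is addressed by task ID. */
  sc = rtems_task_create(
    rtems_build_name('T', 'A', 'S', 'K'),
    2,
    RTEMS_MINIMUM_STACK_SIZE,
    RTEMS_DEFAULT_MODES,
    RTEMS_DEFAULT_ATTRIBUTES,
    &task_id
  );
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Pin the worker to processor 0 (assumed to belong to its scheduler). */
  CPU_ZERO(&cpuset);
  CPU_SET(0, &cpuset);
  sc = rtems_task_set_affinity(task_id, sizeof(cpuset), &cpuset);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);

  /* Read the affinity back and check the round trip. */
  sc = rtems_task_get_affinity(task_id, sizeof(cpuset), &cpuset);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
  rtems_test_assert(CPU_ISSET(0, &cpuset));

  sc = rtems_task_delete(task_id);
  rtems_test_assert(sc == RTEMS_SUCCESSFUL);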