mirror of https://gitlab.rtems.org/rtems/rtos/rtems.git
Patch from Eric Norum <eric@pollux.usask.ca> to eliminate a panic when the
network stack runs out of mbufs.
@@ -63,114 +63,11 @@ int max_protohdr;
 int max_hdr;
 int max_datalen;
 
-static void m_reclaim __P((void));
-
-/* "number of clusters of pages" */
-#define NCL_INIT	1
-
-#define NMB_INIT	16
-
-/* ARGSUSED*/
-static void
-mbinit(dummy)
-	void *dummy;
-{
-	int s;
-
-	mmbfree = NULL; mclfree = NULL;
-	s = splimp();
-	if (m_mballoc(NMB_INIT, M_DONTWAIT) == 0)
-		goto bad;
-	if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0)
-		goto bad;
-	splx(s);
-	return;
-bad:
-	panic("mbinit");
-}
-
-/*
- * Allocate at least nmb mbufs and place on mbuf free list.
- * Must be called at splimp.
- */
-/* ARGSUSED */
-int
-m_mballoc(nmb, nowait)
-	register int nmb;
-	int nowait;
-{
-	register caddr_t p;
-	register int i;
-	int nbytes;
-
-	/* Once we run out of map space, it will be impossible to get
-	 * any more (nothing is ever freed back to the map) (XXX which
-	 * is dumb). (however you are not dead as m_reclaim might
-	 * still be able to free a substantial amount of space).
-	 */
-	if (mb_map_full)
-		return (0);
-
-	nbytes = round_page(nmb * MSIZE);
-	p = (caddr_t)kmem_malloc(mb_map, nbytes, nowait ? M_NOWAIT : M_WAITOK);
-	/*
-	 * Either the map is now full, or this is nowait and there
-	 * are no pages left.
-	 */
-	if (p == NULL)
-		return (0);
-
-	nmb = nbytes / MSIZE;
-	for (i = 0; i < nmb; i++) {
-		((struct mbuf *)p)->m_next = mmbfree;
-		mmbfree = (struct mbuf *)p;
-		p += MSIZE;
-	}
-	mbstat.m_mbufs += nmb;
-	return (1);
-}
-
-/*
- * Allocate some number of mbuf clusters
- * and place on cluster free list.
- * Must be called at splimp.
- */
-/* ARGSUSED */
-int
-m_clalloc(ncl, nowait)
-	register int ncl;
-	int nowait;
-{
-	register caddr_t p;
-	register int i;
-
-	/*
-	 * Once we run out of map space, it will be impossible
-	 * to get any more (nothing is ever freed back to the
-	 * map).
-	 */
-	if (mb_map_full)
-		return (0);
-
-	p = (caddr_t)kmem_malloc(mb_map, ncl*MCLBYTES,
-			nowait ? M_NOWAIT : M_WAITOK);
-	/*
-	 * Either the map is now full, or this is nowait and there
-	 * are no pages left.
-	 */
-	if (p == NULL)
-		return (0);
-
-	for (i = 0; i < ncl; i++) {
-		((union mcluster *)p)->mcl_next = mclfree;
-		mclfree = (union mcluster *)p;
-		p += MCLBYTES;
-		mbstat.m_clfree++;
-	}
-	mbstat.m_clusters += ncl;
-	return (1);
-}
-
 /*
  * When MGET failes, ask protocols to free space when short of memory,
  * then re-attempt to allocate an mbuf.

@@ -212,8 +109,8 @@ m_retryhdr(i, t)
 	return (m);
 }
 
-static void
-m_reclaim()
+void
+m_reclaim(void)
 {
 	register struct domain *dp;
 	register struct protosw *pr;

@@ -126,25 +126,47 @@ rtems_bsdnet_free (void *addr, int type)
 
 /*
  * Do the initializations required by the BSD code
 * FIXME: Maybe we should use a different memory allocation scheme that
 * would let us share space between mbufs and mbuf clusters.
 * For now, we'll just take the easy way out!
  */
 static void
 bsd_init ()
 {
 	int i;
 	char *p;
 
 	/*
-	 * Set up mbuf data strutures
-	 * Cluster allocation *must* come first -- see comment on kmem_malloc().
+	 * Set up mbuf cluster data strutures
 	 */
-	m_clalloc (nmbclusters, M_DONTWAIT);
+	p = malloc ((nmbclusters*MCLBYTES)+MCLBYTES-1);
+	p = (char *)(((unsigned long)p + (MCLBYTES-1)) & ~(MCLBYTES-1));
+	if (p == NULL)
+		rtems_panic ("Can't get network cluster memory.");
+	mbutl = (struct mbuf *)p;
+	for (i = 0; i < nmbclusters; i++) {
+		((union mcluster *)p)->mcl_next = mclfree;
+		mclfree = (union mcluster *)p;
+		p += MCLBYTES;
+		mbstat.m_clfree++;
+	}
+	mbstat.m_clusters = nmbclusters;
 	mclrefcnt = malloc (nmbclusters);
 	if (mclrefcnt == NULL)
-		rtems_panic ("No memory for mbuf cluster reference counts.");
+		rtems_panic ("Can't get mbuf cluster reference counts memory.");
 	memset (mclrefcnt, '\0', nmbclusters);
-	m_mballoc (nmbuf, M_DONTWAIT);
-	mbstat.m_mtypes[MT_FREE] = nmbuf;
+
+	/*
+	 * Set up mbuf data structures
+	 */
+
+	p = malloc(nmbuf * MSIZE);
+	if (p == NULL)
+		rtems_panic ("Can't get network memory.");
+	for (i = 0; i < nmbuf; i++) {
+		((struct mbuf *)p)->m_next = mmbfree;
+		mmbfree = (struct mbuf *)p;
+		p += MSIZE;
+	}
+	mbstat.m_mbufs = nmbuf;
+	mbstat.m_mtypes[MT_FREE] = nmbuf;
 
 	/*
 	 * Set up domains

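
Not part of the patch, but worth spelling out: the cluster setup above over-allocates
by MCLBYTES-1 bytes and then rounds the pointer up to an MCLBYTES boundary. The idiom
works for any power-of-two boundary. A minimal standalone sketch, with a hypothetical
ALIGNMENT constant standing in for MCLBYTES:

#include <assert.h>
#include <stdlib.h>

#define ALIGNMENT 2048	/* stand-in for MCLBYTES; must be a power of two */

int
main(void)
{
	/* Over-allocate so an aligned block of the requested size still fits. */
	char *raw = malloc(4096 + ALIGNMENT - 1);
	char *aligned;

	assert(raw != NULL);
	/* Round up: add ALIGNMENT-1, then clear the low-order address bits. */
	aligned = (char *)(((unsigned long)raw + (ALIGNMENT - 1))
			   & ~(unsigned long)(ALIGNMENT - 1));
	assert(((unsigned long)aligned & (ALIGNMENT - 1)) == 0);
	free(raw);	/* free() needs the original pointer, not the aligned one */
	return 0;
}

Note that bsd_init() keeps only the aligned pointer, which is harmless there because
the pool lives for the lifetime of the system. The NULL check after the rounding is
also safe: rounding a NULL (zero) pointer up still yields zero.
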
@@ -662,42 +684,9 @@ rtems_bsdnet_log (int priority, const char *fmt, ...)
 	}
 }
 
-/*
- * Hack alert: kmem_malloc `knows' that its
- * first invocation is to get mbuf clusters!
- */
-int mb_map_full;
-vm_map_t mb_map;
-vm_offset_t
-kmem_malloc (vm_map_t *map, vm_size_t size, boolean_t waitflag)
-{
-	void *p;
-
-	/*
-	 * Can't get memory if we're already running.
-	 */
-	if (networkDaemonTid) {
-		if (waitflag == M_WAITOK)
-			rtems_panic (
-"Network mbuf space can not be enlarged after rtems_bsdnet_initialize() has\n"
-"returned. Enlarge the initial mbuf/cluster size in rtems_bsdnet_config.");
-		return 0;
-	}
-
-#define ROUNDSIZE 2048
-	p = malloc (size+ROUNDSIZE);
-	p = (void *)((unsigned long)p & ~(ROUNDSIZE-1));
-	if ((p == NULL) && (waitflag == M_WAITOK))
-		rtems_panic ("Can't get initial network memory!");
-	if (mbutl == NULL)
-		mbutl = p;
-	return (vm_offset_t)p;
-}
-
 /*
  * IP header checksum routine for processors which don't have an inline version
  */
 
 u_int
 in_cksum_hdr (const void *ip)
 {

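
The panic text removed above still carries the operative advice: after this patch the
pools are sized once, from rtems_bsdnet_config, and the allocators added below can
only wait for mbufs to be freed, never grow the pools. A sketch of such sizing,
assuming the mbuf_bytecount and mbuf_cluster_bytecount fields of struct
rtems_bsdnet_config as declared in <rtems/rtems_bsdnet.h>:

#include <rtems/rtems_bsdnet.h>

/*
 * Sketch, not part of this commit: enlarge the initial pools so the
 * polling allocators rarely, if ever, have to wait.  Field names are
 * assumed from <rtems/rtems_bsdnet.h>; the values are byte counts.
 */
struct rtems_bsdnet_config rtems_bsdnet_config = {
	.ifconfig               = NULL,        /* interface chain, set up elsewhere */
	.mbuf_bytecount         = 64 * 1024,   /* space for mbufs */
	.mbuf_cluster_bytecount = 128 * 1024,  /* space for mbuf clusters */
};
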
@@ -926,3 +915,57 @@ rtems_bsdnet_parse_driver_name (const struct rtems_bsdnet_ifconfig *config, char
 	printf ("Bad network driver name `%s'", config->name);
 	return -1;
 }
+
+/*
+ * Handle requests for more network memory
+ * XXX: Another possibility would be to use a semaphore here with
+ *      a release in the mbuf free macro.  I have chosen this `polling'
+ *      approach because:
+ *          1) It is simpler.
+ *          2) It adds no complexity to the free macro.
+ *          3) Running out of mbufs should be a rare
+ *             condition -- predeployment testing of
+ *             an application should indicate the
+ *             required mbuf pool size.
+ * XXX: Should there be a panic if a task is stuck in the loop for
+ *      more than a minute or so?
+ */
+int
+m_mballoc (int nmb, int nowait)
+{
+	if (nowait)
+		return 0;
+	m_reclaim ();
+	if (mmbfree == NULL) {
+		mbstat.m_wait++;
+		do {
+			rtems_bsdnet_semaphore_release ();
+			rtems_task_wake_after (1);
+			rtems_bsdnet_semaphore_obtain ();
+		} while (mmbfree == NULL);
+	}
+	else {
+		mbstat.m_drops++;
+	}
+	return 1;
+}
+
+int
+m_clalloc(ncl, nowait)
+{
+	if (nowait)
+		return 0;
+	m_reclaim ();
+	if (mclfree == NULL) {
+		mbstat.m_wait++;
+		do {
+			rtems_bsdnet_semaphore_release ();
+			rtems_task_wake_after (1);
+			rtems_bsdnet_semaphore_obtain ();
+		} while (mclfree == NULL);
+	}
+	else {
+		mbstat.m_drops++;
+	}
+	return 1;
+}

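
The upshot for callers, sketched below (not part of the patch): M_DONTWAIT requests
now fail cleanly with a NULL mbuf, while M_WAIT requests spin in the polling loop
above, releasing the network semaphore each pass so other tasks can free mbufs,
instead of panicking the system.

#include <sys/param.h>
#include <sys/mbuf.h>

/* Sketch of the caller-side contract after this change. */
struct mbuf *
try_get_mbuf (void)
{
	struct mbuf *m;

	/* With M_WAIT instead, MGET would block (poll) until space appears. */
	MGET(m, M_DONTWAIT, MT_DATA);	/* never blocks, never panics */
	if (m == NULL)
		return NULL;	/* pool exhausted: drop the packet or retry later */
	return m;
}
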
@@ -410,6 +410,7 @@ int	m_clalloc __P((int, int));
 void	m_copyback __P((struct mbuf *, int, int, caddr_t));
 void	m_copydata __P((struct mbuf *,int,int,caddr_t));
 void	m_freem __P((struct mbuf *));
+void	m_reclaim __P((void));
 
 #ifdef MBTYPES
 int mbtypes[] = {			/* XXX */