telnetd: Create sessions at start

Update #3543.
Sebastian Huber
2018-10-10 10:40:21 +02:00
parent 0f0e130051
commit 0dc303f09d
5 changed files with 299 additions and 248 deletions

View File

@@ -34,7 +34,16 @@ typedef struct {
char name[sizeof("/dev/pty18446744073709551615")];
} rtems_pty_context;
char *telnet_get_pty(rtems_pty_context *ctx, int socket);
const char *rtems_pty_initialize(rtems_pty_context *pty, uintptr_t unique);
RTEMS_INLINE_ROUTINE const char *rtems_pty_get_path(const rtems_pty_context *pty)
{
return pty->name;
}
void rtems_pty_close_socket(rtems_pty_context *pty);
void rtems_pty_set_socket(rtems_pty_context *pty, int socket);
#ifdef __cplusplus
}
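The header now replaces telnet_get_pty() with an explicit lifecycle: rtems_pty_initialize() installs the device once, rtems_pty_set_socket() attaches an accepted connection to it, and rtems_pty_close_socket() detaches the connection again without removing the device. A minimal sketch of that lifecycle, assuming the caller obtains the connection socket elsewhere (the function name, the unique value 0 and the surrounding control flow are illustrative, not part of the patch):

#include <rtems/pty.h>

/* Sketch only: intended use of the reworked PTY API declared above. */
static rtems_pty_context example_pty;

static void example_pty_lifecycle(int connection_socket)
{
  const char *path;

  /* Install the device once, typically at server start */
  path = rtems_pty_initialize(&example_pty, 0);
  if (path == NULL) {
    return;
  }

  /* Attach a freshly accepted connection to the existing device */
  rtems_pty_set_socket(&example_pty, connection_socket);

  /* ... open rtems_pty_get_path(&example_pty) and run the session ... */

  /* Detach the connection; the device stays installed for the next one */
  rtems_pty_close_socket(&example_pty);
}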

View File

@@ -94,26 +94,37 @@ int send_iac(rtems_pty_context *pty, unsigned char mode, unsigned char option)
return write(pty->socket, buf, sizeof(buf));
}
/* This procedure returns the device name of a free pty slot.
 * If no slot is available (field socket >= 0),
 * the socket argument is closed.
 */
char *telnet_get_pty(rtems_pty_context *pty, int socket)
const char *rtems_pty_initialize(rtems_pty_context *pty, uintptr_t unique)
{
rtems_status_code sc;
struct timeval t;
memset(pty, 0, sizeof(*pty));
snprintf(pty->name, sizeof(pty->name), "/dev/pty%" PRIuPTR, (uintptr_t)pty);
(void)snprintf(pty->name, sizeof(pty->name), "/dev/pty%" PRIuPTR, unique);
rtems_termios_device_context_initialize(&pty->base, "pty");
pty->socket = socket;
pty->socket = -1;
sc = rtems_termios_device_install(pty->name, &pty_handler, NULL, &pty->base);
if (sc != RTEMS_SUCCESSFUL) {
close(socket);
return NULL;
}
return pty->name;
}
void rtems_pty_close_socket(rtems_pty_context *pty)
{
if (pty->socket >= 0) {
(void)close(pty->socket);
pty->socket = -1;
}
}
void rtems_pty_set_socket(rtems_pty_context *pty, int socket)
{
struct timeval t;
rtems_pty_close_socket(pty);
pty->socket = socket;
/* set a long polling interval to save CPU time */
t.tv_sec=2;
t.tv_usec=00000;
@@ -121,8 +132,6 @@ char *telnet_get_pty(rtems_pty_context *pty, int socket)
/* inform the client that we will echo */
send_iac(pty, IAC_WILL, 1);
return pty->name;
}
/*-----------------------------------------------------------*/
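For context, the "will echo" announcement above is plain telnet option negotiation: send_iac(pty, IAC_WILL, 1) puts the bytes IAC (255), WILL (251) and option 1 (ECHO, RFC 857) on the wire, telling the client that the server side echoes its input. A rough stand-alone equivalent, spelled out with the raw byte values (the helper name and the direct write are illustrative, not how the driver is structured):

#include <unistd.h>

/* Same announcement as send_iac(pty, IAC_WILL, 1), for clarity only. */
static void example_announce_will_echo(int connection_socket)
{
  static const unsigned char will_echo[] = { 255, 251, 1 }; /* IAC, WILL, ECHO */

  (void)write(connection_socket, will_echo, sizeof(will_echo));
}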

View File

@@ -23,13 +23,13 @@
* possible to have 'telnetd' run an arbitrary 'shell'
* program.
*
* Copyright (c) 2009 embedded brains GmbH and others.
* Copyright (c) 2009, 2018 embedded brains GmbH and others.
*
* embedded brains GmbH
* Obere Lagerstr. 30
* D-82178 Puchheim
* Germany
* <rtems@embedded-brains.de>
* embedded brains GmbH
* Dornierstr. 4
* D-82178 Puchheim
* Germany
* <rtems@embedded-brains.de>
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
@@ -40,6 +40,7 @@
#include "config.h"
#endif
#include <sys/queue.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
@@ -54,201 +55,226 @@
#include <rtems/pty.h>
#include <rtems/shell.h>
#include <rtems/telnetd.h>
#include <rtems/thread.h>
#include <rtems/userenv.h>
#ifdef RTEMS_NETWORKING
#include <rtems/rtems_bsdnet.h>
#endif
#define PARANOIA
#define TELNETD_EVENT_SUCCESS RTEMS_EVENT_0
#define TELNETD_EVENT_ERROR RTEMS_EVENT_1
typedef struct telnetd_context telnetd_context;
typedef struct telnetd_session {
rtems_pty_context pty;
char peername[16];
telnetd_context *ctx;
rtems_pty_context pty;
char peername[16];
telnetd_context *ctx;
rtems_id task_id;
LIST_ENTRY(telnetd_session) link;
} telnetd_session;
struct telnetd_context {
rtems_telnetd_config_table config;
int server_socket;
uint16_t active_clients;
rtems_telnetd_config_table config;
int server_socket;
rtems_id task_id;
rtems_mutex mtx;
LIST_HEAD(, telnetd_session) free_sessions;
telnetd_session sessions[RTEMS_ZERO_LENGTH_ARRAY];
};
typedef union uni_sa {
typedef union {
struct sockaddr_in sin;
struct sockaddr sa;
} uni_sa;
struct sockaddr sa;
} telnetd_address;
static telnetd_session *grab_a_Connection(telnetd_context *ctx)
{
telnetd_session *session;
uni_sa peer;
socklen_t address_len;
int acp_sock;
if (ctx->active_clients >= ctx->config.client_maximum) {
return NULL;
}
session = malloc(sizeof(*session));
if (session == NULL) {
perror("telnetd:malloc");
return NULL;
}
address_len = sizeof(peer.sin);
acp_sock = accept(ctx->server_socket, &peer.sa, &address_len);
if (acp_sock<0) {
perror("telnetd:accept");
free(session);
return NULL;
};
if (telnet_get_pty(&session->pty, acp_sock) == NULL) {
syslog( LOG_DAEMON | LOG_ERR, "telnetd: unable to obtain PTY");
/* NOTE: failing 'do_get_pty()' closed the socket */
free(session);
return NULL;
}
if (
inet_ntop(
AF_INET,
&peer.sin.sin_addr,
session->peername,
sizeof(session->peername)
) == NULL
) {
strlcpy(session->peername, "<UNKNOWN>", sizeof(session->peername));
}
#ifdef PARANOIA
syslog(LOG_DAEMON | LOG_INFO,
"telnetd: accepted connection from %s on %s",
session->peername,
session->pty.name);
#endif
++ctx->active_clients;
session->ctx = ctx;
return session;
}
static void release_a_Connection(
telnetd_context *ctx,
telnetd_session *session,
FILE **pstd,
int n
RTEMS_NO_RETURN static void telnetd_session_fatal_error(
const telnetd_context *ctx
)
{
#ifdef PARANOIA
syslog(
LOG_DAEMON | LOG_INFO,
"telnetd: releasing connection from %s on %s",
session->peername,
session->pty.name
(void)rtems_event_send(ctx->task_id, TELNETD_EVENT_ERROR);
rtems_task_exit();
}
static bool telnetd_login(telnetd_context *ctx, telnetd_session *session)
{
bool success;
if (ctx->config.login_check == NULL) {
return true;
}
success = rtems_shell_login_prompt(
stdin,
stderr,
session->pty.name,
ctx->config.login_check
);
#endif
--ctx->active_clients;
if (!success) {
syslog(
LOG_AUTHPRIV | LOG_WARNING,
"telnetd: too many wrong passwords entered from %s",
session->peername
);
}
while (--n>=0)
if (pstd[n]) fclose(pstd[n]);
unlink(session->pty.name);
return success;
}
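telnetd_login() above only drives rtems_shell_login_prompt(); the credential decision comes from the login_check callback supplied in the daemon configuration. A hedged sketch of such a callback, assuming the usual (const char *user, const char *passphrase) signature; the hard-coded account is purely illustrative, and a real system would normally plug in an existing check from the shell support code:

#include <stdbool.h>
#include <string.h>

/* Example login_check callback as consumed by rtems_shell_login_prompt();
 * accepts a single fixed account, for illustration only.
 */
static bool example_login_check(const char *user, const char *passphrase)
{
  return strcmp(user, "rtems") == 0 && strcmp(passphrase, "secret") == 0;
}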
static rtems_id telnetd_spawn_task(
rtems_name name,
rtems_task_priority priority,
size_t stack_size,
rtems_task_entry entry,
void *arg
)
static void telnetd_session_task(rtems_task_argument arg)
{
rtems_status_code sc;
rtems_id task_id;
telnetd_session *session;
telnetd_context *ctx;
const char *path;
sc = rtems_task_create(
name,
priority,
stack_size,
RTEMS_DEFAULT_MODES,
RTEMS_FLOATING_POINT,
&task_id
);
session = (telnetd_session *) arg;
ctx = session->ctx;
sc = rtems_libio_set_private_env();
if (sc != RTEMS_SUCCESSFUL) {
return RTEMS_ID_NONE;
telnetd_session_fatal_error(ctx);
}
(void)rtems_task_start(task_id, entry, (rtems_task_argument) arg);
return task_id;
path = rtems_pty_get_path(&session->pty);
stdin = fopen(path, "r+");
if (stdin == NULL) {
telnetd_session_fatal_error(ctx);
}
stdout = fopen(path, "r+");
if (stdout == NULL) {
telnetd_session_fatal_error(ctx);
}
stderr = fopen(path, "r+");
if (stderr == NULL) {
telnetd_session_fatal_error(ctx);
}
(void)rtems_event_send(ctx->task_id, TELNETD_EVENT_SUCCESS);
while (true) {
rtems_event_set events;
(void)rtems_event_system_receive(
RTEMS_EVENT_SYSTEM_SERVER,
RTEMS_WAIT | RTEMS_EVENT_ALL,
RTEMS_NO_TIMEOUT,
&events
);
syslog(
LOG_DAEMON | LOG_INFO,
"telnetd: accepted connection from %s on %s",
session->peername,
path
);
if (telnetd_login(ctx, session)) {
(*ctx->config.command)(session->pty.name, ctx->config.arg);
}
syslog(
LOG_DAEMON | LOG_INFO,
"telnetd: releasing connection from %s on %s",
session->peername,
path
);
rtems_pty_close_socket(&session->pty);
rtems_mutex_lock(&ctx->mtx);
LIST_INSERT_HEAD(&ctx->free_sessions, session, link);
rtems_mutex_unlock(&ctx->mtx);
}
}
static void
telnetd_session_task(rtems_task_argument arg);
/***********************************************************/
static void
telnetd_server_task(rtems_task_argument arg)
static void telnetd_sleep_after_error(void)
{
telnetd_session *session = NULL;
rtems_id task_id;
telnetd_context *ctx = (telnetd_context *) arg;
/* If something went wrong, sleep for some time */
rtems_task_wake_after(10 * rtems_clock_get_ticks_per_second());
}
/* we don't redirect stdio as this probably
* was started from the console anyway ..
*/
do {
session = grab_a_Connection(ctx);
static void telnetd_server_task(rtems_task_argument arg)
{
telnetd_context *ctx;
ctx = (telnetd_context *) arg;
while (true) {
telnetd_address peer;
socklen_t address_len;
int session_socket;
telnetd_session *session;
address_len = sizeof(peer.sin);
session_socket = accept(ctx->server_socket, &peer.sa, &address_len);
if (session_socket < 0) {
syslog(LOG_DAEMON | LOG_ERR, "telnetd: cannot accept session");
telnetd_sleep_after_error();
continue;
};
rtems_mutex_lock(&ctx->mtx);
session = LIST_FIRST(&ctx->free_sessions);
if (session == NULL) {
/* if something went wrong, sleep for some time */
sleep(10);
rtems_mutex_unlock(&ctx->mtx);
(void)close(session_socket);
syslog(LOG_DAEMON | LOG_ERR, "telnetd: no free session available");
telnetd_sleep_after_error();
continue;
}
task_id = telnetd_spawn_task(
rtems_build_name('T', 'N', 'T', 'a'),
ctx->config.priority,
ctx->config.stack_size,
telnetd_session_task,
session
);
if (task_id == RTEMS_ID_NONE) {
FILE *dummy;
LIST_REMOVE(session, link);
rtems_mutex_unlock(&ctx->mtx);
/* hmm - the pty driver slot can only be
* released by opening and subsequently
* closing the PTY - this also closes
* the underlying socket. So we mock up
* a stream...
*/
rtems_pty_set_socket(&session->pty, session_socket);
if ( !(dummy=fopen(session->pty.name,"r+")) )
perror("Unable to dummy open the pty, losing a slot :-(");
release_a_Connection(ctx, session, &dummy, 1);
free(session);
sleep(2); /* don't accept connections too fast */
if (
inet_ntop(
AF_INET,
&peer.sin.sin_addr,
session->peername,
sizeof(session->peername)
) == NULL
) {
strlcpy(session->peername, "<UNKNOWN>", sizeof(session->peername));
}
} while(1);
(void)rtems_event_system_send(session->task_id, RTEMS_EVENT_SYSTEM_SERVER);
}
}
static void telnetd_destroy_context(telnetd_context *ctx)
{
if (ctx->server_socket >= 0) {
close(ctx->server_socket);
telnetd_session *session;
LIST_FOREACH(session, &ctx->free_sessions, link) {
if (session->task_id != 0) {
(void)rtems_task_delete(session->task_id);
}
(void)unlink(rtems_pty_get_path(&session->pty));
}
if (ctx->server_socket >= 0) {
(void)close(ctx->server_socket);
}
rtems_mutex_destroy(&ctx->mtx);
free(ctx);
}
static rtems_status_code telnetd_create_server_socket(telnetd_context *ctx)
{
uni_sa srv;
telnetd_address srv;
socklen_t address_len;
int enable;
@@ -285,25 +311,94 @@ static rtems_status_code telnetd_create_server_socket(telnetd_context *ctx)
return RTEMS_SUCCESSFUL;
}
static rtems_status_code telnetd_create_session_tasks(telnetd_context *ctx)
{
uint16_t i;
ctx->task_id = rtems_task_self();
for (i = 0; i < ctx->config.client_maximum; ++i) {
telnetd_session *session;
rtems_status_code sc;
const char *path;
rtems_event_set events;
session = &ctx->sessions[i];
session->ctx = ctx;
rtems_mutex_init(&ctx->mtx, "Telnet");
LIST_INSERT_HEAD(&ctx->free_sessions, session, link);
sc = rtems_task_create(
rtems_build_name('T', 'N', 'T', 'a' + i % 26),
ctx->config.priority,
ctx->config.stack_size,
RTEMS_DEFAULT_MODES,
RTEMS_FLOATING_POINT,
&session->task_id
);
if (sc != RTEMS_SUCCESSFUL) {
syslog(LOG_DAEMON | LOG_ERR, "telnetd: cannot create session task");
return RTEMS_UNSATISFIED;
}
path = rtems_pty_initialize(&session->pty, i);
if (path == NULL) {
syslog(LOG_DAEMON | LOG_ERR, "telnetd: cannot create session PTY");
return RTEMS_UNSATISFIED;
}
(void)rtems_task_start(
session->task_id,
telnetd_session_task,
(rtems_task_argument) session
);
(void)rtems_event_receive(
TELNETD_EVENT_SUCCESS | TELNETD_EVENT_ERROR,
RTEMS_WAIT | RTEMS_EVENT_ANY,
RTEMS_NO_TIMEOUT,
&events
);
if ((events & TELNETD_EVENT_ERROR) != 0) {
syslog(LOG_DAEMON | LOG_ERR, "telnetd: cannot initialize session task");
return RTEMS_UNSATISFIED;
}
}
return RTEMS_SUCCESSFUL;
}
rtems_status_code rtems_telnetd_start(const rtems_telnetd_config_table* config)
{
telnetd_context *ctx;
rtems_id task_id;
rtems_status_code sc;
uint16_t client_maximum;
if (config->command == NULL) {
syslog(LOG_DAEMON | LOG_ERR, "telnetd: configuration with invalid command");
return RTEMS_INVALID_ADDRESS;
}
ctx = calloc(1, sizeof(*ctx));
if (config->client_maximum == 0) {
client_maximum = 5;
} else {
client_maximum = config->client_maximum;
}
ctx = calloc(
1,
sizeof(*ctx) + client_maximum * sizeof(ctx->sessions[0])
);
if (ctx == NULL) {
syslog(LOG_DAEMON | LOG_ERR, "telnetd: cannot allocate server context");
return RTEMS_UNSATISFIED;
}
ctx->config = *config;
ctx->config.client_maximum = client_maximum;
ctx->server_socket = -1;
LIST_INIT(&ctx->free_sessions);
/* Check priority */
#ifdef RTEMS_NETWORKING
@@ -320,101 +415,38 @@ rtems_status_code rtems_telnetd_start(const rtems_telnetd_config_table* config)
ctx->config.stack_size = (size_t)32 * 1024;
}
if (ctx->config.client_maximum == 0) {
ctx->config.client_maximum = 5;
}
sc = telnetd_create_server_socket(ctx);
if (sc != RTEMS_SUCCESSFUL) {
telnetd_destroy_context(ctx);
return sc;
}
task_id = telnetd_spawn_task(
sc = telnetd_create_session_tasks(ctx);
if (sc != RTEMS_SUCCESSFUL) {
telnetd_destroy_context(ctx);
return sc;
}
sc = rtems_task_create(
rtems_build_name('T', 'N', 'T', 'D'),
ctx->config.priority,
RTEMS_MINIMUM_STACK_SIZE,
telnetd_server_task,
ctx
RTEMS_DEFAULT_MODES,
RTEMS_FLOATING_POINT,
&ctx->task_id
);
if (task_id == RTEMS_ID_NONE) {
ctx->config.command = NULL;
if (sc != RTEMS_SUCCESSFUL) {
syslog(LOG_DAEMON | LOG_ERR, "telnetd: cannot create server task");
telnetd_destroy_context(ctx);
return RTEMS_UNSATISFIED;
}
(void)rtems_task_start(
ctx->task_id,
telnetd_server_task,
(rtems_task_argument) ctx
);
syslog(LOG_DAEMON | LOG_INFO, "telnetd: started successfully");
return RTEMS_SUCCESSFUL;
}
/* utility wrapper */
static void
telnetd_session_task(rtems_task_argument arg)
{
rtems_status_code sc;
FILE *nstd[3]={0};
FILE *ostd[3]={ stdin, stdout, stderr };
int i=0;
telnetd_session *session = (telnetd_session *) arg;
telnetd_context *ctx = session->ctx;
bool login_failed = false;
bool start = true;
sc=rtems_libio_set_private_env();
/* newlib hack/workaround. Before we change stdin/out/err we must make
* sure the internal data are initialized (fileno(stdout) has this side effect).
* This should probably be done from RTEMS' libc support layer...
* (T.S., newlibc-1.13; 2005/10)
*/
fileno(stdout);
if (RTEMS_SUCCESSFUL != sc) {
rtems_error(sc,"rtems_libio_set_private_env");
goto cleanup;
}
/* redirect stdio */
for (i=0; i<3; i++) {
if ( !(nstd[i]=fopen(session->pty.name,"r+")) ) {
perror("unable to open stdio");
goto cleanup;
}
}
stdin = nstd[0];
stdout = nstd[1];
stderr = nstd[2];
/* call their routine */
if (ctx->config.login_check != NULL) {
start = rtems_shell_login_prompt(
stdin,
stderr,
session->pty.name,
ctx->config.login_check
);
login_failed = !start;
}
if (start) {
ctx->config.command( session->pty.name, ctx->config.arg);
}
stdin = ostd[0];
stdout = ostd[1];
stderr = ostd[2];
if (login_failed) {
syslog(
LOG_AUTHPRIV | LOG_WARNING,
"telnetd: to many wrong passwords entered from %s",
session->peername
);
}
cleanup:
release_a_Connection(ctx, session, nstd, i);
free(session);
}
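Taken together, an application still brings the daemon up the same way as before: fill in a rtems_telnetd_config_table and call rtems_telnetd_start(); only the internal session handling changed. A minimal start-up sketch using the configuration fields referenced in this file (command, arg, priority, stack_size, login_check, client_maximum); the command body and the concrete values are assumptions, and any fields not listed stay zero-initialized:

#include <stdio.h>
#include <rtems.h>
#include <rtems/telnetd.h>

/* Runs once per accepted connection; the daemon has already redirected
 * stdin/stdout/stderr of the session task to the session PTY.
 */
static void example_telnet_command(char *pty_path, void *arg)
{
  (void)arg;
  printf("hello from %s\n", pty_path);
}

static void example_start_telnetd(void)
{
  static const rtems_telnetd_config_table example_config = {
    .command = example_telnet_command,
    .arg = NULL,
    .priority = 100,         /* example value */
    .stack_size = 32 * 1024,
    .login_check = NULL,     /* skip the login prompt */
    .client_maximum = 5      /* 0 falls back to 5, see above */
  };
  rtems_status_code sc;

  sc = rtems_telnetd_start(&example_config);
  if (sc != RTEMS_SUCCESSFUL) {
    printf("telnetd: start failed\n");
  }
}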

View File

@@ -105,9 +105,11 @@ static rtems_task Init(rtems_task_argument argument)
#define CONFIGURE_APPLICATION_NEEDS_CLOCK_DRIVER
#define CONFIGURE_APPLICATION_NEEDS_SIMPLE_CONSOLE_DRIVER
#define CONFIGURE_LIBIO_MAXIMUM_FILE_DESCRIPTORS 32
#define CONFIGURE_LIBIO_MAXIMUM_FILE_DESCRIPTORS (3 + 1 + 5 * 4)
#define CONFIGURE_MAXIMUM_TASKS 7
#define CONFIGURE_MAXIMUM_TASKS 8
#define CONFIGURE_MAXIMUM_POSIX_KEYS 1
#define CONFIGURE_INITIAL_EXTENSIONS RTEMS_TEST_INITIAL_EXTENSION
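The reworked descriptor budget presumably breaks down as 3 descriptors for the console stdio, 1 for the listening server socket, and 4 per session (the accepted connection socket plus the three stdio streams the session task opens on its PTY), multiplied by the default client maximum of 5; this reading is inferred from the code above, not stated in the patch.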

View File

@@ -4,9 +4,8 @@
*** TEST BUILD: RTEMS_NETWORKING
*** TEST TOOLS: 7.3.0 20180125 (RTEMS 5, RSB 9670d7541e0621915e521fe76e7bb33de8cee661, Newlib d13c84eb07e35984bf7a974cd786a6cdac29e6b9)
syslog: telnetd: configuration with invalid command
Telnetd: spawning task failed (status: RTEMS_INVALID_PRIORITY)
syslog: telnetd: cannot create server task
syslog: telnetd: cannot create session task
syslog: telnetd: started successfully
syslog: telnetd: already started
syslog: telnetd: cannot bind server socket
*** END OF TEST TELNETD 1 ***