36#if defined(HTTPS_SUPPORT) && defined(UPGRADE_SUPPORT)
/* NOTE(review): gappy excerpt -- the original file's line numbers ("47",
 * "53", ...) are fused into the text and intermediate lines are missing. */
/* Compute poll() 'events' masks for the two sockets of an upgraded
 * connection.  Pairing matches urh_to_pollfd(): p[0] is the client
 * connection socket, p[1] the MHD-side socket -- TODO confirm against
 * the full source. */
47urh_update_pollfd (
struct MHD_UpgradeResponseHandle *urh,
/* p[0]: poll for read only while the inbound buffer has free room ... */
53 if (urh->in_buffer_used < urh->in_buffer_size)
54 p[0].events |= POLLIN;
/* ... and for write only when outbound data is pending. */
55 if (0 != urh->out_buffer_used)
56 p[0].events |= POLLOUT;
/* Watch p[0] for error/disconnect while any buffer capacity or pending
 * outbound data remains (the condition's first operand is not visible
 * in this excerpt). */
61 ((0 != urh->in_buffer_size) ||
62 (0 != urh->out_buffer_size) ||
63 (0 != urh->out_buffer_used)))
64 p[0].events |= MHD_POLL_EVENTS_ERR_DISC;
/* p[1]: mirror image of p[0] -- read while the outbound buffer has
 * room, write when inbound data waits to be forwarded. */
66 if (urh->out_buffer_used < urh->out_buffer_size)
67 p[1].events |= POLLIN;
68 if (0 != urh->in_buffer_used)
69 p[1].events |= POLLOUT;
/* Error/disconnect interest for p[1], symmetric to the p[0] case above
 * (first operand of the condition likewise missing from the excerpt). */
74 ((0 != urh->out_buffer_size) ||
75 (0 != urh->in_buffer_size) ||
76 (0 != urh->in_buffer_used)))
77 p[1].events |= MHD_POLL_EVENTS_ERR_DISC;
/* Fill a two-element pollfd set for an upgraded connection:
 * p[0] tracks the client connection's socket, p[1] the MHD-side
 * socket; the 'events' masks are delegated to urh_update_pollfd().
 * (Excerpt: parameter list and call arguments are truncated.) */
88urh_to_pollfd (
struct MHD_UpgradeResponseHandle *urh,
91 p[0].fd = urh->connection->socket_fd;
92 p[1].fd = urh->mhd.socket;
93 urh_update_pollfd (urh,
/* Translate poll() 'revents' results back into the urh's epoll-style
 * connection-event state ('celi' bit sets).  The statement executed on
 * each condition is not visible in this excerpt. */
104urh_from_pollfd (
struct MHD_UpgradeResponseHandle *urh,
/* Drop stale READ/WRITE readiness bits before re-deriving them from
 * the fresh revents below. */
108 urh->app.celi &= (~MHD_EPOLL_STATE_READ_READY & ~MHD_EPOLL_STATE_WRITE_READY);
109 urh->mhd.celi &= (~MHD_EPOLL_STATE_READ_READY & ~MHD_EPOLL_STATE_WRITE_READY);
/* p[0] (client-side socket): readable / writable / hang-up / error.
 * NOTE(review): "MHD_POLL_REVENTS_ERRROR" (triple R) is used
 * consistently here -- presumably the upstream macro's real spelling,
 * not a typo; confirm against mhd_sockets.h before "fixing". */
111 if (0 != (p[0].revents & POLLIN))
113 if (0 != (p[0].revents & POLLOUT))
115 if (0 != (p[0].revents & POLLHUP))
117 if (0 != (p[0].revents & MHD_POLL_REVENTS_ERRROR))
/* p[1] (MHD-side socket): the same four checks. */
119 if (0 != (p[1].revents & POLLIN))
121 if (0 != (p[1].revents & POLLOUT))
123 if (0 != (p[1].revents & POLLHUP))
125 if (0 != (p[1].revents & MHD_POLL_REVENTS_ERRROR))
/* Run one poll()-based event-loop iteration over ALL daemon sockets:
 * listen socket, inter-thread channel (ITC), regular connections and
 * (under TLS upgrade support) the two sockets of each upgraded
 * connection.  Heavily truncated excerpt -- comments below only cover
 * what is visible. */
142MHD_daemon_poll_all_ (
struct MHD_Daemon *daemon,
145 unsigned int num_connections;
148#if defined(HTTPS_SUPPORT) && defined(UPGRADE_SUPPORT)
149 struct MHD_UpgradeResponseHandle *urh;
150 struct MHD_UpgradeResponseHandle *urhn;
/* Each upgraded handle contributes TWO pollfd slots (client socket and
 * MHD-side socket -- see urh_to_pollfd()). */
161#if defined(HTTPS_SUPPORT) && defined(UPGRADE_SUPPORT)
162 for (urh = daemon->urh_head;
NULL != urh; urh = urh->next)
163 num_connections += 2;
169 unsigned int poll_server;
/* Allocate the pollfd array (allocation call truncated in excerpt);
 * on failure log and bail out with MHD_SC_POLL_MALLOC_FAILURE. */
176 sizeof (
struct pollfd));
181 MHD_SC_POLL_MALLOC_FAILURE,
182 _ (
"Error allocating memory: %s\n"),
185 return MHD_SC_POLL_MALLOC_FAILURE;
/* Register the listen socket (fd 'ls', defined in an elided line) for
 * POLLIN and remember its slot index. */
195 p[poll_server].fd = ls;
196 p[poll_server].events = POLLIN;
197 p[poll_server].revents = 0;
198 poll_listen = (int) poll_server;
/* Register the inter-thread channel's read end, if one exists, so
 * other threads can wake this poll(). */
202 if (MHD_ITC_IS_VALID_ (daemon->
itc))
204 p[poll_server].fd = MHD_itc_r_fd_ (daemon->
itc);
205 p[poll_server].events = POLLIN;
206 p[poll_server].revents = 0;
207 poll_itc_idx = (int) poll_server;
/* Timeout selection (mostly elided); clamp the computed long long
 * timeout to INT_MAX since poll() takes an int of milliseconds. */
212 else if ( (MHD_TM_THREAD_PER_CONNECTION == daemon->
threading_mode) ||
218 timeout = (ltimeout > INT_MAX) ? INT_MAX : (int) ltimeout;
/* Per-connection event interest (the per-connection loop and its
 * branch conditions are elided; presumably keyed off each
 * connection's event_loop_info -- TODO confirm). */
227 p[poll_server + i].events |= POLLIN | MHD_POLL_EVENTS_ERR_DISC;
230 p[poll_server + i].events |= POLLOUT | MHD_POLL_EVENTS_ERR_DISC;
233 p[poll_server + i].events |= MHD_POLL_EVENTS_ERR_DISC;
/* Append pollfd pairs for upgraded handles, walking tail-to-head. */
241#if defined(HTTPS_SUPPORT) && defined(UPGRADE_SUPPORT)
242 for (urh = daemon->urh_tail;
NULL != urh; urh = urh->prev)
245 &(p[poll_server + i]));
/* Nothing to wait on at all?  (Consequent elided.) */
249 if (0 == poll_server + num_connections)
/* The actual poll() call; on failure (EINTR handling elided) log and
 * return MHD_SC_UNEXPECTED_POLL_ERROR. */
254 if (MHD_sys_poll_ (p,
255 poll_server + num_connections,
266 MHD_SC_UNEXPECTED_POLL_ERROR,
267 _ (
"poll failed: %s\n"),
271 return MHD_SC_UNEXPECTED_POLL_ERROR;
/* Drain the wake-up channel if it fired. */
280 if ( (-1 != poll_itc_idx) &&
281 (0 != (p[poll_itc_idx].revents & POLLIN)) )
282 MHD_itc_clear_ (daemon->
itc);
288 return MHD_SC_DAEMON_ALREADY_SHUTDOWN;
/* Walk connections (list traversal partially elided; iterating via a
 * saved 'prev' pointer, presumably so handlers may remove 'pos' from
 * the list safely -- TODO confirm). */
292 while (
NULL != (pos = prev))
/* Defensive bound: never read past the slots we filled. */
296 if (i >= num_connections)
/* Dispatch readiness to the connection's handlers (call site elided;
 * arguments are the POLLIN / POLLOUT(?) / error-disconnect bits). */
301 0 != (p[poll_server + i].revents & POLLIN),
302 0 != (p[poll_server + i].revents
304 0 != (p[poll_server + i].revents
305 & MHD_POLL_REVENTS_ERR_DISC));
/* Process upgraded handles; 'urhn' caches the next pointer (assignment
 * elided) because processing may detach 'urh' from the list. */
308#if defined(HTTPS_SUPPORT) && defined(UPGRADE_SUPPORT)
309 for (urh = daemon->urh_tail;
NULL != urh; urh = urhn)
311 if (i >= num_connections)
/* Sanity check: pollfd slots must still match this urh's two sockets
 * (mismatch consequent elided). */
318 if ((p[poll_server + i].
fd != urh->connection->socket_fd) ||
319 (p[poll_server + i + 1].fd != urh->mhd.socket))
321 urh_from_pollfd (urh,
322 &p[poll_server + i]);
324 MHD_upgrade_response_handle_process_ (urh);
/* All four buffers empty and zero-sized => forwarding is finished;
 * mark the handle ready for cleanup. */
326 if ( (0 == urh->in_buffer_size) &&
327 (0 == urh->out_buffer_size) &&
328 (0 == urh->in_buffer_used) &&
329 (0 == urh->out_buffer_used) )
334 urh->clean_ready =
true;
/* Finally, accept new connections if the listen socket is readable
 * (accept call elided). */
344 if ( (-1 != poll_listen) &&
345 (0 != (p[poll_listen].revents & POLLIN)) )
/* poll() only the listen socket (plus the wake-up ITC) -- used when
 * connections are handled elsewhere, presumably in
 * thread-per-connection mode -- TODO confirm.  Truncated excerpt. */
362MHD_daemon_poll_listen_socket_ (
struct MHD_Daemon *daemon,
367 unsigned int poll_count;
/* Slot for the listen socket 'ls' (definition elided), POLLIN only. */
382 p[poll_count].fd = ls;
383 p[poll_count].events = POLLIN;
384 p[poll_count].revents = 0;
385 poll_listen = poll_count;
/* Slot for the inter-thread channel's read end, if present. */
388 if (MHD_ITC_IS_VALID_ (daemon->
itc))
390 p[poll_count].fd = MHD_itc_r_fd_ (daemon->
itc);
391 p[poll_count].events = POLLIN;
392 p[poll_count].revents = 0;
393 poll_itc_idx = poll_count;
/* poll() call (arguments truncated); on failure log and return
 * MHD_SC_UNEXPECTED_POLL_ERROR -- same pattern as
 * MHD_daemon_poll_all_(). */
406 if (MHD_sys_poll_ (p,
416 MHD_SC_UNEXPECTED_POLL_ERROR,
417 _ (
"poll failed: %s\n"),
420 return MHD_SC_UNEXPECTED_POLL_ERROR;
/* Drain the wake-up channel if it fired. */
422 if ( (-1 != poll_itc_idx) &&
423 (0 != (p[poll_itc_idx].revents & POLLIN)) )
424 MHD_itc_clear_ (daemon->
itc);
428 return MHD_SC_DAEMON_ALREADY_SHUTDOWN;
/* Accept pending connections when the listen socket is readable
 * (accept call elided). */
429 if ( (-1 != poll_listen) &&
430 (0 != (p[poll_listen].revents & POLLIN)) )
/* NOTE(review): fragment with its function header elided -- presumably
 * the MHD_daemon_poll_(daemon, may_block) dispatcher listed in this
 * file's symbol index: bail out if already shut down, then route to
 * poll-all or listen-only mode; returns MHD_SC_POLL_NOT_SUPPORTED when
 * neither path applies (selection conditions not visible). */
452 return MHD_SC_DAEMON_ALREADY_SHUTDOWN;
454 return MHD_daemon_poll_all_ (daemon,
456 return MHD_daemon_poll_listen_socket_ (daemon,
461 return MHD_SC_POLL_NOT_SUPPORTED;
/* Dedicated poll() loop that forwards data for a single upgraded
 * connection until all its buffers are drained and released --
 * presumably used in thread-per-connection mode, TODO confirm.
 * Truncated excerpt. */
475MHD_daemon_upgrade_connection_with_poll_ (
struct MHD_Connection *con)
477 struct MHD_UpgradeResponseHandle *urh = con->
request.urh;
/* Fixed two-slot pollfd set: client socket and MHD-side socket,
 * same pairing as urh_to_pollfd(). */
483 p[0].fd = urh->connection->socket_fd;
484 p[1].fd = urh->mhd.socket;
/* Keep looping while any buffer still has capacity or pending data;
 * the same four-way emptiness test marks completion in
 * MHD_daemon_poll_all_(). */
486 while ( (0 != urh->in_buffer_size) ||
487 (0 != urh->out_buffer_size) ||
488 (0 != urh->in_buffer_used) ||
489 (0 != urh->out_buffer_used) )
/* Refresh event interest each iteration from current buffer state. */
493 urh_update_pollfd (urh,
/* Partial condition (first operand elided) involving remaining
 * inbound buffer room. */
497 (urh->in_buffer_used < urh->in_buffer_size))
/* Blocking poll(); error path logs and presumably exits the loop
 * (handling elided). */
502 if (MHD_sys_poll_ (p,
512 MHD_SC_UNEXPECTED_POLL_ERROR,
513 _ (
"Error during poll: `%s'\n"),
/* Fold results back into the handle's state and run one forwarding
 * step. */
518 urh_from_pollfd (urh,
520 MHD_upgrade_response_handle_process_ (urh);
#define MHD_connection_finish_forward_(conn)
enum MHD_StatusCode MHD_accept_connection_(struct MHD_Daemon *daemon)
function to add a connection to our active set
int MHD_connection_call_handlers_(struct MHD_Connection *con, bool read_ready, bool write_ready, bool force_close)
function to call event handlers based on event mask
complete upgrade socket forwarding operation in TLS mode
enum MHD_StatusCode MHD_daemon_poll_(struct MHD_Daemon *daemon, bool may_block)
non-public functions provided by daemon_poll.c
enum MHD_StatusCode MHD_daemon_get_timeout(struct MHD_Daemon *daemon, MHD_UNSIGNED_LONG_LONG *timeout)
@ MHD_EPOLL_STATE_READ_READY
@ MHD_EPOLL_STATE_WRITE_READY
void * MHD_calloc_(size_t nelem, size_t elsize)
#define MHD_strerror_(errnum)
#define MHD_socket_strerr_(err)
#define MHD_socket_get_error_()
#define MHD_SCKT_ERR_IS_EINTR_(err)
MHD internal shared structures.
@ MHD_EVENT_LOOP_INFO_READ
@ MHD_EVENT_LOOP_INFO_WRITE
@ MHD_EVENT_LOOP_INFO_CLEANUP
@ MHD_EVENT_LOOP_INFO_BLOCK
#define MHD_UNSIGNED_LONG_LONG
#define MHD_INVALID_SOCKET
void MHD_request_resume(struct MHD_Request *request)
bool MHD_resume_suspended_connections_(struct MHD_Daemon *daemon)
implementation of MHD_request_resume()
struct MHD_Request request
struct MHD_Connection * next
struct MHD_Connection * prev
struct MHD_Daemon * daemon
bool data_already_pending
struct MHD_Connection * connections_head
bool disallow_suspend_resume
enum MHD_ThreadingMode threading_mode
unsigned int global_connection_limit
struct MHD_Connection * connections_tail
enum MHD_RequestEventLoopInfo event_loop_info
function to process upgrade activity (over TLS)