Studying the source of MySQL Proxy 0.8.3 shows that all of its event-handling threads listen on the read end of a global socketpair, threads->event_notify_fds[0], which serves as a notification pipe. Each thread registers this interest during its own initialization:
int chassis_event_threads_init_thread(chassis_event_threads_t *threads, chassis_event_thread_t *event_thread, chassis *chas) {
    event_thread->event_base = event_base_new();
    ...
    // set this thread's listening fd to a dup of the socketpair's read end
    event_thread->notify_fd = dup(threads->event_notify_fds[0]);
    ...
    event_set(&(event_thread->notify_fd_event), event_thread->notify_fd, EV_READ | EV_PERSIST, chassis_event_handle, event_thread);
    event_base_set(event_thread->event_base, &(event_thread->notify_fd_event));
    event_add(&(event_thread->notify_fd_event), NULL);
    return 0;
}
The socketpair itself is created during main-thread initialization, in chassis_event_threads_new:

chassis_event_threads_t *chassis_event_threads_new() {
    ...
    threads = g_new0(chassis_event_threads_t, 1);

    /* create the ping-fds
     *
     * the event-thread write a byte to the ping-pipe to trigger a fd-event when
     * something is available in the event-async-queues
     */
    // create the socketpair
    if (0 != evutil_socketpair(AF_UNIX, SOCK_STREAM, 0, threads->event_notify_fds)) {
        ...
    }
    ...
    /* make both ends non-blocking */
    evutil_make_socket_nonblocking(threads->event_notify_fds[0]);
    evutil_make_socket_nonblocking(threads->event_notify_fds[1]);

    return threads;
}
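Before looking at evutil_socketpair itself, it helps to see how the pair is actually used. The sketch below is a condensed illustration of the producer/consumer protocol, not MySQL Proxy's exact code (the proxy does the equivalent in chassis_event_add and the chassis_event_handle callback registered above); the names event_threads_add, event_threads_handle, and the event_queue field are illustrative, and glib's g_async_queue is assumed:

// producer side: hand an operation to the event threads, then ping them
// (illustrative names; event_queue is assumed to be a GAsyncQueue *)
void event_threads_add(chassis_event_threads_t *threads, gpointer op) {
    g_async_queue_push(threads->event_queue, op);  /* queue the work item */
    write(threads->event_notify_fds[1], "", 1);    /* one-byte wakeup ping */
}

// consumer side: body of a callback like chassis_event_handle above
void event_threads_handle(int fd, short events, void *user_data) {
    char ping;
    read(fd, &ping, 1);   /* consume the wakeup byte */
    /* ...then pop the pending operation(s) off the async queue... */
}

The single byte carries no data of its own; it only makes the read end readable so that whichever thread wins the read wakes up and drains the queue.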
The evutil_socketpair used here is implemented as follows (taken from libevent 1.4.13). On non-WIN32 platforms a socketpair can be created directly with the ready-made API; on WIN32 it is built by creating two local sockets and connecting them to each other.

int
evutil_socketpair(int family, int type, int protocol, int fd[2])
{
#ifndef WIN32
    return socketpair(family, type, protocol, fd);
#else
    /* This code is originally from Tor.  Used with permission. */

    /* This socketpair does not work when localhost is down. So
     * it's really not the same thing at all. But it's close enough
     * for now, and really, when localhost is down sometimes, we
     * have other problems too.
     */
    int listener = -1;
    int connector = -1;
    int acceptor = -1;
    struct sockaddr_in listen_addr;
    struct sockaddr_in connect_addr;
    int size;
    int saved_errno = -1;

    if (protocol
#ifdef AF_UNIX
        || family != AF_UNIX
#endif
        ) {
        EVUTIL_SET_SOCKET_ERROR(WSAEAFNOSUPPORT);
        return -1;
    }
    if (!fd) {
        EVUTIL_SET_SOCKET_ERROR(WSAEINVAL);
        return -1;
    }

    // create the listener socket
    listener = socket(AF_INET, type, 0);
    if (listener < 0)
        return -1;
    memset(&listen_addr, 0, sizeof(listen_addr));
    listen_addr.sin_family = AF_INET;
    listen_addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
    listen_addr.sin_port = 0;   /* kernel chooses port. */
    // bind; the kernel assigns an ephemeral port
    if (bind(listener, (struct sockaddr *) &listen_addr, sizeof (listen_addr)) == -1)
        goto tidy_up_and_fail;
    // start listening for connection requests
    if (listen(listener, 1) == -1)
        goto tidy_up_and_fail;

    // create the connector socket
    connector = socket(AF_INET, type, 0);
    if (connector < 0)
        goto tidy_up_and_fail;
    /* We want to find out the port number to connect to. */
    size = sizeof(connect_addr);
    // fetch the port the kernel assigned to listener at bind time (the ip is INADDR_LOOPBACK)
    if (getsockname(listener, (struct sockaddr *) &connect_addr, &size) == -1)
        goto tidy_up_and_fail;
    if (size != sizeof (connect_addr))
        goto abort_tidy_up_and_fail;
    // connect from connector to listener; connect_addr is the destination address
    if (connect(connector, (struct sockaddr *) &connect_addr, sizeof(connect_addr)) == -1)
        goto tidy_up_and_fail;

    size = sizeof(listen_addr);
    // accept on listener; on return listen_addr holds the peer's address
    acceptor = accept(listener, (struct sockaddr *) &listen_addr, &size);
    if (acceptor < 0)
        goto tidy_up_and_fail;
    if (size != sizeof(listen_addr))
        goto abort_tidy_up_and_fail;
    // close the listener
    EVUTIL_CLOSESOCKET(listener);
    /* Now check we are talking to ourself by matching port and host on the
       two sockets. */
    // fetch the address the kernel auto-bound to connector during connect
    if (getsockname(connector, (struct sockaddr *) &connect_addr, &size) == -1)
        goto tidy_up_and_fail;
    // compare the addresses obtained from the two ends
    if (size != sizeof (connect_addr)
        || listen_addr.sin_family != connect_addr.sin_family
        || listen_addr.sin_addr.s_addr != connect_addr.sin_addr.s_addr
        || listen_addr.sin_port != connect_addr.sin_port)
        goto abort_tidy_up_and_fail;
    fd[0] = connector;
    fd[1] = acceptor;
    return 0;

 abort_tidy_up_and_fail:
    saved_errno = WSAECONNABORTED;
 tidy_up_and_fail:
    if (saved_errno < 0)
        saved_errno = WSAGetLastError();
    if (listener != -1)
        EVUTIL_CLOSESOCKET(listener);
    if (connector != -1)
        EVUTIL_CLOSESOCKET(connector);
    if (acceptor != -1)
        EVUTIL_CLOSESOCKET(acceptor);
    EVUTIL_SET_SOCKET_ERROR(saved_errno);
    return -1;
#endif
}
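As a quick sanity check of the interface, here is a minimal usage sketch (my own test snippet, not from libevent): create the pair, push a byte through one end, and read it back from the other. It assumes libevent 1.4's evutil.h; on Windows you would also need winsock2.h and a WSAStartup() call.

#include <stdio.h>
#include <sys/socket.h>   /* POSIX; use winsock2.h on Windows */
#include "evutil.h"       /* declares evutil_socketpair in libevent 1.4 */

int main(void) {
    int fds[2];
    char buf[1];

    if (evutil_socketpair(AF_UNIX, SOCK_STREAM, 0, fds) != 0) {
        fprintf(stderr, "evutil_socketpair failed\n");
        return 1;
    }
    send(fds[1], "x", 1, 0);            /* either end can write... */
    recv(fds[0], buf, 1, 0);            /* ...and the peer end reads */
    printf("received: %c\n", buf[0]);
    return 0;
}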
Another way to implement the same functionality is to use a pipe. Its usage is simple; the excerpt below is taken from memcached-1.4.14:
void thread_init(int nthreads, struct event_base *main_base) {
    ...
    // nthreads is the number of worker threads to create
    for (i = 0; i < nthreads; i++) {
        int fds[2];
        if (pipe(fds)) {
            perror("Can't create notify pipe");
            exit(1);
        }

        // each worker keeps the read end; the dispatcher writes to the write end
        threads[i].notify_receive_fd = fds[0];
        threads[i].notify_send_fd = fds[1];

        setup_thread(&threads[i]);
    }
    ...
}

As for which approach is better, I leave that for you to ponder~~
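Whichever primitive you pick, the wakeup protocol on top of it is the same. Below is a self-contained demo of that pattern (a generic sketch of mine, not memcached source; memcached itself registers notify_receive_fd with libevent rather than blocking in read):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static int notify_fds[2];   /* [0] = worker reads, [1] = dispatcher writes */

static void *worker(void *arg) {
    char ping;
    read(notify_fds[0], &ping, 1);    /* blocks until the dispatcher pings */
    printf("worker woken, draining queue...\n");
    return NULL;
}

int main(void) {
    pthread_t tid;
    if (pipe(notify_fds)) { perror("pipe"); return 1; }
    pthread_create(&tid, NULL, worker, NULL);
    /* hand work to a (hypothetical) queue here, then ping the worker */
    write(notify_fds[1], "", 1);
    pthread_join(tid, NULL);
    return 0;
}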
====== Update 2013-11-11 ======
Recently, while writing Modb code, I wanted to reuse the inter-thread communication mechanism above, so I chose the simpler pipe-based approach. While debugging on Windows, however, I kept hitting an "Unknown error 10038". After consulting the documentation, the conclusion is that on Windows a pipe cannot be used together with select: Winsock's select() only accepts socket handles, so a pipe is rejected as an invalid socket handle (error 10038 is WSAENOTSOCK). Linux has no such problem.
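For contrast, the snippet below (my own illustration, not from Modb) runs fine on Linux, where select() accepts pipe descriptors; the equivalent select() call is exactly what fails with 10038 on Windows:

#include <stdio.h>
#include <unistd.h>
#include <sys/select.h>

int main(void) {
    int fds[2];
    if (pipe(fds)) { perror("pipe"); return 1; }
    write(fds[1], "x", 1);               /* make the read end readable */

    fd_set rfds;
    FD_ZERO(&rfds);
    FD_SET(fds[0], &rfds);
    /* On Linux this returns 1; Winsock's select() would instead fail
     * with WSAENOTSOCK (10038) because a pipe handle is not a socket. */
    int n = select(fds[0] + 1, &rfds, NULL, NULL, NULL);
    printf("select returned %d\n", n);
    return 0;
}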
Solutions:
- simulate a pipe on top of sockets, as in the implementation below;
- use the socketpair implementation above.
int pipe(int fildes[2])
{
    int tcp1, tcp2;
    sockaddr_in name;
    memset(&name, 0, sizeof(name));
    name.sin_family = AF_INET;
    name.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
    int namelen = sizeof(name);
    tcp1 = tcp2 = -1;
    int tcp = socket(AF_INET, SOCK_STREAM, 0);
    if (tcp == -1) {
        goto clean;
    }
    // bind to loopback; port 0 lets the kernel pick one
    if (bind(tcp, (sockaddr*)&name, namelen) == -1) {
        goto clean;
    }
    if (listen(tcp, 5) == -1) {
        goto clean;
    }
    // recover the kernel-assigned port
    if (getsockname(tcp, (sockaddr*)&name, &namelen) == -1) {
        goto clean;
    }
    tcp1 = socket(AF_INET, SOCK_STREAM, 0);
    if (tcp1 == -1) {
        goto clean;
    }
    if (-1 == connect(tcp1, (sockaddr*)&name, namelen)) {
        goto clean;
    }
    tcp2 = accept(tcp, (sockaddr*)&name, &namelen);
    if (tcp2 == -1) {
        goto clean;
    }
    // the listener is no longer needed once the pair is connected
    if (closesocket(tcp) == -1) {
        goto clean;
    }
    fildes[0] = tcp1;
    fildes[1] = tcp2;
    return 0;
clean:
    if (tcp != -1) {
        closesocket(tcp);
    }
    if (tcp2 != -1) {
        closesocket(tcp2);
    }
    if (tcp1 != -1) {
        closesocket(tcp1);
    }
    return -1;
}

The original author of this pipe implementation points out the following drawbacks:
- It is inefficient. (Though are all the other approaches really more efficient than a socket-based one?)
- It occupies two TCP ports (a real pipe occupies none).
- The connection accept returns is not necessarily the one tcp1 initiated (another thread or process may be interfering), so it is best to confirm the pairing by sending data; see the sketch after this list. (This one is fairly serious: when several connections arrive at the same time, there really is no guarantee that the accepted connection is the right one.)
- Since the pair is not anonymous, it shows up in netstat. (But what does it matter if it is visible?)
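Here is a sketch of that confirm-by-sending-data idea (my illustration, not the original author's code; verify_pair is a hypothetical helper): after accept(), push a random nonce through the connector and require the accepted socket to receive exactly those bytes.

#include <stdlib.h>
#include <string.h>
#include <sys/socket.h>   /* winsock2.h on Windows */

/* Returns 0 if `connector` and `acceptor` really are two ends of the same
 * connection, -1 otherwise. Assumes blocking sockets; a production version
 * would also loop on partial sends/receives. */
static int verify_pair(int connector, int acceptor) {
    char nonce[8], echo[8];
    int i;
    for (i = 0; i < (int)sizeof(nonce); i++)
        nonce[i] = (char)(rand() & 0xff);          /* weak nonce, fine for a demo */
    if (send(connector, nonce, sizeof(nonce), 0) != (int)sizeof(nonce))
        return -1;
    if (recv(acceptor, echo, sizeof(echo), 0) != (int)sizeof(echo))
        return -1;
    return memcmp(nonce, echo, sizeof(nonce)) == 0 ? 0 : -1;
}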
Comparing this pipe implementation with the socketpair implementation above, the two are fundamentally the same thing, and the pipe version is not written as carefully as libevent's socketpair code. So I have reservations about the drawbacks its author lists; readers can weigh them for themselves.
One addendum: since the socketpair above is based on INADDR_LOOPBACK, the loopback interface (lo) must be up for it to work.
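On the WIN32 emulation path this failure mode can be caught early with a startup self-test (a hypothetical check of mine, not something MySQL Proxy does): push one byte through the freshly created pair before the ends are made non-blocking, so a down loopback interface shows up as an immediate error rather than as silently lost wakeups.

#include <sys/socket.h>   /* winsock2.h on Windows */

/* Run right after evutil_socketpair() and before the fds are made
 * non-blocking, so recv() below can block until the byte arrives. */
static int socketpair_self_test(int fds[2]) {
    char c = 'p';
    if (send(fds[1], &c, 1, 0) != 1)   /* ping the write end */
        return -1;
    if (recv(fds[0], &c, 1, 0) != 1)   /* expect it on the read end */
        return -1;
    return (c == 'p') ? 0 : -1;
}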