1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 // Waiting for FDs via epoll(7).
// Event masks handed to epoll_ctl for read/write interest.
// readFlags also carries EPOLLRDHUP so a peer shutdown wakes blocked readers.
15 readFlags = syscall.EPOLLIN | syscall.EPOLLRDHUP
16 writeFlags = syscall.EPOLLOUT
// pollster multiplexes file-descriptor readiness via a single epoll instance.
// NOTE(review): elided lines here presumably declare the epoll fd (`epfd int`)
// and the `events map[int]uint32` bookkeeping map — both are used by the
// methods below; confirm against the full file.
19 type pollster struct {
22 // Events we're already waiting for
23 // Must hold pollServer lock
26 // An event buffer for EpollWait.
27 // Used without a lock, may only be used by WaitFD.
28 waitEventBuf [10]syscall.EpollEvent
29 waitEvents []syscall.EpollEvent
31 // An event buffer for EpollCtl, to avoid a malloc.
32 // Must hold pollServer lock.
33 ctlEvent syscall.EpollEvent
// newpollster creates the epoll instance backing a pollster.
// It prefers epoll_create1(EPOLL_CLOEXEC), which sets close-on-exec
// atomically; on kernels that predate it (ENOSYS), it falls back to
// epoll_create plus a separate CloseOnExec call.
36 func newpollster() (p *pollster, err error) {
38 if p.epfd, err = syscall.EpollCreate1(syscall.EPOLL_CLOEXEC); err != nil {
// Any failure other than ENOSYS is a real error; ENOSYS just means
// an old kernel, so try the legacy syscall below.
39 if err != syscall.ENOSYS {
40 return nil, os.NewSyscallError("epoll_create1", err)
42 // The arg to epoll_create is a hint to the kernel
43 // about the number of FDs we will care about.
44 // We don't know, and since 2.6.8 the kernel ignores it anyhow.
45 if p.epfd, err = syscall.EpollCreate(16); err != nil {
46 return nil, os.NewSyscallError("epoll_create", err)
// Legacy path: the fd is not CLOEXEC yet, so mark it by hand.
// NOTE(review): there is a race window vs. fork+exec here that
// epoll_create1 avoids — inherent to the fallback path.
48 syscall.CloseOnExec(p.epfd)
// Tracks the event bits currently registered per fd.
50 p.events = make(map[int]uint32)
// AddFD registers interest in fd for the given mode ('r' or 'w',
// presumably — the mode checks are on elided lines; confirm). Unless
// repeat is true the wait is one-shot (EPOLLONESHOT). It reuses
// p.ctlEvent to avoid allocating per call, choosing EPOLL_CTL_MOD when
// the fd is already tracked in p.events and EPOLL_CTL_ADD otherwise.
54 func (p *pollster) AddFD(fd int, mode int, repeat bool) (bool, error) {
55 // pollServer is locked.
58 p.ctlEvent.Fd = int32(fd)
// The comma-ok form both loads the existing event bits (so new bits
// are OR'ed onto them) and records whether the fd was already known.
59 p.ctlEvent.Events, already = p.events[fd]
61 p.ctlEvent.Events |= syscall.EPOLLONESHOT
64 p.ctlEvent.Events |= readFlags
66 p.ctlEvent.Events |= writeFlags
71 op = syscall.EPOLL_CTL_MOD
73 op = syscall.EPOLL_CTL_ADD
75 if err := syscall.EpollCtl(p.epfd, op, fd, &p.ctlEvent); err != nil {
76 return false, os.NewSyscallError("epoll_ctl", err)
// Remember what the kernel now has registered for this fd.
78 p.events[fd] = p.ctlEvent.Events
// StopWaiting clears the given event bits for fd. If other bits remain
// registered it downgrades the kernel registration with EPOLL_CTL_MOD;
// if none remain it removes the fd entirely with EPOLL_CTL_DEL.
// Repeating (non-one-shot) waits are left untouched.
82 func (p *pollster) StopWaiting(fd int, bits uint) {
83 // pollServer is locked.
85 events, already := p.events[fd]
87 // The fd returned by the kernel may have been
88 // cancelled already; return silently.
92 // If syscall.EPOLLONESHOT is not set, the wait
93 // is a repeating wait, so don't change it.
94 if events&syscall.EPOLLONESHOT == 0 {
98 // Disable the given bits.
99 // If we're still waiting for other events, modify the fd
100 // event in the kernel. Otherwise, delete it.
101 events &= ^uint32(bits)
// Ignore the ONESHOT bookkeeping bit when deciding whether any
// real interest (read/write flags) is still pending.
102 if int32(events)&^syscall.EPOLLONESHOT != 0 {
103 p.ctlEvent.Fd = int32(fd)
104 p.ctlEvent.Events = events
// Errors here are only logged: the caller cannot usefully
// recover, and the fd may already be closing.
105 if err := syscall.EpollCtl(p.epfd, syscall.EPOLL_CTL_MOD, fd, &p.ctlEvent); err != nil {
106 print("Epoll modify fd=", fd, ": ", err.Error(), "\n")
108 p.events[fd] = events
110 if err := syscall.EpollCtl(p.epfd, syscall.EPOLL_CTL_DEL, fd, nil); err != nil {
111 print("Epoll delete fd=", fd, ": ", err.Error(), "\n")
// DelFD drops all interest in fd for the given mode by delegating to
// StopWaiting, then scrubs any already-buffered kernel events for fd
// from waitEvents so WaitFD cannot report a stale readiness for a
// descriptor that is being torn down.
117 func (p *pollster) DelFD(fd int, mode int) {
118 // pollServer is locked.
// NOTE(review): the guards selecting read vs. write by mode are on
// elided lines between these two calls — confirm against the full file.
121 p.StopWaiting(fd, readFlags)
123 p.StopWaiting(fd, writeFlags)
126 // Discard any queued up events.
128 for i < len(p.waitEvents) {
129 if fd == int(p.waitEvents[i].Fd) {
// Remove element i by shifting the tail left one slot;
// i is deliberately not advanced on this path so the
// element shifted into slot i is examined too.
130 copy(p.waitEvents[i:], p.waitEvents[i+1:])
131 p.waitEvents = p.waitEvents[:len(p.waitEvents)-1]
// WaitFD returns the next ready (fd, mode) pair, blocking in epoll_wait
// for up to nsec nanoseconds (rounded up to whole milliseconds) when no
// buffered events remain. One-shot registrations are retired via
// StopWaiting as events are delivered. On epoll_wait failure it returns
// (-1, 0, err); EAGAIN/EINTR are retried, not surfaced.
138 func (p *pollster) WaitFD(s *pollServer, nsec int64) (fd int, mode int, err error) {
// Refill the buffered-event queue when it is empty.
139 for len(p.waitEvents) == 0 {
// Round the nanosecond deadline UP to milliseconds so a short
// positive timeout never becomes a zero (non-blocking) wait.
142 msec = int((nsec + 1e6 - 1) / 1e6)
146 n, err := syscall.EpollWait(p.epfd, p.waitEventBuf[0:], msec)
150 if err == syscall.EAGAIN || err == syscall.EINTR {
153 return -1, 0, os.NewSyscallError("epoll_wait", err)
// Serve events out of the fixed buffer without copying.
158 p.waitEvents = p.waitEventBuf[0:n]
// Pop the head event; the slice shrinks until the refill loop runs again.
161 ev := &p.waitEvents[0]
162 p.waitEvents = p.waitEvents[1:]
166 if ev.Events&writeFlags != 0 {
167 p.StopWaiting(fd, writeFlags)
170 if ev.Events&readFlags != 0 {
171 p.StopWaiting(fd, readFlags)
175 // Other events are error conditions - wake whoever is waiting.
// For error events (e.g. EPOLLERR/EPOLLHUP) consult our bookkeeping,
// not ev.Events, to learn which direction(s) a waiter is parked on.
176 events, _ := p.events[fd]
177 if events&writeFlags != 0 {
178 p.StopWaiting(fd, writeFlags)
181 p.StopWaiting(fd, readFlags)
// Close releases the epoll file descriptor. The pollster must not be
// used after Close; any error from close(2) is wrapped for the caller.
185 func (p *pollster) Close() error {
186 return os.NewSyscallError("close", syscall.Close(p.epfd))