A Deep Dive into the Golang HTTP Server Source Code
func (srv *Server) Serve(l net.Listener) error {
	......
	for {
		rw, err := l.Accept()
		if err != nil {
			select {
			case <-srv.getDoneChan():
				return ErrServerClosed
			default:
			}
			if ne, ok := err.(net.Error); ok && ne.Temporary() {
				if tempDelay == 0 {
					tempDelay = 5 * time.Millisecond
				} else {
					tempDelay *= 2
				}
				if max := 1 * time.Second; tempDelay > max {
					tempDelay = max
				}
				srv.logf("http: Accept error: %v; retrying in %v", err, tempDelay)
				time.Sleep(tempDelay)
				continue
			}
			return err
		}
		connCtx := ctx
		if cc := srv.ConnContext; cc != nil {
			connCtx = cc(connCtx, rw)
			if connCtx == nil {
				panic("ConnContext returned nil")
			}
		}
		tempDelay = 0
		c := srv.newConn(rw)
		c.setState(c.rwc, StateNew, runHooks) // before Serve can return
		go c.serve(connCtx)
	}
}
func (c *conn) serve(ctx context.Context) {
	......
	// HTTP/1.x from here on.

	ctx, cancelCtx := context.WithCancel(ctx)
	c.cancelCtx = cancelCtx
	defer cancelCtx()

	c.r = &connReader{conn: c}
	c.bufr = newBufioReader(c.r)
	c.bufw = newBufioWriterSize(checkConnErrorWriter{c}, 4<<10)

	for {
		w, err := c.readRequest(ctx)
		if c.r.remain != c.server.initialReadLimitSize() {
			// If we read any bytes off the wire, we're active.
			c.setState(c.rwc, StateActive, runHooks)
		}
		if err != nil {
			const errorHeaders = "\r\nContent-Type: text/plain; charset=utf-8\r\nConnection: close\r\n\r\n"

			switch {
			case err == errTooLarge:
				// Their HTTP client may or may not be
				// able to read this if we're
				// responding to them and hanging up
				// while they're still writing their
				// request. Undefined behavior.
				const publicErr = "431 Request Header Fields Too Large"
				fmt.Fprintf(c.rwc, "HTTP/1.1 "+publicErr+errorHeaders+publicErr)
				c.closeWriteAndWait()
				return

			case isUnsupportedTEError(err):
				// Respond as per RFC 7230 Section 3.3.1 which says,
				//      A server that receives a request message with a
				//      transfer coding it does not understand SHOULD
				//      respond with 501 (Unimplemented).
				code := StatusNotImplemented

				// We purposefully aren't echoing back the transfer-encoding's value,
				// so as to mitigate the risk of cross side scripting by an attacker.
				fmt.Fprintf(c.rwc, "HTTP/1.1 %d %s%sUnsupported transfer encoding", code, StatusText(code), errorHeaders)
				return

			case isCommonNetReadError(err):
				return // don't reply

			default:
				if v, ok := err.(statusError); ok {
					fmt.Fprintf(c.rwc, "HTTP/1.1 %d %s: %s%s%d %s: %s", v.code, StatusText(v.code), v.text, errorHeaders, v.code, StatusText(v.code), v.text)
					return
				}
				publicErr := "400 Bad Request"
				fmt.Fprintf(c.rwc, "HTTP/1.1 "+publicErr+errorHeaders+publicErr)
				return
			}
		}

		// Expect 100 Continue support
		req := w.req
		if req.expectsContinue() {
			if req.ProtoAtLeast(1, 1) && req.ContentLength != 0 {
				// Wrap the Body reader with one that replies on the connection
				req.Body = &expectContinueReader{readCloser: req.Body, resp: w}
				w.canWriteContinue.setTrue()
			}
		} else if req.Header.get("Expect") != "" {
			w.sendExpectationFailed()
			return
		}

		c.curReq.Store(w)

		if requestBodyRemains(req.Body) {
			registerOnHitEOF(req.Body, w.conn.r.startBackgroundRead)
		} else {
			w.conn.r.startBackgroundRead()
		}

		// HTTP cannot have multiple simultaneous active requests.[*]
		// Until the server replies to this request, it can't read another,
		// so we might as well run the handler in this goroutine.
		// [*] Not strictly true: HTTP pipelining. We could let them all process
		// in parallel even if their responses need to be serialized.
		// But we're not going to implement HTTP pipelining because it
		// was never deployed in the wild and the answer is HTTP/2.
		inFlightResponse = w
		serverHandler{c.server}.ServeHTTP(w, w.req)
		inFlightResponse = nil
		w.cancelCtx()
		if c.hijacked() {
			return
		}
		w.finishRequest()
		if !w.shouldReuseConnection() {
			if w.requestBodyLimitHit || w.closedRequestBodyEarly() {
				c.closeWriteAndWait()
			}
			return
		}
		c.setState(c.rwc, StateIdle, runHooks)
		c.curReq.Store((*response)(nil))

		if !w.conn.server.doKeepAlives() {
			// We're in shutdown mode. We might've replied
			// to the user without "Connection: close" and
			// they might think they can send another
			// request, but such is life with HTTP/1.1.
			return
		}

		if d := c.server.idleTimeout(); d != 0 {
			c.rwc.SetReadDeadline(time.Now().Add(d))
			if _, err := c.bufr.Peek(4); err != nil {
				return
			}
		}
		c.rwc.SetReadDeadline(time.Time{})
	}
}
1. c.readRequest(ctx)
c.readRequest(ctx) sits inside the for loop to support HTTP Keep-Alive: the TCP connection can be reused, and requests are handled serially, i.e. the next request is only read after the previous one has been processed. If the client closes the connection, c.readRequest(ctx) fails with a read error and the loop exits.
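To see this reuse from the outside, here is a small sketch using the standard httptest and httptrace packages; the trivial handler and the two GET requests are only for illustration. The second request reports Reused: true because conn.serve is still looping on the same connection.

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"net/http/httptrace"
)

func main() {
	// A trivial handler; conn.serve loops and reads the next request
	// from the same connection once this response is finished.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		io.WriteString(w, "ok")
	}))
	defer srv.Close()

	trace := &httptrace.ClientTrace{
		GotConn: func(info httptrace.GotConnInfo) {
			fmt.Println("connection reused:", info.Reused)
		},
	}

	client := srv.Client()
	for i := 0; i < 2; i++ {
		req, _ := http.NewRequest("GET", srv.URL, nil)
		req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))
		resp, err := client.Do(req)
		if err != nil {
			panic(err)
		}
		// The body must be drained and closed, otherwise the Transport
		// will not return the connection to its idle pool for reuse.
		io.Copy(io.Discard, resp.Body)
		resp.Body.Close()
	}
	// Expected output: "connection reused: false" then "connection reused: true".
}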
Tracing further into the source shows that this call only reads the request headers and runs a few checks, which is why there is a setting like readHeaderDeadline. It then decides what kind of Body to attach: the blank line between the headers and the body marks the end of the headers, and Content-Length tells the server whether there is a body and how many bytes it contains (a raw-request sketch follows the switch below).
switch {
case t.Chunked:
	if noResponseBodyExpected(t.RequestMethod) || !bodyAllowedForStatus(t.StatusCode) {
		t.Body = NoBody
	} else {
		t.Body = &body{src: internal.NewChunkedReader(r), hdr: msg, r: r, closing: t.Close}
	}
case realLength == 0:
	t.Body = NoBody
case realLength > 0:
	t.Body = &body{src: io.LimitReader(r, realLength), closing: t.Close}
default:
	// realLength < 0, i.e. "Content-Length" not mentioned in header
	if t.Close {
		// Close semantics (i.e. HTTP/1.0)
		t.Body = &body{src: r, closing: t.Close}
	} else {
		// Persistent connection (i.e. HTTP/1.1)
		t.Body = NoBody
	}
}
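To make the header/body boundary concrete, here is a hedged sketch over a raw TCP connection; the address 127.0.0.1:8080 and the /echo path are placeholders for whatever server you run. The empty line ends the headers, and Content-Length is what feeds realLength above.

package main

import (
	"bufio"
	"fmt"
	"net"
)

func main() {
	conn, err := net.Dial("tcp", "127.0.0.1:8080") // assumes a server is listening here
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	body := `{"hello":"world"}`
	// The blank line ("\r\n\r\n") terminates the header block; readRequest only
	// consumes up to this point. Content-Length tells the server how many body
	// bytes follow, which become io.LimitReader(r, realLength).
	fmt.Fprintf(conn, "POST /echo HTTP/1.1\r\n"+
		"Host: 127.0.0.1:8080\r\n"+
		"Content-Type: application/json\r\n"+
		"Content-Length: %d\r\n"+
		"\r\n"+
		"%s", len(body), body)

	// Read and print the status line of the response.
	status, _ := bufio.NewReader(conn).ReadString('\n')
	fmt.Print(status)
}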
func (l *LimitedReader) Read(p []byte) (n int, err error) {
	if l.N <= 0 {
		return 0, EOF
	}
	if int64(len(p)) > l.N {
		p = p[0:l.N]
	}
	n, err = l.R.Read(p)
	l.N -= int64(n)
	return
}
io.LimitReader returns EOF once the specified number of bytes has been read, signalling that the body has been fully consumed.
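A tiny runnable sketch of that behavior:

package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	// Behaves like a request body with Content-Length: 5.
	r := io.LimitReader(strings.NewReader("hello world"), 5)

	buf := make([]byte, 8)
	n, err := r.Read(buf)
	fmt.Printf("%q %v\n", buf[:n], err) // "hello" <nil>

	n, err = r.Read(buf)
	fmt.Printf("%q %v\n", buf[:n], err) // "" EOF: the limit is exhausted, the body counts as fully read
}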
2. w.conn.r.startBackgroundRead
if requestBodyRemains(req.Body) {
	registerOnHitEOF(req.Body, w.conn.r.startBackgroundRead)
} else {
	w.conn.r.startBackgroundRead()
}
startBackgroundRead is only started once the body has been read to the end.
func (cr *connReader) backgroundRead() {
	n, err := cr.conn.rwc.Read(cr.byteBuf[:])
	cr.lock()
	if n == 1 {
		cr.hasByte = true
		......
	}
	if ne, ok := err.(net.Error); ok && cr.aborted && ne.Timeout() {
		// Ignore this error. It's the expected error from
		// another goroutine calling abortPendingRead.
	} else if err != nil {
		cr.handleReadError(err)
	}
	cr.aborted = false
	cr.inRead = false
	cr.unlock()
	cr.cond.Broadcast()
}

func (cr *connReader) handleReadError(_ error) {
	cr.conn.cancelCtx()
	cr.closeNotify()
}

// may be called from multiple goroutines.
func (cr *connReader) closeNotify() {
	res, _ := cr.conn.curReq.Load().(*response)
	if res != nil && atomic.CompareAndSwapInt32(&res.didCloseNotify, 0, 1) {
		res.closeNotifyCh <- true
	}
}
startBackgroundRead exists to detect whether the client has closed the connection. It must not interfere with reading the request data, so it only starts after the body has been fully read, and it reads just a single, token byte: if the client has closed the connection, the underlying fd becomes readable, the read fails, and a value is sent on a channel (closeNotifyCh). The goroutine's lifetime is the current request, not the current connection. Its purpose is to interrupt the Handler for the current request: once the client has given up on the request, the server has little reason to keep doing business work for it. In practice, though, this is hard to act on, and arguably unnecessary; in our view, once a request has arrived the server is obliged to process it properly rather than cut it short.
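From the handler's point of view, the visible effect of this background read is that the request context is cancelled when the client goes away, because handleReadError cancels the connection context that the request context is derived from. A minimal sketch; the 5-second timer simply stands in for real business work:

package main

import (
	"fmt"
	"log"
	"net/http"
	"time"
)

func main() {
	http.HandleFunc("/slow", func(w http.ResponseWriter, r *http.Request) {
		select {
		case <-time.After(5 * time.Second): // pretend this is the real business logic
			fmt.Fprintln(w, "done")
		case <-r.Context().Done():
			// backgroundRead noticed the closed connection, handleReadError
			// called cancelCtx, and the request context was cancelled.
			fmt.Println("client went away:", r.Context().Err())
		}
	})
	log.Fatal(http.ListenAndServe(":8080", nil))
}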
When the request has finished being handled, abortPendingRead is called so that the startBackgroundRead goroutine exits. Why isn't the goroutine's lifetime tied to the connection instead? Because a Keep-Alive connection can stay open for quite a while even when no request arrives, and that would leave the startBackgroundRead goroutine running the whole time.
So when does the server close the connection? After all, the client cannot be trusted. It arms the timer by calling SetReadDeadline with ReadHeaderTimeout; if ReadHeaderTimeout is not set, ReadTimeout is used instead. If no new request has arrived by the deadline, the server assumes the client is no longer reusing the connection: the for loop exits and the deferred cleanup closes the connection.
In practice a client only reuses a connection when it needs to send several requests within a short window, for example while a page is loading, when the browser reuses connections as it sees fit.
The read deadline set from ReadTimeout is an overall cut-off: a single deadline that covers reading both the headers and the body.
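For reference, this is how those knobs map onto an http.Server; the durations and the :8080 address are arbitrary choices for illustration, not recommendations:

package main

import (
	"log"
	"net/http"
	"time"
)

func main() {
	srv := &http.Server{
		Addr:              ":8080",
		Handler:           http.DefaultServeMux,
		ReadHeaderTimeout: 5 * time.Second,  // limit on reading the request headers; if zero, ReadTimeout is used
		ReadTimeout:       15 * time.Second, // overall deadline covering headers plus body
		IdleTimeout:       60 * time.Second, // how long a Keep-Alive connection may sit idle waiting for the next request; if zero, ReadTimeout is used
	}
	log.Fatal(srv.ListenAndServe())
}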
3. serverHandler{c.server}.ServeHTTP(w, w.req)
After that the Handler is invoked. If it needs the body, it must go on and read the body itself, which shows that the headers and the body are read separately. This second read normally does not block because the data is already sitting in the fd, but if an attacker deliberately sends only the headers and never the body, the read will block (until a read deadline such as ReadTimeout cuts it off, if one is set).
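A sketch of a handler doing that second read; MaxBytesReader and the 1 MiB cap are just one reasonable way to bound an untrusted body:

package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	http.HandleFunc("/upload", func(w http.ResponseWriter, r *http.Request) {
		// This read pulls the body off the same connection the headers came from;
		// it only blocks if the client has not yet sent the body bytes.
		r.Body = http.MaxBytesReader(w, r.Body, 1<<20) // cap the body at 1 MiB
		data, err := io.ReadAll(r.Body)
		if err != nil {
			http.Error(w, "cannot read body", http.StatusBadRequest)
			return
		}
		fmt.Fprintf(w, "received %d bytes\n", len(data))
	})
	log.Fatal(http.ListenAndServe(":8080", nil))
}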