37 "github.com/ThomsonReutersEikon/go-ntlm/ntlm"
// Package-level test-server state: the shared in-memory object store, the
// three HTTP test servers (plain, TLS, TLS-with-required-client-cert), and
// the table mapping content hashes to special-case behaviors.
42 largeObjects = newLfsStorage()
43 server *httptest.Server
44 serverTLS *httptest.Server
45 serverClientCert *httptest.Server
47 // maps OIDs to content strings. Both the LFS and Storage test servers below
49 oidHandlers map[string]string
51 // These magic strings tell the test lfs server to change its behavior so the
52 // integration tests can check those use cases. Tests will create objects with
53 // the magic strings as the contents.
55 // printf "status:lfs:404" > 404.dat
// Each entry names one special-case behavior; init code below hashes each
// string so oidHandlers can look the behavior up by the object's OID.
57 contentHandlers = []string{
58 "status-batch-403", "status-batch-404", "status-batch-410", "status-batch-422", "status-batch-500",
59 "status-storage-403", "status-storage-404", "status-storage-410", "status-storage-422", "status-storage-500", "status-storage-503",
60 "status-batch-resume-206", "batch-resume-fail-fallback", "return-expired-action", "return-expired-action-forever", "return-invalid-size",
61 "object-authenticated", "storage-download-retry", "storage-upload-retry", "unknown-oid",
62 "send-verify-action", "send-deprecated-links",
// Body of the server's entry point (the enclosing func header and several
// statements are elided from this view). Wires one ServeMux behind three
// httptest servers and writes connection state to files that the test
// harness locates via LFSTEST_* environment variables.
67 repoDir = os.Getenv("LFSTEST_DIR")
69 mux := http.NewServeMux()
70 server = httptest.NewServer(mux)
71 serverTLS = httptest.NewTLSServer(mux)
72 serverClientCert = httptest.NewUnstartedServer(mux)
// Client-cert server setup: generate a self-signed CA and a client keypair,
// then require and verify client certificates while reusing the TLS test
// server's own server certificate.
74 //setup Client Cert server
75 rootKey, rootCert := generateCARootCertificates()
76 _, clientCertPEM, clientKeyPEM := generateClientCertificates(rootCert, rootKey)
78 certPool := x509.NewCertPool()
79 certPool.AddCert(rootCert)
81 serverClientCert.TLS = &tls.Config{
82 Certificates: []tls.Certificate{serverTLS.TLS.Certificates[0]},
83 ClientAuth: tls.RequireAndVerifyClientCert,
86 serverClientCert.StartTLS()
// Shared NTLM server session used by the auth helpers below; errors are
// logged (further handling elided from this view).
88 ntlmSession, err := ntlm.CreateServerSession(ntlm.Version2, ntlm.ConnectionOrientedMode)
90 fmt.Println("Error creating ntlm session:", err)
93 ntlmSession.SetUserInfo("ntlmuser", "ntlmpass", "NTLMDOMAIN")
95 stopch := make(chan bool)
// Routes: /shutdown signals shutdown, /storage/ serves object content,
// /verify handles batch verify callbacks, /redirect307/ exercises redirects,
// and the catch-all dispatches /info/lfs API calls or git http-backend.
97 mux.HandleFunc("/shutdown", func(w http.ResponseWriter, r *http.Request) {
101 mux.HandleFunc("/storage/", storageHandler)
102 mux.HandleFunc("/verify", verifyHandler)
103 mux.HandleFunc("/redirect307/", redirect307Handler)
104 mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
110 if strings.Contains(r.URL.Path, "/info/lfs") {
111 if !skipIfBadAuth(w, r, id, ntlmSession) {
118 debug(id, "git http-backend %s %s", r.Method, r.URL)
// Persist server URLs, the TLS certificate, and the client cert/key pair to
// state files so test subprocesses can read them; each is removed on exit.
122 urlname := writeTestStateFile([]byte(server.URL), "LFSTEST_URL", "lfstest-gitserver")
123 defer os.RemoveAll(urlname)
125 sslurlname := writeTestStateFile([]byte(serverTLS.URL), "LFSTEST_SSL_URL", "lfstest-gitserver-ssl")
126 defer os.RemoveAll(sslurlname)
128 clientCertUrlname := writeTestStateFile([]byte(serverClientCert.URL), "LFSTEST_CLIENT_CERT_URL", "lfstest-gitserver-ssl")
129 defer os.RemoveAll(clientCertUrlname)
// PEM-encode the TLS server's leaf certificate so clients can trust it.
131 block := &pem.Block{}
132 block.Type = "CERTIFICATE"
133 block.Bytes = serverTLS.TLS.Certificates[0].Certificate[0]
134 pembytes := pem.EncodeToMemory(block)
136 certname := writeTestStateFile(pembytes, "LFSTEST_CERT", "lfstest-gitserver-cert")
137 defer os.RemoveAll(certname)
139 cccertname := writeTestStateFile(clientCertPEM, "LFSTEST_CLIENT_CERT", "lfstest-gitserver-client-cert")
140 defer os.RemoveAll(cccertname)
142 ckcertname := writeTestStateFile(clientKeyPEM, "LFSTEST_CLIENT_KEY", "lfstest-gitserver-client-key")
143 defer os.RemoveAll(ckcertname)
145 debug("init", "server url: %s", server.URL)
146 debug("init", "server tls url: %s", serverTLS.URL)
147 debug("init", "server client cert url: %s", serverClientCert.URL)
150 debug("init", "git server done")
153 // writeTestStateFile writes contents to either the file referenced by the
154 // environment variable envVar, or defaultFilename if that's not set. Returns
155 // the filename that was used
156 func writeTestStateFile(contents []byte, envVar, defaultFilename string) string {
// Prefer an explicit path from the environment; the fallback assignment to
// defaultFilename and error handling are elided from this view.
157 f := os.Getenv(envVar)
161 file, err := os.Create(f)
// lfsObject is one entry in a batch API request/response: the object's
// content hash, size, per-operation actions (or legacy _links), and an
// optional per-object error.
170 type lfsObject struct {
171 Oid string `json:"oid,omitempty"`
172 Size int64 `json:"size,omitempty"`
173 Authenticated bool `json:"authenticated,omitempty"`
174 Actions map[string]*lfsLink `json:"actions,omitempty"`
175 Links map[string]*lfsLink `json:"_links,omitempty"`
176 Err *lfsError `json:"error,omitempty"`
// lfsLink describes one action href plus extra request headers and optional
// expiry (absolute time or relative seconds).
179 type lfsLink struct {
180 Href string `json:"href"`
181 Header map[string]string `json:"header,omitempty"`
182 ExpiresAt time.Time `json:"expires_at,omitempty"`
183 ExpiresIn int `json:"expires_in,omitempty"`
// lfsError is the error payload used in batch responses and error bodies.
186 type lfsError struct {
187 Code int `json:"code,omitempty"`
188 Message string `json:"message"`
// writeLFSError sends msg as a git-lfs JSON error body with the given HTTP
// status code (status write elided from this view).
191 func writeLFSError(w http.ResponseWriter, code int, msg string) {
192 by, err := json.Marshal(&lfsError{Message: msg})
194 http.Error(w, "json encoding error: "+err.Error(), 500)
198 w.Header().Set("Content-Type", "application/vnd.git-lfs+json")
203 // handles any requests with "{name}.server.git/info/lfs" in the path
204 func lfsHandler(w http.ResponseWriter, r *http.Request, id string) {
205 repo, err := repoFromLfsUrl(r.URL.Path)
208 w.Write([]byte(err.Error()))
212 debug(id, "git lfs %s %s repo: %s", r.Method, r.URL, repo)
213 w.Header().Set("Content-Type", "application/vnd.git-lfs+json")
// Dispatch by URL suffix/method: batch requests, lock requests, deletes.
// (Method switch structure partially elided from this view.)
216 if strings.HasSuffix(r.URL.String(), "batch") {
217 lfsBatchHandler(w, r, id, repo)
219 locksHandler(w, r, repo)
222 lfsDeleteHandler(w, r, id, repo)
224 if strings.Contains(r.URL.String(), "/locks") {
225 locksHandler(w, r, repo)
228 w.Write([]byte("lock request"))
// lfsUrl builds the storage href for an object, carrying the repo name in
// the "r" query parameter (read back by storageHandler).
235 func lfsUrl(repo, oid string) string {
236 return server.URL + "/storage/" + oid + "?r=" + repo
// retries counts retry attempts keyed by "<direction>:<repo>:<oid>"
// (guarded by retriesMu, declared outside this view).
240 retries = make(map[string]uint32)
244 func incrementRetriesFor(api, direction, repo, oid string, check bool) (after uint32, ok bool) {
245 // fmtStr formats a string like "<api>-<direction>-[check]-<retry>",
246 // i.e., "legacy-upload-check-retry", or "storage-download-retry".
249 fmtStr = "%s-%s-check-retry"
251 fmtStr = "%s-%s-retry"
// Only objects whose magic content names a matching retry handler are
// counted; anything else reports ok == false.
254 if oidHandlers[oid] != fmt.Sprintf(fmtStr, api, direction) {
259 defer retriesMu.Unlock()
261 retryKey := strings.Join([]string{direction, repo, oid}, ":")
// NOTE(review): this local deliberately shadows the package-level retries
// map for the remainder of the function.
264 retries := retries[retryKey]
// lfsDeleteHandler removes the trailing-path-segment OID from the in-memory
// object store for the given repo.
269 func lfsDeleteHandler(w http.ResponseWriter, r *http.Request, id, repo string) {
270 parts := strings.Split(r.URL.Path, "/")
271 oid := parts[len(parts)-1]
273 largeObjects.Delete(repo, oid)
274 debug(id, "DELETE:", oid)
// batchReq models a git-lfs batch API request body: candidate transfer
// adapters, the operation ("upload"/"download"), objects, and optional ref.
278 type batchReq struct {
279 Transfers []string `json:"transfers"`
280 Operation string `json:"operation"`
281 Objects []lfsObject `json:"objects"`
282 Ref *Ref `json:"ref,omitempty"`
// RefName reports the request's ref name (nil-handling elided from view).
285 func (r *batchReq) RefName() string {
// batchResp models a batch API response: the negotiated transfer adapter
// plus per-object results.
292 type batchResp struct {
293 Transfer string `json:"transfer,omitempty"`
294 Objects []lfsObject `json:"objects"`
// lfsBatchHandler services batch API POSTs. Repo-name and object-content
// "magic strings" select error injections, retry simulation, expiring
// actions, deprecated _links responses, and transfer-adapter negotiation.
297 func lfsBatchHandler(w http.ResponseWriter, r *http.Request, id, repo string) {
298 checkingObject := r.Header.Get("X-Check-Object") == "1"
// Repo-level special cases: unsupported batch, malformed batch, netrc
// credential check, and required-credentials check.
299 if !checkingObject && repo == "batchunsupported" {
304 if !checkingObject && repo == "badbatch" {
309 if repo == "netrctest" {
310 user, pass, err := extractAuth(r.Header.Get("Authorization"))
311 if err != nil || (user != "netrcuser" || pass != "netrcpass") {
317 if missingRequiredCreds(w, r, repo) {
// Decode the request while teeing the raw body into buf for debug logging;
// drain the remainder so the connection can be reused.
321 buf := &bytes.Buffer{}
322 tee := io.TeeReader(r.Body, buf)
324 err := json.NewDecoder(tee).Decode(objs)
325 io.Copy(ioutil.Discard, r.Body)
329 debug(id, buf.String())
// "*-branch-required" repos must name the expected branch in the request
// ref; the branch is embedded in the repo name itself.
335 if strings.HasSuffix(repo, "branch-required") {
336 parts := strings.Split(repo, "-")
337 lenParts := len(parts)
338 if lenParts > 3 && "refs/heads/"+parts[lenParts-3] != objs.RefName() {
340 json.NewEncoder(w).Encode(struct {
341 Message string `json:"message"`
342 }{fmt.Sprintf("Expected ref %q, got %q", "refs/heads/"+parts[lenParts-3], objs.RefName())})
// Transfer negotiation: URL prefixes select tus or a custom adapter; pick it
// only if the client also offered it in objs.Transfers.
348 testingChunked := testingChunkedTransferEncoding(r)
349 testingTus := testingTusUploadInBatchReq(r)
350 testingTusInterrupt := testingTusUploadInterruptedInBatchReq(r)
351 testingCustomTransfer := testingCustomTransfer(r)
352 var transferChoice string
353 var searchForTransfer string
355 searchForTransfer = "tus"
356 } else if testingCustomTransfer {
357 searchForTransfer = "testcustom"
359 if len(searchForTransfer) > 0 {
360 for _, t := range objs.Transfers {
361 if t == searchForTransfer {
362 transferChoice = searchForTransfer
// Per-object handling: look up the magic behavior by OID and build the
// response entry.
368 for _, obj := range objs.Objects {
369 handler := oidHandlers[obj.Oid]
370 action := objs.Operation
374 Actions: make(map[string]*lfsLink),
377 // Clobber the OID if told to do so.
378 if handler == "unknown-oid" {
379 o.Oid = "unknown-oid"
// Downloads of missing objects get a per-object 404 error; missing uploads
// simply get no action.
384 exists := largeObjects.Has(repo, obj.Oid)
386 if action == "download" {
388 o.Err = &lfsError{Code: 404, Message: fmt.Sprintf("Object %v does not exist", obj.Oid)}
393 // not an error but don't add an action
398 if handler == "object-authenticated" {
399 o.Authenticated = true
// Injected per-object batch errors keyed by magic content.
403 case "status-batch-403":
404 o.Err = &lfsError{Code: 403, Message: "welp"}
405 case "status-batch-404":
406 o.Err = &lfsError{Code: 404, Message: "welp"}
407 case "status-batch-410":
408 o.Err = &lfsError{Code: 410, Message: "welp"}
409 case "status-batch-422":
410 o.Err = &lfsError{Code: 422, Message: "welp"}
411 case "status-batch-500":
412 o.Err = &lfsError{Code: 500, Message: "welp"}
413 default: // regular 200 response
414 if handler == "return-invalid-size" {
418 if handler == "send-deprecated-links" {
419 o.Links = make(map[string]*lfsLink)
// Build the storage action link, possibly with an expiry injected by
// serveExpired; "send-deprecated-links" mirrors it under _links.
424 Href: lfsUrl(repo, obj.Oid),
425 Header: map[string]string{},
427 a = serveExpired(a, repo, handler)
429 if handler == "send-deprecated-links" {
432 o.Actions[action] = a
436 if handler == "send-verify-action" {
437 o.Actions["verify"] = &lfsLink{
438 Href: server.URL + "/verify",
439 Header: map[string]string{
// Extra headers for chunked-transfer and tus-interrupt test modes.
446 if testingChunked && addAction {
447 if handler == "send-deprecated-links" {
448 o.Links[action].Header["Transfer-Encoding"] = "chunked"
450 o.Actions[action].Header["Transfer-Encoding"] = "chunked"
453 if testingTusInterrupt && addAction {
454 if handler == "send-deprecated-links" {
455 o.Links[action].Header["Lfs-Tus-Interrupt"] = "true"
457 o.Actions[action].Header["Lfs-Tus-Interrupt"] = "true"
// Marshal and send the assembled response.
464 ores := batchResp{Transfer: transferChoice, Objects: res}
466 by, err := json.Marshal(ores)
471 debug(id, "RESPONSE: 200")
472 debug(id, string(by))
478 // emu guards expiredRepos
481 // expiredRepos is a map keyed by repository name, valuing to whether or not it
482 // has yet served an expired object.
483 var expiredRepos = map[string]bool{}
485 // serveExpired marks the given repo as having served an expired object, so
486 // that the same repository cannot return another expired object in the future,
// serveExpired stamps link a with an already-past expiry when the handler
// requests it: "return-expired-action-forever" always, "return-expired-action"
// only on the repo's first expired serve.
487 func serveExpired(a *lfsLink, repo, handler string) *lfsLink {
489 dur = -5 * time.Minute
490 at = time.Now().Add(dur)
493 if handler == "return-expired-action-forever" ||
494 (handler == "return-expired-action" && canServeExpired(repo)) {
497 expiredRepos[repo] = true
// Expiry may be expressed absolutely (ExpiresAt) or relatively (ExpiresIn);
// the switch key is elided from this view.
505 case "expired-absolute":
507 case "expired-relative":
517 // canServeExpired returns whether or not a repository is capable of serving an
518 // expired object. In other words, canServeExpired returns whether or not the
519 // given repo has yet served an expired object.
520 func canServeExpired(repo string) bool {
524 return !expiredRepos[repo]
527 // Persistent state across requests
528 var batchResumeFailFallbackStorageAttempts = 0
529 var tusStorageAttempts = 0
// verifyCounts tracks verify attempts per repo:oid; verifyRetryRe extracts
// the failure count from "verify-fail-N-times" repo names.
533 verifyCounts = make(map[string]int)
534 verifyRetryRe = regexp.MustCompile(`verify-fail-(\d+)-times?$`)
// verifyHandler services the /verify callback. Repos named
// "verify-fail-N-times" deliberately fail the first N verify attempts per
// object with 503 before succeeding.
537 func verifyHandler(w http.ResponseWriter, r *http.Request) {
538 repo := r.Header.Get("repo")
540 Oid string `json:"oid"`
541 Size int64 `json:"size"`
544 if err := json.NewDecoder(r.Body).Decode(&payload); err != nil {
545 writeLFSError(w, http.StatusUnprocessableEntity, err.Error())
// Extract the max failure count from the repo name; non-matching repos take
// the early branch (body elided from this view).
550 if matches := verifyRetryRe.FindStringSubmatch(repo); len(matches) < 2 {
553 max, _ = strconv.Atoi(matches[1])
// Count this attempt for repo:oid and fail while under the threshold.
556 key := strings.Join([]string{repo, payload.Oid}, ":")
559 verifyCounts[key] = verifyCounts[key] + 1
560 count := verifyCounts[key]
564 writeLFSError(w, http.StatusServiceUnavailable, fmt.Sprintf(
565 "intentionally failing verify request %d (out of %d)", count, max,
571 // handles any /storage/{oid} requests
572 func storageHandler(w http.ResponseWriter, r *http.Request) {
// The repo travels in the "r" query parameter (set by lfsUrl); the OID is
// the final path segment.
578 repo := r.URL.Query().Get("r")
579 parts := strings.Split(r.URL.Path, "/")
580 oid := parts[len(parts)-1]
581 if missingRequiredCreds(w, r, repo) {
585 debug(id, "storage %s %s repo: %s", r.Method, oid, repo)
// Upload path: magic OIDs inject storage-level errors, auth checks, and
// retry simulation before the object is accepted.
588 switch oidHandlers[oid] {
589 case "status-storage-403":
592 case "status-storage-404":
595 case "status-storage-410":
598 case "status-storage-422":
601 case "status-storage-500":
604 case "status-storage-503":
605 writeLFSError(w, 503, "LFS is temporarily unavailable")
607 case "object-authenticated":
608 if len(r.Header.Get("Authorization")) > 0 {
610 w.Write([]byte("Should not send authentication"))
613 case "storage-upload-retry":
614 if retries, ok := incrementRetriesFor("storage", "upload", repo, oid, false); ok && retries < 3 {
616 w.Write([]byte("malformed content"))
// Chunked-transfer test mode requires the request to actually arrive with
// Transfer-Encoding: chunked.
622 if testingChunkedTransferEncoding(r) {
624 for _, value := range r.TransferEncoding {
625 if value == "chunked" {
631 debug(id, "Chunked transfer encoding expected")
// Hash the uploaded body, verify it matches the OID in the URL, and store it.
636 buf := &bytes.Buffer{}
638 io.Copy(io.MultiWriter(hash, buf), r.Body)
639 oid := hex.EncodeToString(hash.Sum(nil))
640 if !strings.HasSuffix(r.URL.Path, "/"+oid) {
645 largeObjects.Set(repo, oid, buf.Bytes())
// Download path: serve from the store with optional retry/resume/fallback
// behaviors keyed by the stored magic content.
648 parts := strings.Split(r.URL.Path, "/")
649 oid := parts[len(parts)-1]
654 if by, ok := largeObjects.Get(repo, oid); ok {
655 if len(by) == len("storage-download-retry") && string(by) == "storage-download-retry" {
656 if retries, ok := incrementRetriesFor("storage", "download", repo, oid, false); ok && retries < 3 {
658 by = []byte("malformed content")
660 } else if len(by) == len("status-batch-resume-206") && string(by) == "status-batch-resume-206" {
661 // Resume if header includes range, otherwise deliberately interrupt
662 if rangeHdr := r.Header.Get("Range"); rangeHdr != "" {
663 regex := regexp.MustCompile(`bytes=(\d+)\-.*`)
664 match := regex.FindStringSubmatch(rangeHdr)
665 if match != nil && len(match) > 1 {
667 resumeAt, _ = strconv.ParseInt(match[1], 10, 32)
668 w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", resumeAt, len(by), resumeAt-int64(len(by))))
673 } else if len(by) == len("batch-resume-fail-fallback") && string(by) == "batch-resume-fail-fallback" {
674 // Fail any Range: request even though we said we supported it
675 // To make sure client can fall back
676 if rangeHdr := r.Header.Get("Range"); rangeHdr != "" {
680 if batchResumeFailFallbackStorageAttempts == 0 {
681 // Truncate output on FIRST attempt to cause resume
682 // Second attempt (without range header) is fallback, complete successfully
684 batchResumeFailFallbackStorageAttempts++
// Write the response: possibly truncated, resumed from an offset, or whole.
687 w.WriteHeader(statusCode)
689 w.Write(by[0:byteLimit])
690 } else if resumeAt > 0 {
691 w.Write(by[resumeAt:])
// tus HEAD: report how many bytes of an incomplete upload are stored, so
// the client knows where to resume.
701 if !validateTusHeaders(r, id) {
705 parts := strings.Split(r.URL.Path, "/")
706 oid := parts[len(parts)-1]
708 if by, ok := largeObjects.GetIncomplete(repo, oid); ok {
709 offset = int64(len(by))
711 w.Header().Set("Upload-Offset", strconv.FormatInt(offset, 10))
// tus PATCH: append to an incomplete upload at the declared Upload-Offset.
715 if !validateTusHeaders(r, id) {
719 parts := strings.Split(r.URL.Path, "/")
720 oid := parts[len(parts)-1]
722 offsetHdr := r.Header.Get("Upload-Offset")
723 offset, err := strconv.ParseInt(offsetHdr, 10, 64)
725 log.Fatal("Unable to parse Upload-Offset header in request: ", err)
730 buf := &bytes.Buffer{}
731 out := io.MultiWriter(hash, buf)
// Replay previously stored partial bytes, verifying the claimed offset
// matches what we actually have.
733 if by, ok := largeObjects.GetIncomplete(repo, oid); ok {
734 if offset != int64(len(by)) {
735 log.Fatal(fmt.Sprintf("Incorrect offset in request, got %d expected %d", offset, len(by)))
739 _, err := out.Write(by)
741 log.Fatal("Error reading incomplete bytes from store: ", err)
745 largeObjects.DeleteIncomplete(repo, oid)
746 debug(id, "Resuming upload of %v at byte %d", oid, offset)
749 // As a test, we intentionally break the upload from byte 0 by only
750 // reading some bytes then quitting & erroring, this forces a resume
751 // any offset > 0 will work ok
753 if r.Header.Get("Lfs-Tus-Interrupt") == "true" && offset == 0 {
754 chdr := r.Header.Get("Content-Length")
755 contentLen, err := strconv.ParseInt(chdr, 10, 64)
757 log.Fatal(fmt.Sprintf("Invalid Content-Length %q", chdr))
761 truncated := contentLen / 3
762 _, _ = io.CopyN(out, r.Body, truncated)
764 copyErr = fmt.Errorf("Simulated copy error")
766 _, copyErr = io.Copy(out, r.Body)
// On copy failure, persist the partial bytes so a later PATCH can resume.
771 debug(id, "Incomplete upload of %v, %d bytes", oid, len(b))
772 largeObjects.SetIncomplete(repo, oid, b)
// On success, verify the final hash and promote to a complete object.
776 checkoid := hex.EncodeToString(hash.Sum(nil))
778 log.Fatal(fmt.Sprintf("Incorrect oid after calculation, got %q expected %q", checkoid, oid))
784 largeObjects.Set(repo, oid, b)
785 w.Header().Set("Upload-Offset", strconv.FormatInt(int64(len(b)), 10))
// validateTusHeaders reports whether the request carries the mandatory
// Tus-Resumable header.
794 func validateTusHeaders(r *http.Request, id string) bool {
795 if len(r.Header.Get("Tus-Resumable")) == 0 {
796 debug(id, "Missing Tus-Resumable header in request")
// gitHandler proxies non-LFS requests to `git http-backend` via CGI-style
// environment variables, then relays its status line and MIME headers.
802 func gitHandler(w http.ResponseWriter, r *http.Request) {
804 io.Copy(ioutil.Discard, r.Body)
808 cmd := exec.Command("git", "http-backend")
810 fmt.Sprintf("GIT_PROJECT_ROOT=%s", repoDir),
811 fmt.Sprintf("GIT_HTTP_EXPORT_ALL="),
812 fmt.Sprintf("PATH_INFO=%s", r.URL.Path),
813 fmt.Sprintf("QUERY_STRING=%s", r.URL.RawQuery),
814 fmt.Sprintf("REQUEST_METHOD=%s", r.Method),
815 fmt.Sprintf("CONTENT_TYPE=%s", r.Header.Get("Content-Type")),
818 buffer := &bytes.Buffer{}
821 cmd.Stderr = os.Stderr
823 if err := cmd.Run(); err != nil {
// Parse the CGI response: status code line, then MIME headers, copied onto
// the outgoing response.
827 text := textproto.NewReader(bufio.NewReader(buffer))
829 code, _, _ := text.ReadCodeLine(-1)
835 headers, _ := text.ReadMIMEHeader()
837 for key, values := range headers {
838 for _, value := range values {
// redirect307Handler issues a 307 to info/lfs, either relative
// (/redirect307/rel/...) or absolute (/redirect307/abs/...).
846 func redirect307Handler(w http.ResponseWriter, r *http.Request) {
852 // Send a redirect to info/lfs
853 // Make it either absolute or relative depending on subpath
854 parts := strings.Split(r.URL.Path, "/")
855 // first element is always blank since rooted
856 var redirectTo string
857 if parts[2] == "rel" {
858 redirectTo = "/" + strings.Join(parts[3:], "/")
859 } else if parts[2] == "abs" {
860 redirectTo = server.URL + "/" + strings.Join(parts[3:], "/")
862 debug(id, "Invalid URL for redirect: %v", r.URL)
866 w.Header().Set("Location", redirectTo)
// Lock API wire types (fields of the enclosing User struct begin above).
871 Name string `json:"name"`
// Lock describes one held file lock.
875 Id string `json:"id"`
876 Path string `json:"path"`
877 Owner User `json:"owner"`
878 LockedAt time.Time `json:"locked_at"`
// LockRequest is the body of a lock-creation POST.
881 type LockRequest struct {
882 Path string `json:"path"`
883 Ref *Ref `json:"ref,omitempty"`
// RefName reports the request's ref name (nil-handling elided from view).
886 func (r *LockRequest) RefName() string {
// LockResponse answers a lock-creation request.
893 type LockResponse struct {
894 Lock *Lock `json:"lock"`
895 Message string `json:"message,omitempty"`
// UnlockRequest is the body of an unlock POST.
898 type UnlockRequest struct {
899 Force bool `json:"force"`
900 Ref *Ref `json:"ref,omitempty"`
903 func (r *UnlockRequest) RefName() string {
// UnlockResponse answers an unlock request.
910 type UnlockResponse struct {
911 Lock *Lock `json:"lock"`
912 Message string `json:"message,omitempty"`
// LockList is a paginated listing of locks.
915 type LockList struct {
916 Locks []Lock `json:"locks"`
917 NextCursor string `json:"next_cursor,omitempty"`
918 Message string `json:"message,omitempty"`
// Ref names a git ref (enclosing struct header elided from this view).
922 Name string `json:"name,omitempty"`
// VerifiableLockRequest is the body of a locks/verify POST.
925 type VerifiableLockRequest struct {
926 Ref *Ref `json:"ref,omitempty"`
927 Cursor string `json:"cursor,omitempty"`
928 Limit int `json:"limit,omitempty"`
931 func (r *VerifiableLockRequest) RefName() string {
// VerifiableLockList partitions locks into ours vs. theirs for verification.
938 type VerifiableLockList struct {
939 Ours []Lock `json:"ours"`
940 Theirs []Lock `json:"theirs"`
941 NextCursor string `json:"next_cursor,omitempty"`
942 Message string `json:"message,omitempty"`
// repoLocks holds the per-repo lock lists (mutex guarding it is declared
// outside this view).
947 repoLocks = map[string][]Lock{}
// addLocks appends locks for repo and keeps the list sorted oldest-first.
950 func addLocks(repo string, l ...Lock) {
953 repoLocks[repo] = append(repoLocks[repo], l...)
954 sort.Sort(LocksByCreatedAt(repoLocks[repo]))
// getLocks returns a defensive copy of repo's lock list.
957 func getLocks(repo string) []Lock {
961 locks := repoLocks[repo]
962 cp := make([]Lock, len(locks))
963 for i, l := range locks {
// getFilteredLocks applies cursor, path filter, and limit (all optional,
// given as strings from form values) to repo's locks, returning the page
// and the next cursor.
970 func getFilteredLocks(repo, path, cursor, limit string) ([]Lock, string, error) {
971 locks := getLocks(repo)
// Skip past the cursor lock if one was supplied; an unknown cursor is an
// error.
974 for i, l := range locks {
982 locks = locks[lastSeen:]
984 return nil, "", fmt.Errorf("cursor (%s) not found", cursor)
// Path filter keeps exact matches only (comparison elided from this view).
990 for _, l := range locks {
992 filtered = append(filtered, l)
// Apply the page size; an unparsable limit is an error, and repos in the
// elided branch cap the page at 3.
1000 size, err := strconv.Atoi(limit)
1002 return nil, "", errors.New("unable to parse limit amount")
1005 size = int(math.Min(float64(len(locks)), 3))
1010 if size+1 < len(locks) {
1011 return locks[:size], locks[size+1].Id, nil
1015 return locks, "", nil
// delLock removes the lock with the given id from repo, returning the
// removed lock or nil (return of the match elided from this view).
1018 func delLock(repo string, id string) *Lock {
1023 locks := make([]Lock, 0, len(repoLocks[repo]))
1024 for _, l := range repoLocks[repo] {
1029 locks = append(locks, l)
1031 repoLocks[repo] = locks
// LocksByCreatedAt implements sort.Interface, ordering locks oldest-first by
// their LockedAt timestamp (used by addLocks).
1035 type LocksByCreatedAt []Lock
1037 func (c LocksByCreatedAt) Len() int { return len(c) }
1038 func (c LocksByCreatedAt) Less(i, j int) bool { return c[i].LockedAt.Before(c[j].LockedAt) }
1039 func (c LocksByCreatedAt) Swap(i, j int) { c[i], c[j] = c[j], c[i] }
// lockRe matches lock-collection URLs; unlockRe captures the lock id from
// ".../locks/{id}/unlock".
1042 lockRe = regexp.MustCompile(`/locks/?$`)
1043 unlockRe = regexp.MustCompile(`locks/([^/]+)/unlock\z`)
// locksHandler services the File Locking API: list (GET), create, unlock,
// and verify (POST), with repo-name magic strings injecting errors.
1046 func locksHandler(w http.ResponseWriter, r *http.Request, repo string) {
1047 dec := json.NewDecoder(r.Body)
1048 enc := json.NewEncoder(w)
// GET branch: reject non-collection paths, then list with filters.
1052 if !lockRe.MatchString(r.URL.Path) {
1053 w.Header().Set("Content-Type", "application/json")
1054 w.WriteHeader(http.StatusNotFound)
1055 w.Write([]byte(`{"message":"unknown path: ` + r.URL.Path + `"}`))
1059 if err := r.ParseForm(); err != nil {
1060 http.Error(w, "could not parse form values", http.StatusInternalServerError)
// "*-branch-required" repos must pass the expected refspec form value.
1064 if strings.HasSuffix(repo, "branch-required") {
1065 parts := strings.Split(repo, "-")
1066 lenParts := len(parts)
1067 if lenParts > 3 && "refs/heads/"+parts[lenParts-3] != r.FormValue("refspec") {
1070 Message string `json:"message"`
1071 }{fmt.Sprintf("Expected ref %q, got %q", "refs/heads/"+parts[lenParts-3], r.FormValue("refspec"))})
1077 w.Header().Set("Content-Type", "application/json")
1078 locks, nextCursor, err := getFilteredLocks(repo,
1079 r.FormValue("path"),
1080 r.FormValue("cursor"),
1081 r.FormValue("limit"))
1084 ll.Message = err.Error()
1087 ll.NextCursor = nextCursor
// POST branch: unlock, verify, or create, dispatched by URL suffix.
1093 w.Header().Set("Content-Type", "application/json")
1094 if strings.HasSuffix(r.URL.Path, "unlock") {
1096 if matches := unlockRe.FindStringSubmatch(r.URL.Path); len(matches) > 1 {
1100 if len(lockId) == 0 {
1101 enc.Encode(&UnlockResponse{Message: "Invalid lock"})
1104 unlockRequest := &UnlockRequest{}
1105 if err := dec.Decode(unlockRequest); err != nil {
1106 enc.Encode(&UnlockResponse{Message: err.Error()})
1110 if strings.HasSuffix(repo, "branch-required") {
1111 parts := strings.Split(repo, "-")
1112 lenParts := len(parts)
1113 if lenParts > 3 && "refs/heads/"+parts[lenParts-3] != unlockRequest.RefName() {
1116 Message string `json:"message"`
1117 }{fmt.Sprintf("Expected ref %q, got %q", "refs/heads/"+parts[lenParts-3], unlockRequest.RefName())})
1122 if l := delLock(repo, lockId); l != nil {
1123 enc.Encode(&UnlockResponse{Lock: l})
1125 enc.Encode(&UnlockResponse{Message: "unable to find lock"})
// Verify: repo-name suffixes inject 5xx/501/403; legacy magic repos return
// 404/410 bodies.
1130 if strings.HasSuffix(r.URL.Path, "/locks/verify") {
1131 if strings.HasSuffix(repo, "verify-5xx") {
1135 if strings.HasSuffix(repo, "verify-501") {
1139 if strings.HasSuffix(repo, "verify-403") {
1145 case "pre_push_locks_verify_404":
1146 w.WriteHeader(http.StatusNotFound)
1147 w.Write([]byte(`{"message":"pre_push_locks_verify_404"}`))
1149 case "pre_push_locks_verify_410":
1150 w.WriteHeader(http.StatusGone)
1151 w.Write([]byte(`{"message":"pre_push_locks_verify_410"}`))
1155 reqBody := &VerifiableLockRequest{}
1156 if err := dec.Decode(reqBody); err != nil {
1157 w.WriteHeader(http.StatusBadRequest)
1159 Message string `json:"message"`
1160 }{"json decode error: " + err.Error()})
1164 if strings.HasSuffix(repo, "branch-required") {
1165 parts := strings.Split(repo, "-")
1166 lenParts := len(parts)
1167 if lenParts > 3 && "refs/heads/"+parts[lenParts-3] != reqBody.RefName() {
1170 Message string `json:"message"`
1171 }{fmt.Sprintf("Expected ref %q, got %q", "refs/heads/"+parts[lenParts-3], reqBody.RefName())})
// Partition locks into ours/theirs by whether the path contains "theirs".
1176 ll := &VerifiableLockList{}
1177 locks, nextCursor, err := getFilteredLocks(repo, "",
1179 strconv.Itoa(reqBody.Limit))
1181 ll.Message = err.Error()
1183 ll.NextCursor = nextCursor
1185 for _, l := range locks {
1186 if strings.Contains(l.Path, "theirs") {
1187 ll.Theirs = append(ll.Theirs, l)
1189 ll.Ours = append(ll.Ours, l)
// Create: reject duplicates by path, then mint a lock with a random id.
1198 if strings.HasSuffix(r.URL.Path, "/locks") {
1199 lockRequest := &LockRequest{}
1200 if err := dec.Decode(lockRequest); err != nil {
1201 enc.Encode(&LockResponse{Message: err.Error()})
1204 if strings.HasSuffix(repo, "branch-required") {
1205 parts := strings.Split(repo, "-")
1206 lenParts := len(parts)
1207 if lenParts > 3 && "refs/heads/"+parts[lenParts-3] != lockRequest.RefName() {
1210 Message string `json:"message"`
1211 }{fmt.Sprintf("Expected ref %q, got %q", "refs/heads/"+parts[lenParts-3], lockRequest.RefName())})
1216 for _, l := range getLocks(repo) {
1217 if l.Path == lockRequest.Path {
1218 enc.Encode(&LockResponse{Message: "lock already created"})
1227 Id: fmt.Sprintf("%x", id[:]),
1228 Path: lockRequest.Path,
1229 Owner: User{Name: "Git LFS Tests"},
1230 LockedAt: time.Now(),
1233 addLocks(repo, *lock)
1235 // TODO(taylor): commit_needed case
1236 // TODO(taylor): err case
1238 enc.Encode(&LockResponse{
// missingRequiredCreds enforces Basic auth (requirecreds/pass) for repos
// prefixed "requirecreds", writing a 403 and returning true on failure.
1248 func missingRequiredCreds(w http.ResponseWriter, r *http.Request, repo string) bool {
1249 if !strings.HasPrefix(repo, "requirecreds") {
1253 auth := r.Header.Get("Authorization")
1254 user, pass, err := extractAuth(auth)
1256 writeLFSError(w, 403, err.Error())
1260 if user != "requirecreds" || pass != "pass" {
1261 writeLFSError(w, 403, fmt.Sprintf("Got: '%s' => '%s' : '%s'", auth, user, pass))
// The testing* helpers detect test modes from well-known URL prefixes.
1268 func testingChunkedTransferEncoding(r *http.Request) bool {
1269 return strings.HasPrefix(r.URL.String(), "/test-chunked-transfer-encoding")
1272 func testingTusUploadInBatchReq(r *http.Request) bool {
1273 return strings.HasPrefix(r.URL.String(), "/test-tus-upload")
1275 func testingTusUploadInterruptedInBatchReq(r *http.Request) bool {
1276 return strings.HasPrefix(r.URL.String(), "/test-tus-upload-interrupt")
1278 func testingCustomTransfer(r *http.Request) bool {
1279 return strings.HasPrefix(r.URL.String(), "/test-custom-transfer")
// lfsUrlRE captures the repo name preceding "/info/lfs" in a request path.
1282 var lfsUrlRE = regexp.MustCompile(`\A/?([^/]+)/info/lfs`)
// repoFromLfsUrl extracts the repo name from an LFS API path, stripping a
// trailing ".git".
1284 func repoFromLfsUrl(urlpath string) (string, error) {
1285 matches := lfsUrlRE.FindStringSubmatch(urlpath)
1286 if len(matches) != 2 {
1287 return "", fmt.Errorf("LFS url '%s' does not match %v", urlpath, lfsUrlRE)
1291 if strings.HasSuffix(repo, ".git") {
1292 return repo[0 : len(repo)-4], nil
// lfsStorage is a mutex-guarded in-memory store of complete and incomplete
// (partially uploaded) objects, keyed by repo then OID.
1297 type lfsStorage struct {
1298 objects map[string]map[string][]byte
1299 incomplete map[string]map[string][]byte
// Get returns a complete object's bytes and whether it exists.
1303 func (s *lfsStorage) Get(repo, oid string) ([]byte, bool) {
1305 defer s.mutex.Unlock()
1306 repoObjects, ok := s.objects[repo]
1311 by, ok := repoObjects[oid]
// Has reports whether a complete object exists.
1315 func (s *lfsStorage) Has(repo, oid string) bool {
1317 defer s.mutex.Unlock()
1318 repoObjects, ok := s.objects[repo]
1323 _, ok = repoObjects[oid]
// Set stores a complete object, lazily creating the repo's map.
1327 func (s *lfsStorage) Set(repo, oid string, by []byte) {
1329 defer s.mutex.Unlock()
1330 repoObjects, ok := s.objects[repo]
1332 repoObjects = make(map[string][]byte)
1333 s.objects[repo] = repoObjects
1335 repoObjects[oid] = by
// Delete removes a complete object if its repo is known.
1338 func (s *lfsStorage) Delete(repo, oid string) {
1340 defer s.mutex.Unlock()
1341 repoObjects, ok := s.objects[repo]
1343 delete(repoObjects, oid)
// GetIncomplete/SetIncomplete/DeleteIncomplete mirror the above for
// partially-uploaded (tus) objects.
1347 func (s *lfsStorage) GetIncomplete(repo, oid string) ([]byte, bool) {
1349 defer s.mutex.Unlock()
1350 repoObjects, ok := s.incomplete[repo]
1355 by, ok := repoObjects[oid]
1359 func (s *lfsStorage) SetIncomplete(repo, oid string, by []byte) {
1361 defer s.mutex.Unlock()
1362 repoObjects, ok := s.incomplete[repo]
1364 repoObjects = make(map[string][]byte)
1365 s.incomplete[repo] = repoObjects
1367 repoObjects[oid] = by
1370 func (s *lfsStorage) DeleteIncomplete(repo, oid string) {
1372 defer s.mutex.Unlock()
1373 repoObjects, ok := s.incomplete[repo]
1375 delete(repoObjects, oid)
// newLfsStorage returns an empty, ready-to-use store.
1379 func newLfsStorage() *lfsStorage {
1381 objects: make(map[string]map[string][]byte),
1382 incomplete: make(map[string]map[string][]byte),
1383 mutex: &sync.Mutex{},
// extractAuth decodes a Basic Authorization header into (user, pass);
// error handling for the base64 decode is elided from this view.
1387 func extractAuth(auth string) (string, string, error) {
1388 if strings.HasPrefix(auth, "Basic ") {
1389 decodeBy, err := base64.StdEncoding.DecodeString(auth[6:len(auth)])
1390 decoded := string(decodeBy)
1396 parts := strings.SplitN(decoded, ":", 2)
1397 if len(parts) == 2 {
1398 return parts[0], parts[1], nil
// skipIfBadAuth reports whether the request passed authentication; NTLM
// paths are delegated to handleNTLM, otherwise Basic credentials must match
// the path-derived password convention checked below.
1406 func skipIfBadAuth(w http.ResponseWriter, r *http.Request, id string, ntlmSession ntlm.ServerSession) bool {
1407 auth := r.Header.Get("Authorization")
1408 if strings.Contains(r.URL.Path, "ntlm") {
1417 user, pass, err := extractAuth(auth)
1420 debug(id, "Error decoding auth: %s", err)
// Known special users bypass the path-prefix password check.
1429 case "netrcuser", "requirecreds":
1432 if strings.HasPrefix(r.URL.Path, "/"+pass) {
1435 debug(id, "auth attempt against: %q", r.URL.Path)
1439 debug(id, "Bad auth: %q", auth)
// handleNTLM walks the NTLM handshake: reject Basic, challenge a negotiate
// message, then validate the authenticate message.
1443 func handleNTLM(w http.ResponseWriter, r *http.Request, authHeader string, session ntlm.ServerSession) {
1444 if strings.HasPrefix(strings.ToUpper(authHeader), "BASIC ") {
1450 w.Header().Set("Www-Authenticate", "ntlm")
1453 // ntlmNegotiateMessage from httputil pkg
1454 case "NTLM TlRMTVNTUAABAAAAB7IIogwADAAzAAAACwALACgAAAAKAAAoAAAAD1dJTExISS1NQUlOTk9SVEhBTUVSSUNB":
1455 ch, err := session.GenerateChallengeMessage()
1457 writeLFSError(w, 500, err.Error())
1461 chMsg := base64.StdEncoding.EncodeToString(ch.Bytes())
1462 w.Header().Set("Www-Authenticate", "ntlm "+chMsg)
1466 if !strings.HasPrefix(strings.ToUpper(authHeader), "NTLM ") {
1467 writeLFSError(w, 500, "bad authorization header: "+authHeader)
1471 auth := authHeader[5:] // strip "ntlm " prefix
1472 val, err := base64.StdEncoding.DecodeString(auth)
1474 writeLFSError(w, 500, "base64 decode error: "+err.Error())
1478 _, err = ntlm.ParseAuthenticateMessage(val, 2)
1480 writeLFSError(w, 500, "auth parse error: "+err.Error())
// Init-time population of oidHandlers: hash each magic content string so
// behaviors can be looked up by OID (enclosing func header elided).
1487 oidHandlers = make(map[string]string)
1488 for _, content := range contentHandlers {
1490 h.Write([]byte(content))
1491 oidHandlers[hex.EncodeToString(h.Sum(nil))] = content
// debug logs a printf-style message prefixed with the request id.
1495 func debug(reqid, msg string, args ...interface{}) {
1496 fullargs := make([]interface{}, len(args)+1)
1498 for i, a := range args {
1501 log.Printf("[%s] "+msg+"\n", fullargs...)
// reqId generates a random UUID-shaped request id; on entropy failure it
// writes a 500 and reports false.
1504 func reqId(w http.ResponseWriter) (string, bool) {
1505 b := make([]byte, 16)
1506 _, err := rand.Read(b)
1508 http.Error(w, "error generating id: "+err.Error(), 500)
1511 return fmt.Sprintf("%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:]), true
1514 // https://ericchiang.github.io/post/go-tls/
// generateCARootCertificates creates a self-signed CA key/cert pair used to
// sign the test client certificate.
1515 func generateCARootCertificates() (rootKey *rsa.PrivateKey, rootCert *x509.Certificate) {
1517 // generate a new key-pair
1518 rootKey, err := rsa.GenerateKey(rand.Reader, 2048)
1520 log.Fatalf("generating random key: %v", err)
1523 rootCertTmpl, err := CertTemplate()
1525 log.Fatalf("creating cert template: %v", err)
1527 // describe what the certificate will be used for
1528 rootCertTmpl.IsCA = true
1529 rootCertTmpl.KeyUsage = x509.KeyUsageCertSign | x509.KeyUsageDigitalSignature
1530 rootCertTmpl.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}
1531 // rootCertTmpl.IPAddresses = []net.IP{net.ParseIP("127.0.0.1")}
// Self-sign: the template is its own parent.
1533 rootCert, _, err = CreateCert(rootCertTmpl, rootCertTmpl, &rootKey.PublicKey, rootKey)
// generateClientCertificates mints a client key pair signed by the given CA,
// returning the key and PEM encodings of cert and key.
1538 func generateClientCertificates(rootCert *x509.Certificate, rootKey interface{}) (clientKey *rsa.PrivateKey, clientCertPEM []byte, clientKeyPEM []byte) {
1540 // create a key-pair for the client
1541 clientKey, err := rsa.GenerateKey(rand.Reader, 2048)
1543 log.Fatalf("generating random key: %v", err)
1546 // create a template for the client
1547 clientCertTmpl, err1 := CertTemplate()
1549 log.Fatalf("creating cert template: %v", err1)
1551 clientCertTmpl.KeyUsage = x509.KeyUsageDigitalSignature
1552 clientCertTmpl.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}
1554 // the root cert signs the cert by again providing its private key
1555 _, clientCertPEM, err2 := CreateCert(clientCertTmpl, rootCert, &clientKey.PublicKey, rootKey)
1557 log.Fatalf("error creating cert: %v", err2)
1560 // encode and load the cert and private key for the client
1561 clientKeyPEM = pem.EncodeToMemory(&pem.Block{
1562 Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(clientKey),
1568 // helper function to create a cert template with a serial number and other required fields
1569 func CertTemplate() (*x509.Certificate, error) {
1570 // generate a random serial number (a real cert authority would have some logic behind this)
1571 serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
1572 serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
1574 return nil, errors.New("failed to generate serial number: " + err.Error())
1577 tmpl := x509.Certificate{
1578 SerialNumber: serialNumber,
1579 Subject: pkix.Name{Organization: []string{"Yhat, Inc."}},
1580 SignatureAlgorithm: x509.SHA256WithRSA,
1581 NotBefore: time.Now(),
1582 NotAfter: time.Now().Add(time.Hour), // valid for an hour
1583 BasicConstraintsValid: true,
// CreateCert signs template with parent's key, returning the parsed cert
// and its PEM encoding (trailing lines elided from this view).
1588 func CreateCert(template, parent *x509.Certificate, pub interface{}, parentPriv interface{}) (
1589 cert *x509.Certificate, certPEM []byte, err error) {
1591 certDER, err := x509.CreateCertificate(rand.Reader, template, parent, pub, parentPriv)
1595 // parse the resulting certificate so we can use it again
1596 cert, err = x509.ParseCertificate(certDER)
1600 // PEM encode the certificate (this is a standard TLS encoding)
1601 b := pem.Block{Type: "CERTIFICATE", Bytes: certDER}
1602 certPEM = pem.EncodeToMemory(&b)