+++ /dev/null
-# This is a comment.\r
-# Each line is a file pattern followed by one or more owners.\r
-# For more details, visit https://help.github.com/articles/about-codeowners/\r
-# The lists of people and groups are for github.com\r
-\r
-# All members are supposed to review each other\r
-* @myeong-jeong @jyong2-kim @wansu-yoo @jaehoon-hyun @chacha @h-w-park @daeken-kwon @damon92-lee
\ No newline at end of file
vendor/src
doc/generate-api/*
3rdparty/
-vendor/
# Test binary, build with `go test -c`
*.test
+++ /dev/null
-../vendor
\ No newline at end of file
BINARY_FILE := edge-orchestration
EXEC_SRC_DIR := GoMain
OBJ_SRC_DIR := interface
-PKG_DIRS := devicemgr discoverymgr interface restapi/v1 servicemgr
+PKG_DIRS := devicemgr discoverymgr interface restapi/v1 servicemgr scoringmgr orchestrationapi configuremgr
# CMain target
ORG_HEADER_FILE_C := liborchestration.h
## edge-orchestration binary build
build-binary:
$(GOBUILD) -a $(GO_LDFLAGS) -o $(BIN_DIR)/$(BINARY_FILE) $(EXEC_SRC_DIR) || exit 1
- ls -al $(BIN_DIR)
-
- $(MAKE) -C $(CMAIN_DIR)
+ ls -al $(BIN_DIR)
## edge-orchestration shared object build
build-object:
CGO_ENABLED=1 $(GOBUILD) -o $(CMAIN_LIB_DIR)/$(OBJECT_FILE_C) -buildmode=c-archive $(OBJ_SRC_DIR) || exit 1
mv $(CMAIN_LIB_DIR)/$(ORG_HEADER_FILE_C) $(CMAIN_INC_DIR)/$(HEADER_FILE_C)
+ $(MAKE) -C $(CMAIN_DIR)
+
## go test and coverage
test:
$(GOCOVER) test $(PKG_DIRS) > coverage.out
[Unit]
Description=edge orchestration
-After=connman.service
+After=dbus.socket
+Requires=dbus.socket
[Service]
SmackProcessLabel=System
Release: 0
Group: System/Edge Framework
License: Apache-2.0
-Summary: edge orchestration
+Summary: Edge orchestration service
ExclusiveArch: armv7l
-Source0: %{name}-%{version}.tar.gz
-Source1: %{name}.manifest
-Source2: %{name}.service
+Source0: %{name}-%{version}.tar.gz
+Source1: %{name}.manifest
+Source2: %{name}.service
+Source3: lib%{name}.manifest
+Source11: go1.12.linux-armv7.tar.gz
-Requires(post): /sbin/ldconfig, /usr/bin/systemctl
-Requires(postun): /sbin/ldconfig, /usr/bin/systemctl
+BuildRequires: pkgconfig(glib-2.0)
+BuildRequires: pkgconfig(gio-2.0)
-BuildRequires: go
+Requires(post): /sbin/ldconfig, /usr/bin/systemctl
+Requires(postun): /sbin/ldconfig, /usr/bin/systemctl
%description
Technologies for Device/Service management.
Edge service deployment/monitoring and data sharing in home environment with limited available H/W resources.
+%package -n libedge-orchestration
+Summary: edge-orchestration library
+Group: System/Edge Framework
+Requires: %{name} = %{version}-%{release}
+
+%description -n libedge-orchestration
+Edge orchestration library for use of Edge-orchestration
+
+%package -n libedge-orchestration-devel
+Summary: edge-orchestration library for (devel)
+Group: System/Edge Framework
+Requires: %{name} = %{version}-%{release}
+
+%description -n libedge-orchestration-devel
+Edge orchestration library for use of Edge-orchestration (devel)
+
+
%prep
%setup -q
-
chmod g-w %_sourcedir/*
cp %{SOURCE1} ./%{name}.manifest
cp %{SOURCE2} ./%{name}.services
+cp %{SOURCE3} ./lib%{name}.manifest
+%ifarch armv7l
+cp %{SOURCE11} ./
+tar -zxf %{SOURCE11}
+rm -f %{_builddir}/%{name}-%{version}/go1.12.linux-armv7.tar.gz
+%endif
%build
ORG_VENDOR_DIR='vendor'
ln -s %{_builddir}/%{name}-%{version}/${ORG_VENDOR_DIR} %{_builddir}/%{name}-%{version}/${CUR_VENDOR_DIR}
export GOARCH=arm GOARM=7
-export GOROOT=/usr/local/go
-export PATH=$PATH:/usr/local/go/bin/linux_arm
+export GOROOT=%{_builddir}/%{name}-%{version}/go
+export PATH=$PATH:%{_builddir}/%{name}-%{version}/go/bin/linux_arm
export GOPATH=%{_builddir}/%{name}-%{version}:%{_builddir}/%{name}-%{version}/vendor
-go build -a -v -ldflags '-extldflags "-static"' -o ./bin/%{name} orchestration
+export BASE_DIR=.
-###ls -al $RPM_BUILD_ROOT%{_sysconfdir}/%{name}
+make build-object %{?_smp_mflags}
%install
rm -rf %{buildroot}
-install -d $RPM_BUILD_ROOT%{_bindir}
-install -p -m 755 ./bin/%{name} $RPM_BUILD_ROOT%{_bindir}/%{name}
+install -d %{buildroot}%{_bindir}
+install -p -m 755 ./bin/%{name} %{buildroot}%{_bindir}/%{name}
mkdir -p %{buildroot}/usr/lib/systemd/system/multi-user.target.wants
-install -m 0644 %SOURCE2 %{buildroot}/usr/lib/systemd/system/%{name}.service
+install -m 0644 %{SOURCE2} %{buildroot}/usr/lib/systemd/system/%{name}.service
ln -s ../%{name}.service %{buildroot}/usr/lib/systemd/system/multi-user.target.wants/
-mkdir -p $RPM_BUILD_ROOT%{_sysconfdir}/%{name}
+mkdir -p %{buildroot}%{_sysconfdir}/%{name}
%files
%manifest %{name}.manifest
--- /dev/null
+<manifest>
+ <request>
+ <domain name="_"/>
+ </request>
+</manifest>
+
package main
import (
- "devicemgr"
- "discoverymgr"
+ _ "devicemgr"
+ _ "discoverymgr"
"log"
- "net/http"
- restapi "restapi/v1"
- "servicemgr"
+ _ "log"
+ _ "net/http"
+
+ // restapi "restapi/v1"
+ _ "servicemgr"
)
func main() {
log.Printf("[%s] Server started", logPrefix)
- devicemgr.InitDeviceMgr()
- servicemgr.InitServiceMap()
- discoverymgr.InitDiscovery()
- router := restapi.NewRouter()
+ // devicemgr.InitDeviceMgr()
+ // servicemgr.InitServiceMap()
+ // discoverymgr.InitDiscovery()
+ // router := restapi.NewRouter()
- log.Fatal(http.ListenAndServe(":9090", router))
+ // log.Fatal(http.ListenAndServe(":9090", router))
}
package orchestrationapi
import (
- "log"
- "os"
+ "log"
+ "os"
)
-var ILog *log.Logger = log.New(os.Stdout, "[orchestrationapi] INFO : ", log.LstdFlags)
-var ELog *log.Logger = log.New(os.Stdout, "[orchestrationapi] ERROR : ", log.LstdFlags)
-var DLog *log.Logger = log.New(os.Stdout, "[orchestrationapi] DEBUG : ", log.LstdFlags)
\ No newline at end of file
+var (
+ // ILog inforamton level log
+ ILog = log.New(os.Stdout, "[scoringmgr] INFO : ", log.LstdFlags)
+
+ // ELog Error level log
+ ELog = log.New(os.Stdout, "[scoringmgr] ERROR : ", log.LstdFlags)
+
+ // DLog Debug level log
+ DLog = log.New(os.Stdout, "[scoringmgr] DEBUG : ", log.LstdFlags)
+)
package orchestrationapi
-
import (
- "testing"
- "time"
- "os/exec"
- "fmt"
+ "fmt"
+ "os/exec"
+ "testing"
+ "time"
- configuremgr "configuremgr"
- confdescription "configuremgr/description"
+ configuremgr "configuremgr"
+ confdescription "configuremgr/description"
- // mockconfiguremgr "configuremgr/mock"
+ // mockconfiguremgr "configuremgr/mock"
- scoringmgr "scoringmgr"
- mockscoringmgr "scoringmgr/mock"
-
- // discoverymgr "discoverymgr"
- servicemgr "servicemgr"
+ scoringmgr "scoringmgr"
+ mockscoringmgr "scoringmgr/mock"
+ // discoverymgr "discoverymgr"
+ servicemgr "servicemgr"
)
-
//jaehoon.hyun , jaehoon.hyun
-func TestConfigureMgrScoringMgr(t *testing.T){
+func TestConfigureMgrScoringMgr(t *testing.T) {
- //make orche
- orcheMock := new(Orche)
+ //make orche
+ orcheMock := new(Orche)
- orcheMock.IScoringmgr = scoringmgr.Init()
- orcheMock.IConfiguremgr = configuremgr.Init()
+ orcheMock.IScoringmgr = scoringmgr.Init()
+ orcheMock.IConfiguremgr = configuremgr.Init()
- orcheMock.IScoringmgr.IRunningScore = mockscoringmgr.LoadScoringAddInterface
- orcheMock.IScoringmgr.IGetScore = mockscoringmgr.GetScoreRandom100Mock
- orcheMock.IScoringmgr.Ch = make(chan interface{}, 1024)
+ orcheMock.IScoringmgr.IRunningScore = mockscoringmgr.LoadScoringAddInterface
+ orcheMock.IScoringmgr.IGetScore = mockscoringmgr.GetScoreRandom100Mock
+ orcheMock.IScoringmgr.Ch = make(chan interface{}, 1024)
- orcheMock.IConfiguremgr.IDiscoveryMgr.PushConfPath = pushConfPathDiscoveryDeviceMock
- orcheMock.IConfiguremgr.IScoringMgr.PushLibPath = scoringmgr.PushLibPath
- orcheMock.IConfiguremgr.IScoringMgr.Ch = orcheMock.IScoringmgr.Ch
+ orcheMock.IConfiguremgr.IDiscoveryMgr.PushConfPath = pushConfPathDiscoveryDeviceMock
+ orcheMock.IConfiguremgr.IScoringMgr.PushLibPath = scoringmgr.PushLibPath
+ orcheMock.IConfiguremgr.IScoringMgr.Ch = orcheMock.IScoringmgr.Ch
- orcheMock.IDiscoverymgr.GetEndpointDevices = getEndpointDevicesMock
- orcheMock.IServicemgr.ExecuteApp = executeAppMock
+ orcheMock.IDiscoverymgr.GetEndpointDevices = getEndpointDevicesMock
+ orcheMock.IServicemgr.ExecuteApp = executeAppMock
+ //start module function
+ orcheMock.IScoringmgr.Listening()
+ go orcheMock.IConfiguremgr.Watch("/tmp/foo")
- //start module function
- orcheMock.IScoringmgr.Listening()
- go orcheMock.IConfiguremgr.Watch("/tmp/foo")
-
- //init scenario
- execCommand("rm -rf /tmp/foo/mysum")
- time.Sleep(time.Duration(1) * time.Second)
+ //init scenario
+ execCommand("rm -rf /tmp/foo/mysum")
+ time.Sleep(time.Duration(1) * time.Second)
- //user scenario
- execCommand(fmt.Sprintf("cp -ar %s %s", "./mock/mysum/", "/tmp/foo"))
- time.Sleep(time.Duration(3) * time.Second)
+ //user scenario
+ execCommand(fmt.Sprintf("cp -ar %s %s", "./mock/mysum/", "/tmp/foo"))
+ time.Sleep(time.Duration(3) * time.Second)
- //resource release
- orcheMock.IScoringmgr.RemoveLib("mysum")
- orcheMock.IConfiguremgr.Done <- true
+ //resource release
+ orcheMock.IScoringmgr.RemoveLib("mysum")
+ orcheMock.IConfiguremgr.Done <- true
}
}
//jaehoon.hyun, chacha
-func TestRequestServiceFunctionServiceMgr(t *testing.T){
-
- orcheMock := Init("")
+func TestRequestServiceFunctionServiceMgr(t *testing.T) {
- orcheMock.IScoringmgr = scoringmgr.Init()
- orcheMock.IConfiguremgr = configuremgr.Init()
+ orcheMock := Init("")
- orcheMock.IScoringmgr.IRunningScore = mockscoringmgr.LoadScoringAddInterface
- orcheMock.IScoringmgr.IGetScore = mockscoringmgr.GetScoreRandom100Mock
- orcheMock.IScoringmgr.Ch = make(chan interface{}, 1024)
+ orcheMock.IScoringmgr = scoringmgr.Init()
+ orcheMock.IConfiguremgr = configuremgr.Init()
- orcheMock.IConfiguremgr.IDiscoveryMgr.PushConfPath = pushConfPathDiscoveryDeviceMock
- orcheMock.IConfiguremgr.IScoringMgr.PushLibPath = scoringmgr.PushLibPath
- orcheMock.IConfiguremgr.IScoringMgr.Ch = orcheMock.IScoringmgr.Ch
+ orcheMock.IScoringmgr.IRunningScore = mockscoringmgr.LoadScoringAddInterface
+ orcheMock.IScoringmgr.IGetScore = mockscoringmgr.GetScoreRandom100Mock
+ orcheMock.IScoringmgr.Ch = make(chan interface{}, 1024)
- orcheMock.IDiscoverymgr.GetEndpointDevices = getEndpointDevicesMock
- orcheMock.IServicemgr.ExecuteApp = servicemgr.ExecuteApp
+ orcheMock.IConfiguremgr.IDiscoveryMgr.PushConfPath = pushConfPathDiscoveryDeviceMock
+ orcheMock.IConfiguremgr.IScoringMgr.PushLibPath = scoringmgr.PushLibPath
+ orcheMock.IConfiguremgr.IScoringMgr.Ch = orcheMock.IScoringmgr.Ch
+ orcheMock.IDiscoverymgr.GetEndpointDevices = getEndpointDevicesMock
+ orcheMock.IServicemgr.ExecuteApp = servicemgr.ExecuteApp
- //scoringmgr init
- orcheMock.IScoringmgr.Listening()
+ //scoringmgr init
+ orcheMock.IScoringmgr.Listening()
- //configuremgr init
- go orcheMock.IConfiguremgr.Watch("/tmp/foo")
+ //configuremgr init
+ go orcheMock.IConfiguremgr.Watch("/tmp/foo")
- //servicemgr init
- servicemgr.InitServiceMap()
+ //servicemgr init
+ servicemgr.Init()
-
- //init scenario
- execCommand("rm -rf /tmp/foo/mysum")
- time.Sleep(time.Duration(1) * time.Second)
+ //init scenario
+ execCommand("rm -rf /tmp/foo/mysum")
+ time.Sleep(time.Duration(1) * time.Second)
- //user scenario
- execCommand(fmt.Sprintf("cp -ar %s %s", "./mock/mysum/", "/tmp/foo"))
- time.Sleep(time.Duration(3) * time.Second)
- RequestService("mysum", "ls", []string{"-al"})
+ //user scenario
+ execCommand(fmt.Sprintf("cp -ar %s %s", "./mock/mysum/", "/tmp/foo"))
+ time.Sleep(time.Duration(3) * time.Second)
+ RequestService("mysum", "ls", []string{"-al"})
+ //resource release
+ time.Sleep(time.Duration(1) * time.Second)
+ orcheMock.IScoringmgr.RemoveLib("mysum")
+ orcheMock.IConfiguremgr.Done <- true
- //resource release
- time.Sleep(time.Duration(1) * time.Second)
- orcheMock.IScoringmgr.RemoveLib("mysum")
- orcheMock.IConfiguremgr.Done <- true
-
- for {}
+ // for {
+ // }
}
//daemon92-lee, chacha
-func TestDiscoveryMgrServiceMgr(t *testing.T){
-
+func TestDiscoveryMgrServiceMgr(t *testing.T) {
}
//jaehoon.hyun, daemon92-lee, jaehoon.hyun
-func TestConfigureMgrDiscoveryMgrScoringMgr(t *testing.T){
+func TestConfigureMgrDiscoveryMgrScoringMgr(t *testing.T) {
}
//jaehoon.hyun, daemon92-lee, chacha
-func TestConfigureMgrDiscoveryMgrServiceMgr(t *testing.T){
-
+func TestConfigureMgrDiscoveryMgrServiceMgr(t *testing.T) {
}
//jaehoon.hyun, chacha, jaehoon.hyun
-func TestConfigureMgrServiceMgrScoringMgr(t *testing.T){
+func TestConfigureMgrServiceMgrScoringMgr(t *testing.T) {
}
//jaehoon.hyun, daemon92-lee, chacha, jaehoon.hyun
-func TestConfigureMgrDiscoveryMgrScoringMgrServiceMgr(t *testing.T){
-
+func TestConfigureMgrDiscoveryMgrScoringMgrServiceMgr(t *testing.T) {
}
-
-func pushConfPathDiscoveryDeviceMock (doc *confdescription.Doc) (err error) {
- ILog.Println("pushConfPathDiscoveryDeviceMock")
- ILog.Println(*doc)
- return
+func pushConfPathDiscoveryDeviceMock(doc *confdescription.Doc) (err error) {
+ ILog.Println("pushConfPathDiscoveryDeviceMock")
+ ILog.Println(*doc)
+ return
}
func pushLibPathScoringAppMock(libPath string, doc *confdescription.Doc, handlersCh chan<- interface{}) (err error) {
- ILog.Println("pushLibPathScoringAppMock")
- ILog.Println(libPath)
- return
+ ILog.Println("pushLibPathScoringAppMock")
+ ILog.Println(libPath)
+ return
}
func getEndpointDevicesMock(serviceName string) []string {
- DLog.Printf("getEndpointDevicesMock serviceName: %s\n", serviceName)
- return []string{"localhost", "localhost"}
+ DLog.Printf("getEndpointDevicesMock serviceName: %s\n", serviceName)
+ return []string{"localhost", "localhost"}
}
func executeAppMock(target string, name string, args []string, notiChan chan string) (serviceID uint64, err error) {
- ILog.Println("ExecuteApp")
- ILog.Println(target)
- ILog.Println(name)
- ILog.Println(args)
- return
+ ILog.Println("ExecuteApp")
+ ILog.Println(target)
+ ILog.Println(name)
+ ILog.Println(args)
+ return
}
-
func execCommand(command string) {
- configuremgr.DLog.Println(command)
- cmd := exec.Command("sh", "-c", command)
- stdoutStderr, err := cmd.CombinedOutput()
- configuremgr.DLog.Printf("%s", stdoutStderr)
- if err != nil {
- configuremgr.ELog.Fatal(err)
- }
-}
\ No newline at end of file
+ configuremgr.DLog.Println(command)
+ cmd := exec.Command("sh", "-c", command)
+ stdoutStderr, err := cmd.CombinedOutput()
+ configuremgr.DLog.Printf("%s", stdoutStderr)
+ if err != nil {
+ configuremgr.ELog.Fatal(err)
+ }
+}
package orchestrationapi
-
import (
-
- "sync/atomic"
- "sort"
- // "time"
- // "os/exec"
-
- configuremgr "configuremgr"
- scoringmgr "scoringmgr"
-
- // discoverymgr "discoverymgr"
- // servicemgr "servicemgr"
+ "sort"
+ "sync/atomic"
+ // "time"
+ // "os/exec"
+ configuremgr "configuremgr"
+ scoringmgr "scoringmgr"
+ // discoverymgr "discoverymgr"
+ // servicemgr "servicemgr"
)
type Orche struct {
-
- IScoringmgr *scoringmgr.Handlers
- IConfiguremgr *configuremgr.ConfigureMgr
+ IScoringmgr *scoringmgr.Handlers
+ IConfiguremgr *configuremgr.ConfigureMgr
- IDiscoverymgr struct {
- GetEndpointDevices func(serviceName string) []string
- }
-
- IServicemgr struct {
- ExecuteApp func(target string, name string, args []string, notiChan chan string) (serviceID uint64, err error)
- }
+ IDiscoverymgr struct {
+ GetEndpointDevices func(serviceName string) []string
+ }
+ IServicemgr struct {
+ ExecuteApp func(target string, name string, args []string, notiChan chan string) (serviceID uint64, err error)
+ }
}
var orcheEngine *Orche
-
-type deviceScore struct{
- endpoint string
- score float64
+type deviceScore struct {
+ endpoint string
+ score float64
}
-
type orcheClient struct {
-
- libName string
- serviceName string
- args []string
- notiChan chan string
+ libName string
+ serviceName string
+ args []string
+ notiChan chan string
}
-var orchClientId int32
-var orcheClients = [1024]orcheClient{}
+var (
+ orchClientID int32
+ orcheClients = [1024]orcheClient{}
+)
func Init(settingPath string) *Orche {
- orcheEngine = new(Orche)
-
- return orcheEngine
+ orcheEngine = new(Orche)
+ return orcheEngine
}
-func RequestService(libName string, serviceName string, args []string) (handle int){
+func RequestService(libName string, serviceName string, args []string) (handle int) {
- clientId := atomic.LoadInt32(&orchClientId)
- atomic.AddInt32(&orchClientId, 1)
+ clientID := atomic.LoadInt32(&orchClientID)
+ atomic.AddInt32(&orchClientID, 1)
- serviceClient := addServiceClient(clientId, libName, serviceName, args)
- go serviceClient.listenNotify()
- endpoints := getEndpointDevices(serviceName)
- deviceScores := sortByScore(gatheringDevicesScore(endpoints, libName))
- executeApp(deviceScores[0].endpoint, serviceName, args, serviceClient.notiChan)
+ serviceClient := addServiceClient(clientID, libName, serviceName, args)
+ go serviceClient.listenNotify()
+ endpoints := getEndpointDevices(serviceName)
+ deviceScores := sortByScore(gatheringDevicesScore(endpoints, libName))
+ executeApp(deviceScores[0].endpoint, serviceName, args, serviceClient.notiChan)
- ILog.Println(deviceScores)
- return
+ ILog.Println(deviceScores)
+ return
}
-
func Close(handle uint64) {
}
-
-func (client *orcheClient) listenNotify(){
- select {
- case str := <-client.notiChan:
- ILog.Printf("service status changed [path:%s][serviceName:%s][status:%s]\n", client.libName, client.serviceName, str)
+func (client *orcheClient) listenNotify() {
+ select {
+ case str := <-client.notiChan:
+ ILog.Printf("service status changed [path:%s][serviceName:%s][status:%s]\n", client.libName, client.serviceName, str)
}
}
+func addServiceClient(clientID int32, libName string, serviceName string, args []string) (client *orcheClient) {
+ orcheClients[clientID].libName = libName
+ orcheClients[clientID].args = args
+ orcheClients[clientID].serviceName = serviceName
-func addServiceClient(clientId int32, libName string, serviceName string, args []string) (client *orcheClient){
- orcheClients[clientId].libName = libName
- orcheClients[clientId].args = args
- orcheClients[clientId].serviceName = serviceName
-
- client = &orcheClients[clientId]
- return
+ client = &orcheClients[clientID]
+ return
}
-func getEndpointDevices(serviceName string) []string{
- return orcheEngine.IDiscoverymgr.GetEndpointDevices(serviceName)
+func getEndpointDevices(serviceName string) []string {
+ return orcheEngine.IDiscoverymgr.GetEndpointDevices(serviceName)
}
-func gatheringDevicesScore(endpoints []string, libName string) (deviceScores []deviceScore){
+func gatheringDevicesScore(endpoints []string, libName string) (deviceScores []deviceScore) {
+
+ for _, endpoint := range endpoints {
- for _ , endpoint := range endpoints {
-
- //(chacha)TODO : err occured , notify devicemgr to delete
- score, _ := orcheEngine.IScoringmgr.IGetScore(endpoint, libName)
+ //(chacha)TODO : err occured , notify devicemgr to delete
+ score, _ := orcheEngine.IScoringmgr.IGetScore(endpoint, libName)
- deviceScores = append(deviceScores, deviceScore{endpoint, score})
- }
+ deviceScores = append(deviceScores, deviceScore{endpoint, score})
+ }
- return
+ return
}
-func sortByScore(deviceScores []deviceScore) ( []deviceScore ){
- sort.Slice(deviceScores, func(i, j int) bool {
+func sortByScore(deviceScores []deviceScore) []deviceScore {
+ sort.Slice(deviceScores, func(i, j int) bool {
return deviceScores[i].score > deviceScores[j].score
})
- return deviceScores
+ return deviceScores
}
func executeApp(endpoint string, serviceName string, args []string, notiChan chan string) {
- orcheEngine.IServicemgr.ExecuteApp(endpoint, serviceName, args, notiChan)
-}
\ No newline at end of file
+ orcheEngine.IServicemgr.ExecuteApp(endpoint, serviceName, args, notiChan)
+}
--- /dev/null
+package httpclient
+
+const (
+ // ConstWellknownPort is wellknonw port
+ ConstWellknownPort = 56001
+
+ // ConstLocalTarget is for knowing local environments
+ ConstLocalTarget = "localhost"
+
+ // ConstPrefixHTTP is "http://"
+ ConstPrefixHTTP = "http://"
+)
"net"
"net/http"
"os/exec"
+ "restapi/httpclient"
"scoringmgr"
"servicemgr"
"strconv"
libName := vars["libname"]
name := libName
- target := scoringmgr.ConstLocalTarget
+ target := httpclient.ConstLocalTarget
scoreValue, err := scoringmgr.GetScore(target, name)
package scoringmgr
import (
- "log"
- "os"
+ "log"
+ "os"
)
-var ILog *log.Logger = log.New(os.Stdout, "[scoringmgr] INFO : ", log.LstdFlags)
-var ELog *log.Logger = log.New(os.Stdout, "[scoringmgr] ERROR : ", log.LstdFlags)
-var DLog *log.Logger = log.New(os.Stdout, "[scoringmgr] DEBUG : ", log.LstdFlags)
\ No newline at end of file
+var (
+ // ILog inforamton level log
+ ILog = log.New(os.Stdout, "[scoringmgr] INFO : ", log.LstdFlags)
+
+ // ELog Error level log
+ ELog = log.New(os.Stdout, "[scoringmgr] ERROR : ", log.LstdFlags)
+
+ // DLog Debug level log
+ DLog = log.New(os.Stdout, "[scoringmgr] DEBUG : ", log.LstdFlags)
+)
package scoringmgr
+
/*
#include <stdlib.h>
#include <dlfcn.h>
import "unsafe"
import (
-
- "strings"
- "time"
-
- confdescription "configuremgr/description"
+ "strings"
+ "time"
+ confdescription "configuremgr/description"
)
const (
- LIB_STATUS_INIT = 1
- LIB_STATUS_RUN = 2
+ constLibStatusInit = 1
+ constLibStatusRun = 2
+ constLibStatusDone = true
)
-const (
- LIB_STATUS_DONE = true
-)
-
+// Handler is for handling library
type Handler struct {
- handlerName string
- libPath string
- functionName string
- devicesScore map[string]float64
- intervalMs int
- resourceCount int
- scoreValue float64
- statusSignal chan int
- endSignal chan bool
- parents *Handlers
-
- //for dynamic loading
- symbol uintptr
- dl uintptr
-
+ handlerName string
+ libPath string
+ functionName string
+ devicesScore map[string]float64
+ intervalMs int
+ resourceCount int
+ scoreValue float64
+ statusSignal chan int
+ endSignal chan bool
+ parents *Handlers
+
+ //for dynamic loading
+ symbol uintptr
+ dl uintptr
}
+// Handlers composite handler
type Handlers struct {
table map[string]*Handler
- Ch chan interface{}
+ Ch chan interface{}
- IRunningScore func (uintptr) (float64)
- IGetScore func (string, string) (float64, error)
+ IRunningScore func(uintptr) float64
+ IGetScore func(string, string) (float64, error)
}
var (
handlers *Handlers
)
-const (
- // ConstLocalTarget is for knowing local environments
- ConstLocalTarget = "localhost"
-
- // ConstPrefixHTTP is "http://"
- ConstPrefixHTTP = "http://"
-
- // ConstWellknownPort is wellknonw port
- ConstWellknownPort = 56001
-)
-
+// Init for initializing library handlers
func Init() *Handlers {
handlers = new(Handlers)
return handlers
}
-//TODO : async notify lib loading
+// PushLibPath is that pushing library path to channel
+// @TODO : async notify lib loading
func PushLibPath(libPath string, doc *confdescription.Doc, handlersCh chan<- interface{}) (err error) {
- ILog.Printf("input PushLibPath : %s", libPath)
+ ILog.Printf("input PushLibPath : %s", libPath)
handlersCh <- pair{libPath, doc}
return nil
}
-
-// Listening function
+// Listening for waiting handler channel in
func (handlers *Handlers) Listening() {
go func() {
- ILog.Println("listening started")
- for {
-
- select {
- case obj := <- handlers.Ch :
- ILog.Printf("input handlers.Ch from configuremgr")
- handlers.makeHandler(obj.(pair)).runScoring()
+ ILog.Println("listening started")
+ for {
+
+ select {
+ case obj := <-handlers.Ch:
+ ILog.Printf("input handlers.Ch from configuremgr")
+ handlers.makeHandler(obj.(pair)).runScoring()
- } //select end
+ } //select end
} // for end
}() //function call
}
+// RemoveLib is for clean-up channel / handler
func (handlers *Handlers) RemoveLib(libName string) {
- handler := handlers.table[libName]
+ handler := handlers.table[libName]
- //for producer
- handler.endSignal <- LIB_STATUS_DONE
+ //for producer
+ handler.endSignal <- constLibStatusDone
- //for consumer
- handler.endSignal <- LIB_STATUS_DONE
+ //for consumer
+ handler.endSignal <- constLibStatusDone
- //Close dynamic loader
- defer C.dlclose(unsafe.Pointer(handler.dl))
+ //Close dynamic loader
+ defer C.dlclose(unsafe.Pointer(handler.dl))
- ILog.Printf("RemoveLib : %s\n", libName)
+ ILog.Printf("RemoveLib : %s\n", libName)
}
func (handlers *Handlers) makeHandler(pairObj pair) (handlerObj *Handler) {
- handlerObj = new(Handler)
- handlerObj.handlerName = getLibName(pairObj.libPath)
- handlerObj.libPath = pairObj.libPath
- handlerObj.intervalMs = pairObj.doc.ResourceType.IntervalTimeMs
- handlerObj.functionName = pairObj.doc.ScoringMethod.FunctionName
- handlerObj.devicesScore = make(map[string]float64)
- handlerObj.statusSignal = make(chan int, 1024)
- handlerObj.endSignal = make(chan bool, 1024)
- handlerObj.parents = handlers
+ handlerObj = new(Handler)
+ handlerObj.handlerName = getLibName(pairObj.libPath)
+ handlerObj.libPath = pairObj.libPath
+ handlerObj.intervalMs = pairObj.doc.ResourceType.IntervalTimeMs
+ handlerObj.functionName = pairObj.doc.ScoringMethod.FunctionName
+ handlerObj.devicesScore = make(map[string]float64)
+ handlerObj.statusSignal = make(chan int, 1024)
+ handlerObj.endSignal = make(chan bool, 1024)
+ handlerObj.parents = handlers
handlers.table[handlerObj.handlerName] = handlerObj
func (handler *Handler) runScoring() {
- ILog.Println("Run scoring")
+ ILog.Println("Run scoring")
+
+ go func() {
- go func(){
+ for {
+ select {
+ case status := <-handler.statusSignal:
+ handler.process(status)
+ case <-handler.endSignal:
+ ILog.Println("consumer signal go routine die")
+ return
+ }
+ }
+ }()
- for {
- select {
- case status := <- handler.statusSignal :
- handler.process(status)
- case <- handler.endSignal :
- ILog.Println("consumer signal go routine die")
- return
- }
- }
- }()
+ go func() {
- go func() {
-
- handler.statusSignal <- LIB_STATUS_INIT
+ handler.statusSignal <- constLibStatusInit
- for {
+ for {
- select {
- case <- handler.endSignal :
- ILog.Println("producer signal go routine die")
- return
- default:
- handler.statusSignal <- LIB_STATUS_RUN
- time.Sleep(time.Duration(1000) * time.Millisecond)
- ILog.Printf("status run sleep time : %d\n", handler.intervalMs)
- }
+ select {
+ case <-handler.endSignal:
+ ILog.Println("producer signal go routine die")
+ return
+ default:
+ handler.statusSignal <- constLibStatusRun
+ time.Sleep(time.Duration(1000) * time.Millisecond)
+ ILog.Printf("status run sleep time : %d\n", handler.intervalMs)
+ }
- }
+ }
- }()
+ }()
- return
+ return
}
func (handler *Handler) process(status int) {
- switch(status){
- case LIB_STATUS_INIT:
- ILog.Printf("init\n")
- handler.init()
- break
- case LIB_STATUS_RUN:
- ILog.Printf("run\n")
- handler.running()
- break
- }
+ switch status {
+ case constLibStatusInit:
+ ILog.Printf("init\n")
+ handler.init()
+ break
+ case constLibStatusRun:
+ ILog.Printf("run\n")
+ handler.running()
+ break
+ }
}
-
func (handler *Handler) init() {
- sym := C.CString(handler.functionName)
- defer C.free(unsafe.Pointer(sym))
+ sym := C.CString(handler.functionName)
+ defer C.free(unsafe.Pointer(sym))
- lib := C.CString(handler.libPath)
- defer C.free(unsafe.Pointer(lib))
+ lib := C.CString(handler.libPath)
+ defer C.free(unsafe.Pointer(lib))
- dl, err := C.dlopen(lib , C.RTLD_LAZY)
- handler.dl = uintptr(dl)
+ dl, err := C.dlopen(lib, C.RTLD_LAZY)
+ handler.dl = uintptr(dl)
- if err != nil {
- ELog.Fatal("dlopen error occured")
- }
-
- symbolPtr, symbolErr := C.dlsym(dl, sym)
- if symbolErr != nil {
- ELog.Fatal("symbol error occured")
- }
+ if err != nil {
+ ELog.Fatal("dlopen error occured")
+ }
- handler.symbol = uintptr(symbolPtr)
+ symbolPtr, symbolErr := C.dlsym(dl, sym)
+ if symbolErr != nil {
+ ELog.Fatal("symbol error occured")
+ }
- ILog.Printf("functionName : %s\n", handler.functionName)
- ILog.Printf("libPath : %s\n", handler.libPath)
- ILog.Printf("symbol : %#08X\n", handler.symbol)
+ handler.symbol = uintptr(symbolPtr)
+ ILog.Printf("functionName : %s\n", handler.functionName)
+ ILog.Printf("libPath : %s\n", handler.libPath)
+ ILog.Printf("symbol : %#08X\n", handler.symbol)
- return
+ return
}
func (handler *Handler) running() {
- handler.scoreValue = handler.parents.IRunningScore(handler.symbol)
+ handler.scoreValue = handler.parents.IRunningScore(handler.symbol)
}
func getLibName(libPath string) string {
libName := strings.Split(name[lastIdx-1], ".")
return strings.TrimPrefix(libName[0], "lib")
-}
\ No newline at end of file
+}
+++ /dev/null
-package scoringmgr
-
-import (
- "io/ioutil"
- "net"
- "net/http"
- "time"
-)
-
-func doGet(targetURL string) (respBytes []byte, err error) {
- req, err := http.NewRequest("GET", targetURL, nil)
- if err != nil {
- return
- }
-
- var netTransport = &http.Transport{
- Dial: (&net.Dialer{
- Timeout: 5 * time.Second,
- }).Dial,
- TLSHandshakeTimeout: 5 * time.Second,
- }
-
- client := &http.Client{
- Timeout: time.Second * 10,
- Transport: netTransport,
- }
-
- resp, err := client.Do(req)
- if err != nil {
- return
- }
- defer resp.Body.Close()
-
- respBytes, err = ioutil.ReadAll(resp.Body)
- return
-}
import "unsafe"
import (
- scoringmgr "scoringmgr"
+ scoringmgr "scoringmgr"
)
-func LoadScoringAddInterface(symbol uintptr) float64{
+// LoadScoringAddInterface function
+func LoadScoringAddInterface(symbol uintptr) float64 {
- ret := C.wrap_add(unsafe.Pointer(symbol),2,3)
- scoringmgr.ILog.Println(ret)
+ ret := C.wrap_add(unsafe.Pointer(symbol), 2, 3)
+ scoringmgr.ILog.Println(ret)
- return float64(ret)
+ return float64(ret)
}
package mockscoringmgr
import (
- "math/rand"
- "time"
- "log"
+ "log"
+ "math/rand"
+ "time"
)
-func GetScoreRandom100Mock(endpoint string, libName string) (score float64, err error){
+// GetScoreRandom100Mock mocking
+func GetScoreRandom100Mock(endpoint string, libName string) (score float64, err error) {
- log.Printf("libName : %s ", libName)
- seed := rand.NewSource(time.Now().UnixNano())
- random := rand.New(seed)
+ log.Printf("libName : %s ", libName)
+ seed := rand.NewSource(time.Now().UnixNano())
+ random := rand.New(seed)
- score = random.Float64() * 100
-
- return
+ score = random.Float64() * 100
+
+ return
}
package scoringmgr
import (
- "strings"
- "errors"
- "strconv"
- "encoding/json"
- "net"
+ "encoding/json"
+ "errors"
+ "net"
+ "restapi/httpclient"
+ "strconv"
+ "strings"
)
-
// GetScore is getting score of device
func GetScore(target string, name string) (scoreValue float64, err error) {
- if strings.Compare(target, getOutboundIP()) == 0 || strings.Compare(target, ConstLocalTarget) == 0 {
+ if strings.Compare(target, getOutboundIP()) == 0 || strings.Compare(target, httpclient.ConstLocalTarget) == 0 {
scoreValue, err = getScoreLocalEnv(name)
} else {
scoreValue, err = getScoreRemoteEnv(target, name)
return
}
-// func getScore(name string) (score float64, err error) {
-// handlerObj := handlers.table[name]
-
-// if handlerObj == nil {
-// err = errors.New("Invalid Service Name")
-// return
-// }
-
-// score = handlerObj.scoreValue
-
-// return
-// }
-
func getScoreLocalEnv(name string) (scoreValue float64, err error) {
- // scoreChan, err := getScore(name)
- // if err != nil {
- // return
- // }
-
- // select {
- // case scoreValue = <-scoreChan:
- // DLog.Println(scoreValue)
- // }
- // return
-
- handlerObj := handlers.table[name]
+ handlerObj := handlers.table[name]
if handlerObj == nil {
err = errors.New("Invalid Library Name")
}
scoreValue = handlerObj.scoreValue
-
- return
+
+ return
}
func getScoreRemoteEnv(target string, name string) (scoreValue float64, err error) {
- targetURL := ConstPrefixHTTP + target + ":" + strconv.Itoa(ConstWellknownPort) + "/api/v1/scoringmgr/score/" + name
-
- respBytes, err := doGet(targetURL)
+ targetURL := httpclient.ConstPrefixHTTP + target + ":" + strconv.Itoa(httpclient.ConstWellknownPort) + "/api/v1/scoringmgr/score/" + name
+ respBytes, err := httpclient.DoGet(targetURL)
if checkError(err) == true {
return scoreValue, err
}
package scoringmgr_test
import (
- "testing"
- "time"
+ "testing"
+ "time"
- confdescription "configuremgr/description"
- scoringmgr "scoringmgr"
- mockscoringmgr "scoringmgr/mock"
+ confdescription "configuremgr/description"
+ scoringmgr "scoringmgr"
+ mockscoringmgr "scoringmgr/mock"
- "gopkg.in/sconf/ini.v0"
+ "gopkg.in/sconf/ini.v0"
"gopkg.in/sconf/sconf.v0"
)
+func TestBasicMockScoringMgr(t *testing.T) {
-func TestBasicMockScoringMgr(t *testing.T){
+ scoringHandlers := scoringmgr.Init()
+ scoringHandlers.IRunningScore = mockscoringmgr.LoadScoringAddInterface
+ scoringHandlers.IGetScore = mockscoringmgr.GetScoreRandom100Mock
+ scoringHandlers.Ch = make(chan interface{}, 1024)
- scoringHandlers := scoringmgr.Init()
- scoringHandlers.IloadScoringLibrary = mockscoringmgr.LoadScoringAdd
- scoringHandlers.Ch = make(chan interface{}, 1024)
+ scoringHandlers.Listening()
- scoringHandlers.Listening()
+ time.Sleep(time.Duration(1) * time.Second)
- time.Sleep(time.Duration(1) * time.Second)
-
- libPath := "mock/mysum/libmysum.so"
- confPath := "mock/mysum/mysum.conf"
+ libPath := "mock/mysum/libmysum.so"
+ confPath := "mock/mysum/mysum.conf"
- cfg := new(confdescription.Doc)
- sconf.Must(cfg).Read(ini.File(confPath))
- scoringmgr.PushLibPath(libPath, cfg, scoringHandlers.Ch)
-
- time.Sleep(time.Duration(5) * time.Second)
+ cfg := new(confdescription.Doc)
+ sconf.Must(cfg).Read(ini.File(confPath))
+ scoringmgr.PushLibPath(libPath, cfg, scoringHandlers.Ch)
+ time.Sleep(time.Duration(5) * time.Second)
-}
\ No newline at end of file
+}
// ConstServiceNotFound is service status is not found
ConstServiceNotFound = "NotFound"
- // ConstWellknownPort is wellknonw port
- ConstWellknownPort = 56001
-
// ConstServiceExecuteURI is URI for creating & executing service
ConstServiceExecuteURI = "/api/v1/servicemgr/services"
// ConstServiceStatusNotiURI is URI for notification status of service
ConstServiceStatusNotiURI = "/api/v1/servicemgr/services/notification/"
-
- // ConstLocalTarget is for knowing local environments
- ConstLocalTarget = "localhost"
-
- // ConstPrefixHTTP is "http://"
- ConstPrefixHTTP = "http://"
)
var (
reqbytes, _ := json.Marshal(statusNotificationRequest)
- if strings.Compare(p.notificationTargetURL, ConstLocalTarget) == 0 {
+ if strings.Compare(p.notificationTargetURL, httpclient.ConstLocalTarget) == 0 {
HandleNoti(statusNotificationRequest)
} else {
targetURL := p.notificationTargetURL + ConstServiceStatusNotiURI + strconv.FormatUint(p.serviceID, 10)
}
appInfo[ConstKeyServiceName] = name
- if strings.Compare(target, getOutboundIP()) == 0 || strings.Compare(target, ConstLocalTarget) == 0 {
- appInfo[ConstKeyNotiTargetURL] = ConstLocalTarget
+ if strings.Compare(target, getOutboundIP()) == 0 || strings.Compare(target, httpclient.ConstLocalTarget) == 0 {
+ appInfo[ConstKeyNotiTargetURL] = httpclient.ConstLocalTarget
err = executeLocalEnv(appInfo)
} else {
- appInfo[ConstKeyNotiTargetURL] = ConstPrefixHTTP + target + ":" + strconv.Itoa(ConstWellknownPort)
+ appInfo[ConstKeyNotiTargetURL] = httpclient.ConstPrefixHTTP + target + ":" + strconv.Itoa(httpclient.ConstWellknownPort)
err = executeRemoteEnv(appInfo, target)
}
func executeRemoteEnv(appInfo map[string]interface{}, target string) (err error) {
reqBytes, _ := json.Marshal(appInfo)
- executeTarget := target + ":" + strconv.Itoa(ConstWellknownPort) + ConstServiceExecuteURI
+ executeTarget := target + ":" + strconv.Itoa(httpclient.ConstWellknownPort) + ConstServiceExecuteURI
respBytes, err := httpclient.DoPost(executeTarget, reqBytes)
--- /dev/null
+root = true
+
+[*]
+indent_style = tab
+indent_size = 4
--- /dev/null
+Before reporting an issue, please ensure you are using the latest release of fsnotify.
+
+### Which operating system (GOOS) and version are you using?
+
+Linux: lsb_release -a
+macOS: sw_vers
+Windows: systeminfo | findstr /B /C:OS
+
+### Please describe the issue that occurred.
+
+### Are you able to reproduce the issue? Please provide steps to reproduce and a code sample if possible.
--- /dev/null
+#### What does this pull request do?
+
+
+#### Where should the reviewer start?
+
+
+#### How should this be manually tested?
+
--- /dev/null
+# Setup a Global .gitignore for OS and editor generated files:
+# https://help.github.com/articles/ignoring-files
+# git config --global core.excludesfile ~/.gitignore_global
+
+.vagrant
+*.sublime-project
--- /dev/null
+sudo: false
+language: go
+
+go:
+ - "stable"
+ - "1.11.x"
+ - "1.10.x"
+ - "1.9.x"
+ - "1.8.x"
+
+matrix:
+ include:
+ - go: "stable"
+ env: GOLINT=true
+ allow_failures:
+ - go: tip
+ fast_finish: true
+
+
+before_install:
+ - if [ ! -z "${GOLINT}" ]; then go get -u golang.org/x/lint/golint; fi
+
+script:
+ - go test --race ./...
+
+after_script:
+ - test -z "$(gofmt -s -l -w . | tee /dev/stderr)"
+ - if [ ! -z "${GOLINT}" ]; then echo running golint; golint --set_exit_status ./...; else echo skipping golint; fi
+ - go vet ./...
+
+os:
+ - linux
+ - osx
+ - windows
+
+notifications:
+ email: false
--- /dev/null
+# Names should be added to this file as
+# Name or Organization <email address>
+# The email address is not required for organizations.
+
+# You can update this list using the following command:
+#
+# $ git shortlog -se | awk '{print $2 " " $3 " " $4}'
+
+# Please keep the list sorted.
+
+Aaron L <aaron@bettercoder.net>
+Adrien Bustany <adrien@bustany.org>
+Amit Krishnan <amit.krishnan@oracle.com>
+Anmol Sethi <me@anmol.io>
+Bjørn Erik Pedersen <bjorn.erik.pedersen@gmail.com>
+Bruno Bigras <bigras.bruno@gmail.com>
+Caleb Spare <cespare@gmail.com>
+Case Nelson <case@teammating.com>
+Chris Howey <chris@howey.me> <howeyc@gmail.com>
+Christoffer Buchholz <christoffer.buchholz@gmail.com>
+Daniel Wagner-Hall <dawagner@gmail.com>
+Dave Cheney <dave@cheney.net>
+Evan Phoenix <evan@fallingsnow.net>
+Francisco Souza <f@souza.cc>
+Hari haran <hariharan.uno@gmail.com>
+John C Barstow
+Kelvin Fo <vmirage@gmail.com>
+Ken-ichirou MATSUZAWA <chamas@h4.dion.ne.jp>
+Matt Layher <mdlayher@gmail.com>
+Nathan Youngman <git@nathany.com>
+Nickolai Zeldovich <nickolai@csail.mit.edu>
+Patrick <patrick@dropbox.com>
+Paul Hammond <paul@paulhammond.org>
+Pawel Knap <pawelknap88@gmail.com>
+Pieter Droogendijk <pieter@binky.org.uk>
+Pursuit92 <JoshChase@techpursuit.net>
+Riku Voipio <riku.voipio@linaro.org>
+Rob Figueiredo <robfig@gmail.com>
+Rodrigo Chiossi <rodrigochiossi@gmail.com>
+Slawek Ligus <root@ooz.ie>
+Soge Zhang <zhssoge@gmail.com>
+Tiffany Jernigan <tiffany.jernigan@intel.com>
+Tilak Sharma <tilaks@google.com>
+Tom Payne <twpayne@gmail.com>
+Travis Cline <travis.cline@gmail.com>
+Tudor Golubenco <tudor.g@gmail.com>
+Vahe Khachikyan <vahe@live.ca>
+Yukang <moorekang@gmail.com>
+bronze1man <bronze1man@gmail.com>
+debrando <denis.brandolini@gmail.com>
+henrikedwards <henrik.edwards@gmail.com>
+铁哥 <guotie.9@gmail.com>
--- /dev/null
+# Changelog
+
+## v1.4.7 / 2018-01-09
+
+* BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine)
+* Tests: Fix missing verb on format string (thanks @rchiossi)
+* Linux: Fix deadlock in Remove (thanks @aarondl)
+* Linux: Watch.Add improvements (avoid race, fix consistency, reduce garbage) (thanks @twpayne)
+* Docs: Moved FAQ into the README (thanks @vahe)
+* Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich)
+* Docs: replace references to OS X with macOS
+
+## v1.4.2 / 2016-10-10
+
+* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack)
+
+## v1.4.1 / 2016-10-04
+
+* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack)
+
+## v1.4.0 / 2016-10-01
+
+* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie)
+
+## v1.3.1 / 2016-06-28
+
+* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc)
+
+## v1.3.0 / 2016-04-19
+
+* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135)
+
+## v1.2.10 / 2016-03-02
+
+* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj)
+
+## v1.2.9 / 2016-01-13
+
+* kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep)
+
+## v1.2.8 / 2015-12-17
+
+* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test)
+* inotify: fix race in test
+* enable race detection for continuous integration (Linux, Mac, Windows)
+
+## v1.2.5 / 2015-10-17
+
+* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki)
+* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken)
+* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie)
+* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion)
+
+## v1.2.1 / 2015-10-14
+
+* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx)
+
+## v1.2.0 / 2015-02-08
+
+* inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD)
+* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD)
+* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59)
+
+## v1.1.1 / 2015-02-05
+
+* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD)
+
+## v1.1.0 / 2014-12-12
+
+* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43)
+ * add low-level functions
+ * only need to store flags on directories
+ * less mutexes [#13](https://github.com/fsnotify/fsnotify/issues/13)
+ * done can be an unbuffered channel
+ * remove calls to os.NewSyscallError
+* More efficient string concatenation for Event.String() [#52](https://github.com/fsnotify/fsnotify/pull/52) (thanks @mdlayher)
+* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48)
+* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
+
+## v1.0.4 / 2014-09-07
+
+* kqueue: add dragonfly to the build tags.
+* Rename source code files, rearrange code so exported APIs are at the top.
+* Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang)
+
+## v1.0.3 / 2014-08-19
+
+* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36)
+
+## v1.0.2 / 2014-08-17
+
+* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
+* [Fix] Make ./path and path equivalent. (thanks @zhsso)
+
+## v1.0.0 / 2014-08-15
+
+* [API] Remove AddWatch on Windows, use Add.
+* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30)
+* Minor updates based on feedback from golint.
+
+## dev / 2014-07-09
+
+* Moved to [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify).
+* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno)
+
+## dev / 2014-07-04
+
+* kqueue: fix incorrect mutex used in Close()
+* Update example to demonstrate usage of Op.
+
+## dev / 2014-06-28
+
+* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/fsnotify/fsnotify/issues/4)
+* Fix for String() method on Event (thanks Alex Brainman)
+* Don't build on Plan 9 or Solaris (thanks @4ad)
+
+## dev / 2014-06-21
+
+* Events channel of type Event rather than *Event.
+* [internal] use syscall constants directly for inotify and kqueue.
+* [internal] kqueue: rename events to kevents and fileEvent to event.
+
+## dev / 2014-06-19
+
+* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally).
+* [internal] remove cookie from Event struct (unused).
+* [internal] Event struct has the same definition across every OS.
+* [internal] remove internal watch and removeWatch methods.
+
+## dev / 2014-06-12
+
+* [API] Renamed Watch() to Add() and RemoveWatch() to Remove().
+* [API] Pluralized channel names: Events and Errors.
+* [API] Renamed FileEvent struct to Event.
+* [API] Op constants replace methods like IsCreate().
+
+## dev / 2014-06-12
+
+* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
+
+## dev / 2014-05-23
+
+* [API] Remove current implementation of WatchFlags.
+ * current implementation doesn't take advantage of OS for efficiency
+ * provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes
+ * no tests for the current implementation
+ * not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195)
+
+## v0.9.3 / 2014-12-31
+
+* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
+
+## v0.9.2 / 2014-08-17
+
+* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
+
+## v0.9.1 / 2014-06-12
+
+* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
+
+## v0.9.0 / 2014-01-17
+
+* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany)
+* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare)
+* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library.
+
+## v0.8.12 / 2013-11-13
+
+* [API] Remove FD_SET and friends from Linux adapter
+
+## v0.8.11 / 2013-11-02
+
+* [Doc] Add Changelog [#72][] (thanks @nathany)
+* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond)
+
+## v0.8.10 / 2013-10-19
+
+* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott)
+* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer)
+* [Doc] specify OS-specific limits in README (thanks @debrando)
+
+## v0.8.9 / 2013-09-08
+
+* [Doc] Contributing (thanks @nathany)
+* [Doc] update package path in example code [#63][] (thanks @paulhammond)
+* [Doc] GoCI badge in README (Linux only) [#60][]
+* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany)
+
+## v0.8.8 / 2013-06-17
+
+* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie)
+
+## v0.8.7 / 2013-06-03
+
+* [API] Make syscall flags internal
+* [Fix] inotify: ignore event changes
+* [Fix] race in symlink test [#45][] (reported by @srid)
+* [Fix] tests on Windows
+* lower case error messages
+
+## v0.8.6 / 2013-05-23
+
+* kqueue: Use EVT_ONLY flag on Darwin
+* [Doc] Update README with full example
+
+## v0.8.5 / 2013-05-09
+
+* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg)
+
+## v0.8.4 / 2013-04-07
+
+* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz)
+
+## v0.8.3 / 2013-03-13
+
+* [Fix] inotify/kqueue memory leak [#36][] (reported by @nbkolchin)
+* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin)
+
+## v0.8.2 / 2013-02-07
+
+* [Doc] add Authors
+* [Fix] fix data races for map access [#29][] (thanks @fsouza)
+
+## v0.8.1 / 2013-01-09
+
+* [Fix] Windows path separators
+* [Doc] BSD License
+
+## v0.8.0 / 2012-11-09
+
+* kqueue: directory watching improvements (thanks @vmirage)
+* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto)
+* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr)
+
+## v0.7.4 / 2012-10-09
+
+* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji)
+* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig)
+* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig)
+* [Fix] kqueue: modify after recreation of file
+
+## v0.7.3 / 2012-09-27
+
+* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage)
+* [Fix] kqueue: no longer get duplicate CREATE events
+
+## v0.7.2 / 2012-09-01
+
+* kqueue: events for created directories
+
+## v0.7.1 / 2012-07-14
+
+* [Fix] for renaming files
+
+## v0.7.0 / 2012-07-02
+
+* [Feature] FSNotify flags
+* [Fix] inotify: Added file name back to event path
+
+## v0.6.0 / 2012-06-06
+
+* kqueue: watch files after directory created (thanks @tmc)
+
+## v0.5.1 / 2012-05-22
+
+* [Fix] inotify: remove all watches before Close()
+
+## v0.5.0 / 2012-05-03
+
+* [API] kqueue: return errors during watch instead of sending over channel
+* kqueue: match symlink behavior on Linux
+* inotify: add `DELETE_SELF` (requested by @taralx)
+* [Fix] kqueue: handle EINTR (reported by @robfig)
+* [Doc] Godoc example [#1][] (thanks @davecheney)
+
+## v0.4.0 / 2012-03-30
+
+* Go 1 released: build with go tool
+* [Feature] Windows support using winfsnotify
+* Windows does not have attribute change notifications
+* Roll attribute notifications into IsModify
+
+## v0.3.0 / 2012-02-19
+
+* kqueue: add files when watch directory
+
+## v0.2.0 / 2011-12-30
+
+* update to latest Go weekly code
+
+## v0.1.0 / 2011-10-19
+
+* kqueue: add watch on file creation to match inotify
+* kqueue: create file event
+* inotify: ignore `IN_IGNORED` events
+* event String()
+* linux: common FileEvent functions
+* initial commit
+
+[#79]: https://github.com/howeyc/fsnotify/pull/79
+[#77]: https://github.com/howeyc/fsnotify/pull/77
+[#72]: https://github.com/howeyc/fsnotify/issues/72
+[#71]: https://github.com/howeyc/fsnotify/issues/71
+[#70]: https://github.com/howeyc/fsnotify/issues/70
+[#63]: https://github.com/howeyc/fsnotify/issues/63
+[#62]: https://github.com/howeyc/fsnotify/issues/62
+[#60]: https://github.com/howeyc/fsnotify/issues/60
+[#59]: https://github.com/howeyc/fsnotify/issues/59
+[#49]: https://github.com/howeyc/fsnotify/issues/49
+[#45]: https://github.com/howeyc/fsnotify/issues/45
+[#40]: https://github.com/howeyc/fsnotify/issues/40
+[#36]: https://github.com/howeyc/fsnotify/issues/36
+[#33]: https://github.com/howeyc/fsnotify/issues/33
+[#29]: https://github.com/howeyc/fsnotify/issues/29
+[#25]: https://github.com/howeyc/fsnotify/issues/25
+[#24]: https://github.com/howeyc/fsnotify/issues/24
+[#21]: https://github.com/howeyc/fsnotify/issues/21
--- /dev/null
+# Contributing
+
+## Issues
+
+* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/fsnotify/fsnotify/issues).
+* Please indicate the platform you are using fsnotify on.
+* A code example to reproduce the problem is appreciated.
+
+## Pull Requests
+
+### Contributor License Agreement
+
+fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual).
+
+Please indicate that you have signed the CLA in your pull request.
+
+### How fsnotify is Developed
+
+* Development is done on feature branches.
+* Tests are run on BSD, Linux, macOS and Windows.
+* Pull requests are reviewed and [applied to master][am] using [hub][].
+ * Maintainers may modify or squash commits rather than asking contributors to.
+* To issue a new release, the maintainers will:
+ * Update the CHANGELOG
+ * Tag a version, which will become available through gopkg.in.
+
+### How to Fork
+
+For smooth sailing, always use the original import path. Installing with `go get` makes this easy.
+
+1. Install from GitHub (`go get -u github.com/fsnotify/fsnotify`)
+2. Create your feature branch (`git checkout -b my-new-feature`)
+3. Ensure everything works and the tests pass (see below)
+4. Commit your changes (`git commit -am 'Add some feature'`)
+
+Contribute upstream:
+
+1. Fork fsnotify on GitHub
+2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`)
+3. Push to the branch (`git push fork my-new-feature`)
+4. Create a new Pull Request on GitHub
+
+This workflow is [thoroughly explained by Katrina Owen](https://splice.com/blog/contributing-open-source-git-repositories-go/).
+
+### Testing
+
+fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Windows.
+
+Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on.
+
+To aid in cross-platform testing there is a Vagrantfile for Linux and BSD.
+
+* Install [Vagrant](http://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/)
+* Setup [Vagrant Gopher](https://github.com/nathany/vagrant-gopher) in your `src` folder.
+* Run `vagrant up` from the project folder. You can also setup just one box with `vagrant up linux` or `vagrant up bsd` (note: the BSD box doesn't support Windows hosts at this time, and NFS may prompt for your host OS password)
+* Once setup, you can run the test suite on a given OS with a single command `vagrant ssh linux -c 'cd fsnotify/fsnotify; go test'`.
+* When you're done, you will want to halt or destroy the Vagrant boxes.
+
+Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory.
+
+Right now there is no equivalent solution for Windows and macOS, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads).
+
+### Maintainers
+
+Help maintaining fsnotify is welcome. To be a maintainer:
+
+* Submit a pull request and sign the CLA as above.
+* You must be able to run the test suite on Mac, Windows, Linux and BSD.
+
+To keep master clean, the fsnotify project uses the "apply mail" workflow outlined in Nathaniel Talbott's post ["Merge pull request" Considered Harmful][am]. This requires installing [hub][].
+
+All code changes should be internal pull requests.
+
+Releases are tagged using [Semantic Versioning](http://semver.org/).
+
+[hub]: https://github.com/github/hub
+[am]: http://blog.spreedly.com/2014/06/24/merge-pull-request-considered-harmful/#.VGa5yZPF_Zs
--- /dev/null
+Copyright (c) 2012 The Go Authors. All rights reserved.
+Copyright (c) 2012 fsnotify Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--- /dev/null
+# File system notifications for Go
+
+[![GoDoc](https://godoc.org/github.com/fsnotify/fsnotify?status.svg)](https://godoc.org/github.com/fsnotify/fsnotify) [![Go Report Card](https://goreportcard.com/badge/github.com/fsnotify/fsnotify)](https://goreportcard.com/report/github.com/fsnotify/fsnotify)
+
+fsnotify utilizes [golang.org/x/sys](https://godoc.org/golang.org/x/sys) rather than `syscall` from the standard library. Ensure you have the latest version installed by running:
+
+```console
+go get -u golang.org/x/sys/...
+```
+
+Cross platform: Windows, Linux, BSD and macOS.
+
+|Adapter |OS |Status |
+|----------|----------|----------|
+|inotify |Linux 2.6.27 or later, Android\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)|
+|kqueue |BSD, macOS, iOS\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)|
+|ReadDirectoryChangesW|Windows|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)|
+|FSEvents |macOS |[Planned](https://github.com/fsnotify/fsnotify/issues/11)|
+|FEN |Solaris 11 |[In Progress](https://github.com/fsnotify/fsnotify/issues/12)|
+|fanotify |Linux 2.6.37+ | |
+|USN Journals |Windows |[Maybe](https://github.com/fsnotify/fsnotify/issues/53)|
+|Polling |*All* |[Maybe](https://github.com/fsnotify/fsnotify/issues/9)|
+
+\* Android and iOS are untested.
+
+Please see [the documentation](https://godoc.org/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information.
+
+## API stability
+
+fsnotify is a fork of [howeyc/fsnotify](https://godoc.org/github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA).
+
+All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/). Further API changes are [planned](https://github.com/fsnotify/fsnotify/milestones), and will be tagged with a new major revision number.
+
+Go 1.6 supports dependencies located in the `vendor/` folder. Unless you are creating a library, it is recommended that you copy fsnotify into `vendor/github.com/fsnotify/fsnotify` within your project, and likewise for `golang.org/x/sys`.
+
+## Contributing
+
+Please refer to [CONTRIBUTING][] before opening an issue or pull request.
+
+## Example
+
+See [example_test.go](https://github.com/fsnotify/fsnotify/blob/master/example_test.go).
+
+## FAQ
+
+**When a file is moved to another directory is it still being watched?**
+
+No (it shouldn't be, unless you are watching where it was moved to).
+
+**When I watch a directory, are all subdirectories watched as well?**
+
+No, you must add watches for any directory you want to watch (a recursive watcher is on the roadmap [#18][]).
+
+**Do I have to watch the Error and Event channels in a separate goroutine?**
+
+As of now, yes. Looking into making this single-thread friendly (see [howeyc #7][#7])
+
+**Why am I receiving multiple events for the same file on OS X?**
+
+Spotlight indexing on OS X can result in multiple events (see [howeyc #62][#62]). A temporary workaround is to add your folder(s) to the *Spotlight Privacy settings* until we have a native FSEvents implementation (see [#11][]).
+
+**How many files can be watched at once?**
+
+There are OS-specific limits as to how many watches can be created:
+* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit, reaching this limit results in a "no space left on device" error.
+* BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc", reaching these limits results in a "too many open files" error.
+
+[#62]: https://github.com/howeyc/fsnotify/issues/62
+[#18]: https://github.com/fsnotify/fsnotify/issues/18
+[#11]: https://github.com/fsnotify/fsnotify/issues/11
+[#7]: https://github.com/howeyc/fsnotify/issues/7
+
+[contributing]: https://github.com/fsnotify/fsnotify/blob/master/CONTRIBUTING.md
+
+## Related Projects
+
+* [notify](https://github.com/rjeczalik/notify)
+* [fsevents](https://github.com/fsnotify/fsevents)
+
--- /dev/null
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !plan9
+
+package fsnotify_test
+
+import (
+ "log"
+
+ "github.com/fsnotify/fsnotify"
+)
+
+func ExampleNewWatcher() {
+ watcher, err := fsnotify.NewWatcher()
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer watcher.Close()
+
+ done := make(chan bool)
+ go func() {
+ for {
+ select {
+ case event, ok := <-watcher.Events:
+ if !ok {
+ return
+ }
+ log.Println("event:", event)
+ if event.Op&fsnotify.Write == fsnotify.Write {
+ log.Println("modified file:", event.Name)
+ }
+ case err, ok := <-watcher.Errors:
+ if !ok {
+ return
+ }
+ log.Println("error:", err)
+ }
+ }
+ }()
+
+ err = watcher.Add("/tmp/foo")
+ if err != nil {
+ log.Fatal(err)
+ }
+ <-done
+}
--- /dev/null
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build solaris
+
+package fsnotify
+
+import (
+ "errors"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+ Events chan Event
+ Errors chan error
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+ return nil, errors.New("FEN based watcher not yet supported for fsnotify\n")
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+ return nil
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+ return nil
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+ return nil
+}
--- /dev/null
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !plan9
+
+// Package fsnotify provides a platform-independent interface for file system notifications.
+package fsnotify
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+)
+
+// Event represents a single file system notification.
+type Event struct {
+ Name string // Relative path to the file or directory.
+ Op Op // File operation that triggered the event.
+}
+
+// Op describes a set of file operations.
+type Op uint32
+
+// These are the generalized file operations that can trigger a notification.
+const (
+ Create Op = 1 << iota
+ Write
+ Remove
+ Rename
+ Chmod
+)
+
+func (op Op) String() string {
+ // Use a buffer for efficient string concatenation
+ var buffer bytes.Buffer
+
+ if op&Create == Create {
+ buffer.WriteString("|CREATE")
+ }
+ if op&Remove == Remove {
+ buffer.WriteString("|REMOVE")
+ }
+ if op&Write == Write {
+ buffer.WriteString("|WRITE")
+ }
+ if op&Rename == Rename {
+ buffer.WriteString("|RENAME")
+ }
+ if op&Chmod == Chmod {
+ buffer.WriteString("|CHMOD")
+ }
+ if buffer.Len() == 0 {
+ return ""
+ }
+ return buffer.String()[1:] // Strip leading pipe
+}
+
+// String returns a string representation of the event in the form
+// "file: REMOVE|WRITE|..."
+func (e Event) String() string {
+	return fmt.Sprintf("%q: %s", e.Name, e.Op.String())
+}
+
+// Common errors that can be reported by a watcher
+var (
+	// ErrEventOverflow is delivered on the Errors channel when the kernel's
+	// inotify event queue overflows (IN_Q_OVERFLOW) and events have been dropped.
+	ErrEventOverflow = errors.New("fsnotify queue overflow")
+)
--- /dev/null
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !plan9
+
+package fsnotify
+
+import (
+ "os"
+ "testing"
+ "time"
+)
+
+// TestEventStringWithValue verifies Event.String formatting for several Op combinations.
+func TestEventStringWithValue(t *testing.T) {
+	for opMask, expectedString := range map[Op]string{
+		Chmod | Create: `"/usr/someFile": CREATE|CHMOD`,
+		Rename:         `"/usr/someFile": RENAME`,
+		Remove:         `"/usr/someFile": REMOVE`,
+		Write | Chmod:  `"/usr/someFile": WRITE|CHMOD`,
+	} {
+		event := Event{Name: "/usr/someFile", Op: opMask}
+		if event.String() != expectedString {
+			t.Fatalf("Expected %s, got: %v", expectedString, event.String())
+		}
+
+	}
+}
+
+// TestEventOpStringWithValue verifies Op.String for a combined flag value.
+func TestEventOpStringWithValue(t *testing.T) {
+	expectedOpString := "WRITE|CHMOD"
+	event := Event{Name: "someFile", Op: Write | Chmod}
+	if event.Op.String() != expectedOpString {
+		t.Fatalf("Expected %s, got: %v", expectedOpString, event.Op.String())
+	}
+}
+
+// TestEventOpStringWithNoValue verifies that the zero Op stringifies to "".
+func TestEventOpStringWithNoValue(t *testing.T) {
+	expectedOpString := ""
+	event := Event{Name: "testFile", Op: 0}
+	if event.Op.String() != expectedOpString {
+		t.Fatalf("Expected %s, got: %v", expectedOpString, event.Op.String())
+	}
+}
+
+// TestWatcherClose tests that the goroutine started by creating the watcher can be
+// signalled to return at any time, even if there is no goroutine listening on the events
+// or errors channels.
+func TestWatcherClose(t *testing.T) {
+	t.Parallel()
+
+	// tempMkFile/newWatcher are shared test helpers defined elsewhere in this package.
+	name := tempMkFile(t, "")
+	w := newWatcher(t)
+	err := w.Add(name)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = os.Remove(name)
+	if err != nil {
+		t.Fatal(err)
+	}
+	// Allow the watcher to receive the event.
+	time.Sleep(time.Millisecond * 100)
+
+	err = w.Close()
+	if err != nil {
+		t.Fatal(err)
+	}
+}
--- /dev/null
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+
+package fsnotify
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+	Events   chan Event        // Delivers file system events to the consumer.
+	Errors   chan error        // Delivers errors encountered while watching.
+	mu       sync.Mutex        // Map access
+	fd       int               // inotify file descriptor (from inotify_init1).
+	poller   *fdPoller         // epoll-based poller used to make reads interruptible by Close.
+	watches  map[string]*watch // Map of inotify watches (key: path)
+	paths    map[int]string    // Map of watched paths (key: watch descriptor)
+	done     chan struct{}     // Channel for sending a "quit message" to the reader goroutine
+	doneResp chan struct{}     // Channel to respond to Close
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+// It creates the inotify descriptor and an epoll poller, then starts the
+// readEvents goroutine which feeds the Events and Errors channels.
+func NewWatcher() (*Watcher, error) {
+	// Create inotify fd
+	fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC)
+	if fd == -1 {
+		return nil, errno
+	}
+	// Create epoll
+	poller, err := newFdPoller(fd)
+	if err != nil {
+		// Avoid leaking the inotify fd if epoll setup fails.
+		unix.Close(fd)
+		return nil, err
+	}
+	w := &Watcher{
+		fd:       fd,
+		poller:   poller,
+		watches:  make(map[string]*watch),
+		paths:    make(map[int]string),
+		Events:   make(chan Event),
+		Errors:   make(chan error),
+		done:     make(chan struct{}),
+		doneResp: make(chan struct{}),
+	}
+
+	go w.readEvents()
+	return w, nil
+}
+
+// isClosed reports whether Close has been called (i.e. w.done is closed).
+// Non-blocking: relies on receive from a closed channel succeeding immediately.
+func (w *Watcher) isClosed() bool {
+	select {
+	case <-w.done:
+		return true
+	default:
+		return false
+	}
+}
+
+// Close removes all watches and closes the events channel.
+// Safe to call more than once; subsequent calls return nil immediately.
+func (w *Watcher) Close() error {
+	if w.isClosed() {
+		return nil
+	}
+
+	// Send 'close' signal to goroutine, and set the Watcher to closed.
+	close(w.done)
+
+	// Wake up goroutine
+	w.poller.wake()
+
+	// Wait for goroutine to close
+	<-w.doneResp
+
+	return nil
+}
+
+// Add starts watching the named file or directory (non-recursively).
+// Adding a path that is already watched merges the flags (IN_MASK_ADD)
+// rather than replacing the existing watch.
+func (w *Watcher) Add(name string) error {
+	name = filepath.Clean(name)
+	if w.isClosed() {
+		return errors.New("inotify instance already closed")
+	}
+
+	// The set of inotify events mapped onto the platform-independent Op flags
+	// (see newEvent for the mapping).
+	const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM |
+		unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY |
+		unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF
+
+	var flags uint32 = agnosticEvents
+
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	watchEntry := w.watches[name]
+	if watchEntry != nil {
+		flags |= watchEntry.flags | unix.IN_MASK_ADD
+	}
+	wd, errno := unix.InotifyAddWatch(w.fd, name, flags)
+	if wd == -1 {
+		return errno
+	}
+
+	if watchEntry == nil {
+		w.watches[name] = &watch{wd: uint32(wd), flags: flags}
+		w.paths[wd] = name
+	} else {
+		// Existing watch: the kernel returns the same wd; just record the merged flags.
+		watchEntry.wd = uint32(wd)
+		watchEntry.flags = flags
+	}
+
+	return nil
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+// Returns an error if the path is not currently watched, or if the kernel
+// rejects the removal (see the EINVAL discussion below).
+func (w *Watcher) Remove(name string) error {
+	name = filepath.Clean(name)
+
+	// Fetch the watch.
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	watch, ok := w.watches[name]
+
+	// Remove it from inotify.
+	if !ok {
+		return fmt.Errorf("can't remove non-existent inotify watch for: %s", name)
+	}
+
+	// We successfully removed the watch if InotifyRmWatch doesn't return an
+	// error, we need to clean up our internal state to ensure it matches
+	// inotify's kernel state.
+	delete(w.paths, int(watch.wd))
+	delete(w.watches, name)
+
+	// inotify_rm_watch will return EINVAL if the file has been deleted;
+	// the inotify will already have been removed.
+	// watches and pathes are deleted in ignoreLinux() implicitly and asynchronously
+	// by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE
+	// so that EINVAL means that the wd is being rm_watch()ed or its file removed
+	// by another thread and we have not received IN_IGNORE event.
+	success, errno := unix.InotifyRmWatch(w.fd, watch.wd)
+	if success == -1 {
+		// TODO: Perhaps it's not helpful to return an error here in every case.
+		// the only two possible errors are:
+		// EBADF, which happens when w.fd is not a valid file descriptor of any kind.
+		// EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor.
+		// Watch descriptors are invalidated when they are removed explicitly or implicitly;
+		// explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted.
+		return errno
+	}
+
+	return nil
+}
+
+// watch is the bookkeeping record for a single inotify watch.
+type watch struct {
+	wd    uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
+	flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
+}
+
+// readEvents reads from the inotify file descriptor, converts the
+// received events into Event objects and sends them via the Events channel.
+// Runs as a dedicated goroutine (started by NewWatcher) until Close is called;
+// on exit it closes Events, Errors and doneResp, and releases the fds.
+func (w *Watcher) readEvents() {
+	var (
+		buf   [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
+		n     int                                  // Number of bytes read with read()
+		errno error                                // Syscall errno
+		ok    bool                                 // For poller.wait
+	)
+
+	defer close(w.doneResp)
+	defer close(w.Errors)
+	defer close(w.Events)
+	defer unix.Close(w.fd)
+	defer w.poller.close()
+
+	for {
+		// See if we have been closed.
+		if w.isClosed() {
+			return
+		}
+
+		// Block in epoll until the inotify fd is readable or wake() is called.
+		ok, errno = w.poller.wait()
+		if errno != nil {
+			select {
+			case w.Errors <- errno:
+			case <-w.done:
+				return
+			}
+			continue
+		}
+
+		if !ok {
+			continue
+		}
+
+		n, errno = unix.Read(w.fd, buf[:])
+		// If a signal interrupted execution, see if we've been asked to close, and try again.
+		// http://man7.org/linux/man-pages/man7/signal.7.html :
+		// "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable"
+		if errno == unix.EINTR {
+			continue
+		}
+
+		// unix.Read might have been woken up by Close. If so, we're done.
+		if w.isClosed() {
+			return
+		}
+
+		if n < unix.SizeofInotifyEvent {
+			var err error
+			if n == 0 {
+				// If EOF is received. This should really never happen.
+				err = io.EOF
+			} else if n < 0 {
+				// If an error occurred while reading.
+				err = errno
+			} else {
+				// Read was too short.
+				err = errors.New("notify: short read in readEvents()")
+			}
+			select {
+			case w.Errors <- err:
+			case <-w.done:
+				return
+			}
+			continue
+		}
+
+		var offset uint32
+		// We don't know how many events we just read into the buffer
+		// While the offset points to at least one whole event...
+		for offset <= uint32(n-unix.SizeofInotifyEvent) {
+			// Point "raw" to the event in the buffer
+			raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
+
+			mask := uint32(raw.Mask)
+			nameLen := uint32(raw.Len)
+
+			// Kernel queue overflow: events were dropped; surface ErrEventOverflow.
+			if mask&unix.IN_Q_OVERFLOW != 0 {
+				select {
+				case w.Errors <- ErrEventOverflow:
+				case <-w.done:
+					return
+				}
+			}
+
+			// If the event happened to the watched directory or the watched file, the kernel
+			// doesn't append the filename to the event, but we would like to always fill the
+			// the "Name" field with a valid filename. We retrieve the path of the watch from
+			// the "paths" map.
+			w.mu.Lock()
+			name, ok := w.paths[int(raw.Wd)]
+			// IN_DELETE_SELF occurs when the file/directory being watched is removed.
+			// This is a sign to clean up the maps, otherwise we are no longer in sync
+			// with the inotify kernel state which has already deleted the watch
+			// automatically.
+			if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
+				delete(w.paths, int(raw.Wd))
+				delete(w.watches, name)
+			}
+			w.mu.Unlock()
+
+			if nameLen > 0 {
+				// Point "bytes" at the first byte of the filename
+				bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))
+				// The filename is padded with NULL bytes. TrimRight() gets rid of those.
+				name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
+			}
+
+			event := newEvent(name, mask)
+
+			// Send the events that are not ignored on the events channel
+			if !event.ignoreLinux(mask) {
+				select {
+				case w.Events <- event:
+				case <-w.done:
+					return
+				}
+			}
+
+			// Move to the next event in the buffer
+			offset += unix.SizeofInotifyEvent + nameLen
+		}
+	}
+}
+
+// Certain types of events can be "ignored" and not sent over the Events
+// channel. Such as events marked ignore by the kernel, or MODIFY events
+// against files that do not exist.
+func (e *Event) ignoreLinux(mask uint32) bool {
+	// Ignore anything the inotify API says to ignore
+	if mask&unix.IN_IGNORED == unix.IN_IGNORED {
+		return true
+	}
+
+	// If the event is not a DELETE or RENAME, the file must exist.
+	// Otherwise the event is ignored.
+	// *Note*: this was put in place because it was seen that a MODIFY
+	// event was sent after the DELETE. This ignores that MODIFY and
+	// assumes a DELETE will come or has come if the file doesn't exist.
+	if !(e.Op&Remove == Remove || e.Op&Rename == Rename) {
+		// Lstat (not Stat) so a dangling symlink still counts as existing.
+		_, statErr := os.Lstat(e.Name)
+		return os.IsNotExist(statErr)
+	}
+	return false
+}
+
+// newEvent returns an platform-independent Event based on an inotify mask.
+// Mapping: CREATE/MOVED_TO -> Create, DELETE(_SELF) -> Remove,
+// MODIFY -> Write, MOVE_SELF/MOVED_FROM -> Rename, ATTRIB -> Chmod.
+func newEvent(name string, mask uint32) Event {
+	e := Event{Name: name}
+	if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO {
+		e.Op |= Create
+	}
+	if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE {
+		e.Op |= Remove
+	}
+	if mask&unix.IN_MODIFY == unix.IN_MODIFY {
+		e.Op |= Write
+	}
+	if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM {
+		e.Op |= Rename
+	}
+	if mask&unix.IN_ATTRIB == unix.IN_ATTRIB {
+		e.Op |= Chmod
+	}
+	return e
+}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+
+package fsnotify
+
+import (
+ "errors"
+
+ "golang.org/x/sys/unix"
+)
+
+// fdPoller multiplexes the inotify fd and a self-pipe through epoll so that
+// a blocked reader can be woken up (see wake) when the watcher is closed.
+type fdPoller struct {
+	fd   int    // File descriptor (as returned by the inotify_init() syscall)
+	epfd int    // Epoll file descriptor
+	pipe [2]int // Pipe for waking up
+}
+
+// emptyPoller returns a poller with all descriptors marked invalid (-1),
+// so that close() can safely be called on a partially-initialized poller.
+func emptyPoller(fd int) *fdPoller {
+	poller := new(fdPoller)
+	poller.fd = fd
+	poller.epfd = -1
+	poller.pipe[0] = -1
+	poller.pipe[1] = -1
+	return poller
+}
+
+// Create a new inotify poller.
+// This creates an inotify handler, and an epoll handler.
+// On any failure, the deferred cleanup closes whatever descriptors were
+// already created (emptyPoller pre-marks the rest as -1).
+func newFdPoller(fd int) (*fdPoller, error) {
+	var errno error
+	poller := emptyPoller(fd)
+	defer func() {
+		if errno != nil {
+			poller.close()
+		}
+	}()
+	poller.fd = fd
+
+	// Create epoll fd
+	poller.epfd, errno = unix.EpollCreate1(unix.EPOLL_CLOEXEC)
+	if poller.epfd == -1 {
+		return nil, errno
+	}
+	// Create pipe; pipe[0] is the read end, pipe[1] the write end.
+	errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK|unix.O_CLOEXEC)
+	if errno != nil {
+		return nil, errno
+	}
+
+	// Register inotify fd with epoll
+	event := unix.EpollEvent{
+		Fd:     int32(poller.fd),
+		Events: unix.EPOLLIN,
+	}
+	errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event)
+	if errno != nil {
+		return nil, errno
+	}
+
+	// Register pipe fd with epoll
+	event = unix.EpollEvent{
+		Fd:     int32(poller.pipe[0]),
+		Events: unix.EPOLLIN,
+	}
+	errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event)
+	if errno != nil {
+		return nil, errno
+	}
+
+	return poller, nil
+}
+
+// Wait using epoll.
+// Returns true if something is ready to be read,
+// false if there is not.
+// Blocks until either the inotify fd has data (or an error/hangup, both of
+// which are treated as readable so unix.Read surfaces the condition), or the
+// wake pipe is written to / closed, in which case it returns false after
+// draining the wakeup byte(s).
+func (poller *fdPoller) wait() (bool, error) {
+	// 3 possible events per fd, and 2 fds, makes a maximum of 6 events.
+	// I don't know whether epoll_wait returns the number of events returned,
+	// or the total number of events ready.
+	// I decided to catch both by making the buffer one larger than the maximum.
+	events := make([]unix.EpollEvent, 7)
+	for {
+		n, errno := unix.EpollWait(poller.epfd, events, -1)
+		if n == -1 {
+			if errno == unix.EINTR {
+				continue
+			}
+			return false, errno
+		}
+		if n == 0 {
+			// If there are no events, try again.
+			continue
+		}
+		if n > 6 {
+			// This should never happen. More events were returned than should be possible.
+			return false, errors.New("epoll_wait returned more events than I know what to do with")
+		}
+		ready := events[:n]
+		epollhup := false
+		epollerr := false
+		epollin := false
+		for _, event := range ready {
+			if event.Fd == int32(poller.fd) {
+				if event.Events&unix.EPOLLHUP != 0 {
+					// This should not happen, but if it does, treat it as a wakeup.
+					epollhup = true
+				}
+				if event.Events&unix.EPOLLERR != 0 {
+					// If an error is waiting on the file descriptor, we should pretend
+					// something is ready to read, and let unix.Read pick up the error.
+					epollerr = true
+				}
+				if event.Events&unix.EPOLLIN != 0 {
+					// There is data to read.
+					epollin = true
+				}
+			}
+			if event.Fd == int32(poller.pipe[0]) {
+				if event.Events&unix.EPOLLHUP != 0 {
+					// Write pipe descriptor was closed, by us. This means we're closing down the
+					// watcher, and we should wake up.
+				}
+				if event.Events&unix.EPOLLERR != 0 {
+					// If an error is waiting on the pipe file descriptor.
+					// This is an absolute mystery, and should never ever happen.
+					// (Error string follows the Go convention: lower-case, no period.)
+					return false, errors.New("error on the pipe descriptor")
+				}
+				if event.Events&unix.EPOLLIN != 0 {
+					// This is a regular wakeup, so we have to clear the buffer.
+					err := poller.clearWake()
+					if err != nil {
+						return false, err
+					}
+				}
+			}
+		}
+
+		if epollhup || epollerr || epollin {
+			return true, nil
+		}
+		return false, nil
+	}
+}
+
+// wake interrupts an in-progress wait() by writing a single byte to the
+// wake pipe. (Note: despite the pipe being non-blocking, a full buffer is
+// fine — EAGAIN means previous wakeups are still pending, which suffices.)
+func (poller *fdPoller) wake() error {
+	buf := make([]byte, 1)
+	n, errno := unix.Write(poller.pipe[1], buf)
+	if n == -1 {
+		if errno == unix.EAGAIN {
+			// Buffer is full, poller will wake.
+			return nil
+		}
+		return errno
+	}
+	return nil
+}
+
+// clearWake drains pending wakeup bytes from the read end of the pipe,
+// so stale wakeups do not cause spurious returns from future wait() calls.
+func (poller *fdPoller) clearWake() error {
+	// You have to be woken up a LOT in order to get to 100!
+	buf := make([]byte, 100)
+	n, errno := unix.Read(poller.pipe[0], buf)
+	if n == -1 {
+		if errno == unix.EAGAIN {
+			// Buffer is empty, someone else cleared our wake.
+			return nil
+		}
+		return errno
+	}
+	return nil
+}
+
+// Close all poller file descriptors, but not the one passed to it.
+// -1 sentinels (set by emptyPoller) mark descriptors that were never opened,
+// making this safe on a partially-constructed poller.
+func (poller *fdPoller) close() {
+	if poller.pipe[1] != -1 {
+		unix.Close(poller.pipe[1])
+	}
+	if poller.pipe[0] != -1 {
+		unix.Close(poller.pipe[0])
+	}
+	if poller.epfd != -1 {
+		unix.Close(poller.epfd)
+	}
+}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+
+package fsnotify
+
+import (
+ "testing"
+ "time"
+
+ "golang.org/x/sys/unix"
+)
+
+// testFd is a pipe used as a stand-in for the inotify fd in poller tests:
+// [0] is the read end (handed to the poller), [1] the write end.
+type testFd [2]int
+
+// makeTestFd creates the test pipe, failing the test on error.
+func makeTestFd(t *testing.T) testFd {
+	var tfd testFd
+	errno := unix.Pipe(tfd[:])
+	if errno != nil {
+		t.Fatalf("Failed to create pipe: %v", errno)
+	}
+	return tfd
+}
+
+// fd returns the read end, i.e. the descriptor the poller watches.
+func (tfd testFd) fd() int {
+	return tfd[0]
+}
+
+// closeWrite closes the write end, producing EPOLLHUP on the read end.
+func (tfd testFd) closeWrite(t *testing.T) {
+	errno := unix.Close(tfd[1])
+	if errno != nil {
+		t.Fatalf("Failed to close write end of pipe: %v", errno)
+	}
+}
+
+// put writes 10 bytes, making the read end readable.
+func (tfd testFd) put(t *testing.T) {
+	buf := make([]byte, 10)
+	_, errno := unix.Write(tfd[1], buf)
+	if errno != nil {
+		t.Fatalf("Failed to write to pipe: %v", errno)
+	}
+}
+
+// get drains the 10 bytes written by put.
+func (tfd testFd) get(t *testing.T) {
+	buf := make([]byte, 10)
+	_, errno := unix.Read(tfd[0], buf)
+	if errno != nil {
+		t.Fatalf("Failed to read from pipe: %v", errno)
+	}
+}
+
+// close closes both ends of the pipe.
+func (tfd testFd) close() {
+	unix.Close(tfd[1])
+	unix.Close(tfd[0])
+}
+
+// makePoller builds a test pipe and an fdPoller watching its read end.
+func makePoller(t *testing.T) (testFd, *fdPoller) {
+	tfd := makeTestFd(t)
+	poller, err := newFdPoller(tfd.fd())
+	if err != nil {
+		t.Fatalf("Failed to create poller: %v", err)
+	}
+	return tfd, poller
+}
+
+// TestPollerWithBadFd verifies that registering an invalid fd fails with EBADF.
+func TestPollerWithBadFd(t *testing.T) {
+	_, err := newFdPoller(-1)
+	if err != unix.EBADF {
+		t.Fatalf("Expected EBADF, got: %v", err)
+	}
+}
+
+// TestPollerWithData verifies wait() returns true when the watched fd has data.
+func TestPollerWithData(t *testing.T) {
+	tfd, poller := makePoller(t)
+	defer tfd.close()
+	defer poller.close()
+
+	tfd.put(t)
+	ok, err := poller.wait()
+	if err != nil {
+		t.Fatalf("poller failed: %v", err)
+	}
+	if !ok {
+		t.Fatalf("expected poller to return true")
+	}
+	tfd.get(t)
+}
+
+// TestPollerWithWakeup verifies a wake() causes wait() to return false (no data).
+func TestPollerWithWakeup(t *testing.T) {
+	tfd, poller := makePoller(t)
+	defer tfd.close()
+	defer poller.close()
+
+	err := poller.wake()
+	if err != nil {
+		t.Fatalf("wake failed: %v", err)
+	}
+	ok, err := poller.wait()
+	if err != nil {
+		t.Fatalf("poller failed: %v", err)
+	}
+	if ok {
+		t.Fatalf("expected poller to return false")
+	}
+}
+
+// TestPollerWithClose verifies a closed write end (EPOLLHUP) reads as "ready".
+func TestPollerWithClose(t *testing.T) {
+	tfd, poller := makePoller(t)
+	defer tfd.close()
+	defer poller.close()
+
+	tfd.closeWrite(t)
+	ok, err := poller.wait()
+	if err != nil {
+		t.Fatalf("poller failed: %v", err)
+	}
+	if !ok {
+		t.Fatalf("expected poller to return true")
+	}
+}
+
+// TestPollerWithWakeupAndData verifies the interaction of a simultaneous
+// wakeup and pending data: data wins (true), the wakeup is cleared, and a
+// wakeup alone afterwards yields false.
+func TestPollerWithWakeupAndData(t *testing.T) {
+	tfd, poller := makePoller(t)
+	defer tfd.close()
+	defer poller.close()
+
+	tfd.put(t)
+	err := poller.wake()
+	if err != nil {
+		t.Fatalf("wake failed: %v", err)
+	}
+
+	// both data and wakeup
+	ok, err := poller.wait()
+	if err != nil {
+		t.Fatalf("poller failed: %v", err)
+	}
+	if !ok {
+		t.Fatalf("expected poller to return true")
+	}
+
+	// data is still in the buffer, wakeup is cleared
+	ok, err = poller.wait()
+	if err != nil {
+		t.Fatalf("poller failed: %v", err)
+	}
+	if !ok {
+		t.Fatalf("expected poller to return true")
+	}
+
+	tfd.get(t)
+	// data is gone, only wakeup now
+	err = poller.wake()
+	if err != nil {
+		t.Fatalf("wake failed: %v", err)
+	}
+	ok, err = poller.wait()
+	if err != nil {
+		t.Fatalf("poller failed: %v", err)
+	}
+	if ok {
+		t.Fatalf("expected poller to return false")
+	}
+}
+
+// TestPollerConcurrent verifies wait() blocks until data, wakeup, or close
+// while running in a separate goroutine from the test driver.
+func TestPollerConcurrent(t *testing.T) {
+	tfd, poller := makePoller(t)
+	defer tfd.close()
+	defer poller.close()
+
+	oks := make(chan bool)
+	live := make(chan bool)
+	defer close(live)
+	go func() {
+		defer close(oks)
+		for {
+			ok, err := poller.wait()
+			if err != nil {
+				// Fatal/Fatalf must only be called from the goroutine running
+				// the test function (testing package docs); use Errorf and
+				// bail out. Closing oks unblocks the driver, which then fails.
+				t.Errorf("poller failed: %v", err)
+				return
+			}
+			oks <- ok
+			if !<-live {
+				return
+			}
+		}
+	}()
+
+	// Try a write
+	select {
+	case <-time.After(50 * time.Millisecond):
+	case <-oks:
+		t.Fatalf("poller did not wait")
+	}
+	tfd.put(t)
+	if !<-oks {
+		t.Fatalf("expected true")
+	}
+	tfd.get(t)
+	live <- true
+
+	// Try a wakeup
+	select {
+	case <-time.After(50 * time.Millisecond):
+	case <-oks:
+		t.Fatalf("poller did not wait")
+	}
+	err := poller.wake()
+	if err != nil {
+		t.Fatalf("wake failed: %v", err)
+	}
+	if <-oks {
+		t.Fatalf("expected false")
+	}
+	live <- true
+
+	// Try a close
+	select {
+	case <-time.After(50 * time.Millisecond):
+	case <-oks:
+		t.Fatalf("poller did not wait")
+	}
+	tfd.closeWrite(t)
+	if !<-oks {
+		t.Fatalf("expected true")
+	}
+	tfd.get(t)
+}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+
+package fsnotify
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+)
+
+// TestInotifyCloseRightAway closes the watcher before readEvents reaches its
+// first unix.Read, and checks the goroutine still shuts down cleanly.
+func TestInotifyCloseRightAway(t *testing.T) {
+	w, err := NewWatcher()
+	if err != nil {
+		t.Fatalf("Failed to create watcher")
+	}
+
+	// Close immediately; it won't even reach the first unix.Read.
+	w.Close()
+
+	// Wait for the close to complete.
+	<-time.After(50 * time.Millisecond)
+	isWatcherReallyClosed(t, w)
+}
+
+// TestInotifyCloseSlightlyLater closes the watcher while readEvents is
+// blocked in unix.Read with no watches registered.
+func TestInotifyCloseSlightlyLater(t *testing.T) {
+	w, err := NewWatcher()
+	if err != nil {
+		t.Fatalf("Failed to create watcher")
+	}
+
+	// Wait until readEvents has reached unix.Read, and Close.
+	<-time.After(50 * time.Millisecond)
+	w.Close()
+
+	// Wait for the close to complete.
+	<-time.After(50 * time.Millisecond)
+	isWatcherReallyClosed(t, w)
+}
+
+// TestInotifyCloseSlightlyLaterWithWatch is the same scenario as above but
+// with an active watch on a directory.
+func TestInotifyCloseSlightlyLaterWithWatch(t *testing.T) {
+	testDir := tempMkdir(t)
+	defer os.RemoveAll(testDir)
+
+	w, err := NewWatcher()
+	if err != nil {
+		t.Fatalf("Failed to create watcher")
+	}
+	w.Add(testDir)
+
+	// Wait until readEvents has reached unix.Read, and Close.
+	<-time.After(50 * time.Millisecond)
+	w.Close()
+
+	// Wait for the close to complete.
+	<-time.After(50 * time.Millisecond)
+	isWatcherReallyClosed(t, w)
+}
+
+// TestInotifyCloseAfterRead closes the watcher after at least one event has
+// been generated (but possibly not consumed), then verifies shutdown.
+func TestInotifyCloseAfterRead(t *testing.T) {
+	testDir := tempMkdir(t)
+	defer os.RemoveAll(testDir)
+
+	w, err := NewWatcher()
+	if err != nil {
+		t.Fatalf("Failed to create watcher")
+	}
+
+	err = w.Add(testDir)
+	if err != nil {
+		t.Fatalf("Failed to add .")
+	}
+
+	// Generate an event.
+	os.Create(filepath.Join(testDir, "somethingSOMETHINGsomethingSOMETHING"))
+
+	// Wait for readEvents to read the event, then close the watcher.
+	<-time.After(50 * time.Millisecond)
+	w.Close()
+
+	// Wait for the close to complete.
+	<-time.After(50 * time.Millisecond)
+	isWatcherReallyClosed(t, w)
+}
+
+// isWatcherReallyClosed asserts that both channels are closed (readEvents
+// closes them via defer on exit); a blocking channel means the goroutine
+// is still alive.
+func isWatcherReallyClosed(t *testing.T, w *Watcher) {
+	select {
+	case err, ok := <-w.Errors:
+		if ok {
+			t.Fatalf("w.Errors is not closed; readEvents is still alive after closing (error: %v)", err)
+		}
+	default:
+		t.Fatalf("w.Errors would have blocked; readEvents is still alive!")
+	}
+
+	select {
+	case _, ok := <-w.Events:
+		if ok {
+			t.Fatalf("w.Events is not closed; readEvents is still alive after closing")
+		}
+	default:
+		t.Fatalf("w.Events would have blocked; readEvents is still alive!")
+	}
+}
+
+// TestInotifyCloseCreate closes a watcher whose goroutine is blocked in
+// unix.Read, immediately creates a replacement watcher (reusing the fd slot),
+// and checks the new watcher still works.
+func TestInotifyCloseCreate(t *testing.T) {
+	testDir := tempMkdir(t)
+	defer os.RemoveAll(testDir)
+
+	w, err := NewWatcher()
+	if err != nil {
+		t.Fatalf("Failed to create watcher: %v", err)
+	}
+	defer w.Close()
+
+	err = w.Add(testDir)
+	if err != nil {
+		t.Fatalf("Failed to add testDir: %v", err)
+	}
+	h, err := os.Create(filepath.Join(testDir, "testfile"))
+	if err != nil {
+		t.Fatalf("Failed to create file in testdir: %v", err)
+	}
+	h.Close()
+	select {
+	case _ = <-w.Events:
+	case err := <-w.Errors:
+		t.Fatalf("Error from watcher: %v", err)
+	case <-time.After(50 * time.Millisecond):
+		t.Fatalf("Took too long to wait for event")
+	}
+
+	// At this point, we've received one event, so the goroutine is ready.
+	// It's also blocking on unix.Read.
+	// Now we try to swap the file descriptor under its nose.
+	w.Close()
+	w, err = NewWatcher()
+	// Check the error BEFORE deferring Close: if NewWatcher failed, w is nil
+	// and the deferred Close would panic with a nil-pointer dereference.
+	if err != nil {
+		t.Fatalf("Failed to create second watcher: %v", err)
+	}
+	defer w.Close()
+
+	<-time.After(50 * time.Millisecond)
+	err = w.Add(testDir)
+	if err != nil {
+		t.Fatalf("Error adding testDir again: %v", err)
+	}
+}
+
+// This test verifies the watcher can keep up with file creations/deletions
+// when under load.
+func TestInotifyStress(t *testing.T) {
+	maxNumToCreate := 1000
+
+	testDir := tempMkdir(t)
+	defer os.RemoveAll(testDir)
+	testFilePrefix := filepath.Join(testDir, "testfile")
+
+	w, err := NewWatcher()
+	if err != nil {
+		t.Fatalf("Failed to create watcher: %v", err)
+	}
+	defer w.Close()
+
+	err = w.Add(testDir)
+	if err != nil {
+		t.Fatalf("Failed to add testDir: %v", err)
+	}
+
+	doneChan := make(chan struct{})
+	// The buffer ensures that the file generation goroutine is never blocked.
+	errChan := make(chan error, 2*maxNumToCreate)
+
+	// Producer: create all files, pause, then remove them all.
+	go func() {
+		for i := 0; i < maxNumToCreate; i++ {
+			testFile := fmt.Sprintf("%s%d", testFilePrefix, i)
+
+			handle, err := os.Create(testFile)
+			if err != nil {
+				errChan <- fmt.Errorf("Create failed: %v", err)
+				continue
+			}
+
+			err = handle.Close()
+			if err != nil {
+				errChan <- fmt.Errorf("Close failed: %v", err)
+				continue
+			}
+		}
+
+		// If we delete a newly created file too quickly, inotify will skip the
+		// create event and only send the delete event.
+		time.Sleep(100 * time.Millisecond)
+
+		for i := 0; i < maxNumToCreate; i++ {
+			testFile := fmt.Sprintf("%s%d", testFilePrefix, i)
+			err = os.Remove(testFile)
+			if err != nil {
+				errChan <- fmt.Errorf("Remove failed: %v", err)
+			}
+		}
+
+		close(doneChan)
+	}()
+
+	creates := 0
+	removes := 0
+
+	// Consumer: count create/remove events until the producer signals done.
+	finished := false
+	after := time.After(10 * time.Second)
+	for !finished {
+		select {
+		case <-after:
+			t.Fatalf("Not done")
+		case <-doneChan:
+			finished = true
+		case err := <-errChan:
+			t.Fatalf("Got an error from file creator goroutine: %v", err)
+		case err := <-w.Errors:
+			t.Fatalf("Got an error from watcher: %v", err)
+		case evt := <-w.Events:
+			if !strings.HasPrefix(evt.Name, testFilePrefix) {
+				t.Fatalf("Got an event for an unknown file: %s", evt.Name)
+			}
+			if evt.Op == Create {
+				creates++
+			}
+			if evt.Op == Remove {
+				removes++
+			}
+		}
+	}
+
+	// Drain remaining events from channels
+	// (10 consecutive empty polls, 1ms apart, counts as drained).
+	count := 0
+	for count < 10 {
+		select {
+		case err := <-errChan:
+			t.Fatalf("Got an error from file creator goroutine: %v", err)
+		case err := <-w.Errors:
+			t.Fatalf("Got an error from watcher: %v", err)
+		case evt := <-w.Events:
+			if !strings.HasPrefix(evt.Name, testFilePrefix) {
+				t.Fatalf("Got an event for an unknown file: %s", evt.Name)
+			}
+			if evt.Op == Create {
+				creates++
+			}
+			if evt.Op == Remove {
+				removes++
+			}
+			count = 0
+		default:
+			count++
+			// Give the watcher chances to fill the channels.
+			time.Sleep(time.Millisecond)
+		}
+	}
+
+	if creates-removes > 1 || creates-removes < -1 {
+		t.Fatalf("Creates and removes should not be off by more than one: %d creates, %d removes", creates, removes)
+	}
+	if creates < 50 {
+		t.Fatalf("Expected at least 50 creates, got %d", creates)
+	}
+}
+
+// TestInotifyRemoveTwice verifies that removing a watch twice errors the
+// second time, and that both internal maps end up empty.
+func TestInotifyRemoveTwice(t *testing.T) {
+	testDir := tempMkdir(t)
+	defer os.RemoveAll(testDir)
+	testFile := filepath.Join(testDir, "testfile")
+
+	handle, err := os.Create(testFile)
+	if err != nil {
+		t.Fatalf("Create failed: %v", err)
+	}
+	handle.Close()
+
+	w, err := NewWatcher()
+	if err != nil {
+		t.Fatalf("Failed to create watcher: %v", err)
+	}
+	defer w.Close()
+
+	err = w.Add(testFile)
+	if err != nil {
+		t.Fatalf("Failed to add testFile: %v", err)
+	}
+
+	err = w.Remove(testFile)
+	if err != nil {
+		t.Fatalf("wanted successful remove but got: %v", err)
+	}
+
+	err = w.Remove(testFile)
+	if err == nil {
+		t.Fatalf("no error on removing invalid file")
+	}
+
+	// White-box check of the watcher's internal bookkeeping maps.
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	if len(w.watches) != 0 {
+		t.Fatalf("Expected watches len is 0, but got: %d, %v", len(w.watches), w.watches)
+	}
+	if len(w.paths) != 0 {
+		t.Fatalf("Expected paths len is 0, but got: %d, %v", len(w.paths), w.paths)
+	}
+}
+
+// TestInotifyInnerMapLength verifies that deleting a watched file cleans up
+// the watcher's internal maps (via the IN_DELETE_SELF/IN_IGNORED handling
+// in readEvents).
+func TestInotifyInnerMapLength(t *testing.T) {
+	testDir := tempMkdir(t)
+	defer os.RemoveAll(testDir)
+	testFile := filepath.Join(testDir, "testfile")
+
+	handle, err := os.Create(testFile)
+	if err != nil {
+		t.Fatalf("Create failed: %v", err)
+	}
+	handle.Close()
+
+	w, err := NewWatcher()
+	if err != nil {
+		t.Fatalf("Failed to create watcher: %v", err)
+	}
+	defer w.Close()
+
+	err = w.Add(testFile)
+	if err != nil {
+		t.Fatalf("Failed to add testFile: %v", err)
+	}
+	go func() {
+		for err := range w.Errors {
+			// Errorf, not Fatalf: Fatal must only be called from the
+			// goroutine running the test function (testing package docs);
+			// from here it would only kill this goroutine.
+			t.Errorf("error received: %s", err)
+		}
+	}()
+
+	err = os.Remove(testFile)
+	if err != nil {
+		t.Fatalf("Failed to remove testFile: %v", err)
+	}
+	_ = <-w.Events                      // consume Remove event
+	<-time.After(50 * time.Millisecond) // wait IN_IGNORE propagated
+
+	// White-box check: both maps must be empty after the implicit cleanup.
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	if len(w.watches) != 0 {
+		t.Fatalf("Expected watches len is 0, but got: %d, %v", len(w.watches), w.watches)
+	}
+	if len(w.paths) != 0 {
+		t.Fatalf("Expected paths len is 0, but got: %d, %v", len(w.paths), w.paths)
+	}
+}
+
+// TestInotifyOverflow floods the kernel inotify queue past
+// fs.inotify.max_queued_events and checks that ErrEventOverflow is reported
+// on the Errors channel.
+func TestInotifyOverflow(t *testing.T) {
+	// We need to generate many more events than the
+	// fs.inotify.max_queued_events sysctl setting.
+	// We use multiple goroutines (one per directory)
+	// to speed up file creation.
+	numDirs := 128
+	numFiles := 1024
+
+	testDir := tempMkdir(t)
+	defer os.RemoveAll(testDir)
+
+	w, err := NewWatcher()
+	if err != nil {
+		t.Fatalf("Failed to create watcher: %v", err)
+	}
+	defer w.Close()
+
+	for dn := 0; dn < numDirs; dn++ {
+		testSubdir := fmt.Sprintf("%s/%d", testDir, dn)
+
+		err := os.Mkdir(testSubdir, 0777)
+		if err != nil {
+			t.Fatalf("Cannot create subdir: %v", err)
+		}
+
+		err = w.Add(testSubdir)
+		if err != nil {
+			t.Fatalf("Failed to add subdir: %v", err)
+		}
+	}
+
+	errChan := make(chan error, numDirs*numFiles)
+
+	// All events need to be in the inotify queue before pulling events off it to trigger this error.
+	wg := sync.WaitGroup{}
+	for dn := 0; dn < numDirs; dn++ {
+		testSubdir := fmt.Sprintf("%s/%d", testDir, dn)
+
+		wg.Add(1)
+		go func() {
+			for fn := 0; fn < numFiles; fn++ {
+				testFile := fmt.Sprintf("%s/%d", testSubdir, fn)
+
+				handle, err := os.Create(testFile)
+				if err != nil {
+					errChan <- fmt.Errorf("Create failed: %v", err)
+					continue
+				}
+
+				err = handle.Close()
+				if err != nil {
+					errChan <- fmt.Errorf("Close failed: %v", err)
+					continue
+				}
+			}
+			wg.Done()
+		}()
+	}
+	wg.Wait()
+
+	creates := 0
+	overflows := 0
+
+	// Drain events until either an overflow is seen or every create arrived
+	// (the latter means the queue never overflowed — a test failure).
+	after := time.After(10 * time.Second)
+	for overflows == 0 && creates < numDirs*numFiles {
+		select {
+		case <-after:
+			t.Fatalf("Not done")
+		case err := <-errChan:
+			t.Fatalf("Got an error from file creator goroutine: %v", err)
+		case err := <-w.Errors:
+			if err == ErrEventOverflow {
+				overflows++
+			} else {
+				t.Fatalf("Got an error from watcher: %v", err)
+			}
+		case evt := <-w.Events:
+			if !strings.HasPrefix(evt.Name, testDir) {
+				t.Fatalf("Got an event for an unknown file: %s", evt.Name)
+			}
+			if evt.Op == Create {
+				creates++
+			}
+		}
+	}
+
+	if creates == numDirs*numFiles {
+		t.Fatalf("Could not trigger overflow")
+	}
+
+	if overflows == 0 {
+		t.Fatalf("No overflow and not enough creates (expected %d, got %d)",
+			numDirs*numFiles, creates)
+	}
+}
--- /dev/null
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fsnotify
+
+import (
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "testing"
+ "time"
+
+ "golang.org/x/sys/unix"
+)
+
+// darwinVersion returns the major version of Darwin (17 is macOS 10.13),
+// parsed from the "kern.osrelease" sysctl (e.g. "17.3.0" -> 17).
+func darwinVersion() (int, error) {
+	s, err := unix.Sysctl("kern.osrelease")
+	if err != nil {
+		return 0, err
+	}
+	s = strings.Split(s, ".")[0]
+	return strconv.Atoi(s)
+}
+
+// testExchangedataForWatcher tests the watcher with the exchangedata operation on macOS.
+//
+// This is widely used for atomic saves on macOS, e.g. TextMate and in Apple's NSDocument.
+//
+// See https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man2/exchangedata.2.html
+// Also see: https://github.com/textmate/textmate/blob/cd016be29489eba5f3c09b7b70b06da134dda550/Frameworks/io/src/swap_file_data.cc#L20
+//
+// watchDir selects whether the watch is placed on the containing directory
+// (true) or directly on the resolved file (false).
+func testExchangedataForWatcher(t *testing.T, watchDir bool) {
+	osVersion, err := darwinVersion()
+	if err != nil {
+		t.Fatal("unable to get Darwin version:", err)
+	}
+	if osVersion >= 17 {
+		t.Skip("Exchangedata is deprecated in macOS 10.13")
+	}
+
+	// Create directory to watch
+	testDir1 := tempMkdir(t)
+
+	// For the intermediate file
+	testDir2 := tempMkdir(t)
+
+	defer os.RemoveAll(testDir1)
+	defer os.RemoveAll(testDir2)
+
+	resolvedFilename := "TestFsnotifyEvents.file"
+
+	// TextMate does:
+	//
+	// 1. exchangedata (intermediate, resolved)
+	// 2. unlink intermediate
+	//
+	// Let's try to simulate that:
+	resolved := filepath.Join(testDir1, resolvedFilename)
+	intermediate := filepath.Join(testDir2, resolvedFilename+"~")
+
+	// Make sure we create the file before we start watching
+	createAndSyncFile(t, resolved)
+
+	watcher := newWatcher(t)
+
+	// Test both variants in isolation
+	if watchDir {
+		addWatch(t, watcher, testDir1)
+	} else {
+		addWatch(t, watcher, resolved)
+	}
+
+	// Receive errors on the error channel on a separate goroutine
+	go func() {
+		for err := range watcher.Errors {
+			t.Errorf("error received: %s", err)
+		}
+	}()
+
+	// Receive events on the event channel on a separate goroutine
+	eventstream := watcher.Events
+	var removeReceived counter
+	var createReceived counter
+
+	done := make(chan bool)
+
+	go func() {
+		for event := range eventstream {
+			// Only count relevant events
+			if event.Name == filepath.Clean(resolved) {
+				if event.Op&Remove == Remove {
+					removeReceived.increment()
+				}
+				if event.Op&Create == Create {
+					createReceived.increment()
+				}
+			}
+			t.Logf("event received: %s", event)
+		}
+		done <- true
+	}()
+
+	// Repeat to make sure the watched file/directory "survives" the REMOVE/CREATE loop.
+	for i := 1; i <= 3; i++ {
+		// The intermediate file is created in a folder outside the watcher
+		createAndSyncFile(t, intermediate)
+
+		// 1. Swap
+		if err := unix.Exchangedata(intermediate, resolved, 0); err != nil {
+			t.Fatalf("[%d] exchangedata failed: %s", i, err)
+		}
+
+		time.Sleep(50 * time.Millisecond)
+
+		// 2. Delete the intermediate file
+		err := os.Remove(intermediate)
+
+		if err != nil {
+			t.Fatalf("[%d] remove %s failed: %s", i, intermediate, err)
+		}
+
+		time.Sleep(50 * time.Millisecond)
+
+	}
+
+	// We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+	time.Sleep(500 * time.Millisecond)
+
+	// The loop above ran 3 times, so at least 3 REMOVE and 3 CREATE events
+	// are expected for the resolved path (other events, e.g. CHMOD, may
+	// arrive too but are not counted):
+	if removeReceived.value() < 3 {
+		t.Fatal("fsnotify remove events have not been received after 500 ms")
+	}
+
+	if createReceived.value() < 3 {
+		t.Fatal("fsnotify create events have not been received after 500 ms")
+	}
+
+	watcher.Close()
+	t.Log("waiting for the event channel to become closed...")
+	select {
+	case <-done:
+		t.Log("event channel closed")
+	case <-time.After(2 * time.Second):
+		t.Fatal("event stream was not closed after 2 seconds")
+	}
+}
+
+// TestExchangedataInWatchedDir tests the exchangedata operation on a file in a watched dir.
+func TestExchangedataInWatchedDir(t *testing.T) {
+	testExchangedataForWatcher(t, true)
+}
+
+// TestExchangedataInWatchedFile tests the exchangedata operation on a watched file.
+func TestExchangedataInWatchedFile(t *testing.T) {
+	testExchangedataForWatcher(t, false)
+}
+
+// createAndSyncFile creates (or opens for writing) the file at path, syncs
+// it to disk, and closes it, failing the test if creation fails.
+// The parameter is named path (not filepath) so it does not shadow the
+// path/filepath package imported by this file.
+func createAndSyncFile(t *testing.T, path string) {
+	f1, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0666)
+	if err != nil {
+		t.Fatalf("creating %s failed: %s", path, err)
+	}
+	f1.Sync()
+	f1.Close()
+}
--- /dev/null
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !plan9,!solaris
+
+package fsnotify
+
+import (
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path"
+ "path/filepath"
+ "runtime"
+ "sync/atomic"
+ "testing"
+ "time"
+)
+
+// An atomic counter, safe for concurrent use by the event/error-draining
+// goroutines and the main test goroutine.
+type counter struct {
+	val int32
+}
+
+// increment atomically adds one to the counter.
+func (c *counter) increment() {
+	atomic.AddInt32(&c.val, 1)
+}
+
+// value atomically reads the current count.
+func (c *counter) value() int32 {
+	return atomic.LoadInt32(&c.val)
+}
+
+// reset atomically sets the counter back to zero.
+func (c *counter) reset() {
+	atomic.StoreInt32(&c.val, 0)
+}
+
+// tempMkdir makes a temporary directory and fails the test if it cannot.
+func tempMkdir(t *testing.T) string {
+	name, err := ioutil.TempDir("", "fsnotify")
+	if err == nil {
+		return name
+	}
+	t.Fatalf("failed to create test directory: %s", err)
+	return name
+}
+
+// tempMkFile makes a temporary file in dir and returns its name,
+// failing the test if creation fails.
+func tempMkFile(t *testing.T, dir string) string {
+	file, err := ioutil.TempFile(dir, "fsnotify")
+	if err != nil {
+		t.Fatalf("failed to create test file: %v", err)
+	}
+	name := file.Name()
+	file.Close()
+	return name
+}
+
+// newWatcher initializes an fsnotify Watcher instance,
+// failing the test if construction fails.
+func newWatcher(t *testing.T) *Watcher {
+	w, err := NewWatcher()
+	if err == nil {
+		return w
+	}
+	t.Fatalf("NewWatcher() failed: %s", err)
+	return nil
+}
+
+// addWatch adds a watch for a directory, failing the test on error.
+func addWatch(t *testing.T, watcher *Watcher, dir string) {
+	err := watcher.Add(dir)
+	if err == nil {
+		return
+	}
+	t.Fatalf("watcher.Add(%q) failed: %s", dir, err)
+}
+
+// TestFsnotifyMultipleOperations creates, writes, renames (out of the
+// watched dir), and recreates a file, then checks the exact counts of
+// create/write/rename-or-delete events received for the watched dir.
+func TestFsnotifyMultipleOperations(t *testing.T) {
+	watcher := newWatcher(t)
+
+	// Receive errors on the error channel on a separate goroutine.
+	// Note: t.Errorf, not t.Fatalf — Fatalf must only be called from the
+	// goroutine running the test function.
+	go func() {
+		for err := range watcher.Errors {
+			t.Errorf("error received: %s", err)
+		}
+	}()
+
+	// Create directory to watch
+	testDir := tempMkdir(t)
+	defer os.RemoveAll(testDir)
+
+	// Create directory that's not watched
+	testDirToMoveFiles := tempMkdir(t)
+	defer os.RemoveAll(testDirToMoveFiles)
+
+	testFile := filepath.Join(testDir, "TestFsnotifySeq.testfile")
+	testFileRenamed := filepath.Join(testDirToMoveFiles, "TestFsnotifySeqRename.testfile")
+
+	addWatch(t, watcher, testDir)
+
+	// Receive events on the event channel on a separate goroutine
+	eventstream := watcher.Events
+	var createReceived, modifyReceived, deleteReceived, renameReceived counter
+	done := make(chan bool)
+	go func() {
+		for event := range eventstream {
+			// Only count relevant events
+			if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) {
+				t.Logf("event received: %s", event)
+				if event.Op&Remove == Remove {
+					deleteReceived.increment()
+				}
+				if event.Op&Write == Write {
+					modifyReceived.increment()
+				}
+				if event.Op&Create == Create {
+					createReceived.increment()
+				}
+				if event.Op&Rename == Rename {
+					renameReceived.increment()
+				}
+			} else {
+				t.Logf("unexpected event received: %s", event)
+			}
+		}
+		done <- true
+	}()
+
+	// Create a file
+	// This should add at least one event to the fsnotify event queue
+	var f *os.File
+	f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
+	if err != nil {
+		t.Fatalf("creating test file failed: %s", err)
+	}
+	f.Sync()
+
+	time.Sleep(time.Millisecond)
+	f.WriteString("data")
+	f.Sync()
+	f.Close()
+
+	time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
+
+	if err := testRename(testFile, testFileRenamed); err != nil {
+		t.Fatalf("rename failed: %s", err)
+	}
+
+	// Modify the file outside of the watched dir.
+	// Must open for writing: os.Open is read-only and the WriteString
+	// below would silently fail to modify anything.
+	f, err = os.OpenFile(testFileRenamed, os.O_WRONLY, 0666)
+	if err != nil {
+		t.Fatalf("open test renamed file failed: %s", err)
+	}
+	f.WriteString("data")
+	f.Sync()
+	f.Close()
+
+	time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
+
+	// Recreate the file that was moved
+	f, err = os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
+	if err != nil {
+		t.Fatalf("creating test file failed: %s", err)
+	}
+	f.Close()
+	time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
+
+	// We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+	time.Sleep(500 * time.Millisecond)
+	cReceived := createReceived.value()
+	if cReceived != 2 {
+		t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 2)
+	}
+	mReceived := modifyReceived.value()
+	if mReceived != 1 {
+		t.Fatalf("incorrect number of modify events received after 500 ms (%d vs %d)", mReceived, 1)
+	}
+	dReceived := deleteReceived.value()
+	rReceived := renameReceived.value()
+	if dReceived+rReceived != 1 {
+		t.Fatalf("incorrect number of rename+delete events received after 500 ms (%d vs %d)", rReceived+dReceived, 1)
+	}
+
+	// Try closing the fsnotify instance
+	t.Log("calling Close()")
+	watcher.Close()
+	t.Log("waiting for the event channel to become closed...")
+	select {
+	case <-done:
+		t.Log("event channel closed")
+	case <-time.After(2 * time.Second):
+		t.Fatal("event stream was not closed after 2 seconds")
+	}
+}
+
+// TestFsnotifyMultipleCreates creates, deletes, recreates, and twice
+// modifies a file in a watched directory, then checks the counts of
+// create/modify/delete events received.
+func TestFsnotifyMultipleCreates(t *testing.T) {
+	watcher := newWatcher(t)
+
+	// Receive errors on the error channel on a separate goroutine
+	go func() {
+		for err := range watcher.Errors {
+			t.Errorf("error received: %s", err)
+		}
+	}()
+
+	// Create directory to watch
+	testDir := tempMkdir(t)
+	defer os.RemoveAll(testDir)
+
+	testFile := filepath.Join(testDir, "TestFsnotifySeq.testfile")
+
+	addWatch(t, watcher, testDir)
+
+	// Receive events on the event channel on a separate goroutine
+	eventstream := watcher.Events
+	var createReceived, modifyReceived, deleteReceived counter
+	done := make(chan bool)
+	go func() {
+		for event := range eventstream {
+			// Only count relevant events
+			if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) {
+				t.Logf("event received: %s", event)
+				if event.Op&Remove == Remove {
+					deleteReceived.increment()
+				}
+				if event.Op&Create == Create {
+					createReceived.increment()
+				}
+				if event.Op&Write == Write {
+					modifyReceived.increment()
+				}
+			} else {
+				t.Logf("unexpected event received: %s", event)
+			}
+		}
+		done <- true
+	}()
+
+	// Create a file
+	// This should add at least one event to the fsnotify event queue
+	var f *os.File
+	f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
+	if err != nil {
+		t.Fatalf("creating test file failed: %s", err)
+	}
+	f.Sync()
+
+	time.Sleep(time.Millisecond)
+	f.WriteString("data")
+	f.Sync()
+	f.Close()
+
+	time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
+
+	os.Remove(testFile)
+
+	time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
+
+	// Recreate the file
+	f, err = os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
+	if err != nil {
+		t.Fatalf("creating test file failed: %s", err)
+	}
+	f.Close()
+	time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
+
+	// Modify
+	f, err = os.OpenFile(testFile, os.O_WRONLY, 0666)
+	if err != nil {
+		t.Fatalf("reopening test file failed: %s", err)
+	}
+	f.Sync()
+
+	time.Sleep(time.Millisecond)
+	f.WriteString("data")
+	f.Sync()
+	f.Close()
+
+	time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
+
+	// Modify
+	f, err = os.OpenFile(testFile, os.O_WRONLY, 0666)
+	if err != nil {
+		t.Fatalf("reopening test file failed: %s", err)
+	}
+	f.Sync()
+
+	time.Sleep(time.Millisecond)
+	f.WriteString("data")
+	f.Sync()
+	f.Close()
+
+	time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
+
+	// We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+	time.Sleep(500 * time.Millisecond)
+	cReceived := createReceived.value()
+	if cReceived != 2 {
+		t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 2)
+	}
+	mReceived := modifyReceived.value()
+	if mReceived < 3 {
+		t.Fatalf("incorrect number of modify events received after 500 ms (%d vs at least %d)", mReceived, 3)
+	}
+	dReceived := deleteReceived.value()
+	if dReceived != 1 {
+		t.Fatalf("incorrect number of rename+delete events received after 500 ms (%d vs %d)", dReceived, 1)
+	}
+
+	// Try closing the fsnotify instance
+	t.Log("calling Close()")
+	watcher.Close()
+	t.Log("waiting for the event channel to become closed...")
+	select {
+	case <-done:
+		t.Log("event channel closed")
+	case <-time.After(2 * time.Second):
+		t.Fatal("event stream was not closed after 2 seconds")
+	}
+}
+
+// TestFsnotifyDirOnly watches a directory and verifies that create, write,
+// and remove events are reported for files directly inside it, including
+// the removal of a file that existed before the watch was added.
+func TestFsnotifyDirOnly(t *testing.T) {
+	watcher := newWatcher(t)
+
+	// Create directory to watch
+	testDir := tempMkdir(t)
+	defer os.RemoveAll(testDir)
+
+	// Create a file before watching directory
+	// This should NOT add any events to the fsnotify event queue
+	testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile")
+	{
+		var f *os.File
+		f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666)
+		if err != nil {
+			t.Fatalf("creating test file failed: %s", err)
+		}
+		f.Sync()
+		f.Close()
+	}
+
+	addWatch(t, watcher, testDir)
+
+	// Receive errors on the error channel on a separate goroutine
+	go func() {
+		for err := range watcher.Errors {
+			t.Errorf("error received: %s", err)
+		}
+	}()
+
+	testFile := filepath.Join(testDir, "TestFsnotifyDirOnly.testfile")
+
+	// Receive events on the event channel on a separate goroutine
+	eventstream := watcher.Events
+	var createReceived, modifyReceived, deleteReceived counter
+	done := make(chan bool)
+	go func() {
+		for event := range eventstream {
+			// Only count relevant events
+			if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) || event.Name == filepath.Clean(testFileAlreadyExists) {
+				t.Logf("event received: %s", event)
+				if event.Op&Remove == Remove {
+					deleteReceived.increment()
+				}
+				if event.Op&Write == Write {
+					modifyReceived.increment()
+				}
+				if event.Op&Create == Create {
+					createReceived.increment()
+				}
+			} else {
+				t.Logf("unexpected event received: %s", event)
+			}
+		}
+		done <- true
+	}()
+
+	// Create a file
+	// This should add at least one event to the fsnotify event queue
+	var f *os.File
+	f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
+	if err != nil {
+		t.Fatalf("creating test file failed: %s", err)
+	}
+	f.Sync()
+
+	time.Sleep(time.Millisecond)
+	f.WriteString("data")
+	f.Sync()
+	f.Close()
+
+	time.Sleep(50 * time.Millisecond) // give system time to sync write change before delete
+
+	// Both removes happen in the watched dir, so two delete events are expected.
+	os.Remove(testFile)
+	os.Remove(testFileAlreadyExists)
+
+	// We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+	time.Sleep(500 * time.Millisecond)
+	cReceived := createReceived.value()
+	if cReceived != 1 {
+		t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 1)
+	}
+	mReceived := modifyReceived.value()
+	if mReceived != 1 {
+		t.Fatalf("incorrect number of modify events received after 500 ms (%d vs %d)", mReceived, 1)
+	}
+	dReceived := deleteReceived.value()
+	if dReceived != 2 {
+		t.Fatalf("incorrect number of delete events received after 500 ms (%d vs %d)", dReceived, 2)
+	}
+
+	// Try closing the fsnotify instance
+	t.Log("calling Close()")
+	watcher.Close()
+	t.Log("waiting for the event channel to become closed...")
+	select {
+	case <-done:
+		t.Log("event channel closed")
+	case <-time.After(2 * time.Second):
+		t.Fatal("event stream was not closed after 2 seconds")
+	}
+}
+
+// TestFsnotifyDeleteWatchedDir removes a watched directory together with a
+// watched file inside it and verifies that at least two remove events are
+// delivered.
+func TestFsnotifyDeleteWatchedDir(t *testing.T) {
+	watcher := newWatcher(t)
+	defer watcher.Close()
+
+	// Create directory to watch
+	testDir := tempMkdir(t)
+	defer os.RemoveAll(testDir)
+
+	// Create a file before watching directory
+	testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile")
+	{
+		var f *os.File
+		f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666)
+		if err != nil {
+			t.Fatalf("creating test file failed: %s", err)
+		}
+		f.Sync()
+		f.Close()
+	}
+
+	addWatch(t, watcher, testDir)
+
+	// Add a watch for testFile
+	addWatch(t, watcher, testFileAlreadyExists)
+
+	// Receive errors on the error channel on a separate goroutine
+	go func() {
+		for err := range watcher.Errors {
+			t.Errorf("error received: %s", err)
+		}
+	}()
+
+	// Receive events on the event channel on a separate goroutine.
+	// The goroutine exits when the deferred watcher.Close() closes Events.
+	eventstream := watcher.Events
+	var deleteReceived counter
+	go func() {
+		for event := range eventstream {
+			// Only count relevant events
+			if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFileAlreadyExists) {
+				t.Logf("event received: %s", event)
+				if event.Op&Remove == Remove {
+					deleteReceived.increment()
+				}
+			} else {
+				t.Logf("unexpected event received: %s", event)
+			}
+		}
+	}()
+
+	os.RemoveAll(testDir)
+
+	// We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+	time.Sleep(500 * time.Millisecond)
+	dReceived := deleteReceived.value()
+	if dReceived < 2 {
+		t.Fatalf("did not receive at least %d delete events, received %d after 500 ms", 2, dReceived)
+	}
+}
+
+// TestFsnotifySubDir verifies that events are reported for a watched
+// directory's immediate children (including a created sub-directory) but
+// not for files created inside that unwatched sub-directory.
+func TestFsnotifySubDir(t *testing.T) {
+	watcher := newWatcher(t)
+
+	// Create directory to watch
+	testDir := tempMkdir(t)
+	defer os.RemoveAll(testDir)
+
+	testFile1 := filepath.Join(testDir, "TestFsnotifyFile1.testfile")
+	testSubDir := filepath.Join(testDir, "sub")
+	testSubDirFile := filepath.Join(testDir, "sub/TestFsnotifyFile1.testfile")
+
+	// Receive errors on the error channel on a separate goroutine
+	go func() {
+		for err := range watcher.Errors {
+			t.Errorf("error received: %s", err)
+		}
+	}()
+
+	// Receive events on the event channel on a separate goroutine
+	eventstream := watcher.Events
+	var createReceived, deleteReceived counter
+	done := make(chan bool)
+	go func() {
+		for event := range eventstream {
+			// Only count relevant events
+			if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testSubDir) || event.Name == filepath.Clean(testFile1) {
+				t.Logf("event received: %s", event)
+				if event.Op&Create == Create {
+					createReceived.increment()
+				}
+				if event.Op&Remove == Remove {
+					deleteReceived.increment()
+				}
+			} else {
+				t.Logf("unexpected event received: %s", event)
+			}
+		}
+		done <- true
+	}()
+
+	addWatch(t, watcher, testDir)
+
+	// Create sub-directory
+	if err := os.Mkdir(testSubDir, 0777); err != nil {
+		t.Fatalf("failed to create test sub-directory: %s", err)
+	}
+
+	// Create a file
+	var f *os.File
+	f, err := os.OpenFile(testFile1, os.O_WRONLY|os.O_CREATE, 0666)
+	if err != nil {
+		t.Fatalf("creating test file failed: %s", err)
+	}
+	f.Sync()
+	f.Close()
+
+	// Create a file (Should not see this! we are not watching subdir)
+	var fs *os.File
+	fs, err = os.OpenFile(testSubDirFile, os.O_WRONLY|os.O_CREATE, 0666)
+	if err != nil {
+		t.Fatalf("creating test file failed: %s", err)
+	}
+	fs.Sync()
+	fs.Close()
+
+	time.Sleep(200 * time.Millisecond)
+
+	// Make sure receive deletes for both file and sub-directory
+	os.RemoveAll(testSubDir)
+	os.Remove(testFile1)
+
+	// We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+	time.Sleep(500 * time.Millisecond)
+	cReceived := createReceived.value()
+	if cReceived != 2 {
+		t.Fatalf("incorrect number of create events received after 500 ms (%d vs %d)", cReceived, 2)
+	}
+	dReceived := deleteReceived.value()
+	if dReceived != 2 {
+		t.Fatalf("incorrect number of delete events received after 500 ms (%d vs %d)", dReceived, 2)
+	}
+
+	// Try closing the fsnotify instance
+	t.Log("calling Close()")
+	watcher.Close()
+	t.Log("waiting for the event channel to become closed...")
+	select {
+	case <-done:
+		t.Log("event channel closed")
+	case <-time.After(2 * time.Second):
+		t.Fatal("event stream was not closed after 2 seconds")
+	}
+}
+
+// TestFsnotifyRename verifies that renaming a watched file (within a
+// watched directory) produces at least one rename event.
+func TestFsnotifyRename(t *testing.T) {
+	watcher := newWatcher(t)
+
+	// Create directory to watch
+	testDir := tempMkdir(t)
+	defer os.RemoveAll(testDir)
+
+	addWatch(t, watcher, testDir)
+
+	// Receive errors on the error channel on a separate goroutine
+	go func() {
+		for err := range watcher.Errors {
+			t.Errorf("error received: %s", err)
+		}
+	}()
+
+	testFile := filepath.Join(testDir, "TestFsnotifyEvents.testfile")
+	testFileRenamed := filepath.Join(testDir, "TestFsnotifyEvents.testfileRenamed")
+
+	// Receive events on the event channel on a separate goroutine
+	eventstream := watcher.Events
+	var renameReceived counter
+	done := make(chan bool)
+	go func() {
+		for event := range eventstream {
+			// Only count relevant events
+			if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) || event.Name == filepath.Clean(testFileRenamed) {
+				if event.Op&Rename == Rename {
+					renameReceived.increment()
+				}
+				t.Logf("event received: %s", event)
+			} else {
+				t.Logf("unexpected event received: %s", event)
+			}
+		}
+		done <- true
+	}()
+
+	// Create a file
+	// This should add at least one event to the fsnotify event queue
+	var f *os.File
+	f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
+	if err != nil {
+		t.Fatalf("creating test file failed: %s", err)
+	}
+	f.Sync()
+
+	f.WriteString("data")
+	f.Sync()
+	f.Close()
+
+	// Add a watch for testFile
+	addWatch(t, watcher, testFile)
+
+	if err := testRename(testFile, testFileRenamed); err != nil {
+		t.Fatalf("rename failed: %s", err)
+	}
+
+	// We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+	time.Sleep(500 * time.Millisecond)
+	if renameReceived.value() == 0 {
+		t.Fatal("fsnotify rename events have not been received after 500 ms")
+	}
+
+	// Try closing the fsnotify instance
+	t.Log("calling Close()")
+	watcher.Close()
+	t.Log("waiting for the event channel to become closed...")
+	select {
+	case <-done:
+		t.Log("event channel closed")
+	case <-time.After(2 * time.Second):
+		t.Fatal("event stream was not closed after 2 seconds")
+	}
+
+	os.Remove(testFileRenamed)
+}
+
+// TestFsnotifyRenameToCreate verifies that renaming a file from an
+// unwatched directory into a watched directory produces a create event.
+func TestFsnotifyRenameToCreate(t *testing.T) {
+	watcher := newWatcher(t)
+
+	// Create directory to watch
+	testDir := tempMkdir(t)
+	defer os.RemoveAll(testDir)
+
+	// Create directory to get file
+	testDirFrom := tempMkdir(t)
+	defer os.RemoveAll(testDirFrom)
+
+	addWatch(t, watcher, testDir)
+
+	// Receive errors on the error channel on a separate goroutine
+	go func() {
+		for err := range watcher.Errors {
+			t.Errorf("error received: %s", err)
+		}
+	}()
+
+	testFile := filepath.Join(testDirFrom, "TestFsnotifyEvents.testfile")
+	testFileRenamed := filepath.Join(testDir, "TestFsnotifyEvents.testfileRenamed")
+
+	// Receive events on the event channel on a separate goroutine
+	eventstream := watcher.Events
+	var createReceived counter
+	done := make(chan bool)
+	go func() {
+		for event := range eventstream {
+			// Only count relevant events
+			if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) || event.Name == filepath.Clean(testFileRenamed) {
+				if event.Op&Create == Create {
+					createReceived.increment()
+				}
+				t.Logf("event received: %s", event)
+			} else {
+				t.Logf("unexpected event received: %s", event)
+			}
+		}
+		done <- true
+	}()
+
+	// Create a file
+	// This should add at least one event to the fsnotify event queue
+	var f *os.File
+	f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
+	if err != nil {
+		t.Fatalf("creating test file failed: %s", err)
+	}
+	f.Sync()
+	f.Close()
+
+	if err := testRename(testFile, testFileRenamed); err != nil {
+		t.Fatalf("rename failed: %s", err)
+	}
+
+	// We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+	time.Sleep(500 * time.Millisecond)
+	if createReceived.value() == 0 {
+		t.Fatal("fsnotify create events have not been received after 500 ms")
+	}
+
+	// Try closing the fsnotify instance
+	t.Log("calling Close()")
+	watcher.Close()
+	t.Log("waiting for the event channel to become closed...")
+	select {
+	case <-done:
+		t.Log("event channel closed")
+	case <-time.After(2 * time.Second):
+		t.Fatal("event stream was not closed after 2 seconds")
+	}
+
+	os.Remove(testFileRenamed)
+}
+
+// TestFsnotifyRenameToOverwrite verifies that renaming a file over an
+// existing file in a watched directory produces an event for the
+// overwritten path. Skipped on platforms where os.Rename over an existing
+// file does not generate an event.
+func TestFsnotifyRenameToOverwrite(t *testing.T) {
+	switch runtime.GOOS {
+	case "plan9", "windows":
+		t.Skipf("skipping test on %q (os.Rename over existing file does not create event).", runtime.GOOS)
+	}
+
+	watcher := newWatcher(t)
+
+	// Create directory to watch
+	testDir := tempMkdir(t)
+	defer os.RemoveAll(testDir)
+
+	// Create directory to get file
+	testDirFrom := tempMkdir(t)
+	defer os.RemoveAll(testDirFrom)
+
+	testFile := filepath.Join(testDirFrom, "TestFsnotifyEvents.testfile")
+	testFileRenamed := filepath.Join(testDir, "TestFsnotifyEvents.testfileRenamed")
+
+	// Create a file
+	var fr *os.File
+	fr, err := os.OpenFile(testFileRenamed, os.O_WRONLY|os.O_CREATE, 0666)
+	if err != nil {
+		t.Fatalf("creating test file failed: %s", err)
+	}
+	fr.Sync()
+	fr.Close()
+
+	addWatch(t, watcher, testDir)
+
+	// Receive errors on the error channel on a separate goroutine
+	go func() {
+		for err := range watcher.Errors {
+			t.Errorf("error received: %s", err)
+		}
+	}()
+
+	// Receive events on the event channel on a separate goroutine
+	eventstream := watcher.Events
+	var eventReceived counter
+	done := make(chan bool)
+	go func() {
+		for event := range eventstream {
+			// Only count relevant events
+			if event.Name == filepath.Clean(testFileRenamed) {
+				eventReceived.increment()
+				t.Logf("event received: %s", event)
+			} else {
+				t.Logf("unexpected event received: %s", event)
+			}
+		}
+		done <- true
+	}()
+
+	// Create a file
+	// This should add at least one event to the fsnotify event queue
+	var f *os.File
+	f, err = os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
+	if err != nil {
+		t.Fatalf("creating test file failed: %s", err)
+	}
+	f.Sync()
+	f.Close()
+
+	if err := testRename(testFile, testFileRenamed); err != nil {
+		t.Fatalf("rename failed: %s", err)
+	}
+
+	// We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+	time.Sleep(500 * time.Millisecond)
+	if eventReceived.value() == 0 {
+		t.Fatal("fsnotify events have not been received after 500 ms")
+	}
+
+	// Try closing the fsnotify instance
+	t.Log("calling Close()")
+	watcher.Close()
+	t.Log("waiting for the event channel to become closed...")
+	select {
+	case <-done:
+		t.Log("event channel closed")
+	case <-time.After(2 * time.Second):
+		t.Fatal("event stream was not closed after 2 seconds")
+	}
+
+	os.Remove(testFileRenamed)
+}
+
+// TestRemovalOfWatch verifies that after Remove-ing a watch, modifying or
+// chmod-ing a file in the formerly watched directory produces no events.
+func TestRemovalOfWatch(t *testing.T) {
+	// Create directory to watch
+	testDir := tempMkdir(t)
+	defer os.RemoveAll(testDir)
+
+	// Create a file before watching directory
+	testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile")
+	{
+		var f *os.File
+		f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666)
+		if err != nil {
+			t.Fatalf("creating test file failed: %s", err)
+		}
+		f.Sync()
+		f.Close()
+	}
+
+	watcher := newWatcher(t)
+	defer watcher.Close()
+
+	addWatch(t, watcher, testDir)
+	if err := watcher.Remove(testDir); err != nil {
+		t.Fatalf("Could not remove the watch: %v\n", err)
+	}
+
+	// Any event delivered after the Remove above is a failure.
+	// Note: t.Errorf, not t.Fatalf — Fatalf must only be called from the
+	// goroutine running the test function.
+	go func() {
+		select {
+		case ev := <-watcher.Events:
+			t.Errorf("We received event: %v\n", ev)
+		case <-time.After(500 * time.Millisecond):
+			t.Log("No event received, as expected.")
+		}
+	}()
+
+	time.Sleep(200 * time.Millisecond)
+	// Modify the file in the formerly watched dir.
+	// Must open for writing: os.Open is read-only and the WriteString
+	// below would silently fail to modify anything.
+	f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY, 0666)
+	if err != nil {
+		t.Fatalf("Open test file failed: %s", err)
+	}
+	f.WriteString("data")
+	f.Sync()
+	f.Close()
+	if err := os.Chmod(testFileAlreadyExists, 0700); err != nil {
+		t.Fatalf("chmod failed: %s", err)
+	}
+	// Give the watching goroutine time to hit its 500 ms timeout.
+	time.Sleep(400 * time.Millisecond)
+}
+
+// TestFsnotifyAttrib verifies that chmod on a watched file produces Chmod
+// events while content writes produce Write events (and not vice versa).
+// Skipped on Windows, where attribute events are unavailable.
+func TestFsnotifyAttrib(t *testing.T) {
+	if runtime.GOOS == "windows" {
+		t.Skip("attributes don't work on Windows.")
+	}
+
+	watcher := newWatcher(t)
+
+	// Create directory to watch
+	testDir := tempMkdir(t)
+	defer os.RemoveAll(testDir)
+
+	// Receive errors on the error channel on a separate goroutine
+	go func() {
+		for err := range watcher.Errors {
+			t.Errorf("error received: %s", err)
+		}
+	}()
+
+	testFile := filepath.Join(testDir, "TestFsnotifyAttrib.testfile")
+
+	// Receive events on the event channel on a separate goroutine
+	eventstream := watcher.Events
+	// The modifyReceived counter counts IsModify events that are not IsAttrib,
+	// and the attribReceived counts IsAttrib events (which are also IsModify as
+	// a consequence).
+	var modifyReceived counter
+	var attribReceived counter
+	done := make(chan bool)
+	go func() {
+		for event := range eventstream {
+			// Only count relevant events
+			if event.Name == filepath.Clean(testDir) || event.Name == filepath.Clean(testFile) {
+				if event.Op&Write == Write {
+					modifyReceived.increment()
+				}
+				if event.Op&Chmod == Chmod {
+					attribReceived.increment()
+				}
+				t.Logf("event received: %s", event)
+			} else {
+				t.Logf("unexpected event received: %s", event)
+			}
+		}
+		done <- true
+	}()
+
+	// Create a file
+	// This should add at least one event to the fsnotify event queue
+	var f *os.File
+	f, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)
+	if err != nil {
+		t.Fatalf("creating test file failed: %s", err)
+	}
+	f.Sync()
+
+	f.WriteString("data")
+	f.Sync()
+	f.Close()
+
+	// Add a watch for testFile
+	addWatch(t, watcher, testFile)
+
+	if err := os.Chmod(testFile, 0700); err != nil {
+		t.Fatalf("chmod failed: %s", err)
+	}
+
+	// We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+	// Creating/writing a file changes also the mtime, so IsAttrib should be set to true here
+	time.Sleep(500 * time.Millisecond)
+	if modifyReceived.value() != 0 {
+		t.Fatal("received an unexpected modify event when creating a test file")
+	}
+	if attribReceived.value() == 0 {
+		t.Fatal("fsnotify attribute events have not received after 500 ms")
+	}
+
+	// Modifying the contents of the file does not set the attrib flag (although eg. the mtime
+	// might have been modified).
+	modifyReceived.reset()
+	attribReceived.reset()
+
+	f, err = os.OpenFile(testFile, os.O_WRONLY, 0)
+	if err != nil {
+		t.Fatalf("reopening test file failed: %s", err)
+	}
+
+	f.WriteString("more data")
+	f.Sync()
+	f.Close()
+
+	time.Sleep(500 * time.Millisecond)
+
+	if modifyReceived.value() != 1 {
+		t.Fatal("didn't receive a modify event after changing test file contents")
+	}
+
+	if attribReceived.value() != 0 {
+		t.Fatal("did receive an unexpected attrib event after changing test file contents")
+	}
+
+	modifyReceived.reset()
+	attribReceived.reset()
+
+	// Doing a chmod on the file should trigger an event with the "attrib" flag set (the contents
+	// of the file are not changed though)
+	if err := os.Chmod(testFile, 0600); err != nil {
+		t.Fatalf("chmod failed: %s", err)
+	}
+
+	time.Sleep(500 * time.Millisecond)
+
+	if attribReceived.value() != 1 {
+		t.Fatal("didn't receive an attribute change after 500ms")
+	}
+
+	// Try closing the fsnotify instance
+	t.Log("calling Close()")
+	watcher.Close()
+	t.Log("waiting for the event channel to become closed...")
+	select {
+	case <-done:
+		t.Log("event channel closed")
+	case <-time.After(1e9): // 1 second
+		t.Fatal("event stream was not closed after 1 second")
+	}
+
+	os.Remove(testFile)
+}
+
+// TestFsnotifyClose verifies that Close is idempotent (a second Close
+// returns promptly) and that Add after Close returns an error.
+func TestFsnotifyClose(t *testing.T) {
+	watcher := newWatcher(t)
+	watcher.Close()
+
+	var done int32
+	go func() {
+		watcher.Close()
+		atomic.StoreInt32(&done, 1)
+	}()
+
+	time.Sleep(50e6) // 50 ms
+	if atomic.LoadInt32(&done) == 0 {
+		t.Fatal("double Close() test failed: second Close() call didn't return")
+	}
+
+	testDir := tempMkdir(t)
+	defer os.RemoveAll(testDir)
+
+	if err := watcher.Add(testDir); err == nil {
+		t.Fatal("expected error on Watch() after Close(), got nil")
+	}
+}
+
+// TestFsnotifyFakeSymlink verifies that creating a broken symlink in a
+// watched directory yields exactly one create event, no errors, and no
+// other events. Skipped on Windows.
+func TestFsnotifyFakeSymlink(t *testing.T) {
+	if runtime.GOOS == "windows" {
+		t.Skip("symlinks don't work on Windows.")
+	}
+
+	watcher := newWatcher(t)
+
+	// Create directory to watch
+	testDir := tempMkdir(t)
+	defer os.RemoveAll(testDir)
+
+	var errorsReceived counter
+	// Receive errors on the error channel on a separate goroutine
+	go func() {
+		for errors := range watcher.Errors {
+			t.Logf("Received error: %s", errors)
+			errorsReceived.increment()
+		}
+	}()
+
+	// Count the CREATE events received
+	var createEventsReceived, otherEventsReceived counter
+	go func() {
+		for ev := range watcher.Events {
+			t.Logf("event received: %s", ev)
+			if ev.Op&Create == Create {
+				createEventsReceived.increment()
+			} else {
+				otherEventsReceived.increment()
+			}
+		}
+	}()
+
+	addWatch(t, watcher, testDir)
+
+	// "zzz" does not exist, so "zzznew" is a dangling (broken) symlink.
+	if err := os.Symlink(filepath.Join(testDir, "zzz"), filepath.Join(testDir, "zzznew")); err != nil {
+		t.Fatalf("Failed to create bogus symlink: %s", err)
+	}
+	t.Logf("Created bogus symlink")
+
+	// We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+	time.Sleep(500 * time.Millisecond)
+
+	// Should not be error, just no events for broken links (watching nothing)
+	if errorsReceived.value() > 0 {
+		t.Fatal("fsnotify errors have been received.")
+	}
+	if otherEventsReceived.value() > 0 {
+		t.Fatal("fsnotify other events received on the broken link")
+	}
+
+	// Except for 1 create event (for the link itself)
+	if createEventsReceived.value() == 0 {
+		t.Fatal("fsnotify create events were not received after 500 ms")
+	}
+	if createEventsReceived.value() > 1 {
+		t.Fatal("fsnotify more create events received than expected")
+	}
+
+	// Try closing the fsnotify instance
+	t.Log("calling Close()")
+	watcher.Close()
+}
+
+// TestCyclicSymlink checks that a self-referential symlink in a watched
+// directory does not wedge the watcher: after removing the cyclic link and
+// creating a regular file in its place, at least one Create event arrives.
+func TestCyclicSymlink(t *testing.T) {
+	if runtime.GOOS == "windows" {
+		t.Skip("symlinks don't work on Windows.")
+	}
+
+	watcher := newWatcher(t)
+
+	testDir := tempMkdir(t)
+	defer os.RemoveAll(testDir)
+
+	// link points at "." — a cycle back into the watched directory.
+	link := path.Join(testDir, "link")
+	if err := os.Symlink(".", link); err != nil {
+		t.Fatalf("could not make symlink: %v", err)
+	}
+	addWatch(t, watcher, testDir)
+
+	var createEventsReceived counter
+	go func() {
+		for ev := range watcher.Events {
+			if ev.Op&Create == Create {
+				createEventsReceived.increment()
+			}
+		}
+	}()
+
+	if err := os.Remove(link); err != nil {
+		t.Fatalf("Error removing link: %v", err)
+	}
+
+	// It would be nice to be able to expect a delete event here, but kqueue has
+	// no way for us to get events on symlinks themselves, because opening them
+	// opens an fd to the file to which they point.
+
+	if err := ioutil.WriteFile(link, []byte("foo"), 0700); err != nil {
+		t.Fatalf("could not make symlink: %v", err)
+	}
+
+	// We expect this event to be received almost immediately, but let's wait 500 ms to be sure
+	time.Sleep(500 * time.Millisecond)
+
+	if got := createEventsReceived.value(); got == 0 {
+		t.Errorf("want at least 1 create event got %v", got)
+	}
+
+	watcher.Close()
+}
+
+// TestConcurrentRemovalOfWatch tests that concurrent calls to RemoveWatch do not race.
+// See https://codereview.appspot.com/103300045/
+// go test -test.run=TestConcurrentRemovalOfWatch -test.cpu=1,1,1,1,1 -race
+func TestConcurrentRemovalOfWatch(t *testing.T) {
+	if runtime.GOOS != "darwin" {
+		t.Skip("regression test for race only present on darwin")
+	}
+
+	// Create directory to watch
+	testDir := tempMkdir(t)
+	defer os.RemoveAll(testDir)
+
+	// Create a file before watching directory
+	testFileAlreadyExists := filepath.Join(testDir, "TestFsnotifyEventsExisting.testfile")
+	{
+		var f *os.File
+		f, err := os.OpenFile(testFileAlreadyExists, os.O_WRONLY|os.O_CREATE, 0666)
+		if err != nil {
+			t.Fatalf("creating test file failed: %s", err)
+		}
+		f.Sync()
+		f.Close()
+	}
+
+	watcher := newWatcher(t)
+	defer watcher.Close()
+
+	addWatch(t, watcher, testDir)
+
+	// Test that RemoveWatch can be invoked concurrently, with no data races.
+	// The races (if any) are detected by running under the -race flag; the
+	// Remove return values are intentionally ignored here.
+	removed1 := make(chan struct{})
+	go func() {
+		defer close(removed1)
+		watcher.Remove(testDir)
+	}()
+	removed2 := make(chan struct{})
+	go func() {
+		close(removed2)
+		watcher.Remove(testDir)
+	}()
+	<-removed1
+	<-removed2
+}
+
+// TestClose checks that closing a watcher with an active watch succeeds.
+func TestClose(t *testing.T) {
+	// Regression test for #59 bad file descriptor from Close
+	testDir := tempMkdir(t)
+	defer os.RemoveAll(testDir)
+
+	watcher := newWatcher(t)
+	if err := watcher.Add(testDir); err != nil {
+		t.Fatalf("Expected no error on Add, got %v", err)
+	}
+	err := watcher.Close()
+	if err != nil {
+		t.Fatalf("Expected no error on Close, got %v.", err)
+	}
+}
+
+// TestRemoveWithClose tests if one can handle Remove events and, at the same
+// time, close Watcher object without any data races.
+func TestRemoveWithClose(t *testing.T) {
+	testDir := tempMkdir(t)
+	defer os.RemoveAll(testDir)
+
+	// Create many files so Remove events are still flowing when Close runs.
+	const fileN = 200
+	tempFiles := make([]string, 0, fileN)
+	for i := 0; i < fileN; i++ {
+		tempFiles = append(tempFiles, tempMkFile(t, testDir))
+	}
+	watcher := newWatcher(t)
+	if err := watcher.Add(testDir); err != nil {
+		t.Fatalf("Expected no error on Add, got %v", err)
+	}
+	startC, stopC := make(chan struct{}), make(chan struct{})
+	errC := make(chan error)
+	// Drain both channels until told to stop, so neither send can block.
+	go func() {
+		for {
+			select {
+			case <-watcher.Errors:
+			case <-watcher.Events:
+			case <-stopC:
+				return
+			}
+		}
+	}()
+	// Delete the files and close the watcher concurrently, both gated on startC.
+	go func() {
+		<-startC
+		for _, fileName := range tempFiles {
+			os.Remove(fileName)
+		}
+	}()
+	go func() {
+		<-startC
+		errC <- watcher.Close()
+	}()
+	close(startC)
+	defer close(stopC)
+	if err := <-errC; err != nil {
+		t.Fatalf("Expected no error on Close, got %v.", err)
+	}
+}
+
+// testRename renames file1 to file2. On platforms without an external "mv"
+// binary (Windows, Plan 9) it uses os.Rename directly; elsewhere it shells
+// out to "mv" so the rename is performed the way a user would from a shell.
+func testRename(file1, file2 string) error {
+	if runtime.GOOS == "windows" || runtime.GOOS == "plan9" {
+		return os.Rename(file1, file2)
+	}
+	return exec.Command("mv", file1, file2).Run()
+}
--- /dev/null
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build freebsd openbsd netbsd dragonfly darwin
+
+package fsnotify
+
+import (
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sync"
+ "time"
+
+ "golang.org/x/sys/unix"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+// All maps below are guarded by mu; Events/Errors are closed by the
+// readEvents goroutine after done is closed.
+type Watcher struct {
+	Events chan Event
+	Errors chan error
+	done   chan struct{} // Channel for sending a "quit message" to the reader goroutine
+
+	kq int // File descriptor (as returned by the kqueue() syscall).
+
+	mu              sync.Mutex        // Protects access to watcher data
+	watches         map[string]int    // Map of watched file descriptors (key: path).
+	externalWatches map[string]bool   // Map of watches added by user of the library.
+	dirFlags        map[string]uint32 // Map of watched directories to fflags used in kqueue.
+	paths           map[int]pathInfo  // Map file descriptors to path names for processing kqueue events.
+	fileExists      map[string]bool   // Keep track of if we know this file exists (to stop duplicate create events).
+	isClosed        bool              // Set to true when Close() is first called
+}
+
+// pathInfo records, for a watched file descriptor, the path it was opened
+// from and whether that path is a directory.
+type pathInfo struct {
+	name  string
+	isDir bool
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+// It creates a kqueue descriptor and starts the readEvents goroutine that
+// translates kevents into Events/Errors channel sends.
+func NewWatcher() (*Watcher, error) {
+	kq, err := kqueue()
+	if err != nil {
+		return nil, err
+	}
+
+	w := &Watcher{
+		kq:              kq,
+		watches:         make(map[string]int),
+		dirFlags:        make(map[string]uint32),
+		paths:           make(map[int]pathInfo),
+		fileExists:      make(map[string]bool),
+		externalWatches: make(map[string]bool),
+		Events:          make(chan Event),
+		Errors:          make(chan error),
+		done:            make(chan struct{}),
+	}
+
+	go w.readEvents()
+	return w, nil
+}
+
+// Close removes all watches and closes the events channel.
+// It is safe to call more than once: subsequent calls return nil
+// immediately because isClosed is checked under the mutex.
+func (w *Watcher) Close() error {
+	w.mu.Lock()
+	if w.isClosed {
+		w.mu.Unlock()
+		return nil
+	}
+	w.isClosed = true
+
+	// copy paths to remove while locked
+	var pathsToRemove = make([]string, 0, len(w.watches))
+	for name := range w.watches {
+		pathsToRemove = append(pathsToRemove, name)
+	}
+	w.mu.Unlock()
+	// unlock before calling Remove, which also locks
+
+	for _, name := range pathsToRemove {
+		w.Remove(name)
+	}
+
+	// send a "quit" message to the reader goroutine
+	close(w.done)
+
+	return nil
+}
+
+// Add starts watching the named file or directory (non-recursively).
+// The path is recorded in externalWatches so that Remove on a parent
+// directory does not silently drop a watch the user asked for explicitly.
+func (w *Watcher) Add(name string) error {
+	w.mu.Lock()
+	w.externalWatches[name] = true
+	w.mu.Unlock()
+	_, err := w.addWatch(name, noteAllEvents)
+	return err
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+// For a directory it also removes the internal (non-external) watches that
+// were added for the directory's children.
+func (w *Watcher) Remove(name string) error {
+	name = filepath.Clean(name)
+	w.mu.Lock()
+	watchfd, ok := w.watches[name]
+	w.mu.Unlock()
+	if !ok {
+		return fmt.Errorf("can't remove non-existent kevent watch for: %s", name)
+	}
+
+	const registerRemove = unix.EV_DELETE
+	if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil {
+		return err
+	}
+
+	unix.Close(watchfd)
+
+	w.mu.Lock()
+	isDir := w.paths[watchfd].isDir
+	delete(w.watches, name)
+	delete(w.paths, watchfd)
+	delete(w.dirFlags, name)
+	w.mu.Unlock()
+
+	// Find all watched paths that are in this directory that are not external.
+	if isDir {
+		var pathsToRemove []string
+		w.mu.Lock()
+		for _, path := range w.paths {
+			wdir, _ := filepath.Split(path.name)
+			if filepath.Clean(wdir) == name {
+				if !w.externalWatches[path.name] {
+					pathsToRemove = append(pathsToRemove, path.name)
+				}
+			}
+		}
+		w.mu.Unlock()
+		for _, name := range pathsToRemove {
+			// Since these are internal, not much sense in propagating error
+			// to the user, as that will just confuse them with an error about
+			// a path they did not explicitly watch themselves.
+			w.Remove(name)
+		}
+	}
+
+	return nil
+}
+
+// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE)
+const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME
+
+// keventWaitTime to block on each read from kevent.
+// The 100 ms cap keeps readEvents responsive to the done channel.
+var keventWaitTime = durationToTimespec(100 * time.Millisecond)
+
+// addWatch adds name to the watched file set.
+// The flags are interpreted as described in kevent(2).
+// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks.
+func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
+	var isDir bool
+	// Make ./name and name equivalent
+	name = filepath.Clean(name)
+
+	w.mu.Lock()
+	if w.isClosed {
+		w.mu.Unlock()
+		return "", errors.New("kevent instance already closed")
+	}
+	watchfd, alreadyWatching := w.watches[name]
+	// We already have a watch, but we can still override flags.
+	if alreadyWatching {
+		isDir = w.paths[watchfd].isDir
+	}
+	w.mu.Unlock()
+
+	if !alreadyWatching {
+		fi, err := os.Lstat(name)
+		if err != nil {
+			return "", err
+		}
+
+		// Don't watch sockets.
+		if fi.Mode()&os.ModeSocket == os.ModeSocket {
+			return "", nil
+		}
+
+		// Don't watch named pipes.
+		if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe {
+			return "", nil
+		}
+
+		// Follow Symlinks
+		// Unfortunately, Linux can add bogus symlinks to watch list without
+		// issue, and Windows can't do symlinks period (AFAIK). To maintain
+		// consistency, we will act like everything is fine. There will simply
+		// be no file events for broken symlinks.
+		// Hence the returns of nil on errors.
+		if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
+			name, err = filepath.EvalSymlinks(name)
+			if err != nil {
+				return "", nil
+			}
+
+			// Re-check the watch map under the resolved name: the target
+			// may already be watched via another path.
+			w.mu.Lock()
+			_, alreadyWatching = w.watches[name]
+			w.mu.Unlock()
+
+			if alreadyWatching {
+				return name, nil
+			}
+
+			fi, err = os.Lstat(name)
+			if err != nil {
+				return "", nil
+			}
+		}
+
+		// Note: failure is detected via fd == -1 (kevent style), with err
+		// carrying the cause.
+		watchfd, err = unix.Open(name, openMode, 0700)
+		if watchfd == -1 {
+			return "", err
+		}
+
+		isDir = fi.IsDir()
+	}
+
+	const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE
+	if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil {
+		unix.Close(watchfd)
+		return "", err
+	}
+
+	if !alreadyWatching {
+		w.mu.Lock()
+		w.watches[name] = watchfd
+		w.paths[watchfd] = pathInfo{name: name, isDir: isDir}
+		w.mu.Unlock()
+	}
+
+	if isDir {
+		// Watch the directory if it has not been watched before,
+		// or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
+		w.mu.Lock()
+
+		watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE &&
+			(!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE)
+		// Store flags so this watch can be updated later
+		w.dirFlags[name] = flags
+		w.mu.Unlock()
+
+		if watchDir {
+			if err := w.watchDirectoryFiles(name); err != nil {
+				return "", err
+			}
+		}
+	}
+	return name, nil
+}
+
+// readEvents reads from kqueue and converts the received kevents into
+// Event values that it sends down the Events channel.
+// It runs as a dedicated goroutine (started by NewWatcher) and exits when
+// w.done is closed, closing the Events and Errors channels on the way out.
+func (w *Watcher) readEvents() {
+	eventBuffer := make([]unix.Kevent_t, 10)
+
+loop:
+	for {
+		// See if there is a message on the "done" channel
+		select {
+		case <-w.done:
+			break loop
+		default:
+		}
+
+		// Get new events; read blocks at most keventWaitTime (100 ms) so
+		// the done check above runs regularly.
+		kevents, err := read(w.kq, eventBuffer, &keventWaitTime)
+		// EINTR is okay, the syscall was interrupted before timeout expired.
+		if err != nil && err != unix.EINTR {
+			select {
+			case w.Errors <- err:
+			case <-w.done:
+				break loop
+			}
+			continue
+		}
+
+		// Flush the events we received to the Events channel
+		for len(kevents) > 0 {
+			kevent := &kevents[0]
+			watchfd := int(kevent.Ident)
+			mask := uint32(kevent.Fflags)
+			w.mu.Lock()
+			path := w.paths[watchfd]
+			w.mu.Unlock()
+			event := newEvent(path.name, mask)
+
+			if path.isDir && !(event.Op&Remove == Remove) {
+				// Double check to make sure the directory exists. This can happen when
+				// we do a rm -fr on a recursively watched folders and we receive a
+				// modification event first but the folder has been deleted and later
+				// receive the delete event
+				if _, err := os.Lstat(event.Name); os.IsNotExist(err) {
+					// mark is as delete event
+					event.Op |= Remove
+				}
+			}
+
+			if event.Op&Rename == Rename || event.Op&Remove == Remove {
+				w.Remove(event.Name)
+				w.mu.Lock()
+				delete(w.fileExists, event.Name)
+				w.mu.Unlock()
+			}
+
+			if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) {
+				// Directory writes are expanded into per-file Create events
+				// rather than forwarded as-is (to mimic inotify).
+				w.sendDirectoryChangeEvents(event.Name)
+			} else {
+				// Send the event on the Events channel.
+				select {
+				case w.Events <- event:
+				case <-w.done:
+					break loop
+				}
+			}
+
+			if event.Op&Remove == Remove {
+				// Look for a file that may have overwritten this.
+				// For example, mv f1 f2 will delete f2, then create f2.
+				if path.isDir {
+					fileDir := filepath.Clean(event.Name)
+					w.mu.Lock()
+					_, found := w.watches[fileDir]
+					w.mu.Unlock()
+					if found {
+						// make sure the directory exists before we watch for changes. When we
+						// do a recursive watch and perform rm -fr, the parent directory might
+						// have gone missing, ignore the missing directory and let the
+						// upcoming delete event remove the watch from the parent directory.
+						if _, err := os.Lstat(fileDir); err == nil {
+							w.sendDirectoryChangeEvents(fileDir)
+						}
+					}
+				} else {
+					filePath := filepath.Clean(event.Name)
+					if fileInfo, err := os.Lstat(filePath); err == nil {
+						w.sendFileCreatedEventIfNew(filePath, fileInfo)
+					}
+				}
+			}
+
+			// Move to next event
+			kevents = kevents[1:]
+		}
+	}
+
+	// cleanup
+	err := unix.Close(w.kq)
+	if err != nil {
+		// only way the previous loop breaks is if w.done was closed so we need to async send to w.Errors.
+		select {
+		case w.Errors <- err:
+		default:
+		}
+	}
+	close(w.Events)
+	close(w.Errors)
+}
+
+// newEvent maps a kqueue NOTE_* fflags mask onto the platform-independent
+// Op bits of an Event for the given file name.
+func newEvent(name string, mask uint32) Event {
+	ev := Event{Name: name}
+	if mask&unix.NOTE_DELETE != 0 {
+		ev.Op |= Remove
+	}
+	if mask&unix.NOTE_WRITE != 0 {
+		ev.Op |= Write
+	}
+	if mask&unix.NOTE_RENAME != 0 {
+		ev.Op |= Rename
+	}
+	if mask&unix.NOTE_ATTRIB != 0 {
+		ev.Op |= Chmod
+	}
+	return ev
+}
+
+// newCreateEvent builds an Event for name carrying only the Create flag.
+func newCreateEvent(name string) Event {
+	ev := Event{Name: name}
+	ev.Op |= Create
+	return ev
+}
+
+// watchDirectoryFiles to mimic inotify when adding a watch on a directory:
+// every existing entry of dirPath gets its own internal watch, and is
+// recorded in fileExists so later directory scans don't re-announce it.
+func (w *Watcher) watchDirectoryFiles(dirPath string) error {
+	// Get all files
+	files, err := ioutil.ReadDir(dirPath)
+	if err != nil {
+		return err
+	}
+
+	for _, fileInfo := range files {
+		filePath := filepath.Join(dirPath, fileInfo.Name())
+		// internalWatch may rewrite filePath (symlink resolution in addWatch).
+		filePath, err = w.internalWatch(filePath, fileInfo)
+		if err != nil {
+			return err
+		}
+
+		w.mu.Lock()
+		w.fileExists[filePath] = true
+		w.mu.Unlock()
+	}
+
+	return nil
+}
+
+// sendDirectoryChangeEvents searches the directory for newly created files
+// and sends them over the event channel. This functionality is to have
+// the BSD version of fsnotify match Linux inotify which provides a
+// create event for files created in a watched directory.
+func (w *Watcher) sendDirectoryChangeEvents(dirPath string) {
+	// Get all files
+	files, err := ioutil.ReadDir(dirPath)
+	if err != nil {
+		// On error, report it (unless the watcher is shutting down); files
+		// is nil in that case, so the loop below is a no-op.
+		select {
+		case w.Errors <- err:
+		case <-w.done:
+			return
+		}
+	}
+
+	// Search for new files
+	for _, fileInfo := range files {
+		filePath := filepath.Join(dirPath, fileInfo.Name())
+		err := w.sendFileCreatedEventIfNew(filePath, fileInfo)
+
+		if err != nil {
+			return
+		}
+	}
+}
+
+// sendFileCreatedEventIfNew sends a create event if the file isn't already
+// being tracked, then (re)establishes the internal watch on it and marks it
+// as existing so duplicate create events are suppressed.
+func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) {
+	w.mu.Lock()
+	_, doesExist := w.fileExists[filePath]
+	w.mu.Unlock()
+	if !doesExist {
+		// Send create event
+		select {
+		case w.Events <- newCreateEvent(filePath):
+		case <-w.done:
+			return
+		}
+	}
+
+	// like watchDirectoryFiles (but without doing another ReadDir)
+	filePath, err = w.internalWatch(filePath, fileInfo)
+	if err != nil {
+		return err
+	}
+
+	w.mu.Lock()
+	w.fileExists[filePath] = true
+	w.mu.Unlock()
+
+	return nil
+}
+
+// internalWatch adds a library-internal (not user-requested) watch for name.
+// Directories get only delete/rename notifications (merged with any flags
+// already in effect); files get the full noteAllEvents set.
+func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) {
+	if fileInfo.IsDir() {
+		// mimic Linux providing delete events for subdirectories
+		// but preserve the flags used if currently watching subdirectory
+		w.mu.Lock()
+		flags := w.dirFlags[name]
+		w.mu.Unlock()
+
+		flags |= unix.NOTE_DELETE | unix.NOTE_RENAME
+		return w.addWatch(name, flags)
+	}
+
+	// watch file to mimic Linux inotify
+	return w.addWatch(name, noteAllEvents)
+}
+
+// kqueue creates a new kernel event queue and returns a descriptor.
+// Failure is detected via kq == -1 (kevent convention), with err as cause.
+func kqueue() (kq int, err error) {
+	kq, err = unix.Kqueue()
+	if kq == -1 {
+		return kq, err
+	}
+	return kq, nil
+}
+
+// register events with the queue: applies flags (e.g. EV_ADD or EV_DELETE)
+// and fflags (NOTE_* filter mask) to each fd as an EVFILT_VNODE kevent.
+func register(kq int, fds []int, flags int, fflags uint32) error {
+	changes := make([]unix.Kevent_t, len(fds))
+
+	for i, fd := range fds {
+		// SetKevent converts int to the platform-specific types:
+		unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags)
+		changes[i].Fflags = fflags
+	}
+
+	// register the events
+	success, err := unix.Kevent(kq, changes, nil, nil)
+	if success == -1 {
+		return err
+	}
+	return nil
+}
+
+// read retrieves pending events, or waits until an event occurs.
+// A timeout of nil blocks indefinitely, while 0 polls the queue.
+// The returned slice aliases the first n entries of the events buffer.
+func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) {
+	n, err := unix.Kevent(kq, nil, events, timeout)
+	if err != nil {
+		return nil, err
+	}
+	return events[0:n], nil
+}
+
+// durationToTimespec prepares a timeout value for passing to kevent(2).
+func durationToTimespec(d time.Duration) unix.Timespec {
+	return unix.NsecToTimespec(d.Nanoseconds())
+}
--- /dev/null
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build freebsd openbsd netbsd dragonfly
+
+package fsnotify
+
+import "golang.org/x/sys/unix"
+
+const openMode = unix.O_NONBLOCK | unix.O_RDONLY
--- /dev/null
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin
+
+package fsnotify
+
+import "golang.org/x/sys/unix"
+
+// note: this constant is not defined on BSD
+const openMode = unix.O_EVTONLY
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows
+
+package fsnotify
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+ "sync"
+ "syscall"
+ "unsafe"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+// All watch mutation happens on the single I/O thread (readEvents);
+// requests are marshalled to it via input and quit.
+type Watcher struct {
+	Events   chan Event
+	Errors   chan error
+	isClosed bool           // Set to true when Close() is first called
+	mu       sync.Mutex     // Map access
+	port     syscall.Handle // Handle to completion port
+	watches  watchMap       // Map of watches (key: i-number)
+	input    chan *input    // Inputs to the reader are sent on this channel
+	quit     chan chan<- error
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+// It creates an I/O completion port and starts the readEvents goroutine
+// that owns it.
+func NewWatcher() (*Watcher, error) {
+	port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0)
+	if e != nil {
+		return nil, os.NewSyscallError("CreateIoCompletionPort", e)
+	}
+	w := &Watcher{
+		port:    port,
+		watches: make(watchMap),
+		input:   make(chan *input, 1),
+		Events:  make(chan Event, 50),
+		Errors:  make(chan error),
+		quit:    make(chan chan<- error, 1),
+	}
+	go w.readEvents()
+	return w, nil
+}
+
+// Close removes all watches and closes the events channel.
+// NOTE(review): unlike the kqueue implementation, isClosed is read and set
+// here without holding mu — concurrent Close() calls may race; confirm
+// whether callers are expected to serialize Close themselves.
+func (w *Watcher) Close() error {
+	if w.isClosed {
+		return nil
+	}
+	w.isClosed = true
+
+	// Send "quit" message to the reader goroutine
+	ch := make(chan error)
+	w.quit <- ch
+	if err := w.wakeupReader(); err != nil {
+		return err
+	}
+	return <-ch
+}
+
+// Add starts watching the named file or directory (non-recursively).
+// The request is forwarded to the I/O thread via the input channel and the
+// reader is woken with a zero-byte completion packet; the reply channel
+// carries the result back.
+func (w *Watcher) Add(name string) error {
+	if w.isClosed {
+		return errors.New("watcher already closed")
+	}
+	in := &input{
+		op:    opAddWatch,
+		path:  filepath.Clean(name),
+		flags: sysFSALLEVENTS,
+		reply: make(chan error),
+	}
+	w.input <- in
+	if err := w.wakeupReader(); err != nil {
+		return err
+	}
+	return <-in.reply
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+// Like Add, the request is executed on the I/O thread.
+func (w *Watcher) Remove(name string) error {
+	in := &input{
+		op:    opRemoveWatch,
+		path:  filepath.Clean(name),
+		reply: make(chan error),
+	}
+	w.input <- in
+	if err := w.wakeupReader(); err != nil {
+		return err
+	}
+	return <-in.reply
+}
+
+const (
+	// Options for AddWatch
+	sysFSONESHOT = 0x80000000
+	sysFSONLYDIR = 0x1000000
+
+	// Events — bit values appear to mirror Linux inotify IN_* masks so the
+	// Windows backend can share the same event vocabulary; TODO confirm.
+	sysFSACCESS     = 0x1
+	sysFSALLEVENTS  = 0xfff
+	sysFSATTRIB     = 0x4
+	sysFSCLOSE      = 0x18
+	sysFSCREATE     = 0x100
+	sysFSDELETE     = 0x200
+	sysFSDELETESELF = 0x400
+	sysFSMODIFY     = 0x2
+	sysFSMOVE       = 0xc0
+	sysFSMOVEDFROM  = 0x40
+	sysFSMOVEDTO    = 0x80
+	sysFSMOVESELF   = 0x800
+
+	// Special events
+	sysFSIGNORED   = 0x8000
+	sysFSQOVERFLOW = 0x4000
+)
+
+func newEvent(name string, mask uint32) Event {
+ e := Event{Name: name}
+ if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO {
+ e.Op |= Create
+ }
+ if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF {
+ e.Op |= Remove
+ }
+ if mask&sysFSMODIFY == sysFSMODIFY {
+ e.Op |= Write
+ }
+ if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM {
+ e.Op |= Rename
+ }
+ if mask&sysFSATTRIB == sysFSATTRIB {
+ e.Op |= Chmod
+ }
+ return e
+}
+
+// Operation codes carried by input.op.
+const (
+	opAddWatch = iota
+	opRemoveWatch
+)
+
+// provisional marks a watch whose registration is still in progress; it
+// lives above bit 32 so it never collides with the sysFS* event bits.
+const (
+	provisional uint64 = 1 << (32 + iota)
+)
+
+// input is a request marshalled from Add/Remove to the I/O thread.
+type input struct {
+	op    int
+	path  string
+	flags uint32
+	reply chan error
+}
+
+// inode identifies a directory by volume serial plus 64-bit file index.
+type inode struct {
+	handle syscall.Handle
+	volume uint32
+	index  uint64
+}
+
+type watch struct {
+	ov     syscall.Overlapped
+	ino    *inode            // i-number
+	path   string            // Directory path
+	mask   uint64            // Directory itself is being watched with these notify flags
+	names  map[string]uint64 // Map of names being watched and their notify flags
+	rename string            // Remembers the old name while renaming a file
+	buf    [4096]byte
+}
+
+// watchMap indexes watches by volume serial, then by file index.
+type indexMap map[uint64]*watch
+type watchMap map[uint32]indexMap
+
+// wakeupReader posts an empty completion packet so the I/O thread's blocking
+// GetQueuedCompletionStatus call returns and it can service input/quit.
+func (w *Watcher) wakeupReader() error {
+	e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil)
+	if e != nil {
+		return os.NewSyscallError("PostQueuedCompletionStatus", e)
+	}
+	return nil
+}
+
+// getDir returns the directory to watch for pathname: the path itself when
+// it is a directory, otherwise its parent directory.
+func getDir(pathname string) (dir string, err error) {
+	attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname))
+	if e != nil {
+		return "", os.NewSyscallError("GetFileAttributes", e)
+	}
+	if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 {
+		dir = pathname
+	} else {
+		dir, _ = filepath.Split(pathname)
+		dir = filepath.Clean(dir)
+	}
+	return
+}
+
+// getIno opens the directory at path for change notification and returns its
+// identity (volume serial + file index) together with the open handle.
+// The caller owns the handle and must close it on every path that does not
+// keep it.
+func getIno(path string) (ino *inode, err error) {
+	h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path),
+		syscall.FILE_LIST_DIRECTORY,
+		syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
+		nil, syscall.OPEN_EXISTING,
+		syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0)
+	if e != nil {
+		return nil, os.NewSyscallError("CreateFile", e)
+	}
+	var fi syscall.ByHandleFileInformation
+	if e = syscall.GetFileInformationByHandle(h, &fi); e != nil {
+		syscall.CloseHandle(h)
+		return nil, os.NewSyscallError("GetFileInformationByHandle", e)
+	}
+	ino = &inode{
+		handle: h,
+		volume: fi.VolumeSerialNumber,
+		index:  uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow),
+	}
+	return ino, nil
+}
+
+// get looks up the watch for ino, or nil if none exists.
+// Must run within the I/O thread.
+func (m watchMap) get(ino *inode) *watch {
+	if i := m[ino.volume]; i != nil {
+		return i[ino.index]
+	}
+	return nil
+}
+
+// set stores watch under ino, creating the per-volume index map on demand.
+// Must run within the I/O thread.
+func (m watchMap) set(ino *inode, watch *watch) {
+	i := m[ino.volume]
+	if i == nil {
+		i = make(indexMap)
+		m[ino.volume] = i
+	}
+	i[ino.index] = watch
+}
+
+// addWatch registers pathname (or its parent directory, with pathname's base
+// name tracked in watch.names) with the given sysFS* flags and starts the
+// overlapped directory read. The provisional bit guards the entry until
+// startRead succeeds.
+// Must run within the I/O thread.
+func (w *Watcher) addWatch(pathname string, flags uint64) error {
+	dir, err := getDir(pathname)
+	if err != nil {
+		return err
+	}
+	if flags&sysFSONLYDIR != 0 && pathname != dir {
+		return nil
+	}
+	ino, err := getIno(dir)
+	if err != nil {
+		return err
+	}
+	w.mu.Lock()
+	watchEntry := w.watches.get(ino)
+	w.mu.Unlock()
+	if watchEntry == nil {
+		if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil {
+			syscall.CloseHandle(ino.handle)
+			return os.NewSyscallError("CreateIoCompletionPort", e)
+		}
+		watchEntry = &watch{
+			ino:   ino,
+			path:  dir,
+			names: make(map[string]uint64),
+		}
+		w.mu.Lock()
+		w.watches.set(ino, watchEntry)
+		w.mu.Unlock()
+		flags |= provisional
+	} else {
+		// Directory already watched; the freshly opened handle is redundant.
+		syscall.CloseHandle(ino.handle)
+	}
+	if pathname == dir {
+		watchEntry.mask |= flags
+	} else {
+		watchEntry.names[filepath.Base(pathname)] |= flags
+	}
+	if err = w.startRead(watchEntry); err != nil {
+		return err
+	}
+	// Registration succeeded; clear the provisional guard bit.
+	if pathname == dir {
+		watchEntry.mask &= ^provisional
+	} else {
+		watchEntry.names[filepath.Base(pathname)] &= ^provisional
+	}
+	return nil
+}
+
+// remWatch removes the watch on pathname: the whole-directory mask when
+// pathname is the directory itself, otherwise just its entry in watch.names.
+// An IGNORED event is emitted for the removed path, then the directory read
+// is restarted with the remaining mask.
+// Must run within the I/O thread.
+func (w *Watcher) remWatch(pathname string) error {
+	dir, err := getDir(pathname)
+	if err != nil {
+		return err
+	}
+	ino, err := getIno(dir)
+	if err != nil {
+		return err
+	}
+	w.mu.Lock()
+	watch := w.watches.get(ino)
+	w.mu.Unlock()
+	if watch == nil {
+		return fmt.Errorf("can't remove non-existent watch for: %s", pathname)
+	}
+	if pathname == dir {
+		w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
+		watch.mask = 0
+	} else {
+		name := filepath.Base(pathname)
+		w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED)
+		delete(watch.names, name)
+	}
+	return w.startRead(watch)
+}
+
+// deleteWatch clears every mask on watch (per-name and whole-directory),
+// emitting an IGNORED event for each fully-registered (non-provisional) one.
+// Must run within the I/O thread.
+func (w *Watcher) deleteWatch(watch *watch) {
+	for name, mask := range watch.names {
+		if mask&provisional == 0 {
+			w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED)
+		}
+		delete(watch.names, name)
+	}
+	if watch.mask != 0 {
+		if watch.mask&provisional == 0 {
+			w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
+		}
+		watch.mask = 0
+	}
+}
+
+// startRead (re)issues the overlapped ReadDirectoryChanges call for watch,
+// first cancelling any outstanding read. If no flags remain it tears the
+// watch down instead.
+// Must run within the I/O thread.
+func (w *Watcher) startRead(watch *watch) error {
+	if e := syscall.CancelIo(watch.ino.handle); e != nil {
+		w.Errors <- os.NewSyscallError("CancelIo", e)
+		w.deleteWatch(watch)
+	}
+	// Union of the directory mask and all per-name masks decides what the
+	// kernel should report.
+	mask := toWindowsFlags(watch.mask)
+	for _, m := range watch.names {
+		mask |= toWindowsFlags(m)
+	}
+	if mask == 0 {
+		if e := syscall.CloseHandle(watch.ino.handle); e != nil {
+			w.Errors <- os.NewSyscallError("CloseHandle", e)
+		}
+		w.mu.Lock()
+		delete(w.watches[watch.ino.volume], watch.ino.index)
+		w.mu.Unlock()
+		return nil
+	}
+	e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0],
+		uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0)
+	if e != nil {
+		err := os.NewSyscallError("ReadDirectoryChanges", e)
+		if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
+			// Watched directory was probably removed
+			if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) {
+				if watch.mask&sysFSONESHOT != 0 {
+					watch.mask = 0
+				}
+			}
+			err = nil
+		}
+		w.deleteWatch(watch)
+		w.startRead(watch)
+		return err
+	}
+	return nil
+}
+
+// readEvents reads from the I/O completion port, converts the
+// received events into Event objects and sends them via the Events channel.
+// Entry point to the I/O thread. A nil overlapped pointer means a wakeup
+// packet: the thread then services quit (shutdown) or input (add/remove)
+// requests; otherwise the packet carries directory-change data to decode.
+func (w *Watcher) readEvents() {
+	var (
+		n, key uint32
+		ov     *syscall.Overlapped
+	)
+	// Pin this goroutine to one OS thread: overlapped I/O completion must be
+	// processed by the thread that owns the port state.
+	runtime.LockOSThread()
+
+	for {
+		e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE)
+		// The overlapped struct is embedded first in watch, so the pointer
+		// recovers the owning watch.
+		watch := (*watch)(unsafe.Pointer(ov))
+
+		if watch == nil {
+			select {
+			case ch := <-w.quit:
+				w.mu.Lock()
+				var indexes []indexMap
+				for _, index := range w.watches {
+					indexes = append(indexes, index)
+				}
+				w.mu.Unlock()
+				for _, index := range indexes {
+					for _, watch := range index {
+						w.deleteWatch(watch)
+						w.startRead(watch)
+					}
+				}
+				var err error
+				if e := syscall.CloseHandle(w.port); e != nil {
+					err = os.NewSyscallError("CloseHandle", e)
+				}
+				close(w.Events)
+				close(w.Errors)
+				ch <- err
+				return
+			case in := <-w.input:
+				switch in.op {
+				case opAddWatch:
+					in.reply <- w.addWatch(in.path, uint64(in.flags))
+				case opRemoveWatch:
+					in.reply <- w.remWatch(in.path)
+				}
+			default:
+			}
+			continue
+		}
+
+		switch e {
+		case syscall.ERROR_MORE_DATA:
+			if watch == nil {
+				w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer")
+			} else {
+				// The i/o succeeded but the buffer is full.
+				// In theory we should be building up a full packet.
+				// In practice we can get away with just carrying on.
+				n = uint32(unsafe.Sizeof(watch.buf))
+			}
+		case syscall.ERROR_ACCESS_DENIED:
+			// Watched directory was probably removed
+			w.sendEvent(watch.path, watch.mask&sysFSDELETESELF)
+			w.deleteWatch(watch)
+			w.startRead(watch)
+			continue
+		case syscall.ERROR_OPERATION_ABORTED:
+			// CancelIo was called on this handle
+			continue
+		default:
+			w.Errors <- os.NewSyscallError("GetQueuedCompletionPort", e)
+			continue
+		case nil:
+			// Success: fall through to decode the notification buffer.
+		}
+
+		var offset uint32
+		for {
+			if n == 0 {
+				w.Events <- newEvent("", sysFSQOVERFLOW)
+				w.Errors <- errors.New("short read in readEvents()")
+				break
+			}
+
+			// Point "raw" to the event in the buffer
+			raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
+			buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName))
+			name := syscall.UTF16ToString(buf[:raw.FileNameLength/2])
+			fullname := filepath.Join(watch.path, name)
+
+			var mask uint64
+			switch raw.Action {
+			case syscall.FILE_ACTION_REMOVED:
+				mask = sysFSDELETESELF
+			case syscall.FILE_ACTION_MODIFIED:
+				mask = sysFSMODIFY
+			case syscall.FILE_ACTION_RENAMED_OLD_NAME:
+				// First half of a rename: remember the old name until the
+				// matching NEW_NAME action arrives.
+				watch.rename = name
+			case syscall.FILE_ACTION_RENAMED_NEW_NAME:
+				if watch.names[watch.rename] != 0 {
+					// Carry the per-name flags over from old name to new.
+					watch.names[name] |= watch.names[watch.rename]
+					delete(watch.names, watch.rename)
+					mask = sysFSMOVESELF
+				}
+			}
+
+			sendNameEvent := func() {
+				if w.sendEvent(fullname, watch.names[name]&mask) {
+					if watch.names[name]&sysFSONESHOT != 0 {
+						delete(watch.names, name)
+					}
+				}
+			}
+			if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME {
+				sendNameEvent()
+			}
+			if raw.Action == syscall.FILE_ACTION_REMOVED {
+				w.sendEvent(fullname, watch.names[name]&sysFSIGNORED)
+				delete(watch.names, name)
+			}
+			if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) {
+				if watch.mask&sysFSONESHOT != 0 {
+					watch.mask = 0
+				}
+			}
+			if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME {
+				fullname = filepath.Join(watch.path, watch.rename)
+				sendNameEvent()
+			}
+
+			// Move to the next event in the buffer
+			if raw.NextEntryOffset == 0 {
+				break
+			}
+			offset += raw.NextEntryOffset
+
+			// Error!
+			if offset >= n {
+				w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.")
+				break
+			}
+		}
+
+		if err := w.startRead(watch); err != nil {
+			w.Errors <- err
+		}
+	}
+}
+
+// sendEvent delivers an event for name when mask is non-zero, reporting
+// whether anything was sent. If a quit request is pending, the quit token is
+// put back so shutdown still proceeds, and the event is dropped.
+func (w *Watcher) sendEvent(name string, mask uint64) bool {
+	if mask == 0 {
+		return false
+	}
+	event := newEvent(name, uint32(mask))
+	select {
+	case ch := <-w.quit:
+		w.quit <- ch
+	case w.Events <- event:
+	}
+	return true
+}
+
+func toWindowsFlags(mask uint64) uint32 {
+ var m uint32
+ if mask&sysFSACCESS != 0 {
+ m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS
+ }
+ if mask&sysFSMODIFY != 0 {
+ m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE
+ }
+ if mask&sysFSATTRIB != 0 {
+ m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES
+ }
+ if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 {
+ m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME
+ }
+ return m
+}
+
+func toFSnotifyFlags(action uint32) uint64 {
+ switch action {
+ case syscall.FILE_ACTION_ADDED:
+ return sysFSCREATE
+ case syscall.FILE_ACTION_REMOVED:
+ return sysFSDELETE
+ case syscall.FILE_ACTION_MODIFIED:
+ return sysFSMODIFY
+ case syscall.FILE_ACTION_RENAMED_OLD_NAME:
+ return sysFSMOVEDFROM
+ case syscall.FILE_ACTION_RENAMED_NEW_NAME:
+ return sysFSMOVEDTO
+ }
+ return 0
+}
--- /dev/null
+### Please give general description of the problem
+
+### Please provide code snippets to reproduce the problem described above
+
+### Do you have any suggestion to fix the problem?
\ No newline at end of file
--- /dev/null
+### What problem should be fixed?
+
+### Have you added test cases to catch the problem?
--- /dev/null
+testdata/conf_out.ini
+ini.sublime-project
+ini.sublime-workspace
+testdata/conf_reflect.ini
+.idea
+/.vscode
--- /dev/null
+sudo: false
+language: go
+go:
+ - 1.6.x
+ - 1.7.x
+ - 1.8.x
+ - 1.9.x
+ - 1.10.x
+ - 1.11.x
+
+script:
+ - go get golang.org/x/tools/cmd/cover
+ - go get github.com/smartystreets/goconvey
+ - mkdir -p $HOME/gopath/src/gopkg.in
+ - ln -s $HOME/gopath/src/github.com/go-ini/ini $HOME/gopath/src/gopkg.in/ini.v1
+ - cd $HOME/gopath/src/gopkg.in/ini.v1
+ - go test -v -cover -race
--- /dev/null
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+ Copyright 2014 Unknwon
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
--- /dev/null
+.PHONY: build test bench vet coverage
+
+build: vet bench
+
+test:
+ go test -v -cover -race
+
+bench:
+ go test -v -cover -race -test.bench=. -test.benchmem
+
+vet:
+ go vet
+
+coverage:
+ go test -coverprofile=c.out && go tool cover -html=c.out && rm c.out
--- /dev/null
+INI [![Build Status](https://travis-ci.org/go-ini/ini.svg?branch=master)](https://travis-ci.org/go-ini/ini) [![Sourcegraph](https://img.shields.io/badge/view%20on-Sourcegraph-brightgreen.svg)](https://sourcegraph.com/github.com/go-ini/ini)
+===
+
+![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200)
+
+Package ini provides INI file read and write functionality in Go.
+
+## Features
+
+- Load from multiple data sources(`[]byte`, file and `io.ReadCloser`) with overwrites.
+- Read with recursion values.
+- Read with parent-child sections.
+- Read with auto-increment key names.
+- Read with multiple-line values.
+- Read with tons of helper methods.
+- Read and convert values to Go types.
+- Read and **WRITE** comments of sections and keys.
+- Manipulate sections, keys and comments with ease.
+- Keep sections and keys in order as you parse and save.
+
+## Installation
+
+The minimum requirement of Go is **1.6**.
+
+To use a tagged revision:
+
+```sh
+$ go get gopkg.in/ini.v1
+```
+
+To use with latest changes:
+
+```sh
+$ go get github.com/go-ini/ini
+```
+
+Please add `-u` flag to update in the future.
+
+## Getting Help
+
+- [Getting Started](https://ini.unknwon.io/docs/intro/getting_started)
+- [API Documentation](https://gowalker.org/gopkg.in/ini.v1)
+
+## License
+
+This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text.
--- /dev/null
+// Copyright 2017 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini_test
+
+import (
+ "testing"
+
+ "gopkg.in/ini.v1"
+)
+
+func newTestFile(block bool) *ini.File {
+ c, _ := ini.Load([]byte(_CONF_DATA))
+ c.BlockMode = block
+ return c
+}
+
+func Benchmark_Key_Value(b *testing.B) {
+ c := newTestFile(true)
+ for i := 0; i < b.N; i++ {
+ c.Section("").Key("NAME").Value()
+ }
+}
+
+func Benchmark_Key_Value_NonBlock(b *testing.B) {
+ c := newTestFile(false)
+ for i := 0; i < b.N; i++ {
+ c.Section("").Key("NAME").Value()
+ }
+}
+
+func Benchmark_Key_Value_ViaSection(b *testing.B) {
+ c := newTestFile(true)
+ sec := c.Section("")
+ for i := 0; i < b.N; i++ {
+ sec.Key("NAME").Value()
+ }
+}
+
+func Benchmark_Key_Value_ViaSection_NonBlock(b *testing.B) {
+ c := newTestFile(false)
+ sec := c.Section("")
+ for i := 0; i < b.N; i++ {
+ sec.Key("NAME").Value()
+ }
+}
+
+func Benchmark_Key_Value_Direct(b *testing.B) {
+ c := newTestFile(true)
+ key := c.Section("").Key("NAME")
+ for i := 0; i < b.N; i++ {
+ key.Value()
+ }
+}
+
+func Benchmark_Key_Value_Direct_NonBlock(b *testing.B) {
+ c := newTestFile(false)
+ key := c.Section("").Key("NAME")
+ for i := 0; i < b.N; i++ {
+ key.Value()
+ }
+}
+
+func Benchmark_Key_String(b *testing.B) {
+ c := newTestFile(true)
+ for i := 0; i < b.N; i++ {
+ _ = c.Section("").Key("NAME").String()
+ }
+}
+
+func Benchmark_Key_String_NonBlock(b *testing.B) {
+ c := newTestFile(false)
+ for i := 0; i < b.N; i++ {
+ _ = c.Section("").Key("NAME").String()
+ }
+}
+
+func Benchmark_Key_String_ViaSection(b *testing.B) {
+ c := newTestFile(true)
+ sec := c.Section("")
+ for i := 0; i < b.N; i++ {
+ _ = sec.Key("NAME").String()
+ }
+}
+
+func Benchmark_Key_String_ViaSection_NonBlock(b *testing.B) {
+ c := newTestFile(false)
+ sec := c.Section("")
+ for i := 0; i < b.N; i++ {
+ _ = sec.Key("NAME").String()
+ }
+}
+
+func Benchmark_Key_SetValue(b *testing.B) {
+ c := newTestFile(true)
+ for i := 0; i < b.N; i++ {
+ c.Section("").Key("NAME").SetValue("10")
+ }
+}
+
+func Benchmark_Key_SetValue_VisSection(b *testing.B) {
+ c := newTestFile(true)
+ sec := c.Section("")
+ for i := 0; i < b.N; i++ {
+ sec.Key("NAME").SetValue("10")
+ }
+}
--- /dev/null
+// Copyright 2016 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "fmt"
+)
+
+type ErrDelimiterNotFound struct {
+ Line string
+}
+
+func IsErrDelimiterNotFound(err error) bool {
+ _, ok := err.(ErrDelimiterNotFound)
+ return ok
+}
+
+func (err ErrDelimiterNotFound) Error() string {
+ return fmt.Sprintf("key-value delimiter not found: %s", err.Line)
+}
--- /dev/null
+// Copyright 2017 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "strings"
+ "sync"
+)
+
+// File represents a combination of one or more INI file(s) in memory.
+type File struct {
+ options LoadOptions
+ dataSources []dataSource
+
+ // Should make things safe, but sometimes doesn't matter.
+ BlockMode bool
+ lock sync.RWMutex
+
+ // To keep data in order.
+ sectionList []string
+ // Actual data is stored here.
+ sections map[string]*Section
+
+ NameMapper
+ ValueMapper
+}
+
+// newFile initializes File object with given data sources.
+func newFile(dataSources []dataSource, opts LoadOptions) *File {
+ if len(opts.KeyValueDelimiters) == 0 {
+ opts.KeyValueDelimiters = "=:"
+ }
+ return &File{
+ BlockMode: true,
+ dataSources: dataSources,
+ sections: make(map[string]*Section),
+ sectionList: make([]string, 0, 10),
+ options: opts,
+ }
+}
+
+// Empty returns an empty file object.
+func Empty() *File {
+ // Ignore error here, we sure our data is good.
+ f, _ := Load([]byte(""))
+ return f
+}
+
+// NewSection creates a new section.
+func (f *File) NewSection(name string) (*Section, error) {
+ if len(name) == 0 {
+ return nil, errors.New("error creating new section: empty section name")
+ } else if f.options.Insensitive && name != DEFAULT_SECTION {
+ name = strings.ToLower(name)
+ }
+
+ if f.BlockMode {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ }
+
+ if inSlice(name, f.sectionList) {
+ return f.sections[name], nil
+ }
+
+ f.sectionList = append(f.sectionList, name)
+ f.sections[name] = newSection(f, name)
+ return f.sections[name], nil
+}
+
+// NewRawSection creates a new section with an unparseable body.
+func (f *File) NewRawSection(name, body string) (*Section, error) {
+ section, err := f.NewSection(name)
+ if err != nil {
+ return nil, err
+ }
+
+ section.isRawSection = true
+ section.rawBody = body
+ return section, nil
+}
+
+// NewSections creates a list of sections.
+func (f *File) NewSections(names ...string) (err error) {
+ for _, name := range names {
+ if _, err = f.NewSection(name); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// GetSection returns section by given name.
+func (f *File) GetSection(name string) (*Section, error) {
+ if len(name) == 0 {
+ name = DEFAULT_SECTION
+ }
+ if f.options.Insensitive {
+ name = strings.ToLower(name)
+ }
+
+ if f.BlockMode {
+ f.lock.RLock()
+ defer f.lock.RUnlock()
+ }
+
+ sec := f.sections[name]
+ if sec == nil {
+ return nil, fmt.Errorf("section '%s' does not exist", name)
+ }
+ return sec, nil
+}
+
+// Section assumes named section exists and returns a zero-value when not.
+func (f *File) Section(name string) *Section {
+ sec, err := f.GetSection(name)
+ if err != nil {
+ // Note: It's OK here because the only possible error is empty section name,
+ // but if it's empty, this piece of code won't be executed.
+ sec, _ = f.NewSection(name)
+ return sec
+ }
+ return sec
+}
+
+// Sections returns the list of Section.
+func (f *File) Sections() []*Section {
+ if f.BlockMode {
+ f.lock.RLock()
+ defer f.lock.RUnlock()
+ }
+
+ sections := make([]*Section, len(f.sectionList))
+ for i, name := range f.sectionList {
+ sections[i] = f.sections[name]
+ }
+ return sections
+}
+
+// ChildSections returns a list of child sections of given section name.
+func (f *File) ChildSections(name string) []*Section {
+ return f.Section(name).ChildSections()
+}
+
+// SectionStrings returns list of section names.
+func (f *File) SectionStrings() []string {
+ list := make([]string, len(f.sectionList))
+ copy(list, f.sectionList)
+ return list
+}
+
+// DeleteSection deletes a section.
+func (f *File) DeleteSection(name string) {
+ if f.BlockMode {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ }
+
+ if len(name) == 0 {
+ name = DEFAULT_SECTION
+ }
+
+ for i, s := range f.sectionList {
+ if s == name {
+ f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...)
+ delete(f.sections, name)
+ return
+ }
+ }
+}
+
+func (f *File) reload(s dataSource) error {
+ r, err := s.ReadCloser()
+ if err != nil {
+ return err
+ }
+ defer r.Close()
+
+ return f.parse(r)
+}
+
+// Reload reloads and parses all data sources.
+func (f *File) Reload() (err error) {
+ for _, s := range f.dataSources {
+ if err = f.reload(s); err != nil {
+ // In loose mode, we create an empty default section for nonexistent files.
+ if os.IsNotExist(err) && f.options.Loose {
+ f.parse(bytes.NewBuffer(nil))
+ continue
+ }
+ return err
+ }
+ }
+ return nil
+}
+
+// Append appends one or more data sources and reloads automatically.
+func (f *File) Append(source interface{}, others ...interface{}) error {
+ ds, err := parseDataSource(source)
+ if err != nil {
+ return err
+ }
+ f.dataSources = append(f.dataSources, ds)
+ for _, s := range others {
+ ds, err = parseDataSource(s)
+ if err != nil {
+ return err
+ }
+ f.dataSources = append(f.dataSources, ds)
+ }
+ return f.Reload()
+}
+
+func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) {
+ equalSign := DefaultFormatLeft + "=" + DefaultFormatRight
+
+ if PrettyFormat || PrettyEqual {
+ equalSign = " = "
+ }
+
+ // Use buffer to make sure target is safe until finish encoding.
+ buf := bytes.NewBuffer(nil)
+ for i, sname := range f.sectionList {
+ sec := f.Section(sname)
+ if len(sec.Comment) > 0 {
+ // Support multiline comments
+ lines := strings.Split(sec.Comment, LineBreak)
+ for i := range lines {
+ if lines[i][0] != '#' && lines[i][0] != ';' {
+ lines[i] = "; " + lines[i]
+ } else {
+ lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:])
+ }
+
+ if _, err := buf.WriteString(lines[i] + LineBreak); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ if i > 0 || DefaultHeader {
+ if _, err := buf.WriteString("[" + sname + "]" + LineBreak); err != nil {
+ return nil, err
+ }
+ } else {
+ // Write nothing if default section is empty
+ if len(sec.keyList) == 0 {
+ continue
+ }
+ }
+
+ if sec.isRawSection {
+ if _, err := buf.WriteString(sec.rawBody); err != nil {
+ return nil, err
+ }
+
+ if PrettySection {
+ // Put a line between sections
+ if _, err := buf.WriteString(LineBreak); err != nil {
+ return nil, err
+ }
+ }
+ continue
+ }
+
+ // Count and generate alignment length and buffer spaces using the
+		// longest key. Keys may be modified if they contain certain characters so
+ // we need to take that into account in our calculation.
+ alignLength := 0
+ if PrettyFormat {
+ for _, kname := range sec.keyList {
+ keyLength := len(kname)
+ // First case will surround key by ` and second by """
+ if strings.Contains(kname, "\"") || strings.ContainsAny(kname, f.options.KeyValueDelimiters) {
+ keyLength += 2
+ } else if strings.Contains(kname, "`") {
+ keyLength += 6
+ }
+
+ if keyLength > alignLength {
+ alignLength = keyLength
+ }
+ }
+ }
+ alignSpaces := bytes.Repeat([]byte(" "), alignLength)
+
+ KEY_LIST:
+ for _, kname := range sec.keyList {
+ key := sec.Key(kname)
+ if len(key.Comment) > 0 {
+ if len(indent) > 0 && sname != DEFAULT_SECTION {
+ buf.WriteString(indent)
+ }
+
+ // Support multiline comments
+ lines := strings.Split(key.Comment, LineBreak)
+ for i := range lines {
+ if lines[i][0] != '#' && lines[i][0] != ';' {
+ lines[i] = "; " + strings.TrimSpace(lines[i])
+ } else {
+ lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:])
+ }
+
+ if _, err := buf.WriteString(lines[i] + LineBreak); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ if len(indent) > 0 && sname != DEFAULT_SECTION {
+ buf.WriteString(indent)
+ }
+
+ switch {
+ case key.isAutoIncrement:
+ kname = "-"
+ case strings.Contains(kname, "\"") || strings.ContainsAny(kname, f.options.KeyValueDelimiters):
+ kname = "`" + kname + "`"
+ case strings.Contains(kname, "`"):
+ kname = `"""` + kname + `"""`
+ }
+
+ for _, val := range key.ValueWithShadows() {
+ if _, err := buf.WriteString(kname); err != nil {
+ return nil, err
+ }
+
+ if key.isBooleanType {
+ if kname != sec.keyList[len(sec.keyList)-1] {
+ buf.WriteString(LineBreak)
+ }
+ continue KEY_LIST
+ }
+
+ // Write out alignment spaces before "=" sign
+ if PrettyFormat {
+ buf.Write(alignSpaces[:alignLength-len(kname)])
+ }
+
+ // In case key value contains "\n", "`", "\"", "#" or ";"
+ if strings.ContainsAny(val, "\n`") {
+ val = `"""` + val + `"""`
+ } else if !f.options.IgnoreInlineComment && strings.ContainsAny(val, "#;") {
+ val = "`" + val + "`"
+ }
+ if _, err := buf.WriteString(equalSign + val + LineBreak); err != nil {
+ return nil, err
+ }
+ }
+
+ for _, val := range key.nestedValues {
+ if _, err := buf.WriteString(indent + " " + val + LineBreak); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ if PrettySection {
+ // Put a line between sections
+ if _, err := buf.WriteString(LineBreak); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ return buf, nil
+}
+
+// WriteToIndent writes content into io.Writer with given indention.
+// If PrettyFormat has been set to be true,
+// it will align "=" sign with spaces under each section.
+func (f *File) WriteToIndent(w io.Writer, indent string) (int64, error) {
+ buf, err := f.writeToBuffer(indent)
+ if err != nil {
+ return 0, err
+ }
+ return buf.WriteTo(w)
+}
+
+// WriteTo writes file content into io.Writer.
+func (f *File) WriteTo(w io.Writer) (int64, error) {
+ return f.WriteToIndent(w, "")
+}
+
+// SaveToIndent writes content to file system with given value indention.
+func (f *File) SaveToIndent(filename, indent string) error {
+ // Note: Because we are truncating with os.Create,
+	// so it's safer to save to a temporary file location and rename after done.
+ buf, err := f.writeToBuffer(indent)
+ if err != nil {
+ return err
+ }
+
+ return ioutil.WriteFile(filename, buf.Bytes(), 0666)
+}
+
+// SaveTo writes content to file system.
+func (f *File) SaveTo(filename string) error {
+ return f.SaveToIndent(filename, "")
+}
--- /dev/null
+// Copyright 2017 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini_test
+
+import (
+ "bytes"
+ "io/ioutil"
+ "testing"
+
+ . "github.com/smartystreets/goconvey/convey"
+ "gopkg.in/ini.v1"
+)
+
+func TestEmpty(t *testing.T) {
+ Convey("Create an empty object", t, func() {
+ f := ini.Empty()
+ So(f, ShouldNotBeNil)
+
+ // Should only have the default section
+ So(len(f.Sections()), ShouldEqual, 1)
+
+ // Default section should not contain any key
+ So(len(f.Section("").Keys()), ShouldBeZeroValue)
+ })
+}
+
+func TestFile_NewSection(t *testing.T) {
+ Convey("Create a new section", t, func() {
+ f := ini.Empty()
+ So(f, ShouldNotBeNil)
+
+ sec, err := f.NewSection("author")
+ So(err, ShouldBeNil)
+ So(sec, ShouldNotBeNil)
+ So(sec.Name(), ShouldEqual, "author")
+
+ So(f.SectionStrings(), ShouldResemble, []string{ini.DEFAULT_SECTION, "author"})
+
+ Convey("With duplicated name", func() {
+ sec, err := f.NewSection("author")
+ So(err, ShouldBeNil)
+ So(sec, ShouldNotBeNil)
+
+ // Does nothing if section already exists
+ So(f.SectionStrings(), ShouldResemble, []string{ini.DEFAULT_SECTION, "author"})
+ })
+
+ Convey("With empty string", func() {
+ _, err := f.NewSection("")
+ So(err, ShouldNotBeNil)
+ })
+ })
+}
+
+func TestFile_NewRawSection(t *testing.T) {
+ Convey("Create a new raw section", t, func() {
+ f := ini.Empty()
+ So(f, ShouldNotBeNil)
+
+ sec, err := f.NewRawSection("comments", `1111111111111111111000000000000000001110000
+111111111111111111100000000000111000000000`)
+ So(err, ShouldBeNil)
+ So(sec, ShouldNotBeNil)
+ So(sec.Name(), ShouldEqual, "comments")
+
+ So(f.SectionStrings(), ShouldResemble, []string{ini.DEFAULT_SECTION, "comments"})
+ So(f.Section("comments").Body(), ShouldEqual, `1111111111111111111000000000000000001110000
+111111111111111111100000000000111000000000`)
+
+ Convey("With duplicated name", func() {
+ sec, err := f.NewRawSection("comments", `1111111111111111111000000000000000001110000`)
+ So(err, ShouldBeNil)
+ So(sec, ShouldNotBeNil)
+ So(f.SectionStrings(), ShouldResemble, []string{ini.DEFAULT_SECTION, "comments"})
+
+ // Overwrite previous existed section
+ So(f.Section("comments").Body(), ShouldEqual, `1111111111111111111000000000000000001110000`)
+ })
+
+ Convey("With empty string", func() {
+ _, err := f.NewRawSection("", "")
+ So(err, ShouldNotBeNil)
+ })
+ })
+}
+
+func TestFile_NewSections(t *testing.T) {
+ Convey("Create new sections", t, func() {
+ f := ini.Empty()
+ So(f, ShouldNotBeNil)
+
+ So(f.NewSections("package", "author"), ShouldBeNil)
+ So(f.SectionStrings(), ShouldResemble, []string{ini.DEFAULT_SECTION, "package", "author"})
+
+ Convey("With duplicated name", func() {
+ So(f.NewSections("author", "features"), ShouldBeNil)
+
+ // Ignore section already exists
+ So(f.SectionStrings(), ShouldResemble, []string{ini.DEFAULT_SECTION, "package", "author", "features"})
+ })
+
+ Convey("With empty string", func() {
+ So(f.NewSections("", ""), ShouldNotBeNil)
+ })
+ })
+}
+
+func TestFile_GetSection(t *testing.T) {
+ Convey("Get a section", t, func() {
+ f, err := ini.Load(_FULL_CONF)
+ So(err, ShouldBeNil)
+ So(f, ShouldNotBeNil)
+
+ sec, err := f.GetSection("author")
+ So(err, ShouldBeNil)
+ So(sec, ShouldNotBeNil)
+ So(sec.Name(), ShouldEqual, "author")
+
+ Convey("Section not exists", func() {
+ _, err := f.GetSection("404")
+ So(err, ShouldNotBeNil)
+ })
+ })
+}
+
+// TestFile_Section verifies that Section never returns nil (a missing section
+// is auto-created) and that insensitive load lowercases the default section.
+func TestFile_Section(t *testing.T) {
+	Convey("Get a section", t, func() {
+		f, err := ini.Load(_FULL_CONF)
+		So(err, ShouldBeNil)
+		So(f, ShouldNotBeNil)
+
+		sec := f.Section("author")
+		So(sec, ShouldNotBeNil)
+		So(sec.Name(), ShouldEqual, "author")
+
+		Convey("Section not exists", func() {
+			sec := f.Section("404")
+			So(sec, ShouldNotBeNil)
+			So(sec.Name(), ShouldEqual, "404")
+		})
+	})
+
+	Convey("Get default section in lower case with insensitive load", t, func() {
+		f, err := ini.InsensitiveLoad([]byte(`
+[default]
+NAME = ini
+VERSION = v1`))
+		So(err, ShouldBeNil)
+		So(f, ShouldNotBeNil)
+
+		So(f.Section("").Key("name").String(), ShouldEqual, "ini")
+		So(f.Section("").Key("version").String(), ShouldEqual, "v1")
+	})
+}
+
+// TestFile_Sections verifies that all sections of the full fixture are
+// returned in parse order, starting with the default section.
+func TestFile_Sections(t *testing.T) {
+	Convey("Get all sections", t, func() {
+		f, err := ini.Load(_FULL_CONF)
+		So(err, ShouldBeNil)
+		So(f, ShouldNotBeNil)
+
+		secs := f.Sections()
+		names := []string{ini.DEFAULT_SECTION, "author", "package", "package.sub", "features", "types", "array", "note", "comments", "string escapes", "advance"}
+		So(len(secs), ShouldEqual, len(names))
+		for i, name := range names {
+			So(secs[i].Name(), ShouldEqual, name)
+		}
+	})
+}
+
+// TestFile_ChildSections verifies dotted-name children lookup: "node.bizN"
+// style sections are returned for parent "node", in parse order.
+func TestFile_ChildSections(t *testing.T) {
+	Convey("Get child sections by parent name", t, func() {
+		f, err := ini.Load([]byte(`
+[node]
+[node.biz1]
+[node.biz2]
+[node.biz3]
+[node.bizN]
+`))
+		So(err, ShouldBeNil)
+		So(f, ShouldNotBeNil)
+
+		children := f.ChildSections("node")
+		names := []string{"node.biz1", "node.biz2", "node.biz3", "node.bizN"}
+		So(len(children), ShouldEqual, len(names))
+		for i, name := range names {
+			So(children[i].Name(), ShouldEqual, name)
+		}
+	})
+}
+
+// TestFile_SectionStrings verifies the name-only view of all sections.
+func TestFile_SectionStrings(t *testing.T) {
+	Convey("Get all section names", t, func() {
+		f, err := ini.Load(_FULL_CONF)
+		So(err, ShouldBeNil)
+		So(f, ShouldNotBeNil)
+
+		So(f.SectionStrings(), ShouldResemble, []string{ini.DEFAULT_SECTION, "author", "package", "package.sub", "features", "types", "array", "note", "comments", "string escapes", "advance"})
+	})
+}
+
+// TestFile_DeleteSection verifies removal of a named section and of the
+// default section (empty name).
+func TestFile_DeleteSection(t *testing.T) {
+	Convey("Delete a section", t, func() {
+		f := ini.Empty()
+		So(f, ShouldNotBeNil)
+
+		f.NewSections("author", "package", "features")
+		f.DeleteSection("features")
+		// Deleting "" removes the (empty) default section as well.
+		f.DeleteSection("")
+		So(f.SectionStrings(), ShouldResemble, []string{"author", "package"})
+	})
+}
+
+// TestFile_Append verifies appending extra data sources to an existing File,
+// and that unsupported source types are rejected.
+func TestFile_Append(t *testing.T) {
+	Convey("Append a data source", t, func() {
+		f := ini.Empty()
+		So(f, ShouldNotBeNil)
+
+		So(f.Append(_MINIMAL_CONF, []byte(`
+[author]
+NAME = Unknwon`)), ShouldBeNil)
+
+		Convey("With bad input", func() {
+			So(f.Append(123), ShouldNotBeNil)
+			So(f.Append(_MINIMAL_CONF, 123), ShouldNotBeNil)
+		})
+	})
+}
+
+// TestFile_WriteTo verifies serialization against a golden file (regenerated
+// with -update) and that multi-line comments are emitted one ";"-prefixed
+// line each.
+func TestFile_WriteTo(t *testing.T) {
+	Convey("Write content to somewhere", t, func() {
+		f, err := ini.Load(_FULL_CONF)
+		So(err, ShouldBeNil)
+		So(f, ShouldNotBeNil)
+
+		f.Section("author").Comment = `Information about package author
+# Bio can be written in multiple lines.`
+		f.Section("author").Key("NAME").Comment = "This is author name"
+		f.Section("note").NewBooleanKey("boolean_key")
+		f.Section("note").NewKey("more", "notes")
+
+		var buf bytes.Buffer
+		_, err = f.WriteTo(&buf)
+		So(err, ShouldBeNil)
+
+		golden := "testdata/TestFile_WriteTo.golden"
+		if *update {
+			// Regenerate the golden file instead of comparing against it.
+			ioutil.WriteFile(golden, buf.Bytes(), 0644)
+		}
+
+		expected, err := ioutil.ReadFile(golden)
+		So(err, ShouldBeNil)
+		So(buf.String(), ShouldEqual, string(expected))
+	})
+
+	Convey("Support multiline comments", t, func() {
+		f, err := ini.Load([]byte(`
+#
+# general.domain
+#
+# Domain name of XX system.
+domain = mydomain.com
+`))
+		So(err, ShouldBeNil)
+
+		f.Section("").Key("test").Comment = "Multiline\nComment"
+
+		var buf bytes.Buffer
+		_, err = f.WriteTo(&buf)
+		So(err, ShouldBeNil)
+
+		So(buf.String(), ShouldEqual, `#
+# general.domain
+#
+# Domain name of XX system.
+domain = mydomain.com
+; Multiline
+; Comment
+test =
+
+`)
+
+	})
+}
+
+// TestFile_SaveTo verifies saving to disk, both compact and tab-indented.
+func TestFile_SaveTo(t *testing.T) {
+	Convey("Write content to somewhere", t, func() {
+		f, err := ini.Load(_FULL_CONF)
+		So(err, ShouldBeNil)
+		So(f, ShouldNotBeNil)
+
+		So(f.SaveTo("testdata/conf_out.ini"), ShouldBeNil)
+		So(f.SaveToIndent("testdata/conf_out.ini", "\t"), ShouldBeNil)
+	})
+}
--- /dev/null
+// +build go1.6
+
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+// Package ini provides INI file read and write functionality in Go.
+package ini
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "regexp"
+ "runtime"
+)
+
+const (
+	// Name for default section. You can use this constant or the string literal.
+	// In most of cases, an empty string is all you need to access the section.
+	DEFAULT_SECTION = "DEFAULT"
+
+	// Maximum allowed depth when recursively substituting variable names.
+	_DEPTH_VALUES = 99
+	// Current package version literal; exposed through Version().
+	_VERSION = "1.42.0"
+)
+
+// Version returns current package version literal (the _VERSION constant).
+func Version() string {
+	return _VERSION
+}
+
+var (
+	// Delimiter to determine or compose a new line.
+	// This variable will be changed to "\r\n" automatically on Windows
+	// at package init time.
+	LineBreak = "\n"
+
+	// Place custom spaces when PrettyFormat and PrettyEqual are both disabled
+	DefaultFormatLeft  = ""
+	DefaultFormatRight = ""
+
+	// Variable regexp pattern: %(variable)s
+	varPattern = regexp.MustCompile(`%\(([^\)]+)\)s`)
+
+	// Indicate whether to align "=" sign with spaces to produce pretty output
+	// or reduce all possible spaces for compact format.
+	PrettyFormat = true
+
+	// Place spaces around "=" sign even when PrettyFormat is false
+	PrettyEqual = false
+
+	// Explicitly write DEFAULT section header (i.e. emit "[DEFAULT]" even
+	// for the unnamed section)
+	DefaultHeader = false
+
+	// Indicate whether to put a line between sections
+	PrettySection = true
+)
+
+// init adjusts LineBreak for the host platform: on Windows, written files
+// conventionally use CRLF line endings.
+func init() {
+	switch runtime.GOOS {
+	case "windows":
+		LineBreak = "\r\n"
+	}
+}
+
+// inSlice reports whether str is present in s.
+func inSlice(str string, s []string) bool {
+	for i := 0; i < len(s); i++ {
+		if s[i] == str {
+			return true
+		}
+	}
+	return false
+}
+
+// dataSource is an interface that returns object which can be read and closed.
+// Implementations below wrap a file name, an in-memory byte slice, or an
+// existing io.ReadCloser.
+type dataSource interface {
+	ReadCloser() (io.ReadCloser, error)
+}
+
+// sourceFile represents an object that contains content on the local file system.
+type sourceFile struct {
+	name string
+}
+
+// ReadCloser opens the named file; the caller is responsible for closing it.
+// (The previous signature declared named return values that were never used.)
+func (s sourceFile) ReadCloser() (io.ReadCloser, error) {
+	return os.Open(s.name)
+}
+
+// sourceData represents an object that contains content in memory.
+type sourceData struct {
+	data []byte
+}
+
+// ReadCloser wraps the in-memory bytes in a no-op closer; it never fails.
+func (s *sourceData) ReadCloser() (io.ReadCloser, error) {
+	return ioutil.NopCloser(bytes.NewReader(s.data)), nil
+}
+
+// sourceReadCloser represents an input stream with Close method.
+type sourceReadCloser struct {
+	reader io.ReadCloser
+}
+
+// ReadCloser hands back the wrapped stream as-is; it never fails.
+func (s *sourceReadCloser) ReadCloser() (io.ReadCloser, error) {
+	return s.reader, nil
+}
+
+// parseDataSource normalizes a user-supplied source into a dataSource.
+// Accepted types: string (file name), []byte (raw content) and io.ReadCloser.
+func parseDataSource(source interface{}) (dataSource, error) {
+	switch s := source.(type) {
+	case string:
+		return sourceFile{s}, nil
+	case []byte:
+		return &sourceData{s}, nil
+	case io.ReadCloser:
+		return &sourceReadCloser{s}, nil
+	default:
+		// %T prints the dynamic type name; the previous %s verb rendered
+		// garbage such as "%!s(int=123)" for non-string values.
+		return nil, fmt.Errorf("error parsing data source: unknown type %T", s)
+	}
+}
+
+// LoadOptions contains all customized options used for loading data source(s).
+type LoadOptions struct {
+	// Loose indicates whether the parser should ignore nonexistent files or return error.
+	Loose bool
+	// Insensitive indicates whether the parser forces all section and key names to lowercase.
+	Insensitive bool
+	// IgnoreContinuation indicates whether to ignore continuation lines while parsing.
+	IgnoreContinuation bool
+	// IgnoreInlineComment indicates whether to ignore comments at the end of value and treat it as part of value.
+	IgnoreInlineComment bool
+	// SkipUnrecognizableLines indicates whether to skip unrecognizable lines that do not conform to key/value pairs.
+	SkipUnrecognizableLines bool
+	// AllowBooleanKeys indicates whether to allow boolean type keys or treat as value is missing.
+	// This type of keys are mostly used in my.cnf.
+	AllowBooleanKeys bool
+	// AllowShadows indicates whether to keep track of keys with same name under same section.
+	AllowShadows bool
+	// AllowNestedValues indicates whether to allow AWS-like nested values.
+	// Docs: http://docs.aws.amazon.com/cli/latest/topic/config-vars.html#nested-values
+	AllowNestedValues bool
+	// AllowPythonMultilineValues indicates whether to allow Python-like multi-line values.
+	// Docs: https://docs.python.org/3/library/configparser.html#supported-ini-file-structure
+	// Relevant quote: Values can also span multiple lines, as long as they are indented deeper
+	// than the first line of the value.
+	AllowPythonMultilineValues bool
+	// SpaceBeforeInlineComment indicates whether to allow comment symbols (\# and \;) inside value.
+	// Docs: https://docs.python.org/2/library/configparser.html
+	// Quote: Comments may appear on their own in an otherwise empty line, or may be entered in lines holding values or section names.
+	// In the latter case, they need to be preceded by a whitespace character to be recognized as a comment.
+	SpaceBeforeInlineComment bool
+	// UnescapeValueDoubleQuotes indicates whether to unescape double quotes inside value to regular format
+	// when value is surrounded by double quotes, e.g. key="a \"value\"" => key=a "value"
+	UnescapeValueDoubleQuotes bool
+	// UnescapeValueCommentSymbols indicates to unescape comment symbols (\# and \;) inside value to regular format
+	// when value is NOT surrounded by any quotes.
+	// Note: UNSTABLE, behavior might change to only unescape inside double quotes but may not be necessary at all.
+	UnescapeValueCommentSymbols bool
+	// UnparseableSections stores a list of blocks that are allowed with raw content which do not otherwise
+	// conform to key/value pairs. Specify the names of those blocks here.
+	UnparseableSections []string
+	// KeyValueDelimiters is the sequence of delimiters that are used to separate key and value. By default, it is "=:".
+	KeyValueDelimiters string
+	// PreserveSurroundedQuote indicates whether to preserve surrounded quote (single and double quotes).
+	PreserveSurroundedQuote bool
+}
+
+// LoadSources loads and parses from one or more data sources with caller
+// customized options; every source must parse successfully.
+func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ *File, err error) {
+	all := append([]interface{}{source}, others...)
+	sources := make([]dataSource, 0, len(all))
+	for _, src := range all {
+		ds, err := parseDataSource(src)
+		if err != nil {
+			return nil, err
+		}
+		sources = append(sources, ds)
+	}
+	f := newFile(sources, opts)
+	if err = f.Reload(); err != nil {
+		return nil, err
+	}
+	return f, nil
+}
+
+// Load loads and parses from INI data sources.
+// Arguments can be mixed of file name with string type, or raw data in []byte.
+// It will return error if list contains nonexistent files.
+func Load(source interface{}, others ...interface{}) (*File, error) {
+	return LoadSources(LoadOptions{}, source, others...)
+}
+
+// LooseLoad has exactly same functionality as Load function
+// except it ignores nonexistent files instead of returning error.
+func LooseLoad(source interface{}, others ...interface{}) (*File, error) {
+	return LoadSources(LoadOptions{Loose: true}, source, others...)
+}
+
+// InsensitiveLoad has exactly same functionality as Load function
+// except it forces all section and key names to be lowercased.
+func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) {
+	return LoadSources(LoadOptions{Insensitive: true}, source, others...)
+}
+
+// ShadowLoad has exactly same functionality as Load function
+// except it allows having shadow keys (multiple keys with the same name
+// under one section).
+func ShadowLoad(source interface{}, others ...interface{}) (*File, error) {
+	return LoadSources(LoadOptions{AllowShadows: true}, source, others...)
+}
--- /dev/null
+// Copyright 2017 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "testing"
+
+ . "github.com/smartystreets/goconvey/convey"
+)
+
+// Test_Version ensures the exported Version accessor matches _VERSION.
+func Test_Version(t *testing.T) {
+	Convey("Get version", t, func() {
+		So(Version(), ShouldEqual, _VERSION)
+	})
+}
+
+// Test_isSlice covers both the found and not-found paths of inSlice.
+func Test_isSlice(t *testing.T) {
+	Convey("Check if a string is in the slice", t, func() {
+		ss := []string{"a", "b", "c"}
+		So(inSlice("a", ss), ShouldBeTrue)
+		So(inSlice("d", ss), ShouldBeFalse)
+	})
+}
--- /dev/null
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini_test
+
+import (
+ "bytes"
+ "flag"
+ "io/ioutil"
+ "testing"
+
+ . "github.com/smartystreets/goconvey/convey"
+ "gopkg.in/ini.v1"
+)
+
+const (
+	// _CONF_DATA is an inline INI document exercising variables, comments
+	// and multi-line values; shared by tests in this file.
+	_CONF_DATA = `
+	; Package name
+	NAME = ini
+	; Package version
+	VERSION = v1
+	; Package import path
+	IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s
+
+	# Information about package author
+	# Bio can be written in multiple lines.
+	[author]
+	NAME = Unknwon ; Succeeding comment
+	E-MAIL = fake@localhost
+	GITHUB = https://github.com/%(NAME)s
+	BIO = """Gopher.
+	Coding addict.
+	Good man.
+	""" # Succeeding comment`
+	// Fixture file paths; _NOT_FOUND_CONF deliberately does not exist.
+	_MINIMAL_CONF   = "testdata/minimal.ini"
+	_FULL_CONF      = "testdata/full.ini"
+	_NOT_FOUND_CONF = "testdata/404.ini"
+)
+
+// update, when set via -update, rewrites .golden files instead of comparing.
+var update = flag.Bool("update", false, "Update .golden files")
+
+// TestLoad exercises loading from mixed data sources ([]byte, file name,
+// io.ReadCloser), rejection of bad sources, and failure modes of the default
+// parser on Python-style multi-line values (see TestLoadSources for the
+// AllowPythonMultilineValues counterparts).
+func TestLoad(t *testing.T) {
+	Convey("Load from good data sources", t, func() {
+		f, err := ini.Load([]byte(`
+NAME = ini
+VERSION = v1
+IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s`),
+			"testdata/minimal.ini",
+			ioutil.NopCloser(bytes.NewReader([]byte(`
+[author]
+NAME = Unknwon
+`))),
+		)
+		So(err, ShouldBeNil)
+		So(f, ShouldNotBeNil)
+
+		// Validate values make sure all sources are loaded correctly
+		sec := f.Section("")
+		So(sec.Key("NAME").String(), ShouldEqual, "ini")
+		So(sec.Key("VERSION").String(), ShouldEqual, "v1")
+		So(sec.Key("IMPORT_PATH").String(), ShouldEqual, "gopkg.in/ini.v1")
+
+		sec = f.Section("author")
+		So(sec.Key("NAME").String(), ShouldEqual, "Unknwon")
+		So(sec.Key("E-MAIL").String(), ShouldEqual, "u@gogs.io")
+	})
+
+	Convey("Load from bad data sources", t, func() {
+		Convey("Invalid input", func() {
+			_, err := ini.Load(_NOT_FOUND_CONF)
+			So(err, ShouldNotBeNil)
+		})
+
+		Convey("Unsupported type", func() {
+			_, err := ini.Load(123)
+			So(err, ShouldNotBeNil)
+		})
+	})
+
+	// Without SpaceBeforeInlineComment, unescaped "#"/";" cut the value short.
+	Convey("Can't properly parse INI files containing `#` or `;` in value", t, func() {
+		f, err := ini.Load([]byte(`
+	[author]
+	NAME = U#n#k#n#w#o#n
+	GITHUB = U;n;k;n;w;o;n
+	`))
+		So(err, ShouldBeNil)
+		So(f, ShouldNotBeNil)
+
+		sec := f.Section("author")
+		nameValue := sec.Key("NAME").String()
+		githubValue := sec.Key("GITHUB").String()
+		So(nameValue, ShouldEqual, "U")
+		So(githubValue, ShouldEqual, "U")
+	})
+
+	Convey("Can't parse small python-compatible INI files", t, func() {
+		f, err := ini.Load([]byte(`
+[long]
+long_rsa_private_key = -----BEGIN RSA PRIVATE KEY-----
+	foo
+	bar
+	foobar
+	barfoo
+	-----END RSA PRIVATE KEY-----
+`))
+		So(err, ShouldNotBeNil)
+		So(f, ShouldBeNil)
+		So(err.Error(), ShouldEqual, "key-value delimiter not found: foo\n")
+	})
+
+	Convey("Can't parse big python-compatible INI files", t, func() {
+		f, err := ini.Load([]byte(`
+[long]
+long_rsa_private_key = -----BEGIN RSA PRIVATE KEY-----
+	1foo
+	2bar
+	3foobar
+	4barfoo
+	5foo
+	6bar
+	7foobar
+	8barfoo
+	9foo
+	10bar
+	11foobar
+	12barfoo
+	13foo
+	14bar
+	15foobar
+	16barfoo
+	17foo
+	18bar
+	19foobar
+	20barfoo
+	21foo
+	22bar
+	23foobar
+	24barfoo
+	25foo
+	26bar
+	27foobar
+	28barfoo
+	29foo
+	30bar
+	31foobar
+	32barfoo
+	33foo
+	34bar
+	35foobar
+	36barfoo
+	37foo
+	38bar
+	39foobar
+	40barfoo
+	41foo
+	42bar
+	43foobar
+	44barfoo
+	45foo
+	46bar
+	47foobar
+	48barfoo
+	49foo
+	50bar
+	51foobar
+	52barfoo
+	53foo
+	54bar
+	55foobar
+	56barfoo
+	57foo
+	58bar
+	59foobar
+	60barfoo
+	61foo
+	62bar
+	63foobar
+	64barfoo
+	65foo
+	66bar
+	67foobar
+	68barfoo
+	69foo
+	70bar
+	71foobar
+	72barfoo
+	73foo
+	74bar
+	75foobar
+	76barfoo
+	77foo
+	78bar
+	79foobar
+	80barfoo
+	81foo
+	82bar
+	83foobar
+	84barfoo
+	85foo
+	86bar
+	87foobar
+	88barfoo
+	89foo
+	90bar
+	91foobar
+	92barfoo
+	93foo
+	94bar
+	95foobar
+	96barfoo
+	-----END RSA PRIVATE KEY-----
+`))
+		So(err, ShouldNotBeNil)
+		So(f, ShouldBeNil)
+		So(err.Error(), ShouldEqual, "key-value delimiter not found: 1foo\n")
+	})
+}
+
+// TestLooseLoad verifies that the Loose option skips nonexistent files while
+// plain Load fails on them.
+func TestLooseLoad(t *testing.T) {
+	Convey("Load from data sources with option `Loose` true", t, func() {
+		f, err := ini.LoadSources(ini.LoadOptions{Loose: true}, _NOT_FOUND_CONF, _MINIMAL_CONF)
+		So(err, ShouldBeNil)
+		So(f, ShouldNotBeNil)
+
+		Convey("Inverse case", func() {
+			_, err = ini.Load(_NOT_FOUND_CONF)
+			So(err, ShouldNotBeNil)
+		})
+	})
+}
+
+// TestInsensitiveLoad verifies case-insensitive section/key lookup, that the
+// lowercased form is what gets written out, and that plain Load stays
+// case-sensitive.
+func TestInsensitiveLoad(t *testing.T) {
+	Convey("Insensitive to section and key names", t, func() {
+		f, err := ini.InsensitiveLoad(_MINIMAL_CONF)
+		So(err, ShouldBeNil)
+		So(f, ShouldNotBeNil)
+
+		So(f.Section("Author").Key("e-mail").String(), ShouldEqual, "u@gogs.io")
+
+		Convey("Write out", func() {
+			var buf bytes.Buffer
+			_, err := f.WriteTo(&buf)
+			So(err, ShouldBeNil)
+			So(buf.String(), ShouldEqual, `[author]
+e-mail = u@gogs.io
+
+`)
+		})
+
+		Convey("Inverse case", func() {
+			f, err := ini.Load(_MINIMAL_CONF)
+			So(err, ShouldBeNil)
+			So(f, ShouldNotBeNil)
+
+			So(f.Section("Author").Key("e-mail").String(), ShouldBeEmpty)
+		})
+	})
+}
+
+func TestLoadSources(t *testing.T) {
+ Convey("Load from data sources with options", t, func() {
+ Convey("with true `AllowPythonMultilineValues`", func() {
+ Convey("Ignore nonexistent files", func() {
+ f, err := ini.LoadSources(ini.LoadOptions{AllowPythonMultilineValues: true, Loose: true}, _NOT_FOUND_CONF, _MINIMAL_CONF)
+ So(err, ShouldBeNil)
+ So(f, ShouldNotBeNil)
+
+ Convey("Inverse case", func() {
+ _, err = ini.LoadSources(ini.LoadOptions{AllowPythonMultilineValues: true}, _NOT_FOUND_CONF)
+ So(err, ShouldNotBeNil)
+ })
+ })
+
+ Convey("Insensitive to section and key names", func() {
+ f, err := ini.LoadSources(ini.LoadOptions{AllowPythonMultilineValues: true, Insensitive: true}, _MINIMAL_CONF)
+ So(err, ShouldBeNil)
+ So(f, ShouldNotBeNil)
+
+ So(f.Section("Author").Key("e-mail").String(), ShouldEqual, "u@gogs.io")
+
+ Convey("Write out", func() {
+ var buf bytes.Buffer
+ _, err := f.WriteTo(&buf)
+ So(err, ShouldBeNil)
+ So(buf.String(), ShouldEqual, `[author]
+e-mail = u@gogs.io
+
+`)
+ })
+
+ Convey("Inverse case", func() {
+ f, err := ini.LoadSources(ini.LoadOptions{AllowPythonMultilineValues: true}, _MINIMAL_CONF)
+ So(err, ShouldBeNil)
+ So(f, ShouldNotBeNil)
+
+ So(f.Section("Author").Key("e-mail").String(), ShouldBeEmpty)
+ })
+ })
+
+ Convey("Ignore continuation lines", func() {
+ f, err := ini.LoadSources(ini.LoadOptions{
+ AllowPythonMultilineValues: true,
+ IgnoreContinuation: true,
+ }, []byte(`
+key1=a\b\
+key2=c\d\
+key3=value`))
+ So(err, ShouldBeNil)
+ So(f, ShouldNotBeNil)
+
+ So(f.Section("").Key("key1").String(), ShouldEqual, `a\b\`)
+ So(f.Section("").Key("key2").String(), ShouldEqual, `c\d\`)
+ So(f.Section("").Key("key3").String(), ShouldEqual, "value")
+
+ Convey("Inverse case", func() {
+ f, err := ini.LoadSources(ini.LoadOptions{AllowPythonMultilineValues: true}, []byte(`
+key1=a\b\
+key2=c\d\`))
+ So(err, ShouldBeNil)
+ So(f, ShouldNotBeNil)
+
+ So(f.Section("").Key("key1").String(), ShouldEqual, `a\bkey2=c\d`)
+ })
+ })
+
+ Convey("Ignore inline comments", func() {
+ f, err := ini.LoadSources(ini.LoadOptions{
+ AllowPythonMultilineValues: true,
+ IgnoreInlineComment: true,
+ }, []byte(`
+key1=value ;comment
+key2=value2 #comment2
+key3=val#ue #comment3`))
+ So(err, ShouldBeNil)
+ So(f, ShouldNotBeNil)
+
+ So(f.Section("").Key("key1").String(), ShouldEqual, `value ;comment`)
+ So(f.Section("").Key("key2").String(), ShouldEqual, `value2 #comment2`)
+ So(f.Section("").Key("key3").String(), ShouldEqual, `val#ue #comment3`)
+
+ Convey("Inverse case", func() {
+ f, err := ini.LoadSources(ini.LoadOptions{AllowPythonMultilineValues: true}, []byte(`
+key1=value ;comment
+key2=value2 #comment2`))
+ So(err, ShouldBeNil)
+ So(f, ShouldNotBeNil)
+
+ So(f.Section("").Key("key1").String(), ShouldEqual, `value`)
+ So(f.Section("").Key("key1").Comment, ShouldEqual, `;comment`)
+ So(f.Section("").Key("key2").String(), ShouldEqual, `value2`)
+ So(f.Section("").Key("key2").Comment, ShouldEqual, `#comment2`)
+ })
+ })
+
+ Convey("Skip unrecognizable lines", func() {
+ f, err := ini.LoadSources(ini.LoadOptions{
+ SkipUnrecognizableLines: true,
+ }, []byte(`
+GenerationDepth: 13
+
+BiomeRarityScale: 100
+
+################
+# Biome Groups #
+################
+
+BiomeGroup(NormalBiomes, 3, 99, RoofedForestEnchanted, ForestSakura, FloatingJungle
+BiomeGroup(IceBiomes, 4, 85, Ice Plains)
+`))
+ So(err, ShouldBeNil)
+ So(f, ShouldNotBeNil)
+
+ So(f.Section("").Key("GenerationDepth").String(), ShouldEqual, "13")
+ So(f.Section("").Key("BiomeRarityScale").String(), ShouldEqual, "100")
+ So(f.Section("").HasKey("BiomeGroup"), ShouldBeFalse)
+ })
+
+ Convey("Allow boolean type keys", func() {
+ f, err := ini.LoadSources(ini.LoadOptions{
+ AllowPythonMultilineValues: true,
+ AllowBooleanKeys: true,
+ }, []byte(`
+key1=hello
+#key2
+key3`))
+ So(err, ShouldBeNil)
+ So(f, ShouldNotBeNil)
+
+ So(f.Section("").KeyStrings(), ShouldResemble, []string{"key1", "key3"})
+ So(f.Section("").Key("key3").MustBool(false), ShouldBeTrue)
+
+ Convey("Write out", func() {
+ var buf bytes.Buffer
+ _, err := f.WriteTo(&buf)
+ So(err, ShouldBeNil)
+ So(buf.String(), ShouldEqual, `key1 = hello
+# key2
+key3
+`)
+ })
+
+ Convey("Inverse case", func() {
+ _, err := ini.LoadSources(ini.LoadOptions{AllowPythonMultilineValues: true}, []byte(`
+key1=hello
+#key2
+key3`))
+ So(err, ShouldNotBeNil)
+ })
+ })
+
+ Convey("Allow shadow keys", func() {
+ f, err := ini.LoadSources(ini.LoadOptions{AllowShadows: true, AllowPythonMultilineValues: true}, []byte(`
+[remote "origin"]
+url = https://github.com/Antergone/test1.git
+url = https://github.com/Antergone/test2.git
+fetch = +refs/heads/*:refs/remotes/origin/*`))
+ So(err, ShouldBeNil)
+ So(f, ShouldNotBeNil)
+
+ So(f.Section(`remote "origin"`).Key("url").String(), ShouldEqual, "https://github.com/Antergone/test1.git")
+ So(f.Section(`remote "origin"`).Key("url").ValueWithShadows(), ShouldResemble, []string{
+ "https://github.com/Antergone/test1.git",
+ "https://github.com/Antergone/test2.git",
+ })
+ So(f.Section(`remote "origin"`).Key("fetch").String(), ShouldEqual, "+refs/heads/*:refs/remotes/origin/*")
+
+ Convey("Write out", func() {
+ var buf bytes.Buffer
+ _, err := f.WriteTo(&buf)
+ So(err, ShouldBeNil)
+ So(buf.String(), ShouldEqual, `[remote "origin"]
+url = https://github.com/Antergone/test1.git
+url = https://github.com/Antergone/test2.git
+fetch = +refs/heads/*:refs/remotes/origin/*
+
+`)
+ })
+
+ Convey("Inverse case", func() {
+ f, err := ini.LoadSources(ini.LoadOptions{AllowPythonMultilineValues: true}, []byte(`
+[remote "origin"]
+url = https://github.com/Antergone/test1.git
+url = https://github.com/Antergone/test2.git`))
+ So(err, ShouldBeNil)
+ So(f, ShouldNotBeNil)
+
+ So(f.Section(`remote "origin"`).Key("url").String(), ShouldEqual, "https://github.com/Antergone/test2.git")
+ })
+ })
+
+ Convey("Unescape double quotes inside value", func() {
+ f, err := ini.LoadSources(ini.LoadOptions{
+ AllowPythonMultilineValues: true,
+ UnescapeValueDoubleQuotes: true,
+ }, []byte(`
+create_repo="创建了仓库 <a href=\"%s\">%s</a>"`))
+ So(err, ShouldBeNil)
+ So(f, ShouldNotBeNil)
+
+ So(f.Section("").Key("create_repo").String(), ShouldEqual, `创建了仓库 <a href="%s">%s</a>`)
+
+ Convey("Inverse case", func() {
+ f, err := ini.LoadSources(ini.LoadOptions{AllowPythonMultilineValues: true}, []byte(`
+create_repo="创建了仓库 <a href=\"%s\">%s</a>"`))
+ So(err, ShouldBeNil)
+ So(f, ShouldNotBeNil)
+
+ So(f.Section("").Key("create_repo").String(), ShouldEqual, `"创建了仓库 <a href=\"%s\">%s</a>"`)
+ })
+ })
+
+ Convey("Unescape comment symbols inside value", func() {
+ f, err := ini.LoadSources(ini.LoadOptions{
+ AllowPythonMultilineValues: true,
+ IgnoreInlineComment: true,
+ UnescapeValueCommentSymbols: true,
+ }, []byte(`
+key = test value <span style="color: %s\; background: %s">more text</span>
+`))
+ So(err, ShouldBeNil)
+ So(f, ShouldNotBeNil)
+
+ So(f.Section("").Key("key").String(), ShouldEqual, `test value <span style="color: %s; background: %s">more text</span>`)
+ })
+
+ Convey("Can parse small python-compatible INI files", func() {
+ f, err := ini.LoadSources(ini.LoadOptions{
+ AllowPythonMultilineValues: true,
+ Insensitive: true,
+ UnparseableSections: []string{"core_lesson", "comments"},
+ }, []byte(`
+[long]
+long_rsa_private_key = -----BEGIN RSA PRIVATE KEY-----
+ foo
+ bar
+ foobar
+ barfoo
+ -----END RSA PRIVATE KEY-----
+`))
+ So(err, ShouldBeNil)
+ So(f, ShouldNotBeNil)
+
+ So(f.Section("long").Key("long_rsa_private_key").String(), ShouldEqual, "-----BEGIN RSA PRIVATE KEY-----\nfoo\nbar\nfoobar\nbarfoo\n-----END RSA PRIVATE KEY-----")
+ })
+
+ Convey("Can parse big python-compatible INI files", func() {
+ f, err := ini.LoadSources(ini.LoadOptions{
+ AllowPythonMultilineValues: true,
+ Insensitive: true,
+ UnparseableSections: []string{"core_lesson", "comments"},
+ }, []byte(`
+[long]
+long_rsa_private_key = -----BEGIN RSA PRIVATE KEY-----
+ 1foo
+ 2bar
+ 3foobar
+ 4barfoo
+ 5foo
+ 6bar
+ 7foobar
+ 8barfoo
+ 9foo
+ 10bar
+ 11foobar
+ 12barfoo
+ 13foo
+ 14bar
+ 15foobar
+ 16barfoo
+ 17foo
+ 18bar
+ 19foobar
+ 20barfoo
+ 21foo
+ 22bar
+ 23foobar
+ 24barfoo
+ 25foo
+ 26bar
+ 27foobar
+ 28barfoo
+ 29foo
+ 30bar
+ 31foobar
+ 32barfoo
+ 33foo
+ 34bar
+ 35foobar
+ 36barfoo
+ 37foo
+ 38bar
+ 39foobar
+ 40barfoo
+ 41foo
+ 42bar
+ 43foobar
+ 44barfoo
+ 45foo
+ 46bar
+ 47foobar
+ 48barfoo
+ 49foo
+ 50bar
+ 51foobar
+ 52barfoo
+ 53foo
+ 54bar
+ 55foobar
+ 56barfoo
+ 57foo
+ 58bar
+ 59foobar
+ 60barfoo
+ 61foo
+ 62bar
+ 63foobar
+ 64barfoo
+ 65foo
+ 66bar
+ 67foobar
+ 68barfoo
+ 69foo
+ 70bar
+ 71foobar
+ 72barfoo
+ 73foo
+ 74bar
+ 75foobar
+ 76barfoo
+ 77foo
+ 78bar
+ 79foobar
+ 80barfoo
+ 81foo
+ 82bar
+ 83foobar
+ 84barfoo
+ 85foo
+ 86bar
+ 87foobar
+ 88barfoo
+ 89foo
+ 90bar
+ 91foobar
+ 92barfoo
+ 93foo
+ 94bar
+ 95foobar
+ 96barfoo
+ -----END RSA PRIVATE KEY-----
+`))
+ So(err, ShouldBeNil)
+ So(f, ShouldNotBeNil)
+
+ So(f.Section("long").Key("long_rsa_private_key").String(), ShouldEqual, `-----BEGIN RSA PRIVATE KEY-----
+1foo
+2bar
+3foobar
+4barfoo
+5foo
+6bar
+7foobar
+8barfoo
+9foo
+10bar
+11foobar
+12barfoo
+13foo
+14bar
+15foobar
+16barfoo
+17foo
+18bar
+19foobar
+20barfoo
+21foo
+22bar
+23foobar
+24barfoo
+25foo
+26bar
+27foobar
+28barfoo
+29foo
+30bar
+31foobar
+32barfoo
+33foo
+34bar
+35foobar
+36barfoo
+37foo
+38bar
+39foobar
+40barfoo
+41foo
+42bar
+43foobar
+44barfoo
+45foo
+46bar
+47foobar
+48barfoo
+49foo
+50bar
+51foobar
+52barfoo
+53foo
+54bar
+55foobar
+56barfoo
+57foo
+58bar
+59foobar
+60barfoo
+61foo
+62bar
+63foobar
+64barfoo
+65foo
+66bar
+67foobar
+68barfoo
+69foo
+70bar
+71foobar
+72barfoo
+73foo
+74bar
+75foobar
+76barfoo
+77foo
+78bar
+79foobar
+80barfoo
+81foo
+82bar
+83foobar
+84barfoo
+85foo
+86bar
+87foobar
+88barfoo
+89foo
+90bar
+91foobar
+92barfoo
+93foo
+94bar
+95foobar
+96barfoo
+-----END RSA PRIVATE KEY-----`)
+ })
+
+ Convey("Allow unparsable sections", func() {
+ f, err := ini.LoadSources(ini.LoadOptions{
+ AllowPythonMultilineValues: true,
+ Insensitive: true,
+ UnparseableSections: []string{"core_lesson", "comments"},
+ }, []byte(`
+Lesson_Location = 87
+Lesson_Status = C
+Score = 3
+Time = 00:02:30
+
+[CORE_LESSON]
+my lesson state data – 1111111111111111111000000000000000001110000
+111111111111111111100000000000111000000000 – end my lesson state data
+
+[COMMENTS]
+<1><L.Slide#2> This slide has the fuel listed in the wrong units <e.1>`))
+ So(err, ShouldBeNil)
+ So(f, ShouldNotBeNil)
+
+ So(f.Section("").Key("score").String(), ShouldEqual, "3")
+ So(f.Section("").Body(), ShouldBeEmpty)
+ So(f.Section("core_lesson").Body(), ShouldEqual, `my lesson state data – 1111111111111111111000000000000000001110000
+111111111111111111100000000000111000000000 – end my lesson state data`)
+ So(f.Section("comments").Body(), ShouldEqual, `<1><L.Slide#2> This slide has the fuel listed in the wrong units <e.1>`)
+
+ Convey("Write out", func() {
+ var buf bytes.Buffer
+ _, err := f.WriteTo(&buf)
+ So(err, ShouldBeNil)
+ So(buf.String(), ShouldEqual, `lesson_location = 87
+lesson_status = C
+score = 3
+time = 00:02:30
+
+[core_lesson]
+my lesson state data – 1111111111111111111000000000000000001110000
+111111111111111111100000000000111000000000 – end my lesson state data
+
+[comments]
+<1><L.Slide#2> This slide has the fuel listed in the wrong units <e.1>
+`)
+ })
+
+ Convey("Inverse case", func() {
+ _, err := ini.LoadSources(ini.LoadOptions{AllowPythonMultilineValues: true}, []byte(`
+[CORE_LESSON]
+my lesson state data – 1111111111111111111000000000000000001110000
+111111111111111111100000000000111000000000 – end my lesson state data`))
+ So(err, ShouldNotBeNil)
+ })
+ })
+
+ Convey("And false `SpaceBeforeInlineComment`", func() {
+ Convey("Can't parse INI files containing `#` or `;` in value", func() {
+ f, err := ini.LoadSources(
+ ini.LoadOptions{AllowPythonMultilineValues: false, SpaceBeforeInlineComment: false},
+ []byte(`
+[author]
+NAME = U#n#k#n#w#o#n
+GITHUB = U;n;k;n;w;o;n
+`))
+ So(err, ShouldBeNil)
+ So(f, ShouldNotBeNil)
+ sec := f.Section("author")
+ nameValue := sec.Key("NAME").String()
+ githubValue := sec.Key("GITHUB").String()
+ So(nameValue, ShouldEqual, "U")
+ So(githubValue, ShouldEqual, "U")
+ })
+ })
+
+ Convey("And true `SpaceBeforeInlineComment`", func() {
+ Convey("Can parse INI files containing `#` or `;` in value", func() {
+ f, err := ini.LoadSources(
+ ini.LoadOptions{AllowPythonMultilineValues: false, SpaceBeforeInlineComment: true},
+ []byte(`
+[author]
+NAME = U#n#k#n#w#o#n
+GITHUB = U;n;k;n;w;o;n
+`))
+ So(err, ShouldBeNil)
+ So(f, ShouldNotBeNil)
+ sec := f.Section("author")
+ nameValue := sec.Key("NAME").String()
+ githubValue := sec.Key("GITHUB").String()
+ So(nameValue, ShouldEqual, "U#n#k#n#w#o#n")
+ So(githubValue, ShouldEqual, "U;n;k;n;w;o;n")
+ })
+ })
+ })
+
+ Convey("with false `AllowPythonMultilineValues`", func() {
+ Convey("Ignore nonexistent files", func() {
+ f, err := ini.LoadSources(ini.LoadOptions{AllowPythonMultilineValues: false, Loose: true}, _NOT_FOUND_CONF, _MINIMAL_CONF)
+ So(err, ShouldBeNil)
+ So(f, ShouldNotBeNil)
+
+ Convey("Inverse case", func() {
+ _, err = ini.LoadSources(ini.LoadOptions{AllowPythonMultilineValues: false}, _NOT_FOUND_CONF)
+ So(err, ShouldNotBeNil)
+ })
+ })
+
+ Convey("Insensitive to section and key names", func() {
+ f, err := ini.LoadSources(ini.LoadOptions{AllowPythonMultilineValues: false, Insensitive: true}, _MINIMAL_CONF)
+ So(err, ShouldBeNil)
+ So(f, ShouldNotBeNil)
+
+ So(f.Section("Author").Key("e-mail").String(), ShouldEqual, "u@gogs.io")
+
+ Convey("Write out", func() {
+ var buf bytes.Buffer
+ _, err := f.WriteTo(&buf)
+ So(err, ShouldBeNil)
+ So(buf.String(), ShouldEqual, `[author]
+e-mail = u@gogs.io
+
+`)
+ })
+
+ Convey("Inverse case", func() {
+ f, err := ini.LoadSources(ini.LoadOptions{AllowPythonMultilineValues: false}, _MINIMAL_CONF)
+ So(err, ShouldBeNil)
+ So(f, ShouldNotBeNil)
+
+ So(f.Section("Author").Key("e-mail").String(), ShouldBeEmpty)
+ })
+ })
+
+ Convey("Ignore continuation lines", func() {
+ f, err := ini.LoadSources(ini.LoadOptions{
+ AllowPythonMultilineValues: false,
+ IgnoreContinuation: true,
+ }, []byte(`
+key1=a\b\
+key2=c\d\
+key3=value`))
+ So(err, ShouldBeNil)
+ So(f, ShouldNotBeNil)
+
+ So(f.Section("").Key("key1").String(), ShouldEqual, `a\b\`)
+ So(f.Section("").Key("key2").String(), ShouldEqual, `c\d\`)
+ So(f.Section("").Key("key3").String(), ShouldEqual, "value")
+
+ Convey("Inverse case", func() {
+ f, err := ini.LoadSources(ini.LoadOptions{AllowPythonMultilineValues: false}, []byte(`
+key1=a\b\
+key2=c\d\`))
+ So(err, ShouldBeNil)
+ So(f, ShouldNotBeNil)
+
+ So(f.Section("").Key("key1").String(), ShouldEqual, `a\bkey2=c\d`)
+ })
+ })
+
+ Convey("Ignore inline comments", func() {
+ f, err := ini.LoadSources(ini.LoadOptions{
+ AllowPythonMultilineValues: false,
+ IgnoreInlineComment: true,
+ }, []byte(`
+key1=value ;comment
+key2=value2 #comment2
+key3=val#ue #comment3`))
+ So(err, ShouldBeNil)
+ So(f, ShouldNotBeNil)
+
+ So(f.Section("").Key("key1").String(), ShouldEqual, `value ;comment`)
+ So(f.Section("").Key("key2").String(), ShouldEqual, `value2 #comment2`)
+ So(f.Section("").Key("key3").String(), ShouldEqual, `val#ue #comment3`)
+
+ Convey("Inverse case", func() {
+ f, err := ini.LoadSources(ini.LoadOptions{AllowPythonMultilineValues: false}, []byte(`
+key1=value ;comment
+key2=value2 #comment2`))
+ So(err, ShouldBeNil)
+ So(f, ShouldNotBeNil)
+
+ So(f.Section("").Key("key1").String(), ShouldEqual, `value`)
+ So(f.Section("").Key("key1").Comment, ShouldEqual, `;comment`)
+ So(f.Section("").Key("key2").String(), ShouldEqual, `value2`)
+ So(f.Section("").Key("key2").Comment, ShouldEqual, `#comment2`)
+ })
+ })
+
+ Convey("Allow boolean type keys", func() {
+ f, err := ini.LoadSources(ini.LoadOptions{
+ AllowPythonMultilineValues: false,
+ AllowBooleanKeys: true,
+ }, []byte(`
+key1=hello
+#key2
+key3`))
+ So(err, ShouldBeNil)
+ So(f, ShouldNotBeNil)
+
+ So(f.Section("").KeyStrings(), ShouldResemble, []string{"key1", "key3"})
+ So(f.Section("").Key("key3").MustBool(false), ShouldBeTrue)
+
+ Convey("Write out", func() {
+ var buf bytes.Buffer
+ _, err := f.WriteTo(&buf)
+ So(err, ShouldBeNil)
+ So(buf.String(), ShouldEqual, `key1 = hello
+# key2
+key3
+`)
+ })
+
+ Convey("Inverse case", func() {
+ _, err := ini.LoadSources(ini.LoadOptions{AllowPythonMultilineValues: false}, []byte(`
+key1=hello
+#key2
+key3`))
+ So(err, ShouldNotBeNil)
+ })
+ })
+
+ Convey("Allow shadow keys", func() {
+ f, err := ini.LoadSources(ini.LoadOptions{AllowPythonMultilineValues: false, AllowShadows: true}, []byte(`
+[remote "origin"]
+url = https://github.com/Antergone/test1.git
+url = https://github.com/Antergone/test2.git
+fetch = +refs/heads/*:refs/remotes/origin/*`))
+ So(err, ShouldBeNil)
+ So(f, ShouldNotBeNil)
+
+ So(f.Section(`remote "origin"`).Key("url").String(), ShouldEqual, "https://github.com/Antergone/test1.git")
+ So(f.Section(`remote "origin"`).Key("url").ValueWithShadows(), ShouldResemble, []string{
+ "https://github.com/Antergone/test1.git",
+ "https://github.com/Antergone/test2.git",
+ })
+ So(f.Section(`remote "origin"`).Key("fetch").String(), ShouldEqual, "+refs/heads/*:refs/remotes/origin/*")
+
+ Convey("Write out", func() {
+ var buf bytes.Buffer
+ _, err := f.WriteTo(&buf)
+ So(err, ShouldBeNil)
+ So(buf.String(), ShouldEqual, `[remote "origin"]
+url = https://github.com/Antergone/test1.git
+url = https://github.com/Antergone/test2.git
+fetch = +refs/heads/*:refs/remotes/origin/*
+
+`)
+ })
+
+ Convey("Inverse case", func() {
+ f, err := ini.LoadSources(ini.LoadOptions{AllowPythonMultilineValues: false}, []byte(`
+[remote "origin"]
+url = https://github.com/Antergone/test1.git
+url = https://github.com/Antergone/test2.git`))
+ So(err, ShouldBeNil)
+ So(f, ShouldNotBeNil)
+
+ So(f.Section(`remote "origin"`).Key("url").String(), ShouldEqual, "https://github.com/Antergone/test2.git")
+ })
+ })
+
+ Convey("Unescape double quotes inside value", func() {
+ f, err := ini.LoadSources(ini.LoadOptions{
+ AllowPythonMultilineValues: false,
+ UnescapeValueDoubleQuotes: true,
+ }, []byte(`
+create_repo="创建了仓库 <a href=\"%s\">%s</a>"`))
+ So(err, ShouldBeNil)
+ So(f, ShouldNotBeNil)
+
+ So(f.Section("").Key("create_repo").String(), ShouldEqual, `创建了仓库 <a href="%s">%s</a>`)
+
+ Convey("Inverse case", func() {
+ f, err := ini.LoadSources(ini.LoadOptions{AllowPythonMultilineValues: false}, []byte(`
+create_repo="创建了仓库 <a href=\"%s\">%s</a>"`))
+ So(err, ShouldBeNil)
+ So(f, ShouldNotBeNil)
+
+ So(f.Section("").Key("create_repo").String(), ShouldEqual, `"创建了仓库 <a href=\"%s\">%s</a>"`)
+ })
+ })
+
+ Convey("Unescape comment symbols inside value", func() {
+ f, err := ini.LoadSources(ini.LoadOptions{
+ AllowPythonMultilineValues: false,
+ IgnoreInlineComment: true,
+ UnescapeValueCommentSymbols: true,
+ }, []byte(`
+key = test value <span style="color: %s\; background: %s">more text</span>
+`))
+ So(err, ShouldBeNil)
+ So(f, ShouldNotBeNil)
+
+ So(f.Section("").Key("key").String(), ShouldEqual, `test value <span style="color: %s; background: %s">more text</span>`)
+ })
+
+ Convey("Can't parse small python-compatible INI files", func() {
+ f, err := ini.LoadSources(ini.LoadOptions{AllowPythonMultilineValues: false}, []byte(`
+[long]
+long_rsa_private_key = -----BEGIN RSA PRIVATE KEY-----
+ foo
+ bar
+ foobar
+ barfoo
+ -----END RSA PRIVATE KEY-----
+`))
+ So(err, ShouldNotBeNil)
+ So(f, ShouldBeNil)
+ So(err.Error(), ShouldEqual, "key-value delimiter not found: foo\n")
+ })
+
+ Convey("Can't parse big python-compatible INI files", func() {
+ f, err := ini.LoadSources(ini.LoadOptions{AllowPythonMultilineValues: false}, []byte(`
+[long]
+long_rsa_private_key = -----BEGIN RSA PRIVATE KEY-----
+ 1foo
+ 2bar
+ 3foobar
+ 4barfoo
+ 5foo
+ 6bar
+ 7foobar
+ 8barfoo
+ 9foo
+ 10bar
+ 11foobar
+ 12barfoo
+ 13foo
+ 14bar
+ 15foobar
+ 16barfoo
+ 17foo
+ 18bar
+ 19foobar
+ 20barfoo
+ 21foo
+ 22bar
+ 23foobar
+ 24barfoo
+ 25foo
+ 26bar
+ 27foobar
+ 28barfoo
+ 29foo
+ 30bar
+ 31foobar
+ 32barfoo
+ 33foo
+ 34bar
+ 35foobar
+ 36barfoo
+ 37foo
+ 38bar
+ 39foobar
+ 40barfoo
+ 41foo
+ 42bar
+ 43foobar
+ 44barfoo
+ 45foo
+ 46bar
+ 47foobar
+ 48barfoo
+ 49foo
+ 50bar
+ 51foobar
+ 52barfoo
+ 53foo
+ 54bar
+ 55foobar
+ 56barfoo
+ 57foo
+ 58bar
+ 59foobar
+ 60barfoo
+ 61foo
+ 62bar
+ 63foobar
+ 64barfoo
+ 65foo
+ 66bar
+ 67foobar
+ 68barfoo
+ 69foo
+ 70bar
+ 71foobar
+ 72barfoo
+ 73foo
+ 74bar
+ 75foobar
+ 76barfoo
+ 77foo
+ 78bar
+ 79foobar
+ 80barfoo
+ 81foo
+ 82bar
+ 83foobar
+ 84barfoo
+ 85foo
+ 86bar
+ 87foobar
+ 88barfoo
+ 89foo
+ 90bar
+ 91foobar
+ 92barfoo
+ 93foo
+ 94bar
+ 95foobar
+ 96barfoo
+ -----END RSA PRIVATE KEY-----
+`))
+ So(err, ShouldNotBeNil)
+ So(f, ShouldBeNil)
+ So(err.Error(), ShouldEqual, "key-value delimiter not found: 1foo\n")
+ })
+
+ Convey("Allow unparsable sections", func() {
+ f, err := ini.LoadSources(ini.LoadOptions{
+ AllowPythonMultilineValues: false,
+ Insensitive: true,
+ UnparseableSections: []string{"core_lesson", "comments"},
+ }, []byte(`
+Lesson_Location = 87
+Lesson_Status = C
+Score = 3
+Time = 00:02:30
+
+[CORE_LESSON]
+my lesson state data – 1111111111111111111000000000000000001110000
+111111111111111111100000000000111000000000 – end my lesson state data
+
+[COMMENTS]
+<1><L.Slide#2> This slide has the fuel listed in the wrong units <e.1>`))
+ So(err, ShouldBeNil)
+ So(f, ShouldNotBeNil)
+
+ So(f.Section("").Key("score").String(), ShouldEqual, "3")
+ So(f.Section("").Body(), ShouldBeEmpty)
+ So(f.Section("core_lesson").Body(), ShouldEqual, `my lesson state data – 1111111111111111111000000000000000001110000
+111111111111111111100000000000111000000000 – end my lesson state data`)
+ So(f.Section("comments").Body(), ShouldEqual, `<1><L.Slide#2> This slide has the fuel listed in the wrong units <e.1>`)
+
+ Convey("Write out", func() {
+ var buf bytes.Buffer
+ _, err := f.WriteTo(&buf)
+ So(err, ShouldBeNil)
+ So(buf.String(), ShouldEqual, `lesson_location = 87
+lesson_status = C
+score = 3
+time = 00:02:30
+
+[core_lesson]
+my lesson state data – 1111111111111111111000000000000000001110000
+111111111111111111100000000000111000000000 – end my lesson state data
+
+[comments]
+<1><L.Slide#2> This slide has the fuel listed in the wrong units <e.1>
+`)
+ })
+
+ Convey("Inverse case", func() {
+ _, err := ini.LoadSources(ini.LoadOptions{AllowPythonMultilineValues: false}, []byte(`
+[CORE_LESSON]
+my lesson state data – 1111111111111111111000000000000000001110000
+111111111111111111100000000000111000000000 – end my lesson state data`))
+ So(err, ShouldNotBeNil)
+ })
+ })
+
+ Convey("And false `SpaceBeforeInlineComment`", func() {
+ Convey("Can't parse INI files containing `#` or `;` in value", func() {
+ f, err := ini.LoadSources(
+ ini.LoadOptions{AllowPythonMultilineValues: true, SpaceBeforeInlineComment: false},
+ []byte(`
+[author]
+NAME = U#n#k#n#w#o#n
+GITHUB = U;n;k;n;w;o;n
+`))
+ So(err, ShouldBeNil)
+ So(f, ShouldNotBeNil)
+ sec := f.Section("author")
+ nameValue := sec.Key("NAME").String()
+ githubValue := sec.Key("GITHUB").String()
+ So(nameValue, ShouldEqual, "U")
+ So(githubValue, ShouldEqual, "U")
+ })
+ })
+
+ Convey("And true `SpaceBeforeInlineComment`", func() {
+ Convey("Can parse INI files containing `#` or `;` in value", func() {
+ f, err := ini.LoadSources(
+ ini.LoadOptions{AllowPythonMultilineValues: true, SpaceBeforeInlineComment: true},
+ []byte(`
+[author]
+NAME = U#n#k#n#w#o#n
+GITHUB = U;n;k;n;w;o;n
+`))
+ So(err, ShouldBeNil)
+ So(f, ShouldNotBeNil)
+ sec := f.Section("author")
+ nameValue := sec.Key("NAME").String()
+ githubValue := sec.Key("GITHUB").String()
+ So(nameValue, ShouldEqual, "U#n#k#n#w#o#n")
+ So(githubValue, ShouldEqual, "U;n;k;n;w;o;n")
+ })
+ })
+ })
+ })
+}
+
+// Test_KeyValueDelimiters verifies that LoadOptions.KeyValueDelimiters lets a
+// caller replace the default key-value separators with custom characters
+// ("?" and "!" here) and that values are still parsed correctly.
+func Test_KeyValueDelimiters(t *testing.T) {
+	Convey("Custom key-value delimiters", t, func() {
+		f, err := ini.LoadSources(ini.LoadOptions{
+			KeyValueDelimiters: "?!",
+		}, []byte(`
+[section]
+key1?value1
+key2!value2
+`))
+		So(err, ShouldBeNil)
+		So(f, ShouldNotBeNil)
+
+		So(f.Section("section").Key("key1").String(), ShouldEqual, "value1")
+		So(f.Section("section").Key("key2").String(), ShouldEqual, "value2")
+	})
+}
+
+// Test_PreserveSurroundedQuote checks both settings of
+// LoadOptions.PreserveSurroundedQuote: when true, double quotes surrounding a
+// value are kept as part of the value; when false (the default), they are
+// stripped.
+func Test_PreserveSurroundedQuote(t *testing.T) {
+	Convey("Preserve surrounded quote test", t, func() {
+		f, err := ini.LoadSources(ini.LoadOptions{
+			PreserveSurroundedQuote: true,
+		}, []byte(`
+[section]
+key1 = "value1"
+key2 = value2
+`))
+		So(err, ShouldBeNil)
+		So(f, ShouldNotBeNil)
+
+		So(f.Section("section").Key("key1").String(), ShouldEqual, "\"value1\"")
+		So(f.Section("section").Key("key2").String(), ShouldEqual, "value2")
+	})
+
+	Convey("Preserve surrounded quote test inverse test", t, func() {
+		f, err := ini.LoadSources(ini.LoadOptions{
+			PreserveSurroundedQuote: false,
+		}, []byte(`
+[section]
+key1 = "value1"
+key2 = value2
+`))
+		So(err, ShouldBeNil)
+		So(f, ShouldNotBeNil)
+
+		So(f.Section("section").Key("key1").String(), ShouldEqual, "value1")
+		So(f.Section("section").Key("key2").String(), ShouldEqual, "value2")
+	})
+}
--- /dev/null
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// Key represents a key under a section.
+type Key struct {
+	// s is the owning section; used to reach file-level options
+	// (e.g. AllowShadows, AllowNestedValues) and the ValueMapper.
+	s               *Section
+	// Comment holds the comment text attached to this key.
+	Comment         string
+	name            string
+	value           string
+	isAutoIncrement bool
+	isBooleanType   bool
+
+	// isShadow marks a key stored inside another key's shadows list;
+	// a shadow key cannot itself carry shadows (see addShadow).
+	isShadow bool
+	shadows  []*Key
+
+	// nestedValues holds extra values attached to this key via
+	// addNestedValue; nil when the key has no nested values.
+	nestedValues []string
+}
+
+// newKey simply return a key object with given values.
+func newKey(s *Section, name, val string) *Key {
+	return &Key{
+		s:     s,
+		name:  name,
+		value: val,
+	}
+}
+
+// addShadow appends a shadow key carrying val to this key without checking
+// the AllowShadows option. It rejects shadow, auto-increment and boolean
+// keys as hosts.
+func (k *Key) addShadow(val string) error {
+	if k.isShadow {
+		return errors.New("cannot add shadow to another shadow key")
+	} else if k.isAutoIncrement || k.isBooleanType {
+		return errors.New("cannot add shadow to auto-increment or boolean key")
+	}
+
+	// The shadow shares the section and name of its host key.
+	shadow := newKey(k.s, k.name, val)
+	shadow.isShadow = true
+	k.shadows = append(k.shadows, shadow)
+	return nil
+}
+
+// AddShadow adds a new shadow key to itself.
+// It returns an error unless the file was loaded with AllowShadows enabled.
+func (k *Key) AddShadow(val string) error {
+	if !k.s.f.options.AllowShadows {
+		return errors.New("shadow key is not allowed")
+	}
+	return k.addShadow(val)
+}
+
+// addNestedValue appends val to the key's nested values without checking
+// the AllowNestedValues option.
+func (k *Key) addNestedValue(val string) error {
+	if k.isAutoIncrement || k.isBooleanType {
+		return errors.New("cannot add nested value to auto-increment or boolean key")
+	}
+
+	k.nestedValues = append(k.nestedValues, val)
+	return nil
+}
+
+// AddNestedValue adds a new nested value to the key.
+// It returns an error unless the file was loaded with AllowNestedValues
+// enabled.
+func (k *Key) AddNestedValue(val string) error {
+	if !k.s.f.options.AllowNestedValues {
+		return errors.New("nested value is not allowed")
+	}
+	return k.addNestedValue(val)
+}
+
+// ValueMapper represents a mapping function for values, e.g. os.ExpandEnv
+type ValueMapper func(string) string
+
+// Name returns name of key.
+func (k *Key) Name() string {
+	return k.name
+}
+
+// Value returns raw value of key for performance purpose.
+// Unlike String, it applies no ValueMapper and no recursive-value expansion.
+func (k *Key) Value() string {
+	return k.value
+}
+
+// ValueWithShadows returns raw values of key and its shadows if any.
+// The key's own value always comes first, followed by shadow values in the
+// order they were added.
+func (k *Key) ValueWithShadows() []string {
+	if len(k.shadows) == 0 {
+		return []string{k.value}
+	}
+	vals := make([]string, len(k.shadows)+1)
+	vals[0] = k.value
+	for i := range k.shadows {
+		vals[i+1] = k.shadows[i].value
+	}
+	return vals
+}
+
+// NestedValues returns nested values stored in the key.
+// It is possible returned value is nil if no nested values stored in the key.
+func (k *Key) NestedValues() []string {
+	return k.nestedValues
+}
+
+// transformValue takes a raw value and transforms to its final string.
+// It applies the file's ValueMapper (if any), then resolves recursive
+// references of the form %(other_key)s against the same section and the
+// default section, up to _DEPTH_VALUES levels deep.
+func (k *Key) transformValue(val string) string {
+	if k.s.f.ValueMapper != nil {
+		val = k.s.f.ValueMapper(val)
+	}
+
+	// Fail-fast if no indicate char found for recursive value
+	if !strings.Contains(val, "%") {
+		return val
+	}
+	for i := 0; i < _DEPTH_VALUES; i++ {
+		vr := varPattern.FindString(val)
+		if len(vr) == 0 {
+			break
+		}
+
+		// Take off leading '%(' and trailing ')s'.
+		noption := vr[2 : len(vr)-2]
+
+		// Search in the same section.
+		nk, err := k.s.GetKey(noption)
+		if err != nil || k == nk {
+			// Search again in default section.
+			nk, _ = k.s.f.Section("").GetKey(noption)
+			if nk == nil {
+				// The referenced key exists in neither section: stop
+				// resolving and return the value as-is instead of
+				// dereferencing a nil key (which would panic).
+				break
+			}
+		}
+
+		// Substitute by new value.
+		val = strings.Replace(val, vr, nk.value, -1)
+	}
+	return val
+}
+
+// String returns string representation of value.
+// The value is passed through transformValue, so the ValueMapper and
+// recursive %(key)s references are applied.
+func (k *Key) String() string {
+	return k.transformValue(k.value)
+}
+
+// Validate accepts a validate function which can
+// return modifed result as key value.
+// The function receives the transformed value (see String).
+func (k *Key) Validate(fn func(string) string) string {
+	return fn(k.String())
+}
+
+// parseBool returns the boolean value represented by the string.
+//
+// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On,
+// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off.
+// Any other value returns an error.
+func parseBool(str string) (value bool, err error) {
+	switch str {
+	case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On":
+		return true, nil
+	case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off":
+		return false, nil
+	}
+	return false, fmt.Errorf("parsing \"%s\": invalid syntax", str)
+}
+
+// Bool returns bool type value.
+func (k *Key) Bool() (bool, error) {
+	return parseBool(k.String())
+}
+
+// Float64 returns float64 type value.
+func (k *Key) Float64() (float64, error) {
+	return strconv.ParseFloat(k.String(), 64)
+}
+
+// Int returns int type value.
+// Base 0 is used, so "0x" and "0" prefixes select hex and octal.
+func (k *Key) Int() (int, error) {
+	v, err := strconv.ParseInt(k.String(), 0, 64)
+	return int(v), err
+}
+
+// Int64 returns int64 type value.
+// Base 0 is used, so "0x" and "0" prefixes select hex and octal.
+func (k *Key) Int64() (int64, error) {
+	return strconv.ParseInt(k.String(), 0, 64)
+}
+
+// Uint returns uint type value.
+func (k *Key) Uint() (uint, error) {
+	u, e := strconv.ParseUint(k.String(), 0, 64)
+	return uint(u), e
+}
+
+// Uint64 returns uint64 type value.
+func (k *Key) Uint64() (uint64, error) {
+	return strconv.ParseUint(k.String(), 0, 64)
+}
+
+// Duration returns time.Duration type value.
+func (k *Key) Duration() (time.Duration, error) {
+	return time.ParseDuration(k.String())
+}
+
+// TimeFormat parses with given format and returns time.Time type value.
+func (k *Key) TimeFormat(format string) (time.Time, error) {
+	return time.Parse(format, k.String())
+}
+
+// Time parses with RFC3339 format and returns time.Time type value.
+func (k *Key) Time() (time.Time, error) {
+	return k.TimeFormat(time.RFC3339)
+}
+
+// NOTE: every Must* helper below also persists the supplied default back
+// into the key's raw value when parsing fails (or, for MustString, when the
+// value is empty), so subsequent reads of the key return the default.
+
+// MustString returns default value if key value is empty.
+func (k *Key) MustString(defaultVal string) string {
+	val := k.String()
+	if len(val) == 0 {
+		k.value = defaultVal
+		return defaultVal
+	}
+	return val
+}
+
+// MustBool always returns value without error,
+// it returns false if error occurs.
+func (k *Key) MustBool(defaultVal ...bool) bool {
+	val, err := k.Bool()
+	if len(defaultVal) > 0 && err != nil {
+		k.value = strconv.FormatBool(defaultVal[0])
+		return defaultVal[0]
+	}
+	return val
+}
+
+// MustFloat64 always returns value without error,
+// it returns 0.0 if error occurs.
+func (k *Key) MustFloat64(defaultVal ...float64) float64 {
+	val, err := k.Float64()
+	if len(defaultVal) > 0 && err != nil {
+		k.value = strconv.FormatFloat(defaultVal[0], 'f', -1, 64)
+		return defaultVal[0]
+	}
+	return val
+}
+
+// MustInt always returns value without error,
+// it returns 0 if error occurs.
+func (k *Key) MustInt(defaultVal ...int) int {
+	val, err := k.Int()
+	if len(defaultVal) > 0 && err != nil {
+		k.value = strconv.FormatInt(int64(defaultVal[0]), 10)
+		return defaultVal[0]
+	}
+	return val
+}
+
+// MustInt64 always returns value without error,
+// it returns 0 if error occurs.
+func (k *Key) MustInt64(defaultVal ...int64) int64 {
+	val, err := k.Int64()
+	if len(defaultVal) > 0 && err != nil {
+		k.value = strconv.FormatInt(defaultVal[0], 10)
+		return defaultVal[0]
+	}
+	return val
+}
+
+// MustUint always returns value without error,
+// it returns 0 if error occurs.
+func (k *Key) MustUint(defaultVal ...uint) uint {
+	val, err := k.Uint()
+	if len(defaultVal) > 0 && err != nil {
+		k.value = strconv.FormatUint(uint64(defaultVal[0]), 10)
+		return defaultVal[0]
+	}
+	return val
+}
+
+// MustUint64 always returns value without error,
+// it returns 0 if error occurs.
+func (k *Key) MustUint64(defaultVal ...uint64) uint64 {
+	val, err := k.Uint64()
+	if len(defaultVal) > 0 && err != nil {
+		k.value = strconv.FormatUint(defaultVal[0], 10)
+		return defaultVal[0]
+	}
+	return val
+}
+
+// MustDuration always returns value without error,
+// it returns zero value if error occurs.
+func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration {
+	val, err := k.Duration()
+	if len(defaultVal) > 0 && err != nil {
+		k.value = defaultVal[0].String()
+		return defaultVal[0]
+	}
+	return val
+}
+
+// MustTimeFormat always parses with given format and returns value without error,
+// it returns zero value if error occurs.
+func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time {
+	val, err := k.TimeFormat(format)
+	if len(defaultVal) > 0 && err != nil {
+		k.value = defaultVal[0].Format(format)
+		return defaultVal[0]
+	}
+	return val
+}
+
+// MustTime always parses with RFC3339 format and returns value without error,
+// it returns zero value if error occurs.
+func (k *Key) MustTime(defaultVal ...time.Time) time.Time {
+	return k.MustTimeFormat(time.RFC3339, defaultVal...)
+}
+
+// NOTE: the In* helpers below return the key's value only if it compares
+// equal to one of the candidates; otherwise the default is returned and no
+// error is ever surfaced.
+
+// In always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) In(defaultVal string, candidates []string) string {
+	val := k.String()
+	for _, cand := range candidates {
+		if val == cand {
+			return val
+		}
+	}
+	return defaultVal
+}
+
+// InFloat64 always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 {
+	val := k.MustFloat64()
+	for _, cand := range candidates {
+		if val == cand {
+			return val
+		}
+	}
+	return defaultVal
+}
+
+// InInt always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InInt(defaultVal int, candidates []int) int {
+	val := k.MustInt()
+	for _, cand := range candidates {
+		if val == cand {
+			return val
+		}
+	}
+	return defaultVal
+}
+
+// InInt64 always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 {
+	val := k.MustInt64()
+	for _, cand := range candidates {
+		if val == cand {
+			return val
+		}
+	}
+	return defaultVal
+}
+
+// InUint always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InUint(defaultVal uint, candidates []uint) uint {
+	val := k.MustUint()
+	for _, cand := range candidates {
+		if val == cand {
+			return val
+		}
+	}
+	return defaultVal
+}
+
+// InUint64 always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 {
+	val := k.MustUint64()
+	for _, cand := range candidates {
+		if val == cand {
+			return val
+		}
+	}
+	return defaultVal
+}
+
+// InTimeFormat always parses with given format and returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time {
+	val := k.MustTimeFormat(format)
+	for _, cand := range candidates {
+		if val == cand {
+			return val
+		}
+	}
+	return defaultVal
+}
+
+// InTime always parses with RFC3339 format and returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time {
+	return k.InTimeFormat(time.RFC3339, defaultVal, candidates)
+}
+
+// RangeFloat64 checks if value is in given range inclusively,
+// and returns default value if it's not.
+func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 {
+	val := k.MustFloat64()
+	if val < min || val > max {
+		return defaultVal
+	}
+	return val
+}
+
+// RangeInt checks if value is in given range inclusively,
+// and returns default value if it's not.
+func (k *Key) RangeInt(defaultVal, min, max int) int {
+	val := k.MustInt()
+	if val < min || val > max {
+		return defaultVal
+	}
+	return val
+}
+
+// RangeInt64 checks if value is in given range inclusively,
+// and returns default value if it's not.
+func (k *Key) RangeInt64(defaultVal, min, max int64) int64 {
+	val := k.MustInt64()
+	if val < min || val > max {
+		return defaultVal
+	}
+	return val
+}
+
+// RangeTimeFormat checks if value with given format is in given range inclusively,
+// and returns default value if it's not.
+// Comparison is done on Unix timestamps (second precision), so sub-second
+// differences are ignored.
+func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time {
+	val := k.MustTimeFormat(format)
+	if val.Unix() < min.Unix() || val.Unix() > max.Unix() {
+		return defaultVal
+	}
+	return val
+}
+
+// RangeTime checks if value with RFC3339 format is in given range inclusively,
+// and returns default value if it's not.
+func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time {
+	return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max)
+}
+
+// Strings returns list of string divided by given delimiter.
+// A backslash escapes the delimiter (and a literal backslash), so values may
+// contain the delimiter when preceded by '\'. Each element is trimmed of
+// surrounding whitespace.
+func (k *Key) Strings(delim string) []string {
+	str := k.String()
+	if len(str) == 0 {
+		return []string{}
+	}
+
+	runes := []rune(str)
+	vals := make([]string, 0, 2)
+	var buf bytes.Buffer
+	escape := false
+	idx := 0
+	for {
+		if escape {
+			escape = false
+			// Keep the backslash unless it was escaping another
+			// backslash or the delimiter itself.
+			if runes[idx] != '\\' && !strings.HasPrefix(string(runes[idx:]), delim) {
+				buf.WriteRune('\\')
+			}
+			buf.WriteRune(runes[idx])
+		} else {
+			if runes[idx] == '\\' {
+				escape = true
+			} else if strings.HasPrefix(string(runes[idx:]), delim) {
+				// Delimiter found: close the current element.
+				idx += len(delim) - 1
+				vals = append(vals, strings.TrimSpace(buf.String()))
+				buf.Reset()
+			} else {
+				buf.WriteRune(runes[idx])
+			}
+		}
+		idx += 1
+		if idx == len(runes) {
+			break
+		}
+	}
+
+	// NOTE(review): a trailing lone '\' leaves escape=true at loop exit and
+	// the backslash is silently dropped — confirm this is intended.
+	if buf.Len() > 0 {
+		vals = append(vals, strings.TrimSpace(buf.String()))
+	}
+
+	return vals
+}
+
+// StringsWithShadows returns list of string divided by given delimiter.
+// Shadows will also be appended if any. Each element is trimmed of
+// surrounding whitespace and passed through transformValue.
+func (k *Key) StringsWithShadows(delim string) []string {
+	vals := k.ValueWithShadows()
+	results := make([]string, 0, len(vals)*2)
+	for i := range vals {
+		// Skip empty raw values. The original condition compared
+		// len(vals) — which can never be zero inside this loop —
+		// instead of len(vals[i]).
+		if len(vals[i]) == 0 {
+			continue
+		}
+
+		results = append(results, strings.Split(vals[i], delim)...)
+	}
+
+	for i := range results {
+		results[i] = k.transformValue(strings.TrimSpace(results[i]))
+	}
+	return results
+}
+
+// The list getters below come in three flavors, all delegating to the
+// parse* helpers: plain (invalid elements become zero values), Valid*
+// (invalid elements are dropped), and Strict* (first invalid element
+// returns an error).
+
+// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Float64s(delim string) []float64 {
+	vals, _ := k.parseFloat64s(k.Strings(delim), true, false)
+	return vals
+}
+
+// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Ints(delim string) []int {
+	vals, _ := k.parseInts(k.Strings(delim), true, false)
+	return vals
+}
+
+// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Int64s(delim string) []int64 {
+	vals, _ := k.parseInt64s(k.Strings(delim), true, false)
+	return vals
+}
+
+// Uints returns list of uint divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Uints(delim string) []uint {
+	vals, _ := k.parseUints(k.Strings(delim), true, false)
+	return vals
+}
+
+// Uint64s returns list of uint64 divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Uint64s(delim string) []uint64 {
+	vals, _ := k.parseUint64s(k.Strings(delim), true, false)
+	return vals
+}
+
+// TimesFormat parses with given format and returns list of time.Time divided by given delimiter.
+// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC).
+func (k *Key) TimesFormat(format, delim string) []time.Time {
+	vals, _ := k.parseTimesFormat(format, k.Strings(delim), true, false)
+	return vals
+}
+
+// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter.
+// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC).
+func (k *Key) Times(delim string) []time.Time {
+	return k.TimesFormat(time.RFC3339, delim)
+}
+
+// ValidFloat64s returns list of float64 divided by given delimiter. If some value is not float, then
+// it will not be included to result list.
+func (k *Key) ValidFloat64s(delim string) []float64 {
+	vals, _ := k.parseFloat64s(k.Strings(delim), false, false)
+	return vals
+}
+
+// ValidInts returns list of int divided by given delimiter. If some value is not integer, then it will
+// not be included to result list.
+func (k *Key) ValidInts(delim string) []int {
+	vals, _ := k.parseInts(k.Strings(delim), false, false)
+	return vals
+}
+
+// ValidInt64s returns list of int64 divided by given delimiter. If some value is not 64-bit integer,
+// then it will not be included to result list.
+func (k *Key) ValidInt64s(delim string) []int64 {
+	vals, _ := k.parseInt64s(k.Strings(delim), false, false)
+	return vals
+}
+
+// ValidUints returns list of uint divided by given delimiter. If some value is not unsigned integer,
+// then it will not be included to result list.
+func (k *Key) ValidUints(delim string) []uint {
+	vals, _ := k.parseUints(k.Strings(delim), false, false)
+	return vals
+}
+
+// ValidUint64s returns list of uint64 divided by given delimiter. If some value is not 64-bit unsigned
+// integer, then it will not be included to result list.
+func (k *Key) ValidUint64s(delim string) []uint64 {
+	vals, _ := k.parseUint64s(k.Strings(delim), false, false)
+	return vals
+}
+
+// ValidTimesFormat parses with given format and returns list of time.Time divided by given delimiter.
+func (k *Key) ValidTimesFormat(format, delim string) []time.Time {
+	vals, _ := k.parseTimesFormat(format, k.Strings(delim), false, false)
+	return vals
+}
+
+// ValidTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter.
+func (k *Key) ValidTimes(delim string) []time.Time {
+	return k.ValidTimesFormat(time.RFC3339, delim)
+}
+
+// StrictFloat64s returns list of float64 divided by given delimiter or error on first invalid input.
+func (k *Key) StrictFloat64s(delim string) ([]float64, error) {
+	return k.parseFloat64s(k.Strings(delim), false, true)
+}
+
+// StrictInts returns list of int divided by given delimiter or error on first invalid input.
+func (k *Key) StrictInts(delim string) ([]int, error) {
+	return k.parseInts(k.Strings(delim), false, true)
+}
+
+// StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input.
+func (k *Key) StrictInt64s(delim string) ([]int64, error) {
+	return k.parseInt64s(k.Strings(delim), false, true)
+}
+
+// StrictUints returns list of uint divided by given delimiter or error on first invalid input.
+func (k *Key) StrictUints(delim string) ([]uint, error) {
+	return k.parseUints(k.Strings(delim), false, true)
+}
+
+// StrictUint64s returns list of uint64 divided by given delimiter or error on first invalid input.
+func (k *Key) StrictUint64s(delim string) ([]uint64, error) {
+	return k.parseUint64s(k.Strings(delim), false, true)
+}
+
+// StrictTimesFormat parses with given format and returns list of time.Time divided by given delimiter
+// or error on first invalid input.
+func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) {
+	return k.parseTimesFormat(format, k.Strings(delim), false, true)
+}
+
+// StrictTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter
+// or error on first invalid input.
+func (k *Key) StrictTimes(delim string) ([]time.Time, error) {
+	return k.StrictTimesFormat(time.RFC3339, delim)
+}
+
+// The parse* helpers below share two flags:
+//   - addInvalid: append the zero value for elements that fail to parse.
+//   - returnOnInvalid: abort and return the parse error immediately.
+// Callers set at most one of the two.
+
+// parseFloat64s transforms strings to float64s.
+func (k *Key) parseFloat64s(strs []string, addInvalid, returnOnInvalid bool) ([]float64, error) {
+	vals := make([]float64, 0, len(strs))
+	for _, str := range strs {
+		val, err := strconv.ParseFloat(str, 64)
+		if err != nil && returnOnInvalid {
+			return nil, err
+		}
+		if err == nil || addInvalid {
+			vals = append(vals, val)
+		}
+	}
+	return vals, nil
+}
+
+// parseInts transforms strings to ints.
+// Base 0 is used, so "0x" and "0" prefixes select hex and octal.
+func (k *Key) parseInts(strs []string, addInvalid, returnOnInvalid bool) ([]int, error) {
+	vals := make([]int, 0, len(strs))
+	for _, str := range strs {
+		valInt64, err := strconv.ParseInt(str, 0, 64)
+		val := int(valInt64)
+		if err != nil && returnOnInvalid {
+			return nil, err
+		}
+		if err == nil || addInvalid {
+			vals = append(vals, val)
+		}
+	}
+	return vals, nil
+}
+
+// parseInt64s transforms strings to int64s.
+func (k *Key) parseInt64s(strs []string, addInvalid, returnOnInvalid bool) ([]int64, error) {
+	vals := make([]int64, 0, len(strs))
+	for _, str := range strs {
+		val, err := strconv.ParseInt(str, 0, 64)
+		if err != nil && returnOnInvalid {
+			return nil, err
+		}
+		if err == nil || addInvalid {
+			vals = append(vals, val)
+		}
+	}
+	return vals, nil
+}
+
+// parseUints transforms strings to uints.
+func (k *Key) parseUints(strs []string, addInvalid, returnOnInvalid bool) ([]uint, error) {
+	vals := make([]uint, 0, len(strs))
+	for _, str := range strs {
+		// bitSize 0 means the platform's native uint size.
+		val, err := strconv.ParseUint(str, 0, 0)
+		if err != nil && returnOnInvalid {
+			return nil, err
+		}
+		if err == nil || addInvalid {
+			vals = append(vals, uint(val))
+		}
+	}
+	return vals, nil
+}
+
+// parseUint64s transforms strings to uint64s.
+func (k *Key) parseUint64s(strs []string, addInvalid, returnOnInvalid bool) ([]uint64, error) {
+	vals := make([]uint64, 0, len(strs))
+	for _, str := range strs {
+		val, err := strconv.ParseUint(str, 0, 64)
+		if err != nil && returnOnInvalid {
+			return nil, err
+		}
+		if err == nil || addInvalid {
+			vals = append(vals, val)
+		}
+	}
+	return vals, nil
+}
+
+// parseTimesFormat transforms strings to times in given format.
+func (k *Key) parseTimesFormat(format string, strs []string, addInvalid, returnOnInvalid bool) ([]time.Time, error) {
+	vals := make([]time.Time, 0, len(strs))
+	for _, str := range strs {
+		val, err := time.Parse(format, str)
+		if err != nil && returnOnInvalid {
+			return nil, err
+		}
+		if err == nil || addInvalid {
+			vals = append(vals, val)
+		}
+	}
+	return vals, nil
+}
+
+// SetValue changes key value.
+func (k *Key) SetValue(v string) {
+ if k.s.f.BlockMode {
+ k.s.f.lock.Lock()
+ defer k.s.f.lock.Unlock()
+ }
+
+ k.value = v
+ k.s.keysHash[k.name] = v
+}
--- /dev/null
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini_test
+
+import (
+ "bytes"
+ "fmt"
+ "strings"
+ "testing"
+ "time"
+
+ . "github.com/smartystreets/goconvey/convey"
+ "gopkg.in/ini.v1"
+)
+
+func TestKey_AddShadow(t *testing.T) {
+ Convey("Add shadow to a key", t, func() {
+ f, err := ini.ShadowLoad([]byte(`
+[notes]
+-: note1`))
+ So(err, ShouldBeNil)
+ So(f, ShouldNotBeNil)
+
+ k, err := f.Section("").NewKey("NAME", "ini")
+ So(err, ShouldBeNil)
+ So(k, ShouldNotBeNil)
+
+ So(k.AddShadow("ini.v1"), ShouldBeNil)
+ So(k.ValueWithShadows(), ShouldResemble, []string{"ini", "ini.v1"})
+
+ Convey("Add shadow to boolean key", func() {
+ k, err := f.Section("").NewBooleanKey("published")
+ So(err, ShouldBeNil)
+ So(k, ShouldNotBeNil)
+ So(k.AddShadow("beta"), ShouldNotBeNil)
+ })
+
+ Convey("Add shadow to auto-increment key", func() {
+ So(f.Section("notes").Key("#1").AddShadow("beta"), ShouldNotBeNil)
+ })
+ })
+
+ Convey("Shadow is not allowed", t, func() {
+ f := ini.Empty()
+ So(f, ShouldNotBeNil)
+
+ k, err := f.Section("").NewKey("NAME", "ini")
+ So(err, ShouldBeNil)
+ So(k, ShouldNotBeNil)
+
+ So(k.AddShadow("ini.v1"), ShouldNotBeNil)
+ })
+}
+
+// Helpers for slice tests.
+func float64sEqual(values []float64, expected ...float64) {
+ So(values, ShouldHaveLength, len(expected))
+ for i, v := range expected {
+ So(values[i], ShouldEqual, v)
+ }
+}
+
+func intsEqual(values []int, expected ...int) {
+ So(values, ShouldHaveLength, len(expected))
+ for i, v := range expected {
+ So(values[i], ShouldEqual, v)
+ }
+}
+
+func int64sEqual(values []int64, expected ...int64) {
+ So(values, ShouldHaveLength, len(expected))
+ for i, v := range expected {
+ So(values[i], ShouldEqual, v)
+ }
+}
+
+func uintsEqual(values []uint, expected ...uint) {
+ So(values, ShouldHaveLength, len(expected))
+ for i, v := range expected {
+ So(values[i], ShouldEqual, v)
+ }
+}
+
+func uint64sEqual(values []uint64, expected ...uint64) {
+ So(values, ShouldHaveLength, len(expected))
+ for i, v := range expected {
+ So(values[i], ShouldEqual, v)
+ }
+}
+
+func timesEqual(values []time.Time, expected ...time.Time) {
+ So(values, ShouldHaveLength, len(expected))
+ for i, v := range expected {
+ So(values[i].String(), ShouldEqual, v.String())
+ }
+}
+
+func TestKey_Helpers(t *testing.T) {
+ Convey("Getting and setting values", t, func() {
+ f, err := ini.Load(_FULL_CONF)
+ So(err, ShouldBeNil)
+ So(f, ShouldNotBeNil)
+
+ Convey("Get string representation", func() {
+ sec := f.Section("")
+ So(sec, ShouldNotBeNil)
+ So(sec.Key("NAME").Value(), ShouldEqual, "ini")
+ So(sec.Key("NAME").String(), ShouldEqual, "ini")
+ So(sec.Key("NAME").Validate(func(in string) string {
+ return in
+ }), ShouldEqual, "ini")
+ So(sec.Key("NAME").Comment, ShouldEqual, "; Package name")
+ So(sec.Key("IMPORT_PATH").String(), ShouldEqual, "gopkg.in/ini.v1")
+
+ Convey("With ValueMapper", func() {
+ f.ValueMapper = func(in string) string {
+ if in == "gopkg.in/%(NAME)s.%(VERSION)s" {
+ return "github.com/go-ini/ini"
+ }
+ return in
+ }
+ So(sec.Key("IMPORT_PATH").String(), ShouldEqual, "github.com/go-ini/ini")
+ })
+ })
+
+ Convey("Get values in non-default section", func() {
+ sec := f.Section("author")
+ So(sec, ShouldNotBeNil)
+ So(sec.Key("NAME").String(), ShouldEqual, "Unknwon")
+ So(sec.Key("GITHUB").String(), ShouldEqual, "https://github.com/Unknwon")
+
+ sec = f.Section("package")
+ So(sec, ShouldNotBeNil)
+ So(sec.Key("CLONE_URL").String(), ShouldEqual, "https://gopkg.in/ini.v1")
+ })
+
+ Convey("Get auto-increment key names", func() {
+ keys := f.Section("features").Keys()
+ for i, k := range keys {
+ So(k.Name(), ShouldEqual, fmt.Sprintf("#%d", i+1))
+ }
+ })
+
+ Convey("Get parent-keys that are available to the child section", func() {
+ parentKeys := f.Section("package.sub").ParentKeys()
+ for _, k := range parentKeys {
+ So(k.Name(), ShouldEqual, "CLONE_URL")
+ }
+ })
+
+ Convey("Get overwrite value", func() {
+ So(f.Section("author").Key("E-MAIL").String(), ShouldEqual, "u@gogs.io")
+ })
+
+ Convey("Get sections", func() {
+ sections := f.Sections()
+ for i, name := range []string{ini.DEFAULT_SECTION, "author", "package", "package.sub", "features", "types", "array", "note", "comments", "string escapes", "advance"} {
+ So(sections[i].Name(), ShouldEqual, name)
+ }
+ })
+
+ Convey("Get parent section value", func() {
+ So(f.Section("package.sub").Key("CLONE_URL").String(), ShouldEqual, "https://gopkg.in/ini.v1")
+ So(f.Section("package.fake.sub").Key("CLONE_URL").String(), ShouldEqual, "https://gopkg.in/ini.v1")
+ })
+
+ Convey("Get multiple line value", func() {
+ So(f.Section("author").Key("BIO").String(), ShouldEqual, "Gopher.\nCoding addict.\nGood man.\n")
+ })
+
+ Convey("Get values with type", func() {
+ sec := f.Section("types")
+ v1, err := sec.Key("BOOL").Bool()
+ So(err, ShouldBeNil)
+ So(v1, ShouldBeTrue)
+
+ v1, err = sec.Key("BOOL_FALSE").Bool()
+ So(err, ShouldBeNil)
+ So(v1, ShouldBeFalse)
+
+ v2, err := sec.Key("FLOAT64").Float64()
+ So(err, ShouldBeNil)
+ So(v2, ShouldEqual, 1.25)
+
+ v3, err := sec.Key("INT").Int()
+ So(err, ShouldBeNil)
+ So(v3, ShouldEqual, 10)
+
+ v4, err := sec.Key("INT").Int64()
+ So(err, ShouldBeNil)
+ So(v4, ShouldEqual, 10)
+
+ v5, err := sec.Key("UINT").Uint()
+ So(err, ShouldBeNil)
+ So(v5, ShouldEqual, 3)
+
+ v6, err := sec.Key("UINT").Uint64()
+ So(err, ShouldBeNil)
+ So(v6, ShouldEqual, 3)
+
+ t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z")
+ So(err, ShouldBeNil)
+ v7, err := sec.Key("TIME").Time()
+ So(err, ShouldBeNil)
+ So(v7.String(), ShouldEqual, t.String())
+
+ v8, err := sec.Key("HEX_NUMBER").Int()
+ So(err, ShouldBeNil)
+ So(v8, ShouldEqual, 0x3000)
+
+ Convey("Must get values with type", func() {
+ So(sec.Key("STRING").MustString("404"), ShouldEqual, "str")
+ So(sec.Key("BOOL").MustBool(), ShouldBeTrue)
+ So(sec.Key("FLOAT64").MustFloat64(), ShouldEqual, 1.25)
+ So(sec.Key("INT").MustInt(), ShouldEqual, 10)
+ So(sec.Key("INT").MustInt64(), ShouldEqual, 10)
+ So(sec.Key("UINT").MustUint(), ShouldEqual, 3)
+ So(sec.Key("UINT").MustUint64(), ShouldEqual, 3)
+ So(sec.Key("TIME").MustTime().String(), ShouldEqual, t.String())
+ So(sec.Key("HEX_NUMBER").MustInt(), ShouldEqual, 0x3000)
+
+ dur, err := time.ParseDuration("2h45m")
+ So(err, ShouldBeNil)
+ So(sec.Key("DURATION").MustDuration().Seconds(), ShouldEqual, dur.Seconds())
+
+ Convey("Must get values with default value", func() {
+ So(sec.Key("STRING_404").MustString("404"), ShouldEqual, "404")
+ So(sec.Key("BOOL_404").MustBool(true), ShouldBeTrue)
+ So(sec.Key("FLOAT64_404").MustFloat64(2.5), ShouldEqual, 2.5)
+ So(sec.Key("INT_404").MustInt(15), ShouldEqual, 15)
+ So(sec.Key("INT64_404").MustInt64(15), ShouldEqual, 15)
+ So(sec.Key("UINT_404").MustUint(6), ShouldEqual, 6)
+ So(sec.Key("UINT64_404").MustUint64(6), ShouldEqual, 6)
+ So(sec.Key("HEX_NUMBER_404").MustInt(0x3001), ShouldEqual, 0x3001)
+
+ t, err := time.Parse(time.RFC3339, "2014-01-01T20:17:05Z")
+ So(err, ShouldBeNil)
+ So(sec.Key("TIME_404").MustTime(t).String(), ShouldEqual, t.String())
+
+ So(sec.Key("DURATION_404").MustDuration(dur).Seconds(), ShouldEqual, dur.Seconds())
+
+ Convey("Must should set default as key value", func() {
+ So(sec.Key("STRING_404").String(), ShouldEqual, "404")
+ So(sec.Key("BOOL_404").String(), ShouldEqual, "true")
+ So(sec.Key("FLOAT64_404").String(), ShouldEqual, "2.5")
+ So(sec.Key("INT_404").String(), ShouldEqual, "15")
+ So(sec.Key("INT64_404").String(), ShouldEqual, "15")
+ So(sec.Key("UINT_404").String(), ShouldEqual, "6")
+ So(sec.Key("UINT64_404").String(), ShouldEqual, "6")
+ So(sec.Key("TIME_404").String(), ShouldEqual, "2014-01-01T20:17:05Z")
+ So(sec.Key("DURATION_404").String(), ShouldEqual, "2h45m0s")
+ So(sec.Key("HEX_NUMBER_404").String(), ShouldEqual, "12289")
+ })
+ })
+ })
+ })
+
+ Convey("Get value with candidates", func() {
+ sec := f.Section("types")
+ So(sec.Key("STRING").In("", []string{"str", "arr", "types"}), ShouldEqual, "str")
+ So(sec.Key("FLOAT64").InFloat64(0, []float64{1.25, 2.5, 3.75}), ShouldEqual, 1.25)
+ So(sec.Key("INT").InInt(0, []int{10, 20, 30}), ShouldEqual, 10)
+ So(sec.Key("INT").InInt64(0, []int64{10, 20, 30}), ShouldEqual, 10)
+ So(sec.Key("UINT").InUint(0, []uint{3, 6, 9}), ShouldEqual, 3)
+ So(sec.Key("UINT").InUint64(0, []uint64{3, 6, 9}), ShouldEqual, 3)
+
+ zt, err := time.Parse(time.RFC3339, "0001-01-01T01:00:00Z")
+ So(err, ShouldBeNil)
+ t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z")
+ So(err, ShouldBeNil)
+ So(sec.Key("TIME").InTime(zt, []time.Time{t, time.Now(), time.Now().Add(1 * time.Second)}).String(), ShouldEqual, t.String())
+
+ Convey("Get value with candidates and default value", func() {
+ So(sec.Key("STRING_404").In("str", []string{"str", "arr", "types"}), ShouldEqual, "str")
+ So(sec.Key("FLOAT64_404").InFloat64(1.25, []float64{1.25, 2.5, 3.75}), ShouldEqual, 1.25)
+ So(sec.Key("INT_404").InInt(10, []int{10, 20, 30}), ShouldEqual, 10)
+ So(sec.Key("INT64_404").InInt64(10, []int64{10, 20, 30}), ShouldEqual, 10)
+ So(sec.Key("UINT_404").InUint(3, []uint{3, 6, 9}), ShouldEqual, 3)
+ So(sec.Key("UINT_404").InUint64(3, []uint64{3, 6, 9}), ShouldEqual, 3)
+ So(sec.Key("TIME_404").InTime(t, []time.Time{time.Now(), time.Now(), time.Now().Add(1 * time.Second)}).String(), ShouldEqual, t.String())
+ })
+ })
+
+ Convey("Get values in range", func() {
+ sec := f.Section("types")
+ So(sec.Key("FLOAT64").RangeFloat64(0, 1, 2), ShouldEqual, 1.25)
+ So(sec.Key("INT").RangeInt(0, 10, 20), ShouldEqual, 10)
+ So(sec.Key("INT").RangeInt64(0, 10, 20), ShouldEqual, 10)
+
+ minT, err := time.Parse(time.RFC3339, "0001-01-01T01:00:00Z")
+ So(err, ShouldBeNil)
+ midT, err := time.Parse(time.RFC3339, "2013-01-01T01:00:00Z")
+ So(err, ShouldBeNil)
+ maxT, err := time.Parse(time.RFC3339, "9999-01-01T01:00:00Z")
+ So(err, ShouldBeNil)
+ t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z")
+ So(err, ShouldBeNil)
+ So(sec.Key("TIME").RangeTime(t, minT, maxT).String(), ShouldEqual, t.String())
+
+ Convey("Get value in range with default value", func() {
+ So(sec.Key("FLOAT64").RangeFloat64(5, 0, 1), ShouldEqual, 5)
+ So(sec.Key("INT").RangeInt(7, 0, 5), ShouldEqual, 7)
+ So(sec.Key("INT").RangeInt64(7, 0, 5), ShouldEqual, 7)
+ So(sec.Key("TIME").RangeTime(t, minT, midT).String(), ShouldEqual, t.String())
+ })
+ })
+
+ Convey("Get values into slice", func() {
+ sec := f.Section("array")
+ So(strings.Join(sec.Key("STRINGS").Strings(","), ","), ShouldEqual, "en,zh,de")
+ So(len(sec.Key("STRINGS_404").Strings(",")), ShouldEqual, 0)
+
+ vals1 := sec.Key("FLOAT64S").Float64s(",")
+ float64sEqual(vals1, 1.1, 2.2, 3.3)
+
+ vals2 := sec.Key("INTS").Ints(",")
+ intsEqual(vals2, 1, 2, 3)
+
+ vals3 := sec.Key("INTS").Int64s(",")
+ int64sEqual(vals3, 1, 2, 3)
+
+ vals4 := sec.Key("UINTS").Uints(",")
+ uintsEqual(vals4, 1, 2, 3)
+
+ vals5 := sec.Key("UINTS").Uint64s(",")
+ uint64sEqual(vals5, 1, 2, 3)
+
+ t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z")
+ So(err, ShouldBeNil)
+ vals6 := sec.Key("TIMES").Times(",")
+ timesEqual(vals6, t, t, t)
+ })
+
+ Convey("Test string slice escapes", func() {
+ sec := f.Section("string escapes")
+ So(sec.Key("key1").Strings(","), ShouldResemble, []string{"value1", "value2", "value3"})
+ So(sec.Key("key2").Strings(","), ShouldResemble, []string{"value1, value2"})
+ So(sec.Key("key3").Strings(","), ShouldResemble, []string{`val\ue1`, "value2"})
+ So(sec.Key("key4").Strings(","), ShouldResemble, []string{`value1\`, `value\\2`})
+ So(sec.Key("key5").Strings(",,"), ShouldResemble, []string{"value1,, value2"})
+ So(sec.Key("key6").Strings(" "), ShouldResemble, []string{"aaa", "bbb and space", "ccc"})
+ })
+
+ Convey("Get valid values into slice", func() {
+ sec := f.Section("array")
+ vals1 := sec.Key("FLOAT64S").ValidFloat64s(",")
+ float64sEqual(vals1, 1.1, 2.2, 3.3)
+
+ vals2 := sec.Key("INTS").ValidInts(",")
+ intsEqual(vals2, 1, 2, 3)
+
+ vals3 := sec.Key("INTS").ValidInt64s(",")
+ int64sEqual(vals3, 1, 2, 3)
+
+ vals4 := sec.Key("UINTS").ValidUints(",")
+ uintsEqual(vals4, 1, 2, 3)
+
+ vals5 := sec.Key("UINTS").ValidUint64s(",")
+ uint64sEqual(vals5, 1, 2, 3)
+
+ t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z")
+ So(err, ShouldBeNil)
+ vals6 := sec.Key("TIMES").ValidTimes(",")
+ timesEqual(vals6, t, t, t)
+ })
+
+ Convey("Get values one type into slice of another type", func() {
+ sec := f.Section("array")
+ vals1 := sec.Key("STRINGS").ValidFloat64s(",")
+ So(vals1, ShouldBeEmpty)
+
+ vals2 := sec.Key("STRINGS").ValidInts(",")
+ So(vals2, ShouldBeEmpty)
+
+ vals3 := sec.Key("STRINGS").ValidInt64s(",")
+ So(vals3, ShouldBeEmpty)
+
+ vals4 := sec.Key("STRINGS").ValidUints(",")
+ So(vals4, ShouldBeEmpty)
+
+ vals5 := sec.Key("STRINGS").ValidUint64s(",")
+ So(vals5, ShouldBeEmpty)
+
+ vals6 := sec.Key("STRINGS").ValidTimes(",")
+ So(vals6, ShouldBeEmpty)
+ })
+
+ Convey("Get valid values into slice without errors", func() {
+ sec := f.Section("array")
+ vals1, err := sec.Key("FLOAT64S").StrictFloat64s(",")
+ So(err, ShouldBeNil)
+ float64sEqual(vals1, 1.1, 2.2, 3.3)
+
+ vals2, err := sec.Key("INTS").StrictInts(",")
+ So(err, ShouldBeNil)
+ intsEqual(vals2, 1, 2, 3)
+
+ vals3, err := sec.Key("INTS").StrictInt64s(",")
+ So(err, ShouldBeNil)
+ int64sEqual(vals3, 1, 2, 3)
+
+ vals4, err := sec.Key("UINTS").StrictUints(",")
+ So(err, ShouldBeNil)
+ uintsEqual(vals4, 1, 2, 3)
+
+ vals5, err := sec.Key("UINTS").StrictUint64s(",")
+ So(err, ShouldBeNil)
+ uint64sEqual(vals5, 1, 2, 3)
+
+ t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z")
+ So(err, ShouldBeNil)
+ vals6, err := sec.Key("TIMES").StrictTimes(",")
+ So(err, ShouldBeNil)
+ timesEqual(vals6, t, t, t)
+ })
+
+ Convey("Get invalid values into slice", func() {
+ sec := f.Section("array")
+ vals1, err := sec.Key("STRINGS").StrictFloat64s(",")
+ So(vals1, ShouldBeEmpty)
+ So(err, ShouldNotBeNil)
+
+ vals2, err := sec.Key("STRINGS").StrictInts(",")
+ So(vals2, ShouldBeEmpty)
+ So(err, ShouldNotBeNil)
+
+ vals3, err := sec.Key("STRINGS").StrictInt64s(",")
+ So(vals3, ShouldBeEmpty)
+ So(err, ShouldNotBeNil)
+
+ vals4, err := sec.Key("STRINGS").StrictUints(",")
+ So(vals4, ShouldBeEmpty)
+ So(err, ShouldNotBeNil)
+
+ vals5, err := sec.Key("STRINGS").StrictUint64s(",")
+ So(vals5, ShouldBeEmpty)
+ So(err, ShouldNotBeNil)
+
+ vals6, err := sec.Key("STRINGS").StrictTimes(",")
+ So(vals6, ShouldBeEmpty)
+ So(err, ShouldNotBeNil)
+ })
+ })
+}
+
+func TestKey_StringsWithShadows(t *testing.T) {
+ Convey("Get strings of shadows of a key", t, func() {
+ f, err := ini.ShadowLoad([]byte(""))
+ So(err, ShouldBeNil)
+ So(f, ShouldNotBeNil)
+
+ k, err := f.Section("").NewKey("NUMS", "1,2")
+ So(err, ShouldBeNil)
+ So(k, ShouldNotBeNil)
+ k, err = f.Section("").NewKey("NUMS", "4,5,6")
+ So(err, ShouldBeNil)
+ So(k, ShouldNotBeNil)
+
+ So(k.StringsWithShadows(","), ShouldResemble, []string{"1", "2", "4", "5", "6"})
+ })
+}
+
+func TestKey_SetValue(t *testing.T) {
+ Convey("Set value of key", t, func() {
+ f := ini.Empty()
+ So(f, ShouldNotBeNil)
+
+ k, err := f.Section("").NewKey("NAME", "ini")
+ So(err, ShouldBeNil)
+ So(k, ShouldNotBeNil)
+ So(k.Value(), ShouldEqual, "ini")
+
+ k.SetValue("ini.v1")
+ So(k.Value(), ShouldEqual, "ini.v1")
+ })
+}
+
+func TestKey_NestedValues(t *testing.T) {
+ Convey("Read and write nested values", t, func() {
+ f, err := ini.LoadSources(ini.LoadOptions{
+ AllowNestedValues: true,
+ }, []byte(`
+aws_access_key_id = foo
+aws_secret_access_key = bar
+region = us-west-2
+s3 =
+ max_concurrent_requests=10
+ max_queue_size=1000`))
+ So(err, ShouldBeNil)
+ So(f, ShouldNotBeNil)
+
+ So(f.Section("").Key("s3").NestedValues(), ShouldResemble, []string{"max_concurrent_requests=10", "max_queue_size=1000"})
+
+ var buf bytes.Buffer
+ _, err = f.WriteTo(&buf)
+ So(err, ShouldBeNil)
+ So(buf.String(), ShouldEqual, `aws_access_key_id = foo
+aws_secret_access_key = bar
+region = us-west-2
+s3 =
+ max_concurrent_requests=10
+ max_queue_size=1000
+
+`)
+ })
+}
+
+func TestRecursiveValues(t *testing.T) {
+ Convey("Recursive values should not reflect on same key", t, func() {
+ f, err := ini.Load([]byte(`
+NAME = ini
+expires = yes
+[package]
+NAME = %(NAME)s
+expires = %(expires)s`))
+ So(err, ShouldBeNil)
+ So(f, ShouldNotBeNil)
+ So(f.Section("package").Key("NAME").String(), ShouldEqual, "ini")
+ So(f.Section("package").Key("expires").String(), ShouldEqual, "yes")
+ })
+}
--- /dev/null
+// Copyright 2015 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "regexp"
+ "strconv"
+ "strings"
+ "unicode"
+)
+
+var pythonMultiline = regexp.MustCompile("^(\\s+)([^\n]+)")
+
+type tokenType int
+
+const (
+ _TOKEN_INVALID tokenType = iota
+ _TOKEN_COMMENT
+ _TOKEN_SECTION
+ _TOKEN_KEY
+)
+
+type parser struct {
+ buf *bufio.Reader
+ isEOF bool
+ count int
+ comment *bytes.Buffer
+}
+
+func newParser(r io.Reader) *parser {
+ return &parser{
+ buf: bufio.NewReader(r),
+ count: 1,
+ comment: &bytes.Buffer{},
+ }
+}
+
+// BOM handles header of UTF-8, UTF-16 LE and UTF-16 BE's BOM format.
+// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding
+func (p *parser) BOM() error {
+ mask, err := p.buf.Peek(2)
+ if err != nil && err != io.EOF {
+ return err
+ } else if len(mask) < 2 {
+ return nil
+ }
+
+ switch {
+ case mask[0] == 254 && mask[1] == 255:
+ fallthrough
+ case mask[0] == 255 && mask[1] == 254:
+ p.buf.Read(mask)
+ case mask[0] == 239 && mask[1] == 187:
+ mask, err := p.buf.Peek(3)
+ if err != nil && err != io.EOF {
+ return err
+ } else if len(mask) < 3 {
+ return nil
+ }
+ if mask[2] == 191 {
+ p.buf.Read(mask)
+ }
+ }
+ return nil
+}
+
+func (p *parser) readUntil(delim byte) ([]byte, error) {
+ data, err := p.buf.ReadBytes(delim)
+ if err != nil {
+ if err == io.EOF {
+ p.isEOF = true
+ } else {
+ return nil, err
+ }
+ }
+ return data, nil
+}
+
+func cleanComment(in []byte) ([]byte, bool) {
+ i := bytes.IndexAny(in, "#;")
+ if i == -1 {
+ return nil, false
+ }
+ return in[i:], true
+}
+
+func readKeyName(delimiters string, in []byte) (string, int, error) {
+ line := string(in)
+
+ // Check if key name surrounded by quotes.
+ var keyQuote string
+ if line[0] == '"' {
+ if len(line) > 6 && string(line[0:3]) == `"""` {
+ keyQuote = `"""`
+ } else {
+ keyQuote = `"`
+ }
+ } else if line[0] == '`' {
+ keyQuote = "`"
+ }
+
+ // Get out key name
+ endIdx := -1
+ if len(keyQuote) > 0 {
+ startIdx := len(keyQuote)
+ // FIXME: fail case -> """"""name"""=value
+ pos := strings.Index(line[startIdx:], keyQuote)
+ if pos == -1 {
+ return "", -1, fmt.Errorf("missing closing key quote: %s", line)
+ }
+ pos += startIdx
+
+ // Find key-value delimiter
+ i := strings.IndexAny(line[pos+startIdx:], delimiters)
+ if i < 0 {
+ return "", -1, ErrDelimiterNotFound{line}
+ }
+ endIdx = pos + i
+ return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil
+ }
+
+ endIdx = strings.IndexAny(line, delimiters)
+ if endIdx < 0 {
+ return "", -1, ErrDelimiterNotFound{line}
+ }
+ return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil
+}
+
+func (p *parser) readMultilines(line, val, valQuote string) (string, error) {
+ for {
+ data, err := p.readUntil('\n')
+ if err != nil {
+ return "", err
+ }
+ next := string(data)
+
+ pos := strings.LastIndex(next, valQuote)
+ if pos > -1 {
+ val += next[:pos]
+
+ comment, has := cleanComment([]byte(next[pos:]))
+ if has {
+ p.comment.Write(bytes.TrimSpace(comment))
+ }
+ break
+ }
+ val += next
+ if p.isEOF {
+ return "", fmt.Errorf("missing closing key quote from '%s' to '%s'", line, next)
+ }
+ }
+ return val, nil
+}
+
+func (p *parser) readContinuationLines(val string) (string, error) {
+ for {
+ data, err := p.readUntil('\n')
+ if err != nil {
+ return "", err
+ }
+ next := strings.TrimSpace(string(data))
+
+ if len(next) == 0 {
+ break
+ }
+ val += next
+ if val[len(val)-1] != '\\' {
+ break
+ }
+ val = val[:len(val)-1]
+ }
+ return val, nil
+}
+
+// hasSurroundedQuote reports whether the first and last characters
+// are quotes \" or \'.
+// It returns false if any other part of the string also contains the same kind of quote.
+func hasSurroundedQuote(in string, quote byte) bool {
+ return len(in) >= 2 && in[0] == quote && in[len(in)-1] == quote &&
+ strings.IndexByte(in[1:], quote) == len(in)-2
+}
+
+func (p *parser) readValue(in []byte,
+ parserBufferSize int,
+ ignoreContinuation, ignoreInlineComment, unescapeValueDoubleQuotes, unescapeValueCommentSymbols, allowPythonMultilines, spaceBeforeInlineComment, preserveSurroundedQuote bool) (string, error) {
+
+ line := strings.TrimLeftFunc(string(in), unicode.IsSpace)
+ if len(line) == 0 {
+ return "", nil
+ }
+
+ var valQuote string
+ if len(line) > 3 && string(line[0:3]) == `"""` {
+ valQuote = `"""`
+ } else if line[0] == '`' {
+ valQuote = "`"
+ } else if unescapeValueDoubleQuotes && line[0] == '"' {
+ valQuote = `"`
+ }
+
+ if len(valQuote) > 0 {
+ startIdx := len(valQuote)
+ pos := strings.LastIndex(line[startIdx:], valQuote)
+ // Check for multi-line value
+ if pos == -1 {
+ return p.readMultilines(line, line[startIdx:], valQuote)
+ }
+
+ if unescapeValueDoubleQuotes && valQuote == `"` {
+ return strings.Replace(line[startIdx:pos+startIdx], `\"`, `"`, -1), nil
+ }
+ return line[startIdx : pos+startIdx], nil
+ }
+
+ lastChar := line[len(line)-1]
+ // Won't be able to reach here if value only contains whitespace
+ line = strings.TrimSpace(line)
+ trimmedLastChar := line[len(line)-1]
+
+ // Check continuation lines when desired
+ if !ignoreContinuation && trimmedLastChar == '\\' {
+ return p.readContinuationLines(line[:len(line)-1])
+ }
+
+ // Check if ignore inline comment
+ if !ignoreInlineComment {
+ var i int
+ if spaceBeforeInlineComment {
+ i = strings.Index(line, " #")
+ if i == -1 {
+ i = strings.Index(line, " ;")
+ }
+
+ } else {
+ i = strings.IndexAny(line, "#;")
+ }
+
+ if i > -1 {
+ p.comment.WriteString(line[i:])
+ line = strings.TrimSpace(line[:i])
+ }
+
+ }
+
+ // Trim single and double quotes
+ if (hasSurroundedQuote(line, '\'') ||
+ hasSurroundedQuote(line, '"')) && !preserveSurroundedQuote {
+ line = line[1 : len(line)-1]
+ } else if len(valQuote) == 0 && unescapeValueCommentSymbols {
+ if strings.Contains(line, `\;`) {
+ line = strings.Replace(line, `\;`, ";", -1)
+ }
+ if strings.Contains(line, `\#`) {
+ line = strings.Replace(line, `\#`, "#", -1)
+ }
+ } else if allowPythonMultilines && lastChar == '\n' {
+ parserBufferPeekResult, _ := p.buf.Peek(parserBufferSize)
+ peekBuffer := bytes.NewBuffer(parserBufferPeekResult)
+
+ val := line
+
+ for {
+ peekData, peekErr := peekBuffer.ReadBytes('\n')
+ if peekErr != nil {
+ if peekErr == io.EOF {
+ return val, nil
+ }
+ return "", peekErr
+ }
+
+ peekMatches := pythonMultiline.FindStringSubmatch(string(peekData))
+ if len(peekMatches) != 3 {
+ return val, nil
+ }
+
+ // NOTE: Return if not a python-ini multi-line value.
+ currentIdentSize := len(peekMatches[1])
+ if currentIdentSize <= 0 {
+ return val, nil
+ }
+
+ // NOTE: Just advance the parser reader (buffer) in-sync with the peek buffer.
+ _, err := p.readUntil('\n')
+ if err != nil {
+ return "", err
+ }
+
+ val += fmt.Sprintf("\n%s", peekMatches[2])
+ }
+ }
+
+ return line, nil
+}
+
+// parse parses data through an io.Reader.
+func (f *File) parse(reader io.Reader) (err error) {
+ p := newParser(reader)
+ if err = p.BOM(); err != nil {
+ return fmt.Errorf("BOM: %v", err)
+ }
+
+ // Ignore error because default section name is never empty string.
+ name := DEFAULT_SECTION
+ if f.options.Insensitive {
+ name = strings.ToLower(DEFAULT_SECTION)
+ }
+ section, _ := f.NewSection(name)
+
+	// Note: this "last" is not strictly equivalent to "the previous one" when the current key is not the first nested key.
+ var isLastValueEmpty bool
+ var lastRegularKey *Key
+
+ var line []byte
+ var inUnparseableSection bool
+
+ // NOTE: Iterate and increase `currentPeekSize` until
+ // the size of the parser buffer is found.
+ // TODO(unknwon): When Golang 1.10 is the lowest version supported, replace with `parserBufferSize := p.buf.Size()`.
+ parserBufferSize := 0
+ // NOTE: Peek 1kb at a time.
+ currentPeekSize := 1024
+
+ if f.options.AllowPythonMultilineValues {
+ for {
+ peekBytes, _ := p.buf.Peek(currentPeekSize)
+ peekBytesLength := len(peekBytes)
+
+ if parserBufferSize >= peekBytesLength {
+ break
+ }
+
+ currentPeekSize *= 2
+ parserBufferSize = peekBytesLength
+ }
+ }
+
+ for !p.isEOF {
+ line, err = p.readUntil('\n')
+ if err != nil {
+ return err
+ }
+
+ if f.options.AllowNestedValues &&
+ isLastValueEmpty && len(line) > 0 {
+ if line[0] == ' ' || line[0] == '\t' {
+ lastRegularKey.addNestedValue(string(bytes.TrimSpace(line)))
+ continue
+ }
+ }
+
+ line = bytes.TrimLeftFunc(line, unicode.IsSpace)
+ if len(line) == 0 {
+ continue
+ }
+
+ // Comments
+ if line[0] == '#' || line[0] == ';' {
+			// Note: we do not strip the trailing line break here,
+			// since it is needed when appending a second comment line,
+			// so it is cleaned up once at the end when assigned to the value.
+ p.comment.Write(line)
+ continue
+ }
+
+ // Section
+ if line[0] == '[' {
+ // Read to the next ']' (TODO: support quoted strings)
+ closeIdx := bytes.LastIndexByte(line, ']')
+ if closeIdx == -1 {
+ return fmt.Errorf("unclosed section: %s", line)
+ }
+
+ name := string(line[1:closeIdx])
+ section, err = f.NewSection(name)
+ if err != nil {
+ return err
+ }
+
+ comment, has := cleanComment(line[closeIdx+1:])
+ if has {
+ p.comment.Write(comment)
+ }
+
+ section.Comment = strings.TrimSpace(p.comment.String())
+
+		// Reset auto-counter and comments
+ p.comment.Reset()
+ p.count = 1
+
+ inUnparseableSection = false
+ for i := range f.options.UnparseableSections {
+ if f.options.UnparseableSections[i] == name ||
+ (f.options.Insensitive && strings.ToLower(f.options.UnparseableSections[i]) == strings.ToLower(name)) {
+ inUnparseableSection = true
+ continue
+ }
+ }
+ continue
+ }
+
+ if inUnparseableSection {
+ section.isRawSection = true
+ section.rawBody += string(line)
+ continue
+ }
+
+ kname, offset, err := readKeyName(f.options.KeyValueDelimiters, line)
+ if err != nil {
+ // Treat as boolean key when desired, and whole line is key name.
+ if IsErrDelimiterNotFound(err) {
+ switch {
+ case f.options.AllowBooleanKeys:
+ kname, err := p.readValue(line,
+ parserBufferSize,
+ f.options.IgnoreContinuation,
+ f.options.IgnoreInlineComment,
+ f.options.UnescapeValueDoubleQuotes,
+ f.options.UnescapeValueCommentSymbols,
+ f.options.AllowPythonMultilineValues,
+ f.options.SpaceBeforeInlineComment,
+ f.options.PreserveSurroundedQuote)
+ if err != nil {
+ return err
+ }
+ key, err := section.NewBooleanKey(kname)
+ if err != nil {
+ return err
+ }
+ key.Comment = strings.TrimSpace(p.comment.String())
+ p.comment.Reset()
+ continue
+
+ case f.options.SkipUnrecognizableLines:
+ continue
+ }
+ }
+ return err
+ }
+
+ // Auto increment.
+ isAutoIncr := false
+ if kname == "-" {
+ isAutoIncr = true
+ kname = "#" + strconv.Itoa(p.count)
+ p.count++
+ }
+
+ value, err := p.readValue(line[offset:],
+ parserBufferSize,
+ f.options.IgnoreContinuation,
+ f.options.IgnoreInlineComment,
+ f.options.UnescapeValueDoubleQuotes,
+ f.options.UnescapeValueCommentSymbols,
+ f.options.AllowPythonMultilineValues,
+ f.options.SpaceBeforeInlineComment,
+ f.options.PreserveSurroundedQuote)
+ if err != nil {
+ return err
+ }
+ isLastValueEmpty = len(value) == 0
+
+ key, err := section.NewKey(kname, value)
+ if err != nil {
+ return err
+ }
+ key.isAutoIncrement = isAutoIncr
+ key.Comment = strings.TrimSpace(p.comment.String())
+ p.comment.Reset()
+ lastRegularKey = key
+ }
+ return nil
+}
--- /dev/null
+// Copyright 2016 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini_test
+
+import (
+ "testing"
+
+ . "github.com/smartystreets/goconvey/convey"
+ "gopkg.in/ini.v1"
+)
+
+// TestBOM verifies that Load transparently strips byte-order marks
+// from the data sources it reads.
+func TestBOM(t *testing.T) {
+	Convey("Test handling BOM", t, func() {
+		Convey("UTF-8-BOM", func() {
+			f, err := ini.Load("testdata/UTF-8-BOM.ini")
+			So(err, ShouldBeNil)
+			So(f, ShouldNotBeNil)
+
+			So(f.Section("author").Key("E-MAIL").String(), ShouldEqual, "example@email.com")
+		})
+
+		Convey("UTF-16-LE-BOM", func() {
+			f, err := ini.Load("testdata/UTF-16-LE-BOM.ini")
+			So(err, ShouldBeNil)
+			So(f, ShouldNotBeNil)
+		})
+
+		// NOTE(review): no assertions here — presumably UTF-16 BE is not
+		// supported yet; confirm upstream and either assert or remove stub.
+		Convey("UTF-16-BE-BOM", func() {
+		})
+	})
+}
+
+// TestBadLoad checks that malformed section headers, keys, and values
+// produce errors instead of being silently accepted.
+func TestBadLoad(t *testing.T) {
+	Convey("Load with bad data", t, func() {
+		Convey("Bad section name", func() {
+			_, err := ini.Load([]byte("[]"))
+			So(err, ShouldNotBeNil)
+
+			_, err = ini.Load([]byte("["))
+			So(err, ShouldNotBeNil)
+		})
+
+		Convey("Bad keys", func() {
+			_, err := ini.Load([]byte(`"""name`))
+			So(err, ShouldNotBeNil)
+
+			_, err = ini.Load([]byte(`"""name"""`))
+			So(err, ShouldNotBeNil)
+
+			_, err = ini.Load([]byte(`""=1`))
+			So(err, ShouldNotBeNil)
+
+			_, err = ini.Load([]byte(`=`))
+			So(err, ShouldNotBeNil)
+
+			_, err = ini.Load([]byte(`name`))
+			So(err, ShouldNotBeNil)
+		})
+
+		Convey("Bad values", func() {
+			_, err := ini.Load([]byte(`name="""Unknwon`))
+			So(err, ShouldNotBeNil)
+		})
+	})
+}
--- /dev/null
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+)
+
+// Section represents a config section.
+type Section struct {
+	f       *File  // owning file; used for lock/options access
+	Comment string // comment text emitted above the section header
+	name    string
+	keys    map[string]*Key
+	keyList []string          // preserves key insertion order
+	keysHash map[string]string // name -> raw value snapshot, see KeysHash
+
+	// Raw sections keep their body unparsed (see NewRawSection/Body).
+	isRawSection bool
+	rawBody      string
+}
+
+// newSection creates a Section bound to f with empty key storage.
+func newSection(f *File, name string) *Section {
+	return &Section{
+		f:        f,
+		name:     name,
+		keys:     make(map[string]*Key),
+		keyList:  make([]string, 0, 10),
+		keysHash: make(map[string]string),
+	}
+}
+
+// Name returns name of Section.
+func (s *Section) Name() string {
+	return s.name
+}
+
+// Body returns rawBody of Section if the section was marked as unparseable.
+// It still follows the other rules of the INI format surrounding leading/trailing whitespace.
+func (s *Section) Body() string {
+	// Raw body is stored verbatim; trim to honor INI whitespace rules.
+	return strings.TrimSpace(s.rawBody)
+}
+
+// SetBody updates body content only if section is raw.
+func (s *Section) SetBody(body string) {
+	// Silently ignored for parsed sections — only raw sections carry a body.
+	if !s.isRawSection {
+		return
+	}
+	s.rawBody = body
+}
+
+// NewKey creates a new key to given section.
+//
+// If a key with the same name already exists: with AllowShadows enabled the
+// value is appended as a shadow; otherwise the existing key's value is
+// overwritten. The existing *Key is returned in both cases. An empty name
+// is an error. With Insensitive enabled the name is lower-cased first.
+func (s *Section) NewKey(name, val string) (*Key, error) {
+	if len(name) == 0 {
+		return nil, errors.New("error creating new key: empty key name")
+	} else if s.f.options.Insensitive {
+		name = strings.ToLower(name)
+	}
+
+	// BlockMode guards concurrent mutation with the file-level lock.
+	if s.f.BlockMode {
+		s.f.lock.Lock()
+		defer s.f.lock.Unlock()
+	}
+
+	if inSlice(name, s.keyList) {
+		if s.f.options.AllowShadows {
+			if err := s.keys[name].addShadow(val); err != nil {
+				return nil, err
+			}
+		} else {
+			// Overwrite previous value and keep the hash snapshot in sync.
+			s.keys[name].value = val
+			s.keysHash[name] = val
+		}
+		return s.keys[name], nil
+	}
+
+	s.keyList = append(s.keyList, name)
+	s.keys[name] = newKey(s, name, val)
+	s.keysHash[name] = val
+	return s.keys[name], nil
+}
+
+// NewBooleanKey creates a new boolean type key to given section.
+// The key is stored with value "true" and flagged so serialization can
+// emit it without "= value".
+func (s *Section) NewBooleanKey(name string) (*Key, error) {
+	key, err := s.NewKey(name, "true")
+	if err != nil {
+		return nil, err
+	}
+
+	key.isBooleanType = true
+	return key, nil
+}
+
+// GetKey returns key in section by given name.
+//
+// When the key is missing in this section, parent sections are searched:
+// for section "a.b.c" the lookup walks "a.b" then "a" by trimming at the
+// last dot. Returns an error if no ancestor has the key either.
+func (s *Section) GetKey(name string) (*Key, error) {
+	// FIXME: change to section level lock?
+	if s.f.BlockMode {
+		s.f.lock.RLock()
+	}
+	if s.f.options.Insensitive {
+		name = strings.ToLower(name)
+	}
+	key := s.keys[name]
+	if s.f.BlockMode {
+		s.f.lock.RUnlock()
+	}
+
+	if key == nil {
+		// Check if it is a child-section.
+		sname := s.name
+		for {
+			if i := strings.LastIndex(sname, "."); i > -1 {
+				sname = sname[:i]
+				sec, err := s.f.GetSection(sname)
+				if err != nil {
+					// Ancestor section does not exist; keep trimming upward.
+					continue
+				}
+				// Recurse so the ancestor can in turn consult its parents.
+				return sec.GetKey(name)
+			} else {
+				break
+			}
+		}
+		return nil, fmt.Errorf("error when getting key of section '%s': key '%s' not exists", s.name, name)
+	}
+	return key, nil
+}
+
+// HasKey returns true if section contains a key with given name.
+// Inherits GetKey's parent-section fallback.
+func (s *Section) HasKey(name string) bool {
+	key, _ := s.GetKey(name)
+	return key != nil
+}
+
+// Haskey is a backwards-compatible name for HasKey.
+// TODO: delete me in v2
+func (s *Section) Haskey(name string) bool {
+	return s.HasKey(name)
+}
+
+// HasValue returns true if section contains given raw value.
+// Only this section's own keys are scanned (no parent fallback).
+func (s *Section) HasValue(value string) bool {
+	if s.f.BlockMode {
+		s.f.lock.RLock()
+		defer s.f.lock.RUnlock()
+	}
+
+	for _, k := range s.keys {
+		if value == k.value {
+			return true
+		}
+	}
+	return false
+}
+
+// Key assumes named Key exists in section and returns a zero-value when not.
+// A missing key is created on the fly with an empty value, so callers never
+// receive nil.
+func (s *Section) Key(name string) *Key {
+	key, err := s.GetKey(name)
+	if err != nil {
+		// It's OK here because the only possible error is empty key name,
+		// but if it's empty, this piece of code won't be executed.
+		key, _ = s.NewKey(name, "")
+		return key
+	}
+	return key
+}
+
+// Keys returns list of keys of section, in insertion order.
+func (s *Section) Keys() []*Key {
+	keys := make([]*Key, len(s.keyList))
+	for i := range s.keyList {
+		keys[i] = s.Key(s.keyList[i])
+	}
+	return keys
+}
+
+// ParentKeys returns list of keys of parent section.
+// For section "a.b.c" this collects keys of "a.b" and "a" (nearest first),
+// skipping ancestors that do not exist.
+func (s *Section) ParentKeys() []*Key {
+	var parentKeys []*Key
+	sname := s.name
+	for {
+		if i := strings.LastIndex(sname, "."); i > -1 {
+			sname = sname[:i]
+			sec, err := s.f.GetSection(sname)
+			if err != nil {
+				continue
+			}
+			parentKeys = append(parentKeys, sec.Keys()...)
+		} else {
+			break
+		}
+
+	}
+	return parentKeys
+}
+
+// KeyStrings returns list of key names of section (a copy, safe to mutate).
+func (s *Section) KeyStrings() []string {
+	list := make([]string, len(s.keyList))
+	copy(list, s.keyList)
+	return list
+}
+
+// KeysHash returns keys hash consisting of names and values.
+// The returned map is a copy; NOTE(review): values are the snapshots taken
+// at NewKey time, not necessarily the keys' current values — confirm before
+// relying on it after SetValue calls.
+func (s *Section) KeysHash() map[string]string {
+	if s.f.BlockMode {
+		s.f.lock.RLock()
+		defer s.f.lock.RUnlock()
+	}
+
+	hash := map[string]string{}
+	for key, value := range s.keysHash {
+		hash[key] = value
+	}
+	return hash
+}
+
+// DeleteKey deletes a key from section. No-op if the key does not exist.
+func (s *Section) DeleteKey(name string) {
+	if s.f.BlockMode {
+		s.f.lock.Lock()
+		defer s.f.lock.Unlock()
+	}
+
+	// Remove from the ordered list and both maps together so they stay in sync.
+	for i, k := range s.keyList {
+		if k == name {
+			s.keyList = append(s.keyList[:i], s.keyList[i+1:]...)
+			delete(s.keys, name)
+			delete(s.keysHash, name)
+			return
+		}
+	}
+}
+
+// ChildSections returns a list of child sections of current section.
+// For example, "[parent.child1]" and "[parent.child12]" are child sections
+// of section "[parent]".
+func (s *Section) ChildSections() []*Section {
+	prefix := s.name + "."
+	children := make([]*Section, 0, 3)
+	for _, name := range s.f.sectionList {
+		if strings.HasPrefix(name, prefix) {
+			children = append(children, s.f.sections[name])
+		}
+	}
+	return children
+}
--- /dev/null
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini_test
+
+import (
+ "testing"
+
+ . "github.com/smartystreets/goconvey/convey"
+ "gopkg.in/ini.v1"
+)
+
+// TestSection_SetBody covers Body/SetBody for raw and non-raw sections.
+func TestSection_SetBody(t *testing.T) {
+	Convey("Set body of raw section", t, func() {
+		f := ini.Empty()
+		So(f, ShouldNotBeNil)
+
+		sec, err := f.NewRawSection("comments", `1111111111111111111000000000000000001110000
+111111111111111111100000000000111000000000`)
+		So(err, ShouldBeNil)
+		So(sec, ShouldNotBeNil)
+		So(sec.Body(), ShouldEqual, `1111111111111111111000000000000000001110000
+111111111111111111100000000000111000000000`)
+
+		sec.SetBody("1111111111111111111000000000000000001110000")
+		So(sec.Body(), ShouldEqual, `1111111111111111111000000000000000001110000`)
+
+		// SetBody must be a no-op for sections that were parsed normally.
+		Convey("Set for non-raw section", func() {
+			sec, err := f.NewSection("author")
+			So(err, ShouldBeNil)
+			So(sec, ShouldNotBeNil)
+			So(sec.Body(), ShouldBeEmpty)
+
+			sec.SetBody("1111111111111111111000000000000000001110000")
+			So(sec.Body(), ShouldBeEmpty)
+		})
+	})
+}
+
+// TestSection_NewKey covers key creation, overwrite-on-duplicate,
+// empty-name rejection, and shadow accumulation under ShadowLoad.
+func TestSection_NewKey(t *testing.T) {
+	Convey("Create a new key", t, func() {
+		f := ini.Empty()
+		So(f, ShouldNotBeNil)
+
+		k, err := f.Section("").NewKey("NAME", "ini")
+		So(err, ShouldBeNil)
+		So(k, ShouldNotBeNil)
+		So(k.Name(), ShouldEqual, "NAME")
+		So(k.Value(), ShouldEqual, "ini")
+
+		Convey("With duplicated name", func() {
+			k, err := f.Section("").NewKey("NAME", "ini.v1")
+			So(err, ShouldBeNil)
+			So(k, ShouldNotBeNil)
+
+			// Overwrite previous existed key
+			So(k.Value(), ShouldEqual, "ini.v1")
+		})
+
+		Convey("With empty string", func() {
+			_, err := f.Section("").NewKey("", "")
+			So(err, ShouldNotBeNil)
+		})
+	})
+
+	Convey("Create keys with same name and allow shadow", t, func() {
+		f, err := ini.ShadowLoad([]byte(""))
+		So(err, ShouldBeNil)
+		So(f, ShouldNotBeNil)
+
+		k, err := f.Section("").NewKey("NAME", "ini")
+		So(err, ShouldBeNil)
+		So(k, ShouldNotBeNil)
+		k, err = f.Section("").NewKey("NAME", "ini.v1")
+		So(err, ShouldBeNil)
+		So(k, ShouldNotBeNil)
+
+		So(k.ValueWithShadows(), ShouldResemble, []string{"ini", "ini.v1"})
+	})
+}
+
+// TestSection_NewBooleanKey covers boolean-key creation and empty-name rejection.
+func TestSection_NewBooleanKey(t *testing.T) {
+	Convey("Create a new boolean key", t, func() {
+		f := ini.Empty()
+		So(f, ShouldNotBeNil)
+
+		k, err := f.Section("").NewBooleanKey("start-ssh-server")
+		So(err, ShouldBeNil)
+		So(k, ShouldNotBeNil)
+		So(k.Name(), ShouldEqual, "start-ssh-server")
+		So(k.Value(), ShouldEqual, "true")
+
+		Convey("With empty string", func() {
+			_, err := f.Section("").NewBooleanKey("")
+			So(err, ShouldNotBeNil)
+		})
+	})
+}
+
+// TestSection_GetKey covers direct lookup, missing keys, and the
+// parent-section fallback ("parent.child.son" inherits from "parent").
+func TestSection_GetKey(t *testing.T) {
+	Convey("Get a key", t, func() {
+		f := ini.Empty()
+		So(f, ShouldNotBeNil)
+
+		k, err := f.Section("").NewKey("NAME", "ini")
+		So(err, ShouldBeNil)
+		So(k, ShouldNotBeNil)
+
+		k, err = f.Section("").GetKey("NAME")
+		So(err, ShouldBeNil)
+		So(k, ShouldNotBeNil)
+		So(k.Name(), ShouldEqual, "NAME")
+		So(k.Value(), ShouldEqual, "ini")
+
+		Convey("Key not exists", func() {
+			_, err := f.Section("").GetKey("404")
+			So(err, ShouldNotBeNil)
+		})
+
+		Convey("Key exists in parent section", func() {
+			k, err := f.Section("parent").NewKey("AGE", "18")
+			So(err, ShouldBeNil)
+			So(k, ShouldNotBeNil)
+
+			k, err = f.Section("parent.child.son").GetKey("AGE")
+			So(err, ShouldBeNil)
+			So(k, ShouldNotBeNil)
+			So(k.Value(), ShouldEqual, "18")
+		})
+	})
+}
+
+// TestSection_HasKey covers HasKey and its legacy alias Haskey.
+func TestSection_HasKey(t *testing.T) {
+	Convey("Check if a key exists", t, func() {
+		f := ini.Empty()
+		So(f, ShouldNotBeNil)
+
+		k, err := f.Section("").NewKey("NAME", "ini")
+		So(err, ShouldBeNil)
+		So(k, ShouldNotBeNil)
+
+		So(f.Section("").HasKey("NAME"), ShouldBeTrue)
+		So(f.Section("").Haskey("NAME"), ShouldBeTrue)
+		So(f.Section("").HasKey("404"), ShouldBeFalse)
+		So(f.Section("").Haskey("404"), ShouldBeFalse)
+	})
+}
+
+// TestSection_HasValue covers raw-value membership checks.
+func TestSection_HasValue(t *testing.T) {
+	Convey("Check if contains a value in any key", t, func() {
+		f := ini.Empty()
+		So(f, ShouldNotBeNil)
+
+		k, err := f.Section("").NewKey("NAME", "ini")
+		So(err, ShouldBeNil)
+		So(k, ShouldNotBeNil)
+
+		So(f.Section("").HasValue("ini"), ShouldBeTrue)
+		So(f.Section("").HasValue("404"), ShouldBeFalse)
+	})
+}
+
+// TestSection_Key covers the never-nil accessor, including auto-creation
+// of missing keys and parent-section inheritance.
+func TestSection_Key(t *testing.T) {
+	Convey("Get a key", t, func() {
+		f := ini.Empty()
+		So(f, ShouldNotBeNil)
+
+		k, err := f.Section("").NewKey("NAME", "ini")
+		So(err, ShouldBeNil)
+		So(k, ShouldNotBeNil)
+
+		k = f.Section("").Key("NAME")
+		So(k, ShouldNotBeNil)
+		So(k.Name(), ShouldEqual, "NAME")
+		So(k.Value(), ShouldEqual, "ini")
+
+		Convey("Key not exists", func() {
+			k := f.Section("").Key("404")
+			So(k, ShouldNotBeNil)
+			So(k.Name(), ShouldEqual, "404")
+		})
+
+		Convey("Key exists in parent section", func() {
+			k, err := f.Section("parent").NewKey("AGE", "18")
+			So(err, ShouldBeNil)
+			So(k, ShouldNotBeNil)
+
+			k = f.Section("parent.child.son").Key("AGE")
+			So(k, ShouldNotBeNil)
+			So(k.Value(), ShouldEqual, "18")
+		})
+	})
+}
+
+// TestSection_Keys checks that Keys preserves insertion order.
+func TestSection_Keys(t *testing.T) {
+	Convey("Get all keys in a section", t, func() {
+		f := ini.Empty()
+		So(f, ShouldNotBeNil)
+
+		k, err := f.Section("").NewKey("NAME", "ini")
+		So(err, ShouldBeNil)
+		So(k, ShouldNotBeNil)
+		k, err = f.Section("").NewKey("VERSION", "v1")
+		So(err, ShouldBeNil)
+		So(k, ShouldNotBeNil)
+		k, err = f.Section("").NewKey("IMPORT_PATH", "gopkg.in/ini.v1")
+		So(err, ShouldBeNil)
+		So(k, ShouldNotBeNil)
+
+		keys := f.Section("").Keys()
+		names := []string{"NAME", "VERSION", "IMPORT_PATH"}
+		So(len(keys), ShouldEqual, len(names))
+		for i, name := range names {
+			So(keys[i].Name(), ShouldEqual, name)
+		}
+	})
+}
+
+// TestSection_ParentKeys checks that a deeply nested section collects
+// the keys of its ancestors.
+func TestSection_ParentKeys(t *testing.T) {
+	Convey("Get all keys of parent sections", t, func() {
+		f := ini.Empty()
+		So(f, ShouldNotBeNil)
+
+		k, err := f.Section("package").NewKey("NAME", "ini")
+		So(err, ShouldBeNil)
+		So(k, ShouldNotBeNil)
+		k, err = f.Section("package").NewKey("VERSION", "v1")
+		So(err, ShouldBeNil)
+		So(k, ShouldNotBeNil)
+		k, err = f.Section("package").NewKey("IMPORT_PATH", "gopkg.in/ini.v1")
+		So(err, ShouldBeNil)
+		So(k, ShouldNotBeNil)
+
+		keys := f.Section("package.sub.sub2").ParentKeys()
+		names := []string{"NAME", "VERSION", "IMPORT_PATH"}
+		So(len(keys), ShouldEqual, len(names))
+		for i, name := range names {
+			So(keys[i].Name(), ShouldEqual, name)
+		}
+	})
+}
+
+// TestSection_KeyStrings checks the ordered list of key names.
+func TestSection_KeyStrings(t *testing.T) {
+	Convey("Get all key names in a section", t, func() {
+		f := ini.Empty()
+		So(f, ShouldNotBeNil)
+
+		k, err := f.Section("").NewKey("NAME", "ini")
+		So(err, ShouldBeNil)
+		So(k, ShouldNotBeNil)
+		k, err = f.Section("").NewKey("VERSION", "v1")
+		So(err, ShouldBeNil)
+		So(k, ShouldNotBeNil)
+		k, err = f.Section("").NewKey("IMPORT_PATH", "gopkg.in/ini.v1")
+		So(err, ShouldBeNil)
+		So(k, ShouldNotBeNil)
+
+		So(f.Section("").KeyStrings(), ShouldResemble, []string{"NAME", "VERSION", "IMPORT_PATH"})
+	})
+}
+
+// TestSection_KeyHash checks that later data sources override earlier ones
+// and that KeysHash returns the merged name->value map.
+func TestSection_KeyHash(t *testing.T) {
+	Convey("Get clone of key hash", t, func() {
+		f, err := ini.Load([]byte(`
+key = one
+[log]
+name = app
+file = a.log
+`), []byte(`
+key = two
+[log]
+name = app2
+file = b.log
+`))
+		So(err, ShouldBeNil)
+		So(f, ShouldNotBeNil)
+
+		So(f.Section("").Key("key").String(), ShouldEqual, "two")
+
+		hash := f.Section("log").KeysHash()
+		relation := map[string]string{
+			"name": "app2",
+			"file": "b.log",
+		}
+		for k, v := range hash {
+			So(v, ShouldEqual, relation[k])
+		}
+	})
+}
+
+// TestSection_DeleteKey checks that deletion removes the key entirely.
+func TestSection_DeleteKey(t *testing.T) {
+	Convey("Delete a key", t, func() {
+		f := ini.Empty()
+		So(f, ShouldNotBeNil)
+
+		k, err := f.Section("").NewKey("NAME", "ini")
+		So(err, ShouldBeNil)
+		So(k, ShouldNotBeNil)
+
+		So(f.Section("").HasKey("NAME"), ShouldBeTrue)
+		f.Section("").DeleteKey("NAME")
+		So(f.Section("").HasKey("NAME"), ShouldBeFalse)
+	})
+}
--- /dev/null
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+ "time"
+ "unicode"
+)
+
+// NameMapper represents a ini tag name mapper: it translates a struct
+// field name into the key/section name used in the INI file.
+type NameMapper func(string) string
+
+// Built-in name getters.
+var (
+	// AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE.
+	AllCapsUnderscore NameMapper = func(raw string) string {
+		newstr := make([]rune, 0, len(raw))
+		for i, chr := range raw {
+			if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
+				// Insert '_' before each interior upper-case boundary.
+				if i > 0 {
+					newstr = append(newstr, '_')
+				}
+			}
+			newstr = append(newstr, unicode.ToUpper(chr))
+		}
+		return string(newstr)
+	}
+	// TitleUnderscore converts to format title_underscore.
+	TitleUnderscore NameMapper = func(raw string) string {
+		newstr := make([]rune, 0, len(raw))
+		for i, chr := range raw {
+			if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
+				if i > 0 {
+					newstr = append(newstr, '_')
+				}
+				// Lower-case ASCII letters only; other runes pass through.
+				chr -= ('A' - 'a')
+			}
+			newstr = append(newstr, chr)
+		}
+		return string(newstr)
+	}
+)
+
+// parseFieldName resolves the INI name for a struct field: an explicit tag
+// name wins, then the file's NameMapper, then the raw Go field name.
+func (s *Section) parseFieldName(raw, actual string) string {
+	if len(actual) > 0 {
+		return actual
+	}
+	if s.f.NameMapper != nil {
+		return s.f.NameMapper(raw)
+	}
+	return raw
+}
+
+// parseDelim returns the `delim` tag value, defaulting to ",".
+func parseDelim(actual string) string {
+	if len(actual) > 0 {
+		return actual
+	}
+	return ","
+}
+
+// NOTE: time.Time's Kind() is reflect.Struct, so switch cases on
+// reflectTime match any struct kind, not just time.Time.
+var reflectTime = reflect.TypeOf(time.Now()).Kind()
+
+// setSliceWithProperType sets proper values to slice based on its type.
+// Values are split on delim (shadow values are flattened when allowShadow
+// is set); parse errors abort only when isStrict is true, otherwise the
+// per-element parsers substitute defaults.
+func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error {
+	var strs []string
+	if allowShadow {
+		strs = key.StringsWithShadows(delim)
+	} else {
+		strs = key.Strings(delim)
+	}
+
+	numVals := len(strs)
+	if numVals == 0 {
+		// Empty value: leave the field untouched (keeps caller's default).
+		return nil
+	}
+
+	var vals interface{}
+	var err error
+
+	sliceOf := field.Type().Elem().Kind()
+	switch sliceOf {
+	case reflect.String:
+		vals = strs
+	case reflect.Int:
+		vals, err = key.parseInts(strs, true, false)
+	case reflect.Int64:
+		vals, err = key.parseInt64s(strs, true, false)
+	case reflect.Uint:
+		vals, err = key.parseUints(strs, true, false)
+	case reflect.Uint64:
+		vals, err = key.parseUint64s(strs, true, false)
+	case reflect.Float64:
+		vals, err = key.parseFloat64s(strs, true, false)
+	case reflectTime:
+		// Times are parsed in RFC 3339 format.
+		vals, err = key.parseTimesFormat(time.RFC3339, strs, true, false)
+	default:
+		return fmt.Errorf("unsupported type '[]%s'", sliceOf)
+	}
+	if err != nil && isStrict {
+		return err
+	}
+
+	slice := reflect.MakeSlice(field.Type(), numVals, numVals)
+	for i := 0; i < numVals; i++ {
+		switch sliceOf {
+		case reflect.String:
+			slice.Index(i).Set(reflect.ValueOf(vals.([]string)[i]))
+		case reflect.Int:
+			slice.Index(i).Set(reflect.ValueOf(vals.([]int)[i]))
+		case reflect.Int64:
+			slice.Index(i).Set(reflect.ValueOf(vals.([]int64)[i]))
+		case reflect.Uint:
+			slice.Index(i).Set(reflect.ValueOf(vals.([]uint)[i]))
+		case reflect.Uint64:
+			slice.Index(i).Set(reflect.ValueOf(vals.([]uint64)[i]))
+		case reflect.Float64:
+			slice.Index(i).Set(reflect.ValueOf(vals.([]float64)[i]))
+		case reflectTime:
+			slice.Index(i).Set(reflect.ValueOf(vals.([]time.Time)[i]))
+		}
+	}
+	field.Set(slice)
+	return nil
+}
+
+// wrapStrictError propagates err only in strict mode; otherwise the
+// parse failure is swallowed so the field keeps its default value.
+func wrapStrictError(err error, isStrict bool) error {
+	if isStrict {
+		return err
+	}
+	return nil
+}
+
+// setWithProperType sets proper value to field based on its type,
+// but it does not return error for failing parsing,
+// because we want to use default value that is already assigned to struct.
+func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error {
+	switch t.Kind() {
+	case reflect.String:
+		if len(key.String()) == 0 {
+			// Empty value: keep the field's preset default.
+			return nil
+		}
+		field.SetString(key.String())
+	case reflect.Bool:
+		boolVal, err := key.Bool()
+		if err != nil {
+			return wrapStrictError(err, isStrict)
+		}
+		field.SetBool(boolVal)
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		// Try time.Duration first so "2h45m"-style values map onto
+		// time.Duration fields; fall back to a plain integer parse.
+		durationVal, err := key.Duration()
+		// Skip zero value
+		if err == nil && int64(durationVal) > 0 {
+			field.Set(reflect.ValueOf(durationVal))
+			return nil
+		}
+
+		intVal, err := key.Int64()
+		if err != nil {
+			return wrapStrictError(err, isStrict)
+		}
+		field.SetInt(intVal)
+	// byte is an alias for uint8, so supporting uint8 breaks support for byte
+	case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		durationVal, err := key.Duration()
+		// Skip zero value
+		if err == nil && uint64(durationVal) > 0 {
+			field.Set(reflect.ValueOf(durationVal))
+			return nil
+		}
+
+		uintVal, err := key.Uint64()
+		if err != nil {
+			return wrapStrictError(err, isStrict)
+		}
+		field.SetUint(uintVal)
+
+	case reflect.Float32, reflect.Float64:
+		floatVal, err := key.Float64()
+		if err != nil {
+			return wrapStrictError(err, isStrict)
+		}
+		field.SetFloat(floatVal)
+	case reflectTime:
+		// NOTE: reflectTime == reflect.Struct, so any struct field reaching
+		// here is parsed as a time value.
+		timeVal, err := key.Time()
+		if err != nil {
+			return wrapStrictError(err, isStrict)
+		}
+		field.Set(reflect.ValueOf(timeVal))
+	case reflect.Slice:
+		return setSliceWithProperType(key, field, delim, allowShadow, isStrict)
+	default:
+		return fmt.Errorf("unsupported type '%s'", t)
+	}
+	return nil
+}
+
+// parseTagOptions splits an `ini` struct tag of the form
+// "name,omitempty,allowshadow" into its parts; missing parts are false.
+func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bool) {
+	opts := strings.SplitN(tag, ",", 3)
+	rawName = opts[0]
+	if len(opts) > 1 {
+		omitEmpty = opts[1] == "omitempty"
+	}
+	if len(opts) > 2 {
+		allowShadow = opts[2] == "allowshadow"
+	}
+	return rawName, omitEmpty, allowShadow
+}
+
+// mapTo copies this section's keys into the exported, settable fields of
+// val. Struct (and anonymous pointer) fields are mapped from the section
+// named after the field; in strict mode parse errors are returned instead
+// of silently keeping field defaults.
+func (s *Section) mapTo(val reflect.Value, isStrict bool) error {
+	if val.Kind() == reflect.Ptr {
+		val = val.Elem()
+	}
+	typ := val.Type()
+
+	for i := 0; i < typ.NumField(); i++ {
+		field := val.Field(i)
+		tpField := typ.Field(i)
+
+		tag := tpField.Tag.Get("ini")
+		if tag == "-" {
+			// `ini:"-"` opts the field out entirely.
+			continue
+		}
+
+		rawName, _, allowShadow := parseTagOptions(tag)
+		fieldName := s.parseFieldName(tpField.Name, rawName)
+		if len(fieldName) == 0 || !field.CanSet() {
+			continue
+		}
+
+		isAnonymous := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous
+		isStruct := tpField.Type.Kind() == reflect.Struct
+		if isAnonymous {
+			// Allocate the embedded pointer so it can be filled in below.
+			field.Set(reflect.New(tpField.Type.Elem()))
+		}
+
+		if isAnonymous || isStruct {
+			// Nested struct: map from the section of the same name.
+			if sec, err := s.f.GetSection(fieldName); err == nil {
+				if err = sec.mapTo(field, isStrict); err != nil {
+					return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
+				}
+				continue
+			}
+		}
+
+		if key, err := s.GetKey(fieldName); err == nil {
+			delim := parseDelim(tpField.Tag.Get("delim"))
+			if err = setWithProperType(tpField.Type, key, field, delim, allowShadow, isStrict); err != nil {
+				return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
+			}
+		}
+	}
+	return nil
+}
+
+// MapTo maps section to given struct. v must be a pointer to a struct.
+func (s *Section) MapTo(v interface{}) error {
+	typ := reflect.TypeOf(v)
+	val := reflect.ValueOf(v)
+	if typ.Kind() == reflect.Ptr {
+		typ = typ.Elem()
+		val = val.Elem()
+	} else {
+		return errors.New("cannot map to non-pointer struct")
+	}
+
+	return s.mapTo(val, false)
+}
+
+// StrictMapTo maps section to given struct in strict mode,
+// which returns all possible error including value parsing error.
+func (s *Section) StrictMapTo(v interface{}) error {
+	typ := reflect.TypeOf(v)
+	val := reflect.ValueOf(v)
+	if typ.Kind() == reflect.Ptr {
+		typ = typ.Elem()
+		val = val.Elem()
+	} else {
+		return errors.New("cannot map to non-pointer struct")
+	}
+
+	return s.mapTo(val, true)
+}
+
+// MapTo maps file to given struct (via the default section).
+func (f *File) MapTo(v interface{}) error {
+	return f.Section("").MapTo(v)
+}
+
+// StrictMapTo maps file to given struct in strict mode,
+// which returns all possible error including value parsing error.
+func (f *File) StrictMapTo(v interface{}) error {
+	return f.Section("").StrictMapTo(v)
+}
+
+// MapToWithMapper maps data sources to given struct with name mapper.
+func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
+	cfg, err := Load(source, others...)
+	if err != nil {
+		return err
+	}
+	cfg.NameMapper = mapper
+	return cfg.MapTo(v)
+}
+
+// StrictMapToWithMapper maps data sources to given struct with name mapper in strict mode,
+// which returns all possible error including value parsing error.
+func StrictMapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
+	cfg, err := Load(source, others...)
+	if err != nil {
+		return err
+	}
+	cfg.NameMapper = mapper
+	return cfg.StrictMapTo(v)
+}
+
+// MapTo maps data sources to given struct.
+func MapTo(v, source interface{}, others ...interface{}) error {
+	return MapToWithMapper(v, nil, source, others...)
+}
+
+// StrictMapTo maps data sources to given struct in strict mode,
+// which returns all possible error including value parsing error.
+func StrictMapTo(v, source interface{}, others ...interface{}) error {
+	return StrictMapToWithMapper(v, nil, source, others...)
+}
+
+// reflectSliceWithProperType does the opposite thing as setSliceWithProperType:
+// it serializes a slice field into the key's value, joining elements with delim.
+func reflectSliceWithProperType(key *Key, field reflect.Value, delim string) error {
+	slice := field.Slice(0, field.Len())
+	if field.Len() == 0 {
+		return nil
+	}
+
+	var buf bytes.Buffer
+	sliceOf := field.Type().Elem().Kind()
+	for i := 0; i < field.Len(); i++ {
+		switch sliceOf {
+		case reflect.String:
+			buf.WriteString(slice.Index(i).String())
+		case reflect.Int, reflect.Int64:
+			buf.WriteString(fmt.Sprint(slice.Index(i).Int()))
+		case reflect.Uint, reflect.Uint64:
+			buf.WriteString(fmt.Sprint(slice.Index(i).Uint()))
+		case reflect.Float64:
+			buf.WriteString(fmt.Sprint(slice.Index(i).Float()))
+		case reflectTime:
+			buf.WriteString(slice.Index(i).Interface().(time.Time).Format(time.RFC3339))
+		default:
+			return fmt.Errorf("unsupported type '[]%s'", sliceOf)
+		}
+		buf.WriteString(delim)
+	}
+	// Drop the trailing delimiter appended by the final iteration.
+	key.SetValue(buf.String()[:buf.Len()-1])
+	return nil
+}
+
+// reflectWithProperType does the opposite thing as setWithProperType:
+// it serializes a single field into the key's value.
+func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error {
+	switch t.Kind() {
+	case reflect.String:
+		key.SetValue(field.String())
+	case reflect.Bool:
+		key.SetValue(fmt.Sprint(field.Bool()))
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		key.SetValue(fmt.Sprint(field.Int()))
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		key.SetValue(fmt.Sprint(field.Uint()))
+	case reflect.Float32, reflect.Float64:
+		key.SetValue(fmt.Sprint(field.Float()))
+	case reflectTime:
+		// NOTE: reflectTime == reflect.Struct; non-time structs reaching this
+		// case would panic on the type assertion.
+		key.SetValue(fmt.Sprint(field.Interface().(time.Time).Format(time.RFC3339)))
+	case reflect.Slice:
+		return reflectSliceWithProperType(key, field, delim)
+	default:
+		return fmt.Errorf("unsupported type '%s'", t)
+	}
+	return nil
+}
+
+// isEmptyValue reports whether v is its type's zero value, for `omitempty`.
+// CR: copied from encoding/json/encode.go with modifications of time.Time support.
+// TODO: add more test coverage.
+func isEmptyValue(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+		return v.Len() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Interface, reflect.Ptr:
+		return v.IsNil()
+	case reflectTime:
+		// reflectTime == reflect.Struct; the ok-check keeps non-time
+		// structs from being reported empty.
+		t, ok := v.Interface().(time.Time)
+		return ok && t.IsZero()
+	}
+	return false
+}
+
+// reflectFrom writes val's exported fields into this section, creating
+// sections/keys on demand; the inverse of mapTo.
+func (s *Section) reflectFrom(val reflect.Value) error {
+	if val.Kind() == reflect.Ptr {
+		val = val.Elem()
+	}
+	typ := val.Type()
+
+	for i := 0; i < typ.NumField(); i++ {
+		field := val.Field(i)
+		tpField := typ.Field(i)
+
+		tag := tpField.Tag.Get("ini")
+		if tag == "-" {
+			continue
+		}
+
+		// Honor `omitempty` by skipping zero-valued fields.
+		opts := strings.SplitN(tag, ",", 2)
+		if len(opts) == 2 && opts[1] == "omitempty" && isEmptyValue(field) {
+			continue
+		}
+
+		fieldName := s.parseFieldName(tpField.Name, opts[0])
+		if len(fieldName) == 0 || !field.CanSet() {
+			continue
+		}
+
+		// Nested structs (and embedded pointers) become their own section;
+		// time.Time is excluded so it serializes as a plain key instead.
+		if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) ||
+			(tpField.Type.Kind() == reflect.Struct && tpField.Type.Name() != "Time") {
+			// Note: The only error here is section doesn't exist.
+			sec, err := s.f.GetSection(fieldName)
+			if err != nil {
+				// Note: fieldName can never be empty here, ignore error.
+				sec, _ = s.f.NewSection(fieldName)
+			}
+
+			// Add comment from comment tag
+			if len(sec.Comment) == 0 {
+				sec.Comment = tpField.Tag.Get("comment")
+			}
+
+			if err = sec.reflectFrom(field); err != nil {
+				return fmt.Errorf("error reflecting field (%s): %v", fieldName, err)
+			}
+			continue
+		}
+
+		// Note: Same reason as section.
+		key, err := s.GetKey(fieldName)
+		if err != nil {
+			key, _ = s.NewKey(fieldName, "")
+		}
+
+		// Add comment from comment tag
+		if len(key.Comment) == 0 {
+			key.Comment = tpField.Tag.Get("comment")
+		}
+
+		if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil {
+			return fmt.Errorf("error reflecting field (%s): %v", fieldName, err)
+		}
+
+	}
+	return nil
+}
+
+// ReflectFrom reflects section from given struct. v must be a pointer
+// to a struct.
+func (s *Section) ReflectFrom(v interface{}) error {
+	typ := reflect.TypeOf(v)
+	val := reflect.ValueOf(v)
+	if typ.Kind() == reflect.Ptr {
+		typ = typ.Elem()
+		val = val.Elem()
+	} else {
+		return errors.New("cannot reflect from non-pointer struct")
+	}
+
+	return s.reflectFrom(val)
+}
+
+// ReflectFrom reflects file from given struct (via the default section).
+func (f *File) ReflectFrom(v interface{}) error {
+	return f.Section("").ReflectFrom(v)
+}
+
+// ReflectFromWithMapper reflects data sources from given struct with name mapper.
+func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error {
+	cfg.NameMapper = mapper
+	return cfg.ReflectFrom(v)
+}
+
+// ReflectFrom reflects data sources from given struct.
+func ReflectFrom(cfg *File, v interface{}) error {
+	return ReflectFromWithMapper(cfg, v, nil)
+}
--- /dev/null
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini_test
+
+import (
+ "bytes"
+ "fmt"
+ "strings"
+ "testing"
+ "time"
+
+ . "github.com/smartystreets/goconvey/convey"
+ "gopkg.in/ini.v1"
+)
+
+// testNested exercises slice mapping of every supported element type,
+// a custom `delim` tag, and the `ini:"-"` opt-out.
+type testNested struct {
+	Cities      []string `delim:"|"`
+	Visits      []time.Time
+	Years       []int
+	Numbers     []int64
+	Ages        []uint
+	Populations []uint64
+	Coordinates []float64
+	Note        string
+	Unused      int `ini:"-"`
+}
+
+// TestEmbeded is embedded by pointer in testStruct under section "grade".
+type TestEmbeded struct {
+	GPA float64
+}
+
+// testStruct is the main MapTo fixture: renamed keys, duration parsing,
+// nested/embedded sections, omitempty, and shadowed keys.
+type testStruct struct {
+	Name         string `ini:"NAME"`
+	Age          int
+	Male         bool
+	Money        float64
+	Born         time.Time
+	Time         time.Duration `ini:"Duration"`
+	Others       testNested
+	*TestEmbeded `ini:"grade"`
+	Unused       int `ini:"-"`
+	Unsigned     uint
+	Omitted      bool     `ini:"omitthis,omitempty"`
+	Shadows      []string `ini:",,allowshadow"`
+	ShadowInts   []int    `ini:"Shadows,,allowshadow"`
+}
+
+// _CONF_DATA_STRUCT feeds testStruct above; the duplicate "Shadows" lines
+// require AllowShadows to be preserved.
+const _CONF_DATA_STRUCT = `
+NAME = Unknwon
+Age = 21
+Male = true
+Money = 1.25
+Born = 1993-10-07T20:17:05Z
+Duration = 2h45m
+Unsigned = 3
+omitthis = true
+Shadows = 1, 2
+Shadows = 3, 4
+
+[Others]
+Cities = HangZhou|Boston
+Visits = 1993-10-07T20:17:05Z, 1993-10-07T20:17:05Z
+Years = 1993,1994
+Numbers = 10010,10086
+Ages = 18,19
+Populations = 12345678,98765432
+Coordinates = 192.168,10.11
+Note = Hello world!
+
+[grade]
+GPA = 2.8
+
+[foo.bar]
+Here = there
+When = then
+`
+
+// unsupport* fixtures contain byte fields, which MapTo rejects.
+type unsupport struct {
+	Byte byte
+}
+
+type unsupport2 struct {
+	Others struct {
+		Cities byte
+	}
+}
+
+type Unsupport3 struct {
+	Cities byte
+}
+
+type unsupport4 struct {
+	*Unsupport3 `ini:"Others"`
+}
+
+// defaultValue checks that unparsable input leaves preset defaults intact.
+type defaultValue struct {
+	Name   string
+	Age    int
+	Male   bool
+	Money  float64
+	Born   time.Time
+	Cities []string
+}
+
+type fooBar struct {
+	Here, When string
+}
+
+// _INVALID_DATA_CONF_STRUCT is deliberately unparsable for most fields.
+const _INVALID_DATA_CONF_STRUCT = `
+Name =
+Age = age
+Male = 123
+Money = money
+Born = nil
+Cities =
+`
+
+func Test_MapToStruct(t *testing.T) {
+ Convey("Map to struct", t, func() {
+ Convey("Map file to struct", func() {
+ ts := new(testStruct)
+ So(ini.MapTo(ts, []byte(_CONF_DATA_STRUCT)), ShouldBeNil)
+
+ So(ts.Name, ShouldEqual, "Unknwon")
+ So(ts.Age, ShouldEqual, 21)
+ So(ts.Male, ShouldBeTrue)
+ So(ts.Money, ShouldEqual, 1.25)
+ So(ts.Unsigned, ShouldEqual, 3)
+
+ t, err := time.Parse(time.RFC3339, "1993-10-07T20:17:05Z")
+ So(err, ShouldBeNil)
+ So(ts.Born.String(), ShouldEqual, t.String())
+
+ dur, err := time.ParseDuration("2h45m")
+ So(err, ShouldBeNil)
+ So(ts.Time.Seconds(), ShouldEqual, dur.Seconds())
+
+ So(strings.Join(ts.Others.Cities, ","), ShouldEqual, "HangZhou,Boston")
+ So(ts.Others.Visits[0].String(), ShouldEqual, t.String())
+ So(fmt.Sprint(ts.Others.Years), ShouldEqual, "[1993 1994]")
+ So(fmt.Sprint(ts.Others.Numbers), ShouldEqual, "[10010 10086]")
+ So(fmt.Sprint(ts.Others.Ages), ShouldEqual, "[18 19]")
+ So(fmt.Sprint(ts.Others.Populations), ShouldEqual, "[12345678 98765432]")
+ So(fmt.Sprint(ts.Others.Coordinates), ShouldEqual, "[192.168 10.11]")
+ So(ts.Others.Note, ShouldEqual, "Hello world!")
+ So(ts.TestEmbeded.GPA, ShouldEqual, 2.8)
+ })
+
+ Convey("Map section to struct", func() {
+ foobar := new(fooBar)
+ f, err := ini.Load([]byte(_CONF_DATA_STRUCT))
+ So(err, ShouldBeNil)
+
+ So(f.Section("foo.bar").MapTo(foobar), ShouldBeNil)
+ So(foobar.Here, ShouldEqual, "there")
+ So(foobar.When, ShouldEqual, "then")
+ })
+
+ Convey("Map to non-pointer struct", func() {
+ f, err := ini.Load([]byte(_CONF_DATA_STRUCT))
+ So(err, ShouldBeNil)
+ So(f, ShouldNotBeNil)
+
+ So(f.MapTo(testStruct{}), ShouldNotBeNil)
+ })
+
+ Convey("Map to unsupported type", func() {
+ f, err := ini.Load([]byte(_CONF_DATA_STRUCT))
+ So(err, ShouldBeNil)
+ So(f, ShouldNotBeNil)
+
+ f.NameMapper = func(raw string) string {
+ if raw == "Byte" {
+ return "NAME"
+ }
+ return raw
+ }
+ So(f.MapTo(&unsupport{}), ShouldNotBeNil)
+ So(f.MapTo(&unsupport2{}), ShouldNotBeNil)
+ So(f.MapTo(&unsupport4{}), ShouldNotBeNil)
+ })
+
+ Convey("Map to omitempty field", func() {
+ ts := new(testStruct)
+ So(ini.MapTo(ts, []byte(_CONF_DATA_STRUCT)), ShouldBeNil)
+
+ So(ts.Omitted, ShouldEqual, true)
+ })
+
+ Convey("Map with shadows", func() {
+ f, err := ini.LoadSources(ini.LoadOptions{AllowShadows: true}, []byte(_CONF_DATA_STRUCT))
+ So(err, ShouldBeNil)
+ ts := new(testStruct)
+ So(f.MapTo(ts), ShouldBeNil)
+
+ So(strings.Join(ts.Shadows, " "), ShouldEqual, "1 2 3 4")
+ So(fmt.Sprintf("%v", ts.ShadowInts), ShouldEqual, "[1 2 3 4]")
+ })
+
+ Convey("Map from invalid data source", func() {
+ So(ini.MapTo(&testStruct{}, "hi"), ShouldNotBeNil)
+ })
+
+ Convey("Map to wrong types and gain default values", func() {
+ f, err := ini.Load([]byte(_INVALID_DATA_CONF_STRUCT))
+ So(err, ShouldBeNil)
+
+ t, err := time.Parse(time.RFC3339, "1993-10-07T20:17:05Z")
+ So(err, ShouldBeNil)
+ dv := &defaultValue{"Joe", 10, true, 1.25, t, []string{"HangZhou", "Boston"}}
+ So(f.MapTo(dv), ShouldBeNil)
+ So(dv.Name, ShouldEqual, "Joe")
+ So(dv.Age, ShouldEqual, 10)
+ So(dv.Male, ShouldBeTrue)
+ So(dv.Money, ShouldEqual, 1.25)
+ So(dv.Born.String(), ShouldEqual, t.String())
+ So(strings.Join(dv.Cities, ","), ShouldEqual, "HangZhou,Boston")
+ })
+ })
+
+ Convey("Map to struct in strict mode", t, func() {
+ f, err := ini.Load([]byte(`
+name=bruce
+age=a30`))
+ So(err, ShouldBeNil)
+
+ type Strict struct {
+ Name string `ini:"name"`
+ Age int `ini:"age"`
+ }
+ s := new(Strict)
+
+ So(f.Section("").StrictMapTo(s), ShouldNotBeNil)
+ })
+
+ Convey("Map slice in strict mode", t, func() {
+ f, err := ini.Load([]byte(`
+names=alice, bruce`))
+ So(err, ShouldBeNil)
+
+ type Strict struct {
+ Names []string `ini:"names"`
+ }
+ s := new(Strict)
+
+ So(f.Section("").StrictMapTo(s), ShouldBeNil)
+ So(fmt.Sprint(s.Names), ShouldEqual, "[alice bruce]")
+ })
+}
+
+func Test_ReflectFromStruct(t *testing.T) {
+ Convey("Reflect from struct", t, func() {
+ type Embeded struct {
+ Dates []time.Time `delim:"|" comment:"Time data"`
+ Places []string
+ Years []int
+ Numbers []int64
+ Ages []uint
+ Populations []uint64
+ Coordinates []float64
+ None []int
+ }
+ type Author struct {
+ Name string `ini:"NAME"`
+ Male bool
+ Age int `comment:"Author's age"`
+ Height uint
+ GPA float64
+ Date time.Time
+ NeverMind string `ini:"-"`
+ *Embeded `ini:"infos" comment:"Embeded section"`
+ }
+
+ t, err := time.Parse(time.RFC3339, "1993-10-07T20:17:05Z")
+ So(err, ShouldBeNil)
+ a := &Author{"Unknwon", true, 21, 100, 2.8, t, "",
+ &Embeded{
+ []time.Time{t, t},
+ []string{"HangZhou", "Boston"},
+ []int{1993, 1994},
+ []int64{10010, 10086},
+ []uint{18, 19},
+ []uint64{12345678, 98765432},
+ []float64{192.168, 10.11},
+ []int{},
+ }}
+ cfg := ini.Empty()
+ So(ini.ReflectFrom(cfg, a), ShouldBeNil)
+
+ var buf bytes.Buffer
+ _, err = cfg.WriteTo(&buf)
+ So(err, ShouldBeNil)
+ So(buf.String(), ShouldEqual, `NAME = Unknwon
+Male = true
+; Author's age
+Age = 21
+Height = 100
+GPA = 2.8
+Date = 1993-10-07T20:17:05Z
+
+; Embeded section
+[infos]
+; Time data
+Dates = 1993-10-07T20:17:05Z|1993-10-07T20:17:05Z
+Places = HangZhou,Boston
+Years = 1993,1994
+Numbers = 10010,10086
+Ages = 18,19
+Populations = 12345678,98765432
+Coordinates = 192.168,10.11
+None =
+
+`)
+
+ Convey("Reflect from non-point struct", func() {
+ So(ini.ReflectFrom(cfg, Author{}), ShouldNotBeNil)
+ })
+
+ Convey("Reflect from struct with omitempty", func() {
+ cfg := ini.Empty()
+ type SpecialStruct struct {
+ FirstName string `ini:"first_name"`
+ LastName string `ini:"last_name"`
+ JustOmitMe string `ini:"omitempty"`
+ LastLogin time.Time `ini:"last_login,omitempty"`
+ LastLogin2 time.Time `ini:",omitempty"`
+ NotEmpty int `ini:"omitempty"`
+ }
+
+ So(ini.ReflectFrom(cfg, &SpecialStruct{FirstName: "John", LastName: "Doe", NotEmpty: 9}), ShouldBeNil)
+
+ var buf bytes.Buffer
+ _, err = cfg.WriteTo(&buf)
+ So(buf.String(), ShouldEqual, `first_name = John
+last_name = Doe
+omitempty = 9
+
+`)
+ })
+ })
+}
+
+type testMapper struct {
+ PackageName string
+}
+
+func Test_NameGetter(t *testing.T) {
+ Convey("Test name mappers", t, func() {
+ So(ini.MapToWithMapper(&testMapper{}, ini.TitleUnderscore, []byte("packag_name=ini")), ShouldBeNil)
+
+ cfg, err := ini.Load([]byte("PACKAGE_NAME=ini"))
+ So(err, ShouldBeNil)
+ So(cfg, ShouldNotBeNil)
+
+ cfg.NameMapper = ini.AllCapsUnderscore
+ tg := new(testMapper)
+ So(cfg.MapTo(tg), ShouldBeNil)
+ So(tg.PackageName, ShouldEqual, "ini")
+ })
+}
+
+type testDurationStruct struct {
+ Duration time.Duration `ini:"Duration"`
+}
+
+func Test_Duration(t *testing.T) {
+ Convey("Duration less than 16m50s", t, func() {
+ ds := new(testDurationStruct)
+ So(ini.MapTo(ds, []byte("Duration=16m49s")), ShouldBeNil)
+
+ dur, err := time.ParseDuration("16m49s")
+ So(err, ShouldBeNil)
+ So(ds.Duration.Seconds(), ShouldEqual, dur.Seconds())
+ })
+}
--- /dev/null
+; Package name
+NAME = ini
+; Package version
+VERSION = v1
+; Package import path
+IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s
+
+; Information about package author
+# Bio can be written in multiple lines.
+[author]
+; This is author name
+NAME = Unknwon
+E-MAIL = u@gogs.io
+GITHUB = https://github.com/%(NAME)s
+# Succeeding comment
+BIO = """Gopher.
+Coding addict.
+Good man.
+"""
+
+[package]
+CLONE_URL = https://%(IMPORT_PATH)s
+
+[package.sub]
+UNUSED_KEY = should be deleted
+
+[features]
+- = Support read/write comments of keys and sections
+- = Support auto-increment of key names
+- = Support load multiple files to overwrite key values
+
+[types]
+STRING = str
+BOOL = true
+BOOL_FALSE = false
+FLOAT64 = 1.25
+INT = 10
+TIME = 2015-01-01T20:17:05Z
+DURATION = 2h45m
+UINT = 3
+HEX_NUMBER = 0x3000
+
+[array]
+STRINGS = en, zh, de
+FLOAT64S = 1.1, 2.2, 3.3
+INTS = 1, 2, 3
+UINTS = 1, 2, 3
+TIMES = 2015-01-01T20:17:05Z,2015-01-01T20:17:05Z,2015-01-01T20:17:05Z
+
+[note]
+empty_lines = next line is empty
+boolean_key
+more = notes
+
+; Comment before the section
+; This is a comment for the section too
+[comments]
+; Comment before key
+key = value
+; This is a comment for key2
+key2 = value2
+key3 = "one", "two", "three"
+
+[string escapes]
+key1 = value1, value2, value3
+key2 = value1\, value2
+key3 = val\ue1, value2
+key4 = value1\\, value\\\\2
+key5 = value1\,, value2
+key6 = aaa bbb\ and\ space ccc
+
+[advance]
+value with quotes = some value
+value quote2 again = some value
+includes comment sign = `my#password`
+includes comment sign2 = `my;password`
+true = 2+3=5
+`1+1=2` = true
+`6+1=7` = true
+"""`5+5`""" = 10
+`"6+6"` = 12
+`7-2=4` = false
+ADDRESS = """404 road,
+NotFound, State, 50000"""
+two_lines = how about continuation lines?
+lots_of_lines = 1 2 3 4
+
--- /dev/null
+[author]
+E-MAIL = example@email.com
\ No newline at end of file
--- /dev/null
+; Package name
+NAME = ini
+; Package version
+VERSION = v1
+; Package import path
+IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s
+
+# Information about package author
+# Bio can be written in multiple lines.
+[author]
+NAME = Unknwon
+E-MAIL = u@gogs.io
+GITHUB = https://github.com/%(NAME)s
+BIO = """Gopher.
+Coding addict.
+Good man.
+""" # Succeeding comment
+
+[package]
+CLONE_URL = https://%(IMPORT_PATH)s
+
+[package.sub]
+UNUSED_KEY = should be deleted
+
+[features]
+-: Support read/write comments of keys and sections
+-: Support auto-increment of key names
+-: Support load multiple files to overwrite key values
+
+[types]
+STRING = str
+BOOL = true
+BOOL_FALSE = false
+FLOAT64 = 1.25
+INT = 10
+TIME = 2015-01-01T20:17:05Z
+DURATION = 2h45m
+UINT = 3
+HEX_NUMBER = 0x3000
+
+[array]
+STRINGS = en, zh, de
+FLOAT64S = 1.1, 2.2, 3.3
+INTS = 1, 2, 3
+UINTS = 1, 2, 3
+TIMES = 2015-01-01T20:17:05Z,2015-01-01T20:17:05Z,2015-01-01T20:17:05Z
+
+[note]
+empty_lines = next line is empty\
+
+; Comment before the section
+[comments] ; This is a comment for the section too
+; Comment before key
+key = "value"
+key2 = "value2" ; This is a comment for key2
+key3 = "one", "two", "three"
+
+[string escapes]
+key1 = value1, value2, value3
+key2 = value1\, value2
+key3 = val\ue1, value2
+key4 = value1\\, value\\\\2
+key5 = value1\,, value2
+key6 = aaa bbb\ and\ space ccc
+
+[advance]
+value with quotes = "some value"
+value quote2 again = 'some value'
+includes comment sign = `my#password`
+includes comment sign2 = `my;password`
+true = 2+3=5
+"1+1=2" = true
+"""6+1=7""" = true
+"""`5+5`""" = 10
+`"6+6"` = 12
+`7-2=4` = false
+ADDRESS = `404 road,
+NotFound, State, 50000`
+two_lines = how about \
+ continuation lines?
+lots_of_lines = 1 \
+ 2 \
+ 3 \
+ 4 \
--- /dev/null
+[author]
+E-MAIL = u@gogs.io
\ No newline at end of file
--- /dev/null
+Copyright (c) 2015 The Sconf Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of the authors nor the names of
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--- /dev/null
+sconf: structured configuration
+https://godoc.org/gopkg.in/sconf/sconf.v0
--- /dev/null
+// Copyright 2015 The Sconf Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ini // import "gopkg.in/sconf/ini.v0"
+
+import (
+ "io/ioutil"
+
+ "gopkg.in/sconf/internal.v0/internal-"
+)
+
+func File(name string) internal.IdemReader {
+ b, err := ioutil.ReadFile(name)
+ if err != nil {
+ return internal.ErrIdemReader{Err: err}
+ }
+ return internal.BytesIdemReader(b)
+}
+
+func Text(data string) internal.IdemReader {
+ return internal.BytesIdemReader([]byte(data))
+}
--- /dev/null
+Copyright (c) 2015 The Sconf Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of the authors nor the names of
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--- /dev/null
+sconf: structured configuration
+https://godoc.org/gopkg.in/sconf/sconf.v0
--- /dev/null
+Copyright (c) 2012 Péter Surányi. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+----------------------------------------------------------------------
+Portions of gcfg's source code have been derived from Go, and are
+covered by the following license:
+----------------------------------------------------------------------
+
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--- /dev/null
+Gcfg reads INI-style configuration files into Go structs;
+supports user-defined types and subsections.
+
+Project page: https://code.google.com/p/gcfg
+Package docs: http://godoc.org/code.google.com/p/gcfg
+
+My other projects: https://speter.net
--- /dev/null
+// Package gcfg reads "INI-style" text-based configuration files with
+// "name=value" pairs grouped into sections (gcfg files).
+//
+// This package is still a work in progress; see the sections below for planned
+// changes.
+//
+// Syntax
+//
+// The syntax is based on that used by git config:
+// http://git-scm.com/docs/git-config#_syntax .
+// There are some (planned) differences compared to the git config format:
+// - improve data portability:
+// - must be encoded in UTF-8 (for now) and must not contain the 0 byte
+// - include and "path" type is not supported
+// (path type may be implementable as a user-defined type)
+// - internationalization
+// - section and variable names can contain unicode letters, unicode digits
+// (as defined in http://golang.org/ref/spec#Characters ) and hyphens
+// (U+002D), starting with a unicode letter
+// - disallow potentially ambiguous or misleading definitions:
+// - `[sec.sub]` format is not allowed (deprecated in gitconfig)
+// - `[sec ""]` is not allowed
+// - use `[sec]` for section name "sec" and empty subsection name
+// - (planned) within a single file, definitions must be contiguous for each:
+// - section: '[secA]' -> '[secB]' -> '[secA]' is an error
+// - subsection: '[sec "A"]' -> '[sec "B"]' -> '[sec "A"]' is an error
+// - multivalued variable: 'multi=a' -> 'other=x' -> 'multi=b' is an error
+//
+// Data structure
+//
+// The functions in this package read values into a user-defined struct.
+// Each section corresponds to a struct field in the config struct, and each
+// variable in a section corresponds to a data field in the section struct.
+// The mapping of each section or variable name to fields is done either based
+// on the "gcfg" struct tag or by matching the name of the section or variable,
+// ignoring case. In the latter case, hyphens '-' in section and variable names
+// correspond to underscores '_' in field names.
+// Fields must be exported; to use a section or variable name starting with a
+// letter that is neither upper- nor lower-case, prefix the field name with 'X'.
+// (See https://code.google.com/p/go/issues/detail?id=5763#c4 .)
+//
+// For sections with subsections, the corresponding field in config must be a
+// map, rather than a struct, with string keys and pointer-to-struct values.
+// Values for subsection variables are stored in the map with the subsection
+// name used as the map key.
+// (Note that unlike section and variable names, subsection names are case
+// sensitive.)
+// When using a map, and there is a section with the same section name but
+// without a subsection name, its values are stored with the empty string used
+// as the key.
+//
+// The functions in this package panic if config is not a pointer to a struct,
+// or when a field is not of a suitable type (either a struct or a map with
+// string keys and pointer-to-struct values).
+//
+// Parsing of values
+//
+// The section structs in the config struct may contain single-valued or
+// multi-valued variables. Variables of unnamed slice type (that is, a type
+// starting with `[]`) are treated as multi-value; all others (including named
+// slice types) are treated as single-valued variables.
+//
+// Single-valued variables are handled based on the type as follows.
+// Unnamed pointer types (that is, types starting with `*`) are dereferenced,
+// and if necessary, a new instance is allocated.
+//
+// For types implementing the encoding.TextUnmarshaler interface, the
+// UnmarshalText method is used to set the value. Implementing this method is
+// the recommended way for parsing user-defined types.
+//
+// For fields of string kind, the value string is assigned to the field, after
+// unquoting and unescaping as needed.
+// For fields of bool kind, the field is set to true if the value is "true",
+// "yes", "on" or "1", and set to false if the value is "false", "no", "off" or
+// "0", ignoring case. In addition, single-valued bool fields can be specified
+// with a "blank" value (variable name without equals sign and value); in such
+// case the value is set to true.
+//
+// Predefined integer types [u]int(|8|16|32|64) and big.Int are parsed as
+// decimal or hexadecimal (if having '0x' prefix). (This is to prevent
+// unintuitively handling zero-padded numbers as octal.) Other types having
+// [u]int* as the underlying type, such as os.FileMode and uintptr allow
+// decimal, hexadecimal, or octal values.
+// Parsing mode for integer types can be overridden using the struct tag option
+// ",int=mode" where mode is a combination of the 'd', 'h', and 'o' characters
+// (each standing for decimal, hexadecimal, and octal, respectively.)
+//
+// All other types are parsed using fmt.Sscanf with the "%v" verb.
+//
+// For multi-valued variables, each individual value is parsed as above and
+// appended to the slice. If the first value is specified as a "blank" value
+// (variable name without equals sign and value), a new slice is allocated;
+// that is any values previously set in the slice will be ignored.
+//
+// The types subpackage provides helpers for parsing "enum-like" and integer
+// types.
+//
+// TODO
+//
+// The following is a list of changes under consideration:
+// - documentation
+// - self-contained syntax documentation
+// - more practical examples
+// - move TODOs to issue tracker (eventually)
+// - syntax
+// - reconsider valid escape sequences
+// (gitconfig doesn't support \r in value, \t in subsection name, etc.)
+// - reading / parsing gcfg files
+// - define internal representation structure
+// - support multiple inputs (readers, strings, files)
+// - support declaring encoding (?)
+// - support varying fields sets for subsections (?)
+// - writing gcfg files
+// - error handling
+// - make error context accessible programmatically?
+// - limit input size?
+//
+package gcfg // import "gopkg.in/sconf/internal.v0/internal-/gcfg"
--- /dev/null
+package gcfg_test
+
+import (
+ "fmt"
+ "log"
+)
+
+import "gopkg.in/sconf/internal.v0/internal-/gcfg"
+
+func ExampleReadStringInto() {
+ cfgStr := `; Comment line
+[section]
+name=value # comment`
+ cfg := struct {
+ Section struct {
+ Name string
+ }
+ }{}
+ err := gcfg.ReadStringInto(&cfg, cfgStr)
+ if err != nil {
+ log.Fatalf("Failed to parse gcfg data: %s", err)
+ }
+ fmt.Println(cfg.Section.Name)
+ // Output: value
+}
+
+func ExampleReadStringInto_bool() {
+ cfgStr := `; Comment line
+[section]
+switch=on`
+ cfg := struct {
+ Section struct {
+ Switch bool
+ }
+ }{}
+ err := gcfg.ReadStringInto(&cfg, cfgStr)
+ if err != nil {
+ log.Fatalf("Failed to parse gcfg data: %s", err)
+ }
+ fmt.Println(cfg.Section.Switch)
+ // Output: true
+}
+
+func ExampleReadStringInto_hyphens() {
+ cfgStr := `; Comment line
+[section-name]
+variable-name=value # comment`
+ cfg := struct {
+ Section_Name struct {
+ Variable_Name string
+ }
+ }{}
+ err := gcfg.ReadStringInto(&cfg, cfgStr)
+ if err != nil {
+ log.Fatalf("Failed to parse gcfg data: %s", err)
+ }
+ fmt.Println(cfg.Section_Name.Variable_Name)
+ // Output: value
+}
+
+func ExampleReadStringInto_tags() {
+ cfgStr := `; Comment line
+[section]
+var-name=value # comment`
+ cfg := struct {
+ Section struct {
+ FieldName string `gcfg:"var-name"`
+ }
+ }{}
+ err := gcfg.ReadStringInto(&cfg, cfgStr)
+ if err != nil {
+ log.Fatalf("Failed to parse gcfg data: %s", err)
+ }
+ fmt.Println(cfg.Section.FieldName)
+ // Output: value
+}
+
+func ExampleReadStringInto_subsections() {
+ cfgStr := `; Comment line
+[profile "A"]
+color = white
+
+[profile "B"]
+color = black
+`
+ cfg := struct {
+ Profile map[string]*struct {
+ Color string
+ }
+ }{}
+ err := gcfg.ReadStringInto(&cfg, cfgStr)
+ if err != nil {
+ log.Fatalf("Failed to parse gcfg data: %s", err)
+ }
+ fmt.Printf("%s %s\n", cfg.Profile["A"].Color, cfg.Profile["B"].Color)
+ // Output: white black
+}
+
+func ExampleReadStringInto_multivalue() {
+ cfgStr := `; Comment line
+[section]
+multi=value1
+multi=value2`
+ cfg := struct {
+ Section struct {
+ Multi []string
+ }
+ }{}
+ err := gcfg.ReadStringInto(&cfg, cfgStr)
+ if err != nil {
+ log.Fatalf("Failed to parse gcfg data: %s", err)
+ }
+ fmt.Println(cfg.Section.Multi)
+ // Output: [value1 value2]
+}
+
+func ExampleReadStringInto_unicode() {
+ cfgStr := `; Comment line
+[甲]
+乙=丙 # comment`
+ cfg := struct {
+ X甲 struct {
+ X乙 string
+ }
+ }{}
+ err := gcfg.ReadStringInto(&cfg, cfgStr)
+ if err != nil {
+ log.Fatalf("Failed to parse gcfg data: %s", err)
+ }
+ fmt.Println(cfg.X甲.X乙)
+ // Output: 丙
+}
--- /dev/null
+// +build !go1.2
+
+package gcfg
+
+type textUnmarshaler interface {
+ UnmarshalText(text []byte) error
+}
--- /dev/null
+// +build go1.2
+
+package gcfg
+
+import (
+ "encoding"
+)
+
+type textUnmarshaler encoding.TextUnmarshaler
--- /dev/null
+package gcfg
+
+import (
+ "fmt"
+ "math/big"
+ "strings"
+ "testing"
+)
+
+type Config1 struct {
+ Section struct {
+ Int int
+ BigInt big.Int
+ }
+}
+
+var testsIssue1 = []struct {
+ cfg string
+ typename string
+}{
+ {"[section]\nint=X", "int"},
+ {"[section]\nint=", "int"},
+ {"[section]\nint=1A", "int"},
+ {"[section]\nbigint=X", "big.Int"},
+ {"[section]\nbigint=", "big.Int"},
+ {"[section]\nbigint=1A", "big.Int"},
+}
+
+// Value parse error should:
+// - include plain type name
+// - not include reflect internals
+func TestIssue1(t *testing.T) {
+ for i, tt := range testsIssue1 {
+ var c Config1
+ err := ReadStringInto(&c, tt.cfg)
+ switch {
+ case err == nil:
+ t.Errorf("%d fail: got ok; wanted error", i)
+ case !strings.Contains(err.Error(), tt.typename):
+ t.Errorf("%d fail: error message doesn't contain type name %q: %v",
+ i, tt.typename, err)
+ case strings.Contains(err.Error(), "reflect"):
+ t.Errorf("%d fail: error message includes reflect internals: %v",
+ i, err)
+ default:
+ t.Logf("%d pass: %v", i, err)
+ }
+ }
+}
+
+type confIssue2 struct{ Main struct{ Foo string } }
+
+var testsIssue2 = []readtest{
+ {"[main]\n;\nfoo = bar\n", &confIssue2{struct{ Foo string }{"bar"}}, true},
+ {"[main]\r\n;\r\nfoo = bar\r\n", &confIssue2{struct{ Foo string }{"bar"}}, true},
+}
+
+func TestIssue2(t *testing.T) {
+ for i, tt := range testsIssue2 {
+ id := fmt.Sprintf("issue2:%d", i)
+ testRead(t, id, tt)
+ }
+}
--- /dev/null
+package gcfg
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "strings"
+)
+
+import (
+ "gopkg.in/sconf/internal.v0/internal-/gcfg/scanner"
+ "gopkg.in/sconf/internal.v0/internal-/gcfg/token"
+)
+
+var unescape = map[rune]rune{'\\': '\\', '"': '"', 'n': '\n', 't': '\t'}
+
+// no error: invalid literals should be caught by scanner
+func unquote(s string) string {
+ u, q, esc := make([]rune, 0, len(s)), false, false
+ for _, c := range s {
+ if esc {
+ uc, ok := unescape[c]
+ switch {
+ case ok:
+ u = append(u, uc)
+ fallthrough
+ case !q && c == '\n':
+ esc = false
+ continue
+ }
+ panic("invalid escape sequence")
+ }
+ switch c {
+ case '"':
+ q = !q
+ case '\\':
+ esc = true
+ default:
+ u = append(u, c)
+ }
+ }
+ if q {
+ panic("missing end quote")
+ }
+ if esc {
+ panic("invalid escape sequence")
+ }
+ return string(u)
+}
+
+func readInto(config interface{}, fset *token.FileSet, file *token.File, src []byte) error {
+ var s scanner.Scanner
+ var errs scanner.ErrorList
+ s.Init(file, src, func(p token.Position, m string) { errs.Add(p, m) }, 0)
+ sect, sectsub := "", ""
+ pos, tok, lit := s.Scan()
+ errfn := func(msg string) error {
+ return fmt.Errorf("%s: %s", fset.Position(pos), msg)
+ }
+ for {
+ if errs.Len() > 0 {
+ return errs.Err()
+ }
+ switch tok {
+ case token.EOF:
+ return nil
+ case token.EOL, token.COMMENT:
+ pos, tok, lit = s.Scan()
+ case token.LBRACK:
+ pos, tok, lit = s.Scan()
+ if errs.Len() > 0 {
+ return errs.Err()
+ }
+ if tok != token.IDENT {
+ return errfn("expected section name")
+ }
+ sect, sectsub = lit, ""
+ pos, tok, lit = s.Scan()
+ if errs.Len() > 0 {
+ return errs.Err()
+ }
+ if tok == token.STRING {
+ sectsub = unquote(lit)
+ if sectsub == "" {
+ return errfn("empty subsection name")
+ }
+ pos, tok, lit = s.Scan()
+ if errs.Len() > 0 {
+ return errs.Err()
+ }
+ }
+ if tok != token.RBRACK {
+ if sectsub == "" {
+ return errfn("expected subsection name or right bracket")
+ }
+ return errfn("expected right bracket")
+ }
+ pos, tok, lit = s.Scan()
+ if tok != token.EOL && tok != token.EOF && tok != token.COMMENT {
+ return errfn("expected EOL, EOF, or comment")
+ }
+ case token.IDENT:
+ if sect == "" {
+ return errfn("expected section header")
+ }
+ n := lit
+ pos, tok, lit = s.Scan()
+ if errs.Len() > 0 {
+ return errs.Err()
+ }
+ blank, v := tok == token.EOF || tok == token.EOL || tok == token.COMMENT, ""
+ if !blank {
+ if tok != token.ASSIGN {
+ return errfn("expected '='")
+ }
+ pos, tok, lit = s.Scan()
+ if errs.Len() > 0 {
+ return errs.Err()
+ }
+ if tok != token.STRING {
+ return errfn("expected value")
+ }
+ v = unquote(lit)
+ pos, tok, lit = s.Scan()
+ if errs.Len() > 0 {
+ return errs.Err()
+ }
+ if tok != token.EOL && tok != token.EOF && tok != token.COMMENT {
+ return errfn("expected EOL, EOF, or comment")
+ }
+ }
+ err := set(config, sect, sectsub, n, blank, v)
+ if err != nil {
+ return err
+ }
+ default:
+ if sect == "" {
+ return errfn("expected section header")
+ }
+ return errfn("expected section header or variable declaration")
+ }
+ }
+}
+
+// ReadInto reads gcfg formatted data from reader and sets the values into the
+// corresponding fields in config.
+func ReadInto(config interface{}, reader io.Reader) error {
+ src, err := ioutil.ReadAll(reader)
+ if err != nil {
+ return err
+ }
+ fset := token.NewFileSet()
+ file := fset.AddFile("", fset.Base(), len(src))
+ return readInto(config, fset, file, src)
+}
+
+// ReadStringInto reads gcfg formatted data from str and sets the values into
+// the corresponding fields in config.
+func ReadStringInto(config interface{}, str string) error {
+ r := strings.NewReader(str)
+ return ReadInto(config, r)
+}
+
+// ReadFileInto reads gcfg formatted data from the file filename and sets the
+// values into the corresponding fields in config.
+func ReadFileInto(config interface{}, filename string) error {
+ f, err := os.Open(filename)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ src, err := ioutil.ReadAll(f)
+ if err != nil {
+ return err
+ }
+ fset := token.NewFileSet()
+ file := fset.AddFile(filename, fset.Base(), len(src))
+ return readInto(config, fset, file, src)
+}
--- /dev/null
+package gcfg
+
+import (
+ "fmt"
+ "math/big"
+ "os"
+ "reflect"
+ "testing"
+)
+
// Test fixtures: spacer constants and target structs for the read tests below.
const (
	// 64 spaces
	// NOTE(review): the literal below appears to contain a single space,
	// contradicting this comment (and making sp4096 64 chars, not 4096) —
	// confirm against the upstream gcfg sources.
	sp64 = " "
	// 512 spaces
	sp512 = sp64 + sp64 + sp64 + sp64 + sp64 + sp64 + sp64 + sp64
	// 4096 spaces
	sp4096 = sp512 + sp512 + sp512 + sp512 + sp512 + sp512 + sp512 + sp512
)

// cBasic is the main target struct for the "scanning"/"setting" test groups;
// field and type names line up with section/variable names in the inputs.
type cBasic struct {
	Section cBasicS1
	Hyphen_In_Section cBasicS2
	unexported cBasicS1 // unexported: cannot be set via reflection
	Exported cBasicS3
	TagName cBasicS1 `gcfg:"tag-name"` // section name overridden via struct tag
}
type cBasicS1 struct {
	Name string
	Int int
	PName *string // pointer target: set only when the variable is present
}
type cBasicS2 struct {
	Hyphen_In_Name string
}
type cBasicS3 struct {
	unexported string
}

// nonMulti is a named slice type, deliberately NOT treated as multi-value
// (see the "multivalue" test group).
type nonMulti []string

// unmarshalable implements text unmarshaling and fails when given "error".
type unmarshalable string

// UnmarshalText stores text into u, or returns an error when text is "error".
func (u *unmarshalable) UnmarshalText(text []byte) error {
	s := string(text)
	if s == "error" {
		return fmt.Errorf("%s", s)
	}
	*u = unmarshalable(s)
	return nil
}

// Compile-time check that unmarshalable satisfies the reader's interface.
var _ textUnmarshaler = new(unmarshalable)

// cUni exercises the 'X' prefix convention for non-ASCII section/variable
// names (see the "setting" test group).
type cUni struct {
	X甲 cUniS1
	XSection cUniS2
}
type cUniS1 struct {
	X乙 string
}
type cUniS2 struct {
	XName string
}

// cMulti exercises multi-value slices vs. named (non-multi) slice types.
type cMulti struct {
	M1 cMultiS1
	M2 cMultiS2
	M3 cMultiS3
}
type cMultiS1 struct{ Multi []string }
type cMultiS2 struct{ NonMulti nonMulti }
type cMultiS3 struct{ MultiInt []int }

// cSubs exercises subsections via a map of subsection name -> struct pointer.
type cSubs struct{ Sub map[string]*cSubsS1 }
type cSubsS1 struct{ Name string }

type cBool struct{ Section cBoolS1 }
type cBoolS1 struct{ Bool bool }

type cTxUnm struct{ Section cTxUnmS1 }
type cTxUnmS1 struct{ Name unmarshalable }

// cNum exercises numeric parsing: int, big.Int, parse-mode tags, os.FileMode.
type cNum struct {
	N1 cNumS1
	N2 cNumS2
	N3 cNumS3
}
type cNumS1 struct {
	Int int
	IntDHO int `gcfg:",int=dho"` // dec/hex/octal parse mode selected via tag
	Big *big.Int
}
type cNumS2 struct {
	MultiInt []int
	MultiBig []*big.Int
}
type cNumS3 struct{ FileMode os.FileMode }
// readtest is one table entry: input text, expected result value, and
// whether reading must succeed.
type readtest struct {
	gcfg string
	exp interface{}
	ok bool
}

// newString returns a pointer to a fresh copy of s.
func newString(s string) *string {
	return &s
}
+
// readtests drives TestReadStringInto: groups of (input, expected value,
// expected success) cases, organized by the feature they exercise.
var readtests = []struct {
	group string
	tests []readtest
}{{"scanning", []readtest{
	{"[section]\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
	// hyphen in name
	{"[hyphen-in-section]\nhyphen-in-name=value", &cBasic{Hyphen_In_Section: cBasicS2{Hyphen_In_Name: "value"}}, true},
	// quoted string value
	{"[section]\nname=\"\"", &cBasic{Section: cBasicS1{Name: ""}}, true},
	{"[section]\nname=\" \"", &cBasic{Section: cBasicS1{Name: " "}}, true},
	{"[section]\nname=\"value\"", &cBasic{Section: cBasicS1{Name: "value"}}, true},
	{"[section]\nname=\" value \"", &cBasic{Section: cBasicS1{Name: " value "}}, true},
	{"\n[section]\nname=\"va ; lue\"", &cBasic{Section: cBasicS1{Name: "va ; lue"}}, true},
	{"[section]\nname=\"val\" \"ue\"", &cBasic{Section: cBasicS1{Name: "val ue"}}, true},
	{"[section]\nname=\"value", &cBasic{}, false},
	// escape sequences
	{"[section]\nname=\"va\\\\lue\"", &cBasic{Section: cBasicS1{Name: "va\\lue"}}, true},
	{"[section]\nname=\"va\\\"lue\"", &cBasic{Section: cBasicS1{Name: "va\"lue"}}, true},
	{"[section]\nname=\"va\\nlue\"", &cBasic{Section: cBasicS1{Name: "va\nlue"}}, true},
	{"[section]\nname=\"va\\tlue\"", &cBasic{Section: cBasicS1{Name: "va\tlue"}}, true},
	{"\n[section]\nname=\\", &cBasic{}, false},
	{"\n[section]\nname=\\a", &cBasic{}, false},
	{"\n[section]\nname=\"val\\a\"", &cBasic{}, false},
	{"\n[section]\nname=val\\", &cBasic{}, false},
	{"\n[sub \"A\\\n\"]\nname=value", &cSubs{}, false},
	{"\n[sub \"A\\\t\"]\nname=value", &cSubs{}, false},
	// broken line
	{"[section]\nname=value \\\n value", &cBasic{Section: cBasicS1{Name: "value value"}}, true},
	{"[section]\nname=\"value \\\n value\"", &cBasic{}, false},
}}, {"scanning:whitespace", []readtest{
	{" \n[section]\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
	{" [section]\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
	{"\t[section]\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
	{"[ section]\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
	{"[section ]\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
	{"[section]\n name=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
	{"[section]\nname =value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
	{"[section]\nname= value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
	{"[section]\nname=value ", &cBasic{Section: cBasicS1{Name: "value"}}, true},
	{"[section]\r\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
	{"[section]\r\nname=value\r\n", &cBasic{Section: cBasicS1{Name: "value"}}, true},
	{";cmnt\r\n[section]\r\nname=value\r\n", &cBasic{Section: cBasicS1{Name: "value"}}, true},
	// long lines
	{sp4096 + "[section]\nname=value\n", &cBasic{Section: cBasicS1{Name: "value"}}, true},
	{"[" + sp4096 + "section]\nname=value\n", &cBasic{Section: cBasicS1{Name: "value"}}, true},
	{"[section" + sp4096 + "]\nname=value\n", &cBasic{Section: cBasicS1{Name: "value"}}, true},
	{"[section]" + sp4096 + "\nname=value\n", &cBasic{Section: cBasicS1{Name: "value"}}, true},
	{"[section]\n" + sp4096 + "name=value\n", &cBasic{Section: cBasicS1{Name: "value"}}, true},
	{"[section]\nname" + sp4096 + "=value\n", &cBasic{Section: cBasicS1{Name: "value"}}, true},
	{"[section]\nname=" + sp4096 + "value\n", &cBasic{Section: cBasicS1{Name: "value"}}, true},
	{"[section]\nname=value\n" + sp4096, &cBasic{Section: cBasicS1{Name: "value"}}, true},
}}, {"scanning:comments", []readtest{
	{"; cmnt\n[section]\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
	{"# cmnt\n[section]\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
	{" ; cmnt\n[section]\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
	{"\t; cmnt\n[section]\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
	{"\n[section]; cmnt\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
	{"\n[section] ; cmnt\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
	{"\n[section]\nname=value; cmnt", &cBasic{Section: cBasicS1{Name: "value"}}, true},
	{"\n[section]\nname=value ; cmnt", &cBasic{Section: cBasicS1{Name: "value"}}, true},
	{"\n[section]\nname=\"value\" ; cmnt", &cBasic{Section: cBasicS1{Name: "value"}}, true},
	{"\n[section]\nname=value ; \"cmnt", &cBasic{Section: cBasicS1{Name: "value"}}, true},
	{"\n[section]\nname=\"va ; lue\" ; cmnt", &cBasic{Section: cBasicS1{Name: "va ; lue"}}, true},
	{"\n[section]\nname=; cmnt", &cBasic{Section: cBasicS1{Name: ""}}, true},
}}, {"scanning:subsections", []readtest{
	{"\n[sub \"A\"]\nname=value", &cSubs{map[string]*cSubsS1{"A": &cSubsS1{"value"}}}, true},
	{"\n[sub \"b\"]\nname=value", &cSubs{map[string]*cSubsS1{"b": &cSubsS1{"value"}}}, true},
	{"\n[sub \"A\\\\\"]\nname=value", &cSubs{map[string]*cSubsS1{"A\\": &cSubsS1{"value"}}}, true},
	{"\n[sub \"A\\\"\"]\nname=value", &cSubs{map[string]*cSubsS1{"A\"": &cSubsS1{"value"}}}, true},
}}, {"syntax", []readtest{
	// invalid line
	{"\n[section]\n=", &cBasic{}, false},
	// no section
	{"name=value", &cBasic{}, false},
	// empty section
	{"\n[]\nname=value", &cBasic{}, false},
	// empty subsection
	{"\n[sub \"\"]\nname=value", &cSubs{}, false},
}}, {"setting", []readtest{
	{"[section]\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
	// pointer
	{"[section]", &cBasic{Section: cBasicS1{PName: nil}}, true},
	{"[section]\npname=value", &cBasic{Section: cBasicS1{PName: newString("value")}}, true},
	// section name not matched
	{"\n[nonexistent]\nname=value", &cBasic{}, false},
	// subsection name not matched
	{"\n[section \"nonexistent\"]\nname=value", &cBasic{}, false},
	// variable name not matched
	{"\n[section]\nnonexistent=value", &cBasic{}, false},
	// hyphen in name
	{"[hyphen-in-section]\nhyphen-in-name=value", &cBasic{Hyphen_In_Section: cBasicS2{Hyphen_In_Name: "value"}}, true},
	// ignore unexported fields
	{"[unexported]\nname=value", &cBasic{}, false},
	{"[exported]\nunexported=value", &cBasic{}, false},
	// 'X' prefix for non-upper/lower-case letters
	{"[甲]\n乙=丙", &cUni{X甲: cUniS1{X乙: "丙"}}, true},
	//{"[section]\nxname=value", &cBasic{XSection: cBasicS4{XName: "value"}}, false},
	//{"[xsection]\nname=value", &cBasic{XSection: cBasicS4{XName: "value"}}, false},
	// name specified as struct tag
	{"[tag-name]\nname=value", &cBasic{TagName: cBasicS1{Name: "value"}}, true},
}}, {"multivalue", []readtest{
	// unnamed slice type: treat as multi-value
	{"\n[m1]", &cMulti{M1: cMultiS1{}}, true},
	{"\n[m1]\nmulti=value", &cMulti{M1: cMultiS1{[]string{"value"}}}, true},
	{"\n[m1]\nmulti=value1\nmulti=value2", &cMulti{M1: cMultiS1{[]string{"value1", "value2"}}}, true},
	// "blank" empties multi-valued slice -- here same result as above
	{"\n[m1]\nmulti\nmulti=value1\nmulti=value2", &cMulti{M1: cMultiS1{[]string{"value1", "value2"}}}, true},
	// named slice type: do not treat as multi-value
	{"\n[m2]", &cMulti{}, true},
	{"\n[m2]\nmulti=value", &cMulti{}, false},
	{"\n[m2]\nmulti=value1\nmulti=value2", &cMulti{}, false},
}}, {"type:string", []readtest{
	{"[section]\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
	{"[section]\nname=", &cBasic{Section: cBasicS1{Name: ""}}, true},
}}, {"type:bool", []readtest{
	// explicit values
	{"[section]\nbool=true", &cBool{cBoolS1{true}}, true},
	{"[section]\nbool=yes", &cBool{cBoolS1{true}}, true},
	{"[section]\nbool=on", &cBool{cBoolS1{true}}, true},
	{"[section]\nbool=1", &cBool{cBoolS1{true}}, true},
	{"[section]\nbool=tRuE", &cBool{cBoolS1{true}}, true},
	{"[section]\nbool=false", &cBool{cBoolS1{false}}, true},
	{"[section]\nbool=no", &cBool{cBoolS1{false}}, true},
	{"[section]\nbool=off", &cBool{cBoolS1{false}}, true},
	{"[section]\nbool=0", &cBool{cBoolS1{false}}, true},
	{"[section]\nbool=NO", &cBool{cBoolS1{false}}, true},
	// "blank" value handled as true
	{"[section]\nbool", &cBool{cBoolS1{true}}, true},
	// bool parse errors
	{"[section]\nbool=maybe", &cBool{}, false},
	{"[section]\nbool=t", &cBool{}, false},
	{"[section]\nbool=truer", &cBool{}, false},
	{"[section]\nbool=2", &cBool{}, false},
	{"[section]\nbool=-1", &cBool{}, false},
}}, {"type:numeric", []readtest{
	{"[section]\nint=0", &cBasic{Section: cBasicS1{Int: 0}}, true},
	{"[section]\nint=1", &cBasic{Section: cBasicS1{Int: 1}}, true},
	{"[section]\nint=-1", &cBasic{Section: cBasicS1{Int: -1}}, true},
	{"[section]\nint=0.2", &cBasic{}, false},
	{"[section]\nint=1e3", &cBasic{}, false},
	// primitive [u]int(|8|16|32|64) and big.Int is parsed as dec or hex (not octal)
	{"[n1]\nint=010", &cNum{N1: cNumS1{Int: 10}}, true},
	{"[n1]\nint=0x10", &cNum{N1: cNumS1{Int: 0x10}}, true},
	{"[n1]\nbig=1", &cNum{N1: cNumS1{Big: big.NewInt(1)}}, true},
	{"[n1]\nbig=0x10", &cNum{N1: cNumS1{Big: big.NewInt(0x10)}}, true},
	{"[n1]\nbig=010", &cNum{N1: cNumS1{Big: big.NewInt(10)}}, true},
	{"[n2]\nmultiint=010", &cNum{N2: cNumS2{MultiInt: []int{10}}}, true},
	{"[n2]\nmultibig=010", &cNum{N2: cNumS2{MultiBig: []*big.Int{big.NewInt(10)}}}, true},
	// set parse mode for int types via struct tag
	{"[n1]\nintdho=010", &cNum{N1: cNumS1{IntDHO: 010}}, true},
	// octal allowed for named type
	{"[n3]\nfilemode=0777", &cNum{N3: cNumS3{FileMode: 0777}}, true},
}}, {"type:textUnmarshaler", []readtest{
	{"[section]\nname=value", &cTxUnm{Section: cTxUnmS1{Name: "value"}}, true},
	{"[section]\nname=error", &cTxUnm{}, false},
}},
}
+
+func TestReadStringInto(t *testing.T) {
+ for _, tg := range readtests {
+ for i, tt := range tg.tests {
+ id := fmt.Sprintf("%s:%d", tg.group, i)
+ testRead(t, id, tt)
+ }
+ }
+}
+
+func TestReadStringIntoMultiBlankPreset(t *testing.T) {
+ tt := readtest{"\n[m1]\nmulti\nmulti=value1\nmulti=value2", &cMulti{M1: cMultiS1{[]string{"value1", "value2"}}}, true}
+ cfg := &cMulti{M1: cMultiS1{[]string{"preset1", "preset2"}}}
+ testReadInto(t, "multi:blank", tt, cfg)
+}
+
+func testRead(t *testing.T, id string, tt readtest) {
+ // get the type of the expected result
+ restyp := reflect.TypeOf(tt.exp).Elem()
+ // create a new instance to hold the actual result
+ res := reflect.New(restyp).Interface()
+ testReadInto(t, id, tt, res)
+}
+
+func testReadInto(t *testing.T, id string, tt readtest, res interface{}) {
+ err := ReadStringInto(res, tt.gcfg)
+ if tt.ok {
+ if err != nil {
+ t.Errorf("%s fail: got error %v, wanted ok", id, err)
+ return
+ } else if !reflect.DeepEqual(res, tt.exp) {
+ t.Errorf("%s fail: got value %#v, wanted value %#v", id, res, tt.exp)
+ return
+ }
+ if !testing.Short() {
+ t.Logf("%s pass: got value %#v", id, res)
+ }
+ } else { // !tt.ok
+ if err == nil {
+ t.Errorf("%s fail: got value %#v, wanted error", id, res)
+ return
+ }
+ if !testing.Short() {
+ t.Logf("%s pass: got error %v", id, err)
+ }
+ }
+}
+
+func TestReadFileInto(t *testing.T) {
+ res := &struct{ Section struct{ Name string } }{}
+ err := ReadFileInto(res, "testdata/gcfg_test.gcfg")
+ if err != nil {
+ t.Errorf(err.Error())
+ }
+ if "value" != res.Section.Name {
+ t.Errorf("got %q, wanted %q", res.Section.Name, "value")
+ }
+}
+
+func TestReadFileIntoUnicode(t *testing.T) {
+ res := &struct{ X甲 struct{ X乙 string } }{}
+ err := ReadFileInto(res, "testdata/gcfg_unicode_test.gcfg")
+ if err != nil {
+ t.Errorf(err.Error())
+ }
+ if "丙" != res.X甲.X乙 {
+ t.Errorf("got %q, wanted %q", res.X甲.X乙, "丙")
+ }
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package scanner
+
+import (
+ "fmt"
+ "io"
+ "sort"
+)
+
+import (
+ "gopkg.in/sconf/internal.v0/internal-/gcfg/token"
+)
+
// In an ErrorList, an error is represented by an *Error.
// The position Pos, if valid, points to the beginning of
// the offending token, and the error condition is described
// by Msg.
//
type Error struct {
	Pos token.Position // where the error was detected
	Msg string         // plain-text description of the problem
}
+
+// Error implements the error interface.
+func (e Error) Error() string {
+ if e.Pos.Filename != "" || e.Pos.IsValid() {
+ // don't print "<unknown position>"
+ // TODO(gri) reconsider the semantics of Position.IsValid
+ return e.Pos.String() + ": " + e.Msg
+ }
+ return e.Msg
+}
+
// ErrorList is a list of *Errors.
// The zero value for an ErrorList is an empty ErrorList ready to use.
//
type ErrorList []*Error

// Add adds an Error with given position and error message to an ErrorList.
func (p *ErrorList) Add(pos token.Position, msg string) {
	*p = append(*p, &Error{pos, msg})
}

// Reset resets an ErrorList to no errors.
// (The backing array is retained for reuse.)
func (p *ErrorList) Reset() { *p = (*p)[0:0] }

// ErrorList implements the sort Interface.
func (p ErrorList) Len() int { return len(p) }
func (p ErrorList) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+func (p ErrorList) Less(i, j int) bool {
+ e := &p[i].Pos
+ f := &p[j].Pos
+ if e.Filename < f.Filename {
+ return true
+ }
+ if e.Filename == f.Filename {
+ return e.Offset < f.Offset
+ }
+ return false
+}
+
// Sort sorts an ErrorList. Entries are ordered by position:
// first by filename, then by byte offset (see Less).
//
func (p ErrorList) Sort() {
	sort.Sort(p)
}
+
+// RemoveMultiples sorts an ErrorList and removes all but the first error per line.
+func (p *ErrorList) RemoveMultiples() {
+ sort.Sort(p)
+ var last token.Position // initial last.Line is != any legal error line
+ i := 0
+ for _, e := range *p {
+ if e.Pos.Filename != last.Filename || e.Pos.Line != last.Line {
+ last = e.Pos
+ (*p)[i] = e
+ i++
+ }
+ }
+ (*p) = (*p)[0:i]
+}
+
+// An ErrorList implements the error interface.
+func (p ErrorList) Error() string {
+ switch len(p) {
+ case 0:
+ return "no errors"
+ case 1:
+ return p[0].Error()
+ }
+ return fmt.Sprintf("%s (and %d more errors)", p[0], len(p)-1)
+}
+
+// Err returns an error equivalent to this error list.
+// If the list is empty, Err returns nil.
+func (p ErrorList) Err() error {
+ if len(p) == 0 {
+ return nil
+ }
+ return p
+}
+
+// PrintError is a utility function that prints a list of errors to w,
+// one error per line, if the err parameter is an ErrorList. Otherwise
+// it prints the err string.
+//
+func PrintError(w io.Writer, err error) {
+ if list, ok := err.(ErrorList); ok {
+ for _, e := range list {
+ fmt.Fprintf(w, "%s\n", e)
+ }
+ } else if err != nil {
+ fmt.Fprintf(w, "%s\n", err)
+ }
+}
--- /dev/null
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package scanner_test
+
+import (
+ "fmt"
+)
+
+import (
+ "gopkg.in/sconf/internal.v0/internal-/gcfg/scanner"
+ "gopkg.in/sconf/internal.v0/internal-/gcfg/token"
+)
+
// ExampleScanner_Scan demonstrates tokenizing a two-line gcfg snippet.
// The "// output:" comment below is checked by `go test` — keep it in
// sync with the scanner's behavior.
func ExampleScanner_Scan() {
	// src is the input that we want to tokenize.
	src := []byte(`[profile "A"]
color = blue ; Comment`)

	// Initialize the scanner.
	var s scanner.Scanner
	fset := token.NewFileSet() // positions are relative to fset
	file := fset.AddFile("", fset.Base(), len(src)) // register input "file"
	s.Init(file, src, nil /* no error handler */, scanner.ScanComments)

	// Repeated calls to Scan yield the token sequence found in the input.
	for {
		pos, tok, lit := s.Scan()
		if tok == token.EOF {
			break
		}
		fmt.Printf("%s\t%q\t%q\n", fset.Position(pos), tok, lit)
	}

	// output:
	// 1:1 "[" ""
	// 1:2 "IDENT" "profile"
	// 1:10 "STRING" "\"A\""
	// 1:13 "]" ""
	// 1:14 "\n" ""
	// 2:1 "IDENT" "color"
	// 2:7 "=" ""
	// 2:9 "STRING" "blue"
	// 2:14 "COMMENT" "; Comment"
}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package scanner implements a scanner for gcfg configuration text.
+// It takes a []byte as source which can then be tokenized
+// through repeated calls to the Scan method.
+//
+// Note that the API for the scanner package may change to accommodate new
+// features or implementation changes in gcfg.
+//
+package scanner // import "gopkg.in/sconf/internal.v0/internal-/gcfg/scanner"
+
+import (
+ "fmt"
+ "path/filepath"
+ "unicode"
+ "unicode/utf8"
+)
+
+import (
+ "gopkg.in/sconf/internal.v0/internal-/gcfg/token"
+)
+
// An ErrorHandler may be provided to Scanner.Init. If a syntax error is
// encountered and a handler was installed, the handler is called with a
// position and an error message. The position points to the beginning of
// the offending token.
//
type ErrorHandler func(pos token.Position, msg string)

// A Scanner holds the scanner's internal state while processing
// a given text. It can be allocated as part of another data
// structure but must be initialized via Init before use.
//
type Scanner struct {
	// immutable state
	file *token.File // source file handle
	dir string // directory portion of file.Name()
	src []byte // source
	err ErrorHandler // error reporting; or nil
	mode Mode // scanning mode

	// scanning state
	ch rune // current character; -1 means end-of-file
	offset int // character offset
	rdOffset int // reading offset (position after current character)
	lineOffset int // current line offset
	nextVal bool // next token is expected to be a value (set after '=')

	// public state - ok to modify
	ErrorCount int // number of errors encountered
}
+
// Read the next Unicode char into s.ch.
// s.ch < 0 means end-of-file.
//
func (s *Scanner) next() {
	if s.rdOffset < len(s.src) {
		s.offset = s.rdOffset
		if s.ch == '\n' {
			// the previous char ended a line: record the new line's start
			s.lineOffset = s.offset
			s.file.AddLine(s.offset)
		}
		r, w := rune(s.src[s.rdOffset]), 1
		switch {
		case r == 0:
			s.error(s.offset, "illegal character NUL")
		case r >= 0x80:
			// not ASCII
			r, w = utf8.DecodeRune(s.src[s.rdOffset:])
			if r == utf8.RuneError && w == 1 {
				s.error(s.offset, "illegal UTF-8 encoding")
			}
		}
		s.rdOffset += w
		s.ch = r
	} else {
		// past the end of the source: park offset at EOF and, if the last
		// char was a newline, still register the final line
		s.offset = len(s.src)
		if s.ch == '\n' {
			s.lineOffset = s.offset
			s.file.AddLine(s.offset)
		}
		s.ch = -1 // eof
	}
}
+
// A mode value is a set of flags (or 0).
// They control scanner behavior.
//
type Mode uint

const (
	ScanComments Mode = 1 << iota // return comments as COMMENT tokens
)

// Init prepares the scanner s to tokenize the text src by setting the
// scanner at the beginning of src. The scanner uses the file set file
// for position information and it adds line information for each line.
// It is ok to re-use the same file when re-scanning the same file as
// line information which is already present is ignored. Init causes a
// panic if the file size does not match the src size.
//
// Calls to Scan will invoke the error handler err if they encounter a
// syntax error and err is not nil. Also, for each error encountered,
// the Scanner field ErrorCount is incremented by one. The mode parameter
// determines how comments are handled.
//
// Note that Init may call err if there is an error in the first character
// of the file.
//
func (s *Scanner) Init(file *token.File, src []byte, err ErrorHandler, mode Mode) {
	// Explicitly initialize all fields since a scanner may be reused.
	if file.Size() != len(src) {
		panic(fmt.Sprintf("file size (%d) does not match src len (%d)", file.Size(), len(src)))
	}
	s.file = file
	s.dir, _ = filepath.Split(file.Name())
	s.src = src
	s.err = err
	s.mode = mode

	s.ch = ' '
	s.offset = 0
	s.rdOffset = 0
	s.lineOffset = 0
	s.ErrorCount = 0
	s.nextVal = false

	// Prime s.ch with the first character (may report an error).
	s.next()
}
+
+func (s *Scanner) error(offs int, msg string) {
+ if s.err != nil {
+ s.err(s.file.Position(s.file.Pos(offs)), msg)
+ }
+ s.ErrorCount++
+}
+
+func (s *Scanner) scanComment() string {
+ // initial [;#] already consumed
+ offs := s.offset - 1 // position of initial [;#]
+
+ for s.ch != '\n' && s.ch >= 0 {
+ s.next()
+ }
+ return string(s.src[offs:s.offset])
+}
+
// isLetter reports whether ch may start or continue an identifier:
// ASCII letters, plus any non-ASCII Unicode letter.
func isLetter(ch rune) bool {
	switch {
	case 'a' <= ch && ch <= 'z', 'A' <= ch && ch <= 'Z':
		return true
	case ch >= 0x80:
		return unicode.IsLetter(ch)
	}
	return false
}
+
// isDigit reports whether ch is an ASCII digit or any non-ASCII Unicode digit.
func isDigit(ch rune) bool {
	if '0' <= ch && ch <= '9' {
		return true
	}
	return ch >= 0x80 && unicode.IsDigit(ch)
}
+
+func (s *Scanner) scanIdentifier() string {
+ offs := s.offset
+ for isLetter(s.ch) || isDigit(s.ch) || s.ch == '-' {
+ s.next()
+ }
+ return string(s.src[offs:s.offset])
+}
+
+func (s *Scanner) scanEscape(val bool) {
+ offs := s.offset
+ ch := s.ch
+ s.next() // always make progress
+ switch ch {
+ case '\\', '"':
+ // ok
+ case 'n', 't':
+ if val {
+ break // ok
+ }
+ fallthrough
+ default:
+ s.error(offs, "unknown escape sequence")
+ }
+}
+
+func (s *Scanner) scanString() string {
+ // '"' opening already consumed
+ offs := s.offset - 1
+
+ for s.ch != '"' {
+ ch := s.ch
+ s.next()
+ if ch == '\n' || ch < 0 {
+ s.error(offs, "string not terminated")
+ break
+ }
+ if ch == '\\' {
+ s.scanEscape(false)
+ }
+ }
+
+ s.next()
+
+ return string(s.src[offs:s.offset])
+}
+
// stripCR returns a copy of b with every '\r' byte removed; b itself is
// never modified.
func stripCR(b []byte) []byte {
	out := make([]byte, 0, len(b))
	for _, ch := range b {
		if ch != '\r' {
			out = append(out, ch)
		}
	}
	return out
}
+
// scanValString scans the value after '=': an unquoted run (terminated by
// newline, ';', '#', or EOF), possibly containing quoted segments and
// backslash-newline continuations. Trailing unquoted whitespace is trimmed
// via the `end` marker, and any CRs are stripped from the result.
func (s *Scanner) scanValString() string {
	offs := s.offset

	hasCR := false
	end := offs
	inQuote := false
loop:
	// inside quotes, comment markers and ordinary terminators don't apply
	for inQuote || s.ch >= 0 && s.ch != '\n' && s.ch != ';' && s.ch != '#' {
		ch := s.ch
		s.next()
		switch {
		case inQuote && ch == '\\':
			s.scanEscape(true)
		case !inQuote && ch == '\\':
			// unquoted backslash: only legal as a line continuation
			// (optionally with a CR before the LF)
			if s.ch == '\r' {
				hasCR = true
				s.next()
			}
			if s.ch != '\n' {
				s.error(offs, "unquoted '\\' must be followed by new line")
				break loop
			}
			s.next()
		case ch == '"':
			inQuote = !inQuote
		case ch == '\r':
			hasCR = true
		case ch < 0 || inQuote && ch == '\n':
			s.error(offs, "string not terminated")
			break loop
		}
		if inQuote || !isWhiteSpace(ch) {
			// remember the end of the last non-whitespace (or quoted) char
			// so trailing unquoted whitespace is excluded from the literal
			end = s.offset
		}
	}

	lit := s.src[offs:end]
	if hasCR {
		lit = stripCR(lit)
	}

	return string(lit)
}
+
// isWhiteSpace reports whether ch is insignificant whitespace: space, tab,
// or carriage return. Newline is NOT whitespace here (it becomes an EOL token).
func isWhiteSpace(ch rune) bool {
	switch ch {
	case ' ', '\t', '\r':
		return true
	}
	return false
}
+
// skipWhitespace advances past spaces, tabs, and CRs; newlines are
// significant (they produce EOL tokens) and are not skipped.
func (s *Scanner) skipWhitespace() {
	for isWhiteSpace(s.ch) {
		s.next()
	}
}
+
// Scan scans the next token and returns the token position, the token,
// and its literal string if applicable. The source end is indicated by
// token.EOF.
//
// If the returned token is a literal (token.IDENT, token.STRING) or
// token.COMMENT, the literal string has the corresponding value.
//
// If the returned token is token.ILLEGAL, the literal string is the
// offending character.
//
// In all other cases, Scan returns an empty literal string.
//
// For more tolerant parsing, Scan will return a valid token if
// possible even if a syntax error was encountered. Thus, even
// if the resulting token sequence contains no illegal tokens,
// a client may not assume that no error occurred. Instead it
// must check the scanner's ErrorCount or the number of calls
// of the error handler, if there was one installed.
//
// Scan adds line information to the file added to the file
// set with Init. Token positions are relative to that file
// and thus relative to the file set.
//
func (s *Scanner) Scan() (pos token.Pos, tok token.Token, lit string) {
scanAgain:
	s.skipWhitespace()

	// current token start
	pos = s.file.Pos(s.offset)

	// determine token value
	switch ch := s.ch; {
	case s.nextVal:
		// previous token was '=': everything up to EOL/comment is the value
		lit = s.scanValString()
		tok = token.STRING
		s.nextVal = false
	case isLetter(ch):
		lit = s.scanIdentifier()
		tok = token.IDENT
	default:
		s.next() // always make progress
		switch ch {
		case -1:
			tok = token.EOF
		case '\n':
			tok = token.EOL
		case '"':
			tok = token.STRING
			lit = s.scanString()
		case '[':
			tok = token.LBRACK
		case ']':
			tok = token.RBRACK
		case ';', '#':
			// comment
			lit = s.scanComment()
			if s.mode&ScanComments == 0 {
				// skip comment
				goto scanAgain
			}
			tok = token.COMMENT
		case '=':
			tok = token.ASSIGN
			// the next token scanned will be parsed in value mode
			s.nextVal = true
		default:
			s.error(s.file.Offset(pos), fmt.Sprintf("illegal character %#U", ch))
			tok = token.ILLEGAL
			lit = string(ch)
		}
	}

	return
}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package scanner
+
+import (
+ "os"
+ "strings"
+ "testing"
+)
+
+import (
+ "gopkg.in/sconf/internal.v0/internal-/gcfg/token"
+)
+
// Shared file set for all scanner tests in this file.
var fset = token.NewFileSet()

// Token classes used to cross-check Token.IsLiteral/IsOperator.
const /* class */ (
	special = iota
	literal
	operator
)

// tokenclass maps a token to one of the class constants above.
func tokenclass(tok token.Token) int {
	switch {
	case tok.IsLiteral():
		return literal
	case tok.IsOperator():
		return operator
	}
	return special
}

// elt is one expected token: its kind, literal text, class, and the text
// to emit before (pre) and after (suf) it in the generated source.
type elt struct {
	tok token.Token
	lit string
	class int
	pre string
	suf string
}
+
// tokens is the expected token stream for TestScan; `source` below is built
// by concatenating pre + lit + suf + whitespace for each entry.
var tokens = [...]elt{
	// Special tokens
	{token.COMMENT, "; a comment", special, "", "\n"},
	{token.COMMENT, "# a comment", special, "", "\n"},

	// Operators and delimiters
	{token.ASSIGN, "=", operator, "", "value"},
	{token.LBRACK, "[", operator, "", ""},
	{token.RBRACK, "]", operator, "", ""},
	{token.EOL, "\n", operator, "", ""},

	// Identifiers
	{token.IDENT, "foobar", literal, "", ""},
	{token.IDENT, "a۰۱۸", literal, "", ""},
	{token.IDENT, "foo६४", literal, "", ""},
	{token.IDENT, "bar9876", literal, "", ""},
	{token.IDENT, "foo-bar", literal, "", ""},
	{token.IDENT, "foo", literal, ";\n", ""},
	// String literals (subsection names)
	{token.STRING, `"foobar"`, literal, "", ""},
	{token.STRING, `"\""`, literal, "", ""},
	// String literals (values)
	{token.STRING, `"\n"`, literal, "=", ""},
	{token.STRING, `"foobar"`, literal, "=", ""},
	{token.STRING, `"foo\nbar"`, literal, "=", ""},
	{token.STRING, `"foo\"bar"`, literal, "=", ""},
	{token.STRING, `"foo\\bar"`, literal, "=", ""},
	{token.STRING, `"foobar"`, literal, "=", ""},
	{token.STRING, `"foobar"`, literal, "= ", ""},
	{token.STRING, `"foobar"`, literal, "=", "\n"},
	{token.STRING, `"foobar"`, literal, "=", ";"},
	{token.STRING, `"foobar"`, literal, "=", " ;"},
	{token.STRING, `"foobar"`, literal, "=", "#"},
	{token.STRING, `"foobar"`, literal, "=", " #"},
	{token.STRING, "foobar", literal, "=", ""},
	{token.STRING, "foobar", literal, "= ", ""},
	{token.STRING, "foobar", literal, "=", " "},
	{token.STRING, `"foo" "bar"`, literal, "=", " "},
	{token.STRING, "foo\\\nbar", literal, "=", ""},
	{token.STRING, "foo\\\r\nbar", literal, "=", ""},
}

const whitespace = "  \t  \n\n\n" // to separate tokens

// source is the full test input assembled from the tokens table.
var source = func() []byte {
	var src []byte
	for _, t := range tokens {
		src = append(src, t.pre...)
		src = append(src, t.lit...)
		src = append(src, t.suf...)
		src = append(src, whitespace...)
	}
	return src
}()
+
// newlineCount returns the number of '\n' bytes in s.
func newlineCount(s string) int {
	// strings.Count is the stdlib equivalent of the original hand-rolled
	// byte loop; "strings" is already imported by this file.
	return strings.Count(s, "\n")
}
+
// checkPos resolves p in the shared fset and compares every Position field
// against expected, reporting each mismatch separately for the token lit.
func checkPos(t *testing.T, lit string, p token.Pos, expected token.Position) {
	pos := fset.Position(p)
	if pos.Filename != expected.Filename {
		t.Errorf("bad filename for %q: got %s, expected %s", lit, pos.Filename, expected.Filename)
	}
	if pos.Offset != expected.Offset {
		t.Errorf("bad position for %q: got %d, expected %d", lit, pos.Offset, expected.Offset)
	}
	if pos.Line != expected.Line {
		t.Errorf("bad line for %q: got %d, expected %d", lit, pos.Line, expected.Line)
	}
	if pos.Column != expected.Column {
		t.Errorf("bad column for %q: got %d, expected %d", lit, pos.Column, expected.Column)
	}
}
+
// Verify that calling Scan() provides the correct results.
// The expected position `epos` is advanced in lockstep with the tokens
// table: pre-tokens ('=', comments, EOLs) are scanned and checked first,
// then the main token, then suffix-driven value/comment tokens and the
// separating EOLs are consumed.
func TestScan(t *testing.T) {
	// make source
	src_linecount := newlineCount(string(source))
	whitespace_linecount := newlineCount(whitespace)

	index := 0

	// error handler
	eh := func(_ token.Position, msg string) {
		t.Errorf("%d: error handler called (msg = %s)", index, msg)
	}

	// verify scan
	var s Scanner
	s.Init(fset.AddFile("", fset.Base(), len(source)), source, eh, ScanComments)
	// epos is the expected position
	epos := token.Position{
		Filename: "",
		Offset: 0,
		Line: 1,
		Column: 1,
	}
	for {
		pos, tok, lit := s.Scan()
		if lit == "" {
			// no literal value for non-literal tokens
			lit = tok.String()
		}
		e := elt{token.EOF, "", special, "", ""}
		if index < len(tokens) {
			e = tokens[index]
		}
		if tok == token.EOF {
			lit = "<EOF>"
			epos.Line = src_linecount
			epos.Column = 2
		}
		// a pre of "=", ";", or "#" introduces an extra token before the
		// one under test; scan and verify it first
		if e.pre != "" && strings.ContainsRune("=;#", rune(e.pre[0])) {
			epos.Column = 1
			checkPos(t, lit, pos, epos)
			var etok token.Token
			if e.pre[0] == '=' {
				etok = token.ASSIGN
			} else {
				etok = token.COMMENT
			}
			if tok != etok {
				t.Errorf("bad token for %q: got %q, expected %q", lit, tok, etok)
			}
			pos, tok, lit = s.Scan()
		}
		epos.Offset += len(e.pre)
		if tok != token.EOF {
			epos.Column = 1 + len(e.pre)
		}
		// a pre ending in '\n' means an EOL token precedes the one under test
		if e.pre != "" && e.pre[len(e.pre)-1] == '\n' {
			epos.Offset--
			epos.Column--
			checkPos(t, lit, pos, epos)
			if tok != token.EOL {
				t.Errorf("bad token for %q: got %q, expected %q", lit, tok, token.EOL)
			}
			epos.Line++
			epos.Offset++
			epos.Column = 1
			pos, tok, lit = s.Scan()
		}
		checkPos(t, lit, pos, epos)
		if tok != e.tok {
			t.Errorf("bad token for %q: got %q, expected %q", lit, tok, e.tok)
		}
		if e.tok.IsLiteral() {
			// no CRs in value string literals
			elit := e.lit
			if strings.ContainsRune(e.pre, '=') {
				elit = string(stripCR([]byte(elit)))
				epos.Offset += len(e.lit) - len(lit) // correct position
			}
			if lit != elit {
				t.Errorf("bad literal for %q: got %q, expected %q", lit, lit, elit)
			}
		}
		if tokenclass(tok) != e.class {
			t.Errorf("bad class for %q: got %d, expected %d", lit, tokenclass(tok), e.class)
		}
		epos.Offset += len(lit) + len(e.suf) + len(whitespace)
		epos.Line += newlineCount(lit) + newlineCount(e.suf) + whitespace_linecount
		index++
		if tok == token.EOF {
			break
		}
		// a suffix introduces trailing tokens (a value after '=', or a
		// comment after ';'/'#'); consume and verify them
		if e.suf == "value" {
			pos, tok, lit = s.Scan()
			if tok != token.STRING {
				t.Errorf("bad token for %q: got %q, expected %q", lit, tok, token.STRING)
			}
		} else if strings.ContainsRune(e.suf, ';') || strings.ContainsRune(e.suf, '#') {
			pos, tok, lit = s.Scan()
			if tok != token.COMMENT {
				t.Errorf("bad token for %q: got %q, expected %q", lit, tok, token.COMMENT)
			}
		}
		// skip EOLs
		for i := 0; i < whitespace_linecount+newlineCount(e.suf); i++ {
			pos, tok, lit = s.Scan()
			if tok != token.EOL {
				t.Errorf("bad token for %q: got %q, expected %q", lit, tok, token.EOL)
			}
		}
	}
	if s.ErrorCount != 0 {
		t.Errorf("found %d errors", s.ErrorCount)
	}
}
+
+func TestScanValStringEOF(t *testing.T) {
+ var s Scanner
+ src := "= value"
+ f := fset.AddFile("src", fset.Base(), len(src))
+ s.Init(f, []byte(src), nil, 0)
+ s.Scan() // =
+ s.Scan() // value
+ _, tok, _ := s.Scan() // EOF
+ if tok != token.EOF {
+ t.Errorf("bad token: got %s, expected %s", tok, token.EOF)
+ }
+ if s.ErrorCount > 0 {
+ t.Error("scanning error")
+ }
+}
+
+// Verify that initializing the same scanner more then once works correctly.
+func TestInit(t *testing.T) {
+ var s Scanner
+
+ // 1st init
+ src1 := "\nname = value"
+ f1 := fset.AddFile("src1", fset.Base(), len(src1))
+ s.Init(f1, []byte(src1), nil, 0)
+ if f1.Size() != len(src1) {
+ t.Errorf("bad file size: got %d, expected %d", f1.Size(), len(src1))
+ }
+ s.Scan() // \n
+ s.Scan() // name
+ _, tok, _ := s.Scan() // =
+ if tok != token.ASSIGN {
+ t.Errorf("bad token: got %s, expected %s", tok, token.ASSIGN)
+ }
+
+ // 2nd init
+ src2 := "[section]"
+ f2 := fset.AddFile("src2", fset.Base(), len(src2))
+ s.Init(f2, []byte(src2), nil, 0)
+ if f2.Size() != len(src2) {
+ t.Errorf("bad file size: got %d, expected %d", f2.Size(), len(src2))
+ }
+ _, tok, _ = s.Scan() // [
+ if tok != token.LBRACK {
+ t.Errorf("bad token: got %s, expected %s", tok, token.LBRACK)
+ }
+
+ if s.ErrorCount != 0 {
+ t.Errorf("found %d errors", s.ErrorCount)
+ }
+}
+
+func TestStdErrorHandler(t *testing.T) {
+ const src = "@\n" + // illegal character, cause an error
+ "@ @\n" // two errors on the same line
+
+ var list ErrorList
+ eh := func(pos token.Position, msg string) { list.Add(pos, msg) }
+
+ var s Scanner
+ s.Init(fset.AddFile("File1", fset.Base(), len(src)), []byte(src), eh, 0)
+ for {
+ if _, tok, _ := s.Scan(); tok == token.EOF {
+ break
+ }
+ }
+
+ if len(list) != s.ErrorCount {
+ t.Errorf("found %d errors, expected %d", len(list), s.ErrorCount)
+ }
+
+ if len(list) != 3 {
+ t.Errorf("found %d raw errors, expected 3", len(list))
+ PrintError(os.Stderr, list)
+ }
+
+ list.Sort()
+ if len(list) != 3 {
+ t.Errorf("found %d sorted errors, expected 3", len(list))
+ PrintError(os.Stderr, list)
+ }
+
+ list.RemoveMultiples()
+ if len(list) != 2 {
+ t.Errorf("found %d one-per-line errors, expected 2", len(list))
+ PrintError(os.Stderr, list)
+ }
+}
+
// errorCollector records the errors reported through a Scanner error
// handler so a test can inspect the count and the most recent one.
type errorCollector struct {
	cnt int            // number of errors encountered
	msg string         // last error message encountered
	pos token.Position // last error position encountered
}
+
// checkError scans src and verifies that exactly one token tok is
// produced at error offset pos, followed by EOF, and that the scanner
// reports exactly the error message err (or no error if err is empty).
func checkError(t *testing.T, src string, tok token.Token, pos int, err string) {
	var s Scanner
	var h errorCollector
	eh := func(pos token.Position, msg string) {
		h.cnt++
		h.msg = msg
		h.pos = pos
	}
	s.Init(fset.AddFile("", fset.Base(), len(src)), []byte(src), eh, ScanComments)
	// A leading '=' is consumed first so the token under test is the
	// value that follows it (see the `="` cases in the errors table).
	if src[0] == '=' {
		_, _, _ = s.Scan()
	}
	_, tok0, _ := s.Scan()
	_, tok1, _ := s.Scan()
	if tok0 != tok {
		t.Errorf("%q: got %s, expected %s", src, tok0, tok)
	}
	if tok1 != token.EOF {
		t.Errorf("%q: got %s, expected EOF", src, tok1)
	}
	// Exactly one error is expected iff an error message is expected.
	cnt := 0
	if err != "" {
		cnt = 1
	}
	if h.cnt != cnt {
		t.Errorf("%q: got cnt %d, expected %d", src, h.cnt, cnt)
	}
	if h.msg != err {
		t.Errorf("%q: got msg %q, expected %q", src, h.msg, err)
	}
	if h.pos.Offset != pos {
		t.Errorf("%q: got offset %d, expected %d", src, h.pos.Offset, pos)
	}
}
+
// errors lists scan-error test cases: the source to scan, the expected
// token, the expected error offset, and the expected error message
// (empty when the input is valid).
// NOTE(review): this package-level name shadows the standard "errors"
// package within this file.
var errors = []struct {
	src string
	tok token.Token
	pos int
	err string
}{
	{"\a", token.ILLEGAL, 0, "illegal character U+0007"},
	{"/", token.ILLEGAL, 0, "illegal character U+002F '/'"},
	{"_", token.ILLEGAL, 0, "illegal character U+005F '_'"},
	{`…`, token.ILLEGAL, 0, "illegal character U+2026 '…'"},
	{`""`, token.STRING, 0, ""},
	{`"`, token.STRING, 0, "string not terminated"},
	{"\"\n", token.STRING, 0, "string not terminated"},
	{`="`, token.STRING, 1, "string not terminated"},
	{"=\"\n", token.STRING, 1, "string not terminated"},
	{"=\\", token.STRING, 1, "unquoted '\\' must be followed by new line"},
	{"=\\\r", token.STRING, 1, "unquoted '\\' must be followed by new line"},
	{`"\z"`, token.STRING, 2, "unknown escape sequence"},
	{`"\a"`, token.STRING, 2, "unknown escape sequence"},
	{`"\b"`, token.STRING, 2, "unknown escape sequence"},
	{`"\f"`, token.STRING, 2, "unknown escape sequence"},
	{`"\r"`, token.STRING, 2, "unknown escape sequence"},
	{`"\t"`, token.STRING, 2, "unknown escape sequence"},
	{`"\v"`, token.STRING, 2, "unknown escape sequence"},
	{`"\0"`, token.STRING, 2, "unknown escape sequence"},
}
+
+func TestScanErrors(t *testing.T) {
+ for _, e := range errors {
+ checkError(t, e.src, e.tok, e.pos, e.err)
+ }
+}
+
+func BenchmarkScan(b *testing.B) {
+ b.StopTimer()
+ fset := token.NewFileSet()
+ file := fset.AddFile("", fset.Base(), len(source))
+ var s Scanner
+ b.StartTimer()
+ for i := b.N - 1; i >= 0; i-- {
+ s.Init(file, source, nil, ScanComments)
+ for {
+ _, tok, _ := s.Scan()
+ if tok == token.EOF {
+ break
+ }
+ }
+ }
+}
--- /dev/null
+package gcfg
+
+import (
+ "fmt"
+ "math/big"
+ "reflect"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ "gopkg.in/sconf/internal.v0/internal-/gcfg/types"
+)
+
// tag holds the parsed contents of a gcfg struct-field tag: the config
// variable identifier and an optional integer-parsing mode string.
type tag struct {
	ident   string
	intMode string
}
+
+func newTag(ts string) tag {
+ t := tag{}
+ s := strings.Split(ts, ",")
+ t.ident = s[0]
+ for _, tse := range s[1:] {
+ if strings.HasPrefix(tse, "int=") {
+ t.intMode = tse[len("int="):]
+ }
+ }
+ return t
+}
+
// fieldFold finds the settable struct field of v matching the config
// variable name, using case-insensitive folding. A gcfg tag ident on a
// field takes precedence over the field name. Returns the zero Value
// and zero tag when no field matches.
func fieldFold(v reflect.Value, name string) (reflect.Value, tag) {
	var n string
	r0, _ := utf8.DecodeRuneInString(name)
	// A leading letter that is neither upper nor lower case (e.g. from a
	// caseless script) cannot start an exported Go field name, so such
	// names are matched against fields with an "X" prefix.
	if unicode.IsLetter(r0) && !unicode.IsLower(r0) && !unicode.IsUpper(r0) {
		n = "X"
	}
	// '-' in config names maps to '_' in Go field names.
	n += strings.Replace(name, "-", "_", -1)
	f, ok := v.Type().FieldByNameFunc(func(fieldName string) bool {
		// Unexported / unsettable fields are never matched.
		if !v.FieldByName(fieldName).CanSet() {
			return false
		}
		f, _ := v.Type().FieldByName(fieldName)
		t := newTag(f.Tag.Get("gcfg"))
		if t.ident != "" {
			// Explicit tag ident: match against the raw config name.
			return strings.EqualFold(t.ident, name)
		}
		return strings.EqualFold(n, fieldName)
	})
	if !ok {
		return reflect.Value{}, tag{}
	}
	return v.FieldByName(f.Name), newTag(f.Tag.Get("gcfg"))
}
+
// setter attempts to store val into the value pointed to by destp.
// blank reports a variable given without "=value"; t carries the parsed
// field tag. A setter returns errUnsupportedType when it does not
// handle destp's type, allowing the next setter to be tried.
type setter func(destp interface{}, blank bool, val string, t tag) error

var errUnsupportedType = fmt.Errorf("unsupported type")
var errBlankUnsupported = fmt.Errorf("blank value not supported for type")

// setters are tried in order until one succeeds or returns an error
// other than errUnsupportedType.
var setters = []setter{
	typeSetter, textUnmarshalerSetter, kindSetter, scanSetter,
}
+
+func textUnmarshalerSetter(d interface{}, blank bool, val string, t tag) error {
+ dtu, ok := d.(textUnmarshaler)
+ if !ok {
+ return errUnsupportedType
+ }
+ if blank {
+ return errBlankUnsupported
+ }
+ return dtu.UnmarshalText([]byte(val))
+}
+
+func boolSetter(d interface{}, blank bool, val string, t tag) error {
+ if blank {
+ reflect.ValueOf(d).Elem().Set(reflect.ValueOf(true))
+ return nil
+ }
+ b, err := types.ParseBool(val)
+ if err == nil {
+ reflect.ValueOf(d).Elem().Set(reflect.ValueOf(b))
+ }
+ return err
+}
+
+func intMode(mode string) types.IntMode {
+ var m types.IntMode
+ if strings.ContainsAny(mode, "dD") {
+ m |= types.Dec
+ }
+ if strings.ContainsAny(mode, "hH") {
+ m |= types.Hex
+ }
+ if strings.ContainsAny(mode, "oO") {
+ m |= types.Oct
+ }
+ return m
+}
+
// typeModes gives the default integer-parsing modes per destination
// type: fixed-size and big integers accept decimal and hex but not
// octal (to avoid surprising leading-zero interpretation).
var typeModes = map[reflect.Type]types.IntMode{
	reflect.TypeOf(int(0)):    types.Dec | types.Hex,
	reflect.TypeOf(int8(0)):   types.Dec | types.Hex,
	reflect.TypeOf(int16(0)):  types.Dec | types.Hex,
	reflect.TypeOf(int32(0)):  types.Dec | types.Hex,
	reflect.TypeOf(int64(0)):  types.Dec | types.Hex,
	reflect.TypeOf(uint(0)):   types.Dec | types.Hex,
	reflect.TypeOf(uint8(0)):  types.Dec | types.Hex,
	reflect.TypeOf(uint16(0)): types.Dec | types.Hex,
	reflect.TypeOf(uint32(0)): types.Dec | types.Hex,
	reflect.TypeOf(uint64(0)): types.Dec | types.Hex,
	// use default mode (allow dec/hex/oct) for uintptr type
	reflect.TypeOf(big.Int{}): types.Dec | types.Hex,
}
+
+func intModeDefault(t reflect.Type) types.IntMode {
+ m, ok := typeModes[t]
+ if !ok {
+ m = types.Dec | types.Hex | types.Oct
+ }
+ return m
+}
+
+func intSetter(d interface{}, blank bool, val string, t tag) error {
+ if blank {
+ return errBlankUnsupported
+ }
+ mode := intMode(t.intMode)
+ if mode == 0 {
+ mode = intModeDefault(reflect.TypeOf(d).Elem())
+ }
+ return types.ParseInt(d, val, mode)
+}
+
+func stringSetter(d interface{}, blank bool, val string, t tag) error {
+ if blank {
+ return errBlankUnsupported
+ }
+ dsp, ok := d.(*string)
+ if !ok {
+ return errUnsupportedType
+ }
+ *dsp = val
+ return nil
+}
+
// kindSetters routes destinations to a setter by reflect.Kind, covering
// strings, booleans, and all fixed-size integer kinds.
var kindSetters = map[reflect.Kind]setter{
	reflect.String:  stringSetter,
	reflect.Bool:    boolSetter,
	reflect.Int:     intSetter,
	reflect.Int8:    intSetter,
	reflect.Int16:   intSetter,
	reflect.Int32:   intSetter,
	reflect.Int64:   intSetter,
	reflect.Uint:    intSetter,
	reflect.Uint8:   intSetter,
	reflect.Uint16:  intSetter,
	reflect.Uint32:  intSetter,
	reflect.Uint64:  intSetter,
	reflect.Uintptr: intSetter,
}
+
// typeSetters routes destinations to a setter by exact reflect.Type;
// currently only big.Int needs type-specific handling.
var typeSetters = map[reflect.Type]setter{
	reflect.TypeOf(big.Int{}): intSetter,
}
+
+func typeSetter(d interface{}, blank bool, val string, tt tag) error {
+ t := reflect.ValueOf(d).Type().Elem()
+ setter, ok := typeSetters[t]
+ if !ok {
+ return errUnsupportedType
+ }
+ return setter(d, blank, val, tt)
+}
+
+func kindSetter(d interface{}, blank bool, val string, tt tag) error {
+ k := reflect.ValueOf(d).Type().Elem().Kind()
+ setter, ok := kindSetters[k]
+ if !ok {
+ return errUnsupportedType
+ }
+ return setter(d, blank, val, tt)
+}
+
// scanSetter is the last-resort setter: it fmt-scans val into d with the
// 'v' verb; blank values are rejected.
func scanSetter(d interface{}, blank bool, val string, tt tag) error {
	if blank {
		return errBlankUnsupported
	}
	return types.ScanFully(d, val, 'v')
}
+
+func set(cfg interface{}, sect, sub, name string, blank bool, value string) error {
+ vPCfg := reflect.ValueOf(cfg)
+ if vPCfg.Kind() != reflect.Ptr || vPCfg.Elem().Kind() != reflect.Struct {
+ panic(fmt.Errorf("config must be a pointer to a struct"))
+ }
+ vCfg := vPCfg.Elem()
+ vSect, _ := fieldFold(vCfg, sect)
+ if !vSect.IsValid() {
+ return fmt.Errorf("invalid section: section %q", sect)
+ }
+ if vSect.Kind() == reflect.Map {
+ vst := vSect.Type()
+ if vst.Key().Kind() != reflect.String ||
+ vst.Elem().Kind() != reflect.Ptr ||
+ vst.Elem().Elem().Kind() != reflect.Struct {
+ panic(fmt.Errorf("map field for section must have string keys and "+
+ " pointer-to-struct values: section %q", sect))
+ }
+ if vSect.IsNil() {
+ vSect.Set(reflect.MakeMap(vst))
+ }
+ k := reflect.ValueOf(sub)
+ pv := vSect.MapIndex(k)
+ if !pv.IsValid() {
+ vType := vSect.Type().Elem().Elem()
+ pv = reflect.New(vType)
+ vSect.SetMapIndex(k, pv)
+ }
+ vSect = pv.Elem()
+ } else if vSect.Kind() != reflect.Struct {
+ panic(fmt.Errorf("field for section must be a map or a struct: "+
+ "section %q", sect))
+ } else if sub != "" {
+ return fmt.Errorf("invalid subsection: "+
+ "section %q subsection %q", sect, sub)
+ }
+ vVar, t := fieldFold(vSect, name)
+ if !vVar.IsValid() {
+ return fmt.Errorf("invalid variable: "+
+ "section %q subsection %q variable %q", sect, sub, name)
+ }
+ // vVal is either single-valued var, or newly allocated value within multi-valued var
+ var vVal reflect.Value
+ // multi-value if unnamed slice type
+ isMulti := vVar.Type().Name() == "" && vVar.Kind() == reflect.Slice
+ if isMulti && blank {
+ vVar.Set(reflect.Zero(vVar.Type()))
+ return nil
+ }
+ if isMulti {
+ vVal = reflect.New(vVar.Type().Elem()).Elem()
+ } else {
+ vVal = vVar
+ }
+ isDeref := vVal.Type().Name() == "" && vVal.Type().Kind() == reflect.Ptr
+ isNew := isDeref && vVal.IsNil()
+ // vAddr is address of value to set (dereferenced & allocated as needed)
+ var vAddr reflect.Value
+ switch {
+ case isNew:
+ vAddr = reflect.New(vVal.Type().Elem())
+ case isDeref && !isNew:
+ vAddr = vVal
+ default:
+ vAddr = vVal.Addr()
+ }
+ vAddrI := vAddr.Interface()
+ err, ok := error(nil), false
+ for _, s := range setters {
+ err = s(vAddrI, blank, value, t)
+ if err == nil {
+ ok = true
+ break
+ }
+ if err != errUnsupportedType {
+ return err
+ }
+ }
+ if !ok {
+ // in case all setters returned errUnsupportedType
+ return err
+ }
+ if isNew { // set reference if it was dereferenced and newly allocated
+ vVal.Set(vAddr)
+ }
+ if isMulti { // append if multi-valued
+ vVar.Set(reflect.Append(vVar, vVal))
+ }
+ return nil
+}
--- /dev/null
+; Comment line
+[section]
+name=value # comment
--- /dev/null
+; Comment line
+[甲]
+乙=丙 # comment
--- /dev/null
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// TODO(gri) consider making this a separate package outside the go directory.
+
+package token
+
+import (
+ "fmt"
+ "sort"
+ "sync"
+)
+
+// -----------------------------------------------------------------------------
+// Positions
+
+// Position describes an arbitrary source position
+// including the file, line, and column location.
+// A Position is valid if the line number is > 0.
+//
+type Position struct {
+ Filename string // filename, if any
+ Offset int // offset, starting at 0
+ Line int // line number, starting at 1
+ Column int // column number, starting at 1 (character count)
+}
+
+// IsValid returns true if the position is valid.
+func (pos *Position) IsValid() bool { return pos.Line > 0 }
+
+// String returns a string in one of several forms:
+//
+// file:line:column valid position with file name
+// line:column valid position without file name
+// file invalid position with file name
+// - invalid position without file name
+//
+func (pos Position) String() string {
+ s := pos.Filename
+ if pos.IsValid() {
+ if s != "" {
+ s += ":"
+ }
+ s += fmt.Sprintf("%d:%d", pos.Line, pos.Column)
+ }
+ if s == "" {
+ s = "-"
+ }
+ return s
+}
+
+// Pos is a compact encoding of a source position within a file set.
+// It can be converted into a Position for a more convenient, but much
+// larger, representation.
+//
+// The Pos value for a given file is a number in the range [base, base+size],
+// where base and size are specified when adding the file to the file set via
+// AddFile.
+//
+// To create the Pos value for a specific source offset, first add
+// the respective file to the current file set (via FileSet.AddFile)
+// and then call File.Pos(offset) for that file. Given a Pos value p
+// for a specific file set fset, the corresponding Position value is
+// obtained by calling fset.Position(p).
+//
+// Pos values can be compared directly with the usual comparison operators:
+// If two Pos values p and q are in the same file, comparing p and q is
+// equivalent to comparing the respective source file offsets. If p and q
+// are in different files, p < q is true if the file implied by p was added
+// to the respective file set before the file implied by q.
+//
+type Pos int
+
+// The zero value for Pos is NoPos; there is no file and line information
+// associated with it, and NoPos().IsValid() is false. NoPos is always
+// smaller than any other Pos value. The corresponding Position value
+// for NoPos is the zero value for Position.
+//
+const NoPos Pos = 0
+
+// IsValid returns true if the position is valid.
+func (p Pos) IsValid() bool {
+ return p != NoPos
+}
+
+// -----------------------------------------------------------------------------
+// File
+
// A File is a handle for a file belonging to a FileSet.
// A File has a name, size, and line offset table.
//
type File struct {
	set  *FileSet
	name string // file name as provided to AddFile
	base int    // Pos value range for this file is [base...base+size]
	size int    // file size as provided to AddFile

	// lines and infos are protected by set.mutex
	lines []int      // offsets of the first character of each line (AddFile seeds it with {0})
	infos []lineInfo // alternative //line-style file/line information, sorted by Offset
}
+
+// Name returns the file name of file f as registered with AddFile.
+func (f *File) Name() string {
+ return f.name
+}
+
+// Base returns the base offset of file f as registered with AddFile.
+func (f *File) Base() int {
+ return f.base
+}
+
+// Size returns the size of file f as registered with AddFile.
+func (f *File) Size() int {
+ return f.size
+}
+
// LineCount returns the number of lines in file f.
func (f *File) LineCount() int {
	// lines is guarded by the owning set's mutex, not by the File itself.
	f.set.mutex.RLock()
	n := len(f.lines)
	f.set.mutex.RUnlock()
	return n
}
+
// AddLine adds the line offset for a new line.
// The line offset must be larger than the offset for the previous line
// and smaller than the file size; otherwise the line offset is ignored.
//
func (f *File) AddLine(offset int) {
	f.set.mutex.Lock()
	// Silently ignore offsets that would break the strictly-increasing
	// invariant of the line table or exceed the file size.
	if i := len(f.lines); (i == 0 || f.lines[i-1] < offset) && offset < f.size {
		f.lines = append(f.lines, offset)
	}
	f.set.mutex.Unlock()
}
+
+// SetLines sets the line offsets for a file and returns true if successful.
+// The line offsets are the offsets of the first character of each line;
+// for instance for the content "ab\nc\n" the line offsets are {0, 3}.
+// An empty file has an empty line offset table.
+// Each line offset must be larger than the offset for the previous line
+// and smaller than the file size; otherwise SetLines fails and returns
+// false.
+//
+func (f *File) SetLines(lines []int) bool {
+ // verify validity of lines table
+ size := f.size
+ for i, offset := range lines {
+ if i > 0 && offset <= lines[i-1] || size <= offset {
+ return false
+ }
+ }
+
+ // set lines table
+ f.set.mutex.Lock()
+ f.lines = lines
+ f.set.mutex.Unlock()
+ return true
+}
+
+// SetLinesForContent sets the line offsets for the given file content.
+func (f *File) SetLinesForContent(content []byte) {
+ var lines []int
+ line := 0
+ for offset, b := range content {
+ if line >= 0 {
+ lines = append(lines, line)
+ }
+ line = -1
+ if b == '\n' {
+ line = offset + 1
+ }
+ }
+
+ // set lines table
+ f.set.mutex.Lock()
+ f.lines = lines
+ f.set.mutex.Unlock()
+}
+
+// A lineInfo object describes alternative file and line number
+// information (such as provided via a //line comment in a .go
+// file) for a given file offset.
+type lineInfo struct {
+ // fields are exported to make them accessible to gob
+ Offset int
+ Filename string
+ Line int
+}
+
// AddLineInfo adds alternative file and line number information for
// a given file offset. The offset must be larger than the offset for
// the previously added alternative line info and smaller than the
// file size; otherwise the information is ignored.
//
// AddLineInfo is typically used to register alternative position
// information for //line filename:line comments in source files.
//
func (f *File) AddLineInfo(offset int, filename string, line int) {
	f.set.mutex.Lock()
	// NOTE(review): by Go operator precedence this reads
	// i == 0 || (f.infos[i-1].Offset < offset && offset < f.size),
	// so the very first info is accepted without the offset < f.size
	// bound check. This matches the upstream go/token source this file
	// is vendored from — confirm before changing.
	if i := len(f.infos); i == 0 || f.infos[i-1].Offset < offset && offset < f.size {
		f.infos = append(f.infos, lineInfo{offset, filename, line})
	}
	f.set.mutex.Unlock()
}
+
// Pos returns the Pos value for the given file offset;
// the offset must be <= f.Size().
// f.Pos(f.Offset(p)) == p.
//
func (f *File) Pos(offset int) Pos {
	if offset > f.size {
		panic("illegal file offset")
	}
	// Pos values for this file occupy the range [base, base+size].
	return Pos(f.base + offset)
}
+
// Offset returns the offset for the given file position p;
// p must be a valid Pos value in that file.
// f.Offset(f.Pos(offset)) == offset.
//
func (f *File) Offset(p Pos) int {
	// Reject Pos values outside this file's [base, base+size] range.
	if int(p) < f.base || int(p) > f.base+f.size {
		panic("illegal Pos value")
	}
	return int(p) - f.base
}
+
// Line returns the line number for the given file position p;
// p must be a Pos value in that file or NoPos.
//
func (f *File) Line(p Pos) int {
	// TODO(gri) this can be implemented much more efficiently
	return f.Position(p).Line
}
+
// searchLineInfos returns the index of the last lineInfo in a with
// Offset <= x, or -1 if there is none (a is sorted by Offset).
func searchLineInfos(a []lineInfo, x int) int {
	return sort.Search(len(a), func(i int) bool { return a[i].Offset > x }) - 1
}
+
// info returns the file name, line, and column number for a file offset.
// Alternative line information registered via AddLineInfo (e.g. from
// //line comments) overrides the physical filename and shifts the line.
func (f *File) info(offset int) (filename string, line, column int) {
	filename = f.name
	// Physical line/column from the line-offset table (both 1-based).
	if i := searchInts(f.lines, offset); i >= 0 {
		line, column = i+1, offset-f.lines[i]+1
	}
	if len(f.infos) > 0 {
		// almost no files have extra line infos
		if i := searchLineInfos(f.infos, offset); i >= 0 {
			alt := &f.infos[i]
			filename = alt.Filename
			// Shift the physical line by the delta the alt info declares
			// at its own offset.
			if i := searchInts(f.lines, alt.Offset); i >= 0 {
				line += alt.Line - i - 1
			}
		}
	}
	return
}
+
// position converts p (assumed to lie within f) into a full Position
// without validating the Pos value; callers validate first.
func (f *File) position(p Pos) (pos Position) {
	offset := int(p) - f.base
	pos.Offset = offset
	pos.Filename, pos.Line, pos.Column = f.info(offset)
	return
}
+
+// Position returns the Position value for the given file position p;
+// p must be a Pos value in that file or NoPos.
+//
+func (f *File) Position(p Pos) (pos Position) {
+ if p != NoPos {
+ if int(p) < f.base || int(p) > f.base+f.size {
+ panic("illegal Pos value")
+ }
+ pos = f.position(p)
+ }
+ return
+}
+
+// -----------------------------------------------------------------------------
+// FileSet
+
+// A FileSet represents a set of source files.
+// Methods of file sets are synchronized; multiple goroutines
+// may invoke them concurrently.
+//
+type FileSet struct {
+ mutex sync.RWMutex // protects the file set
+ base int // base offset for the next file
+ files []*File // list of files in the order added to the set
+ last *File // cache of last file looked up
+}
+
+// NewFileSet creates a new file set.
+func NewFileSet() *FileSet {
+ s := new(FileSet)
+ s.base = 1 // 0 == NoPos
+ return s
+}
+
+// Base returns the minimum base offset that must be provided to
+// AddFile when adding the next file.
+//
+func (s *FileSet) Base() int {
+ s.mutex.RLock()
+ b := s.base
+ s.mutex.RUnlock()
+ return b
+
+}
+
// AddFile adds a new file with a given filename, base offset, and file size
// to the file set s and returns the file. Multiple files may have the same
// name. The base offset must not be smaller than the FileSet's Base(), and
// size must not be negative.
//
// Adding the file will set the file set's Base() value to base + size + 1
// as the minimum base value for the next file. The following relationship
// exists between a Pos value p for a given file offset offs:
//
//	int(p) = base + offs
//
// with offs in the range [0, size] and thus p in the range [base, base+size].
// For convenience, File.Pos may be used to create file-specific position
// values from a file offset.
//
func (s *FileSet) AddFile(filename string, base, size int) *File {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	if base < s.base || size < 0 {
		panic("illegal base or size")
	}
	// base >= s.base && size >= 0
	// The line table is seeded with offset 0: every file starts at line 1.
	f := &File{s, filename, base, size, []int{0}, nil}
	base += size + 1 // +1 because EOF also has a position
	if base < 0 {
		panic("token.Pos offset overflow (> 2G of source code in file set)")
	}
	// add the file to the file set
	s.base = base
	s.files = append(s.files, f)
	s.last = f // prime the lookup cache with the newest file
	return f
}
+
+// Iterate calls f for the files in the file set in the order they were added
+// until f returns false.
+//
+func (s *FileSet) Iterate(f func(*File) bool) {
+ for i := 0; ; i++ {
+ var file *File
+ s.mutex.RLock()
+ if i < len(s.files) {
+ file = s.files[i]
+ }
+ s.mutex.RUnlock()
+ if file == nil || !f(file) {
+ break
+ }
+ }
+}
+
// searchFiles returns the index of the last file in a with base <= x,
// or -1 if there is none (a is ordered by base).
func searchFiles(a []*File, x int) int {
	return sort.Search(len(a), func(i int) bool { return a[i].base > x }) - 1
}
+
// file returns the file containing p, or nil if p lies in no file.
// The most recently found file is cached in s.last.
func (s *FileSet) file(p Pos) *File {
	// common case: p is in last file
	if f := s.last; f != nil && f.base <= int(p) && int(p) <= f.base+f.size {
		return f
	}
	// p is not in last file - search all files
	if i := searchFiles(s.files, int(p)); i >= 0 {
		f := s.files[i]
		// f.base <= int(p) by definition of searchFiles
		if int(p) <= f.base+f.size {
			// NOTE(review): callers (File, Position) hold only the read
			// lock here, so this cache write races with concurrent
			// lookups — matches the upstream code this is vendored from.
			s.last = f
			return f
		}
	}
	return nil
}
+
+// File returns the file that contains the position p.
+// If no such file is found (for instance for p == NoPos),
+// the result is nil.
+//
+func (s *FileSet) File(p Pos) (f *File) {
+ if p != NoPos {
+ s.mutex.RLock()
+ f = s.file(p)
+ s.mutex.RUnlock()
+ }
+ return
+}
+
+// Position converts a Pos in the fileset into a general Position.
+func (s *FileSet) Position(p Pos) (pos Position) {
+ if p != NoPos {
+ s.mutex.RLock()
+ if f := s.file(p); f != nil {
+ pos = f.position(p)
+ }
+ s.mutex.RUnlock()
+ }
+ return
+}
+
+// -----------------------------------------------------------------------------
+// Helper functions
+
// searchInts returns the index of the last element of a that is <= x,
// or -1 if there is none (a must be sorted ascending).
//
// This is a manually inlined binary search (see sort.Search); it is
// kept inline deliberately — per the original authors it improved the
// go/printer benchmark by ~30% on the hot Position-lookup path.
func searchInts(a []int, x int) int {
	lo, hi := 0, len(a)
	for lo < hi {
		mid := lo + (hi-lo)/2 // avoid overflow when computing mid
		// invariant: lo ≤ mid < hi
		if a[mid] <= x {
			lo = mid + 1
		} else {
			hi = mid
		}
	}
	return lo - 1
}
--- /dev/null
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package token
+
+import (
+ "fmt"
+ "testing"
+)
+
+func checkPos(t *testing.T, msg string, p, q Position) {
+ if p.Filename != q.Filename {
+ t.Errorf("%s: expected filename = %q; got %q", msg, q.Filename, p.Filename)
+ }
+ if p.Offset != q.Offset {
+ t.Errorf("%s: expected offset = %d; got %d", msg, q.Offset, p.Offset)
+ }
+ if p.Line != q.Line {
+ t.Errorf("%s: expected line = %d; got %d", msg, q.Line, p.Line)
+ }
+ if p.Column != q.Column {
+ t.Errorf("%s: expected column = %d; got %d", msg, q.Column, p.Column)
+ }
+}
+
+func TestNoPos(t *testing.T) {
+ if NoPos.IsValid() {
+ t.Errorf("NoPos should not be valid")
+ }
+ var fset *FileSet
+ checkPos(t, "nil NoPos", fset.Position(NoPos), Position{})
+ fset = NewFileSet()
+ checkPos(t, "fset NoPos", fset.Position(NoPos), Position{})
+}
+
// tests lists file fixtures: a name, optional literal source, the file
// size, and the expected line-start offsets for that content.
var tests = []struct {
	filename string
	source   []byte // may be nil
	size     int
	lines    []int
}{
	{"a", []byte{}, 0, []int{}},
	{"b", []byte("01234"), 5, []int{0}},
	{"c", []byte("\n\n\n\n\n\n\n\n\n"), 9, []int{0, 1, 2, 3, 4, 5, 6, 7, 8}},
	{"d", nil, 100, []int{0, 5, 10, 20, 30, 70, 71, 72, 80, 85, 90, 99}},
	{"e", nil, 777, []int{0, 80, 100, 120, 130, 180, 267, 455, 500, 567, 620}},
	{"f", []byte("package p\n\nimport \"fmt\""), 23, []int{0, 10, 11}},
	{"g", []byte("package p\n\nimport \"fmt\"\n"), 24, []int{0, 10, 11}},
	{"h", []byte("package p\n\nimport \"fmt\"\n "), 25, []int{0, 10, 11, 24}},
}
+
// linecol computes the 1-based line and column for offs, given a table
// of line-start offsets whose first entry corresponds to line 1.
func linecol(lines []int, offs int) (int, int) {
	line := len(lines) // default: offs falls on the last line
	start := 0         // start offset of that line
	for i, lineOffs := range lines {
		if offs < lineOffs {
			line = i
			break
		}
		start = lineOffs
	}
	return line, offs - start + 1
}
+
+func verifyPositions(t *testing.T, fset *FileSet, f *File, lines []int) {
+ for offs := 0; offs < f.Size(); offs++ {
+ p := f.Pos(offs)
+ offs2 := f.Offset(p)
+ if offs2 != offs {
+ t.Errorf("%s, Offset: expected offset %d; got %d", f.Name(), offs, offs2)
+ }
+ line, col := linecol(lines, offs)
+ msg := fmt.Sprintf("%s (offs = %d, p = %d)", f.Name(), offs, p)
+ checkPos(t, msg, f.Position(f.Pos(offs)), Position{f.Name(), offs, line, col})
+ checkPos(t, msg, fset.Position(p), Position{f.Name(), offs, line, col})
+ }
+}
+
// makeTestSource builds a synthetic source of the given size whose
// newline bytes are placed so that each offset in lines starts a line.
func makeTestSource(size int, lines []int) []byte {
	buf := make([]byte, size)
	for _, lineStart := range lines {
		// A line starting at offset n (n > 0) implies a '\n' at n-1.
		if lineStart > 0 {
			buf[lineStart-1] = '\n'
		}
	}
	return buf
}
+
// TestPositions exercises all three ways of populating a File's line
// table (AddLine, SetLines, SetLinesForContent) and verifies every
// offset's position after each step.
func TestPositions(t *testing.T) {
	const delta = 7 // a non-zero base offset increment
	fset := NewFileSet()
	for _, test := range tests {
		// verify consistency of test case
		if test.source != nil && len(test.source) != test.size {
			t.Errorf("%s: inconsistent test case: expected file size %d; got %d", test.filename, test.size, len(test.source))
		}

		// add file and verify name and size
		f := fset.AddFile(test.filename, fset.Base()+delta, test.size)
		if f.Name() != test.filename {
			t.Errorf("expected filename %q; got %q", test.filename, f.Name())
		}
		if f.Size() != test.size {
			t.Errorf("%s: expected file size %d; got %d", f.Name(), test.size, f.Size())
		}
		if fset.File(f.Pos(0)) != f {
			t.Errorf("%s: f.Pos(0) was not found in f", f.Name())
		}

		// add lines individually and verify all positions
		for i, offset := range test.lines {
			f.AddLine(offset)
			if f.LineCount() != i+1 {
				t.Errorf("%s, AddLine: expected line count %d; got %d", f.Name(), i+1, f.LineCount())
			}
			// adding the same offset again should be ignored
			f.AddLine(offset)
			if f.LineCount() != i+1 {
				t.Errorf("%s, AddLine: expected unchanged line count %d; got %d", f.Name(), i+1, f.LineCount())
			}
			verifyPositions(t, fset, f, test.lines[0:i+1])
		}

		// add lines with SetLines and verify all positions
		if ok := f.SetLines(test.lines); !ok {
			t.Errorf("%s: SetLines failed", f.Name())
		}
		if f.LineCount() != len(test.lines) {
			t.Errorf("%s, SetLines: expected line count %d; got %d", f.Name(), len(test.lines), f.LineCount())
		}
		verifyPositions(t, fset, f, test.lines)

		// add lines with SetLinesForContent and verify all positions
		src := test.source
		if src == nil {
			// no test source available - create one from scratch
			src = makeTestSource(test.size, test.lines)
		}
		f.SetLinesForContent(src)
		if f.LineCount() != len(test.lines) {
			t.Errorf("%s, SetLinesForContent: expected line count %d; got %d", f.Name(), len(test.lines), f.LineCount())
		}
		verifyPositions(t, fset, f, test.lines)
	}
}
+
// TestLineInfo verifies that alternative line information registered
// via AddLineInfo overrides the filename and line for every offset.
func TestLineInfo(t *testing.T) {
	fset := NewFileSet()
	f := fset.AddFile("foo", fset.Base(), 500)
	lines := []int{0, 42, 77, 100, 210, 220, 277, 300, 333, 401}
	// add lines individually and provide alternative line information
	for _, offs := range lines {
		f.AddLine(offs)
		f.AddLineInfo(offs, "bar", 42)
	}
	// verify positions for all offsets
	for offs := 0; offs <= f.Size(); offs++ {
		p := f.Pos(offs)
		_, col := linecol(lines, offs)
		msg := fmt.Sprintf("%s (offs = %d, p = %d)", f.Name(), offs, p)
		// Every line maps to "bar":42 since each alt info restarts there.
		checkPos(t, msg, f.Position(f.Pos(offs)), Position{"bar", offs, 42, col})
		checkPos(t, msg, fset.Position(p), Position{"bar", offs, 42, col})
	}
}
+
+func TestFiles(t *testing.T) {
+ fset := NewFileSet()
+ for i, test := range tests {
+ fset.AddFile(test.filename, fset.Base(), test.size)
+ j := 0
+ fset.Iterate(func(f *File) bool {
+ if f.Name() != tests[j].filename {
+ t.Errorf("expected filename = %s; got %s", tests[j].filename, f.Name())
+ }
+ j++
+ return true
+ })
+ if j != i+1 {
+ t.Errorf("expected %d files; got %d", i+1, j)
+ }
+ }
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package token
+
// serializedFile is the gob/JSON-friendly mirror of File used by
// FileSet.Read and FileSet.Write.
type serializedFile struct {
	// fields correspond 1:1 to fields with same (lower-case) name in File
	Name  string
	Base  int
	Size  int
	Lines []int
	Infos []lineInfo
}

// serializedFileSet mirrors the serializable state of a FileSet
// (the mutex and the s.last cache are intentionally omitted).
type serializedFileSet struct {
	Base  int
	Files []serializedFile
}
+
+// Read calls decode to deserialize a file set into s; s must not be nil.
+func (s *FileSet) Read(decode func(interface{}) error) error {
+ var ss serializedFileSet
+ if err := decode(&ss); err != nil {
+ return err
+ }
+
+ s.mutex.Lock()
+ s.base = ss.Base
+ files := make([]*File, len(ss.Files))
+ for i := 0; i < len(ss.Files); i++ {
+ f := &ss.Files[i]
+ files[i] = &File{s, f.Name, f.Base, f.Size, f.Lines, f.Infos}
+ }
+ s.files = files
+ s.last = nil
+ s.mutex.Unlock()
+
+ return nil
+}
+
+// Write calls encode to serialize the file set s.
+func (s *FileSet) Write(encode func(interface{}) error) error {
+ var ss serializedFileSet
+
+ s.mutex.Lock()
+ ss.Base = s.base
+ files := make([]serializedFile, len(s.files))
+ for i, f := range s.files {
+ files[i] = serializedFile{f.name, f.base, f.size, f.lines, f.infos}
+ }
+ ss.Files = files
+ s.mutex.Unlock()
+
+ return encode(ss)
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package token
+
+import (
+ "bytes"
+ "encoding/gob"
+ "fmt"
+ "testing"
+)
+
// equal returns nil if p and q describe the same file set;
// otherwise it returns an error describing the discrepancy.
func equal(p, q *FileSet) error {
	if p == q {
		// avoid deadlock if p == q
		return nil
	}

	// not strictly needed for the test
	p.mutex.Lock()
	q.mutex.Lock()
	defer q.mutex.Unlock()
	defer p.mutex.Unlock()

	if p.base != q.base {
		return fmt.Errorf("different bases: %d != %d", p.base, q.base)
	}

	if len(p.files) != len(q.files) {
		return fmt.Errorf("different number of files: %d != %d", len(p.files), len(q.files))
	}

	// Compare each file pairwise: set back-pointer, identity fields,
	// line table, and alternative line infos.
	for i, f := range p.files {
		g := q.files[i]
		if f.set != p {
			return fmt.Errorf("wrong fileset for %q", f.name)
		}
		if g.set != q {
			return fmt.Errorf("wrong fileset for %q", g.name)
		}
		if f.name != g.name {
			return fmt.Errorf("different filenames: %q != %q", f.name, g.name)
		}
		if f.base != g.base {
			return fmt.Errorf("different base for %q: %d != %d", f.name, f.base, g.base)
		}
		if f.size != g.size {
			return fmt.Errorf("different size for %q: %d != %d", f.name, f.size, g.size)
		}
		for j, l := range f.lines {
			m := g.lines[j]
			if l != m {
				return fmt.Errorf("different offsets for %q", f.name)
			}
		}
		for j, l := range f.infos {
			m := g.infos[j]
			if l.Offset != m.Offset || l.Filename != m.Filename || l.Line != m.Line {
				return fmt.Errorf("different infos for %q", f.name)
			}
		}
	}

	// we don't care about .last - it's just a cache
	return nil
}
+
+// checkSerialize round-trips p through Write (gob-encode) and Read
+// (gob-decode) into a fresh FileSet and reports a test error if the
+// round-tripped set is not equal to the original.
+func checkSerialize(t *testing.T, p *FileSet) {
+	var buf bytes.Buffer
+	encode := func(x interface{}) error {
+		return gob.NewEncoder(&buf).Encode(x)
+	}
+	if err := p.Write(encode); err != nil {
+		t.Errorf("writing fileset failed: %s", err)
+		return
+	}
+	q := NewFileSet()
+	decode := func(x interface{}) error {
+		return gob.NewDecoder(&buf).Decode(x)
+	}
+	if err := q.Read(decode); err != nil {
+		t.Errorf("reading fileset failed: %s", err)
+		return
+	}
+	if err := equal(p, q); err != nil {
+		t.Errorf("filesets not identical: %s", err)
+	}
+}
+
+// TestSerialization verifies round-trip serialization for an empty set,
+// then repeatedly after adding files of growing size populated with line
+// offsets and occasional alternative line info.
+func TestSerialization(t *testing.T) {
+	p := NewFileSet()
+	checkSerialize(t, p)
+	// add some files
+	for i := 0; i < 10; i++ {
+		f := p.AddFile(fmt.Sprintf("file%d", i), p.Base()+i, i*100)
+		checkSerialize(t, p)
+		// add some lines and alternative file infos
+		line := 1000
+		for offs := 0; offs < f.Size(); offs += 40 + i {
+			f.AddLine(offs)
+			if offs%7 == 0 {
+				f.AddLineInfo(offs, fmt.Sprintf("file%d", offs), line)
+				line += 33
+			}
+		}
+		checkSerialize(t, p)
+	}
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package token defines constants representing the lexical tokens of the gcfg
+// configuration syntax and basic operations on tokens (printing, predicates).
+//
+// Note that the API for the token package may change to accommodate new
+// features or implementation changes in gcfg.
+//
+package token // import "gopkg.in/sconf/internal.v0/internal-/gcfg/token"
+
+import "strconv"
+
+// Token is the set of lexical tokens of the gcfg configuration syntax.
+type Token int
+
+// The list of tokens.
+const (
+ // Special tokens
+ ILLEGAL Token = iota
+ EOF
+ COMMENT
+
+ literal_beg
+ // Identifiers and basic type literals
+ // (these tokens stand for classes of literals)
+ IDENT // section-name, variable-name
+ STRING // "subsection-name", variable value
+ literal_end
+
+ operator_beg
+ // Operators and delimiters
+ ASSIGN // =
+ LBRACK // [
+ RBRACK // ]
+ EOL // \n
+ operator_end
+)
+
+var tokens = [...]string{
+ ILLEGAL: "ILLEGAL",
+
+ EOF: "EOF",
+ COMMENT: "COMMENT",
+
+ IDENT: "IDENT",
+ STRING: "STRING",
+
+ ASSIGN: "=",
+ LBRACK: "[",
+ RBRACK: "]",
+ EOL: "\n",
+}
+
+// String returns the string corresponding to the token tok.
+// For operators and delimiters, the string is the actual token character
+// sequence (e.g., for the token ASSIGN, the string is "="). For all other
+// tokens the string corresponds to the token constant name (e.g. for the
+// token IDENT, the string is "IDENT").
+//
+func (tok Token) String() string {
+	s := ""
+	if 0 <= tok && tok < Token(len(tokens)) {
+		s = tokens[tok]
+	}
+	// Out-of-range tokens, and in-range gaps (the unexported *_beg/*_end
+	// delimiters have no entry in tokens), fall back to a numeric form.
+	if s == "" {
+		s = "token(" + strconv.Itoa(int(tok)) + ")"
+	}
+	return s
+}
+
+// Predicates
+
+// IsLiteral returns true for tokens corresponding to identifiers
+// and basic type literals; it returns false otherwise.
+//
+func (tok Token) IsLiteral() bool { return literal_beg < tok && tok < literal_end }
+
+// IsOperator returns true for tokens corresponding to operators and
+// delimiters; it returns false otherwise.
+//
+func (tok Token) IsOperator() bool { return operator_beg < tok && tok < operator_end }
--- /dev/null
+package types
+
+// BoolValues defines the name and value mappings for ParseBool.
+var BoolValues = map[string]interface{}{
+	"true": true, "yes": true, "on": true, "1": true,
+	"false": false, "no": false, "off": false, "0": false,
+}
+
+// boolParser is a package-level singleton, initialized once at package
+// init time from BoolValues via an immediately-invoked function literal.
+var boolParser = func() *EnumParser {
+	ep := &EnumParser{}
+	ep.AddVals(BoolValues)
+	return ep
+}()
+
+// ParseBool parses bool values according to the definitions in BoolValues.
+// Parsing is case-insensitive.
+func ParseBool(s string) (bool, error) {
+	v, err := boolParser.Parse(s)
+	if err != nil {
+		return false, err
+	}
+	// The type assertion is safe: boolParser only ever holds bool values.
+	return v.(bool), nil
+}
--- /dev/null
+// Package types defines helpers for type conversions.
+//
+// The API for this package is not finalized yet.
+package types // import "gopkg.in/sconf/internal.v0/internal-/gcfg/types"
--- /dev/null
+package types
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+// EnumParser parses "enum" values; i.e. a predefined set of strings to
+// predefined values.
+type EnumParser struct {
+ Type string // type name; if not set, use type of first value added
+ CaseMatch bool // if true, matching of strings is case-sensitive
+ // PrefixMatch bool
+ vals map[string]interface{}
+}
+
+// AddVals adds strings and values to an EnumParser.
+// If Type is unset, it is derived from the (reflected) type name of the
+// first value seen. With CaseMatch disabled, keys are stored lower-cased
+// so lookups in Parse can be case-insensitive.
+func (ep *EnumParser) AddVals(vals map[string]interface{}) {
+	if ep.vals == nil {
+		ep.vals = make(map[string]interface{})
+	}
+	for k, v := range vals {
+		if ep.Type == "" {
+			ep.Type = reflect.TypeOf(v).Name()
+		}
+		if !ep.CaseMatch {
+			k = strings.ToLower(k)
+		}
+		ep.vals[k] = v
+	}
+}
+
+// Parse parses the string and returns the value or an error.
+// Matching is case-insensitive unless CaseMatch is set; on failure the
+// error message includes the configured Type name for context.
+func (ep EnumParser) Parse(s string) (interface{}, error) {
+	if !ep.CaseMatch {
+		s = strings.ToLower(s)
+	}
+	v, ok := ep.vals[s]
+	if !ok {
+		return false, fmt.Errorf("failed to parse %s %#q", ep.Type, s)
+	}
+	return v, nil
+}
--- /dev/null
+package types
+
+import (
+ "testing"
+)
+
+func TestEnumParserBool(t *testing.T) {
+ for _, tt := range []struct {
+ val string
+ res bool
+ ok bool
+ }{
+ {val: "tRuE", res: true, ok: true},
+ {val: "False", res: false, ok: true},
+ {val: "t", ok: false},
+ } {
+ b, err := ParseBool(tt.val)
+ switch {
+ case tt.ok && err != nil:
+ t.Errorf("%q: got error %v, want %v", tt.val, err, tt.res)
+ case !tt.ok && err == nil:
+ t.Errorf("%q: got %v, want error", tt.val, b)
+ case tt.ok && b != tt.res:
+ t.Errorf("%q: got %v, want %v", tt.val, b, tt.res)
+ default:
+ t.Logf("%q: got %v, %v", tt.val, b, err)
+ }
+ }
+}
--- /dev/null
+package types
+
+import (
+ "fmt"
+ "strings"
+)
+
+// An IntMode is a mode for parsing integer values, representing a set of
+// accepted bases.
+type IntMode uint8
+
+// IntMode values for ParseInt; can be combined using binary or.
+const (
+ Dec IntMode = 1 << iota
+ Hex
+ Oct
+)
+
+// String returns a string representation of IntMode; e.g. `IntMode(Dec|Hex)`.
+// Each set flag contributes its name; names are joined with "|" in the
+// fixed order Dec, Hex, Oct.
+func (m IntMode) String() string {
+	var modes []string
+	if m&Dec != 0 {
+		modes = append(modes, "Dec")
+	}
+	if m&Hex != 0 {
+		modes = append(modes, "Hex")
+	}
+	if m&Oct != 0 {
+		modes = append(modes, "Oct")
+	}
+	return "IntMode(" + strings.Join(modes, "|") + ")"
+}
+
+// errIntAmbig is returned by ParseInt when the mode allows multiple bases
+// and the value carries no prefix to disambiguate them.
+var errIntAmbig = fmt.Errorf("ambiguous integer value; must include '0' prefix")
+
+// prefix0 reports whether val starts with "0" (optionally negative),
+// i.e. it could be an octal or "0x"-prefixed literal.
+func prefix0(val string) bool {
+	return strings.HasPrefix(val, "0") || strings.HasPrefix(val, "-0")
+}
+
+// prefix0x reports whether val starts with "0x" (optionally negative),
+// i.e. it is an explicitly hexadecimal literal.
+func prefix0x(val string) bool {
+	return strings.HasPrefix(val, "0x") || strings.HasPrefix(val, "-0x")
+}
+
+// ParseInt parses val using mode into intptr, which must be a pointer to an
+// integer kind type. Non-decimal values require prefix `0` or `0x` in the cases
+// when mode permits ambiguity of base; otherwise the prefix can be omitted.
+//
+// The switch selects an fmt scan verb: 'd' (decimal), 'x' (hex), 'o' (octal),
+// or 'v' (let Sscanf infer the base from the 0/0x prefix). Actual scanning is
+// delegated to ScanFully.
+func ParseInt(intptr interface{}, val string, mode IntMode) error {
+	val = strings.TrimSpace(val)
+	verb := byte(0)
+	switch mode {
+	case Dec:
+		verb = 'd'
+	case Dec + Hex:
+		// 0x-prefixed → infer base; otherwise decimal (leading 0 is NOT octal here).
+		if prefix0x(val) {
+			verb = 'v'
+		} else {
+			verb = 'd'
+		}
+	case Dec + Oct:
+		// Plain leading 0 (but not 0x) → infer base (octal); otherwise decimal.
+		if prefix0(val) && !prefix0x(val) {
+			verb = 'v'
+		} else {
+			verb = 'd'
+		}
+	case Dec + Hex + Oct:
+		verb = 'v'
+	case Hex:
+		if prefix0x(val) {
+			verb = 'v'
+		} else {
+			verb = 'x'
+		}
+	case Oct:
+		verb = 'o'
+	case Hex + Oct:
+		// Without a 0/0x prefix there is no way to tell hex from octal.
+		if prefix0(val) {
+			verb = 'v'
+		} else {
+			return errIntAmbig
+		}
+	}
+	// Any mode combination not covered above (e.g. zero) is a programmer error.
+	if verb == 0 {
+		panic("unsupported mode")
+	}
+	return ScanFully(intptr, val, verb)
+}
--- /dev/null
+package types
+
+import (
+ "reflect"
+ "testing"
+)
+
+// elem dereferences a pointer held in an interface{} and returns the
+// pointed-to value, again boxed in an interface{}.
+func elem(p interface{}) interface{} {
+	return reflect.ValueOf(p).Elem().Interface()
+}
+
+func TestParseInt(t *testing.T) {
+ for _, tt := range []struct {
+ val string
+ mode IntMode
+ exp interface{}
+ ok bool
+ }{
+ {"0", Dec, int(0), true},
+ {"10", Dec, int(10), true},
+ {"-10", Dec, int(-10), true},
+ {"x", Dec, int(0), false},
+ {"0xa", Hex, int(0xa), true},
+ {"a", Hex, int(0xa), true},
+ {"10", Hex, int(0x10), true},
+ {"-0xa", Hex, int(-0xa), true},
+ {"0x", Hex, int(0x0), true}, // Scanf doesn't require digit behind 0x
+ {"-0x", Hex, int(0x0), true}, // Scanf doesn't require digit behind 0x
+ {"-a", Hex, int(-0xa), true},
+ {"-10", Hex, int(-0x10), true},
+ {"x", Hex, int(0), false},
+ {"10", Oct, int(010), true},
+ {"010", Oct, int(010), true},
+ {"-10", Oct, int(-010), true},
+ {"-010", Oct, int(-010), true},
+ {"10", Dec | Hex, int(10), true},
+ {"010", Dec | Hex, int(10), true},
+ {"0x10", Dec | Hex, int(0x10), true},
+ {"10", Dec | Oct, int(10), true},
+ {"010", Dec | Oct, int(010), true},
+ {"0x10", Dec | Oct, int(0), false},
+ {"10", Hex | Oct, int(0), false}, // need prefix to distinguish Hex/Oct
+ {"010", Hex | Oct, int(010), true},
+ {"0x10", Hex | Oct, int(0x10), true},
+ {"10", Dec | Hex | Oct, int(10), true},
+ {"010", Dec | Hex | Oct, int(010), true},
+ {"0x10", Dec | Hex | Oct, int(0x10), true},
+ } {
+ typ := reflect.TypeOf(tt.exp)
+ res := reflect.New(typ).Interface()
+ err := ParseInt(res, tt.val, tt.mode)
+ switch {
+ case tt.ok && err != nil:
+ t.Errorf("ParseInt(%v, %#v, %v): fail; got error %v, want ok",
+ typ, tt.val, tt.mode, err)
+ case !tt.ok && err == nil:
+ t.Errorf("ParseInt(%v, %#v, %v): fail; got %v, want error",
+ typ, tt.val, tt.mode, elem(res))
+ case tt.ok && !reflect.DeepEqual(elem(res), tt.exp):
+ t.Errorf("ParseInt(%v, %#v, %v): fail; got %v, want %v",
+ typ, tt.val, tt.mode, elem(res), tt.exp)
+ default:
+ t.Logf("ParseInt(%v, %#v, %s): pass; got %v, error %v",
+ typ, tt.val, tt.mode, elem(res), err)
+ }
+ }
+}
--- /dev/null
+package types
+
+import (
+ "fmt"
+ "io"
+ "reflect"
+)
+
+// ScanFully uses fmt.Sscanf with verb to fully scan val into ptr.
+// It rejects input with trailing characters: a "%s" directive after the
+// value must fail with io.EOF, otherwise the value was not fully consumed.
+func ScanFully(ptr interface{}, val string, verb byte) error {
+	t := reflect.ValueOf(ptr).Elem().Type()
+	// attempt to read extra bytes to make sure the value is consumed
+	var b []byte
+	n, err := fmt.Sscanf(val, "%"+string(verb)+"%s", ptr, &b)
+	switch {
+	case n < 1 || n == 1 && err != io.EOF:
+		// Nothing scanned, or scanning stopped for a reason other than
+		// cleanly exhausting the input.
+		return fmt.Errorf("failed to parse %q as %v: %v", val, t, err)
+	case n > 1:
+		// The trailing "%s" matched: leftover characters after the value.
+		return fmt.Errorf("failed to parse %q as %v: extra characters %q", val, t, string(b))
+	}
+	// n == 1 && err == io.EOF
+	return nil
+}
--- /dev/null
+package types
+
+import (
+ "reflect"
+ "testing"
+)
+
+func TestScanFully(t *testing.T) {
+ for _, tt := range []struct {
+ val string
+ verb byte
+ res interface{}
+ ok bool
+ }{
+ {"a", 'v', int(0), false},
+ {"0x", 'v', int(0), true},
+ {"0x", 'd', int(0), false},
+ } {
+ d := reflect.New(reflect.TypeOf(tt.res)).Interface()
+ err := ScanFully(d, tt.val, tt.verb)
+ switch {
+ case tt.ok && err != nil:
+ t.Errorf("ScanFully(%T, %q, '%c'): want ok, got error %v",
+ d, tt.val, tt.verb, err)
+ case !tt.ok && err == nil:
+ t.Errorf("ScanFully(%T, %q, '%c'): want error, got %v",
+ d, tt.val, tt.verb, elem(d))
+ case tt.ok && err == nil && !reflect.DeepEqual(tt.res, elem(d)):
+ t.Errorf("ScanFully(%T, %q, '%c'): want %v, got %v",
+ d, tt.val, tt.verb, tt.res, elem(d))
+ default:
+ t.Logf("ScanFully(%T, %q, '%c') = %v; *ptr==%v",
+ d, tt.val, tt.verb, err, elem(d))
+ }
+ }
+}
--- /dev/null
+// Copyright 2015 The Sconf Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package internal // import "gopkg.in/sconf/internal.v0/internal-"
+
+import (
+ "gopkg.in/sconf/internal.v0/internal-/gcfg"
+)
+
+// Struct wraps a pointer to a user-defined configuration struct that
+// gcfg can populate.
+type Struct struct {
+	Ptr interface{} // pointer to the struct to read configuration into
+}
+
+// Read reads each source in order and parses its contents into s.Ptr
+// using gcfg. Later sources overlay earlier ones; the first error
+// (from reading or parsing) aborts and is returned.
+func (s Struct) Read(readers ...IdemReader) error {
+	for _, r := range readers {
+		b, err := r.Bytes()
+		if err != nil {
+			return err
+		}
+		err = gcfg.ReadStringInto(s.Ptr, string(b))
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// IdemReader is a repeatable (idempotent) source of configuration bytes.
+type IdemReader interface {
+	Bytes() ([]byte, error)
+}
+
+// ErrIdemReader is an IdemReader that always fails with a fixed error;
+// it lets constructors defer error reporting until Read is called.
+type ErrIdemReader struct {
+	Err error
+}
+
+// Bytes always returns the stored error.
+func (e ErrIdemReader) Bytes() ([]byte, error) {
+	return nil, e.Err
+}
+
+// BytesIdemReader is an IdemReader over an in-memory byte slice.
+type BytesIdemReader []byte
+
+// Bytes returns the slice itself, never an error.
+func (b BytesIdemReader) Bytes() ([]byte, error) {
+	return b, nil
+}
--- /dev/null
+language: go
+
+install:
+ - mkdir -p $HOME/gopath/src/gopkg.in/sconf
+ - mv $HOME/gopath/src/github.com/sconf/sconf $HOME/gopath/src/gopkg.in/sconf/sconf.v0
+ - cd $HOME/gopath/src/gopkg.in/sconf/sconf.v0
+ - go get -t ./...
+ - env
--- /dev/null
+Copyright (c) 2015 The Sconf Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of the authors nor the names of
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--- /dev/null
+sconf: structured configuration (previously gcfg)
+https://godoc.org/gopkg.in/sconf/sconf.v0
+
+[![Build Status](https://travis-ci.org/sconf/sconf.svg?branch=master)](https://travis-ci.org/sconf/sconf)
--- /dev/null
+// Copyright 2015 The Sconf Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sconf_test
+
+import (
+ "fmt"
+
+ "gopkg.in/sconf/ini.v0"
+ "gopkg.in/sconf/sconf.v0"
+)
+
+func Example() {
+ var cfg = struct {
+ Main struct {
+ Url string
+ }
+ }{}
+ sconf.Must(&cfg).Read(ini.File("testdata/example.ini"))
+ fmt.Println(cfg.Main.Url)
+ // Output: http://localhost/
+}
--- /dev/null
+// Copyright 2015 The Sconf Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sconf // import "gopkg.in/sconf/sconf.v0"
+
+import (
+ "gopkg.in/sconf/internal.v0/internal-"
+ "gopkg.in/sconf/internal.v0/internal-/gcfg"
+)
+
+// Must wraps ptr (a pointer to a configuration struct) for reading.
+// It eagerly runs gcfg over an empty input, which validates the struct
+// shape up front: an unsuitable ptr panics here rather than later,
+// while the empty string can produce no parse error.
+func Must(ptr interface{}) internal.Struct {
+	_ = gcfg.ReadStringInto(ptr, "") // may panic but no parse error
+	return internal.Struct{Ptr: ptr}
+}
--- /dev/null
+// Copyright 2015 The Sconf Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sconf_test
+
+import (
+ "encoding"
+ "fmt"
+ "math/big"
+ "os"
+ "reflect"
+ "testing"
+
+ "gopkg.in/sconf/ini.v0"
+ "gopkg.in/sconf/sconf.v0"
+)
+
+const (
+ // 64 spaces
+ sp64 = " "
+ // 512 spaces
+ sp512 = sp64 + sp64 + sp64 + sp64 + sp64 + sp64 + sp64 + sp64
+ // 4096 spaces
+ sp4096 = sp512 + sp512 + sp512 + sp512 + sp512 + sp512 + sp512 + sp512
+)
+
+type cBasic struct {
+ Section cBasicS1
+ Hyphen_In_Section cBasicS2
+ unexported cBasicS1
+ Exported cBasicS3
+ TagName cBasicS1 `gcfg:"tag-name"`
+}
+type cBasicS1 struct {
+ Name string
+ Int int
+ PName *string
+}
+type cBasicS2 struct {
+ Hyphen_In_Name string
+}
+type cBasicS3 struct {
+ unexported string
+}
+
+type nonMulti []string
+
+type unmarshalable string
+
+func (u *unmarshalable) UnmarshalText(text []byte) error {
+ s := string(text)
+ if s == "error" {
+ return fmt.Errorf("%s", s)
+ }
+ *u = unmarshalable(s)
+ return nil
+}
+
+var _ encoding.TextUnmarshaler = new(unmarshalable)
+
+type cUni struct {
+ X甲 cUniS1
+ XSection cUniS2
+}
+type cUniS1 struct {
+ X乙 string
+}
+type cUniS2 struct {
+ XName string
+}
+
+type cMulti struct {
+ M1 cMultiS1
+ M2 cMultiS2
+ M3 cMultiS3
+}
+type cMultiS1 struct{ Multi []string }
+type cMultiS2 struct{ NonMulti nonMulti }
+type cMultiS3 struct{ MultiInt []int }
+
+type cSubs struct{ Sub map[string]*cSubsS1 }
+type cSubsS1 struct{ Name string }
+
+type cBool struct{ Section cBoolS1 }
+type cBoolS1 struct{ Bool bool }
+
+type cTxUnm struct{ Section cTxUnmS1 }
+type cTxUnmS1 struct{ Name unmarshalable }
+
+type cNum struct {
+ N1 cNumS1
+ N2 cNumS2
+ N3 cNumS3
+}
+type cNumS1 struct {
+ Int int
+ IntDHO int `gcfg:",int=dho"`
+ Big *big.Int
+}
+type cNumS2 struct {
+ MultiInt []int
+ MultiBig []*big.Int
+}
+type cNumS3 struct{ FileMode os.FileMode }
+type readtest struct {
+ gcfg string
+ exp interface{}
+ ok bool
+}
+
+func newString(s string) *string {
+ return &s
+}
+
+var readtests = []struct {
+ group string
+ tests []readtest
+}{{"scanning", []readtest{
+ {"[section]\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
+ // hyphen in name
+ {"[hyphen-in-section]\nhyphen-in-name=value", &cBasic{Hyphen_In_Section: cBasicS2{Hyphen_In_Name: "value"}}, true},
+ // quoted string value
+ {"[section]\nname=\"\"", &cBasic{Section: cBasicS1{Name: ""}}, true},
+ {"[section]\nname=\" \"", &cBasic{Section: cBasicS1{Name: " "}}, true},
+ {"[section]\nname=\"value\"", &cBasic{Section: cBasicS1{Name: "value"}}, true},
+ {"[section]\nname=\" value \"", &cBasic{Section: cBasicS1{Name: " value "}}, true},
+ {"\n[section]\nname=\"va ; lue\"", &cBasic{Section: cBasicS1{Name: "va ; lue"}}, true},
+ {"[section]\nname=\"val\" \"ue\"", &cBasic{Section: cBasicS1{Name: "val ue"}}, true},
+ {"[section]\nname=\"value", &cBasic{}, false},
+ // escape sequences
+ {"[section]\nname=\"va\\\\lue\"", &cBasic{Section: cBasicS1{Name: "va\\lue"}}, true},
+ {"[section]\nname=\"va\\\"lue\"", &cBasic{Section: cBasicS1{Name: "va\"lue"}}, true},
+ {"[section]\nname=\"va\\nlue\"", &cBasic{Section: cBasicS1{Name: "va\nlue"}}, true},
+ {"[section]\nname=\"va\\tlue\"", &cBasic{Section: cBasicS1{Name: "va\tlue"}}, true},
+ {"\n[section]\nname=\\", &cBasic{}, false},
+ {"\n[section]\nname=\\a", &cBasic{}, false},
+ {"\n[section]\nname=\"val\\a\"", &cBasic{}, false},
+ {"\n[section]\nname=val\\", &cBasic{}, false},
+ {"\n[sub \"A\\\n\"]\nname=value", &cSubs{}, false},
+ {"\n[sub \"A\\\t\"]\nname=value", &cSubs{}, false},
+ // broken line
+ {"[section]\nname=value \\\n value", &cBasic{Section: cBasicS1{Name: "value value"}}, true},
+ {"[section]\nname=\"value \\\n value\"", &cBasic{}, false},
+}}, {"scanning:whitespace", []readtest{
+ {" \n[section]\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
+ {" [section]\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
+ {"\t[section]\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
+ {"[ section]\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
+ {"[section ]\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
+ {"[section]\n name=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
+ {"[section]\nname =value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
+ {"[section]\nname= value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
+ {"[section]\nname=value ", &cBasic{Section: cBasicS1{Name: "value"}}, true},
+ {"[section]\r\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
+ {"[section]\r\nname=value\r\n", &cBasic{Section: cBasicS1{Name: "value"}}, true},
+ {";cmnt\r\n[section]\r\nname=value\r\n", &cBasic{Section: cBasicS1{Name: "value"}}, true},
+ // long lines
+ {sp4096 + "[section]\nname=value\n", &cBasic{Section: cBasicS1{Name: "value"}}, true},
+ {"[" + sp4096 + "section]\nname=value\n", &cBasic{Section: cBasicS1{Name: "value"}}, true},
+ {"[section" + sp4096 + "]\nname=value\n", &cBasic{Section: cBasicS1{Name: "value"}}, true},
+ {"[section]" + sp4096 + "\nname=value\n", &cBasic{Section: cBasicS1{Name: "value"}}, true},
+ {"[section]\n" + sp4096 + "name=value\n", &cBasic{Section: cBasicS1{Name: "value"}}, true},
+ {"[section]\nname" + sp4096 + "=value\n", &cBasic{Section: cBasicS1{Name: "value"}}, true},
+ {"[section]\nname=" + sp4096 + "value\n", &cBasic{Section: cBasicS1{Name: "value"}}, true},
+ {"[section]\nname=value\n" + sp4096, &cBasic{Section: cBasicS1{Name: "value"}}, true},
+}}, {"scanning:comments", []readtest{
+ {"; cmnt\n[section]\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
+ {"# cmnt\n[section]\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
+ {" ; cmnt\n[section]\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
+ {"\t; cmnt\n[section]\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
+ {"\n[section]; cmnt\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
+ {"\n[section] ; cmnt\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
+ {"\n[section]\nname=value; cmnt", &cBasic{Section: cBasicS1{Name: "value"}}, true},
+ {"\n[section]\nname=value ; cmnt", &cBasic{Section: cBasicS1{Name: "value"}}, true},
+ {"\n[section]\nname=\"value\" ; cmnt", &cBasic{Section: cBasicS1{Name: "value"}}, true},
+ {"\n[section]\nname=value ; \"cmnt", &cBasic{Section: cBasicS1{Name: "value"}}, true},
+ {"\n[section]\nname=\"va ; lue\" ; cmnt", &cBasic{Section: cBasicS1{Name: "va ; lue"}}, true},
+ {"\n[section]\nname=; cmnt", &cBasic{Section: cBasicS1{Name: ""}}, true},
+}}, {"scanning:subsections", []readtest{
+ {"\n[sub \"A\"]\nname=value", &cSubs{map[string]*cSubsS1{"A": &cSubsS1{"value"}}}, true},
+ {"\n[sub \"b\"]\nname=value", &cSubs{map[string]*cSubsS1{"b": &cSubsS1{"value"}}}, true},
+ {"\n[sub \"A\\\\\"]\nname=value", &cSubs{map[string]*cSubsS1{"A\\": &cSubsS1{"value"}}}, true},
+ {"\n[sub \"A\\\"\"]\nname=value", &cSubs{map[string]*cSubsS1{"A\"": &cSubsS1{"value"}}}, true},
+}}, {"syntax", []readtest{
+ // invalid line
+ {"\n[section]\n=", &cBasic{}, false},
+ // no section
+ {"name=value", &cBasic{}, false},
+ // empty section
+ {"\n[]\nname=value", &cBasic{}, false},
+ // empty subsection
+ {"\n[sub \"\"]\nname=value", &cSubs{}, false},
+}}, {"setting", []readtest{
+ {"[section]\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
+ // pointer
+ {"[section]", &cBasic{Section: cBasicS1{PName: nil}}, true},
+ {"[section]\npname=value", &cBasic{Section: cBasicS1{PName: newString("value")}}, true},
+ // section name not matched
+ {"\n[nonexistent]\nname=value", &cBasic{}, false},
+ // subsection name not matched
+ {"\n[section \"nonexistent\"]\nname=value", &cBasic{}, false},
+ // variable name not matched
+ {"\n[section]\nnonexistent=value", &cBasic{}, false},
+ // hyphen in name
+ {"[hyphen-in-section]\nhyphen-in-name=value", &cBasic{Hyphen_In_Section: cBasicS2{Hyphen_In_Name: "value"}}, true},
+ // ignore unexported fields
+ {"[unexported]\nname=value", &cBasic{}, false},
+ {"[exported]\nunexported=value", &cBasic{}, false},
+ // 'X' prefix for non-upper/lower-case letters
+ {"[甲]\n乙=丙", &cUni{X甲: cUniS1{X乙: "丙"}}, true},
+ //{"[section]\nxname=value", &cBasic{XSection: cBasicS4{XName: "value"}}, false},
+ //{"[xsection]\nname=value", &cBasic{XSection: cBasicS4{XName: "value"}}, false},
+ // name specified as struct tag
+ {"[tag-name]\nname=value", &cBasic{TagName: cBasicS1{Name: "value"}}, true},
+}}, {"multivalue", []readtest{
+ // unnamed slice type: treat as multi-value
+ {"\n[m1]", &cMulti{M1: cMultiS1{}}, true},
+ {"\n[m1]\nmulti=value", &cMulti{M1: cMultiS1{[]string{"value"}}}, true},
+ {"\n[m1]\nmulti=value1\nmulti=value2", &cMulti{M1: cMultiS1{[]string{"value1", "value2"}}}, true},
+ // "blank" empties multi-valued slice -- here same result as above
+ {"\n[m1]\nmulti\nmulti=value1\nmulti=value2", &cMulti{M1: cMultiS1{[]string{"value1", "value2"}}}, true},
+ // named slice type: do not treat as multi-value
+ {"\n[m2]", &cMulti{}, true},
+ {"\n[m2]\nmulti=value", &cMulti{}, false},
+ {"\n[m2]\nmulti=value1\nmulti=value2", &cMulti{}, false},
+}}, {"type:string", []readtest{
+ {"[section]\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
+ {"[section]\nname=", &cBasic{Section: cBasicS1{Name: ""}}, true},
+}}, {"type:bool", []readtest{
+ // explicit values
+ {"[section]\nbool=true", &cBool{cBoolS1{true}}, true},
+ {"[section]\nbool=yes", &cBool{cBoolS1{true}}, true},
+ {"[section]\nbool=on", &cBool{cBoolS1{true}}, true},
+ {"[section]\nbool=1", &cBool{cBoolS1{true}}, true},
+ {"[section]\nbool=tRuE", &cBool{cBoolS1{true}}, true},
+ {"[section]\nbool=false", &cBool{cBoolS1{false}}, true},
+ {"[section]\nbool=no", &cBool{cBoolS1{false}}, true},
+ {"[section]\nbool=off", &cBool{cBoolS1{false}}, true},
+ {"[section]\nbool=0", &cBool{cBoolS1{false}}, true},
+ {"[section]\nbool=NO", &cBool{cBoolS1{false}}, true},
+ // "blank" value handled as true
+ {"[section]\nbool", &cBool{cBoolS1{true}}, true},
+ // bool parse errors
+ {"[section]\nbool=maybe", &cBool{}, false},
+ {"[section]\nbool=t", &cBool{}, false},
+ {"[section]\nbool=truer", &cBool{}, false},
+ {"[section]\nbool=2", &cBool{}, false},
+ {"[section]\nbool=-1", &cBool{}, false},
+}}, {"type:numeric", []readtest{
+ {"[section]\nint=0", &cBasic{Section: cBasicS1{Int: 0}}, true},
+ {"[section]\nint=1", &cBasic{Section: cBasicS1{Int: 1}}, true},
+ {"[section]\nint=-1", &cBasic{Section: cBasicS1{Int: -1}}, true},
+ {"[section]\nint=0.2", &cBasic{}, false},
+ {"[section]\nint=1e3", &cBasic{}, false},
+ // primitive [u]int(|8|16|32|64) and big.Int is parsed as dec or hex (not octal)
+ {"[n1]\nint=010", &cNum{N1: cNumS1{Int: 10}}, true},
+ {"[n1]\nint=0x10", &cNum{N1: cNumS1{Int: 0x10}}, true},
+ {"[n1]\nbig=1", &cNum{N1: cNumS1{Big: big.NewInt(1)}}, true},
+ {"[n1]\nbig=0x10", &cNum{N1: cNumS1{Big: big.NewInt(0x10)}}, true},
+ {"[n1]\nbig=010", &cNum{N1: cNumS1{Big: big.NewInt(10)}}, true},
+ {"[n2]\nmultiint=010", &cNum{N2: cNumS2{MultiInt: []int{10}}}, true},
+ {"[n2]\nmultibig=010", &cNum{N2: cNumS2{MultiBig: []*big.Int{big.NewInt(10)}}}, true},
+ // set parse mode for int types via struct tag
+ {"[n1]\nintdho=010", &cNum{N1: cNumS1{IntDHO: 010}}, true},
+ // octal allowed for named type
+ {"[n3]\nfilemode=0777", &cNum{N3: cNumS3{FileMode: 0777}}, true},
+}}, {"type:textUnmarshaler", []readtest{
+ {"[section]\nname=value", &cTxUnm{Section: cTxUnmS1{Name: "value"}}, true},
+ {"[section]\nname=error", &cTxUnm{}, false},
+}},
+}
+
+func TestReadStringInto(t *testing.T) {
+ for _, tg := range readtests {
+ for i, tt := range tg.tests {
+ id := fmt.Sprintf("%s:%d", tg.group, i)
+ testRead(t, id, tt)
+ }
+ }
+}
+
+func TestReadStringIntoMultiBlankPreset(t *testing.T) {
+ tt := readtest{"\n[m1]\nmulti\nmulti=value1\nmulti=value2", &cMulti{M1: cMultiS1{[]string{"value1", "value2"}}}, true}
+ cfg := &cMulti{M1: cMultiS1{[]string{"preset1", "preset2"}}}
+ testReadInto(t, "multi:blank", tt, cfg)
+}
+
+// testRead runs one readtest case against a freshly-allocated zero value
+// of the expected result's type, so cases cannot contaminate each other.
+func testRead(t *testing.T, id string, tt readtest) {
+	// get the type of the expected result
+	restyp := reflect.TypeOf(tt.exp).Elem()
+	// create a new instance to hold the actual result
+	res := reflect.New(restyp).Interface()
+	testReadInto(t, id, tt, res)
+}
+
+// testReadInto parses tt.gcfg into res (which may carry preset values)
+// and checks the outcome against the case's expectations: tt.ok cases
+// must parse without error and deep-equal tt.exp; !tt.ok cases must fail.
+// Passing cases are logged unless -test.short is set.
+func testReadInto(t *testing.T, id string, tt readtest, res interface{}) {
+	err := sconf.Must(res).Read(ini.Text(tt.gcfg))
+	if tt.ok {
+		if err != nil {
+			t.Errorf("%s fail: got error %v, wanted ok", id, err)
+			return
+		} else if !reflect.DeepEqual(res, tt.exp) {
+			t.Errorf("%s fail: got value %#v, wanted value %#v", id, res, tt.exp)
+			return
+		}
+		if !testing.Short() {
+			t.Logf("%s pass: got value %#v", id, res)
+		}
+	} else { // !tt.ok
+		if err == nil {
+			t.Errorf("%s fail: got value %#v, wanted error", id, res)
+			return
+		}
+		if !testing.Short() {
+			t.Logf("%s pass: got error %v", id, err)
+		}
+	}
+}
+
+func TestReadFileInto(t *testing.T) {
+ res := &struct{ Section struct{ Name string } }{}
+ err := sconf.Must(res).Read(ini.File("testdata/gcfg_test.gcfg"))
+ if err != nil {
+ t.Errorf(err.Error())
+ }
+ if "value" != res.Section.Name {
+ t.Errorf("got %q, wanted %q", res.Section.Name, "value")
+ }
+}
+
+func TestReadFileIntoUnicode(t *testing.T) {
+ res := &struct{ X甲 struct{ X乙 string } }{}
+ err := sconf.Must(res).Read(ini.File("testdata/gcfg_unicode_test.gcfg"))
+ if err != nil {
+ t.Errorf(err.Error())
+ }
+ if "丙" != res.X甲.X乙 {
+ t.Errorf("got %q, wanted %q", res.X甲.X乙, "丙")
+ }
+}
--- /dev/null
+[main]
+url=http://localhost/
--- /dev/null
+; Comment line
+[section]
+name=value # comment
--- /dev/null
+; Comment line
+[甲]
+乙=丙 # comment