go sender(c, 100000)
receiver(c, dummy, 100000)
runtime.GC()
- runtime.MemStats.Alloc = 0
+ memstats := new(runtime.MemStats)
+ runtime.ReadMemStats(memstats)
+ alloc := memstats.Alloc
// second time shouldn't increase footprint by much
go sender(c, 100000)
receiver(c, dummy, 100000)
runtime.GC()
+ runtime.ReadMemStats(memstats)
- if runtime.MemStats.Alloc > 1e5 {
- println("BUG: too much memory for 100,000 selects:", runtime.MemStats.Alloc)
+ if memstats.Alloc-alloc > 1e5 {
+ println("BUG: too much memory for 100,000 selects:", memstats.Alloc-alloc)
}
}
func main() {
const N = 10000
- st := runtime.MemStats
+ st := new(runtime.MemStats)
+ memstats := new(runtime.MemStats)
+ runtime.ReadMemStats(st)
for i := 0; i < N; i++ {
c := make(chan int, 10)
_ = c
}
}
- runtime.UpdateMemStats()
- obj := runtime.MemStats.HeapObjects - st.HeapObjects
+ runtime.ReadMemStats(memstats)
+ obj := memstats.HeapObjects - st.HeapObjects
if obj > N/5 {
fmt.Println("too many objects left:", obj)
os.Exit(1)
var chatty = flag.Bool("v", false, "chatty")
func main() {
+ memstats := new(runtime.MemStats)
runtime.Free(runtime.Alloc(1))
- runtime.UpdateMemStats()
+ runtime.ReadMemStats(memstats)
if *chatty {
- fmt.Printf("%+v %v\n", runtime.MemStats, uint64(0))
+ fmt.Printf("%+v %v\n", memstats, uint64(0))
}
}
var allocated uint64
func bigger() {
- runtime.UpdateMemStats()
- if f := runtime.MemStats.Sys; footprint < f {
+ memstats := new(runtime.MemStats)
+ runtime.ReadMemStats(memstats)
+ if f := memstats.Sys; footprint < f {
footprint = f
if *chatty {
println("Footprint", footprint, " for ", allocated)
var chatty = flag.Bool("v", false, "chatty")
var oldsys uint64
+var memstats runtime.MemStats
func bigger() {
- runtime.UpdateMemStats()
- if st := runtime.MemStats; oldsys < st.Sys {
+ st := &memstats
+ runtime.ReadMemStats(st)
+ if oldsys < st.Sys {
oldsys = st.Sys
if *chatty {
println(st.Sys, " system bytes for ", st.Alloc, " Go bytes")
}
func main() {
- runtime.GC() // clean up garbage from init
- runtime.UpdateMemStats() // first call can do some allocations
- runtime.MemProfileRate = 0 // disable profiler
- runtime.MemStats.Alloc = 0 // ignore stacks
+ runtime.GC() // clean up garbage from init
+ runtime.ReadMemStats(&memstats) // first call can do some allocations
+ runtime.MemProfileRate = 0 // disable profiler
+ stacks := memstats.Alloc // ignore stacks
flag.Parse()
for i := 0; i < 1<<7; i++ {
for j := 1; j <= 1<<22; j <<= 1 {
if i == 0 && *chatty {
println("First alloc:", j)
}
- if a := runtime.MemStats.Alloc; a != 0 {
+ if a := memstats.Alloc - stacks; a != 0 {
println("no allocations but stats report", a, "bytes allocated")
panic("fail")
}
b := runtime.Alloc(uintptr(j))
- runtime.UpdateMemStats()
- during := runtime.MemStats.Alloc
+ runtime.ReadMemStats(&memstats)
+ during := memstats.Alloc - stacks
runtime.Free(b)
- runtime.UpdateMemStats()
- if a := runtime.MemStats.Alloc; a != 0 {
+ runtime.ReadMemStats(&memstats)
+ if a := memstats.Alloc - stacks; a != 0 {
println("allocated ", j, ": wrong stats: during=", during, " after=", a, " (want 0)")
panic("fail")
}
var longtest = flag.Bool("l", false, "long test")
var b []*byte
-var stats = &runtime.MemStats
+var stats = new(runtime.MemStats)
func OkAmount(size, n uintptr) bool {
if n < size {
if *chatty {
fmt.Printf("size=%d count=%d ...\n", size, count)
}
- runtime.UpdateMemStats()
+ runtime.ReadMemStats(stats)
n1 := stats.Alloc
for i := 0; i < count; i++ {
b[i] = runtime.Alloc(uintptr(size))
println("lookup failed: got", base, n, "for", b[i])
panic("fail")
}
- runtime.UpdateMemStats()
+ runtime.ReadMemStats(stats)
if stats.Sys > 1e9 {
println("too much memory allocated")
panic("fail")
}
}
- runtime.UpdateMemStats()
+ runtime.ReadMemStats(stats)
n2 := stats.Alloc
if *chatty {
fmt.Printf("size=%d count=%d stats=%+v\n", size, count, *stats)
panic("fail")
}
runtime.Free(b[i])
- runtime.UpdateMemStats()
+ runtime.ReadMemStats(stats)
if stats.Alloc != uint64(alloc-n) {
println("free alloc got", stats.Alloc, "expected", alloc-n, "after free of", n)
panic("fail")
}
- if runtime.MemStats.Sys > 1e9 {
+ if stats.Sys > 1e9 {
println("too much memory allocated")
panic("fail")
}
}
- runtime.UpdateMemStats()
+ runtime.ReadMemStats(stats)
n4 := stats.Alloc
if *chatty {
-1107a7d3cb07
+52ba9506bd99
The first line of this file holds the Mercurial revision number of the
last merge done from the master library sources.
$(exp_inotify_gox) \
exp/norm.gox \
exp/proxy.gox \
+ exp/signal.gox \
exp/terminal.gox \
exp/types.gox \
exp/utf8string.gox
toolexeclibgoimagedir = $(toolexeclibgodir)/image
toolexeclibgoimage_DATA = \
- image/bmp.gox \
image/color.gox \
image/draw.gox \
image/gif.gox \
image/jpeg.gox \
- image/png.gox \
- image/tiff.gox
+ image/png.gox
toolexeclibgoindexdir = $(toolexeclibgodir)/index
toolexeclibgoos_DATA = \
os/exec.gox \
- os/user.gox \
- os/signal.gox
+ os/user.gox
toolexeclibgopathdir = $(toolexeclibgodir)/path
go/crypto/cipher/cipher.go \
go/crypto/cipher/ctr.go \
go/crypto/cipher/io.go \
- go/crypto/cipher/ocfb.go \
go/crypto/cipher/ofb.go
go_crypto_des_files = \
go/crypto/des/block.go \
go/exp/proxy/per_host.go \
go/exp/proxy/proxy.go \
go/exp/proxy/socks5.go
+go_exp_signal_files = \
+ go/exp/signal/signal.go
go_exp_terminal_files = \
go/exp/terminal/terminal.go \
go/exp/terminal/util.go
go/html/template/transition.go \
go/html/template/url.go
-go_image_bmp_files = \
- go/image/bmp/reader.go
-
go_image_color_files = \
go/image/color/color.go \
go/image/color/ycbcr.go
go/image/png/reader.go \
go/image/png/writer.go
-go_image_tiff_files = \
- go/image/tiff/buffer.go \
- go/image/tiff/compress.go \
- go/image/tiff/consts.go \
- go/image/tiff/reader.go
-
go_index_suffixarray_files = \
go/index/suffixarray/qsufsort.go \
go/index/suffixarray/suffixarray.go
go/os/user/user.go \
go/os/user/lookup_unix.go
-go_os_signal_files = \
- go/os/signal/signal.go
-
go_path_filepath_files = \
go/path/filepath/match.go \
go/path/filepath/path.go \
exp/html.lo \
exp/norm.lo \
exp/proxy.lo \
+ exp/signal.lo \
exp/terminal.lo \
exp/types.lo \
exp/utf8string.lo \
net/http/httptest.lo \
net/http/httputil.lo \
net/http/pprof.lo \
- image/bmp.lo \
image/color.lo \
image/draw.lo \
image/gif.lo \
image/jpeg.lo \
image/png.lo \
- image/tiff.lo \
index/suffixarray.lo \
io/ioutil.lo \
log/syslog.lo \
old/template.lo \
$(os_lib_inotify_lo) \
os/user.lo \
- os/signal.lo \
path/filepath.lo \
regexp/syntax.lo \
net/rpc/jsonrpc.lo \
@$(CHECK)
.PHONY: exp/proxy/check
+@go_include@ exp/signal.lo.dep
+exp/signal.lo.dep: $(go_exp_signal_files)
+ $(BUILDDEPS)
+exp/signal.lo: $(go_exp_signal_files)
+ $(BUILDPACKAGE)
+exp/signal/check: $(CHECK_DEPS)
+ @$(MKDIR_P) exp/signal
+ @$(CHECK)
+.PHONY: exp/signal/check
+
@go_include@ exp/terminal.lo.dep
exp/terminal.lo.dep: $(go_exp_terminal_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: hash/fnv/check
-@go_include@ image/bmp.lo.dep
-image/bmp.lo.dep: $(go_image_bmp_files)
- $(BUILDDEPS)
-image/bmp.lo: $(go_image_bmp_files)
- $(BUILDPACKAGE)
-image/bmp/check: $(CHECK_DEPS)
- @$(MKDIR_P) image/bmp
- @$(CHECK)
-.PHONY: image/bmp/check
-
@go_include@ image/color.lo.dep
image/color.lo.dep: $(go_image_color_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: image/png/check
-@go_include@ image/tiff.lo.dep
-image/tiff.lo.dep: $(go_image_tiff_files)
- $(BUILDDEPS)
-image/tiff.lo: $(go_image_tiff_files)
- $(BUILDPACKAGE)
-image/tiff/check: $(CHECK_DEPS)
- @$(MKDIR_P) image/tiff
- @$(CHECK)
-.PHONY: image/tiff/check
-
@go_include@ index/suffixarray.lo.dep
index/suffixarray.lo.dep: $(go_index_suffixarray_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: os/user/check
-@go_include@ os/signal.lo.dep
-os/signal.lo.dep: $(go_os_signal_files)
- $(BUILDDEPS)
-os/signal.lo: $(go_os_signal_files)
- $(BUILDPACKAGE)
-os/signal/check: $(CHECK_DEPS)
- @$(MKDIR_P) os/signal
- @$(CHECK)
-.PHONY: os/signal/check
-
@go_include@ path/filepath.lo.dep
path/filepath.lo.dep: $(go_path_filepath_files)
$(BUILDDEPS)
$(BUILDGOX)
exp/proxy.gox: exp/proxy.lo
$(BUILDGOX)
+exp/signal.gox: exp/signal.lo
+ $(BUILDGOX)
exp/terminal.gox: exp/terminal.lo
$(BUILDGOX)
exp/types.gox: exp/types.lo
hash/fnv.gox: hash/fnv.lo
$(BUILDGOX)
-image/bmp.gox: image/bmp.lo
- $(BUILDGOX)
image/color.gox: image/color.lo
$(BUILDGOX)
image/draw.gox: image/draw.lo
$(BUILDGOX)
image/png.gox: image/png.lo
$(BUILDGOX)
-image/tiff.gox: image/tiff.lo
- $(BUILDGOX)
index/suffixarray.gox: index/suffixarray.lo
$(BUILDGOX)
$(BUILDGOX)
os/user.gox: os/user.lo
$(BUILDGOX)
-os/signal.gox: os/signal.lo
- $(BUILDGOX)
path/filepath.gox: path/filepath.lo
$(BUILDGOX)
$(exp_inotify_check) \
exp/norm/check \
exp/proxy/check \
+ exp/signal/check \
exp/terminal/check \
exp/utf8string/check \
html/template/check \
image/draw/check \
image/jpeg/check \
image/png/check \
- image/tiff/check \
index/suffixarray/check \
io/ioutil/check \
log/syslog/check \
old/template/check \
os/exec/check \
os/user/check \
- os/signal/check \
path/filepath/check \
regexp/syntax/check \
sync/atomic/check \
encoding/base32.lo encoding/base64.lo encoding/binary.lo \
encoding/csv.lo encoding/gob.lo encoding/hex.lo \
encoding/json.lo encoding/pem.lo encoding/xml.lo exp/ebnf.lo \
- exp/html.lo exp/norm.lo exp/proxy.lo exp/terminal.lo \
- exp/types.lo exp/utf8string.lo html/template.lo go/ast.lo \
- go/build.lo go/doc.lo go/parser.lo go/printer.lo go/scanner.lo \
- go/token.lo hash/adler32.lo hash/crc32.lo hash/crc64.lo \
- hash/fnv.lo net/http/cgi.lo net/http/fcgi.lo \
- net/http/httptest.lo net/http/httputil.lo net/http/pprof.lo \
- image/bmp.lo image/color.lo image/draw.lo image/gif.lo \
- image/jpeg.lo image/png.lo image/tiff.lo index/suffixarray.lo \
- io/ioutil.lo log/syslog.lo log/syslog/syslog_c.lo math/big.lo \
- math/cmplx.lo math/rand.lo mime/mime.lo mime/multipart.lo \
- net/http.lo net/mail.lo net/rpc.lo net/smtp.lo \
- net/textproto.lo net/url.lo old/netchan.lo old/regexp.lo \
- old/template.lo $(am__DEPENDENCIES_1) os/user.lo os/signal.lo \
- path/filepath.lo regexp/syntax.lo net/rpc/jsonrpc.lo \
- runtime/debug.lo runtime/pprof.lo sync/atomic.lo \
- sync/atomic_c.lo syscall/syscall.lo syscall/errno.lo \
- syscall/wait.lo text/scanner.lo text/tabwriter.lo \
- text/template.lo text/template/parse.lo testing/testing.lo \
- testing/iotest.lo testing/quick.lo testing/script.lo \
- unicode/utf16.lo unicode/utf8.lo
+ exp/html.lo exp/norm.lo exp/proxy.lo exp/signal.lo \
+ exp/terminal.lo exp/types.lo exp/utf8string.lo \
+ html/template.lo go/ast.lo go/build.lo go/doc.lo go/parser.lo \
+ go/printer.lo go/scanner.lo go/token.lo hash/adler32.lo \
+ hash/crc32.lo hash/crc64.lo hash/fnv.lo net/http/cgi.lo \
+ net/http/fcgi.lo net/http/httptest.lo net/http/httputil.lo \
+ net/http/pprof.lo image/color.lo image/draw.lo image/gif.lo \
+ image/jpeg.lo image/png.lo index/suffixarray.lo io/ioutil.lo \
+ log/syslog.lo log/syslog/syslog_c.lo math/big.lo math/cmplx.lo \
+ math/rand.lo mime/mime.lo mime/multipart.lo net/http.lo \
+ net/mail.lo net/rpc.lo net/smtp.lo net/textproto.lo net/url.lo \
+ old/netchan.lo old/regexp.lo old/template.lo \
+ $(am__DEPENDENCIES_1) os/user.lo path/filepath.lo \
+ regexp/syntax.lo net/rpc/jsonrpc.lo runtime/debug.lo \
+ runtime/pprof.lo sync/atomic.lo sync/atomic_c.lo \
+ syscall/syscall.lo syscall/errno.lo syscall/wait.lo \
+ text/scanner.lo text/tabwriter.lo text/template.lo \
+ text/template/parse.lo testing/testing.lo testing/iotest.lo \
+ testing/quick.lo testing/script.lo unicode/utf16.lo \
+ unicode/utf8.lo
libgo_la_DEPENDENCIES = $(am__DEPENDENCIES_2) $(am__DEPENDENCIES_1) \
$(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \
$(am__DEPENDENCIES_1)
$(exp_inotify_gox) \
exp/norm.gox \
exp/proxy.gox \
+ exp/signal.gox \
exp/terminal.gox \
exp/types.gox \
exp/utf8string.gox
toolexeclibgoimagedir = $(toolexeclibgodir)/image
toolexeclibgoimage_DATA = \
- image/bmp.gox \
image/color.gox \
image/draw.gox \
image/gif.gox \
image/jpeg.gox \
- image/png.gox \
- image/tiff.gox
+ image/png.gox
toolexeclibgoindexdir = $(toolexeclibgodir)/index
toolexeclibgoindex_DATA = \
toolexeclibgoosdir = $(toolexeclibgodir)/os
toolexeclibgoos_DATA = \
os/exec.gox \
- os/user.gox \
- os/signal.gox
+ os/user.gox
toolexeclibgopathdir = $(toolexeclibgodir)/path
toolexeclibgopath_DATA = \
go/crypto/cipher/cipher.go \
go/crypto/cipher/ctr.go \
go/crypto/cipher/io.go \
- go/crypto/cipher/ocfb.go \
go/crypto/cipher/ofb.go
go_crypto_des_files = \
go/exp/proxy/proxy.go \
go/exp/proxy/socks5.go
+go_exp_signal_files = \
+ go/exp/signal/signal.go
+
go_exp_terminal_files = \
go/exp/terminal/terminal.go \
go/exp/terminal/util.go
go/html/template/transition.go \
go/html/template/url.go
-go_image_bmp_files = \
- go/image/bmp/reader.go
-
go_image_color_files = \
go/image/color/color.go \
go/image/color/ycbcr.go
go/image/png/reader.go \
go/image/png/writer.go
-go_image_tiff_files = \
- go/image/tiff/buffer.go \
- go/image/tiff/compress.go \
- go/image/tiff/consts.go \
- go/image/tiff/reader.go
-
go_index_suffixarray_files = \
go/index/suffixarray/qsufsort.go \
go/index/suffixarray/suffixarray.go
go/os/user/user.go \
go/os/user/lookup_unix.go
-go_os_signal_files = \
- go/os/signal/signal.go
-
go_path_filepath_files = \
go/path/filepath/match.go \
go/path/filepath/path.go \
exp/html.lo \
exp/norm.lo \
exp/proxy.lo \
+ exp/signal.lo \
exp/terminal.lo \
exp/types.lo \
exp/utf8string.lo \
net/http/httptest.lo \
net/http/httputil.lo \
net/http/pprof.lo \
- image/bmp.lo \
image/color.lo \
image/draw.lo \
image/gif.lo \
image/jpeg.lo \
image/png.lo \
- image/tiff.lo \
index/suffixarray.lo \
io/ioutil.lo \
log/syslog.lo \
old/template.lo \
$(os_lib_inotify_lo) \
os/user.lo \
- os/signal.lo \
path/filepath.lo \
regexp/syntax.lo \
net/rpc/jsonrpc.lo \
$(exp_inotify_check) \
exp/norm/check \
exp/proxy/check \
+ exp/signal/check \
exp/terminal/check \
exp/utf8string/check \
html/template/check \
image/draw/check \
image/jpeg/check \
image/png/check \
- image/tiff/check \
index/suffixarray/check \
io/ioutil/check \
log/syslog/check \
old/template/check \
os/exec/check \
os/user/check \
- os/signal/check \
path/filepath/check \
regexp/syntax/check \
sync/atomic/check \
@$(CHECK)
.PHONY: exp/proxy/check
+@go_include@ exp/signal.lo.dep
+exp/signal.lo.dep: $(go_exp_signal_files)
+ $(BUILDDEPS)
+exp/signal.lo: $(go_exp_signal_files)
+ $(BUILDPACKAGE)
+exp/signal/check: $(CHECK_DEPS)
+ @$(MKDIR_P) exp/signal
+ @$(CHECK)
+.PHONY: exp/signal/check
+
@go_include@ exp/terminal.lo.dep
exp/terminal.lo.dep: $(go_exp_terminal_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: hash/fnv/check
-@go_include@ image/bmp.lo.dep
-image/bmp.lo.dep: $(go_image_bmp_files)
- $(BUILDDEPS)
-image/bmp.lo: $(go_image_bmp_files)
- $(BUILDPACKAGE)
-image/bmp/check: $(CHECK_DEPS)
- @$(MKDIR_P) image/bmp
- @$(CHECK)
-.PHONY: image/bmp/check
-
@go_include@ image/color.lo.dep
image/color.lo.dep: $(go_image_color_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: image/png/check
-@go_include@ image/tiff.lo.dep
-image/tiff.lo.dep: $(go_image_tiff_files)
- $(BUILDDEPS)
-image/tiff.lo: $(go_image_tiff_files)
- $(BUILDPACKAGE)
-image/tiff/check: $(CHECK_DEPS)
- @$(MKDIR_P) image/tiff
- @$(CHECK)
-.PHONY: image/tiff/check
-
@go_include@ index/suffixarray.lo.dep
index/suffixarray.lo.dep: $(go_index_suffixarray_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: os/user/check
-@go_include@ os/signal.lo.dep
-os/signal.lo.dep: $(go_os_signal_files)
- $(BUILDDEPS)
-os/signal.lo: $(go_os_signal_files)
- $(BUILDPACKAGE)
-os/signal/check: $(CHECK_DEPS)
- @$(MKDIR_P) os/signal
- @$(CHECK)
-.PHONY: os/signal/check
-
@go_include@ path/filepath.lo.dep
path/filepath.lo.dep: $(go_path_filepath_files)
$(BUILDDEPS)
$(BUILDGOX)
exp/proxy.gox: exp/proxy.lo
$(BUILDGOX)
+exp/signal.gox: exp/signal.lo
+ $(BUILDGOX)
exp/terminal.gox: exp/terminal.lo
$(BUILDGOX)
exp/types.gox: exp/types.lo
hash/fnv.gox: hash/fnv.lo
$(BUILDGOX)
-image/bmp.gox: image/bmp.lo
- $(BUILDGOX)
image/color.gox: image/color.lo
$(BUILDGOX)
image/draw.gox: image/draw.lo
$(BUILDGOX)
image/png.gox: image/png.lo
$(BUILDGOX)
-image/tiff.gox: image/tiff.lo
- $(BUILDGOX)
index/suffixarray.gox: index/suffixarray.lo
$(BUILDGOX)
$(BUILDGOX)
os/user.gox: os/user.lo
$(BUILDGOX)
-os/signal.gox: os/signal.lo
- $(BUILDGOX)
path/filepath.gox: path/filepath.lo
$(BUILDGOX)
}
// Open returns a ReadCloser that provides access to the File's contents.
-// It is safe to Open and Read from files concurrently.
+// Multiple files may be read concurrently.
func (f *File) Open() (rc io.ReadCloser, err error) {
bodyOffset, err := f.findBodyOffset()
if err != nil {
},
},
},
- {Name: "readme.zip"},
- {Name: "readme.notzip", Error: ErrFormat},
+ {
+ Name: "symlink.zip",
+ File: []ZipTestFile{
+ {
+ Name: "symlink",
+ Content: []byte("../target"),
+ Mode: 0777 | os.ModeSymlink,
+ },
+ },
+ },
+ {
+ Name: "readme.zip",
+ },
+ {
+ Name: "readme.notzip",
+ Error: ErrFormat,
+ },
{
Name: "dd.zip",
File: []ZipTestFile{
}
// FileInfo returns an os.FileInfo for the FileHeader.
-func (fh *FileHeader) FileInfo() os.FileInfo {
- return headerFileInfo{fh}
+func (h *FileHeader) FileInfo() os.FileInfo {
+ return headerFileInfo{h}
}
// headerFileInfo implements os.FileInfo.
func (fi headerFileInfo) IsDir() bool { return fi.Mode().IsDir() }
func (fi headerFileInfo) ModTime() time.Time { return fi.fh.ModTime() }
func (fi headerFileInfo) Mode() os.FileMode { return fi.fh.Mode() }
+func (fi headerFileInfo) Sys() interface{} { return fi.fh }
// FileInfoHeader creates a partially-populated FileHeader from an
// os.FileInfo.
h.ModifiedDate, h.ModifiedTime = timeToMsDosTime(t)
}
-// traditional names for Unix constants
const (
- s_IFMT = 0xf000
- s_IFDIR = 0x4000
- s_IFREG = 0x8000
- s_ISUID = 0x800
- s_ISGID = 0x400
+ // Unix constants. The specification doesn't mention them,
+ // but these seem to be the values agreed on by tools.
+ s_IFMT = 0xf000
+ s_IFSOCK = 0xc000
+ s_IFLNK = 0xa000
+ s_IFREG = 0x8000
+ s_IFBLK = 0x6000
+ s_IFDIR = 0x4000
+ s_IFCHR = 0x2000
+ s_IFIFO = 0x1000
+ s_ISUID = 0x800
+ s_ISGID = 0x400
+ s_ISVTX = 0x200
msdosDir = 0x10
msdosReadOnly = 0x01
func fileModeToUnixMode(mode os.FileMode) uint32 {
var m uint32
- if mode&os.ModeDir != 0 {
- m = s_IFDIR
- } else {
+ switch mode & os.ModeType {
+ default:
m = s_IFREG
+ case os.ModeDir:
+ m = s_IFDIR
+ case os.ModeSymlink:
+ m = s_IFLNK
+ case os.ModeNamedPipe:
+ m = s_IFIFO
+ case os.ModeSocket:
+ m = s_IFSOCK
+ case os.ModeDevice:
+ if mode&os.ModeCharDevice != 0 {
+ m = s_IFCHR
+ } else {
+ m = s_IFBLK
+ }
}
if mode&os.ModeSetuid != 0 {
m |= s_ISUID
if mode&os.ModeSetgid != 0 {
m |= s_ISGID
}
+ if mode&os.ModeSticky != 0 {
+ m |= s_ISVTX
+ }
return m | uint32(mode&0777)
}
func unixModeToFileMode(m uint32) os.FileMode {
- var mode os.FileMode
- if m&s_IFMT == s_IFDIR {
+ mode := os.FileMode(m & 0777)
+ switch m & s_IFMT {
+ case s_IFBLK:
+ mode |= os.ModeDevice
+ case s_IFCHR:
+ mode |= os.ModeDevice | os.ModeCharDevice
+ case s_IFDIR:
mode |= os.ModeDir
+ case s_IFIFO:
+ mode |= os.ModeNamedPipe
+ case s_IFLNK:
+ mode |= os.ModeSymlink
+ case s_IFREG:
+ // nothing to do
+ case s_IFSOCK:
+ mode |= os.ModeSocket
}
if m&s_ISGID != 0 {
mode |= os.ModeSetgid
if m&s_ISUID != 0 {
mode |= os.ModeSetuid
}
- return mode | os.FileMode(m&0777)
+ if m&s_ISVTX != 0 {
+ mode |= os.ModeSticky
+ }
+ return mode
}
// Writer implements a zip file writer.
type Writer struct {
- *countWriter
+ countWriter
dir []*header
last *fileWriter
closed bool
// NewWriter returns a new Writer writing a zip file to w.
func NewWriter(w io.Writer) *Writer {
- return &Writer{countWriter: &countWriter{w: bufio.NewWriter(w)}}
+ return &Writer{countWriter: countWriter{w: bufio.NewWriter(w)}}
}
// Close finishes writing the zip file by writing the central directory.
Mode: 0755 | os.ModeSetgid,
},
{
- Name: "setgid",
- Data: []byte("setgid file"),
+ Name: "symlink",
+ Data: []byte("../link/target"),
Method: Deflate,
- Mode: 0755 | os.ModeSetgid,
+ Mode: 0755 | os.ModeSymlink,
},
}
if !reflect.DeepEqual(fh, fh2) {
t.Errorf("mismatch\n input=%#v\noutput=%#v\nerr=%v", fh, fh2, err)
}
+ if sysfh, ok := fi.Sys().(*FileHeader); !ok || sysfh != fh {
+ t.Errorf("Sys didn't return original *FileHeader")
+ }
}
import (
"bytes"
+ "errors"
"io"
- "strconv"
"unicode/utf8"
)
defaultBufSize = 4096
)
-// Errors introduced by this package.
-type Error struct {
- ErrorString string
-}
-
-func (err *Error) Error() string { return err.ErrorString }
-
var (
- ErrInvalidUnreadByte error = &Error{"bufio: invalid use of UnreadByte"}
- ErrInvalidUnreadRune error = &Error{"bufio: invalid use of UnreadRune"}
- ErrBufferFull error = &Error{"bufio: buffer full"}
- ErrNegativeCount error = &Error{"bufio: negative count"}
- errInternal error = &Error{"bufio: internal error"}
+ ErrInvalidUnreadByte = errors.New("bufio: invalid use of UnreadByte")
+ ErrInvalidUnreadRune = errors.New("bufio: invalid use of UnreadRune")
+ ErrBufferFull = errors.New("bufio: buffer full")
+ ErrNegativeCount = errors.New("bufio: negative count")
+ errInternal = errors.New("bufio: internal error")
)
-// BufSizeError is the error representing an invalid buffer size.
-type BufSizeError int
-
-func (b BufSizeError) Error() string {
- return "bufio: bad buffer size " + strconv.Itoa(int(b))
-}
-
// Buffered input.
// Reader implements buffering for an io.Reader object.
const minReadBufferSize = 16
-// NewReaderSize creates a new Reader whose buffer has the specified size,
-// which must be at least 16 bytes. If the argument io.Reader is already a
-// Reader with large enough size, it returns the underlying Reader.
-// It returns the Reader and any error.
-func NewReaderSize(rd io.Reader, size int) (*Reader, error) {
- if size < minReadBufferSize {
- return nil, BufSizeError(size)
- }
+// NewReaderSize returns a new Reader whose buffer has at least the specified
+// size. If the argument io.Reader is already a Reader with large enough
+// size, it returns the underlying Reader.
+func NewReaderSize(rd io.Reader, size int) *Reader {
// Is it already a Reader?
b, ok := rd.(*Reader)
if ok && len(b.buf) >= size {
- return b, nil
+ return b
+ }
+ if size < minReadBufferSize {
+ size = minReadBufferSize
+ }
+ return &Reader{
+ buf: make([]byte, size),
+ rd: rd,
+ lastByte: -1,
+ lastRuneSize: -1,
}
- b = new(Reader)
- b.buf = make([]byte, size)
- b.rd = rd
- b.lastByte = -1
- b.lastRuneSize = -1
- return b, nil
}
// NewReader returns a new Reader whose buffer has the default size.
func NewReader(rd io.Reader) *Reader {
- b, err := NewReaderSize(rd, defaultBufSize)
- if err != nil {
- // cannot happen - defaultBufSize is a valid size
- panic(err)
- }
- return b
+ return NewReaderSize(rd, defaultBufSize)
}
// fill reads a new chunk into the buffer.
}
// ReadRune reads a single UTF-8 encoded Unicode character and returns the
-// rune and its size in bytes.
+// rune and its size in bytes. If the encoded rune is invalid, it consumes one byte
+// and returns unicode.ReplacementChar (U+FFFD) with a size of 1.
func (b *Reader) ReadRune() (r rune, size int, err error) {
for b.r+utf8.UTFMax > b.w && !utf8.FullRune(b.buf[b.r:b.w]) && b.err == nil {
b.fill()
// buffered output
// Writer implements buffering for an io.Writer object.
+// If an error occurs writing to a Writer, no more data will be
+// accepted and all subsequent writes will return the error.
type Writer struct {
err error
buf []byte
wr io.Writer
}
-// NewWriterSize creates a new Writer whose buffer has the specified size,
-// which must be greater than zero. If the argument io.Writer is already a
-// Writer with large enough size, it returns the underlying Writer.
-// It returns the Writer and any error.
-func NewWriterSize(wr io.Writer, size int) (*Writer, error) {
- if size <= 0 {
- return nil, BufSizeError(size)
- }
+// NewWriterSize returns a new Writer whose buffer has at least the specified
+// size. If the argument io.Writer is already a Writer with large enough
+// size, it returns the underlying Writer.
+func NewWriterSize(wr io.Writer, size int) *Writer {
// Is it already a Writer?
b, ok := wr.(*Writer)
if ok && len(b.buf) >= size {
- return b, nil
+ return b
+ }
+ if size <= 0 {
+ size = defaultBufSize
}
b = new(Writer)
b.buf = make([]byte, size)
b.wr = wr
- return b, nil
+ return b
}
// NewWriter returns a new Writer whose buffer has the default size.
func NewWriter(wr io.Writer) *Writer {
- b, err := NewWriterSize(wr, defaultBufSize)
- if err != nil {
- // cannot happen - defaultBufSize is valid size
- panic(err)
- }
- return b
+ return NewWriterSize(wr, defaultBufSize)
}
// Flush writes any buffered data to the underlying io.Writer.
bufreader := bufreaders[j]
bufsize := bufsizes[k]
read := readmaker.fn(bytes.NewBufferString(text))
- buf, _ := NewReaderSize(read, bufsize)
+ buf := NewReaderSize(read, bufsize)
s := bufreader.fn(buf)
if s != text {
t.Errorf("reader=%s fn=%s bufsize=%d want=%q got=%q",
// and that the data is correct.
w.Reset()
- buf, e := NewWriterSize(w, bs)
+ buf := NewWriterSize(w, bs)
context := fmt.Sprintf("nwrite=%d bufsize=%d", nwrite, bs)
- if e != nil {
- t.Errorf("%s: NewWriterSize %d: %v", context, bs, e)
- continue
- }
n, e1 := buf.Write(data[0:nwrite])
if e1 != nil || n != nwrite {
t.Errorf("%s: buf.Write %d = %d, %v", context, nwrite, n, e1)
continue
}
- if e = buf.Flush(); e != nil {
+ if e := buf.Flush(); e != nil {
t.Errorf("%s: buf.Flush = %v", context, e)
}
func TestNewReaderSizeIdempotent(t *testing.T) {
const BufSize = 1000
- b, err := NewReaderSize(bytes.NewBufferString("hello world"), BufSize)
- if err != nil {
- t.Error("NewReaderSize create fail", err)
- }
+ b := NewReaderSize(bytes.NewBufferString("hello world"), BufSize)
// Does it recognize itself?
- b1, err2 := NewReaderSize(b, BufSize)
- if err2 != nil {
- t.Error("NewReaderSize #2 create fail", err2)
- }
+ b1 := NewReaderSize(b, BufSize)
if b1 != b {
t.Error("NewReaderSize did not detect underlying Reader")
}
// Does it wrap if existing buffer is too small?
- b2, err3 := NewReaderSize(b, 2*BufSize)
- if err3 != nil {
- t.Error("NewReaderSize #3 create fail", err3)
- }
+ b2 := NewReaderSize(b, 2*BufSize)
if b2 == b {
t.Error("NewReaderSize did not enlarge buffer")
}
func TestNewWriterSizeIdempotent(t *testing.T) {
const BufSize = 1000
- b, err := NewWriterSize(new(bytes.Buffer), BufSize)
- if err != nil {
- t.Error("NewWriterSize create fail", err)
- }
+ b := NewWriterSize(new(bytes.Buffer), BufSize)
// Does it recognize itself?
- b1, err2 := NewWriterSize(b, BufSize)
- if err2 != nil {
- t.Error("NewWriterSize #2 create fail", err2)
- }
+ b1 := NewWriterSize(b, BufSize)
if b1 != b {
t.Error("NewWriterSize did not detect underlying Writer")
}
// Does it wrap if existing buffer is too small?
- b2, err3 := NewWriterSize(b, 2*BufSize)
- if err3 != nil {
- t.Error("NewWriterSize #3 create fail", err3)
- }
+ b2 := NewWriterSize(b, 2*BufSize)
if b2 == b {
t.Error("NewWriterSize did not enlarge buffer")
}
func TestWriteString(t *testing.T) {
const BufSize = 8
buf := new(bytes.Buffer)
- b, err := NewWriterSize(buf, BufSize)
- if err != nil {
- t.Error("NewWriterSize create fail", err)
- }
+ b := NewWriterSize(buf, BufSize)
b.WriteString("0") // easy
b.WriteString("123456") // still easy
b.WriteString("7890") // easy after flush
func TestBufferFull(t *testing.T) {
const longString = "And now, hello, world! It is the time for all good men to come to the aid of their party"
- buf, err := NewReaderSize(strings.NewReader(longString), minReadBufferSize)
- if err != nil {
- t.Fatal("NewReaderSize:", err)
- }
+ buf := NewReaderSize(strings.NewReader(longString), minReadBufferSize)
line, err := buf.ReadSlice('!')
if string(line) != "And now, hello, " || err != ErrBufferFull {
t.Errorf("first ReadSlice(,) = %q, %v", line, err)
func TestPeek(t *testing.T) {
p := make([]byte, 10)
// string is 16 (minReadBufferSize) long.
- buf, _ := NewReaderSize(strings.NewReader("abcdefghijklmnop"), minReadBufferSize)
+ buf := NewReaderSize(strings.NewReader("abcdefghijklmnop"), minReadBufferSize)
if s, err := buf.Peek(1); string(s) != "a" || err != nil {
t.Fatalf("want %q got %q, err=%v", "a", string(s), err)
}
for stride := 1; stride < 2; stride++ {
done := 0
reader := testReader{input, stride}
- l, _ := NewReaderSize(&reader, len(input)+1)
+ l := NewReaderSize(&reader, len(input)+1)
for {
line, isPrefix, err := l.ReadLine()
if len(line) > 0 && err != nil {
data = append(data, '0'+byte(i%10))
}
buf := bytes.NewBuffer(data)
- l, _ := NewReaderSize(buf, minReadBufferSize)
+ l := NewReaderSize(buf, minReadBufferSize)
line, isPrefix, err := l.ReadLine()
if !isPrefix || !bytes.Equal(line, data[:minReadBufferSize]) || err != nil {
t.Errorf("bad result for first line: got %q want %q %v", line, data[:minReadBufferSize], err)
inbuf := bytes.NewBuffer([]byte(line1 + "\n" + restData))
outbuf := new(bytes.Buffer)
maxLineLength := len(line1) + len(restData)/2
- l, _ := NewReaderSize(inbuf, maxLineLength)
+ l := NewReaderSize(inbuf, maxLineLength)
line, isPrefix, err := l.ReadLine()
if isPrefix || err != nil || string(line) != line1 {
t.Errorf("bad result for first line: isPrefix=%v err=%v line=%q", isPrefix, err, string(line))
}
func TestReadEmptyBuffer(t *testing.T) {
- l, _ := NewReaderSize(bytes.NewBuffer(nil), minReadBufferSize)
+ l := NewReaderSize(new(bytes.Buffer), minReadBufferSize)
line, isPrefix, err := l.ReadLine()
if err != io.EOF {
t.Errorf("expected EOF from ReadLine, got '%s' %t %s", line, isPrefix, err)
}
func TestLinesAfterRead(t *testing.T) {
- l, _ := NewReaderSize(bytes.NewBuffer([]byte("foo")), minReadBufferSize)
+ l := NewReaderSize(bytes.NewBuffer([]byte("foo")), minReadBufferSize)
_, err := ioutil.ReadAll(l)
if err != nil {
t.Error(err)
}
func testReadLineNewlines(t *testing.T, input string, expect []readLineResult) {
- b, err := NewReaderSize(strings.NewReader(input), minReadBufferSize)
- if err != nil {
- t.Fatal(err)
- }
+ b := NewReaderSize(strings.NewReader(input), minReadBufferSize)
for i, e := range expect {
line, isPrefix, err := b.ReadLine()
if bytes.Compare(line, e.line) != 0 {
func (b *Buffer) Len() int { return len(b.buf) - b.off }
// Truncate discards all but the first n unread bytes from the buffer.
-// It is an error to call b.Truncate(n) with n > b.Len().
+// It panics if n is negative or greater than the length of the buffer.
func (b *Buffer) Truncate(n int) {
b.lastRead = opInvalid
- if n == 0 {
+ switch {
+ case n < 0 || n > b.Len():
+ panic("bytes.Buffer: truncation out of range")
+ case n == 0:
// Reuse buffer space.
b.off = 0
}
// buf should have the desired capacity but a length of zero.
//
// In most cases, new(Buffer) (or just declaring a Buffer variable) is
-// preferable to NewBuffer. In particular, passing a non-empty buf to
-// NewBuffer and then writing to the Buffer will overwrite buf, not append to
-// it.
+// sufficient to initialize a Buffer.
func NewBuffer(buf []byte) *Buffer { return &Buffer{buf: buf} }
// NewBufferString creates and initializes a new Buffer using string s as its
-// initial contents. It is intended to prepare a buffer to read an existing
-// string. See the warnings about NewBuffer; similar issues apply here.
+// initial contents. It is intended to prepare a buffer to read an existing
+// string.
+//
+// In most cases, new(Buffer) (or just declaring a Buffer variable) is
+// sufficient to initialize a Buffer.
func NewBufferString(s string) *Buffer {
return &Buffer{buf: []byte(s)}
}
if d.blockStart >= windowSize {
d.blockStart -= windowSize
} else {
- d.blockStart = skipNever
+ d.blockStart = math.MaxInt32
}
d.hashOffset += windowSize
}
}
func testToFromWithLevelAndLimit(t *testing.T, level int, input []byte, name string, limit int) error {
- buffer := bytes.NewBuffer(nil)
- w := NewWriter(buffer, level)
+ var buffer bytes.Buffer
+ w := NewWriter(&buffer, level)
w.Write(input)
w.Close()
if limit > 0 && buffer.Len() > limit {
t.Errorf("level: %d, len(compress(data)) = %d > limit = %d", level, buffer.Len(), limit)
}
- r := NewReader(buffer)
+ r := NewReader(&buffer)
out, err := ioutil.ReadAll(r)
if err != nil {
t.Errorf("read: %s", err)
return total
}
-// Generate elements in the chain using an iterative algorithm.
-func (h *huffmanEncoder) generateChains(top *levelInfo, list []literalNode) {
- n := len(list)
- list = list[0 : n+1]
- list[n] = maxNode()
-
- l := top
- for {
- if l.nextPairFreq == math.MaxInt32 && l.nextCharFreq == math.MaxInt32 {
- // We've run out of both leafs and pairs.
- // End all calculations for this level.
- // To m sure we never come back to this level or any lower level,
- // set nextPairFreq impossibly large.
- l.lastChain = nil
- l.needed = 0
- l = l.up
- l.nextPairFreq = math.MaxInt32
- continue
- }
-
- prevFreq := l.lastChain.freq
- if l.nextCharFreq < l.nextPairFreq {
- // The next item on this row is a leaf node.
- n := l.lastChain.leafCount + 1
- l.lastChain = &chain{l.nextCharFreq, n, l.lastChain.up}
- l.nextCharFreq = list[n].freq
- } else {
- // The next item on this row is a pair from the previous row.
- // nextPairFreq isn't valid until we generate two
- // more values in the level below
- l.lastChain = &chain{l.nextPairFreq, l.lastChain.leafCount, l.down.lastChain}
- l.down.needed = 2
- }
-
- if l.needed--; l.needed == 0 {
- // We've done everything we need to do for this level.
- // Continue calculating one level up. Fill in nextPairFreq
- // of that level with the sum of the two nodes we've just calculated on
- // this level.
- up := l.up
- if up == nil {
- // All done!
- return
- }
- up.nextPairFreq = prevFreq + l.lastChain.freq
- l = up
- } else {
- // If we stole from below, move down temporarily to replenish it.
- for l.down.needed > 0 {
- l = l.down
- }
- }
- }
-}
-
// Return the number of literals assigned to each bit size in the Huffman encoding
//
// This method is only called when list.length >= 3
}
func TestReader(t *testing.T) {
- b := bytes.NewBuffer(nil)
+ var b bytes.Buffer
for _, tt := range lzwTests {
d := strings.Split(tt.desc, ";")
var order Order
rc := NewReader(strings.NewReader(tt.compressed), order, litWidth)
defer rc.Close()
b.Reset()
- n, err := io.Copy(b, rc)
+ n, err := io.Copy(&b, rc)
if err != nil {
if err != tt.err {
t.Errorf("%s: io.Copy: %v want %v", tt.desc, err, tt.err)
b.SetBytes(int64(n))
buf0, _ := ioutil.ReadFile("../testdata/e.txt")
buf0 = buf0[:10000]
- compressed := bytes.NewBuffer(nil)
+ compressed := new(bytes.Buffer)
w := NewWriter(compressed, LSB, 8)
for i := 0; i < n; i += len(buf0) {
io.Copy(w, bytes.NewBuffer(buf0))
func TestWriterDictIsUsed(t *testing.T) {
var input = []byte("Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.")
- buf := bytes.NewBuffer(nil)
- compressor, err := NewWriterDict(buf, BestCompression, input)
+ var buf bytes.Buffer
+ compressor, err := NewWriterDict(&buf, BestCompression, input)
if err != nil {
t.Errorf("error in NewWriterDict: %s", err)
return
// NewCBCDecrypter returns a BlockMode which decrypts in cipher block chaining
// mode, using the given Block. The length of iv must be the same as the
-// Block's block size as must match the iv used to encrypt the data.
+// Block's block size and must match the iv used to encrypt the data.
func NewCBCDecrypter(b Block, iv []byte) BlockMode {
return (*cbcDecrypter)(newCBC(b, iv))
}
// The Stream* objects are so simple that all their members are public. Users
// can create them themselves.
-// StreamReader wraps a Stream into an io.Reader. It simply calls XORKeyStream
+// StreamReader wraps a Stream into an io.Reader. It calls XORKeyStream
// to process each slice of data which passes through.
type StreamReader struct {
S Stream
return
}
-// StreamWriter wraps a Stream into an io.Writer. It simply calls XORKeyStream
+// StreamWriter wraps a Stream into an io.Writer. It calls XORKeyStream
// to process each slice of data which passes through. If any Write call
// returns short then the StreamWriter is out of sync and must be discarded.
type StreamWriter struct {
+++ /dev/null
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// OpenPGP CFB Mode. http://tools.ietf.org/html/rfc4880#section-13.9
-
-package cipher
-
-type ocfbEncrypter struct {
- b Block
- fre []byte
- outUsed int
-}
-
-// An OCFBResyncOption determines if the "resynchronization step" of OCFB is
-// performed.
-type OCFBResyncOption bool
-
-const (
- OCFBResync OCFBResyncOption = true
- OCFBNoResync OCFBResyncOption = false
-)
-
-// NewOCFBEncrypter returns a Stream which encrypts data with OpenPGP's cipher
-// feedback mode using the given Block, and an initial amount of ciphertext.
-// randData must be random bytes and be the same length as the Block's block
-// size. Resync determines if the "resynchronization step" from RFC 4880, 13.9
-// step 7 is performed. Different parts of OpenPGP vary on this point.
-func NewOCFBEncrypter(block Block, randData []byte, resync OCFBResyncOption) (Stream, []byte) {
- blockSize := block.BlockSize()
- if len(randData) != blockSize {
- return nil, nil
- }
-
- x := &ocfbEncrypter{
- b: block,
- fre: make([]byte, blockSize),
- outUsed: 0,
- }
- prefix := make([]byte, blockSize+2)
-
- block.Encrypt(x.fre, x.fre)
- for i := 0; i < blockSize; i++ {
- prefix[i] = randData[i] ^ x.fre[i]
- }
-
- block.Encrypt(x.fre, prefix[:blockSize])
- prefix[blockSize] = x.fre[0] ^ randData[blockSize-2]
- prefix[blockSize+1] = x.fre[1] ^ randData[blockSize-1]
-
- if resync {
- block.Encrypt(x.fre, prefix[2:])
- } else {
- x.fre[0] = prefix[blockSize]
- x.fre[1] = prefix[blockSize+1]
- x.outUsed = 2
- }
- return x, prefix
-}
-
-func (x *ocfbEncrypter) XORKeyStream(dst, src []byte) {
- for i := 0; i < len(src); i++ {
- if x.outUsed == len(x.fre) {
- x.b.Encrypt(x.fre, x.fre)
- x.outUsed = 0
- }
-
- x.fre[x.outUsed] ^= src[i]
- dst[i] = x.fre[x.outUsed]
- x.outUsed++
- }
-}
-
-type ocfbDecrypter struct {
- b Block
- fre []byte
- outUsed int
-}
-
-// NewOCFBDecrypter returns a Stream which decrypts data with OpenPGP's cipher
-// feedback mode using the given Block. Prefix must be the first blockSize + 2
-// bytes of the ciphertext, where blockSize is the Block's block size. If an
-// incorrect key is detected then nil is returned. On successful exit,
-// blockSize+2 bytes of decrypted data are written into prefix. Resync
-// determines if the "resynchronization step" from RFC 4880, 13.9 step 7 is
-// performed. Different parts of OpenPGP vary on this point.
-func NewOCFBDecrypter(block Block, prefix []byte, resync OCFBResyncOption) Stream {
- blockSize := block.BlockSize()
- if len(prefix) != blockSize+2 {
- return nil
- }
-
- x := &ocfbDecrypter{
- b: block,
- fre: make([]byte, blockSize),
- outUsed: 0,
- }
- prefixCopy := make([]byte, len(prefix))
- copy(prefixCopy, prefix)
-
- block.Encrypt(x.fre, x.fre)
- for i := 0; i < blockSize; i++ {
- prefixCopy[i] ^= x.fre[i]
- }
-
- block.Encrypt(x.fre, prefix[:blockSize])
- prefixCopy[blockSize] ^= x.fre[0]
- prefixCopy[blockSize+1] ^= x.fre[1]
-
- if prefixCopy[blockSize-2] != prefixCopy[blockSize] ||
- prefixCopy[blockSize-1] != prefixCopy[blockSize+1] {
- return nil
- }
-
- if resync {
- block.Encrypt(x.fre, prefix[2:])
- } else {
- x.fre[0] = prefix[blockSize]
- x.fre[1] = prefix[blockSize+1]
- x.outUsed = 2
- }
- copy(prefix, prefixCopy)
- return x
-}
-
-func (x *ocfbDecrypter) XORKeyStream(dst, src []byte) {
- for i := 0; i < len(src); i++ {
- if x.outUsed == len(x.fre) {
- x.b.Encrypt(x.fre, x.fre)
- x.outUsed = 0
- }
-
- c := src[i]
- dst[i] = x.fre[x.outUsed] ^ src[i]
- x.fre[x.outUsed] = c
- x.outUsed++
- }
-}
+++ /dev/null
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cipher
-
-import (
- "bytes"
- "crypto/aes"
- "crypto/rand"
- "testing"
-)
-
-func testOCFB(t *testing.T, resync OCFBResyncOption) {
- block, err := aes.NewCipher(commonKey128)
- if err != nil {
- t.Error(err)
- return
- }
-
- plaintext := []byte("this is the plaintext, which is long enough to span several blocks.")
- randData := make([]byte, block.BlockSize())
- rand.Reader.Read(randData)
- ocfb, prefix := NewOCFBEncrypter(block, randData, resync)
- ciphertext := make([]byte, len(plaintext))
- ocfb.XORKeyStream(ciphertext, plaintext)
-
- ocfbdec := NewOCFBDecrypter(block, prefix, resync)
- if ocfbdec == nil {
- t.Errorf("NewOCFBDecrypter failed (resync: %t)", resync)
- return
- }
- plaintextCopy := make([]byte, len(plaintext))
- ocfbdec.XORKeyStream(plaintextCopy, ciphertext)
-
- if !bytes.Equal(plaintextCopy, plaintext) {
- t.Errorf("got: %x, want: %x (resync: %t)", plaintextCopy, plaintext, resync)
- }
-}
-
-func TestOCFB(t *testing.T) {
- testOCFB(t, OCFBNoResync)
- testOCFB(t, OCFBResync)
-}
type Hash uint
const (
- MD4 Hash = 1 + iota // in package crypto/md4
- MD5 // in package crypto/md5
- SHA1 // in package crypto/sha1
- SHA224 // in package crypto/sha256
- SHA256 // in package crypto/sha256
- SHA384 // in package crypto/sha512
- SHA512 // in package crypto/sha512
+ MD4 Hash = 1 + iota // import code.google.com/p/go.crypto/md4
+ MD5 // import crypto/md5
+ SHA1 // import crypto/sha1
+ SHA224 // import crypto/sha256
+ SHA256 // import crypto/sha256
+ SHA384 // import crypto/sha512
+ SHA512 // import crypto/sha512
MD5SHA1 // no implementation; MD5+SHA1 used for TLS RSA
- RIPEMD160 // in package crypto/ripemd160
+ RIPEMD160 // import code.google.com/p/go.crypto/ripemd160
maxHash
)
var hashes = make([]func() hash.Hash, maxHash)
-// New returns a new hash.Hash calculating the given hash function. If the
-// hash function is not linked into the binary, New returns nil.
+// New returns a new hash.Hash calculating the given hash function. New panics
+// if the hash function is not linked into the binary.
func (h Hash) New() hash.Hash {
if h > 0 && h < maxHash {
f := hashes[h]
return f()
}
}
- return nil
+ panic("crypto: requested hash function is unavailable")
+}
+
+// Available reports whether the given hash function is linked into the binary.
+func (h Hash) Available() bool {
+ return h < maxHash && hashes[h] != nil
}
// RegisterHash registers a function that returns a new instance of the given
// BlockSize returns the DES block size, 8 bytes.
func (c *Cipher) BlockSize() int { return BlockSize }
-// Encrypts the 8-byte buffer src and stores the result in dst.
+// Encrypt encrypts the 8-byte buffer src and stores the result in dst.
// Note that for amounts of data larger than a block,
// it is not safe to just call Encrypt on successive blocks;
// instead, use an encryption mode like CBC (see crypto/cipher/cbc.go).
func (c *Cipher) Encrypt(dst, src []byte) { encryptBlock(c.subkeys[:], dst, src) }
-// Decrypts the 8-byte buffer src and stores the result in dst.
+// Decrypt decrypts the 8-byte buffer src and stores the result in dst.
func (c *Cipher) Decrypt(dst, src []byte) { decryptBlock(c.subkeys[:], dst, src) }
// Reset zeros the key data, so that it will no longer
qBytes[0] |= 0x80
q.SetBytes(qBytes)
- if !big.ProbablyPrime(q, numMRTests) {
+ if !q.ProbablyPrime(numMRTests) {
continue
}
continue
}
- if !big.ProbablyPrime(p, numMRTests) {
+ if !p.ProbablyPrime(numMRTests) {
continue
}
import (
"crypto/rand"
+ "encoding/hex"
"fmt"
"math/big"
"testing"
return
}
}
+
+func TestP224Overflow(t *testing.T) {
+ // This tests for a specific bug in the P224 implementation.
+ p224 := P224()
+ pointData, _ := hex.DecodeString("049B535B45FB0A2072398A6831834624C7E32CCFD5A4B933BCEAF77F1DD945E08BBE5178F5EDF5E733388F196D2A631D2E075BB16CBFEEA15B")
+ x, y := Unmarshal(p224, pointData)
+ if !p224.IsOnCurve(x, y) {
+ t.Error("P224 failed to validate a correct point")
+ }
+}
in[i] += p224ZeroModP63[i]
}
- // Elimintate the coefficients at 2**224 and greater.
+ // Eliminate the coefficients at 2**224 and greater.
for i := 14; i >= 8; i-- {
in[i-8] -= in[i]
in[i-5] += (in[i] & 0xffff) << 12
a[0] += mask & (1 << 28)
}
-// p224Invert calcuates *out = in**-1 by computing in**(2**224 - 2**96 - 1),
+// p224Invert calculates *out = in**-1 by computing in**(2**224 - 2**96 - 1),
// i.e. Fermat's little theorem.
func p224Invert(out, in *p224FieldElement) {
var f1, f2, f3, f4 p224FieldElement
// p224Contract converts a FieldElement to its unique, minimal form.
//
-// On entry, in[i] < 2**32
+// On entry, in[i] < 2**29
// On exit, in[i] < 2**28
func p224Contract(out, in *p224FieldElement) {
copy(out[:], in[:])
out[i+1] -= 1 & mask
}
+ // We might have pushed out[3] over 2**28 so we perform another, partial,
+ // carry chain.
+ for i := 3; i < 7; i++ {
+ out[i+1] += out[i] >> 28
+ out[i] &= bottom28Bits
+ }
+ top = out[7] >> 28
+ out[7] &= bottom28Bits
+
+ // Eliminate top while maintaining the same value mod p.
+ out[0] -= top
+ out[3] += top << 12
+
+ // There are two cases to consider for out[3]:
+ // 1) The first time that we eliminated top, we didn't push out[3] over
+ // 2**28. In this case, the partial carry chain didn't change any values
+ // and top is zero.
+ // 2) We did push out[3] over 2**28 the first time that we eliminated top.
+ // The first value of top was in [0..16), therefore, prior to eliminating
+ // the first top, 0xfff1000 <= out[3] <= 0xfffffff. Therefore, after
+ // overflowing and being reduced by the second carry chain, out[3] <=
+ // 0xf000. Thus it cannot have overflowed when we eliminated top for the
+ // second time.
+
+ // Again, we may just have made out[0] negative, so do the same carry down.
+ // As before, if we made out[0] negative then we know that out[3] is
+ // sufficiently positive.
+ for i := 0; i < 3; i++ {
+ mask := uint32(int32(out[i]) >> 31)
+ out[i] += (1 << 28) & mask
+ out[i+1] -= 1 & mask
+ }
+
// Now we see if the value is >= p and, if so, subtract p.
// First we build a mask from the top four limbs, which must all be
bytes[len(bytes)-1] |= 1
p.SetBytes(bytes)
- if big.ProbablyPrime(p, 20) {
+ if p.ProbablyPrime(20) {
return
}
}
// about the plaintext.
// See ``Chosen Ciphertext Attacks Against Protocols Based on the RSA
// Encryption Standard PKCS #1'', Daniel Bleichenbacher, Advances in Cryptology
-// (Crypto '98),
+// (Crypto '98).
func DecryptPKCS1v15SessionKey(rand io.Reader, priv *PrivateKey, ciphertext []byte, key []byte) (err error) {
k := (priv.N.BitLen() + 7) / 8
if k-(len(key)+3+8) < 0 {
// ProbablyPrime are deterministic, given the candidate number, it's
// easy for an attack to generate composites that pass this test.
for _, prime := range priv.Primes {
- if !big.ProbablyPrime(prime, 20) {
+ if !prime.ProbablyPrime(20) {
return errors.New("prime factor is composite")
}
}
gcd := new(big.Int)
x := new(big.Int)
y := new(big.Int)
- big.GcdInt(gcd, x, y, totient, e)
+ gcd.GCD(x, y, totient, e)
if gcd.Cmp(bigOne) != 0 {
return errors.New("invalid public exponent E")
}
priv.D = new(big.Int)
y := new(big.Int)
e := big.NewInt(int64(priv.E))
- big.GcdInt(g, priv.D, y, e, totient)
+ g.GCD(priv.D, y, e, totient)
if g.Cmp(bigOne) == 0 {
priv.D.Add(priv.D, totient)
g := new(big.Int)
x := new(big.Int)
y := new(big.Int)
- big.GcdInt(g, x, y, a, n)
+ g.GCD(x, y, a, n)
if g.Cmp(bigOne) != 0 {
// In this case, a and n aren't coprime and we cannot calculate
// the inverse. This happens because the values of n are nearly
}
// DecryptOAEP decrypts ciphertext using RSA-OAEP.
-// If rand != nil, DecryptOAEP uses RSA blinding to avoid timing side-channel attacks.
+// If random != nil, DecryptOAEP uses RSA blinding to avoid timing side-channel attacks.
func DecryptOAEP(hash hash.Hash, random io.Reader, priv *PrivateKey, ciphertext []byte, label []byte) (msg []byte, err error) {
k := (priv.N.BitLen() + 7) / 8
if len(ciphertext) > k ||
finishedHash.Write(serverHello.marshal())
vers, ok := mutualVersion(serverHello.vers)
- if !ok {
+ if !ok || vers < versionTLS10 {
+ // TLS 1.0 is the minimum version supported as a client.
return c.sendAlert(alertProtocolVersion)
}
c.vers = vers
return &Conn{conn: conn, config: config, isClient: true}
}
-// A Listener implements a network listener (net.Listener) for TLS connections.
-type Listener struct {
- listener net.Listener
- config *Config
+// A listener implements a network listener (net.Listener) for TLS connections.
+type listener struct {
+ net.Listener
+ config *Config
}
// Accept waits for and returns the next incoming TLS connection.
// The returned connection c is a *tls.Conn.
-func (l *Listener) Accept() (c net.Conn, err error) {
- c, err = l.listener.Accept()
+func (l *listener) Accept() (c net.Conn, err error) {
+ c, err = l.Listener.Accept()
if err != nil {
return
}
return
}
-// Close closes the listener.
-func (l *Listener) Close() error { return l.listener.Close() }
-
-// Addr returns the listener's network address.
-func (l *Listener) Addr() net.Addr { return l.listener.Addr() }
-
// NewListener creates a Listener which accepts connections from an inner
// Listener and wraps each connection with Server.
// The configuration config must be non-nil and must have
// at least one certificate.
-func NewListener(listener net.Listener, config *Config) (l *Listener) {
- l = new(Listener)
- l.listener = listener
+func NewListener(inner net.Listener, config *Config) net.Listener {
+ l := new(listener)
+ l.Listener = inner
l.config = config
- return
+ return l
}
// Listen creates a TLS listener accepting connections on the
// given network address using net.Listen.
// The configuration config must be non-nil and must have
// at least one certificate.
-func Listen(network, laddr string, config *Config) (*Listener, error) {
+func Listen(network, laddr string, config *Config) (net.Listener, error) {
if config == nil || len(config.Certificates) == 0 {
return nil, errors.New("tls.Listen: no certificates in configuration")
}
var priv pkcs1PrivateKey
rest, err := asn1.Unmarshal(der, &priv)
if len(rest) > 0 {
- err = asn1.SyntaxError{"trailing data"}
+ err = asn1.SyntaxError{Msg: "trailing data"}
return
}
if err != nil {
type RelativeDistinguishedNameSET []AttributeTypeAndValue
+// AttributeTypeAndValue mirrors the ASN.1 structure of the same name in
+// http://tools.ietf.org/html/rfc5280#section-4.1.2.4
type AttributeTypeAndValue struct {
Type asn1.ObjectIdentifier
Value interface{}
import (
"strings"
"time"
+ "unicode/utf8"
)
type InvalidReason int
return true
}
+// toLowerCaseASCII returns a lower-case version of in. See RFC 6125 6.4.1. We use
+// an explicitly ASCII function to avoid any sharp corners resulting from
+// performing Unicode operations on DNS labels.
+func toLowerCaseASCII(in string) string {
+ // If the string is already lower-case then there's nothing to do.
+ isAlreadyLowerCase := true
+ for _, c := range in {
+ if c == utf8.RuneError {
+ // If we get a UTF-8 error then there might be
+ // upper-case ASCII bytes in the invalid sequence.
+ isAlreadyLowerCase = false
+ break
+ }
+ if 'A' <= c && c <= 'Z' {
+ isAlreadyLowerCase = false
+ break
+ }
+ }
+
+ if isAlreadyLowerCase {
+ return in
+ }
+
+ out := []byte(in)
+ for i, c := range out {
+ if 'A' <= c && c <= 'Z' {
+ out[i] += 'a' - 'A'
+ }
+ }
+ return string(out)
+}
+
// VerifyHostname returns nil if c is a valid certificate for the named host.
// Otherwise it returns an error describing the mismatch.
func (c *Certificate) VerifyHostname(h string) error {
+ lowered := toLowerCaseASCII(h)
+
if len(c.DNSNames) > 0 {
for _, match := range c.DNSNames {
- if matchHostnames(match, h) {
+ if matchHostnames(toLowerCaseASCII(match), lowered) {
return nil
}
}
// If Subject Alt Name is given, we ignore the common name.
- } else if matchHostnames(c.Subject.CommonName, h) {
+ } else if matchHostnames(toLowerCaseASCII(c.Subject.CommonName), lowered) {
return nil
}
intermediates: []string{thawteIntermediate},
roots: []string{verisignRoot},
currentTime: 1302726541,
+ dnsName: "WwW.GooGLE.coM",
+
+ expectedChains: [][]string{
+ {"Google", "Thawte", "VeriSign"},
+ },
+ },
+ {
+ leaf: googleLeaf,
+ intermediates: []string{thawteIntermediate},
+ roots: []string{verisignRoot},
+ currentTime: 1302726541,
dnsName: "www.example.com",
errorCallback: expectHostnameError,
return nil, err
}
if !seq.IsCompound || seq.Tag != 16 || seq.Class != 0 {
- return nil, asn1.StructuralError{"bad SAN sequence"}
+ return nil, asn1.StructuralError{Msg: "bad SAN sequence"}
}
parsedName := false
return nil, err
}
if len(rest) > 0 {
- return nil, asn1.SyntaxError{"trailing data"}
+ return nil, asn1.SyntaxError{Msg: "trailing data"}
}
return parseCertificate(&cert)
case *string:
*d = string(s)
return nil
+ case *interface{}:
+ bcopy := make([]byte, len(s))
+ copy(bcopy, s)
+ *d = bcopy
+ return nil
case *[]byte:
*d = s
return nil
*d = bv.(bool)
}
return err
+ case *interface{}:
+ *d = src
+ return nil
}
if scanner, ok := dest.(ScannerInto); ok {
s, d interface{} // source and destination
// following are used if they're non-zero
- wantint int64
- wantuint uint64
- wantstr string
- wantf32 float32
- wantf64 float64
- wanttime time.Time
- wantbool bool // used if d is of type *bool
- wanterr string
+ wantint int64
+ wantuint uint64
+ wantstr string
+ wantf32 float32
+ wantf64 float64
+ wanttime time.Time
+ wantbool bool // used if d is of type *bool
+ wanterr string
+ wantiface interface{}
}
// Target variables for scanning into.
scanf32 float32
scanf64 float64
scantime time.Time
+ scaniface interface{}
)
var conversionTests = []conversionTest{
{s: float64(1.5), d: &scanf32, wantf32: float32(1.5)},
{s: "1.5", d: &scanf32, wantf32: float32(1.5)},
{s: "1.5", d: &scanf64, wantf64: float64(1.5)},
+
+ // To interface{}
+ {s: float64(1.5), d: &scaniface, wantiface: float64(1.5)},
+ {s: int64(1), d: &scaniface, wantiface: int64(1)},
+ {s: "str", d: &scaniface, wantiface: "str"},
+ {s: []byte("byteslice"), d: &scaniface, wantiface: []byte("byteslice")},
+ {s: true, d: &scaniface, wantiface: true},
+ {s: nil, d: &scaniface},
}
func intValue(intptr interface{}) int64 {
if !ct.wanttime.IsZero() && !ct.wanttime.Equal(timeValue(ct.d)) {
errf("want time %v, got %v", ct.wanttime, timeValue(ct.d))
}
+ if ifptr, ok := ct.d.(*interface{}); ok {
+ if !reflect.DeepEqual(ct.wantiface, scaniface) {
+ errf("want interface %#v, got %#v", ct.wantiface, scaniface)
+ continue
+ }
+ if srcBytes, ok := ct.s.([]byte); ok {
+ dstBytes := (*ifptr).([]byte)
+ if &dstBytes[0] == &srcBytes[0] {
+ errf("copy into interface{} didn't copy []byte data")
+ }
+ }
+ }
}
}
// Package driver defines interfaces to be implemented by database
// drivers as used by package sql.
//
-// Code simply using databases should use package sql.
+// Most code should use package sql.
//
// Drivers only need to be aware of a subset of Go's types. The sql package
// will convert all types into one of the following:
case "bool":
return driver.Bool
case "nullbool":
- return driver.Null{driver.Bool}
+ return driver.Null{Converter: driver.Bool}
case "int32":
return driver.Int32
case "string":
- return driver.NotNull{driver.String}
+ return driver.NotNull{Converter: driver.String}
case "nullstring":
- return driver.Null{driver.String}
+ return driver.Null{Converter: driver.String}
case "int64":
// TODO(coopernurse): add type-specific converter
- return driver.NotNull{driver.DefaultParameterConverter}
+ return driver.NotNull{Converter: driver.DefaultParameterConverter}
case "nullint64":
// TODO(coopernurse): add type-specific converter
- return driver.Null{driver.DefaultParameterConverter}
+ return driver.Null{Converter: driver.DefaultParameterConverter}
case "float64":
// TODO(coopernurse): add type-specific converter
- return driver.NotNull{driver.DefaultParameterConverter}
+ return driver.NotNull{Converter: driver.DefaultParameterConverter}
case "nullfloat64":
// TODO(coopernurse): add type-specific converter
- return driver.Null{driver.DefaultParameterConverter}
+ return driver.Null{Converter: driver.DefaultParameterConverter}
case "datetime":
return driver.DefaultParameterConverter
}
// be modified and held indefinitely. The copy can be avoided by using
// an argument of type *RawBytes instead; see the documentation for
// RawBytes for restrictions on its use.
+//
+// If an argument has type *interface{}, Scan copies the value
+// provided by the underlying driver without conversion. If the value
+// is of type []byte, a copy is made and the caller owns the result.
func (rs *Rows) Scan(dest ...interface{}) error {
if rs.closed {
return errors.New("sql: Rows closed")
}
func newForkableWriter() *forkableWriter {
- return &forkableWriter{bytes.NewBuffer(nil), nil, nil}
+ return &forkableWriter{new(bytes.Buffer), nil, nil}
}
func (f *forkableWriter) fork() (pre, post *forkableWriter) {
}
}
+// EncodeToString returns the base32 encoding of src.
+func (enc *Encoding) EncodeToString(src []byte) string {
+ buf := make([]byte, enc.EncodedLen(len(src)))
+ enc.Encode(buf, src)
+ return string(buf)
+}
+
type encoder struct {
err error
enc *Encoding
// decode is like Decode but returns an additional 'end' value, which
// indicates if end-of-message padding was encountered and thus any
-// additional data is an error. decode also assumes len(src)%8==0,
-// since it is meant for internal use.
+// additional data is an error.
func (enc *Encoding) decode(dst, src []byte) (n int, end bool, err error) {
- for i := 0; i < len(src)/8 && !end; i++ {
+ osrc := src
+ for len(src) > 0 && !end {
// Decode quantum using the base32 alphabet
var dbuf [8]byte
dlen := 8
// do the top bytes contain any data?
dbufloop:
- for j := 0; j < 8; j++ {
- in := src[i*8+j]
- if in == '=' && j >= 2 && i == len(src)/8-1 {
+ for j := 0; j < 8; {
+ if len(src) == 0 {
+ return n, false, CorruptInputError(len(osrc) - len(src) - j)
+ }
+ in := src[0]
+ src = src[1:]
+ if in == '\r' || in == '\n' {
+ // Ignore this character.
+ continue
+ }
+ if in == '=' && j >= 2 && len(src) < 8 {
// We've reached the end and there's
// padding, the rest should be padded
- for k := j; k < 8; k++ {
- if src[i*8+k] != '=' {
- return n, false, CorruptInputError(i*8 + j)
+ for k := 0; k < 8-j-1; k++ {
+ if len(src) > k && src[k] != '=' {
+ return n, false, CorruptInputError(len(osrc) - len(src) + k - 1)
}
}
dlen = j
}
dbuf[j] = enc.decodeMap[in]
if dbuf[j] == 0xFF {
- return n, false, CorruptInputError(i*8 + j)
+ return n, false, CorruptInputError(len(osrc) - len(src) - 1)
}
+ j++
}
// Pack 8x 5-bit source blocks into 5 byte destination
// quantum
switch dlen {
case 7, 8:
- dst[i*5+4] = dbuf[6]<<5 | dbuf[7]
+ dst[4] = dbuf[6]<<5 | dbuf[7]
fallthrough
case 6, 5:
- dst[i*5+3] = dbuf[4]<<7 | dbuf[5]<<2 | dbuf[6]>>3
+ dst[3] = dbuf[4]<<7 | dbuf[5]<<2 | dbuf[6]>>3
fallthrough
case 4:
- dst[i*5+2] = dbuf[3]<<4 | dbuf[4]>>1
+ dst[2] = dbuf[3]<<4 | dbuf[4]>>1
fallthrough
case 3:
- dst[i*5+1] = dbuf[1]<<6 | dbuf[2]<<1 | dbuf[3]>>4
+ dst[1] = dbuf[1]<<6 | dbuf[2]<<1 | dbuf[3]>>4
fallthrough
case 2:
- dst[i*5+0] = dbuf[0]<<3 | dbuf[1]>>2
+ dst[0] = dbuf[0]<<3 | dbuf[1]>>2
}
+ dst = dst[5:]
switch dlen {
case 2:
n += 1
// DecodedLen(len(src)) bytes to dst and returns the number of bytes
// written. If src contains invalid base32 data, it will return the
// number of bytes successfully written and CorruptInputError.
+// New line characters (\r and \n) are ignored.
func (enc *Encoding) Decode(dst, src []byte) (n int, err error) {
- if len(src)%8 != 0 {
- return 0, CorruptInputError(len(src) / 8 * 8)
- }
-
n, _, err = enc.decode(dst, src)
return
}
+// DecodeString returns the bytes represented by the base32 string s.
+func (enc *Encoding) DecodeString(s string) ([]byte, error) {
+ dbuf := make([]byte, enc.DecodedLen(len(s)))
+ n, err := enc.Decode(dbuf, []byte(s))
+ return dbuf[:n], err
+}
+
type decoder struct {
err error
enc *Encoding
func TestEncode(t *testing.T) {
for _, p := range pairs {
- buf := make([]byte, StdEncoding.EncodedLen(len(p.decoded)))
- StdEncoding.Encode(buf, []byte(p.decoded))
- testEqual(t, "Encode(%q) = %q, want %q", p.decoded, string(buf), p.encoded)
+ got := StdEncoding.EncodeToString([]byte(p.decoded))
+ testEqual(t, "Encode(%q) = %q, want %q", p.decoded, got, p.encoded)
}
}
testEqual(t, "Decode(%q) = %q, want %q", p.encoded,
string(dbuf[0:count]),
p.decoded)
+
+ dbuf, err = StdEncoding.DecodeString(p.encoded)
+ testEqual(t, "DecodeString(%q) = error %v, want %v", p.encoded, err, error(nil))
+ testEqual(t, "DecodeString(%q) = %q, want %q", p.encoded, string(dbuf), p.decoded)
}
}
t.Errorf("Decode(Encode(%d-byte string)) failed at offset %d", n, i)
}
}
+
+func TestNewLineCharacters(t *testing.T) {
+ // Each of these should decode to the string "sure", without errors.
+ const expected = "sure"
+ examples := []string{
+ "ON2XEZI=",
+ "ON2XEZI=\r",
+ "ON2XEZI=\n",
+ "ON2XEZI=\r\n",
+ "ON2XEZ\r\nI=",
+ "ON2X\rEZ\nI=",
+ "ON2X\nEZ\rI=",
+ "ON2XEZ\nI=",
+ "ON2XEZI\n=",
+ }
+ for _, e := range examples {
+ buf, err := StdEncoding.DecodeString(e)
+ if err != nil {
+ t.Errorf("Decode(%q) failed: %v", e, err)
+ continue
+ }
+ if s := string(buf); s != expected {
+ t.Errorf("Decode(%q) = %q, want %q", e, s, expected)
+ }
+ }
+}
// decode is like Decode but returns an additional 'end' value, which
// indicates if end-of-message padding was encountered and thus any
-// additional data is an error. decode also assumes len(src)%4==0,
-// since it is meant for internal use.
+// additional data is an error.
func (enc *Encoding) decode(dst, src []byte) (n int, end bool, err error) {
- for i := 0; i < len(src)/4 && !end; i++ {
+ osrc := src
+ for len(src) > 0 && !end {
// Decode quantum using the base64 alphabet
var dbuf [4]byte
dlen := 4
dbufloop:
- for j := 0; j < 4; j++ {
- in := src[i*4+j]
- if in == '=' && j >= 2 && i == len(src)/4-1 {
+ for j := 0; j < 4; {
+ if len(src) == 0 {
+ return n, false, CorruptInputError(len(osrc) - len(src) - j)
+ }
+ in := src[0]
+ src = src[1:]
+ if in == '\r' || in == '\n' {
+ // Ignore this character.
+ continue
+ }
+ if in == '=' && j >= 2 && len(src) < 4 {
// We've reached the end and there's
// padding
- if src[i*4+3] != '=' {
- return n, false, CorruptInputError(i*4 + 2)
+ if len(src) > 0 && src[0] != '=' {
+ return n, false, CorruptInputError(len(osrc) - len(src) - 1)
}
dlen = j
end = true
}
dbuf[j] = enc.decodeMap[in]
if dbuf[j] == 0xFF {
- return n, false, CorruptInputError(i*4 + j)
+ return n, false, CorruptInputError(len(osrc) - len(src) - 1)
}
+ j++
}
// Pack 4x 6-bit source blocks into 3 byte destination
// quantum
switch dlen {
case 4:
- dst[i*3+2] = dbuf[2]<<6 | dbuf[3]
+ dst[2] = dbuf[2]<<6 | dbuf[3]
fallthrough
case 3:
- dst[i*3+1] = dbuf[1]<<4 | dbuf[2]>>2
+ dst[1] = dbuf[1]<<4 | dbuf[2]>>2
fallthrough
case 2:
- dst[i*3+0] = dbuf[0]<<2 | dbuf[1]>>4
+ dst[0] = dbuf[0]<<2 | dbuf[1]>>4
}
+ dst = dst[3:]
n += dlen - 1
}
// DecodedLen(len(src)) bytes to dst and returns the number of bytes
// written. If src contains invalid base64 data, it will return the
// number of bytes successfully written and CorruptInputError.
+// New line characters (\r and \n) are ignored.
func (enc *Encoding) Decode(dst, src []byte) (n int, err error) {
- if len(src)%4 != 0 {
- return 0, CorruptInputError(len(src) / 4 * 4)
- }
-
n, _, err = enc.decode(dst, src)
return
}
t.Errorf("Decode(Encode(%d-byte string)) failed at offset %d", n, i)
}
}
+
+func TestNewLineCharacters(t *testing.T) {
+ // Each of these should decode to the string "sure", without errors.
+ const expected = "sure"
+ examples := []string{
+ "c3VyZQ==",
+ "c3VyZQ==\r",
+ "c3VyZQ==\n",
+ "c3VyZQ==\r\n",
+ "c3VyZ\r\nQ==",
+ "c3V\ryZ\nQ==",
+ "c3V\nyZ\rQ==",
+ "c3VyZ\nQ==",
+ "c3VyZQ\n==",
+ }
+ for _, e := range examples {
+ buf, err := StdEncoding.DecodeString(e)
+ if err != nil {
+ t.Errorf("Decode(%q) failed: %v", e, err)
+ continue
+ }
+ if s := string(buf); s != expected {
+ t.Errorf("Decode(%q) = %q, want %q", e, s, expected)
+ }
+ }
+}
default:
return errors.New("binary.Read: invalid type " + d.Type().String())
}
- size := TotalSize(v)
+ size := dataSize(v)
if size < 0 {
return errors.New("binary.Read: invalid type " + v.Type().String())
}
return err
}
v := reflect.Indirect(reflect.ValueOf(data))
- size := TotalSize(v)
+ size := dataSize(v)
if size < 0 {
return errors.New("binary.Write: invalid type " + v.Type().String())
}
return err
}
-func TotalSize(v reflect.Value) int {
+// dataSize returns the number of bytes the actual data represented by v occupies in memory.
+// For compound structures, it sums the sizes of the elements. Thus, for instance, for a slice
+// it returns the length of the slice times the element size and does not count the memory
+// occupied by the header.
+func dataSize(v reflect.Value) int {
if v.Kind() == reflect.Slice {
elem := sizeof(v.Type().Elem())
if elem < 0 {
bsr := &byteSliceReader{}
var buf bytes.Buffer
Write(&buf, BigEndian, &s)
- n := TotalSize(reflect.ValueOf(s))
+ n := dataSize(reflect.ValueOf(s))
b.SetBytes(int64(n))
t := s
b.ResetTimer()
// ReadAll reads all the remaining records from r.
// Each record is a slice of fields.
+// A successful call returns err == nil, not err == EOF. Because ReadAll is
+// defined to read until EOF, it does not treat end of file as an error to be
+// reported.
func (r *Reader) ReadAll() (records [][]string, err error) {
for {
record, err := r.Read()
"bytes"
"errors"
"math"
+ "math/rand"
"reflect"
"strings"
"testing"
+ "time"
"unsafe"
)
}
debugFunc(debugBuffer)
}
+
+func encFuzzDec(rng *rand.Rand, in interface{}) error {
+ buf := new(bytes.Buffer)
+ enc := NewEncoder(buf)
+ if err := enc.Encode(&in); err != nil {
+ return err
+ }
+
+ b := buf.Bytes()
+ for i, bi := range b {
+ if rng.Intn(10) < 3 {
+ b[i] = bi + uint8(rng.Intn(256))
+ }
+ }
+
+ dec := NewDecoder(buf)
+ var e interface{}
+ if err := dec.Decode(&e); err != nil {
+ return err
+ }
+ return nil
+}
+
+// This does some "fuzz testing" by attempting to decode a sequence of random bytes.
+func TestFuzz(t *testing.T) {
+ if testing.Short() {
+ return
+ }
+
+ // all possible inputs
+ input := []interface{}{
+ new(int),
+ new(float32),
+ new(float64),
+ new(complex128),
+ &ByteStruct{255},
+ &ArrayStruct{},
+ &StringStruct{"hello"},
+ &GobTest1{0, &StringStruct{"hello"}},
+ }
+ testFuzz(t, time.Now().UnixNano(), 100, input...)
+}
+
+func TestFuzzRegressions(t *testing.T) {
+ // An instance triggering a type name of length ~102 GB.
+ testFuzz(t, 1328492090837718000, 100, new(float32))
+}
+
+func testFuzz(t *testing.T, seed int64, n int, input ...interface{}) {
+ t.Logf("seed=%d n=%d\n", seed, n)
+ for _, e := range input {
+ rng := rand.New(rand.NewSource(seed))
+ for i := 0; i < n; i++ {
+ encFuzzDec(rng, e)
+ }
+ }
+}
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Delete the next line to include this file in the gob package.
-// +build ignore
+// Delete the next line to include in the gob package.
+// +build gob-debug
package gob
// Create a writable interface reflect.Value. We need one even for the nil case.
ivalue := allocValue(ityp)
// Read the name of the concrete type.
- b := make([]byte, state.decodeUint())
+ nr := state.decodeUint()
+ if nr > 1<<31 { // zero is permissible for anonymous types; nr is unsigned, so no lower-bound check is needed
+ errorf("invalid type name length %d", nr)
+ }
+ b := make([]byte, nr)
state.b.Read(b)
name := string(b)
if name == "" {
// and returns the type id of the next value. It returns -1 at
// EOF. Upon return, the remainder of dec.buf is the value to be
// decoded. If this is an interface value, it can be ignored by
-// simply resetting that buffer.
+// resetting that buffer.
func (dec *Decoder) decodeTypeSequence(isInterface bool) typeId {
for dec.err == nil {
if dec.buf.Len() == 0 {
Structs, arrays and slices are also supported. Strings and arrays of bytes are
supported with a special, efficient representation (see below). When a slice is
decoded, if the existing slice has capacity the slice will be extended in place;
-if not, a new array is allocated. Regardless, the length of the resuling slice
+if not, a new array is allocated. Regardless, the length of the resulting slice
reports the number of elements decoded.
Functions and channels cannot be sent in a gob. Attempting
StructT *StructType
MapT *MapType
}
- type ArrayType struct {
+ type arrayType struct {
CommonType
Elem typeId
Len int
Name string // the name of the struct type
Id int // the id of the type, repeated so it's inside the type
}
- type SliceType struct {
+ type sliceType struct {
CommonType
Elem typeId
}
- type StructType struct {
+ type structType struct {
CommonType
Field []*fieldType // the fields of the struct.
}
- type FieldType struct {
+ type fieldType struct {
Name string // the name of the field.
Id int // the type id of the field, which must be already defined
}
- type MapType struct {
+ type mapType struct {
CommonType
Key typeId
Elem typeId
// Set the field number implicitly to -1; this is done at the beginning
// of every struct, including nested structs.
03 // Add 3 to field number; now 2 (wireType.structType; this is a struct).
- // structType starts with an embedded commonType, which appears
+ // structType starts with an embedded CommonType, which appears
// as a regular structure here too.
- 01 // add 1 to field number (now 0); start of embedded commonType.
+ 01 // add 1 to field number (now 0); start of embedded CommonType.
01 // add 1 to field number (now 0, the name of the type)
05 // string is (unsigned) 5 bytes long
- 50 6f 69 6e 74 // wireType.structType.commonType.name = "Point"
+ 50 6f 69 6e 74 // wireType.structType.CommonType.name = "Point"
01 // add 1 to field number (now 1, the id of the type)
- ff 82 // wireType.structType.commonType._id = 65
- 00 // end of embedded wiretype.structType.commonType struct
+ ff 82 // wireType.structType.CommonType._id = 65
+ 00 // end of embedded wiretype.structType.CommonType struct
01 // add 1 to field number (now 1, the field array in wireType.structType)
02 // There are two fields in the type (len(structType.field))
01 // Start of first field structure; add 1 to get field number 0: field[0].name
"bo": []bool{false},
"st": []string{"s"},
}
- buf := bytes.NewBuffer(nil)
- enc := NewEncoder(buf)
+ enc := NewEncoder(new(bytes.Buffer))
err := enc.Encode(m)
if err != nil {
t.Errorf("encode map: %s", err)
}
func TestSliceReusesMemory(t *testing.T) {
- buf := bytes.NewBuffer(nil)
+ buf := new(bytes.Buffer)
// Bytes
{
x := []byte("abcd")
// plain error. It overwrites the error return of the function that deferred its call.
func catchError(err *error) {
if e := recover(); e != nil {
- *err = e.(gobError).err // Will re-panic if not one of our errors, such as a runtime error.
+ ge, ok := e.(gobError)
+ if !ok {
+ panic(e)
+ }
+ *err = ge.err
}
return
}
var buf bytes.Buffer
enc := NewEncoder(&buf)
bench := &Bench{7, 3.2, "now is the time", []byte("for all good men")}
- runtime.UpdateMemStats()
- mallocs := 0 - runtime.MemStats.Mallocs
+ memstats := new(runtime.MemStats)
+ runtime.ReadMemStats(memstats)
+ mallocs := 0 - memstats.Mallocs
const count = 1000
for i := 0; i < count; i++ {
err := enc.Encode(bench)
t.Fatal("encode:", err)
}
}
- runtime.UpdateMemStats()
- mallocs += runtime.MemStats.Mallocs
+ runtime.ReadMemStats(memstats)
+ mallocs += memstats.Mallocs
fmt.Printf("mallocs per encode of type Bench: %d\n", mallocs/count)
}
}
}
dec := NewDecoder(&buf)
- runtime.UpdateMemStats()
- mallocs := 0 - runtime.MemStats.Mallocs
+ memstats := new(runtime.MemStats)
+ runtime.ReadMemStats(memstats)
+ mallocs := 0 - memstats.Mallocs
for i := 0; i < count; i++ {
*bench = Bench{}
err := dec.Decode(&bench)
t.Fatal("decode:", err)
}
}
- runtime.UpdateMemStats()
- mallocs += runtime.MemStats.Mallocs
+ runtime.ReadMemStats(memstats)
+ mallocs += memstats.Mallocs
fmt.Printf("mallocs per decode of type Bench: %d\n", mallocs/count)
}
return t.gobType().name()
}
-// Common elements of all types.
+// CommonType holds elements of all types.
+// It is a historical artifact, kept for binary compatibility and exported
+// only for the benefit of the package's encoding of type descriptors. It is
+// not intended for direct use by clients.
type CommonType struct {
Name string
Id typeId
import (
"bytes"
+ "errors"
+ "fmt"
"io"
- "strconv"
)
const hextable = "0123456789abcdef"
return len(src) * 2
}
-// OddLengthInputError results from decoding an odd length slice.
-type OddLengthInputError struct{}
+// ErrLength results from decoding an odd length slice.
+var ErrLength = errors.New("encoding/hex: odd length hex string")
-func (OddLengthInputError) Error() string { return "odd length hex string" }
+// InvalidByteError values describe errors resulting from an invalid byte in a hex string.
+type InvalidByteError byte
-// InvalidHexCharError results from finding an invalid character in a hex string.
-type InvalidHexCharError byte
-
-func (e InvalidHexCharError) Error() string {
- return "invalid hex char: " + strconv.Itoa(int(e))
+func (e InvalidByteError) Error() string {
+ return fmt.Sprintf("encoding/hex: invalid byte: %#U", rune(e))
}
func DecodedLen(x int) int { return x / 2 }
// Decode decodes src into DecodedLen(len(src)) bytes, returning the actual
// number of bytes written to dst.
//
-// If Decode encounters invalid input, it returns an OddLengthInputError or an
-// InvalidHexCharError.
+// If Decode encounters invalid input, it returns an error describing the failure.
func Decode(dst, src []byte) (int, error) {
if len(src)%2 == 1 {
- return 0, OddLengthInputError{}
+ return 0, ErrLength
}
for i := 0; i < len(src)/2; i++ {
a, ok := fromHexChar(src[i*2])
if !ok {
- return 0, InvalidHexCharError(src[i*2])
+ return 0, InvalidByteError(src[i*2])
}
b, ok := fromHexChar(src[i*2+1])
if !ok {
- return 0, InvalidHexCharError(src[i*2+1])
+ return 0, InvalidByteError(src[i*2+1])
}
dst[i] = (a << 4) | b
}
// Dump returns a string that contains a hex dump of the given data. The format
// of the hex dump matches the output of `hexdump -C` on the command line.
func Dump(data []byte) string {
- buf := bytes.NewBuffer(nil)
- dumper := Dumper(buf)
+ var buf bytes.Buffer
+ dumper := Dumper(&buf)
dumper.Write(data)
dumper.Close()
return string(buf.Bytes())
"testing"
)
-type encodeTest struct {
- in, out []byte
+type encDecTest struct {
+ enc string
+ dec []byte
}
-var encodeTests = []encodeTest{
- {[]byte{}, []byte{}},
- {[]byte{0x01}, []byte{'0', '1'}},
- {[]byte{0xff}, []byte{'f', 'f'}},
- {[]byte{0xff, 00}, []byte{'f', 'f', '0', '0'}},
- {[]byte{0}, []byte{'0', '0'}},
- {[]byte{1}, []byte{'0', '1'}},
- {[]byte{2}, []byte{'0', '2'}},
- {[]byte{3}, []byte{'0', '3'}},
- {[]byte{4}, []byte{'0', '4'}},
- {[]byte{5}, []byte{'0', '5'}},
- {[]byte{6}, []byte{'0', '6'}},
- {[]byte{7}, []byte{'0', '7'}},
- {[]byte{8}, []byte{'0', '8'}},
- {[]byte{9}, []byte{'0', '9'}},
- {[]byte{10}, []byte{'0', 'a'}},
- {[]byte{11}, []byte{'0', 'b'}},
- {[]byte{12}, []byte{'0', 'c'}},
- {[]byte{13}, []byte{'0', 'd'}},
- {[]byte{14}, []byte{'0', 'e'}},
- {[]byte{15}, []byte{'0', 'f'}},
+var encDecTests = []encDecTest{
+ {"", []byte{}},
+ {"0001020304050607", []byte{0, 1, 2, 3, 4, 5, 6, 7}},
+ {"08090a0b0c0d0e0f", []byte{8, 9, 10, 11, 12, 13, 14, 15}},
+ {"f0f1f2f3f4f5f6f7", []byte{0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7}},
+ {"f8f9fafbfcfdfeff", []byte{0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff}},
+ {"67", []byte{'g'}},
+ {"e3a1", []byte{0xe3, 0xa1}},
}
func TestEncode(t *testing.T) {
- for i, test := range encodeTests {
- dst := make([]byte, EncodedLen(len(test.in)))
- n := Encode(dst, test.in)
+ for i, test := range encDecTests {
+ dst := make([]byte, EncodedLen(len(test.dec)))
+ n := Encode(dst, test.dec)
if n != len(dst) {
t.Errorf("#%d: bad return value: got: %d want: %d", i, n, len(dst))
}
- if bytes.Compare(dst, test.out) != 0 {
- t.Errorf("#%d: got: %#v want: %#v", i, dst, test.out)
+ if string(dst) != test.enc {
+ t.Errorf("#%d: got: %#v want: %#v", i, dst, test.enc)
}
}
}
-type decodeTest struct {
- in, out []byte
- ok bool
-}
-
-var decodeTests = []decodeTest{
- {[]byte{}, []byte{}, true},
- {[]byte{'0'}, []byte{}, false},
- {[]byte{'0', 'g'}, []byte{}, false},
- {[]byte{'0', '\x01'}, []byte{}, false},
- {[]byte{'0', '0'}, []byte{0}, true},
- {[]byte{'0', '1'}, []byte{1}, true},
- {[]byte{'0', '2'}, []byte{2}, true},
- {[]byte{'0', '3'}, []byte{3}, true},
- {[]byte{'0', '4'}, []byte{4}, true},
- {[]byte{'0', '5'}, []byte{5}, true},
- {[]byte{'0', '6'}, []byte{6}, true},
- {[]byte{'0', '7'}, []byte{7}, true},
- {[]byte{'0', '8'}, []byte{8}, true},
- {[]byte{'0', '9'}, []byte{9}, true},
- {[]byte{'0', 'a'}, []byte{10}, true},
- {[]byte{'0', 'b'}, []byte{11}, true},
- {[]byte{'0', 'c'}, []byte{12}, true},
- {[]byte{'0', 'd'}, []byte{13}, true},
- {[]byte{'0', 'e'}, []byte{14}, true},
- {[]byte{'0', 'f'}, []byte{15}, true},
- {[]byte{'0', 'A'}, []byte{10}, true},
- {[]byte{'0', 'B'}, []byte{11}, true},
- {[]byte{'0', 'C'}, []byte{12}, true},
- {[]byte{'0', 'D'}, []byte{13}, true},
- {[]byte{'0', 'E'}, []byte{14}, true},
- {[]byte{'0', 'F'}, []byte{15}, true},
-}
-
func TestDecode(t *testing.T) {
- for i, test := range decodeTests {
- dst := make([]byte, DecodedLen(len(test.in)))
- n, err := Decode(dst, test.in)
- if err == nil && n != len(dst) {
+ for i, test := range encDecTests {
+ dst := make([]byte, DecodedLen(len(test.enc)))
+ n, err := Decode(dst, []byte(test.enc))
+ if err != nil {
t.Errorf("#%d: bad return value: got:%d want:%d", i, n, len(dst))
- }
- if test.ok != (err == nil) {
- t.Errorf("#%d: unexpected err value: %s", i, err)
- }
- if err == nil && bytes.Compare(dst, test.out) != 0 {
- t.Errorf("#%d: got: %#v want: %#v", i, dst, test.out)
+ } else if !bytes.Equal(dst, test.dec) {
+ t.Errorf("#%d: got: %#v want: %#v", i, dst, test.dec)
}
}
}
-type encodeStringTest struct {
- in []byte
- out string
-}
-
-var encodeStringTests = []encodeStringTest{
- {[]byte{}, ""},
- {[]byte{0}, "00"},
- {[]byte{0, 1}, "0001"},
- {[]byte{0, 1, 255}, "0001ff"},
+func TestEncodeToString(t *testing.T) {
+ for i, test := range encDecTests {
+ s := EncodeToString(test.dec)
+ if s != test.enc {
+ t.Errorf("#%d got:%s want:%s", i, s, test.enc)
+ }
+ }
}
-func TestEncodeToString(t *testing.T) {
- for i, test := range encodeStringTests {
- s := EncodeToString(test.in)
- if s != test.out {
- t.Errorf("#%d got:%s want:%s", i, s, test.out)
+func TestDecodeString(t *testing.T) {
+ for i, test := range encDecTests {
+ dst, err := DecodeString(test.enc)
+ if err != nil {
+ t.Errorf("#%d: unexpected err value: %s", i, err)
+ continue
+ }
+ if bytes.Compare(dst, test.dec) != 0 {
+ t.Errorf("#%d: got: %#v want: %#v", i, dst, test.dec)
}
}
}
-type decodeStringTest struct {
+type errTest struct {
in string
- out []byte
- ok bool
+ err string
}
-var decodeStringTests = []decodeStringTest{
- {"", []byte{}, true},
- {"0", []byte{}, false},
- {"00", []byte{0}, true},
- {"0\x01", []byte{}, false},
- {"0g", []byte{}, false},
- {"00ff00", []byte{0, 255, 0}, true},
- {"0000ff", []byte{0, 0, 255}, true},
+var errTests = []errTest{
+ {"0", "encoding/hex: odd length hex string"},
+ {"0g", "encoding/hex: invalid byte: U+0067 'g'"},
+ {"0\x01", "encoding/hex: invalid byte: U+0001"},
}
-func TestDecodeString(t *testing.T) {
- for i, test := range decodeStringTests {
- dst, err := DecodeString(test.in)
- if test.ok != (err == nil) {
- t.Errorf("#%d: unexpected err value: %s", i, err)
+func TestInvalidErr(t *testing.T) {
+ for i, test := range errTests {
+ dst := make([]byte, DecodedLen(len(test.in)))
+ _, err := Decode(dst, []byte(test.in))
+ if err == nil {
+ t.Errorf("#%d: expected error; got none", i)
+ } else if err.Error() != test.err {
+ t.Errorf("#%d: got: %v want: %v", i, err, test.err)
}
- if err == nil && bytes.Compare(dst, test.out) != 0 {
- t.Errorf("#%d: got: %#v want: #%v", i, dst, test.out)
+ }
+}
+
+func TestInvalidStringErr(t *testing.T) {
+ for i, test := range errTests {
+ _, err := DecodeString(test.in)
+ if err == nil {
+ t.Errorf("#%d: expected error; got none", i)
+ } else if err.Error() != test.err {
+ t.Errorf("#%d: got: %v want: %v", i, err, test.err)
}
}
}
}
for stride := 1; stride < len(in); stride++ {
- out := bytes.NewBuffer(nil)
- dumper := Dumper(out)
+ var out bytes.Buffer
+ dumper := Dumper(&out)
done := 0
for done < len(in) {
todo := done + stride
}`
var pallValueCompact = strings.Map(noSpace, pallValueIndent)
+
+func TestRefUnmarshal(t *testing.T) {
+ type S struct {
+ // Ref is defined in encode_test.go.
+ R0 Ref
+ R1 *Ref
+ }
+ want := S{
+ R0: 12,
+ R1: new(Ref),
+ }
+ *want.R1 = 12
+
+ var got S
+ if err := Unmarshal([]byte(`{"R0":"ref","R1":"ref"}`), &got); err != nil {
+ t.Fatalf("Unmarshal: %v", err)
+ }
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("got %+v, want %+v", got, want)
+ }
+}
return
}
- if j, ok := v.Interface().(Marshaler); ok && (v.Kind() != reflect.Ptr || !v.IsNil()) {
- b, err := j.MarshalJSON()
+ m, ok := v.Interface().(Marshaler)
+ if !ok {
+ // T doesn't match the interface. Check against *T too.
+ if v.Kind() != reflect.Ptr && v.CanAddr() {
+ m, ok = v.Addr().Interface().(Marshaler)
+ if ok {
+ v = v.Addr()
+ }
+ }
+ }
+ if ok && (v.Kind() != reflect.Ptr || !v.IsNil()) {
+ b, err := m.MarshalJSON()
if err == nil {
// copy JSON into buffer, checking validity.
err = Compact(&e.Buffer, b)
}
}
}
+
+// Ref has Marshaler and Unmarshaler methods with pointer receiver.
+type Ref int
+
+func (*Ref) MarshalJSON() ([]byte, error) {
+ return []byte(`"ref"`), nil
+}
+
+func (r *Ref) UnmarshalJSON([]byte) error {
+ *r = 12
+ return nil
+}
+
+// Val has Marshaler methods with value receiver.
+type Val int
+
+func (Val) MarshalJSON() ([]byte, error) {
+ return []byte(`"val"`), nil
+}
+
+func TestRefValMarshal(t *testing.T) {
+ var s = struct {
+ R0 Ref
+ R1 *Ref
+ V0 Val
+ V1 *Val
+ }{
+ R0: 12,
+ R1: new(Ref),
+ V0: 13,
+ V1: new(Val),
+ }
+ const want = `{"R0":"ref","R1":"ref","V0":"val","V1":"val"}`
+ b, err := Marshal(&s)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+ if got := string(b); got != want {
+ t.Errorf("got %q, want %q", got, want)
+ }
+}
return c == ' ' || c == '\t' || c == '\r' || c == '\n'
}
-// NOTE(rsc): The various instances of
-//
-// if c <= ' ' && (c == ' ' || c == '\t' || c == '\r' || c == '\n')
-//
-// below should all be if c <= ' ' && isSpace(c), but inlining
-// the checks makes a significant difference (>10%) in tight loops
-// such as nextValue. These should be rewritten with the clearer
-// function call once 6g knows to inline the call.
-
// stateBeginValueOrEmpty is the state after reading `[`.
func stateBeginValueOrEmpty(s *scanner, c int) int {
- if c <= ' ' && (c == ' ' || c == '\t' || c == '\r' || c == '\n') {
+ if c <= ' ' && isSpace(rune(c)) {
return scanSkipSpace
}
if c == ']' {
// stateBeginValue is the state at the beginning of the input.
func stateBeginValue(s *scanner, c int) int {
- if c <= ' ' && (c == ' ' || c == '\t' || c == '\r' || c == '\n') {
+ if c <= ' ' && isSpace(rune(c)) {
return scanSkipSpace
}
switch c {
// stateBeginStringOrEmpty is the state after reading `{`.
func stateBeginStringOrEmpty(s *scanner, c int) int {
- if c <= ' ' && (c == ' ' || c == '\t' || c == '\r' || c == '\n') {
+ if c <= ' ' && isSpace(rune(c)) {
return scanSkipSpace
}
if c == '}' {
// stateBeginString is the state after reading `{"key": value,`.
func stateBeginString(s *scanner, c int) int {
- if c <= ' ' && (c == ' ' || c == '\t' || c == '\r' || c == '\n') {
+ if c <= ' ' && isSpace(rune(c)) {
return scanSkipSpace
}
if c == '"' {
s.endTop = true
return stateEndTop(s, c)
}
- if c <= ' ' && (c == ' ' || c == '\t' || c == '\r' || c == '\n') {
+ if c <= ' ' && isSpace(rune(c)) {
s.step = stateEndValue
return scanSkipSpace
}
}
func EncodeToMemory(b *Block) []byte {
- buf := bytes.NewBuffer(nil)
- Encode(buf, b)
+ var buf bytes.Buffer
+ Encode(&buf, b)
return buf.Bytes()
}
func TestLineBreaker(t *testing.T) {
for i, test := range lineBreakerTests {
- buf := bytes.NewBuffer(nil)
+ buf := new(bytes.Buffer)
var breaker lineBreaker
breaker.out = buf
_, err := breaker.Write([]byte(test.in))
}
for i, test := range lineBreakerTests {
- buf := bytes.NewBuffer(nil)
+ buf := new(bytes.Buffer)
var breaker lineBreaker
breaker.out = buf
Value: &NameInField{Name{Space: "ns", Local: "foo"}},
ExpectXML: `<NameInField><foo xmlns="ns"></foo></NameInField>`,
},
+ {
+ Value: &NameInField{Name{Space: "ns", Local: "foo"}},
+ ExpectXML: `<NameInField><foo xmlns="ns"><ignore></ignore></foo></NameInField>`,
+ UnmarshalOnly: true,
+ },
// Marshaling zero xml.Name uses the tag or field name.
{
saveData = v
case reflect.Struct:
- sv = v
- typ := sv.Type()
+ typ := v.Type()
if typ == nameType {
v.Set(reflect.ValueOf(start.Name))
break
}
+
+ sv = v
tinfo, err = getTypeInfo(typ)
if err != nil {
return err
panic("unreachable")
}
-// Have already read a start element.
-// Read tokens until we find the end element.
-// Token is taking care of making sure the
-// end element matches the start element we saw.
-func (p *Decoder) Skip() error {
+// Skip reads tokens until it has consumed the end element
+// matching the most recent start element already consumed.
+// It recurs if it encounters a start element, so it can be used to
+// skip nested structures.
+// It returns nil if it finds an end element matching the start
+// element; otherwise it returns an error describing the problem.
+func (d *Decoder) Skip() error {
for {
- tok, err := p.Token()
+ tok, err := d.Token()
if err != nil {
return err
}
switch tok.(type) {
case StartElement:
- if err := p.Skip(); err != nil {
+ if err := d.Skip(); err != nil {
return err
}
case EndElement:
// If the field type has an XMLName field, the names must match
// so that the behavior of both marshalling and unmarshalling
- // is straighforward and unambiguous.
+ // is straightforward and unambiguous.
if finfo.flags&fElement != 0 {
ftyp := f.Type
xmlname := lookupXMLName(ftyp)
/*
-Ebnflint verifies that EBNF productions are consistent and gramatically correct.
+Ebnflint verifies that EBNF productions are consistent and grammatically correct.
It reads them from an HTML document such as the Go specification.
Grammar productions are grouped in boxes demarcated by the HTML elements
Usage:
- ebnflint [--start production] [file]
+ go tool ebnflint [--start production] [file]
The --start flag specifies the name of the start production for
the grammar; it defaults to "Start".
var start = flag.String("start", "Start", "name of start production")
func usage() {
- fmt.Fprintf(os.Stderr, "usage: ebnflint [flags] [filename]\n")
+ fmt.Fprintf(os.Stderr, "usage: go tool ebnflint [flags] [filename]\n")
flag.PrintDefaults()
os.Exit(1)
}
if strings.IndexAny(s, escapedChars) == -1 {
return s
}
- buf := bytes.NewBuffer(nil)
- escape(buf, s)
+ var buf bytes.Buffer
+ escape(&buf, s)
return buf.String()
}
if n == nil || len(n.Child) == 0 {
return "", nil
}
- b := bytes.NewBuffer(nil)
+ var b bytes.Buffer
for _, child := range n.Child {
- if err := dumpLevel(b, child, 0); err != nil {
+ if err := dumpLevel(&b, child, 0); err != nil {
return "", err
}
}
if len(t.Attr) == 0 {
return t.Data
}
- buf := bytes.NewBuffer(nil)
- buf.WriteString(t.Data)
+ buf := bytes.NewBufferString(t.Data)
for _, a := range t.Attr {
buf.WriteByte(' ')
buf.WriteString(a.Key)
func TestBufAPI(t *testing.T) {
s := "0<a>1</a>2<b>3<a>4<a>5</a>6</b>7</a>8<a/>9"
- z := NewTokenizer(bytes.NewBuffer([]byte(s)))
- result := bytes.NewBuffer(nil)
+ z := NewTokenizer(bytes.NewBufferString(s))
+ var result bytes.Buffer
depth := 0
loop:
for {
}
wd, err := syscall.InotifyAddWatch(w.fd, path, flags)
if err != nil {
- return &os.PathError{"inotify_add_watch", path, err}
+ return &os.PathError{
+ Op: "inotify_add_watch",
+ Path: path,
+ Err: err,
+ }
}
if !found {
func (rb *reorderBuffer) insert(src input, i int, info runeInfo) bool {
if info.size == 3 {
if rune := src.hangul(i); rune != 0 {
- return rb.decomposeHangul(uint32(rune))
+ return rb.decomposeHangul(rune)
}
}
- if info.flags.hasDecomposition() {
+ if info.hasDecomposition() {
dcomp := rb.f.decompose(src, i)
rb.tmpBytes = inputBytes(dcomp)
for i := 0; i < len(dcomp); {
}
// appendRune inserts a rune at the end of the buffer. It is used for Hangul.
-func (rb *reorderBuffer) appendRune(r uint32) {
+func (rb *reorderBuffer) appendRune(r rune) {
bn := rb.nbyte
sz := utf8.EncodeRune(rb.byte[bn:], rune(r))
rb.nbyte += utf8.UTFMax
- rb.rune[rb.nrune] = runeInfo{bn, uint8(sz), 0, 0}
+ rb.rune[rb.nrune] = runeInfo{pos: bn, size: uint8(sz)}
rb.nrune++
}
// assignRune sets a rune at position pos. It is used for Hangul and recomposition.
-func (rb *reorderBuffer) assignRune(pos int, r uint32) {
+func (rb *reorderBuffer) assignRune(pos int, r rune) {
bn := rb.rune[pos].pos
sz := utf8.EncodeRune(rb.byte[bn:], rune(r))
- rb.rune[pos] = runeInfo{bn, uint8(sz), 0, 0}
+ rb.rune[pos] = runeInfo{pos: bn, size: uint8(sz)}
}
// runeAt returns the rune at position n. It is used for Hangul and recomposition.
-func (rb *reorderBuffer) runeAt(n int) uint32 {
+func (rb *reorderBuffer) runeAt(n int) rune {
inf := rb.rune[n]
r, _ := utf8.DecodeRune(rb.byte[inf.pos : inf.pos+inf.size])
- return uint32(r)
+ return r
}
// bytesAt returns the UTF-8 encoding of the rune at position n.
// decomposeHangul algorithmically decomposes a Hangul rune into
// its Jamo components.
// See http://unicode.org/reports/tr15/#Hangul for details on decomposing Hangul.
-func (rb *reorderBuffer) decomposeHangul(r uint32) bool {
+func (rb *reorderBuffer) decomposeHangul(r rune) bool {
b := rb.rune[:]
n := rb.nrune
if n+3 > len(b) {
// get the info for the combined character. This is more
// expensive than using the filter. Using combinesBackward()
// is safe.
- if ii.flags.combinesBackward() {
+ if ii.combinesBackward() {
cccB := b[k-1].ccc
cccC := ii.ccc
blocked := false // b[i] blocked by starter or greater or equal CCC?
}
// functions dispatchable per form
-type boundaryFunc func(f *formInfo, info runeInfo) bool
type lookupFunc func(b input, i int) runeInfo
type decompFunc func(b input, i int) []byte
composing, compatibility bool // form type
- decompose decompFunc
- info lookupFunc
- boundaryBefore boundaryFunc
- boundaryAfter boundaryFunc
+ decompose decompFunc
+ info lookupFunc
}
var formTable []*formInfo
}
if Form(i) == NFC || Form(i) == NFKC {
f.composing = true
- f.boundaryBefore = compBoundaryBefore
- f.boundaryAfter = compBoundaryAfter
- } else {
- f.boundaryBefore = decompBoundary
- f.boundaryAfter = decompBoundary
}
}
}
-func decompBoundary(f *formInfo, info runeInfo) bool {
- if info.ccc == 0 && info.flags.isYesD() { // Implies isHangul(b) == true
- return true
- }
- // We assume that the CCC of the first character in a decomposition
- // is always non-zero if different from info.ccc and that we can return
- // false at this point. This is verified by maketables.
- return false
-}
-
-func compBoundaryBefore(f *formInfo, info runeInfo) bool {
- if info.ccc == 0 && !info.flags.combinesBackward() {
+// We do not distinguish between boundaries for NFC, NFD, etc. to avoid
+// unexpected behavior for the user. For example, in NFD, there is a boundary
+// after 'a'. However, a might combine with modifiers, so from the application's
+// perspective it is not a good boundary. We will therefore always use the
+// boundaries for the combining variants.
+func (i runeInfo) boundaryBefore() bool {
+ if i.ccc == 0 && !i.combinesBackward() {
return true
}
// We assume that the CCC of the first character in a decomposition
return false
}
-func compBoundaryAfter(f *formInfo, info runeInfo) bool {
- // This misses values where the last char in a decomposition is a
- // boundary such as Hangul with JamoT.
- return info.isInert()
+func (i runeInfo) boundaryAfter() bool {
+ return i.isInert()
}
// We pack quick check data in 4 bits:
// 0: NFD_QC Yes (0) or No (1). No also means there is a decomposition.
-// 1..2: NFC_QC Yes(00), No (01), or Maybe (11)
+// 1..2: NFC_QC Yes(00), No (10), or Maybe (11)
// 3: Combines forward (0 == false, 1 == true)
//
// When all 4 bits are zero, the character is inert, meaning it is never
// We pack the bits for both NFC/D and NFKC/D in one byte.
type qcInfo uint8
-func (i qcInfo) isYesC() bool { return i&0x2 == 0 }
-func (i qcInfo) isNoC() bool { return i&0x6 == 0x2 }
-func (i qcInfo) isMaybe() bool { return i&0x4 != 0 }
-func (i qcInfo) isYesD() bool { return i&0x1 == 0 }
-func (i qcInfo) isNoD() bool { return i&0x1 != 0 }
+func (i runeInfo) isYesC() bool { return i.flags&0x4 == 0 }
+func (i runeInfo) isYesD() bool { return i.flags&0x1 == 0 }
-func (i qcInfo) combinesForward() bool { return i&0x8 != 0 }
-func (i qcInfo) combinesBackward() bool { return i&0x4 != 0 } // == isMaybe
-func (i qcInfo) hasDecomposition() bool { return i&0x1 != 0 } // == isNoD
+func (i runeInfo) combinesForward() bool { return i.flags&0x8 != 0 }
+func (i runeInfo) combinesBackward() bool { return i.flags&0x2 != 0 } // == isMaybe
+func (i runeInfo) hasDecomposition() bool { return i.flags&0x1 != 0 } // == isNoD
func (r runeInfo) isInert() bool {
return r.flags&0xf == 0 && r.ccc == 0
// Wrappers for tables.go
-// The 16-bit value of the decompostion tries is an index into a byte
+// The 16-bit value of the decomposition tries is an index into a byte
// array of UTF-8 decomposition sequences. The first byte is the number
// of bytes in the decomposition (excluding this length byte). The actual
// sequence starts at the offset+1.
// Note that the recomposition map for NFC and NFKC are identical.
// combine returns the combined rune or 0 if it doesn't exist.
-func combine(a, b uint32) uint32 {
+func combine(a, b rune) rune {
key := uint32(uint16(a))<<16 + uint32(uint16(b))
return recompMap[key]
}
// 12..15 qcInfo for NFKC/NFKD
func lookupInfoNFC(b input, i int) runeInfo {
v, sz := b.charinfo(i)
- return runeInfo{0, uint8(sz), uint8(v), qcInfo(v >> 8)}
+ return runeInfo{size: uint8(sz), ccc: uint8(v), flags: qcInfo(v >> 8)}
}
func lookupInfoNFKC(b input, i int) runeInfo {
v, sz := b.charinfo(i)
- return runeInfo{0, uint8(sz), uint8(v), qcInfo(v >> 12)}
+ return runeInfo{size: uint8(sz), ccc: uint8(v), flags: qcInfo(v >> 12)}
}
charinfo(p int) (uint16, int)
decomposeNFC(p int) uint16
decomposeNFKC(p int) uint16
- hangul(p int) uint32
+ hangul(p int) rune
}
type inputString string
return nfkcDecompTrie.lookupStringUnsafe(string(s[p:]))
}
-func (s inputString) hangul(p int) uint32 {
+func (s inputString) hangul(p int) rune {
if !isHangulString(string(s[p:])) {
return 0
}
rune, _ := utf8.DecodeRuneInString(string(s[p:]))
- return uint32(rune)
+ return rune
}
type inputBytes []byte
return nfkcDecompTrie.lookupUnsafe(s[p:])
}
-func (s inputBytes) hangul(p int) uint32 {
+func (s inputBytes) hangul(p int) rune {
if !isHangul(s[p:]) {
return 0
}
rune, _ := utf8.DecodeRune(s[p:])
- return uint32(rune)
+ return rune
}
switch f.quickCheck[MComposed] {
case QCYes:
case QCNo:
- e |= 0x2
+ e |= 0x4
case QCMaybe:
e |= 0x6
default:
sz := nrentries * 8
size += sz
fmt.Printf("// recompMap: %d bytes (entries only)\n", sz)
- fmt.Println("var recompMap = map[uint32]uint32{")
+ fmt.Println("var recompMap = map[uint32]rune{")
for i, c := range chars {
f := c.forms[FCanonical]
d := f.decomp
var info runeInfo
if p < n {
info = fd.info(src, p)
- if p == 0 && !fd.boundaryBefore(fd, info) {
+ if p == 0 && !info.boundaryBefore() {
out = decomposeToLastBoundary(rb, out)
}
}
- if info.size == 0 || fd.boundaryBefore(fd, info) {
+ if info.size == 0 || info.boundaryBefore() {
if fd.composing {
rb.compose()
}
}
cc := info.ccc
if rb.f.composing {
- if !info.flags.isYesC() {
+ if !info.isYesC() {
break
}
} else {
- if !info.flags.isYesD() {
+ if !info.isYesD() {
break
}
}
}
fd := &rb.f
info := fd.info(src, i)
- for n := 0; info.size != 0 && !fd.boundaryBefore(fd, info); {
+ for n := 0; info.size != 0 && !info.boundaryBefore(); {
i += int(info.size)
if n++; n >= maxCombiningChars {
return i
}
if i >= nsrc {
- if !fd.boundaryAfter(fd, info) {
+ if !info.boundaryAfter() {
return -1
}
return nsrc
if p+int(info.size) != i { // trailing non-starter bytes: illegal UTF-8
return i
}
- if fd.boundaryAfter(fd, info) {
+ if info.boundaryAfter() {
return i
}
i = p
- for n := 0; i >= 0 && !fd.boundaryBefore(fd, info); {
+ for n := 0; i >= 0 && !info.boundaryBefore(); {
info, p = lastRuneStart(fd, b[:i])
if n++; n >= maxCombiningChars {
return len(b)
break
}
info = rb.f.info(rb.src, sp)
- bound := rb.f.boundaryBefore(&rb.f, info)
+ bound := info.boundaryBefore()
if bound || info.size == 0 {
break
}
for ; p >= 0 && !utf8.RuneStart(buf[p]); p-- {
}
if p < 0 {
- return runeInfo{0, 0, 0, 0}, -1
+ return runeInfo{}, -1
}
return fd.info(inputBytes(buf), p), p
}
// illegal trailing continuation bytes
return buf
}
- if rb.f.boundaryAfter(fd, info) {
+ if info.boundaryAfter() {
return buf
}
var add [maxBackRunes]runeInfo // stores runeInfo in reverse order
padd := 1
n := 1
p := len(buf) - int(info.size)
- for ; p >= 0 && !rb.f.boundaryBefore(fd, info); p -= int(info.size) {
+ for ; p >= 0 && !info.boundaryBefore(); p -= int(info.size) {
info, i = lastRuneStart(fd, buf[:p])
if int(info.size) != p-i {
break
}
// Check that decomposition doesn't result in overflow.
- if info.flags.hasDecomposition() {
+ if info.hasDecomposition() {
dcomp := rb.f.decompose(inputBytes(buf), p-int(info.size))
for i := 0; i < len(dcomp); {
inf := rb.f.info(inputBytes(dcomp), i)
runAppendTests(t, "TestString", NFKC, stringF, appendTests)
}
-func doFormBenchmark(b *testing.B, f Form, s string) {
+func doFormBenchmark(b *testing.B, inf, f Form, s string) {
b.StopTimer()
- in := []byte(s)
+ in := inf.Bytes([]byte(s))
buf := make([]byte, 2*len(in))
- b.SetBytes(int64(len(s)))
+ b.SetBytes(int64(len(in)))
b.StartTimer()
for i := 0; i < b.N; i++ {
buf = f.Append(buf[0:0], in...)
var ascii = strings.Repeat("There is nothing to change here! ", 500)
func BenchmarkNormalizeAsciiNFC(b *testing.B) {
- doFormBenchmark(b, NFC, ascii)
+ doFormBenchmark(b, NFC, NFC, ascii)
}
func BenchmarkNormalizeAsciiNFD(b *testing.B) {
- doFormBenchmark(b, NFD, ascii)
+ doFormBenchmark(b, NFC, NFD, ascii)
}
func BenchmarkNormalizeAsciiNFKC(b *testing.B) {
- doFormBenchmark(b, NFKC, ascii)
+ doFormBenchmark(b, NFC, NFKC, ascii)
}
func BenchmarkNormalizeAsciiNFKD(b *testing.B) {
- doFormBenchmark(b, NFKD, ascii)
+ doFormBenchmark(b, NFC, NFKD, ascii)
+}
+
+func BenchmarkNormalizeNFC2NFC(b *testing.B) {
+ doFormBenchmark(b, NFC, NFC, txt_all)
+}
+func BenchmarkNormalizeNFC2NFD(b *testing.B) {
+ doFormBenchmark(b, NFC, NFD, txt_all)
+}
+func BenchmarkNormalizeNFD2NFC(b *testing.B) {
+ doFormBenchmark(b, NFD, NFC, txt_all)
+}
+func BenchmarkNormalizeNFD2NFD(b *testing.B) {
+ doFormBenchmark(b, NFD, NFD, txt_all)
+}
+
+// Hangul is often special-cased, so we test it separately.
+func BenchmarkNormalizeHangulNFC2NFC(b *testing.B) {
+ doFormBenchmark(b, NFC, NFC, txt_kr)
+}
+func BenchmarkNormalizeHangulNFC2NFD(b *testing.B) {
+ doFormBenchmark(b, NFC, NFD, txt_kr)
+}
+func BenchmarkNormalizeHangulNFD2NFC(b *testing.B) {
+ doFormBenchmark(b, NFD, NFC, txt_kr)
+}
+func BenchmarkNormalizeHangulNFD2NFD(b *testing.B) {
+ doFormBenchmark(b, NFD, NFD, txt_kr)
}
func doTextBenchmark(b *testing.B, s string) {
署名 — 您必须按照作者或者许可人指定的方式对作品进行署名。
相同方式共享 — 如果您改变、转换本作品或者以本作品为基础进行创作,
您只能采用与本协议相同的许可协议发布基于本作品的演绎作品。`
+
+const txt_cjk = txt_cn + txt_jp + txt_kr
+const txt_all = txt_vn + twoByteUtf8 + threeByteUtf8 + txt_cjk
var nfkcDecompTrie = trie{nfkcDecompLookup[:], nfkcDecompValues[:], nfkcDecompSparseValues[:], nfkcDecompSparseOffset[:], 66}
// recompMap: 7448 bytes (entries only)
-var recompMap = map[uint32]uint32{
+var recompMap = map[uint32]rune{
0x00410300: 0x00C0,
0x00410301: 0x00C1,
0x00410302: 0x00C2,
0x0136: 0x0001, 0x0137: 0x0001, 0x0138: 0x6601, 0x0139: 0x00dc, 0x013a: 0x00dc, 0x013b: 0x00dc,
0x013c: 0x00dc, 0x013d: 0x00e6, 0x013e: 0x00e6, 0x013f: 0x00e6,
// Block 0x5, offset 0x140
- 0x0140: 0x33e6, 0x0141: 0x33e6, 0x0142: 0x66e6, 0x0143: 0x33e6, 0x0144: 0x33e6, 0x0145: 0x66f0,
+ 0x0140: 0x55e6, 0x0141: 0x55e6, 0x0142: 0x66e6, 0x0143: 0x55e6, 0x0144: 0x55e6, 0x0145: 0x66f0,
0x0146: 0x00e6, 0x0147: 0x00dc, 0x0148: 0x00dc, 0x0149: 0x00dc, 0x014a: 0x00e6, 0x014b: 0x00e6,
0x014c: 0x00e6, 0x014d: 0x00dc, 0x014e: 0x00dc, 0x0150: 0x00e6, 0x0151: 0x00e6,
0x0152: 0x00e6, 0x0153: 0x00dc, 0x0154: 0x00dc, 0x0155: 0x00dc, 0x0156: 0x00dc, 0x0157: 0x00e6,
0x015e: 0x00ea, 0x015f: 0x00e9, 0x0160: 0x00ea, 0x0161: 0x00ea, 0x0162: 0x00e9, 0x0163: 0x00e6,
0x0164: 0x00e6, 0x0165: 0x00e6, 0x0166: 0x00e6, 0x0167: 0x00e6, 0x0168: 0x00e6, 0x0169: 0x00e6,
0x016a: 0x00e6, 0x016b: 0x00e6, 0x016c: 0x00e6, 0x016d: 0x00e6, 0x016e: 0x00e6, 0x016f: 0x00e6,
- 0x0174: 0x3300,
- 0x017a: 0x3000,
- 0x017e: 0x3300,
+ 0x0174: 0x5500,
+ 0x017a: 0x5000,
+ 0x017e: 0x5500,
// Block 0x6, offset 0x180
- 0x0184: 0x3000, 0x0185: 0x3100,
- 0x0186: 0x1100, 0x0187: 0x3300, 0x0188: 0x1100, 0x0189: 0x1100, 0x018a: 0x1100,
+ 0x0184: 0x5000, 0x0185: 0x5100,
+ 0x0186: 0x1100, 0x0187: 0x5500, 0x0188: 0x1100, 0x0189: 0x1100, 0x018a: 0x1100,
0x018c: 0x1100, 0x018e: 0x1100, 0x018f: 0x1100, 0x0190: 0x1100, 0x0191: 0x8800,
0x0195: 0x8800, 0x0197: 0x8800,
0x0199: 0x8800,
0x01f6: 0x8800, 0x01f7: 0x8800, 0x01f8: 0x8800, 0x01f9: 0x1100, 0x01fa: 0x8800,
0x01fe: 0x8800,
// Block 0x8, offset 0x200
- 0x0207: 0x3000,
+ 0x0207: 0x5000,
0x0211: 0x00dc,
0x0212: 0x00e6, 0x0213: 0x00e6, 0x0214: 0x00e6, 0x0215: 0x00e6, 0x0216: 0x00dc, 0x0217: 0x00e6,
0x0218: 0x00e6, 0x0219: 0x00e6, 0x021a: 0x00de, 0x021b: 0x00dc, 0x021c: 0x00e6, 0x021d: 0x00e6,
0x0252: 0x0022, 0x0253: 0x66e6, 0x0254: 0x66e6, 0x0255: 0x66dc, 0x0256: 0x00dc, 0x0257: 0x00e6,
0x0258: 0x00e6, 0x0259: 0x00e6, 0x025a: 0x00e6, 0x025b: 0x00e6, 0x025c: 0x00dc, 0x025d: 0x00e6,
0x025e: 0x00e6, 0x025f: 0x00dc,
- 0x0270: 0x0023, 0x0275: 0x3000,
- 0x0276: 0x3000, 0x0277: 0x3000, 0x0278: 0x3000,
+ 0x0270: 0x0023, 0x0275: 0x5000,
+ 0x0276: 0x5000, 0x0277: 0x5000, 0x0278: 0x5000,
// Block 0xa, offset 0x280
0x0280: 0x9900, 0x0281: 0x9900, 0x0282: 0x1100, 0x0283: 0x1100, 0x0284: 0x1100, 0x0285: 0x1100,
0x0288: 0x9900, 0x0289: 0x9900, 0x028a: 0x1100, 0x028b: 0x1100,
0x029f: 0x1100, 0x02a0: 0x9900, 0x02a1: 0x9900, 0x02a2: 0x9900, 0x02a3: 0x9900,
0x02a4: 0x9900, 0x02a5: 0x9900, 0x02a6: 0x9900, 0x02a7: 0x9900, 0x02a8: 0x9900, 0x02a9: 0x9900,
0x02aa: 0x9900, 0x02ab: 0x9900, 0x02ac: 0x9900, 0x02ad: 0x9900, 0x02ae: 0x9900, 0x02af: 0x9900,
- 0x02b0: 0x9900, 0x02b1: 0x3300, 0x02b2: 0x1100, 0x02b3: 0x3300, 0x02b4: 0x9900, 0x02b5: 0x3300,
- 0x02b6: 0x1100, 0x02b7: 0x3300, 0x02b8: 0x1100, 0x02b9: 0x3300, 0x02ba: 0x1100, 0x02bb: 0x3300,
- 0x02bc: 0x9900, 0x02bd: 0x3300,
+ 0x02b0: 0x9900, 0x02b1: 0x5500, 0x02b2: 0x1100, 0x02b3: 0x5500, 0x02b4: 0x9900, 0x02b5: 0x5500,
+ 0x02b6: 0x1100, 0x02b7: 0x5500, 0x02b8: 0x1100, 0x02b9: 0x5500, 0x02ba: 0x1100, 0x02bb: 0x5500,
+ 0x02bc: 0x9900, 0x02bd: 0x5500,
// Block 0xb, offset 0x2c0
- 0x02c0: 0x3000, 0x02c1: 0x3100, 0x02c2: 0x1100, 0x02c3: 0x1100, 0x02c4: 0x1100,
- 0x02c6: 0x9900, 0x02c7: 0x1100, 0x02c8: 0x1100, 0x02c9: 0x3300, 0x02ca: 0x1100, 0x02cb: 0x3300,
- 0x02cc: 0x1100, 0x02cd: 0x3100, 0x02ce: 0x3100, 0x02cf: 0x3100, 0x02d0: 0x1100, 0x02d1: 0x1100,
- 0x02d2: 0x1100, 0x02d3: 0x3300, 0x02d6: 0x1100, 0x02d7: 0x1100,
- 0x02d8: 0x1100, 0x02d9: 0x1100, 0x02da: 0x1100, 0x02db: 0x3300, 0x02dd: 0x3100,
- 0x02de: 0x3100, 0x02df: 0x3100, 0x02e0: 0x1100, 0x02e1: 0x1100, 0x02e2: 0x1100, 0x02e3: 0x3300,
+ 0x02c0: 0x5000, 0x02c1: 0x5100, 0x02c2: 0x1100, 0x02c3: 0x1100, 0x02c4: 0x1100,
+ 0x02c6: 0x9900, 0x02c7: 0x1100, 0x02c8: 0x1100, 0x02c9: 0x5500, 0x02ca: 0x1100, 0x02cb: 0x5500,
+ 0x02cc: 0x1100, 0x02cd: 0x5100, 0x02ce: 0x5100, 0x02cf: 0x5100, 0x02d0: 0x1100, 0x02d1: 0x1100,
+ 0x02d2: 0x1100, 0x02d3: 0x5500, 0x02d6: 0x1100, 0x02d7: 0x1100,
+ 0x02d8: 0x1100, 0x02d9: 0x1100, 0x02da: 0x1100, 0x02db: 0x5500, 0x02dd: 0x5100,
+ 0x02de: 0x5100, 0x02df: 0x5100, 0x02e0: 0x1100, 0x02e1: 0x1100, 0x02e2: 0x1100, 0x02e3: 0x5500,
0x02e4: 0x1100, 0x02e5: 0x1100, 0x02e6: 0x1100, 0x02e7: 0x1100, 0x02e8: 0x1100, 0x02e9: 0x1100,
- 0x02ea: 0x1100, 0x02eb: 0x3300, 0x02ec: 0x1100, 0x02ed: 0x3100, 0x02ee: 0x3300, 0x02ef: 0x3300,
+ 0x02ea: 0x1100, 0x02eb: 0x5500, 0x02ec: 0x1100, 0x02ed: 0x5100, 0x02ee: 0x5500, 0x02ef: 0x5500,
0x02f2: 0x1100, 0x02f3: 0x1100, 0x02f4: 0x1100,
- 0x02f6: 0x9900, 0x02f7: 0x1100, 0x02f8: 0x1100, 0x02f9: 0x3300, 0x02fa: 0x1100, 0x02fb: 0x3300,
- 0x02fc: 0x1100, 0x02fd: 0x3300, 0x02fe: 0x3800,
+ 0x02f6: 0x9900, 0x02f7: 0x1100, 0x02f8: 0x1100, 0x02f9: 0x5500, 0x02fa: 0x1100, 0x02fb: 0x5500,
+ 0x02fc: 0x1100, 0x02fd: 0x5500, 0x02fe: 0x5800,
// Block 0xc, offset 0x300
0x0301: 0x1100, 0x0303: 0x8800, 0x0304: 0x1100, 0x0305: 0x8800,
0x0307: 0x1100, 0x0308: 0x8800, 0x0309: 0x1100,
0x037c: 0x1100, 0x037d: 0x1100,
// Block 0xe, offset 0x380
0x0394: 0x1100,
- 0x0399: 0x6608, 0x039a: 0x6608, 0x039b: 0x3000, 0x039c: 0x3000, 0x039d: 0x8800,
- 0x039e: 0x1100, 0x039f: 0x3000,
+ 0x0399: 0x6608, 0x039a: 0x6608, 0x039b: 0x5000, 0x039c: 0x5000, 0x039d: 0x8800,
+ 0x039e: 0x1100, 0x039f: 0x5000,
0x03a6: 0x8800,
0x03ab: 0x8800, 0x03ac: 0x1100, 0x03ad: 0x8800, 0x03ae: 0x1100, 0x03af: 0x8800,
0x03b0: 0x1100, 0x03b1: 0x8800, 0x03b2: 0x1100, 0x03b3: 0x8800, 0x03b4: 0x1100, 0x03b5: 0x8800,
0x03ef: 0x8800,
0x03f0: 0x8800, 0x03f1: 0x8800, 0x03f2: 0x8800, 0x03f4: 0x1100,
0x03f7: 0x1100, 0x03f8: 0x1100, 0x03f9: 0x1100, 0x03fa: 0x1100,
- 0x03fd: 0x8800, 0x03fe: 0x1100, 0x03ff: 0x3000,
+ 0x03fd: 0x8800, 0x03fe: 0x1100, 0x03ff: 0x5000,
}
// charInfoSparseOffset: 156 entries, 312 bytes
var charInfoSparseValues = [757]valueRange{
// Block 0x0, offset 0x1
{value: 0x0000, lo: 0x07},
- {value: 0x3000, lo: 0xa0, hi: 0xa0},
- {value: 0x3800, lo: 0xa8, hi: 0xa8},
- {value: 0x3000, lo: 0xaa, hi: 0xaa},
- {value: 0x3000, lo: 0xaf, hi: 0xaf},
- {value: 0x3000, lo: 0xb2, hi: 0xb5},
- {value: 0x3000, lo: 0xb8, hi: 0xba},
- {value: 0x3000, lo: 0xbc, hi: 0xbe},
+ {value: 0x5000, lo: 0xa0, hi: 0xa0},
+ {value: 0x5800, lo: 0xa8, hi: 0xa8},
+ {value: 0x5000, lo: 0xaa, hi: 0xaa},
+ {value: 0x5000, lo: 0xaf, hi: 0xaf},
+ {value: 0x5000, lo: 0xb2, hi: 0xb5},
+ {value: 0x5000, lo: 0xb8, hi: 0xba},
+ {value: 0x5000, lo: 0xbc, hi: 0xbe},
// Block 0x1, offset 0x2
{value: 0x0000, lo: 0x0a},
{value: 0x1100, lo: 0x80, hi: 0x81},
{value: 0x9900, lo: 0x92, hi: 0x93},
{value: 0x1100, lo: 0x94, hi: 0xa5},
{value: 0x1100, lo: 0xa8, hi: 0xb0},
- {value: 0x3000, lo: 0xb2, hi: 0xb3},
+ {value: 0x5000, lo: 0xb2, hi: 0xb3},
{value: 0x1100, lo: 0xb4, hi: 0xb7},
{value: 0x1100, lo: 0xb9, hi: 0xbe},
- {value: 0x3000, lo: 0xbf, hi: 0xbf},
+ {value: 0x5000, lo: 0xbf, hi: 0xbf},
// Block 0x2, offset 0x3
{value: 0x0000, lo: 0x0d},
- {value: 0x3000, lo: 0x80, hi: 0x80},
+ {value: 0x5000, lo: 0x80, hi: 0x80},
{value: 0x1100, lo: 0x83, hi: 0x88},
- {value: 0x3000, lo: 0x89, hi: 0x89},
+ {value: 0x5000, lo: 0x89, hi: 0x89},
{value: 0x9900, lo: 0x8c, hi: 0x8d},
{value: 0x1100, lo: 0x8e, hi: 0x91},
{value: 0x1100, lo: 0x94, hi: 0x99},
{value: 0x1100, lo: 0xa2, hi: 0xa5},
{value: 0x9900, lo: 0xa8, hi: 0xab},
{value: 0x1100, lo: 0xac, hi: 0xbe},
- {value: 0x3800, lo: 0xbf, hi: 0xbf},
+ {value: 0x5800, lo: 0xbf, hi: 0xbf},
// Block 0x3, offset 0x4
{value: 0x0000, lo: 0x03},
{value: 0x9900, lo: 0xa0, hi: 0xa1},
{value: 0x8800, lo: 0xb7, hi: 0xb7},
// Block 0x4, offset 0x5
{value: 0x0000, lo: 0x09},
- {value: 0x3000, lo: 0x84, hi: 0x8c},
+ {value: 0x5000, lo: 0x84, hi: 0x8c},
{value: 0x1100, lo: 0x8d, hi: 0x9c},
{value: 0x1100, lo: 0x9e, hi: 0xa3},
{value: 0x1100, lo: 0xa6, hi: 0xa9},
{value: 0x9900, lo: 0xaa, hi: 0xab},
{value: 0x1100, lo: 0xac, hi: 0xb0},
- {value: 0x3000, lo: 0xb1, hi: 0xb3},
+ {value: 0x5000, lo: 0xb1, hi: 0xb3},
{value: 0x1100, lo: 0xb4, hi: 0xb5},
{value: 0x1100, lo: 0xb8, hi: 0xbf},
// Block 0x5, offset 0x6
// Block 0x6, offset 0x7
{value: 0x0000, lo: 0x02},
{value: 0x8800, lo: 0x92, hi: 0x92},
- {value: 0x3000, lo: 0xb0, hi: 0xb8},
+ {value: 0x5000, lo: 0xb0, hi: 0xb8},
// Block 0x7, offset 0x8
{value: 0x0000, lo: 0x02},
- {value: 0x3000, lo: 0x98, hi: 0x9d},
- {value: 0x3000, lo: 0xa0, hi: 0xa4},
+ {value: 0x5000, lo: 0x98, hi: 0x9d},
+ {value: 0x5000, lo: 0xa0, hi: 0xa4},
// Block 0x8, offset 0x9
{value: 0x0000, lo: 0x0d},
{value: 0x8800, lo: 0x81, hi: 0x81},
{value: 0x9900, lo: 0x8a, hi: 0x8b},
{value: 0x1100, lo: 0x8c, hi: 0x8d},
{value: 0x9900, lo: 0x8e, hi: 0x8e},
- {value: 0x3000, lo: 0x90, hi: 0x91},
- {value: 0x3800, lo: 0x92, hi: 0x92},
- {value: 0x3100, lo: 0x93, hi: 0x94},
- {value: 0x3000, lo: 0x95, hi: 0x96},
- {value: 0x3000, lo: 0xb0, hi: 0xb2},
- {value: 0x3000, lo: 0xb4, hi: 0xb5},
- {value: 0x3000, lo: 0xb9, hi: 0xb9},
+ {value: 0x5000, lo: 0x90, hi: 0x91},
+ {value: 0x5800, lo: 0x92, hi: 0x92},
+ {value: 0x5100, lo: 0x93, hi: 0x94},
+ {value: 0x5000, lo: 0x95, hi: 0x96},
+ {value: 0x5000, lo: 0xb0, hi: 0xb2},
+ {value: 0x5000, lo: 0xb4, hi: 0xb5},
+ {value: 0x5000, lo: 0xb9, hi: 0xb9},
// Block 0x9, offset 0xa
{value: 0x0000, lo: 0x0b},
{value: 0x8800, lo: 0x83, hi: 0x83},
{value: 0x00e6, lo: 0x91, hi: 0x91},
{value: 0x00dc, lo: 0x92, hi: 0x92},
{value: 0x00e6, lo: 0x93, hi: 0x94},
- {value: 0x3300, lo: 0x98, hi: 0x9f},
+ {value: 0x5500, lo: 0x98, hi: 0x9f},
// Block 0x16, offset 0x17
{value: 0x0000, lo: 0x02},
{value: 0x0007, lo: 0xbc, hi: 0xbc},
{value: 0x1100, lo: 0x8b, hi: 0x8c},
{value: 0x0009, lo: 0x8d, hi: 0x8d},
{value: 0x6600, lo: 0x97, hi: 0x97},
- {value: 0x3300, lo: 0x9c, hi: 0x9d},
- {value: 0x3300, lo: 0x9f, hi: 0x9f},
+ {value: 0x5500, lo: 0x9c, hi: 0x9d},
+ {value: 0x5500, lo: 0x9f, hi: 0x9f},
// Block 0x18, offset 0x19
{value: 0x0000, lo: 0x03},
- {value: 0x3300, lo: 0xb3, hi: 0xb3},
- {value: 0x3300, lo: 0xb6, hi: 0xb6},
+ {value: 0x5500, lo: 0xb3, hi: 0xb3},
+ {value: 0x5500, lo: 0xb6, hi: 0xb6},
{value: 0x0007, lo: 0xbc, hi: 0xbc},
// Block 0x19, offset 0x1a
{value: 0x0000, lo: 0x03},
{value: 0x0009, lo: 0x8d, hi: 0x8d},
- {value: 0x3300, lo: 0x99, hi: 0x9b},
- {value: 0x3300, lo: 0x9e, hi: 0x9e},
+ {value: 0x5500, lo: 0x99, hi: 0x9b},
+ {value: 0x5500, lo: 0x9e, hi: 0x9e},
// Block 0x1a, offset 0x1b
{value: 0x0000, lo: 0x01},
{value: 0x0007, lo: 0xbc, hi: 0xbc},
{value: 0x1100, lo: 0x8b, hi: 0x8c},
{value: 0x0009, lo: 0x8d, hi: 0x8d},
{value: 0x6600, lo: 0x96, hi: 0x97},
- {value: 0x3300, lo: 0x9c, hi: 0x9d},
+ {value: 0x5500, lo: 0x9c, hi: 0x9d},
// Block 0x1d, offset 0x1e
{value: 0x0000, lo: 0x03},
{value: 0x8800, lo: 0x92, hi: 0x92},
{value: 0x6600, lo: 0x9f, hi: 0x9f},
// Block 0x24, offset 0x25
{value: 0x0000, lo: 0x03},
- {value: 0x3000, lo: 0xb3, hi: 0xb3},
+ {value: 0x5000, lo: 0xb3, hi: 0xb3},
{value: 0x0067, lo: 0xb8, hi: 0xb9},
{value: 0x0009, lo: 0xba, hi: 0xba},
// Block 0x25, offset 0x26
{value: 0x006b, lo: 0x88, hi: 0x8b},
// Block 0x26, offset 0x27
{value: 0x0000, lo: 0x02},
- {value: 0x3000, lo: 0xb3, hi: 0xb3},
+ {value: 0x5000, lo: 0xb3, hi: 0xb3},
{value: 0x0076, lo: 0xb8, hi: 0xb9},
// Block 0x27, offset 0x28
{value: 0x0000, lo: 0x02},
{value: 0x007a, lo: 0x88, hi: 0x8b},
- {value: 0x3000, lo: 0x9c, hi: 0x9d},
+ {value: 0x5000, lo: 0x9c, hi: 0x9d},
// Block 0x28, offset 0x29
{value: 0x0000, lo: 0x05},
- {value: 0x3000, lo: 0x8c, hi: 0x8c},
+ {value: 0x5000, lo: 0x8c, hi: 0x8c},
{value: 0x00dc, lo: 0x98, hi: 0x99},
{value: 0x00dc, lo: 0xb5, hi: 0xb5},
{value: 0x00dc, lo: 0xb7, hi: 0xb7},
{value: 0x00d8, lo: 0xb9, hi: 0xb9},
// Block 0x29, offset 0x2a
{value: 0x0000, lo: 0x0f},
- {value: 0x3300, lo: 0x83, hi: 0x83},
- {value: 0x3300, lo: 0x8d, hi: 0x8d},
- {value: 0x3300, lo: 0x92, hi: 0x92},
- {value: 0x3300, lo: 0x97, hi: 0x97},
- {value: 0x3300, lo: 0x9c, hi: 0x9c},
- {value: 0x3300, lo: 0xa9, hi: 0xa9},
+ {value: 0x5500, lo: 0x83, hi: 0x83},
+ {value: 0x5500, lo: 0x8d, hi: 0x8d},
+ {value: 0x5500, lo: 0x92, hi: 0x92},
+ {value: 0x5500, lo: 0x97, hi: 0x97},
+ {value: 0x5500, lo: 0x9c, hi: 0x9c},
+ {value: 0x5500, lo: 0xa9, hi: 0xa9},
{value: 0x0081, lo: 0xb1, hi: 0xb1},
{value: 0x0082, lo: 0xb2, hi: 0xb2},
- {value: 0x3300, lo: 0xb3, hi: 0xb3},
+ {value: 0x5500, lo: 0xb3, hi: 0xb3},
{value: 0x0084, lo: 0xb4, hi: 0xb4},
- {value: 0x3300, lo: 0xb5, hi: 0xb6},
- {value: 0x3000, lo: 0xb7, hi: 0xb7},
- {value: 0x3300, lo: 0xb8, hi: 0xb8},
- {value: 0x3000, lo: 0xb9, hi: 0xb9},
+ {value: 0x5500, lo: 0xb5, hi: 0xb6},
+ {value: 0x5000, lo: 0xb7, hi: 0xb7},
+ {value: 0x5500, lo: 0xb8, hi: 0xb8},
+ {value: 0x5000, lo: 0xb9, hi: 0xb9},
{value: 0x0082, lo: 0xba, hi: 0xbd},
// Block 0x2a, offset 0x2b
{value: 0x0000, lo: 0x0b},
{value: 0x0082, lo: 0x80, hi: 0x80},
- {value: 0x3300, lo: 0x81, hi: 0x81},
+ {value: 0x5500, lo: 0x81, hi: 0x81},
{value: 0x00e6, lo: 0x82, hi: 0x83},
{value: 0x0009, lo: 0x84, hi: 0x84},
{value: 0x00e6, lo: 0x86, hi: 0x87},
- {value: 0x3300, lo: 0x93, hi: 0x93},
- {value: 0x3300, lo: 0x9d, hi: 0x9d},
- {value: 0x3300, lo: 0xa2, hi: 0xa2},
- {value: 0x3300, lo: 0xa7, hi: 0xa7},
- {value: 0x3300, lo: 0xac, hi: 0xac},
- {value: 0x3300, lo: 0xb9, hi: 0xb9},
+ {value: 0x5500, lo: 0x93, hi: 0x93},
+ {value: 0x5500, lo: 0x9d, hi: 0x9d},
+ {value: 0x5500, lo: 0xa2, hi: 0xa2},
+ {value: 0x5500, lo: 0xa7, hi: 0xa7},
+ {value: 0x5500, lo: 0xac, hi: 0xac},
+ {value: 0x5500, lo: 0xb9, hi: 0xb9},
// Block 0x2b, offset 0x2c
{value: 0x0000, lo: 0x01},
{value: 0x00dc, lo: 0x86, hi: 0x86},
{value: 0x00dc, lo: 0x8d, hi: 0x8d},
// Block 0x2e, offset 0x2f
{value: 0x0000, lo: 0x01},
- {value: 0x3000, lo: 0xbc, hi: 0xbc},
+ {value: 0x5000, lo: 0xbc, hi: 0xbc},
// Block 0x2f, offset 0x30
{value: 0x0000, lo: 0x01},
{value: 0x8800, lo: 0x80, hi: 0x92},
{value: 0x00dc, lo: 0xad, hi: 0xad},
// Block 0x40, offset 0x41
{value: 0x0000, lo: 0x03},
- {value: 0x3000, lo: 0xac, hi: 0xae},
- {value: 0x3000, lo: 0xb0, hi: 0xba},
- {value: 0x3000, lo: 0xbc, hi: 0xbf},
+ {value: 0x5000, lo: 0xac, hi: 0xae},
+ {value: 0x5000, lo: 0xb0, hi: 0xba},
+ {value: 0x5000, lo: 0xbc, hi: 0xbf},
// Block 0x41, offset 0x42
{value: 0x0000, lo: 0x03},
- {value: 0x3000, lo: 0x80, hi: 0x8d},
- {value: 0x3000, lo: 0x8f, hi: 0xaa},
- {value: 0x3000, lo: 0xb8, hi: 0xb8},
+ {value: 0x5000, lo: 0x80, hi: 0x8d},
+ {value: 0x5000, lo: 0x8f, hi: 0xaa},
+ {value: 0x5000, lo: 0xb8, hi: 0xb8},
// Block 0x42, offset 0x43
{value: 0x0000, lo: 0x01},
- {value: 0x3000, lo: 0x9b, hi: 0xbf},
+ {value: 0x5000, lo: 0x9b, hi: 0xbf},
// Block 0x43, offset 0x44
{value: 0x0000, lo: 0x0e},
{value: 0x00e6, lo: 0x80, hi: 0x81},
// Block 0x46, offset 0x47
{value: 0x0000, lo: 0x07},
{value: 0x1100, lo: 0x80, hi: 0x99},
- {value: 0x3000, lo: 0x9a, hi: 0x9a},
- {value: 0x3100, lo: 0x9b, hi: 0x9b},
+ {value: 0x5000, lo: 0x9a, hi: 0x9a},
+ {value: 0x5100, lo: 0x9b, hi: 0x9b},
{value: 0x9900, lo: 0xa0, hi: 0xa1},
{value: 0x1100, lo: 0xa2, hi: 0xb7},
{value: 0x9900, lo: 0xb8, hi: 0xb9},
{value: 0x1100, lo: 0x80, hi: 0xb4},
{value: 0x9900, lo: 0xb6, hi: 0xb6},
{value: 0x1100, lo: 0xb7, hi: 0xba},
- {value: 0x3300, lo: 0xbb, hi: 0xbb},
+ {value: 0x5500, lo: 0xbb, hi: 0xbb},
{value: 0x1100, lo: 0xbc, hi: 0xbc},
- {value: 0x3000, lo: 0xbd, hi: 0xbd},
- {value: 0x3300, lo: 0xbe, hi: 0xbe},
- {value: 0x3800, lo: 0xbf, hi: 0xbf},
+ {value: 0x5000, lo: 0xbd, hi: 0xbd},
+ {value: 0x5500, lo: 0xbe, hi: 0xbe},
+ {value: 0x5800, lo: 0xbf, hi: 0xbf},
// Block 0x4a, offset 0x4b
{value: 0x0000, lo: 0x0a},
- {value: 0x3300, lo: 0x80, hi: 0x81},
- {value: 0x3000, lo: 0x82, hi: 0x8a},
- {value: 0x3000, lo: 0x91, hi: 0x91},
- {value: 0x3000, lo: 0x97, hi: 0x97},
- {value: 0x3000, lo: 0xa4, hi: 0xa6},
- {value: 0x3000, lo: 0xaf, hi: 0xaf},
- {value: 0x3000, lo: 0xb3, hi: 0xb4},
- {value: 0x3000, lo: 0xb6, hi: 0xb7},
- {value: 0x3000, lo: 0xbc, hi: 0xbc},
- {value: 0x3000, lo: 0xbe, hi: 0xbe},
+ {value: 0x5500, lo: 0x80, hi: 0x81},
+ {value: 0x5000, lo: 0x82, hi: 0x8a},
+ {value: 0x5000, lo: 0x91, hi: 0x91},
+ {value: 0x5000, lo: 0x97, hi: 0x97},
+ {value: 0x5000, lo: 0xa4, hi: 0xa6},
+ {value: 0x5000, lo: 0xaf, hi: 0xaf},
+ {value: 0x5000, lo: 0xb3, hi: 0xb4},
+ {value: 0x5000, lo: 0xb6, hi: 0xb7},
+ {value: 0x5000, lo: 0xbc, hi: 0xbc},
+ {value: 0x5000, lo: 0xbe, hi: 0xbe},
// Block 0x4b, offset 0x4c
{value: 0x0000, lo: 0x05},
- {value: 0x3000, lo: 0x87, hi: 0x89},
- {value: 0x3000, lo: 0x97, hi: 0x97},
- {value: 0x3000, lo: 0x9f, hi: 0x9f},
- {value: 0x3000, lo: 0xb0, hi: 0xb1},
- {value: 0x3000, lo: 0xb4, hi: 0xbf},
+ {value: 0x5000, lo: 0x87, hi: 0x89},
+ {value: 0x5000, lo: 0x97, hi: 0x97},
+ {value: 0x5000, lo: 0x9f, hi: 0x9f},
+ {value: 0x5000, lo: 0xb0, hi: 0xb1},
+ {value: 0x5000, lo: 0xb4, hi: 0xbf},
// Block 0x4c, offset 0x4d
{value: 0x0000, lo: 0x03},
- {value: 0x3000, lo: 0x80, hi: 0x8e},
- {value: 0x3000, lo: 0x90, hi: 0x9c},
- {value: 0x3000, lo: 0xa8, hi: 0xa8},
+ {value: 0x5000, lo: 0x80, hi: 0x8e},
+ {value: 0x5000, lo: 0x90, hi: 0x9c},
+ {value: 0x5000, lo: 0xa8, hi: 0xa8},
// Block 0x4d, offset 0x4e
{value: 0x0000, lo: 0x0d},
{value: 0x00e6, lo: 0x90, hi: 0x91},
{value: 0x00e6, lo: 0xb0, hi: 0xb0},
// Block 0x4e, offset 0x4f
{value: 0x0000, lo: 0x0e},
- {value: 0x3000, lo: 0x80, hi: 0x83},
- {value: 0x3000, lo: 0x85, hi: 0x87},
- {value: 0x3000, lo: 0x89, hi: 0x93},
- {value: 0x3000, lo: 0x95, hi: 0x96},
- {value: 0x3000, lo: 0x99, hi: 0x9d},
- {value: 0x3000, lo: 0xa0, hi: 0xa2},
- {value: 0x3000, lo: 0xa4, hi: 0xa4},
- {value: 0x3300, lo: 0xa6, hi: 0xa6},
- {value: 0x3000, lo: 0xa8, hi: 0xa8},
- {value: 0x3300, lo: 0xaa, hi: 0xab},
- {value: 0x3000, lo: 0xac, hi: 0xad},
- {value: 0x3000, lo: 0xaf, hi: 0xb1},
- {value: 0x3000, lo: 0xb3, hi: 0xb9},
- {value: 0x3000, lo: 0xbb, hi: 0xbf},
+ {value: 0x5000, lo: 0x80, hi: 0x83},
+ {value: 0x5000, lo: 0x85, hi: 0x87},
+ {value: 0x5000, lo: 0x89, hi: 0x93},
+ {value: 0x5000, lo: 0x95, hi: 0x96},
+ {value: 0x5000, lo: 0x99, hi: 0x9d},
+ {value: 0x5000, lo: 0xa0, hi: 0xa2},
+ {value: 0x5000, lo: 0xa4, hi: 0xa4},
+ {value: 0x5500, lo: 0xa6, hi: 0xa6},
+ {value: 0x5000, lo: 0xa8, hi: 0xa8},
+ {value: 0x5500, lo: 0xaa, hi: 0xab},
+ {value: 0x5000, lo: 0xac, hi: 0xad},
+ {value: 0x5000, lo: 0xaf, hi: 0xb1},
+ {value: 0x5000, lo: 0xb3, hi: 0xb9},
+ {value: 0x5000, lo: 0xbb, hi: 0xbf},
// Block 0x4f, offset 0x50
{value: 0x0000, lo: 0x03},
- {value: 0x3000, lo: 0x80, hi: 0x80},
- {value: 0x3000, lo: 0x85, hi: 0x89},
- {value: 0x3000, lo: 0x90, hi: 0xbf},
+ {value: 0x5000, lo: 0x80, hi: 0x80},
+ {value: 0x5000, lo: 0x85, hi: 0x89},
+ {value: 0x5000, lo: 0x90, hi: 0xbf},
// Block 0x50, offset 0x51
{value: 0x0000, lo: 0x06},
- {value: 0x3000, lo: 0x89, hi: 0x89},
+ {value: 0x5000, lo: 0x89, hi: 0x89},
{value: 0x8800, lo: 0x90, hi: 0x90},
{value: 0x8800, lo: 0x92, hi: 0x92},
{value: 0x8800, lo: 0x94, hi: 0x94},
{value: 0x1100, lo: 0xa4, hi: 0xa4},
{value: 0x8800, lo: 0xa5, hi: 0xa5},
{value: 0x1100, lo: 0xa6, hi: 0xa6},
- {value: 0x3000, lo: 0xac, hi: 0xad},
- {value: 0x3000, lo: 0xaf, hi: 0xb0},
+ {value: 0x5000, lo: 0xac, hi: 0xad},
+ {value: 0x5000, lo: 0xaf, hi: 0xb0},
{value: 0x8800, lo: 0xbc, hi: 0xbc},
// Block 0x53, offset 0x54
{value: 0x0000, lo: 0x0b},
{value: 0x1100, lo: 0xaa, hi: 0xad},
// Block 0x55, offset 0x56
{value: 0x0000, lo: 0x01},
- {value: 0x3300, lo: 0xa9, hi: 0xaa},
+ {value: 0x5500, lo: 0xa9, hi: 0xaa},
// Block 0x56, offset 0x57
{value: 0x0000, lo: 0x01},
- {value: 0x3000, lo: 0xa0, hi: 0xbf},
+ {value: 0x5000, lo: 0xa0, hi: 0xbf},
// Block 0x57, offset 0x58
{value: 0x0000, lo: 0x01},
- {value: 0x3000, lo: 0x80, hi: 0xbf},
+ {value: 0x5000, lo: 0x80, hi: 0xbf},
// Block 0x58, offset 0x59
{value: 0x0000, lo: 0x01},
- {value: 0x3000, lo: 0x80, hi: 0xaa},
+ {value: 0x5000, lo: 0x80, hi: 0xaa},
// Block 0x59, offset 0x5a
{value: 0x0000, lo: 0x01},
- {value: 0x3000, lo: 0x8c, hi: 0x8c},
+ {value: 0x5000, lo: 0x8c, hi: 0x8c},
// Block 0x5a, offset 0x5b
{value: 0x0000, lo: 0x01},
- {value: 0x3000, lo: 0xb4, hi: 0xb6},
+ {value: 0x5000, lo: 0xb4, hi: 0xb6},
// Block 0x5b, offset 0x5c
{value: 0x0000, lo: 0x01},
- {value: 0x3300, lo: 0x9c, hi: 0x9c},
+ {value: 0x5500, lo: 0x9c, hi: 0x9c},
// Block 0x5c, offset 0x5d
{value: 0x0000, lo: 0x01},
- {value: 0x3000, lo: 0xbc, hi: 0xbd},
+ {value: 0x5000, lo: 0xbc, hi: 0xbd},
// Block 0x5d, offset 0x5e
{value: 0x0000, lo: 0x01},
{value: 0x00e6, lo: 0xaf, hi: 0xb1},
// Block 0x5e, offset 0x5f
{value: 0x0000, lo: 0x02},
- {value: 0x3000, lo: 0xaf, hi: 0xaf},
+ {value: 0x5000, lo: 0xaf, hi: 0xaf},
{value: 0x0009, lo: 0xbf, hi: 0xbf},
// Block 0x5f, offset 0x60
{value: 0x0000, lo: 0x01},
{value: 0x00e6, lo: 0xa0, hi: 0xbf},
// Block 0x60, offset 0x61
{value: 0x0000, lo: 0x01},
- {value: 0x3000, lo: 0x9f, hi: 0x9f},
+ {value: 0x5000, lo: 0x9f, hi: 0x9f},
// Block 0x61, offset 0x62
{value: 0x0000, lo: 0x01},
- {value: 0x3000, lo: 0xb3, hi: 0xb3},
+ {value: 0x5000, lo: 0xb3, hi: 0xb3},
// Block 0x62, offset 0x63
{value: 0x0000, lo: 0x01},
- {value: 0x3000, lo: 0x80, hi: 0x95},
+ {value: 0x5000, lo: 0x80, hi: 0x95},
// Block 0x63, offset 0x64
{value: 0x0000, lo: 0x08},
- {value: 0x3000, lo: 0x80, hi: 0x80},
+ {value: 0x5000, lo: 0x80, hi: 0x80},
{value: 0x00da, lo: 0xaa, hi: 0xaa},
{value: 0x00e4, lo: 0xab, hi: 0xab},
{value: 0x00e8, lo: 0xac, hi: 0xac},
{value: 0x00de, lo: 0xad, hi: 0xad},
{value: 0x00e0, lo: 0xae, hi: 0xaf},
- {value: 0x3000, lo: 0xb6, hi: 0xb6},
- {value: 0x3000, lo: 0xb8, hi: 0xba},
+ {value: 0x5000, lo: 0xb6, hi: 0xb6},
+ {value: 0x5000, lo: 0xb8, hi: 0xba},
// Block 0x64, offset 0x65
{value: 0x0000, lo: 0x01},
- {value: 0x3000, lo: 0xb1, hi: 0xbf},
+ {value: 0x5000, lo: 0xb1, hi: 0xbf},
// Block 0x65, offset 0x66
{value: 0x0000, lo: 0x02},
- {value: 0x3000, lo: 0x80, hi: 0x8e},
- {value: 0x3000, lo: 0x92, hi: 0x9f},
+ {value: 0x5000, lo: 0x80, hi: 0x8e},
+ {value: 0x5000, lo: 0x92, hi: 0x9f},
// Block 0x66, offset 0x67
{value: 0x0000, lo: 0x02},
- {value: 0x3000, lo: 0x80, hi: 0x9e},
- {value: 0x3000, lo: 0xa0, hi: 0xbf},
+ {value: 0x5000, lo: 0x80, hi: 0x9e},
+ {value: 0x5000, lo: 0xa0, hi: 0xbf},
// Block 0x67, offset 0x68
{value: 0x0000, lo: 0x02},
- {value: 0x3000, lo: 0x80, hi: 0x87},
- {value: 0x3000, lo: 0x90, hi: 0xbe},
+ {value: 0x5000, lo: 0x80, hi: 0x87},
+ {value: 0x5000, lo: 0x90, hi: 0xbe},
// Block 0x68, offset 0x69
{value: 0x0000, lo: 0x01},
- {value: 0x3000, lo: 0x80, hi: 0xbe},
+ {value: 0x5000, lo: 0x80, hi: 0xbe},
// Block 0x69, offset 0x6a
{value: 0x0000, lo: 0x02},
{value: 0x00e6, lo: 0xaf, hi: 0xaf},
{value: 0x00e6, lo: 0xb0, hi: 0xb1},
// Block 0x6b, offset 0x6c
{value: 0x0000, lo: 0x01},
- {value: 0x3000, lo: 0xb0, hi: 0xb0},
+ {value: 0x5000, lo: 0xb0, hi: 0xb0},
// Block 0x6c, offset 0x6d
{value: 0x0000, lo: 0x01},
{value: 0x0009, lo: 0x86, hi: 0x86},
{value: 0x1100, lo: 0x80, hi: 0xa3},
// Block 0x77, offset 0x78
{value: 0x0000, lo: 0x01},
- {value: 0x3300, lo: 0x80, hi: 0xbf},
+ {value: 0x5500, lo: 0x80, hi: 0xbf},
// Block 0x78, offset 0x79
{value: 0x0000, lo: 0x09},
- {value: 0x3300, lo: 0x80, hi: 0x8d},
- {value: 0x3300, lo: 0x90, hi: 0x90},
- {value: 0x3300, lo: 0x92, hi: 0x92},
- {value: 0x3300, lo: 0x95, hi: 0x9e},
- {value: 0x3300, lo: 0xa0, hi: 0xa0},
- {value: 0x3300, lo: 0xa2, hi: 0xa2},
- {value: 0x3300, lo: 0xa5, hi: 0xa6},
- {value: 0x3300, lo: 0xaa, hi: 0xad},
- {value: 0x3300, lo: 0xb0, hi: 0xbf},
+ {value: 0x5500, lo: 0x80, hi: 0x8d},
+ {value: 0x5500, lo: 0x90, hi: 0x90},
+ {value: 0x5500, lo: 0x92, hi: 0x92},
+ {value: 0x5500, lo: 0x95, hi: 0x9e},
+ {value: 0x5500, lo: 0xa0, hi: 0xa0},
+ {value: 0x5500, lo: 0xa2, hi: 0xa2},
+ {value: 0x5500, lo: 0xa5, hi: 0xa6},
+ {value: 0x5500, lo: 0xaa, hi: 0xad},
+ {value: 0x5500, lo: 0xb0, hi: 0xbf},
// Block 0x79, offset 0x7a
{value: 0x0000, lo: 0x02},
- {value: 0x3300, lo: 0x80, hi: 0xad},
- {value: 0x3300, lo: 0xb0, hi: 0xbf},
+ {value: 0x5500, lo: 0x80, hi: 0xad},
+ {value: 0x5500, lo: 0xb0, hi: 0xbf},
// Block 0x7a, offset 0x7b
{value: 0x0000, lo: 0x01},
- {value: 0x3300, lo: 0x80, hi: 0x99},
+ {value: 0x5500, lo: 0x80, hi: 0x99},
// Block 0x7b, offset 0x7c
{value: 0x0000, lo: 0x09},
- {value: 0x3000, lo: 0x80, hi: 0x86},
- {value: 0x3000, lo: 0x93, hi: 0x97},
- {value: 0x3300, lo: 0x9d, hi: 0x9d},
+ {value: 0x5000, lo: 0x80, hi: 0x86},
+ {value: 0x5000, lo: 0x93, hi: 0x97},
+ {value: 0x5500, lo: 0x9d, hi: 0x9d},
{value: 0x001a, lo: 0x9e, hi: 0x9e},
- {value: 0x3300, lo: 0x9f, hi: 0x9f},
- {value: 0x3000, lo: 0xa0, hi: 0xa9},
- {value: 0x3300, lo: 0xaa, hi: 0xb6},
- {value: 0x3300, lo: 0xb8, hi: 0xbc},
- {value: 0x3300, lo: 0xbe, hi: 0xbe},
+ {value: 0x5500, lo: 0x9f, hi: 0x9f},
+ {value: 0x5000, lo: 0xa0, hi: 0xa9},
+ {value: 0x5500, lo: 0xaa, hi: 0xb6},
+ {value: 0x5500, lo: 0xb8, hi: 0xbc},
+ {value: 0x5500, lo: 0xbe, hi: 0xbe},
// Block 0x7c, offset 0x7d
{value: 0x0000, lo: 0x04},
- {value: 0x3300, lo: 0x80, hi: 0x81},
- {value: 0x3300, lo: 0x83, hi: 0x84},
- {value: 0x3300, lo: 0x86, hi: 0x8e},
- {value: 0x3000, lo: 0x8f, hi: 0xbf},
+ {value: 0x5500, lo: 0x80, hi: 0x81},
+ {value: 0x5500, lo: 0x83, hi: 0x84},
+ {value: 0x5500, lo: 0x86, hi: 0x8e},
+ {value: 0x5000, lo: 0x8f, hi: 0xbf},
// Block 0x7d, offset 0x7e
{value: 0x0000, lo: 0x01},
- {value: 0x3000, lo: 0x80, hi: 0xb1},
+ {value: 0x5000, lo: 0x80, hi: 0xb1},
// Block 0x7e, offset 0x7f
{value: 0x0000, lo: 0x01},
- {value: 0x3000, lo: 0x93, hi: 0xbf},
+ {value: 0x5000, lo: 0x93, hi: 0xbf},
// Block 0x7f, offset 0x80
{value: 0x0000, lo: 0x01},
- {value: 0x3000, lo: 0x80, hi: 0xbd},
+ {value: 0x5000, lo: 0x80, hi: 0xbd},
// Block 0x80, offset 0x81
{value: 0x0000, lo: 0x01},
- {value: 0x3000, lo: 0x90, hi: 0xbf},
+ {value: 0x5000, lo: 0x90, hi: 0xbf},
// Block 0x81, offset 0x82
{value: 0x0000, lo: 0x02},
- {value: 0x3000, lo: 0x80, hi: 0x8f},
- {value: 0x3000, lo: 0x92, hi: 0xbf},
+ {value: 0x5000, lo: 0x80, hi: 0x8f},
+ {value: 0x5000, lo: 0x92, hi: 0xbf},
// Block 0x82, offset 0x83
{value: 0x0000, lo: 0x02},
- {value: 0x3000, lo: 0x80, hi: 0x87},
- {value: 0x3000, lo: 0xb0, hi: 0xbc},
+ {value: 0x5000, lo: 0x80, hi: 0x87},
+ {value: 0x5000, lo: 0xb0, hi: 0xbc},
// Block 0x83, offset 0x84
{value: 0x0000, lo: 0x03},
- {value: 0x3000, lo: 0x90, hi: 0x99},
+ {value: 0x5000, lo: 0x90, hi: 0x99},
{value: 0x00e6, lo: 0xa0, hi: 0xa6},
- {value: 0x3000, lo: 0xb0, hi: 0xbf},
+ {value: 0x5000, lo: 0xb0, hi: 0xbf},
// Block 0x84, offset 0x85
{value: 0x0000, lo: 0x07},
- {value: 0x3000, lo: 0x80, hi: 0x84},
- {value: 0x3000, lo: 0x87, hi: 0x92},
- {value: 0x3000, lo: 0x94, hi: 0xa6},
- {value: 0x3000, lo: 0xa8, hi: 0xab},
- {value: 0x3000, lo: 0xb0, hi: 0xb2},
- {value: 0x3000, lo: 0xb4, hi: 0xb4},
- {value: 0x3000, lo: 0xb6, hi: 0xbf},
+ {value: 0x5000, lo: 0x80, hi: 0x84},
+ {value: 0x5000, lo: 0x87, hi: 0x92},
+ {value: 0x5000, lo: 0x94, hi: 0xa6},
+ {value: 0x5000, lo: 0xa8, hi: 0xab},
+ {value: 0x5000, lo: 0xb0, hi: 0xb2},
+ {value: 0x5000, lo: 0xb4, hi: 0xb4},
+ {value: 0x5000, lo: 0xb6, hi: 0xbf},
// Block 0x85, offset 0x86
{value: 0x0000, lo: 0x01},
- {value: 0x3000, lo: 0x80, hi: 0xbc},
+ {value: 0x5000, lo: 0x80, hi: 0xbc},
// Block 0x86, offset 0x87
{value: 0x0000, lo: 0x01},
- {value: 0x3000, lo: 0x81, hi: 0xbf},
+ {value: 0x5000, lo: 0x81, hi: 0xbf},
// Block 0x87, offset 0x88
{value: 0x0000, lo: 0x06},
- {value: 0x3000, lo: 0x82, hi: 0x87},
- {value: 0x3000, lo: 0x8a, hi: 0x8f},
- {value: 0x3000, lo: 0x92, hi: 0x97},
- {value: 0x3000, lo: 0x9a, hi: 0x9c},
- {value: 0x3000, lo: 0xa0, hi: 0xa6},
- {value: 0x3000, lo: 0xa8, hi: 0xae},
+ {value: 0x5000, lo: 0x82, hi: 0x87},
+ {value: 0x5000, lo: 0x8a, hi: 0x8f},
+ {value: 0x5000, lo: 0x92, hi: 0x97},
+ {value: 0x5000, lo: 0x9a, hi: 0x9c},
+ {value: 0x5000, lo: 0xa0, hi: 0xa6},
+ {value: 0x5000, lo: 0xa8, hi: 0xae},
// Block 0x88, offset 0x89
{value: 0x0000, lo: 0x01},
{value: 0x00dc, lo: 0xbd, hi: 0xbd},
{value: 0x0009, lo: 0xb9, hi: 0xba},
// Block 0x8b, offset 0x8c
{value: 0x0000, lo: 0x06},
- {value: 0x3300, lo: 0x9e, hi: 0xa4},
+ {value: 0x5500, lo: 0x9e, hi: 0xa4},
{value: 0x00d8, lo: 0xa5, hi: 0xa6},
{value: 0x0001, lo: 0xa7, hi: 0xa9},
{value: 0x00e2, lo: 0xad, hi: 0xad},
{value: 0x00e6, lo: 0x85, hi: 0x89},
{value: 0x00dc, lo: 0x8a, hi: 0x8b},
{value: 0x00e6, lo: 0xaa, hi: 0xad},
- {value: 0x3300, lo: 0xbb, hi: 0xbf},
+ {value: 0x5500, lo: 0xbb, hi: 0xbf},
// Block 0x8d, offset 0x8e
{value: 0x0000, lo: 0x01},
- {value: 0x3300, lo: 0x80, hi: 0x80},
+ {value: 0x5500, lo: 0x80, hi: 0x80},
// Block 0x8e, offset 0x8f
{value: 0x0000, lo: 0x01},
{value: 0x00e6, lo: 0x82, hi: 0x84},
// Block 0x8f, offset 0x90
{value: 0x0000, lo: 0x02},
- {value: 0x3000, lo: 0x80, hi: 0x94},
- {value: 0x3000, lo: 0x96, hi: 0xbf},
+ {value: 0x5000, lo: 0x80, hi: 0x94},
+ {value: 0x5000, lo: 0x96, hi: 0xbf},
// Block 0x90, offset 0x91
{value: 0x0000, lo: 0x08},
- {value: 0x3000, lo: 0x80, hi: 0x9c},
- {value: 0x3000, lo: 0x9e, hi: 0x9f},
- {value: 0x3000, lo: 0xa2, hi: 0xa2},
- {value: 0x3000, lo: 0xa5, hi: 0xa6},
- {value: 0x3000, lo: 0xa9, hi: 0xac},
- {value: 0x3000, lo: 0xae, hi: 0xb9},
- {value: 0x3000, lo: 0xbb, hi: 0xbb},
- {value: 0x3000, lo: 0xbd, hi: 0xbf},
+ {value: 0x5000, lo: 0x80, hi: 0x9c},
+ {value: 0x5000, lo: 0x9e, hi: 0x9f},
+ {value: 0x5000, lo: 0xa2, hi: 0xa2},
+ {value: 0x5000, lo: 0xa5, hi: 0xa6},
+ {value: 0x5000, lo: 0xa9, hi: 0xac},
+ {value: 0x5000, lo: 0xae, hi: 0xb9},
+ {value: 0x5000, lo: 0xbb, hi: 0xbb},
+ {value: 0x5000, lo: 0xbd, hi: 0xbf},
// Block 0x91, offset 0x92
{value: 0x0000, lo: 0x02},
- {value: 0x3000, lo: 0x80, hi: 0x83},
- {value: 0x3000, lo: 0x85, hi: 0xbf},
+ {value: 0x5000, lo: 0x80, hi: 0x83},
+ {value: 0x5000, lo: 0x85, hi: 0xbf},
// Block 0x92, offset 0x93
{value: 0x0000, lo: 0x06},
- {value: 0x3000, lo: 0x80, hi: 0x85},
- {value: 0x3000, lo: 0x87, hi: 0x8a},
- {value: 0x3000, lo: 0x8d, hi: 0x94},
- {value: 0x3000, lo: 0x96, hi: 0x9c},
- {value: 0x3000, lo: 0x9e, hi: 0xb9},
- {value: 0x3000, lo: 0xbb, hi: 0xbe},
+ {value: 0x5000, lo: 0x80, hi: 0x85},
+ {value: 0x5000, lo: 0x87, hi: 0x8a},
+ {value: 0x5000, lo: 0x8d, hi: 0x94},
+ {value: 0x5000, lo: 0x96, hi: 0x9c},
+ {value: 0x5000, lo: 0x9e, hi: 0xb9},
+ {value: 0x5000, lo: 0xbb, hi: 0xbe},
// Block 0x93, offset 0x94
{value: 0x0000, lo: 0x04},
- {value: 0x3000, lo: 0x80, hi: 0x84},
- {value: 0x3000, lo: 0x86, hi: 0x86},
- {value: 0x3000, lo: 0x8a, hi: 0x90},
- {value: 0x3000, lo: 0x92, hi: 0xbf},
+ {value: 0x5000, lo: 0x80, hi: 0x84},
+ {value: 0x5000, lo: 0x86, hi: 0x86},
+ {value: 0x5000, lo: 0x8a, hi: 0x90},
+ {value: 0x5000, lo: 0x92, hi: 0xbf},
// Block 0x94, offset 0x95
{value: 0x0000, lo: 0x02},
- {value: 0x3000, lo: 0x80, hi: 0xa5},
- {value: 0x3000, lo: 0xa8, hi: 0xbf},
+ {value: 0x5000, lo: 0x80, hi: 0xa5},
+ {value: 0x5000, lo: 0xa8, hi: 0xbf},
// Block 0x95, offset 0x96
{value: 0x0000, lo: 0x02},
- {value: 0x3000, lo: 0x80, hi: 0x8b},
- {value: 0x3000, lo: 0x8e, hi: 0xbf},
+ {value: 0x5000, lo: 0x80, hi: 0x8b},
+ {value: 0x5000, lo: 0x8e, hi: 0xbf},
// Block 0x96, offset 0x97
{value: 0x0000, lo: 0x03},
- {value: 0x3000, lo: 0x80, hi: 0x8a},
- {value: 0x3000, lo: 0x90, hi: 0xae},
- {value: 0x3000, lo: 0xb0, hi: 0xbf},
+ {value: 0x5000, lo: 0x80, hi: 0x8a},
+ {value: 0x5000, lo: 0x90, hi: 0xae},
+ {value: 0x5000, lo: 0xb0, hi: 0xbf},
// Block 0x97, offset 0x98
{value: 0x0000, lo: 0x01},
- {value: 0x3000, lo: 0x80, hi: 0x8f},
+ {value: 0x5000, lo: 0x80, hi: 0x8f},
// Block 0x98, offset 0x99
{value: 0x0000, lo: 0x01},
- {value: 0x3000, lo: 0x90, hi: 0x90},
+ {value: 0x5000, lo: 0x90, hi: 0x90},
// Block 0x99, offset 0x9a
{value: 0x0000, lo: 0x02},
- {value: 0x3000, lo: 0x80, hi: 0x82},
- {value: 0x3000, lo: 0x90, hi: 0xba},
+ {value: 0x5000, lo: 0x80, hi: 0x82},
+ {value: 0x5000, lo: 0x90, hi: 0xba},
// Block 0x9a, offset 0x9b
{value: 0x0000, lo: 0x02},
- {value: 0x3000, lo: 0x80, hi: 0x88},
- {value: 0x3000, lo: 0x90, hi: 0x91},
+ {value: 0x5000, lo: 0x80, hi: 0x88},
+ {value: 0x5000, lo: 0x90, hi: 0x91},
// Block 0x9b, offset 0x9c
{value: 0x0000, lo: 0x01},
- {value: 0x3300, lo: 0x80, hi: 0x9d},
+ {value: 0x5500, lo: 0x80, hi: 0x9d},
}
// charInfoLookup: 1152 bytes
"testing"
)
+const sighup = os.UnixSignal(syscall.SIGHUP)
+
func TestSignal(t *testing.T) {
// Send this process a SIGHUP.
syscall.Syscall(syscall.SYS_KILL, uintptr(syscall.Getpid()), syscall.SIGHUP, 0)
- if sig := (<-Incoming).(os.UnixSignal); sig != os.SIGHUP {
- t.Errorf("signal was %v, want %v", sig, os.SIGHUP)
+ if sig := (<-Incoming).(os.UnixSignal); sig != sighup {
+ t.Errorf("signal was %v, want %v", sig, sighup)
}
}
gcPath = gcName
return
}
- gcPath, _ = exec.LookPath(gcName)
+ gcPath = filepath.Join(runtime.GOROOT(), "/bin/tool/", gcName)
}
func compile(t *testing.T, dirname, filename string) {
//
// The package is sometimes only imported for the side effect of
// registering its HTTP handler and the above variables. To use it
-// this way, simply link this package into your program:
+// this way, link this package into your program:
// import _ "expvar"
//
package expvar
// Map is a string-to-Var map variable that satisfies the Var interface.
type Map struct {
m map[string]Var
- mu sync.Mutex
+ mu sync.RWMutex
}
// KeyValue represents a single entry in a Map.
}
func (v *Map) String() string {
- v.mu.Lock()
- defer v.mu.Unlock()
+ v.mu.RLock()
+ defer v.mu.RUnlock()
b := new(bytes.Buffer)
fmt.Fprintf(b, "{")
first := true
}
func (v *Map) Get(key string) Var {
- v.mu.Lock()
- defer v.mu.Unlock()
+ v.mu.RLock()
+ defer v.mu.RUnlock()
return v.m[key]
}
}
func (v *Map) Add(key string, delta int64) {
- v.mu.Lock()
- defer v.mu.Unlock()
+ v.mu.RLock()
av, ok := v.m[key]
+ v.mu.RUnlock()
if !ok {
- av = new(Int)
- v.m[key] = av
+ // check again under the write lock
+ v.mu.Lock()
+ if av, ok = v.m[key]; !ok {
+ av = new(Int)
+ v.m[key] = av
+ }
+ v.mu.Unlock()
}
// Add to Int; ignore otherwise.
// AddFloat adds delta to the *Float value stored under the given map key.
func (v *Map) AddFloat(key string, delta float64) {
- v.mu.Lock()
- defer v.mu.Unlock()
+ v.mu.RLock()
av, ok := v.m[key]
+ v.mu.RUnlock()
if !ok {
- av = new(Float)
- v.m[key] = av
+ // check again under the write lock
+ v.mu.Lock()
+ if av, ok = v.m[key]; !ok {
+ av = new(Float)
+ v.m[key] = av
+ }
+ v.mu.Unlock()
}
// Add to Float; ignore otherwise.
}
}
-// TODO(rsc): Make sure map access in separate thread is safe.
-func (v *Map) iterate(c chan<- KeyValue) {
+// Do calls f for each entry in the map.
+// The map is locked during the iteration,
+// but existing entries may be concurrently updated.
+func (v *Map) Do(f func(KeyValue)) {
+ v.mu.RLock()
+ defer v.mu.RUnlock()
for k, v := range v.m {
- c <- KeyValue{k, v}
+ f(KeyValue{k, v})
}
- close(c)
-}
-
-func (v *Map) Iter() <-chan KeyValue {
- c := make(chan KeyValue)
- go v.iterate(c)
- return c
}
// String is a string variable, and satisfies the Var interface.
}
// All published variables.
-var vars map[string]Var = make(map[string]Var)
-var mutex sync.Mutex
+var (
+ mutex sync.RWMutex
+ vars map[string]Var = make(map[string]Var)
+)
// Publish declares a named exported variable. This should be called from a
// package's init function when it creates its Vars. If the name is already
// Get retrieves a named exported variable.
func Get(name string) Var {
+ mutex.RLock()
+ defer mutex.RUnlock()
return vars[name]
}
-// RemoveAll removes all exported variables.
-// This is for tests; don't call this on a real server.
-func RemoveAll() {
- mutex.Lock()
- defer mutex.Unlock()
- vars = make(map[string]Var)
-}
-
// Convenience functions for creating new exported variables.
func NewInt(name string) *Int {
return v
}
-// TODO(rsc): Make sure map access in separate thread is safe.
-func iterate(c chan<- KeyValue) {
+// Do calls f for each exported variable.
+// The global variable map is locked during the iteration,
+// but existing entries may be concurrently updated.
+func Do(f func(KeyValue)) {
+ mutex.RLock()
+ defer mutex.RUnlock()
for k, v := range vars {
- c <- KeyValue{k, v}
+ f(KeyValue{k, v})
}
- close(c)
-}
-
-func Iter() <-chan KeyValue {
- c := make(chan KeyValue)
- go iterate(c)
- return c
}
func expvarHandler(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
fmt.Fprintf(w, "{\n")
first := true
- for name, value := range vars {
+ Do(func(kv KeyValue) {
if !first {
fmt.Fprintf(w, ",\n")
}
first = false
- fmt.Fprintf(w, "%q: %s", name, value)
- }
+ fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value)
+ })
fmt.Fprintf(w, "\n}\n")
}
}
func memstats() interface{} {
- return runtime.MemStats
+ stats := new(runtime.MemStats)
+ runtime.ReadMemStats(stats)
+ return *stats
}
func init() {
- http.Handle("/debug/vars", http.HandlerFunc(expvarHandler))
+ http.HandleFunc("/debug/vars", expvarHandler)
Publish("cmdline", Func(cmdline))
Publish("memstats", Func(memstats))
}
"testing"
)
+// RemoveAll removes all exported variables.
+// This is for tests only.
+func RemoveAll() {
+ mutex.Lock()
+ defer mutex.Unlock()
+ vars = make(map[string]Var)
+}
+
func TestInt(t *testing.T) {
reqs := NewInt("requests")
if reqs.i != 0 {
Integer flags accept 1234, 0664, 0x1234 and may be negative.
Boolean flags may be 1, 0, t, f, true, false, TRUE, FALSE, True, False.
+ Duration flags accept any input valid for time.ParseDuration.
The default set of command-line flags is controlled by
top-level functions. The FlagSet type allows one to define
import (
"errors"
"fmt"
+ "io"
"os"
"sort"
"strconv"
args []string // arguments after flags
exitOnError bool // does the program exit if there's an error?
errorHandling ErrorHandling
+ output io.Writer // nil means stderr; use out() accessor
}
// A Flag represents the state of a flag.
return result
}
+func (f *FlagSet) out() io.Writer {
+ if f.output == nil {
+ return os.Stderr
+ }
+ return f.output
+}
+
+// SetOutput sets the destination for usage and error messages.
+// If output is nil, os.Stderr is used.
+func (f *FlagSet) SetOutput(output io.Writer) {
+ f.output = output
+}
+
// VisitAll visits the flags in lexicographical order, calling fn for each.
// It visits all flags, even those not set.
func (f *FlagSet) VisitAll(fn func(*Flag)) {
return commandLine.Set(name, value)
}
-// PrintDefaults prints to standard error the default values of all defined flags in the set.
+// PrintDefaults prints, to standard error unless configured
+// otherwise, the default values of all defined flags in the set.
func (f *FlagSet) PrintDefaults() {
- f.VisitAll(func(f *Flag) {
+ f.VisitAll(func(flag *Flag) {
format := " -%s=%s: %s\n"
- if _, ok := f.Value.(*stringValue); ok {
+ if _, ok := flag.Value.(*stringValue); ok {
// put quotes on the value
format = " -%s=%q: %s\n"
}
- fmt.Fprintf(os.Stderr, format, f.Name, f.DefValue, f.Usage)
+ fmt.Fprintf(f.out(), format, flag.Name, flag.DefValue, flag.Usage)
})
}
// defaultUsage is the default function to print a usage message.
func defaultUsage(f *FlagSet) {
- fmt.Fprintf(os.Stderr, "Usage of %s:\n", f.name)
+ fmt.Fprintf(f.out(), "Usage of %s:\n", f.name)
f.PrintDefaults()
}
flag := &Flag{name, usage, value, value.String()}
_, alreadythere := f.formal[name]
if alreadythere {
- fmt.Fprintf(os.Stderr, "%s flag redefined: %s\n", f.name, name)
+ fmt.Fprintf(f.out(), "%s flag redefined: %s\n", f.name, name)
panic("flag redefinition") // Happens only if flags are declared with identical names
}
if f.formal == nil {
// returns the error.
func (f *FlagSet) failf(format string, a ...interface{}) error {
err := fmt.Errorf(format, a...)
- fmt.Fprintln(os.Stderr, err)
+ fmt.Fprintln(f.out(), err)
f.usage()
return err
}
package flag_test
import (
+ "bytes"
. "flag"
"fmt"
"os"
"sort"
+ "strings"
"testing"
"time"
)
}
}
+func TestSetOutput(t *testing.T) {
+ var flags FlagSet
+ var buf bytes.Buffer
+ flags.SetOutput(&buf)
+ flags.Init("test", ContinueOnError)
+ flags.Parse([]string{"-unknown"})
+ if out := buf.String(); !strings.Contains(out, "-unknown") {
+ t.Errorf("expected output mentioning unknown; got %q", out)
+ }
+}
+
// This tests that one can reset the flags. This still works but not well, and is
// superseded by FlagSet.
func TestChangingArgs(t *testing.T) {
{"%s", nil, "%!s(<nil>)"},
{"%T", nil, "<nil>"},
{"%-1", 100, "%!(NOVERB)%!(EXTRA int=100)"},
+
+ // The "<nil>" show up because maps are printed by
+ // first obtaining a list of keys and then looking up
+ // each key. Since NaNs can be map keys but cannot
+ // be fetched directly, the lookup fails and returns a
+ // zero reflect.Value, which formats as <nil>.
+ // This test is just to check that it shows the two NaNs at all.
+ {"%v", map[float64]int{math.NaN(): 1, math.NaN(): 2}, "map[NaN:<nil> NaN:<nil>]"},
}
func TestSprintf(t *testing.T) {
func TestCountMallocs(t *testing.T) {
for _, mt := range mallocTest {
const N = 100
- runtime.UpdateMemStats()
- mallocs := 0 - runtime.MemStats.Mallocs
+ memstats := new(runtime.MemStats)
+ runtime.ReadMemStats(memstats)
+ mallocs := 0 - memstats.Mallocs
for i := 0; i < N; i++ {
mt.fn()
}
- runtime.UpdateMemStats()
- mallocs += runtime.MemStats.Mallocs
+ runtime.ReadMemStats(memstats)
+ mallocs += memstats.Mallocs
if mallocs/N > uint64(mt.count) {
t.Errorf("%s: expected %d mallocs, got %d", mt.desc, mt.count, mallocs/N)
}
s.fieldLimit = hugeWid
s.maxWid = hugeWid
s.validSave = true
+ s.count = 0
return
}
t.Errorf("ScanDir(%#q): %v", tt.dir, err)
continue
}
+ // Don't bother testing import positions.
+ tt.info.ImportPos, tt.info.TestImportPos = info.ImportPos, info.TestImportPos
if !reflect.DeepEqual(info, tt.info) {
t.Errorf("ScanDir(%#q) = %#v, want %#v\n", tt.dir, info, tt.info)
continue
}
type DirInfo struct {
- Package string // Name of package in dir
- PackageComment *ast.CommentGroup // Package comments from GoFiles
- ImportPath string // Import path of package in dir
- Imports []string // All packages imported by GoFiles
+ Package string // Name of package in dir
+ PackageComment *ast.CommentGroup // Package comments from GoFiles
+ ImportPath string // Import path of package in dir
+ Imports []string // All packages imported by GoFiles
+ ImportPos map[string][]token.Position // Source code location of imports
// Source files
GoFiles []string // .go files in dir (excluding CgoFiles, TestGoFiles, XTestGoFiles)
CgoLDFLAGS []string // Cgo LDFLAGS directives
// Test information
- TestGoFiles []string // _test.go files in package
- XTestGoFiles []string // _test.go files outside package
- TestImports []string // All packages imported by (X)TestGoFiles
+ TestGoFiles []string // _test.go files in package
+ XTestGoFiles []string // _test.go files outside package
+ TestImports []string // All packages imported by (X)TestGoFiles
+ TestImportPos map[string][]token.Position
}
func (d *DirInfo) IsCommand() bool {
var Sfiles []string // files with ".S" (capital S)
var di DirInfo
- imported := make(map[string]bool)
- testImported := make(map[string]bool)
+ imported := make(map[string][]token.Position)
+ testImported := make(map[string][]token.Position)
fset := token.NewFileSet()
for _, d := range dirs {
if d.IsDir() {
log.Panicf("%s: parser returned invalid quoted string: <%s>", filename, quoted)
}
if isTest {
- testImported[path] = true
+ testImported[path] = append(testImported[path], fset.Position(spec.Pos()))
} else {
- imported[path] = true
+ imported[path] = append(imported[path], fset.Position(spec.Pos()))
}
if path == "C" {
if isTest {
return nil, fmt.Errorf("%s: no Go source files", dir)
}
di.Imports = make([]string, len(imported))
+ di.ImportPos = imported
i := 0
for p := range imported {
di.Imports[i] = p
i++
}
di.TestImports = make([]string, len(testImported))
+ di.TestImportPos = testImported
i = 0
for p := range testImported {
di.TestImports[i] = p
filePart + `([:.,]` + filePart + `)*`
)
-var matchRx = regexp.MustCompile(`(` + identRx + `)|(` + urlRx + `)`)
+var matchRx = regexp.MustCompile(`(` + urlRx + `)|(` + identRx + `)`)
var (
html_a = []byte(`<a href="`)
if m == nil {
break
}
- // m >= 6 (two parenthesized sub-regexps in matchRx, 1st one is identRx)
+ // m >= 6 (two parenthesized sub-regexps in matchRx, 1st one is urlRx)
// write text before match
commentEscape(w, line[0:m[0]], nice)
if words != nil {
url, italics = words[string(match)]
}
- if m[2] < 0 {
- // didn't match against first parenthesized sub-regexp; must be match against urlRx
+ if m[2] >= 0 {
+ // match against first parenthesized sub-regexp; must be match against urlRx
if !italics {
// no alternative URL in words list, use match instead
url = string(match)
if lastWasBlank && !lastWasHeading && i+2 < len(lines) &&
isBlank(lines[i+1]) && !isBlank(lines[i+2]) && indentLen(lines[i+2]) == 0 {
- // current line is non-blank, sourounded by blank lines
+ // current line is non-blank, surrounded by blank lines
// and the next non-blank line is not indented: this
// might be a heading.
if head := heading(line); head != "" {
package doc
import (
+ "bytes"
"reflect"
"testing"
)
}
}
}
+
+var emphasizeTests = []struct {
+ in string
+ out string
+}{
+ {"http://www.google.com/", `<a href="http://www.google.com/">http://www.google.com/</a>`},
+ {"https://www.google.com/", `<a href="https://www.google.com/">https://www.google.com/</a>`},
+ {"http://www.google.com/path.", `<a href="http://www.google.com/path">http://www.google.com/path</a>.`},
+ {"(http://www.google.com/)", `(<a href="http://www.google.com/">http://www.google.com/</a>)`},
+ {"Foo bar http://example.com/ quux!", `Foo bar <a href="http://example.com/">http://example.com/</a> quux!`},
+ {"Hello http://example.com/%2f/ /world.", `Hello <a href="http://example.com/%2f/">http://example.com/%2f/</a> /world.`},
+ {"Lorem http: ipsum //host/path", "Lorem http: ipsum //host/path"},
+ {"javascript://is/not/linked", "javascript://is/not/linked"},
+}
+
+func TestEmphasize(t *testing.T) {
+ for i, tt := range emphasizeTests {
+ var buf bytes.Buffer
+ emphasize(&buf, tt.in, nil, true)
+ out := buf.String()
+ if out != tt.out {
+ t.Errorf("#%d: mismatch\nhave: %v\nwant: %v", i, out, tt.out)
+ }
+ }
+}
// extract documentation for all package-level declarations,
// not just exported ones
AllDecls Mode = 1 << iota
+
+ // show all embedded methods, not just the ones of
+ // invisible (unexported) anonymous fields
+ AllMethods
)
// New computes the package documentation for the given package AST.
Filenames: r.filenames,
Bugs: r.bugs,
Consts: sortedValues(r.values, token.CONST),
- Types: sortedTypes(r.types),
+ Types: sortedTypes(r.types, mode&AllMethods != 0),
Vars: sortedValues(r.values, token.VAR),
- Funcs: sortedFuncs(r.funcs),
+ Funcs: sortedFuncs(r.funcs, true),
}
}
func Test(t *testing.T) {
test(t, 0)
test(t, AllDecls)
+ test(t, AllMethods)
}
continue
}
examples = append(examples, &Example{
- Name: name[len("Example"):],
- Body: &printer.CommentedNode{f.Body, src.Comments},
+ Name: name[len("Example"):],
+ Body: &printer.CommentedNode{
+ Node: f.Body,
+ Comments: src.Comments,
+ },
Output: f.Doc.Text(),
})
}
}
// filterFieldList removes unexported fields (field names) from the field list
-// in place and returns true if fields were removed. Removed fields that are
-// anonymous (embedded) fields are added as embedded types to base. filterType
-// is called with the types of all remaining fields.
+// in place and returns true if fields were removed. Anonymous fields are
+// recorded with the parent type. filterType is called with the types of
+// all remaining fields.
//
-func (r *reader) filterFieldList(base *baseType, fields *ast.FieldList) (removedFields bool) {
+func (r *reader) filterFieldList(parent *namedType, fields *ast.FieldList) (removedFields bool) {
if fields == nil {
return
}
keepField := false
if n := len(field.Names); n == 0 {
// anonymous field
- name, imp := baseTypeName(field.Type)
+ name := r.recordAnonymousField(parent, field.Type)
if ast.IsExported(name) {
- // we keep the field - in this case r.readDecl
- // will take care of adding the embedded type
keepField = true
- } else if base != nil && !imp {
- // we don't keep the field - add it as an embedded
- // type so we won't loose its methods, if any
- if embedded := r.lookupType(name); embedded != nil {
- _, ptr := field.Type.(*ast.StarExpr)
- base.addEmbeddedType(embedded, ptr)
- }
}
} else {
field.Names = filterIdentList(field.Names)
// in place. If fields (or methods) have been removed, the corresponding
// struct or interface type has the Incomplete field set to true.
//
-func (r *reader) filterType(base *baseType, typ ast.Expr) {
+func (r *reader) filterType(parent *namedType, typ ast.Expr) {
switch t := typ.(type) {
case *ast.Ident:
// nothing to do
case *ast.ArrayType:
r.filterType(nil, t.Elt)
case *ast.StructType:
- if r.filterFieldList(base, t.Fields) {
+ if r.filterFieldList(parent, t.Fields) {
t.Incomplete = true
}
case *ast.FuncType:
r.filterParamList(t.Params)
r.filterParamList(t.Results)
case *ast.InterfaceType:
- if r.filterFieldList(base, t.Methods) {
+ if r.filterFieldList(parent, t.Methods) {
t.Incomplete = true
}
case *ast.MapType:
d.Specs = r.filterSpecList(d.Specs)
return len(d.Specs) > 0
case *ast.FuncDecl:
+ // ok to filter these methods early because any
+ // conflicting method will be filtered here, too -
+ // thus, removing these methods early will not lead
+ // to the false removal of possible conflicts
return ast.IsExported(d.Name.Name)
}
return false
}
// ----------------------------------------------------------------------------
-// Base types
+// Named types
// baseTypeName returns the name of the base type of x (or "")
// and whether the type is imported or not.
return
}
-// embeddedType describes the type of an anonymous field.
+// A namedType represents a named unqualified (package local, or possibly
+// predeclared) type. The namedType for a type name is always found via
+// reader.lookupType.
//
-type embeddedType struct {
- typ *baseType // the corresponding base type
- ptr bool // if set, the anonymous field type is a pointer
-}
-
-type baseType struct {
+type namedType struct {
doc string // doc comment for type
- name string // local type name (excluding package qualifier)
+ name string // type name
decl *ast.GenDecl // nil if declaration hasn't been seen yet
+ isEmbedded bool // true if this type is embedded
+ isStruct bool // true if this type is a struct
+ embedded map[*namedType]bool // true if the embedded type is a pointer
+
// associated declarations
values []*Value // consts and vars
funcs methodSet
methods methodSet
-
- isEmbedded bool // true if this type is embedded
- isStruct bool // true if this type is a struct
- embedded []embeddedType // list of embedded types
-}
-
-func (typ *baseType) addEmbeddedType(e *baseType, isPtr bool) {
- e.isEmbedded = true
- typ.embedded = append(typ.embedded, embeddedType{e, isPtr})
}
// ----------------------------------------------------------------------------
// declarations
imports map[string]int
values []*Value // consts and vars
- types map[string]*baseType
+ types map[string]*namedType
funcs methodSet
}
-// isVisible reports whether name is visible in the documentation.
-//
func (r *reader) isVisible(name string) bool {
return r.mode&AllDecls != 0 || ast.IsExported(name)
}
// type with the given name but no associated declaration
// is added to the type map.
//
-func (r *reader) lookupType(name string) *baseType {
+func (r *reader) lookupType(name string) *namedType {
if name == "" || name == "_" {
return nil // no type docs for anonymous types
}
return typ
}
// type not found - add one without declaration
- typ := &baseType{
- name: name,
- funcs: make(methodSet),
- methods: make(methodSet),
+ typ := &namedType{
+ name: name,
+ embedded: make(map[*namedType]bool),
+ funcs: make(methodSet),
+ methods: make(methodSet),
}
r.types[name] = typ
return typ
}
+// recordAnonymousField registers fieldType as the type of an
+// anonymous field in the parent type. If the field is imported
+// (qualified name) or the parent is nil, the field is ignored.
+// The function returns the field name.
+//
+func (r *reader) recordAnonymousField(parent *namedType, fieldType ast.Expr) (fname string) {
+ fname, imp := baseTypeName(fieldType)
+ if parent == nil || imp {
+ return
+ }
+ if ftype := r.lookupType(fname); ftype != nil {
+ ftype.isEmbedded = true
+ _, ptr := fieldType.(*ast.StarExpr)
+ parent.embedded[ftype] = ptr
+ }
+ return
+}
+
func (r *reader) readDoc(comment *ast.CommentGroup) {
// By convention there should be only one package comment
// but collect all of them if there are more then one.
switch {
case s.Type != nil:
// a type is present; determine its name
- if n, imp := baseTypeName(s.Type); !imp && r.isVisible(n) {
+ if n, imp := baseTypeName(s.Type); !imp {
name = n
}
case decl.Tok == token.CONST:
const threshold = 0.75
if domName != "" && domFreq >= int(float64(len(decl.Specs))*threshold) {
// typed entries are sufficiently frequent
- typ := r.lookupType(domName)
- if typ != nil {
+ if typ := r.lookupType(domName); typ != nil {
values = &typ.values // associate with that type
}
}
decl.Doc = nil // doc consumed - remove from AST
typ.doc = doc.Text()
- // look for anonymous fields that might contribute methods
+ // record anonymous fields (they may contribute methods)
+ // (some fields may have been recorded already when filtering
+ // exports, but that's ok)
var list []*ast.Field
list, typ.isStruct = fields(spec.Type)
for _, field := range list {
if len(field.Names) == 0 {
- // anonymous field - add corresponding field type to typ
- n, imp := baseTypeName(field.Type)
- if imp {
- // imported type - we don't handle this case
- // at the moment
- return
- }
- if embedded := r.lookupType(n); embedded != nil {
- _, ptr := field.Type.(*ast.StarExpr)
- typ.addEmbeddedType(embedded, ptr)
- }
+ r.recordAnonymousField(typ, field.Type)
}
}
}
// strip function body
fun.Body = nil
- // determine if it should be associated with a type
+ // associate methods with the receiver type, if any
if fun.Recv != nil {
// method
recvTypeName, imp := baseTypeName(fun.Recv.List[0].Type)
// don't show this method
return
}
- var typ *baseType
- if r.isVisible(recvTypeName) {
- // visible recv type: if not found, add it to r.types
- typ = r.lookupType(recvTypeName)
- } else {
- // invisible recv type: if not found, do not add it
- // (invisible embedded types are added before this
- // phase, so if the type doesn't exist yet, we don't
- // care about this method)
- typ = r.types[recvTypeName]
- }
- if typ != nil {
- // associate method with the type
- // (if the type is not exported, it may be embedded
- // somewhere so we need to collect the method anyway)
+ if typ := r.lookupType(recvTypeName); typ != nil {
typ.methods.set(fun)
}
- // otherwise don't show the method
+ // otherwise ignore the method
// TODO(gri): There may be exported methods of non-exported types
// that can be called because of exported values (consts, vars, or
// function results) of that type. Could determine if that is the
return
}
- // perhaps a factory function
- // determine result type, if any
+ // associate factory functions with the first visible result type, if any
if fun.Type.Results.NumFields() >= 1 {
res := fun.Type.Results.List[0]
if len(res.Names) <= 1 {
// be more than one result)
if n, imp := baseTypeName(res.Type); !imp && r.isVisible(n) {
if typ := r.lookupType(n); typ != nil {
- // associate Func with typ
+ // associate function with typ
typ.funcs.set(fun)
return
}
r.filenames = make([]string, len(pkg.Files))
r.imports = make(map[string]int)
r.mode = mode
- r.types = make(map[string]*baseType)
+ r.types = make(map[string]*namedType)
r.funcs = make(methodSet)
// sort package files before reading them so that the
"byte": true,
"complex64": true,
"complex128": true,
+ "error": true,
"float32": true,
"float64": true,
"int": true,
"int16": true,
"int32": true,
"int64": true,
+ "rune": true,
"string": true,
"uint": true,
"uint8": true,
return &newF
}
-// collectEmbeddedMethods collects the embedded methods from
-// all processed embedded types found in info in mset.
+// collectEmbeddedMethods collects the embedded methods of typ in mset.
//
-func collectEmbeddedMethods(mset methodSet, typ *baseType, recvTypeName string, embeddedIsPtr bool, level int) {
- for _, e := range typ.embedded {
+func (r *reader) collectEmbeddedMethods(mset methodSet, typ *namedType, recvTypeName string, embeddedIsPtr bool, level int) {
+ for embedded, isPtr := range typ.embedded {
// Once an embedded type is embedded as a pointer type
// all embedded types in those types are treated like
// pointer types for the purpose of the receiver type
// computation; i.e., embeddedIsPtr is sticky for this
// embedding hierarchy.
- thisEmbeddedIsPtr := embeddedIsPtr || e.ptr
- for _, m := range e.typ.methods {
+ thisEmbeddedIsPtr := embeddedIsPtr || isPtr
+ for _, m := range embedded.methods {
// only top-level methods are embedded
if m.Level == 0 {
mset.add(customizeRecv(m, recvTypeName, thisEmbeddedIsPtr, level))
}
}
- collectEmbeddedMethods(mset, e.typ, recvTypeName, thisEmbeddedIsPtr, level+1)
+ r.collectEmbeddedMethods(mset, embedded, recvTypeName, thisEmbeddedIsPtr, level+1)
}
}
// collect embedded methods for t
if t.isStruct {
// struct
- collectEmbeddedMethods(t.methods, t, t.name, false, 1)
+ r.collectEmbeddedMethods(t.methods, t, t.name, false, 1)
} else {
// interface
// TODO(gri) fix this
r.values = append(r.values, t.values...)
// 2) move factory functions
for name, f := range t.funcs {
+ // in a correct AST, package-level function names
+ // are all different - no need to check for conflicts
r.funcs[name] = f
}
// 3) move methods
return list
}
-func sortedTypes(m map[string]*baseType) []*Type {
+func sortedTypes(m map[string]*namedType, allMethods bool) []*Type {
list := make([]*Type, len(m))
i := 0
for _, t := range m {
Decl: t.decl,
Consts: sortedValues(t.values, token.CONST),
Vars: sortedValues(t.values, token.VAR),
- Funcs: sortedFuncs(t.funcs),
- Methods: sortedFuncs(t.methods),
+ Funcs: sortedFuncs(t.funcs, true),
+ Methods: sortedFuncs(t.methods, allMethods),
}
i++
}
return list
}
-func sortedFuncs(m methodSet) []*Func {
+func removeStar(s string) string {
+ if len(s) > 0 && s[0] == '*' {
+ return s[1:]
+ }
+ return s
+}
+
+func sortedFuncs(m methodSet, allMethods bool) []*Func {
list := make([]*Func, len(m))
i := 0
for _, m := range m {
- // exclude conflict entries
- if m.Decl != nil {
+ // determine which methods to include
+ switch {
+ case m.Decl == nil:
+ // exclude conflict entry
+ case allMethods, m.Level == 0, !ast.IsExported(removeStar(m.Orig)):
+ // forced inclusion, method not embedded, or method
+ // embedded but original receiver type not exported
list[i] = m
i++
}
--- /dev/null
+// comment 0 comment 1
+PACKAGE a
+
+IMPORTPATH
+ testdata/a
+
+FILENAMES
+ testdata/a0.go
+ testdata/a1.go
+
+BUGS
+ // bug0
+ // bug1
--- /dev/null
+//
+PACKAGE b
+
+IMPORTPATH
+ testdata/b
+
+IMPORTS
+ a
+
+FILENAMES
+ testdata/b.go
+
+CONSTANTS
+ //
+ const Pi = 3.14 // Pi
+
+
+VARIABLES
+ //
+ var MaxInt int // MaxInt
+
+
+FUNCTIONS
+ //
+ func F(x int) int
+
+ // Always under the package functions list.
+ func NotAFactory() int
+
+ // Associated with uint type if AllDecls is set.
+ func UintFactory() uint
+
+
+TYPES
+ //
+ type T struct{} // T
+
+ //
+ var V T // v
+
+ //
+ func (x *T) M()
+
--- /dev/null
+//
+PACKAGE c
+
+IMPORTPATH
+ testdata/c
+
+IMPORTS
+ a
+
+FILENAMES
+ testdata/c.go
+
+TYPES
+ // A (should see this)
+ type A struct{}
+
+ // B (should see this)
+ type B struct{}
+
+ // C (should see this)
+ type C struct{}
+
+ // D (should see this)
+ type D struct{}
+
+ // E1 (should see this)
+ type E1 struct{}
+
+ // E (should see this for E2 and E3)
+ type E2 struct{}
+
+ // E (should see this for E2 and E3)
+ type E3 struct{}
+
+ // E4 (should see this)
+ type E4 struct{}
+
+ //
+ type T1 struct{}
+
+ //
+ func (t1 *T1) M()
+
+ // T2 must not show methods of local T1
+ type T2 struct {
+ a.T1 // not the same as locally declared T1
+ }
+
--- /dev/null
+//
+PACKAGE d
+
+IMPORTPATH
+ testdata/d
+
+FILENAMES
+ testdata/d1.go
+ testdata/d2.go
+
+CONSTANTS
+ // CBx constants should appear before CAx constants.
+ const (
+ CB2 = iota // before CB1
+ CB1 // before CB0
+ CB0 // at end
+ )
+
+ // CAx constants should appear after CBx constants.
+ const (
+ CA2 = iota // before CA1
+ CA1 // before CA0
+ CA0 // at end
+ )
+
+ // C0 should be first.
+ const C0 = 0
+
+ // C1 should be second.
+ const C1 = 1
+
+ // C2 should be third.
+ const C2 = 2
+
+ //
+ const (
+ // Single const declarations inside ()'s are considered ungrouped
+ // and show up in sorted order.
+ Cungrouped = 0
+ )
+
+
+VARIABLES
+ // VBx variables should appear before VAx variables.
+ var (
+ VB2 int // before VB1
+ VB1 int // before VB0
+ VB0 int // at end
+ )
+
+ // VAx variables should appear after VBx variables.
+ var (
+ VA2 int // before VA1
+ VA1 int // before VA0
+ VA0 int // at end
+ )
+
+ // V0 should be first.
+ var V0 uintptr
+
+ // V1 should be second.
+ var V1 uint
+
+ // V2 should be third.
+ var V2 int
+
+ //
+ var (
+ // Single var declarations inside ()'s are considered ungrouped
+ // and show up in sorted order.
+ Vungrouped = 0
+ )
+
+
+FUNCTIONS
+ // F0 should be first.
+ func F0()
+
+ // F1 should be second.
+ func F1()
+
+ // F2 should be third.
+ func F2()
+
+
+TYPES
+ // T0 should be first.
+ type T0 struct{}
+
+ // T1 should be second.
+ type T1 struct{}
+
+ // T2 should be third.
+ type T2 struct{}
+
+ // TG0 should be first.
+ type TG0 struct{}
+
+ // TG1 should be second.
+ type TG1 struct{}
+
+ // TG2 should be third.
+ type TG2 struct{}
+
-//
+// The package e is a go/doc test for embedded methods.
PACKAGE e
IMPORTPATH
testdata/e.go
TYPES
- // T1 has no (top-level) M method due to conflict.
+ // T1 has no embedded (level 1) M method due to conflict.
type T1 struct {
// contains filtered or unexported fields
}
// T3.M should appear as method of T3.
func (T3) M()
+ //
+ type T4 struct{}
+
+ // T4.M should appear as method of T5 only if AllMethods is set.
+ func (*T4) M()
+
+ //
+ type T5 struct {
+ T4
+ }
+
-//
+// The package e is a go/doc test for embedded methods.
PACKAGE e
IMPORTPATH
testdata/e.go
TYPES
- // T1 has no (top-level) M method due to conflict.
+ // T1 has no embedded (level 1) M method due to conflict.
type T1 struct {
t1
t2
func (T3) M()
//
+ type T4 struct{}
+
+ // T4.M should appear as method of T5 only if AllMethods is set.
+ func (*T4) M()
+
+ //
+ type T5 struct {
+ T4
+ }
+
+ //
type t1 struct{}
// t1.M should not appear as method in a Tx type.
--- /dev/null
+// The package e is a go/doc test for embedded methods.
+PACKAGE e
+
+IMPORTPATH
+ testdata/e
+
+FILENAMES
+ testdata/e.go
+
+TYPES
+ // T1 has no embedded (level 1) M method due to conflict.
+ type T1 struct {
+ // contains filtered or unexported fields
+ }
+
+ // T2 has only M as top-level method.
+ type T2 struct {
+ // contains filtered or unexported fields
+ }
+
+ // T2.M should appear as method of T2.
+ func (T2) M()
+
+ // T3 has only M as top-level method.
+ type T3 struct {
+ // contains filtered or unexported fields
+ }
+
+ // T3.M should appear as method of T3.
+ func (T3) M()
+
+ //
+ type T4 struct{}
+
+ // T4.M should appear as method of T5 only if AllMethods is set.
+ func (*T4) M()
+
+ //
+ type T5 struct {
+ T4
+ }
+
+ // T4.M should appear as method of T5 only if AllMethods is set.
+ func (*T5) M()
+
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Embedding tests.
-// TODO(gri): This should be comprehensive.
-
+// The package e is a go/doc test for embedded methods.
package e
// ----------------------------------------------------------------------------
// t2.M should not appear as method in a Tx type.
func (t2) M() {}
-// T1 has no (top-level) M method due to conflict.
+// T1 has no embedded (level 1) M method due to conflict.
type T1 struct {
t1
t2
// T3.M should appear as method of T3.
func (T3) M() {}
+
+// ----------------------------------------------------------------------------
+// Don't show conflicting methods M embedded via an exported and non-exported
+// type.
+
+// T4 has no embedded (level 1) M method due to conflict.
+type T4 struct {
+ t2
+ T2
+}
+
+// ----------------------------------------------------------------------------
+// Don't show embedded methods of exported anonymous fields unless AllMethods
+// is set.
+
+type T4 struct{}
+
+// T4.M should appear as method of T5 only if AllMethods is set.
+func (*T4) M() {}
+
+type T5 struct {
+ T4
+}
--- /dev/null
+// The package f is a go/doc test for functions and factory ...
+PACKAGE f
+
+IMPORTPATH
+ testdata/f
+
+FILENAMES
+ testdata/f.go
+
+FUNCTIONS
+ // Exported must always be visible. Was issue 2824.
+ func Exported() private
+
--- /dev/null
+// The package f is a go/doc test for functions and factory ...
+PACKAGE f
+
+IMPORTPATH
+ testdata/f
+
+FILENAMES
+ testdata/f.go
+
+TYPES
+ //
+ type private struct{}
+
+ // Exported must always be visible. Was issue 2824.
+ func Exported() private
+
--- /dev/null
+// The package f is a go/doc test for functions and factory ...
+PACKAGE f
+
+IMPORTPATH
+ testdata/f
+
+FILENAMES
+ testdata/f.go
+
+FUNCTIONS
+ // Exported must always be visible. Was issue 2824.
+ func Exported() private
+
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The package f is a go/doc test for functions and factory methods.
+package f
+
+// ----------------------------------------------------------------------------
+// Factory functions for non-exported types must not get lost.
+
+type private struct{}
+
+// Exported must always be visible. Was issue 2824.
+func Exported() private {}
--- /dev/null
+// Package testing provides support for automated testing of Go ...
+PACKAGE testing
+
+IMPORTPATH
+ testdata/testing
+
+IMPORTS
+ bytes
+ flag
+ fmt
+ io
+ os
+ runtime
+ runtime/pprof
+ strconv
+ strings
+ time
+
+FILENAMES
+ testdata/benchmark.go
+ testdata/example.go
+ testdata/testing.go
+
+FUNCTIONS
+ // An internal function but exported because it is cross-package; ...
+ func Main(matchString func(pat, str string) (bool, error), tests []InternalTest, benchmarks []InternalBenchmark, examples []InternalExample)
+
+ // An internal function but exported because it is cross-package; ...
+ func RunBenchmarks(matchString func(pat, str string) (bool, error), benchmarks []InternalBenchmark)
+
+ //
+ func RunExamples(examples []InternalExample) (ok bool)
+
+ //
+ func RunTests(matchString func(pat, str string) (bool, error), tests []InternalTest) (ok bool)
+
+ // Short reports whether the -test.short flag is set.
+ func Short() bool
+
+
+TYPES
+ // B is a type passed to Benchmark functions to manage benchmark ...
+ type B struct {
+ N int
+ // contains filtered or unexported fields
+ }
+
+ // Error is equivalent to Log() followed by Fail().
+ func (c *B) Error(args ...interface{})
+
+ // Errorf is equivalent to Logf() followed by Fail().
+ func (c *B) Errorf(format string, args ...interface{})
+
+ // Fail marks the function as having failed but continues ...
+ func (c *B) Fail()
+
+ // FailNow marks the function as having failed and stops its ...
+ func (c *B) FailNow()
+
+ // Failed returns whether the function has failed.
+ func (c *B) Failed() bool
+
+ // Fatal is equivalent to Log() followed by FailNow().
+ func (c *B) Fatal(args ...interface{})
+
+ // Fatalf is equivalent to Logf() followed by FailNow().
+ func (c *B) Fatalf(format string, args ...interface{})
+
+ // Log formats its arguments using default formatting, analogous ...
+ func (c *B) Log(args ...interface{})
+
+ // Logf formats its arguments according to the format, analogous ...
+ func (c *B) Logf(format string, args ...interface{})
+
+ // ResetTimer sets the elapsed benchmark time to zero. It does not ...
+ func (b *B) ResetTimer()
+
+ // SetBytes records the number of bytes processed in a single ...
+ func (b *B) SetBytes(n int64)
+
+ // StartTimer starts timing a test. This function is called ...
+ func (b *B) StartTimer()
+
+ // StopTimer stops timing a test. This can be used to pause the ...
+ func (b *B) StopTimer()
+
+ // The results of a benchmark run.
+ type BenchmarkResult struct {
+ N int // The number of iterations.
+ T time.Duration // The total time taken.
+ Bytes int64 // Bytes processed in one iteration.
+ }
+
+ // Benchmark benchmarks a single function. Useful for creating ...
+ func Benchmark(f func(b *B)) BenchmarkResult
+
+ //
+ func (r BenchmarkResult) NsPerOp() int64
+
+ //
+ func (r BenchmarkResult) String() string
+
+ // An internal type but exported because it is cross-package; part ...
+ type InternalBenchmark struct {
+ Name string
+ F func(b *B)
+ }
+
+ //
+ type InternalExample struct {
+ Name string
+ F func()
+ Output string
+ }
+
+ // An internal type but exported because it is cross-package; part ...
+ type InternalTest struct {
+ Name string
+ F func(*T)
+ }
+
+ // T is a type passed to Test functions to manage test state and ...
+ type T struct {
+ // contains filtered or unexported fields
+ }
+
+ // Error is equivalent to Log() followed by Fail().
+ func (c *T) Error(args ...interface{})
+
+ // Errorf is equivalent to Logf() followed by Fail().
+ func (c *T) Errorf(format string, args ...interface{})
+
+ // Fail marks the function as having failed but continues ...
+ func (c *T) Fail()
+
+ // FailNow marks the function as having failed and stops its ...
+ func (c *T) FailNow()
+
+ // Failed returns whether the function has failed.
+ func (c *T) Failed() bool
+
+ // Fatal is equivalent to Log() followed by FailNow().
+ func (c *T) Fatal(args ...interface{})
+
+ // Fatalf is equivalent to Logf() followed by FailNow().
+ func (c *T) Fatalf(format string, args ...interface{})
+
+ // Log formats its arguments using default formatting, analogous ...
+ func (c *T) Log(args ...interface{})
+
+ // Logf formats its arguments according to the format, analogous ...
+ func (c *T) Logf(format string, args ...interface{})
+
+ // Parallel signals that this test is to be run in parallel with ...
+ func (t *T) Parallel()
+
}
p.comments[0] = g
p.cindex = 0
+ p.nextComment() // get comment ready for use
}
type exprListMode uint
"go/token"
"io"
"os"
- "path/filepath"
"strconv"
"strings"
"text/tabwriter"
fset *token.FileSet
// Current state
- output bytes.Buffer // raw printer result
- indent int // current indentation
- mode pmode // current printer mode
- lastTok token.Token // the last token printed (token.ILLEGAL if it's whitespace)
- wsbuf []whiteSpace // delayed white space
+ output bytes.Buffer // raw printer result
+ indent int // current indentation
+ mode pmode // current printer mode
+ impliedSemi bool // if set, a linebreak implies a semicolon
+ lastTok token.Token // the last token printed (token.ILLEGAL if it's whitespace)
+ wsbuf []whiteSpace // delayed white space
// The (possibly estimated) position in the generated output;
// in AST space (i.e., pos is set whenever a token position is
cindex int // current comment index
useNodeComments bool // if not set, ignore lead and line comments of nodes
+ // Information about p.comments[p.cindex]; set up by nextComment.
+ comment *ast.CommentGroup // = p.comments[p.cindex]; or nil
+ commentOffset int // = p.posFor(p.comments[p.cindex].List[0].Pos()).Offset; or infinity
+ commentNewline bool // true if the comment group contains newlines
+
// Cache of already computed node sizes.
nodeSizes map[ast.Node]int
p.cachedPos = -1
}
+// commentsHaveNewline reports whether a list of comments belonging to
+// an *ast.CommentGroup contains newlines. Because the position information
+// may only be partially correct, we also have to read the comment text.
+func (p *printer) commentsHaveNewline(list []*ast.Comment) bool {
+ // len(list) > 0
+ line := p.lineFor(list[0].Pos())
+ for i, c := range list {
+ if i > 0 && p.lineFor(list[i].Pos()) != line {
+ // not all comments on the same line
+ return true
+ }
+ if t := c.Text; len(t) >= 2 && (t[1] == '/' || strings.Contains(t, "\n")) {
+ return true
+ }
+ }
+ _ = line
+ return false
+}
+
+func (p *printer) nextComment() {
+ for p.cindex < len(p.comments) {
+ c := p.comments[p.cindex]
+ p.cindex++
+ if list := c.List; len(list) > 0 {
+ p.comment = c
+ p.commentOffset = p.posFor(list[0].Pos()).Offset
+ p.commentNewline = p.commentsHaveNewline(list)
+ return
+ }
+ // we should not reach here (correct ASTs don't have empty
+ // ast.CommentGroup nodes), but be conservative and try again
+ }
+ // no more comments
+ p.commentOffset = infinity
+}
+
func (p *printer) internalError(msg ...interface{}) {
if debug {
fmt.Print(p.pos.String() + ": ")
}
if debug {
// do not update p.pos - use write0
- _, filename := filepath.Split(pos.Filename)
- fmt.Fprintf(&p.output, "[%s:%d:%d]", filename, pos.Line, pos.Column)
+ fmt.Fprintf(&p.output, "/*%s*/", pos)
}
p.writeString(data, isLit)
p.last = p.pos
//
func (p *printer) intersperseComments(next token.Position, tok token.Token) (wroteNewline, droppedFF bool) {
var last *ast.Comment
- for ; p.commentBefore(next); p.cindex++ {
- for _, c := range p.comments[p.cindex].List {
+ for p.commentBefore(next) {
+ for _, c := range p.comment.List {
p.writeCommentPrefix(p.posFor(c.Pos()), next, last, c, tok.IsKeyword())
p.writeComment(c)
last = c
}
+ p.nextComment()
}
if last != nil {
// printed, followed by the actual token.
//
func (p *printer) print(args ...interface{}) {
- for _, f := range args {
- next := p.pos // estimated position of next item
- data := ""
- isLit := false
- var tok token.Token
+ for _, arg := range args {
+ // information about the current arg
+ var data string
+ var isLit bool
+ var impliedSemi bool // value for p.impliedSemi after this arg
- switch x := f.(type) {
+ switch x := arg.(type) {
case pmode:
// toggle printer mode
p.mode ^= x
+ continue
+
case whiteSpace:
if x == ignore {
// don't add ignore's to the buffer; they
// may screw up "correcting" unindents (see
// LabeledStmt)
- break
+ continue
}
i := len(p.wsbuf)
if i == cap(p.wsbuf) {
}
p.wsbuf = p.wsbuf[0 : i+1]
p.wsbuf[i] = x
+ if x == newline || x == formfeed {
+ // newlines affect the current state (p.impliedSemi)
+ // and not the state after printing arg (impliedSemi)
+ // because comments can be interspersed before the arg
+ // in this case
+ p.impliedSemi = false
+ }
+ p.lastTok = token.ILLEGAL
+ continue
+
case *ast.Ident:
data = x.Name
- tok = token.IDENT
+ impliedSemi = true
+ p.lastTok = token.IDENT
+
case *ast.BasicLit:
data = x.Value
isLit = true
- tok = x.Kind
+ impliedSemi = true
+ p.lastTok = x.Kind
+
case token.Token:
s := x.String()
if mayCombine(p.lastTok, s[0]) {
p.wsbuf[0] = ' '
}
data = s
- tok = x
+ // some keywords followed by a newline imply a semicolon
+ switch x {
+ case token.BREAK, token.CONTINUE, token.FALLTHROUGH, token.RETURN,
+ token.INC, token.DEC, token.RPAREN, token.RBRACK, token.RBRACE:
+ impliedSemi = true
+ }
+ p.lastTok = x
+
case token.Pos:
if x.IsValid() {
- next = p.posFor(x) // accurate position of next item
+ p.pos = p.posFor(x) // accurate position of next item
}
- tok = p.lastTok
+ continue
+
case string:
// incorrect AST - print error message
data = x
isLit = true
- tok = token.STRING
+ impliedSemi = true
+ p.lastTok = token.STRING
+
default:
- fmt.Fprintf(os.Stderr, "print: unsupported argument %v (%T)\n", f, f)
+ fmt.Fprintf(os.Stderr, "print: unsupported argument %v (%T)\n", arg, arg)
panic("go/printer type")
}
- p.lastTok = tok
- p.pos = next
+ // data != ""
- if data != "" {
- wroteNewline, droppedFF := p.flush(next, tok)
+ next := p.pos // estimated/accurate position of next item
+ wroteNewline, droppedFF := p.flush(next, p.lastTok)
- // intersperse extra newlines if present in the source
- // (don't do this in flush as it will cause extra newlines
- // at the end of a file)
+ // intersperse extra newlines if present in the source and
+ // if they don't cause extra semicolons (don't do this in
+ // flush as it will cause extra newlines at the end of a file)
+ if !p.impliedSemi {
n := nlimit(next.Line - p.pos.Line)
// don't exceed maxNewlines if we already wrote one
if wroteNewline && n == maxNewlines {
ch = '\f' // use formfeed since we dropped one before
}
p.writeByteN(ch, n)
+ impliedSemi = false
}
-
- p.writeItem(next, data, isLit)
}
+
+ p.writeItem(next, data, isLit)
+ p.impliedSemi = impliedSemi
}
}
-// commentBefore returns true iff the current comment occurs
-// before the next position in the source code.
+// commentBefore returns true iff the current comment group occurs
+// before the next position in the source code and printing it does
+// not introduce implicit semicolons.
//
-func (p *printer) commentBefore(next token.Position) bool {
- return p.cindex < len(p.comments) && p.posFor(p.comments[p.cindex].List[0].Pos()).Offset < next.Offset
+func (p *printer) commentBefore(next token.Position) (result bool) {
+ return p.commentOffset < next.Offset && (!p.impliedSemi || !p.commentNewline)
}
-// Flush prints any pending comments and whitespace occurring textually
-// before the position of the next token tok. The Flush result indicates
+// flush prints any pending comments and whitespace occurring textually
+// before the position of the next token tok. The flush result indicates
// if a newline was written or if a formfeed was dropped from the whitespace
// buffer.
//
// if there are no comments, use node comments
p.useNodeComments = p.comments == nil
+ // get comments ready for use
+ p.nextComment()
+
// format node
switch n := node.(type) {
case ast.Expr:
if err = p.printNode(node); err != nil {
return
}
+ // print outstanding comments
+ p.impliedSemi = false // EOF acts like a newline
p.flush(token.Position{Offset: infinity, Line: infinity}, token.EOF)
// redirect output through a trimmer to eliminate trailing whitespace
}
// A CommentedNode bundles an AST node and corresponding comments.
-// It may be provided as argument to any of the FPrint functions.
+// It may be provided as argument to any of the Fprint functions.
//
type CommentedNode struct {
Node interface{} // *ast.File, or ast.Expr, ast.Decl, ast.Spec, or ast.Stmt
`
fset := token.NewFileSet()
- ast1, err1 := parser.ParseFile(fset, "", src, parser.ParseComments)
- if err1 != nil {
- panic(err1)
+ f, err := parser.ParseFile(fset, "", src, parser.ParseComments)
+ if err != nil {
+ panic(err) // error in test
}
var buf bytes.Buffer
fset = token.NewFileSet() // use the wrong file set
- Fprint(&buf, fset, ast1)
+ Fprint(&buf, fset, f)
nlines := 0
for _, ch := range buf.Bytes() {
const expected = 3
if nlines < expected {
t.Errorf("got %d, expected %d\n", nlines, expected)
+ t.Errorf("result:\n%s", buf.Bytes())
}
}
const name = "foobar"
var buf bytes.Buffer
if err := Fprint(&buf, fset, &ast.Ident{Name: name}); err != nil {
- panic(err)
+ panic(err) // error in test
}
- if s := buf.String(); s != name {
+ // in debug mode, the result contains additional information;
+ // ignore it
+ if s := buf.String(); !debug && s != name {
panic("got " + s + ", want " + name)
}
}
const res = "package p\nBadDecl\n"
f, err := parser.ParseFile(fset, "", src, parser.ParseComments)
if err == nil {
- t.Errorf("expected illegal program")
+ t.Error("expected illegal program") // error in test
}
var buf bytes.Buffer
Fprint(&buf, fset, f)
t.Errorf("got %q, expected %q", buf.String(), res)
}
}
+
+// testComment replaces f's first comment with the given comment, then,
+// for each offset in [0, srclen], prints f and verifies that the output
+// still parses; the comment is moved one byte down after each iteration.
+func testComment(t *testing.T, f *ast.File, srclen int, comment *ast.Comment) {
+ f.Comments[0].List[0] = comment
+ var buf bytes.Buffer
+ for offs := 0; offs <= srclen; offs++ {
+ buf.Reset()
+ // Printing f should result in a correct program no
+ // matter what the (incorrect) comment position is.
+ if err := Fprint(&buf, fset, f); err != nil {
+ t.Error(err)
+ }
+ if _, err := parser.ParseFile(fset, "", buf.Bytes(), 0); err != nil {
+ t.Fatalf("incorrect program for pos = %d:\n%s", comment.Slash, buf.String())
+ }
+ // Position information is just an offset.
+ // Move comment one byte down in the source.
+ comment.Slash++
+ }
+}
+
+// Verify that the printer always produces a correct program
+// even if the position information of comments introducing newlines
+// is incorrect.
+func TestBadComments(t *testing.T) {
+ const src = `
+// first comment - text and position changed by test
+package p
+import "fmt"
+const pi = 3.14 // rough circle
+var (
+ x, y, z int = 1, 2, 3
+ u, v float64
+)
+func fibo(n int) {
+ if n < 2 {
+ return n /* seed values */
+ }
+ return fibo(n-1) + fibo(n-2)
+}
+`
+
+ f, err := parser.ParseFile(fset, "", src, parser.ParseComments)
+ if err != nil {
+ t.Error(err) // error in test
+ }
+
+ comment := f.Comments[0].List[0]
+ pos := comment.Pos()
+ if fset.Position(pos).Offset != 1 {
+ t.Error("expected offset 1") // error in test
+ }
+
+ testComment(t, f, len(src), &ast.Comment{pos, "//-style comment"})
+ testComment(t, f, len(src), &ast.Comment{pos, "/*-style comment */"})
+ testComment(t, f, len(src), &ast.Comment{pos, "/*-style \n comment */"})
+ testComment(t, f, len(src), &ast.Comment{pos, "/*-style comment \n\n\n */"})
+}
// provided in a variety of forms (see the various Parse* functions); the
// output is an abstract syntax tree (AST) representing the Go source. The
// parser is invoked through one of the Parse* functions.
-//
+
package parser
import (
//
// var s scanner.Scanner
// fset := token.NewFileSet() // position information is relative to fset
-// file := fset.AddFile(filename, fset.Base(), len(src)) // register file
+// file := fset.AddFile(filename, fset.Base(), len(src)) // register file
// s.Init(file, src, nil /* no error handler */, 0)
// for {
// pos, tok, lit := s.Scan()
ErrorCount int // number of errors encountered
}
-// Read the next Unicode char into S.ch.
-// S.ch < 0 means end-of-file.
+// Read the next Unicode char into s.ch.
+// s.ch < 0 means end-of-file.
//
-func (S *Scanner) next() {
- if S.rdOffset < len(S.src) {
- S.offset = S.rdOffset
- if S.ch == '\n' {
- S.lineOffset = S.offset
- S.file.AddLine(S.offset)
+func (s *Scanner) next() {
+ if s.rdOffset < len(s.src) {
+ s.offset = s.rdOffset
+ if s.ch == '\n' {
+ s.lineOffset = s.offset
+ s.file.AddLine(s.offset)
}
- r, w := rune(S.src[S.rdOffset]), 1
+ r, w := rune(s.src[s.rdOffset]), 1
switch {
case r == 0:
- S.error(S.offset, "illegal character NUL")
+ s.error(s.offset, "illegal character NUL")
case r >= 0x80:
// not ASCII
- r, w = utf8.DecodeRune(S.src[S.rdOffset:])
+ r, w = utf8.DecodeRune(s.src[s.rdOffset:])
if r == utf8.RuneError && w == 1 {
- S.error(S.offset, "illegal UTF-8 encoding")
+ s.error(s.offset, "illegal UTF-8 encoding")
}
}
- S.rdOffset += w
- S.ch = r
+ s.rdOffset += w
+ s.ch = r
} else {
- S.offset = len(S.src)
- if S.ch == '\n' {
- S.lineOffset = S.offset
- S.file.AddLine(S.offset)
+ s.offset = len(s.src)
+ if s.ch == '\n' {
+ s.lineOffset = s.offset
+ s.file.AddLine(s.offset)
}
- S.ch = -1 // eof
+ s.ch = -1 // eof
}
}
dontInsertSemis // do not automatically insert semicolons - for testing only
)
-// Init prepares the scanner S to tokenize the text src by setting the
+// Init prepares the scanner s to tokenize the text src by setting the
// scanner at the beginning of src. The scanner uses the file set file
// for position information and it adds line information for each line.
// It is ok to re-use the same file when re-scanning the same file as
// Note that Init may call err if there is an error in the first character
// of the file.
//
-func (S *Scanner) Init(file *token.File, src []byte, err ErrorHandler, mode Mode) {
+func (s *Scanner) Init(file *token.File, src []byte, err ErrorHandler, mode Mode) {
// Explicitly initialize all fields since a scanner may be reused.
if file.Size() != len(src) {
panic("file size does not match src len")
}
- S.file = file
- S.dir, _ = filepath.Split(file.Name())
- S.src = src
- S.err = err
- S.mode = mode
+ s.file = file
+ s.dir, _ = filepath.Split(file.Name())
+ s.src = src
+ s.err = err
+ s.mode = mode
- S.ch = ' '
- S.offset = 0
- S.rdOffset = 0
- S.lineOffset = 0
- S.insertSemi = false
- S.ErrorCount = 0
+ s.ch = ' '
+ s.offset = 0
+ s.rdOffset = 0
+ s.lineOffset = 0
+ s.insertSemi = false
+ s.ErrorCount = 0
- S.next()
+ s.next()
}
-func (S *Scanner) error(offs int, msg string) {
- if S.err != nil {
- S.err.Error(S.file.Position(S.file.Pos(offs)), msg)
+func (s *Scanner) error(offs int, msg string) {
+ if s.err != nil {
+ s.err.Error(s.file.Position(s.file.Pos(offs)), msg)
}
- S.ErrorCount++
+ s.ErrorCount++
}
var prefix = []byte("//line ")
-func (S *Scanner) interpretLineComment(text []byte) {
+func (s *Scanner) interpretLineComment(text []byte) {
if bytes.HasPrefix(text, prefix) {
// get filename and line number, if any
if i := bytes.LastIndex(text, []byte{':'}); i > 0 {
filename := filepath.Clean(string(text[len(prefix):i]))
if !filepath.IsAbs(filename) {
// make filename relative to current directory
- filename = filepath.Join(S.dir, filename)
+ filename = filepath.Join(s.dir, filename)
}
// update scanner position
- S.file.AddLineInfo(S.lineOffset+len(text)+1, filename, line) // +len(text)+1 since comment applies to next line
+ s.file.AddLineInfo(s.lineOffset+len(text)+1, filename, line) // +len(text)+1 since comment applies to next line
}
}
}
}
-func (S *Scanner) scanComment() string {
- // initial '/' already consumed; S.ch == '/' || S.ch == '*'
- offs := S.offset - 1 // position of initial '/'
+func (s *Scanner) scanComment() string {
+ // initial '/' already consumed; s.ch == '/' || s.ch == '*'
+ offs := s.offset - 1 // position of initial '/'
- if S.ch == '/' {
+ if s.ch == '/' {
//-style comment
- S.next()
- for S.ch != '\n' && S.ch >= 0 {
- S.next()
+ s.next()
+ for s.ch != '\n' && s.ch >= 0 {
+ s.next()
}
- if offs == S.lineOffset {
+ if offs == s.lineOffset {
// comment starts at the beginning of the current line
- S.interpretLineComment(S.src[offs:S.offset])
+ s.interpretLineComment(s.src[offs:s.offset])
}
goto exit
}
/*-style comment */
- S.next()
- for S.ch >= 0 {
- ch := S.ch
- S.next()
- if ch == '*' && S.ch == '/' {
- S.next()
+ s.next()
+ for s.ch >= 0 {
+ ch := s.ch
+ s.next()
+ if ch == '*' && s.ch == '/' {
+ s.next()
goto exit
}
}
- S.error(offs, "comment not terminated")
+ s.error(offs, "comment not terminated")
exit:
- return string(S.src[offs:S.offset])
+ return string(s.src[offs:s.offset])
}
-func (S *Scanner) findLineEnd() bool {
+func (s *Scanner) findLineEnd() bool {
// initial '/' already consumed
defer func(offs int) {
// reset scanner state to where it was upon calling findLineEnd
- S.ch = '/'
- S.offset = offs
- S.rdOffset = offs + 1
- S.next() // consume initial '/' again
- }(S.offset - 1)
+ s.ch = '/'
+ s.offset = offs
+ s.rdOffset = offs + 1
+ s.next() // consume initial '/' again
+ }(s.offset - 1)
// read ahead until a newline, EOF, or non-comment token is found
- for S.ch == '/' || S.ch == '*' {
- if S.ch == '/' {
+ for s.ch == '/' || s.ch == '*' {
+ if s.ch == '/' {
//-style comment always contains a newline
return true
}
/*-style comment: look for newline */
- S.next()
- for S.ch >= 0 {
- ch := S.ch
+ s.next()
+ for s.ch >= 0 {
+ ch := s.ch
if ch == '\n' {
return true
}
- S.next()
- if ch == '*' && S.ch == '/' {
- S.next()
+ s.next()
+ if ch == '*' && s.ch == '/' {
+ s.next()
break
}
}
- S.skipWhitespace() // S.insertSemi is set
- if S.ch < 0 || S.ch == '\n' {
+ s.skipWhitespace() // s.insertSemi is set
+ if s.ch < 0 || s.ch == '\n' {
return true
}
- if S.ch != '/' {
+ if s.ch != '/' {
// non-comment token
return false
}
- S.next() // consume '/'
+ s.next() // consume '/'
}
return false
return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
}
-func (S *Scanner) scanIdentifier() string {
- offs := S.offset
- for isLetter(S.ch) || isDigit(S.ch) {
- S.next()
+func (s *Scanner) scanIdentifier() string {
+ offs := s.offset
+ for isLetter(s.ch) || isDigit(s.ch) {
+ s.next()
}
- return string(S.src[offs:S.offset])
+ return string(s.src[offs:s.offset])
}
func digitVal(ch rune) int {
return 16 // larger than any legal digit val
}
-func (S *Scanner) scanMantissa(base int) {
- for digitVal(S.ch) < base {
- S.next()
+func (s *Scanner) scanMantissa(base int) {
+ for digitVal(s.ch) < base {
+ s.next()
}
}
-func (S *Scanner) scanNumber(seenDecimalPoint bool) (token.Token, string) {
- // digitVal(S.ch) < 10
- offs := S.offset
+func (s *Scanner) scanNumber(seenDecimalPoint bool) (token.Token, string) {
+ // digitVal(s.ch) < 10
+ offs := s.offset
tok := token.INT
if seenDecimalPoint {
offs--
tok = token.FLOAT
- S.scanMantissa(10)
+ s.scanMantissa(10)
goto exponent
}
- if S.ch == '0' {
+ if s.ch == '0' {
// int or float
- offs := S.offset
- S.next()
- if S.ch == 'x' || S.ch == 'X' {
+ offs := s.offset
+ s.next()
+ if s.ch == 'x' || s.ch == 'X' {
// hexadecimal int
- S.next()
- S.scanMantissa(16)
- if S.offset-offs <= 2 {
+ s.next()
+ s.scanMantissa(16)
+ if s.offset-offs <= 2 {
// only scanned "0x" or "0X"
- S.error(offs, "illegal hexadecimal number")
+ s.error(offs, "illegal hexadecimal number")
}
} else {
// octal int or float
seenDecimalDigit := false
- S.scanMantissa(8)
- if S.ch == '8' || S.ch == '9' {
+ s.scanMantissa(8)
+ if s.ch == '8' || s.ch == '9' {
// illegal octal int or float
seenDecimalDigit = true
- S.scanMantissa(10)
+ s.scanMantissa(10)
}
- if S.ch == '.' || S.ch == 'e' || S.ch == 'E' || S.ch == 'i' {
+ if s.ch == '.' || s.ch == 'e' || s.ch == 'E' || s.ch == 'i' {
goto fraction
}
// octal int
if seenDecimalDigit {
- S.error(offs, "illegal octal number")
+ s.error(offs, "illegal octal number")
}
}
goto exit
}
// decimal int or float
- S.scanMantissa(10)
+ s.scanMantissa(10)
fraction:
- if S.ch == '.' {
+ if s.ch == '.' {
tok = token.FLOAT
- S.next()
- S.scanMantissa(10)
+ s.next()
+ s.scanMantissa(10)
}
exponent:
- if S.ch == 'e' || S.ch == 'E' {
+ if s.ch == 'e' || s.ch == 'E' {
tok = token.FLOAT
- S.next()
- if S.ch == '-' || S.ch == '+' {
- S.next()
+ s.next()
+ if s.ch == '-' || s.ch == '+' {
+ s.next()
}
- S.scanMantissa(10)
+ s.scanMantissa(10)
}
- if S.ch == 'i' {
+ if s.ch == 'i' {
tok = token.IMAG
- S.next()
+ s.next()
}
exit:
- return tok, string(S.src[offs:S.offset])
+ return tok, string(s.src[offs:s.offset])
}
-func (S *Scanner) scanEscape(quote rune) {
- offs := S.offset
+func (s *Scanner) scanEscape(quote rune) {
+ offs := s.offset
var i, base, max uint32
- switch S.ch {
+ switch s.ch {
case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', quote:
- S.next()
+ s.next()
return
case '0', '1', '2', '3', '4', '5', '6', '7':
i, base, max = 3, 8, 255
case 'x':
- S.next()
+ s.next()
i, base, max = 2, 16, 255
case 'u':
- S.next()
+ s.next()
i, base, max = 4, 16, unicode.MaxRune
case 'U':
- S.next()
+ s.next()
i, base, max = 8, 16, unicode.MaxRune
default:
- S.next() // always make progress
- S.error(offs, "unknown escape sequence")
+ s.next() // always make progress
+ s.error(offs, "unknown escape sequence")
return
}
var x uint32
- for ; i > 0 && S.ch != quote && S.ch >= 0; i-- {
- d := uint32(digitVal(S.ch))
+ for ; i > 0 && s.ch != quote && s.ch >= 0; i-- {
+ d := uint32(digitVal(s.ch))
if d >= base {
- S.error(S.offset, "illegal character in escape sequence")
+ s.error(s.offset, "illegal character in escape sequence")
break
}
x = x*base + d
- S.next()
+ s.next()
}
// in case of an error, consume remaining chars
- for ; i > 0 && S.ch != quote && S.ch >= 0; i-- {
- S.next()
+ for ; i > 0 && s.ch != quote && s.ch >= 0; i-- {
+ s.next()
}
if x > max || 0xd800 <= x && x < 0xe000 {
- S.error(offs, "escape sequence is invalid Unicode code point")
+ s.error(offs, "escape sequence is invalid Unicode code point")
}
}
-func (S *Scanner) scanChar() string {
+func (s *Scanner) scanChar() string {
// '\'' opening already consumed
- offs := S.offset - 1
+ offs := s.offset - 1
n := 0
- for S.ch != '\'' {
- ch := S.ch
+ for s.ch != '\'' {
+ ch := s.ch
n++
- S.next()
+ s.next()
if ch == '\n' || ch < 0 {
- S.error(offs, "character literal not terminated")
+ s.error(offs, "character literal not terminated")
n = 1
break
}
if ch == '\\' {
- S.scanEscape('\'')
+ s.scanEscape('\'')
}
}
- S.next()
+ s.next()
if n != 1 {
- S.error(offs, "illegal character literal")
+ s.error(offs, "illegal character literal")
}
- return string(S.src[offs:S.offset])
+ return string(s.src[offs:s.offset])
}
-func (S *Scanner) scanString() string {
+func (s *Scanner) scanString() string {
// '"' opening already consumed
- offs := S.offset - 1
+ offs := s.offset - 1
- for S.ch != '"' {
- ch := S.ch
- S.next()
+ for s.ch != '"' {
+ ch := s.ch
+ s.next()
if ch == '\n' || ch < 0 {
- S.error(offs, "string not terminated")
+ s.error(offs, "string not terminated")
break
}
if ch == '\\' {
- S.scanEscape('"')
+ s.scanEscape('"')
}
}
- S.next()
+ s.next()
- return string(S.src[offs:S.offset])
+ return string(s.src[offs:s.offset])
}
func stripCR(b []byte) []byte {
return c[:i]
}
-func (S *Scanner) scanRawString() string {
+func (s *Scanner) scanRawString() string {
// '`' opening already consumed
- offs := S.offset - 1
+ offs := s.offset - 1
hasCR := false
- for S.ch != '`' {
- ch := S.ch
- S.next()
+ for s.ch != '`' {
+ ch := s.ch
+ s.next()
if ch == '\r' {
hasCR = true
}
if ch < 0 {
- S.error(offs, "string not terminated")
+ s.error(offs, "string not terminated")
break
}
}
- S.next()
+ s.next()
- lit := S.src[offs:S.offset]
+ lit := s.src[offs:s.offset]
if hasCR {
lit = stripCR(lit)
}
return string(lit)
}
-func (S *Scanner) skipWhitespace() {
- for S.ch == ' ' || S.ch == '\t' || S.ch == '\n' && !S.insertSemi || S.ch == '\r' {
- S.next()
+func (s *Scanner) skipWhitespace() {
+ for s.ch == ' ' || s.ch == '\t' || s.ch == '\n' && !s.insertSemi || s.ch == '\r' {
+ s.next()
}
}
// respectively. Otherwise, the result is tok0 if there was no other
// matching character, or tok2 if the matching character was ch2.
-func (S *Scanner) switch2(tok0, tok1 token.Token) token.Token {
- if S.ch == '=' {
- S.next()
+func (s *Scanner) switch2(tok0, tok1 token.Token) token.Token {
+ if s.ch == '=' {
+ s.next()
return tok1
}
return tok0
}
-func (S *Scanner) switch3(tok0, tok1 token.Token, ch2 rune, tok2 token.Token) token.Token {
- if S.ch == '=' {
- S.next()
+func (s *Scanner) switch3(tok0, tok1 token.Token, ch2 rune, tok2 token.Token) token.Token {
+ if s.ch == '=' {
+ s.next()
return tok1
}
- if S.ch == ch2 {
- S.next()
+ if s.ch == ch2 {
+ s.next()
return tok2
}
return tok0
}
-func (S *Scanner) switch4(tok0, tok1 token.Token, ch2 rune, tok2, tok3 token.Token) token.Token {
- if S.ch == '=' {
- S.next()
+func (s *Scanner) switch4(tok0, tok1 token.Token, ch2 rune, tok2, tok3 token.Token) token.Token {
+ if s.ch == '=' {
+ s.next()
return tok1
}
- if S.ch == ch2 {
- S.next()
- if S.ch == '=' {
- S.next()
+ if s.ch == ch2 {
+ s.next()
+ if s.ch == '=' {
+ s.next()
return tok3
}
return tok2
// set with Init. Token positions are relative to that file
// and thus relative to the file set.
//
-func (S *Scanner) Scan() (pos token.Pos, tok token.Token, lit string) {
+func (s *Scanner) Scan() (pos token.Pos, tok token.Token, lit string) {
scanAgain:
- S.skipWhitespace()
+ s.skipWhitespace()
// current token start
- pos = S.file.Pos(S.offset)
+ pos = s.file.Pos(s.offset)
// determine token value
insertSemi := false
- switch ch := S.ch; {
+ switch ch := s.ch; {
case isLetter(ch):
- lit = S.scanIdentifier()
+ lit = s.scanIdentifier()
tok = token.Lookup(lit)
switch tok {
case token.IDENT, token.BREAK, token.CONTINUE, token.FALLTHROUGH, token.RETURN:
}
case digitVal(ch) < 10:
insertSemi = true
- tok, lit = S.scanNumber(false)
+ tok, lit = s.scanNumber(false)
default:
- S.next() // always make progress
+ s.next() // always make progress
switch ch {
case -1:
- if S.insertSemi {
- S.insertSemi = false // EOF consumed
+ if s.insertSemi {
+ s.insertSemi = false // EOF consumed
return pos, token.SEMICOLON, "\n"
}
tok = token.EOF
case '\n':
- // we only reach here if S.insertSemi was
+ // we only reach here if s.insertSemi was
// set in the first place and exited early
- // from S.skipWhitespace()
- S.insertSemi = false // newline consumed
+ // from s.skipWhitespace()
+ s.insertSemi = false // newline consumed
return pos, token.SEMICOLON, "\n"
case '"':
insertSemi = true
tok = token.STRING
- lit = S.scanString()
+ lit = s.scanString()
case '\'':
insertSemi = true
tok = token.CHAR
- lit = S.scanChar()
+ lit = s.scanChar()
case '`':
insertSemi = true
tok = token.STRING
- lit = S.scanRawString()
+ lit = s.scanRawString()
case ':':
- tok = S.switch2(token.COLON, token.DEFINE)
+ tok = s.switch2(token.COLON, token.DEFINE)
case '.':
- if digitVal(S.ch) < 10 {
+ if digitVal(s.ch) < 10 {
insertSemi = true
- tok, lit = S.scanNumber(true)
- } else if S.ch == '.' {
- S.next()
- if S.ch == '.' {
- S.next()
+ tok, lit = s.scanNumber(true)
+ } else if s.ch == '.' {
+ s.next()
+ if s.ch == '.' {
+ s.next()
tok = token.ELLIPSIS
}
} else {
insertSemi = true
tok = token.RBRACE
case '+':
- tok = S.switch3(token.ADD, token.ADD_ASSIGN, '+', token.INC)
+ tok = s.switch3(token.ADD, token.ADD_ASSIGN, '+', token.INC)
if tok == token.INC {
insertSemi = true
}
case '-':
- tok = S.switch3(token.SUB, token.SUB_ASSIGN, '-', token.DEC)
+ tok = s.switch3(token.SUB, token.SUB_ASSIGN, '-', token.DEC)
if tok == token.DEC {
insertSemi = true
}
case '*':
- tok = S.switch2(token.MUL, token.MUL_ASSIGN)
+ tok = s.switch2(token.MUL, token.MUL_ASSIGN)
case '/':
- if S.ch == '/' || S.ch == '*' {
+ if s.ch == '/' || s.ch == '*' {
// comment
- if S.insertSemi && S.findLineEnd() {
+ if s.insertSemi && s.findLineEnd() {
// reset position to the beginning of the comment
- S.ch = '/'
- S.offset = S.file.Offset(pos)
- S.rdOffset = S.offset + 1
- S.insertSemi = false // newline consumed
+ s.ch = '/'
+ s.offset = s.file.Offset(pos)
+ s.rdOffset = s.offset + 1
+ s.insertSemi = false // newline consumed
return pos, token.SEMICOLON, "\n"
}
- lit = S.scanComment()
- if S.mode&ScanComments == 0 {
+ lit = s.scanComment()
+ if s.mode&ScanComments == 0 {
// skip comment
- S.insertSemi = false // newline consumed
+ s.insertSemi = false // newline consumed
goto scanAgain
}
tok = token.COMMENT
} else {
- tok = S.switch2(token.QUO, token.QUO_ASSIGN)
+ tok = s.switch2(token.QUO, token.QUO_ASSIGN)
}
case '%':
- tok = S.switch2(token.REM, token.REM_ASSIGN)
+ tok = s.switch2(token.REM, token.REM_ASSIGN)
case '^':
- tok = S.switch2(token.XOR, token.XOR_ASSIGN)
+ tok = s.switch2(token.XOR, token.XOR_ASSIGN)
case '<':
- if S.ch == '-' {
- S.next()
+ if s.ch == '-' {
+ s.next()
tok = token.ARROW
} else {
- tok = S.switch4(token.LSS, token.LEQ, '<', token.SHL, token.SHL_ASSIGN)
+ tok = s.switch4(token.LSS, token.LEQ, '<', token.SHL, token.SHL_ASSIGN)
}
case '>':
- tok = S.switch4(token.GTR, token.GEQ, '>', token.SHR, token.SHR_ASSIGN)
+ tok = s.switch4(token.GTR, token.GEQ, '>', token.SHR, token.SHR_ASSIGN)
case '=':
- tok = S.switch2(token.ASSIGN, token.EQL)
+ tok = s.switch2(token.ASSIGN, token.EQL)
case '!':
- tok = S.switch2(token.NOT, token.NEQ)
+ tok = s.switch2(token.NOT, token.NEQ)
case '&':
- if S.ch == '^' {
- S.next()
- tok = S.switch2(token.AND_NOT, token.AND_NOT_ASSIGN)
+ if s.ch == '^' {
+ s.next()
+ tok = s.switch2(token.AND_NOT, token.AND_NOT_ASSIGN)
} else {
- tok = S.switch3(token.AND, token.AND_ASSIGN, '&', token.LAND)
+ tok = s.switch3(token.AND, token.AND_ASSIGN, '&', token.LAND)
}
case '|':
- tok = S.switch3(token.OR, token.OR_ASSIGN, '|', token.LOR)
+ tok = s.switch3(token.OR, token.OR_ASSIGN, '|', token.LOR)
default:
- S.error(S.file.Offset(pos), fmt.Sprintf("illegal character %#U", ch))
- insertSemi = S.insertSemi // preserve insertSemi info
+ s.error(s.file.Offset(pos), fmt.Sprintf("illegal character %#U", ch))
+ insertSemi = s.insertSemi // preserve insertSemi info
tok = token.ILLEGAL
lit = string(ch)
}
}
- if S.mode&dontInsertSemis == 0 {
- S.insertSemi = insertSemi
+ if s.mode&dontInsertSemis == 0 {
+ s.insertSemi = insertSemi
}
return
var s Scanner
s.Init(fset.AddFile("", fset.Base(), len(source)), source, &testErrorHandler{t}, ScanComments|dontInsertSemis)
index := 0
- epos := token.Position{"", 0, 1, 1} // expected position
+ // epos is the expected position
+ epos := token.Position{
+ Filename: "",
+ Offset: 0,
+ Line: 1,
+ Column: 1,
+ }
for {
pos, tok, lit := s.Scan()
if lit == "" {
for _, s := range segs {
p, _, lit := S.Scan()
pos := file.Position(p)
- checkPos(t, lit, p, token.Position{s.filename, pos.Offset, s.line, pos.Column})
+ checkPos(t, lit, p, token.Position{
+ Filename: s.filename,
+ Offset: pos.Offset,
+ Line: s.line,
+ Column: pos.Column,
+ })
}
if S.ErrorCount != 0 {
package token
-import (
- "encoding/gob"
- "io"
-)
-
type serializedFile struct {
// fields correspond 1:1 to fields with same (lower-case) name in File
Name string
Files []serializedFile
}
-func (s *serializedFileSet) Read(r io.Reader) error {
- return gob.NewDecoder(r).Decode(s)
-}
-
-func (s *serializedFileSet) Write(w io.Writer) error {
- return gob.NewEncoder(w).Encode(s)
-}
-
-// Read reads the fileset from r into s; s must not be nil.
-// If r does not also implement io.ByteReader, it will be wrapped in a bufio.Reader.
-func (s *FileSet) Read(r io.Reader) error {
+// Read calls decode to deserialize a file set into s; s must not be nil.
+func (s *FileSet) Read(decode func(interface{}) error) error {
var ss serializedFileSet
- if err := ss.Read(r); err != nil {
+ if err := decode(&ss); err != nil {
return err
}
return nil
}
-// Write writes the fileset s to w.
-func (s *FileSet) Write(w io.Writer) error {
+// Write calls encode to serialize the file set s.
+func (s *FileSet) Write(encode func(interface{}) error) error {
var ss serializedFileSet
s.mutex.Lock()
ss.Files = files
s.mutex.Unlock()
- return ss.Write(w)
+ return encode(ss)
}
import (
"bytes"
+ "encoding/gob"
"fmt"
"testing"
)
func checkSerialize(t *testing.T, p *FileSet) {
var buf bytes.Buffer
- if err := p.Write(&buf); err != nil {
+ encode := func(x interface{}) error {
+ return gob.NewEncoder(&buf).Encode(x)
+ }
+ if err := p.Write(encode); err != nil {
t.Errorf("writing fileset failed: %s", err)
return
}
q := NewFileSet()
- if err := q.Read(&buf); err != nil {
+ decode := func(x interface{}) error {
+ return gob.NewDecoder(&buf).Decode(x)
+ }
+ if err := q.Read(decode); err != nil {
t.Errorf("reading fileset failed: %s", err)
return
}
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+// Package html provides functions for escaping and unescaping HTML text.
package html
import (
if strings.IndexAny(s, escapedChars) == -1 {
return s
}
- buf := bytes.NewBuffer(nil)
- escape(buf, s)
+ var buf bytes.Buffer
+ escape(&buf, s)
return buf.String()
}
for _, test := range tests {
b, e := []byte(test.input), newEscaper(nil)
- c := e.escapeText(context{}, &parse.TextNode{parse.NodeText, b})
+ c := e.escapeText(context{}, &parse.TextNode{NodeType: parse.NodeText, Text: b})
if !test.output.eq(c) {
t.Errorf("input %q: want context\n\t%v\ngot\n\t%v", test.input, test.output, c)
continue
a = fmt.Sprint(args...)
}
// TODO: detect cycles before calling Marshal which loops infinitely on
- // cyclic data. This may be an unnacceptable DoS risk.
+ // cyclic data. This may be an unacceptable DoS risk.
b, err := json.Marshal(a)
if err != nil {
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package bmp implements a BMP image decoder.
-//
-// The BMP specification is at http://www.digicamsoft.com/bmp/bmp.html.
-package bmp
-
-import (
- "errors"
- "image"
- "image/color"
- "io"
-)
-
-// ErrUnsupported means that the input BMP image uses a valid but unsupported
-// feature.
-var ErrUnsupported = errors.New("bmp: unsupported BMP image")
-
-func readUint16(b []byte) uint16 {
- return uint16(b[0]) | uint16(b[1])<<8
-}
-
-func readUint32(b []byte) uint32 {
- return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
-}
-
-// decodePaletted reads an 8 bit-per-pixel BMP image from r.
-func decodePaletted(r io.Reader, c image.Config) (image.Image, error) {
- var tmp [4]byte
- paletted := image.NewPaletted(image.Rect(0, 0, c.Width, c.Height), c.ColorModel.(color.Palette))
- // BMP images are stored bottom-up rather than top-down.
- for y := c.Height - 1; y >= 0; y-- {
- p := paletted.Pix[y*paletted.Stride : y*paletted.Stride+c.Width]
- _, err := io.ReadFull(r, p)
- if err != nil {
- return nil, err
- }
- // Each row is 4-byte aligned.
- if c.Width%4 != 0 {
- _, err := io.ReadFull(r, tmp[:4-c.Width%4])
- if err != nil {
- return nil, err
- }
- }
- }
- return paletted, nil
-}
-
-// decodeRGBA reads a 24 bit-per-pixel BMP image from r.
-func decodeRGBA(r io.Reader, c image.Config) (image.Image, error) {
- rgba := image.NewRGBA(image.Rect(0, 0, c.Width, c.Height))
- // There are 3 bytes per pixel, and each row is 4-byte aligned.
- b := make([]byte, (3*c.Width+3)&^3)
- // BMP images are stored bottom-up rather than top-down.
- for y := c.Height - 1; y >= 0; y-- {
- _, err := io.ReadFull(r, b)
- if err != nil {
- return nil, err
- }
- p := rgba.Pix[y*rgba.Stride : y*rgba.Stride+c.Width*4]
- for i, j := 0, 0; i < len(p); i, j = i+4, j+3 {
- // BMP images are stored in BGR order rather than RGB order.
- p[i+0] = b[j+2]
- p[i+1] = b[j+1]
- p[i+2] = b[j+0]
- p[i+3] = 0xFF
- }
- }
- return rgba, nil
-}
-
-// Decode reads a BMP image from r and returns it as an image.Image.
-// Limitation: The file must be 8 or 24 bits per pixel.
-func Decode(r io.Reader) (image.Image, error) {
- c, err := DecodeConfig(r)
- if err != nil {
- return nil, err
- }
- if c.ColorModel == color.RGBAModel {
- return decodeRGBA(r, c)
- }
- return decodePaletted(r, c)
-}
-
-// DecodeConfig returns the color model and dimensions of a BMP image without
-// decoding the entire image.
-// Limitation: The file must be 8 or 24 bits per pixel.
-func DecodeConfig(r io.Reader) (config image.Config, err error) {
- // We only support those BMP images that are a BITMAPFILEHEADER
- // immediately followed by a BITMAPINFOHEADER.
- const (
- fileHeaderLen = 14
- infoHeaderLen = 40
- )
- var b [1024]byte
- if _, err = io.ReadFull(r, b[:fileHeaderLen+infoHeaderLen]); err != nil {
- return
- }
- if string(b[:2]) != "BM" {
- err = errors.New("bmp: invalid format")
- return
- }
- offset := readUint32(b[10:14])
- if readUint32(b[14:18]) != infoHeaderLen {
- err = ErrUnsupported
- return
- }
- width := int(readUint32(b[18:22]))
- height := int(readUint32(b[22:26]))
- if width < 0 || height < 0 {
- err = ErrUnsupported
- return
- }
- // We only support 1 plane, 8 or 24 bits per pixel and no compression.
- planes, bpp, compression := readUint16(b[26:28]), readUint16(b[28:30]), readUint32(b[30:34])
- if planes != 1 || compression != 0 {
- err = ErrUnsupported
- return
- }
- switch bpp {
- case 8:
- if offset != fileHeaderLen+infoHeaderLen+256*4 {
- err = ErrUnsupported
- return
- }
- _, err = io.ReadFull(r, b[:256*4])
- if err != nil {
- return
- }
- pcm := make(color.Palette, 256)
- for i := range pcm {
- // BMP images are stored in BGR order rather than RGB order.
- // Every 4th byte is padding.
- pcm[i] = color.RGBA{b[4*i+2], b[4*i+1], b[4*i+0], 0xFF}
- }
- return image.Config{pcm, width, height}, nil
- case 24:
- if offset != fileHeaderLen+infoHeaderLen {
- err = ErrUnsupported
- return
- }
- return image.Config{color.RGBAModel, width, height}, nil
- }
- err = ErrUnsupported
- return
-}
-
-func init() {
- image.RegisterFormat("bmp", "BM????\x00\x00\x00\x00", Decode, DecodeConfig)
-}
"os"
"testing"
- _ "image/bmp"
_ "image/gif"
_ "image/jpeg"
_ "image/png"
- _ "image/tiff"
)
type imageTest struct {
}
var imageTests = []imageTest{
- {"testdata/video-001.png", "testdata/video-001.bmp", 0},
+ {"testdata/video-001.png", "testdata/video-001.png", 0},
// GIF images are restricted to a 256-color palette and the conversion
// to GIF loses significant image quality.
{"testdata/video-001.png", "testdata/video-001.gif", 64 << 8},
{"testdata/video-001.png", "testdata/video-001.5bpp.gif", 128 << 8},
// JPEG is a lossy format and hence needs a non-zero tolerance.
{"testdata/video-001.png", "testdata/video-001.jpeg", 8 << 8},
- {"testdata/video-001.png", "testdata/video-001.png", 0},
- {"testdata/video-001.png", "testdata/video-001.tiff", 0},
-
- // Test grayscale images.
+ // Grayscale images.
{"testdata/video-005.gray.png", "testdata/video-005.gray.jpeg", 8 << 8},
{"testdata/video-005.gray.png", "testdata/video-005.gray.png", 0},
}
var src image.Image
switch scm {
case nil:
- src = &image.Uniform{color.RGBA{0x11, 0x22, 0x33, 0xff}}
+ src = &image.Uniform{C: color.RGBA{0x11, 0x22, 0x33, 0xff}}
case color.RGBAModel:
src1 := image.NewRGBA(image.Rect(0, 0, srcw, srch))
for y := 0; y < srch; y++ {
x := 3 * i % (dstw - srcw)
y := 7 * i % (dsth - srch)
- DrawMask(dst, dst.Bounds().Add(image.Point{x, y}), src, image.ZP, mask, image.ZP, op)
+ DrawMask(dst, dst.Bounds().Add(image.Pt(x, y)), src, image.ZP, mask, image.ZP, op)
}
}
sy := y + sp.Y - r.Min.Y
my := y + mp.Y - r.Min.Y
for x := r.Min.X; x < r.Max.X; x++ {
- if !(image.Point{x, y}.In(b)) {
+ if !(image.Pt(x, y).In(b)) {
continue
}
sx := x + sp.X - r.Min.X
- if !(image.Point{sx, sy}.In(sb)) {
+ if !(image.Pt(sx, sy).In(sb)) {
continue
}
mx := x + mp.X - r.Min.X
- if !(image.Point{mx, my}.In(mb)) {
+ if !(image.Pt(mx, my).In(mb)) {
continue
}
m := image.NewRGBA(image.Rect(0, 0, 40, 30)).SubImage(r).(*image.RGBA)
b := m.Bounds()
c := color.RGBA{11, 0, 0, 255}
- src := &image.Uniform{c}
+ src := &image.Uniform{C: c}
check := func(desc string) {
for y := b.Min.Y; y < b.Max.Y; y++ {
for x := b.Min.X; x < b.Max.X; x++ {
check("pixel")
// Draw 1 row at a time.
c = color.RGBA{0, 22, 0, 255}
- src = &image.Uniform{c}
+ src = &image.Uniform{C: c}
for y := b.Min.Y; y < b.Max.Y; y++ {
DrawMask(m, image.Rect(b.Min.X, y, b.Max.X, y+1), src, image.ZP, nil, image.ZP, Src)
}
check("row")
// Draw 1 column at a time.
c = color.RGBA{0, 0, 33, 255}
- src = &image.Uniform{c}
+ src = &image.Uniform{C: c}
for x := b.Min.X; x < b.Max.X; x++ {
DrawMask(m, image.Rect(x, b.Min.Y, x+1, b.Max.Y), src, image.ZP, nil, image.ZP, Src)
}
check("column")
// Draw the whole image at once.
c = color.RGBA{44, 55, 66, 77}
- src = &image.Uniform{c}
+ src = &image.Uniform{C: c}
DrawMask(m, b, src, image.ZP, nil, image.ZP, Src)
check("whole")
}
"io"
)
-// An UnknownFormatErr indicates that decoding encountered an unknown format.
-var UnknownFormatErr = errors.New("image: unknown format")
+// ErrFormat indicates that decoding encountered an unknown format.
+var ErrFormat = errors.New("image: unknown format")
// A format holds an image format's name, magic header and how to decode it.
type format struct {
rr := asReader(r)
f := sniff(rr)
if f.decode == nil {
- return nil, "", UnknownFormatErr
+ return nil, "", ErrFormat
}
m, err := f.decode(rr)
return m, f.name, err
rr := asReader(r)
f := sniff(rr)
if f.decodeConfig == nil {
- return Config{}, "", UnknownFormatErr
+ return Config{}, "", ErrFormat
}
c, err := f.decodeConfig(rr)
return c, f.name, err
if err := d.decode(r, true); err != nil {
return image.Config{}, err
}
- return image.Config{d.globalColorMap, d.width, d.height}, nil
+ return image.Config{
+ ColorModel: d.globalColorMap,
+ Width: d.width,
+ Height: d.height,
+ }, nil
}
func init() {
// Package image implements a basic 2-D image library.
//
-// See "The Go image package" for an introduction to this package:
+// The fundamental interface is called Image. An Image contains colors, which
+// are described in the image/color package.
+//
+// Values of the Image interface are created either by calling functions such
+// as NewRGBA and NewPaletted, or by calling Decode on an io.Reader containing
+// image data in a format such as GIF, JPEG or PNG. Decoding any particular
+// image format requires the prior registration of a decoder function.
+// Registration is typically automatic as a side effect of initializing that
+// format's package so that, to decode a PNG image, it suffices to have
+// import _ "image/png"
+// in a program's main package. The _ means to import a package purely for its
+// initialization side effects.
+//
+// See "The Go image package" for more details:
// http://blog.golang.org/2011/09/go-image-package.html
package image
Width, Height int
}
-// Image is a finite rectangular grid of Colors drawn from a color model.
+// Image is a finite rectangular grid of color.Color values taken from a color
+// model.
type Image interface {
// ColorModel returns the Image's color model.
ColorModel() color.Model
Image
}
-// RGBA is an in-memory image of RGBAColor values.
+// RGBA is an in-memory image whose At method returns color.RGBA values.
type RGBA struct {
// Pix holds the image's pixels, in R, G, B, A order. The pixel at
// (x, y) starts at Pix[(y-Rect.Min.Y)*Stride + (x-Rect.Min.X)*4].
return &RGBA{buf, 4 * w, r}
}
-// RGBA64 is an in-memory image of RGBA64Color values.
+// RGBA64 is an in-memory image whose At method returns color.RGBA64 values.
type RGBA64 struct {
// Pix holds the image's pixels, in R, G, B, A order and big-endian format. The pixel at
// (x, y) starts at Pix[(y-Rect.Min.Y)*Stride + (x-Rect.Min.X)*8].
return &RGBA64{pix, 8 * w, r}
}
-// NRGBA is an in-memory image of NRGBAColor values.
+// NRGBA is an in-memory image whose At method returns color.NRGBA values.
type NRGBA struct {
// Pix holds the image's pixels, in R, G, B, A order. The pixel at
// (x, y) starts at Pix[(y-Rect.Min.Y)*Stride + (x-Rect.Min.X)*4].
return &NRGBA{pix, 4 * w, r}
}
-// NRGBA64 is an in-memory image of NRGBA64Color values.
+// NRGBA64 is an in-memory image whose At method returns color.NRGBA64 values.
type NRGBA64 struct {
// Pix holds the image's pixels, in R, G, B, A order and big-endian format. The pixel at
// (x, y) starts at Pix[(y-Rect.Min.Y)*Stride + (x-Rect.Min.X)*8].
return &NRGBA64{pix, 8 * w, r}
}
-// Alpha is an in-memory image of AlphaColor values.
+// Alpha is an in-memory image whose At method returns color.Alpha values.
type Alpha struct {
// Pix holds the image's pixels, as alpha values. The pixel at
// (x, y) starts at Pix[(y-Rect.Min.Y)*Stride + (x-Rect.Min.X)*1].
return &Alpha{pix, 1 * w, r}
}
-// Alpha16 is an in-memory image of Alpha16Color values.
+// Alpha16 is an in-memory image whose At method returns color.Alpha16 values.
type Alpha16 struct {
// Pix holds the image's pixels, as alpha values in big-endian format. The pixel at
// (x, y) starts at Pix[(y-Rect.Min.Y)*Stride + (x-Rect.Min.X)*2].
return &Alpha16{pix, 2 * w, r}
}
-// Gray is an in-memory image of GrayColor values.
+// Gray is an in-memory image whose At method returns color.Gray values.
type Gray struct {
// Pix holds the image's pixels, as gray values. The pixel at
// (x, y) starts at Pix[(y-Rect.Min.Y)*Stride + (x-Rect.Min.X)*1].
return &Gray{pix, 1 * w, r}
}
-// Gray16 is an in-memory image of Gray16Color values.
+// Gray16 is an in-memory image whose At method returns color.Gray16 values.
type Gray16 struct {
// Pix holds the image's pixels, as gray values in big-endian format. The pixel at
// (x, y) starts at Pix[(y-Rect.Min.Y)*Stride + (x-Rect.Min.X)*2].
}
switch d.nComp {
case nGrayComponent:
- return image.Config{color.GrayModel, d.width, d.height}, nil
+ return image.Config{
+ ColorModel: color.GrayModel,
+ Width: d.width,
+ Height: d.height,
+ }, nil
case nColorComponent:
- return image.Config{color.YCbCrModel, d.width, d.height}, nil
+ return image.Config{
+ ColorModel: color.YCbCrModel,
+ Width: d.width,
+ Height: d.height,
+ }, nil
}
return image.Config{}, FormatError("missing SOF marker")
}
for i := 0; i < 4; i++ {
xOff := (i & 1) * 8
yOff := (i & 2) * 4
- p := image.Point{x + xOff, y + yOff}
+ p := image.Pt(x+xOff, y+yOff)
if rgba != nil {
rgbaToYCbCr(rgba, p, &yBlock, &cbBlock[i], &crBlock[i])
} else {
continue
}
// Encode that image as JPEG.
- buf := bytes.NewBuffer(nil)
- err = Encode(buf, m0, &Options{Quality: tc.quality})
+ var buf bytes.Buffer
+ err = Encode(&buf, m0, &Options{Quality: tc.quality})
if err != nil {
t.Error(tc.filename, err)
continue
}
// Decode that JPEG.
- m1, err := Decode(buf)
+ m1, err := Decode(&buf)
if err != nil {
t.Error(tc.filename, err)
continue
func NewUniform(c color.Color) *Uniform {
return &Uniform{c}
}
-
-// Repeated is an infinite-sized Image that repeats another Image in both
-// directions. Repeated{i, p}.At(x, y) will equal i.At(x+p.X, y+p.Y) for all
-// points {x+p.X, y+p.Y} within i's Bounds.
-type Repeated struct {
- I Image
- Offset Point
-}
-
-func (r *Repeated) ColorModel() color.Model {
- return r.I.ColorModel()
-}
-
-func (r *Repeated) Bounds() Rectangle { return Rectangle{Point{-1e9, -1e9}, Point{1e9, 1e9}} }
-
-func (r *Repeated) At(x, y int) color.Color {
- p := Point{x, y}.Add(r.Offset).Mod(r.I.Bounds())
- return r.I.At(p.X, p.Y)
-}
-
-func NewRepeated(i Image, offset Point) *Repeated {
- return &Repeated{i, offset}
-}
case cbTCA16:
cm = color.NRGBA64Model
}
- return image.Config{cm, d.width, d.height}, nil
+ return image.Config{
+ ColorModel: cm,
+ Width: d.width,
+ Height: d.height,
+ }, nil
}
func init() {
return
}
var bw *bufio.Writer
- bw, e.err = bufio.NewWriterSize(e, 1<<15)
- if e.err != nil {
- return
- }
+ bw = bufio.NewWriterSize(e, 1<<15)
e.err = writeImage(bw, e.m, e.cb)
if e.err != nil {
return
}
func encodeDecode(m image.Image) (image.Image, error) {
- b := bytes.NewBuffer(nil)
- err := Encode(b, m)
+ var b bytes.Buffer
+ err := Encode(&b, m)
if err != nil {
return nil, err
}
- m, err = Decode(b)
+ m, err = Decode(&b)
if err != nil {
return nil, err
}
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package tiff
-
-import "io"
-
-// buffer buffers an io.Reader to satisfy io.ReaderAt.
-type buffer struct {
- r io.Reader
- buf []byte
-}
-
-func (b *buffer) ReadAt(p []byte, off int64) (int, error) {
- o := int(off)
- end := o + len(p)
- if int64(end) != off+int64(len(p)) {
- return 0, io.ErrUnexpectedEOF
- }
-
- m := len(b.buf)
- if end > m {
- if end > cap(b.buf) {
- newcap := 1024
- for newcap < end {
- newcap *= 2
- }
- newbuf := make([]byte, end, newcap)
- copy(newbuf, b.buf)
- b.buf = newbuf
- } else {
- b.buf = b.buf[:end]
- }
- if n, err := io.ReadFull(b.r, b.buf[m:end]); err != nil {
- end = m + n
- b.buf = b.buf[:end]
- return copy(p, b.buf[o:end]), err
- }
- }
-
- return copy(p, b.buf[o:end]), nil
-}
-
-// newReaderAt converts an io.Reader into an io.ReaderAt.
-func newReaderAt(r io.Reader) io.ReaderAt {
- if ra, ok := r.(io.ReaderAt); ok {
- return ra
- }
- return &buffer{
- r: r,
- buf: make([]byte, 0, 1024),
- }
-}
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package tiff
-
-import (
- "io"
- "strings"
- "testing"
-)
-
-var readAtTests = []struct {
- n int
- off int64
- s string
- err error
-}{
- {2, 0, "ab", nil},
- {6, 0, "abcdef", nil},
- {3, 3, "def", nil},
- {3, 5, "f", io.EOF},
- {3, 6, "", io.EOF},
-}
-
-func TestReadAt(t *testing.T) {
- r := newReaderAt(strings.NewReader("abcdef"))
- b := make([]byte, 10)
- for _, test := range readAtTests {
- n, err := r.ReadAt(b[:test.n], test.off)
- s := string(b[:n])
- if s != test.s || err != test.err {
- t.Errorf("buffer.ReadAt(<%v bytes>, %v): got %v, %q; want %v, %q", test.n, test.off, err, s, test.err, test.s)
- }
- }
-}
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package tiff
-
-import (
- "bufio"
- "io"
-)
-
-type byteReader interface {
- io.Reader
- io.ByteReader
-}
-
-// unpackBits decodes the PackBits-compressed data in src and returns the
-// uncompressed data.
-//
-// The PackBits compression format is described in section 9 (p. 42)
-// of the TIFF spec.
-func unpackBits(r io.Reader) ([]byte, error) {
- buf := make([]byte, 128)
- dst := make([]byte, 0, 1024)
- br, ok := r.(byteReader)
- if !ok {
- br = bufio.NewReader(r)
- }
-
- for {
- b, err := br.ReadByte()
- if err != nil {
- if err == io.EOF {
- return dst, nil
- }
- return nil, err
- }
- code := int(int8(b))
- switch {
- case code >= 0:
- n, err := io.ReadFull(br, buf[:code+1])
- if err != nil {
- return nil, err
- }
- dst = append(dst, buf[:n]...)
- case code == -128:
- // No-op.
- default:
- if b, err = br.ReadByte(); err != nil {
- return nil, err
- }
- for j := 0; j < 1-code; j++ {
- buf[j] = b
- }
- dst = append(dst, buf[:1-code]...)
- }
- }
- panic("unreachable")
-}
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package tiff
-
-// A tiff image file contains one or more images. The metadata
-// of each image is contained in an Image File Directory (IFD),
-// which contains entries of 12 bytes each and is described
-// on page 14-16 of the specification. An IFD entry consists of
-//
-// - a tag, which describes the signification of the entry,
-// - the data type and length of the entry,
-// - the data itself or a pointer to it if it is more than 4 bytes.
-//
-// The presence of a length means that each IFD is effectively an array.
-
-const (
- leHeader = "II\x2A\x00" // Header for little-endian files.
- beHeader = "MM\x00\x2A" // Header for big-endian files.
-
- ifdLen = 12 // Length of an IFD entry in bytes.
-)
-
-// Data types (p. 14-16 of the spec).
-const (
- dtByte = 1
- dtASCII = 2
- dtShort = 3
- dtLong = 4
- dtRational = 5
-)
-
-// The length of one instance of each data type in bytes.
-var lengths = [...]uint32{0, 1, 1, 2, 4, 8}
-
-// Tags (see p. 28-41 of the spec).
-const (
- tImageWidth = 256
- tImageLength = 257
- tBitsPerSample = 258
- tCompression = 259
- tPhotometricInterpretation = 262
-
- tStripOffsets = 273
- tSamplesPerPixel = 277
- tRowsPerStrip = 278
- tStripByteCounts = 279
-
- tXResolution = 282
- tYResolution = 283
- tResolutionUnit = 296
-
- tPredictor = 317
- tColorMap = 320
- tExtraSamples = 338
- tSampleFormat = 339
-)
-
-// Compression types (defined in various places in the spec and supplements).
-const (
- cNone = 1
- cCCITT = 2
- cG3 = 3 // Group 3 Fax.
- cG4 = 4 // Group 4 Fax.
- cLZW = 5
- cJPEGOld = 6 // Superseded by cJPEG.
- cJPEG = 7
- cDeflate = 8 // zlib compression.
- cPackBits = 32773
- cDeflateOld = 32946 // Superseded by cDeflate.
-)
-
-// Photometric interpretation values (see p. 37 of the spec).
-const (
- pWhiteIsZero = 0
- pBlackIsZero = 1
- pRGB = 2
- pPaletted = 3
- pTransMask = 4 // transparency mask
- pCMYK = 5
- pYCbCr = 6
- pCIELab = 8
-)
-
-// Values for the tPredictor tag (page 64-65 of the spec).
-const (
- prNone = 1
- prHorizontal = 2
-)
-
-// imageMode represents the mode of the image.
-type imageMode int
-
-const (
- mBilevel imageMode = iota
- mPaletted
- mGray
- mGrayInvert
- mRGB
- mRGBA
- mNRGBA
-)
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package tiff implements a TIFF image decoder.
-//
-// The TIFF specification is at http://partners.adobe.com/public/developer/en/tiff/TIFF6.pdf
-package tiff
-
-import (
- "compress/lzw"
- "compress/zlib"
- "encoding/binary"
- "image"
- "image/color"
- "io"
- "io/ioutil"
-)
-
-// A FormatError reports that the input is not a valid TIFF image.
-type FormatError string
-
-func (e FormatError) Error() string {
- return "tiff: invalid format: " + string(e)
-}
-
-// An UnsupportedError reports that the input uses a valid but
-// unimplemented feature.
-type UnsupportedError string
-
-func (e UnsupportedError) Error() string {
- return "tiff: unsupported feature: " + string(e)
-}
-
-// An InternalError reports that an internal error was encountered.
-type InternalError string
-
-func (e InternalError) Error() string {
- return "tiff: internal error: " + string(e)
-}
-
-type decoder struct {
- r io.ReaderAt
- byteOrder binary.ByteOrder
- config image.Config
- mode imageMode
- features map[int][]uint
- palette []color.Color
-
- buf []byte
- off int // Current offset in buf.
- v uint32 // Buffer value for reading with arbitrary bit depths.
- nbits uint // Remaining number of bits in v.
-}
-
-// firstVal returns the first uint of the features entry with the given tag,
-// or 0 if the tag does not exist.
-func (d *decoder) firstVal(tag int) uint {
- f := d.features[tag]
- if len(f) == 0 {
- return 0
- }
- return f[0]
-}
-
-// ifdUint decodes the IFD entry in p, which must be of the Byte, Short
-// or Long type, and returns the decoded uint values.
-func (d *decoder) ifdUint(p []byte) (u []uint, err error) {
- var raw []byte
- datatype := d.byteOrder.Uint16(p[2:4])
- count := d.byteOrder.Uint32(p[4:8])
- if datalen := lengths[datatype] * count; datalen > 4 {
- // The IFD contains a pointer to the real value.
- raw = make([]byte, datalen)
- _, err = d.r.ReadAt(raw, int64(d.byteOrder.Uint32(p[8:12])))
- } else {
- raw = p[8 : 8+datalen]
- }
- if err != nil {
- return nil, err
- }
-
- u = make([]uint, count)
- switch datatype {
- case dtByte:
- for i := uint32(0); i < count; i++ {
- u[i] = uint(raw[i])
- }
- case dtShort:
- for i := uint32(0); i < count; i++ {
- u[i] = uint(d.byteOrder.Uint16(raw[2*i : 2*(i+1)]))
- }
- case dtLong:
- for i := uint32(0); i < count; i++ {
- u[i] = uint(d.byteOrder.Uint32(raw[4*i : 4*(i+1)]))
- }
- default:
- return nil, UnsupportedError("data type")
- }
- return u, nil
-}
-
-// parseIFD decides whether the the IFD entry in p is "interesting" and
-// stows away the data in the decoder.
-func (d *decoder) parseIFD(p []byte) error {
- tag := d.byteOrder.Uint16(p[0:2])
- switch tag {
- case tBitsPerSample,
- tExtraSamples,
- tPhotometricInterpretation,
- tCompression,
- tPredictor,
- tStripOffsets,
- tStripByteCounts,
- tRowsPerStrip,
- tImageLength,
- tImageWidth:
- val, err := d.ifdUint(p)
- if err != nil {
- return err
- }
- d.features[int(tag)] = val
- case tColorMap:
- val, err := d.ifdUint(p)
- if err != nil {
- return err
- }
- numcolors := len(val) / 3
- if len(val)%3 != 0 || numcolors <= 0 || numcolors > 256 {
- return FormatError("bad ColorMap length")
- }
- d.palette = make([]color.Color, numcolors)
- for i := 0; i < numcolors; i++ {
- d.palette[i] = color.RGBA64{
- uint16(val[i]),
- uint16(val[i+numcolors]),
- uint16(val[i+2*numcolors]),
- 0xffff,
- }
- }
- case tSampleFormat:
- // Page 27 of the spec: If the SampleFormat is present and
- // the value is not 1 [= unsigned integer data], a Baseline
- // TIFF reader that cannot handle the SampleFormat value
- // must terminate the import process gracefully.
- val, err := d.ifdUint(p)
- if err != nil {
- return err
- }
- for _, v := range val {
- if v != 1 {
- return UnsupportedError("sample format")
- }
- }
- }
- return nil
-}
-
-// readBits reads n bits from the internal buffer starting at the current offset.
-func (d *decoder) readBits(n uint) uint32 {
- for d.nbits < n {
- d.v <<= 8
- d.v |= uint32(d.buf[d.off])
- d.off++
- d.nbits += 8
- }
- d.nbits -= n
- rv := d.v >> d.nbits
- d.v &^= rv << d.nbits
- return rv
-}
-
-// flushBits discards the unread bits in the buffer used by readBits.
-// It is used at the end of a line.
-func (d *decoder) flushBits() {
- d.v = 0
- d.nbits = 0
-}
-
-// decode decodes the raw data of an image.
-// It reads from d.buf and writes the strip with ymin <= y < ymax into dst.
-func (d *decoder) decode(dst image.Image, ymin, ymax int) error {
- d.off = 0
-
- // Apply horizontal predictor if necessary.
- // In this case, p contains the color difference to the preceding pixel.
- // See page 64-65 of the spec.
- if d.firstVal(tPredictor) == prHorizontal && d.firstVal(tBitsPerSample) == 8 {
- var off int
- spp := len(d.features[tBitsPerSample]) // samples per pixel
- for y := ymin; y < ymax; y++ {
- off += spp
- for x := 0; x < (dst.Bounds().Dx()-1)*spp; x++ {
- d.buf[off] += d.buf[off-spp]
- off++
- }
- }
- }
-
- switch d.mode {
- case mGray, mGrayInvert:
- img := dst.(*image.Gray)
- bpp := d.firstVal(tBitsPerSample)
- max := uint32((1 << bpp) - 1)
- for y := ymin; y < ymax; y++ {
- for x := img.Rect.Min.X; x < img.Rect.Max.X; x++ {
- v := uint8(d.readBits(bpp) * 0xff / max)
- if d.mode == mGrayInvert {
- v = 0xff - v
- }
- img.SetGray(x, y, color.Gray{v})
- }
- d.flushBits()
- }
- case mPaletted:
- img := dst.(*image.Paletted)
- bpp := d.firstVal(tBitsPerSample)
- for y := ymin; y < ymax; y++ {
- for x := img.Rect.Min.X; x < img.Rect.Max.X; x++ {
- img.SetColorIndex(x, y, uint8(d.readBits(bpp)))
- }
- d.flushBits()
- }
- case mRGB:
- img := dst.(*image.RGBA)
- min := img.PixOffset(0, ymin)
- max := img.PixOffset(0, ymax)
- var off int
- for i := min; i < max; i += 4 {
- img.Pix[i+0] = d.buf[off+0]
- img.Pix[i+1] = d.buf[off+1]
- img.Pix[i+2] = d.buf[off+2]
- img.Pix[i+3] = 0xff
- off += 3
- }
- case mNRGBA:
- img := dst.(*image.NRGBA)
- min := img.PixOffset(0, ymin)
- max := img.PixOffset(0, ymax)
- if len(d.buf) != max-min {
- return FormatError("short data strip")
- }
- copy(img.Pix[min:max], d.buf)
- case mRGBA:
- img := dst.(*image.RGBA)
- min := img.PixOffset(0, ymin)
- max := img.PixOffset(0, ymax)
- if len(d.buf) != max-min {
- return FormatError("short data strip")
- }
- copy(img.Pix[min:max], d.buf)
- }
-
- return nil
-}
-
-func newDecoder(r io.Reader) (*decoder, error) {
- d := &decoder{
- r: newReaderAt(r),
- features: make(map[int][]uint),
- }
-
- p := make([]byte, 8)
- if _, err := d.r.ReadAt(p, 0); err != nil {
- return nil, err
- }
- switch string(p[0:4]) {
- case leHeader:
- d.byteOrder = binary.LittleEndian
- case beHeader:
- d.byteOrder = binary.BigEndian
- default:
- return nil, FormatError("malformed header")
- }
-
- ifdOffset := int64(d.byteOrder.Uint32(p[4:8]))
-
- // The first two bytes contain the number of entries (12 bytes each).
- if _, err := d.r.ReadAt(p[0:2], ifdOffset); err != nil {
- return nil, err
- }
- numItems := int(d.byteOrder.Uint16(p[0:2]))
-
- // All IFD entries are read in one chunk.
- p = make([]byte, ifdLen*numItems)
- if _, err := d.r.ReadAt(p, ifdOffset+2); err != nil {
- return nil, err
- }
-
- for i := 0; i < len(p); i += ifdLen {
- if err := d.parseIFD(p[i : i+ifdLen]); err != nil {
- return nil, err
- }
- }
-
- d.config.Width = int(d.firstVal(tImageWidth))
- d.config.Height = int(d.firstVal(tImageLength))
-
- if _, ok := d.features[tBitsPerSample]; !ok {
- return nil, FormatError("BitsPerSample tag missing")
- }
-
- // Determine the image mode.
- switch d.firstVal(tPhotometricInterpretation) {
- case pRGB:
- for _, b := range d.features[tBitsPerSample] {
- if b != 8 {
- return nil, UnsupportedError("non-8-bit RGB image")
- }
- }
- d.config.ColorModel = color.RGBAModel
- // RGB images normally have 3 samples per pixel.
- // If there are more, ExtraSamples (p. 31-32 of the spec)
- // gives their meaning (usually an alpha channel).
- //
- // This implementation does not support extra samples
- // of an unspecified type.
- switch len(d.features[tBitsPerSample]) {
- case 3:
- d.mode = mRGB
- case 4:
- switch d.firstVal(tExtraSamples) {
- case 1:
- d.mode = mRGBA
- case 2:
- d.mode = mNRGBA
- d.config.ColorModel = color.NRGBAModel
- default:
- return nil, FormatError("wrong number of samples for RGB")
- }
- default:
- return nil, FormatError("wrong number of samples for RGB")
- }
- case pPaletted:
- d.mode = mPaletted
- d.config.ColorModel = color.Palette(d.palette)
- case pWhiteIsZero:
- d.mode = mGrayInvert
- d.config.ColorModel = color.GrayModel
- case pBlackIsZero:
- d.mode = mGray
- d.config.ColorModel = color.GrayModel
- default:
- return nil, UnsupportedError("color model")
- }
-
- return d, nil
-}
-
-// DecodeConfig returns the color model and dimensions of a TIFF image without
-// decoding the entire image.
-func DecodeConfig(r io.Reader) (image.Config, error) {
- d, err := newDecoder(r)
- if err != nil {
- return image.Config{}, err
- }
- return d.config, nil
-}
-
-// Decode reads a TIFF image from r and returns it as an image.Image.
-// The type of Image returned depends on the contents of the TIFF.
-func Decode(r io.Reader) (img image.Image, err error) {
- d, err := newDecoder(r)
- if err != nil {
- return
- }
-
- // Check if we have the right number of strips, offsets and counts.
- rps := int(d.firstVal(tRowsPerStrip))
- if rps == 0 {
- // Assume only one strip.
- rps = d.config.Height
- }
- numStrips := (d.config.Height + rps - 1) / rps
- if rps == 0 || len(d.features[tStripOffsets]) < numStrips || len(d.features[tStripByteCounts]) < numStrips {
- return nil, FormatError("inconsistent header")
- }
-
- switch d.mode {
- case mGray, mGrayInvert:
- img = image.NewGray(image.Rect(0, 0, d.config.Width, d.config.Height))
- case mPaletted:
- img = image.NewPaletted(image.Rect(0, 0, d.config.Width, d.config.Height), d.palette)
- case mNRGBA:
- img = image.NewNRGBA(image.Rect(0, 0, d.config.Width, d.config.Height))
- case mRGB, mRGBA:
- img = image.NewRGBA(image.Rect(0, 0, d.config.Width, d.config.Height))
- }
-
- for i := 0; i < numStrips; i++ {
- ymin := i * rps
- // The last strip may be shorter.
- if i == numStrips-1 && d.config.Height%rps != 0 {
- rps = d.config.Height % rps
- }
- offset := int64(d.features[tStripOffsets][i])
- n := int64(d.features[tStripByteCounts][i])
- switch d.firstVal(tCompression) {
- case cNone:
- // TODO(bsiegert): Avoid copy if r is a tiff.buffer.
- d.buf = make([]byte, n)
- _, err = d.r.ReadAt(d.buf, offset)
- case cLZW:
- r := lzw.NewReader(io.NewSectionReader(d.r, offset, n), lzw.MSB, 8)
- d.buf, err = ioutil.ReadAll(r)
- r.Close()
- case cDeflate, cDeflateOld:
- r, err := zlib.NewReader(io.NewSectionReader(d.r, offset, n))
- if err != nil {
- return nil, err
- }
- d.buf, err = ioutil.ReadAll(r)
- r.Close()
- case cPackBits:
- d.buf, err = unpackBits(io.NewSectionReader(d.r, offset, n))
- default:
- err = UnsupportedError("compression")
- }
- if err != nil {
- return
- }
- err = d.decode(img, ymin, ymin+rps)
- }
- return
-}
-
-func init() {
- image.RegisterFormat("tiff", leHeader, Decode, DecodeConfig)
- image.RegisterFormat("tiff", beHeader, Decode, DecodeConfig)
-}
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package tiff
-
-import (
- "image"
- "io/ioutil"
- "os"
- "strings"
- "testing"
-)
-
-// Read makes *buffer implements io.Reader, so that we can pass one to Decode.
-func (*buffer) Read([]byte) (int, error) {
- panic("unimplemented")
-}
-
-// TestNoRPS tries to decode an image that has no RowsPerStrip tag.
-// The tag is mandatory according to the spec but some software omits
-// it in the case of a single strip.
-func TestNoRPS(t *testing.T) {
- f, err := os.Open("testdata/no_rps.tiff")
- if err != nil {
- t.Fatal(err)
- }
- defer f.Close()
- _, err = Decode(f)
- if err != nil {
- t.Fatal(err)
- }
-}
-
-// TestUnpackBits tests the decoding of PackBits-encoded data.
-func TestUnpackBits(t *testing.T) {
- var unpackBitsTests = []struct {
- compressed string
- uncompressed string
- }{{
- // Example data from Wikipedia.
- "\xfe\xaa\x02\x80\x00\x2a\xfd\xaa\x03\x80\x00\x2a\x22\xf7\xaa",
- "\xaa\xaa\xaa\x80\x00\x2a\xaa\xaa\xaa\xaa\x80\x00\x2a\x22\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa",
- }}
- for _, u := range unpackBitsTests {
- buf, err := unpackBits(strings.NewReader(u.compressed))
- if err != nil {
- t.Fatal(err)
- }
- if string(buf) != u.uncompressed {
- t.Fatalf("unpackBits: want %x, got %x", u.uncompressed, buf)
- }
- }
-}
-
-// TestDecompress tests that decoding some TIFF images that use different
-// compression formats result in the same pixel data.
-func TestDecompress(t *testing.T) {
- var decompressTests = []string{
- "bw-uncompressed.tiff",
- "bw-deflate.tiff",
- "bw-packbits.tiff",
- }
- var img0 image.Image
- for _, name := range decompressTests {
- f, err := os.Open("testdata/" + name)
- if err != nil {
- t.Fatal(err)
- }
- defer f.Close()
- if img0 == nil {
- img0, err = Decode(f)
- if err != nil {
- t.Fatalf("decoding %s: %v", name, err)
- }
- continue
- }
-
- img1, err := Decode(f)
- if err != nil {
- t.Fatalf("decoding %s: %v", name, err)
- }
- b := img1.Bounds()
- // Compare images.
- if !b.Eq(img0.Bounds()) {
- t.Fatalf("wrong image size: want %s, got %s", img0.Bounds(), b)
- }
- for y := b.Min.Y; y < b.Max.Y; y++ {
- for x := b.Min.X; x < b.Max.X; x++ {
- c0 := img0.At(x, y)
- c1 := img1.At(x, y)
- r0, g0, b0, a0 := c0.RGBA()
- r1, g1, b1, a1 := c1.RGBA()
- if r0 != r1 || g0 != g1 || b0 != b1 || a0 != a1 {
- t.Fatalf("pixel at (%d, %d) has wrong color: want %v, got %v", x, y, c0, c1)
- }
- }
- }
- }
-}
-
-const filename = "testdata/video-001-uncompressed.tiff"
-
-// BenchmarkDecode benchmarks the decoding of an image.
-func BenchmarkDecode(b *testing.B) {
- b.StopTimer()
- contents, err := ioutil.ReadFile(filename)
- if err != nil {
- panic(err)
- }
- r := &buffer{buf: contents}
- b.StartTimer()
- for i := 0; i < b.N; i++ {
- _, err := Decode(r)
- if err != nil {
- b.Fatal("Decode:", err)
- }
- }
-}
// abstract the functionality, plus some other related primitives.
package io
-// Error represents an unexpected I/O behavior.
-type Error struct {
- ErrorString string
-}
-
-func (err *Error) Error() string { return err.ErrorString }
+import (
+ "errors"
+)
// ErrShortWrite means that a write accepted fewer bytes than requested
// but failed to return an explicit error.
-var ErrShortWrite error = &Error{"short write"}
+var ErrShortWrite = errors.New("short write")
// ErrShortBuffer means that a read required a longer buffer than was provided.
-var ErrShortBuffer error = &Error{"short buffer"}
+var ErrShortBuffer = errors.New("short buffer")
// EOF is the error returned by Read when no more input is available.
// Functions should return EOF only to signal a graceful end of input.
// If the EOF occurs unexpectedly in a structured data stream,
// the appropriate error is either ErrUnexpectedEOF or some other error
// giving more detail.
-var EOF error = &Error{"EOF"}
+var EOF = errors.New("EOF")
// ErrUnexpectedEOF means that EOF was encountered in the
// middle of reading a fixed-size block or data structure.
-var ErrUnexpectedEOF error = &Error{"unexpected EOF"}
+var ErrUnexpectedEOF = errors.New("unexpected EOF")
// Reader is the interface that wraps the basic Read method.
//
}
// WriteString writes the contents of the string s to w, which accepts an array of bytes.
+// If w already implements a WriteString method, it is invoked directly.
func WriteString(w Writer, s string) (n int, err error) {
if sw, ok := w.(stringWriter); ok {
return sw.WriteString(s)
// (including EOF), so can CopyN.
//
// If dst implements the ReaderFrom interface,
-// the copy is implemented by calling dst.ReadFrom(src).
+// the copy is implemented using it.
func CopyN(dst Writer, src Reader, n int64) (written int64, err error) {
// If the writer has a ReadFrom method, use it to do the copy.
// Avoids a buffer allocation and a copy.
return
}
-var errWhence = &Error{"Seek: invalid whence"}
-var errOffset = &Error{"Seek: invalid offset"}
+var errWhence = errors.New("Seek: invalid whence")
+var errOffset = errors.New("Seek: invalid offset")
func (s *SectionReader) Seek(offset int64, whence int) (ret int64, err error) {
switch whence {
}
// ReadAll reads from r until an error or EOF and returns the data it read.
+// A successful call returns err == nil, not err == EOF. Because ReadAll is
+// defined to read from src until EOF, it does not treat an EOF from Read
+// as an error to be reported.
func ReadAll(r io.Reader) ([]byte, error) {
return readAll(r, bytes.MinRead)
}
// ReadFile reads the file named by filename and returns the contents.
+// A successful call returns err == nil, not err == EOF. Because ReadFile
+// reads the whole file, it does not treat an EOF from Read as an error
+// to be reported.
func ReadFile(filename string) ([]byte, error) {
f, err := os.Open(filename)
if err != nil {
package io
-import "sync"
+import (
+ "errors"
+ "sync"
+)
// ErrClosedPipe is the error used for read or write operations on a closed pipe.
-var ErrClosedPipe = &Error{"io: read/write on closed pipe"}
+var ErrClosedPipe = errors.New("io: read/write on closed pipe")
type pipeResult struct {
n int
Ln2 = 6.93147180559945286227e-01 // 0x3FE62E42FEFA39EF
Large = 1 << 28 // 2**28
)
- // TODO(rsc): Remove manual inlining of IsNaN
- // when compiler does it for us
// first case is special case
switch {
- case x < 1 || x != x: // x < 1 || IsNaN(x):
+ case x < 1 || IsNaN(x):
return NaN()
case x == 1:
return 0
NearZero = 1.0 / (1 << 28) // 2**-28
Large = 1 << 28 // 2**28
)
- // TODO(rsc): Remove manual inlining of IsNaN, IsInf
- // when compiler does it for us
// special cases
- if x != x || x > MaxFloat64 || x < -MaxFloat64 { // IsNaN(x) || IsInf(x, 0)
+ if IsNaN(x) || IsInf(x, 0) {
return x
}
sign := false
}
func atan2(y, x float64) float64 {
- // TODO(rsc): Remove manual inlining of IsNaN, IsInf
- // when compiler does it for us
// special cases
switch {
- case y != y || x != x: // IsNaN(y) || IsNaN(x):
+ case IsNaN(y) || IsNaN(x):
return NaN()
case y == 0:
if x >= 0 && !Signbit(x) {
return Copysign(Pi, y)
case x == 0:
return Copysign(Pi/2, y)
- case x < -MaxFloat64 || x > MaxFloat64: // IsInf(x, 0):
- if x > MaxFloat64 { // IsInf(x, 1) {
+ case IsInf(x, 0):
+ if IsInf(x, 1) {
switch {
- case y < -MaxFloat64 || y > MaxFloat64: // IsInf(y, -1) || IsInf(y, 1):
+ case IsInf(y, 0):
return Copysign(Pi/4, y)
default:
return Copysign(0, y)
}
}
switch {
- case y < -MaxFloat64 || y > MaxFloat64: // IsInf(y, -1) || IsInf(y, 1):
+ case IsInf(y, 0):
return Copysign(3*Pi/4, y)
default:
return Copysign(Pi, y)
}
- case y < -MaxFloat64 || y > MaxFloat64: //IsInf(y, 0):
+ case IsInf(y, 0):
return Copysign(Pi/2, y)
}
// Atanh(NaN) = NaN
func Atanh(x float64) float64 {
const NearZero = 1.0 / (1 << 28) // 2**-28
- // TODO(rsc): Remove manual inlining of IsNaN
- // when compiler does it for us
// special cases
switch {
- case x < -1 || x > 1 || x != x: // x < -1 || x > 1 || IsNaN(x):
+ case x < -1 || x > 1 || IsNaN(x):
return NaN()
case x == 1:
return Inf(1)
package big
-// TODO(gri) Decide if Word needs to remain exported.
-
+// A Word represents a single digit of a multi-precision unsigned integer.
type Word uintptr
const (
return z
}
+// Bits provides raw (unchecked but fast) access to x by returning its
+// absolute value as a little-endian Word slice. The result and x share
+// the same underlying array.
+// Bits is intended to support implementation of missing low-level Int
+// functionality outside this package; it should be avoided otherwise.
+func (x *Int) Bits() []Word {
+ return x.abs
+}
+
+// SetBits provides raw (unchecked but fast) access to z by setting its
+// value to abs, interpreted as a little-endian Word slice, and returning
+// z. The result and abs share the same underlying array.
+// SetBits is intended to support implementation of missing low-level Int
+// functionality outside this package; it should be avoided otherwise.
+func (z *Int) SetBits(abs []Word) *Int {
+ z.abs = nat(abs).norm()
+ z.neg = false
+ return z
+}
+
// Abs sets z to |x| (the absolute value of x) and returns z.
func (z *Int) Abs(x *Int) *Int {
z.Set(x)
// r = x - y*q
//
// (See Daan Leijen, ``Division and Modulus for Computer Scientists''.)
+// See DivMod for Euclidean division and modulus (unlike Go).
//
func (z *Int) QuoRem(x, y, r *Int) (*Int, *Int) {
z.abs, r.abs = z.abs.div(r.abs, x.abs, y.abs)
// div and mod''. ACM Transactions on Programming Languages and
// Systems (TOPLAS), 14(2):127-144, New York, NY, USA, 4/1992.
// ACM press.)
+// See QuoRem for T-division and modulus (like Go).
//
func (z *Int) DivMod(x, y, m *Int) (*Int, *Int) {
y0 := y // save y
}
// Bytes returns the absolute value of z as a big-endian byte slice.
-func (z *Int) Bytes() []byte {
- buf := make([]byte, len(z.abs)*_S)
- return buf[z.abs.bytes(buf):]
+func (x *Int) Bytes() []byte {
+ buf := make([]byte, len(x.abs)*_S)
+ return buf[x.abs.bytes(buf):]
}
// BitLen returns the length of the absolute value of z in bits.
// The bit length of 0 is 0.
-func (z *Int) BitLen() int {
- return z.abs.bitLen()
+func (x *Int) BitLen() int {
+ return x.abs.bitLen()
}
-// Exp sets z = x**y mod m. If m is nil, z = x**y.
+// Exp sets z = x**y mod m and returns z. If m is nil, z = x**y.
// See Knuth, volume 2, section 4.6.3.
func (z *Int) Exp(x, y, m *Int) *Int {
if y.neg || len(y.abs) == 0 {
return z
}
-// GcdInt sets d to the greatest common divisor of a and b, which must be
-// positive numbers.
-// If x and y are not nil, GcdInt sets x and y such that d = a*x + b*y.
-// If either a or b is not positive, GcdInt sets d = x = y = 0.
-func GcdInt(d, x, y, a, b *Int) {
+// GCD sets z to the greatest common divisor of a and b, which must be
+// positive numbers, and returns z.
+// If x and y are not nil, GCD sets x and y such that z = a*x + b*y.
+// If either a or b is not positive, GCD sets z = x = y = 0.
+func (z *Int) GCD(x, y, a, b *Int) *Int {
if a.neg || b.neg {
- d.SetInt64(0)
+ z.SetInt64(0)
if x != nil {
x.SetInt64(0)
}
if y != nil {
y.SetInt64(0)
}
- return
+ return z
}
A := new(Int).Set(a)
*y = *lastY
}
- *d = *A
+ *z = *A
+ return z
}
-// ProbablyPrime performs n Miller-Rabin tests to check whether z is prime.
-// If it returns true, z is prime with probability 1 - 1/4^n.
-// If it returns false, z is not prime.
-func ProbablyPrime(z *Int, n int) bool {
- return !z.neg && z.abs.probablyPrime(n)
+// ProbablyPrime performs n Miller-Rabin tests to check whether x is prime.
+// If it returns true, x is prime with probability 1 - 1/4^n.
+// If it returns false, x is not prime.
+func (x *Int) ProbablyPrime(n int) bool {
+ return !x.neg && x.abs.probablyPrime(n)
}
// Rand sets z to a pseudo-random number in [0, n) and returns z.
// p is a prime) and returns z.
func (z *Int) ModInverse(g, p *Int) *Int {
var d Int
- GcdInt(&d, z, nil, g, p)
+ d.GCD(z, nil, g, p)
// x and y are such that g*x + p*y = d. Since p is prime, d = 1. Taking
// that modulo p results in g*x = 1, therefore x is the inverse element.
if z.neg {
return z
}
-// Bit returns the value of the i'th bit of z. That is, it
-// returns (z>>i)&1. The bit index i must be >= 0.
-func (z *Int) Bit(i int) uint {
+// Bit returns the value of the i'th bit of x. That is, it
+// returns (x>>i)&1. The bit index i must be >= 0.
+func (x *Int) Bit(i int) uint {
if i < 0 {
panic("negative bit index")
}
- if z.neg {
- t := nat(nil).sub(z.abs, natOne)
+ if x.neg {
+ t := nat(nil).sub(x.abs, natOne)
return t.bit(uint(i)) ^ 1
}
- return z.abs.bit(uint(i))
+ return x.abs.bit(uint(i))
}
// SetBit sets z to x, with x's i'th bit set to b (0 or 1).
const intGobVersion byte = 1
// GobEncode implements the gob.GobEncoder interface.
-func (z *Int) GobEncode() ([]byte, error) {
- buf := make([]byte, 1+len(z.abs)*_S) // extra byte for version and sign bit
- i := z.abs.bytes(buf) - 1 // i >= 0
+func (x *Int) GobEncode() ([]byte, error) {
+ buf := make([]byte, 1+len(x.abs)*_S) // extra byte for version and sign bit
+ i := x.abs.bytes(buf) - 1 // i >= 0
b := intGobVersion << 1 // make space for sign bit
- if z.neg {
+ if x.neg {
b |= 1
}
buf[i] = b
y := new(Int)
d := new(Int)
- GcdInt(d, x, y, a, b)
+ d.GCD(x, y, a, b)
x.Mul(x, a)
y.Mul(y, b)
x.Add(x, y)
expectedY := NewInt(test.y)
expectedD := NewInt(test.d)
- GcdInt(d, x, y, a, b)
+ d.GCD(x, y, a, b)
if expectedX.Cmp(x) != 0 ||
expectedY.Cmp(y) != 0 ||
}
for i, s := range primes {
p, _ := new(Int).SetString(s, 10)
- if !ProbablyPrime(p, nreps) {
+ if !p.ProbablyPrime(nreps) {
t.Errorf("#%d prime found to be non-prime (%s)", i, s)
}
}
for i, s := range composites {
c, _ := new(Int).SetString(s, 10)
- if ProbablyPrime(c, nreps) {
+ if c.ProbablyPrime(nreps) {
t.Errorf("#%d composite found to be prime (%s)", i, s)
}
if testing.Short() {
// - Int signed integers
// - Rat rational numbers
//
-// All methods on Int take the result as the receiver; if it is one
-// of the operands it may be overwritten (and its memory reused).
-// To enable chaining of operations, the result is also returned.
+// Methods are typically of the form:
+//
+// func (z *Int) Op(x, y *Int) *Int (similar for *Rat)
+//
+// and implement operations z = x Op y with the result as receiver; if it
+// is one of the operands it may be overwritten (and its memory reused).
+// To enable chaining of operations, the result is also returned. Methods
+// returning a result other than *Int or *Rat take one of the operands as
+// the receiver.
//
package big
}
// String returns a string representation of z in the form "a/b" (even if b == 1).
-func (z *Rat) String() string {
+func (x *Rat) String() string {
s := "/1"
- if len(z.b) != 0 {
- s = "/" + z.b.decimalString()
+ if len(x.b) != 0 {
+ s = "/" + x.b.decimalString()
}
- return z.a.String() + s
+ return x.a.String() + s
}
// RatString returns a string representation of z in the form "a/b" if b != 1,
// and in the form "a" if b == 1.
-func (z *Rat) RatString() string {
- if z.IsInt() {
- return z.a.String()
+func (x *Rat) RatString() string {
+ if x.IsInt() {
+ return x.a.String()
}
- return z.String()
+ return x.String()
}
// FloatString returns a string representation of z in decimal form with prec
// digits of precision after the decimal point and the last digit rounded.
-func (z *Rat) FloatString(prec int) string {
- if z.IsInt() {
- s := z.a.String()
+func (x *Rat) FloatString(prec int) string {
+ if x.IsInt() {
+ s := x.a.String()
if prec > 0 {
s += "." + strings.Repeat("0", prec)
}
return s
}
- // z.b != 0
+ // x.b != 0
- q, r := nat(nil).div(nat(nil), z.a.abs, z.b)
+ q, r := nat(nil).div(nat(nil), x.a.abs, x.b)
p := natOne
if prec > 0 {
}
r = r.mul(r, p)
- r, r2 := r.div(nat(nil), r, z.b)
+ r, r2 := r.div(nat(nil), r, x.b)
// see if we need to round up
r2 = r2.add(r2, r2)
- if z.b.cmp(r2) <= 0 {
+ if x.b.cmp(r2) <= 0 {
r = r.add(r, natOne)
if r.cmp(p) >= 0 {
q = nat(nil).add(q, natOne)
}
s := q.decimalString()
- if z.a.neg {
+ if x.a.neg {
s = "-" + s
}
const ratGobVersion byte = 1
// GobEncode implements the gob.GobEncoder interface.
-func (z *Rat) GobEncode() ([]byte, error) {
- buf := make([]byte, 1+4+(len(z.a.abs)+len(z.b))*_S) // extra bytes for version and sign bit (1), and numerator length (4)
- i := z.b.bytes(buf)
- j := z.a.abs.bytes(buf[0:i])
+func (x *Rat) GobEncode() ([]byte, error) {
+ buf := make([]byte, 1+4+(len(x.a.abs)+len(x.b))*_S) // extra bytes for version and sign bit (1), and numerator length (4)
+ i := x.b.bytes(buf)
+ j := x.a.abs.bytes(buf[0:i])
n := i - j
if int(uint32(n)) != n {
// this should never happen
binary.BigEndian.PutUint32(buf[j-4:j], uint32(n))
j -= 1 + 4
b := ratGobVersion << 1 // make space for sign bit
- if z.a.neg {
+ if x.a.neg {
b |= 1
}
buf[j] = b
C3 = 6.46502159e-02
C4 = 1.412333954e-01
)
- // TODO(rsc): Remove manual inlining of IsNaN, IsInf
- // when compiler does it for us
// special cases
switch {
- case x == 0 || x != x || x < -MaxFloat64 || x > MaxFloat64: // x == 0 || IsNaN(x) || IsInf(x, 0):
+ case x == 0 || IsNaN(x) || IsInf(x, 0):
return x
}
sign := false
}
func max(x, y float64) float64 {
- // TODO(rsc): Remove manual inlining of IsNaN, IsInf
- // when compiler does it for us
// special cases
switch {
- case x > MaxFloat64 || y > MaxFloat64: // IsInf(x, 1) || IsInf(y, 1):
+ case IsInf(x, 1) || IsInf(y, 1):
return Inf(1)
- case x != x || y != y: // IsNaN(x) || IsNaN(y):
+ case IsNaN(x) || IsNaN(y):
return NaN()
case x == 0 && x == y:
if Signbit(x) {
}
func min(x, y float64) float64 {
- // TODO(rsc): Remove manual inlining of IsNaN, IsInf
- // when compiler does it for us
// special cases
switch {
- case x < -MaxFloat64 || y < -MaxFloat64: // IsInf(x, -1) || IsInf(y, -1):
+ case IsInf(x, -1) || IsInf(y, -1):
return Inf(-1)
- case x != x || y != y: // IsNaN(x) || IsNaN(y):
+ case IsNaN(x) || IsNaN(y):
return NaN()
case x == 0 && x == y:
if Signbit(x) {
Small = 1.0 / (1 << 28) // 2**-28
)
// special cases
- // TODO(rsc): Remove manual inlining of IsNaN, IsInf
- // when compiler does it for us
switch {
- case x != x: // IsNaN(x):
+ case IsNaN(x):
return NaN()
- case x > MaxFloat64: // IsInf(x, 1):
+ case IsInf(x, 1):
return 1
- case x < -MaxFloat64: // IsInf(x, -1):
+ case IsInf(x, -1):
return -1
}
sign := false
func Erfc(x float64) float64 {
const Tiny = 1.0 / (1 << 56) // 2**-56
// special cases
- // TODO(rsc): Remove manual inlining of IsNaN, IsInf
- // when compiler does it for us
switch {
- case x != x: // IsNaN(x):
+ case IsNaN(x):
return NaN()
- case x > MaxFloat64: // IsInf(x, 1):
+ case IsInf(x, 1):
return 0
- case x < -MaxFloat64: // IsInf(x, -1):
+ case IsInf(x, -1):
return 2
}
sign := false
NearZero = 1.0 / (1 << 28) // 2**-28
)
- // TODO(rsc): Remove manual inlining of IsNaN, IsInf
- // when compiler does it for us
// special cases
switch {
- case x != x || x > MaxFloat64: // IsNaN(x) || IsInf(x, 1):
+ case IsNaN(x) || IsInf(x, 1):
return x
- case x < -MaxFloat64: // IsInf(x, -1):
+ case IsInf(x, -1):
return 0
case x > Overflow:
return Inf(1)
Underflow = -1.0740e+03
)
- // TODO: remove manual inlining of IsNaN and IsInf
- // when compiler does it for us
// special cases
switch {
- case x != x || x > MaxFloat64: // IsNaN(x) || IsInf(x, 1):
+ case IsNaN(x) || IsInf(x, 1):
return x
- case x < -MaxFloat64: // IsInf(x, -1):
+ case IsInf(x, -1):
return 0
case x > Overflow:
return Inf(1)
)
// special cases
- // TODO(rsc): Remove manual inlining of IsNaN, IsInf
- // when compiler does it for us
switch {
- case x > MaxFloat64 || x != x: // IsInf(x, 1) || IsNaN(x):
+ case IsInf(x, 1) || IsNaN(x):
return x
- case x < -MaxFloat64: // IsInf(x, -1):
+ case IsInf(x, -1):
return -1
}
}
func floor(x float64) float64 {
- // TODO(rsc): Remove manual inlining of IsNaN, IsInf
- // when compiler does it for us
- if x == 0 || x != x || x > MaxFloat64 || x < -MaxFloat64 { // x == 0 || IsNaN(x) || IsInf(x, 0)
+ if x == 0 || IsNaN(x) || IsInf(x, 0) {
return x
}
if x < 0 {
}
func trunc(x float64) float64 {
- // TODO(rsc): Remove manual inlining of IsNaN, IsInf
- // when compiler does it for us
- if x == 0 || x != x || x > MaxFloat64 || x < -MaxFloat64 { // x == 0 || IsNaN(x) || IsInf(x, 0)
+ if x == 0 || IsNaN(x) || IsInf(x, 0) {
return x
}
d, _ := Modf(x)
}
func frexp(f float64) (frac float64, exp int) {
- // TODO(rsc): Remove manual inlining of IsNaN, IsInf
- // when compiler does it for us
// special cases
switch {
case f == 0:
return f, 0 // correctly return -0
- case f < -MaxFloat64 || f > MaxFloat64 || f != f: // IsInf(f, 0) || IsNaN(f):
+ case IsInf(f, 0) || IsNaN(f):
return f, 0
}
f, exp = normalize(f)
const Euler = 0.57721566490153286060651209008240243104215933593992 // A001620
// special cases
switch {
- case x < -MaxFloat64 || x != x: // IsInf(x, -1) || IsNaN(x):
+ case IsInf(x, -1) || IsNaN(x):
return x
case x < -170.5674972726612 || x > 171.61447887182298:
return Inf(1)
}
func hypot(p, q float64) float64 {
- // TODO(rsc): Remove manual inlining of IsNaN, IsInf
- // when compiler does it for us
// special cases
switch {
- case p < -MaxFloat64 || p > MaxFloat64 || q < -MaxFloat64 || q > MaxFloat64: // IsInf(p, 0) || IsInf(q, 0):
+ case IsInf(p, 0) || IsInf(q, 0):
return Inf(1)
- case p != p || q != q: // IsNaN(p) || IsNaN(q):
+ case IsNaN(p) || IsNaN(q):
return NaN()
}
if p < 0 {
S03 = 5.13546550207318111446e-07 // 0x3EA13B54CE84D5A9
S04 = 1.16614003333790000205e-09 // 0x3E1408BCF4745D8F
)
- // TODO(rsc): Remove manual inlining of IsNaN, IsInf
- // when compiler does it for us
// special cases
switch {
- case x != x: // IsNaN(x)
+ case IsNaN(x):
return x
- case x < -MaxFloat64 || x > MaxFloat64: // IsInf(x, 0):
+ case IsInf(x, 0):
return 0
case x == 0:
return 1
V03 = 2.59150851840457805467e-07 // 0x3E91642D7FF202FD
V04 = 4.41110311332675467403e-10 // 0x3DFE50183BD6D9EF
)
- // TODO(rsc): Remove manual inlining of IsNaN, IsInf
- // when compiler does it for us
// special cases
switch {
- case x < 0 || x != x: // x < 0 || IsNaN(x):
+ case x < 0 || IsNaN(x):
return NaN()
- case x > MaxFloat64: // IsInf(x, 1):
+ case IsInf(x, 1):
return 0
case x == 0:
return Inf(-1)
S04 = 5.04636257076217042715e-09 // 0x3E35AC88C97DFF2C
S05 = 1.23542274426137913908e-11 // 0x3DAB2ACFCFB97ED8
)
- // TODO(rsc): Remove manual inlining of IsNaN, IsInf
- // when compiler does it for us
// special cases
switch {
- case x != x: // IsNaN(x)
+ case IsNaN(x):
return x
- case x < -MaxFloat64 || x > MaxFloat64 || x == 0: // IsInf(x, 0) || x == 0:
+ case IsInf(x, 0) || x == 0:
return 0
}
V03 = 6.22741452364621501295e-09 // 0x3E3ABF1D5BA69A86
V04 = 1.66559246207992079114e-11 // 0x3DB25039DACA772A
)
- // TODO(rsc): Remove manual inlining of IsNaN, IsInf
- // when compiler does it for us
// special cases
switch {
- case x < 0 || x != x: // x < 0 || IsNaN(x):
+ case x < 0 || IsNaN(x):
return NaN()
- case x > MaxFloat64: // IsInf(x, 1):
+ case IsInf(x, 1):
return 0
case x == 0:
return Inf(-1)
TwoM29 = 1.0 / (1 << 29) // 2**-29 0x3e10000000000000
Two302 = 1 << 302 // 2**302 0x52D0000000000000
)
- // TODO(rsc): Remove manual inlining of IsNaN, IsInf
- // when compiler does it for us
// special cases
switch {
- case x != x: // IsNaN(x)
+ case IsNaN(x):
return x
- case x < -MaxFloat64 || x > MaxFloat64: // IsInf(x, 0):
+ case IsInf(x, 0):
return 0
}
// J(-n, x) = (-1)**n * J(n, x), J(n, -x) = (-1)**n * J(n, x)
// Y1(n, NaN) = NaN
func Yn(n int, x float64) float64 {
const Two302 = 1 << 302 // 2**302 0x52D0000000000000
- // TODO(rsc): Remove manual inlining of IsNaN, IsInf
- // when compiler does it for us
// special cases
switch {
- case x < 0 || x != x: // x < 0 || IsNaN(x):
+ case x < 0 || IsNaN(x):
return NaN()
- case x > MaxFloat64: // IsInf(x, 1)
+ case IsInf(x, 1):
return 0
}
a := Y0(x)
b = Y1(x)
// quit if b is -inf
- for i := 1; i < n && b >= -MaxFloat64; i++ { // for i := 1; i < n && !IsInf(b, -1); i++ {
+ for i := 1; i < n && !IsInf(b, -1); i++ {
a, b = b, (float64(i+i)/x)*b-a
}
}
}
func ldexp(frac float64, exp int) float64 {
- // TODO(rsc): Remove manual inlining of IsNaN, IsInf
- // when compiler does it for us
// special cases
switch {
case frac == 0:
return frac // correctly return -0
- case frac < -MaxFloat64 || frac > MaxFloat64 || frac != frac: // IsInf(frac, 0) || IsNaN(frac):
+ case IsInf(frac, 0) || IsNaN(frac):
return frac
}
frac, e := normalize(frac)
// Tt = -(tail of Tf)
Tt = -3.63867699703950536541e-18 // 0xBC50C7CAA48A971F
)
- // TODO(rsc): Remove manual inlining of IsNaN, IsInf
- // when compiler does it for us
// special cases
sign = 1
switch {
- case x != x: // IsNaN(x):
+ case IsNaN(x):
lgamma = x
return
- case x < -MaxFloat64 || x > MaxFloat64: // IsInf(x, 0):
+ case IsInf(x, 0):
lgamma = x
return
case x == 0:
L7 = 1.479819860511658591e-01 /* 3FC2F112 DF3E5244 */
)
- // TODO(rsc): Remove manual inlining of IsNaN, IsInf
- // when compiler does it for us
// special cases
switch {
- case x != x || x > MaxFloat64: // IsNaN(x) || IsInf(x, 1):
+ case IsNaN(x) || IsInf(x, 1):
return x
case x < 0:
return NaN()
)
// special cases
- // TODO(rsc): Remove manual inlining of IsNaN, IsInf
- // when compiler does it for us
switch {
- case x < -1 || x != x: // x < -1 || IsNaN(x): // includes -Inf
+ case x < -1 || IsNaN(x): // includes -Inf
return NaN()
case x == -1:
return Inf(-1)
- case x > MaxFloat64: // IsInf(x, 1):
+ case IsInf(x, 1):
return Inf(1)
}
// Logb(0) = -Inf
// Logb(NaN) = NaN
func Logb(x float64) float64 {
- // TODO(rsc): Remove manual inlining of IsNaN, IsInf
- // when compiler does it for us
// special cases
switch {
case x == 0:
return Inf(-1)
- case x < -MaxFloat64 || x > MaxFloat64: // IsInf(x, 0):
+ case IsInf(x, 0):
return Inf(1)
- case x != x: // IsNaN(x):
+ case IsNaN(x):
return x
}
return float64(ilogb(x))
// Ilogb(0) = MinInt32
// Ilogb(NaN) = MaxInt32
func Ilogb(x float64) int {
- // TODO(rsc): Remove manual inlining of IsNaN, IsInf
- // when compiler does it for us
// special cases
switch {
case x == 0:
return MinInt32
- case x != x: // IsNaN(x):
+ case IsNaN(x):
return MaxInt32
- case x < -MaxFloat64 || x > MaxFloat64: // IsInf(x, 0):
+ case IsInf(x, 0):
return MaxInt32
}
return ilogb(x)
}
func mod(x, y float64) float64 {
- // TODO(rsc): Remove manual inlining of IsNaN, IsInf
- // when compiler does it for us.
- if y == 0 || x > MaxFloat64 || x < -MaxFloat64 || x != x || y != y { // y == 0 || IsInf(x, 0) || IsNaN(x) || IsNan(y)
+ if y == 0 || IsInf(x, 0) || IsNaN(x) || IsNaN(y) {
return NaN()
}
if y < 0 {
// Nextafter(NaN, y) = NaN
// Nextafter(x, NaN) = NaN
func Nextafter(x, y float64) (r float64) {
- // TODO(rsc): Remove manual inlining of IsNaN
- // when compiler does it for us
switch {
- case x != x || y != y: // IsNaN(x) || IsNaN(y): // special case
+ case IsNaN(x) || IsNaN(y): // special case
r = NaN()
case x == y:
r = x
// Pow(-Inf, y) = Pow(-0, -y)
// Pow(x, y) = NaN for finite x < 0 and finite non-integer y
func Pow(x, y float64) float64 {
- // TODO(rsc): Remove manual inlining of IsNaN, IsInf
- // when compiler does it for us
switch {
case y == 0 || x == 1:
return 1
return Sqrt(x)
case y == -0.5:
return 1 / Sqrt(x)
- case x != x || y != y: // IsNaN(x) || IsNaN(y):
+ case IsNaN(x) || IsNaN(y):
return NaN()
case x == 0:
switch {
}
return 0
}
- case y > MaxFloat64 || y < -MaxFloat64: // IsInf(y, 0):
+ case IsInf(y, 0):
switch {
case x == -1:
return 1
default:
return Inf(1)
}
- case x > MaxFloat64 || x < -MaxFloat64: // IsInf(x, 0):
+ case IsInf(x, 0):
if IsInf(x, -1) {
return Pow(1/x, -y) // Pow(-0, -y)
}
Tiny = 4.45014771701440276618e-308 // 0x0020000000000000
HalfMax = MaxFloat64 / 2
)
- // TODO(rsc): Remove manual inlining of IsNaN, IsInf
- // when compiler does it for us
// special cases
switch {
- case x != x || y != y || x < -MaxFloat64 || x > MaxFloat64 || y == 0: // IsNaN(x) || IsNaN(y) || IsInf(x, 0) || y == 0:
+ case IsNaN(x) || IsNaN(y) || IsInf(x, 0) || y == 0:
return NaN()
- case y < -MaxFloat64 || y > MaxFloat64: // IsInf(y):
+ case IsInf(y, 0):
return x
}
sign := false
PI4C = 2.69515142907905952645E-15 // 0x3ce8469898cc5170,
M4PI = 1.273239544735162542821171882678754627704620361328125 // 4/pi
)
- // TODO(rsc): Remove manual inlining of IsNaN, IsInf
- // when compiler does it for us
// special cases
switch {
- case x != x || x < -MaxFloat64 || x > MaxFloat64: // IsNaN(x) || IsInf(x, 0):
+ case IsNaN(x) || IsInf(x, 0):
return NaN()
}
PI4C = 2.69515142907905952645E-15 // 0x3ce8469898cc5170,
M4PI = 1.273239544735162542821171882678754627704620361328125 // 4/pi
)
- // TODO(rsc): Remove manual inlining of IsNaN, IsInf
- // when compiler does it for us
// special cases
switch {
- case x == 0 || x != x: // x == 0 || IsNaN():
+ case x == 0 || IsNaN(x):
return x // return ±0 || NaN()
- case x < -MaxFloat64 || x > MaxFloat64: // IsInf(x, 0):
+ case IsInf(x, 0):
return NaN()
}
PI4C = 2.69515142907905952645E-15 // 0x3ce8469898cc5170,
M4PI = 1.273239544735162542821171882678754627704620361328125 // 4/pi
)
- // TODO(rsc): Remove manual inlining of IsNaN, IsInf
- // when compiler does it for us
// special cases
switch {
case x == 0:
return x, 1 // return ±0.0, 1.0
- case x != x || x < -MaxFloat64 || x > MaxFloat64: // IsNaN(x) || IsInf(x, 0):
+ case IsNaN(x) || IsInf(x, 0):
return NaN(), NaN()
}
// Sqrt(NaN) = NaN
func sqrt(x float64) float64 {
// special cases
- // TODO(rsc): Remove manual inlining of IsNaN, IsInf
- // when compiler does it for us
switch {
- case x == 0 || x != x || x > MaxFloat64: // x == 0 || IsNaN(x) || IsInf(x, 1):
+ case x == 0 || IsNaN(x) || IsInf(x, 1):
return x
case x < 0:
return NaN()
PI4C = 2.69515142907905952645E-15 // 0x3ce8469898cc5170,
M4PI = 1.273239544735162542821171882678754627704620361328125 // 4/pi
)
- // TODO(rsc): Remove manual inlining of IsNaN, IsInf
- // when compiler does it for us
// special cases
switch {
- case x == 0 || x != x: // x == 0 || IsNaN():
+ case x == 0 || IsNaN(x):
return x // return ±0 || NaN()
- case x < -MaxFloat64 || x > MaxFloat64: // IsInf(x, 0):
+ case IsInf(x, 0):
return NaN()
}
// Read reads the body of a part, after its headers and before the
// next part (if any) begins.
-func (bp *Part) Read(p []byte) (n int, err error) {
- if bp.buffer.Len() >= len(p) {
+func (p *Part) Read(d []byte) (n int, err error) {
+ if p.buffer.Len() >= len(d) {
// Internal buffer of unconsumed data is large enough for
// the read request. No need to parse more at the moment.
- return bp.buffer.Read(p)
+ return p.buffer.Read(d)
}
- peek, err := bp.mr.bufReader.Peek(4096) // TODO(bradfitz): add buffer size accessor
+ peek, err := p.mr.bufReader.Peek(4096) // TODO(bradfitz): add buffer size accessor
unexpectedEof := err == io.EOF
if err != nil && !unexpectedEof {
return 0, fmt.Errorf("multipart: Part Read: %v", err)
// string.
nCopy := 0
foundBoundary := false
- if idx := bytes.Index(peek, bp.mr.nlDashBoundary); idx != -1 {
+ if idx := bytes.Index(peek, p.mr.nlDashBoundary); idx != -1 {
nCopy = idx
foundBoundary = true
- } else if safeCount := len(peek) - len(bp.mr.nlDashBoundary); safeCount > 0 {
+ } else if safeCount := len(peek) - len(p.mr.nlDashBoundary); safeCount > 0 {
nCopy = safeCount
} else if unexpectedEof {
// If we've run out of peek buffer and the boundary
return 0, io.ErrUnexpectedEOF
}
if nCopy > 0 {
- if _, err := io.CopyN(bp.buffer, bp.mr.bufReader, int64(nCopy)); err != nil {
+ if _, err := io.CopyN(p.buffer, p.mr.bufReader, int64(nCopy)); err != nil {
return 0, err
}
}
- n, err = bp.buffer.Read(p)
+ n, err = p.buffer.Read(d)
if err == io.EOF && !foundBoundary {
// If the boundary hasn't been reached there's more to
// read, so don't pass through an EOF from the buffer
return
}
-func (bp *Part) Close() error {
- io.Copy(ioutil.Discard, bp)
+func (p *Part) Close() error {
+ io.Copy(ioutil.Discard, p)
return nil
}
// NextPart returns the next part in the multipart or an error.
// When there are no more parts, the error io.EOF is returned.
-func (mr *Reader) NextPart() (*Part, error) {
- if mr.currentPart != nil {
- mr.currentPart.Close()
+func (r *Reader) NextPart() (*Part, error) {
+ if r.currentPart != nil {
+ r.currentPart.Close()
}
expectNewPart := false
for {
- line, err := mr.bufReader.ReadSlice('\n')
+ line, err := r.bufReader.ReadSlice('\n')
if err != nil {
return nil, fmt.Errorf("multipart: NextPart: %v", err)
}
- if mr.isBoundaryDelimiterLine(line) {
- mr.partsRead++
- bp, err := newPart(mr)
+ if r.isBoundaryDelimiterLine(line) {
+ r.partsRead++
+ bp, err := newPart(r)
if err != nil {
return nil, err
}
- mr.currentPart = bp
+ r.currentPart = bp
return bp, nil
}
- if hasPrefixThenNewline(line, mr.dashBoundaryDash) {
+ if hasPrefixThenNewline(line, r.dashBoundaryDash) {
// Expected EOF
return nil, io.EOF
}
return nil, fmt.Errorf("multipart: expecting a new Part; got line %q", string(line))
}
- if mr.partsRead == 0 {
+ if r.partsRead == 0 {
// skip line
continue
}
// body of the previous part and the boundary line we
// now expect will follow. (either a new part or the
// end boundary)
- if bytes.Equal(line, mr.nl) {
+ if bytes.Equal(line, r.nl) {
expectNewPart = true
continue
}
errc <- err
}()
}
- case "darwin":
+ case "darwin", "windows":
// At least OS X 10.7 seems to accept any number of
// connections, ignoring listen's backlog, so resort
// to connecting to a hopefully-dead 127/8 address.
+ // Same for Windows.
go func() {
_, err := DialTimeout("tcp", "127.0.71.111:80", 200*time.Millisecond)
errc <- err
}()
default:
- // TODO(bradfitz): this probably doesn't work on
- // Windows? SOMAXCONN is huge there. I'm not sure how
- // listen works there.
+ // TODO(bradfitz):
// OpenBSD may have a reject route to 10/8.
// FreeBSD likely works, but is untested.
t.Logf("skipping test on %q; untested.", runtime.GOOS)
rotate bool // round robin among servers
}
-var dnsconfigError error
-
-type DNSConfigError struct {
- Err error
-}
-
-func (e *DNSConfigError) Error() string {
- return "error reading DNS config: " + e.Err.Error()
-}
-
-func (e *DNSConfigError) Timeout() bool { return false }
-func (e *DNSConfigError) Temporary() bool { return false }
-
// See resolv.conf(5) on a Linux machine.
// TODO(rsc): Supposed to call uname() and chop the beginning
// of the host name to get the default search domain.
// DNS packet assembly. See RFC 1035.
//
-// This is intended to support name resolution during net.Dial.
+// This is intended to support name resolution during Dial.
// It doesn't have to be blazing fast.
//
// Rather than write the usual handful of routines to pack and
ncr, ncw int
}
-type InvalidConnError struct{}
-
-func (e *InvalidConnError) Error() string { return "invalid net.Conn" }
-func (e *InvalidConnError) Temporary() bool { return false }
-func (e *InvalidConnError) Timeout() bool { return false }
-
// A pollServer helps FDs determine when to retry a non-blocking
// read or write after they get EAGAIN. When an FD needs to wait,
// send the fd on s.cr (for a read) or s.cw (for a write) to pass the
pollserver = p
}
-func newFD(fd, family, sotype int, net string) (f *netFD, err error) {
+func newFD(fd, family, sotype int, net string) (*netFD, error) {
onceStartServer.Do(startServer)
- if e := syscall.SetNonblock(fd, true); e != nil {
- return nil, e
+ if err := syscall.SetNonblock(fd, true); err != nil {
+ return nil, err
}
- f = &netFD{
+ netfd := &netFD{
sysfd: fd,
family: family,
sotype: sotype,
net: net,
}
- f.cr = make(chan bool, 1)
- f.cw = make(chan bool, 1)
- return f, nil
+ netfd.cr = make(chan bool, 1)
+ netfd.cw = make(chan bool, 1)
+ return netfd, nil
}
func (fd *netFD) setAddr(laddr, raddr Addr) {
fd.sysfile = os.NewFile(fd.sysfd, fd.net+":"+ls+"->"+rs)
}
-func (fd *netFD) connect(ra syscall.Sockaddr) (err error) {
- err = syscall.Connect(fd.sysfd, ra)
+func (fd *netFD) connect(ra syscall.Sockaddr) error {
+ err := syscall.Connect(fd.sysfd, ra)
if err == syscall.EINPROGRESS {
pollserver.WaitWrite(fd)
var e int
return
}
-func (fd *netFD) Write(p []byte) (n int, err error) {
+func (fd *netFD) Write(p []byte) (int, error) {
if fd == nil {
return 0, os.EINVAL
}
if fd.sysfile == nil {
return 0, os.EINVAL
}
- nn := 0
+ var err error
+ nn := 0
for {
var n int
n, err = syscall.Write(fd.sysfile.Fd(), p[nn:])
return
}
-func (fd *netFD) accept(toAddr func(syscall.Sockaddr) Addr) (nfd *netFD, err error) {
+func (fd *netFD) accept(toAddr func(syscall.Sockaddr) Addr) (netfd *netFD, err error) {
if fd == nil || fd.sysfile == nil {
return nil, os.EINVAL
}
syscall.CloseOnExec(s)
syscall.ForkLock.RUnlock()
- if nfd, err = newFD(s, fd.family, fd.sotype, fd.net); err != nil {
+ if netfd, err = newFD(s, fd.family, fd.sotype, fd.net); err != nil {
syscall.Close(s)
return nil, err
}
- lsa, _ := syscall.Getsockname(nfd.sysfd)
- nfd.setAddr(toAddr(lsa), toAddr(rsa))
- return nfd, nil
+ lsa, _ := syscall.Getsockname(netfd.sysfd)
+ netfd.setAddr(toAddr(lsa), toAddr(rsa))
+ return netfd, nil
}
func (fd *netFD) dup() (f *os.File, err error) {
func newpollster() (p *pollster, err error) {
p = new(pollster)
- var e error
-
- if p.epfd, e = syscall.EpollCreate1(syscall.EPOLL_CLOEXEC); e != nil {
- if e != syscall.ENOSYS {
- return nil, os.NewSyscallError("epoll_create1", e)
+ if p.epfd, err = syscall.EpollCreate1(syscall.EPOLL_CLOEXEC); err != nil {
+ if err != syscall.ENOSYS {
+ return nil, os.NewSyscallError("epoll_create1", err)
}
// The arg to epoll_create is a hint to the kernel
// about the number of FDs we will care about.
// We don't know, and since 2.6.8 the kernel ignores it anyhow.
- if p.epfd, e = syscall.EpollCreate(16); e != nil {
- return nil, os.NewSyscallError("epoll_create", e)
+ if p.epfd, err = syscall.EpollCreate(16); err != nil {
+ return nil, os.NewSyscallError("epoll_create", err)
}
syscall.CloseOnExec(p.epfd)
}
} else {
op = syscall.EPOLL_CTL_ADD
}
- if e := syscall.EpollCtl(p.epfd, op, fd, &p.ctlEvent); e != nil {
- return false, os.NewSyscallError("epoll_ctl", e)
+ if err := syscall.EpollCtl(p.epfd, op, fd, &p.ctlEvent); err != nil {
+ return false, os.NewSyscallError("epoll_ctl", err)
}
p.events[fd] = p.ctlEvent.Events
return false, nil
if int32(events)&^syscall.EPOLLONESHOT != 0 {
p.ctlEvent.Fd = int32(fd)
p.ctlEvent.Events = events
- if e := syscall.EpollCtl(p.epfd, syscall.EPOLL_CTL_MOD, fd, &p.ctlEvent); e != nil {
- print("Epoll modify fd=", fd, ": ", e.Error(), "\n")
+ if err := syscall.EpollCtl(p.epfd, syscall.EPOLL_CTL_MOD, fd, &p.ctlEvent); err != nil {
+ print("Epoll modify fd=", fd, ": ", err.Error(), "\n")
}
p.events[fd] = events
} else {
- if e := syscall.EpollCtl(p.epfd, syscall.EPOLL_CTL_DEL, fd, nil); e != nil {
- print("Epoll delete fd=", fd, ": ", e.Error(), "\n")
+ if err := syscall.EpollCtl(p.epfd, syscall.EPOLL_CTL_DEL, fd, nil); err != nil {
+ print("Epoll delete fd=", fd, ": ", err.Error(), "\n")
}
delete(p.events, fd)
}
}
s.Unlock()
- n, e := syscall.EpollWait(p.epfd, p.waitEventBuf[0:], msec)
+ n, err := syscall.EpollWait(p.epfd, p.waitEventBuf[0:], msec)
s.Lock()
- if e != nil {
- if e == syscall.EAGAIN || e == syscall.EINTR {
+ if err != nil {
+ if err == syscall.EAGAIN || err == syscall.EINTR {
continue
}
- return -1, 0, os.NewSyscallError("epoll_wait", e)
+ return -1, 0, os.NewSyscallError("epoll_wait", err)
}
if n == 0 {
return -1, 0, nil
}
syscall.SetKevent(ev, fd, kmode, flags)
- n, e := syscall.Kevent(p.kq, p.kbuf[:], nil, nil)
- if e != nil {
- return false, os.NewSyscallError("kevent", e)
+ n, err := syscall.Kevent(p.kq, p.kbuf[:], nil, nil)
+ if err != nil {
+ return false, os.NewSyscallError("kevent", err)
}
if n != 1 || (ev.Flags&syscall.EV_ERROR) == 0 || int(ev.Ident) != fd || int(ev.Filter) != kmode {
- return false, os.NewSyscallError("kqueue phase error", e)
+ return false, os.NewSyscallError("kqueue phase error", err)
}
if ev.Data != 0 {
return false, syscall.Errno(int(ev.Data))
}
s.Unlock()
- nn, e := syscall.Kevent(p.kq, nil, p.eventbuf[:], t)
+ n, err := syscall.Kevent(p.kq, nil, p.eventbuf[:], t)
s.Lock()
- if e != nil {
- if e == syscall.EINTR {
+ if err != nil {
+ if err == syscall.EINTR {
continue
}
- return -1, 0, os.NewSyscallError("kevent", e)
+ return -1, 0, os.NewSyscallError("kevent", err)
}
- if nn == 0 {
+ if n == 0 {
return -1, 0, nil
}
- p.events = p.eventbuf[0:nn]
+ p.events = p.eventbuf[:n]
}
ev := &p.events[0]
p.events = p.events[1:]
}
syscall.SetKevent(ev, fd, kmode, flags)
- n, e := syscall.Kevent(p.kq, p.kbuf[:], nil, nil)
- if e != nil {
- return false, os.NewSyscallError("kevent", e)
+ n, err := syscall.Kevent(p.kq, p.kbuf[:], nil, nil)
+ if err != nil {
+ return false, os.NewSyscallError("kevent", err)
}
if n != 1 || (ev.Flags&syscall.EV_ERROR) == 0 || int(ev.Ident) != fd || int(ev.Filter) != kmode {
- return false, os.NewSyscallError("kqueue phase error", e)
+ return false, os.NewSyscallError("kqueue phase error", err)
}
if ev.Data != 0 {
return false, syscall.Errno(int(ev.Data))
}
s.Unlock()
- nn, e := syscall.Kevent(p.kq, nil, p.eventbuf[:], t)
+ n, err := syscall.Kevent(p.kq, nil, p.eventbuf[:], t)
s.Lock()
- if e != nil {
- if e == syscall.EINTR {
+ if err != nil {
+ if err == syscall.EINTR {
continue
}
- return -1, 0, os.NewSyscallError("kevent", e)
+ return -1, 0, os.NewSyscallError("kevent", err)
}
- if nn == 0 {
+ if n == 0 {
return -1, 0, nil
}
- p.events = p.eventbuf[0:nn]
+ p.events = p.eventbuf[:n]
}
ev := &p.events[0]
p.events = p.events[1:]
"unsafe"
)
-type InvalidConnError struct{}
-
-func (e *InvalidConnError) Error() string { return "invalid net.Conn" }
-func (e *InvalidConnError) Temporary() bool { return false }
-func (e *InvalidConnError) Timeout() bool { return false }
-
var initErr error
func init() {
}
}
-func closesocket(s syscall.Handle) (err error) {
+func closesocket(s syscall.Handle) error {
return syscall.Closesocket(s)
}
type anOpIface interface {
Op() *anOp
Name() string
- Submit() (err error)
+ Submit() error
}
// IO completion result parameters.
// inline, or, if a deadline is employed, passes the request onto
// a special goroutine and waits for completion or cancels request.
// deadline is unix nanos.
-func (s *ioSrv) ExecIO(oi anOpIface, deadline int64) (n int, err error) {
- var e error
+func (s *ioSrv) ExecIO(oi anOpIface, deadline int64) (int, error) {
+ var err error
o := oi.Op()
if deadline != 0 {
// Send request to a special dedicated thread,
// so it can stop the io with CancelIO later.
s.submchan <- oi
- e = <-o.errnoc
+ err = <-o.errnoc
} else {
- e = oi.Submit()
+ err = oi.Submit()
}
- switch e {
+ switch err {
case nil:
// IO completed immediately, but we need to get our completion message anyway.
case syscall.ERROR_IO_PENDING:
// IO started, and we have to wait for its completion.
+ err = nil
default:
- return 0, &OpError{oi.Name(), o.fd.net, o.fd.laddr, e}
+ return 0, &OpError{oi.Name(), o.fd.net, o.fd.laddr, err}
}
// Wait for our request to complete.
var r ioResult
wio sync.Mutex
}
-func allocFD(fd syscall.Handle, family, sotype int, net string) (f *netFD) {
- f = &netFD{
+func allocFD(fd syscall.Handle, family, sotype int, net string) *netFD {
+ netfd := &netFD{
sysfd: fd,
family: family,
sotype: sotype,
net: net,
}
- runtime.SetFinalizer(f, (*netFD).Close)
- return f
+ runtime.SetFinalizer(netfd, (*netFD).Close)
+ return netfd
}
-func newFD(fd syscall.Handle, family, proto int, net string) (f *netFD, err error) {
+func newFD(fd syscall.Handle, family, proto int, net string) (*netFD, error) {
if initErr != nil {
return nil, initErr
}
onceStartServer.Do(startServer)
// Associate our socket with resultsrv.iocp.
- if _, e := syscall.CreateIoCompletionPort(syscall.Handle(fd), resultsrv.iocp, 0, 0); e != nil {
- return nil, e
+ if _, err := syscall.CreateIoCompletionPort(syscall.Handle(fd), resultsrv.iocp, 0, 0); err != nil {
+ return nil, err
}
return allocFD(fd, family, proto, net), nil
}
fd.raddr = raddr
}
-func (fd *netFD) connect(ra syscall.Sockaddr) (err error) {
+func (fd *netFD) connect(ra syscall.Sockaddr) error {
return syscall.Connect(fd.sysfd, ra)
}
bufOp
}
-func (o *readOp) Submit() (err error) {
+func (o *readOp) Submit() error {
var d, f uint32
return syscall.WSARecv(syscall.Handle(o.fd.sysfd), &o.buf, 1, &d, &f, &o.o, nil)
}
return "WSARecv"
}
-func (fd *netFD) Read(buf []byte) (n int, err error) {
+func (fd *netFD) Read(buf []byte) (int, error) {
if fd == nil {
return 0, os.EINVAL
}
}
var o readOp
o.Init(fd, buf, 'r')
- n, err = iosrv.ExecIO(&o, fd.rdeadline)
+ n, err := iosrv.ExecIO(&o, fd.rdeadline)
if err == nil && n == 0 {
err = io.EOF
}
- return
+ return n, err
}
// ReadFrom from network.
rsan int32
}
-func (o *readFromOp) Submit() (err error) {
+func (o *readFromOp) Submit() error {
var d, f uint32
return syscall.WSARecvFrom(o.fd.sysfd, &o.buf, 1, &d, &f, &o.rsa, &o.rsan, &o.o, nil)
}
bufOp
}
-func (o *writeOp) Submit() (err error) {
+func (o *writeOp) Submit() error {
var d uint32
return syscall.WSASend(o.fd.sysfd, &o.buf, 1, &d, 0, &o.o, nil)
}
return "WSASend"
}
-func (fd *netFD) Write(buf []byte) (n int, err error) {
+func (fd *netFD) Write(buf []byte) (int, error) {
if fd == nil {
return 0, os.EINVAL
}
sa syscall.Sockaddr
}
-func (o *writeToOp) Submit() (err error) {
+func (o *writeToOp) Submit() error {
var d uint32
return syscall.WSASendto(o.fd.sysfd, &o.buf, 1, &d, 0, o.sa, &o.o, nil)
}
return "WSASendto"
}
-func (fd *netFD) WriteTo(buf []byte, sa syscall.Sockaddr) (n int, err error) {
+func (fd *netFD) WriteTo(buf []byte, sa syscall.Sockaddr) (int, error) {
if fd == nil {
return 0, os.EINVAL
}
attrs [2]syscall.RawSockaddrAny // space for local and remote address only
}
-func (o *acceptOp) Submit() (err error) {
+func (o *acceptOp) Submit() error {
var d uint32
l := uint32(unsafe.Sizeof(o.attrs[0]))
return syscall.AcceptEx(o.fd.sysfd, o.newsock,
return "AcceptEx"
}
-func (fd *netFD) accept(toAddr func(syscall.Sockaddr) Addr) (nfd *netFD, err error) {
+func (fd *netFD) accept(toAddr func(syscall.Sockaddr) Addr) (*netFD, error) {
if fd == nil || fd.sysfd == syscall.InvalidHandle {
return nil, os.EINVAL
}
// Get new socket.
// See ../syscall/exec.go for description of ForkLock.
syscall.ForkLock.RLock()
- s, e := syscall.Socket(fd.family, fd.sotype, 0)
- if e != nil {
+ s, err := syscall.Socket(fd.family, fd.sotype, 0)
+ if err != nil {
syscall.ForkLock.RUnlock()
- return nil, e
+ return nil, err
}
syscall.CloseOnExec(s)
syscall.ForkLock.RUnlock()
// Associate our new socket with IOCP.
onceStartServer.Do(startServer)
- if _, e = syscall.CreateIoCompletionPort(s, resultsrv.iocp, 0, 0); e != nil {
- return nil, &OpError{"CreateIoCompletionPort", fd.net, fd.laddr, e}
+ if _, err := syscall.CreateIoCompletionPort(s, resultsrv.iocp, 0, 0); err != nil {
+ return nil, &OpError{"CreateIoCompletionPort", fd.net, fd.laddr, err}
}
// Submit accept request.
}
// Inherit properties of the listening socket.
- e = syscall.Setsockopt(s, syscall.SOL_SOCKET, syscall.SO_UPDATE_ACCEPT_CONTEXT, (*byte)(unsafe.Pointer(&fd.sysfd)), int32(unsafe.Sizeof(fd.sysfd)))
- if e != nil {
+ err = syscall.Setsockopt(s, syscall.SOL_SOCKET, syscall.SO_UPDATE_ACCEPT_CONTEXT, (*byte)(unsafe.Pointer(&fd.sysfd)), int32(unsafe.Sizeof(fd.sysfd)))
+ if err != nil {
closesocket(s)
- return nil, e
+ return nil, err
}
// Get local and peer addr out of AcceptEx buffer.
lsa, _ := lrsa.Sockaddr()
rsa, _ := rrsa.Sockaddr()
- nfd = allocFD(s, fd.family, fd.sotype, fd.net)
- nfd.setAddr(toAddr(lsa), toAddr(rsa))
- return nfd, nil
+ netfd := allocFD(s, fd.family, fd.sotype, fd.net)
+ netfd.setAddr(toAddr(lsa), toAddr(rsa))
+ return netfd, nil
}
// Unimplemented functions.
-func (fd *netFD) dup() (f *os.File, err error) {
+func (fd *netFD) dup() (*os.File, error) {
// TODO: Implement this
return nil, os.NewSyscallError("dup", syscall.EWINDOWS)
}
"syscall"
)
-func newFileFD(f *os.File) (nfd *netFD, err error) {
- fd, errno := syscall.Dup(f.Fd())
- if errno != nil {
- return nil, os.NewSyscallError("dup", errno)
+func newFileFD(f *os.File) (*netFD, error) {
+ fd, err := syscall.Dup(f.Fd())
+ if err != nil {
+ return nil, os.NewSyscallError("dup", err)
}
- proto, errno := syscall.GetsockoptInt(fd, syscall.SOL_SOCKET, syscall.SO_TYPE)
- if errno != nil {
- return nil, os.NewSyscallError("getsockopt", errno)
+ proto, err := syscall.GetsockoptInt(fd, syscall.SOL_SOCKET, syscall.SO_TYPE)
+ if err != nil {
+ return nil, os.NewSyscallError("getsockopt", err)
}
family := syscall.AF_UNSPEC
sa, _ = syscall.Getpeername(fd)
raddr := toAddr(sa)
- if nfd, err = newFD(fd, family, proto, laddr.Network()); err != nil {
+ netfd, err := newFD(fd, family, proto, laddr.Network())
+ if err != nil {
return nil, err
}
- nfd.setAddr(laddr, raddr)
- return nfd, nil
+ netfd.setAddr(laddr, raddr)
+ return netfd, nil
}
// FileConn returns a copy of the network connection corresponding to
defer cmd.Wait()
defer stdoutRead.Close()
- linebody, _ := bufio.NewReaderSize(stdoutRead, 1024)
+ linebody := bufio.NewReaderSize(stdoutRead, 1024)
headers := make(http.Header)
statusCode := 0
for {
}
method := ireq.Method
- err = &url.Error{method[0:1] + strings.ToLower(method[1:]), urlStr, err}
+ err = &url.Error{
+ Op: method[0:1] + strings.ToLower(method[1:]),
+ URL: urlStr,
+ Err: err,
+ }
return
}
return nil, err
}
req.Header.Set("Content-Type", bodyType)
- return send(req, c.Transport)
+ r, err = send(req, c.Transport)
+ if err == nil && c.Jar != nil {
+ c.Jar.SetCookies(req.URL, r.Cookies())
+ }
+ return r, err
}
// PostForm issues a POST to the specified URL,
func newWriter(c *conn, recType recType, reqId uint16) *bufWriter {
s := &streamWriter{c: c, recType: recType, reqId: reqId}
- w, _ := bufio.NewWriterSize(s, maxWrite)
+ w := bufio.NewWriterSize(s, maxWrite)
return &bufWriter{s, w}
}
if err != nil {
t.Fatalf("stat of %s: %v", name, err)
}
- if !gfi.(*os.FileStat).SameFile(wfi.(*os.FileStat)) {
+ if !os.SameFile(gfi, wfi) {
t.Errorf("%s got different file", name)
}
}
package httputil
import (
+ "bufio"
"bytes"
- "errors"
"fmt"
"io"
"io/ioutil"
// DumpRequestOut is like DumpRequest but includes
// headers that the standard http.Transport adds,
// such as User-Agent.
-func DumpRequestOut(req *http.Request, body bool) (dump []byte, err error) {
+func DumpRequestOut(req *http.Request, body bool) ([]byte, error) {
save := req.Body
if !body || req.Body == nil {
req.Body = nil
} else {
+ var err error
save, req.Body, err = drainBody(req.Body)
if err != nil {
- return
+ return nil, err
}
}
- var b bytes.Buffer
- dialed := false
+ // Use the actual Transport code to record what we would send
+ // on the wire, but not using TCP. Use a Transport with a
+ // custom dialer that returns a fake net.Conn that waits
+ // for the full input (and recording it), and then responds
+ // with a dummy response.
+ var buf bytes.Buffer // records the output
+ pr, pw := io.Pipe()
+ dr := &delegateReader{c: make(chan io.Reader)}
+ // Wait for the request before replying with a dummy response:
+ go func() {
+ http.ReadRequest(bufio.NewReader(pr))
+ dr.c <- strings.NewReader("HTTP/1.1 204 No Content\r\n\r\n")
+ }()
+
t := &http.Transport{
- Dial: func(net, addr string) (c net.Conn, err error) {
- if dialed {
- return nil, errors.New("unexpected second dial")
- }
- c = &dumpConn{
- Writer: &b,
- Reader: strings.NewReader("HTTP/1.1 500 Fake Error\r\n\r\n"),
- }
- return
+ Dial: func(net, addr string) (net.Conn, error) {
+ return &dumpConn{io.MultiWriter(pw, &buf), dr}, nil
},
}
- _, err = t.RoundTrip(req)
+ _, err := t.RoundTrip(req)
req.Body = save
if err != nil {
- return
+ return nil, err
}
- dump = b.Bytes()
- return
+ return buf.Bytes(), nil
+}
+
+// delegateReader is a reader that delegates to another reader,
+// once it arrives on a channel.
+type delegateReader struct {
+ c chan io.Reader
+ r io.Reader // nil until received from c
+}
+
+func (r *delegateReader) Read(p []byte) (int, error) {
+ if r.r == nil {
+ r.r = <-r.c
+ }
+ return r.r.Read(p)
}
// Return value if nonempty, def otherwise.
)
var (
- ErrPersistEOF = &http.ProtocolError{"persistent connection closed"}
- ErrPipeline = &http.ProtocolError{"pipeline error"}
+ ErrPersistEOF = &http.ProtocolError{ErrorString: "persistent connection closed"}
+ ErrPipeline = &http.ProtocolError{ErrorString: "pipeline error"}
)
// This is an API usage error - the local side is closed.
// target's path is "/base" and the incoming request was for "/dir",
// the target request will be for /base/dir.
func NewSingleHostReverseProxy(target *url.URL) *ReverseProxy {
+ targetQuery := target.RawQuery
director := func(req *http.Request) {
req.URL.Scheme = target.Scheme
req.URL.Host = target.Host
req.URL.Path = singleJoiningSlash(target.Path, req.URL.Path)
- req.URL.RawQuery = target.RawQuery
+ if targetQuery == "" || req.URL.RawQuery == "" {
+ req.URL.RawQuery = targetQuery + req.URL.RawQuery
+ } else {
+ req.URL.RawQuery = targetQuery + "&" + req.URL.RawQuery
+ }
}
return &ReverseProxy{Director: director}
}
t.Errorf("got body %q; expected %q", g, e)
}
}
+
+var proxyQueryTests = []struct {
+ baseSuffix string // suffix to add to backend URL
+ reqSuffix string // suffix to add to frontend's request URL
+ want string // what backend should see for final request URL (without ?)
+}{
+ {"", "", ""},
+ {"?sta=tic", "?us=er", "sta=tic&us=er"},
+ {"", "?us=er", "us=er"},
+ {"?sta=tic", "", "sta=tic"},
+}
+
+func TestReverseProxyQuery(t *testing.T) {
+ backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("X-Got-Query", r.URL.RawQuery)
+ w.Write([]byte("hi"))
+ }))
+ defer backend.Close()
+
+ for i, tt := range proxyQueryTests {
+ backendURL, err := url.Parse(backend.URL + tt.baseSuffix)
+ if err != nil {
+ t.Fatal(err)
+ }
+ frontend := httptest.NewServer(NewSingleHostReverseProxy(backendURL))
+ req, _ := http.NewRequest("GET", frontend.URL+tt.reqSuffix, nil)
+ req.Close = true
+ res, err := http.DefaultClient.Do(req)
+ if err != nil {
+ t.Fatalf("%d. Get: %v", i, err)
+ }
+ if g, e := res.Header.Get("X-Got-Query"), tt.want; g != e {
+ t.Errorf("%d. got query %q; expected %q", i, g, e)
+ }
+ res.Body.Close()
+ frontend.Close()
+ }
+}
const defaultUserAgent = "Go http package"
// Write writes an HTTP/1.1 request -- header and body -- in wire format.
-// This method consults the following fields of req:
+// This method consults the following fields of the request:
// Host
// URL
// Method (defaults to "GET")
// If Body is present, Content-Length is <= 0 and TransferEncoding
// hasn't been set to "identity", Write adds "Transfer-Encoding:
// chunked" to the header. Body is closed after it is sent.
-func (req *Request) Write(w io.Writer) error {
- return req.write(w, false, nil)
+func (r *Request) Write(w io.Writer) error {
+ return r.write(w, false, nil)
}
// WriteProxy is like Write but writes the request in the form
// expected by an HTTP proxy. In particular, WriteProxy writes the
// initial Request-URI line of the request with an absolute URI, per
-// section 5.1.2 of RFC 2616, including the scheme and host. In
-// either case, WriteProxy also writes a Host header, using either
-// req.Host or req.URL.Host.
-func (req *Request) WriteProxy(w io.Writer) error {
- return req.write(w, true, nil)
+// section 5.1.2 of RFC 2616, including the scheme and host.
+// In either case, WriteProxy also writes a Host header, using
+// either r.Host or r.URL.Host.
+func (r *Request) WriteProxy(w io.Writer) error {
+ return r.write(w, true, nil)
}
// extraHeaders may be nil
}
// Writes the response (header, body and trailer) in wire format. This method
-// consults the following fields of resp:
+// consults the following fields of the response:
//
// StatusCode
// ProtoMajor
// ContentLength
// Header, values for non-canonical keys will have unpredictable behavior
//
-func (resp *Response) Write(w io.Writer) error {
+func (r *Response) Write(w io.Writer) error {
// RequestMethod should be upper-case
- if resp.Request != nil {
- resp.Request.Method = strings.ToUpper(resp.Request.Method)
+ if r.Request != nil {
+ r.Request.Method = strings.ToUpper(r.Request.Method)
}
// Status line
- text := resp.Status
+ text := r.Status
if text == "" {
var ok bool
- text, ok = statusText[resp.StatusCode]
+ text, ok = statusText[r.StatusCode]
if !ok {
- text = "status code " + strconv.Itoa(resp.StatusCode)
+ text = "status code " + strconv.Itoa(r.StatusCode)
}
}
- io.WriteString(w, "HTTP/"+strconv.Itoa(resp.ProtoMajor)+".")
- io.WriteString(w, strconv.Itoa(resp.ProtoMinor)+" ")
- io.WriteString(w, strconv.Itoa(resp.StatusCode)+" "+text+"\r\n")
+ io.WriteString(w, "HTTP/"+strconv.Itoa(r.ProtoMajor)+".")
+ io.WriteString(w, strconv.Itoa(r.ProtoMinor)+" ")
+ io.WriteString(w, strconv.Itoa(r.StatusCode)+" "+text+"\r\n")
// Process Body,ContentLength,Close,Trailer
- tw, err := newTransferWriter(resp)
+ tw, err := newTransferWriter(r)
if err != nil {
return err
}
}
// Rest of header
- err = resp.Header.WriteSubset(w, respExcludeHeader)
+ err = r.Header.WriteSubset(w, respExcludeHeader)
if err != nil {
return err
}
}
// TestHeadResponses verifies that responses to HEAD requests don't
-// declare that they're chunking in their response headers and aren't
-// allowed to produce output.
+// declare that they're chunking in their response headers, aren't
+// allowed to produce output, and don't set a Content-Type since
+// the real type of the body data cannot be inferred.
func TestHeadResponses(t *testing.T) {
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
_, err := w.Write([]byte("Ignored body"))
if len(res.TransferEncoding) > 0 {
t.Errorf("expected no TransferEncoding; got %v", res.TransferEncoding)
}
+ ct := res.Header.Get("Content-Type")
+ if ct != "" {
+ t.Errorf("expected no Content-Type; got %s", ct)
+ }
body, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Error(err)
}
} else {
// If no content type, apply sniffing algorithm to body.
- if w.header.Get("Content-Type") == "" {
+ if w.header.Get("Content-Type") == "" && w.req.Method != "HEAD" {
w.needSniff = true
}
}
// of the server's certificate followed by the CA's certificate.
//
// If srv.Addr is blank, ":https" is used.
-func (s *Server) ListenAndServeTLS(certFile, keyFile string) error {
- addr := s.Addr
+func (srv *Server) ListenAndServeTLS(certFile, keyFile string) error {
+ addr := srv.Addr
if addr == "" {
addr = ":https"
}
}
tlsListener := tls.NewListener(conn, config)
- return s.Serve(tlsListener)
+ return srv.Serve(tlsListener)
}
// TimeoutHandler returns a Handler that runs h with the given time limit.
}
resp, err := ReadResponse(pc.br, rc.req)
- if err == nil {
+ if err != nil {
+ pc.close()
+ } else {
hasBody := rc.req.Method != "HEAD" && resp.ContentLength != 0
if rc.addedGzip && hasBody && resp.Header.Get("Content-Encoding") == "gzip" {
resp.Header.Del("Content-Encoding")
}
if accept == "gzip" {
rw.Header().Set("Content-Encoding", "gzip")
- gz, _ := gzip.NewWriter(rw)
+ gz, err := gzip.NewWriter(rw)
+ if err != nil {
+ t.Errorf("gzip NewWriter: %v", err)
+ return
+ }
gz.Write([]byte(responseBody))
gz.Close()
} else {
res, err := DefaultTransport.RoundTrip(req)
var body []byte
if test.compressed {
- gzip, _ := gzip.NewReader(res.Body)
+ gzip, err := gzip.NewReader(res.Body)
+ if err != nil {
+ t.Errorf("%d. gzip NewReader: %v", i, err)
+ continue
+ }
body, err = ioutil.ReadAll(gzip)
res.Body.Close()
} else {
)
// If the ifindex is zero, interfaceTable returns mappings of all
-// network interfaces. Otheriwse it returns a mapping of a specific
+// network interfaces. Otherwise it returns a mapping of a specific
// interface.
func interfaceTable(ifindex int) ([]Interface, error) {
- var ift []Interface
-
tab, err := syscall.RouteRIB(syscall.NET_RT_IFLIST, ifindex)
if err != nil {
return nil, os.NewSyscallError("route rib", err)
return nil, os.NewSyscallError("route message", err)
}
+ var ift []Interface
for _, m := range msgs {
switch v := m.(type) {
case *syscall.InterfaceMessage:
}
}
}
-
return ift, nil
}
func newLink(m *syscall.InterfaceMessage) ([]Interface, error) {
- var ift []Interface
-
sas, err := syscall.ParseRoutingSockaddr(m)
if err != nil {
return nil, os.NewSyscallError("route sockaddr", err)
}
+ var ift []Interface
for _, s := range sas {
switch v := s.(type) {
case *syscall.SockaddrDatalink:
ift = append(ift, ifi)
}
}
-
return ift, nil
}
// for all network interfaces. Otherwise it returns addresses
// for a specific interface.
func interfaceAddrTable(ifindex int) ([]Addr, error) {
- var ifat []Addr
-
tab, err := syscall.RouteRIB(syscall.NET_RT_IFLIST, ifindex)
if err != nil {
return nil, os.NewSyscallError("route rib", err)
return nil, os.NewSyscallError("route message", err)
}
+ var ifat []Addr
for _, m := range msgs {
switch v := m.(type) {
case *syscall.InterfaceAddrMessage:
}
}
}
-
return ifat, nil
}
func newAddr(m *syscall.InterfaceAddrMessage) (Addr, error) {
- ifa := &IPNet{}
-
sas, err := syscall.ParseRoutingSockaddr(m)
if err != nil {
return nil, os.NewSyscallError("route sockaddr", err)
}
+ ifa := &IPNet{}
for i, s := range sas {
switch v := s.(type) {
case *syscall.SockaddrInet4:
}
}
}
-
return ifa, nil
}
// addresses for all network interfaces. Otherwise it returns
// addresses for a specific interface.
func interfaceMulticastAddrTable(ifindex int) ([]Addr, error) {
- var (
- tab []byte
- e error
- msgs []syscall.RoutingMessage
- ifmat []Addr
- )
-
- tab, e = syscall.RouteRIB(syscall.NET_RT_IFLIST2, ifindex)
- if e != nil {
- return nil, os.NewSyscallError("route rib", e)
+ tab, err := syscall.RouteRIB(syscall.NET_RT_IFLIST2, ifindex)
+ if err != nil {
+ return nil, os.NewSyscallError("route rib", err)
}
- msgs, e = syscall.ParseRoutingMessage(tab)
- if e != nil {
- return nil, os.NewSyscallError("route message", e)
+ msgs, err := syscall.ParseRoutingMessage(tab)
+ if err != nil {
+ return nil, os.NewSyscallError("route message", err)
}
+ var ifmat []Addr
for _, m := range msgs {
switch v := m.(type) {
case *syscall.InterfaceMulticastAddrMessage:
}
}
}
-
return ifmat, nil
}
func newMulticastAddr(m *syscall.InterfaceMulticastAddrMessage) ([]Addr, error) {
- var ifmat []Addr
-
- sas, e := syscall.ParseRoutingSockaddr(m)
- if e != nil {
- return nil, os.NewSyscallError("route sockaddr", e)
+ sas, err := syscall.ParseRoutingSockaddr(m)
+ if err != nil {
+ return nil, os.NewSyscallError("route sockaddr", err)
}
+ var ifmat []Addr
for _, s := range sas {
switch v := s.(type) {
case *syscall.SockaddrInet4:
ifmat = append(ifmat, ifma.toAddr())
}
}
-
return ifmat, nil
}
// addresses for all network interfaces. Otherwise it returns
// addresses for a specific interface.
func interfaceMulticastAddrTable(ifindex int) ([]Addr, error) {
- var (
- tab []byte
- e error
- msgs []syscall.RoutingMessage
- ifmat []Addr
- )
-
- tab, e = syscall.RouteRIB(syscall.NET_RT_IFMALIST, ifindex)
- if e != nil {
- return nil, os.NewSyscallError("route rib", e)
+ tab, err := syscall.RouteRIB(syscall.NET_RT_IFMALIST, ifindex)
+ if err != nil {
+ return nil, os.NewSyscallError("route rib", err)
}
- msgs, e = syscall.ParseRoutingMessage(tab)
- if e != nil {
- return nil, os.NewSyscallError("route message", e)
+ msgs, err := syscall.ParseRoutingMessage(tab)
+ if err != nil {
+ return nil, os.NewSyscallError("route message", err)
}
+ var ifmat []Addr
for _, m := range msgs {
switch v := m.(type) {
case *syscall.InterfaceMulticastAddrMessage:
}
}
}
-
return ifmat, nil
}
func newMulticastAddr(m *syscall.InterfaceMulticastAddrMessage) ([]Addr, error) {
- var ifmat []Addr
-
- sas, e := syscall.ParseRoutingSockaddr(m)
- if e != nil {
- return nil, os.NewSyscallError("route sockaddr", e)
+ sas, err := syscall.ParseRoutingSockaddr(m)
+ if err != nil {
+ return nil, os.NewSyscallError("route sockaddr", err)
}
+ var ifmat []Addr
for _, s := range sas {
switch v := s.(type) {
case *syscall.SockaddrInet4:
ifmat = append(ifmat, ifma.toAddr())
}
}
-
return ifmat, nil
}
// network interfaces. Otheriwse it returns a mapping of a specific
// interface.
func interfaceTable(ifindex int) ([]Interface, error) {
- var ift []Interface
-
tab, err := syscall.NetlinkRIB(syscall.RTM_GETLINK, syscall.AF_UNSPEC)
if err != nil {
return nil, os.NewSyscallError("netlink rib", err)
return nil, os.NewSyscallError("netlink message", err)
}
+ var ift []Interface
for _, m := range msgs {
switch m.Header.Type {
case syscall.NLMSG_DONE:
}
}
}
-
done:
return ift, nil
}
if err != nil {
return nil, err
}
-
return ifat, nil
}
func addrTable(msgs []syscall.NetlinkMessage, ifindex int) ([]Addr, error) {
var ifat []Addr
-
for _, m := range msgs {
switch m.Header.Type {
case syscall.NLMSG_DONE:
}
}
}
-
done:
return ifat, nil
}
err error
ifi *Interface
)
-
if ifindex > 0 {
ifi, err = InterfaceByIndex(ifindex)
if err != nil {
return nil, err
}
}
-
ifmat4 := parseProcNetIGMP(ifi)
ifmat6 := parseProcNetIGMP6(ifi)
-
return append(ifmat4, ifmat6...), nil
}
func parseProcNetIGMP(ifi *Interface) []Addr {
- var (
- ifmat []Addr
- name string
- )
-
fd, err := open("/proc/net/igmp")
if err != nil {
return nil
}
defer fd.close()
+ var (
+ ifmat []Addr
+ name string
+ )
fd.readLine() // skip first line
b := make([]byte, IPv4len)
for l, ok := fd.readLine(); ok; l, ok = fd.readLine() {
name = f[1]
}
}
-
return ifmat
}
func parseProcNetIGMP6(ifi *Interface) []Addr {
- var ifmat []Addr
-
fd, err := open("/proc/net/igmp6")
if err != nil {
return nil
}
defer fd.close()
+ var ifmat []Addr
b := make([]byte, IPv6len)
for l, ok := fd.readLine(); ok; l, ok = fd.readLine() {
f := getFields(l)
}
}
-
return ifmat
}
b := make([]byte, 1000)
l := uint32(len(b))
a := (*syscall.IpAdapterInfo)(unsafe.Pointer(&b[0]))
- e := syscall.GetAdaptersInfo(a, &l)
- if e == syscall.ERROR_BUFFER_OVERFLOW {
+ err := syscall.GetAdaptersInfo(a, &l)
+ if err == syscall.ERROR_BUFFER_OVERFLOW {
b = make([]byte, l)
a = (*syscall.IpAdapterInfo)(unsafe.Pointer(&b[0]))
- e = syscall.GetAdaptersInfo(a, &l)
+ err = syscall.GetAdaptersInfo(a, &l)
}
- if e != nil {
- return nil, os.NewSyscallError("GetAdaptersInfo", e)
+ if err != nil {
+ return nil, os.NewSyscallError("GetAdaptersInfo", err)
}
return a, nil
}
func getInterfaceList() ([]syscall.InterfaceInfo, error) {
- s, e := syscall.Socket(syscall.AF_INET, syscall.SOCK_DGRAM, syscall.IPPROTO_UDP)
- if e != nil {
- return nil, os.NewSyscallError("Socket", e)
+ s, err := syscall.Socket(syscall.AF_INET, syscall.SOCK_DGRAM, syscall.IPPROTO_UDP)
+ if err != nil {
+ return nil, os.NewSyscallError("Socket", err)
}
defer syscall.Closesocket(s)
ii := [20]syscall.InterfaceInfo{}
ret := uint32(0)
size := uint32(unsafe.Sizeof(ii))
- e = syscall.WSAIoctl(s, syscall.SIO_GET_INTERFACE_LIST, nil, 0, (*byte)(unsafe.Pointer(&ii[0])), size, &ret, nil, 0)
- if e != nil {
- return nil, os.NewSyscallError("WSAIoctl", e)
+ err = syscall.WSAIoctl(s, syscall.SIO_GET_INTERFACE_LIST, nil, 0, (*byte)(unsafe.Pointer(&ii[0])), size, &ret, nil, 0)
+ if err != nil {
+ return nil, os.NewSyscallError("WSAIoctl", err)
}
c := ret / uint32(unsafe.Sizeof(ii[0]))
return ii[:c-1], nil
// network interfaces. Otheriwse it returns a mapping of a specific
// interface.
func interfaceTable(ifindex int) ([]Interface, error) {
- ai, e := getAdapterList()
- if e != nil {
- return nil, e
+ ai, err := getAdapterList()
+ if err != nil {
+ return nil, err
}
- ii, e := getInterfaceList()
- if e != nil {
- return nil, e
+ ii, err := getInterfaceList()
+ if err != nil {
+ return nil, err
}
var ift []Interface
// for all network interfaces. Otherwise it returns addresses
// for a specific interface.
func interfaceAddrTable(ifindex int) ([]Addr, error) {
- ai, e := getAdapterList()
- if e != nil {
- return nil, e
+ ai, err := getAdapterList()
+ if err != nil {
+ return nil, err
}
var ifat []Addr
// interfaces for IP network connections.
type IPConn bool
-// SetDeadline implements the net.Conn SetDeadline method.
+// SetDeadline implements the Conn SetDeadline method.
func (c *IPConn) SetDeadline(t time.Time) error {
return os.EPLAN9
}
-// SetReadDeadline implements the net.Conn SetReadDeadline method.
+// SetReadDeadline implements the Conn SetReadDeadline method.
func (c *IPConn) SetReadDeadline(t time.Time) error {
return os.EPLAN9
}
-// SetWriteDeadline implements the net.Conn SetWriteDeadline method.
+// SetWriteDeadline implements the Conn SetWriteDeadline method.
func (c *IPConn) SetWriteDeadline(t time.Time) error {
return os.EPLAN9
}
// Convert "host:port" into IP address and port.
func hostPortToIP(net, hostport string) (ip IP, iport int, err error) {
- var (
- addr IP
- p, i int
- ok bool
- )
host, port, err := SplitHostPort(hostport)
if err != nil {
- goto Error
+ return nil, 0, err
}
+ var addr IP
if host != "" {
// Try as an IP address.
addr = ParseIP(host)
filter = ipv6only
}
// Not an IP address. Try as a DNS name.
- addrs, err1 := LookupHost(host)
- if err1 != nil {
- err = err1
- goto Error
+ addrs, err := LookupHost(host)
+ if err != nil {
+ return nil, 0, err
}
addr = firstFavoriteAddr(filter, addrs)
if addr == nil {
// should not happen
- err = &AddrError{"LookupHost returned no suitable address", addrs[0]}
- goto Error
+ return nil, 0, &AddrError{"LookupHost returned no suitable address", addrs[0]}
}
}
}
- p, i, ok = dtoi(port, 0)
+ p, i, ok := dtoi(port, 0)
if !ok || i != len(port) {
p, err = LookupPort(net, port)
if err != nil {
- goto Error
+ return nil, 0, err
}
}
if p < 0 || p > 0xFFFF {
- err = &AddrError{"invalid port", port}
- goto Error
+ return nil, 0, &AddrError{"invalid port", port}
}
return addr, p, nil
-Error:
- return nil, 0, err
}
// parsePlan9Addr parses address of the form [ip!]port (e.g. 127.0.0.1!80).
func parsePlan9Addr(s string) (ip IP, iport int, err error) {
- var (
- addr IP
- p, i int
- ok bool
- )
- addr = IPv4zero // address contains port only
- i = byteIndex(s, '!')
+ addr := IPv4zero // address contains port only
+ i := byteIndex(s, '!')
if i >= 0 {
addr = ParseIP(s[:i])
if addr == nil {
- err = errors.New("net: parsing IP failed")
- goto Error
+ return nil, 0, errors.New("net: parsing IP failed")
}
}
- p, _, ok = dtoi(s[i+1:], 0)
+ p, _, ok := dtoi(s[i+1:], 0)
if !ok {
- err = errors.New("net: parsing port failed")
- goto Error
+ return nil, 0, errors.New("net: parsing port failed")
}
if p < 0 || p > 0xFFFF {
- err = &AddrError{"invalid port", string(p)}
- goto Error
+ return nil, 0, &AddrError{"invalid port", string(p)}
}
return addr, p, nil
-
-Error:
- return nil, 0, err
}
func readPlan9Addr(proto, filename string) (addr Addr, err error) {
// Implementation of the Conn interface - see Conn for documentation.
-// Read implements the net.Conn Read method.
+// Read implements the Conn Read method.
func (c *plan9Conn) Read(b []byte) (n int, err error) {
if !c.ok() {
return 0, os.EINVAL
return
}
-// Write implements the net.Conn Write method.
+// Write implements the Conn Write method.
func (c *plan9Conn) Write(b []byte) (n int, err error) {
if !c.ok() {
return 0, os.EINVAL
return c.raddr
}
-// SetDeadline implements the net.Conn SetDeadline method.
+// SetDeadline implements the Conn SetDeadline method.
func (c *plan9Conn) SetDeadline(t time.Time) error {
return os.EPLAN9
}
-// SetReadDeadline implements the net.Conn SetReadDeadline method.
+// SetReadDeadline implements the Conn SetReadDeadline method.
func (c *plan9Conn) SetReadDeadline(t time.Time) error {
return os.EPLAN9
}
-// SetWriteDeadline implements the net.Conn SetWriteDeadline method.
+// SetWriteDeadline implements the Conn SetWriteDeadline method.
func (c *plan9Conn) SetWriteDeadline(t time.Time) error {
return os.EPLAN9
}
func lookupProtocol(name string) (proto int, err error) {
protoentLock.Lock()
defer protoentLock.Unlock()
- p, e := syscall.GetProtoByName(name)
- if e != nil {
- return 0, os.NewSyscallError("GetProtoByName", e)
+ p, err := syscall.GetProtoByName(name)
+ if err != nil {
+ return 0, os.NewSyscallError("GetProtoByName", err)
}
return int(p.Proto), nil
}
func lookupIP(name string) (addrs []IP, err error) {
hostentLock.Lock()
defer hostentLock.Unlock()
- h, e := syscall.GetHostByName(name)
- if e != nil {
- return nil, os.NewSyscallError("GetHostByName", e)
+ h, err := syscall.GetHostByName(name)
+ if err != nil {
+ return nil, os.NewSyscallError("GetHostByName", err)
}
switch h.AddrType {
case syscall.AF_INET:
}
serventLock.Lock()
defer serventLock.Unlock()
- s, e := syscall.GetServByName(service, network)
- if e != nil {
- return 0, os.NewSyscallError("GetServByName", e)
+ s, err := syscall.GetServByName(service, network)
+ if err != nil {
+ return 0, os.NewSyscallError("GetServByName", err)
}
return int(syscall.Ntohs(s.Port)), nil
}
package net
import (
- "flag"
"os"
"runtime"
"testing"
)
-var multicast = flag.Bool("multicast", false, "enable multicast tests")
-
-var multicastUDPTests = []struct {
+var listenMulticastUDPTests = []struct {
net string
- laddr IP
- gaddr IP
+ gaddr *UDPAddr
flags Flags
ipv6 bool
}{
// cf. RFC 4727: Experimental Values in IPv4, IPv6, ICMPv4, ICMPv6, UDP, and TCP Headers
- {"udp", IPv4zero, IPv4(224, 0, 0, 254), (FlagUp | FlagLoopback), false},
- {"udp4", IPv4zero, IPv4(224, 0, 0, 254), (FlagUp | FlagLoopback), false},
- {"udp", IPv6unspecified, ParseIP("ff0e::114"), (FlagUp | FlagLoopback), true},
- {"udp6", IPv6unspecified, ParseIP("ff01::114"), (FlagUp | FlagLoopback), true},
- {"udp6", IPv6unspecified, ParseIP("ff02::114"), (FlagUp | FlagLoopback), true},
- {"udp6", IPv6unspecified, ParseIP("ff04::114"), (FlagUp | FlagLoopback), true},
- {"udp6", IPv6unspecified, ParseIP("ff05::114"), (FlagUp | FlagLoopback), true},
- {"udp6", IPv6unspecified, ParseIP("ff08::114"), (FlagUp | FlagLoopback), true},
- {"udp6", IPv6unspecified, ParseIP("ff0e::114"), (FlagUp | FlagLoopback), true},
+ {"udp", &UDPAddr{IPv4(224, 0, 0, 254), 12345}, FlagUp | FlagLoopback, false},
+ {"udp4", &UDPAddr{IPv4(224, 0, 0, 254), 12345}, FlagUp | FlagLoopback, false},
+ {"udp", &UDPAddr{ParseIP("ff0e::114"), 12345}, FlagUp | FlagLoopback, true},
+ {"udp6", &UDPAddr{ParseIP("ff01::114"), 12345}, FlagUp | FlagLoopback, true},
+ {"udp6", &UDPAddr{ParseIP("ff02::114"), 12345}, FlagUp | FlagLoopback, true},
+ {"udp6", &UDPAddr{ParseIP("ff04::114"), 12345}, FlagUp | FlagLoopback, true},
+ {"udp6", &UDPAddr{ParseIP("ff05::114"), 12345}, FlagUp | FlagLoopback, true},
+ {"udp6", &UDPAddr{ParseIP("ff08::114"), 12345}, FlagUp | FlagLoopback, true},
+ {"udp6", &UDPAddr{ParseIP("ff0e::114"), 12345}, FlagUp | FlagLoopback, true},
}
-func TestMulticastUDP(t *testing.T) {
- if runtime.GOOS == "plan9" || runtime.GOOS == "windows" {
- return
- }
- if !*multicast {
- t.Logf("test disabled; use --multicast to enable")
+func TestListenMulticastUDP(t *testing.T) {
+ switch runtime.GOOS {
+ case "netbsd", "openbsd", "plan9", "windows":
return
+ case "linux":
+ if runtime.GOARCH == "arm" {
+ return
+ }
}
- for _, tt := range multicastUDPTests {
- var (
- ifi *Interface
- found bool
- )
+ for _, tt := range listenMulticastUDPTests {
if tt.ipv6 && (!supportsIPv6 || os.Getuid() != 0) {
continue
}
if err != nil {
t.Fatalf("Interfaces failed: %v", err)
}
+ var ifi *Interface
for _, x := range ift {
if x.Flags&tt.flags == tt.flags {
ifi = &x
t.Logf("an appropriate multicast interface not found")
return
}
- c, err := ListenUDP(tt.net, &UDPAddr{IP: tt.laddr})
+ c, err := ListenMulticastUDP(tt.net, ifi, tt.gaddr)
if err != nil {
- t.Fatalf("ListenUDP failed: %v", err)
- }
- defer c.Close()
- if err := c.JoinGroup(ifi, tt.gaddr); err != nil {
- t.Fatalf("JoinGroup failed: %v", err)
+ t.Fatalf("ListenMulticastUDP failed: %v", err)
}
+ defer c.Close() // test to listen concurrently across multiple listeners
if !tt.ipv6 {
testIPv4MulticastSocketOptions(t, c.fd, ifi)
} else {
if err != nil {
t.Fatalf("MulticastAddrs failed: %v", err)
}
+ var found bool
for _, ifma := range ifmat {
- if ifma.(*IPAddr).IP.Equal(tt.gaddr) {
+ if ifma.(*IPAddr).IP.Equal(tt.gaddr.IP) {
found = true
break
}
if !found {
t.Fatalf("%q not found in RIB", tt.gaddr.String())
}
- if err := c.LeaveGroup(ifi, tt.gaddr); err != nil {
- t.Fatalf("LeaveGroup failed: %v", err)
- }
}
}
-func TestSimpleMulticastUDP(t *testing.T) {
- if runtime.GOOS == "plan9" {
- return
- }
- if !*multicast {
- t.Logf("test disabled; use --multicast to enable")
+func TestSimpleListenMulticastUDP(t *testing.T) {
+ switch runtime.GOOS {
+ case "plan9":
return
}
- for _, tt := range multicastUDPTests {
- var ifi *Interface
+ for _, tt := range listenMulticastUDPTests {
if tt.ipv6 {
continue
}
if err != nil {
t.Fatalf("Interfaces failed: %v", err)
}
+ var ifi *Interface
for _, x := range ift {
if x.Flags&tt.flags == tt.flags {
ifi = &x
t.Logf("an appropriate multicast interface not found")
return
}
- c, err := ListenUDP(tt.net, &UDPAddr{IP: tt.laddr})
+ c, err := ListenMulticastUDP(tt.net, ifi, tt.gaddr)
if err != nil {
- t.Fatalf("ListenUDP failed: %v", err)
- }
- defer c.Close()
- if err := c.JoinGroup(ifi, tt.gaddr); err != nil {
- t.Fatalf("JoinGroup failed: %v", err)
- }
- if err := c.LeaveGroup(ifi, tt.gaddr); err != nil {
- t.Fatalf("LeaveGroup failed: %v", err)
+ t.Fatalf("ListenMulticastUDP failed: %v", err)
}
+ c.Close()
}
}
// Conn is a generic stream-oriented network connection.
type Conn interface {
// Read reads data from the connection.
- // Read can be made to time out and return a net.Error with Timeout() == true
	- // Read can be made to time out and return an Error with Timeout() == true
// after a fixed time limit; see SetDeadline and SetReadDeadline.
Read(b []byte) (n int, err error)
// Write writes data to the connection.
	- // Write can be made to time out and return an Error with Timeout() == true
+ // Write can be made to time out and return a Error with Timeout() == true
// after a fixed time limit; see SetDeadline and SetWriteDeadline.
Write(b []byte) (n int, err error)
func (e UnknownNetworkError) Error() string { return "unknown network " + string(e) }
func (e UnknownNetworkError) Temporary() bool { return false }
func (e UnknownNetworkError) Timeout() bool { return false }
+
+// DNSConfigError represents an error reading the machine's DNS configuration.
+type DNSConfigError struct {
+ Err error
+}
+
+func (e *DNSConfigError) Error() string {
+ return "error reading DNS config: " + e.Err.Error()
+}
+
+func (e *DNSConfigError) Timeout() bool { return false }
+func (e *DNSConfigError) Temporary() bool { return false }
return
}
for i, tt := range dialErrorTests {
- c, e := Dial(tt.Net, tt.Raddr)
+ c, err := Dial(tt.Net, tt.Raddr)
if c != nil {
c.Close()
}
- if e == nil {
+ if err == nil {
t.Errorf("#%d: nil error, want match for %#q", i, tt.Pattern)
continue
}
- s := e.Error()
+ s := err.Error()
match, _ := regexp.MatchString(tt.Pattern, s)
if !match {
t.Errorf("#%d: %q, want match for %#q", i, s, tt.Pattern)
func TestReverseAddress(t *testing.T) {
for i, tt := range revAddrTests {
- a, e := reverseaddr(tt.Addr)
- if len(tt.ErrPrefix) > 0 && e == nil {
+ a, err := reverseaddr(tt.Addr)
+ if len(tt.ErrPrefix) > 0 && err == nil {
t.Errorf("#%d: expected %q, got <nil> (error)", i, tt.ErrPrefix)
continue
}
- if len(tt.ErrPrefix) == 0 && e != nil {
- t.Errorf("#%d: expected <nil>, got %q (error)", i, e)
+ if len(tt.ErrPrefix) == 0 && err != nil {
+ t.Errorf("#%d: expected <nil>, got %q (error)", i, err)
}
- if e != nil && e.(*DNSError).Err != tt.ErrPrefix {
- t.Errorf("#%d: expected %q, got %q (mismatched error)", i, tt.ErrPrefix, e.(*DNSError).Err)
+ if err != nil && err.(*DNSError).Err != tt.ErrPrefix {
+ t.Errorf("#%d: expected %q, got %q (mismatched error)", i, tt.ErrPrefix, err.(*DNSError).Err)
}
if a != tt.Reverse {
t.Errorf("#%d: expected %q, got %q (reverse address)", i, tt.Reverse, a)
return s, nil
Errno:
- err = &os.PathError{"setnonblock", s.pr.Name(), err}
+ err = &os.PathError{
+ Op: "setnonblock",
+ Path: s.pr.Name(),
+ Err: err,
+ }
Error:
s.pr.Close()
s.pw.Close()
Args interface{} // The argument to the function (*struct).
Reply interface{} // The reply from the function (*struct).
Error error // After completion, the error status.
- Done chan *Call // Strobes when call is complete; value is the error status.
- seq uint64
+ Done chan *Call // Strobes when call is complete.
}
// Client represents an RPC Client.
Close() error
}
-func (client *Client) send(c *Call) {
+func (client *Client) send(call *Call) {
+ client.sending.Lock()
+ defer client.sending.Unlock()
+
// Register this call.
client.mutex.Lock()
if client.shutdown {
- c.Error = ErrShutdown
+ call.Error = ErrShutdown
client.mutex.Unlock()
- c.done()
+ call.done()
return
}
- c.seq = client.seq
+ seq := client.seq
client.seq++
- client.pending[c.seq] = c
+ client.pending[seq] = call
client.mutex.Unlock()
// Encode and send the request.
- client.sending.Lock()
- defer client.sending.Unlock()
- client.request.Seq = c.seq
- client.request.ServiceMethod = c.ServiceMethod
- if err := client.codec.WriteRequest(&client.request, c.Args); err != nil {
- c.Error = err
- c.done()
+ client.request.Seq = seq
+ client.request.ServiceMethod = call.ServiceMethod
+ err := client.codec.WriteRequest(&client.request, call.Args)
+ if err != nil {
+ client.mutex.Lock()
+ delete(client.pending, seq)
+ client.mutex.Unlock()
+ call.Error = err
+ call.done()
}
}
}
seq := response.Seq
client.mutex.Lock()
- c := client.pending[seq]
+ call := client.pending[seq]
delete(client.pending, seq)
client.mutex.Unlock()
if response.Error == "" {
- err = client.codec.ReadResponseBody(c.Reply)
+ err = client.codec.ReadResponseBody(call.Reply)
if err != nil {
- c.Error = errors.New("reading body " + err.Error())
+ call.Error = errors.New("reading body " + err.Error())
}
} else {
// We've got an error response. Give this to the request;
// any subsequent requests will get the ReadResponseBody
// error if there is one.
- c.Error = ServerError(response.Error)
+ call.Error = ServerError(response.Error)
err = client.codec.ReadResponseBody(nil)
if err != nil {
err = errors.New("reading error body: " + err.Error())
}
}
- c.done()
+ call.done()
}
// Terminate pending calls.
+ client.sending.Lock()
client.mutex.Lock()
client.shutdown = true
+ closing := client.closing
for _, call := range client.pending {
call.Error = err
call.done()
}
client.mutex.Unlock()
- if err != io.EOF || !client.closing {
+ client.sending.Unlock()
+ if err != io.EOF || !closing {
log.Println("rpc: client protocol error:", err)
}
}
err = errors.New("unexpected HTTP response: " + resp.Status)
}
conn.Close()
- return nil, &net.OpError{"dial-http", network + " " + address, nil, err}
+ return nil, &net.OpError{
+ Op: "dial-http",
+ Net: network + " " + address,
+ Addr: nil,
+ Err: err,
+ }
}
// Dial connects to an RPC server at the specified network address.
}
}
call.Done = done
- if client.shutdown {
- call.Error = ErrShutdown
- call.done()
- return call
- }
client.send(call)
return call
}
// Call invokes the named function, waits for it to complete, and returns its error status.
func (client *Client) Call(serviceMethod string, args interface{}, reply interface{}) error {
- if client.shutdown {
- return ErrShutdown
- }
call := <-client.Go(serviceMethod, args, reply, make(chan *Call, 1)).Done
return call.Error
}
}
args := &Args{7, 8}
reply := new(Reply)
- runtime.UpdateMemStats()
- mallocs := 0 - runtime.MemStats.Mallocs
+ memstats := new(runtime.MemStats)
+ runtime.ReadMemStats(memstats)
+ mallocs := 0 - memstats.Mallocs
const count = 100
for i := 0; i < count; i++ {
err := client.Call("Arith.Add", args, reply)
t.Errorf("Add: expected %d got %d", reply.C, args.A+args.B)
}
}
- runtime.UpdateMemStats()
- mallocs += runtime.MemStats.Mallocs
+ runtime.ReadMemStats(memstats)
+ mallocs += memstats.Mallocs
return mallocs / count
}
if int64(n) > remain {
n = int(remain)
}
- n, errno := syscall.Sendfile(dst, src, nil, n)
+ n, err1 := syscall.Sendfile(dst, src, nil, n)
if n > 0 {
written += int64(n)
remain -= int64(n)
}
- if n == 0 && errno == nil {
+ if n == 0 && err1 == nil {
break
}
- if errno == syscall.EAGAIN && c.wdeadline >= 0 {
+ if err1 == syscall.EAGAIN && c.wdeadline >= 0 {
pollserver.WaitWrite(c)
continue
}
- if errno != nil {
+ if err1 != nil {
// This includes syscall.ENOSYS (no kernel
// support) and syscall.EINVAL (fd types which
// don't implement sendfile together)
- err = &OpError{"sendfile", c.net, c.raddr, errno}
+ err = &OpError{"sendfile", c.net, c.raddr, err1}
break
}
}
// the last message isn't base64 because it isn't a challenge
msg = []byte(msg64)
default:
- err = &textproto.Error{code, msg64}
+ err = &textproto.Error{Code: code, Msg: msg64}
}
resp, err = a.Next(msg, code == 334)
if err != nil {
import (
"io"
- "reflect"
"syscall"
)
syscall.CloseOnExec(s)
syscall.ForkLock.RUnlock()
- setDefaultSockopts(s, f, t)
+ err = setDefaultSockopts(s, f, t)
+ if err != nil {
+ closesocket(s)
+ return nil, err
+ }
if la != nil {
+ la, err = listenerSockaddr(s, f, la, toAddr)
+ if err != nil {
+ closesocket(s)
+ return nil, err
+ }
err = syscall.Bind(s, la)
if err != nil {
closesocket(s)
return fd, nil
}
-type UnknownSocketError struct {
- sa syscall.Sockaddr
-}
-
-func (e *UnknownSocketError) Error() string {
- return "unknown socket address type " + reflect.TypeOf(e.sa).String()
-}
-
type writerOnly struct {
io.Writer
}
}
return int(n)
}
+
+func listenerSockaddr(s, f int, la syscall.Sockaddr, toAddr func(syscall.Sockaddr) Addr) (syscall.Sockaddr, error) {
+ a := toAddr(la)
+ if a == nil {
+ return la, nil
+ }
+ switch v := a.(type) {
+ case *UDPAddr:
+ if v.IP.IsMulticast() {
+ err := setDefaultMulticastSockopts(s)
+ if err != nil {
+ return nil, err
+ }
+ switch f {
+ case syscall.AF_INET:
+ v.IP = IPv4zero
+ case syscall.AF_INET6:
+ v.IP = IPv6unspecified
+ }
+ return v.sockaddr(f)
+ }
+ }
+ return la, nil
+}
}
return n
}
+
+func listenerSockaddr(s, f int, la syscall.Sockaddr, toAddr func(syscall.Sockaddr) Addr) (syscall.Sockaddr, error) {
+ a := toAddr(la)
+ if a == nil {
+ return la, nil
+ }
+ switch v := a.(type) {
+ case *UDPAddr:
+ if v.IP.IsMulticast() {
+ err := setDefaultMulticastSockopts(s)
+ if err != nil {
+ return nil, err
+ }
+ switch f {
+ case syscall.AF_INET:
+ v.IP = IPv4zero
+ case syscall.AF_INET6:
+ v.IP = IPv6unspecified
+ }
+ return v.sockaddr(f)
+ }
+ }
+ return la, nil
+}
// TODO: Implement this
return syscall.SOMAXCONN
}
+
+func listenerSockaddr(s syscall.Handle, f int, la syscall.Sockaddr, toAddr func(syscall.Sockaddr) Addr) (syscall.Sockaddr, error) {
+ a := toAddr(la)
+ if a == nil {
+ return la, nil
+ }
+ switch v := a.(type) {
+ case *UDPAddr:
+ if v.IP.IsMulticast() {
+ err := setDefaultMulticastSockopts(s)
+ if err != nil {
+ return nil, err
+ }
+ switch f {
+ case syscall.AF_INET:
+ v.IP = IPv4zero
+ case syscall.AF_INET6:
+ v.IP = IPv6unspecified
+ }
+ return v.sockaddr(f)
+ }
+ }
+ return la, nil
+}
}
func setDeadline(fd *netFD, t time.Time) error {
- if e := setReadDeadline(fd, t); e != nil {
- return e
+ if err := setReadDeadline(fd, t); err != nil {
+ return err
}
return setWriteDeadline(fd, t)
}
package net
import (
+ "os"
"syscall"
)
-func setDefaultSockopts(s, f, t int) {
+func setDefaultSockopts(s, f, t int) error {
switch f {
case syscall.AF_INET6:
// Allow both IP versions even if the OS default is otherwise.
+ // Note that some operating systems never admit this option.
syscall.SetsockoptInt(s, syscall.IPPROTO_IPV6, syscall.IPV6_V6ONLY, 0)
}
if f == syscall.AF_UNIX ||
(f == syscall.AF_INET || f == syscall.AF_INET6) && t == syscall.SOCK_STREAM {
// Allow reuse of recently-used addresses.
- syscall.SetsockoptInt(s, syscall.SOL_SOCKET, syscall.SO_REUSEADDR, 1)
+ err := syscall.SetsockoptInt(s, syscall.SOL_SOCKET, syscall.SO_REUSEADDR, 1)
+ if err != nil {
+ return os.NewSyscallError("setsockopt", err)
+ }
// Allow reuse of recently-used ports.
// This option is supported only in descendants of 4.4BSD,
// to make an effective multicast application and an application
// that requires quick draw possible.
- syscall.SetsockoptInt(s, syscall.SOL_SOCKET, syscall.SO_REUSEPORT, 1)
+ err = syscall.SetsockoptInt(s, syscall.SOL_SOCKET, syscall.SO_REUSEPORT, 1)
+ if err != nil {
+ return os.NewSyscallError("setsockopt", err)
+ }
}
// Allow broadcast.
- syscall.SetsockoptInt(s, syscall.SOL_SOCKET, syscall.SO_BROADCAST, 1)
+ err := syscall.SetsockoptInt(s, syscall.SOL_SOCKET, syscall.SO_BROADCAST, 1)
+ if err != nil {
+ return os.NewSyscallError("setsockopt", err)
+ }
+
+ return nil
}
-func setDefaultMulticastSockopts(fd *netFD) {
- fd.incref()
- defer fd.decref()
+func setDefaultMulticastSockopts(s int) error {
// Allow multicast UDP and raw IP datagram sockets to listen
// concurrently across multiple listeners.
- syscall.SetsockoptInt(fd.sysfd, syscall.SOL_SOCKET, syscall.SO_REUSEADDR, 1)
- syscall.SetsockoptInt(fd.sysfd, syscall.SOL_SOCKET, syscall.SO_REUSEPORT, 1)
+ err := syscall.SetsockoptInt(s, syscall.SOL_SOCKET, syscall.SO_REUSEADDR, 1)
+ if err != nil {
+ return os.NewSyscallError("setsockopt", err)
+ }
+ err = syscall.SetsockoptInt(s, syscall.SOL_SOCKET, syscall.SO_REUSEPORT, 1)
+ if err != nil {
+ return os.NewSyscallError("setsockopt", err)
+ }
+ return nil
}
package net
import (
+ "os"
"syscall"
)
-func setDefaultSockopts(s, f, t int) {
+func setDefaultSockopts(s, f, t int) error {
switch f {
case syscall.AF_INET6:
// Allow both IP versions even if the OS default is otherwise.
+ // Note that some operating systems never admit this option.
syscall.SetsockoptInt(s, syscall.IPPROTO_IPV6, syscall.IPV6_V6ONLY, 0)
}
if f == syscall.AF_UNIX ||
(f == syscall.AF_INET || f == syscall.AF_INET6) && t == syscall.SOCK_STREAM {
// Allow reuse of recently-used addresses.
- syscall.SetsockoptInt(s, syscall.SOL_SOCKET, syscall.SO_REUSEADDR, 1)
+ err := syscall.SetsockoptInt(s, syscall.SOL_SOCKET, syscall.SO_REUSEADDR, 1)
+ if err != nil {
+ return os.NewSyscallError("setsockopt", err)
+ }
}
// Allow broadcast.
- syscall.SetsockoptInt(s, syscall.SOL_SOCKET, syscall.SO_BROADCAST, 1)
+ err := syscall.SetsockoptInt(s, syscall.SOL_SOCKET, syscall.SO_BROADCAST, 1)
+ if err != nil {
+ return os.NewSyscallError("setsockopt", err)
+ }
+ return nil
}
-func setDefaultMulticastSockopts(fd *netFD) {
- fd.incref()
- defer fd.decref()
+func setDefaultMulticastSockopts(s int) error {
// Allow multicast UDP and raw IP datagram sockets to listen
// concurrently across multiple listeners.
- syscall.SetsockoptInt(fd.sysfd, syscall.SOL_SOCKET, syscall.SO_REUSEADDR, 1)
+ err := syscall.SetsockoptInt(s, syscall.SOL_SOCKET, syscall.SO_REUSEADDR, 1)
+ if err != nil {
+ return os.NewSyscallError("setsockopt", err)
+ }
+ return nil
}
package net
import (
+ "os"
"syscall"
)
-func setDefaultSockopts(s syscall.Handle, f, t int) {
+func setDefaultSockopts(s syscall.Handle, f, t int) error {
switch f {
case syscall.AF_INET6:
// Allow both IP versions even if the OS default is otherwise.
+ // Note that some operating systems never admit this option.
syscall.SetsockoptInt(s, syscall.IPPROTO_IPV6, syscall.IPV6_V6ONLY, 0)
}
// Allow broadcast.
syscall.SetsockoptInt(s, syscall.SOL_SOCKET, syscall.SO_BROADCAST, 1)
-
+ return nil
}
-func setDefaultMulticastSockopts(fd *netFD) {
- fd.incref()
- defer fd.decref()
+func setDefaultMulticastSockopts(s syscall.Handle) error {
// Allow multicast UDP and raw IP datagram sockets to listen
// concurrently across multiple listeners.
- syscall.SetsockoptInt(fd.sysfd, syscall.SOL_SOCKET, syscall.SO_REUSEADDR, 1)
+ err := syscall.SetsockoptInt(s, syscall.SOL_SOCKET, syscall.SO_REUSEADDR, 1)
+ if err != nil {
+ return os.NewSyscallError("setsockopt", err)
+ }
+ return nil
}
package net
import (
+ "os"
"syscall"
)
}
func setIPv4MulticastInterface(fd *netFD, ifi *Interface) error {
- // TODO: Implement this
- return syscall.EWINDOWS
+ ip, err := interfaceToIPv4Addr(ifi)
+ if err != nil {
+ return os.NewSyscallError("setsockopt", err)
+ }
+ var x [4]byte
+ copy(x[:], ip.To4())
+ fd.incref()
+ defer fd.decref()
+ err = syscall.SetsockoptInet4Addr(fd.sysfd, syscall.IPPROTO_IP, syscall.IP_MULTICAST_IF, x)
+ if err != nil {
+ return os.NewSyscallError("setsockopt", err)
+ }
+ return nil
}
func ipv4MulticastTTL(fd *netFD) (int, error) {
}
func setIPv4MulticastTTL(fd *netFD, v int) error {
- // TODO: Implement this
- return syscall.EWINDOWS
+ fd.incref()
+ defer fd.decref()
+ err := syscall.SetsockoptInt(fd.sysfd, syscall.IPPROTO_IP, syscall.IP_MULTICAST_TTL, v)
+ if err != nil {
+ return os.NewSyscallError("setsockopt", err)
+ }
+ return nil
}
func ipv4MulticastLoopback(fd *netFD) (bool, error) {
}
func setIPv4MulticastLoopback(fd *netFD, v bool) error {
- // TODO: Implement this
- return syscall.EWINDOWS
+ fd.incref()
+ defer fd.decref()
+ err := syscall.SetsockoptInt(fd.sysfd, syscall.IPPROTO_IP, syscall.IP_MULTICAST_LOOP, boolint(v))
+ if err != nil {
+ return os.NewSyscallError("setsockopt", err)
+ }
+ return nil
}
func ipv4ReceiveInterface(fd *netFD) (bool, error) {
plan9Conn
}
-// SetDeadline implements the net.Conn SetDeadline method.
+// SetDeadline implements the Conn SetDeadline method.
func (c *TCPConn) SetDeadline(t time.Time) error {
return os.EPLAN9
}
-// SetReadDeadline implements the net.Conn SetReadDeadline method.
+// SetReadDeadline implements the Conn SetReadDeadline method.
func (c *TCPConn) SetReadDeadline(t time.Time) error {
return os.EPLAN9
}
-// SetWriteDeadline implements the net.Conn SetWriteDeadline method.
+// SetWriteDeadline implements the Conn SetWriteDeadline method.
func (c *TCPConn) SetWriteDeadline(t time.Time) error {
return os.EPLAN9
}
// Implementation of the Conn interface - see Conn for documentation.
-// Read implements the net.Conn Read method.
+// Read implements the Conn Read method.
func (c *TCPConn) Read(b []byte) (n int, err error) {
if !c.ok() {
return 0, os.EINVAL
return genericReadFrom(c, r)
}
-// Write implements the net.Conn Write method.
+// Write implements the Conn Write method.
func (c *TCPConn) Write(b []byte) (n int, err error) {
if !c.ok() {
return 0, os.EINVAL
return c.fd.raddr
}
-// SetDeadline implements the net.Conn SetDeadline method.
+// SetDeadline implements the Conn SetDeadline method.
func (c *TCPConn) SetDeadline(t time.Time) error {
if !c.ok() {
return os.EINVAL
return setDeadline(c.fd, t)
}
-// SetReadDeadline implements the net.Conn SetReadDeadline method.
+// SetReadDeadline implements the Conn SetReadDeadline method.
func (c *TCPConn) SetReadDeadline(t time.Time) error {
if !c.ok() {
return os.EINVAL
return setReadDeadline(c.fd, t)
}
-// SetWriteDeadline implements the net.Conn SetWriteDeadline method.
+// SetWriteDeadline implements the Conn SetWriteDeadline method.
func (c *TCPConn) SetWriteDeadline(t time.Time) error {
if !c.ok() {
return os.EINVAL
// DialTCP connects to the remote address raddr on the network net,
// which must be "tcp", "tcp4", or "tcp6". If laddr is not nil, it is used
// as the local address for the connection.
-func DialTCP(net string, laddr, raddr *TCPAddr) (c *TCPConn, err error) {
+func DialTCP(net string, laddr, raddr *TCPAddr) (*TCPConn, error) {
if raddr == nil {
return nil, &OpError{"dial", net, nil, errMissingAddress}
}
- fd, e := internetSocket(net, laddr.toAddr(), raddr.toAddr(), syscall.SOCK_STREAM, 0, "dial", sockaddrToTCP)
- if e != nil {
- return nil, e
+ fd, err := internetSocket(net, laddr.toAddr(), raddr.toAddr(), syscall.SOCK_STREAM, 0, "dial", sockaddrToTCP)
+ if err != nil {
+ return nil, err
}
return newTCPConn(fd), nil
}
// Net must be "tcp", "tcp4", or "tcp6".
// If laddr has a port of 0, it means to listen on some available port.
// The caller can use l.Addr() to retrieve the chosen address.
-func ListenTCP(net string, laddr *TCPAddr) (l *TCPListener, err error) {
+func ListenTCP(net string, laddr *TCPAddr) (*TCPListener, error) {
fd, err := internetSocket(net, laddr.toAddr(), nil, syscall.SOCK_STREAM, 0, "listen", sockaddrToTCP)
if err != nil {
return nil, err
closesocket(fd.sysfd)
return nil, &OpError{"listen", net, laddr, err}
}
- l = new(TCPListener)
+ l := new(TCPListener)
l.fd = fd
return l, nil
}
plan9Conn
}
-// SetDeadline implements the net.Conn SetDeadline method.
+// SetDeadline implements the Conn SetDeadline method.
func (c *UDPConn) SetDeadline(t time.Time) error {
return os.EPLAN9
}
-// SetReadDeadline implements the net.Conn SetReadDeadline method.
+// SetReadDeadline implements the Conn SetReadDeadline method.
func (c *UDPConn) SetReadDeadline(t time.Time) error {
return os.EPLAN9
}
-// SetWriteDeadline implements the net.Conn SetWriteDeadline method.
+// SetWriteDeadline implements the Conn SetWriteDeadline method.
func (c *UDPConn) SetWriteDeadline(t time.Time) error {
return os.EPLAN9
}
return n, &UDPAddr{h.raddr, int(h.rport)}, nil
}
-// ReadFrom implements the net.PacketConn ReadFrom method.
+// ReadFrom implements the PacketConn ReadFrom method.
func (c *UDPConn) ReadFrom(b []byte) (n int, addr Addr, err error) {
if !c.ok() {
return 0, nil, os.EINVAL
return c.data.Write(buf)
}
-// WriteTo implements the net.PacketConn WriteTo method.
+// WriteTo implements the PacketConn WriteTo method.
func (c *UDPConn) WriteTo(b []byte, addr Addr) (n int, err error) {
if !c.ok() {
return 0, os.EINVAL
return &UDPConn{*l.plan9Conn()}, nil
}
-// JoinGroup joins the IP multicast group named by addr on ifi,
-// which specifies the interface to join. JoinGroup uses the
-// default multicast interface if ifi is nil.
-func (c *UDPConn) JoinGroup(ifi *Interface, addr IP) error {
- if !c.ok() {
- return os.EINVAL
- }
- return os.EPLAN9
-}
-
-// LeaveGroup exits the IP multicast group named by addr on ifi.
-func (c *UDPConn) LeaveGroup(ifi *Interface, addr IP) error {
- if !c.ok() {
- return os.EINVAL
- }
- return os.EPLAN9
+// ListenMulticastUDP listens for incoming multicast UDP packets
+// addressed to the group address gaddr on ifi, which specifies
+// the interface to join. ListenMulticastUDP uses the
+// default multicast interface if ifi is nil.
+func ListenMulticastUDP(net string, ifi *Interface, gaddr *UDPAddr) (*UDPConn, error) {
+ return nil, os.EPLAN9
}
// Implementation of the Conn interface - see Conn for documentation.
-// Read implements the net.Conn Read method.
-func (c *UDPConn) Read(b []byte) (n int, err error) {
+// Read implements the Conn Read method.
+func (c *UDPConn) Read(b []byte) (int, error) {
if !c.ok() {
return 0, os.EINVAL
}
return c.fd.Read(b)
}
-// Write implements the net.Conn Write method.
-func (c *UDPConn) Write(b []byte) (n int, err error) {
+// Write implements the Conn Write method.
+func (c *UDPConn) Write(b []byte) (int, error) {
if !c.ok() {
return 0, os.EINVAL
}
return c.fd.raddr
}
-// SetDeadline implements the net.Conn SetDeadline method.
+// SetDeadline implements the Conn SetDeadline method.
func (c *UDPConn) SetDeadline(t time.Time) error {
if !c.ok() {
return os.EINVAL
return setDeadline(c.fd, t)
}
-// SetReadDeadline implements the net.Conn SetReadDeadline method.
+// SetReadDeadline implements the Conn SetReadDeadline method.
func (c *UDPConn) SetReadDeadline(t time.Time) error {
if !c.ok() {
return os.EINVAL
return setReadDeadline(c.fd, t)
}
-// SetWriteDeadline implements the net.Conn SetWriteDeadline method.
+// SetWriteDeadline implements the Conn SetWriteDeadline method.
func (c *UDPConn) SetWriteDeadline(t time.Time) error {
if !c.ok() {
return os.EINVAL
return
}
-// ReadFrom implements the net.PacketConn ReadFrom method.
-func (c *UDPConn) ReadFrom(b []byte) (n int, addr Addr, err error) {
+// ReadFrom implements the PacketConn ReadFrom method.
+func (c *UDPConn) ReadFrom(b []byte) (int, Addr, error) {
if !c.ok() {
return 0, nil, os.EINVAL
}
return c.fd.WriteTo(b, sa)
}
-// WriteTo implements the net.PacketConn WriteTo method.
+// WriteTo implements the PacketConn WriteTo method.
func (c *UDPConn) WriteTo(b []byte, addr Addr) (int, error) {
if !c.ok() {
return 0, os.EINVAL
return c.WriteToUDP(b, a)
}
+// File returns a copy of the underlying os.File, set to blocking mode.
+// It is the caller's responsibility to close f when finished.
+// Closing c does not affect f, and closing f does not affect c.
+func (c *UDPConn) File() (f *os.File, err error) { return c.fd.dup() }
+
// DialUDP connects to the remote address raddr on the network net,
// which must be "udp", "udp4", or "udp6". If laddr is not nil, it is used
// as the local address for the connection.
-func DialUDP(net string, laddr, raddr *UDPAddr) (c *UDPConn, err error) {
+func DialUDP(net string, laddr, raddr *UDPAddr) (*UDPConn, error) {
switch net {
case "udp", "udp4", "udp6":
default:
if raddr == nil {
return nil, &OpError{"dial", net, nil, errMissingAddress}
}
- fd, e := internetSocket(net, laddr.toAddr(), raddr.toAddr(), syscall.SOCK_DGRAM, 0, "dial", sockaddrToUDP)
- if e != nil {
- return nil, e
+ fd, err := internetSocket(net, laddr.toAddr(), raddr.toAddr(), syscall.SOCK_DGRAM, 0, "dial", sockaddrToUDP)
+ if err != nil {
+ return nil, err
}
return newUDPConn(fd), nil
}
return newUDPConn(fd), nil
}
-// File returns a copy of the underlying os.File, set to blocking mode.
-// It is the caller's responsibility to close f when finished.
-// Closing c does not affect f, and closing f does not affect c.
-func (c *UDPConn) File() (f *os.File, err error) { return c.fd.dup() }
+// ListenMulticastUDP listens for incoming multicast UDP packets
+// addressed to the group address gaddr on ifi, which specifies
+// the interface to join. ListenMulticastUDP uses the
+// default multicast interface if ifi is nil.
+func ListenMulticastUDP(net string, ifi *Interface, gaddr *UDPAddr) (*UDPConn, error) {
+ switch net {
+ case "udp", "udp4", "udp6":
+ default:
+ return nil, UnknownNetworkError(net)
+ }
+ if gaddr == nil || gaddr.IP == nil {
+ return nil, &OpError{"listenmulticastudp", "udp", nil, errMissingAddress}
+ }
+ fd, err := internetSocket(net, gaddr.toAddr(), nil, syscall.SOCK_DGRAM, 0, "listen", sockaddrToUDP)
+ if err != nil {
+ return nil, err
+ }
+ c := newUDPConn(fd)
+ ip4 := gaddr.IP.To4()
+ if ip4 != nil {
+ err := listenIPv4MulticastUDP(c, ifi, ip4)
+ if err != nil {
+ c.Close()
+ return nil, err
+ }
+ } else {
+ err := listenIPv6MulticastUDP(c, ifi, gaddr.IP)
+ if err != nil {
+ c.Close()
+ return nil, err
+ }
+ }
+ return c, nil
+}
-// JoinGroup joins the IP multicast group named by addr on ifi,
-// which specifies the interface to join. JoinGroup uses the
-// default multicast interface if ifi is nil.
-func (c *UDPConn) JoinGroup(ifi *Interface, addr IP) error {
- if !c.ok() {
- return os.EINVAL
+func listenIPv4MulticastUDP(c *UDPConn, ifi *Interface, ip IP) error {
+ if ifi != nil {
+ err := setIPv4MulticastInterface(c.fd, ifi)
+ if err != nil {
+ return err
+ }
+ }
+ err := setIPv4MulticastLoopback(c.fd, false)
+ if err != nil {
+ return err
}
- setDefaultMulticastSockopts(c.fd)
- ip := addr.To4()
- if ip != nil {
- return joinIPv4GroupUDP(c, ifi, ip)
+ err = joinIPv4GroupUDP(c, ifi, ip)
+ if err != nil {
+ return err
}
- return joinIPv6GroupUDP(c, ifi, addr)
+ return nil
}
-// LeaveGroup exits the IP multicast group named by addr on ifi.
-func (c *UDPConn) LeaveGroup(ifi *Interface, addr IP) error {
- if !c.ok() {
- return os.EINVAL
+func listenIPv6MulticastUDP(c *UDPConn, ifi *Interface, ip IP) error {
+ if ifi != nil {
+ err := setIPv6MulticastInterface(c.fd, ifi)
+ if err != nil {
+ return err
+ }
}
- ip := addr.To4()
- if ip != nil {
- return leaveIPv4GroupUDP(c, ifi, ip)
+ err := setIPv6MulticastLoopback(c.fd, false)
+ if err != nil {
+ return err
+ }
+ err = joinIPv6GroupUDP(c, ifi, ip)
+ if err != nil {
+ return err
}
- return leaveIPv6GroupUDP(c, ifi, addr)
+ return nil
}
func joinIPv4GroupUDP(c *UDPConn, ifi *Interface, ip IP) error {
// Implementation of the Conn interface - see Conn for documentation.
-// Read implements the net.Conn Read method.
+// Read implements the Conn Read method.
func (c *UnixConn) Read(b []byte) (n int, err error) {
return 0, os.EPLAN9
}
-// Write implements the net.Conn Write method.
+// Write implements the Conn Write method.
func (c *UnixConn) Write(b []byte) (n int, err error) {
return 0, os.EPLAN9
}
return nil
}
-// SetDeadline implements the net.Conn SetDeadline method.
+// SetDeadline implements the Conn SetDeadline method.
func (c *UnixConn) SetDeadline(t time.Time) error {
return os.EPLAN9
}
-// SetReadDeadline implements the net.Conn SetReadDeadline method.
+// SetReadDeadline implements the Conn SetReadDeadline method.
func (c *UnixConn) SetReadDeadline(t time.Time) error {
return os.EPLAN9
}
-// SetWriteDeadline implements the net.Conn SetWriteDeadline method.
+// SetWriteDeadline implements the Conn SetWriteDeadline method.
func (c *UnixConn) SetWriteDeadline(t time.Time) error {
return os.EPLAN9
}
-// ReadFrom implements the net.PacketConn ReadFrom method.
+// ReadFrom implements the PacketConn ReadFrom method.
func (c *UnixConn) ReadFrom(b []byte) (n int, addr Addr, err error) {
err = os.EPLAN9
return
}
-// WriteTo implements the net.PacketConn WriteTo method.
+// WriteTo implements the PacketConn WriteTo method.
func (c *UnixConn) WriteTo(b []byte, addr Addr) (n int, err error) {
err = os.EPLAN9
return
// Implementation of the Conn interface - see Conn for documentation.
-// Read implements the net.Conn Read method.
+// Read implements the Conn Read method.
func (c *UnixConn) Read(b []byte) (n int, err error) {
if !c.ok() {
return 0, os.EINVAL
return c.fd.Read(b)
}
-// Write implements the net.Conn Write method.
+// Write implements the Conn Write method.
func (c *UnixConn) Write(b []byte) (n int, err error) {
if !c.ok() {
return 0, os.EINVAL
return c.fd.raddr
}
-// SetDeadline implements the net.Conn SetDeadline method.
+// SetDeadline implements the Conn SetDeadline method.
func (c *UnixConn) SetDeadline(t time.Time) error {
if !c.ok() {
return os.EINVAL
return setDeadline(c.fd, t)
}
-// SetReadDeadline implements the net.Conn SetReadDeadline method.
+// SetReadDeadline implements the Conn SetReadDeadline method.
func (c *UnixConn) SetReadDeadline(t time.Time) error {
if !c.ok() {
return os.EINVAL
return setReadDeadline(c.fd, t)
}
-// SetWriteDeadline implements the net.Conn SetWriteDeadline method.
+// SetWriteDeadline implements the Conn SetWriteDeadline method.
func (c *UnixConn) SetWriteDeadline(t time.Time) error {
if !c.ok() {
return os.EINVAL
return
}
-// ReadFrom implements the net.PacketConn ReadFrom method.
+// ReadFrom implements the PacketConn ReadFrom method.
func (c *UnixConn) ReadFrom(b []byte) (n int, addr Addr, err error) {
if !c.ok() {
return 0, nil, os.EINVAL
return c.fd.WriteTo(b, sa)
}
-// WriteTo implements the net.PacketConn WriteTo method.
+// WriteTo implements the PacketConn WriteTo method.
func (c *UnixConn) WriteTo(b []byte, addr Addr) (n int, err error) {
if !c.ok() {
return 0, os.EINVAL
// DialUnix connects to the remote address raddr on the network net,
// which must be "unix" or "unixgram". If laddr is not nil, it is used
// as the local address for the connection.
-func DialUnix(net string, laddr, raddr *UnixAddr) (c *UnixConn, err error) {
- fd, e := unixSocket(net, laddr, raddr, "dial")
- if e != nil {
- return nil, e
+func DialUnix(net string, laddr, raddr *UnixAddr) (*UnixConn, error) {
+ fd, err := unixSocket(net, laddr, raddr, "dial")
+ if err != nil {
+ return nil, err
}
return newUnixConn(fd), nil
}
// AcceptUnix accepts the next incoming call and returns the new connection
// and the remote address.
-func (l *UnixListener) AcceptUnix() (c *UnixConn, err error) {
+func (l *UnixListener) AcceptUnix() (*UnixConn, error) {
if l == nil || l.fd == nil {
return nil, os.EINVAL
}
- fd, e := l.fd.accept(sockaddrToUnix)
- if e != nil {
- return nil, e
+ fd, err := l.fd.accept(sockaddrToUnix)
+ if err != nil {
+ return nil, err
}
- c = newUnixConn(fd)
+ c := newUnixConn(fd)
return c, nil
}
// local address laddr. The returned connection c's ReadFrom
// and WriteTo methods can be used to receive and send UDP
// packets with per-packet addressing. The network net must be "unixgram".
-func ListenUnixgram(net string, laddr *UnixAddr) (c *UDPConn, err error) {
+func ListenUnixgram(net string, laddr *UnixAddr) (*UDPConn, error) {
switch net {
case "unixgram":
default:
if laddr == nil {
return nil, &OpError{"listen", net, nil, errMissingAddress}
}
- fd, e := unixSocket(net, laddr, nil, "listen")
- if e != nil {
- return nil, e
+ fd, err := unixSocket(net, laddr, nil, "listen")
+ if err != nil {
+ return nil, err
}
return newUDPConn(fd), nil
}
return url, nil
}
-// String reassembles url into a valid URL string.
-func (url *URL) String() string {
+// String reassembles the URL into a valid URL string.
+func (u *URL) String() string {
// TODO: Rewrite to use bytes.Buffer
result := ""
- if url.Scheme != "" {
- result += url.Scheme + ":"
+ if u.Scheme != "" {
+ result += u.Scheme + ":"
}
- if url.Opaque != "" {
- result += url.Opaque
+ if u.Opaque != "" {
+ result += u.Opaque
} else {
- if url.Host != "" || url.User != nil {
+ if u.Host != "" || u.User != nil {
result += "//"
- if u := url.User; u != nil {
- result += u.String() + "@"
+ if user := u.User; user != nil {
+ result += user.String() + "@"
}
- result += url.Host
+ result += u.Host
}
- result += escape(url.Path, encodePath)
+ result += escape(u.Path, encodePath)
}
- if url.RawQuery != "" {
- result += "?" + url.RawQuery
+ if u.RawQuery != "" {
+ result += "?" + u.RawQuery
}
- if url.Fragment != "" {
- result += "#" + escape(url.Fragment, encodeFragment)
+ if u.Fragment != "" {
+ result += "#" + escape(u.Fragment, encodeFragment)
}
return result
}
}
// IsAbs returns true if the URL is absolute.
-func (url *URL) IsAbs() bool {
- return url.Scheme != ""
+func (u *URL) IsAbs() bool {
+ return u.Scheme != ""
}
// Parse parses a URL in the context of a base URL. The URL in ref
t.Errorf("For %q, expected error, got none.", c.in)
continue
}
- buf := bytes.NewBuffer(nil)
- err = tmpl.Execute(buf, data)
+ var buf bytes.Buffer
+ err = tmpl.Execute(&buf, data)
if err != nil {
t.Error("unexpected Execute error: ", err)
continue
// Process stores the information about a process created by StartProcess.
type Process struct {
Pid int
- handle int
+ handle uintptr
done bool // process has been successfuly waited on
}
-func newProcess(pid, handle int) *Process {
+func newProcess(pid int, handle uintptr) *Process {
p := &Process{Pid: pid, handle: handle}
runtime.SetFinalizer(p, (*Process).Release)
return p
"runtime"
"strconv"
"strings"
+ "syscall"
"testing"
)
return
}
+ // Ensure that file descriptors have not already been leaked into
+ // our environment.
+ for fd := os.Stderr.Fd() + 1; fd <= 101; fd++ {
+ err := syscall.Close(fd)
+ if err == nil {
+ t.Logf("Something already leaked - closed fd %d", fd)
+ }
+ }
+
// Force network usage, to verify the epoll (or whatever) fd
// doesn't leak to the child,
ln, err := net.Listen("tcp", "127.0.0.1:0")
}
defer os.Exit(0)
+ // Determine which command to use to display open files.
+ ofcmd := "lsof"
+ switch runtime.GOOS {
+ case "freebsd", "netbsd", "openbsd":
+ ofcmd = "fstat"
+ }
+
args := os.Args
for len(args) > 0 {
if args[0] == "--" {
}
if got := f.Fd(); got != wantfd {
fmt.Printf("leaked parent file. fd = %d; want %d\n", got, wantfd)
- out, _ := Command("lsof", "-p", fmt.Sprint(os.Getpid())).CombinedOutput()
+ out, _ := Command(ofcmd, "-p", fmt.Sprint(os.Getpid())).CombinedOutput()
fmt.Print(string(out))
os.Exit(1)
}
f.Close()
}
}
+ // Referring to fd3 here ensures that it is not
+ // garbage collected, and therefore closed, while
+ // executing the wantfd loop above. It doesn't matter
+ // what we do with fd3 as long as we refer to it;
+ // closing it is the easy choice.
fd3.Close()
os.Stderr.Write(bs)
case "exit":
// Kill causes the Process to exit immediately.
func (p *Process) Kill() error {
- return p.Signal(SIGKILL)
+ return p.Signal(UnixSignal(syscall.SIGKILL))
}
// Exec replaces the current process with an execution of the
return nil, NewSyscallError("GetExitCodeProcess", e)
}
p.done = true
- return &Waitmsg{p.Pid, syscall.WaitStatus{s, ec}, new(syscall.Rusage)}, nil
+ return &Waitmsg{p.Pid, syscall.WaitStatus{Status: s, ExitCode: ec}, new(syscall.Rusage)}, nil
}
// Signal sends a signal to the Process.
if p.done {
return errors.New("os: process already finished")
}
- switch sig.(UnixSignal) {
- case SIGKILL:
+ if us, ok := sig.(UnixSignal); ok && us == syscall.SIGKILL {
e := syscall.TerminateProcess(syscall.Handle(p.handle), 1)
return NewSyscallError("TerminateProcess", e)
}
// Release releases any resources associated with the Process.
func (p *Process) Release() error {
- if p.handle == -1 {
+ if p.handle == uintptr(syscall.InvalidHandle) {
return EINVAL
}
e := syscall.CloseHandle(syscall.Handle(p.handle))
if e != nil {
return NewSyscallError("CloseHandle", e)
}
- p.handle = -1
+ p.handle = uintptr(syscall.InvalidHandle)
// no need for a finalizer anymore
runtime.SetFinalizer(p, nil)
return nil
if e != nil {
return nil, NewSyscallError("OpenProcess", e)
}
- return newProcess(pid, int(h)), nil
+ return newProcess(pid, uintptr(h)), nil
}
func init() {
if err == nil {
fi[i] = fip
} else {
- fi[i] = &FileStat{name: filename}
+ fi[i] = &fileStat{name: filename}
}
}
return fi, err
pwd = Getenv("PWD")
if len(pwd) > 0 && pwd[0] == '/' {
d, err := Stat(pwd)
- if err == nil && dot.(*FileStat).SameFile(d.(*FileStat)) {
+ if err == nil && SameFile(dot, d) {
return pwd, nil
}
}
// Can't stat root - no hope.
return "", err
}
- if root.(*FileStat).SameFile(dot.(*FileStat)) {
+ if SameFile(root, dot) {
return "/", nil
}
}
for _, name := range names {
d, _ := Lstat(parent + "/" + name)
- if d.(*FileStat).SameFile(dot.(*FileStat)) {
+ if SameFile(d, dot) {
pwd = "/" + name + pwd
goto Found
}
return "", err
}
fd.Close()
- if pd.(*FileStat).SameFile(root.(*FileStat)) {
+ if SameFile(pd, root) {
break
}
// Set up for next round.
if err != nil {
t.Fatalf("stat %q failed: %v", from, err)
}
- if !tostat.(*FileStat).SameFile(fromstat.(*FileStat)) {
+ if !SameFile(tostat, fromstat) {
t.Errorf("link %q, %q did not create hard link", to, from)
}
}
if err != nil {
t.Fatalf("stat %q failed: %v", from, err)
}
- if !tostat.(*FileStat).SameFile(fromstat.(*FileStat)) {
+ if !SameFile(tostat, fromstat) {
t.Errorf("symlink %q, %q did not create symlink", to, from)
}
fromstat, err = Lstat(from)
if err != nil {
t.Fatalf("Stat %s: %s", f.Name(), err)
}
- preStat := st.(*FileStat)
+ preStat := st
// Move access and modification time back a second
at := Atime(preStat)
if err != nil {
t.Fatalf("second Stat %s: %s", f.Name(), err)
}
- postStat := st.(*FileStat)
+ postStat := st
/* Plan 9:
Mtime is the time of the last change of content. Similarly, atime is set whenever the
if err != nil {
t.Fatalf("Stat %q (looking for uid/gid %d/%d): %s", path, uid, gid, err)
}
- sys := dir.(*FileStat).Sys.(*syscall.Stat_t)
+ sys := dir.Sys().(*syscall.Stat_t)
if int(sys.Uid) != uid {
t.Errorf("Stat %q: uid %d want %d", path, sys.Uid, uid)
}
if err = Chown(f.Name(), -1, gid); err != nil {
t.Fatalf("chown %s -1 %d: %s", f.Name(), gid, err)
}
- sys := dir.(*FileStat).Sys.(*syscall.Stat_t)
+ sys := dir.Sys().(*syscall.Stat_t)
checkUidGid(t, f.Name(), int(sys.Uid), gid)
// Then try all the auxiliary groups.
"time"
)
-func sameFile(fs1, fs2 *FileStat) bool {
- sys1 := fs1.Sys.(*syscall.Stat_t)
- sys2 := fs2.Sys.(*syscall.Stat_t)
- return sys1.Dev == sys2.Dev && sys1.Ino == sys2.Ino
+func sameFile(sys1, sys2 interface{}) bool {
+ stat1 := sys1.(*syscall.Stat_t)
+ stat2 := sys2.(*syscall.Stat_t)
+ return stat1.Dev == stat2.Dev && stat1.Ino == stat2.Ino
}
func fileInfoFromStat(st *syscall.Stat_t, name string) FileInfo {
- fs := &FileStat{
+ fs := &fileStat{
name: basename(name),
size: int64(st.Size),
modTime: timespecToTime(st.Mtime),
- Sys: st,
+ sys: st,
}
fs.mode = FileMode(st.Mode & 0777)
switch st.Mode & syscall.S_IFMT {
// For testing.
func atime(fi FileInfo) time.Time {
- return timespecToTime(fi.(*FileStat).Sys.(*syscall.Stat_t).Atime)
+ return timespecToTime(fi.Sys().(*syscall.Stat_t).Atime)
}
"time"
)
-func sameFile(fs1, fs2 *FileStat) bool {
- sys1 := fs1.Sys.(*syscall.Stat_t)
- sys2 := fs2.Sys.(*syscall.Stat_t)
- return sys1.Dev == sys2.Dev && sys1.Ino == sys2.Ino
+func sameFile(sys1, sys2 interface{}) bool {
+ stat1 := sys1.(*syscall.Stat_t)
+ stat2 := sys2.(*syscall.Stat_t)
+ return stat1.Dev == stat2.Dev && stat1.Ino == stat2.Ino
}
func fileInfoFromStat(st *syscall.Stat_t, name string) FileInfo {
- fs := &FileStat{
+ fs := &fileStat{
name: basename(name),
size: int64(st.Size),
modTime: timespecToTime(st.Mtim),
- Sys: st,
+ sys: st,
}
fs.mode = FileMode(st.Mode & 0777)
switch st.Mode & syscall.S_IFMT {
// For testing.
func atime(fi FileInfo) time.Time {
- return timespecToTime(fi.(*FileStat).Sys.(*syscall.Stat_t).Atim)
+ return timespecToTime(fi.Sys().(*syscall.Stat_t).Atim)
}
"time"
)
-func sameFile(fs1, fs2 *FileStat) bool {
- a := fs1.Sys.(*Dir)
- b := fs2.Sys.(*Dir)
+func sameFile(sys1, sys2 interface{}) bool {
+ a := sys1.(*Dir)
+ b := sys2.(*Dir)
return a.Qid.Path == b.Qid.Path && a.Type == b.Type && a.Dev == b.Dev
}
func fileInfoFromStat(d *Dir) FileInfo {
- fs := &FileStat{
+ fs := &fileStat{
name: d.Name,
size: int64(d.Length),
modTime: time.Unix(int64(d.Mtime), 0),
- Sys: d,
+ sys: d,
}
fs.mode = FileMode(d.Mode & 0777)
if d.Mode&syscall.DMDIR != 0 {
// For testing.
func atime(fi FileInfo) time.Time {
- return time.Unix(int64(fi.(*FileStat).Sys.(*Dir).Atime), 0)
+ return time.Unix(int64(fi.Sys().(*Dir).Atime), 0)
}
Mode() FileMode // file mode bits
ModTime() time.Time // modification time
IsDir() bool // abbreviation for Mode().IsDir()
+ Sys() interface{} // underlying data source (can return nil)
}
// A FileMode represents a file's mode and permission bits.
return m & ModePerm
}
-// A FileStat is the implementation of FileInfo returned by Stat and Lstat.
-// Clients that need access to the underlying system-specific stat information
-// can test for *os.FileStat and then consult the Sys field.
-type FileStat struct {
+// A fileStat is the implementation of FileInfo returned by Stat and Lstat.
+type fileStat struct {
name string
size int64
mode FileMode
modTime time.Time
-
- Sys interface{}
+ sys interface{}
}
-func (fs *FileStat) Name() string { return fs.name }
-func (fs *FileStat) Size() int64 { return fs.size }
-func (fs *FileStat) Mode() FileMode { return fs.mode }
-func (fs *FileStat) ModTime() time.Time { return fs.modTime }
-func (fs *FileStat) IsDir() bool { return fs.mode.IsDir() }
+func (fs *fileStat) Name() string { return fs.name }
+func (fs *fileStat) Size() int64 { return fs.size }
+func (fs *fileStat) Mode() FileMode { return fs.mode }
+func (fs *fileStat) ModTime() time.Time { return fs.modTime }
+func (fs *fileStat) IsDir() bool { return fs.mode.IsDir() }
+func (fs *fileStat) Sys() interface{} { return fs.sys }
-// SameFile reports whether fs and other describe the same file.
+// SameFile reports whether fi1 and fi2 describe the same file.
// For example, on Unix this means that the device and inode fields
// of the two underlying structures are identical; on other systems
// the decision may be based on the path names.
-func (fs *FileStat) SameFile(other *FileStat) bool {
- return sameFile(fs, other)
+// SameFile only applies to results returned by this package's Stat.
+// It returns false in other cases.
+func SameFile(fi1, fi2 FileInfo) bool {
+ fs1, ok1 := fi1.(*fileStat)
+ fs2, ok2 := fi2.(*fileStat)
+ if !ok1 || !ok2 {
+ return false
+ }
+ return sameFile(fs1.sys, fs2.sys)
}
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build !cgo windows
+// +build !cgo,!windows
package user
"runtime"
)
+func init() {
+ implemented = false
+}
+
+func Current() (*User, error) {
+ return nil, fmt.Errorf("user: Current not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
+}
+
func Lookup(username string) (*User, error) {
return nil, fmt.Errorf("user: Lookup not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
}
-func LookupId(int) (*User, error) {
+func LookupId(string) (*User, error) {
return nil, fmt.Errorf("user: LookupId not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
}
import (
"fmt"
+ "strconv"
"strings"
"syscall"
"unsafe"
return string(a[:i])
}
-func init() {
- implemented = true
+// Current returns the current user.
+func Current() (*User, error) {
+ return lookup(syscall.Getuid(), "", false)
}
// Lookup looks up a user by username. If the user cannot be found,
// LookupId looks up a user by userid. If the user cannot be found,
// the returned error is of type UnknownUserIdError.
-func LookupId(uid int) (*User, error) {
- return lookup(uid, "", false)
+func LookupId(uid string) (*User, error) {
+ i, e := strconv.Atoi(uid)
+ if e != nil {
+ return nil, e
+ }
+ return lookup(i, "", false)
}
func lookup(uid int, username string, lookupByName bool) (*User, error) {
}
}
u := &User{
- Uid: int(pwd.Pw_uid),
- Gid: int(pwd.Pw_gid),
+ Uid: strconv.Itoa(int(pwd.Pw_uid)),
+ Gid: strconv.Itoa(int(pwd.Pw_gid)),
Username: bytePtrToString((*byte)(unsafe.Pointer(pwd.Pw_name))),
Name: bytePtrToString((*byte)(unsafe.Pointer(pwd.Pw_gecos))),
HomeDir: bytePtrToString((*byte)(unsafe.Pointer(pwd.Pw_dir))),
--- /dev/null
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package user
+
+import (
+ "fmt"
+ "syscall"
+ "unsafe"
+)
+
+func lookupFullName(domain, username, domainAndUser string) (string, error) {
+ // try domain controller first
+ name, e := syscall.TranslateAccountName(domainAndUser,
+ syscall.NameSamCompatible, syscall.NameDisplay, 50)
+ if e != nil {
+ // domain lookup failed, perhaps this pc is not part of domain
+ d := syscall.StringToUTF16Ptr(domain)
+ u := syscall.StringToUTF16Ptr(username)
+ var p *byte
+ e := syscall.NetUserGetInfo(d, u, 10, &p)
+ if e != nil {
+ return "", e
+ }
+ defer syscall.NetApiBufferFree(p)
+ i := (*syscall.UserInfo10)(unsafe.Pointer(p))
+ if i.FullName == nil {
+ return "", nil
+ }
+ name = syscall.UTF16ToString((*[1024]uint16)(unsafe.Pointer(i.FullName))[:])
+ }
+ return name, nil
+}
+
+func newUser(usid *syscall.SID, gid, dir string) (*User, error) {
+ username, domain, t, e := usid.LookupAccount("")
+ if e != nil {
+ return nil, e
+ }
+ if t != syscall.SidTypeUser {
+ return nil, fmt.Errorf("user: should be user account type, not %d", t)
+ }
+ domainAndUser := domain + `\` + username
+ uid, e := usid.String()
+ if e != nil {
+ return nil, e
+ }
+ name, e := lookupFullName(domain, username, domainAndUser)
+ if e != nil {
+ return nil, e
+ }
+ u := &User{
+ Uid: uid,
+ Gid: gid,
+ Username: domainAndUser,
+ Name: name,
+ HomeDir: dir,
+ }
+ return u, nil
+}
+
+// Current returns the current user.
+func Current() (*User, error) {
+ t, e := syscall.OpenCurrentProcessToken()
+ if e != nil {
+ return nil, e
+ }
+ u, e := t.GetTokenUser()
+ if e != nil {
+ return nil, e
+ }
+ pg, e := t.GetTokenPrimaryGroup()
+ if e != nil {
+ return nil, e
+ }
+ gid, e := pg.PrimaryGroup.String()
+ if e != nil {
+ return nil, e
+ }
+ dir, e := t.GetUserProfileDirectory()
+ if e != nil {
+ return nil, e
+ }
+ return newUser(u.User.Sid, gid, dir)
+}
+
+// BUG(brainman): Lookup and LookupId functions do not set
+// Gid and HomeDir fields in the User struct returned on windows.
+
+func newUserFromSid(usid *syscall.SID) (*User, error) {
+ // TODO(brainman): do not know where to get gid and dir fields
+ gid := "unknown"
+ dir := "Unknown directory"
+ return newUser(usid, gid, dir)
+}
+
+// Lookup looks up a user by username.
+func Lookup(username string) (*User, error) {
+ sid, _, t, e := syscall.LookupSID("", username)
+ if e != nil {
+ return nil, e
+ }
+ if t != syscall.SidTypeUser {
+ return nil, fmt.Errorf("user: should be user account type, not %d", t)
+ }
+ return newUserFromSid(sid)
+}
+
+// LookupId looks up a user by userid.
+func LookupId(uid string) (*User, error) {
+ sid, e := syscall.StringToSid(uid)
+ if e != nil {
+ return nil, e
+ }
+ return newUserFromSid(sid)
+}
"strconv"
)
-var implemented = false // set to true by lookup_unix.go's init
+var implemented = true // set to false by lookup_stubs.go's init
// User represents a user account.
+//
+// On posix systems Uid and Gid contain a decimal number
+// representing uid and gid. On windows Uid and Gid
+// contain security identifier (SID) in a string format.
type User struct {
- Uid int // user id
- Gid int // primary group id
+ Uid string // user id
+ Gid string // primary group id
Username string
Name string
HomeDir string
import (
"os"
- "reflect"
"runtime"
- "syscall"
"testing"
)
return true
}
- if runtime.GOOS == "linux" || runtime.GOOS == "freebsd" || runtime.GOOS == "darwin" {
+ switch runtime.GOOS {
+ case "linux", "freebsd", "darwin", "windows":
return false
}
return true
}
-func TestLookup(t *testing.T) {
+func TestCurrent(t *testing.T) {
if skip(t) {
return
}
- // Test LookupId on the current user
- uid := syscall.Getuid()
- u, err := LookupId(uid)
+ u, err := Current()
if err != nil {
- t.Fatalf("LookupId: %v", err)
- }
- if e, g := uid, u.Uid; e != g {
- t.Errorf("expected Uid of %d; got %d", e, g)
+ t.Fatalf("Current: %v", err)
}
fi, err := os.Stat(u.HomeDir)
if err != nil || !fi.IsDir() {
- t.Errorf("expected a valid HomeDir; stat(%q): err=%v, IsDir=%v", u.HomeDir, err, fi.IsDir())
+ t.Errorf("expected a valid HomeDir; stat(%q): err=%v", u.HomeDir, err)
}
if u.Username == "" {
t.Fatalf("didn't get a username")
}
+}
+
+func compare(t *testing.T, want, got *User) {
+ if want.Uid != got.Uid {
+ t.Errorf("got Uid=%q; want %q", got.Uid, want.Uid)
+ }
+ if want.Username != got.Username {
+ t.Errorf("got Username=%q; want %q", got.Username, want.Username)
+ }
+ if want.Name != got.Name {
+ t.Errorf("got Name=%q; want %q", got.Name, want.Name)
+ }
+ // TODO(brainman): fix it once we know how.
+ if runtime.GOOS == "windows" {
+ t.Log("skipping Gid and HomeDir comparisons")
+ return
+ }
+ if want.Gid != got.Gid {
+ t.Errorf("got Gid=%q; want %q", got.Gid, want.Gid)
+ }
+ if want.HomeDir != got.HomeDir {
+ t.Errorf("got HomeDir=%q; want %q", got.HomeDir, want.HomeDir)
+ }
+}
+
+func TestLookup(t *testing.T) {
+ if skip(t) {
+ return
+ }
- // Test Lookup by username, using the username from LookupId
- un, err := Lookup(u.Username)
+ want, err := Current()
+ if err != nil {
+ t.Fatalf("Current: %v", err)
+ }
+ got, err := Lookup(want.Username)
if err != nil {
t.Fatalf("Lookup: %v", err)
}
- if !reflect.DeepEqual(u, un) {
- t.Errorf("Lookup by userid vs. name didn't match\n"+
- "LookupId(%d): %#v\n"+
- "Lookup(%q): %#v\n", uid, u, u.Username, un)
+ compare(t, want, got)
+}
+
+func TestLookupId(t *testing.T) {
+ if skip(t) {
+ return
+ }
+
+ want, err := Current()
+ if err != nil {
+ t.Fatalf("Current: %v", err)
+ }
+ got, err := LookupId(want.Uid)
+ if err != nil {
+ t.Fatalf("LookupId: %v", err)
}
+ compare(t, want, got)
}
package filepath_test
import (
+ "io/ioutil"
"os"
"path/filepath"
"reflect"
}
type EvalSymlinksTest struct {
+ // If dest is empty, the path is created; otherwise the dest is symlinked to the path.
path, dest string
}
{`c:\`, `c:\`},
}
-func testEvalSymlinks(t *testing.T, tests []EvalSymlinksTest) {
- for _, d := range tests {
- if p, err := filepath.EvalSymlinks(d.path); err != nil {
- t.Errorf("EvalSymlinks(%q) error: %v", d.path, err)
- } else if filepath.Clean(p) != filepath.Clean(d.dest) {
- t.Errorf("EvalSymlinks(%q)=%q, want %q", d.path, p, d.dest)
- }
- }
+// simpleJoin builds a file name from the directory and path.
+// It does not use Join because we don't want ".." to be evaluated.
+func simpleJoin(dir, path string) string {
+ return dir + string(filepath.Separator) + path
}
func TestEvalSymlinks(t *testing.T) {
- defer os.RemoveAll("test")
+ tmpDir, err := ioutil.TempDir("", "evalsymlink")
+ if err != nil {
+ t.Fatal("creating temp dir:", err)
+ }
+ defer os.RemoveAll(tmpDir)
+
+ // /tmp may itself be a symlink! Avoid the confusion, although
+ // it means trusting the thing we're testing.
+ tmpDir, err = filepath.EvalSymlinks(tmpDir)
+ if err != nil {
+ t.Fatal("eval symlink for tmp dir:", err)
+ }
+
+ // Create the symlink farm using relative paths.
for _, d := range EvalSymlinksTestDirs {
var err error
+ path := simpleJoin(tmpDir, d.path)
if d.dest == "" {
- err = os.Mkdir(d.path, 0755)
+ err = os.Mkdir(path, 0755)
} else {
if runtime.GOOS != "windows" {
- err = os.Symlink(d.dest, d.path)
+ err = os.Symlink(d.dest, path)
}
}
if err != nil {
t.Fatal(err)
}
}
+
var tests []EvalSymlinksTest
if runtime.GOOS == "windows" {
for _, d := range EvalSymlinksTests {
} else {
tests = EvalSymlinksTests
}
- // relative
- testEvalSymlinks(t, tests)
- // absolute
- /* These tests do not work in the gccgo test environment.
- goroot, err := filepath.EvalSymlinks(os.Getenv("GOROOT"))
- if err != nil {
- t.Fatalf("EvalSymlinks(%q) error: %v", os.Getenv("GOROOT"), err)
- }
- testroot := filepath.Join(goroot, "src", "pkg", "path", "filepath")
- for i, d := range tests {
- tests[i].path = filepath.Join(testroot, d.path)
- tests[i].dest = filepath.Join(testroot, d.dest)
- }
- if runtime.GOOS == "windows" {
- for _, d := range EvalSymlinksAbsWindowsTests {
- tests = append(tests, d)
+
+ // Evaluate the symlink farm.
+ for _, d := range tests {
+ path := simpleJoin(tmpDir, d.path)
+ dest := simpleJoin(tmpDir, d.dest)
+ if p, err := filepath.EvalSymlinks(path); err != nil {
+ t.Errorf("EvalSymlinks(%q) error: %v", d.path, err)
+ } else if filepath.Clean(p) != filepath.Clean(dest) {
+ t.Errorf("Clean(%q)=%q, want %q", path, p, dest)
}
}
- testEvalSymlinks(t, tests)
- */
}
/* These tests do not work in the gccgo test environment.
var abstests = []string{
"../AUTHORS",
"pkg/../../AUTHORS",
- "Make.pkg",
- "pkg/Makefile",
+ "Make.inc",
+ "pkg/math",
".",
- "$GOROOT/src/Make.pkg",
- "$GOROOT/src/../src/Make.pkg",
+ "$GOROOT/src/Make.inc",
+ "$GOROOT/src/../src/Make.inc",
"$GOROOT/misc/cgo",
"$GOROOT",
}
func TestAbs(t *testing.T) {
+ t.Logf("test needs to be rewritten; disabled")
+ return
+
oldwd, err := os.Getwd()
if err != nil {
t.Fatal("Getwd failed: " + err.Error())
continue
}
absinfo, err := os.Stat(abspath)
- if err != nil || !absinfo.(*os.FileStat).SameFile(info.(*os.FileStat)) {
+ if err != nil || !os.SameFile(absinfo, info) {
t.Errorf("Abs(%q)=%q, not the same file", path, abspath)
}
if !filepath.IsAbs(abspath) {
func noAlloc(t *testing.T, n int, f func(int)) {
// once to prime everything
f(-1)
- runtime.MemStats.Mallocs = 0
+ memstats := new(runtime.MemStats)
+ runtime.ReadMemStats(memstats)
+ oldmallocs := memstats.Mallocs
for j := 0; j < n; j++ {
f(j)
}
// A few allocs may happen in the testing package when GOMAXPROCS > 1, so don't
// require zero mallocs.
- if runtime.MemStats.Mallocs > 5 {
- t.Fatalf("%d mallocs after %d iterations", runtime.MemStats.Mallocs, n)
+ runtime.ReadMemStats(memstats)
+ mallocs := memstats.Mallocs - oldmallocs
+ if mallocs > 5 {
+ t.Fatalf("%d mallocs after %d iterations", mallocs, n)
}
}
f.Tag = StructTag(*p.tag)
}
f.Offset = p.offset
+
+ // NOTE(rsc): This is the only allocation in the interface
+ // presented by a reflect.Type. It would be nice to avoid,
+ // at least in the common cases, but we need to make sure
+ // that misbehaving clients of reflect cannot affect other
+ // uses of reflect. One possibility is CL 5371098, but we
+ // postponed that ugliness until there is a demonstrated
+ // need for the performance. This is issue 2320.
f.Index = []int{i}
return
}
}
var anyTable = &unicode.RangeTable{
- []unicode.Range16{{0, 1<<16 - 1, 1}},
- []unicode.Range32{{1 << 16, unicode.MaxRune, 1}},
+ R16: []unicode.Range16{{Lo: 0, Hi: 1<<16 - 1, Stride: 1}},
+ R32: []unicode.Range32{{Lo: 1 << 16, Hi: unicode.MaxRune, Stride: 1}},
}
// unicodeTable returns the unicode.RangeTable identified by name
)
func TestGcSys(t *testing.T) {
+ memstats := new(runtime.MemStats)
runtime.GC()
- runtime.UpdateMemStats()
- sys := runtime.MemStats.Sys
+ runtime.ReadMemStats(memstats)
+ sys := memstats.Sys
for i := 0; i < 1000000; i++ {
workthegc()
}
// Should only be using a few MB.
- runtime.UpdateMemStats()
- if sys > runtime.MemStats.Sys {
+ runtime.ReadMemStats(memstats)
+ if sys > memstats.Sys {
sys = 0
} else {
- sys = runtime.MemStats.Sys - sys
+ sys = memstats.Sys - sys
}
t.Logf("used %d extra bytes", sys)
if sys > 4<<20 {
import "unsafe"
-type MemStatsType struct {
+// A MemStats records statistics about the memory allocator.
+type MemStats struct {
// General statistics.
- // Not locked during update; approximate.
Alloc uint64 // bytes allocated and still in use
TotalAlloc uint64 // bytes allocated (even if freed)
Sys uint64 // bytes obtained from system (should be sum of XxxSys below)
DebugGC bool
// Per-size allocation statistics.
- // Not locked during update; approximate.
// 61 is NumSizeClasses in the C code.
BySize [61]struct {
Size uint32
var Sizeof_C_MStats uintptr // filled in by malloc.goc
+var VmemStats MemStats
+
func init() {
- if Sizeof_C_MStats != unsafe.Sizeof(MemStats) {
- println(Sizeof_C_MStats, unsafe.Sizeof(MemStats))
+ if Sizeof_C_MStats != unsafe.Sizeof(VmemStats) {
+ println(Sizeof_C_MStats, unsafe.Sizeof(VmemStats))
panic("MStats vs MemStatsType size mismatch")
}
}
-// MemStats holds statistics about the memory system.
-// The statistics may be out of date, as the information is
-// updated lazily from per-thread caches.
-// Use UpdateMemStats to bring the statistics up to date.
-var MemStats MemStatsType
-
-// UpdateMemStats brings MemStats up to date.
-func UpdateMemStats()
+// ReadMemStats populates m with memory allocator statistics.
+func ReadMemStats(m *MemStats)
// GC runs a garbage collection.
func GC()
// Print memstats information too.
// Pprof will ignore, but useful for people.
- s := &runtime.MemStats
+ s := new(runtime.MemStats)
+ runtime.ReadMemStats(s)
fmt.Fprintf(b, "\n# runtime.MemStats\n")
fmt.Fprintf(b, "# Alloc = %d\n", s.Alloc)
fmt.Fprintf(b, "# TotalAlloc = %d\n", s.TotalAlloc)
}
func numAllocations(f func()) int {
- runtime.UpdateMemStats()
- n0 := runtime.MemStats.Mallocs
+ memstats := new(runtime.MemStats)
+ runtime.ReadMemStats(memstats)
+ n0 := memstats.Mallocs
f()
- runtime.UpdateMemStats()
- return int(runtime.MemStats.Mallocs - n0)
+ runtime.ReadMemStats(memstats)
+ return int(memstats.Mallocs - n0)
}
/* This test relies on escape analysis which gccgo does not yet do.
--- /dev/null
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package strings_test
+
+import (
+ "fmt"
+ "strings"
+)
+
+// Fields are: ["foo" "bar" "baz"]
+func ExampleFields() {
+ fmt.Printf("Fields are: %q", strings.Fields(" foo bar baz "))
+}
package syscall
-import "errors"
+import (
+ "errors"
+ "sync"
+)
-func Getenv(key string) (value string, found bool) {
- if len(key) == 0 {
- return "", false
+var (
+ // envOnce guards initialization by copyenv, which populates env.
+ envOnce sync.Once
+
+ // envLock guards env.
+ envLock sync.RWMutex
+
+ // env maps from an environment variable to its value.
+ env map[string]string
+)
+
+func readenv(key string) (string, error) {
+ fd, err := Open("/env/"+key, O_RDONLY)
+ if err != nil {
+ return "", err
}
- f, e := Open("/env/"+key, O_RDONLY)
- if e != nil {
- return "", false
+ defer Close(fd)
+ l, _ := Seek(fd, 0, 2)
+ Seek(fd, 0, 0)
+ buf := make([]byte, l)
+ n, err := Read(fd, buf)
+ if err != nil {
+ return "", err
}
- defer Close(f)
+ if n > 0 && buf[n-1] == 0 {
+ buf = buf[:n-1]
+ }
+ return string(buf), nil
+}
- l, _ := Seek(f, 0, 2)
- Seek(f, 0, 0)
- buf := make([]byte, l)
- n, e := Read(f, buf)
- if e != nil {
+func writeenv(key, value string) error {
+ fd, err := Create("/env/"+key, O_RDWR, 0666)
+ if err != nil {
+ return err
+ }
+ defer Close(fd)
+ _, err = Write(fd, []byte(value))
+ return err
+}
+
+func copyenv() {
+ env = make(map[string]string)
+ fd, err := Open("/env", O_RDONLY)
+ if err != nil {
+ return
+ }
+ defer Close(fd)
+ files, err := readdirnames(fd)
+ if err != nil {
+ return
+ }
+ for _, key := range files {
+ v, err := readenv(key)
+ if err != nil {
+ continue
+ }
+ env[key] = v
+ }
+}
+
+func Getenv(key string) (value string, found bool) {
+ envOnce.Do(copyenv)
+ if len(key) == 0 {
return "", false
}
- if n > 0 && buf[n-1] == 0 {
- buf = buf[:n-1]
+ envLock.RLock()
+ defer envLock.RUnlock()
+
+ v, ok := env[key]
+ if !ok {
+ return "", false
}
- return string(buf), true
+ return v, true
}
func Setenv(key, value string) error {
+ envOnce.Do(copyenv)
if len(key) == 0 {
- return errors.New("bad arg in system call")
+ return errors.New("zero length key")
}
- f, e := Create("/env/"+key, O_RDWR, 0666)
- if e != nil {
- return e
- }
- defer Close(f)
+ envLock.Lock()
+ defer envLock.Unlock()
- _, e = Write(f, []byte(value))
+ err := writeenv(key, value)
+ if err != nil {
+ return err
+ }
+ env[key] = value
return nil
}
func Clearenv() {
+ envOnce.Do(copyenv) // prevent copyenv in Getenv/Setenv
+
+ envLock.Lock()
+ defer envLock.Unlock()
+
+ env = make(map[string]string)
RawSyscall(SYS_RFORK, RFCENVG, 0, 0)
}
func Environ() []string {
- env := make([]string, 0, 100)
-
- f, e := Open("/env", O_RDONLY)
- if e != nil {
- panic(e)
- }
- defer Close(f)
-
- names, e := readdirnames(f)
- if e != nil {
- panic(e)
- }
-
- for _, k := range names {
- if v, ok := Getenv(k); ok {
- env = append(env, k+"="+v)
- }
+ envOnce.Do(copyenv)
+ envLock.RLock()
+ defer envLock.RUnlock()
+ a := make([]string, len(env))
+ i := 0
+ for k, v := range env {
+ a[i] = k + "=" + v
+ i++
}
- return env[0:len(env)]
+ return a
}
}
// StartProcess wraps ForkExec for package os.
-func StartProcess(argv0 string, argv []string, attr *ProcAttr) (pid, handle int, err error) {
+func StartProcess(argv0 string, argv []string, attr *ProcAttr) (pid int, handle uintptr, err error) {
pid, err = forkExec(argv0, argv, attr)
return pid, 0, err
}
var zeroProcAttr ProcAttr
var zeroSysProcAttr SysProcAttr
-func StartProcess(argv0 string, argv []string, attr *ProcAttr) (pid, handle int, err error) {
+func StartProcess(argv0 string, argv []string, attr *ProcAttr) (pid int, handle uintptr, err error) {
if len(argv0) == 0 {
return 0, 0, EWINDOWS
}
}
defer CloseHandle(Handle(pi.Thread))
- return int(pi.ProcessId), int(pi.Process), nil
+ return int(pi.ProcessId), uintptr(pi.Process), nil
}
func Exec(argv0 string, argv []string, envv []string) (err error) {
--- /dev/null
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package syscall
+
+import (
+ "unsafe"
+)
+
+const (
+ STANDARD_RIGHTS_REQUIRED = 0xf0000
+ STANDARD_RIGHTS_READ = 0x20000
+ STANDARD_RIGHTS_WRITE = 0x20000
+ STANDARD_RIGHTS_EXECUTE = 0x20000
+ STANDARD_RIGHTS_ALL = 0x1F0000
+)
+
+const (
+ NameUnknown = 0
+ NameFullyQualifiedDN = 1
+ NameSamCompatible = 2
+ NameDisplay = 3
+ NameUniqueId = 6
+ NameCanonical = 7
+ NameUserPrincipal = 8
+ NameCanonicalEx = 9
+ NameServicePrincipal = 10
+ NameDnsDomain = 12
+)
+
+// This function returns 1 byte BOOLEAN rather than the 4 byte BOOL.
+// http://blogs.msdn.com/b/drnick/archive/2007/12/19/windows-and-upn-format-credentials.aspx
+//sys TranslateName(accName *uint16, accNameFormat uint32, desiredNameFormat uint32, translatedName *uint16, nSize *uint32) (err error) [failretval&0xff==0] = secur32.TranslateNameW
+//sys GetUserNameEx(nameFormat uint32, nameBuffre *uint16, nSize *uint32) (err error) [failretval&0xff==0] = secur32.GetUserNameExW
+
+// TranslateAccountName converts a directory service
+// object name from one format to another.
+func TranslateAccountName(username string, from, to uint32, initSize int) (string, error) {
+ u := StringToUTF16Ptr(username)
+ b := make([]uint16, 50)
+ n := uint32(len(b))
+ e := TranslateName(u, from, to, &b[0], &n)
+ if e != nil {
+ if e != ERROR_INSUFFICIENT_BUFFER {
+ return "", e
+ }
+ // make receive buffers of requested size and try again
+ b = make([]uint16, n)
+ e = TranslateName(u, from, to, &b[0], &n)
+ if e != nil {
+ return "", e
+ }
+ }
+ return UTF16ToString(b), nil
+}
+
+type UserInfo10 struct {
+ Name *uint16
+ Comment *uint16
+ UsrComment *uint16
+ FullName *uint16
+}
+
+//sys NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) = netapi32.NetUserGetInfo
+//sys NetApiBufferFree(buf *byte) (neterr error) = netapi32.NetApiBufferFree
+
+const (
+ // do not reorder
+ SidTypeUser = 1 << iota
+ SidTypeGroup
+ SidTypeDomain
+ SidTypeAlias
+ SidTypeWellKnownGroup
+ SidTypeDeletedAccount
+ SidTypeInvalid
+ SidTypeUnknown
+ SidTypeComputer
+ SidTypeLabel
+)
+
+//sys LookupAccountSid(systemName *uint16, sid *SID, name *uint16, nameLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) = advapi32.LookupAccountSidW
+//sys LookupAccountName(systemName *uint16, accountName *uint16, sid *SID, sidLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) = advapi32.LookupAccountNameW
+//sys ConvertSidToStringSid(sid *SID, stringSid **uint16) (err error) = advapi32.ConvertSidToStringSidW
+//sys ConvertStringSidToSid(stringSid *uint16, sid **SID) (err error) = advapi32.ConvertStringSidToSidW
+//sys GetLengthSid(sid *SID) (len uint32) = advapi32.GetLengthSid
+//sys CopySid(destSidLen uint32, destSid *SID, srcSid *SID) (err error) = advapi32.CopySid
+
+// The security identifier (SID) structure is a variable-length
+// structure used to uniquely identify users or groups.
+type SID struct{}
+
+// StringToSid converts a string-format security identifier
+// sid into a valid, functional sid.
+func StringToSid(s string) (*SID, error) {
+ var sid *SID
+ e := ConvertStringSidToSid(StringToUTF16Ptr(s), &sid)
+ if e != nil {
+ return nil, e
+ }
+ defer LocalFree((Handle)(unsafe.Pointer(sid)))
+ return sid.Copy()
+}
+
+// LookupSID retrieves a security identifier sid for the account
+// and the name of the domain on which the account was found.
+// System specifies the target computer to search.
+func LookupSID(system, account string) (sid *SID, domain string, accType uint32, err error) {
+ if len(account) == 0 {
+ return nil, "", 0, EINVAL
+ }
+ acc := StringToUTF16Ptr(account)
+ var sys *uint16
+ if len(system) > 0 {
+ sys = StringToUTF16Ptr(system)
+ }
+ db := make([]uint16, 50)
+ dn := uint32(len(db))
+ b := make([]byte, 50)
+ n := uint32(len(b))
+ sid = (*SID)(unsafe.Pointer(&b[0]))
+ e := LookupAccountName(sys, acc, sid, &n, &db[0], &dn, &accType)
+ if e != nil {
+ if e != ERROR_INSUFFICIENT_BUFFER {
+ return nil, "", 0, e
+ }
+ // make receive buffers of requested size and try again
+ b = make([]byte, n)
+ sid = (*SID)(unsafe.Pointer(&b[0]))
+ db = make([]uint16, dn)
+ e = LookupAccountName(sys, acc, sid, &n, &db[0], &dn, &accType)
+ if e != nil {
+ return nil, "", 0, e
+ }
+ }
+ return sid, UTF16ToString(db), accType, nil
+}
+
+// String converts sid to a string format
+// suitable for display, storage, or transmission.
+func (sid *SID) String() (string, error) {
+ var s *uint16
+ e := ConvertSidToStringSid(sid, &s)
+ if e != nil {
+ return "", e
+ }
+ defer LocalFree((Handle)(unsafe.Pointer(s)))
+ return UTF16ToString((*[256]uint16)(unsafe.Pointer(s))[:]), nil
+}
+
+// Len returns the length, in bytes, of a valid security identifier sid.
+func (sid *SID) Len() int {
+ return int(GetLengthSid(sid))
+}
+
+// Copy creates a duplicate of security identifier sid.
+func (sid *SID) Copy() (*SID, error) {
+ b := make([]byte, sid.Len())
+ sid2 := (*SID)(unsafe.Pointer(&b[0]))
+ e := CopySid(uint32(len(b)), sid2, sid)
+ if e != nil {
+ return nil, e
+ }
+ return sid2, nil
+}
+
+// LookupAccount retrieves the name of the account for this sid
+// and the name of the first domain on which this sid is found.
+// System specifies the target computer to search on.
+func (sid *SID) LookupAccount(system string) (account, domain string, accType uint32, err error) {
+ var sys *uint16
+ if len(system) > 0 {
+ sys = StringToUTF16Ptr(system)
+ }
+ b := make([]uint16, 50)
+ n := uint32(len(b))
+ db := make([]uint16, 50)
+ dn := uint32(len(db))
+ e := LookupAccountSid(sys, sid, &b[0], &n, &db[0], &dn, &accType)
+ if e != nil {
+ if e != ERROR_INSUFFICIENT_BUFFER {
+ return "", "", 0, e
+ }
+ // make receive buffers of requested size and try again
+ b = make([]uint16, n)
+ db = make([]uint16, dn)
+ e = LookupAccountSid(nil, sid, &b[0], &n, &db[0], &dn, &accType)
+ if e != nil {
+ return "", "", 0, e
+ }
+ }
+ return UTF16ToString(b), UTF16ToString(db), accType, nil
+}
+
+const (
+ // do not reorder
+ TOKEN_ASSIGN_PRIMARY = 1 << iota
+ TOKEN_DUPLICATE
+ TOKEN_IMPERSONATE
+ TOKEN_QUERY
+ TOKEN_QUERY_SOURCE
+ TOKEN_ADJUST_PRIVILEGES
+ TOKEN_ADJUST_GROUPS
+ TOKEN_ADJUST_DEFAULT
+
+ TOKEN_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED |
+ TOKEN_ASSIGN_PRIMARY |
+ TOKEN_DUPLICATE |
+ TOKEN_IMPERSONATE |
+ TOKEN_QUERY |
+ TOKEN_QUERY_SOURCE |
+ TOKEN_ADJUST_PRIVILEGES |
+ TOKEN_ADJUST_GROUPS |
+ TOKEN_ADJUST_DEFAULT
+ TOKEN_READ = STANDARD_RIGHTS_READ | TOKEN_QUERY
+ TOKEN_WRITE = STANDARD_RIGHTS_WRITE |
+ TOKEN_ADJUST_PRIVILEGES |
+ TOKEN_ADJUST_GROUPS |
+ TOKEN_ADJUST_DEFAULT
+ TOKEN_EXECUTE = STANDARD_RIGHTS_EXECUTE
+)
+
+const (
+ // do not reorder
+ TokenUser = 1 + iota
+ TokenGroups
+ TokenPrivileges
+ TokenOwner
+ TokenPrimaryGroup
+ TokenDefaultDacl
+ TokenSource
+ TokenType
+ TokenImpersonationLevel
+ TokenStatistics
+ TokenRestrictedSids
+ TokenSessionId
+ TokenGroupsAndPrivileges
+ TokenSessionReference
+ TokenSandBoxInert
+ TokenAuditPolicy
+ TokenOrigin
+ TokenElevationType
+ TokenLinkedToken
+ TokenElevation
+ TokenHasRestrictions
+ TokenAccessInformation
+ TokenVirtualizationAllowed
+ TokenVirtualizationEnabled
+ TokenIntegrityLevel
+ TokenUIAccess
+ TokenMandatoryPolicy
+ TokenLogonSid
+ MaxTokenInfoClass
+)
+
+type SIDAndAttributes struct {
+ Sid *SID
+ Attributes uint32
+}
+
+type Tokenuser struct {
+ User SIDAndAttributes
+}
+
+type Tokenprimarygroup struct {
+ PrimaryGroup *SID
+}
+
+//sys OpenProcessToken(h Handle, access uint32, token *Token) (err error) = advapi32.OpenProcessToken
+//sys GetTokenInformation(t Token, infoClass uint32, info *byte, infoLen uint32, returnedLen *uint32) (err error) = advapi32.GetTokenInformation
+//sys GetUserProfileDirectory(t Token, dir *uint16, dirLen *uint32) (err error) = userenv.GetUserProfileDirectoryW
+
+// An access token contains the security information for a logon session.
+// The system creates an access token when a user logs on, and every
+// process executed on behalf of the user has a copy of the token.
+// The token identifies the user, the user's groups, and the user's
+// privileges. The system uses the token to control access to securable
+// objects and to control the ability of the user to perform various
+// system-related operations on the local computer.
+type Token Handle
+
+// OpenCurrentProcessToken opens the access token
+// associated with current process.
+func OpenCurrentProcessToken() (Token, error) {
+ p, e := GetCurrentProcess()
+ if e != nil {
+ return 0, e
+ }
+ var t Token
+ e = OpenProcessToken(p, TOKEN_QUERY, &t)
+ if e != nil {
+ return 0, e
+ }
+ return t, nil
+}
+
+// Close releases access to the access token.
+func (t Token) Close() error {
+ return CloseHandle(Handle(t))
+}
+
+// getInfo retrieves a specified type of information about an access token.
+func (t Token) getInfo(class uint32, initSize int) (unsafe.Pointer, error) {
+ b := make([]byte, initSize)
+ var n uint32
+ e := GetTokenInformation(t, class, &b[0], uint32(len(b)), &n)
+ if e != nil {
+ if e != ERROR_INSUFFICIENT_BUFFER {
+ return nil, e
+ }
+ // make receive buffers of requested size and try again
+ b = make([]byte, n)
+ e = GetTokenInformation(t, class, &b[0], uint32(len(b)), &n)
+ if e != nil {
+ return nil, e
+ }
+ }
+ return unsafe.Pointer(&b[0]), nil
+}
+
+// GetTokenUser retrieves access token t user account information.
+func (t Token) GetTokenUser() (*Tokenuser, error) {
+ i, e := t.getInfo(TokenUser, 50)
+ if e != nil {
+ return nil, e
+ }
+ return (*Tokenuser)(i), nil
+}
+
+// GetTokenPrimaryGroup retrieves access token t primary group information.
+// A pointer to a SID structure representing a group that will become
+// the primary group of any objects created by a process using this access token.
+func (t Token) GetTokenPrimaryGroup() (*Tokenprimarygroup, error) {
+ i, e := t.getInfo(TokenPrimaryGroup, 50)
+ if e != nil {
+ return nil, e
+ }
+ return (*Tokenprimarygroup)(i), nil
+}
+
+// GetUserProfileDirectory retrieves the path to the
+// root directory of the access token t user's profile.
+func (t Token) GetUserProfileDirectory() (string, error) {
+ b := make([]uint16, 100)
+ n := uint32(len(b))
+ e := GetUserProfileDirectory(t, &b[0], &n)
+ if e != nil {
+ if e != ERROR_INSUFFICIENT_BUFFER {
+ return "", e
+ }
+ // make receive buffers of requested size and try again
+ b = make([]uint16, n)
+ e = GetUserProfileDirectory(t, &b[0], &n)
+ if e != nil {
+ return "", e
+ }
+ }
+ return UTF16ToString(b), nil
+}
var _zero uintptr
var dummy *byte
+
const sizeofPtr uintptr = uintptr(unsafe.Sizeof(dummy))
func (ts *Timespec) Unix() (sec int64, nsec int64) {
func (tv *Timeval) Unix() (sec int64, nsec int64) {
return int64(tv.Sec), int64(tv.Usec) * 1000
}
+
+func (ts *Timespec) Nano() int64 {
+ return int64(ts.Sec)*1e9 + int64(ts.Nsec)
+}
+
+func (tv *Timeval) Nano() int64 {
+ return int64(tv.Sec)*1e9 + int64(tv.Usec)*1000
+}
c.FailNow()
}
+// TODO(dsymonds): Consider hooking into runtime·traceback instead.
+func (c *common) stack() {
+ for i := 2; ; i++ { // Caller we care about is the user, 2 frames up
+ pc, file, line, ok := runtime.Caller(i)
+ f := runtime.FuncForPC(pc)
+ if !ok || f == nil {
+ break
+ }
+ c.Logf("%s:%d (0x%x)", file, line, pc)
+ c.Logf("\t%s", f.Name())
+ }
+}
+
// Parallel signals that this test is to be run in parallel with (and only with)
// other parallel tests in this CPU group.
func (t *T) Parallel() {
// a call to runtime.Goexit, record the duration and send
// a signal saying that the test is done.
defer func() {
+ // Consider any uncaught panic a failure.
+ if err := recover(); err != nil {
+ t.failed = true
+ t.Log(err)
+ t.stack()
+ }
+
t.duration = time.Now().Sub(t.start)
t.signal <- t
}()
// this flag.
//
// If a Writer is configured to filter HTML, HTML tags and entities
-// are simply passed through. The widths of tags and entities are
+// are passed through. The widths of tags and entities are
// assumed to be zero (tags) and one (entities) for formatting purposes.
//
// A segment of text may be escaped by bracketing it with Escape
// Flush should be called after the last call to Write to ensure
// that any data buffered in the Writer is written to output. Any
-// incomplete escape sequence at the end is simply considered
+// incomplete escape sequence at the end is considered
// complete for formatting purposes.
//
func (b *Writer) Flush() (err error) {
package unicode
-var TurkishCase = _TurkishCase
+var TurkishCase SpecialCase = _TurkishCase
var _TurkishCase = SpecialCase{
CaseRange{0x0049, 0x0049, d{0, 0x131 - 0x49, 0}},
CaseRange{0x0069, 0x0069, d{0x130 - 0x69, 0, 0x130 - 0x69}},
CaseRange{0x0131, 0x0131, d{0x49 - 0x131, 0, 0x49 - 0x131}},
}
-var AzeriCase = _TurkishCase
+var AzeriCase SpecialCase = _TurkishCase
},
}
+// The following variables are of type *RangeTable:
var (
Cc = _Cc // Cc is the set of Unicode characters in category Cc.
Cf = _Cf // Cf is the set of Unicode characters in category Cf.
},
}
+// The following variables are of type *RangeTable:
var (
Arabic = _Arabic // Arabic is the set of Unicode characters in script Arabic.
Armenian = _Armenian // Armenian is the set of Unicode characters in script Armenian.
},
}
+// The following variables are of type *RangeTable:
var (
ASCII_Hex_Digit = _ASCII_Hex_Digit // ASCII_Hex_Digit is the set of Unicode characters with property ASCII_Hex_Digit.
Bidi_Control = _Bidi_Control // Bidi_Control is the set of Unicode characters with property Bidi_Control.
};
extern MStats mstats
- __asm__ ("libgo_runtime.runtime.MemStats");
+ __asm__ ("libgo_runtime.runtime.VmemStats");
// Size classes. Computed and initialized by InitSizes.
runtime_gc(1);
}
-void runtime_UpdateMemStats(void)
- __asm__("libgo_runtime.runtime.UpdateMemStats");
+void runtime_ReadMemStats(MStats *)
+ __asm__("libgo_runtime.runtime.ReadMemStats");
void
-runtime_UpdateMemStats(void)
+runtime_ReadMemStats(MStats *stats)
{
M *m;
m->gcing = 1;
runtime_stoptheworld();
cachestats();
+ *stats = mstats;
m->gcing = 0;
runtime_semrelease(&gcsema);
runtime_starttheworld(false);
runtime_atomicstorep((void**)&runtime_allm, m);
m->id = runtime_sched.mcount++;
- m->fastrand = 0x49f6428aUL + m->id;
+ m->fastrand = 0x49f6428aUL + m->id + runtime_cputicks();
if(m->mcache == nil)
m->mcache = runtime_allocmcache();
return x;
}
+int64
+runtime_cputicks(void)
+{
+#if defined(__386__) || defined(__x86_64__)
+ uint32 low, high;
+ asm("rdtsc" : "=a" (low), "=d" (high));
+ return (int64)(((uint64)high << 32) | (uint64)low);
+#else
+ // FIXME: implement for other processors.
+ return 0;
+#endif
+}
+
struct funcline_go_return
{
String retfile;
void siginit(void);
bool __go_sigsend(int32 sig);
int64 runtime_nanotime(void);
+int64 runtime_cputicks(void);
void runtime_stoptheworld(void);
void runtime_starttheworld(bool);