go sender(c, 100000)
receiver(c, dummy, 100000)
runtime.GC()
- runtime.MemStats.Alloc = 0
+ memstats := new(runtime.MemStats)
+ runtime.ReadMemStats(memstats)
+ alloc := memstats.Alloc
// second time shouldn't increase footprint by much
go sender(c, 100000)
receiver(c, dummy, 100000)
runtime.GC()
+ runtime.ReadMemStats(memstats)
- if runtime.MemStats.Alloc > 1e5 {
- println("BUG: too much memory for 100,000 selects:", runtime.MemStats.Alloc)
+ if memstats.Alloc-alloc > 1e5 {
+ println("BUG: too much memory for 100,000 selects:", memstats.Alloc-alloc)
}
}
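For context, this hunk tracks the removal of the runtime.MemStats package variable: callers now pass a pointer to their own runtime.MemStats value into runtime.ReadMemStats to take a snapshot. A minimal sketch of the new pattern:

	var ms runtime.MemStats
	runtime.ReadMemStats(&ms)
	println("heap bytes in use:", ms.Alloc)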
func main() {
const N = 10000
- st := runtime.MemStats
+ st := new(runtime.MemStats)
+ memstats := new(runtime.MemStats)
+ runtime.ReadMemStats(st)
for i := 0; i < N; i++ {
c := make(chan int, 10)
_ = c
}
}
- runtime.UpdateMemStats()
- obj := runtime.MemStats.HeapObjects - st.HeapObjects
+ runtime.ReadMemStats(memstats)
+ obj := memstats.HeapObjects - st.HeapObjects
if obj > N/5 {
fmt.Println("too many objects left:", obj)
os.Exit(1)
var chatty = flag.Bool("v", false, "chatty")
func main() {
+ memstats := new(runtime.MemStats)
runtime.Free(runtime.Alloc(1))
- runtime.UpdateMemStats()
+ runtime.ReadMemStats(memstats)
if *chatty {
- fmt.Printf("%+v %v\n", runtime.MemStats, uint64(0))
+ fmt.Printf("%+v %v\n", memstats, uint64(0))
}
}
var allocated uint64
func bigger() {
- runtime.UpdateMemStats()
- if f := runtime.MemStats.Sys; footprint < f {
+ memstats := new(runtime.MemStats)
+ runtime.ReadMemStats(memstats)
+ if f := memstats.Sys; footprint < f {
footprint = f
if *chatty {
println("Footprint", footprint, " for ", allocated)
var chatty = flag.Bool("v", false, "chatty")
var oldsys uint64
+var memstats runtime.MemStats
func bigger() {
- runtime.UpdateMemStats()
- if st := runtime.MemStats; oldsys < st.Sys {
+ st := &memstats
+ runtime.ReadMemStats(st)
+ if oldsys < st.Sys {
oldsys = st.Sys
if *chatty {
println(st.Sys, " system bytes for ", st.Alloc, " Go bytes")
}
func main() {
- runtime.GC() // clean up garbage from init
- runtime.UpdateMemStats() // first call can do some allocations
- runtime.MemProfileRate = 0 // disable profiler
- runtime.MemStats.Alloc = 0 // ignore stacks
+ runtime.GC() // clean up garbage from init
+ runtime.ReadMemStats(&memstats) // first call can do some allocations
+ runtime.MemProfileRate = 0 // disable profiler
+ stacks := memstats.Alloc // ignore stacks
flag.Parse()
for i := 0; i < 1<<7; i++ {
for j := 1; j <= 1<<22; j <<= 1 {
if i == 0 && *chatty {
println("First alloc:", j)
}
- if a := runtime.MemStats.Alloc; a != 0 {
+ if a := memstats.Alloc - stacks; a != 0 {
println("no allocations but stats report", a, "bytes allocated")
panic("fail")
}
b := runtime.Alloc(uintptr(j))
- runtime.UpdateMemStats()
- during := runtime.MemStats.Alloc
+ runtime.ReadMemStats(&memstats)
+ during := memstats.Alloc - stacks
runtime.Free(b)
- runtime.UpdateMemStats()
- if a := runtime.MemStats.Alloc; a != 0 {
+ runtime.ReadMemStats(&memstats)
+ if a := memstats.Alloc - stacks; a != 0 {
println("allocated ", j, ": wrong stats: during=", during, " after=", a, " (want 0)")
panic("fail")
}
var longtest = flag.Bool("l", false, "long test")
var b []*byte
-var stats = &runtime.MemStats
+var stats = new(runtime.MemStats)
func OkAmount(size, n uintptr) bool {
if n < size {
if *chatty {
fmt.Printf("size=%d count=%d ...\n", size, count)
}
- runtime.UpdateMemStats()
+ runtime.ReadMemStats(stats)
n1 := stats.Alloc
for i := 0; i < count; i++ {
b[i] = runtime.Alloc(uintptr(size))
println("lookup failed: got", base, n, "for", b[i])
panic("fail")
}
- runtime.UpdateMemStats()
+ runtime.ReadMemStats(stats)
if stats.Sys > 1e9 {
println("too much memory allocated")
panic("fail")
}
}
- runtime.UpdateMemStats()
+ runtime.ReadMemStats(stats)
n2 := stats.Alloc
if *chatty {
fmt.Printf("size=%d count=%d stats=%+v\n", size, count, *stats)
panic("fail")
}
runtime.Free(b[i])
- runtime.UpdateMemStats()
+ runtime.ReadMemStats(stats)
if stats.Alloc != uint64(alloc-n) {
println("free alloc got", stats.Alloc, "expected", alloc-n, "after free of", n)
panic("fail")
}
- if runtime.MemStats.Sys > 1e9 {
+ if stats.Sys > 1e9 {
println("too much memory allocated")
panic("fail")
}
}
- runtime.UpdateMemStats()
+ runtime.ReadMemStats(stats)
n4 := stats.Alloc
if *chatty {
-1107a7d3cb07
+52ba9506bd99
The first line of this file holds the Mercurial revision number of the
last merge done from the master library sources.
$(exp_inotify_gox) \
exp/norm.gox \
exp/proxy.gox \
+ exp/signal.gox \
exp/terminal.gox \
exp/types.gox \
exp/utf8string.gox
toolexeclibgoimagedir = $(toolexeclibgodir)/image
toolexeclibgoimage_DATA = \
- image/bmp.gox \
image/color.gox \
image/draw.gox \
image/gif.gox \
image/jpeg.gox \
- image/png.gox \
- image/tiff.gox
+ image/png.gox
toolexeclibgoindexdir = $(toolexeclibgodir)/index
toolexeclibgoos_DATA = \
os/exec.gox \
- os/user.gox \
- os/signal.gox
+ os/user.gox
toolexeclibgopathdir = $(toolexeclibgodir)/path
go/crypto/cipher/cipher.go \
go/crypto/cipher/ctr.go \
go/crypto/cipher/io.go \
- go/crypto/cipher/ocfb.go \
go/crypto/cipher/ofb.go
go_crypto_des_files = \
go/crypto/des/block.go \
go/exp/proxy/per_host.go \
go/exp/proxy/proxy.go \
go/exp/proxy/socks5.go
+go_exp_signal_files = \
+ go/exp/signal/signal.go
go_exp_terminal_files = \
go/exp/terminal/terminal.go \
go/exp/terminal/util.go
go/html/template/transition.go \
go/html/template/url.go
-go_image_bmp_files = \
- go/image/bmp/reader.go
-
go_image_color_files = \
go/image/color/color.go \
go/image/color/ycbcr.go
go/image/png/reader.go \
go/image/png/writer.go
-go_image_tiff_files = \
- go/image/tiff/buffer.go \
- go/image/tiff/compress.go \
- go/image/tiff/consts.go \
- go/image/tiff/reader.go
-
go_index_suffixarray_files = \
go/index/suffixarray/qsufsort.go \
go/index/suffixarray/suffixarray.go
go/os/user/user.go \
go/os/user/lookup_unix.go
-go_os_signal_files = \
- go/os/signal/signal.go
-
go_path_filepath_files = \
go/path/filepath/match.go \
go/path/filepath/path.go \
exp/html.lo \
exp/norm.lo \
exp/proxy.lo \
+ exp/signal.lo \
exp/terminal.lo \
exp/types.lo \
exp/utf8string.lo \
net/http/httptest.lo \
net/http/httputil.lo \
net/http/pprof.lo \
- image/bmp.lo \
image/color.lo \
image/draw.lo \
image/gif.lo \
image/jpeg.lo \
image/png.lo \
- image/tiff.lo \
index/suffixarray.lo \
io/ioutil.lo \
log/syslog.lo \
old/template.lo \
$(os_lib_inotify_lo) \
os/user.lo \
- os/signal.lo \
path/filepath.lo \
regexp/syntax.lo \
net/rpc/jsonrpc.lo \
@$(CHECK)
.PHONY: exp/proxy/check
+@go_include@ exp/signal.lo.dep
+exp/signal.lo.dep: $(go_exp_signal_files)
+ $(BUILDDEPS)
+exp/signal.lo: $(go_exp_signal_files)
+ $(BUILDPACKAGE)
+exp/signal/check: $(CHECK_DEPS)
+ @$(MKDIR_P) exp/signal
+ @$(CHECK)
+.PHONY: exp/signal/check
+
@go_include@ exp/terminal.lo.dep
exp/terminal.lo.dep: $(go_exp_terminal_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: hash/fnv/check
-@go_include@ image/bmp.lo.dep
-image/bmp.lo.dep: $(go_image_bmp_files)
- $(BUILDDEPS)
-image/bmp.lo: $(go_image_bmp_files)
- $(BUILDPACKAGE)
-image/bmp/check: $(CHECK_DEPS)
- @$(MKDIR_P) image/bmp
- @$(CHECK)
-.PHONY: image/bmp/check
-
@go_include@ image/color.lo.dep
image/color.lo.dep: $(go_image_color_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: image/png/check
-@go_include@ image/tiff.lo.dep
-image/tiff.lo.dep: $(go_image_tiff_files)
- $(BUILDDEPS)
-image/tiff.lo: $(go_image_tiff_files)
- $(BUILDPACKAGE)
-image/tiff/check: $(CHECK_DEPS)
- @$(MKDIR_P) image/tiff
- @$(CHECK)
-.PHONY: image/tiff/check
-
@go_include@ index/suffixarray.lo.dep
index/suffixarray.lo.dep: $(go_index_suffixarray_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: os/user/check
-@go_include@ os/signal.lo.dep
-os/signal.lo.dep: $(go_os_signal_files)
- $(BUILDDEPS)
-os/signal.lo: $(go_os_signal_files)
- $(BUILDPACKAGE)
-os/signal/check: $(CHECK_DEPS)
- @$(MKDIR_P) os/signal
- @$(CHECK)
-.PHONY: os/signal/check
-
@go_include@ path/filepath.lo.dep
path/filepath.lo.dep: $(go_path_filepath_files)
$(BUILDDEPS)
$(BUILDGOX)
exp/proxy.gox: exp/proxy.lo
$(BUILDGOX)
+exp/signal.gox: exp/signal.lo
+ $(BUILDGOX)
exp/terminal.gox: exp/terminal.lo
$(BUILDGOX)
exp/types.gox: exp/types.lo
hash/fnv.gox: hash/fnv.lo
$(BUILDGOX)
-image/bmp.gox: image/bmp.lo
- $(BUILDGOX)
image/color.gox: image/color.lo
$(BUILDGOX)
image/draw.gox: image/draw.lo
$(BUILDGOX)
image/png.gox: image/png.lo
$(BUILDGOX)
-image/tiff.gox: image/tiff.lo
- $(BUILDGOX)
index/suffixarray.gox: index/suffixarray.lo
$(BUILDGOX)
$(BUILDGOX)
os/user.gox: os/user.lo
$(BUILDGOX)
-os/signal.gox: os/signal.lo
- $(BUILDGOX)
path/filepath.gox: path/filepath.lo
$(BUILDGOX)
$(exp_inotify_check) \
exp/norm/check \
exp/proxy/check \
+ exp/signal/check \
exp/terminal/check \
exp/utf8string/check \
html/template/check \
image/draw/check \
image/jpeg/check \
image/png/check \
- image/tiff/check \
index/suffixarray/check \
io/ioutil/check \
log/syslog/check \
old/template/check \
os/exec/check \
os/user/check \
- os/signal/check \
path/filepath/check \
regexp/syntax/check \
sync/atomic/check \
encoding/base32.lo encoding/base64.lo encoding/binary.lo \
encoding/csv.lo encoding/gob.lo encoding/hex.lo \
encoding/json.lo encoding/pem.lo encoding/xml.lo exp/ebnf.lo \
- exp/html.lo exp/norm.lo exp/proxy.lo exp/terminal.lo \
- exp/types.lo exp/utf8string.lo html/template.lo go/ast.lo \
- go/build.lo go/doc.lo go/parser.lo go/printer.lo go/scanner.lo \
- go/token.lo hash/adler32.lo hash/crc32.lo hash/crc64.lo \
- hash/fnv.lo net/http/cgi.lo net/http/fcgi.lo \
- net/http/httptest.lo net/http/httputil.lo net/http/pprof.lo \
- image/bmp.lo image/color.lo image/draw.lo image/gif.lo \
- image/jpeg.lo image/png.lo image/tiff.lo index/suffixarray.lo \
- io/ioutil.lo log/syslog.lo log/syslog/syslog_c.lo math/big.lo \
- math/cmplx.lo math/rand.lo mime/mime.lo mime/multipart.lo \
- net/http.lo net/mail.lo net/rpc.lo net/smtp.lo \
- net/textproto.lo net/url.lo old/netchan.lo old/regexp.lo \
- old/template.lo $(am__DEPENDENCIES_1) os/user.lo os/signal.lo \
- path/filepath.lo regexp/syntax.lo net/rpc/jsonrpc.lo \
- runtime/debug.lo runtime/pprof.lo sync/atomic.lo \
- sync/atomic_c.lo syscall/syscall.lo syscall/errno.lo \
- syscall/wait.lo text/scanner.lo text/tabwriter.lo \
- text/template.lo text/template/parse.lo testing/testing.lo \
- testing/iotest.lo testing/quick.lo testing/script.lo \
- unicode/utf16.lo unicode/utf8.lo
+ exp/html.lo exp/norm.lo exp/proxy.lo exp/signal.lo \
+ exp/terminal.lo exp/types.lo exp/utf8string.lo \
+ html/template.lo go/ast.lo go/build.lo go/doc.lo go/parser.lo \
+ go/printer.lo go/scanner.lo go/token.lo hash/adler32.lo \
+ hash/crc32.lo hash/crc64.lo hash/fnv.lo net/http/cgi.lo \
+ net/http/fcgi.lo net/http/httptest.lo net/http/httputil.lo \
+ net/http/pprof.lo image/color.lo image/draw.lo image/gif.lo \
+ image/jpeg.lo image/png.lo index/suffixarray.lo io/ioutil.lo \
+ log/syslog.lo log/syslog/syslog_c.lo math/big.lo math/cmplx.lo \
+ math/rand.lo mime/mime.lo mime/multipart.lo net/http.lo \
+ net/mail.lo net/rpc.lo net/smtp.lo net/textproto.lo net/url.lo \
+ old/netchan.lo old/regexp.lo old/template.lo \
+ $(am__DEPENDENCIES_1) os/user.lo path/filepath.lo \
+ regexp/syntax.lo net/rpc/jsonrpc.lo runtime/debug.lo \
+ runtime/pprof.lo sync/atomic.lo sync/atomic_c.lo \
+ syscall/syscall.lo syscall/errno.lo syscall/wait.lo \
+ text/scanner.lo text/tabwriter.lo text/template.lo \
+ text/template/parse.lo testing/testing.lo testing/iotest.lo \
+ testing/quick.lo testing/script.lo unicode/utf16.lo \
+ unicode/utf8.lo
libgo_la_DEPENDENCIES = $(am__DEPENDENCIES_2) $(am__DEPENDENCIES_1) \
$(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \
$(am__DEPENDENCIES_1)
$(exp_inotify_gox) \
exp/norm.gox \
exp/proxy.gox \
+ exp/signal.gox \
exp/terminal.gox \
exp/types.gox \
exp/utf8string.gox
toolexeclibgoimagedir = $(toolexeclibgodir)/image
toolexeclibgoimage_DATA = \
- image/bmp.gox \
image/color.gox \
image/draw.gox \
image/gif.gox \
image/jpeg.gox \
- image/png.gox \
- image/tiff.gox
+ image/png.gox
toolexeclibgoindexdir = $(toolexeclibgodir)/index
toolexeclibgoindex_DATA = \
toolexeclibgoosdir = $(toolexeclibgodir)/os
toolexeclibgoos_DATA = \
os/exec.gox \
- os/user.gox \
- os/signal.gox
+ os/user.gox
toolexeclibgopathdir = $(toolexeclibgodir)/path
toolexeclibgopath_DATA = \
go/crypto/cipher/cipher.go \
go/crypto/cipher/ctr.go \
go/crypto/cipher/io.go \
- go/crypto/cipher/ocfb.go \
go/crypto/cipher/ofb.go
go_crypto_des_files = \
go/exp/proxy/proxy.go \
go/exp/proxy/socks5.go
+go_exp_signal_files = \
+ go/exp/signal/signal.go
+
go_exp_terminal_files = \
go/exp/terminal/terminal.go \
go/exp/terminal/util.go
go/html/template/transition.go \
go/html/template/url.go
-go_image_bmp_files = \
- go/image/bmp/reader.go
-
go_image_color_files = \
go/image/color/color.go \
go/image/color/ycbcr.go
go/image/png/reader.go \
go/image/png/writer.go
-go_image_tiff_files = \
- go/image/tiff/buffer.go \
- go/image/tiff/compress.go \
- go/image/tiff/consts.go \
- go/image/tiff/reader.go
-
go_index_suffixarray_files = \
go/index/suffixarray/qsufsort.go \
go/index/suffixarray/suffixarray.go
go/os/user/user.go \
go/os/user/lookup_unix.go
-go_os_signal_files = \
- go/os/signal/signal.go
-
go_path_filepath_files = \
go/path/filepath/match.go \
go/path/filepath/path.go \
exp/html.lo \
exp/norm.lo \
exp/proxy.lo \
+ exp/signal.lo \
exp/terminal.lo \
exp/types.lo \
exp/utf8string.lo \
net/http/httptest.lo \
net/http/httputil.lo \
net/http/pprof.lo \
- image/bmp.lo \
image/color.lo \
image/draw.lo \
image/gif.lo \
image/jpeg.lo \
image/png.lo \
- image/tiff.lo \
index/suffixarray.lo \
io/ioutil.lo \
log/syslog.lo \
old/template.lo \
$(os_lib_inotify_lo) \
os/user.lo \
- os/signal.lo \
path/filepath.lo \
regexp/syntax.lo \
net/rpc/jsonrpc.lo \
$(exp_inotify_check) \
exp/norm/check \
exp/proxy/check \
+ exp/signal/check \
exp/terminal/check \
exp/utf8string/check \
html/template/check \
image/draw/check \
image/jpeg/check \
image/png/check \
- image/tiff/check \
index/suffixarray/check \
io/ioutil/check \
log/syslog/check \
old/template/check \
os/exec/check \
os/user/check \
- os/signal/check \
path/filepath/check \
regexp/syntax/check \
sync/atomic/check \
@$(CHECK)
.PHONY: exp/proxy/check
+@go_include@ exp/signal.lo.dep
+exp/signal.lo.dep: $(go_exp_signal_files)
+ $(BUILDDEPS)
+exp/signal.lo: $(go_exp_signal_files)
+ $(BUILDPACKAGE)
+exp/signal/check: $(CHECK_DEPS)
+ @$(MKDIR_P) exp/signal
+ @$(CHECK)
+.PHONY: exp/signal/check
+
@go_include@ exp/terminal.lo.dep
exp/terminal.lo.dep: $(go_exp_terminal_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: hash/fnv/check
-@go_include@ image/bmp.lo.dep
-image/bmp.lo.dep: $(go_image_bmp_files)
- $(BUILDDEPS)
-image/bmp.lo: $(go_image_bmp_files)
- $(BUILDPACKAGE)
-image/bmp/check: $(CHECK_DEPS)
- @$(MKDIR_P) image/bmp
- @$(CHECK)
-.PHONY: image/bmp/check
-
@go_include@ image/color.lo.dep
image/color.lo.dep: $(go_image_color_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: image/png/check
-@go_include@ image/tiff.lo.dep
-image/tiff.lo.dep: $(go_image_tiff_files)
- $(BUILDDEPS)
-image/tiff.lo: $(go_image_tiff_files)
- $(BUILDPACKAGE)
-image/tiff/check: $(CHECK_DEPS)
- @$(MKDIR_P) image/tiff
- @$(CHECK)
-.PHONY: image/tiff/check
-
@go_include@ index/suffixarray.lo.dep
index/suffixarray.lo.dep: $(go_index_suffixarray_files)
$(BUILDDEPS)
@$(CHECK)
.PHONY: os/user/check
-@go_include@ os/signal.lo.dep
-os/signal.lo.dep: $(go_os_signal_files)
- $(BUILDDEPS)
-os/signal.lo: $(go_os_signal_files)
- $(BUILDPACKAGE)
-os/signal/check: $(CHECK_DEPS)
- @$(MKDIR_P) os/signal
- @$(CHECK)
-.PHONY: os/signal/check
-
@go_include@ path/filepath.lo.dep
path/filepath.lo.dep: $(go_path_filepath_files)
$(BUILDDEPS)
$(BUILDGOX)
exp/proxy.gox: exp/proxy.lo
$(BUILDGOX)
+exp/signal.gox: exp/signal.lo
+ $(BUILDGOX)
exp/terminal.gox: exp/terminal.lo
$(BUILDGOX)
exp/types.gox: exp/types.lo
hash/fnv.gox: hash/fnv.lo
$(BUILDGOX)
-image/bmp.gox: image/bmp.lo
- $(BUILDGOX)
image/color.gox: image/color.lo
$(BUILDGOX)
image/draw.gox: image/draw.lo
$(BUILDGOX)
image/png.gox: image/png.lo
$(BUILDGOX)
-image/tiff.gox: image/tiff.lo
- $(BUILDGOX)
index/suffixarray.gox: index/suffixarray.lo
$(BUILDGOX)
$(BUILDGOX)
os/user.gox: os/user.lo
$(BUILDGOX)
-os/signal.gox: os/signal.lo
- $(BUILDGOX)
path/filepath.gox: path/filepath.lo
$(BUILDGOX)
}
// Open returns a ReadCloser that provides access to the File's contents.
-// It is safe to Open and Read from files concurrently.
+// Multiple files may be read concurrently.
func (f *File) Open() (rc io.ReadCloser, err error) {
bodyOffset, err := f.findBodyOffset()
if err != nil {
},
},
},
- {Name: "readme.zip"},
- {Name: "readme.notzip", Error: ErrFormat},
+ {
+ Name: "symlink.zip",
+ File: []ZipTestFile{
+ {
+ Name: "symlink",
+ Content: []byte("../target"),
+ Mode: 0777 | os.ModeSymlink,
+ },
+ },
+ },
+ {
+ Name: "readme.zip",
+ },
+ {
+ Name: "readme.notzip",
+ Error: ErrFormat,
+ },
{
Name: "dd.zip",
File: []ZipTestFile{
}
// FileInfo returns an os.FileInfo for the FileHeader.
-func (fh *FileHeader) FileInfo() os.FileInfo {
- return headerFileInfo{fh}
+func (h *FileHeader) FileInfo() os.FileInfo {
+ return headerFileInfo{h}
}
// headerFileInfo implements os.FileInfo.
func (fi headerFileInfo) IsDir() bool { return fi.Mode().IsDir() }
func (fi headerFileInfo) ModTime() time.Time { return fi.fh.ModTime() }
func (fi headerFileInfo) Mode() os.FileMode { return fi.fh.Mode() }
+func (fi headerFileInfo) Sys() interface{} { return fi.fh }
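The added Sys method makes the original header reachable from the os.FileInfo view; a small sketch, assuming fh is a *zip.FileHeader:

	fi := fh.FileInfo()
	if orig, ok := fi.Sys().(*zip.FileHeader); ok {
		_ = orig // the same *FileHeader the FileInfo was built from
	}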
// FileInfoHeader creates a partially-populated FileHeader from an
// os.FileInfo.
h.ModifiedDate, h.ModifiedTime = timeToMsDosTime(t)
}
-// traditional names for Unix constants
const (
- s_IFMT = 0xf000
- s_IFDIR = 0x4000
- s_IFREG = 0x8000
- s_ISUID = 0x800
- s_ISGID = 0x400
+ // Unix constants. The specification doesn't mention them,
+ // but these seem to be the values agreed on by tools.
+ s_IFMT = 0xf000
+ s_IFSOCK = 0xc000
+ s_IFLNK = 0xa000
+ s_IFREG = 0x8000
+ s_IFBLK = 0x6000
+ s_IFDIR = 0x4000
+ s_IFCHR = 0x2000
+ s_IFIFO = 0x1000
+ s_ISUID = 0x800
+ s_ISGID = 0x400
+ s_ISVTX = 0x200
msdosDir = 0x10
msdosReadOnly = 0x01
func fileModeToUnixMode(mode os.FileMode) uint32 {
var m uint32
- if mode&os.ModeDir != 0 {
- m = s_IFDIR
- } else {
+ switch mode & os.ModeType {
+ default:
m = s_IFREG
+ case os.ModeDir:
+ m = s_IFDIR
+ case os.ModeSymlink:
+ m = s_IFLNK
+ case os.ModeNamedPipe:
+ m = s_IFIFO
+ case os.ModeSocket:
+ m = s_IFSOCK
+ case os.ModeDevice:
+ if mode&os.ModeCharDevice != 0 {
+ m = s_IFCHR
+ } else {
+ m = s_IFBLK
+ }
}
if mode&os.ModeSetuid != 0 {
m |= s_ISUID
if mode&os.ModeSetgid != 0 {
m |= s_ISGID
}
+ if mode&os.ModeSticky != 0 {
+ m |= s_ISVTX
+ }
return m | uint32(mode&0777)
}
func unixModeToFileMode(m uint32) os.FileMode {
- var mode os.FileMode
- if m&s_IFMT == s_IFDIR {
+ mode := os.FileMode(m & 0777)
+ switch m & s_IFMT {
+ case s_IFBLK:
+ mode |= os.ModeDevice
+ case s_IFCHR:
+ mode |= os.ModeDevice | os.ModeCharDevice
+ case s_IFDIR:
mode |= os.ModeDir
+ case s_IFIFO:
+ mode |= os.ModeNamedPipe
+ case s_IFLNK:
+ mode |= os.ModeSymlink
+ case s_IFREG:
+ // nothing to do
+ case s_IFSOCK:
+ mode |= os.ModeSocket
}
if m&s_ISGID != 0 {
mode |= os.ModeSetgid
if m&s_ISUID != 0 {
mode |= os.ModeSetuid
}
- return mode | os.FileMode(m&0777)
+ if m&s_ISVTX != 0 {
+ mode |= os.ModeSticky
+ }
+ return mode
}
// Writer implements a zip file writer.
type Writer struct {
- *countWriter
+ countWriter
dir []*header
last *fileWriter
closed bool
// NewWriter returns a new Writer writing a zip file to w.
func NewWriter(w io.Writer) *Writer {
- return &Writer{countWriter: &countWriter{w: bufio.NewWriter(w)}}
+ return &Writer{countWriter: countWriter{w: bufio.NewWriter(w)}}
}
// Close finishes writing the zip file by writing the central directory.
Mode: 0755 | os.ModeSetgid,
},
{
- Name: "setgid",
- Data: []byte("setgid file"),
+ Name: "symlink",
+ Data: []byte("../link/target"),
Method: Deflate,
- Mode: 0755 | os.ModeSetgid,
+ Mode: 0755 | os.ModeSymlink,
},
}
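The new test entry stores the link target as the entry body, with os.ModeSymlink set on the header mode. A hedged sketch of writing such an entry through the public API (zw is an assumed *zip.Writer):

	hdr := &zip.FileHeader{Name: "symlink", Method: zip.Deflate}
	hdr.SetMode(0755 | os.ModeSymlink)
	if w, err := zw.CreateHeader(hdr); err == nil {
		w.Write([]byte("../link/target")) // the link target is the entry's content
	}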
if !reflect.DeepEqual(fh, fh2) {
t.Errorf("mismatch\n input=%#v\noutput=%#v\nerr=%v", fh, fh2, err)
}
+	if sysfh, ok := fi.Sys().(*FileHeader); !ok || sysfh != fh {
+ t.Errorf("Sys didn't return original *FileHeader")
+ }
}
import (
"bytes"
+ "errors"
"io"
- "strconv"
"unicode/utf8"
)
defaultBufSize = 4096
)
-// Errors introduced by this package.
-type Error struct {
- ErrorString string
-}
-
-func (err *Error) Error() string { return err.ErrorString }
-
var (
- ErrInvalidUnreadByte error = &Error{"bufio: invalid use of UnreadByte"}
- ErrInvalidUnreadRune error = &Error{"bufio: invalid use of UnreadRune"}
- ErrBufferFull error = &Error{"bufio: buffer full"}
- ErrNegativeCount error = &Error{"bufio: negative count"}
- errInternal error = &Error{"bufio: internal error"}
+ ErrInvalidUnreadByte = errors.New("bufio: invalid use of UnreadByte")
+ ErrInvalidUnreadRune = errors.New("bufio: invalid use of UnreadRune")
+ ErrBufferFull = errors.New("bufio: buffer full")
+ ErrNegativeCount = errors.New("bufio: negative count")
+ errInternal = errors.New("bufio: internal error")
)
-// BufSizeError is the error representing an invalid buffer size.
-type BufSizeError int
-
-func (b BufSizeError) Error() string {
- return "bufio: bad buffer size " + strconv.Itoa(int(b))
-}
-
// Buffered input.
// Reader implements buffering for an io.Reader object.
const minReadBufferSize = 16
-// NewReaderSize creates a new Reader whose buffer has the specified size,
-// which must be at least 16 bytes. If the argument io.Reader is already a
-// Reader with large enough size, it returns the underlying Reader.
-// It returns the Reader and any error.
-func NewReaderSize(rd io.Reader, size int) (*Reader, error) {
- if size < minReadBufferSize {
- return nil, BufSizeError(size)
- }
+// NewReaderSize returns a new Reader whose buffer has at least the specified
+// size. If the argument io.Reader is already a Reader with large enough
+// size, it returns the underlying Reader.
+func NewReaderSize(rd io.Reader, size int) *Reader {
// Is it already a Reader?
b, ok := rd.(*Reader)
if ok && len(b.buf) >= size {
- return b, nil
+ return b
+ }
+ if size < minReadBufferSize {
+ size = minReadBufferSize
+ }
+ return &Reader{
+ buf: make([]byte, size),
+ rd: rd,
+ lastByte: -1,
+ lastRuneSize: -1,
}
- b = new(Reader)
- b.buf = make([]byte, size)
- b.rd = rd
- b.lastByte = -1
- b.lastRuneSize = -1
- return b, nil
}
// NewReader returns a new Reader whose buffer has the default size.
func NewReader(rd io.Reader) *Reader {
- b, err := NewReaderSize(rd, defaultBufSize)
- if err != nil {
- // cannot happen - defaultBufSize is a valid size
- panic(err)
- }
- return b
+ return NewReaderSize(rd, defaultBufSize)
}
// fill reads a new chunk into the buffer.
}
// ReadRune reads a single UTF-8 encoded Unicode character and returns the
-// rune and its size in bytes.
+// rune and its size in bytes. If the encoded rune is invalid, it consumes one byte
+// and returns unicode.ReplacementChar (U+FFFD) with a size of 1.
func (b *Reader) ReadRune() (r rune, size int, err error) {
for b.r+utf8.UTFMax > b.w && !utf8.FullRune(b.buf[b.r:b.w]) && b.err == nil {
b.fill()
// buffered output
// Writer implements buffering for an io.Writer object.
+// If an error occurs writing to a Writer, no more data will be
+// accepted and all subsequent writes will return the error.
type Writer struct {
err error
buf []byte
wr io.Writer
}
-// NewWriterSize creates a new Writer whose buffer has the specified size,
-// which must be greater than zero. If the argument io.Writer is already a
-// Writer with large enough size, it returns the underlying Writer.
-// It returns the Writer and any error.
-func NewWriterSize(wr io.Writer, size int) (*Writer, error) {
- if size <= 0 {
- return nil, BufSizeError(size)
- }
+// NewWriterSize returns a new Writer whose buffer has at least the specified
+// size. If the argument io.Writer is already a Writer with large enough
+// size, it returns the underlying Writer.
+func NewWriterSize(wr io.Writer, size int) *Writer {
// Is it already a Writer?
b, ok := wr.(*Writer)
if ok && len(b.buf) >= size {
- return b, nil
+ return b
+ }
+ if size <= 0 {
+ size = defaultBufSize
}
b = new(Writer)
b.buf = make([]byte, size)
b.wr = wr
- return b, nil
+ return b
}
// NewWriter returns a new Writer whose buffer has the default size.
func NewWriter(wr io.Writer) *Writer {
- b, err := NewWriterSize(wr, defaultBufSize)
- if err != nil {
- // cannot happen - defaultBufSize is valid size
- panic(err)
- }
- return b
+ return NewWriterSize(wr, defaultBufSize)
}
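A hedged summary of the new constructor behavior: the Size variants no longer return an error, and out-of-range sizes are adjusted instead of rejected. For example:

	r := bufio.NewReaderSize(os.Stdin, 1)  // grows to the 16-byte minimum
	w := bufio.NewWriterSize(os.Stdout, 0) // non-positive sizes use the default
	line, _, _ := r.ReadLine()
	w.Write(line)
	w.Flush()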
// Flush writes any buffered data to the underlying io.Writer.
bufreader := bufreaders[j]
bufsize := bufsizes[k]
read := readmaker.fn(bytes.NewBufferString(text))
- buf, _ := NewReaderSize(read, bufsize)
+ buf := NewReaderSize(read, bufsize)
s := bufreader.fn(buf)
if s != text {
t.Errorf("reader=%s fn=%s bufsize=%d want=%q got=%q",
// and that the data is correct.
w.Reset()
- buf, e := NewWriterSize(w, bs)
+ buf := NewWriterSize(w, bs)
context := fmt.Sprintf("nwrite=%d bufsize=%d", nwrite, bs)
- if e != nil {
- t.Errorf("%s: NewWriterSize %d: %v", context, bs, e)
- continue
- }
n, e1 := buf.Write(data[0:nwrite])
if e1 != nil || n != nwrite {
t.Errorf("%s: buf.Write %d = %d, %v", context, nwrite, n, e1)
continue
}
- if e = buf.Flush(); e != nil {
+ if e := buf.Flush(); e != nil {
t.Errorf("%s: buf.Flush = %v", context, e)
}
func TestNewReaderSizeIdempotent(t *testing.T) {
const BufSize = 1000
- b, err := NewReaderSize(bytes.NewBufferString("hello world"), BufSize)
- if err != nil {
- t.Error("NewReaderSize create fail", err)
- }
+ b := NewReaderSize(bytes.NewBufferString("hello world"), BufSize)
// Does it recognize itself?
- b1, err2 := NewReaderSize(b, BufSize)
- if err2 != nil {
- t.Error("NewReaderSize #2 create fail", err2)
- }
+ b1 := NewReaderSize(b, BufSize)
if b1 != b {
t.Error("NewReaderSize did not detect underlying Reader")
}
// Does it wrap if existing buffer is too small?
- b2, err3 := NewReaderSize(b, 2*BufSize)
- if err3 != nil {
- t.Error("NewReaderSize #3 create fail", err3)
- }
+ b2 := NewReaderSize(b, 2*BufSize)
if b2 == b {
t.Error("NewReaderSize did not enlarge buffer")
}
func TestNewWriterSizeIdempotent(t *testing.T) {
const BufSize = 1000
- b, err := NewWriterSize(new(bytes.Buffer), BufSize)
- if err != nil {
- t.Error("NewWriterSize create fail", err)
- }
+ b := NewWriterSize(new(bytes.Buffer), BufSize)
// Does it recognize itself?
- b1, err2 := NewWriterSize(b, BufSize)
- if err2 != nil {
- t.Error("NewWriterSize #2 create fail", err2)
- }
+ b1 := NewWriterSize(b, BufSize)
if b1 != b {
t.Error("NewWriterSize did not detect underlying Writer")
}
// Does it wrap if existing buffer is too small?
- b2, err3 := NewWriterSize(b, 2*BufSize)
- if err3 != nil {
- t.Error("NewWriterSize #3 create fail", err3)
- }
+ b2 := NewWriterSize(b, 2*BufSize)
if b2 == b {
t.Error("NewWriterSize did not enlarge buffer")
}
func TestWriteString(t *testing.T) {
const BufSize = 8
buf := new(bytes.Buffer)
- b, err := NewWriterSize(buf, BufSize)
- if err != nil {
- t.Error("NewWriterSize create fail", err)
- }
+ b := NewWriterSize(buf, BufSize)
b.WriteString("0") // easy
b.WriteString("123456") // still easy
b.WriteString("7890") // easy after flush
func TestBufferFull(t *testing.T) {
const longString = "And now, hello, world! It is the time for all good men to come to the aid of their party"
- buf, err := NewReaderSize(strings.NewReader(longString), minReadBufferSize)
- if err != nil {
- t.Fatal("NewReaderSize:", err)
- }
+ buf := NewReaderSize(strings.NewReader(longString), minReadBufferSize)
line, err := buf.ReadSlice('!')
if string(line) != "And now, hello, " || err != ErrBufferFull {
t.Errorf("first ReadSlice(,) = %q, %v", line, err)
func TestPeek(t *testing.T) {
p := make([]byte, 10)
// string is 16 (minReadBufferSize) long.
- buf, _ := NewReaderSize(strings.NewReader("abcdefghijklmnop"), minReadBufferSize)
+ buf := NewReaderSize(strings.NewReader("abcdefghijklmnop"), minReadBufferSize)
if s, err := buf.Peek(1); string(s) != "a" || err != nil {
t.Fatalf("want %q got %q, err=%v", "a", string(s), err)
}
for stride := 1; stride < 2; stride++ {
done := 0
reader := testReader{input, stride}
- l, _ := NewReaderSize(&reader, len(input)+1)
+ l := NewReaderSize(&reader, len(input)+1)
for {
line, isPrefix, err := l.ReadLine()
if len(line) > 0 && err != nil {
data = append(data, '0'+byte(i%10))
}
buf := bytes.NewBuffer(data)
- l, _ := NewReaderSize(buf, minReadBufferSize)
+ l := NewReaderSize(buf, minReadBufferSize)
line, isPrefix, err := l.ReadLine()
if !isPrefix || !bytes.Equal(line, data[:minReadBufferSize]) || err != nil {
t.Errorf("bad result for first line: got %q want %q %v", line, data[:minReadBufferSize], err)
inbuf := bytes.NewBuffer([]byte(line1 + "\n" + restData))
outbuf := new(bytes.Buffer)
maxLineLength := len(line1) + len(restData)/2
- l, _ := NewReaderSize(inbuf, maxLineLength)
+ l := NewReaderSize(inbuf, maxLineLength)
line, isPrefix, err := l.ReadLine()
if isPrefix || err != nil || string(line) != line1 {
t.Errorf("bad result for first line: isPrefix=%v err=%v line=%q", isPrefix, err, string(line))
}
func TestReadEmptyBuffer(t *testing.T) {
- l, _ := NewReaderSize(bytes.NewBuffer(nil), minReadBufferSize)
+ l := NewReaderSize(new(bytes.Buffer), minReadBufferSize)
line, isPrefix, err := l.ReadLine()
if err != io.EOF {
t.Errorf("expected EOF from ReadLine, got '%s' %t %s", line, isPrefix, err)
}
func TestLinesAfterRead(t *testing.T) {
- l, _ := NewReaderSize(bytes.NewBuffer([]byte("foo")), minReadBufferSize)
+ l := NewReaderSize(bytes.NewBuffer([]byte("foo")), minReadBufferSize)
_, err := ioutil.ReadAll(l)
if err != nil {
t.Error(err)
}
func testReadLineNewlines(t *testing.T, input string, expect []readLineResult) {
- b, err := NewReaderSize(strings.NewReader(input), minReadBufferSize)
- if err != nil {
- t.Fatal(err)
- }
+ b := NewReaderSize(strings.NewReader(input), minReadBufferSize)
for i, e := range expect {
line, isPrefix, err := b.ReadLine()
if bytes.Compare(line, e.line) != 0 {
func (b *Buffer) Len() int { return len(b.buf) - b.off }
// Truncate discards all but the first n unread bytes from the buffer.
-// It is an error to call b.Truncate(n) with n > b.Len().
+// It panics if n is negative or greater than the length of the buffer.
func (b *Buffer) Truncate(n int) {
b.lastRead = opInvalid
- if n == 0 {
+ switch {
+ case n < 0 || n > b.Len():
+ panic("bytes.Buffer: truncation out of range")
+ case n == 0:
// Reuse buffer space.
b.off = 0
}
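Per the updated comment, out-of-range arguments to Truncate now panic instead of being silently accepted; a minimal sketch:

	var buf bytes.Buffer
	buf.WriteString("hello")
	buf.Truncate(2)     // buf now reads "he"
	// buf.Truncate(10) // would panic: "bytes.Buffer: truncation out of range"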
// buf should have the desired capacity but a length of zero.
//
// In most cases, new(Buffer) (or just declaring a Buffer variable) is
-// preferable to NewBuffer. In particular, passing a non-empty buf to
-// NewBuffer and then writing to the Buffer will overwrite buf, not append to
-// it.
+// sufficient to initialize a Buffer.
func NewBuffer(buf []byte) *Buffer { return &Buffer{buf: buf} }
// NewBufferString creates and initializes a new Buffer using string s as its
-// initial contents. It is intended to prepare a buffer to read an existing
-// string. See the warnings about NewBuffer; similar issues apply here.
+// initial contents. It is intended to prepare a buffer to read an existing
+// string.
+//
+// In most cases, new(Buffer) (or just declaring a Buffer variable) is
+// sufficient to initialize a Buffer.
func NewBufferString(s string) *Buffer {
return &Buffer{buf: []byte(s)}
}
if d.blockStart >= windowSize {
d.blockStart -= windowSize
} else {
- d.blockStart = skipNever
+ d.blockStart = math.MaxInt32
}
d.hashOffset += windowSize
}
}
func testToFromWithLevelAndLimit(t *testing.T, level int, input []byte, name string, limit int) error {
- buffer := bytes.NewBuffer(nil)
- w := NewWriter(buffer, level)
+ var buffer bytes.Buffer
+ w := NewWriter(&buffer, level)
w.Write(input)
w.Close()
if limit > 0 && buffer.Len() > limit {
t.Errorf("level: %d, len(compress(data)) = %d > limit = %d", level, buffer.Len(), limit)
}
- r := NewReader(buffer)
+ r := NewReader(&buffer)
out, err := ioutil.ReadAll(r)
if err != nil {
t.Errorf("read: %s", err)
return total
}
-// Generate elements in the chain using an iterative algorithm.
-func (h *huffmanEncoder) generateChains(top *levelInfo, list []literalNode) {
- n := len(list)
- list = list[0 : n+1]
- list[n] = maxNode()
-
- l := top
- for {
- if l.nextPairFreq == math.MaxInt32 && l.nextCharFreq == math.MaxInt32 {
- // We've run out of both leafs and pairs.
- // End all calculations for this level.
-		// To make sure we never come back to this level or any lower level,
- // set nextPairFreq impossibly large.
- l.lastChain = nil
- l.needed = 0
- l = l.up
- l.nextPairFreq = math.MaxInt32
- continue
- }
-
- prevFreq := l.lastChain.freq
- if l.nextCharFreq < l.nextPairFreq {
- // The next item on this row is a leaf node.
- n := l.lastChain.leafCount + 1
- l.lastChain = &chain{l.nextCharFreq, n, l.lastChain.up}
- l.nextCharFreq = list[n].freq
- } else {
- // The next item on this row is a pair from the previous row.
- // nextPairFreq isn't valid until we generate two
- // more values in the level below
- l.lastChain = &chain{l.nextPairFreq, l.lastChain.leafCount, l.down.lastChain}
- l.down.needed = 2
- }
-
- if l.needed--; l.needed == 0 {
- // We've done everything we need to do for this level.
- // Continue calculating one level up. Fill in nextPairFreq
- // of that level with the sum of the two nodes we've just calculated on
- // this level.
- up := l.up
- if up == nil {
- // All done!
- return
- }
- up.nextPairFreq = prevFreq + l.lastChain.freq
- l = up
- } else {
- // If we stole from below, move down temporarily to replenish it.
- for l.down.needed > 0 {
- l = l.down
- }
- }
- }
-}
-
// Return the number of literals assigned to each bit size in the Huffman encoding
//
// This method is only called when list.length >= 3
}
func TestReader(t *testing.T) {
- b := bytes.NewBuffer(nil)
+ var b bytes.Buffer
for _, tt := range lzwTests {
d := strings.Split(tt.desc, ";")
var order Order
rc := NewReader(strings.NewReader(tt.compressed), order, litWidth)
defer rc.Close()
b.Reset()
- n, err := io.Copy(b, rc)
+ n, err := io.Copy(&b, rc)
if err != nil {
if err != tt.err {
t.Errorf("%s: io.Copy: %v want %v", tt.desc, err, tt.err)
b.SetBytes(int64(n))
buf0, _ := ioutil.ReadFile("../testdata/e.txt")
buf0 = buf0[:10000]
- compressed := bytes.NewBuffer(nil)
+ compressed := new(bytes.Buffer)
w := NewWriter(compressed, LSB, 8)
for i := 0; i < n; i += len(buf0) {
io.Copy(w, bytes.NewBuffer(buf0))
func TestWriterDictIsUsed(t *testing.T) {
var input = []byte("Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.")
- buf := bytes.NewBuffer(nil)
- compressor, err := NewWriterDict(buf, BestCompression, input)
+ var buf bytes.Buffer
+ compressor, err := NewWriterDict(&buf, BestCompression, input)
if err != nil {
t.Errorf("error in NewWriterDict: %s", err)
return
// NewCBCDecrypter returns a BlockMode which decrypts in cipher block chaining
// mode, using the given Block. The length of iv must be the same as the
-// Block's block size as must match the iv used to encrypt the data.
+// Block's block size and must match the iv used to encrypt the data.
func NewCBCDecrypter(b Block, iv []byte) BlockMode {
return (*cbcDecrypter)(newCBC(b, iv))
}
// The Stream* objects are so simple that all their members are public. Users
// can create them themselves.
-// StreamReader wraps a Stream into an io.Reader. It simply calls XORKeyStream
+// StreamReader wraps a Stream into an io.Reader. It calls XORKeyStream
// to process each slice of data which passes through.
type StreamReader struct {
S Stream
return
}
-// StreamWriter wraps a Stream into an io.Writer. It simply calls XORKeyStream
+// StreamWriter wraps a Stream into an io.Writer. It calls XORKeyStream
// to process each slice of data which passes through. If any Write call
// returns short then the StreamWriter is out of sync and must be discarded.
type StreamWriter struct {
+++ /dev/null
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// OpenPGP CFB Mode. http://tools.ietf.org/html/rfc4880#section-13.9
-
-package cipher
-
-type ocfbEncrypter struct {
- b Block
- fre []byte
- outUsed int
-}
-
-// An OCFBResyncOption determines if the "resynchronization step" of OCFB is
-// performed.
-type OCFBResyncOption bool
-
-const (
- OCFBResync OCFBResyncOption = true
- OCFBNoResync OCFBResyncOption = false
-)
-
-// NewOCFBEncrypter returns a Stream which encrypts data with OpenPGP's cipher
-// feedback mode using the given Block, and an initial amount of ciphertext.
-// randData must be random bytes and be the same length as the Block's block
-// size. Resync determines if the "resynchronization step" from RFC 4880, 13.9
-// step 7 is performed. Different parts of OpenPGP vary on this point.
-func NewOCFBEncrypter(block Block, randData []byte, resync OCFBResyncOption) (Stream, []byte) {
- blockSize := block.BlockSize()
- if len(randData) != blockSize {
- return nil, nil
- }
-
- x := &ocfbEncrypter{
- b: block,
- fre: make([]byte, blockSize),
- outUsed: 0,
- }
- prefix := make([]byte, blockSize+2)
-
- block.Encrypt(x.fre, x.fre)
- for i := 0; i < blockSize; i++ {
- prefix[i] = randData[i] ^ x.fre[i]
- }
-
- block.Encrypt(x.fre, prefix[:blockSize])
- prefix[blockSize] = x.fre[0] ^ randData[blockSize-2]
- prefix[blockSize+1] = x.fre[1] ^ randData[blockSize-1]
-
- if resync {
- block.Encrypt(x.fre, prefix[2:])
- } else {
- x.fre[0] = prefix[blockSize]
- x.fre[1] = prefix[blockSize+1]
- x.outUsed = 2
- }
- return x, prefix
-}
-
-func (x *ocfbEncrypter) XORKeyStream(dst, src []byte) {
- for i := 0; i < len(src); i++ {
- if x.outUsed == len(x.fre) {
- x.b.Encrypt(x.fre, x.fre)
- x.outUsed = 0
- }
-
- x.fre[x.outUsed] ^= src[i]
- dst[i] = x.fre[x.outUsed]
- x.outUsed++
- }
-}
-
-type ocfbDecrypter struct {
- b Block
- fre []byte
- outUsed int
-}
-
-// NewOCFBDecrypter returns a Stream which decrypts data with OpenPGP's cipher
-// feedback mode using the given Block. Prefix must be the first blockSize + 2
-// bytes of the ciphertext, where blockSize is the Block's block size. If an
-// incorrect key is detected then nil is returned. On successful exit,
-// blockSize+2 bytes of decrypted data are written into prefix. Resync
-// determines if the "resynchronization step" from RFC 4880, 13.9 step 7 is
-// performed. Different parts of OpenPGP vary on this point.
-func NewOCFBDecrypter(block Block, prefix []byte, resync OCFBResyncOption) Stream {
- blockSize := block.BlockSize()
- if len(prefix) != blockSize+2 {
- return nil
- }
-
- x := &ocfbDecrypter{
- b: block,
- fre: make([]byte, blockSize),
- outUsed: 0,
- }
- prefixCopy := make([]byte, len(prefix))
- copy(prefixCopy, prefix)
-
- block.Encrypt(x.fre, x.fre)
- for i := 0; i < blockSize; i++ {
- prefixCopy[i] ^= x.fre[i]
- }
-
- block.Encrypt(x.fre, prefix[:blockSize])
- prefixCopy[blockSize] ^= x.fre[0]
- prefixCopy[blockSize+1] ^= x.fre[1]
-
- if prefixCopy[blockSize-2] != prefixCopy[blockSize] ||
- prefixCopy[blockSize-1] != prefixCopy[blockSize+1] {
- return nil
- }
-
- if resync {
- block.Encrypt(x.fre, prefix[2:])
- } else {
- x.fre[0] = prefix[blockSize]
- x.fre[1] = prefix[blockSize+1]
- x.outUsed = 2
- }
- copy(prefix, prefixCopy)
- return x
-}
-
-func (x *ocfbDecrypter) XORKeyStream(dst, src []byte) {
- for i := 0; i < len(src); i++ {
- if x.outUsed == len(x.fre) {
- x.b.Encrypt(x.fre, x.fre)
- x.outUsed = 0
- }
-
- c := src[i]
- dst[i] = x.fre[x.outUsed] ^ src[i]
- x.fre[x.outUsed] = c
- x.outUsed++
- }
-}
+++ /dev/null
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cipher
-
-import (
- "bytes"
- "crypto/aes"
- "crypto/rand"
- "testing"
-)
-
-func testOCFB(t *testing.T, resync OCFBResyncOption) {
- block, err := aes.NewCipher(commonKey128)
- if err != nil {
- t.Error(err)
- return
- }
-
- plaintext := []byte("this is the plaintext, which is long enough to span several blocks.")
- randData := make([]byte, block.BlockSize())
- rand.Reader.Read(randData)
- ocfb, prefix := NewOCFBEncrypter(block, randData, resync)
- ciphertext := make([]byte, len(plaintext))
- ocfb.XORKeyStream(ciphertext, plaintext)
-
- ocfbdec := NewOCFBDecrypter(block, prefix, resync)
- if ocfbdec == nil {
- t.Errorf("NewOCFBDecrypter failed (resync: %t)", resync)
- return
- }
- plaintextCopy := make([]byte, len(plaintext))
- ocfbdec.XORKeyStream(plaintextCopy, ciphertext)
-
- if !bytes.Equal(plaintextCopy, plaintext) {
- t.Errorf("got: %x, want: %x (resync: %t)", plaintextCopy, plaintext, resync)
- }
-}
-
-func TestOCFB(t *testing.T) {
- testOCFB(t, OCFBNoResync)
- testOCFB(t, OCFBResync)
-}
type Hash uint
const (
- MD4 Hash = 1 + iota // in package crypto/md4
- MD5 // in package crypto/md5
- SHA1 // in package crypto/sha1
- SHA224 // in package crypto/sha256
- SHA256 // in package crypto/sha256
- SHA384 // in package crypto/sha512
- SHA512 // in package crypto/sha512
+ MD4 Hash = 1 + iota // import code.google.com/p/go.crypto/md4
+ MD5 // import crypto/md5
+ SHA1 // import crypto/sha1
+ SHA224 // import crypto/sha256
+ SHA256 // import crypto/sha256
+ SHA384 // import crypto/sha512
+ SHA512 // import crypto/sha512
MD5SHA1 // no implementation; MD5+SHA1 used for TLS RSA
- RIPEMD160 // in package crypto/ripemd160
+ RIPEMD160 // import code.google.com/p/go.crypto/ripemd160
maxHash
)
var hashes = make([]func() hash.Hash, maxHash)
-// New returns a new hash.Hash calculating the given hash function. If the
-// hash function is not linked into the binary, New returns nil.
+// New returns a new hash.Hash calculating the given hash function. New panics
+// if the hash function is not linked into the binary.
func (h Hash) New() hash.Hash {
if h > 0 && h < maxHash {
f := hashes[h]
return f()
}
}
- return nil
+ panic("crypto: requested hash function is unavailable")
+}
+
+// Available reports whether the given hash function is linked into the binary.
+func (h Hash) Available() bool {
+ return h < maxHash && hashes[h] != nil
}
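Because New now panics for an unlinked hash function, the new Available method is the intended guard; a minimal sketch (the sha256 import is needed only for its registration side effect):

	// import ( "crypto"; _ "crypto/sha256" )
	if crypto.SHA256.Available() {
		h := crypto.SHA256.New()
		h.Write([]byte("data"))
		sum := h.Sum(nil)
		_ = sum
	}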
// RegisterHash registers a function that returns a new instance of the given
// BlockSize returns the DES block size, 8 bytes.
func (c *Cipher) BlockSize() int { return BlockSize }
-// Encrypts the 8-byte buffer src and stores the result in dst.
+// Encrypt encrypts the 8-byte buffer src and stores the result in dst.
// Note that for amounts of data larger than a block,
// it is not safe to just call Encrypt on successive blocks;
// instead, use an encryption mode like CBC (see crypto/cipher/cbc.go).
func (c *Cipher) Encrypt(dst, src []byte) { encryptBlock(c.subkeys[:], dst, src) }
-// Decrypts the 8-byte buffer src and stores the result in dst.
+// Decrypt decrypts the 8-byte buffer src and stores the result in dst.
func (c *Cipher) Decrypt(dst, src []byte) { decryptBlock(c.subkeys[:], dst, src) }
// Reset zeros the key data, so that it will no longer
qBytes[0] |= 0x80
q.SetBytes(qBytes)
- if !big.ProbablyPrime(q, numMRTests) {
+ if !q.ProbablyPrime(numMRTests) {
continue
}
continue
}
- if !big.ProbablyPrime(p, numMRTests) {
+ if !p.ProbablyPrime(numMRTests) {
continue
}
import (
"crypto/rand"
+ "encoding/hex"
"fmt"
"math/big"
"testing"
return
}
}
+
+func TestP224Overflow(t *testing.T) {
+ // This tests for a specific bug in the P224 implementation.
+ p224 := P224()
+ pointData, _ := hex.DecodeString("049B535B45FB0A2072398A6831834624C7E32CCFD5A4B933BCEAF77F1DD945E08BBE5178F5EDF5E733388F196D2A631D2E075BB16CBFEEA15B")
+ x, y := Unmarshal(p224, pointData)
+ if !p224.IsOnCurve(x, y) {
+ t.Error("P224 failed to validate a correct point")
+ }
+}
in[i] += p224ZeroModP63[i]
}
- // Elimintate the coefficients at 2**224 and greater.
+ // Eliminate the coefficients at 2**224 and greater.
for i := 14; i >= 8; i-- {
in[i-8] -= in[i]
in[i-5] += (in[i] & 0xffff) << 12
a[0] += mask & (1 << 28)
}
-// p224Invert calcuates *out = in**-1 by computing in**(2**224 - 2**96 - 1),
+// p224Invert calculates *out = in**-1 by computing in**(2**224 - 2**96 - 1),
// i.e. Fermat's little theorem.
func p224Invert(out, in *p224FieldElement) {
var f1, f2, f3, f4 p224FieldElement
// p224Contract converts a FieldElement to its unique, minimal form.
//
-// On entry, in[i] < 2**32
+// On entry, in[i] < 2**29
// On exit, in[i] < 2**28
func p224Contract(out, in *p224FieldElement) {
copy(out[:], in[:])
out[i+1] -= 1 & mask
}
+ // We might have pushed out[3] over 2**28 so we perform another, partial,
+ // carry chain.
+ for i := 3; i < 7; i++ {
+ out[i+1] += out[i] >> 28
+ out[i] &= bottom28Bits
+ }
+ top = out[7] >> 28
+ out[7] &= bottom28Bits
+
+ // Eliminate top while maintaining the same value mod p.
+ out[0] -= top
+ out[3] += top << 12
+
+ // There are two cases to consider for out[3]:
+ // 1) The first time that we eliminated top, we didn't push out[3] over
+ // 2**28. In this case, the partial carry chain didn't change any values
+ // and top is zero.
+ // 2) We did push out[3] over 2**28 the first time that we eliminated top.
+ // The first value of top was in [0..16), therefore, prior to eliminating
+ // the first top, 0xfff1000 <= out[3] <= 0xfffffff. Therefore, after
+ // overflowing and being reduced by the second carry chain, out[3] <=
+ // 0xf000. Thus it cannot have overflowed when we eliminated top for the
+ // second time.
+
+ // Again, we may just have made out[0] negative, so do the same carry down.
+ // As before, if we made out[0] negative then we know that out[3] is
+ // sufficiently positive.
+ for i := 0; i < 3; i++ {
+ mask := uint32(int32(out[i]) >> 31)
+ out[i] += (1 << 28) & mask
+ out[i+1] -= 1 & mask
+ }
+
// Now we see if the value is >= p and, if so, subtract p.
// First we build a mask from the top four limbs, which must all be
bytes[len(bytes)-1] |= 1
p.SetBytes(bytes)
- if big.ProbablyPrime(p, 20) {
+ if p.ProbablyPrime(20) {
return
}
}
// about the plaintext.
// See ``Chosen Ciphertext Attacks Against Protocols Based on the RSA
// Encryption Standard PKCS #1'', Daniel Bleichenbacher, Advances in Cryptology
-// (Crypto '98),
+// (Crypto '98).
func DecryptPKCS1v15SessionKey(rand io.Reader, priv *PrivateKey, ciphertext []byte, key []byte) (err error) {
k := (priv.N.BitLen() + 7) / 8
if k-(len(key)+3+8) < 0 {
// ProbablyPrime are deterministic, given the candidate number, it's
// easy for an attacker to generate composites that pass this test.
for _, prime := range priv.Primes {
- if !big.ProbablyPrime(prime, 20) {
+ if !prime.ProbablyPrime(20) {
return errors.New("prime factor is composite")
}
}
gcd := new(big.Int)
x := new(big.Int)
y := new(big.Int)
- big.GcdInt(gcd, x, y, totient, e)
+ gcd.GCD(x, y, totient, e)
if gcd.Cmp(bigOne) != 0 {
return errors.New("invalid public exponent E")
}
priv.D = new(big.Int)
y := new(big.Int)
e := big.NewInt(int64(priv.E))
- big.GcdInt(g, priv.D, y, e, totient)
+ g.GCD(priv.D, y, e, totient)
if g.Cmp(bigOne) == 0 {
priv.D.Add(priv.D, totient)
g := new(big.Int)
x := new(big.Int)
y := new(big.Int)
- big.GcdInt(g, x, y, a, n)
+ g.GCD(x, y, a, n)
if g.Cmp(bigOne) != 0 {
// In this case, a and n aren't coprime and we cannot calculate
// the inverse. This happens because the values of n are nearly
}
// DecryptOAEP decrypts ciphertext using RSA-OAEP.
-// If rand != nil, DecryptOAEP uses RSA blinding to avoid timing side-channel attacks.
+// If random != nil, DecryptOAEP uses RSA blinding to avoid timing side-channel attacks.
func DecryptOAEP(hash hash.Hash, random io.Reader, priv *PrivateKey, ciphertext []byte, label []byte) (msg []byte, err error) {
k := (priv.N.BitLen() + 7) / 8
if len(ciphertext) > k ||
finishedHash.Write(serverHello.marshal())
vers, ok := mutualVersion(serverHello.vers)
- if !ok {
+ if !ok || vers < versionTLS10 {
+ // TLS 1.0 is the minimum version supported as a client.
return c.sendAlert(alertProtocolVersion)
}
c.vers = vers
return &Conn{conn: conn, config: config, isClient: true}
}
-// A Listener implements a network listener (net.Listener) for TLS connections.
-type Listener struct {
- listener net.Listener
- config *Config
+// A listener implements a network listener (net.Listener) for TLS connections.
+type listener struct {
+ net.Listener
+ config *Config
}
// Accept waits for and returns the next incoming TLS connection.
// The returned connection c is a *tls.Conn.
-func (l *Listener) Accept() (c net.Conn, err error) {
- c, err = l.listener.Accept()
+func (l *listener) Accept() (c net.Conn, err error) {
+ c, err = l.Listener.Accept()
if err != nil {
return
}
return
}
-// Close closes the listener.
-func (l *Listener) Close() error { return l.listener.Close() }
-
-// Addr returns the listener's network address.
-func (l *Listener) Addr() net.Addr { return l.listener.Addr() }
-
// NewListener creates a Listener which accepts connections from an inner
// Listener and wraps each connection with Server.
// The configuration config must be non-nil and must have
// at least one certificate.
-func NewListener(listener net.Listener, config *Config) (l *Listener) {
- l = new(Listener)
- l.listener = listener
+func NewListener(inner net.Listener, config *Config) net.Listener {
+ l := new(listener)
+ l.Listener = inner
l.config = config
- return
+ return l
}
// Listen creates a TLS listener accepting connections on the
// given network address using net.Listen.
// The configuration config must be non-nil and must have
// at least one certificate.
-func Listen(network, laddr string, config *Config) (*Listener, error) {
+func Listen(network, laddr string, config *Config) (net.Listener, error) {
if config == nil || len(config.Certificates) == 0 {
return nil, errors.New("tls.Listen: no certificates in configuration")
}
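Listen and NewListener now hand back a plain net.Listener rather than the previously exported *Listener type; a hedged usage sketch (the certificate file names are placeholders):

	cert, err := tls.LoadX509KeyPair("cert.pem", "key.pem") // placeholder paths
	if err != nil {
		log.Fatal(err)
	}
	config := &tls.Config{Certificates: []tls.Certificate{cert}}
	ln, err := tls.Listen("tcp", ":443", config) // ln has static type net.Listener
	if err != nil {
		log.Fatal(err)
	}
	defer ln.Close()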
var priv pkcs1PrivateKey
rest, err := asn1.Unmarshal(der, &priv)
if len(rest) > 0 {
- err = asn1.SyntaxError{"trailing data"}
+ err = asn1.SyntaxError{Msg: "trailing data"}
return
}
if err != nil {
type RelativeDistinguishedNameSET []AttributeTypeAndValue
+// AttributeTypeAndValue mirrors the ASN.1 structure of the same name in
+// http://tools.ietf.org/html/rfc5280#section-4.1.2.4
type AttributeTypeAndValue struct {
Type asn1.ObjectIdentifier
Value interface{}
import (
"strings"
"time"
+ "unicode/utf8"
)
type InvalidReason int
return true
}
+// toLowerCaseASCII returns a lower-case version of in. See RFC 6125 6.4.1. We use
+// an explicitly ASCII function to avoid any sharp corners resulting from
+// performing Unicode operations on DNS labels.
+func toLowerCaseASCII(in string) string {
+ // If the string is already lower-case then there's nothing to do.
+ isAlreadyLowerCase := true
+ for _, c := range in {
+ if c == utf8.RuneError {
+ // If we get a UTF-8 error then there might be
+ // upper-case ASCII bytes in the invalid sequence.
+ isAlreadyLowerCase = false
+ break
+ }
+ if 'A' <= c && c <= 'Z' {
+ isAlreadyLowerCase = false
+ break
+ }
+ }
+
+ if isAlreadyLowerCase {
+ return in
+ }
+
+ out := []byte(in)
+ for i, c := range out {
+ if 'A' <= c && c <= 'Z' {
+ out[i] += 'a' - 'A'
+ }
+ }
+ return string(out)
+}
+
// VerifyHostname returns nil if c is a valid certificate for the named host.
// Otherwise it returns an error describing the mismatch.
func (c *Certificate) VerifyHostname(h string) error {
+ lowered := toLowerCaseASCII(h)
+
if len(c.DNSNames) > 0 {
for _, match := range c.DNSNames {
- if matchHostnames(match, h) {
+ if matchHostnames(toLowerCaseASCII(match), lowered) {
return nil
}
}
// If Subject Alt Name is given, we ignore the common name.
- } else if matchHostnames(c.Subject.CommonName, h) {
+ } else if matchHostnames(toLowerCaseASCII(c.Subject.CommonName), lowered) {
return nil
}
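With the ASCII case folding above, hostname verification no longer depends on the caller's capitalization, as the new test case below exercises. A minimal sketch, assuming cert is an *x509.Certificate valid for www.google.com:

	if err := cert.VerifyHostname("WwW.GooGLE.coM"); err != nil {
		log.Fatal(err) // before this change the mixed-case name failed to match
	}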
intermediates: []string{thawteIntermediate},
roots: []string{verisignRoot},
currentTime: 1302726541,
+ dnsName: "WwW.GooGLE.coM",
+
+ expectedChains: [][]string{
+ {"Google", "Thawte", "VeriSign"},
+ },
+ },
+ {
+ leaf: googleLeaf,
+ intermediates: []string{thawteIntermediate},
+ roots: []string{verisignRoot},
+ currentTime: 1302726541,
dnsName: "www.example.com",
errorCallback: expectHostnameError,
return nil, err
}
if !seq.IsCompound || seq.Tag != 16 || seq.Class != 0 {
- return nil, asn1.StructuralError{"bad SAN sequence"}
+ return nil, asn1.StructuralError{Msg: "bad SAN sequence"}
}
parsedName := false
return nil, err
}
if len(rest) > 0 {
- return nil, asn1.SyntaxError{"trailing data"}
+ return nil, asn1.SyntaxError{Msg: "trailing data"}
}
return parseCertificate(&cert)
case *string:
*d = string(s)
return nil
+ case *interface{}:
+ bcopy := make([]byte, len(s))
+ copy(bcopy, s)
+ *d = bcopy
+ return nil
case *[]byte:
*d = s
return nil
*d = bv.(bool)
}
return err
+ case *interface{}:
+ *d = src
+ return nil
}
if scanner, ok := dest.(ScannerInto); ok {
s, d interface{} // source and destination
// following are used if they're non-zero
- wantint int64
- wantuint uint64
- wantstr string
- wantf32 float32
- wantf64 float64
- wanttime time.Time
- wantbool bool // used if d is of type *bool
- wanterr string
+ wantint int64
+ wantuint uint64
+ wantstr string
+ wantf32 float32
+ wantf64 float64
+ wanttime time.Time
+ wantbool bool // used if d is of type *bool
+ wanterr string
+ wantiface interface{}
}
// Target variables for scanning into.
scanf32 float32
scanf64 float64
scantime time.Time
+ scaniface interface{}
)
var conversionTests = []conversionTest{
{s: float64(1.5), d: &scanf32, wantf32: float32(1.5)},
{s: "1.5", d: &scanf32, wantf32: float32(1.5)},
{s: "1.5", d: &scanf64, wantf64: float64(1.5)},
+
+ // To interface{}
+ {s: float64(1.5), d: &scaniface, wantiface: float64(1.5)},
+ {s: int64(1), d: &scaniface, wantiface: int64(1)},
+ {s: "str", d: &scaniface, wantiface: "str"},
+ {s: []byte("byteslice"), d: &scaniface, wantiface: []byte("byteslice")},
+ {s: true, d: &scaniface, wantiface: true},
+ {s: nil, d: &scaniface},
}
func intValue(intptr interface{}) int64 {
if !ct.wanttime.IsZero() && !ct.wanttime.Equal(timeValue(ct.d)) {
errf("want time %v, got %v", ct.wanttime, timeValue(ct.d))
}
+ if ifptr, ok := ct.d.(*interface{}); ok {
+ if !reflect.DeepEqual(ct.wantiface, scaniface) {
+ errf("want interface %#v, got %#v", ct.wantiface, scaniface)
+ continue
+ }
+ if srcBytes, ok := ct.s.([]byte); ok {
+ dstBytes := (*ifptr).([]byte)
+ if &dstBytes[0] == &srcBytes[0] {
+ errf("copy into interface{} didn't copy []byte data")
+ }
+ }
+ }
}
}
// Package driver defines interfaces to be implemented by database
// drivers as used by package sql.
//
-// Code simply using databases should use package sql.
+// Most code should use package sql.
//
// Drivers only need to be aware of a subset of Go's types. The sql package
// will convert all types into one of the following:
case "bool":
return driver.Bool
case "nullbool":
- return driver.Null{driver.Bool}
+ return driver.Null{Converter: driver.Bool}
case "int32":
return driver.Int32
case "string":
- return driver.NotNull{driver.String}
+ return driver.NotNull{Converter: driver.String}
case "nullstring":
- return driver.Null{driver.String}
+ return driver.Null{Converter: driver.String}
case "int64":
// TODO(coopernurse): add type-specific converter
- return driver.NotNull{driver.DefaultParameterConverter}
+ return driver.NotNull{Converter: driver.DefaultParameterConverter}
case "nullint64":
// TODO(coopernurse): add type-specific converter
- return driver.Null{driver.DefaultParameterConverter}
+ return driver.Null{Converter: driver.DefaultParameterConverter}
case "float64":
// TODO(coopernurse): add type-specific converter
- return driver.NotNull{driver.DefaultParameterConverter}
+ return driver.NotNull{Converter: driver.DefaultParameterConverter}
case "nullfloat64":
// TODO(coopernurse): add type-specific converter
- return driver.Null{driver.DefaultParameterConverter}
+ return driver.Null{Converter: driver.DefaultParameterConverter}
case "datetime":
return driver.DefaultParameterConverter
}
// be modified and held indefinitely. The copy can be avoided by using
// an argument of type *RawBytes instead; see the documentation for
// RawBytes for restrictions on its use.
+//
+// If an argument has type *interface{}, Scan copies the value
+// provided by the underlying driver without conversion. If the value
+// is of type []byte, a copy is made and the caller owns the result.
func (rs *Rows) Scan(dest ...interface{}) error {
if rs.closed {
return errors.New("sql: Rows closed")
}
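A minimal sketch of the *interface{} destination described above (the db handle, table, and column are illustrative):

	var v interface{}
	err := db.QueryRow("SELECT name FROM people WHERE id = ?", 1).Scan(&v)
	if err == nil {
		if b, ok := v.([]byte); ok {
			_ = b // a fresh copy owned by the caller, per the comment above
		}
	}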
func newForkableWriter() *forkableWriter {
- return &forkableWriter{bytes.NewBuffer(nil), nil, nil}
+ return &forkableWriter{new(bytes.Buffer), nil, nil}
}
func (f *forkableWriter) fork() (pre, post *forkableWriter) {
}
}
+// EncodeToString returns the base32 encoding of src.
+func (enc *Encoding) EncodeToString(src []byte) string {
+ buf := make([]byte, enc.EncodedLen(len(src)))
+ enc.Encode(buf, src)
+ return string(buf)
+}
+
type encoder struct {
err error
enc *Encoding
// decode is like Decode but returns an additional 'end' value, which
// indicates if end-of-message padding was encountered and thus any
-// additional data is an error. decode also assumes len(src)%8==0,
-// since it is meant for internal use.
+// additional data is an error.
func (enc *Encoding) decode(dst, src []byte) (n int, end bool, err error) {
- for i := 0; i < len(src)/8 && !end; i++ {
+ osrc := src
+ for len(src) > 0 && !end {
// Decode quantum using the base32 alphabet
var dbuf [8]byte
dlen := 8
// do the top bytes contain any data?
dbufloop:
- for j := 0; j < 8; j++ {
- in := src[i*8+j]
- if in == '=' && j >= 2 && i == len(src)/8-1 {
+ for j := 0; j < 8; {
+ if len(src) == 0 {
+ return n, false, CorruptInputError(len(osrc) - len(src) - j)
+ }
+ in := src[0]
+ src = src[1:]
+ if in == '\r' || in == '\n' {
+ // Ignore this character.
+ continue
+ }
+ if in == '=' && j >= 2 && len(src) < 8 {
// We've reached the end and there's
// padding, the rest should be padded
- for k := j; k < 8; k++ {
- if src[i*8+k] != '=' {
- return n, false, CorruptInputError(i*8 + j)
+ for k := 0; k < 8-j-1; k++ {
+ if len(src) > k && src[k] != '=' {
+ return n, false, CorruptInputError(len(osrc) - len(src) + k - 1)
}
}
dlen = j
}
dbuf[j] = enc.decodeMap[in]
if dbuf[j] == 0xFF {
- return n, false, CorruptInputError(i*8 + j)
+ return n, false, CorruptInputError(len(osrc) - len(src) - 1)
}
+ j++
}
// Pack 8x 5-bit source blocks into 5 byte destination
// quantum
switch dlen {
case 7, 8:
- dst[i*5+4] = dbuf[6]<<5 | dbuf[7]
+ dst[4] = dbuf[6]<<5 | dbuf[7]
fallthrough
case 6, 5:
- dst[i*5+3] = dbuf[4]<<7 | dbuf[5]<<2 | dbuf[6]>>3
+ dst[3] = dbuf[4]<<7 | dbuf[5]<<2 | dbuf[6]>>3
fallthrough
case 4:
- dst[i*5+2] = dbuf[3]<<4 | dbuf[4]>>1
+ dst[2] = dbuf[3]<<4 | dbuf[4]>>1
fallthrough
case 3:
- dst[i*5+1] = dbuf[1]<<6 | dbuf[2]<<1 | dbuf[3]>>4
+ dst[1] = dbuf[1]<<6 | dbuf[2]<<1 | dbuf[3]>>4
fallthrough
case 2:
- dst[i*5+0] = dbuf[0]<<3 | dbuf[1]>>2
+ dst[0] = dbuf[0]<<3 | dbuf[1]>>2
}
+ dst = dst[5:]
switch dlen {
case 2:
n += 1
// DecodedLen(len(src)) bytes to dst and returns the number of bytes
// written. If src contains invalid base32 data, it will return the
// number of bytes successfully written and CorruptInputError.
+// New line characters (\r and \n) are ignored.
func (enc *Encoding) Decode(dst, src []byte) (n int, err error) {
- if len(src)%8 != 0 {
- return 0, CorruptInputError(len(src) / 8 * 8)
- }
-
n, _, err = enc.decode(dst, src)
return
}
+// DecodeString returns the bytes represented by the base32 string s.
+func (enc *Encoding) DecodeString(s string) ([]byte, error) {
+ dbuf := make([]byte, enc.DecodedLen(len(s)))
+ n, err := enc.Decode(dbuf, []byte(s))
+ return dbuf[:n], err
+}
+
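A matching sketch of DecodeString combined with the newline handling added above; the embedded \r\n is skipped by the decoder:

package main

import (
	"encoding/base32"
	"fmt"
	"log"
)

func main() {
	b, err := base32.StdEncoding.DecodeString("ON2XEZ\r\nI=")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", b) // sure
}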
type decoder struct {
err error
enc *Encoding
func TestEncode(t *testing.T) {
for _, p := range pairs {
- buf := make([]byte, StdEncoding.EncodedLen(len(p.decoded)))
- StdEncoding.Encode(buf, []byte(p.decoded))
- testEqual(t, "Encode(%q) = %q, want %q", p.decoded, string(buf), p.encoded)
+ got := StdEncoding.EncodeToString([]byte(p.decoded))
+ testEqual(t, "Encode(%q) = %q, want %q", p.decoded, got, p.encoded)
}
}
testEqual(t, "Decode(%q) = %q, want %q", p.encoded,
string(dbuf[0:count]),
p.decoded)
+
+ dbuf, err = StdEncoding.DecodeString(p.encoded)
+ testEqual(t, "DecodeString(%q) = error %v, want %v", p.encoded, err, error(nil))
+ testEqual(t, "DecodeString(%q) = %q, want %q", p.encoded, string(dbuf), p.decoded)
}
}
t.Errorf("Decode(Encode(%d-byte string)) failed at offset %d", n, i)
}
}
+
+func TestNewLineCharacters(t *testing.T) {
+ // Each of these should decode to the string "sure", without errors.
+ const expected = "sure"
+ examples := []string{
+ "ON2XEZI=",
+ "ON2XEZI=\r",
+ "ON2XEZI=\n",
+ "ON2XEZI=\r\n",
+ "ON2XEZ\r\nI=",
+ "ON2X\rEZ\nI=",
+ "ON2X\nEZ\rI=",
+ "ON2XEZ\nI=",
+ "ON2XEZI\n=",
+ }
+ for _, e := range examples {
+ buf, err := StdEncoding.DecodeString(e)
+ if err != nil {
+ t.Errorf("Decode(%q) failed: %v", e, err)
+ continue
+ }
+ if s := string(buf); s != expected {
+ t.Errorf("Decode(%q) = %q, want %q", e, s, expected)
+ }
+ }
+}
// decode is like Decode but returns an additional 'end' value, which
// indicates if end-of-message padding was encountered and thus any
-// additional data is an error. decode also assumes len(src)%4==0,
-// since it is meant for internal use.
+// additional data is an error.
func (enc *Encoding) decode(dst, src []byte) (n int, end bool, err error) {
- for i := 0; i < len(src)/4 && !end; i++ {
+ osrc := src
+ for len(src) > 0 && !end {
// Decode quantum using the base64 alphabet
var dbuf [4]byte
dlen := 4
dbufloop:
- for j := 0; j < 4; j++ {
- in := src[i*4+j]
- if in == '=' && j >= 2 && i == len(src)/4-1 {
+ for j := 0; j < 4; {
+ if len(src) == 0 {
+ return n, false, CorruptInputError(len(osrc) - len(src) - j)
+ }
+ in := src[0]
+ src = src[1:]
+ if in == '\r' || in == '\n' {
+ // Ignore this character.
+ continue
+ }
+ if in == '=' && j >= 2 && len(src) < 4 {
// We've reached the end and there's
// padding
- if src[i*4+3] != '=' {
- return n, false, CorruptInputError(i*4 + 2)
+ if len(src) > 0 && src[0] != '=' {
+ return n, false, CorruptInputError(len(osrc) - len(src) - 1)
}
dlen = j
end = true
}
dbuf[j] = enc.decodeMap[in]
if dbuf[j] == 0xFF {
- return n, false, CorruptInputError(i*4 + j)
+ return n, false, CorruptInputError(len(osrc) - len(src) - 1)
}
+ j++
}
// Pack 4x 6-bit source blocks into 3 byte destination
// quantum
switch dlen {
case 4:
- dst[i*3+2] = dbuf[2]<<6 | dbuf[3]
+ dst[2] = dbuf[2]<<6 | dbuf[3]
fallthrough
case 3:
- dst[i*3+1] = dbuf[1]<<4 | dbuf[2]>>2
+ dst[1] = dbuf[1]<<4 | dbuf[2]>>2
fallthrough
case 2:
- dst[i*3+0] = dbuf[0]<<2 | dbuf[1]>>4
+ dst[0] = dbuf[0]<<2 | dbuf[1]>>4
}
+ dst = dst[3:]
n += dlen - 1
}
// DecodedLen(len(src)) bytes to dst and returns the number of bytes
// written. If src contains invalid base64 data, it will return the
// number of bytes successfully written and CorruptInputError.
+// New line characters (\r and \n) are ignored.
func (enc *Encoding) Decode(dst, src []byte) (n int, err error) {
- if len(src)%4 != 0 {
- return 0, CorruptInputError(len(src) / 4 * 4)
- }
-
n, _, err = enc.decode(dst, src)
return
}
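The same newline tolerance now applies to base64; a minimal sketch using the "sure" test vector from the tests below:

package main

import (
	"encoding/base64"
	"fmt"
	"log"
)

func main() {
	b, err := base64.StdEncoding.DecodeString("c3VyZ\r\nQ==")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", b) // sure
}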
t.Errorf("Decode(Encode(%d-byte string)) failed at offset %d", n, i)
}
}
+
+func TestNewLineCharacters(t *testing.T) {
+ // Each of these should decode to the string "sure", without errors.
+ const expected = "sure"
+ examples := []string{
+ "c3VyZQ==",
+ "c3VyZQ==\r",
+ "c3VyZQ==\n",
+ "c3VyZQ==\r\n",
+ "c3VyZ\r\nQ==",
+ "c3V\ryZ\nQ==",
+ "c3V\nyZ\rQ==",
+ "c3VyZ\nQ==",
+ "c3VyZQ\n==",
+ }
+ for _, e := range examples {
+ buf, err := StdEncoding.DecodeString(e)
+ if err != nil {
+ t.Errorf("Decode(%q) failed: %v", e, err)
+ continue
+ }
+ if s := string(buf); s != expected {
+ t.Errorf("Decode(%q) = %q, want %q", e, s, expected)
+ }
+ }
+}
default:
return errors.New("binary.Read: invalid type " + d.Type().String())
}
- size := TotalSize(v)
+ size := dataSize(v)
if size < 0 {
return errors.New("binary.Read: invalid type " + v.Type().String())
}
return err
}
v := reflect.Indirect(reflect.ValueOf(data))
- size := TotalSize(v)
+ size := dataSize(v)
if size < 0 {
return errors.New("binary.Write: invalid type " + v.Type().String())
}
return err
}
-func TotalSize(v reflect.Value) int {
+// dataSize returns the number of bytes the actual data represented by v occupies in memory.
+// For compound structures, it sums the sizes of the elements. Thus, for instance, for a slice
+// it returns the length of the slice times the element size and does not count the memory
+// occupied by the header.
+func dataSize(v reflect.Value) int {
if v.Kind() == reflect.Slice {
elem := sizeof(v.Type().Elem())
if elem < 0 {
bsr := &byteSliceReader{}
var buf bytes.Buffer
Write(&buf, BigEndian, &s)
- n := TotalSize(reflect.ValueOf(s))
+ n := dataSize(reflect.ValueOf(s))
b.SetBytes(int64(n))
t := s
b.ResetTimer()
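For context on what dataSize measures, a minimal Read/Write round trip over a fixed-size struct; the header type and its values are illustrative only:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"log"
)

type header struct {
	Magic uint32
	Count uint16
}

func main() {
	var buf bytes.Buffer
	if err := binary.Write(&buf, binary.BigEndian, &header{Magic: 0xFEEDFACE, Count: 3}); err != nil {
		log.Fatal(err)
	}
	var h header
	if err := binary.Read(&buf, binary.BigEndian, &h); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", h) // {Magic:4277009102 Count:3}
}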
// ReadAll reads all the remaining records from r.
// Each record is a slice of fields.
+// A successful call returns err == nil, not err == EOF. Because ReadAll is
+// defined to read until EOF, it does not treat end of file as an error to be
+// reported.
func (r *Reader) ReadAll() (records [][]string, err error) {
for {
record, err := r.Read()
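A minimal sketch of the ReadAll contract described above: a nil error means the whole input was read, and end of file is never reported as an error:

package main

import (
	"encoding/csv"
	"fmt"
	"log"
	"strings"
)

func main() {
	r := csv.NewReader(strings.NewReader("a,b,c\n1,2,3\n"))
	records, err := r.ReadAll()
	if err != nil {
		log.Fatal(err) // never io.EOF; a nil error means everything was read
	}
	fmt.Println(records) // [[a b c] [1 2 3]]
}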
"bytes"
"errors"
"math"
+ "math/rand"
"reflect"
"strings"
"testing"
+ "time"
"unsafe"
)
}
debugFunc(debugBuffer)
}
+
+func encFuzzDec(rng *rand.Rand, in interface{}) error {
+ buf := new(bytes.Buffer)
+ enc := NewEncoder(buf)
+ if err := enc.Encode(&in); err != nil {
+ return err
+ }
+
+ b := buf.Bytes()
+ for i, bi := range b {
+ if rng.Intn(10) < 3 {
+ b[i] = bi + uint8(rng.Intn(256))
+ }
+ }
+
+ dec := NewDecoder(buf)
+ var e interface{}
+ if err := dec.Decode(&e); err != nil {
+ return err
+ }
+ return nil
+}
+
+// This does some "fuzz testing" by attempting to decode a sequence of random bytes.
+func TestFuzz(t *testing.T) {
+ if testing.Short() {
+ return
+ }
+
+ // all possible inputs
+ input := []interface{}{
+ new(int),
+ new(float32),
+ new(float64),
+ new(complex128),
+ &ByteStruct{255},
+ &ArrayStruct{},
+ &StringStruct{"hello"},
+ &GobTest1{0, &StringStruct{"hello"}},
+ }
+ testFuzz(t, time.Now().UnixNano(), 100, input...)
+}
+
+func TestFuzzRegressions(t *testing.T) {
+ // An instance triggering a type name of length ~102 GB.
+ testFuzz(t, 1328492090837718000, 100, new(float32))
+}
+
+func testFuzz(t *testing.T, seed int64, n int, input ...interface{}) {
+ t.Logf("seed=%d n=%d\n", seed, n)
+ for _, e := range input {
+ rng := rand.New(rand.NewSource(seed))
+ for i := 0; i < n; i++ {
+ encFuzzDec(rng, e)
+ }
+ }
+}
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Delete the next line to include this file in the gob package.
-// +build ignore
+// Delete the next line to include in the gob package.
+// +build gob-debug
package gob
// Create a writable interface reflect.Value. We need one even for the nil case.
ivalue := allocValue(ityp)
// Read the name of the concrete type.
- b := make([]byte, state.decodeUint())
+ nr := state.decodeUint()
+ if nr < 0 || nr > 1<<31 { // zero is permissible for anonymous types
+ errorf("invalid type name length %d", nr)
+ }
+ b := make([]byte, nr)
state.b.Read(b)
name := string(b)
if name == "" {
// and returns the type id of the next value. It returns -1 at
// EOF. Upon return, the remainder of dec.buf is the value to be
// decoded. If this is an interface value, it can be ignored by
-// simply resetting that buffer.
+// resetting that buffer.
func (dec *Decoder) decodeTypeSequence(isInterface bool) typeId {
for dec.err == nil {
if dec.buf.Len() == 0 {
Structs, arrays and slices are also supported. Strings and arrays of bytes are
supported with a special, efficient representation (see below). When a slice is
decoded, if the existing slice has capacity the slice will be extended in place;
-if not, a new array is allocated. Regardless, the length of the resuling slice
+if not, a new array is allocated. Regardless, the length of the resulting slice
reports the number of elements decoded.
Functions and channels cannot be sent in a gob. Attempting
StructT *StructType
MapT *MapType
}
- type ArrayType struct {
+ type arrayType struct {
CommonType
Elem typeId
Len int
Name string // the name of the struct type
Id int // the id of the type, repeated so it's inside the type
}
- type SliceType struct {
+ type sliceType struct {
CommonType
Elem typeId
}
- type StructType struct {
+ type structType struct {
CommonType
Field []*fieldType // the fields of the struct.
}
- type FieldType struct {
+ type fieldType struct {
Name string // the name of the field.
Id int // the type id of the field, which must be already defined
}
- type MapType struct {
+ type mapType struct {
CommonType
Key typeId
Elem typeId
// Set the field number implicitly to -1; this is done at the beginning
// of every struct, including nested structs.
03 // Add 3 to field number; now 2 (wireType.structType; this is a struct).
- // structType starts with an embedded commonType, which appears
+ // structType starts with an embedded CommonType, which appears
// as a regular structure here too.
- 01 // add 1 to field number (now 0); start of embedded commonType.
+ 01 // add 1 to field number (now 0); start of embedded CommonType.
01 // add 1 to field number (now 0, the name of the type)
05 // string is (unsigned) 5 bytes long
- 50 6f 69 6e 74 // wireType.structType.commonType.name = "Point"
+ 50 6f 69 6e 74 // wireType.structType.CommonType.name = "Point"
01 // add 1 to field number (now 1, the id of the type)
- ff 82 // wireType.structType.commonType._id = 65
- 00 // end of embedded wiretype.structType.commonType struct
+ ff 82 // wireType.structType.CommonType._id = 65
+ 00 // end of embedded wiretype.structType.CommonType struct
01 // add 1 to field number (now 1, the field array in wireType.structType)
02 // There are two fields in the type (len(structType.field))
01 // Start of first field structure; add 1 to get field number 0: field[0].name
"bo": []bool{false},
"st": []string{"s"},
}
- buf := bytes.NewBuffer(nil)
- enc := NewEncoder(buf)
+ enc := NewEncoder(new(bytes.Buffer))
err := enc.Encode(m)
if err != nil {
t.Errorf("encode map: %s", err)
}
func TestSliceReusesMemory(t *testing.T) {
- buf := bytes.NewBuffer(nil)
+ buf := new(bytes.Buffer)
// Bytes
{
x := []byte("abcd")
// plain error. It overwrites the error return of the function that deferred its call.
func catchError(err *error) {
if e := recover(); e != nil {
- *err = e.(gobError).err // Will re-panic if not one of our errors, such as a runtime error.
+ ge, ok := e.(gobError)
+ if !ok {
+ panic(e)
+ }
+ *err = ge.err
}
return
}
var buf bytes.Buffer
enc := NewEncoder(&buf)
bench := &Bench{7, 3.2, "now is the time", []byte("for all good men")}
- runtime.UpdateMemStats()
- mallocs := 0 - runtime.MemStats.Mallocs
+ memstats := new(runtime.MemStats)
+ runtime.ReadMemStats(memstats)
+ mallocs := 0 - memstats.Mallocs
const count = 1000
for i := 0; i < count; i++ {
err := enc.Encode(bench)
t.Fatal("encode:", err)
}
}
- runtime.UpdateMemStats()
- mallocs += runtime.MemStats.Mallocs
+ runtime.ReadMemStats(memstats)
+ mallocs += memstats.Mallocs
fmt.Printf("mallocs per encode of type Bench: %d\n", mallocs/count)
}
}
}
dec := NewDecoder(&buf)
- runtime.UpdateMemStats()
- mallocs := 0 - runtime.MemStats.Mallocs
+ memstats := new(runtime.MemStats)
+ runtime.ReadMemStats(memstats)
+ mallocs := 0 - memstats.Mallocs
for i := 0; i < count; i++ {
*bench = Bench{}
err := dec.Decode(&bench)
t.Fatal("decode:", err)
}
}
- runtime.UpdateMemStats()
- mallocs += runtime.MemStats.Mallocs
+ runtime.ReadMemStats(memstats)
+ mallocs += memstats.Mallocs
fmt.Printf("mallocs per decode of type Bench: %d\n", mallocs/count)
}
return t.gobType().name()
}
-// Common elements of all types.
+// CommonType holds elements of all types.
+// It is a historical artifact, kept for binary compatibility and exported
+// only for the benefit of the package's encoding of type descriptors. It is
+// not intended for direct use by clients.
type CommonType struct {
Name string
Id typeId
import (
"bytes"
+ "errors"
+ "fmt"
"io"
- "strconv"
)
const hextable = "0123456789abcdef"
return len(src) * 2
}
-// OddLengthInputError results from decoding an odd length slice.
-type OddLengthInputError struct{}
+// ErrLength results from decoding an odd length slice.
+var ErrLength = errors.New("encoding/hex: odd length hex string")
-func (OddLengthInputError) Error() string { return "odd length hex string" }
+// InvalidByteError values describe errors resulting from an invalid byte in a hex string.
+type InvalidByteError byte
-// InvalidHexCharError results from finding an invalid character in a hex string.
-type InvalidHexCharError byte
-
-func (e InvalidHexCharError) Error() string {
- return "invalid hex char: " + strconv.Itoa(int(e))
+func (e InvalidByteError) Error() string {
+ return fmt.Sprintf("encoding/hex: invalid byte: %#U", rune(e))
}
func DecodedLen(x int) int { return x / 2 }
// Decode decodes src into DecodedLen(len(src)) bytes, returning the actual
// number of bytes written to dst.
//
-// If Decode encounters invalid input, it returns an OddLengthInputError or an
-// InvalidHexCharError.
+// If Decode encounters invalid input, it returns an error describing the failure.
func Decode(dst, src []byte) (int, error) {
if len(src)%2 == 1 {
- return 0, OddLengthInputError{}
+ return 0, ErrLength
}
for i := 0; i < len(src)/2; i++ {
a, ok := fromHexChar(src[i*2])
if !ok {
- return 0, InvalidHexCharError(src[i*2])
+ return 0, InvalidByteError(src[i*2])
}
b, ok := fromHexChar(src[i*2+1])
if !ok {
- return 0, InvalidHexCharError(src[i*2+1])
+ return 0, InvalidByteError(src[i*2+1])
}
dst[i] = (a << 4) | b
}
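A minimal sketch of the new error values in use; the expected messages match the errTests table below:

package main

import (
	"encoding/hex"
	"fmt"
)

func main() {
	_, err := hex.DecodeString("0g")
	fmt.Println(err) // encoding/hex: invalid byte: U+0067 'g'

	_, err = hex.DecodeString("0")
	fmt.Println(err) // encoding/hex: odd length hex string
	if err == hex.ErrLength {
		fmt.Println("input had odd length")
	}
}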
// Dump returns a string that contains a hex dump of the given data. The format
// of the hex dump matches the output of `hexdump -C` on the command line.
func Dump(data []byte) string {
- buf := bytes.NewBuffer(nil)
- dumper := Dumper(buf)
+ var buf bytes.Buffer
+ dumper := Dumper(&buf)
dumper.Write(data)
dumper.Close()
return string(buf.Bytes())
"testing"
)
-type encodeTest struct {
- in, out []byte
+type encDecTest struct {
+ enc string
+ dec []byte
}
-var encodeTests = []encodeTest{
- {[]byte{}, []byte{}},
- {[]byte{0x01}, []byte{'0', '1'}},
- {[]byte{0xff}, []byte{'f', 'f'}},
- {[]byte{0xff, 00}, []byte{'f', 'f', '0', '0'}},
- {[]byte{0}, []byte{'0', '0'}},
- {[]byte{1}, []byte{'0', '1'}},
- {[]byte{2}, []byte{'0', '2'}},
- {[]byte{3}, []byte{'0', '3'}},
- {[]byte{4}, []byte{'0', '4'}},
- {[]byte{5}, []byte{'0', '5'}},
- {[]byte{6}, []byte{'0', '6'}},
- {[]byte{7}, []byte{'0', '7'}},
- {[]byte{8}, []byte{'0', '8'}},
- {[]byte{9}, []byte{'0', '9'}},
- {[]byte{10}, []byte{'0', 'a'}},
- {[]byte{11}, []byte{'0', 'b'}},
- {[]byte{12}, []byte{'0', 'c'}},
- {[]byte{13}, []byte{'0', 'd'}},
- {[]byte{14}, []byte{'0', 'e'}},
- {[]byte{15}, []byte{'0', 'f'}},
+var encDecTests = []encDecTest{
+ {"", []byte{}},
+ {"0001020304050607", []byte{0, 1, 2, 3, 4, 5, 6, 7}},
+ {"08090a0b0c0d0e0f", []byte{8, 9, 10, 11, 12, 13, 14, 15}},
+ {"f0f1f2f3f4f5f6f7", []byte{0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7}},
+ {"f8f9fafbfcfdfeff", []byte{0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff}},
+ {"67", []byte{'g'}},
+ {"e3a1", []byte{0xe3, 0xa1}},
}
func TestEncode(t *testing.T) {
- for i, test := range encodeTests {
- dst := make([]byte, EncodedLen(len(test.in)))
- n := Encode(dst, test.in)
+ for i, test := range encDecTests {
+ dst := make([]byte, EncodedLen(len(test.dec)))
+ n := Encode(dst, test.dec)
if n != len(dst) {
t.Errorf("#%d: bad return value: got: %d want: %d", i, n, len(dst))
}
- if bytes.Compare(dst, test.out) != 0 {
- t.Errorf("#%d: got: %#v want: %#v", i, dst, test.out)
+ if string(dst) != test.enc {
+ t.Errorf("#%d: got: %#v want: %#v", i, dst, test.enc)
}
}
}
-type decodeTest struct {
- in, out []byte
- ok bool
-}
-
-var decodeTests = []decodeTest{
- {[]byte{}, []byte{}, true},
- {[]byte{'0'}, []byte{}, false},
- {[]byte{'0', 'g'}, []byte{}, false},
- {[]byte{'0', '\x01'}, []byte{}, false},
- {[]byte{'0', '0'}, []byte{0}, true},
- {[]byte{'0', '1'}, []byte{1}, true},
- {[]byte{'0', '2'}, []byte{2}, true},
- {[]byte{'0', '3'}, []byte{3}, true},
- {[]byte{'0', '4'}, []byte{4}, true},
- {[]byte{'0', '5'}, []byte{5}, true},
- {[]byte{'0', '6'}, []byte{6}, true},
- {[]byte{'0', '7'}, []byte{7}, true},
- {[]byte{'0', '8'}, []byte{8}, true},
- {[]byte{'0', '9'}, []byte{9}, true},
- {[]byte{'0', 'a'}, []byte{10}, true},
- {[]byte{'0', 'b'}, []byte{11}, true},
- {[]byte{'0', 'c'}, []byte{12}, true},
- {[]byte{'0', 'd'}, []byte{13}, true},
- {[]byte{'0', 'e'}, []byte{14}, true},
- {[]byte{'0', 'f'}, []byte{15}, true},
- {[]byte{'0', 'A'}, []byte{10}, true},
- {[]byte{'0', 'B'}, []byte{11}, true},
- {[]byte{'0', 'C'}, []byte{12}, true},
- {[]byte{'0', 'D'}, []byte{13}, true},
- {[]byte{'0', 'E'}, []byte{14}, true},
- {[]byte{'0', 'F'}, []byte{15}, true},
-}
-
func TestDecode(t *testing.T) {
- for i, test := range decodeTests {
- dst := make([]byte, DecodedLen(len(test.in)))
- n, err := Decode(dst, test.in)
- if err == nil && n != len(dst) {
+ for i, test := range encDecTests {
+ dst := make([]byte, DecodedLen(len(test.enc)))
+ n, err := Decode(dst, []byte(test.enc))
+ if err != nil {
t.Errorf("#%d: bad return value: got:%d want:%d", i, n, len(dst))
- }
- if test.ok != (err == nil) {
- t.Errorf("#%d: unexpected err value: %s", i, err)
- }
- if err == nil && bytes.Compare(dst, test.out) != 0 {
- t.Errorf("#%d: got: %#v want: %#v", i, dst, test.out)
+ } else if !bytes.Equal(dst, test.dec) {
+ t.Errorf("#%d: got: %#v want: %#v", i, dst, test.dec)
}
}
}
-type encodeStringTest struct {
- in []byte
- out string
-}
-
-var encodeStringTests = []encodeStringTest{
- {[]byte{}, ""},
- {[]byte{0}, "00"},
- {[]byte{0, 1}, "0001"},
- {[]byte{0, 1, 255}, "0001ff"},
+func TestEncodeToString(t *testing.T) {
+ for i, test := range encDecTests {
+ s := EncodeToString(test.dec)
+ if s != test.enc {
+ t.Errorf("#%d got:%s want:%s", i, s, test.enc)
+ }
+ }
}
-func TestEncodeToString(t *testing.T) {
- for i, test := range encodeStringTests {
- s := EncodeToString(test.in)
- if s != test.out {
- t.Errorf("#%d got:%s want:%s", i, s, test.out)
+func TestDecodeString(t *testing.T) {
+ for i, test := range encDecTests {
+ dst, err := DecodeString(test.enc)
+ if err != nil {
+ t.Errorf("#%d: unexpected err value: %s", i, err)
+ continue
+ }
+ if !bytes.Equal(dst, test.dec) {
+ t.Errorf("#%d: got: %#v want: %#v", i, dst, test.dec)
}
}
}
-type decodeStringTest struct {
+type errTest struct {
in string
- out []byte
- ok bool
+ err string
}
-var decodeStringTests = []decodeStringTest{
- {"", []byte{}, true},
- {"0", []byte{}, false},
- {"00", []byte{0}, true},
- {"0\x01", []byte{}, false},
- {"0g", []byte{}, false},
- {"00ff00", []byte{0, 255, 0}, true},
- {"0000ff", []byte{0, 0, 255}, true},
+var errTests = []errTest{
+ {"0", "encoding/hex: odd length hex string"},
+ {"0g", "encoding/hex: invalid byte: U+0067 'g'"},
+ {"0\x01", "encoding/hex: invalid byte: U+0001"},
}
-func TestDecodeString(t *testing.T) {
- for i, test := range decodeStringTests {
- dst, err := DecodeString(test.in)
- if test.ok != (err == nil) {
- t.Errorf("#%d: unexpected err value: %s", i, err)
+func TestInvalidErr(t *testing.T) {
+ for i, test := range errTests {
+ dst := make([]byte, DecodedLen(len(test.in)))
+ _, err := Decode(dst, []byte(test.in))
+ if err == nil {
+ t.Errorf("#%d: expected error; got none")
+ } else if err.Error() != test.err {
+ t.Errorf("#%d: got: %v want: %v", i, err, test.err)
}
- if err == nil && bytes.Compare(dst, test.out) != 0 {
- t.Errorf("#%d: got: %#v want: #%v", i, dst, test.out)
+ }
+}
+
+func TestInvalidStringErr(t *testing.T) {
+ for i, test := range errTests {
+ _, err := DecodeString(test.in)
+ if err == nil {
+ t.Errorf("#%d: expected error; got none")
+ } else if err.Error() != test.err {
+ t.Errorf("#%d: got: %v want: %v", i, err, test.err)
}
}
}
}
for stride := 1; stride < len(in); stride++ {
- out := bytes.NewBuffer(nil)
- dumper := Dumper(out)
+ var out bytes.Buffer
+ dumper := Dumper(&out)
done := 0
for done < len(in) {
todo := done + stride
}`
var pallValueCompact = strings.Map(noSpace, pallValueIndent)
+
+func TestRefUnmarshal(t *testing.T) {
+ type S struct {
+ // Ref is defined in encode_test.go.
+ R0 Ref
+ R1 *Ref
+ }
+ want := S{
+ R0: 12,
+ R1: new(Ref),
+ }
+ *want.R1 = 12
+
+ var got S
+ if err := Unmarshal([]byte(`{"R0":"ref","R1":"ref"}`), &got); err != nil {
+ t.Fatalf("Unmarshal: %v", err)
+ }
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("got %+v, want %+v", got, want)
+ }
+}
return
}
- if j, ok := v.Interface().(Marshaler); ok && (v.Kind() != reflect.Ptr || !v.IsNil()) {
- b, err := j.MarshalJSON()
+ m, ok := v.Interface().(Marshaler)
+ if !ok {
+ // T doesn't match the interface. Check against *T too.
+ if v.Kind() != reflect.Ptr && v.CanAddr() {
+ m, ok = v.Addr().Interface().(Marshaler)
+ if ok {
+ v = v.Addr()
+ }
+ }
+ }
+ if ok && (v.Kind() != reflect.Ptr || !v.IsNil()) {
+ b, err := m.MarshalJSON()
if err == nil {
// copy JSON into buffer, checking validity.
err = Compact(&e.Buffer, b)
}
}
}
+
+// Ref has Marshaler and Unmarshaler methods with pointer receiver.
+type Ref int
+
+func (*Ref) MarshalJSON() ([]byte, error) {
+ return []byte(`"ref"`), nil
+}
+
+func (r *Ref) UnmarshalJSON([]byte) error {
+ *r = 12
+ return nil
+}
+
+// Val has Marshaler methods with value receiver.
+type Val int
+
+func (Val) MarshalJSON() ([]byte, error) {
+ return []byte(`"val"`), nil
+}
+
+func TestRefValMarshal(t *testing.T) {
+ var s = struct {
+ R0 Ref
+ R1 *Ref
+ V0 Val
+ V1 *Val
+ }{
+ R0: 12,
+ R1: new(Ref),
+ V0: 13,
+ V1: new(Val),
+ }
+ const want = `{"R0":"ref","R1":"ref","V0":"val","V1":"val"}`
+ b, err := Marshal(&s)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+ if got := string(b); got != want {
+ t.Errorf("got %q, want %q", got, want)
+ }
+}
return c == ' ' || c == '\t' || c == '\r' || c == '\n'
}
-// NOTE(rsc): The various instances of
-//
-// if c <= ' ' && (c == ' ' || c == '\t' || c == '\r' || c == '\n')
-//
-// below should all be if c <= ' ' && isSpace(c), but inlining
-// the checks makes a significant difference (>10%) in tight loops
-// such as nextValue. These should be rewritten with the clearer
-// function call once 6g knows to inline the call.
-
// stateBeginValueOrEmpty is the state after reading `[`.
func stateBeginValueOrEmpty(s *scanner, c int) int {
- if c <= ' ' && (c == ' ' || c == '\t' || c == '\r' || c == '\n') {
+ if c <= ' ' && isSpace(rune(c)) {
return scanSkipSpace
}
if c == ']' {
// stateBeginValue is the state at the beginning of the input.
func stateBeginValue(s *scanner, c int) int {
- if c <= ' ' && (c == ' ' || c == '\t' || c == '\r' || c == '\n') {
+ if c <= ' ' && isSpace(rune(c)) {
return scanSkipSpace
}
switch c {
// stateBeginStringOrEmpty is the state after reading `{`.
func stateBeginStringOrEmpty(s *scanner, c int) int {
- if c <= ' ' && (c == ' ' || c == '\t' || c == '\r' || c == '\n') {
+ if c <= ' ' && isSpace(rune(c)) {
return scanSkipSpace
}
if c == '}' {
// stateBeginString is the state after reading `{"key": value,`.
func stateBeginString(s *scanner, c int) int {
- if c <= ' ' && (c == ' ' || c == '\t' || c == '\r' || c == '\n') {
+ if c <= ' ' && isSpace(rune(c)) {
return scanSkipSpace
}
if c == '"' {
s.endTop = true
return stateEndTop(s, c)
}
- if c <= ' ' && (c == ' ' || c == '\t' || c == '\r' || c == '\n') {
+ if c <= ' ' && isSpace(rune(c)) {
s.step = stateEndValue
return scanSkipSpace
}
}
func EncodeToMemory(b *Block) []byte {
- buf := bytes.NewBuffer(nil)
- Encode(buf, b)
+ var buf bytes.Buffer
+ Encode(&buf, b)
return buf.Bytes()
}
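A minimal sketch of EncodeToMemory; the block type and payload are illustrative only:

package main

import (
	"encoding/pem"
	"fmt"
)

func main() {
	out := pem.EncodeToMemory(&pem.Block{
		Type:  "MESSAGE",
		Bytes: []byte("hello"),
	})
	fmt.Printf("%s", out)
	// -----BEGIN MESSAGE-----
	// aGVsbG8=
	// -----END MESSAGE-----
}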
func TestLineBreaker(t *testing.T) {
for i, test := range lineBreakerTests {
- buf := bytes.NewBuffer(nil)
+ buf := new(bytes.Buffer)
var breaker lineBreaker
breaker.out = buf
_, err := breaker.Write([]byte(test.in))
}
for i, test := range lineBreakerTests {
- buf := bytes.NewBuffer(nil)
+ buf := new(bytes.Buffer)
var breaker lineBreaker
breaker.out = buf
Value: &NameInField{Name{Space: "ns", Local: "foo"}},
ExpectXML: `<NameInField><foo xmlns="ns"></foo></NameInField>`,
},
+ {
+ Value: &NameInField{Name{Space: "ns", Local: "foo"}},
+ ExpectXML: `<NameInField><foo xmlns="ns"><ignore></ignore></foo></NameInField>`,
+ UnmarshalOnly: true,
+ },
// Marshaling zero xml.Name uses the tag or field name.
{
saveData = v
case reflect.Struct:
- sv = v
- typ := sv.Type()
+ typ := v.Type()
if typ == nameType {
v.Set(reflect.ValueOf(start.Name))
break
}
+
+ sv = v
tinfo, err = getTypeInfo(typ)
if err != nil {
return err
panic("unreachable")
}
-// Have already read a start element.
-// Read tokens until we find the end element.
-// Token is taking care of making sure the
-// end element matches the start element we saw.
-func (p *Decoder) Skip() error {
+// Skip reads tokens until it has consumed the end element
+// matching the most recent start element already consumed.
+// It recurs if it encounters a start element, so it can be used to
+// skip nested structures.
+// It returns nil if it finds an end element matching the start
+// element; otherwise it returns an error describing the problem.
+func (d *Decoder) Skip() error {
for {
- tok, err := p.Token()
+ tok, err := d.Token()
if err != nil {
return err
}
switch tok.(type) {
case StartElement:
- if err := p.Skip(); err != nil {
+ if err := d.Skip(); err != nil {
return err
}
case EndElement:
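A minimal sketch of Decoder.Skip as documented above; the XML input and element names are made up for illustration:

package main

import (
	"encoding/xml"
	"fmt"
	"log"
	"strings"
)

func main() {
	const data = `<root><skipme><nested>x</nested></skipme><keep>y</keep></root>`
	d := xml.NewDecoder(strings.NewReader(data))
	for {
		tok, err := d.Token()
		if err != nil {
			break // io.EOF once the input is exhausted
		}
		if se, ok := tok.(xml.StartElement); ok && se.Name.Local == "skipme" {
			// Consume everything up to and including </skipme>.
			if err := d.Skip(); err != nil {
				log.Fatal(err)
			}
			fmt.Println("skipped <skipme> and its children")
		}
	}
}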
// If the field type has an XMLName field, the names must match
// so that the behavior of both marshalling and unmarshalling
- // is straighforward and unambiguous.
+ // is straightforward and unambiguous.
if finfo.flags&fElement != 0 {
ftyp := f.Type
xmlname := lookupXMLName(ftyp)
/*
-Ebnflint verifies that EBNF productions are consistent and gramatically correct.
+Ebnflint verifies that EBNF productions are consistent and grammatically correct.
It reads them from an HTML document such as the Go specification.
Grammar productions are grouped in boxes demarcated by the HTML elements
Usage:
- ebnflint [--start production] [file]
+ go tool ebnflint [--start production] [file]
The --start flag specifies the name of the start production for
the grammar; it defaults to "Start".
var start = flag.String("start", "Start", "name of start production")
func usage() {
- fmt.Fprintf(os.Stderr, "usage: ebnflint [flags] [filename]\n")
+ fmt.Fprintf(os.Stderr, "usage: go tool ebnflint [flags] [filename]\n")
flag.PrintDefaults()
os.Exit(1)
}
if strings.IndexAny(s, escapedChars) == -1 {
return s
}
- buf := bytes.NewBuffer(nil)
- escape(buf, s)
+ var buf bytes.Buffer
+ escape(&buf, s)
return buf.String()
}
if n == nil || len(n.Child) == 0 {
return "", nil
}
- b := bytes.NewBuffer(nil)
+ var b bytes.Buffer
for _, child := range n.Child {
- if err := dumpLevel(b, child, 0); err != nil {
+ if err := dumpLevel(&b, child, 0); err != nil {
return "", err
}
}
if len(t.Attr) == 0 {
return t.Data
}
- buf := bytes.NewBuffer(nil)
- buf.WriteString(t.Data)
+ buf := bytes.NewBufferString(t.Data)
for _, a := range t.Attr {
buf.WriteByte(' ')
buf.WriteString(a.Key)
func TestBufAPI(t *testing.T) {
s := "0<a>1</a>2<b>3<a>4<a>5</a>6</b>7</a>8<a/>9"
- z := NewTokenizer(bytes.NewBuffer([]byte(s)))
- result := bytes.NewBuffer(nil)
+ z := NewTokenizer(bytes.NewBufferString(s))
+ var result bytes.Buffer
depth := 0
loop:
for {
}
wd, err := syscall.InotifyAddWatch(w.fd, path, flags)
if err != nil {
- return &os.PathError{"inotify_add_watch", path, err}
+ return &os.PathError{
+ Op: "inotify_add_watch",
+ Path: path,
+ Err: err,
+ }
}
if !found {
func (rb *reorderBuffer) insert(src input, i int, info runeInfo) bool {
if info.size == 3 {
if rune := src.hangul(i); rune != 0 {
- return rb.decomposeHangul(uint32(rune))
+ return rb.decomposeHangul(rune)
}
}
- if info.flags.hasDecomposition() {
+ if info.hasDecomposition() {
dcomp := rb.f.decompose(src, i)
rb.tmpBytes = inputBytes(dcomp)
for i := 0; i < len(dcomp); {
}
// appendRune inserts a rune at the end of the buffer. It is used for Hangul.
-func (rb *reorderBuffer) appendRune(r uint32) {
+func (rb *reorderBuffer) appendRune(r rune) {
bn := rb.nbyte
sz := utf8.EncodeRune(rb.byte[bn:], rune(r))
rb.nbyte += utf8.UTFMax
- rb.rune[rb.nrune] = runeInfo{bn, uint8(sz), 0, 0}
+ rb.rune[rb.nrune] = runeInfo{pos: bn, size: uint8(sz)}
rb.nrune++
}
// assignRune sets a rune at position pos. It is used for Hangul and recomposition.
-func (rb *reorderBuffer) assignRune(pos int, r uint32) {
+func (rb *reorderBuffer) assignRune(pos int, r rune) {
bn := rb.rune[pos].pos
sz := utf8.EncodeRune(rb.byte[bn:], rune(r))
- rb.rune[pos] = runeInfo{bn, uint8(sz), 0, 0}
+ rb.rune[pos] = runeInfo{pos: bn, size: uint8(sz)}
}
// runeAt returns the rune at position n. It is used for Hangul and recomposition.
-func (rb *reorderBuffer) runeAt(n int) uint32 {
+func (rb *reorderBuffer) runeAt(n int) rune {
inf := rb.rune[n]
r, _ := utf8.DecodeRune(rb.byte[inf.pos : inf.pos+inf.size])
- return uint32(r)
+ return r
}
// bytesAt returns the UTF-8 encoding of the rune at position n.
// decomposeHangul algorithmically decomposes a Hangul rune into
// its Jamo components.
// See http://unicode.org/reports/tr15/#Hangul for details on decomposing Hangul.
-func (rb *reorderBuffer) decomposeHangul(r uint32) bool {
+func (rb *reorderBuffer) decomposeHangul(r rune) bool {
b := rb.rune[:]
n := rb.nrune
if n+3 > len(b) {
// get the info for the combined character. This is more
// expensive than using the filter. Using combinesBackward()
// is safe.
- if ii.flags.combinesBackward() {
+ if ii.combinesBackward() {
cccB := b[k-1].ccc
cccC := ii.ccc
blocked := false // b[i] blocked by starter or greater or equal CCC?
}
// functions dispatchable per form
-type boundaryFunc func(f *formInfo, info runeInfo) bool
type lookupFunc func(b input, i int) runeInfo
type decompFunc func(b input, i int) []byte
composing, compatibility bool // form type
- decompose decompFunc
- info lookupFunc
- boundaryBefore boundaryFunc
- boundaryAfter boundaryFunc
+ decompose decompFunc
+ info lookupFunc
}
var formTable []*formInfo
}
if Form(i) == NFC || Form(i) == NFKC {
f.composing = true
- f.boundaryBefore = compBoundaryBefore
- f.boundaryAfter = compBoundaryAfter
- } else {
- f.boundaryBefore = decompBoundary
- f.boundaryAfter = decompBoundary
}
}
}
-func decompBoundary(f *formInfo, info runeInfo) bool {
- if info.ccc == 0 && info.flags.isYesD() { // Implies isHangul(b) == true
- return true
- }
- // We assume that the CCC of the first character in a decomposition
- // is always non-zero if different from info.ccc and that we can return
- // false at this point. This is verified by maketables.
- return false
-}
-
-func compBoundaryBefore(f *formInfo, info runeInfo) bool {
- if info.ccc == 0 && !info.flags.combinesBackward() {
+// We do not distinguish between boundaries for NFC, NFD, etc. to avoid
+// unexpected behavior for the user. For example, in NFD, there is a boundary
+// after 'a'. However, 'a' might combine with modifiers, so from the application's
+// perspective it is not a good boundary. We will therefore always use the
+// boundaries for the combining variants.
+func (i runeInfo) boundaryBefore() bool {
+ if i.ccc == 0 && !i.combinesBackward() {
return true
}
// We assume that the CCC of the first character in a decomposition
return false
}
-func compBoundaryAfter(f *formInfo, info runeInfo) bool {
- // This misses values where the last char in a decomposition is a
- // boundary such as Hangul with JamoT.
- return info.isInert()
+func (i runeInfo) boundaryAfter() bool {
+ return i.isInert()
}
// We pack quick check data in 4 bits:
// 0: NFD_QC Yes (0) or No (1). No also means there is a decomposition.
-// 1..2: NFC_QC Yes(00), No (01), or Maybe (11)
+// 1..2: NFC_QC Yes(00), No (10), or Maybe (11)
// 3: Combines forward (0 == false, 1 == true)
//
// When all 4 bits are zero, the character is inert, meaning it is never
// We pack the bits for both NFC/D and NFKC/D in one byte.
type qcInfo uint8
-func (i qcInfo) isYesC() bool { return i&0x2 == 0 }
-func (i qcInfo) isNoC() bool { return i&0x6 == 0x2 }
-func (i qcInfo) isMaybe() bool { return i&0x4 != 0 }
-func (i qcInfo) isYesD() bool { return i&0x1 == 0 }
-func (i qcInfo) isNoD() bool { return i&0x1 != 0 }
+func (i runeInfo) isYesC() bool { return i.flags&0x4 == 0 }
+func (i runeInfo) isYesD() bool { return i.flags&0x1 == 0 }
-func (i qcInfo) combinesForward() bool { return i&0x8 != 0 }
-func (i qcInfo) combinesBackward() bool { return i&0x4 != 0 } // == isMaybe
-func (i qcInfo) hasDecomposition() bool { return i&0x1 != 0 } // == isNoD
+func (i runeInfo) combinesForward() bool { return i.flags&0x8 != 0 }
+func (i runeInfo) combinesBackward() bool { return i.flags&0x2 != 0 } // == isMaybe
+func (i runeInfo) hasDecomposition() bool { return i.flags&0x1 != 0 } // == isNoD
func (r runeInfo) isInert() bool {
return r.flags&0xf == 0 && r.ccc == 0
// Wrappers for tables.go
-// The 16-bit value of the decompostion tries is an index into a byte
+// The 16-bit value of the decomposition tries is an index into a byte
// array of UTF-8 decomposition sequences. The first byte is the number
// of bytes in the decomposition (excluding this length byte). The actual
// sequence starts at the offset+1.
// Note that the recomposition map for NFC and NFKC are identical.
// combine returns the combined rune or 0 if it doesn't exist.
-func combine(a, b uint32) uint32 {
+func combine(a, b rune) rune {
key := uint32(uint16(a))<<16 + uint32(uint16(b))
return recompMap[key]
}
// 12..15 qcInfo for NFKC/NFKD
func lookupInfoNFC(b input, i int) runeInfo {
v, sz := b.charinfo(i)
- return runeInfo{0, uint8(sz), uint8(v), qcInfo(v >> 8)}
+ return runeInfo{size: uint8(sz), ccc: uint8(v), flags: qcInfo(v >> 8)}
}
func lookupInfoNFKC(b input, i int) runeInfo {
v, sz := b.charinfo(i)
- return runeInfo{0, uint8(sz), uint8(v), qcInfo(v >> 12)}
+ return runeInfo{size: uint8(sz), ccc: uint8(v), flags: qcInfo(v >> 12)}
}
charinfo(p int) (uint16, int)
decomposeNFC(p int) uint16
decomposeNFKC(p int) uint16
- hangul(p int) uint32
+ hangul(p int) rune
}
type inputString string
return nfkcDecompTrie.lookupStringUnsafe(string(s[p:]))
}
-func (s inputString) hangul(p int) uint32 {
+func (s inputString) hangul(p int) rune {
if !isHangulString(string(s[p:])) {
return 0
}
rune, _ := utf8.DecodeRuneInString(string(s[p:]))
- return uint32(rune)
+ return rune
}
type inputBytes []byte
return nfkcDecompTrie.lookupUnsafe(s[p:])
}
-func (s inputBytes) hangul(p int) uint32 {
+func (s inputBytes) hangul(p int) rune {
if !isHangul(s[p:]) {
return 0
}
rune, _ := utf8.DecodeRune(s[p:])
- return uint32(rune)
+ return rune
}
switch f.quickCheck[MComposed] {
case QCYes:
case QCNo:
- e |= 0x2
+ e |= 0x4
case QCMaybe:
e |= 0x6
default:
sz := nrentries * 8
size += sz
fmt.Printf("// recompMap: %d bytes (entries only)\n", sz)
- fmt.Println("var recompMap = map[uint32]uint32{")
+ fmt.Println("var recompMap = map[uint32]rune{")
for i, c := range chars {
f := c.forms[FCanonical]
d := f.decomp
var info runeInfo
if p < n {
info = fd.info(src, p)
- if p == 0 && !fd.boundaryBefore(fd, info) {
+ if p == 0 && !info.boundaryBefore() {
out = decomposeToLastBoundary(rb, out)
}
}
- if info.size == 0 || fd.boundaryBefore(fd, info) {
+ if info.size == 0 || info.boundaryBefore() {
if fd.composing {
rb.compose()
}
}
cc := info.ccc
if rb.f.composing {
- if !info.flags.isYesC() {
+ if !info.isYesC() {
break
}
} else {
- if !info.flags.isYesD() {
+ if !info.isYesD() {
break
}
}
}
fd := &rb.f
info := fd.info(src, i)
- for n := 0; info.size != 0 && !fd.boundaryBefore(fd, info); {
+ for n := 0; info.size != 0 && !info.boundaryBefore(); {
i += int(info.size)
if n++; n >= maxCombiningChars {
return i
}
if i >= nsrc {
- if !fd.boundaryAfter(fd, info) {
+ if !info.boundaryAfter() {
return -1
}
return nsrc
if p+int(info.size) != i { // trailing non-starter bytes: illegal UTF-8
return i
}
- if fd.boundaryAfter(fd, info) {
+ if info.boundaryAfter() {
return i
}
i = p
- for n := 0; i >= 0 && !fd.boundaryBefore(fd, info); {
+ for n := 0; i >= 0 && !info.boundaryBefore(); {
info, p = lastRuneStart(fd, b[:i])
if n++; n >= maxCombiningChars {
return len(b)
break
}
info = rb.f.info(rb.src, sp)
- bound := rb.f.boundaryBefore(&rb.f, info)
+ bound := info.boundaryBefore()
if bound || info.size == 0 {
break
}
for ; p >= 0 && !utf8.RuneStart(buf[p]); p-- {
}
if p < 0 {
- return runeInfo{0, 0, 0, 0}, -1
+ return runeInfo{}, -1
}
return fd.info(inputBytes(buf), p), p
}
// illegal trailing continuation bytes
return buf
}
- if rb.f.boundaryAfter(fd, info) {
+ if info.boundaryAfter() {
return buf
}
var add [maxBackRunes]runeInfo // stores runeInfo in reverse order
padd := 1
n := 1
p := len(buf) - int(info.size)
- for ; p >= 0 && !rb.f.boundaryBefore(fd, info); p -= int(info.size) {
+ for ; p >= 0 && !info.boundaryBefore(); p -= int(info.size) {
info, i = lastRuneStart(fd, buf[:p])
if int(info.size) != p-i {
break
}
// Check that decomposition doesn't result in overflow.
- if info.flags.hasDecomposition() {
+ if info.hasDecomposition() {
dcomp := rb.f.decompose(inputBytes(buf), p-int(info.size))
for i := 0; i < len(dcomp); {
inf := rb.f.info(inputBytes(dcomp), i)
runAppendTests(t, "TestString", NFKC, stringF, appendTests)
}
-func doFormBenchmark(b *testing.B, f Form, s string) {
+func doFormBenchmark(b *testing.B, inf, f Form, s string) {
b.StopTimer()
- in := []byte(s)
+ in := inf.Bytes([]byte(s))
buf := make([]byte, 2*len(in))
- b.SetBytes(int64(len(s)))
+ b.SetBytes(int64(len(in)))
b.StartTimer()
for i := 0; i < b.N; i++ {
buf = f.Append(buf[0:0], in...)
var ascii = strings.Repeat("There is nothing to change here! ", 500)
func BenchmarkNormalizeAsciiNFC(b *testing.B) {
- doFormBenchmark(b, NFC, ascii)
+ doFormBenchmark(b, NFC, NFC, ascii)
}
func BenchmarkNormalizeAsciiNFD(b *testing.B) {
- doFormBenchmark(b, NFD, ascii)
+ doFormBenchmark(b, NFC, NFD, ascii)
}
func BenchmarkNormalizeAsciiNFKC(b *testing.B) {
- doFormBenchmark(b, NFKC, ascii)
+ doFormBenchmark(b, NFC, NFKC, ascii)
}
func BenchmarkNormalizeAsciiNFKD(b *testing.B) {
- doFormBenchmark(b, NFKD, ascii)
+ doFormBenchmark(b, NFC, NFKD, ascii)
+}
+
+func BenchmarkNormalizeNFC2NFC(b *testing.B) {
+ doFormBenchmark(b, NFC, NFC, txt_all)
+}
+func BenchmarkNormalizeNFC2NFD(b *testing.B) {
+ doFormBenchmark(b, NFC, NFD, txt_all)
+}
+func BenchmarkNormalizeNFD2NFC(b *testing.B) {
+ doFormBenchmark(b, NFD, NFC, txt_all)
+}
+func BenchmarkNormalizeNFD2NFD(b *testing.B) {
+ doFormBenchmark(b, NFD, NFD, txt_all)
+}
+
+// Hangul is often special-cased, so we test it separately.
+func BenchmarkNormalizeHangulNFC2NFC(b *testing.B) {
+ doFormBenchmark(b, NFC, NFC, txt_kr)
+}
+func BenchmarkNormalizeHangulNFC2NFD(b *testing.B) {
+ doFormBenchmark(b, NFC, NFD, txt_kr)
+}
+func BenchmarkNormalizeHangulNFD2NFC(b *testing.B) {
+ doFormBenchmark(b, NFD, NFC, txt_kr)
+}
+func BenchmarkNormalizeHangulNFD2NFD(b *testing.B) {
+ doFormBenchmark(b, NFD, NFD, txt_kr)
}
func doTextBenchmark(b *testing.B, s string) {
署名 — 您必须按照作者或者许可人指定的方式对作品进行署名。
相同方式共享 — 如果您改变、转换本作品或者以本作品为基础进行创作,
您只能采用与本协议相同的许可协议发布基于本作品的演绎作品。`
+
+const txt_cjk = txt_cn + txt_jp + txt_kr
+const txt_all = txt_vn + twoByteUtf8 + threeByteUtf8 + txt_cjk
var nfkcDecompTrie = trie{nfkcDecompLookup[:], nfkcDecompValues[:], nfkcDecompSparseValues[:], nfkcDecompSparseOffset[:], 66}
// recompMap: 7448 bytes (entries only)
-var recompMap = map[uint32]uint32{
+var recompMap = map[uint32]rune{
0x00410300: 0x00C0,
0x00410301: 0x00C1,
0x00410302: 0x00C2,
0x0136: 0x0001, 0x0137: 0x0001, 0x0138: 0x6601, 0x0139: 0x00dc, 0x013a: 0x00dc, 0x013b: 0x00dc,
0x013c: 0x00dc, 0x013d: 0x00e6, 0x013e: 0x00e6, 0x013f: 0x00e6,
// Block 0x5, offset 0x140
- 0x0140: 0x33e6, 0x0141: 0x33e6, 0x0142: 0x66e6, 0x0143: 0x33e6, 0x0144: 0x33e6, 0x0145: 0x66f0,
+ 0x0140: 0x55e6, 0x0141: 0x55e6, 0x0142: 0x66e6, 0x0143: 0x55e6, 0x0144: 0x55e6, 0x0145: 0x66f0,
0x0146: 0x00e6, 0x0147: 0x00dc, 0x0148: 0x00dc, 0x0149: 0x00dc, 0x014a: 0x00e6, 0x014b: 0x00e6,
0x014c: 0x00e6, 0x014d: 0x00dc, 0x014e: 0x00dc, 0x0150: 0x00e6, 0x0151: 0x00e6,
0x0152: 0x00e6, 0x0153: 0x00dc, 0x0154: 0x00dc, 0x0155: 0x00dc, 0x0156: 0x00dc, 0x0157: 0x00e6,
0x015e: 0x00ea, 0x015f: 0x00e9, 0x0160: 0x00ea, 0x0161: 0x00ea, 0x0162: 0x00e9, 0x0163: 0x00e6,
0x0164: 0x00e6, 0x0165: 0x00e6, 0x0166: 0x00e6, 0x0167: 0x00e6, 0x0168: 0x00e6, 0x0169: 0x00e6,
0x016a: 0x00e6, 0x016b: 0x00e6, 0x016c: 0x00e6, 0x016d: 0x00e6, 0x016e: 0x00e6, 0x016f: 0x00e6,
- 0x0174: 0x3300,
- 0x017a: 0x3000,
- 0x017e: 0x3300,
+ 0x0174: 0x5500,
+ 0x017a: 0x5000,
+ 0x017e: 0x5500,
// Block 0x6, offset 0x180
- 0x0184: 0x3000, 0x0185: 0x3100,
- 0x0186: 0x1100, 0x0187: 0x3300, 0x0188: 0x1100, 0x0189: 0x1100, 0x018a: 0x1100,
+ 0x0184: 0x5000, 0x0185: 0x5100,
+ 0x0186: 0x1100, 0x0187: 0x5500, 0x0188: 0x1100, 0x0189: 0x1100, 0x018a: 0x1100,
0x018c: 0x1100, 0x018e: 0x1100, 0x018f: 0x1100, 0x0190: 0x1100, 0x0191: 0x8800,
0x0195: 0x8800, 0x0197: 0x8800,
0x0199: 0x8800,
0x01f6: 0x8800, 0x01f7: 0x8800, 0x01f8: 0x8800, 0x01f9: 0x1100, 0x01fa: 0x8800,
0x01fe: 0x8800,
// Block 0x8, offset 0x200
- 0x0207: 0x3000,
+ 0x0207: 0x5000,
0x0211: 0x00dc,
0x0212: 0x00e6, 0x0213: 0x00e6, 0x0214: 0x00e6, 0x0215: 0x00e6, 0x0216: 0x00dc, 0x0217: 0x00e6,
0x0218: 0x00e6, 0x0219: 0x00e6, 0x021a: 0x00de, 0x021b: 0x00dc, 0x021c: 0x00e6, 0x021d: 0x00e6,
0x0252: 0x0022, 0x0253: 0x66e6, 0x0254: 0x66e6, 0x0255: 0x66dc, 0x0256: 0x00dc, 0x0257: 0x00e6,
0x0258: 0x00e6, 0x0259: 0x00e6, 0x025a: 0x00e6, 0x025b: 0x00e6, 0x025c: 0x00dc, 0x025d: 0x00e6,
0x025e: 0x00e6, 0x025f: 0x00dc,
- 0x0270: 0x0023, 0x0275: 0x3000,
- 0x0276: 0x3000, 0x0277: 0x3000, 0x0278: 0x3000,
+ 0x0270: 0x0023, 0x0275: 0x5000,
+ 0x0276: 0x5000, 0x0277: 0x5000, 0x0278: 0x5000,
// Block 0xa, offset 0x280
0x0280: 0x9900, 0x0281: 0x9900, 0x0282: 0x1100, 0x0283: 0x1100, 0x0284: 0x1100, 0x0285: 0x1100,
0x0288: 0x9900, 0x0289: 0x9900, 0x028a: 0x1100, 0x028b: 0x1100,
0x029f: 0x1100, 0x02a0: 0x9900, 0x02a1: 0x9900, 0x02a2: 0x9900, 0x02a3: 0x9900,
0x02a4: 0x9900, 0x02a5: 0x9900, 0x02a6: 0x9900, 0x02a7: 0x9900, 0x02a8: 0x9900, 0x02a9: 0x9900,
0x02aa: 0x9900, 0x02ab: 0x9900, 0x02ac: 0x9900, 0x02ad: 0x9900, 0x02ae: 0x9900, 0x02af: 0x9900,
- 0x02b0: 0x9900, 0x02b1: 0x3300, 0x02b2: 0x1100, 0x02b3: 0x3300, 0x02b4: 0x9900, 0x02b5: 0x3300,
- 0x02b6: 0x1100, 0x02b7: 0x3300, 0x02b8: 0x1100, 0x02b9: 0x3300, 0x02ba: 0x1100, 0x02bb: 0x3300,
- 0x02bc: 0x9900, 0x02bd: 0x3300,
+ 0x02b0: 0x9900, 0x02b1: 0x5500, 0x02b2: 0x1100, 0x02b3: 0x5500, 0x02b4: 0x9900, 0x02b5: 0x5500,
+ 0x02b6: 0x1100, 0x02b7: 0x5500, 0x02b8: 0x1100, 0x02b9: 0x5500, 0x02ba: 0x1100, 0x02bb: 0x5500,
+ 0x02bc: 0x9900, 0x02bd: 0x5500,
// Block 0xb, offset 0x2c0
- 0x02c0: 0x3000, 0x02c1: 0x3100, 0x02c2: 0x1100, 0x02c3: 0x1100, 0x02c4: 0x1100,
- 0x02c6: 0x9900, 0x02c7: 0x1100, 0x02c8: 0x1100, 0x02c9: 0x3300, 0x02ca: 0x1100, 0x02cb: 0x3300,
- 0x02cc: 0x1100, 0x02cd: 0x3100, 0x02ce: 0x3100, 0x02cf: 0x3100, 0x02d0: 0x1100, 0x02d1: 0x1100,
- 0x02d2: 0x1100, 0x02d3: 0x3300, 0x02d6: 0x1100, 0x02d7: 0x1100,
- 0x02d8: 0x1100, 0x02d9: 0x1100, 0x02da: 0x1100, 0x02db: 0x3300, 0x02dd: 0x3100,
- 0x02de: 0x3100, 0x02df: 0x3100, 0x02e0: 0x1100, 0x02e1: 0x1100, 0x02e2: 0x1100, 0x02e3: 0x3300,
+ 0x02c0: 0x5000, 0x02c1: 0x5100, 0x02c2: 0x1100, 0x02c3: 0x1100, 0x02c4: 0x1100,
+ 0x02c6: 0x9900, 0x02c7: 0x1100, 0x02c8: 0x1100, 0x02c9: 0x5500, 0x02ca: 0x1100, 0x02cb: 0x5500,
+ 0x02cc: 0x1100, 0x02cd: 0x5100, 0x02ce: 0x5100, 0x02cf: 0x5100, 0x02d0: 0x1100, 0x02d1: 0x1100,
+ 0x02d2: 0x1100, 0x02d3: 0x5500, 0x02d6: 0x1100, 0x02d7: 0x1100,
+ 0x02d8: 0x1100, 0x02d9: 0x1100, 0x02da: 0x1100, 0x02db: 0x5500, 0x02dd: 0x5100,
+ 0x02de: 0x5100, 0x02df: 0x5100, 0x02e0: 0x1100, 0x02e1: 0x1100, 0x02e2: 0x1100, 0x02e3: 0x5500,
0x02e4: 0x1100, 0x02e5: 0x1100, 0x02e6: 0x1100, 0x02e7: 0x1100, 0x02e8: 0x1100, 0x02e9: 0x1100,
- 0x02ea: 0x1100, 0x02eb: 0x3300, 0x02ec: 0x1100, 0x02ed: 0x3100, 0x02ee: 0x3300, 0x02ef: 0x3300,
+ 0x02ea: 0x1100, 0x02eb: 0x5500, 0x02ec: 0x1100, 0x02ed: 0x5100, 0x02ee: 0x5500, 0x02ef: 0x5500,
0x02f2: 0x1100, 0x02f3: 0x1100, 0x02f4: 0x1100,
- 0x02f6: 0x9900, 0x02f7: 0x1100, 0x02f8: 0x1100, 0x02f9: 0x3300, 0x02fa: 0x1100, 0x02fb: 0x3300,
- 0x02fc: 0x1100, 0x02fd: 0x3300, 0x02fe: 0x3800,
+ 0x02f6: 0x9900, 0x02f7: 0x1100, 0x02f8: 0x1100, 0x02f9: 0x5500, 0x02fa: 0x1100, 0x02fb: 0x5500,
+ 0x02fc: 0x1100, 0x02fd: 0x5500, 0x02fe: 0x5800,
// Block 0xc, offset 0x300
0x0301: 0x1100, 0x0303: 0x8800, 0x0304: 0x1100, 0x0305: 0x8800,
0x0307: 0x1100, 0x0308: 0x8800, 0x0309: 0x1100,
0x037c: 0x1100, 0x037d: 0x1100,
// Block 0xe, offset 0x380
0x0394: 0x1100,
- 0x0399: 0x6608, 0x039a: 0x6608, 0x039b: 0x3000, 0x039c: 0x3000, 0x039d: 0x8800,
- 0x039e: 0x1100, 0x039f: 0x3000,
+ 0x0399: 0x6608, 0x039a: 0x6608, 0x039b: 0x5000, 0x039c: 0x5000, 0x039d: 0x8800,
+ 0x039e: 0x1100, 0x039f: 0x5000,
0x03a6: 0x8800,
0x03ab: 0x8800, 0x03ac: 0x1100, 0x03ad: 0x8800, 0x03ae: 0x1100, 0x03af: 0x8800,
0x03b0: 0x1100, 0x03b1: 0x8800, 0x03b2: 0x1100, 0x03b3: 0x8800, 0x03b4: 0x1100, 0x03b5: 0x8800,
0x03ef: 0x8800,
0x03f0: 0x8800, 0x03f1: 0x8800, 0x03f2: 0x8800, 0x03f4: 0x1100,
0x03f7: 0x1100, 0x03f8: 0x1100, 0x03f9: 0x1100, 0x03fa: 0x1100,
- 0x03fd: 0x8800, 0x03fe: 0x1100, 0x03ff: 0x3000,
+ 0x03fd: 0x8800, 0x03fe: 0x1100, 0x03ff: 0x5000,
}
// charInfoSparseOffset: 156 entries, 312 bytes
var charInfoSparseValues = [757]valueRange{
// Block 0x0, offset 0x1
{value: 0x0000, lo: 0x07},
- {value: 0x3000, lo: 0xa0, hi: 0xa0},
- {value: 0x3800, lo: 0xa8, hi: 0xa8},
- {value: 0x3000, lo: 0xaa, hi: 0xaa},
- {value: 0x3000, lo: 0xaf, hi: 0xaf},
- {value: 0x3000, lo: 0xb2, hi: 0xb5},
- {value: 0x3000, lo: 0xb8, hi: 0xba},
- {value: 0x3000, lo: 0xbc, hi: 0xbe},
+ {value: 0x5000, lo: 0xa0, hi: 0xa0},
+ {value: 0x5800, lo: 0xa8, hi: 0xa8},
+ {value: 0x5000, lo: 0xaa, hi: 0xaa},
+ {value: 0x5000, lo: 0xaf, hi: 0xaf},
+ {value: 0x5000, lo: 0xb2, hi: 0xb5},
+ {value: 0x5000, lo: 0xb8, hi: 0xba},
+ {value: 0x5000, lo: 0xbc, hi: 0xbe},
// Block 0x1, offset 0x2
{value: 0x0000, lo: 0x0a},
{value: 0x1100, lo: 0x80, hi: 0x81},
{value: 0x9900, lo: 0x92, hi: 0x93},
{value: 0x1100, lo: 0x94, hi: 0xa5},
{value: 0x1100, lo: 0xa8, hi: 0xb0},
- {value: 0x3000, lo: 0xb2, hi: 0xb3},
+ {value: 0x5000, lo: 0xb2, hi: 0xb3},
{value: 0x1100, lo: 0xb4, hi: 0xb7},
{value: 0x1100, lo: 0xb9, hi: 0xbe},
- {value: 0x3000, lo: 0xbf, hi: 0xbf},
+ {value: 0x5000, lo: 0xbf, hi: 0xbf},
// Block 0x2, offset 0x3
{value: 0x0000, lo: 0x0d},
- {value: 0x3000, lo: 0x80, hi: 0x80},
+ {value: 0x5000, lo: 0x80, hi: 0x80},
{value: 0x1100, lo: 0x83, hi: 0x88},
- {value: 0x3000, lo: 0x89, hi: 0x89},
+ {value: 0x5000, lo: 0x89, hi: 0x89},
{value: 0x9900, lo: 0x8c, hi: 0x8d},
{value: 0x1100, lo: 0x8e, hi: 0x91},
{value: 0x1100, lo: 0x94, hi: 0x99},
{value: 0x1100, lo: 0xa2, hi: 0xa5},
{value: 0x9900, lo: 0xa8, hi: 0xab},
{value: 0x1100, lo: 0xac, hi: 0xbe},
- {value: 0x3800, lo: 0xbf, hi: 0xbf},
+ {value: 0x5800, lo: 0xbf, hi: 0xbf},
// Block 0x3, offset 0x4
{value: 0x0000, lo: 0x03},
{value: 0x9900, lo: 0xa0, hi: 0xa1},
{value: 0x8800, lo: 0xb7, hi: 0xb7},
// Block 0x4, offset 0x5
{value: 0x0000, lo: 0x09},
- {value: 0x3000, lo: 0x84, hi: 0x8c},
+ {value: 0x5000, lo: 0x84, hi: 0x8c},
{value: 0x1100, lo: 0x8d, hi: 0x9c},
{value: 0x1100, lo: 0x9e, hi: 0xa3},
{value: 0x1100, lo: 0xa6, hi: 0xa9},
{value: 0x9900, lo: 0xaa, hi: 0xab},
{value: 0x1100, lo: 0xac, hi: 0xb0},
- {value: 0x3000, lo: 0xb1, hi: 0xb3},
+ {value: 0x5000, lo: 0xb1, hi: 0xb3},
{value: 0x1100, lo: 0xb4, hi: 0xb5},
{value: 0x1100, lo: 0xb8, hi: 0xbf},
// Block 0x5, offset 0x6
// Block 0x6, offset 0x7
{value: 0x0000, lo: 0x02},
{value: 0x8800, lo: 0x92, hi: 0x92},
- {value: 0x3000, lo: 0xb0, hi: 0xb8},
+ {value: 0x5000, lo: 0xb0, hi: 0xb8},
// Block 0x7, offset 0x8
{value: 0x0000, lo: 0x02},
- {value: 0x3000, lo: 0x98, hi: 0x9d},
- {value: 0x3000, lo: 0xa0, hi: 0xa4},
+ {value: 0x5000, lo: 0x98, hi: 0x9d},
+ {value: 0x5000, lo: 0xa0, hi: 0xa4},
// Block 0x8, offset 0x9
{value: 0x0000, lo: 0x0d},
{value: 0x8800, lo: 0x81, hi: 0x81},
{value: 0x9900, lo: 0x8a, hi: 0x8b},
{value: 0x1100, lo: 0x8c, hi: 0x8d},
{value: 0x9900, lo: 0x8e, hi: 0x8e},
- {value: 0x3000, lo: 0x90, hi: 0x91},
- {value: 0x3800, lo: 0x92, hi: 0x92},
- {value: 0x3100, lo: 0x93, hi: 0x94},
- {value: 0x3000, lo: 0x95, hi: 0x96},
- {value: 0x3000, lo: 0xb0, hi: 0xb2},
- {value: 0x3000, lo: 0xb4, hi: 0xb5},
- {value: 0x3000, lo: 0xb9, hi: 0xb9},
+ {value: 0x5000, lo: 0x90, hi: 0x91},
+ {value: 0x5800, lo: 0x92, hi: 0x92},
+ {value: 0x5100, lo: 0x93, hi: 0x94},
+ {value: 0x5000, lo: 0x95, hi: 0x96},
+ {value: 0x5000, lo: 0xb0, hi: 0xb2},
+ {value: 0x5000, lo: 0xb4, hi: 0xb5},
+ {value: 0x5000, lo: 0xb9, hi: 0xb9},
// Block 0x9, offset 0xa
{value: 0x0000, lo: 0x0b},
{value: 0x8800, lo: 0x83, hi: 0x83},
{value: 0x00e6, lo: 0x91, hi: 0x91},
{value: 0x00dc, lo: 0x92, hi: 0x92},
{value: 0x00e6, lo: 0x93, hi: 0x94},
- {value: 0x3300, lo: 0x98, hi: 0x9f},
+ {value: 0x5500, lo: 0x98, hi: 0x9f},
// Block 0x16, offset 0x17
{value: 0x0000, lo: 0x02},
{value: 0x0007, lo: 0xbc, hi: 0xbc},
{value: 0x1100, lo: 0x8b, hi: 0x8c},
{value: 0x0009, lo: 0x8d, hi: 0x8d},
{value: 0x6600, lo: 0x97, hi: 0x97},
- {value: 0x3300, lo: 0x9c, hi: 0x9d},
- {value: 0x3300, lo: 0x9f, hi: 0x9f},
+ {value: 0x5500, lo: 0x9c, hi: 0x9d},
+ {value: 0x5500, lo: 0x9f, hi: 0x9f},
// Block 0x18, offset 0x19
{value: 0x0000, lo: 0x03},
- {value: 0x3300, lo: 0xb3, hi: 0xb3},
- {value: 0x3300, lo: 0xb6, hi: 0xb6},
+ {value: 0x5500, lo: 0xb3, hi: 0xb3},
+ {value: 0x5500, lo: 0xb6, hi: 0xb6},
{value: 0x0007, lo: 0xbc, hi: 0xbc},
// Block 0x19, offset 0x1a
{value: 0x0000, lo: 0x03},
{value: 0x0009, lo: 0x8d, hi: 0x8d},
- {value: 0x3300, lo: 0x99, hi: 0x9b},
- {value: 0x3300, lo: 0x9e, hi: 0x9e},
+ {value: 0x5500, lo: 0x99, hi: 0x9b},
+ {value: 0x5500, lo: 0x9e, hi: 0x9e},
// Block 0x1a, offset 0x1b
{value: 0x0000, lo: 0x01},
{value: 0x0007, lo: 0xbc, hi: 0xbc},
{value: 0x1100, lo: 0x8b, hi: 0x8c},
{value: 0x0009, lo: 0x8d, hi: 0x8d},
{value: 0x6600, lo: 0x96, hi: 0x97},
- {value: 0x3300, lo: 0x9c, hi: 0x9d},
+ {value: 0x5500, lo: 0x9c, hi: 0x9d},
// Block 0x1d, offset 0x1e
{value: 0x0000, lo: 0x03},
{value: 0x8800, lo: 0x92, hi: 0x92},
{value: 0x6600, lo: 0x9f, hi: 0x9f},
// Block 0x24, offset 0x25
{value: 0x0000, lo: 0x03},
- {value: 0x3000, lo: 0xb3, hi: 0xb3},
+ {value: 0x5000, lo: 0xb3, hi: 0xb3},
{value: 0x0067, lo: 0xb8, hi: 0xb9},
{value: 0x0009, lo: 0xba, hi: 0xba},
// Block 0x25, offset 0x26
{value: 0x006b, lo: 0x88, hi: 0x8b},
// Block 0x26, offset 0x27
{value: 0x0000, lo: 0x02},
- {value: 0x3000, lo: 0xb3, hi: 0xb3},
+ {value: 0x5000, lo: 0xb3, hi: 0xb3},
{value: 0x0076, lo: 0xb8, hi: 0xb9},
// Block 0x27, offset 0x28
{value: 0x0000, lo: 0x02},
{value: 0x007a, lo: 0x88, hi: 0x8b},
- {value: 0x3000, lo: 0x9c, hi: 0x9d},
+ {value: 0x5000, lo: 0x9c, hi: 0x9d},
// Block 0x28, offset 0x29
{value: 0x0000, lo: 0x05},
- {value: 0x3000, lo: 0x8c, hi: 0x8c},
+ {value: 0x5000, lo: 0x8c, hi: 0x8c},
{value: 0x00dc, lo: 0x98, hi: 0x99},
{value: 0x00dc, lo: 0xb5, hi: 0xb5},
{value: 0x00dc, lo: 0xb7, hi: 0xb7},
{value: 0x00d8, lo: 0xb9, hi: 0xb9},
// Block 0x29, offset 0x2a
{value: 0x0000, lo: 0x0f},
- {value: 0x3300, lo: 0x83, hi: 0x83},
- {value: 0x3300, lo: 0x8d, hi: 0x8d},
- {value: 0x3300, lo: 0x92, hi: 0x92},
- {value: 0x3300, lo: 0x97, hi: 0x97},
- {value: 0x3300, lo: 0x9c, hi: 0x9c},
- {value: 0x3300, lo: 0xa9, hi: 0xa9},
+ {value: 0x5500, lo: 0x83, hi: 0x83},
+ {value: 0x5500, lo: 0x8d, hi: 0x8d},
+ {value: 0x5500, lo: 0x92, hi: 0x92},
+ {value: 0x5500, lo: 0x97, hi: 0x97},
+ {value: 0x5500, lo: 0x9c, hi: 0x9c},
+ {value: 0x5500, lo: 0xa9, hi: 0xa9},
{value: 0x0081, lo: 0xb1, hi: 0xb1},
{value: 0x0082, lo: 0xb2, hi: 0xb2},
- {value: 0x3300, lo: 0xb3, hi: 0xb3},
+ {value: 0x5500, lo: 0xb3, hi: 0xb3},
{value: 0x0084, lo: 0xb4, hi: 0xb4},
- {value: 0x3300, lo: 0xb5, hi: 0xb6},
- {value: 0x3000, lo: 0xb7, hi: 0xb7},
- {value: 0x3300, lo: 0xb8, hi: 0xb8},
- {value: 0x3000, lo: 0xb9, hi: 0xb9},
+ {value: 0x5500, lo: 0xb5, hi: 0xb6},
+ {value: 0x5000, lo: 0xb7, hi: 0xb7},
+ {value: 0x5500, lo: 0xb8, hi: 0xb8},
+ {value: 0x5000, lo: 0xb9, hi: 0xb9},
{value: 0x0082, lo: 0xba, hi: 0xbd},
// Block 0x2a, offset 0x2b
{value: 0x0000, lo: 0x0b},
{value: 0x0082, lo: 0x80, hi: 0x80},
- {value: 0x3300, lo: 0x81, hi: 0x81},
+ {value: 0x5500, lo: 0x81, hi: 0x81},
{value: 0x00e6, lo: 0x82, hi: 0x83},
{value: 0x0009, lo: 0x84, hi: 0x84},
{value: 0x00e6, lo: 0x86, hi: 0x87},
- {value: 0x3300, lo: 0x93, hi: 0x93},
- {value: 0x3300, lo: 0x9d, hi: 0x9d},
- {value: 0x3300, lo: 0xa2, hi: 0xa2},
- {value: 0x3300, lo: 0xa7, hi: 0xa7},
- {value: 0x3300, lo: 0xac, hi: 0xac},
- {value: 0x3300, lo: 0xb9, hi: 0xb9},
+ {value: 0x5500, lo: 0x93, hi: 0x93},
+ {value: 0x5500, lo: 0x9d, hi: 0x9d},
+ {value: 0x5500, lo: 0xa2, hi: 0xa2},
+ {value: 0x5500, lo: 0xa7, hi: 0xa7},
+ {value: 0x5500, lo: 0xac, hi: 0xac},
+ {value: 0x5500, lo: 0xb9, hi: 0xb9},
// Block 0x2b, offset 0x2c
{value: 0x0000, lo: 0x01},
{value: 0x00dc, lo: 0x86, hi: 0x86},
{value: 0x00dc, lo: 0x8d, hi: 0x8d},
// Block 0x2e, offset 0x2f
{value: 0x0000, lo: 0x01},
- {value: 0x3000, lo: 0xbc, hi: 0xbc},
+ {value: 0x5000, lo: 0xbc, hi: 0xbc},
// Block 0x2f, offset 0x30
{value: 0x0000, lo: 0x01},
{value: 0x8800, lo: 0x80, hi: 0x92},
{value: 0x00dc, lo: 0xad, hi: 0xad},
// Block 0x40, offset 0x41
{value: 0x0000, lo: 0x03},
- {value: 0x3000, lo: 0xac, hi: 0xae},
- {value: 0x3000, lo: 0xb0, hi: 0xba},
- {value: 0x3000, lo: 0xbc, hi: 0xbf},
+ {value: 0x5000, lo: 0xac, hi: 0xae},
+ {value: 0x5000, lo: 0xb0, hi: 0xba},
+ {value: 0x5000, lo: 0xbc, hi: 0xbf},
// Block 0x41, offset 0x42
{value: 0x0000, lo: 0x03},
- {value: 0x3000, lo: 0x80, hi: 0x8d},
- {value: 0x3000, lo: 0x8f, hi: 0xaa},
- {value: 0x3000, lo: 0xb8, hi: 0xb8},
+ {value: 0x5000, lo: 0x80, hi: 0x8d},
+ {value: 0x5000, lo: 0x8f, hi: 0xaa},
+ {value: 0x5000, lo: 0xb8, hi: 0xb8},
// Block 0x42, offset 0x43
{value: 0x0000, lo: 0x01},
- {value: 0x3000, lo: 0x9b, hi: 0xbf},
+ {value: 0x5000, lo: 0x9b, hi: 0xbf},
// Block 0x43, offset 0x44
{value: 0x0000, lo: 0x0e},
{value: 0x00e6, lo: 0x80, hi: 0x81},
// Block 0x46, offset 0x47
{value: 0x0000, lo: 0x07},
{value: 0x1100, lo: 0x80, hi: 0x99},
- {value: 0x3000, lo: 0x9a, hi: 0x9a},
- {value: 0x3100, lo: 0x9b, hi: 0x9b},
+ {value: 0x5000, lo: 0x9a, hi: 0x9a},
+ {value: 0x5100, lo: 0x9b, hi: 0x9b},
{value: 0x9900, lo: 0xa0, hi: 0xa1},
{value: 0x1100, lo: 0xa2, hi: 0xb7},
{value: 0x9900, lo: 0xb8, hi: 0xb9},
{value: 0x1100, lo: 0x80, hi: 0xb4},
{value: 0x9900, lo: 0xb6, hi: 0xb6},
{value: 0x1100, lo: 0xb7, hi: 0xba},
- {value: 0x3300, lo: 0xbb, hi: 0xbb},
+ {value: 0x5500, lo: 0xbb, hi: 0xbb},
{value: 0x1100, lo: 0xbc, hi: 0xbc},
- {value: 0x3000, lo: 0xbd, hi: 0xbd},
- {value: 0x3300, lo: 0xbe, hi: 0xbe},
- {value: 0x3800, lo: 0xbf, hi: 0xbf},
+ {value: 0x5000, lo: 0xbd, hi: 0xbd},
+ {value: 0x5500, lo: 0xbe, hi: 0xbe},
+ {value: 0x5800, lo: 0xbf, hi: 0xbf},
// Block 0x4a, offset 0x4b
{value: 0x0000, lo: 0x0a},
- {value: 0x3300, lo: 0x80, hi: 0x81},
- {value: 0x3000, lo: 0x82, hi: 0x8a},
- {value: 0x3000, lo: 0x91, hi: 0x91},
- {value: 0x3000, lo: 0x97, hi: 0x97},
- {value: 0x3000, lo: 0xa4, hi: 0xa6},
- {value: 0x3000, lo: 0xaf, hi: 0xaf},
- {value: 0x3000, lo: 0xb3, hi: 0xb4},
- {value: 0x3000, lo: 0xb6, hi: 0xb7},
- {value: 0x3000, lo: 0xbc, hi: 0xbc},
- {value: 0x3000, lo: 0xbe, hi: 0xbe},
+ {value: 0x5500, lo: 0x80, hi: 0x81},
+ {value: 0x5000, lo: 0x82, hi: 0x8a},
+ {value: 0x5000, lo: 0x91, hi: 0x91},
+ {value: 0x5000, lo: 0x97, hi: 0x97},
+ {value: 0x5000, lo: 0xa4, hi: 0xa6},
+ {value: 0x5000, lo: 0xaf, hi: 0xaf},
+ {value: 0x5000, lo: 0xb3, hi: 0xb4},
+ {value: 0x5000, lo: 0xb6, hi: 0xb7},
+ {value: 0x5000, lo: 0xbc, hi: 0xbc},
+ {value: 0x5000, lo: 0xbe, hi: 0xbe},
// Block 0x4b, offset 0x4c
{value: 0x0000, lo: 0x05},
- {value: 0x3000, lo: 0x87, hi: 0x89},
- {value: 0x3000, lo: 0x97, hi: 0x97},
- {value: 0x3000, lo: 0x9f, hi: 0x9f},
- {value: 0x3000, lo: 0xb0, hi: 0xb1},
- {value: 0x3000, lo: 0xb4, hi: 0xbf},
+ {value: 0x5000, lo: 0x87, hi: 0x89},
+ {value: 0x5000, lo: 0x97, hi: 0x97},
+ {value: 0x5000, lo: 0x9f, hi: 0x9f},
+ {value: 0x5000, lo: 0xb0, hi: 0xb1},
+ {value: 0x5000, lo: 0xb4, hi: 0xbf},
// Block 0x4c, offset 0x4d
{value: 0x0000, lo: 0x03},
- {value: 0x3000, lo: 0x80, hi: 0x8e},
- {value: 0x3000, lo: 0x90, hi: 0x9c},
- {value: 0x3000, lo: 0xa8, hi: 0xa8},
+ {value: 0x5000, lo: 0x80, hi: 0x8e},
+ {value: 0x5000, lo: 0x90, hi: 0x9c},
+ {value: 0x5000, lo: 0xa8, hi: 0xa8},
// Block 0x4d, offset 0x4e
{value: 0x0000, lo: 0x0d},
{value: 0x00e6, lo: 0x90, hi: 0x91},
{value: 0x00e6, lo: 0xb0, hi: 0xb0},
// Block 0x4e, offset 0x4f
{value: 0x0000, lo: 0x0e},
- {value: 0x3000, lo: 0x80, hi: 0x83},
- {value: 0x3000, lo: 0x85, hi: 0x87},
- {value: 0x3000, lo: 0x89, hi: 0x93},
- {value: 0x3000, lo: 0x95, hi: 0x96},
- {value: 0x3000, lo: 0x99, hi: 0x9d},
- {value: 0x3000, lo: 0xa0, hi: 0xa2},
- {value: 0x3000, lo: 0xa4, hi: 0xa4},
- {value: 0x3300, lo: 0xa6, hi: 0xa6},
- {value: 0x3000, lo: 0xa8, hi: 0xa8},
- {value: 0x3300, lo: 0xaa, hi: 0xab},
- {value: 0x3000, lo: 0xac, hi: 0xad},
- {value: 0x3000, lo: 0xaf, hi: 0xb1},
- {value: 0x3000, lo: 0xb3, hi: 0xb9},
- {value: 0x3000, lo: 0xbb, hi: 0xbf},
+ {value: 0x5000, lo: 0x80, hi: 0x83},
+ {value: 0x5000, lo: 0x85, hi: 0x87},
+ {value: 0x5000, lo: 0x89, hi: 0x93},
+ {value: 0x5000, lo: 0x95, hi: 0x96},
+ {value: 0x5000, lo: 0x99, hi: 0x9d},
+ {value: 0x5000, lo: 0xa0, hi: 0xa2},
+ {value: 0x5000, lo: 0xa4, hi: 0xa4},
+ {value: 0x5500, lo: 0xa6, hi: 0xa6},
+ {value: 0x5000, lo: 0xa8, hi: 0xa8},
+ {value: 0x5500, lo: 0xaa, hi: 0xab},
+ {value: 0x5000, lo: 0xac, hi: 0xad},
+ {value: 0x5000, lo: 0xaf, hi: 0xb1},
+ {value: 0x5000, lo: 0xb3, hi: 0xb9},
+ {value: 0x5000, lo: 0xbb, hi: 0xbf},
// Block 0x4f, offset 0x50
{value: 0x0000, lo: 0x03},
- {value: 0x3000, lo: 0x80, hi: 0x80},
- {value: 0x3000, lo: 0x85, hi: 0x89},
- {value: 0x3000, lo: 0x90, hi: 0xbf},
+ {value: 0x5000, lo: 0x80, hi: 0x80},
+ {value: 0x5000, lo: 0x85, hi: 0x89},
+ {value: 0x5000, lo: 0x90, hi: 0xbf},
// Block 0x50, offset 0x51
{value: 0x0000, lo: 0x06},
- {value: 0x3000, lo: 0x89, hi: 0x89},
+ {value: 0x5000, lo: 0x89, hi: 0x89},
{value: 0x8800, lo: 0x90, hi: 0x90},
{value: 0x8800, lo: 0x92, hi: 0x92},
{value: 0x8800, lo: 0x94, hi: 0x94},
{value: 0x1100, lo: 0xa4, hi: 0xa4},
{value: 0x8800, lo: 0xa5, hi: 0xa5},
{value: 0x1100, lo: 0xa6, hi: 0xa6},
- {value: 0x3000, lo: 0xac, hi: 0xad},
- {value: 0x3000, lo: 0xaf, hi: 0xb0},
+ {value: 0x5000, lo: 0xac, hi: 0xad},
+ {value: 0x5000, lo: 0xaf, hi: 0xb0},
{value: 0x8800, lo: 0xbc, hi: 0xbc},
// Block 0x53, offset 0x54
{value: 0x0000, lo: 0x0b},
{value: 0x1100, lo: 0xaa, hi: 0xad},
// Block 0x55, offset 0x56
{value: 0x0000, lo: 0x01},
- {value: 0x3300, lo: 0xa9, hi: 0xaa},
+ {value: 0x5500, lo: 0xa9, hi: 0xaa},
// Block 0x56, offset 0x57
{value: 0x0000, lo: 0x01},
- {value: 0x3000, lo: 0xa0, hi: 0xbf},
+ {value: 0x5000, lo: 0xa0, hi: 0xbf},
// Block 0x57, offset 0x58
{value: 0x0000, lo: 0x01},
- {value: 0x3000, lo: 0x80, hi: 0xbf},
+ {value: 0x5000, lo: 0x80, hi: 0xbf},
// Block 0x58, offset 0x59
{value: 0x0000, lo: 0x01},
- {value: 0x3000, lo: 0x80, hi: 0xaa},
+ {value: 0x5000, lo: 0x80, hi: 0xaa},
// Block 0x59, offset 0x5a
{value: 0x0000, lo: 0x01},
- {value: 0x3000, lo: 0x8c, hi: 0x8c},
+ {value: 0x5000, lo: 0x8c, hi: 0x8c},
// Block 0x5a, offset 0x5b
{value: 0x0000, lo: 0x01},
- {value: 0x3000, lo: 0xb4, hi: 0xb6},
+ {value: 0x5000, lo: 0xb4, hi: 0xb6},
// Block 0x5b, offset 0x5c
{value: 0x0000, lo: 0x01},
- {value: 0x3300, lo: 0x9c, hi: 0x9c},
+ {value: 0x5500, lo: 0x9c, hi: 0x9c},
// Block 0x5c, offset 0x5d
{value: 0x0000, lo: 0x01},
- {value: 0x3000, lo: 0xbc, hi: 0xbd},
+ {value: 0x5000, lo: 0xbc, hi: 0xbd},
// Block 0x5d, offset 0x5e
{value: 0x0000, lo: 0x01},
{value: 0x00e6, lo: 0xaf, hi: 0xb1},
// Block 0x5e, offset 0x5f
{value: 0x0000, lo: 0x02},
- {value: 0x3000, lo: 0xaf, hi: 0xaf},
+ {value: 0x5000, lo: 0xaf, hi: 0xaf},
{value: 0x0009, lo: 0xbf, hi: 0xbf},
// Block 0x5f, offset 0x60
{value: 0x0000, lo: 0x01},
{value: 0x00e6, lo: 0xa0, hi: 0xbf},
// Block 0x60, offset 0x61
{value: 0x0000, lo: 0x01},
- {value: 0x3000, lo: 0x9f, hi: 0x9f},
+ {value: 0x5000, lo: 0x9f, hi: 0x9f},
// Block 0x61, offset 0x62
{value: 0x0000, lo: 0x01},
- {value: 0x3000, lo: 0xb3, hi: 0xb3},
+ {value: 0x5000, lo: 0xb3, hi: 0xb3},
// Block 0x62, offset 0x63
{value: 0x0000, lo: 0x01},
- {value: 0x3000, lo: 0x80, hi: 0x95},
+ {value: 0x5000, lo: 0x80, hi: 0x95},
// Block 0x63, offset 0x64
{value: 0x0000, lo: 0x08},
- {value: 0x3000, lo: 0x80, hi: 0x80},
+ {value: 0x5000, lo: 0x80, hi: 0x80},
{value: 0x00da, lo: 0xaa, hi: 0xaa},
{value: 0x00e4, lo: 0xab, hi: 0xab},
{value: 0x00e8, lo: 0xac, hi: 0xac},
{value: 0x00de, lo: 0xad, hi: 0xad},
{value: 0x00e0, lo: 0xae, hi: 0xaf},
- {value: 0x3000, lo: 0xb6, hi: 0xb6},
- {value: 0x3000, lo: 0xb8, hi: 0xba},
+ {value: 0x5000, lo: 0xb6, hi: 0xb6},
+ {value: 0x5000, lo: 0xb8, hi: 0xba},
// Block 0x64, offset 0x65
{value: 0x0000, lo: 0x01},
- {value: 0x3000, lo: 0xb1, hi: 0xbf},
+ {value: 0x5000, lo: 0xb1, hi: 0xbf},
// Block 0x65, offset 0x66
{value: 0x0000, lo: 0x02},
- {value: 0x3000, lo: 0x80, hi: 0x8e},
- {value: 0x3000, lo: 0x92, hi: 0x9f},
+ {value: 0x5000, lo: 0x80, hi: 0x8e},
+ {value: 0x5000, lo: 0x92, hi: 0x9f},
// Block 0x66, offset 0x67
{value: 0x0000, lo: 0x02},
- {value: 0x3000, lo: 0x80, hi: 0x9e},
- {value: 0x3000, lo: 0xa0, hi: 0xbf},
+ {value: 0x5000, lo: 0x80, hi: 0x9e},
+ {value: 0x5000, lo: 0xa0, hi: 0xbf},
// Block 0x67, offset 0x68
{value: 0x0000, lo: 0x02},
- {value: 0x3000, lo: 0x80, hi: 0x87},
- {value: 0x3000, lo: 0x90, hi: 0xbe},
+ {value: 0x5000, lo: 0x80, hi: 0x87},
+ {value: 0x5000, lo: 0x90, hi: 0xbe},
// Block 0x68, offset 0x69
{value: 0x0000, lo: 0x01},
- {value: 0x3000, lo: 0x80, hi: 0xbe},
+ {value: 0x5000, lo: 0x80, hi: 0xbe},
// Block 0x69, offset 0x6a
{value: 0x0000, lo: 0x02},
{value: 0x00e6, lo: 0xaf, hi: 0xaf},
{value: 0x00e6, lo: 0xb0, hi: 0xb1},
// Block 0x6b, offset 0x6c
{value: 0x0000, lo: 0x01},
- {value: 0x3000, lo: 0xb0, hi: 0xb0},
+ {value: 0x5000, lo: 0xb0, hi: 0xb0},
// Block 0x6c, offset 0x6d
{value: 0x0000, lo: 0x01},
{value: 0x0009, lo: 0x86, hi: 0x86},
{value: 0x1100, lo: 0x80, hi: 0xa3},
// Block 0x77, offset 0x78
{value: 0x0000, lo: 0x01},
- {value: 0x3300, lo: 0x80, hi: 0xbf},
+ {value: 0x5500, lo: 0x80, hi: 0xbf},
// Block 0x78, offset 0x79
{value: 0x0000, lo: 0x09},
- {value: 0x3300, lo: 0x80, hi: 0x8d},
- {value: 0x3300, lo: 0x90, hi: 0x90},
- {value: 0x3300, lo: 0x92, hi: 0x92},
- {value: 0x3300, lo: 0x95, hi: 0x9e},
- {value: 0x3300, lo: 0xa0, hi: 0xa0},
- {value: 0x3300, lo: 0xa2, hi: 0xa2},
- {value: 0x3300, lo: 0xa5, hi: 0xa6},
- {value: 0x3300, lo: 0xaa, hi: 0xad},
- {value: 0x3300, lo: 0xb0, hi: 0xbf},
+ {value: 0x5500, lo: 0x80, hi: 0x8d},
+ {value: 0x5500, lo: 0x90, hi: 0x90},
+ {value: 0x5500, lo: 0x92, hi: 0x92},
+ {value: 0x5500, lo: 0x95, hi: 0x9e},
+ {value: 0x5500, lo: 0xa0, hi: 0xa0},
+ {value: 0x5500, lo: 0xa2, hi: 0xa2},
+ {value: 0x5500, lo: 0xa5, hi: 0xa6},
+ {value: 0x5500, lo: 0xaa, hi: 0xad},
+ {value: 0x5500, lo: 0xb0, hi: 0xbf},
// Block 0x79, offset 0x7a
{value: 0x0000, lo: 0x02},
- {value: 0x3300, lo: 0x80, hi: 0xad},
- {value: 0x3300, lo: 0xb0, hi: 0xbf},
+ {value: 0x5500, lo: 0x80, hi: 0xad},
+ {value: 0x5500, lo: 0xb0, hi: 0xbf},
// Block 0x7a, offset 0x7b
{value: 0x0000, lo: 0x01},
- {value: 0x3300, lo: 0x80, hi: 0x99},
+ {value: 0x5500, lo: 0x80, hi: 0x99},
// Block 0x7b, offset 0x7c
{value: 0x0000, lo: 0x09},
- {value: 0x3000, lo: 0x80, hi: 0x86},
- {value: 0x3000, lo: 0x93, hi: 0x97},
- {value: 0x3300, lo: 0x9d, hi: 0x9d},
+ {value: 0x5000, lo: 0x80, hi: 0x86},
+ {value: 0x5000, lo: 0x93, hi: 0x97},
+ {value: 0x5500, lo: 0x9d, hi: 0x9d},
{value: 0x001a, lo: 0x9e, hi: 0x9e},
- {value: 0x3300, lo: 0x9f, hi: 0x9f},
- {value: 0x3000, lo: 0xa0, hi: 0xa9},
- {value: 0x3300, lo: 0xaa, hi: 0xb6},
- {value: 0x3300, lo: 0xb8, hi: 0xbc},
- {value: 0x3300, lo: 0xbe, hi: 0xbe},
+ {value: 0x5500, lo: 0x9f, hi: 0x9f},
+ {value: 0x5000, lo: 0xa0, hi: 0xa9},
+ {value: 0x5500, lo: 0xaa, hi: 0xb6},
+ {value: 0x5500, lo: 0xb8, hi: 0xbc},
+ {value: 0x5500, lo: 0xbe, hi: 0xbe},
// Block 0x7c, offset 0x7d
{value: 0x0000, lo: 0x04},
- {value: 0x3300, lo: 0x80, hi: 0x81},
- {value: 0x3300, lo: 0x83, hi: 0x84},
- {value: 0x3300, lo: 0x86, hi: 0x8e},
- {value: 0x3000, lo: 0x8f, hi: 0xbf},
+ {value: 0x5500, lo: 0x80, hi: 0x81},
+ {value: 0x5500, lo: 0x83, hi: 0x84},
+ {value: 0x5500, lo: 0x86, hi: 0x8e},
+ {value: 0x5000, lo: 0x8f, hi: 0xbf},
// Block 0x7d, offset 0x7e
{value: 0x0000, lo: 0x01},
- {value: 0x3000, lo: 0x80, hi: 0xb1},
+ {value: 0x5000, lo: 0x80, hi: 0xb1},
// Block 0x7e, offset 0x7f
{value: 0x0000, lo: 0x01},
- {value: 0x3000, lo: 0x93, hi: 0xbf},
+ {value: 0x5000, lo: 0x93, hi: 0xbf},
// Block 0x7f, offset 0x80
{value: 0x0000, lo: 0x01},
- {value: 0x3000, lo: 0x80, hi: 0xbd},
+ {value: 0x5000, lo: 0x80, hi: 0xbd},
// Block 0x80, offset 0x81
{value: 0x0000, lo: 0x01},
- {value: 0x3000, lo: 0x90, hi: 0xbf},
+ {value: 0x5000, lo: 0x90, hi: 0xbf},
// Block 0x81, offset 0x82
{value: 0x0000, lo: 0x02},
- {value: 0x3000, lo: 0x80, hi: 0x8f},
- {value: 0x3000, lo: 0x92, hi: 0xbf},
+ {value: 0x5000, lo: 0x80, hi: 0x8f},
+ {value: 0x5000, lo: 0x92, hi: 0xbf},
// Block 0x82, offset 0x83
{value: 0x0000, lo: 0x02},
- {value: 0x3000, lo: 0x80, hi: 0x87},
- {value: 0x3000, lo: 0xb0, hi: 0xbc},
+ {value: 0x5000, lo: 0x80, hi: 0x87},
+ {value: 0x5000, lo: 0xb0, hi: 0xbc},
// Block 0x83, offset 0x84
{value: 0x0000, lo: 0x03},
- {value: 0x3000, lo: 0x90, hi: 0x99},
+ {value: 0x5000, lo: 0x90, hi: 0x99},
{value: 0x00e6, lo: 0xa0, hi: 0xa6},
- {value: 0x3000, lo: 0xb0, hi: 0xbf},
+ {value: 0x5000, lo: 0xb0, hi: 0xbf},
// Block 0x84, offset 0x85
{value: 0x0000, lo: 0x07},
- {value: 0x3000, lo: 0x80, hi: 0x84},
- {value: 0x3000, lo: 0x87, hi: 0x92},
- {value: 0x3000, lo: 0x94, hi: 0xa6},
- {value: 0x3000, lo: 0xa8, hi: 0xab},
- {value: 0x3000, lo: 0xb0, hi: 0xb2},
- {value: 0x3000, lo: 0xb4, hi: 0xb4},
- {value: 0x3000, lo: 0xb6, hi: 0xbf},
+ {value: 0x5000, lo: 0x80, hi: 0x84},
+ {value: 0x5000, lo: 0x87, hi: 0x92},
+ {value: 0x5000, lo: 0x94, hi: 0xa6},
+ {value: 0x5000, lo: 0xa8, hi: 0xab},
+ {value: 0x5000, lo: 0xb0, hi: 0xb2},
+ {value: 0x5000, lo: 0xb4, hi: 0xb4},
+ {value: 0x5000, lo: 0xb6, hi: 0xbf},
// Block 0x85, offset 0x86
{value: 0x0000, lo: 0x01},
- {value: 0x3000, lo: 0x80, hi: 0xbc},
+ {value: 0x5000, lo: 0x80, hi: 0xbc},
// Block 0x86, offset 0x87
{value: 0x0000, lo: 0x01},
- {value: 0x3000, lo: 0x81, hi: 0xbf},
+ {value: 0x5000, lo: 0x81, hi: 0xbf},
// Block 0x87, offset 0x88
{value: 0x0000, lo: 0x06},
- {value: 0x3000, lo: 0x82, hi: 0x87},
- {value: 0x3000, lo: 0x8a, hi: 0x8f},
- {value: 0x3000, lo: 0x92, hi: 0x97},
- {value: 0x3000, lo: 0x9a, hi: 0x9c},
- {value: 0x3000, lo: 0xa0, hi: 0xa6},
- {value: 0x3000, lo: 0xa8, hi: 0xae},
+ {value: 0x5000, lo: 0x82, hi: 0x87},
+ {value: 0x5000, lo: 0x8a, hi: 0x8f},
+ {value: 0x5000, lo: 0x92, hi: 0x97},
+ {value: 0x5000, lo: 0x9a, hi: 0x9c},
+ {value: 0x5000, lo: 0xa0, hi: 0xa6},
+ {value: 0x5000, lo: 0xa8, hi: 0xae},
// Block 0x88, offset 0x89
{value: 0x0000, lo: 0x01},
{value: 0x00dc, lo: 0xbd, hi: 0xbd},
{value: 0x0009, lo: 0xb9, hi: 0xba},
// Block 0x8b, offset 0x8c
{value: 0x0000, lo: 0x06},
- {value: 0x3300, lo: 0x9e, hi: 0xa4},
+ {value: 0x5500, lo: 0x9e, hi: 0xa4},
{value: 0x00d8, lo: 0xa5, hi: 0xa6},
{value: 0x0001, lo: 0xa7, hi: 0xa9},
{value: 0x00e2, lo: 0xad, hi: 0xad},
{value: 0x00e6, lo: 0x85, hi: 0x89},
{value: 0x00dc, lo: 0x8a, hi: 0x8b},
{value: 0x00e6, lo: 0xaa, hi: 0xad},
- {value: 0x3300, lo: 0xbb, hi: 0xbf},
+ {value: 0x5500, lo: 0xbb, hi: 0xbf},
// Block 0x8d, offset 0x8e
{value: 0x0000, lo: 0x01},
- {value: 0x3300, lo: 0x80, hi: 0x80},
+ {value: 0x5500, lo: 0x80, hi: 0x80},
// Block 0x8e, offset 0x8f
{value: 0x0000, lo: 0x01},
{value: 0x00e6, lo: 0x82, hi: 0x84},
// Block 0x8f, offset 0x90
{value: 0x0000, lo: 0x02},
- {value: 0x3000, lo: 0x80, hi: 0x94},
- {value: 0x3000, lo: 0x96, hi: 0xbf},
+ {value: 0x5000, lo: 0x80, hi: 0x94},
+ {value: 0x5000, lo: 0x96, hi: 0xbf},
// Block 0x90, offset 0x91
{value: 0x0000, lo: 0x08},
- {value: 0x3000, lo: 0x80, hi: 0x9c},
- {value: 0x3000, lo: 0x9e, hi: 0x9f},
- {value: 0x3000, lo: 0xa2, hi: 0xa2},
- {value: 0x3000, lo: 0xa5, hi: 0xa6},
- {value: 0x3000, lo: 0xa9, hi: 0xac},
- {value: 0x3000, lo: 0xae, hi: 0xb9},
- {value: 0x3000, lo: 0xbb, hi: 0xbb},
- {value: 0x3000, lo: 0xbd, hi: 0xbf},
+ {value: 0x5000, lo: 0x80, hi: 0x9c},
+ {value: 0x5000, lo: 0x9e, hi: 0x9f},
+ {value: 0x5000, lo: 0xa2, hi: 0xa2},
+ {value: 0x5000, lo: 0xa5, hi: 0xa6},
+ {value: 0x5000, lo: 0xa9, hi: 0xac},
+ {value: 0x5000, lo: 0xae, hi: 0xb9},
+ {value: 0x5000, lo: 0xbb, hi: 0xbb},
+ {value: 0x5000, lo: 0xbd, hi: 0xbf},
// Block 0x91, offset 0x92
{value: 0x0000, lo: 0x02},
- {value: 0x3000, lo: 0x80, hi: 0x83},
- {value: 0x3000, lo: 0x85, hi: 0xbf},
+ {value: 0x5000, lo: 0x80, hi: 0x83},
+ {value: 0x5000, lo: 0x85, hi: 0xbf},
// Block 0x92, offset 0x93
{value: 0x0000, lo: 0x06},
- {value: 0x3000, lo: 0x80, hi: 0x85},
- {value: 0x3000, lo: 0x87, hi: 0x8a},
- {value: 0x3000, lo: 0x8d, hi: 0x94},
- {value: 0x