From: ian
Date: Wed, 16 Mar 2011 23:05:44 +0000 (+0000)
Subject: Update to current version of Go library (revision 94d654be2064).
X-Git-Url: http://git.sourceforge.jp/view?p=pf3gnuchains%2Fgcc-fork.git;a=commitdiff_plain;h=31c6ec422702226aabab7d082da16663e6c3e72c

Update to current version of Go library (revision 94d654be2064).

git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@171076 138bc75d-0d04-0410-961f-82ee72b054a4
---

diff --git a/gcc/go/gofrontend/types.cc b/gcc/go/gofrontend/types.cc
index 0db599441dd..2eecafd89d6 100644
--- a/gcc/go/gofrontend/types.cc
+++ b/gcc/go/gofrontend/types.cc
@@ -1078,7 +1078,7 @@ Type::make_type_descriptor_type()
 					    bloc);
 
   Struct_type* type_descriptor_type =
-    Type::make_builtin_struct_type(9,
+    Type::make_builtin_struct_type(10,
 				   "Kind", uint8_type,
 				   "align", uint8_type,
 				   "fieldAlign", uint8_type,
@@ -1087,7 +1087,9 @@ Type::make_type_descriptor_type()
 				   "hashfn", hashfn_type,
 				   "equalfn", equalfn_type,
 				   "string", pointer_string_type,
-				   "", pointer_uncommon_type);
+				   "", pointer_uncommon_type,
+				   "ptrToThis",
+				   pointer_type_descriptor_type);
 
   Named_type* named = Type::make_builtin_named_type("commonType",
 						    type_descriptor_type);
@@ -1260,6 +1262,16 @@ Type::type_descriptor_constructor(Gogo* gogo, int runtime_type_kind,
     }
   ++p;
 
+  gcc_assert(p->field_name() == "ptrToThis");
+  if (name == NULL)
+    vals->push_back(Expression::make_nil(bloc));
+  else
+    {
+      Type* pt = Type::make_pointer_type(name);
+      vals->push_back(Expression::make_type_descriptor(pt, bloc));
+    }
+
+  ++p;
   gcc_assert(p == fields->end());
 
   mpz_clear(iv);
diff --git a/libgo/MERGE b/libgo/MERGE
index 97e6655a625..e572b232450 100644
--- a/libgo/MERGE
+++ b/libgo/MERGE
@@ -1,4 +1,4 @@
-559f12e8fcd5
+94d654be2064
 
 The first line of this file holds the Mercurial revision number of
 the last merge done from the master library sources.
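The types.cc hunk above widens the "commonType" type descriptor from nine fields to ten by appending "ptrToThis": for a named type the constructor now records the descriptor of the corresponding pointer type *T, and for an unnamed type it records nil. The Go-level sketch below is illustrative only; the size and hash slots and the function signatures are assumptions, since the middle of the builtin-struct argument list falls outside this hunk.

package descriptor

import "unsafe"

// uncommonType stands in for the per-named-type method table; its real
// layout is not shown in this commit.
type uncommonType struct{}

// commonType approximates the ten-field descriptor built by
// Type::make_type_descriptor_type after this change. Field order follows
// the hunk; size, hash and the function signatures are assumptions.
type commonType struct {
	kind       uint8
	align      uint8
	fieldAlign uint8
	size       uintptr                                            // assumed, not visible in the hunk
	hash       uint32                                             // assumed, not visible in the hunk
	hashfn     func(unsafe.Pointer, uintptr) uintptr              // signature illustrative
	equalfn    func(unsafe.Pointer, unsafe.Pointer, uintptr) bool // signature illustrative
	str        *string                                            // the "string" field of the descriptor
	uncommon   *uncommonType
	ptrToThis  *commonType // new: descriptor for *T, nil for unnamed types
}
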
diff --git a/libgo/Makefile.am b/libgo/Makefile.am index e19d229a9e5..0715a9911bf 100644 --- a/libgo/Makefile.am +++ b/libgo/Makefile.am @@ -105,6 +105,7 @@ toolexeclibgo_DATA = \ bufio.gox \ bytes.gox \ cmath.gox \ + crypto.gox \ ebnf.gox \ exec.gox \ expvar.gox \ @@ -158,8 +159,10 @@ toolexeclibgoarchive_DATA = \ toolexeclibgocompressdir = $(toolexeclibgodir)/compress toolexeclibgocompress_DATA = \ + compress/bzip2.gox \ compress/flate.gox \ compress/gzip.gox \ + compress/lzw.gox \ compress/zlib.gox toolexeclibgocontainerdir = $(toolexeclibgodir)/container @@ -178,11 +181,13 @@ toolexeclibgocrypto_DATA = \ crypto/blowfish.gox \ crypto/cast5.gox \ crypto/cipher.gox \ + crypto/dsa.gox \ crypto/elliptic.gox \ crypto/hmac.gox \ crypto/md4.gox \ crypto/md5.gox \ crypto/ocsp.gox \ + crypto/openpgp.gox \ crypto/rand.gox \ crypto/rc4.gox \ crypto/ripemd160.gox \ @@ -201,6 +206,7 @@ toolexeclibgocryptoopenpgpdir = $(toolexeclibgocryptodir)/openpgp toolexeclibgocryptoopenpgp_DATA = \ crypto/openpgp/armor.gox \ crypto/openpgp/error.gox \ + crypto/openpgp/packet.gox \ crypto/openpgp/s2k.gox toolexeclibgodebugdir = $(toolexeclibgodir)/debug @@ -306,6 +312,11 @@ toolexeclibgoruntime_DATA = \ runtime/debug.gox \ runtime/pprof.gox +toolexeclibgosyncdir = $(toolexeclibgodir)/sync + +toolexeclibgosync_DATA = \ + sync/atomic.gox + toolexeclibgotestingdir = $(toolexeclibgodir)/testing toolexeclibgotesting_DATA = \ @@ -411,8 +422,6 @@ runtime_files = \ runtime/mfixalloc.c \ runtime/mgc0.c \ runtime/mheap.c \ - runtime/mheapmap32.c \ - runtime/mheapmap64.c \ runtime/msize.c \ runtime/proc.c \ runtime/thread.c \ @@ -489,6 +498,9 @@ go_cmath_files = \ go/cmath/sqrt.go \ go/cmath/tan.go +go_crypto_files = \ + go/crypto/crypto.go + go_ebnf_files = \ go/ebnf/ebnf.go \ go/ebnf/parser.go @@ -533,6 +545,7 @@ go_http_files = \ go/http/client.go \ go/http/dump.go \ go/http/fs.go \ + go/http/header.go \ go/http/lex.go \ go/http/persist.go \ go/http/request.go \ @@ -540,6 +553,7 @@ go_http_files = \ go/http/server.go \ go/http/status.go \ go/http/transfer.go \ + go/http/transport.go \ go/http/url.go go_image_files = \ @@ -693,6 +707,7 @@ go_os_files = \ go/os/env_unix.go \ go/os/error.go \ go/os/exec.go \ + go/os/exec_unix.go \ go/os/file.go \ go/os/file_unix.go \ go/os/getwd.go \ @@ -738,8 +753,6 @@ go_runtime_files = \ go/runtime/debug.go \ go/runtime/error.go \ go/runtime/extern.go \ - go/runtime/malloc_defs.go \ - go/runtime/runtime_defs.go \ go/runtime/sig.go \ go/runtime/softfloat64.go \ go/runtime/type.go \ @@ -781,11 +794,11 @@ go_strings_files = \ go/strings/strings.go go_sync_files = \ + go/sync/cond.go \ go/sync/mutex.go \ go/sync/once.go \ - go/sync/rwmutex.go -go_sync_c_files = \ - go/sync/cas.c + go/sync/rwmutex.go \ + go/sync/waitgroup.go if LIBGO_IS_SOLARIS go_syslog_file = go/syslog/syslog_solaris.go @@ -851,6 +864,12 @@ go_archive_zip_files = \ go/archive/zip/reader.go \ go/archive/zip/struct.go +go_compress_bzip2_files = \ + go/compress/bzip2/bit_reader.go \ + go/compress/bzip2/bzip2.go \ + go/compress/bzip2/huffman.go \ + go/compress/bzip2/move_to_front.go + go_compress_flate_files = \ go/compress/flate/deflate.go \ go/compress/flate/huffman_bit_writer.go \ @@ -864,6 +883,10 @@ go_compress_gzip_files = \ go/compress/gzip/gzip.go \ go/compress/gzip/gunzip.go +go_compress_lzw_files = \ + go/compress/lzw/reader.go \ + go/compress/lzw/writer.go + go_compress_zlib_files = \ go/compress/zlib/reader.go \ go/compress/zlib/writer.go @@ -911,6 +934,8 @@ go_crypto_cipher_files = \ go/crypto/cipher/io.go \ 
go/crypto/cipher/ocfb.go \ go/crypto/cipher/ofb.go +go_crypto_dsa_files = \ + go/crypto/dsa/dsa.go go_crypto_elliptic_files = \ go/crypto/elliptic/elliptic.go go_crypto_hmac_files = \ @@ -923,6 +948,11 @@ go_crypto_md5_files = \ go/crypto/md5/md5block.go go_crypto_ocsp_files = \ go/crypto/ocsp/ocsp.go +go_crypto_openpgp_files = \ + go/crypto/openpgp/canonical_text.go \ + go/crypto/openpgp/keys.go \ + go/crypto/openpgp/read.go \ + go/crypto/openpgp/write.go go_crypto_rand_files = \ go/crypto/rand/rand.go \ go/crypto/rand/rand_unix.go @@ -970,6 +1000,19 @@ go_crypto_openpgp_armor_files = \ go/crypto/openpgp/armor/encode.go go_crypto_openpgp_error_files = \ go/crypto/openpgp/error/error.go +go_crypto_openpgp_packet_files = \ + go/crypto/openpgp/packet/compressed.go \ + go/crypto/openpgp/packet/encrypted_key.go \ + go/crypto/openpgp/packet/literal.go \ + go/crypto/openpgp/packet/one_pass_signature.go \ + go/crypto/openpgp/packet/packet.go \ + go/crypto/openpgp/packet/private_key.go \ + go/crypto/openpgp/packet/public_key.go \ + go/crypto/openpgp/packet/reader.go \ + go/crypto/openpgp/packet/signature.go \ + go/crypto/openpgp/packet/symmetric_key_encrypted.go \ + go/crypto/openpgp/packet/symmetrically_encrypted.go \ + go/crypto/openpgp/packet/userid.go go_crypto_openpgp_s2k_files = \ go/crypto/openpgp/s2k/s2k.go @@ -1095,6 +1138,7 @@ go_net_dict_files = \ go/net/dict/dict.go go_net_textproto_files = \ + go/net/textproto/header.go \ go/net/textproto/pipeline.go \ go/net/textproto/reader.go \ go/net/textproto/textproto.go \ @@ -1116,6 +1160,11 @@ go_runtime_debug_files = \ go_runtime_pprof_files = \ go/runtime/pprof/pprof.go +go_sync_atomic_files = \ + go/sync/atomic/doc.go +go_sync_atomic_c_files = \ + go/sync/atomic/atomic.c + go_testing_iotest_files = \ go/testing/iotest/logger.go \ go/testing/iotest/reader.go \ @@ -1268,6 +1317,7 @@ libgo_go_objs = \ bytes/bytes.lo \ bytes/index.lo \ cmath/cmath.lo \ + crypto/crypto.lo \ ebnf/ebnf.lo \ exec/exec.lo \ expvar/expvar.lo \ @@ -1298,8 +1348,7 @@ libgo_go_objs = \ sort/sort.lo \ strconv/strconv.lo \ strings/strings.lo \ - sync/mutex.lo \ - sync/cas.lo \ + sync/sync.lo \ syslog/syslog.lo \ syslog/syslog_c.lo \ tabwriter/tabwriter.lo \ @@ -1313,8 +1362,10 @@ libgo_go_objs = \ xml/xml.lo \ archive/tar.lo \ archive/zip.lo \ + compress/bzip2.lo \ compress/flate.lo \ compress/gzip.lo \ + compress/lzw.lo \ compress/zlib.lo \ container/heap.lo \ container/list.lo \ @@ -1325,11 +1376,13 @@ libgo_go_objs = \ crypto/blowfish.lo \ crypto/cast5.lo \ crypto/cipher.lo \ + crypto/dsa.lo \ crypto/elliptic.lo \ crypto/hmac.lo \ crypto/md4.lo \ crypto/md5.lo \ crypto/ocsp.lo \ + crypto/openpgp.lo \ crypto/rand.lo \ crypto/rc4.lo \ crypto/ripemd160.lo \ @@ -1344,6 +1397,7 @@ libgo_go_objs = \ crypto/xtea.lo \ crypto/openpgp/armor.lo \ crypto/openpgp/error.lo \ + crypto/openpgp/packet.lo \ crypto/openpgp/s2k.lo \ debug/dwarf.lo \ debug/elf.lo \ @@ -1385,6 +1439,8 @@ libgo_go_objs = \ rpc/jsonrpc.lo \ runtime/debug.lo \ runtime/pprof.lo \ + sync/atomic.lo \ + sync/atomic_c.lo \ syscalls/syscall.lo \ syscalls/errno.lo \ testing/testing.lo \ @@ -1500,6 +1556,12 @@ cmath/check: $(CHECK_DEPS) $(CHECK) .PHONY: cmath/check +crypto/crypto.lo: $(go_crypto_files) hash.gox + $(BUILDPACKAGE) +crypto/check: $(CHECK_DEPS) + $(CHECK) +.PHONY: crypto/check + ebnf/ebnf.lo: $(go_ebnf_files) container/vector.gox go/scanner.gox \ go/token.gox os.gox strconv.gox unicode.gox utf8.gox $(BUILDPACKAGE) @@ -1507,7 +1569,7 @@ ebnf/check: $(CHECK_DEPS) $(CHECK) .PHONY: ebnf/check -exec/exec.lo: 
$(go_exec_files) os.gox strings.gox +exec/exec.lo: $(go_exec_files) os.gox strconv.gox strings.gox $(BUILDPACKAGE) exec/check: $(CHECK_DEPS) $(CHECK) @@ -1526,8 +1588,8 @@ flag/check: $(CHECK_DEPS) $(CHECK) .PHONY: flag/check -fmt/fmt.lo: $(go_fmt_files) bytes.gox io.gox os.gox reflect.gox strconv.gox \ - strings.gox unicode.gox utf8.gox +fmt/fmt.lo: $(go_fmt_files) bytes.gox io.gox math.gox os.gox reflect.gox \ + strconv.gox strings.gox unicode.gox utf8.gox $(BUILDPACKAGE) fmt/check: $(CHECK_DEPS) $(CHECK) @@ -1554,10 +1616,10 @@ html/check: $(CHECK_DEPS) $(CHECK) .PHONY: html/check -http/http.lo: $(go_http_files) bufio.gox bytes.gox container/list.gox \ - container/vector.gox crypto/rand.gox crypto/tls.gox \ - encoding/base64.gox fmt.gox io.gox io/ioutil.gox log.gox \ - mime.gox mime/multipart.gox net.gox os.gox path.gox sort.gox \ +http/http.lo: $(go_http_files) bufio.gox bytes.gox container/vector.gox \ + crypto/rand.gox crypto/tls.gox encoding/base64.gox fmt.gox \ + io.gox io/ioutil.gox log.gox mime.gox mime/multipart.gox \ + net.gox net/textproto.gox os.gox path.gox sort.gox \ strconv.gox strings.gox sync.gox time.gox utf8.gox $(BUILDPACKAGE) http/check: $(CHECK_DEPS) @@ -1576,9 +1638,10 @@ io/check: $(CHECK_DEPS) $(CHECK) .PHONY: io/check -json/json.lo: $(go_json_files) bytes.gox container/vector.gox fmt.gox io.gox \ - math.gox os.gox reflect.gox runtime.gox strconv.gox \ - strings.gox unicode.gox utf16.gox utf8.gox +json/json.lo: $(go_json_files) bytes.gox container/vector.gox \ + encoding/base64.gox fmt.gox io.gox math.gox os.gox \ + reflect.gox runtime.gox strconv.gox strings.gox unicode.gox \ + utf16.gox utf8.gox $(BUILDPACKAGE) json/check: $(CHECK_DEPS) $(CHECK) @@ -1611,14 +1674,14 @@ net/check: $(CHECK_DEPS) $(CHECK) .PHONY: net/check -netchan/netchan.lo: $(go_netchan_files) gob.gox log.gox net.gox os.gox \ +netchan/netchan.lo: $(go_netchan_files) gob.gox io.gox log.gox net.gox os.gox \ reflect.gox strconv.gox sync.gox time.gox $(BUILDPACKAGE) netchan/check: $(CHECK_DEPS) $(CHECK) .PHONY: netchan/check -os/os.lo: $(go_os_files) sync.gox syscall.gox +os/os.lo: $(go_os_files) runtime.gox sync.gox syscall.gox $(BUILDPACKAGE) os/check: $(CHECK_DEPS) $(CHECK) @@ -1706,10 +1769,8 @@ strings/check: $(CHECK_DEPS) $(CHECK) .PHONY: strings/check -sync/mutex.lo: $(go_sync_files) runtime.gox +sync/sync.lo: $(go_sync_files) runtime.gox sync/atomic.gox $(BUILDPACKAGE) -sync/cas.lo: $(go_sync_c_files) sync/mutex.lo - $(LTCOMPILE) -c -o sync/cas.lo $(srcdir)/go/sync/cas.c sync/check: $(CHECK_DEPS) $(CHECK) .PHONY: sync/check @@ -1806,6 +1867,13 @@ archive/zip/check: $(CHECK_DEPS) $(CHECK) .PHONY: archive/zip/check +compress/bzip2.lo: $(go_compress_bzip2_files) bufio.gox io.gox os.gox sort.gox + $(BUILDPACKAGE) +compress/bzip2/check: $(CHECK_DEPS) + @$(MKDIR_P) compress/bzip2 + $(CHECK) +.PHONY: compress/bzip2/check + compress/flate.lo: $(go_compress_flate_files) bufio.gox io.gox math.gox \ os.gox sort.gox strconv.gox $(BUILDPACKAGE) @@ -1822,6 +1890,13 @@ compress/gzip/check: $(CHECK_DEPS) $(CHECK) .PHONY: compress/gzip/check +compress/lzw.lo: $(go_compress_lzw_files) bufio.gox fmt.gox io.gox os.gox + $(BUILDPACKAGE) +compress/lzw/check: $(CHECK_DEPS) + @$(MKDIR_P) compress/lzw + $(CHECK) +.PHONY: compress/lzw/check + compress/zlib.lo: $(go_compress_zlib_files) bufio.gox compress/flate.gox \ hash.gox hash/adler32.gox io.gox os.gox $(BUILDPACKAGE) @@ -1893,6 +1968,13 @@ crypto/cipher/check: $(CHECK_DEPS) $(CHECK) .PHONY: crypto/cipher/check +crypto/dsa.lo: $(go_crypto_dsa_files) big.gox 
io.gox os.gox + $(BUILDPACKAGE) +crypto/dsa/check: $(CHECK_DEPS) + @$(MKDIR_P) crypto/dsa + $(CHECK) +.PHONY: crypto/dsa/check + crypto/elliptic.lo: $(go_crypto_elliptic_files) big.gox io.gox os.gox sync.gox $(BUILDPACKAGE) crypto/elliptic/check: $(CHECK_DEPS) @@ -1908,21 +1990,21 @@ crypto/hmac/check: $(CHECK_DEPS) $(CHECK) .PHONY: crypto/hmac/check -crypto/md4.lo: $(go_crypto_md4_files) hash.gox os.gox +crypto/md4.lo: $(go_crypto_md4_files) crypto.gox hash.gox os.gox $(BUILDPACKAGE) crypto/md4/check: $(CHECK_DEPS) @$(MKDIR_P) crypto/md4 $(CHECK) .PHONY: crypto/md4/check -crypto/md5.lo: $(go_crypto_md5_files) hash.gox os.gox +crypto/md5.lo: $(go_crypto_md5_files) crypto.gox hash.gox os.gox $(BUILDPACKAGE) crypto/md5/check: $(CHECK_DEPS) @$(MKDIR_P) crypto/md5 $(CHECK) .PHONY: crypto/md5/check -crypto/ocsp.lo: $(go_crypto_ocsp_files) asn1.gox crypto/rsa.gox \ +crypto/ocsp.lo: $(go_crypto_ocsp_files) asn1.gox crypto.gox crypto/rsa.gox \ crypto/sha1.gox crypto/x509.gox os.gox time.gox $(BUILDPACKAGE) crypto/ocsp/check: $(CHECK_DEPS) @@ -1930,8 +2012,18 @@ crypto/ocsp/check: $(CHECK_DEPS) $(CHECK) .PHONY: crypto/ocsp/check -crypto/rand.lo: $(go_crypto_rand_files) crypto/aes.gox io.gox os.gox sync.gox \ - time.gox +crypto/openpgp.lo: $(go_crypto_openpgp_files) crypto.gox \ + crypto/openpgp/armor.gox crypto/openpgp/error.gox \ + crypto/openpgp/packet.gox crypto/rsa.gox crypto/sha256.gox \ + hash.gox io.gox os.gox strconv.gox time.gox + $(BUILDPACKAGE) +crypto/openpgp/check: $(CHECK_DEPS) + @$(MKDIR_P) crypto/openpgp + $(CHECK) +.PHONY: crypto/openpgp/check + +crypto/rand.lo: $(go_crypto_rand_files) bufio.gox crypto/aes.gox io.gox \ + os.gox sync.gox time.gox $(BUILDPACKAGE) crypto/rand/check: $(CHECK_DEPS) @$(MKDIR_P) crypto/rand @@ -1945,14 +2037,14 @@ crypto/rc4/check: $(CHECK_DEPS) $(CHECK) .PHONY: crypto/rc4/check -crypto/ripemd160.lo: $(go_crypto_ripemd160_files) hash.gox os.gox +crypto/ripemd160.lo: $(go_crypto_ripemd160_files) crypto.gox hash.gox os.gox $(BUILDPACKAGE) crypto/ripemd160/check: $(CHECK_DEPS) @$(MKDIR_P) crypto/ripemd160 $(CHECK) .PHONY: crypto/ripemd160/check -crypto/rsa.lo: $(go_crypto_rsa_files) big.gox crypto/sha1.gox \ +crypto/rsa.lo: $(go_crypto_rsa_files) big.gox crypto.gox crypto/sha1.gox \ crypto/subtle.gox encoding/hex.gox hash.gox io.gox os.gox $(BUILDPACKAGE) crypto/rsa/check: $(CHECK_DEPS) @@ -1960,21 +2052,21 @@ crypto/rsa/check: $(CHECK_DEPS) $(CHECK) .PHONY: crypto/rsa/check -crypto/sha1.lo: $(go_crypto_sha1_files) hash.gox os.gox +crypto/sha1.lo: $(go_crypto_sha1_files) crypto.gox hash.gox os.gox $(BUILDPACKAGE) crypto/sha1/check: $(CHECK_DEPS) @$(MKDIR_P) crypto/sha1 $(CHECK) .PHONY: crypto/sha1/check -crypto/sha256.lo: $(go_crypto_sha256_files) hash.gox os.gox +crypto/sha256.lo: $(go_crypto_sha256_files) crypto.gox hash.gox os.gox $(BUILDPACKAGE) crypto/sha256/check: $(CHECK_DEPS) @$(MKDIR_P) crypto/sha256 $(CHECK) .PHONY: crypto/sha256/check -crypto/sha512.lo: $(go_crypto_sha512_files) hash.gox os.gox +crypto/sha512.lo: $(go_crypto_sha512_files) crypto.gox hash.gox os.gox $(BUILDPACKAGE) crypto/sha512/check: $(CHECK_DEPS) @$(MKDIR_P) crypto/sha512 @@ -1989,7 +2081,7 @@ crypto/subtle/check: $(CHECK_DEPS) .PHONY: crypto/subtle/check crypto/tls.lo: $(go_crypto_tls_files) big.gox bufio.gox bytes.gox \ - container/list.gox crypto/aes.gox crypto/cipher.gox \ + container/list.gox crypto.gox crypto/aes.gox crypto/cipher.gox \ crypto/elliptic.gox crypto/hmac.gox crypto/md5.gox \ crypto/rc4.gox crypto/rand.gox crypto/rsa.gox crypto/sha1.gox \ crypto/subtle.gox 
crypto/rsa.gox crypto/sha1.gox \ @@ -2009,8 +2101,8 @@ crypto/twofish/check: $(CHECK_DEPS) .PHONY: crypto/twofish/check crypto/x509.lo: $(go_crypto_x509_files) asn1.gox big.gox container/vector.gox \ - crypto/rsa.gox crypto/sha1.gox hash.gox os.gox strings.gox \ - time.gox + crypto.gox crypto/rsa.gox crypto/sha1.gox hash.gox os.gox \ + strings.gox time.gox $(BUILDPACKAGE) crypto/x509/check: $(CHECK_DEPS) @$(MKDIR_P) crypto/x509 @@ -2033,16 +2125,30 @@ crypto/openpgp/armor/check: $(CHECK_DEPS) $(CHECK) .PHONY: crypto/openpgp/armor/check -crypto/openpgp/error.lo: $(go_crypto_openpgp_error_files) +crypto/openpgp/error.lo: $(go_crypto_openpgp_error_files) strconv.gox $(BUILDPACKAGE) crypto/openpgp/error/check: $(CHECK_DEPS) @$(MKDIR_P) crypto/openpgp/error $(CHECK) .PHONY: crypto/openpgp/error/check -crypto/openpgp/s2k.lo: $(go_crypto_openpgp_s2k_files) crypto/md5.gox \ - crypto/openpgp/error.gox crypto/ripemd160.gox crypto/sha1.gox \ - crypto/sha256.gox crypto/sha512.gox hash.gox io.gox os.gox +crypto/openpgp/packet.lo: $(go_crypto_openpgp_packet_files) big.gox bytes.gox \ + compress/flate.gox compress/zlib.gox crypto.gox \ + crypto/aes.gox crypto/cast5.gox crypto/cipher.gox \ + crypto/openpgp/error.gox crypto/openpgp/s2k.gox \ + crypto/rand.gox crypto/rsa.gox crypto/sha1.gox \ + crypto/subtle.gox encoding/binary.gox hash.gox io.gox \ + io/ioutil.gox os.gox strconv.gox strings.gox + $(BUILDPACKAGE) +crypto/openpgp/packet/check: $(CHECK_DEPS) + @$(MKDIR_P) crypto/openpgp/packet + $(CHECK) +.PHONY: crypto/openpgp/packet/check + +crypto/openpgp/s2k.lo: $(go_crypto_openpgp_s2k_files) crypto.gox \ + crypto/md5.gox crypto/openpgp/error.gox crypto/ripemd160.gox \ + crypto/sha1.gox crypto/sha256.gox crypto/sha512.gox hash.gox \ + io.gox os.gox $(BUILDPACKAGE) crypto/openpgp/s2k/check: $(CHECK_DEPS) @$(MKDIR_P) crypto/openpgp/s2k @@ -2361,6 +2467,15 @@ runtime/pprof/check: $(CHECK_DEPS) $(CHECK) .PHONY: runtime/pprof/check +sync/atomic.lo: $(go_sync_atomic_files) + $(BUILDPACKAGE) +sync/atomic_c.lo: $(go_sync_atomic_c_files) sync/atomic.lo + $(LTCOMPILE) -c -o $@ $(srcdir)/go/sync/atomic/atomic.c +sync/atomic/check: $(CHECK_DEPS) + @$(MKDIR_P) sync/atomic + $(CHECK) +.PHONY: sync/atomic/check + testing/iotest.lo: $(go_testing_iotest_files) io.gox log.gox os.gox $(BUILDPACKAGE) testing/iotest/check: $(CHECK_DEPS) @@ -2410,6 +2525,8 @@ bytes.gox: bytes/bytes.lo $(BUILDGOX) cmath.gox: cmath/cmath.lo $(BUILDGOX) +crypto.gox: crypto/crypto.lo + $(BUILDGOX) ebnf.gox: ebnf/ebnf.lo $(BUILDGOX) exec.gox: exec/exec.lo @@ -2470,7 +2587,7 @@ strconv.gox: strconv/strconv.lo $(BUILDGOX) strings.gox: strings/strings.lo $(BUILDGOX) -sync.gox: sync/mutex.lo +sync.gox: sync/sync.lo $(BUILDGOX) syslog.gox: syslog/syslog.lo $(BUILDGOX) @@ -2502,10 +2619,14 @@ archive/tar.gox: archive/tar.lo archive/zip.gox: archive/zip.lo $(BUILDGOX) +compress/bzip2.gox: compress/bzip2.lo + $(BUILDGOX) compress/flate.gox: compress/flate.lo $(BUILDGOX) compress/gzip.gox: compress/gzip.lo $(BUILDGOX) +compress/lzw.gox: compress/lzw.lo + $(BUILDGOX) compress/zlib.gox: compress/zlib.lo $(BUILDGOX) @@ -2528,6 +2649,8 @@ crypto/cast5.gox: crypto/cast5.lo $(BUILDGOX) crypto/cipher.gox: crypto/cipher.lo $(BUILDGOX) +crypto/dsa.gox: crypto/dsa.lo + $(BUILDGOX) crypto/elliptic.gox: crypto/elliptic.lo $(BUILDGOX) crypto/hmac.gox: crypto/hmac.lo @@ -2538,6 +2661,8 @@ crypto/md5.gox: crypto/md5.lo $(BUILDGOX) crypto/ocsp.gox: crypto/ocsp.lo $(BUILDGOX) +crypto/openpgp.gox: crypto/openpgp.lo + $(BUILDGOX) crypto/rand.gox: crypto/rand.lo $(BUILDGOX) 
crypto/rc4.gox: crypto/rc4.lo @@ -2567,6 +2692,8 @@ crypto/openpgp/armor.gox: crypto/openpgp/armor.lo $(BUILDGOX) crypto/openpgp/error.gox: crypto/openpgp/error.lo $(BUILDGOX) +crypto/openpgp/packet.gox: crypto/openpgp/packet.lo + $(BUILDGOX) crypto/openpgp/s2k.gox: crypto/openpgp/s2k.lo $(BUILDGOX) @@ -2664,6 +2791,9 @@ runtime/debug.gox: runtime/debug.lo runtime/pprof.gox: runtime/pprof.lo $(BUILDGOX) +sync/atomic.gox: sync/atomic.lo + $(BUILDGOX) + testing/iotest.gox: testing/iotest.lo $(BUILDGOX) testing/quick.gox: testing/quick.lo @@ -2725,8 +2855,10 @@ TEST_PACKAGES = \ xml/check \ archive/tar/check \ archive/zip/check \ + compress/bzip2/check \ compress/flate/check \ compress/gzip/check \ + compress/lzw/check \ compress/zlib/check \ container/heap/check \ container/list/check \ @@ -2737,11 +2869,13 @@ TEST_PACKAGES = \ crypto/blowfish/check \ crypto/cast5/check \ crypto/cipher/check \ + crypto/dsa/check \ crypto/elliptic/check \ crypto/hmac/check \ crypto/md4/check \ crypto/md5/check \ crypto/ocsp/check \ + crypto/openpgp/check \ crypto/rand/check \ crypto/rc4/check \ crypto/ripemd160/check \ @@ -2755,6 +2889,7 @@ TEST_PACKAGES = \ crypto/x509/check \ crypto/xtea/check \ crypto/openpgp/armor/check \ + crypto/openpgp/packet/check \ crypto/openpgp/s2k/check \ debug/dwarf/check \ debug/elf/check \ @@ -2787,6 +2922,7 @@ TEST_PACKAGES = \ $(os_inotify_check) \ os/signal/check \ rpc/jsonrpc/check \ + sync/atomic/check \ testing/quick/check \ testing/script/check diff --git a/libgo/Makefile.in b/libgo/Makefile.in index 452c60857ff..c9f01879189 100644 --- a/libgo/Makefile.in +++ b/libgo/Makefile.in @@ -112,6 +112,7 @@ am__installdirs = "$(DESTDIR)$(toolexeclibdir)" \ "$(DESTDIR)$(toolexeclibgoosdir)" \ "$(DESTDIR)$(toolexeclibgorpcdir)" \ "$(DESTDIR)$(toolexeclibgoruntimedir)" \ + "$(DESTDIR)$(toolexeclibgosyncdir)" \ "$(DESTDIR)$(toolexeclibgotestingdir)" LIBRARIES = $(toolexeclib_LIBRARIES) ARFLAGS = cru @@ -122,43 +123,45 @@ libgobegin_a_OBJECTS = $(am_libgobegin_a_OBJECTS) LTLIBRARIES = $(toolexeclib_LTLIBRARIES) am__DEPENDENCIES_1 = am__DEPENDENCIES_2 = asn1/asn1.lo big/big.lo bufio/bufio.lo \ - bytes/bytes.lo bytes/index.lo cmath/cmath.lo ebnf/ebnf.lo \ - exec/exec.lo expvar/expvar.lo flag/flag.lo fmt/fmt.lo \ - gob/gob.lo hash/hash.lo html/html.lo http/http.lo \ + bytes/bytes.lo bytes/index.lo cmath/cmath.lo crypto/crypto.lo \ + ebnf/ebnf.lo exec/exec.lo expvar/expvar.lo flag/flag.lo \ + fmt/fmt.lo gob/gob.lo hash/hash.lo html/html.lo http/http.lo \ image/image.lo io/io.lo json/json.lo log/log.lo math/math.lo \ mime/mime.lo net/net.lo netchan/netchan.lo os/os.lo \ patch/patch.lo path/path.lo rand/rand.lo reflect/reflect.lo \ regexp/regexp.lo rpc/rpc.lo runtime/runtime.lo \ scanner/scanner.lo smtp/smtp.lo sort/sort.lo \ - strconv/strconv.lo strings/strings.lo sync/mutex.lo \ - sync/cas.lo syslog/syslog.lo syslog/syslog_c.lo \ - tabwriter/tabwriter.lo template/template.lo time/time.lo \ - try/try.lo unicode/unicode.lo utf16/utf16.lo utf8/utf8.lo \ + strconv/strconv.lo strings/strings.lo sync/sync.lo \ + syslog/syslog.lo syslog/syslog_c.lo tabwriter/tabwriter.lo \ + template/template.lo time/time.lo try/try.lo \ + unicode/unicode.lo utf16/utf16.lo utf8/utf8.lo \ websocket/websocket.lo xml/xml.lo archive/tar.lo \ - archive/zip.lo compress/flate.lo compress/gzip.lo \ - compress/zlib.lo container/heap.lo container/list.lo \ - container/ring.lo container/vector.lo crypto/aes.lo \ - crypto/block.lo crypto/blowfish.lo crypto/cast5.lo \ - crypto/cipher.lo crypto/elliptic.lo crypto/hmac.lo \ - 
crypto/md4.lo crypto/md5.lo crypto/ocsp.lo crypto/rand.lo \ + archive/zip.lo compress/bzip2.lo compress/flate.lo \ + compress/gzip.lo compress/lzw.lo compress/zlib.lo \ + container/heap.lo container/list.lo container/ring.lo \ + container/vector.lo crypto/aes.lo crypto/block.lo \ + crypto/blowfish.lo crypto/cast5.lo crypto/cipher.lo \ + crypto/dsa.lo crypto/elliptic.lo crypto/hmac.lo crypto/md4.lo \ + crypto/md5.lo crypto/ocsp.lo crypto/openpgp.lo crypto/rand.lo \ crypto/rc4.lo crypto/ripemd160.lo crypto/rsa.lo crypto/sha1.lo \ crypto/sha256.lo crypto/sha512.lo crypto/subtle.lo \ crypto/tls.lo crypto/twofish.lo crypto/x509.lo crypto/xtea.lo \ crypto/openpgp/armor.lo crypto/openpgp/error.lo \ - crypto/openpgp/s2k.lo debug/dwarf.lo debug/elf.lo \ - debug/gosym.lo debug/macho.lo debug/pe.lo debug/proc.lo \ - encoding/ascii85.lo encoding/base32.lo encoding/base64.lo \ - encoding/binary.lo encoding/git85.lo encoding/hex.lo \ - encoding/line.lo encoding/pem.lo exp/datafmt.lo exp/draw.lo \ - exp/eval.lo go/ast.lo go/doc.lo go/parser.lo go/printer.lo \ - go/scanner.lo go/token.lo go/typechecker.lo hash/adler32.lo \ - hash/crc32.lo hash/crc64.lo http/pprof.lo image/jpeg.lo \ - image/png.lo index/suffixarray.lo io/ioutil.lo \ - mime/multipart.lo net/dict.lo net/textproto.lo \ + crypto/openpgp/packet.lo crypto/openpgp/s2k.lo debug/dwarf.lo \ + debug/elf.lo debug/gosym.lo debug/macho.lo debug/pe.lo \ + debug/proc.lo encoding/ascii85.lo encoding/base32.lo \ + encoding/base64.lo encoding/binary.lo encoding/git85.lo \ + encoding/hex.lo encoding/line.lo encoding/pem.lo \ + exp/datafmt.lo exp/draw.lo exp/eval.lo go/ast.lo go/doc.lo \ + go/parser.lo go/printer.lo go/scanner.lo go/token.lo \ + go/typechecker.lo hash/adler32.lo hash/crc32.lo hash/crc64.lo \ + http/pprof.lo image/jpeg.lo image/png.lo index/suffixarray.lo \ + io/ioutil.lo mime/multipart.lo net/dict.lo net/textproto.lo \ $(am__DEPENDENCIES_1) os/signal.lo rpc/jsonrpc.lo \ - runtime/debug.lo runtime/pprof.lo syscalls/syscall.lo \ - syscalls/errno.lo testing/testing.lo testing/iotest.lo \ - testing/quick.lo testing/script.lo + runtime/debug.lo runtime/pprof.lo sync/atomic.lo \ + sync/atomic_c.lo syscalls/syscall.lo syscalls/errno.lo \ + testing/testing.lo testing/iotest.lo testing/quick.lo \ + testing/script.lo libgo_la_DEPENDENCIES = $(am__DEPENDENCIES_2) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) @@ -204,8 +207,7 @@ am__libgo_la_SOURCES_DIST = runtime/go-append.c runtime/go-assert.c \ runtime/mcache.c runtime/mcentral.c \ runtime/mem_posix_memalign.c runtime/mem.c runtime/mfinal.c \ runtime/mfixalloc.c runtime/mgc0.c runtime/mheap.c \ - runtime/mheapmap32.c runtime/mheapmap64.c runtime/msize.c \ - runtime/proc.c runtime/thread.c \ + runtime/msize.c runtime/proc.c runtime/thread.c \ runtime/rtems-task-variable-add.c chan.c iface.c malloc.c \ map.c mprof.c reflect.c sigqueue.c string.c @HAVE_SYS_MMAN_H_FALSE@am__objects_1 = mem_posix_memalign.lo @@ -237,9 +239,8 @@ am__objects_3 = go-append.lo go-assert.lo go-assert-interface.lo \ go-unreflect.lo go-unsafe-new.lo go-unsafe-newarray.lo \ go-unsafe-pointer.lo go-unwind.lo mcache.lo mcentral.lo \ $(am__objects_1) mfinal.lo mfixalloc.lo mgc0.lo mheap.lo \ - mheapmap32.lo mheapmap64.lo msize.lo proc.lo thread.lo \ - $(am__objects_2) chan.lo iface.lo malloc.lo map.lo mprof.lo \ - reflect.lo sigqueue.lo string.lo + msize.lo proc.lo thread.lo $(am__objects_2) chan.lo iface.lo \ + malloc.lo map.lo mprof.lo reflect.lo sigqueue.lo string.lo am_libgo_la_OBJECTS 
= $(am__objects_3) libgo_la_OBJECTS = $(am_libgo_la_OBJECTS) DEFAULT_INCLUDES = -I.@am__isrc@ @@ -280,7 +281,7 @@ DATA = $(toolexeclibgo_DATA) $(toolexeclibgoarchive_DATA) \ $(toolexeclibgoio_DATA) $(toolexeclibgomime_DATA) \ $(toolexeclibgonet_DATA) $(toolexeclibgoos_DATA) \ $(toolexeclibgorpc_DATA) $(toolexeclibgoruntime_DATA) \ - $(toolexeclibgotesting_DATA) + $(toolexeclibgosync_DATA) $(toolexeclibgotesting_DATA) RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ @@ -546,6 +547,7 @@ toolexeclibgo_DATA = \ bufio.gox \ bytes.gox \ cmath.gox \ + crypto.gox \ ebnf.gox \ exec.gox \ expvar.gox \ @@ -597,8 +599,10 @@ toolexeclibgoarchive_DATA = \ toolexeclibgocompressdir = $(toolexeclibgodir)/compress toolexeclibgocompress_DATA = \ + compress/bzip2.gox \ compress/flate.gox \ compress/gzip.gox \ + compress/lzw.gox \ compress/zlib.gox toolexeclibgocontainerdir = $(toolexeclibgodir)/container @@ -615,11 +619,13 @@ toolexeclibgocrypto_DATA = \ crypto/blowfish.gox \ crypto/cast5.gox \ crypto/cipher.gox \ + crypto/dsa.gox \ crypto/elliptic.gox \ crypto/hmac.gox \ crypto/md4.gox \ crypto/md5.gox \ crypto/ocsp.gox \ + crypto/openpgp.gox \ crypto/rand.gox \ crypto/rc4.gox \ crypto/ripemd160.gox \ @@ -637,6 +643,7 @@ toolexeclibgocryptoopenpgpdir = $(toolexeclibgocryptodir)/openpgp toolexeclibgocryptoopenpgp_DATA = \ crypto/openpgp/armor.gox \ crypto/openpgp/error.gox \ + crypto/openpgp/packet.gox \ crypto/openpgp/s2k.gox toolexeclibgodebugdir = $(toolexeclibgodir)/debug @@ -725,6 +732,10 @@ toolexeclibgoruntime_DATA = \ runtime/debug.gox \ runtime/pprof.gox +toolexeclibgosyncdir = $(toolexeclibgodir)/sync +toolexeclibgosync_DATA = \ + sync/atomic.gox + toolexeclibgotestingdir = $(toolexeclibgodir)/testing toolexeclibgotesting_DATA = \ testing/iotest.gox \ @@ -821,8 +832,6 @@ runtime_files = \ runtime/mfixalloc.c \ runtime/mgc0.c \ runtime/mheap.c \ - runtime/mheapmap32.c \ - runtime/mheapmap64.c \ runtime/msize.c \ runtime/proc.c \ runtime/thread.c \ @@ -874,6 +883,9 @@ go_cmath_files = \ go/cmath/sqrt.go \ go/cmath/tan.go +go_crypto_files = \ + go/crypto/crypto.go + go_ebnf_files = \ go/ebnf/ebnf.go \ go/ebnf/parser.go @@ -918,6 +930,7 @@ go_http_files = \ go/http/client.go \ go/http/dump.go \ go/http/fs.go \ + go/http/header.go \ go/http/lex.go \ go/http/persist.go \ go/http/request.go \ @@ -925,6 +938,7 @@ go_http_files = \ go/http/server.go \ go/http/status.go \ go/http/transfer.go \ + go/http/transport.go \ go/http/url.go go_image_files = \ @@ -1051,6 +1065,7 @@ go_os_files = \ go/os/env_unix.go \ go/os/error.go \ go/os/exec.go \ + go/os/exec_unix.go \ go/os/file.go \ go/os/file_unix.go \ go/os/getwd.go \ @@ -1096,8 +1111,6 @@ go_runtime_files = \ go/runtime/debug.go \ go/runtime/error.go \ go/runtime/extern.go \ - go/runtime/malloc_defs.go \ - go/runtime/runtime_defs.go \ go/runtime/sig.go \ go/runtime/softfloat64.go \ go/runtime/type.go \ @@ -1128,12 +1141,11 @@ go_strings_files = \ go/strings/strings.go go_sync_files = \ + go/sync/cond.go \ go/sync/mutex.go \ go/sync/once.go \ - go/sync/rwmutex.go - -go_sync_c_files = \ - go/sync/cas.c + go/sync/rwmutex.go \ + go/sync/waitgroup.go @LIBGO_IS_SOLARIS_FALSE@go_syslog_file = go/syslog/syslog_unix.go @LIBGO_IS_SOLARIS_TRUE@go_syslog_file = go/syslog/syslog_solaris.go @@ -1196,6 +1208,12 @@ go_archive_zip_files = \ go/archive/zip/reader.go \ go/archive/zip/struct.go +go_compress_bzip2_files = \ + go/compress/bzip2/bit_reader.go \ + 
go/compress/bzip2/bzip2.go \ + go/compress/bzip2/huffman.go \ + go/compress/bzip2/move_to_front.go + go_compress_flate_files = \ go/compress/flate/deflate.go \ go/compress/flate/huffman_bit_writer.go \ @@ -1209,6 +1227,10 @@ go_compress_gzip_files = \ go/compress/gzip/gzip.go \ go/compress/gzip/gunzip.go +go_compress_lzw_files = \ + go/compress/lzw/reader.go \ + go/compress/lzw/writer.go + go_compress_zlib_files = \ go/compress/zlib/reader.go \ go/compress/zlib/writer.go @@ -1261,6 +1283,9 @@ go_crypto_cipher_files = \ go/crypto/cipher/ocfb.go \ go/crypto/cipher/ofb.go +go_crypto_dsa_files = \ + go/crypto/dsa/dsa.go + go_crypto_elliptic_files = \ go/crypto/elliptic/elliptic.go @@ -1278,6 +1303,12 @@ go_crypto_md5_files = \ go_crypto_ocsp_files = \ go/crypto/ocsp/ocsp.go +go_crypto_openpgp_files = \ + go/crypto/openpgp/canonical_text.go \ + go/crypto/openpgp/keys.go \ + go/crypto/openpgp/read.go \ + go/crypto/openpgp/write.go + go_crypto_rand_files = \ go/crypto/rand/rand.go \ go/crypto/rand/rand_unix.go @@ -1338,6 +1369,20 @@ go_crypto_openpgp_armor_files = \ go_crypto_openpgp_error_files = \ go/crypto/openpgp/error/error.go +go_crypto_openpgp_packet_files = \ + go/crypto/openpgp/packet/compressed.go \ + go/crypto/openpgp/packet/encrypted_key.go \ + go/crypto/openpgp/packet/literal.go \ + go/crypto/openpgp/packet/one_pass_signature.go \ + go/crypto/openpgp/packet/packet.go \ + go/crypto/openpgp/packet/private_key.go \ + go/crypto/openpgp/packet/public_key.go \ + go/crypto/openpgp/packet/reader.go \ + go/crypto/openpgp/packet/signature.go \ + go/crypto/openpgp/packet/symmetric_key_encrypted.go \ + go/crypto/openpgp/packet/symmetrically_encrypted.go \ + go/crypto/openpgp/packet/userid.go + go_crypto_openpgp_s2k_files = \ go/crypto/openpgp/s2k/s2k.go @@ -1484,6 +1529,7 @@ go_net_dict_files = \ go/net/dict/dict.go go_net_textproto_files = \ + go/net/textproto/header.go \ go/net/textproto/pipeline.go \ go/net/textproto/reader.go \ go/net/textproto/textproto.go \ @@ -1506,6 +1552,12 @@ go_runtime_debug_files = \ go_runtime_pprof_files = \ go/runtime/pprof/pprof.go +go_sync_atomic_files = \ + go/sync/atomic/doc.go + +go_sync_atomic_c_files = \ + go/sync/atomic/atomic.c + go_testing_iotest_files = \ go/testing/iotest/logger.go \ go/testing/iotest/reader.go \ @@ -1606,6 +1658,7 @@ libgo_go_objs = \ bytes/bytes.lo \ bytes/index.lo \ cmath/cmath.lo \ + crypto/crypto.lo \ ebnf/ebnf.lo \ exec/exec.lo \ expvar/expvar.lo \ @@ -1636,8 +1689,7 @@ libgo_go_objs = \ sort/sort.lo \ strconv/strconv.lo \ strings/strings.lo \ - sync/mutex.lo \ - sync/cas.lo \ + sync/sync.lo \ syslog/syslog.lo \ syslog/syslog_c.lo \ tabwriter/tabwriter.lo \ @@ -1651,8 +1703,10 @@ libgo_go_objs = \ xml/xml.lo \ archive/tar.lo \ archive/zip.lo \ + compress/bzip2.lo \ compress/flate.lo \ compress/gzip.lo \ + compress/lzw.lo \ compress/zlib.lo \ container/heap.lo \ container/list.lo \ @@ -1663,11 +1717,13 @@ libgo_go_objs = \ crypto/blowfish.lo \ crypto/cast5.lo \ crypto/cipher.lo \ + crypto/dsa.lo \ crypto/elliptic.lo \ crypto/hmac.lo \ crypto/md4.lo \ crypto/md5.lo \ crypto/ocsp.lo \ + crypto/openpgp.lo \ crypto/rand.lo \ crypto/rc4.lo \ crypto/ripemd160.lo \ @@ -1682,6 +1738,7 @@ libgo_go_objs = \ crypto/xtea.lo \ crypto/openpgp/armor.lo \ crypto/openpgp/error.lo \ + crypto/openpgp/packet.lo \ crypto/openpgp/s2k.lo \ debug/dwarf.lo \ debug/elf.lo \ @@ -1723,6 +1780,8 @@ libgo_go_objs = \ rpc/jsonrpc.lo \ runtime/debug.lo \ runtime/pprof.lo \ + sync/atomic.lo \ + sync/atomic_c.lo \ syscalls/syscall.lo \ syscalls/errno.lo \ 
testing/testing.lo \ @@ -1857,8 +1916,10 @@ TEST_PACKAGES = \ xml/check \ archive/tar/check \ archive/zip/check \ + compress/bzip2/check \ compress/flate/check \ compress/gzip/check \ + compress/lzw/check \ compress/zlib/check \ container/heap/check \ container/list/check \ @@ -1869,11 +1930,13 @@ TEST_PACKAGES = \ crypto/blowfish/check \ crypto/cast5/check \ crypto/cipher/check \ + crypto/dsa/check \ crypto/elliptic/check \ crypto/hmac/check \ crypto/md4/check \ crypto/md5/check \ crypto/ocsp/check \ + crypto/openpgp/check \ crypto/rand/check \ crypto/rc4/check \ crypto/ripemd160/check \ @@ -1887,6 +1950,7 @@ TEST_PACKAGES = \ crypto/x509/check \ crypto/xtea/check \ crypto/openpgp/armor/check \ + crypto/openpgp/packet/check \ crypto/openpgp/s2k/check \ debug/dwarf/check \ debug/elf/check \ @@ -1919,6 +1983,7 @@ TEST_PACKAGES = \ $(os_inotify_check) \ os/signal/check \ rpc/jsonrpc/check \ + sync/atomic/check \ testing/quick/check \ testing/script/check @@ -2146,8 +2211,6 @@ distclean-compile: @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mfixalloc.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mgc0.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mheap.Plo@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mheapmap32.Plo@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mheapmap64.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mprof.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/msize.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/proc.Plo@am__quote@ @@ -2794,20 +2857,6 @@ mheap.lo: runtime/mheap.c @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o mheap.lo `test -f 'runtime/mheap.c' || echo '$(srcdir)/'`runtime/mheap.c -mheapmap32.lo: runtime/mheapmap32.c -@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT mheapmap32.lo -MD -MP -MF $(DEPDIR)/mheapmap32.Tpo -c -o mheapmap32.lo `test -f 'runtime/mheapmap32.c' || echo '$(srcdir)/'`runtime/mheapmap32.c -@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/mheapmap32.Tpo $(DEPDIR)/mheapmap32.Plo -@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='runtime/mheapmap32.c' object='mheapmap32.lo' libtool=yes @AMDEPBACKSLASH@ -@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ -@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o mheapmap32.lo `test -f 'runtime/mheapmap32.c' || echo '$(srcdir)/'`runtime/mheapmap32.c - -mheapmap64.lo: runtime/mheapmap64.c -@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT mheapmap64.lo -MD -MP -MF $(DEPDIR)/mheapmap64.Tpo -c -o mheapmap64.lo `test -f 'runtime/mheapmap64.c' || echo '$(srcdir)/'`runtime/mheapmap64.c -@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/mheapmap64.Tpo $(DEPDIR)/mheapmap64.Plo -@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='runtime/mheapmap64.c' object='mheapmap64.lo' libtool=yes @AMDEPBACKSLASH@ -@AMDEP_TRUE@@am__fastdepCC_FALSE@ 
DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ -@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o mheapmap64.lo `test -f 'runtime/mheapmap64.c' || echo '$(srcdir)/'`runtime/mheapmap64.c - msize.lo: runtime/msize.c @am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT msize.lo -MD -MP -MF $(DEPDIR)/msize.Tpo -c -o msize.lo `test -f 'runtime/msize.c' || echo '$(srcdir)/'`runtime/msize.c @am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/msize.Tpo $(DEPDIR)/msize.Plo @@ -3261,6 +3310,26 @@ uninstall-toolexeclibgoruntimeDATA: test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(toolexeclibgoruntimedir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(toolexeclibgoruntimedir)" && rm -f $$files +install-toolexeclibgosyncDATA: $(toolexeclibgosync_DATA) + @$(NORMAL_INSTALL) + test -z "$(toolexeclibgosyncdir)" || $(MKDIR_P) "$(DESTDIR)$(toolexeclibgosyncdir)" + @list='$(toolexeclibgosync_DATA)'; test -n "$(toolexeclibgosyncdir)" || list=; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(toolexeclibgosyncdir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(toolexeclibgosyncdir)" || exit $$?; \ + done + +uninstall-toolexeclibgosyncDATA: + @$(NORMAL_UNINSTALL) + @list='$(toolexeclibgosync_DATA)'; test -n "$(toolexeclibgosyncdir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + test -n "$$files" || exit 0; \ + echo " ( cd '$(DESTDIR)$(toolexeclibgosyncdir)' && rm -f" $$files ")"; \ + cd "$(DESTDIR)$(toolexeclibgosyncdir)" && rm -f $$files install-toolexeclibgotestingDATA: $(toolexeclibgotesting_DATA) @$(NORMAL_INSTALL) test -z "$(toolexeclibgotestingdir)" || $(MKDIR_P) "$(DESTDIR)$(toolexeclibgotestingdir)" @@ -3598,7 +3667,7 @@ all-am: Makefile $(LIBRARIES) $(LTLIBRARIES) all-multi $(DATA) \ config.h installdirs: installdirs-recursive installdirs-am: - for dir in "$(DESTDIR)$(toolexeclibdir)" "$(DESTDIR)$(toolexeclibdir)" "$(DESTDIR)$(toolexeclibgodir)" "$(DESTDIR)$(toolexeclibgoarchivedir)" "$(DESTDIR)$(toolexeclibgocompressdir)" "$(DESTDIR)$(toolexeclibgocontainerdir)" "$(DESTDIR)$(toolexeclibgocryptodir)" "$(DESTDIR)$(toolexeclibgocryptoopenpgpdir)" "$(DESTDIR)$(toolexeclibgodebugdir)" "$(DESTDIR)$(toolexeclibgoencodingdir)" "$(DESTDIR)$(toolexeclibgoexpdir)" "$(DESTDIR)$(toolexeclibgogodir)" "$(DESTDIR)$(toolexeclibgohashdir)" "$(DESTDIR)$(toolexeclibgohttpdir)" "$(DESTDIR)$(toolexeclibgoimagedir)" "$(DESTDIR)$(toolexeclibgoindexdir)" "$(DESTDIR)$(toolexeclibgoiodir)" "$(DESTDIR)$(toolexeclibgomimedir)" "$(DESTDIR)$(toolexeclibgonetdir)" "$(DESTDIR)$(toolexeclibgoosdir)" "$(DESTDIR)$(toolexeclibgorpcdir)" "$(DESTDIR)$(toolexeclibgoruntimedir)" "$(DESTDIR)$(toolexeclibgotestingdir)"; do \ + for dir in "$(DESTDIR)$(toolexeclibdir)" "$(DESTDIR)$(toolexeclibdir)" "$(DESTDIR)$(toolexeclibgodir)" "$(DESTDIR)$(toolexeclibgoarchivedir)" "$(DESTDIR)$(toolexeclibgocompressdir)" "$(DESTDIR)$(toolexeclibgocontainerdir)" "$(DESTDIR)$(toolexeclibgocryptodir)" "$(DESTDIR)$(toolexeclibgocryptoopenpgpdir)" "$(DESTDIR)$(toolexeclibgodebugdir)" "$(DESTDIR)$(toolexeclibgoencodingdir)" "$(DESTDIR)$(toolexeclibgoexpdir)" "$(DESTDIR)$(toolexeclibgogodir)" 
"$(DESTDIR)$(toolexeclibgohashdir)" "$(DESTDIR)$(toolexeclibgohttpdir)" "$(DESTDIR)$(toolexeclibgoimagedir)" "$(DESTDIR)$(toolexeclibgoindexdir)" "$(DESTDIR)$(toolexeclibgoiodir)" "$(DESTDIR)$(toolexeclibgomimedir)" "$(DESTDIR)$(toolexeclibgonetdir)" "$(DESTDIR)$(toolexeclibgoosdir)" "$(DESTDIR)$(toolexeclibgorpcdir)" "$(DESTDIR)$(toolexeclibgoruntimedir)" "$(DESTDIR)$(toolexeclibgosyncdir)" "$(DESTDIR)$(toolexeclibgotestingdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive @@ -3672,7 +3741,7 @@ install-exec-am: install-multi install-toolexeclibLIBRARIES \ install-toolexeclibgoindexDATA install-toolexeclibgoioDATA \ install-toolexeclibgomimeDATA install-toolexeclibgonetDATA \ install-toolexeclibgoosDATA install-toolexeclibgorpcDATA \ - install-toolexeclibgoruntimeDATA \ + install-toolexeclibgoruntimeDATA install-toolexeclibgosyncDATA \ install-toolexeclibgotestingDATA install-html: install-html-recursive @@ -3732,6 +3801,7 @@ uninstall-am: uninstall-toolexeclibLIBRARIES \ uninstall-toolexeclibgomimeDATA uninstall-toolexeclibgonetDATA \ uninstall-toolexeclibgoosDATA uninstall-toolexeclibgorpcDATA \ uninstall-toolexeclibgoruntimeDATA \ + uninstall-toolexeclibgosyncDATA \ uninstall-toolexeclibgotestingDATA .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) all all-multi \ @@ -3766,7 +3836,7 @@ uninstall-am: uninstall-toolexeclibLIBRARIES \ install-toolexeclibgoindexDATA install-toolexeclibgoioDATA \ install-toolexeclibgomimeDATA install-toolexeclibgonetDATA \ install-toolexeclibgoosDATA install-toolexeclibgorpcDATA \ - install-toolexeclibgoruntimeDATA \ + install-toolexeclibgoruntimeDATA install-toolexeclibgosyncDATA \ install-toolexeclibgotestingDATA installcheck installcheck-am \ installdirs installdirs-am maintainer-clean \ maintainer-clean-generic maintainer-clean-multi mostlyclean \ @@ -3790,6 +3860,7 @@ uninstall-am: uninstall-toolexeclibLIBRARIES \ uninstall-toolexeclibgomimeDATA uninstall-toolexeclibgonetDATA \ uninstall-toolexeclibgoosDATA uninstall-toolexeclibgorpcDATA \ uninstall-toolexeclibgoruntimeDATA \ + uninstall-toolexeclibgosyncDATA \ uninstall-toolexeclibgotestingDATA @@ -3872,6 +3943,12 @@ cmath/check: $(CHECK_DEPS) $(CHECK) .PHONY: cmath/check +crypto/crypto.lo: $(go_crypto_files) hash.gox + $(BUILDPACKAGE) +crypto/check: $(CHECK_DEPS) + $(CHECK) +.PHONY: crypto/check + ebnf/ebnf.lo: $(go_ebnf_files) container/vector.gox go/scanner.gox \ go/token.gox os.gox strconv.gox unicode.gox utf8.gox $(BUILDPACKAGE) @@ -3879,7 +3956,7 @@ ebnf/check: $(CHECK_DEPS) $(CHECK) .PHONY: ebnf/check -exec/exec.lo: $(go_exec_files) os.gox strings.gox +exec/exec.lo: $(go_exec_files) os.gox strconv.gox strings.gox $(BUILDPACKAGE) exec/check: $(CHECK_DEPS) $(CHECK) @@ -3898,8 +3975,8 @@ flag/check: $(CHECK_DEPS) $(CHECK) .PHONY: flag/check -fmt/fmt.lo: $(go_fmt_files) bytes.gox io.gox os.gox reflect.gox strconv.gox \ - strings.gox unicode.gox utf8.gox +fmt/fmt.lo: $(go_fmt_files) bytes.gox io.gox math.gox os.gox reflect.gox \ + strconv.gox strings.gox unicode.gox utf8.gox $(BUILDPACKAGE) fmt/check: $(CHECK_DEPS) $(CHECK) @@ -3926,10 +4003,10 @@ html/check: $(CHECK_DEPS) $(CHECK) .PHONY: html/check -http/http.lo: $(go_http_files) bufio.gox bytes.gox container/list.gox \ - container/vector.gox crypto/rand.gox crypto/tls.gox \ - encoding/base64.gox fmt.gox io.gox io/ioutil.gox log.gox \ - mime.gox mime/multipart.gox net.gox os.gox path.gox sort.gox \ +http/http.lo: $(go_http_files) bufio.gox bytes.gox container/vector.gox \ + crypto/rand.gox crypto/tls.gox 
encoding/base64.gox fmt.gox \ + io.gox io/ioutil.gox log.gox mime.gox mime/multipart.gox \ + net.gox net/textproto.gox os.gox path.gox sort.gox \ strconv.gox strings.gox sync.gox time.gox utf8.gox $(BUILDPACKAGE) http/check: $(CHECK_DEPS) @@ -3948,9 +4025,10 @@ io/check: $(CHECK_DEPS) $(CHECK) .PHONY: io/check -json/json.lo: $(go_json_files) bytes.gox container/vector.gox fmt.gox io.gox \ - math.gox os.gox reflect.gox runtime.gox strconv.gox \ - strings.gox unicode.gox utf16.gox utf8.gox +json/json.lo: $(go_json_files) bytes.gox container/vector.gox \ + encoding/base64.gox fmt.gox io.gox math.gox os.gox \ + reflect.gox runtime.gox strconv.gox strings.gox unicode.gox \ + utf16.gox utf8.gox $(BUILDPACKAGE) json/check: $(CHECK_DEPS) $(CHECK) @@ -3983,14 +4061,14 @@ net/check: $(CHECK_DEPS) $(CHECK) .PHONY: net/check -netchan/netchan.lo: $(go_netchan_files) gob.gox log.gox net.gox os.gox \ +netchan/netchan.lo: $(go_netchan_files) gob.gox io.gox log.gox net.gox os.gox \ reflect.gox strconv.gox sync.gox time.gox $(BUILDPACKAGE) netchan/check: $(CHECK_DEPS) $(CHECK) .PHONY: netchan/check -os/os.lo: $(go_os_files) sync.gox syscall.gox +os/os.lo: $(go_os_files) runtime.gox sync.gox syscall.gox $(BUILDPACKAGE) os/check: $(CHECK_DEPS) $(CHECK) @@ -4078,10 +4156,8 @@ strings/check: $(CHECK_DEPS) $(CHECK) .PHONY: strings/check -sync/mutex.lo: $(go_sync_files) runtime.gox +sync/sync.lo: $(go_sync_files) runtime.gox sync/atomic.gox $(BUILDPACKAGE) -sync/cas.lo: $(go_sync_c_files) sync/mutex.lo - $(LTCOMPILE) -c -o sync/cas.lo $(srcdir)/go/sync/cas.c sync/check: $(CHECK_DEPS) $(CHECK) .PHONY: sync/check @@ -4178,6 +4254,13 @@ archive/zip/check: $(CHECK_DEPS) $(CHECK) .PHONY: archive/zip/check +compress/bzip2.lo: $(go_compress_bzip2_files) bufio.gox io.gox os.gox sort.gox + $(BUILDPACKAGE) +compress/bzip2/check: $(CHECK_DEPS) + @$(MKDIR_P) compress/bzip2 + $(CHECK) +.PHONY: compress/bzip2/check + compress/flate.lo: $(go_compress_flate_files) bufio.gox io.gox math.gox \ os.gox sort.gox strconv.gox $(BUILDPACKAGE) @@ -4194,6 +4277,13 @@ compress/gzip/check: $(CHECK_DEPS) $(CHECK) .PHONY: compress/gzip/check +compress/lzw.lo: $(go_compress_lzw_files) bufio.gox fmt.gox io.gox os.gox + $(BUILDPACKAGE) +compress/lzw/check: $(CHECK_DEPS) + @$(MKDIR_P) compress/lzw + $(CHECK) +.PHONY: compress/lzw/check + compress/zlib.lo: $(go_compress_zlib_files) bufio.gox compress/flate.gox \ hash.gox hash/adler32.gox io.gox os.gox $(BUILDPACKAGE) @@ -4265,6 +4355,13 @@ crypto/cipher/check: $(CHECK_DEPS) $(CHECK) .PHONY: crypto/cipher/check +crypto/dsa.lo: $(go_crypto_dsa_files) big.gox io.gox os.gox + $(BUILDPACKAGE) +crypto/dsa/check: $(CHECK_DEPS) + @$(MKDIR_P) crypto/dsa + $(CHECK) +.PHONY: crypto/dsa/check + crypto/elliptic.lo: $(go_crypto_elliptic_files) big.gox io.gox os.gox sync.gox $(BUILDPACKAGE) crypto/elliptic/check: $(CHECK_DEPS) @@ -4280,21 +4377,21 @@ crypto/hmac/check: $(CHECK_DEPS) $(CHECK) .PHONY: crypto/hmac/check -crypto/md4.lo: $(go_crypto_md4_files) hash.gox os.gox +crypto/md4.lo: $(go_crypto_md4_files) crypto.gox hash.gox os.gox $(BUILDPACKAGE) crypto/md4/check: $(CHECK_DEPS) @$(MKDIR_P) crypto/md4 $(CHECK) .PHONY: crypto/md4/check -crypto/md5.lo: $(go_crypto_md5_files) hash.gox os.gox +crypto/md5.lo: $(go_crypto_md5_files) crypto.gox hash.gox os.gox $(BUILDPACKAGE) crypto/md5/check: $(CHECK_DEPS) @$(MKDIR_P) crypto/md5 $(CHECK) .PHONY: crypto/md5/check -crypto/ocsp.lo: $(go_crypto_ocsp_files) asn1.gox crypto/rsa.gox \ +crypto/ocsp.lo: $(go_crypto_ocsp_files) asn1.gox crypto.gox crypto/rsa.gox \ 
crypto/sha1.gox crypto/x509.gox os.gox time.gox $(BUILDPACKAGE) crypto/ocsp/check: $(CHECK_DEPS) @@ -4302,8 +4399,18 @@ crypto/ocsp/check: $(CHECK_DEPS) $(CHECK) .PHONY: crypto/ocsp/check -crypto/rand.lo: $(go_crypto_rand_files) crypto/aes.gox io.gox os.gox sync.gox \ - time.gox +crypto/openpgp.lo: $(go_crypto_openpgp_files) crypto.gox \ + crypto/openpgp/armor.gox crypto/openpgp/error.gox \ + crypto/openpgp/packet.gox crypto/rsa.gox crypto/sha256.gox \ + hash.gox io.gox os.gox strconv.gox time.gox + $(BUILDPACKAGE) +crypto/openpgp/check: $(CHECK_DEPS) + @$(MKDIR_P) crypto/openpgp + $(CHECK) +.PHONY: crypto/openpgp/check + +crypto/rand.lo: $(go_crypto_rand_files) bufio.gox crypto/aes.gox io.gox \ + os.gox sync.gox time.gox $(BUILDPACKAGE) crypto/rand/check: $(CHECK_DEPS) @$(MKDIR_P) crypto/rand @@ -4317,14 +4424,14 @@ crypto/rc4/check: $(CHECK_DEPS) $(CHECK) .PHONY: crypto/rc4/check -crypto/ripemd160.lo: $(go_crypto_ripemd160_files) hash.gox os.gox +crypto/ripemd160.lo: $(go_crypto_ripemd160_files) crypto.gox hash.gox os.gox $(BUILDPACKAGE) crypto/ripemd160/check: $(CHECK_DEPS) @$(MKDIR_P) crypto/ripemd160 $(CHECK) .PHONY: crypto/ripemd160/check -crypto/rsa.lo: $(go_crypto_rsa_files) big.gox crypto/sha1.gox \ +crypto/rsa.lo: $(go_crypto_rsa_files) big.gox crypto.gox crypto/sha1.gox \ crypto/subtle.gox encoding/hex.gox hash.gox io.gox os.gox $(BUILDPACKAGE) crypto/rsa/check: $(CHECK_DEPS) @@ -4332,21 +4439,21 @@ crypto/rsa/check: $(CHECK_DEPS) $(CHECK) .PHONY: crypto/rsa/check -crypto/sha1.lo: $(go_crypto_sha1_files) hash.gox os.gox +crypto/sha1.lo: $(go_crypto_sha1_files) crypto.gox hash.gox os.gox $(BUILDPACKAGE) crypto/sha1/check: $(CHECK_DEPS) @$(MKDIR_P) crypto/sha1 $(CHECK) .PHONY: crypto/sha1/check -crypto/sha256.lo: $(go_crypto_sha256_files) hash.gox os.gox +crypto/sha256.lo: $(go_crypto_sha256_files) crypto.gox hash.gox os.gox $(BUILDPACKAGE) crypto/sha256/check: $(CHECK_DEPS) @$(MKDIR_P) crypto/sha256 $(CHECK) .PHONY: crypto/sha256/check -crypto/sha512.lo: $(go_crypto_sha512_files) hash.gox os.gox +crypto/sha512.lo: $(go_crypto_sha512_files) crypto.gox hash.gox os.gox $(BUILDPACKAGE) crypto/sha512/check: $(CHECK_DEPS) @$(MKDIR_P) crypto/sha512 @@ -4361,7 +4468,7 @@ crypto/subtle/check: $(CHECK_DEPS) .PHONY: crypto/subtle/check crypto/tls.lo: $(go_crypto_tls_files) big.gox bufio.gox bytes.gox \ - container/list.gox crypto/aes.gox crypto/cipher.gox \ + container/list.gox crypto.gox crypto/aes.gox crypto/cipher.gox \ crypto/elliptic.gox crypto/hmac.gox crypto/md5.gox \ crypto/rc4.gox crypto/rand.gox crypto/rsa.gox crypto/sha1.gox \ crypto/subtle.gox crypto/rsa.gox crypto/sha1.gox \ @@ -4381,8 +4488,8 @@ crypto/twofish/check: $(CHECK_DEPS) .PHONY: crypto/twofish/check crypto/x509.lo: $(go_crypto_x509_files) asn1.gox big.gox container/vector.gox \ - crypto/rsa.gox crypto/sha1.gox hash.gox os.gox strings.gox \ - time.gox + crypto.gox crypto/rsa.gox crypto/sha1.gox hash.gox os.gox \ + strings.gox time.gox $(BUILDPACKAGE) crypto/x509/check: $(CHECK_DEPS) @$(MKDIR_P) crypto/x509 @@ -4405,16 +4512,30 @@ crypto/openpgp/armor/check: $(CHECK_DEPS) $(CHECK) .PHONY: crypto/openpgp/armor/check -crypto/openpgp/error.lo: $(go_crypto_openpgp_error_files) +crypto/openpgp/error.lo: $(go_crypto_openpgp_error_files) strconv.gox $(BUILDPACKAGE) crypto/openpgp/error/check: $(CHECK_DEPS) @$(MKDIR_P) crypto/openpgp/error $(CHECK) .PHONY: crypto/openpgp/error/check -crypto/openpgp/s2k.lo: $(go_crypto_openpgp_s2k_files) crypto/md5.gox \ - crypto/openpgp/error.gox crypto/ripemd160.gox crypto/sha1.gox \ - 
crypto/sha256.gox crypto/sha512.gox hash.gox io.gox os.gox +crypto/openpgp/packet.lo: $(go_crypto_openpgp_packet_files) big.gox bytes.gox \ + compress/flate.gox compress/zlib.gox crypto.gox \ + crypto/aes.gox crypto/cast5.gox crypto/cipher.gox \ + crypto/openpgp/error.gox crypto/openpgp/s2k.gox \ + crypto/rand.gox crypto/rsa.gox crypto/sha1.gox \ + crypto/subtle.gox encoding/binary.gox hash.gox io.gox \ + io/ioutil.gox os.gox strconv.gox strings.gox + $(BUILDPACKAGE) +crypto/openpgp/packet/check: $(CHECK_DEPS) + @$(MKDIR_P) crypto/openpgp/packet + $(CHECK) +.PHONY: crypto/openpgp/packet/check + +crypto/openpgp/s2k.lo: $(go_crypto_openpgp_s2k_files) crypto.gox \ + crypto/md5.gox crypto/openpgp/error.gox crypto/ripemd160.gox \ + crypto/sha1.gox crypto/sha256.gox crypto/sha512.gox hash.gox \ + io.gox os.gox $(BUILDPACKAGE) crypto/openpgp/s2k/check: $(CHECK_DEPS) @$(MKDIR_P) crypto/openpgp/s2k @@ -4733,6 +4854,15 @@ runtime/pprof/check: $(CHECK_DEPS) $(CHECK) .PHONY: runtime/pprof/check +sync/atomic.lo: $(go_sync_atomic_files) + $(BUILDPACKAGE) +sync/atomic_c.lo: $(go_sync_atomic_c_files) sync/atomic.lo + $(LTCOMPILE) -c -o $@ $(srcdir)/go/sync/atomic/atomic.c +sync/atomic/check: $(CHECK_DEPS) + @$(MKDIR_P) sync/atomic + $(CHECK) +.PHONY: sync/atomic/check + testing/iotest.lo: $(go_testing_iotest_files) io.gox log.gox os.gox $(BUILDPACKAGE) testing/iotest/check: $(CHECK_DEPS) @@ -4777,6 +4907,8 @@ bytes.gox: bytes/bytes.lo $(BUILDGOX) cmath.gox: cmath/cmath.lo $(BUILDGOX) +crypto.gox: crypto/crypto.lo + $(BUILDGOX) ebnf.gox: ebnf/ebnf.lo $(BUILDGOX) exec.gox: exec/exec.lo @@ -4837,7 +4969,7 @@ strconv.gox: strconv/strconv.lo $(BUILDGOX) strings.gox: strings/strings.lo $(BUILDGOX) -sync.gox: sync/mutex.lo +sync.gox: sync/sync.lo $(BUILDGOX) syslog.gox: syslog/syslog.lo $(BUILDGOX) @@ -4869,10 +5001,14 @@ archive/tar.gox: archive/tar.lo archive/zip.gox: archive/zip.lo $(BUILDGOX) +compress/bzip2.gox: compress/bzip2.lo + $(BUILDGOX) compress/flate.gox: compress/flate.lo $(BUILDGOX) compress/gzip.gox: compress/gzip.lo $(BUILDGOX) +compress/lzw.gox: compress/lzw.lo + $(BUILDGOX) compress/zlib.gox: compress/zlib.lo $(BUILDGOX) @@ -4895,6 +5031,8 @@ crypto/cast5.gox: crypto/cast5.lo $(BUILDGOX) crypto/cipher.gox: crypto/cipher.lo $(BUILDGOX) +crypto/dsa.gox: crypto/dsa.lo + $(BUILDGOX) crypto/elliptic.gox: crypto/elliptic.lo $(BUILDGOX) crypto/hmac.gox: crypto/hmac.lo @@ -4905,6 +5043,8 @@ crypto/md5.gox: crypto/md5.lo $(BUILDGOX) crypto/ocsp.gox: crypto/ocsp.lo $(BUILDGOX) +crypto/openpgp.gox: crypto/openpgp.lo + $(BUILDGOX) crypto/rand.gox: crypto/rand.lo $(BUILDGOX) crypto/rc4.gox: crypto/rc4.lo @@ -4934,6 +5074,8 @@ crypto/openpgp/armor.gox: crypto/openpgp/armor.lo $(BUILDGOX) crypto/openpgp/error.gox: crypto/openpgp/error.lo $(BUILDGOX) +crypto/openpgp/packet.gox: crypto/openpgp/packet.lo + $(BUILDGOX) crypto/openpgp/s2k.gox: crypto/openpgp/s2k.lo $(BUILDGOX) @@ -5031,6 +5173,9 @@ runtime/debug.gox: runtime/debug.lo runtime/pprof.gox: runtime/pprof.lo $(BUILDGOX) +sync/atomic.gox: sync/atomic.lo + $(BUILDGOX) + testing/iotest.gox: testing/iotest.lo $(BUILDGOX) testing/quick.gox: testing/quick.lo diff --git a/libgo/go/archive/zip/reader.go b/libgo/go/archive/zip/reader.go index 579ba160294..d8d9bba60bc 100644 --- a/libgo/go/archive/zip/reader.go +++ b/libgo/go/archive/zip/reader.go @@ -42,6 +42,10 @@ type File struct { bodyOffset int64 } +func (f *File) hasDataDescriptor() bool { + return f.Flags&0x8 != 0 +} + // OpenReader will open the Zip file specified by name and return a Reader. 
func OpenReader(name string) (*Reader, os.Error) { f, err := os.Open(name, os.O_RDONLY, 0644) @@ -93,7 +97,16 @@ func (f *File) Open() (rc io.ReadCloser, err os.Error) { return } } - r := io.NewSectionReader(f.zipr, off+f.bodyOffset, int64(f.CompressedSize)) + size := int64(f.CompressedSize) + if f.hasDataDescriptor() { + if size == 0 { + // permit SectionReader to see the rest of the file + size = f.zipsize - (off + f.bodyOffset) + } else { + size += dataDescriptorLen + } + } + r := io.NewSectionReader(f.zipr, off+f.bodyOffset, size) switch f.Method { case 0: // store (no compression) rc = nopCloser{r} @@ -103,7 +116,7 @@ func (f *File) Open() (rc io.ReadCloser, err os.Error) { err = UnsupportedMethod } if rc != nil { - rc = &checksumReader{rc, crc32.NewIEEE(), f.CRC32} + rc = &checksumReader{rc, crc32.NewIEEE(), f, r} } return } @@ -111,7 +124,8 @@ func (f *File) Open() (rc io.ReadCloser, err os.Error) { type checksumReader struct { rc io.ReadCloser hash hash.Hash32 - sum uint32 + f *File + zipr io.Reader // for reading the data descriptor } func (r *checksumReader) Read(b []byte) (n int, err os.Error) { @@ -120,7 +134,12 @@ func (r *checksumReader) Read(b []byte) (n int, err os.Error) { if err != os.EOF { return } - if r.hash.Sum32() != r.sum { + if r.f.hasDataDescriptor() { + if err = readDataDescriptor(r.zipr, r.f); err != nil { + return + } + } + if r.hash.Sum32() != r.f.CRC32 { err = ChecksumError } return @@ -205,6 +224,18 @@ func readDirectoryHeader(f *File, r io.Reader) (err os.Error) { return } +func readDataDescriptor(r io.Reader, f *File) (err os.Error) { + defer func() { + if rerr, ok := recover().(os.Error); ok { + err = rerr + } + }() + read(r, &f.CRC32) + read(r, &f.CompressedSize) + read(r, &f.UncompressedSize) + return +} + func readDirectoryEnd(r io.ReaderAt, size int64) (d *directoryEnd, err os.Error) { // look for directoryEndSignature in the last 1k, then in the last 65k var b []byte diff --git a/libgo/go/archive/zip/reader_test.go b/libgo/go/archive/zip/reader_test.go index 3c24f1467cf..72e8cccfd47 100644 --- a/libgo/go/archive/zip/reader_test.go +++ b/libgo/go/archive/zip/reader_test.go @@ -52,6 +52,15 @@ var tests = []ZipTest{ }, {Name: "readme.zip"}, {Name: "readme.notzip", Error: FormatError}, + { + Name: "dd.zip", + File: []ZipTestFile{ + { + Name: "filename", + Content: []byte("This is a test textfile.\n"), + }, + }, + }, } func TestReader(t *testing.T) { @@ -102,16 +111,18 @@ func readTestZip(t *testing.T, zt ZipTest) { } // test invalid checksum - z.File[0].CRC32++ // invalidate - r, err := z.File[0].Open() - if err != nil { - t.Error(err) - return - } - var b bytes.Buffer - _, err = io.Copy(&b, r) - if err != ChecksumError { - t.Errorf("%s: copy error=%v, want %v", z.File[0].Name, err, ChecksumError) + if !z.File[0].hasDataDescriptor() { // skip test when crc32 in dd + z.File[0].CRC32++ // invalidate + r, err := z.File[0].Open() + if err != nil { + t.Error(err) + return + } + var b bytes.Buffer + _, err = io.Copy(&b, r) + if err != ChecksumError { + t.Errorf("%s: copy error=%v, want %v", z.File[0].Name, err, ChecksumError) + } } } diff --git a/libgo/go/archive/zip/struct.go b/libgo/go/archive/zip/struct.go index 8a8c727d474..bfe0aae2e9a 100644 --- a/libgo/go/archive/zip/struct.go +++ b/libgo/go/archive/zip/struct.go @@ -4,6 +4,7 @@ const ( fileHeaderSignature = 0x04034b50 directoryHeaderSignature = 0x02014b50 directoryEndSignature = 0x06054b50 + dataDescriptorLen = 12 ) type FileHeader struct { diff --git a/libgo/go/asn1/marshal.go 
b/libgo/go/asn1/marshal.go index 24548714b21..57b8f20ba7f 100644 --- a/libgo/go/asn1/marshal.go +++ b/libgo/go/asn1/marshal.go @@ -317,7 +317,7 @@ func marshalBody(out *forkableWriter, value reflect.Value, params fieldParameter switch v := value.(type) { case *reflect.BoolValue: if v.Get() { - return out.WriteByte(1) + return out.WriteByte(255) } else { return out.WriteByte(0) } diff --git a/libgo/go/bufio/bufio.go b/libgo/go/bufio/bufio.go index c13456a6326..eae5c5ce975 100644 --- a/libgo/go/bufio/bufio.go +++ b/libgo/go/bufio/bufio.go @@ -286,7 +286,8 @@ func (b *Reader) ReadSlice(delim byte) (line []byte, err os.Error) { // returning a slice containing the data up to and including the delimiter. // If ReadBytes encounters an error before finding a delimiter, // it returns the data read before the error and the error itself (often os.EOF). -// ReadBytes returns err != nil if and only if line does not end in delim. +// ReadBytes returns err != nil if and only if the returned data does not end in +// delim. func (b *Reader) ReadBytes(delim byte) (line []byte, err os.Error) { // Use ReadSlice to look for array, // accumulating full buffers. @@ -332,7 +333,8 @@ func (b *Reader) ReadBytes(delim byte) (line []byte, err os.Error) { // returning a string containing the data up to and including the delimiter. // If ReadString encounters an error before finding a delimiter, // it returns the data read before the error and the error itself (often os.EOF). -// ReadString returns err != nil if and only if line does not end in delim. +// ReadString returns err != nil if and only if the returned data does not end in +// delim. func (b *Reader) ReadString(delim byte) (line string, err os.Error) { bytes, e := b.ReadBytes(delim) return string(bytes), e @@ -383,6 +385,9 @@ func (b *Writer) Flush() os.Error { if b.err != nil { return b.err } + if b.n == 0 { + return nil + } n, e := b.wr.Write(b.buf[0:b.n]) if n < b.n && e == nil { e = io.ErrShortWrite diff --git a/libgo/go/bytes/buffer.go b/libgo/go/bytes/buffer.go index 62cf82810e7..1acd4e05cae 100644 --- a/libgo/go/bytes/buffer.go +++ b/libgo/go/bytes/buffer.go @@ -154,17 +154,20 @@ func (b *Buffer) ReadFrom(r io.Reader) (n int64, err os.Error) { } // WriteTo writes data to w until the buffer is drained or an error -// occurs. The return value n is the number of bytes written. +// occurs. The return value n is the number of bytes written; it always +// fits into an int, but it is int64 to match the io.WriterTo interface. // Any error encountered during the write is also returned. func (b *Buffer) WriteTo(w io.Writer) (n int64, err os.Error) { b.lastRead = opInvalid - for b.off < len(b.buf) { + if b.off < len(b.buf) { m, e := w.Write(b.buf[b.off:]) - n += int64(m) b.off += m + n = int64(m) if e != nil { return n, e } + // otherwise all bytes were written, by definition of + // Write method in io.Writer } // Buffer is now empty; reset. b.Truncate(0) @@ -301,6 +304,36 @@ func (b *Buffer) UnreadByte() os.Error { return nil } +// ReadBytes reads until the first occurrence of delim in the input, +// returning a slice containing the data up to and including the delimiter. +// If ReadBytes encounters an error before finding a delimiter, +// it returns the data read before the error and the error itself (often os.EOF). +// ReadBytes returns err != nil if and only if the returned data does not end in +// delim. 
+func (b *Buffer) ReadBytes(delim byte) (line []byte, err os.Error) { + i := IndexByte(b.buf[b.off:], delim) + size := i + 1 + if i < 0 { + size = len(b.buf) - b.off + err = os.EOF + } + line = make([]byte, size) + copy(line, b.buf[b.off:]) + b.off += size + return +} + +// ReadString reads until the first occurrence of delim in the input, +// returning a string containing the data up to and including the delimiter. +// If ReadString encounters an error before finding a delimiter, +// it returns the data read before the error and the error itself (often os.EOF). +// ReadString returns err != nil if and only if the returned data does not end +// in delim. +func (b *Buffer) ReadString(delim byte) (line string, err os.Error) { + bytes, err := b.ReadBytes(delim) + return string(bytes), err +} + // NewBuffer creates and initializes a new Buffer using buf as its initial // contents. It is intended to prepare a Buffer to read existing data. It // can also be used to size the internal buffer for writing. To do that, diff --git a/libgo/go/bytes/buffer_test.go b/libgo/go/bytes/buffer_test.go index 509793d24a8..56a2d927539 100644 --- a/libgo/go/bytes/buffer_test.go +++ b/libgo/go/bytes/buffer_test.go @@ -6,6 +6,7 @@ package bytes_test import ( . "bytes" + "os" "rand" "testing" "utf8" @@ -238,7 +239,7 @@ func TestMixedReadsAndWrites(t *testing.T) { func TestNil(t *testing.T) { var b *Buffer if b.String() != "" { - t.Errorf("expcted ; got %q", b.String()) + t.Errorf("expected ; got %q", b.String()) } } @@ -347,3 +348,38 @@ func TestNext(t *testing.T) { } } } + +var readBytesTests = []struct { + buffer string + delim byte + expected []string + err os.Error +}{ + {"", 0, []string{""}, os.EOF}, + {"a\x00", 0, []string{"a\x00"}, nil}, + {"abbbaaaba", 'b', []string{"ab", "b", "b", "aaab"}, nil}, + {"hello\x01world", 1, []string{"hello\x01"}, nil}, + {"foo\nbar", 0, []string{"foo\nbar"}, os.EOF}, + {"alpha\nbeta\ngamma\n", '\n', []string{"alpha\n", "beta\n", "gamma\n"}, nil}, + {"alpha\nbeta\ngamma", '\n', []string{"alpha\n", "beta\n", "gamma"}, os.EOF}, +} + +func TestReadBytes(t *testing.T) { + for _, test := range readBytesTests { + buf := NewBufferString(test.buffer) + var err os.Error + for _, expected := range test.expected { + var bytes []byte + bytes, err = buf.ReadBytes(test.delim) + if string(bytes) != expected { + t.Errorf("expected %q, got %q", expected, bytes) + } + if err != nil { + break + } + } + if err != test.err { + t.Errorf("expected error %v, got %v", test.err, err) + } + } +} diff --git a/libgo/go/compress/bzip2/bit_reader.go b/libgo/go/compress/bzip2/bit_reader.go new file mode 100644 index 00000000000..50f0ec836b4 --- /dev/null +++ b/libgo/go/compress/bzip2/bit_reader.go @@ -0,0 +1,88 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bzip2 + +import ( + "bufio" + "io" + "os" +) + +// bitReader wraps an io.Reader and provides the ability to read values, +// bit-by-bit, from it. Its Read* methods don't return the usual os.Error +// because the error handling was verbose. Instead, any error is kept and can +// be checked afterwards. +type bitReader struct { + r byteReader + n uint64 + bits uint + err os.Error +} + +// bitReader needs to read bytes from an io.Reader. We attempt to cast the +// given io.Reader to this interface and, if it doesn't already fit, we wrap in +// a bufio.Reader. 
+type byteReader interface { + ReadByte() (byte, os.Error) +} + +func newBitReader(r io.Reader) bitReader { + byter, ok := r.(byteReader) + if !ok { + byter = bufio.NewReader(r) + } + return bitReader{r: byter} +} + +// ReadBits64 reads the given number of bits and returns them in the +// least-significant part of a uint64. In the event of an error, it returns 0 +// and the error can be obtained by calling Error(). +func (br *bitReader) ReadBits64(bits uint) (n uint64) { + for bits > br.bits { + b, err := br.r.ReadByte() + if err == os.EOF { + err = io.ErrUnexpectedEOF + } + if err != nil { + br.err = err + return 0 + } + br.n <<= 8 + br.n |= uint64(b) + br.bits += 8 + } + + // br.n looks like this (assuming that br.bits = 14 and bits = 6): + // Bit: 111111 + // 5432109876543210 + // + // (6 bits, the desired output) + // |-----| + // V V + // 0101101101001110 + // ^ ^ + // |------------| + // br.bits (num valid bits) + // + // This the next line right shifts the desired bits into the + // least-significant places and masks off anything above. + n = (br.n >> (br.bits - bits)) & ((1 << bits) - 1) + br.bits -= bits + return +} + +func (br *bitReader) ReadBits(bits uint) (n int) { + n64 := br.ReadBits64(bits) + return int(n64) +} + +func (br *bitReader) ReadBit() bool { + n := br.ReadBits(1) + return n != 0 +} + +func (br *bitReader) Error() os.Error { + return br.err +} diff --git a/libgo/go/compress/bzip2/bzip2.go b/libgo/go/compress/bzip2/bzip2.go new file mode 100644 index 00000000000..9e97edec175 --- /dev/null +++ b/libgo/go/compress/bzip2/bzip2.go @@ -0,0 +1,390 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package bzip2 implements bzip2 decompression. +package bzip2 + +import ( + "io" + "os" +) + +// There's no RFC for bzip2. I used the Wikipedia page for reference and a lot +// of guessing: http://en.wikipedia.org/wiki/Bzip2 +// The source code to pyflate was useful for debugging: +// http://www.paul.sladen.org/projects/pyflate + +// A StructuralError is returned when the bzip2 data is found to be +// syntactically invalid. +type StructuralError string + +func (s StructuralError) String() string { + return "bzip2 data invalid: " + string(s) +} + +// A reader decompresses bzip2 compressed data. +type reader struct { + br bitReader + setupDone bool // true if we have parsed the bzip2 header. + blockSize int // blockSize in bytes, i.e. 900 * 1024. + eof bool + buf []byte // stores Burrows-Wheeler transformed data. + c [256]uint // the `C' array for the inverse BWT. + tt []uint32 // mirrors the `tt' array in the bzip2 source and contains the P array in the upper 24 bits. + tPos uint32 // Index of the next output byte in tt. + + preRLE []uint32 // contains the RLE data still to be processed. + preRLEUsed int // number of entries of preRLE used. + lastByte int // the last byte value seen. + byteRepeats uint // the number of repeats of lastByte seen. + repeats uint // the number of copies of lastByte to output. +} + +// NewReader returns an io.Reader which decompresses bzip2 data from r. +func NewReader(r io.Reader) io.Reader { + bz2 := new(reader) + bz2.br = newBitReader(r) + return bz2 +} + +const bzip2FileMagic = 0x425a // "BZ" +const bzip2BlockMagic = 0x314159265359 +const bzip2FinalMagic = 0x177245385090 + +// setup parses the bzip2 header. 
+func (bz2 *reader) setup() os.Error { + br := &bz2.br + + magic := br.ReadBits(16) + if magic != bzip2FileMagic { + return StructuralError("bad magic value") + } + + t := br.ReadBits(8) + if t != 'h' { + return StructuralError("non-Huffman entropy encoding") + } + + level := br.ReadBits(8) + if level < '1' || level > '9' { + return StructuralError("invalid compression level") + } + + bz2.blockSize = 100 * 1024 * (int(level) - '0') + bz2.tt = make([]uint32, bz2.blockSize) + return nil +} + +func (bz2 *reader) Read(buf []byte) (n int, err os.Error) { + if bz2.eof { + return 0, os.EOF + } + + if !bz2.setupDone { + err = bz2.setup() + brErr := bz2.br.Error() + if brErr != nil { + err = brErr + } + if err != nil { + return 0, err + } + bz2.setupDone = true + } + + n, err = bz2.read(buf) + brErr := bz2.br.Error() + if brErr != nil { + err = brErr + } + return +} + +func (bz2 *reader) read(buf []byte) (n int, err os.Error) { + // bzip2 is a block based compressor, except that it has a run-length + // preprocessing step. The block based nature means that we can + // preallocate fixed-size buffers and reuse them. However, the RLE + // preprocessing would require allocating huge buffers to store the + // maximum expansion. Thus we process blocks all at once, except for + // the RLE which we decompress as required. + + for (bz2.repeats > 0 || bz2.preRLEUsed < len(bz2.preRLE)) && n < len(buf) { + // We have RLE data pending. + + // The run-length encoding works like this: + // Any sequence of four equal bytes is followed by a length + // byte which contains the number of repeats of that byte to + // include. (The number of repeats can be zero.) Because we are + // decompressing on-demand our state is kept in the reader + // object. + + if bz2.repeats > 0 { + buf[n] = byte(bz2.lastByte) + n++ + bz2.repeats-- + if bz2.repeats == 0 { + bz2.lastByte = -1 + } + continue + } + + bz2.tPos = bz2.preRLE[bz2.tPos] + b := byte(bz2.tPos) + bz2.tPos >>= 8 + bz2.preRLEUsed++ + + if bz2.byteRepeats == 3 { + bz2.repeats = uint(b) + bz2.byteRepeats = 0 + continue + } + + if bz2.lastByte == int(b) { + bz2.byteRepeats++ + } else { + bz2.byteRepeats = 0 + } + bz2.lastByte = int(b) + + buf[n] = b + n++ + } + + if n > 0 { + return + } + + // No RLE data is pending so we need to read a block. + + br := &bz2.br + magic := br.ReadBits64(48) + if magic == bzip2FinalMagic { + br.ReadBits64(32) // ignored CRC + bz2.eof = true + return 0, os.EOF + } else if magic != bzip2BlockMagic { + return 0, StructuralError("bad magic value found") + } + + err = bz2.readBlock() + if err != nil { + return 0, err + } + + return bz2.read(buf) +} + +// readBlock reads a bzip2 block. The magic number should already have been consumed. +func (bz2 *reader) readBlock() (err os.Error) { + br := &bz2.br + br.ReadBits64(32) // skip checksum. TODO: check it if we can figure out what it is. + randomized := br.ReadBits(1) + if randomized != 0 { + return StructuralError("deprecated randomized files") + } + origPtr := uint(br.ReadBits(24)) + + // If not every byte value is used in the block (i.e., it's text) then + // the symbol set is reduced. The symbols used are stored as a + // two-level, 16x16 bitmap. 
+ symbolRangeUsedBitmap := br.ReadBits(16) + symbolPresent := make([]bool, 256) + numSymbols := 0 + for symRange := uint(0); symRange < 16; symRange++ { + if symbolRangeUsedBitmap&(1<<(15-symRange)) != 0 { + bits := br.ReadBits(16) + for symbol := uint(0); symbol < 16; symbol++ { + if bits&(1<<(15-symbol)) != 0 { + symbolPresent[16*symRange+symbol] = true + numSymbols++ + } + } + } + } + + // A block uses between two and six different Huffman trees. + numHuffmanTrees := br.ReadBits(3) + if numHuffmanTrees < 2 || numHuffmanTrees > 6 { + return StructuralError("invalid number of Huffman trees") + } + + // The Huffman tree can switch every 50 symbols so there's a list of + // tree indexes telling us which tree to use for each 50 symbol block. + numSelectors := br.ReadBits(15) + treeIndexes := make([]uint8, numSelectors) + + // The tree indexes are move-to-front transformed and stored as unary + // numbers. + mtfTreeDecoder := newMTFDecoderWithRange(numHuffmanTrees) + for i := range treeIndexes { + c := 0 + for { + inc := br.ReadBits(1) + if inc == 0 { + break + } + c++ + } + if c >= numHuffmanTrees { + return StructuralError("tree index too large") + } + treeIndexes[i] = uint8(mtfTreeDecoder.Decode(c)) + } + + // The list of symbols for the move-to-front transform is taken from + // the previously decoded symbol bitmap. + symbols := make([]byte, numSymbols) + nextSymbol := 0 + for i := 0; i < 256; i++ { + if symbolPresent[i] { + symbols[nextSymbol] = byte(i) + nextSymbol++ + } + } + mtf := newMTFDecoder(symbols) + + numSymbols += 2 // to account for RUNA and RUNB symbols + huffmanTrees := make([]huffmanTree, numHuffmanTrees) + + // Now we decode the arrays of code-lengths for each tree. + lengths := make([]uint8, numSymbols) + for i := 0; i < numHuffmanTrees; i++ { + // The code lengths are delta encoded from a 5-bit base value. + length := br.ReadBits(5) + for j := 0; j < numSymbols; j++ { + for { + if !br.ReadBit() { + break + } + if br.ReadBit() { + length-- + } else { + length++ + } + } + if length < 0 || length > 20 { + return StructuralError("Huffman length out of range") + } + lengths[j] = uint8(length) + } + huffmanTrees[i], err = newHuffmanTree(lengths) + if err != nil { + return err + } + } + + selectorIndex := 1 // the next tree index to use + currentHuffmanTree := huffmanTrees[treeIndexes[0]] + bufIndex := 0 // indexes bz2.buf, the output buffer. + // The output of the move-to-front transform is run-length encoded and + // we merge the decoding into the Huffman parsing loop. These two + // variables accumulate the repeat count. See the Wikipedia page for + // details. + repeat := 0 + repeat_power := 0 + + // The `C' array (used by the inverse BWT) needs to be zero initialised. + for i := range bz2.c { + bz2.c[i] = 0 + } + + decoded := 0 // counts the number of symbols decoded by the current tree. + for { + if decoded == 50 { + currentHuffmanTree = huffmanTrees[treeIndexes[selectorIndex]] + selectorIndex++ + decoded = 0 + } + + v := currentHuffmanTree.Decode(br) + decoded++ + + if v < 2 { + // This is either the RUNA or RUNB symbol. + if repeat == 0 { + repeat_power = 1 + } + repeat += repeat_power << v + repeat_power <<= 1 + + // This limit of 2 million comes from the bzip2 source + // code. It prevents repeat from overflowing. + if repeat > 2*1024*1024 { + return StructuralError("repeat count too large") + } + continue + } + + if repeat > 0 { + // We have decoded a complete run-length so we need to + // replicate the last output symbol. 
+ for i := 0; i < repeat; i++ { + b := byte(mtf.First()) + bz2.tt[bufIndex] = uint32(b) + bz2.c[b]++ + bufIndex++ + } + repeat = 0 + } + + if int(v) == numSymbols-1 { + // This is the EOF symbol. Because it's always at the + // end of the move-to-front list, and nevers gets moved + // to the front, it has this unique value. + break + } + + // Since two metasymbols (RUNA and RUNB) have values 0 and 1, + // one would expect |v-2| to be passed to the MTF decoder. + // However, the front of the MTF list is never referenced as 0, + // it's always referenced with a run-length of 1. Thus 0 + // doesn't need to be encoded and we have |v-1| in the next + // line. + b := byte(mtf.Decode(int(v - 1))) + bz2.tt[bufIndex] = uint32(b) + bz2.c[b]++ + bufIndex++ + } + + if origPtr >= uint(bufIndex) { + return StructuralError("origPtr out of bounds") + } + + // We have completed the entropy decoding. Now we can perform the + // inverse BWT and setup the RLE buffer. + bz2.preRLE = bz2.tt[:bufIndex] + bz2.preRLEUsed = 0 + bz2.tPos = inverseBWT(bz2.preRLE, origPtr, bz2.c[:]) + bz2.lastByte = -1 + bz2.byteRepeats = 0 + bz2.repeats = 0 + + return nil +} + +// inverseBWT implements the inverse Burrows-Wheeler transform as described in +// http://www.hpl.hp.com/techreports/Compaq-DEC/SRC-RR-124.pdf, section 4.2. +// In that document, origPtr is called `I' and c is the `C' array after the +// first pass over the data. It's an argument here because we merge the first +// pass with the Huffman decoding. +// +// This also implements the `single array' method from the bzip2 source code +// which leaves the output, still shuffled, in the bottom 8 bits of tt with the +// index of the next byte in the top 24-bits. The index of the first byte is +// returned. +func inverseBWT(tt []uint32, origPtr uint, c []uint) uint32 { + sum := uint(0) + for i := 0; i < 256; i++ { + sum += c[i] + c[i] = sum - c[i] + } + + for i := range tt { + b := tt[i] & 0xff + tt[c[b]] |= uint32(i) << 8 + c[b]++ + } + + return tt[origPtr] >> 8 +} diff --git a/libgo/go/compress/bzip2/bzip2_test.go b/libgo/go/compress/bzip2/bzip2_test.go new file mode 100644 index 00000000000..156eea83ff2 --- /dev/null +++ b/libgo/go/compress/bzip2/bzip2_test.go @@ -0,0 +1,158 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package bzip2 + +import ( + "bytes" + "encoding/hex" + "io" + "io/ioutil" + "os" + "testing" +) + +func TestBitReader(t *testing.T) { + buf := bytes.NewBuffer([]byte{0xaa}) + br := newBitReader(buf) + if n := br.ReadBits(1); n != 1 { + t.Errorf("read 1 wrong") + } + if n := br.ReadBits(1); n != 0 { + t.Errorf("read 2 wrong") + } + if n := br.ReadBits(1); n != 1 { + t.Errorf("read 3 wrong") + } + if n := br.ReadBits(1); n != 0 { + t.Errorf("read 4 wrong") + } +} + +func TestBitReaderLarge(t *testing.T) { + buf := bytes.NewBuffer([]byte{0x12, 0x34, 0x56, 0x78}) + br := newBitReader(buf) + if n := br.ReadBits(32); n != 0x12345678 { + t.Errorf("got: %x want: %x", n, 0x12345678) + } +} + +func readerFromHex(s string) io.Reader { + data, err := hex.DecodeString(s) + if err != nil { + panic("readerFromHex: bad input") + } + return bytes.NewBuffer(data) +} + +func decompressHex(s string) (out []byte, err os.Error) { + r := NewReader(readerFromHex(s)) + return ioutil.ReadAll(r) +} + +func TestHelloWorldBZ2(t *testing.T) { + out, err := decompressHex(helloWorldBZ2Hex) + if err != nil { + t.Errorf("error from Read: %s", err) + return + } + + if !bytes.Equal(helloWorld, out) { + t.Errorf("got %x, want %x", out, helloWorld) + } +} + +func testZeros(t *testing.T, inHex string, n int) { + out, err := decompressHex(inHex) + if err != nil { + t.Errorf("error from Read: %s", err) + return + } + + expected := make([]byte, n) + + if !bytes.Equal(expected, out) { + allZeros := true + for _, b := range out { + if b != 0 { + allZeros = false + break + } + } + t.Errorf("incorrect result, got %d bytes (allZeros: %t)", len(out), allZeros) + } +} + +func Test32Zeros(t *testing.T) { + testZeros(t, thirtyTwoZerosBZ2Hex, 32) +} + +func Test1MBZeros(t *testing.T) { + testZeros(t, oneMBZerosBZ2Hex, 1024*1024) +} + +func testRandomData(t *testing.T, compressedHex, uncompressedHex string) { + out, err := decompressHex(compressedHex) + if err != nil { + t.Errorf("error from Read: %s", err) + return + } + + expected, _ := hex.DecodeString(uncompressedHex) + + if !bytes.Equal(out, expected) { + t.Errorf("incorrect result\ngot: %x\nwant: %x", out, expected) + } +} + +func TestRandomData1(t *testing.T) { + testRandomData(t, randBZ2Hex, randHex) +} + +func TestRandomData2(t *testing.T) { + // This test involves several repeated bytes in the output, but they + // should trigger RLE decoding. + testRandomData(t, rand2BZ2Hex, rand2Hex) +} + +func TestRandomData3(t *testing.T) { + // This test uses the full range of symbols. 
+ testRandomData(t, rand3BZ2Hex, rand3Hex) +} + +func Test1MBSawtooth(t *testing.T) { + out, err := decompressHex(oneMBSawtoothBZ2Hex) + if err != nil { + t.Errorf("error from Read: %s", err) + return + } + + expected := make([]byte, 1024*1024) + + for i := range expected { + expected[i] = byte(i) + } + + if !bytes.Equal(out, expected) { + t.Error("incorrect result") + } +} + +const helloWorldBZ2Hex = "425a68393141592653594eece83600000251800010400006449080200031064c4101a7a9a580bb9431f8bb9229c28482776741b0" + +var helloWorld = []byte("hello world\n") + +const thirtyTwoZerosBZ2Hex = "425a6839314159265359b5aa5098000000600040000004200021008283177245385090b5aa5098" +const oneMBZerosBZ2Hex = "425a683931415926535938571ce50008084000c0040008200030cc0529a60806c4201e2ee48a70a12070ae39ca" + +const randBZ2Hex = "425a6839314159265359905d990d0001957fffffffffffafffffffffffffffffbfff6fffdfffffffffffffffffffffffffffffc002b6dd75676ed5b77720098320d11a64626981323d4da47a83131a13d09e8040f534cd4f4d27a464d193008cd09804601347a980026350c9886234d36864193d1351b44c136919e90340d26127a4cd264c32023009898981310c0344c340027a8303427a99a04c00003534c230d034f5006468d268cf54d36a3009a69a62626261311b40026013d34201a6934c9a604c98ca6c8460989fa9346234d30d3469a2604fd4131a7aa6d0046043d4c62098479269e89e835190d018d4c046001a11e801a0264792321932308c43a130688c260d46686804cd01a9e80981193684c6a68c00000004c4c20c04627a4c0000260003400d04c0681a01334026009a6f48041466132581ec5212b081d96b0effc16543e2228b052fcd30f2567ee8d970e0f10aabca68dd8270591c376cfc1baae0dba00aaff2d6caf6b211322c997cc18eaee5927f75185336bf907021324c71626c1dd20e22b9b0977f05d0f901eaa51db9fbaf7c603b4c87bc82890e6dd7e61d0079e27ec050dd788fd958152061cd01e222f9547cb9efc465d775b6fc98bac7d387bffd151ae09dadf19494f7a638e2eae58e550faba5fe6820ea520eb986096de4e527d80def3ba625e71fbefdcf7e7844e0a25d29b52dcd1344fca083737d42692aab38d230485f3c8ed54c2ed31f15cf0270c8143765b10b92157233fa1dfe0d7ce8ffe70b8b8f7250071701dfe9f1c94de362c9031455951c93eb098a6b50ee45c6131fefc3b6f9643e21f4adc59497138e246f5c57d834aa67c4f10d8bd8b3908d8130dd7388409c299a268eab3664fa4907c5c31574874bd8d388a4ab22b339660804e53e1b8d05867d40e3082560608d35d5d2c6054e8bab23da28f61f83efd41d25529ad6ea15fb50505cacfabb0902166427354ca3830a2c8415f21b19e592690fbe447020d685a4bcd16ecc4ff1a1c0e572627d0ef6265c008a43fc243240541061ed7840606be466d1c0dac2c53250ed567507d926c844154560d631960c65e15157829b2c7f16859f111a3a8cb72bf24ffa57a680c3be67b1be67c8dd8aea73ac2437a78df5b686d427080ebc01bd30b71a49f6ea31dc0f08e4849e38face96717690239538bc08b6cc5aa8d467cb9c36aa83d40ac7e58bddbfa185b22065e89a86c0145569d9e23726651aec49e31588d70f40fe9a4449dcf4f89eac220171e9c938e803dc195679651004b79ad33cc0c13aeeba5941b33ffeeb8fbe16e76c7811445c67b4269c90479433ddf9e8ed1d00c166b6c17217fb22c3ef1b0c1c7e28e185446a111c37f1ea6c07a59fbcc6546ecc6968d36ba58bc5489a5640647e426b0c39350cb6f07d5dc7a717648c4ec7f841467597ae1f65f408fd2d9940a4b1b860b3c9ae351dcae0b4425f7e8538710f2e40b7f70d13b51ac05ccc6ecda8264a88cad2d721d18132a9b9110a9e759c2483c77dcefc7e464ec88588174cb0c9abff93230ea0bed8decdd8ed8bfe2b5df0a253803678df04fab44c03b9ab7cc97d6e6d6fd0c4c840ce0efc498436f453bbb181603459471f2b588724592b222ec990614db530e10cadd84705621cfdd9261fa44a5f5806a2d74b575056b3c915255c65678f9c16e6dc00a99180fef1a840aff0e842ac02731080cc92782538360a60a727991013984da4fad95f79d5030677b7528d076b2483685fca4429edf804682fdc110dfc2f7c30e23e20a72e039108a0ad6fdee2f76985a4b4be4f5afc6101bf9d5042b657a05dc914e1424241766434" +const randHex = 
"c95138082bdf2b9bfa5b1072b23f729735d42c785eeb94320fb14c265b9c2ca421d01a3db986df1ac2acde5a0e6bf955d6f95e61261540905928e195f1a66644cc7f37281744fff4dc6df35566a494c41a8167151950eb74f5fc45f85ad0e5ed28b49adfe218aa7ec1707e8e1d55825f61f72beda3b4c006b8c9188d7336a5d875329b1b58c27cc4e89ecbae02c7712400c39dd131d2c6de82e2863da51d472bdfb21ecce62cc9cf769ed28aedc7583d755da45a0d90874bda269dd53283a9bdfd05f95fc8e9a304bb338ea1a2111894678c18134f17d31a15d9bfc1237894650f3e715e2548639ecbddb845cfe4a46a7b3a3c540f48629488e8c869f1e9f3f4c552243a8105b20eb8e264994214349dae83b165fd6c2a5b8e83fce09fc0a80d3281c8d53a9a08095bd19cbc1388df23975646ed259e003d39261ee68cbece8bcf32971f7fe7e588e8ba8f5e8597909abaea693836a79a1964050ed910a45a0f13a58cd2d3ae18992c5b23082407fd920d0bf01e33118a017bb5e39f44931346845af52128f7965206759433a346034ea481671f501280067567619f5ecef6cded077f92ed7f3b3ce8e308c80f34ba06939e9303f91b4318c8c1dd4cc223c1f057ac0c91211c629cd30e46ee9ec1d9fd493086b7bc2bc83e33f08749a5d430b0ed4f79d70f481940c9b0930b16321886a0df4fa5a1465d5208c7d3494a7987d9a5e42aa256f0c9523947f8318d0ef0af3d59a45cfc2418d0785c9a548b32b81e7de18be7d55a69a4c156bbb3d7579c0ac8e9c72b24646e54b0d0e8725f8f49fb44ae3c6b9d0287be118586255a90a4a83483ed0328518037e52aa959c5748ed83e13023e532306be98b8288da306bbb040bcf5d92176f84a9306dc6b274b040370b61d71fde58dd6d20e6fee348eae0c54bd0a5a487b2d005f329794f2a902c296af0a4c1f638f63292a1fa18e006c1b1838636f4de71c73635b25660d32e88a0917e1a5677f6a02ca65585b82cbd99fb4badbfa97a585da1e6cadf6737b4ec6ca33f245d66ee6a9fae6785d69b003c17b9fc6ec34fe5824ab8caae5e8e14dc6f9e116e7bf4a60c04388783c8ae929e1b46b3ef3bbe81b38f2fa6da771bf39dfba2374d3d2ed356b8e2c42081d885a91a3afb2f31986d2f9873354c48cf5448492c32e62385af423aa4f83db6d1b2669650379a1134b0a04cbca0862d6f9743c791cbb527d36cd5d1f0fc7f503831c8bd1b7a0ef8ae1a5ed1155dfdd9e32b6bb33138112d3d476b802179cb85a2a6c354ccfed2f31604fbd8d6ec4baf9f1c8454f72c6588c06a7df3178c43a6970bfa02dd6f74cb5ec3b63f9eddaa17db5cbf27fac6de8e57c384afd0954179f7b5690c3bee42abc4fa79b4b12101a9cf5f0b9aecdda945def0bd04163237247d3539850e123fe18139f316fa0256d5bd2faa8" + +const oneMBSawtoothBZ2Hex = 
"425a683931415926535971931ea00006ddffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe007de00000000000000024c00130001300000000000000000000000000000000000000000000000000000000126000980009800000000000000000000000000000000000000000000000000000000930004c0004c000000000000000000000000000000000000000000000000000000004980026000260000000000000000000000000000000000000000000000000000000009aaaaa0000000000000000000000000000000000000000000000000000000000000000498002600026000000000000000000000000000000000000000000000000000000007fc42271980d044c0a822607411304a08982d044c1a82260f411308a08984d044c2a82261741130ca08986d044c3a82261f411310a08988d044c4a822627411314a0898ad044c5a82262f411318a0898cd044c6a82263741131ca0898ed044c7a82263f411320a08990d044c8a822647411324a08992d044c9a82264f411328a08994d044caa82265741132ca08996d044cba82265f411330a08998d044cca822667411334a0899ad044cda82266f411338a0899cd044cea82267741133ca0899ed044cfa82267f411340a089a0d044d0a822687411344a089a2d044d1a82268f411348a089a4d044d2a82269741134ca089a6d044d3a82269f411350a089a8d044d4a8226a7411354a089aad044d5a8226af411358a089acd044d6a8226b741135ca089aed044d7a8226bf411360a089b0d044d8a8226c7411364a089b2d044d9a8226cf411368a089b4d044daa8226d741136ca089b6d044dba8226df411370a089b8d044dca8226e7411374a089bad044dda8226ef411378a089bcd044dea8226f741137ca089bed044dfa8226ff411380a089c0d044e0a822707411384a089c2d044e1a82270f411388a089c4d044e2a82271741138ca089c59089c69089c71089c79089c81089c89089c91089c99089ca1089ca9089cb1089cb9089cc1089cc9089cd1089cd9089ce1089ce9089cf1089cf9089d01089d09089d11089d19089d21089d29089d31089d39089d41089d49089d51089d59089d61089d69089d71089d79089d81089d89089d91089d99089da1089da9089db1089db9089dc1089dc9089dd1089dd9089de1089de9089df1089df9089e01089e09089e11089e19089e21089e29089e31089e39089e41089e49089e51089e59089e61089e69089e71089e79089e81089e89089e91089e99089ea1089ea9089eb1089eb9089ec1089ec9089ed1089ed9089ee1089ee9089ef1089ef9089f01089f09089f11089f19089f21089f29089f31089f39089f41089f49089f51089f59089f61089f69089f71089f79089f81089f89089f91089f99089fa1089fa9089fb1089fb9089fc1089fc9089fd1089fd9089fe1089fe9089ff1089ff98a0ac9329acf23ba884804fdd3ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0034f800000000000024c00130001300000000000000000000000000000000000000000000000000000000126000980009800000000000000000000000000000000000000000000000000000000930004c0004c000000000000000000000000000000000000000000000000000000004980026000260000000000000000000000000000000000000000000000000000000024c0013000130000000000000000000000000000000000000000000000000000000002955540000000000000000000000000000000000000000000000000000000000000001ff108c00846024230221181908c108460a4230621183908c20846124230a21185908c308461a4230e21187908c40846224231221189908c508462a423162118b908c60846324231a2118d908c708463a4231e2118f908c80846424232221191908c908464a4232621193908ca0846524232a21195908cb08465a4232e21197908cc0846624233221199908cd08466a423362119b908ce0846724233a2119d908cf08467a4233e2119f908d008468242342211a1908d108468a42346211a3908d20846924234a211a5908d308469a4234e211a7908d40846a242352211a9908d50846aa42356211ab908d60846b24235a211ad908d70846ba4235e211af908d80846c242362211b1908d90846ca42366211b3908da0846d24236a211b5908db0846da4236e211b7908dc0846e242372211b9908dd0846ea42376211bb908de0846f24237a211bd908df0846fa4237e211bf908e008470242382211c1908e108470a42386211c3908e20847124238a211c5908e2f8c211c6c8471d211c7c84721211c8c84725211c9c84729211cac8472d211cbc84731211ccc84735211cdc84739211cec8473d211cfc84741211d0c84745211d1c84749211d2c8474d211d3c84751211d4c84755211d5c84
759211d6c8475d211d7c84761211d8c84765211d9c84769211dac8476d211dbc84771211dcc84775211ddc84779211dec8477d211dfc84781211e0c84785211e1c84789211e2c8478d211e3c84791211e4c84795211e5c84799211e6c8479d211e7c847a1211e8c847a5211e9c847a9211eac847ad211ebc847b1211ecc847b5211edc847b9211eec847bd211efc847c1211f0c847c5211f1c847c9211f2c847cd211f3c847d1211f4c847d5211f5c847d9211f6c847dd211f7c847e1211f8c847e5211f9c847e9211fac847ed211fbc847f1211fcc847f5211fdc847f9211fec847fd211ff8bb9229c284803a8b6248" + +const rand2BZ2Hex = "425a6839314159265359d992d0f60000137dfe84020310091c1e280e100e042801099210094806c0110002e70806402000546034000034000000f2830000032000d3403264049270eb7a9280d308ca06ad28f6981bee1bf8160727c7364510d73a1e123083421b63f031f63993a0f40051fbf177245385090d992d0f60" +const rand2Hex = "92d5652616ac444a4a04af1a8a3964aca0450d43d6cf233bd03233f4ba92f8719e6c2a2bd4f5f88db07ecd0da3a33b263483db9b2c158786ad6363be35d17335ba" + +const rand3BZ2Hex = "425a68393141592653593be669d00000327ffffffffffffffffffffffffffffffffffff7ffffffffffffffffffffffffffffffc002b3b2b1b6e2bae400004c00132300004c0d268c004c08c0130026001a008683234c0684c34008c230261a04c0260064d07a8d00034000d27a1268c9931a8d327a3427a41faa69ea0da264c1a34219326869b51b49a6469a3268c689fa53269a62794687a9a68f5189994c9e487a8f534fd49a3d34043629e8c93d04da4f4648d30d4f44d3234c4d3023d0840680984d309934c234d3131a000640984f536a6132601300130130c8d00d04d1841ea7a8d31a02609b40023460010c01a34d4c1a0d04d3069306810034d0d0d4c0046130d034d0131a9a64d321804c68003400098344c13000991808c0001a00000000098004d3d4da4604c47a13012140aadf8d673c922c607ef6212a8c0403adea4b28aee578900e653b9cdeb8d11e6b838815f3ebaad5a01c5408d84a332170aff8734d4e06612d3c2889f31925fb89e33561f5100ae89b1f7047102e729373d3667e58d73aaa80fa7be368a1cc2dadd81d81ec8e1b504bd772ca31d03649269b01ceddaca07bf3d4eba24de141be3f86f93601e03714c0f64654671684f9f9528626fd4e1b76753dc0c54b842486b8d59d8ab314e86ca818e7a1f079463cbbd70d9b79b283c7edc419406311022e4be98c2c1374df9cdde2d008ce1d00e5f06ad1024baf555631f70831fc1023034e62be7c4bcb648caf276963ffa20e96bb50377fe1c113da0db4625b50741c35a058edb009c6ee5dbf93b8a6b060eec568180e8db791b82aab96cbf4326ca98361461379425ba8dcc347be670bdba7641883e5526ae3d833f6e9cb9bac9557747c79e206151072f7f0071dff3880411846f66bf4075c7462f302b53cb3400a74cf35652ad5641ed33572fd54e7ed7f85f58a0acba89327e7c6be5c58cb71528b99df2431f1d0358f8d28d81d95292da631fb06701decabb205fac59ff0fb1df536afc681eece6ea658c4d9eaa45f1342aa1ff70bdaff2ddaf25ec88c22f12829a0553db1ec2505554cb17d7b282e213a5a2aa30431ded2bce665bb199d023840832fedb2c0c350a27291407ff77440792872137df281592e82076a05c64c345ffb058c64f7f7c207ef78420b7010520610f17e302cc4dfcfaef72a0ed091aab4b541eb0531bbe941ca2f792bf7b31ca6162882b68054a8470115bc2c19f2df2023f7800432b39b04d3a304e8085ba3f1f0ca5b1ba4d38d339e6084de979cdea6d0e244c6c9fa0366bd890621e3d30846f5e8497e21597b8f29bbf52c961a485dfbea647600da0fc1f25ce4d203a8352ece310c39073525044e7ac46acf2ed9120bae1b4f6f02364abfe343f80b290983160c103557af1c68416480d024cc31b6c06cfec011456f1e95c420a12b48b1c3fe220c2879a982fb099948ac440db844b9a112a5188c7783fd3b19593290785f908d95c9db4b280bafe89c1313aeec24772046d9bc089645f0d182a21184e143823c5f52de50e5d7e98d3d7ab56f5413bbccd1415c9bcff707def475b643fb7f29842582104d4cc1dbaaca8f10a2f44273c339e0984f2b1e06ab2f0771db01fafa8142298345f3196f23e5847bda024034b6f59b11c29e981c881456e40d211929fd4f766200258aad8212016322bd5c605790dcfdf1bd2a93d99c9b8f498722d311d7eae7ff420496a31804c55f4759a7b13aaaf5f7ce006c3a8a998897d5e0a504398c2b627852545baf440798bcc5cc049357cf3f17d9771e4528a1af3d77dc794a11346e1bdf5efe37a405b127b4c43b616d61fbc5dc914e14240ef99a
7400" +const rand3Hex = "1744b384d68c042371244e13500d4bfb98c6244e3d71a5b700224420b59c593553f33bd786e3d0ce31626f511bc985f59d1a88aa38ba8ad6218d306abee60dd9172540232b95be1af146c69e72e5fde667a090dc3f93bdc5c5af0ab80acdbaa7a505f628c59dc0247b31a439cacf5010a94376d71521df08c178b02fb96fdb1809144ea38c68536187c53201fea8631fb0a880b4451ccdca7cc61f6aafca21cc7449d920599db61789ac3b1e164b3390124f95022aeea39ccca3ec1053f4fa10de2978e2861ea58e477085c2220021a0927aa94c5d0006b5055abba340e4f9eba22e969978dfd18e278a8b89d877328ae34268bc0174cfe211954c0036f078025217d1269fac1932a03b05a0b616012271bbe1fb554171c7a59b196d8a4479f45a77931b5d97aaf6c0c673cbe597b79b96e2a0c1eae2e66e46ccc8c85798e23ffe972ebdaa3f6caea243c004e60321eb47cd79137d78fd0613be606feacc5b3637bdc96a89c13746db8cad886f3ccf912b2178c823bcac395f06d28080269bdca2debf3419c66c690fd1adcfbd53e32e79443d7a42511a84cb22ca94fffad9149275a075b2f8ae0b021dcde9bf62b102db920733b897560518b06e1ad7f4b03458493ddaa7f4fa2c1609f7a1735aeeb1b3e2cea3ab45fc376323cc91873b7e9c90d07c192e38d3f5dfc9bfab1fd821c854da9e607ea596c391c7ec4161c6c4493929a8176badaa5a5af7211c623f29643a937677d3df0da9266181b7c4da5dd40376db677fe8f4a1dc456adf6f33c1e37cec471dd318c2647644fe52f93707a77da7d1702380a80e14cc0fdce7bf2eed48a529090bae0388ee277ce6c7018c5fb00b88362554362205c641f0d0fab94fd5b8357b5ff08b207fee023709bc126ec90cfb17c006754638f8186aaeb1265e80be0c1189ec07d01d5f6f96cb9ce82744147d18490de7dc72862f42f024a16968891a356f5e7e0e695d8c933ba5b5e43ad4c4ade5399bc2cae9bb6189b7870d7f22956194d277f28b10e01c10c6ffe3e065f7e2d6d056aa790db5649ca84dc64c35566c0af1b68c32b5b7874aaa66467afa44f40e9a0846a07ae75360a641dd2acc69d93219b2891f190621511e62a27f5e4fbe641ece1fa234fc7e9a74f48d2a760d82160d9540f649256b169d1fed6fbefdc491126530f3cbad7913e19fbd7aa53b1e243fbf28d5f38c10ebd77c8b986775975cc1d619efb27cdcd733fa1ca36cffe9c0a33cc9f02463c91a886601fd349efee85ef1462065ef9bd2c8f533220ad93138b8382d5938103ab25b2d9af8ae106e1211eb9b18793fba033900c809c02cd6d17e2f3e6fc84dae873411f8e87c3f0a8f1765b7825d185ce3730f299c3028d4a62da9ee95c2b870fb70c79370d485f9d5d9acb78926d20444033d960524d2776dc31988ec7c0dbf23b9905d" diff --git a/libgo/go/compress/bzip2/huffman.go b/libgo/go/compress/bzip2/huffman.go new file mode 100644 index 00000000000..732bc4a21da --- /dev/null +++ b/libgo/go/compress/bzip2/huffman.go @@ -0,0 +1,223 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bzip2 + +import ( + "os" + "sort" +) + +// A huffmanTree is a binary tree which is navigated, bit-by-bit to reach a +// symbol. +type huffmanTree struct { + // nodes contains all the non-leaf nodes in the tree. nodes[0] is the + // root of the tree and nextNode contains the index of the next element + // of nodes to use when the tree is being constructed. + nodes []huffmanNode + nextNode int +} + +// A huffmanNode is a node in the tree. left and right contain indexes into the +// nodes slice of the tree. If left or right is invalidNodeValue then the child +// is a left node and its value is in leftValue/rightValue. +// +// The symbols are uint16s because bzip2 encodes not only MTF indexes in the +// tree, but also two magic values for run-length encoding and an EOF symbol. +// Thus there are more than 256 possible symbols. +type huffmanNode struct { + left, right uint16 + leftValue, rightValue uint16 +} + +// invalidNodeValue is an invalid index which marks a leaf node in the tree. 
+const invalidNodeValue = 0xffff + +// Decode reads bits from the given bitReader and navigates the tree until a +// symbol is found. +func (t huffmanTree) Decode(br *bitReader) (v uint16) { + nodeIndex := uint16(0) // node 0 is the root of the tree. + + for { + node := &t.nodes[nodeIndex] + bit := br.ReadBit() + // bzip2 encodes left as a true bit. + if bit { + // left + if node.left == invalidNodeValue { + return node.leftValue + } + nodeIndex = node.left + } else { + // right + if node.right == invalidNodeValue { + return node.rightValue + } + nodeIndex = node.right + } + } + + panic("unreachable") +} + +// newHuffmanTree builds a Huffman tree from a slice containing the code +// lengths of each symbol. The maximum code length is 32 bits. +func newHuffmanTree(lengths []uint8) (huffmanTree, os.Error) { + // There are many possible trees that assign the same code length to + // each symbol (consider reflecting a tree down the middle, for + // example). Since the code length assignments determine the + // efficiency of the tree, each of these trees is equally good. In + // order to minimise the amount of information needed to build a tree + // bzip2 uses a canonical tree so that it can be reconstructed given + // only the code length assignments. + + if len(lengths) < 2 { + panic("newHuffmanTree: too few symbols") + } + + var t huffmanTree + + // First we sort the code length assignments by ascending code length, + // using the symbol value to break ties. + pairs := huffmanSymbolLengthPairs(make([]huffmanSymbolLengthPair, len(lengths))) + for i, length := range lengths { + pairs[i].value = uint16(i) + pairs[i].length = length + } + + sort.Sort(pairs) + + // Now we assign codes to the symbols, starting with the longest code. + // We keep the codes packed into a uint32, at the most-significant end. + // So branches are taken from the MSB downwards. This makes it easy to + // sort them later. + code := uint32(0) + length := uint8(32) + + codes := huffmanCodes(make([]huffmanCode, len(lengths))) + for i := len(pairs) - 1; i >= 0; i-- { + if length > pairs[i].length { + // If the code length decreases we shift in order to + // zero any bits beyond the end of the code. + length >>= 32 - pairs[i].length + length <<= 32 - pairs[i].length + length = pairs[i].length + } + codes[i].code = code + codes[i].codeLen = length + codes[i].value = pairs[i].value + // We need to 'increment' the code, which means treating |code| + // like a |length| bit number. + code += 1 << (32 - length) + } + + // Now we can sort by the code so that the left half of each branch are + // grouped together, recursively. + sort.Sort(codes) + + t.nodes = make([]huffmanNode, len(codes)) + _, err := buildHuffmanNode(&t, codes, 0) + return t, err +} + +// huffmanSymbolLengthPair contains a symbol and its code length. +type huffmanSymbolLengthPair struct { + value uint16 + length uint8 +} + +// huffmanSymbolLengthPair is used to provide an interface for sorting. +type huffmanSymbolLengthPairs []huffmanSymbolLengthPair + +func (h huffmanSymbolLengthPairs) Len() int { + return len(h) +} + +func (h huffmanSymbolLengthPairs) Less(i, j int) bool { + if h[i].length < h[j].length { + return true + } + if h[i].length > h[j].length { + return false + } + if h[i].value < h[j].value { + return true + } + return false +} + +func (h huffmanSymbolLengthPairs) Swap(i, j int) { + h[i], h[j] = h[j], h[i] +} + +// huffmanCode contains a symbol, its code and code length. 
+type huffmanCode struct { + code uint32 + codeLen uint8 + value uint16 +} + +// huffmanCodes is used to provide an interface for sorting. +type huffmanCodes []huffmanCode + +func (n huffmanCodes) Len() int { + return len(n) +} + +func (n huffmanCodes) Less(i, j int) bool { + return n[i].code < n[j].code +} + +func (n huffmanCodes) Swap(i, j int) { + n[i], n[j] = n[j], n[i] +} + +// buildHuffmanNode takes a slice of sorted huffmanCodes and builds a node in +// the Huffman tree at the given level. It returns the index of the newly +// constructed node. +func buildHuffmanNode(t *huffmanTree, codes []huffmanCode, level uint32) (nodeIndex uint16, err os.Error) { + test := uint32(1) << (31 - level) + + // We have to search the list of codes to find the divide between the left and right sides. + firstRightIndex := len(codes) + for i, code := range codes { + if code.code&test != 0 { + firstRightIndex = i + break + } + } + + left := codes[:firstRightIndex] + right := codes[firstRightIndex:] + + if len(left) == 0 || len(right) == 0 { + return 0, StructuralError("superfluous level in Huffman tree") + } + + nodeIndex = uint16(t.nextNode) + node := &t.nodes[t.nextNode] + t.nextNode++ + + if len(left) == 1 { + // leaf node + node.left = invalidNodeValue + node.leftValue = left[0].value + } else { + node.left, err = buildHuffmanNode(t, left, level+1) + } + + if err != nil { + return + } + + if len(right) == 1 { + // leaf node + node.right = invalidNodeValue + node.rightValue = right[0].value + } else { + node.right, err = buildHuffmanNode(t, right, level+1) + } + + return +} diff --git a/libgo/go/compress/bzip2/move_to_front.go b/libgo/go/compress/bzip2/move_to_front.go new file mode 100644 index 00000000000..0ed19dec39c --- /dev/null +++ b/libgo/go/compress/bzip2/move_to_front.go @@ -0,0 +1,105 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bzip2 + +// moveToFrontDecoder implements a move-to-front list. Such a list is an +// efficient way to transform a string with repeating elements into one with +// many small valued numbers, which is suitable for entropy encoding. It works +// by starting with an initial list of symbols and references symbols by their +// index into that list. When a symbol is referenced, it's moved to the front +// of the list. Thus, a repeated symbol ends up being encoded with many zeros, +// as the symbol will be at the front of the list after the first access. +type moveToFrontDecoder struct { + // Rather than actually keep the list in memory, the symbols are stored + // as a circular, double linked list with the symbol indexed by head + // at the front of the list. + symbols []byte + next []uint8 + prev []uint8 + head uint8 +} + +// newMTFDecoder creates a move-to-front decoder with an explicit initial list +// of symbols. +func newMTFDecoder(symbols []byte) *moveToFrontDecoder { + if len(symbols) > 256 { + panic("too many symbols") + } + + m := &moveToFrontDecoder{ + symbols: symbols, + next: make([]uint8, len(symbols)), + prev: make([]uint8, len(symbols)), + } + + m.threadLinkedList() + return m +} + +// newMTFDecoderWithRange creates a move-to-front decoder with an initial +// symbol list of 0...n-1. 
+func newMTFDecoderWithRange(n int) *moveToFrontDecoder { + if n > 256 { + panic("newMTFDecoderWithRange: cannot have > 256 symbols") + } + + m := &moveToFrontDecoder{ + symbols: make([]uint8, n), + next: make([]uint8, n), + prev: make([]uint8, n), + } + + for i := 0; i < n; i++ { + m.symbols[i] = byte(i) + } + + m.threadLinkedList() + return m +} + +// threadLinkedList creates the initial linked-list pointers. +func (m *moveToFrontDecoder) threadLinkedList() { + if len(m.symbols) == 0 { + return + } + + m.prev[0] = uint8(len(m.symbols) - 1) + + for i := 0; i < len(m.symbols)-1; i++ { + m.next[i] = uint8(i + 1) + m.prev[i+1] = uint8(i) + } + + m.next[len(m.symbols)-1] = 0 +} + +func (m *moveToFrontDecoder) Decode(n int) (b byte) { + // Most of the time, n will be zero so it's worth dealing with this + // simple case. + if n == 0 { + return m.symbols[m.head] + } + + i := m.head + for j := 0; j < n; j++ { + i = m.next[i] + } + b = m.symbols[i] + + m.next[m.prev[i]] = m.next[i] + m.prev[m.next[i]] = m.prev[i] + m.next[i] = m.head + m.prev[i] = m.prev[m.head] + m.next[m.prev[m.head]] = i + m.prev[m.head] = i + m.head = i + + return +} + +// First returns the symbol at the front of the list. +func (m *moveToFrontDecoder) First() byte { + return m.symbols[m.head] +} diff --git a/libgo/go/compress/flate/deflate_test.go b/libgo/go/compress/flate/deflate_test.go index 3db955609d7..ff54164b2cc 100644 --- a/libgo/go/compress/flate/deflate_test.go +++ b/libgo/go/compress/flate/deflate_test.go @@ -116,9 +116,16 @@ func (b *syncBuffer) Read(p []byte) (n int, err os.Error) { panic("unreachable") } +func (b *syncBuffer) signal() { + select { + case b.ready <- true: + default: + } +} + func (b *syncBuffer) Write(p []byte) (n int, err os.Error) { n, err = b.buf.Write(p) - _ = b.ready <- true + b.signal() return } @@ -128,12 +135,12 @@ func (b *syncBuffer) WriteMode() { func (b *syncBuffer) ReadMode() { b.mu.Unlock() - _ = b.ready <- true + b.signal() } func (b *syncBuffer) Close() os.Error { b.closed = true - _ = b.ready <- true + b.signal() return nil } @@ -255,135 +262,9 @@ func TestReverseBits(t *testing.T) { } func TestDeflateInflateString(t *testing.T) { - gold := bytes.NewBufferString(getEdata()).Bytes() + gold, err := ioutil.ReadFile("../testdata/e.txt") + if err != nil { + t.Error(err) + } testToFromWithLevel(t, 1, gold, "2.718281828...") } - -func getEdata() string { - return "2.718281828459045235360287471352662497757247093699959574966967627724076630353547" + - "59457138217852516642742746639193200305992181741359662904357290033429526059563073" + - "81323286279434907632338298807531952510190115738341879307021540891499348841675092" + - "44761460668082264800168477411853742345442437107539077744992069551702761838606261" + - "33138458300075204493382656029760673711320070932870912744374704723069697720931014" + - "16928368190255151086574637721112523897844250569536967707854499699679468644549059" + - "87931636889230098793127736178215424999229576351482208269895193668033182528869398" + - "49646510582093923982948879332036250944311730123819706841614039701983767932068328" + - "23764648042953118023287825098194558153017567173613320698112509961818815930416903" + - "51598888519345807273866738589422879228499892086805825749279610484198444363463244" + - "96848756023362482704197862320900216099023530436994184914631409343173814364054625" + - "31520961836908887070167683964243781405927145635490613031072085103837505101157477" + - "04171898610687396965521267154688957035035402123407849819334321068170121005627880" + - 
"23519303322474501585390473041995777709350366041699732972508868769664035557071622" + - "68447162560798826517871341951246652010305921236677194325278675398558944896970964" + - "09754591856956380236370162112047742722836489613422516445078182442352948636372141" + - "74023889344124796357437026375529444833799801612549227850925778256209262264832627" + - "79333865664816277251640191059004916449982893150566047258027786318641551956532442" + - "58698294695930801915298721172556347546396447910145904090586298496791287406870504" + - "89585867174798546677575732056812884592054133405392200011378630094556068816674001" + - "69842055804033637953764520304024322566135278369511778838638744396625322498506549" + - "95886234281899707733276171783928034946501434558897071942586398772754710962953741" + - "52111513683506275260232648472870392076431005958411661205452970302364725492966693" + - "81151373227536450988890313602057248176585118063036442812314965507047510254465011" + - "72721155519486685080036853228183152196003735625279449515828418829478761085263981" + - "39559900673764829224437528718462457803619298197139914756448826260390338144182326" + - "25150974827987779964373089970388867782271383605772978824125611907176639465070633" + - "04527954661855096666185664709711344474016070462621568071748187784437143698821855" + - "96709591025968620023537185887485696522000503117343920732113908032936344797273559" + - "55277349071783793421637012050054513263835440001863239914907054797780566978533580" + - "48966906295119432473099587655236812859041383241160722602998330535370876138939639" + - "17795745401613722361878936526053815584158718692553860616477983402543512843961294" + - "60352913325942794904337299085731580290958631382683291477116396337092400316894586" + - "36060645845925126994655724839186564209752685082307544254599376917041977780085362" + - "73094171016343490769642372229435236612557250881477922315197477806056967253801718" + - "07763603462459278778465850656050780844211529697521890874019660906651803516501792" + - "50461950136658543663271254963990854914420001457476081930221206602433009641270489" + - "43903971771951806990869986066365832322787093765022601492910115171776359446020232" + - "49300280401867723910288097866605651183260043688508817157238669842242201024950551" + - "88169480322100251542649463981287367765892768816359831247788652014117411091360116" + - "49950766290779436460058519419985601626479076153210387275571269925182756879893027" + - "61761146162549356495903798045838182323368612016243736569846703785853305275833337" + - "93990752166069238053369887956513728559388349989470741618155012539706464817194670" + - "83481972144888987906765037959036696724949925452790337296361626589760394985767413" + - "97359441023744329709355477982629614591442936451428617158587339746791897571211956" + - "18738578364475844842355558105002561149239151889309946342841393608038309166281881" + - "15037152849670597416256282360921680751501777253874025642534708790891372917228286" + - "11515915683725241630772254406337875931059826760944203261924285317018781772960235" + - "41306067213604600038966109364709514141718577701418060644363681546444005331608778" + - "31431744408119494229755993140118886833148328027065538330046932901157441475631399" + - "97221703804617092894579096271662260740718749975359212756084414737823303270330168" + - "23719364800217328573493594756433412994302485023573221459784328264142168487872167" + - "33670106150942434569844018733128101079451272237378861260581656680537143961278887" + - 
"32527373890392890506865324138062796025930387727697783792868409325365880733988457" + - "21874602100531148335132385004782716937621800490479559795929059165547050577751430" + - "81751126989851884087185640260353055837378324229241856256442550226721559802740126" + - "17971928047139600689163828665277009752767069777036439260224372841840883251848770" + - "47263844037953016690546593746161932384036389313136432713768884102681121989127522" + - "30562567562547017250863497653672886059667527408686274079128565769963137897530346" + - "60616669804218267724560530660773899624218340859882071864682623215080288286359746" + - "83965435885668550377313129658797581050121491620765676995065971534476347032085321" + - "56036748286083786568030730626576334697742956346437167093971930608769634953288468" + - "33613038829431040800296873869117066666146800015121143442256023874474325250769387" + - "07777519329994213727721125884360871583483562696166198057252661220679754062106208" + - "06498829184543953015299820925030054982570433905535701686531205264956148572492573" + - "86206917403695213533732531666345466588597286659451136441370331393672118569553952" + - "10845840724432383558606310680696492485123263269951460359603729725319836842336390" + - "46321367101161928217111502828016044880588023820319814930963695967358327420249882" + - "45684941273860566491352526706046234450549227581151709314921879592718001940968866" + - "98683703730220047531433818109270803001720593553052070070607223399946399057131158" + - "70996357773590271962850611465148375262095653467132900259943976631145459026858989" + - "79115837093419370441155121920117164880566945938131183843765620627846310490346293" + - "95002945834116482411496975832601180073169943739350696629571241027323913874175492" + - "30718624545432220395527352952402459038057445028922468862853365422138157221311632" + - "88112052146489805180092024719391710555390113943316681515828843687606961102505171" + - "00739276238555338627255353883096067164466237092264680967125406186950214317621166" + - "81400975952814939072226011126811531083873176173232352636058381731510345957365382" + - "23534992935822836851007810884634349983518404451704270189381994243410090575376257" + - "76757111809008816418331920196262341628816652137471732547772778348877436651882875" + - "21566857195063719365653903894493664217640031215278702223664636357555035655769488" + - "86549500270853923617105502131147413744106134445544192101336172996285694899193369" + - "18472947858072915608851039678195942983318648075608367955149663644896559294818785" + - "17840387733262470519450504198477420141839477312028158868457072905440575106012852" + - "58056594703046836344592652552137008068752009593453607316226118728173928074623094" + - "68536782310609792159936001994623799343421068781349734695924646975250624695861690" + - "91785739765951993929939955675427146549104568607020990126068187049841780791739240" + - "71945996323060254707901774527513186809982284730860766536866855516467702911336827" + - "56310722334672611370549079536583453863719623585631261838715677411873852772292259" + - "47433737856955384562468010139057278710165129666367644518724656537304024436841408" + - "14488732957847348490003019477888020460324660842875351848364959195082888323206522" + - "12810419044804724794929134228495197002260131043006241071797150279343326340799596" + - "05314460532304885289729176598760166678119379323724538572096075822771784833616135" + - "82612896226118129455927462767137794487586753657544861407611931125958512655759734" + - 
"57301533364263076798544338576171533346232527057200530398828949903425956623297578" + - "24887350292591668258944568946559926584547626945287805165017206747854178879822768" + - "06536650641910973434528878338621726156269582654478205672987756426325321594294418" + - "03994321700009054265076309558846589517170914760743713689331946909098190450129030" + - "70995662266203031826493657336984195557769637876249188528656866076005660256054457" + - "11337286840205574416030837052312242587223438854123179481388550075689381124935386" + - "31863528708379984569261998179452336408742959118074745341955142035172618420084550" + - "91708456823682008977394558426792142734775608796442792027083121501564063413416171" + - "66448069815483764491573900121217041547872591998943825364950514771379399147205219" + - "52907939613762110723849429061635760459623125350606853765142311534966568371511660" + - "42207963944666211632551577290709784731562782775987881364919512574833287937715714" + - "59091064841642678309949723674420175862269402159407924480541255360431317992696739" + - "15754241929660731239376354213923061787675395871143610408940996608947141834069836" + - "29936753626215452472984642137528910798843813060955526227208375186298370667872244" + - "30195793793786072107254277289071732854874374355781966511716618330881129120245204" + - "04868220007234403502544820283425418788465360259150644527165770004452109773558589" + - "76226554849416217149895323834216001140629507184904277892585527430352213968356790" + - "18076406042138307308774460170842688272261177180842664333651780002171903449234264" + - "26629226145600433738386833555534345300426481847398921562708609565062934040526494" + - "32442614456659212912256488935696550091543064261342526684725949143142393988454324" + - "86327461842846655985332312210466259890141712103446084271616619001257195870793217" + - "56969854401339762209674945418540711844643394699016269835160784892451405894094639" + - "52678073545797003070511636825194877011897640028276484141605872061841852971891540" + - "19688253289309149665345753571427318482016384644832499037886069008072709327673127" + - "58196656394114896171683298045513972950668760474091542042842999354102582911350224" + - "16907694316685742425225090269390348148564513030699251995904363840284292674125734" + - "22447765584177886171737265462085498294498946787350929581652632072258992368768457" + - "01782303809656788311228930580914057261086588484587310165815116753332767488701482" + - "91674197015125597825727074064318086014281490241467804723275976842696339357735429" + - "30186739439716388611764209004068663398856841681003872389214483176070116684503887" + - "21236436704331409115573328018297798873659091665961240202177855885487617616198937" + - "07943800566633648843650891448055710397652146960276625835990519870423001794655367" + - "9" -} diff --git a/libgo/go/compress/lzw/reader.go b/libgo/go/compress/lzw/reader.go new file mode 100644 index 00000000000..8a540cbe6a1 --- /dev/null +++ b/libgo/go/compress/lzw/reader.go @@ -0,0 +1,210 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The lzw package implements the Lempel-Ziv-Welch compressed data format, +// described in T. A. Welch, ``A Technique for High-Performance Data +// Compression'', Computer, 17(6) (June 1984), pp 8-19. 
+// +// In particular, it implements LZW as used by the GIF, TIFF and PDF file +// formats, which means variable-width codes up to 12 bits and the first +// two non-literal codes are a clear code and an EOF code. +package lzw + +// TODO(nigeltao): check that TIFF and PDF use LZW in the same way as GIF, +// modulo LSB/MSB packing order. + +import ( + "bufio" + "fmt" + "io" + "os" +) + +// Order specifies the bit ordering in an LZW data stream. +type Order int + +const ( + // LSB means Least Significant Bits first, as used in the GIF file format. + LSB Order = iota + // MSB means Most Significant Bits first, as used in the TIFF and PDF + // file formats. + MSB +) + +// decoder is the state from which the readXxx method converts a byte +// stream into a code stream. +type decoder struct { + r io.ByteReader + bits uint32 + nBits uint + width uint +} + +// readLSB returns the next code for "Least Significant Bits first" data. +func (d *decoder) readLSB() (uint16, os.Error) { + for d.nBits < d.width { + x, err := d.r.ReadByte() + if err != nil { + return 0, err + } + d.bits |= uint32(x) << d.nBits + d.nBits += 8 + } + code := uint16(d.bits & (1<>= d.width + d.nBits -= d.width + return code, nil +} + +// readMSB returns the next code for "Most Significant Bits first" data. +func (d *decoder) readMSB() (uint16, os.Error) { + for d.nBits < d.width { + x, err := d.r.ReadByte() + if err != nil { + return 0, err + } + d.bits |= uint32(x) << (24 - d.nBits) + d.nBits += 8 + } + code := uint16(d.bits >> (32 - d.width)) + d.bits <<= d.width + d.nBits -= d.width + return code, nil +} + +// decode decompresses bytes from r and writes them to pw. +// read specifies how to decode bytes into codes. +// litWidth is the width in bits of literal codes. +func decode(r io.Reader, read func(*decoder) (uint16, os.Error), litWidth int, pw *io.PipeWriter) { + br, ok := r.(io.ByteReader) + if !ok { + br = bufio.NewReader(r) + } + pw.CloseWithError(decode1(pw, br, read, uint(litWidth))) +} + +func decode1(pw *io.PipeWriter, r io.ByteReader, read func(*decoder) (uint16, os.Error), litWidth uint) os.Error { + const ( + maxWidth = 12 + invalidCode = 0xffff + ) + d := decoder{r, 0, 0, 1 + litWidth} + w := bufio.NewWriter(pw) + // The first 1<= clear { + c = prefix[c] + } + buf[i] = uint8(c) + i-- + c = last + } + // Copy the suffix chain into buf and then write that to w. + for c >= clear { + buf[i] = suffix[c] + i-- + c = prefix[c] + } + buf[i] = uint8(c) + if _, err := w.Write(buf[i:]); err != nil { + return err + } + // Save what the hi code expands to. + suffix[hi] = uint8(c) + prefix[hi] = last + default: + return os.NewError("lzw: invalid code") + } + last, hi = code, hi+1 + if hi == overflow { + if d.width == maxWidth { + return os.NewError("lzw: missing clear code") + } + d.width++ + overflow <<= 1 + } + } + panic("unreachable") +} + +// NewReader creates a new io.ReadCloser that satisfies reads by decompressing +// the data read from r. +// It is the caller's responsibility to call Close on the ReadCloser when +// finished reading. +// The number of bits to use for literal codes, litWidth, must be in the +// range [2,8] and is typically 8. 
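Before the NewReader declaration that follows, a minimal round-trip sketch of the API this patch adds in reader.go and writer.go. It is not part of the patch and targets the pre-Go1, os.Error-era toolchain this library tracks:

    package main

    import (
        "bytes"
        "compress/lzw"
        "fmt"
        "io/ioutil"
    )

    func main() {
        // Compress with LSB-first bit order and 8-bit literals (the GIF convention).
        var compressed bytes.Buffer
        w := lzw.NewWriter(&compressed, lzw.LSB, 8)
        w.Write([]byte("TOBEORNOTTOBEORTOBEORNOT")) // errors ignored for brevity
        w.Close()                                    // flushes pending bits and writes the EOF code

        // Decompress; the caller is responsible for closing the returned ReadCloser.
        r := lzw.NewReader(&compressed, lzw.LSB, 8)
        defer r.Close()
        plain, _ := ioutil.ReadAll(r)
        fmt.Printf("%s\n", plain)
    }

The sample input is the same "TOBEORNOT..." string used by the reader tests added below.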
+func NewReader(r io.Reader, order Order, litWidth int) io.ReadCloser { + pr, pw := io.Pipe() + var read func(*decoder) (uint16, os.Error) + switch order { + case LSB: + read = (*decoder).readLSB + case MSB: + read = (*decoder).readMSB + default: + pw.CloseWithError(os.NewError("lzw: unknown order")) + return pr + } + if litWidth < 2 || 8 < litWidth { + pw.CloseWithError(fmt.Errorf("lzw: litWidth %d out of range", litWidth)) + return pr + } + go decode(r, read, litWidth, pw) + return pr +} diff --git a/libgo/go/compress/lzw/reader_test.go b/libgo/go/compress/lzw/reader_test.go new file mode 100644 index 00000000000..7795a4c1489 --- /dev/null +++ b/libgo/go/compress/lzw/reader_test.go @@ -0,0 +1,132 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzw + +import ( + "bytes" + "io" + "io/ioutil" + "os" + "strconv" + "strings" + "testing" +) + +type lzwTest struct { + desc string + raw string + compressed string + err os.Error +} + +var lzwTests = []lzwTest{ + { + "empty;LSB;8", + "", + "\x01\x01", + nil, + }, + { + "empty;MSB;8", + "", + "\x80\x80", + nil, + }, + { + "tobe;LSB;7", + "TOBEORNOTTOBEORTOBEORNOT", + "\x54\x4f\x42\x45\x4f\x52\x4e\x4f\x54\x82\x84\x86\x8b\x85\x87\x89\x81", + nil, + }, + { + "tobe;LSB;8", + "TOBEORNOTTOBEORTOBEORNOT", + "\x54\x9e\x08\x29\xf2\x44\x8a\x93\x27\x54\x04\x12\x34\xb8\xb0\xe0\xc1\x84\x01\x01", + nil, + }, + { + "tobe;MSB;7", + "TOBEORNOTTOBEORTOBEORNOT", + "\x54\x4f\x42\x45\x4f\x52\x4e\x4f\x54\x82\x84\x86\x8b\x85\x87\x89\x81", + nil, + }, + { + "tobe;MSB;8", + "TOBEORNOTTOBEORTOBEORNOT", + "\x2a\x13\xc8\x44\x52\x79\x48\x9c\x4f\x2a\x40\xa0\x90\x68\x5c\x16\x0f\x09\x80\x80", + nil, + }, + { + "tobe-truncated;LSB;8", + "TOBEORNOTTOBEORTOBEORNOT", + "\x54\x9e\x08\x29\xf2\x44\x8a\x93\x27\x54\x04", + io.ErrUnexpectedEOF, + }, + // This example comes from http://en.wikipedia.org/wiki/Graphics_Interchange_Format. 
+ { + "gif;LSB;8", + "\x28\xff\xff\xff\x28\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff", + "\x00\x51\xfc\x1b\x28\x70\xa0\xc1\x83\x01\x01", + nil, + }, + // This example comes from http://compgroups.net/comp.lang.ruby/Decompressing-LZW-compression-from-PDF-file + { + "pdf;MSB;8", + "-----A---B", + "\x80\x0b\x60\x50\x22\x0c\x0c\x85\x01", + nil, + }, +} + +func TestReader(t *testing.T) { + b := bytes.NewBuffer(nil) + for _, tt := range lzwTests { + d := strings.Split(tt.desc, ";", -1) + var order Order + switch d[1] { + case "LSB": + order = LSB + case "MSB": + order = MSB + default: + t.Errorf("%s: bad order %q", tt.desc, d[1]) + } + litWidth, _ := strconv.Atoi(d[2]) + rc := NewReader(strings.NewReader(tt.compressed), order, litWidth) + defer rc.Close() + b.Reset() + n, err := io.Copy(b, rc) + if err != nil { + if err != tt.err { + t.Errorf("%s: io.Copy: %v want %v", tt.desc, err, tt.err) + } + continue + } + s := b.String() + if s != tt.raw { + t.Errorf("%s: got %d-byte %q want %d-byte %q", tt.desc, n, s, len(tt.raw), tt.raw) + } + } +} + +type devNull struct{} + +func (devNull) Write(p []byte) (int, os.Error) { + return len(p), nil +} + +func BenchmarkDecoder(b *testing.B) { + b.StopTimer() + buf0, _ := ioutil.ReadFile("../testdata/e.txt") + compressed := bytes.NewBuffer(nil) + w := NewWriter(compressed, LSB, 8) + io.Copy(w, bytes.NewBuffer(buf0)) + w.Close() + buf1 := compressed.Bytes() + b.StartTimer() + for i := 0; i < b.N; i++ { + io.Copy(devNull{}, NewReader(bytes.NewBuffer(buf1), LSB, 8)) + } +} diff --git a/libgo/go/compress/lzw/writer.go b/libgo/go/compress/lzw/writer.go new file mode 100644 index 00000000000..87143b7aa94 --- /dev/null +++ b/libgo/go/compress/lzw/writer.go @@ -0,0 +1,259 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzw + +import ( + "bufio" + "fmt" + "io" + "os" +) + +// A writer is a buffered, flushable writer. +type writer interface { + WriteByte(byte) os.Error + Flush() os.Error +} + +// An errWriteCloser is an io.WriteCloser that always returns a given error. +type errWriteCloser struct { + err os.Error +} + +func (e *errWriteCloser) Write([]byte) (int, os.Error) { + return 0, e.err +} + +func (e *errWriteCloser) Close() os.Error { + return e.err +} + +const ( + // A code is a 12 bit value, stored as a uint32 when encoding to avoid + // type conversions when shifting bits. + maxCode = 1<<12 - 1 + invalidCode = 1<<32 - 1 + // There are 1<<12 possible codes, which is an upper bound on the number of + // valid hash table entries at any given point in time. tableSize is 4x that. + tableSize = 4 * 1 << 12 + tableMask = tableSize - 1 + // A hash table entry is a uint32. Zero is an invalid entry since the + // lower 12 bits of a valid entry must be a non-literal code. + invalidEntry = 0 +) + +// encoder is LZW compressor. +type encoder struct { + // w is the writer that compressed bytes are written to. + w writer + // write, bits, nBits and width are the state for converting a code stream + // into a byte stream. + write func(*encoder, uint32) os.Error + bits uint32 + nBits uint + width uint + // litWidth is the width in bits of literal codes. + litWidth uint + // hi is the code implied by the next code emission. + // overflow is the code at which hi overflows the code width. + hi, overflow uint32 + // savedCode is the accumulated code at the end of the most recent Write + // call. It is equal to invalidCode if there was no such call. 
+ savedCode uint32 + // err is the first error encountered during writing. Closing the encoder + // will make any future Write calls return os.EINVAL. + err os.Error + // table is the hash table from 20-bit keys to 12-bit values. Each table + // entry contains key<<12|val and collisions resolve by linear probing. + // The keys consist of a 12-bit code prefix and an 8-bit byte suffix. + // The values are a 12-bit code. + table [tableSize]uint32 +} + +// writeLSB writes the code c for "Least Significant Bits first" data. +func (e *encoder) writeLSB(c uint32) os.Error { + e.bits |= c << e.nBits + e.nBits += e.width + for e.nBits >= 8 { + if err := e.w.WriteByte(uint8(e.bits)); err != nil { + return err + } + e.bits >>= 8 + e.nBits -= 8 + } + return nil +} + +// writeMSB writes the code c for "Most Significant Bits first" data. +func (e *encoder) writeMSB(c uint32) os.Error { + e.bits |= c << (32 - e.width - e.nBits) + e.nBits += e.width + for e.nBits >= 8 { + if err := e.w.WriteByte(uint8(e.bits >> 24)); err != nil { + return err + } + e.bits <<= 8 + e.nBits -= 8 + } + return nil +} + +// errOutOfCodes is an internal error that means that the encoder has run out +// of unused codes and a clear code needs to be sent next. +var errOutOfCodes = os.NewError("lzw: out of codes") + +// incHi increments e.hi and checks for both overflow and running out of +// unused codes. In the latter case, incHi sends a clear code, resets the +// encoder state and returns errOutOfCodes. +func (e *encoder) incHi() os.Error { + e.hi++ + if e.hi == e.overflow { + e.width++ + e.overflow <<= 1 + } + if e.hi == maxCode { + clear := uint32(1) << e.litWidth + if err := e.write(e, clear); err != nil { + return err + } + e.width = uint(e.litWidth) + 1 + e.hi = clear + 1 + e.overflow = clear << 1 + for i := range e.table { + e.table[i] = invalidEntry + } + return errOutOfCodes + } + return nil +} + +// Write writes a compressed representation of p to e's underlying writer. +func (e *encoder) Write(p []byte) (int, os.Error) { + if e.err != nil { + return 0, e.err + } + if len(p) == 0 { + return 0, nil + } + litMask := uint32(1<>12 ^ key) & tableMask + for h, t := hash, e.table[hash]; t != invalidEntry; { + if key == t>>12 { + code = t & maxCode + continue loop + } + h = (h + 1) & tableMask + t = e.table[h] + } + // Otherwise, write the current code, and literal becomes the start of + // the next emitted code. + if e.err = e.write(e, code); e.err != nil { + return 0, e.err + } + code = literal + // Increment e.hi, the next implied code. If we run out of codes, reset + // the encoder state (including clearing the hash table) and continue. + if err := e.incHi(); err != nil { + if err == errOutOfCodes { + continue + } + e.err = err + return 0, e.err + } + // Otherwise, insert key -> e.hi into the map that e.table represents. + for { + if e.table[hash] == invalidEntry { + e.table[hash] = (key << 12) | e.hi + break + } + hash = (hash + 1) & tableMask + } + } + e.savedCode = code + return len(p), nil +} + +// Close closes the encoder, flushing any pending output. It does not close or +// flush e's underlying writer. +func (e *encoder) Close() os.Error { + if e.err != nil { + if e.err == os.EINVAL { + return nil + } + return e.err + } + // Make any future calls to Write return os.EINVAL. + e.err = os.EINVAL + // Write the savedCode if valid. 
+ if e.savedCode != invalidCode { + if err := e.write(e, e.savedCode); err != nil { + return err + } + if err := e.incHi(); err != nil && err != errOutOfCodes { + return err + } + } + // Write the eof code. + eof := uint32(1)< 0 { + if e.write == (*encoder).writeMSB { + e.bits >>= 24 + } + if err := e.w.WriteByte(uint8(e.bits)); err != nil { + return err + } + } + return e.w.Flush() +} + +// NewWriter creates a new io.WriteCloser that satisfies writes by compressing +// the data and writing it to w. +// It is the caller's responsibility to call Close on the WriteCloser when +// finished writing. +// The number of bits to use for literal codes, litWidth, must be in the +// range [2,8] and is typically 8. +func NewWriter(w io.Writer, order Order, litWidth int) io.WriteCloser { + var write func(*encoder, uint32) os.Error + switch order { + case LSB: + write = (*encoder).writeLSB + case MSB: + write = (*encoder).writeMSB + default: + return &errWriteCloser{os.NewError("lzw: unknown order")} + } + if litWidth < 2 || 8 < litWidth { + return &errWriteCloser{fmt.Errorf("lzw: litWidth %d out of range", litWidth)} + } + bw, ok := w.(writer) + if !ok { + bw = bufio.NewWriter(w) + } + lw := uint(litWidth) + return &encoder{ + w: bw, + write: write, + width: 1 + lw, + litWidth: lw, + hi: 1< 0 && h < maxHash { + return int(digestSizes[h]) + } + panic("crypto: Size of unknown hash function") +} + +var hashes = make([]func() hash.Hash, maxHash) + +// New returns a new hash.Hash calculating the given hash function. If the +// hash function is not linked into the binary, New returns nil. +func (h Hash) New() hash.Hash { + if h > 0 && h < maxHash { + f := hashes[h] + if f != nil { + return f() + } + } + return nil +} + +// RegisterHash registers a function that returns a new instance of the given +// hash function. This is intended to be called from the init function in +// packages that implement hash functions. +func RegisterHash(h Hash, f func() hash.Hash) { + if h >= maxHash { + panic("crypto: RegisterHash of unknown hash function") + } + hashes[h] = f +} diff --git a/libgo/go/crypto/dsa/dsa.go b/libgo/go/crypto/dsa/dsa.go new file mode 100644 index 00000000000..f0af8bb427e --- /dev/null +++ b/libgo/go/crypto/dsa/dsa.go @@ -0,0 +1,276 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package dsa implements the Digital Signature Algorithm, as defined in FIPS 186-3 +package dsa + +import ( + "big" + "io" + "os" +) + +// Parameters represents the domain parameters for a key. These parameters can +// be shared across many keys. The bit length of Q must be a multiple of 8. +type Parameters struct { + P, Q, G *big.Int +} + +// PublicKey represents a DSA public key. +type PublicKey struct { + Parameters + Y *big.Int +} + +// PrivateKey represents a DSA private key. +type PrivateKey struct { + PublicKey + X *big.Int +} + +type invalidPublicKeyError int + +func (invalidPublicKeyError) String() string { + return "crypto/dsa: invalid public key" +} + +// InvalidPublicKeyError results when a public key is not usable by this code. +// FIPS is quite strict about the format of DSA keys, but other code may be +// less so. Thus, when using keys which may have been generated by other code, +// this error must be handled. +var InvalidPublicKeyError = invalidPublicKeyError(0) + +// ParameterSizes is a enumeration of the acceptable bit lengths of the primes +// in a set of DSA parameters. 
See FIPS 186-3, section 4.2. +type ParameterSizes int + +const ( + L1024N160 ParameterSizes = iota + L2048N224 + L2048N256 + L3072N256 +) + +// numMRTests is the number of Miller-Rabin primality tests that we perform. We +// pick the largest recommended number from table C.1 of FIPS 186-3. +const numMRTests = 64 + +// GenerateParameters puts a random, valid set of DSA parameters into params. +// This function takes many seconds, even on fast machines. +func GenerateParameters(params *Parameters, rand io.Reader, sizes ParameterSizes) (err os.Error) { + // This function doesn't follow FIPS 186-3 exactly in that it doesn't + // use a verification seed to generate the primes. The verification + // seed doesn't appear to be exported or used by other code and + // omitting it makes the code cleaner. + + var L, N int + switch sizes { + case L1024N160: + L = 1024 + N = 160 + case L2048N224: + L = 2048 + N = 224 + case L2048N256: + L = 2048 + N = 256 + case L3072N256: + L = 3072 + N = 256 + default: + return os.ErrorString("crypto/dsa: invalid ParameterSizes") + } + + qBytes := make([]byte, N/8) + pBytes := make([]byte, L/8) + + q := new(big.Int) + p := new(big.Int) + rem := new(big.Int) + one := new(big.Int) + one.SetInt64(1) + +GeneratePrimes: + for { + _, err = io.ReadFull(rand, qBytes) + if err != nil { + return + } + + qBytes[len(qBytes)-1] |= 1 + qBytes[0] |= 0x80 + q.SetBytes(qBytes) + + if !big.ProbablyPrime(q, numMRTests) { + continue + } + + for i := 0; i < 4*L; i++ { + _, err = io.ReadFull(rand, pBytes) + if err != nil { + return + } + + pBytes[len(pBytes)-1] |= 1 + pBytes[0] |= 0x80 + + p.SetBytes(pBytes) + rem.Mod(p, q) + rem.Sub(rem, one) + p.Sub(p, rem) + if p.BitLen() < L { + continue + } + + if !big.ProbablyPrime(p, numMRTests) { + continue + } + + params.P = p + params.Q = q + break GeneratePrimes + } + } + + h := new(big.Int) + h.SetInt64(2) + g := new(big.Int) + + pm1 := new(big.Int).Sub(p, one) + e := new(big.Int).Div(pm1, q) + + for { + g.Exp(h, e, p) + if g.Cmp(one) == 0 { + h.Add(h, one) + continue + } + + params.G = g + return + } + + panic("unreachable") +} + +// GenerateKey generates a public&private key pair. The Parameters of the +// PrivateKey must already be valid (see GenerateParameters). +func GenerateKey(priv *PrivateKey, rand io.Reader) os.Error { + if priv.P == nil || priv.Q == nil || priv.G == nil { + return os.ErrorString("crypto/dsa: parameters not set up before generating key") + } + + x := new(big.Int) + xBytes := make([]byte, priv.Q.BitLen()/8) + + for { + _, err := io.ReadFull(rand, xBytes) + if err != nil { + return err + } + x.SetBytes(xBytes) + if x.Sign() != 0 && x.Cmp(priv.Q) < 0 { + break + } + } + + priv.X = x + priv.Y = new(big.Int) + priv.Y.Exp(priv.G, x, priv.P) + return nil +} + +// Sign signs an arbitrary length hash (which should be the result of hashing a +// larger message) using the private key, priv. It returns the signature as a +// pair of integers. The security of the private key depends on the entropy of +// rand. 
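Before the Sign implementation that follows, a compact sketch of the full flow this file provides (parameter generation, key generation, signing, verification). It is not part of the patch and mirrors the test file added below, which uses fixed parameters precisely because GenerateParameters is slow:

    package main

    import (
        "crypto/dsa"
        "crypto/rand"
        "fmt"
    )

    func main() {
        var priv dsa.PrivateKey
        // GenerateParameters can take many seconds, as its comment above warns.
        if err := dsa.GenerateParameters(&priv.Parameters, rand.Reader, dsa.L1024N160); err != nil {
            panic(err)
        }
        if err := dsa.GenerateKey(&priv, rand.Reader); err != nil {
            panic(err)
        }
        hashed := []byte("testing") // in practice, the hash of a larger message
        r, s, err := dsa.Sign(rand.Reader, &priv, hashed)
        if err != nil {
            panic(err)
        }
        fmt.Println(dsa.Verify(&priv.PublicKey, hashed, r, s)) // true
    }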
+func Sign(rand io.Reader, priv *PrivateKey, hash []byte) (r, s *big.Int, err os.Error) { + // FIPS 186-3, section 4.6 + + n := priv.Q.BitLen() + if n&7 != 0 { + err = InvalidPublicKeyError + return + } + n >>= 3 + + for { + k := new(big.Int) + buf := make([]byte, n) + for { + _, err = io.ReadFull(rand, buf) + if err != nil { + return + } + k.SetBytes(buf) + if k.Sign() > 0 && k.Cmp(priv.Q) < 0 { + break + } + } + + kInv := new(big.Int).ModInverse(k, priv.Q) + + r = new(big.Int).Exp(priv.G, k, priv.P) + r.Mod(r, priv.Q) + + if r.Sign() == 0 { + continue + } + + if n > len(hash) { + n = len(hash) + } + z := k.SetBytes(hash[:n]) + + s = new(big.Int).Mul(priv.X, r) + s.Add(s, z) + s.Mod(s, priv.Q) + s.Mul(s, kInv) + s.Mod(s, priv.Q) + + if s.Sign() != 0 { + break + } + } + + return +} + +// Verify verifies the signature in r, s of hash using the public key, pub. It +// returns true iff the signature is valid. +func Verify(pub *PublicKey, hash []byte, r, s *big.Int) bool { + // FIPS 186-3, section 4.7 + + if r.Sign() < 1 || r.Cmp(pub.Q) >= 0 { + return false + } + if s.Sign() < 1 || s.Cmp(pub.Q) >= 0 { + return false + } + + w := new(big.Int).ModInverse(s, pub.Q) + + n := pub.Q.BitLen() + if n&7 != 0 { + return false + } + n >>= 3 + + if n > len(hash) { + n = len(hash) + } + z := new(big.Int).SetBytes(hash[:n]) + + u1 := new(big.Int).Mul(z, w) + u1.Mod(u1, pub.Q) + u2 := w.Mul(r, w) + u2.Mod(u2, pub.Q) + v := u1.Exp(pub.G, u1, pub.P) + u2.Exp(pub.Y, u2, pub.P) + v.Mul(v, u2) + v.Mod(v, pub.P) + v.Mod(v, pub.Q) + + return v.Cmp(r) == 0 +} diff --git a/libgo/go/crypto/dsa/dsa_test.go b/libgo/go/crypto/dsa/dsa_test.go new file mode 100644 index 00000000000..deec08dfd8a --- /dev/null +++ b/libgo/go/crypto/dsa/dsa_test.go @@ -0,0 +1,84 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package dsa + +import ( + "big" + "crypto/rand" + "testing" +) + +func testSignAndVerify(t *testing.T, i int, priv *PrivateKey) { + hashed := []byte("testing") + r, s, err := Sign(rand.Reader, priv, hashed) + if err != nil { + t.Errorf("%d: error signing: %s", i, err) + return + } + + if !Verify(&priv.PublicKey, hashed, r, s) { + t.Errorf("%d: Verify failed", i) + } +} + +func testParameterGeneration(t *testing.T, sizes ParameterSizes, L, N int) { + var priv PrivateKey + params := &priv.Parameters + + err := GenerateParameters(params, rand.Reader, sizes) + if err != nil { + t.Errorf("%d: %s", int(sizes), err) + return + } + + if params.P.BitLen() != L { + t.Errorf("%d: params.BitLen got:%d want:%d", int(sizes), params.P.BitLen(), L) + } + + if params.Q.BitLen() != N { + t.Errorf("%d: q.BitLen got:%d want:%d", int(sizes), params.Q.BitLen(), L) + } + + one := new(big.Int) + one.SetInt64(1) + pm1 := new(big.Int).Sub(params.P, one) + quo, rem := new(big.Int).DivMod(pm1, params.Q, new(big.Int)) + if rem.Sign() != 0 { + t.Errorf("%d: p-1 mod q != 0", int(sizes)) + } + x := new(big.Int).Exp(params.G, quo, params.P) + if x.Cmp(one) == 0 { + t.Errorf("%d: invalid generator", int(sizes)) + } + + err = GenerateKey(&priv, rand.Reader) + if err != nil { + t.Errorf("error generating key: %s", err) + return + } + + testSignAndVerify(t, int(sizes), &priv) +} + +func TestParameterGeneration(t *testing.T) { + // This test is too slow to run all the time. 
+ return + + testParameterGeneration(t, L1024N160, 1024, 160) + testParameterGeneration(t, L2048N224, 2048, 224) + testParameterGeneration(t, L2048N256, 2048, 256) + testParameterGeneration(t, L3072N256, 3072, 256) +} + +func TestSignAndVerify(t *testing.T) { + var priv PrivateKey + priv.P, _ = new(big.Int).SetString("A9B5B793FB4785793D246BAE77E8FF63CA52F442DA763C440259919FE1BC1D6065A9350637A04F75A2F039401D49F08E066C4D275A5A65DA5684BC563C14289D7AB8A67163BFBF79D85972619AD2CFF55AB0EE77A9002B0EF96293BDD0F42685EBB2C66C327079F6C98000FBCB79AACDE1BC6F9D5C7B1A97E3D9D54ED7951FEF", 16) + priv.Q, _ = new(big.Int).SetString("E1D3391245933D68A0714ED34BBCB7A1F422B9C1", 16) + priv.G, _ = new(big.Int).SetString("634364FC25248933D01D1993ECABD0657CC0CB2CEED7ED2E3E8AECDFCDC4A25C3B15E9E3B163ACA2984B5539181F3EFF1A5E8903D71D5B95DA4F27202B77D2C44B430BB53741A8D59A8F86887525C9F2A6A5980A195EAA7F2FF910064301DEF89D3AA213E1FAC7768D89365318E370AF54A112EFBA9246D9158386BA1B4EEFDA", 16) + priv.Y, _ = new(big.Int).SetString("32969E5780CFE1C849A1C276D7AEB4F38A23B591739AA2FE197349AEEBD31366AEE5EB7E6C6DDB7C57D02432B30DB5AA66D9884299FAA72568944E4EEDC92EA3FBC6F39F53412FBCC563208F7C15B737AC8910DBC2D9C9B8C001E72FDC40EB694AB1F06A5A2DBD18D9E36C66F31F566742F11EC0A52E9F7B89355C02FB5D32D2", 16) + priv.X, _ = new(big.Int).SetString("5078D4D29795CBE76D3AACFE48C9AF0BCDBEE91A", 16) + + testSignAndVerify(t, 0, &priv) +} diff --git a/libgo/go/crypto/md4/md4.go b/libgo/go/crypto/md4/md4.go index e13c986e686..ee46544a920 100644 --- a/libgo/go/crypto/md4/md4.go +++ b/libgo/go/crypto/md4/md4.go @@ -6,10 +6,15 @@ package md4 import ( + "crypto" "hash" "os" ) +func init() { + crypto.RegisterHash(crypto.MD4, New) +} + // The size of an MD4 checksum in bytes. const Size = 16 diff --git a/libgo/go/crypto/md5/md5.go b/libgo/go/crypto/md5/md5.go index 54fddb63b93..8f93fc4b354 100644 --- a/libgo/go/crypto/md5/md5.go +++ b/libgo/go/crypto/md5/md5.go @@ -6,10 +6,15 @@ package md5 import ( + "crypto" "hash" "os" ) +func init() { + crypto.RegisterHash(crypto.MD5, New) +} + // The size of an MD5 checksum in bytes. 
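The md4 and md5 init hunks above register their constructors with the new crypto package, and the ocsp hunk just below switches to looking hashes up through that registry via crypto.SHA1.New(). A small sketch of the pattern, not part of the patch; it assumes only the RegisterHash/New API shown earlier in this change:

    package main

    import (
        "crypto"
        _ "crypto/md5" // blank import: the package's init registers crypto.MD5
        "fmt"
    )

    func main() {
        h := crypto.MD5.New() // would be nil if crypto/md5 were not linked in
        h.Write([]byte("hello"))
        fmt.Printf("md5(hello) = %x\n", h.Sum())
    }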
const Size = 16 diff --git a/libgo/go/crypto/ocsp/ocsp.go b/libgo/go/crypto/ocsp/ocsp.go index f3fa3bc834c..f42d8088884 100644 --- a/libgo/go/crypto/ocsp/ocsp.go +++ b/libgo/go/crypto/ocsp/ocsp.go @@ -9,8 +9,9 @@ package ocsp import ( "asn1" + "crypto" "crypto/rsa" - "crypto/sha1" + _ "crypto/sha1" "crypto/x509" "os" "time" @@ -168,8 +169,8 @@ func ParseResponse(bytes []byte) (*Response, os.Error) { return nil, x509.UnsupportedAlgorithmError{} } - h := sha1.New() - hashType := rsa.HashSHA1 + hashType := crypto.SHA1 + h := hashType.New() pub := ret.Certificate.PublicKey.(*rsa.PublicKey) h.Write(basicResp.TBSResponseData.Raw) diff --git a/libgo/go/crypto/openpgp/armor/armor.go b/libgo/go/crypto/openpgp/armor/armor.go index 97080f6c6d1..0c5ae9d716c 100644 --- a/libgo/go/crypto/openpgp/armor/armor.go +++ b/libgo/go/crypto/openpgp/armor/armor.go @@ -112,7 +112,7 @@ func (l *lineReader) Read(p []byte) (n int, err os.Error) { return 0, os.EOF } - if len(line) != 64 { + if len(line) > 64 { return 0, ArmorCorrupt } diff --git a/libgo/go/crypto/openpgp/armor/armor_test.go b/libgo/go/crypto/openpgp/armor/armor_test.go index e4ffd414b6a..9334e94e96c 100644 --- a/libgo/go/crypto/openpgp/armor/armor_test.go +++ b/libgo/go/crypto/openpgp/armor/armor_test.go @@ -34,7 +34,7 @@ func TestDecodeEncode(t *testing.T) { t.Error(err) } - if adler32.Checksum(contents) != 0x789d7f00 { + if adler32.Checksum(contents) != 0x27b144be { t.Errorf("contents: got: %x", contents) } @@ -73,13 +73,11 @@ func TestLongHeader(t *testing.T) { const armorExample1 = `-----BEGIN PGP SIGNATURE----- Version: GnuPG v1.4.10 (GNU/Linux) -iQEcBAABAgAGBQJMtFESAAoJEKsQXJGvOPsVj40H/1WW6jaMXv4BW+1ueDSMDwM8 -kx1fLOXbVM5/Kn5LStZNt1jWWnpxdz7eq3uiqeCQjmqUoRde3YbB2EMnnwRbAhpp -cacnAvy9ZQ78OTxUdNW1mhX5bS6q1MTEJnl+DcyigD70HG/yNNQD7sOPMdYQw0TA -byQBwmLwmTsuZsrYqB68QyLHI+DUugn+kX6Hd2WDB62DKa2suoIUIHQQCd/ofwB3 -WfCYInXQKKOSxu2YOg2Eb4kLNhSMc1i9uKUWAH+sdgJh7NBgdoE4MaNtBFkHXRvv -okWuf3+xA9ksp1npSY/mDvgHijmjvtpRDe6iUeqfCn8N9u9CBg8geANgaG8+QA4= -=wfQG +iJwEAAECAAYFAk1Fv/0ACgkQo01+GMIMMbsYTwQAiAw+QAaNfY6WBdplZ/uMAccm +4g+81QPmTSGHnetSb6WBiY13kVzK4HQiZH8JSkmmroMLuGeJwsRTEL4wbjRyUKEt +p1xwUZDECs234F1xiG5enc5SGlRtP7foLBz9lOsjx+LEcA4sTl5/2eZR9zyFZqWW +TxRjs+fJCIFuo71xb1g= +=/teI -----END PGP SIGNATURE-----` const armorLongLine = `-----BEGIN PGP SIGNATURE----- diff --git a/libgo/go/crypto/openpgp/armor/encode.go b/libgo/go/crypto/openpgp/armor/encode.go index 410e734602f..0f7de024127 100644 --- a/libgo/go/crypto/openpgp/armor/encode.go +++ b/libgo/go/crypto/openpgp/armor/encode.go @@ -116,6 +116,7 @@ func (e *encoding) Close() (err os.Error) { if err != nil { return } + e.breaker.Close() var checksumBytes [3]byte checksumBytes[0] = byte(e.crc >> 16) @@ -144,11 +145,9 @@ func Encode(out io.Writer, blockType string, headers map[string]string) (w io.Wr } } - if len(headers) > 0 { - _, err := out.Write(newline) - if err != nil { - return - } + _, err = out.Write(newline) + if err != nil { + return } e := &encoding{ diff --git a/libgo/go/crypto/openpgp/canonical_text.go b/libgo/go/crypto/openpgp/canonical_text.go new file mode 100644 index 00000000000..293eff3542d --- /dev/null +++ b/libgo/go/crypto/openpgp/canonical_text.go @@ -0,0 +1,58 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package openpgp + +import ( + "hash" + "os" +) + +// NewCanonicalTextHash reformats text written to it into the canonical +// form and then applies the hash h. 
See RFC 4880, section 5.2.1. +func NewCanonicalTextHash(h hash.Hash) hash.Hash { + return &canonicalTextHash{h, 0} +} + +type canonicalTextHash struct { + h hash.Hash + s int +} + +var newline = []byte{'\r', '\n'} + +func (cth *canonicalTextHash) Write(buf []byte) (int, os.Error) { + start := 0 + + for i, c := range buf { + switch cth.s { + case 0: + if c == '\r' { + cth.s = 1 + } else if c == '\n' { + cth.h.Write(buf[start:i]) + cth.h.Write(newline) + start = i + 1 + } + case 1: + cth.s = 0 + } + } + + cth.h.Write(buf[start:]) + return len(buf), nil +} + +func (cth *canonicalTextHash) Sum() []byte { + return cth.h.Sum() +} + +func (cth *canonicalTextHash) Reset() { + cth.h.Reset() + cth.s = 0 +} + +func (cth *canonicalTextHash) Size() int { + return cth.h.Size() +} diff --git a/libgo/go/crypto/openpgp/canonical_text_test.go b/libgo/go/crypto/openpgp/canonical_text_test.go new file mode 100644 index 00000000000..69ecf91a835 --- /dev/null +++ b/libgo/go/crypto/openpgp/canonical_text_test.go @@ -0,0 +1,50 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package openpgp + +import ( + "bytes" + "os" + "testing" +) + +type recordingHash struct { + buf *bytes.Buffer +} + +func (r recordingHash) Write(b []byte) (n int, err os.Error) { + return r.buf.Write(b) +} + +func (r recordingHash) Sum() []byte { + return r.buf.Bytes() +} + +func (r recordingHash) Reset() { + panic("shouldn't be called") +} + +func (r recordingHash) Size() int { + panic("shouldn't be called") +} + + +func testCanonicalText(t *testing.T, input, expected string) { + r := recordingHash{bytes.NewBuffer(nil)} + c := NewCanonicalTextHash(r) + c.Write([]byte(input)) + result := c.Sum() + if expected != string(result) { + t.Errorf("input: %x got: %x want: %x", input, result, expected) + } +} + +func TestCanonicalText(t *testing.T) { + testCanonicalText(t, "foo\n", "foo\r\n") + testCanonicalText(t, "foo", "foo") + testCanonicalText(t, "foo\r\n", "foo\r\n") + testCanonicalText(t, "foo\r\nbar", "foo\r\nbar") + testCanonicalText(t, "foo\r\nbar\n\n", "foo\r\nbar\r\n\r\n") +} diff --git a/libgo/go/crypto/openpgp/error/error.go b/libgo/go/crypto/openpgp/error/error.go index 2d80ce3734e..053d1596726 100644 --- a/libgo/go/crypto/openpgp/error/error.go +++ b/libgo/go/crypto/openpgp/error/error.go @@ -5,6 +5,10 @@ // This package contains common error types for the OpenPGP packages. package error +import ( + "strconv" +) + // A StructuralError is returned when OpenPGP data is found to be syntactically // invalid. type StructuralError string @@ -44,3 +48,17 @@ func (ki keyIncorrect) String() string { } var KeyIncorrectError = keyIncorrect(0) + +type unknownIssuer int + +func (unknownIssuer) String() string { + return "signature make by unknown entity" +} + +var UnknownIssuerError = unknownIssuer(0) + +type UnknownPacketTypeError uint8 + +func (upte UnknownPacketTypeError) String() string { + return "unknown OpenPGP packet type: " + strconv.Itoa(int(upte)) +} diff --git a/libgo/go/crypto/openpgp/keys.go b/libgo/go/crypto/openpgp/keys.go new file mode 100644 index 00000000000..ecaa86f2828 --- /dev/null +++ b/libgo/go/crypto/openpgp/keys.go @@ -0,0 +1,280 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package openpgp + +import ( + "crypto/openpgp/error" + "crypto/openpgp/packet" + "io" + "os" +) + +// PublicKeyType is the armor type for a PGP public key. +var PublicKeyType = "PGP PUBLIC KEY BLOCK" + +// An Entity represents the components of an OpenPGP key: a primary public key +// (which must be a signing key), one or more identities claimed by that key, +// and zero or more subkeys, which may be encryption keys. +type Entity struct { + PrimaryKey *packet.PublicKey + PrivateKey *packet.PrivateKey + Identities map[string]*Identity // indexed by Identity.Name + Subkeys []Subkey +} + +// An Identity represents an identity claimed by an Entity and zero or more +// assertions by other entities about that claim. +type Identity struct { + Name string // by convention, has the form "Full Name (comment) " + UserId *packet.UserId + SelfSignature *packet.Signature + Signatures []*packet.Signature +} + +// A Subkey is an additional public key in an Entity. Subkeys can be used for +// encryption. +type Subkey struct { + PublicKey *packet.PublicKey + PrivateKey *packet.PrivateKey + Sig *packet.Signature +} + +// A Key identifies a specific public key in an Entity. This is either the +// Entity's primary key or a subkey. +type Key struct { + Entity *Entity + PublicKey *packet.PublicKey + PrivateKey *packet.PrivateKey + SelfSignature *packet.Signature +} + +// A KeyRing provides access to public and private keys. +type KeyRing interface { + // KeysById returns the set of keys that have the given key id. + KeysById(id uint64) []Key + // DecryptionKeys returns all private keys that are valid for + // decryption. + DecryptionKeys() []Key +} + +// An EntityList contains one or more Entities. +type EntityList []*Entity + +// KeysById returns the set of keys that have the given key id. +func (el EntityList) KeysById(id uint64) (keys []Key) { + for _, e := range el { + if e.PrimaryKey.KeyId == id { + var selfSig *packet.Signature + for _, ident := range e.Identities { + if selfSig == nil { + selfSig = ident.SelfSignature + } else if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId { + selfSig = ident.SelfSignature + break + } + } + keys = append(keys, Key{e, e.PrimaryKey, e.PrivateKey, selfSig}) + } + + for _, subKey := range e.Subkeys { + if subKey.PublicKey.KeyId == id { + keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig}) + } + } + } + return +} + +// DecryptionKeys returns all private keys that are valid for decryption. +func (el EntityList) DecryptionKeys() (keys []Key) { + for _, e := range el { + for _, subKey := range e.Subkeys { + if subKey.PrivateKey != nil && (!subKey.Sig.FlagsValid || subKey.Sig.FlagEncryptStorage || subKey.Sig.FlagEncryptCommunications) { + keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig}) + } + } + } + return +} + +// ReadArmoredKeyRing reads one or more public/private keys from an armor keyring file. +func ReadArmoredKeyRing(r io.Reader) (EntityList, os.Error) { + body, err := readArmored(r, PublicKeyType) + if err != nil { + return nil, err + } + + return ReadKeyRing(body) +} + +// ReadKeyRing reads one or more public/private keys, ignoring unsupported keys. 
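Before the ReadKeyRing body that follows, a hedged sketch of the read-and-lookup flow these entry points give callers. The helper and package names are the editor's, not the library's:

    package openpgputil // hypothetical package name, editor's example

    import (
        "crypto/openpgp"
        "io"
        "os"
    )

    // keysFor loads an armored public keyring and returns the keys matching the
    // given 64-bit key id.
    func keysFor(armored io.Reader, id uint64) ([]openpgp.Key, os.Error) {
        el, err := openpgp.ReadArmoredKeyRing(armored)
        if err != nil {
            return nil, err
        }
        return el.KeysById(id), nil
    }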
+func ReadKeyRing(r io.Reader) (el EntityList, err os.Error) { + packets := packet.NewReader(r) + + for { + var e *Entity + e, err = readEntity(packets) + if err != nil { + if _, ok := err.(error.UnsupportedError); ok { + err = readToNextPublicKey(packets) + } + if err == os.EOF { + err = nil + return + } + if err != nil { + el = nil + return + } + } else { + el = append(el, e) + } + } + return +} + +// readToNextPublicKey reads packets until the start of the entity and leaves +// the first packet of the new entity in the Reader. +func readToNextPublicKey(packets *packet.Reader) (err os.Error) { + var p packet.Packet + for { + p, err = packets.Next() + if err == os.EOF { + return + } else if err != nil { + if _, ok := err.(error.UnsupportedError); ok { + err = nil + continue + } + return + } + + if pk, ok := p.(*packet.PublicKey); ok && !pk.IsSubkey { + packets.Unread(p) + return + } + } + + panic("unreachable") +} + +// readEntity reads an entity (public key, identities, subkeys etc) from the +// given Reader. +func readEntity(packets *packet.Reader) (*Entity, os.Error) { + e := new(Entity) + e.Identities = make(map[string]*Identity) + + p, err := packets.Next() + if err != nil { + return nil, err + } + + var ok bool + if e.PrimaryKey, ok = p.(*packet.PublicKey); !ok { + if e.PrivateKey, ok = p.(*packet.PrivateKey); !ok { + packets.Unread(p) + return nil, error.StructuralError("first packet was not a public/private key") + } else { + e.PrimaryKey = &e.PrivateKey.PublicKey + } + } + + var current *Identity +EachPacket: + for { + p, err := packets.Next() + if err == os.EOF { + break + } else if err != nil { + return nil, err + } + + switch pkt := p.(type) { + case *packet.UserId: + current = new(Identity) + current.Name = pkt.Id + current.UserId = pkt + e.Identities[pkt.Id] = current + p, err = packets.Next() + if err == os.EOF { + err = io.ErrUnexpectedEOF + } + if err != nil { + if _, ok := err.(error.UnsupportedError); ok { + return nil, err + } + return nil, error.StructuralError("identity self-signature invalid: " + err.String()) + } + current.SelfSignature, ok = p.(*packet.Signature) + if !ok { + return nil, error.StructuralError("user ID packet not followed by self signature") + } + if current.SelfSignature.SigType != packet.SigTypePositiveCert { + return nil, error.StructuralError("user ID self-signature with wrong type") + } + if err = e.PrimaryKey.VerifyUserIdSignature(pkt.Id, current.SelfSignature); err != nil { + return nil, error.StructuralError("user ID self-signature invalid: " + err.String()) + } + case *packet.Signature: + if current == nil { + return nil, error.StructuralError("signature packet found before user id packet") + } + current.Signatures = append(current.Signatures, pkt) + case *packet.PrivateKey: + if pkt.IsSubkey == false { + packets.Unread(p) + break EachPacket + } + err = addSubkey(e, packets, &pkt.PublicKey, pkt) + if err != nil { + return nil, err + } + case *packet.PublicKey: + if pkt.IsSubkey == false { + packets.Unread(p) + break EachPacket + } + err = addSubkey(e, packets, pkt, nil) + if err != nil { + return nil, err + } + default: + // we ignore unknown packets + } + } + + if len(e.Identities) == 0 { + return nil, error.StructuralError("entity without any identities") + } + + return e, nil +} + +func addSubkey(e *Entity, packets *packet.Reader, pub *packet.PublicKey, priv *packet.PrivateKey) os.Error { + var subKey Subkey + subKey.PublicKey = pub + subKey.PrivateKey = priv + p, err := packets.Next() + if err == os.EOF { + return io.ErrUnexpectedEOF + 
} + if err != nil { + return error.StructuralError("subkey signature invalid: " + err.String()) + } + var ok bool + subKey.Sig, ok = p.(*packet.Signature) + if !ok { + return error.StructuralError("subkey packet not followed by signature") + } + if subKey.Sig.SigType != packet.SigTypeSubkeyBinding { + return error.StructuralError("subkey signature with wrong type") + } + err = e.PrimaryKey.VerifyKeySignature(subKey.PublicKey, subKey.Sig) + if err != nil { + return error.StructuralError("subkey signature invalid: " + err.String()) + } + e.Subkeys = append(e.Subkeys, subKey) + return nil +} diff --git a/libgo/go/crypto/openpgp/packet/compressed.go b/libgo/go/crypto/openpgp/packet/compressed.go new file mode 100644 index 00000000000..1c15c24c4b6 --- /dev/null +++ b/libgo/go/crypto/openpgp/packet/compressed.go @@ -0,0 +1,39 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "compress/flate" + "compress/zlib" + "crypto/openpgp/error" + "io" + "os" + "strconv" +) + +// Compressed represents a compressed OpenPGP packet. The decompressed contents +// will contain more OpenPGP packets. See RFC 4880, section 5.6. +type Compressed struct { + Body io.Reader +} + +func (c *Compressed) parse(r io.Reader) os.Error { + var buf [1]byte + _, err := readFull(r, buf[:]) + if err != nil { + return err + } + + switch buf[0] { + case 1: + c.Body = flate.NewReader(r) + case 2: + c.Body, err = zlib.NewReader(r) + default: + err = error.UnsupportedError("unknown compression algorithm: " + strconv.Itoa(int(buf[0]))) + } + + return err +} diff --git a/libgo/go/crypto/openpgp/packet/compressed_test.go b/libgo/go/crypto/openpgp/packet/compressed_test.go new file mode 100644 index 00000000000..24fe501edba --- /dev/null +++ b/libgo/go/crypto/openpgp/packet/compressed_test.go @@ -0,0 +1,41 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "encoding/hex" + "os" + "io/ioutil" + "testing" +) + +func TestCompressed(t *testing.T) { + packet, err := Read(readerFromHex(compressedHex)) + if err != nil { + t.Errorf("failed to read Compressed: %s", err) + return + } + + c, ok := packet.(*Compressed) + if !ok { + t.Error("didn't find Compressed packet") + return + } + + contents, err := ioutil.ReadAll(c.Body) + if err != nil && err != os.EOF { + t.Error(err) + return + } + + expected, _ := hex.DecodeString(compressedExpectedHex) + if !bytes.Equal(expected, contents) { + t.Errorf("got:%x want:%x", contents, expected) + } +} + +const compressedHex = "a3013b2d90c4e02b72e25f727e5e496a5e49b11e1700" +const compressedExpectedHex = "cb1062004d14c8fe636f6e74656e74732e0a" diff --git a/libgo/go/crypto/openpgp/packet/encrypted_key.go b/libgo/go/crypto/openpgp/packet/encrypted_key.go new file mode 100644 index 00000000000..b11a9b8301a --- /dev/null +++ b/libgo/go/crypto/openpgp/packet/encrypted_key.go @@ -0,0 +1,66 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "crypto/openpgp/error" + "crypto/rand" + "crypto/rsa" + "encoding/binary" + "io" + "os" + "strconv" +) + +// EncryptedKey represents a public-key encrypted session key. See RFC 4880, +// section 5.1. 
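Before the EncryptedKey definition that follows, a hedged consumer-side sketch mirroring the test file added below; it relies only on packet.Read and DecryptRSA as defined elsewhere in this patch, and the helper and package names are the editor's:

    package openpgputil // hypothetical package name, editor's example

    import (
        "crypto/openpgp/packet"
        "crypto/rsa"
        "io"
        "os"
    )

    // sessionKey reads one packet and, if it is an RSA-encrypted session key,
    // decrypts it with priv.
    func sessionKey(r io.Reader, priv *rsa.PrivateKey) ([]byte, os.Error) {
        p, err := packet.Read(r)
        if err != nil {
            return nil, err
        }
        ek, ok := p.(*packet.EncryptedKey)
        if !ok {
            return nil, os.NewError("expected an encrypted session key packet")
        }
        if err := ek.DecryptRSA(priv); err != nil {
            return nil, err
        }
        return ek.Key, nil // ek.CipherFunc is also valid after a successful decrypt
    }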
+type EncryptedKey struct { + KeyId uint64 + Algo PublicKeyAlgorithm + Encrypted []byte + CipherFunc CipherFunction // only valid after a successful Decrypt + Key []byte // only valid after a successful Decrypt +} + +func (e *EncryptedKey) parse(r io.Reader) (err os.Error) { + var buf [10]byte + _, err = readFull(r, buf[:]) + if err != nil { + return + } + if buf[0] != 3 { + return error.UnsupportedError("unknown EncryptedKey version " + strconv.Itoa(int(buf[0]))) + } + e.KeyId = binary.BigEndian.Uint64(buf[1:9]) + e.Algo = PublicKeyAlgorithm(buf[9]) + if e.Algo == PubKeyAlgoRSA || e.Algo == PubKeyAlgoRSAEncryptOnly { + e.Encrypted, _, err = readMPI(r) + } + _, err = consumeAll(r) + return +} + +// DecryptRSA decrypts an RSA encrypted session key with the given private key. +func (e *EncryptedKey) DecryptRSA(priv *rsa.PrivateKey) (err os.Error) { + if e.Algo != PubKeyAlgoRSA && e.Algo != PubKeyAlgoRSAEncryptOnly { + return error.InvalidArgumentError("EncryptedKey not RSA encrypted") + } + b, err := rsa.DecryptPKCS1v15(rand.Reader, priv, e.Encrypted) + if err != nil { + return + } + e.CipherFunc = CipherFunction(b[0]) + e.Key = b[1 : len(b)-2] + expectedChecksum := uint16(b[len(b)-2])<<8 | uint16(b[len(b)-1]) + var checksum uint16 + for _, v := range e.Key { + checksum += uint16(v) + } + if checksum != expectedChecksum { + return error.StructuralError("EncryptedKey checksum incorrect") + } + + return +} diff --git a/libgo/go/crypto/openpgp/packet/encrypted_key_test.go b/libgo/go/crypto/openpgp/packet/encrypted_key_test.go new file mode 100644 index 00000000000..755ae7a3074 --- /dev/null +++ b/libgo/go/crypto/openpgp/packet/encrypted_key_test.go @@ -0,0 +1,67 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package packet + +import ( + "big" + "crypto/rsa" + "fmt" + "testing" +) + +func bigFromBase10(s string) *big.Int { + b, ok := new(big.Int).SetString(s, 10) + if !ok { + panic("bigFromBase10 failed") + } + return b +} + +func TestEncryptedKey(t *testing.T) { + p, err := Read(readerFromHex(encryptedKeyHex)) + if err != nil { + t.Errorf("error from Read: %s", err) + return + } + ek, ok := p.(*EncryptedKey) + if !ok { + t.Errorf("didn't parse an EncryptedKey, got %#v", p) + return + } + + if ek.KeyId != 0x2a67d68660df41c7 || ek.Algo != PubKeyAlgoRSA { + t.Errorf("unexpected EncryptedKey contents: %#v", ek) + return + } + + pub := rsa.PublicKey{ + E: 65537, + N: bigFromBase10("115804063926007623305902631768113868327816898845124614648849934718568541074358183759250136204762053879858102352159854352727097033322663029387610959884180306668628526686121021235757016368038585212410610742029286439607686208110250133174279811431933746643015923132833417396844716207301518956640020862630546868823"), + } + + priv := &rsa.PrivateKey{ + PublicKey: pub, + D: bigFromBase10("32355588668219869544751561565313228297765464314098552250409557267371233892496951383426602439009993875125222579159850054973310859166139474359774543943714622292329487391199285040721944491839695981199720170366763547754915493640685849961780092241140181198779299712578774460837139360803883139311171713302987058393"), + } + + err = ek.DecryptRSA(priv) + if err != nil { + t.Errorf("error from DecryptRSA: %s", err) + return + } + + if ek.CipherFunc != CipherAES256 { + t.Errorf("unexpected EncryptedKey contents: %#v", ek) + return + } + + keyHex := fmt.Sprintf("%x", ek.Key) + if keyHex != expectedKeyHex { + t.Errorf("bad key, got %s want %x", keyHex, expectedKeyHex) + } +} + +const encryptedKeyHex = "c18c032a67d68660df41c70104005789d0de26b6a50c985a02a13131ca829c413a35d0e6fa8d6842599252162808ac7439c72151c8c6183e76923fe3299301414d0c25a2f06a2257db3839e7df0ec964773f6e4c4ac7ff3b48c444237166dd46ba8ff443a5410dc670cb486672fdbe7c9dfafb75b4fea83af3a204fe2a7dfa86bd20122b4f3d2646cbeecb8f7be8" +const expectedKeyHex = "d930363f7e0308c333b9618617ea728963d8df993665ae7be1092d4926fd864b" diff --git a/libgo/go/crypto/openpgp/packet/literal.go b/libgo/go/crypto/openpgp/packet/literal.go new file mode 100644 index 00000000000..04f50e53e13 --- /dev/null +++ b/libgo/go/crypto/openpgp/packet/literal.go @@ -0,0 +1,53 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "encoding/binary" + "io" + "os" +) + +// LiteralData represents an encrypted file. See RFC 4880, section 5.9. +type LiteralData struct { + IsBinary bool + FileName string + Time uint32 // Unix epoch time. Either creation time or modification time. 0 means undefined. + Body io.Reader +} + +// ForEyesOnly returns whether the contents of the LiteralData have been marked +// as especially sensitive. 
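A hedged sketch of consuming a literal data packet while honouring the ForEyesOnly convention defined next; it uses only fields and functions added by this patch, and the helper and package names are the editor's:

    package openpgputil // hypothetical package name, editor's example

    import (
        "crypto/openpgp/packet"
        "io"
        "io/ioutil"
        "os"
    )

    // literalContents reads one packet and returns the name and body of the
    // literal data it carries, refusing console-only ("_CONSOLE") content.
    func literalContents(r io.Reader) (string, []byte, os.Error) {
        p, err := packet.Read(r)
        if err != nil {
            return "", nil, err
        }
        ld, ok := p.(*packet.LiteralData)
        if !ok {
            return "", nil, os.NewError("expected a literal data packet")
        }
        if ld.ForEyesOnly() {
            return "", nil, os.NewError("contents are marked for your eyes only")
        }
        body, err := ioutil.ReadAll(ld.Body)
        return ld.FileName, body, err
    }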
+func (l *LiteralData) ForEyesOnly() bool { + return l.FileName == "_CONSOLE" +} + +func (l *LiteralData) parse(r io.Reader) (err os.Error) { + var buf [256]byte + + _, err = readFull(r, buf[:2]) + if err != nil { + return + } + + l.IsBinary = buf[0] == 'b' + fileNameLen := int(buf[1]) + + _, err = readFull(r, buf[:fileNameLen]) + if err != nil { + return + } + + l.FileName = string(buf[:fileNameLen]) + + _, err = readFull(r, buf[:4]) + if err != nil { + return + } + + l.Time = binary.BigEndian.Uint32(buf[:4]) + l.Body = r + return +} diff --git a/libgo/go/crypto/openpgp/packet/one_pass_signature.go b/libgo/go/crypto/openpgp/packet/one_pass_signature.go new file mode 100644 index 00000000000..acbf58bbefb --- /dev/null +++ b/libgo/go/crypto/openpgp/packet/one_pass_signature.go @@ -0,0 +1,49 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "crypto" + "crypto/openpgp/error" + "crypto/openpgp/s2k" + "encoding/binary" + "io" + "os" + "strconv" +) + +// OnePassSignature represents a one-pass signature packet. See RFC 4880, +// section 5.4. +type OnePassSignature struct { + SigType SignatureType + Hash crypto.Hash + PubKeyAlgo PublicKeyAlgorithm + KeyId uint64 + IsLast bool +} + +func (ops *OnePassSignature) parse(r io.Reader) (err os.Error) { + var buf [13]byte + + _, err = readFull(r, buf[:]) + if err != nil { + return + } + if buf[0] != 3 { + err = error.UnsupportedError("one-pass-signature packet version " + strconv.Itoa(int(buf[0]))) + } + + var ok bool + ops.Hash, ok = s2k.HashIdToHash(buf[2]) + if !ok { + return error.UnsupportedError("hash function: " + strconv.Itoa(int(buf[2]))) + } + + ops.SigType = SignatureType(buf[1]) + ops.PubKeyAlgo = PublicKeyAlgorithm(buf[3]) + ops.KeyId = binary.BigEndian.Uint64(buf[4:12]) + ops.IsLast = buf[12] != 0 + return +} diff --git a/libgo/go/crypto/openpgp/packet/packet.go b/libgo/go/crypto/openpgp/packet/packet.go new file mode 100644 index 00000000000..269603ba498 --- /dev/null +++ b/libgo/go/crypto/openpgp/packet/packet.go @@ -0,0 +1,395 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This package implements parsing and serialisation of OpenPGP packets, as +// specified in RFC 4880. +package packet + +import ( + "crypto/aes" + "crypto/cast5" + "crypto/cipher" + "crypto/openpgp/error" + "io" + "os" +) + +// readFull is the same as io.ReadFull except that reading zero bytes returns +// ErrUnexpectedEOF rather than EOF. +func readFull(r io.Reader, buf []byte) (n int, err os.Error) { + n, err = io.ReadFull(r, buf) + if err == os.EOF { + err = io.ErrUnexpectedEOF + } + return +} + +// readLength reads an OpenPGP length from r. See RFC 4880, section 4.2.2. 
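Worked examples of the length forms readLength (below) handles, using values that also appear in the readLength tests later in this patch: a first octet below 192 is the length itself (0x1f gives 31); octets 192..223 begin a two-octet form, so c1 01 decodes to ((0xc1 - 192) << 8) + 0x01 + 192 = 449; octets 224..254 give a partial length of 1 << (octet & 0x1f), so 0xe2 means 4 bytes with more chunks to follow; a first octet of 255 is followed by a four-octet big-endian length, so ff 01 02 03 04 decodes to 16909060.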
+func readLength(r io.Reader) (length int64, isPartial bool, err os.Error) { + var buf [4]byte + _, err = readFull(r, buf[:1]) + if err != nil { + return + } + switch { + case buf[0] < 192: + length = int64(buf[0]) + case buf[0] < 224: + length = int64(buf[0]-192) << 8 + _, err = readFull(r, buf[0:1]) + if err != nil { + return + } + length += int64(buf[0]) + 192 + case buf[0] < 255: + length = int64(1) << (buf[0] & 0x1f) + isPartial = true + default: + _, err = readFull(r, buf[0:4]) + if err != nil { + return + } + length = int64(buf[0])<<24 | + int64(buf[1])<<16 | + int64(buf[2])<<8 | + int64(buf[3]) + } + return +} + +// partialLengthReader wraps an io.Reader and handles OpenPGP partial lengths. +// The continuation lengths are parsed and removed from the stream and EOF is +// returned at the end of the packet. See RFC 4880, section 4.2.2.4. +type partialLengthReader struct { + r io.Reader + remaining int64 + isPartial bool +} + +func (r *partialLengthReader) Read(p []byte) (n int, err os.Error) { + for r.remaining == 0 { + if !r.isPartial { + return 0, os.EOF + } + r.remaining, r.isPartial, err = readLength(r.r) + if err != nil { + return 0, err + } + } + + toRead := int64(len(p)) + if toRead > r.remaining { + toRead = r.remaining + } + + n, err = r.r.Read(p[:int(toRead)]) + r.remaining -= int64(n) + if n < int(toRead) && err == os.EOF { + err = io.ErrUnexpectedEOF + } + return +} + +// A spanReader is an io.LimitReader, but it returns ErrUnexpectedEOF if the +// underlying Reader returns EOF before the limit has been reached. +type spanReader struct { + r io.Reader + n int64 +} + +func (l *spanReader) Read(p []byte) (n int, err os.Error) { + if l.n <= 0 { + return 0, os.EOF + } + if int64(len(p)) > l.n { + p = p[0:l.n] + } + n, err = l.r.Read(p) + l.n -= int64(n) + if l.n > 0 && err == os.EOF { + err = io.ErrUnexpectedEOF + } + return +} + +// readHeader parses a packet header and returns an io.Reader which will return +// the contents of the packet. See RFC 4880, section 4.2. +func readHeader(r io.Reader) (tag packetType, length int64, contents io.Reader, err os.Error) { + var buf [4]byte + _, err = io.ReadFull(r, buf[:1]) + if err != nil { + return + } + if buf[0]&0x80 == 0 { + err = error.StructuralError("tag byte does not have MSB set") + return + } + if buf[0]&0x40 == 0 { + // Old format packet + tag = packetType((buf[0] & 0x3f) >> 2) + lengthType := buf[0] & 3 + if lengthType == 3 { + length = -1 + contents = r + return + } + lengthBytes := 1 << lengthType + _, err = readFull(r, buf[0:lengthBytes]) + if err != nil { + return + } + for i := 0; i < lengthBytes; i++ { + length <<= 8 + length |= int64(buf[i]) + } + contents = &spanReader{r, length} + return + } + + // New format packet + tag = packetType(buf[0] & 0x3f) + length, isPartial, err := readLength(r) + if err != nil { + return + } + if isPartial { + contents = &partialLengthReader{ + remaining: length, + isPartial: true, + r: r, + } + length = -1 + } else { + contents = &spanReader{r, length} + } + return +} + +// serialiseHeader writes an OpenPGP packet header to w. See RFC 4880, section +// 4.2. 
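As a concrete pairing of serialiseHeader (below) with readHeader above: a new-format packet of type 3 with a two-byte body is written as the tag octet 0x80|0x40|3 = 0xc3, then the single length octet 0x02, then the body, and the readHeaderTests entry "c3020203" later in this patch parses back to exactly tag 3, length 2, contents 02 03.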
+func serialiseHeader(w io.Writer, ptype packetType, length int) (err os.Error) { + var buf [5]byte + var n int + + buf[0] = 0x80 | 0x40 | byte(ptype) + if length < 192 { + buf[1] = byte(length) + n = 2 + } else if length < 8384 { + length -= 192 + buf[1] = byte(length >> 8) + buf[2] = byte(length) + n = 3 + } else { + buf[0] = 255 + buf[1] = byte(length >> 24) + buf[2] = byte(length >> 16) + buf[3] = byte(length >> 8) + buf[4] = byte(length) + n = 5 + } + + _, err = w.Write(buf[:n]) + return +} + +// Packet represents an OpenPGP packet. Users are expected to try casting +// instances of this interface to specific packet types. +type Packet interface { + parse(io.Reader) os.Error +} + +// consumeAll reads from the given Reader until error, returning the number of +// bytes read. +func consumeAll(r io.Reader) (n int64, err os.Error) { + var m int + var buf [1024]byte + + for { + m, err = r.Read(buf[:]) + n += int64(m) + if err == os.EOF { + err = nil + return + } + if err != nil { + return + } + } + + panic("unreachable") +} + +// packetType represents the numeric ids of the different OpenPGP packet types. See +// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-2 +type packetType uint8 + +const ( + packetTypeEncryptedKey packetType = 1 + packetTypeSignature packetType = 2 + packetTypeSymmetricKeyEncrypted packetType = 3 + packetTypeOnePassSignature packetType = 4 + packetTypePrivateKey packetType = 5 + packetTypePublicKey packetType = 6 + packetTypePrivateSubkey packetType = 7 + packetTypeCompressed packetType = 8 + packetTypeSymmetricallyEncrypted packetType = 9 + packetTypeLiteralData packetType = 11 + packetTypeUserId packetType = 13 + packetTypePublicSubkey packetType = 14 + packetTypeSymmetricallyEncryptedMDC packetType = 18 +) + +// Read reads a single OpenPGP packet from the given io.Reader. If there is an +// error parsing a packet, the whole packet is consumed from the input. +func Read(r io.Reader) (p Packet, err os.Error) { + tag, _, contents, err := readHeader(r) + if err != nil { + return + } + + switch tag { + case packetTypeEncryptedKey: + p = new(EncryptedKey) + case packetTypeSignature: + p = new(Signature) + case packetTypeSymmetricKeyEncrypted: + p = new(SymmetricKeyEncrypted) + case packetTypeOnePassSignature: + p = new(OnePassSignature) + case packetTypePrivateKey, packetTypePrivateSubkey: + pk := new(PrivateKey) + if tag == packetTypePrivateSubkey { + pk.IsSubkey = true + } + p = pk + case packetTypePublicKey, packetTypePublicSubkey: + pk := new(PublicKey) + if tag == packetTypePublicSubkey { + pk.IsSubkey = true + } + p = pk + case packetTypeCompressed: + p = new(Compressed) + case packetTypeSymmetricallyEncrypted: + p = new(SymmetricallyEncrypted) + case packetTypeLiteralData: + p = new(LiteralData) + case packetTypeUserId: + p = new(UserId) + case packetTypeSymmetricallyEncryptedMDC: + se := new(SymmetricallyEncrypted) + se.MDC = true + p = se + default: + err = error.UnknownPacketTypeError(tag) + } + if p != nil { + err = p.parse(contents) + } + if err != nil { + consumeAll(contents) + } + return +} + +// SignatureType represents the different semantic meanings of an OpenPGP +// signature. See RFC 4880, section 5.2.1. 
+type SignatureType uint8 + +const ( + SigTypeBinary SignatureType = 0 + SigTypeText = 1 + SigTypeGenericCert = 0x10 + SigTypePersonaCert = 0x11 + SigTypeCasualCert = 0x12 + SigTypePositiveCert = 0x13 + SigTypeSubkeyBinding = 0x18 +) + +// PublicKeyAlgorithm represents the different public key system specified for +// OpenPGP. See +// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-12 +type PublicKeyAlgorithm uint8 + +const ( + PubKeyAlgoRSA PublicKeyAlgorithm = 1 + PubKeyAlgoRSAEncryptOnly PublicKeyAlgorithm = 2 + PubKeyAlgoRSASignOnly PublicKeyAlgorithm = 3 + PubKeyAlgoElgamal PublicKeyAlgorithm = 16 + PubKeyAlgoDSA PublicKeyAlgorithm = 17 +) + +// CipherFunction represents the different block ciphers specified for OpenPGP. See +// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-13 +type CipherFunction uint8 + +const ( + CipherCAST5 = 3 + CipherAES128 = 7 + CipherAES192 = 8 + CipherAES256 = 9 +) + +// keySize returns the key size, in bytes, of cipher. +func (cipher CipherFunction) keySize() int { + switch cipher { + case CipherCAST5: + return cast5.KeySize + case CipherAES128: + return 16 + case CipherAES192: + return 24 + case CipherAES256: + return 32 + } + return 0 +} + +// blockSize returns the block size, in bytes, of cipher. +func (cipher CipherFunction) blockSize() int { + switch cipher { + case CipherCAST5: + return 8 + case CipherAES128, CipherAES192, CipherAES256: + return 16 + } + return 0 +} + +// new returns a fresh instance of the given cipher. +func (cipher CipherFunction) new(key []byte) (block cipher.Block) { + switch cipher { + case CipherCAST5: + block, _ = cast5.NewCipher(key) + case CipherAES128, CipherAES192, CipherAES256: + block, _ = aes.NewCipher(key) + } + return +} + +// readMPI reads a big integer from r. The bit length returned is the bit +// length that was specified in r. This is preserved so that the integer can be +// reserialised exactly. +func readMPI(r io.Reader) (mpi []byte, bitLength uint16, err os.Error) { + var buf [2]byte + _, err = readFull(r, buf[0:]) + if err != nil { + return + } + bitLength = uint16(buf[0])<<8 | uint16(buf[1]) + numBytes := (int(bitLength) + 7) / 8 + mpi = make([]byte, numBytes) + _, err = readFull(r, mpi) + return +} + +// writeMPI serialises a big integer to r. +func writeMPI(w io.Writer, bitLength uint16, mpiBytes []byte) (err os.Error) { + _, err = w.Write([]byte{byte(bitLength >> 8), byte(bitLength)}) + if err == nil { + _, err = w.Write(mpiBytes) + } + return +} diff --git a/libgo/go/crypto/openpgp/packet/packet_test.go b/libgo/go/crypto/openpgp/packet/packet_test.go new file mode 100644 index 00000000000..6789d2abc79 --- /dev/null +++ b/libgo/go/crypto/openpgp/packet/packet_test.go @@ -0,0 +1,192 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package packet + +import ( + "bytes" + "crypto/openpgp/error" + "encoding/hex" + "fmt" + "io" + "io/ioutil" + "os" + "testing" +) + +func TestReadFull(t *testing.T) { + var out [4]byte + + b := bytes.NewBufferString("foo") + n, err := readFull(b, out[:3]) + if n != 3 || err != nil { + t.Errorf("full read failed n:%d err:%s", n, err) + } + + b = bytes.NewBufferString("foo") + n, err = readFull(b, out[:4]) + if n != 3 || err != io.ErrUnexpectedEOF { + t.Errorf("partial read failed n:%d err:%s", n, err) + } + + b = bytes.NewBuffer(nil) + n, err = readFull(b, out[:3]) + if n != 0 || err != io.ErrUnexpectedEOF { + t.Errorf("empty read failed n:%d err:%s", n, err) + } +} + +func readerFromHex(s string) io.Reader { + data, err := hex.DecodeString(s) + if err != nil { + panic("readerFromHex: bad input") + } + return bytes.NewBuffer(data) +} + +var readLengthTests = []struct { + hexInput string + length int64 + isPartial bool + err os.Error +}{ + {"", 0, false, io.ErrUnexpectedEOF}, + {"1f", 31, false, nil}, + {"c0", 0, false, io.ErrUnexpectedEOF}, + {"c101", 256 + 1 + 192, false, nil}, + {"e0", 1, true, nil}, + {"e1", 2, true, nil}, + {"e2", 4, true, nil}, + {"ff", 0, false, io.ErrUnexpectedEOF}, + {"ff00", 0, false, io.ErrUnexpectedEOF}, + {"ff0000", 0, false, io.ErrUnexpectedEOF}, + {"ff000000", 0, false, io.ErrUnexpectedEOF}, + {"ff00000000", 0, false, nil}, + {"ff01020304", 16909060, false, nil}, +} + +func TestReadLength(t *testing.T) { + for i, test := range readLengthTests { + length, isPartial, err := readLength(readerFromHex(test.hexInput)) + if test.err != nil { + if err != test.err { + t.Errorf("%d: expected different error got:%s want:%s", i, err, test.err) + } + continue + } + if err != nil { + t.Errorf("%d: unexpected error: %s", i, err) + continue + } + if length != test.length || isPartial != test.isPartial { + t.Errorf("%d: bad result got:(%d,%t) want:(%d,%t)", i, length, isPartial, test.length, test.isPartial) + } + } +} + +var partialLengthReaderTests = []struct { + hexInput string + err os.Error + hexOutput string +}{ + {"e0", io.ErrUnexpectedEOF, ""}, + {"e001", io.ErrUnexpectedEOF, ""}, + {"e0010102", nil, "0102"}, + {"ff00000000", nil, ""}, + {"e10102e1030400", nil, "01020304"}, + {"e101", io.ErrUnexpectedEOF, ""}, +} + +func TestPartialLengthReader(t *testing.T) { + for i, test := range partialLengthReaderTests { + r := &partialLengthReader{readerFromHex(test.hexInput), 0, true} + out, err := ioutil.ReadAll(r) + if test.err != nil { + if err != test.err { + t.Errorf("%d: expected different error got:%s want:%s", i, err, test.err) + } + continue + } + if err != nil { + t.Errorf("%d: unexpected error: %s", i, err) + continue + } + + got := fmt.Sprintf("%x", out) + if got != test.hexOutput { + t.Errorf("%d: got:%s want:%s", i, test.hexOutput, got) + } + } +} + +var readHeaderTests = []struct { + hexInput string + structuralError bool + unexpectedEOF bool + tag int + length int64 + hexOutput string +}{ + {"", false, false, 0, 0, ""}, + {"7f", true, false, 0, 0, ""}, + + // Old format headers + {"80", false, true, 0, 0, ""}, + {"8001", false, true, 0, 1, ""}, + {"800102", false, false, 0, 1, "02"}, + {"81000102", false, false, 0, 1, "02"}, + {"820000000102", false, false, 0, 1, "02"}, + {"860000000102", false, false, 1, 1, "02"}, + {"83010203", false, false, 0, -1, "010203"}, + + // New format headers + {"c0", false, true, 0, 0, ""}, + {"c000", false, false, 0, 0, ""}, + {"c00102", false, false, 0, 1, "02"}, + {"c0020203", false, false, 0, 2, "0203"}, + {"c00202", false, 
true, 0, 2, ""}, + {"c3020203", false, false, 3, 2, "0203"}, +} + +func TestReadHeader(t *testing.T) { + for i, test := range readHeaderTests { + tag, length, contents, err := readHeader(readerFromHex(test.hexInput)) + if test.structuralError { + if _, ok := err.(error.StructuralError); ok { + continue + } + t.Errorf("%d: expected StructuralError, got:%s", i, err) + continue + } + if err != nil { + if len(test.hexInput) == 0 && err == os.EOF { + continue + } + if !test.unexpectedEOF || err != io.ErrUnexpectedEOF { + t.Errorf("%d: unexpected error from readHeader: %s", i, err) + } + continue + } + if int(tag) != test.tag || length != test.length { + t.Errorf("%d: got:(%d,%d) want:(%d,%d)", i, int(tag), length, test.tag, test.length) + continue + } + + body, err := ioutil.ReadAll(contents) + if err != nil { + if !test.unexpectedEOF || err != io.ErrUnexpectedEOF { + t.Errorf("%d: unexpected error from contents: %s", i, err) + } + continue + } + if test.unexpectedEOF { + t.Errorf("%d: expected ErrUnexpectedEOF from contents but got no error", i) + continue + } + got := fmt.Sprintf("%x", body) + if got != test.hexOutput { + t.Errorf("%d: got:%s want:%s", i, got, test.hexOutput) + } + } +} diff --git a/libgo/go/crypto/openpgp/packet/private_key.go b/libgo/go/crypto/openpgp/packet/private_key.go new file mode 100644 index 00000000000..b22891755e3 --- /dev/null +++ b/libgo/go/crypto/openpgp/packet/private_key.go @@ -0,0 +1,164 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "big" + "bytes" + "crypto/cipher" + "crypto/openpgp/error" + "crypto/openpgp/s2k" + "crypto/rsa" + "crypto/sha1" + "io" + "io/ioutil" + "os" + "strconv" +) + +// PrivateKey represents a possibly encrypted private key. See RFC 4880, +// section 5.5.3. +type PrivateKey struct { + PublicKey + Encrypted bool // if true then the private key is unavailable until Decrypt has been called. + encryptedData []byte + cipher CipherFunction + s2k func(out, in []byte) + PrivateKey interface{} // An *rsa.PrivateKey. + sha1Checksum bool + iv []byte +} + +func (pk *PrivateKey) parse(r io.Reader) (err os.Error) { + err = (&pk.PublicKey).parse(r) + if err != nil { + return + } + var buf [1]byte + _, err = readFull(r, buf[:]) + if err != nil { + return + } + + s2kType := buf[0] + + switch s2kType { + case 0: + pk.s2k = nil + pk.Encrypted = false + case 254, 255: + _, err = readFull(r, buf[:]) + if err != nil { + return + } + pk.cipher = CipherFunction(buf[0]) + pk.Encrypted = true + pk.s2k, err = s2k.Parse(r) + if err != nil { + return + } + if s2kType == 254 { + pk.sha1Checksum = true + } + default: + return error.UnsupportedError("deprecated s2k function in private key") + } + + if pk.Encrypted { + blockSize := pk.cipher.blockSize() + if blockSize == 0 { + return error.UnsupportedError("unsupported cipher in private key: " + strconv.Itoa(int(pk.cipher))) + } + pk.iv = make([]byte, blockSize) + _, err = readFull(r, pk.iv) + if err != nil { + return + } + } + + pk.encryptedData, err = ioutil.ReadAll(r) + if err != nil { + return + } + + if !pk.Encrypted { + return pk.parsePrivateKey(pk.encryptedData) + } + + return +} + +// Decrypt decrypts an encrypted private key using a passphrase. 
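+// A minimal usage sketch (for illustration only; r is assumed to carry a
+// private key packet and error handling is elided):
+//
+//    p, _ := Read(r)
+//    pk := p.(*PrivateKey)
+//    if pk.Encrypted {
+//        pk.Decrypt([]byte("passphrase"))
+//    }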
+func (pk *PrivateKey) Decrypt(passphrase []byte) os.Error { + if !pk.Encrypted { + return nil + } + + key := make([]byte, pk.cipher.keySize()) + pk.s2k(key, passphrase) + block := pk.cipher.new(key) + cfb := cipher.NewCFBDecrypter(block, pk.iv) + + data := pk.encryptedData + cfb.XORKeyStream(data, data) + + if pk.sha1Checksum { + if len(data) < sha1.Size { + return error.StructuralError("truncated private key data") + } + h := sha1.New() + h.Write(data[:len(data)-sha1.Size]) + sum := h.Sum() + if !bytes.Equal(sum, data[len(data)-sha1.Size:]) { + return error.StructuralError("private key checksum failure") + } + data = data[:len(data)-sha1.Size] + } else { + if len(data) < 2 { + return error.StructuralError("truncated private key data") + } + var sum uint16 + for i := 0; i < len(data)-2; i++ { + sum += uint16(data[i]) + } + if data[len(data)-2] != uint8(sum>>8) || + data[len(data)-1] != uint8(sum) { + return error.StructuralError("private key checksum failure") + } + data = data[:len(data)-2] + } + + return pk.parsePrivateKey(data) +} + +func (pk *PrivateKey) parsePrivateKey(data []byte) (err os.Error) { + // TODO(agl): support DSA and ECDSA private keys. + rsaPub := pk.PublicKey.PublicKey.(*rsa.PublicKey) + rsaPriv := new(rsa.PrivateKey) + rsaPriv.PublicKey = *rsaPub + + buf := bytes.NewBuffer(data) + d, _, err := readMPI(buf) + if err != nil { + return + } + p, _, err := readMPI(buf) + if err != nil { + return + } + q, _, err := readMPI(buf) + if err != nil { + return + } + + rsaPriv.D = new(big.Int).SetBytes(d) + rsaPriv.P = new(big.Int).SetBytes(p) + rsaPriv.Q = new(big.Int).SetBytes(q) + pk.PrivateKey = rsaPriv + pk.Encrypted = false + pk.encryptedData = nil + + return nil +} diff --git a/libgo/go/crypto/openpgp/packet/private_key_test.go b/libgo/go/crypto/openpgp/packet/private_key_test.go new file mode 100644 index 00000000000..e941cc735cf --- /dev/null +++ b/libgo/go/crypto/openpgp/packet/private_key_test.go @@ -0,0 +1,37 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package packet + +import ( + "testing" +) + +func TestPrivateKeyRead(t *testing.T) { + packet, err := Read(readerFromHex(privKeyHex)) + if err != nil { + t.Error(err) + return + } + + privKey := packet.(*PrivateKey) + + if !privKey.Encrypted { + t.Error("private key isn't encrypted") + return + } + + err = privKey.Decrypt([]byte("testing")) + if err != nil { + t.Error(err) + return + } + + if privKey.CreationTime != 0x4cc349a8 || privKey.Encrypted { + t.Errorf("failed to parse, got: %#v", privKey) + } +} + +// Generated with `gpg --export-secret-keys "Test Key 2"` +const privKeyHex = "9501fe044cc349a8010400b70ca0010e98c090008d45d1ee8f9113bd5861fd57b88bacb7c68658747663f1e1a3b5a98f32fda6472373c024b97359cd2efc88ff60f77751adfbf6af5e615e6a1408cfad8bf0cea30b0d5f53aa27ad59089ba9b15b7ebc2777a25d7b436144027e3bcd203909f147d0e332b240cf63d3395f5dfe0df0a6c04e8655af7eacdf0011010001fe0303024a252e7d475fd445607de39a265472aa74a9320ba2dac395faa687e9e0336aeb7e9a7397e511b5afd9dc84557c80ac0f3d4d7bfec5ae16f20d41c8c84a04552a33870b930420e230e179564f6d19bb153145e76c33ae993886c388832b0fa042ddda7f133924f3854481533e0ede31d51278c0519b29abc3bf53da673e13e3e1214b52413d179d7f66deee35cac8eacb060f78379d70ef4af8607e68131ff529439668fc39c9ce6dfef8a5ac234d234802cbfb749a26107db26406213ae5c06d4673253a3cbee1fcbae58d6ab77e38d6e2c0e7c6317c48e054edadb5a40d0d48acb44643d998139a8a66bb820be1f3f80185bc777d14b5954b60effe2448a036d565c6bc0b915fcea518acdd20ab07bc1529f561c58cd044f723109b93f6fd99f876ff891d64306b5d08f48bab59f38695e9109c4dec34013ba3153488ce070268381ba923ee1eb77125b36afcb4347ec3478c8f2735b06ef17351d872e577fa95d0c397c88c71b59629a36aec" diff --git a/libgo/go/crypto/openpgp/packet/public_key.go b/libgo/go/crypto/openpgp/packet/public_key.go new file mode 100644 index 00000000000..8866bdaaa94 --- /dev/null +++ b/libgo/go/crypto/openpgp/packet/public_key.go @@ -0,0 +1,260 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "big" + "crypto/dsa" + "crypto/openpgp/error" + "crypto/rsa" + "crypto/sha1" + "encoding/binary" + "hash" + "io" + "os" +) + +// PublicKey represents an OpenPGP public key. See RFC 4880, section 5.5.2. +type PublicKey struct { + CreationTime uint32 // seconds since the epoch + PubKeyAlgo PublicKeyAlgorithm + PublicKey interface{} // Either a *rsa.PublicKey or *dsa.PublicKey + Fingerprint [20]byte + KeyId uint64 + IsSubkey bool + + n, e, p, q, g, y parsedMPI +} + +func (pk *PublicKey) parse(r io.Reader) (err os.Error) { + // RFC 4880, section 5.5.2 + var buf [6]byte + _, err = readFull(r, buf[:]) + if err != nil { + return + } + if buf[0] != 4 { + return error.UnsupportedError("public key version") + } + pk.CreationTime = uint32(buf[1])<<24 | uint32(buf[2])<<16 | uint32(buf[3])<<8 | uint32(buf[4]) + pk.PubKeyAlgo = PublicKeyAlgorithm(buf[5]) + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + err = pk.parseRSA(r) + case PubKeyAlgoDSA: + err = pk.parseDSA(r) + default: + err = error.UnsupportedError("public key type") + } + if err != nil { + return + } + + // RFC 4880, section 12.2 + fingerPrint := sha1.New() + pk.SerializeSignaturePrefix(fingerPrint) + pk.Serialize(fingerPrint) + copy(pk.Fingerprint[:], fingerPrint.Sum()) + pk.KeyId = binary.BigEndian.Uint64(pk.Fingerprint[12:20]) + + return +} + +// parseRSA parses RSA public key material from the given Reader. See RFC 4880, +// section 5.5.2. 
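+// Each field is an MPI: a two-octet big-endian bit count followed by
+// (bitLength+7)/8 octets holding the big-endian value (see readMPI). For
+// example, the octets 00 09 01 ff encode the 9-bit value 511.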
+func (pk *PublicKey) parseRSA(r io.Reader) (err os.Error) {
+    pk.n.bytes, pk.n.bitLength, err = readMPI(r)
+    if err != nil {
+        return
+    }
+    pk.e.bytes, pk.e.bitLength, err = readMPI(r)
+    if err != nil {
+        return
+    }
+
+    if len(pk.e.bytes) > 3 {
+        err = error.UnsupportedError("large public exponent")
+        return
+    }
+    rsa := &rsa.PublicKey{
+        N: new(big.Int).SetBytes(pk.n.bytes),
+        E: 0,
+    }
+    for i := 0; i < len(pk.e.bytes); i++ {
+        rsa.E <<= 8
+        rsa.E |= int(pk.e.bytes[i])
+    }
+    pk.PublicKey = rsa
+    return
+}
+
+// parseDSA parses DSA public key material from the given Reader. See RFC 4880,
+// section 5.5.2.
+func (pk *PublicKey) parseDSA(r io.Reader) (err os.Error) {
+    pk.p.bytes, pk.p.bitLength, err = readMPI(r)
+    if err != nil {
+        return
+    }
+    pk.q.bytes, pk.q.bitLength, err = readMPI(r)
+    if err != nil {
+        return
+    }
+    pk.g.bytes, pk.g.bitLength, err = readMPI(r)
+    if err != nil {
+        return
+    }
+    pk.y.bytes, pk.y.bitLength, err = readMPI(r)
+    if err != nil {
+        return
+    }
+
+    dsa := new(dsa.PublicKey)
+    dsa.P = new(big.Int).SetBytes(pk.p.bytes)
+    dsa.Q = new(big.Int).SetBytes(pk.q.bytes)
+    dsa.G = new(big.Int).SetBytes(pk.g.bytes)
+    dsa.Y = new(big.Int).SetBytes(pk.y.bytes)
+    pk.PublicKey = dsa
+    return
+}
+
+// SerializeSignaturePrefix writes the prefix for this public key to the given Writer.
+// The prefix is used when calculating a signature over this public key. See
+// RFC 4880, section 5.2.4.
+func (pk *PublicKey) SerializeSignaturePrefix(h hash.Hash) {
+    var pLength uint16
+    switch pk.PubKeyAlgo {
+    case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
+        pLength += 2 + uint16(len(pk.n.bytes))
+        pLength += 2 + uint16(len(pk.e.bytes))
+    case PubKeyAlgoDSA:
+        pLength += 2 + uint16(len(pk.p.bytes))
+        pLength += 2 + uint16(len(pk.q.bytes))
+        pLength += 2 + uint16(len(pk.g.bytes))
+        pLength += 2 + uint16(len(pk.y.bytes))
+    default:
+        panic("unknown public key algorithm")
+    }
+    pLength += 6
+    h.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)})
+    return
+}
+
+// Serialize marshals the PublicKey to w in the form of an OpenPGP public key
+// packet, not including the packet header.
+func (pk *PublicKey) Serialize(w io.Writer) (err os.Error) {
+    var buf [6]byte
+    buf[0] = 4
+    buf[1] = byte(pk.CreationTime >> 24)
+    buf[2] = byte(pk.CreationTime >> 16)
+    buf[3] = byte(pk.CreationTime >> 8)
+    buf[4] = byte(pk.CreationTime)
+    buf[5] = byte(pk.PubKeyAlgo)
+
+    _, err = w.Write(buf[:])
+    if err != nil {
+        return
+    }
+
+    switch pk.PubKeyAlgo {
+    case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
+        return writeMPIs(w, pk.n, pk.e)
+    case PubKeyAlgoDSA:
+        return writeMPIs(w, pk.p, pk.q, pk.g, pk.y)
+    }
+    return error.InvalidArgumentError("bad public-key algorithm")
+}
+
+// CanSign returns true iff this public key can generate signatures.
+func (pk *PublicKey) CanSign() bool {
+    return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly && pk.PubKeyAlgo != PubKeyAlgoElgamal
+}
+
+// VerifySignature returns nil iff sig is a valid signature, made by this
+// public key, of the data hashed into signed. signed is mutated by this call.
+func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err os.Error) {
+    if !pk.CanSign() {
+        return error.InvalidArgumentError("public key cannot generate signatures")
+    }
+
+    rsaPublicKey, ok := pk.PublicKey.(*rsa.PublicKey)
+    if !ok {
+        // TODO(agl): support DSA and ECDSA keys.
+        return error.UnsupportedError("non-RSA public key")
+    }
+
+    signed.Write(sig.HashSuffix)
+    hashBytes := signed.Sum()
+
+    if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] {
+        return error.SignatureError("hash tag doesn't match")
+    }
+
+    err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, sig.Signature)
+    if err != nil {
+        return error.SignatureError("RSA verification failure")
+    }
+    return nil
+}
+
+// VerifyKeySignature returns nil iff sig is a valid signature, made by this
+// public key, of the public key in signed.
+func (pk *PublicKey) VerifyKeySignature(signed *PublicKey, sig *Signature) (err os.Error) {
+    h := sig.Hash.New()
+    if h == nil {
+        return error.UnsupportedError("hash function")
+    }
+
+    // RFC 4880, section 5.2.4
+    pk.SerializeSignaturePrefix(h)
+    pk.Serialize(h)
+    signed.SerializeSignaturePrefix(h)
+    signed.Serialize(h)
+
+    return pk.VerifySignature(h, sig)
+}
+
+// VerifyUserIdSignature returns nil iff sig is a valid signature, made by this
+// public key, of the given user id.
+func (pk *PublicKey) VerifyUserIdSignature(id string, sig *Signature) (err os.Error) {
+    h := sig.Hash.New()
+    if h == nil {
+        return error.UnsupportedError("hash function")
+    }
+
+    // RFC 4880, section 5.2.4
+    pk.SerializeSignaturePrefix(h)
+    pk.Serialize(h)
+
+    var buf [5]byte
+    buf[0] = 0xb4
+    buf[1] = byte(len(id) >> 24)
+    buf[2] = byte(len(id) >> 16)
+    buf[3] = byte(len(id) >> 8)
+    buf[4] = byte(len(id))
+    h.Write(buf[:])
+    h.Write([]byte(id))
+
+    return pk.VerifySignature(h, sig)
+}
+
+// A parsedMPI is used to store the contents of a big integer, along with the
+// bit length that was specified in the original input. This allows the MPI to
+// be reserialised exactly.
+type parsedMPI struct {
+    bytes     []byte
+    bitLength uint16
+}
+
+// writeMPIs is a utility function for serialising several big integers to the
+// given Writer.
+func writeMPIs(w io.Writer, mpis ...parsedMPI) (err os.Error) {
+    for _, mpi := range mpis {
+        err = writeMPI(w, mpi.bitLength, mpi.bytes)
+        if err != nil {
+            return
+        }
+    }
+    return
+}
diff --git a/libgo/go/crypto/openpgp/packet/public_key_test.go b/libgo/go/crypto/openpgp/packet/public_key_test.go
new file mode 100644
index 00000000000..c015f64aec9
--- /dev/null
+++ b/libgo/go/crypto/openpgp/packet/public_key_test.go
@@ -0,0 +1,58 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +package packet + +import ( + "bytes" + "encoding/hex" + "testing" +) + +var pubKeyTests = []struct { + hexData string + hexFingerprint string + creationTime uint32 + pubKeyAlgo PublicKeyAlgorithm + keyId uint64 +}{ + {rsaPkDataHex, rsaFingerprintHex, 0x4d3c5c10, PubKeyAlgoRSA, 0xa34d7e18c20c31bb}, + {dsaPkDataHex, dsaFingerprintHex, 0x4d432f89, PubKeyAlgoDSA, 0x8e8fbe54062f19ed}, +} + +func TestPublicKeyRead(t *testing.T) { + for i, test := range pubKeyTests { + packet, err := Read(readerFromHex(test.hexData)) + if err != nil { + t.Errorf("#%d: Read error: %s", i, err) + return + } + pk, ok := packet.(*PublicKey) + if !ok { + t.Errorf("#%d: failed to parse, got: %#v", i, packet) + return + } + if pk.PubKeyAlgo != test.pubKeyAlgo { + t.Errorf("#%d: bad public key algorithm got:%x want:%x", i, pk.PubKeyAlgo, test.pubKeyAlgo) + } + if pk.CreationTime != test.creationTime { + t.Errorf("#%d: bad creation time got:%x want:%x", i, pk.CreationTime, test.creationTime) + } + expectedFingerprint, _ := hex.DecodeString(test.hexFingerprint) + if !bytes.Equal(expectedFingerprint, pk.Fingerprint[:]) { + t.Errorf("#%d: bad fingerprint got:%x want:%x", i, pk.Fingerprint[:], expectedFingerprint) + } + if pk.KeyId != test.keyId { + t.Errorf("#%d: bad keyid got:%x want:%x", i, pk.KeyId, test.keyId) + } + } +} + +const rsaFingerprintHex = "5fb74b1d03b1e3cb31bc2f8aa34d7e18c20c31bb" + +const rsaPkDataHex = "988d044d3c5c10010400b1d13382944bd5aba23a4312968b5095d14f947f600eb478e14a6fcb16b0e0cac764884909c020bc495cfcc39a935387c661507bdb236a0612fb582cac3af9b29cc2c8c70090616c41b662f4da4c1201e195472eb7f4ae1ccbcbf9940fe21d985e379a5563dde5b9a23d35f1cfaa5790da3b79db26f23695107bfaca8e7b5bcd0011010001" + +const dsaFingerprintHex = "eece4c094db002103714c63c8e8fbe54062f19ed" + +const dsaPkDataHex = "9901a2044d432f89110400cd581334f0d7a1e1bdc8b9d6d8c0baf68793632735d2bb0903224cbaa1dfbf35a60ee7a13b92643421e1eb41aa8d79bea19a115a677f6b8ba3c7818ce53a6c2a24a1608bd8b8d6e55c5090cbde09dd26e356267465ae25e69ec8bdd57c7bbb2623e4d73336f73a0a9098f7f16da2e25252130fd694c0e8070c55a812a423ae7f00a0ebf50e70c2f19c3520a551bd4b08d30f23530d3d03ff7d0bf4a53a64a09dc5e6e6e35854b7d70c882b0c60293401958b1bd9e40abec3ea05ba87cf64899299d4bd6aa7f459c201d3fbbd6c82004bdc5e8a9eb8082d12054cc90fa9d4ec251a843236a588bf49552441817436c4f43326966fe85447d4e6d0acf8fa1ef0f014730770603ad7634c3088dc52501c237328417c31c89ed70400b2f1a98b0bf42f11fefc430704bebbaa41d9f355600c3facee1e490f64208e0e094ea55e3a598a219a58500bf78ac677b670a14f4e47e9cf8eab4f368cc1ddcaa18cc59309d4cc62dd4f680e73e6cc3e1ce87a84d0925efbcb26c575c093fc42eecf45135fabf6403a25c2016e1774c0484e440a18319072c617cc97ac0a3bb0" diff --git a/libgo/go/crypto/openpgp/packet/reader.go b/libgo/go/crypto/openpgp/packet/reader.go new file mode 100644 index 00000000000..5febc3bc8dc --- /dev/null +++ b/libgo/go/crypto/openpgp/packet/reader.go @@ -0,0 +1,63 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "crypto/openpgp/error" + "io" + "os" +) + +// Reader reads packets from an io.Reader and allows packets to be 'unread' so +// that they result from the next call to Next. +type Reader struct { + q []Packet + readers []io.Reader +} + +// Next returns the most recently unread Packet, or reads another packet from +// the top-most io.Reader. Unknown packet types are skipped. 
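+// A minimal usage sketch (for illustration only; r is assumed to carry a
+// stream of OpenPGP packets and error handling is elided):
+//
+//    pr := NewReader(r)
+//    for {
+//        p, err := pr.Next()
+//        if err == os.EOF {
+//            break
+//        }
+//        // inspect p, e.g. with a type switch
+//    }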
+func (r *Reader) Next() (p Packet, err os.Error) { + if len(r.q) > 0 { + p = r.q[len(r.q)-1] + r.q = r.q[:len(r.q)-1] + return + } + + for len(r.readers) > 0 { + p, err = Read(r.readers[len(r.readers)-1]) + if err == nil { + return + } + if err == os.EOF { + r.readers = r.readers[:len(r.readers)-1] + continue + } + if _, ok := err.(error.UnknownPacketTypeError); !ok { + return nil, err + } + } + + return nil, os.EOF +} + +// Push causes the Reader to start reading from a new io.Reader. When an EOF +// error is seen from the new io.Reader, it is popped and the Reader continues +// to read from the next most recent io.Reader. +func (r *Reader) Push(reader io.Reader) { + r.readers = append(r.readers, reader) +} + +// Unread causes the given Packet to be returned from the next call to Next. +func (r *Reader) Unread(p Packet) { + r.q = append(r.q, p) +} + +func NewReader(r io.Reader) *Reader { + return &Reader{ + q: nil, + readers: []io.Reader{r}, + } +} diff --git a/libgo/go/crypto/openpgp/packet/signature.go b/libgo/go/crypto/openpgp/packet/signature.go new file mode 100644 index 00000000000..fd2518ab41e --- /dev/null +++ b/libgo/go/crypto/openpgp/packet/signature.go @@ -0,0 +1,468 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "crypto" + "crypto/openpgp/error" + "crypto/openpgp/s2k" + "crypto/rand" + "crypto/rsa" + "encoding/binary" + "hash" + "io" + "os" + "strconv" +) + +// Signature represents a signature. See RFC 4880, section 5.2. +type Signature struct { + SigType SignatureType + PubKeyAlgo PublicKeyAlgorithm + Hash crypto.Hash + + // HashSuffix is extra data that is hashed in after the signed data. + HashSuffix []byte + // HashTag contains the first two bytes of the hash for fast rejection + // of bad signed data. + HashTag [2]byte + CreationTime uint32 // Unix epoch time + Signature []byte + + // The following are optional so are nil when not included in the + // signature. + + SigLifetimeSecs, KeyLifetimeSecs *uint32 + PreferredSymmetric, PreferredHash, PreferredCompression []uint8 + IssuerKeyId *uint64 + IsPrimaryId *bool + + // FlagsValid is set if any flags were given. See RFC 4880, section + // 5.2.3.21 for details. 
+ FlagsValid bool + FlagCertify, FlagSign, FlagEncryptCommunications, FlagEncryptStorage bool + + outSubpackets []outputSubpacket +} + +func (sig *Signature) parse(r io.Reader) (err os.Error) { + // RFC 4880, section 5.2.3 + var buf [5]byte + _, err = readFull(r, buf[:1]) + if err != nil { + return + } + if buf[0] != 4 { + err = error.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0]))) + return + } + + _, err = readFull(r, buf[:5]) + if err != nil { + return + } + sig.SigType = SignatureType(buf[0]) + sig.PubKeyAlgo = PublicKeyAlgorithm(buf[1]) + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + default: + err = error.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo))) + return + } + + var ok bool + sig.Hash, ok = s2k.HashIdToHash(buf[2]) + if !ok { + return error.UnsupportedError("hash function " + strconv.Itoa(int(buf[2]))) + } + + hashedSubpacketsLength := int(buf[3])<<8 | int(buf[4]) + l := 6 + hashedSubpacketsLength + sig.HashSuffix = make([]byte, l+6) + sig.HashSuffix[0] = 4 + copy(sig.HashSuffix[1:], buf[:5]) + hashedSubpackets := sig.HashSuffix[6:l] + _, err = readFull(r, hashedSubpackets) + if err != nil { + return + } + // See RFC 4880, section 5.2.4 + trailer := sig.HashSuffix[l:] + trailer[0] = 4 + trailer[1] = 0xff + trailer[2] = uint8(l >> 24) + trailer[3] = uint8(l >> 16) + trailer[4] = uint8(l >> 8) + trailer[5] = uint8(l) + + err = parseSignatureSubpackets(sig, hashedSubpackets, true) + if err != nil { + return + } + + _, err = readFull(r, buf[:2]) + if err != nil { + return + } + unhashedSubpacketsLength := int(buf[0])<<8 | int(buf[1]) + unhashedSubpackets := make([]byte, unhashedSubpacketsLength) + _, err = readFull(r, unhashedSubpackets) + if err != nil { + return + } + err = parseSignatureSubpackets(sig, unhashedSubpackets, false) + if err != nil { + return + } + + _, err = readFull(r, sig.HashTag[:2]) + if err != nil { + return + } + + // We have already checked that the public key algorithm is RSA. + sig.Signature, _, err = readMPI(r) + return +} + +// parseSignatureSubpackets parses subpackets of the main signature packet. See +// RFC 4880, section 5.2.3.1. +func parseSignatureSubpackets(sig *Signature, subpackets []byte, isHashed bool) (err os.Error) { + for len(subpackets) > 0 { + subpackets, err = parseSignatureSubpacket(sig, subpackets, isHashed) + if err != nil { + return + } + } + + if sig.CreationTime == 0 { + err = error.StructuralError("no creation time in signature") + } + + return +} + +type signatureSubpacketType uint8 + +const ( + creationTimeSubpacket signatureSubpacketType = 2 + signatureExpirationSubpacket signatureSubpacketType = 3 + keyExpirySubpacket signatureSubpacketType = 9 + prefSymmetricAlgosSubpacket signatureSubpacketType = 11 + issuerSubpacket signatureSubpacketType = 16 + prefHashAlgosSubpacket signatureSubpacketType = 21 + prefCompressionSubpacket signatureSubpacketType = 22 + primaryUserIdSubpacket signatureSubpacketType = 25 + keyFlagsSubpacket signatureSubpacketType = 27 +) + +// parseSignatureSubpacket parses a single subpacket. len(subpacket) is >= 1. 
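+// A subpacket begins with its length: one octet for lengths below 192, two
+// octets decoded as (o1-192)<<8 + o2 + 192 for lengths below 16320, and 0xff
+// followed by a four-octet big-endian length otherwise. For example, the
+// octets c1 01 encode the length 449.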
+func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (rest []byte, err os.Error) { + // RFC 4880, section 5.2.3.1 + var length uint32 + switch { + case subpacket[0] < 192: + length = uint32(subpacket[0]) + subpacket = subpacket[1:] + case subpacket[0] < 255: + if len(subpacket) < 2 { + goto Truncated + } + length = uint32(subpacket[0]-192)<<8 + uint32(subpacket[1]) + 192 + subpacket = subpacket[2:] + default: + if len(subpacket) < 5 { + goto Truncated + } + length = uint32(subpacket[1])<<24 | + uint32(subpacket[2])<<16 | + uint32(subpacket[3])<<8 | + uint32(subpacket[4]) + subpacket = subpacket[5:] + } + if length > uint32(len(subpacket)) { + goto Truncated + } + rest = subpacket[length:] + subpacket = subpacket[:length] + if len(subpacket) == 0 { + err = error.StructuralError("zero length signature subpacket") + return + } + packetType := subpacket[0] & 0x7f + isCritial := subpacket[0]&0x80 == 0x80 + subpacket = subpacket[1:] + switch signatureSubpacketType(packetType) { + case creationTimeSubpacket: + if !isHashed { + err = error.StructuralError("signature creation time in non-hashed area") + return + } + if len(subpacket) != 4 { + err = error.StructuralError("signature creation time not four bytes") + return + } + sig.CreationTime = binary.BigEndian.Uint32(subpacket) + case signatureExpirationSubpacket: + // Signature expiration time, section 5.2.3.10 + if !isHashed { + return + } + if len(subpacket) != 4 { + err = error.StructuralError("expiration subpacket with bad length") + return + } + sig.SigLifetimeSecs = new(uint32) + *sig.SigLifetimeSecs = binary.BigEndian.Uint32(subpacket) + case keyExpirySubpacket: + // Key expiration time, section 5.2.3.6 + if !isHashed { + return + } + if len(subpacket) != 4 { + err = error.StructuralError("key expiration subpacket with bad length") + return + } + sig.KeyLifetimeSecs = new(uint32) + *sig.KeyLifetimeSecs = binary.BigEndian.Uint32(subpacket) + case prefSymmetricAlgosSubpacket: + // Preferred symmetric algorithms, section 5.2.3.7 + if !isHashed { + return + } + sig.PreferredSymmetric = make([]byte, len(subpacket)) + copy(sig.PreferredSymmetric, subpacket) + case issuerSubpacket: + // Issuer, section 5.2.3.5 + if len(subpacket) != 8 { + err = error.StructuralError("issuer subpacket with bad length") + return + } + sig.IssuerKeyId = new(uint64) + *sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket) + case prefHashAlgosSubpacket: + // Preferred hash algorithms, section 5.2.3.8 + if !isHashed { + return + } + sig.PreferredHash = make([]byte, len(subpacket)) + copy(sig.PreferredHash, subpacket) + case prefCompressionSubpacket: + // Preferred compression algorithms, section 5.2.3.9 + if !isHashed { + return + } + sig.PreferredCompression = make([]byte, len(subpacket)) + copy(sig.PreferredCompression, subpacket) + case primaryUserIdSubpacket: + // Primary User ID, section 5.2.3.19 + if !isHashed { + return + } + if len(subpacket) != 1 { + err = error.StructuralError("primary user id subpacket with bad length") + return + } + sig.IsPrimaryId = new(bool) + if subpacket[0] > 0 { + *sig.IsPrimaryId = true + } + case keyFlagsSubpacket: + // Key flags, section 5.2.3.21 + if !isHashed { + return + } + if len(subpacket) == 0 { + err = error.StructuralError("empty key flags subpacket") + return + } + sig.FlagsValid = true + if subpacket[0]&1 != 0 { + sig.FlagCertify = true + } + if subpacket[0]&2 != 0 { + sig.FlagSign = true + } + if subpacket[0]&4 != 0 { + sig.FlagEncryptCommunications = true + } + if subpacket[0]&8 != 0 { + 
sig.FlagEncryptStorage = true + } + + default: + if isCritial { + err = error.UnsupportedError("unknown critical signature subpacket type " + strconv.Itoa(int(packetType))) + return + } + } + return + +Truncated: + err = error.StructuralError("signature subpacket truncated") + return +} + +// subpacketLengthLength returns the length, in bytes, of an encoded length value. +func subpacketLengthLength(length int) int { + if length < 192 { + return 1 + } + if length < 16320 { + return 2 + } + return 5 +} + +// serialiseSubpacketLength marshals the given length into to. +func serialiseSubpacketLength(to []byte, length int) int { + if length < 192 { + to[0] = byte(length) + return 1 + } + if length < 16320 { + length -= 192 + to[0] = byte(length >> 8) + to[1] = byte(length) + return 2 + } + to[0] = 255 + to[1] = byte(length >> 24) + to[2] = byte(length >> 16) + to[3] = byte(length >> 8) + to[4] = byte(length) + return 5 +} + +// subpacketsLength returns the serialised length, in bytes, of the given +// subpackets. +func subpacketsLength(subpackets []outputSubpacket, hashed bool) (length int) { + for _, subpacket := range subpackets { + if subpacket.hashed == hashed { + length += subpacketLengthLength(len(subpacket.contents) + 1) + length += 1 // type byte + length += len(subpacket.contents) + } + } + return +} + +// serialiseSubpackets marshals the given subpackets into to. +func serialiseSubpackets(to []byte, subpackets []outputSubpacket, hashed bool) { + for _, subpacket := range subpackets { + if subpacket.hashed == hashed { + n := serialiseSubpacketLength(to, len(subpacket.contents)+1) + to[n] = byte(subpacket.subpacketType) + to = to[1+n:] + n = copy(to, subpacket.contents) + to = to[n:] + } + } + return +} + +// buildHashSuffix constructs the HashSuffix member of sig in preparation for signing. +func (sig *Signature) buildHashSuffix() (err os.Error) { + sig.outSubpackets = sig.buildSubpackets() + hashedSubpacketsLen := subpacketsLength(sig.outSubpackets, true) + + var ok bool + l := 6 + hashedSubpacketsLen + sig.HashSuffix = make([]byte, l+6) + sig.HashSuffix[0] = 4 + sig.HashSuffix[1] = uint8(sig.SigType) + sig.HashSuffix[2] = uint8(sig.PubKeyAlgo) + sig.HashSuffix[3], ok = s2k.HashToHashId(sig.Hash) + if !ok { + sig.HashSuffix = nil + return error.InvalidArgumentError("hash cannot be repesented in OpenPGP: " + strconv.Itoa(int(sig.Hash))) + } + sig.HashSuffix[4] = byte(hashedSubpacketsLen >> 8) + sig.HashSuffix[5] = byte(hashedSubpacketsLen) + serialiseSubpackets(sig.HashSuffix[6:l], sig.outSubpackets, true) + trailer := sig.HashSuffix[l:] + trailer[0] = 4 + trailer[1] = 0xff + trailer[2] = byte(l >> 24) + trailer[3] = byte(l >> 16) + trailer[4] = byte(l >> 8) + trailer[5] = byte(l) + return +} + +// SignRSA signs a message with an RSA private key. The hash, h, must contain +// the hash of message to be signed and will be mutated by this function. +func (sig *Signature) SignRSA(h hash.Hash, priv *rsa.PrivateKey) (err os.Error) { + err = sig.buildHashSuffix() + if err != nil { + return + } + + h.Write(sig.HashSuffix) + digest := h.Sum() + copy(sig.HashTag[:], digest) + sig.Signature, err = rsa.SignPKCS1v15(rand.Reader, priv, sig.Hash, digest) + return +} + +// Serialize marshals sig to w. SignRSA must have been called first. 
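+// A minimal signing sketch (for illustration only; priv is an
+// *rsa.PrivateKey, now is a Unix timestamp, and message and w are supplied by
+// the caller; error handling is elided):
+//
+//    sig := &Signature{SigType: SigTypeBinary, PubKeyAlgo: PubKeyAlgoRSA,
+//        Hash: crypto.SHA1, CreationTime: now}
+//    h := sig.Hash.New()
+//    h.Write(message)
+//    sig.SignRSA(h, priv)
+//    sig.Serialize(w)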
+func (sig *Signature) Serialize(w io.Writer) (err os.Error) { + if sig.Signature == nil { + return error.InvalidArgumentError("Signature: need to call SignRSA before Serialize") + } + + unhashedSubpacketsLen := subpacketsLength(sig.outSubpackets, false) + length := len(sig.HashSuffix) - 6 /* trailer not included */ + + 2 /* length of unhashed subpackets */ + unhashedSubpacketsLen + + 2 /* hash tag */ + 2 /* length of signature MPI */ + len(sig.Signature) + err = serialiseHeader(w, packetTypeSignature, length) + if err != nil { + return + } + + _, err = w.Write(sig.HashSuffix[:len(sig.HashSuffix)-6]) + if err != nil { + return + } + + unhashedSubpackets := make([]byte, 2+unhashedSubpacketsLen) + unhashedSubpackets[0] = byte(unhashedSubpacketsLen >> 8) + unhashedSubpackets[1] = byte(unhashedSubpacketsLen) + serialiseSubpackets(unhashedSubpackets[2:], sig.outSubpackets, false) + + _, err = w.Write(unhashedSubpackets) + if err != nil { + return + } + _, err = w.Write(sig.HashTag[:]) + if err != nil { + return + } + return writeMPI(w, 8*uint16(len(sig.Signature)), sig.Signature) +} + +// outputSubpacket represents a subpacket to be marshaled. +type outputSubpacket struct { + hashed bool // true if this subpacket is in the hashed area. + subpacketType signatureSubpacketType + contents []byte +} + +func (sig *Signature) buildSubpackets() (subpackets []outputSubpacket) { + creationTime := make([]byte, 4) + creationTime[0] = byte(sig.CreationTime >> 24) + creationTime[1] = byte(sig.CreationTime >> 16) + creationTime[2] = byte(sig.CreationTime >> 8) + creationTime[3] = byte(sig.CreationTime) + subpackets = append(subpackets, outputSubpacket{true, creationTimeSubpacket, creationTime}) + + if sig.IssuerKeyId != nil { + keyId := make([]byte, 8) + binary.BigEndian.PutUint64(keyId, *sig.IssuerKeyId) + subpackets = append(subpackets, outputSubpacket{true, issuerSubpacket, keyId}) + } + + return +} diff --git a/libgo/go/crypto/openpgp/packet/signature_test.go b/libgo/go/crypto/openpgp/packet/signature_test.go new file mode 100644 index 00000000000..1305548b2ae --- /dev/null +++ b/libgo/go/crypto/openpgp/packet/signature_test.go @@ -0,0 +1,28 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package packet + +import ( + "bytes" + "crypto" + "encoding/hex" + "testing" +) + +func TestSignatureRead(t *testing.T) { + signatureData, _ := hex.DecodeString(signatureDataHex) + buf := bytes.NewBuffer(signatureData) + packet, err := Read(buf) + if err != nil { + t.Error(err) + return + } + sig, ok := packet.(*Signature) + if !ok || sig.SigType != SigTypeBinary || sig.PubKeyAlgo != PubKeyAlgoRSA || sig.Hash != crypto.SHA1 { + t.Errorf("failed to parse, got: %#v", packet) + } +} + +const signatureDataHex = "89011c04000102000605024cb45112000a0910ab105c91af38fb158f8d07ff5596ea368c5efe015bed6e78348c0f033c931d5f2ce5db54ce7f2a7e4b4ad64db758d65a7a71773edeab7ba2a9e0908e6a94a1175edd86c1d843279f045b021a6971a72702fcbd650efc393c5474d5b59a15f96d2eaad4c4c426797e0dcca2803ef41c6ff234d403eec38f31d610c344c06f2401c262f0993b2e66cad8a81ebc4322c723e0d4ba09fe917e8777658307ad8329adacba821420741009dfe87f007759f0982275d028a392c6ed983a0d846f890b36148c7358bdb8a516007fac760261ecd06076813831a36d0459075d1befa245ae7f7fb103d92ca759e9498fe60ef8078a39a3beda510deea251ea9f0a7f0df6ef42060f20780360686f3e400e" diff --git a/libgo/go/crypto/openpgp/packet/symmetric_key_encrypted.go b/libgo/go/crypto/openpgp/packet/symmetric_key_encrypted.go new file mode 100644 index 00000000000..d9010f88a3d --- /dev/null +++ b/libgo/go/crypto/openpgp/packet/symmetric_key_encrypted.go @@ -0,0 +1,102 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "crypto/cipher" + "crypto/openpgp/error" + "crypto/openpgp/s2k" + "io" + "os" + "strconv" +) + +// This is the largest session key that we'll support. Since no 512-bit cipher +// has even been seriously used, this is comfortably large. +const maxSessionKeySizeInBytes = 64 + +// SymmetricKeyEncrypted represents a passphrase protected session key. See RFC +// 4880, section 5.3. +type SymmetricKeyEncrypted struct { + CipherFunc CipherFunction + Encrypted bool + Key []byte // Empty unless Encrypted is false. + s2k func(out, in []byte) + encryptedKey []byte +} + +func (ske *SymmetricKeyEncrypted) parse(r io.Reader) (err os.Error) { + // RFC 4880, section 5.3. + var buf [2]byte + _, err = readFull(r, buf[:]) + if err != nil { + return + } + if buf[0] != 4 { + return error.UnsupportedError("SymmetricKeyEncrypted version") + } + ske.CipherFunc = CipherFunction(buf[1]) + + if ske.CipherFunc.keySize() == 0 { + return error.UnsupportedError("unknown cipher: " + strconv.Itoa(int(buf[1]))) + } + + ske.s2k, err = s2k.Parse(r) + if err != nil { + return + } + + encryptedKey := make([]byte, maxSessionKeySizeInBytes) + // The session key may follow. We just have to try and read to find + // out. If it exists then we limit it to maxSessionKeySizeInBytes. + n, err := readFull(r, encryptedKey) + if err != nil && err != io.ErrUnexpectedEOF { + return + } + err = nil + if n != 0 { + if n == maxSessionKeySizeInBytes { + return error.UnsupportedError("oversized encrypted session key") + } + ske.encryptedKey = encryptedKey[:n] + } + + ske.Encrypted = true + + return +} + +// Decrypt attempts to decrypt an encrypted session key. If it returns nil, +// ske.Key will contain the session key. 
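+// A minimal usage sketch, mirroring the package test (for illustration only;
+// error handling is elided):
+//
+//    p, _ := Read(r)
+//    ske := p.(*SymmetricKeyEncrypted)
+//    ske.Decrypt([]byte("password"))
+//    p, _ = Read(r)
+//    se := p.(*SymmetricallyEncrypted)
+//    contents, _ := se.Decrypt(ske.CipherFunc, ske.Key)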
+func (ske *SymmetricKeyEncrypted) Decrypt(passphrase []byte) os.Error { + if !ske.Encrypted { + return nil + } + + key := make([]byte, ske.CipherFunc.keySize()) + ske.s2k(key, passphrase) + + if len(ske.encryptedKey) == 0 { + ske.Key = key + } else { + // the IV is all zeros + iv := make([]byte, ske.CipherFunc.blockSize()) + c := cipher.NewCFBDecrypter(ske.CipherFunc.new(key), iv) + c.XORKeyStream(ske.encryptedKey, ske.encryptedKey) + ske.CipherFunc = CipherFunction(ske.encryptedKey[0]) + if ske.CipherFunc.blockSize() == 0 { + return error.UnsupportedError("unknown cipher: " + strconv.Itoa(int(ske.CipherFunc))) + } + ske.CipherFunc = CipherFunction(ske.encryptedKey[0]) + ske.Key = ske.encryptedKey[1:] + if len(ske.Key)%ske.CipherFunc.blockSize() != 0 { + ske.Key = nil + return error.StructuralError("length of decrypted key not a multiple of block size") + } + } + + ske.Encrypted = false + return nil +} diff --git a/libgo/go/crypto/openpgp/packet/symmetric_key_encrypted_test.go b/libgo/go/crypto/openpgp/packet/symmetric_key_encrypted_test.go new file mode 100644 index 00000000000..717c8ffa6d6 --- /dev/null +++ b/libgo/go/crypto/openpgp/packet/symmetric_key_encrypted_test.go @@ -0,0 +1,62 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "encoding/hex" + "io/ioutil" + "os" + "testing" +) + +func TestSymmetricKeyEncrypted(t *testing.T) { + buf := readerFromHex(symmetricallyEncryptedHex) + packet, err := Read(buf) + if err != nil { + t.Errorf("failed to read SymmetricKeyEncrypted: %s", err) + return + } + ske, ok := packet.(*SymmetricKeyEncrypted) + if !ok { + t.Error("didn't find SymmetricKeyEncrypted packet") + return + } + err = ske.Decrypt([]byte("password")) + if err != nil { + t.Error(err) + return + } + + packet, err = Read(buf) + if err != nil { + t.Errorf("failed to read SymmetricallyEncrypted: %s", err) + return + } + se, ok := packet.(*SymmetricallyEncrypted) + if !ok { + t.Error("didn't find SymmetricallyEncrypted packet") + return + } + r, err := se.Decrypt(ske.CipherFunc, ske.Key) + if err != nil { + t.Error(err) + return + } + + contents, err := ioutil.ReadAll(r) + if err != nil && err != os.EOF { + t.Error(err) + return + } + + expectedContents, _ := hex.DecodeString(symmetricallyEncryptedContentsHex) + if !bytes.Equal(expectedContents, contents) { + t.Errorf("bad contents got:%x want:%x", contents, expectedContents) + } +} + +const symmetricallyEncryptedHex = "8c0d04030302371a0b38d884f02060c91cf97c9973b8e58e028e9501708ccfe618fb92afef7fa2d80ddadd93cf" +const symmetricallyEncryptedContentsHex = "cb1062004d14c4df636f6e74656e74732e0a" diff --git a/libgo/go/crypto/openpgp/packet/symmetrically_encrypted.go b/libgo/go/crypto/openpgp/packet/symmetrically_encrypted.go new file mode 100644 index 00000000000..fc19ffe809a --- /dev/null +++ b/libgo/go/crypto/openpgp/packet/symmetrically_encrypted.go @@ -0,0 +1,206 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "crypto/cipher" + "crypto/openpgp/error" + "crypto/sha1" + "crypto/subtle" + "hash" + "io" + "os" + "strconv" +) + +// SymmetricallyEncrypted represents a symmetrically encrypted byte string. The +// encrypted contents will consist of more OpenPGP packets. See RFC 4880, +// sections 5.7 and 5.13. 
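+// When MDC is set (packet type 18), the plaintext ends with a SHA-1 based
+// modification detection code which the ReadCloser returned by Decrypt
+// verifies when it is closed.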
+type SymmetricallyEncrypted struct { + MDC bool // true iff this is a type 18 packet and thus has an embedded MAC. + contents io.Reader + prefix []byte +} + +func (se *SymmetricallyEncrypted) parse(r io.Reader) os.Error { + if se.MDC { + // See RFC 4880, section 5.13. + var buf [1]byte + _, err := readFull(r, buf[:]) + if err != nil { + return err + } + if buf[0] != 1 { + return error.UnsupportedError("unknown SymmetricallyEncrypted version") + } + } + se.contents = r + return nil +} + +// Decrypt returns a ReadCloser, from which the decrypted contents of the +// packet can be read. An incorrect key can, with high probability, be detected +// immediately and this will result in a KeyIncorrect error being returned. +func (se *SymmetricallyEncrypted) Decrypt(c CipherFunction, key []byte) (io.ReadCloser, os.Error) { + keySize := c.keySize() + if keySize == 0 { + return nil, error.UnsupportedError("unknown cipher: " + strconv.Itoa(int(c))) + } + if len(key) != keySize { + return nil, error.InvalidArgumentError("SymmetricallyEncrypted: incorrect key length") + } + + if se.prefix == nil { + se.prefix = make([]byte, c.blockSize()+2) + _, err := readFull(se.contents, se.prefix) + if err != nil { + return nil, err + } + } else if len(se.prefix) != c.blockSize()+2 { + return nil, error.InvalidArgumentError("can't try ciphers with different block lengths") + } + + ocfbResync := cipher.OCFBResync + if se.MDC { + // MDC packets use a different form of OCFB mode. + ocfbResync = cipher.OCFBNoResync + } + + s := cipher.NewOCFBDecrypter(c.new(key), se.prefix, ocfbResync) + if s == nil { + return nil, error.KeyIncorrectError + } + + plaintext := cipher.StreamReader{S: s, R: se.contents} + + if se.MDC { + // MDC packets have an embedded hash that we need to check. + h := sha1.New() + h.Write(se.prefix) + return &seMDCReader{in: plaintext, h: h}, nil + } + + // Otherwise, we just need to wrap plaintext so that it's a valid ReadCloser. + return seReader{plaintext}, nil +} + +// seReader wraps an io.Reader with a no-op Close method. +type seReader struct { + in io.Reader +} + +func (ser seReader) Read(buf []byte) (int, os.Error) { + return ser.in.Read(buf) +} + +func (ser seReader) Close() os.Error { + return nil +} + +const mdcTrailerSize = 1 /* tag byte */ + 1 /* length byte */ + sha1.Size + +// An seMDCReader wraps an io.Reader, maintains a running hash and keeps hold +// of the most recent 22 bytes (mdcTrailerSize). Upon EOF, those bytes form an +// MDC packet containing a hash of the previous contents which is checked +// against the running hash. See RFC 4880, section 5.13. +type seMDCReader struct { + in io.Reader + h hash.Hash + trailer [mdcTrailerSize]byte + scratch [mdcTrailerSize]byte + trailerUsed int + error bool + eof bool +} + +func (ser *seMDCReader) Read(buf []byte) (n int, err os.Error) { + if ser.error { + err = io.ErrUnexpectedEOF + return + } + if ser.eof { + err = os.EOF + return + } + + // If we haven't yet filled the trailer buffer then we must do that + // first. + for ser.trailerUsed < mdcTrailerSize { + n, err = ser.in.Read(ser.trailer[ser.trailerUsed:]) + ser.trailerUsed += n + if err == os.EOF { + if ser.trailerUsed != mdcTrailerSize { + n = 0 + err = io.ErrUnexpectedEOF + ser.error = true + return + } + ser.eof = true + n = 0 + return + } + + if err != nil { + n = 0 + return + } + } + + // If it's a short read then we read into a temporary buffer and shift + // the data into the caller's buffer. 
+    if len(buf) <= mdcTrailerSize {
+        n, err = readFull(ser.in, ser.scratch[:len(buf)])
+        copy(buf, ser.trailer[:n])
+        ser.h.Write(buf[:n])
+        copy(ser.trailer[:], ser.trailer[n:])
+        copy(ser.trailer[mdcTrailerSize-n:], ser.scratch[:])
+        if n < len(buf) {
+            ser.eof = true
+            err = os.EOF
+        }
+        return
+    }
+
+    n, err = ser.in.Read(buf[mdcTrailerSize:])
+    copy(buf, ser.trailer[:])
+    ser.h.Write(buf[:n])
+    copy(ser.trailer[:], buf[n:])
+
+    if err == os.EOF {
+        ser.eof = true
+    }
+    return
+}
+
+func (ser *seMDCReader) Close() os.Error {
+    if ser.error {
+        return error.SignatureError("error during reading")
+    }
+
+    for !ser.eof {
+        // We haven't seen EOF so we need to read to the end
+        var buf [1024]byte
+        _, err := ser.Read(buf[:])
+        if err == os.EOF {
+            break
+        }
+        if err != nil {
+            return error.SignatureError("error during reading")
+        }
+    }
+
+    // This is a new-format packet tag byte for a type 19 (MDC) packet.
+    const mdcPacketTagByte = byte(0x80) | 0x40 | 19
+    if ser.trailer[0] != mdcPacketTagByte || ser.trailer[1] != sha1.Size {
+        return error.SignatureError("MDC packet not found")
+    }
+    ser.h.Write(ser.trailer[:2])
+
+    final := ser.h.Sum()
+    if subtle.ConstantTimeCompare(final, ser.trailer[2:]) != 1 {
+        return error.SignatureError("hash mismatch")
+    }
+    return nil
+}
diff --git a/libgo/go/crypto/openpgp/packet/symmetrically_encrypted_test.go b/libgo/go/crypto/openpgp/packet/symmetrically_encrypted_test.go
new file mode 100644
index 00000000000..5543b20297a
--- /dev/null
+++ b/libgo/go/crypto/openpgp/packet/symmetrically_encrypted_test.go
@@ -0,0 +1,78 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+    "bytes"
+    "crypto/openpgp/error"
+    "crypto/sha1"
+    "encoding/hex"
+    "io/ioutil"
+    "os"
+    "testing"
+)
+
+// testReader wraps a []byte and returns reads of a specific length.
+type testReader struct {
+    data   []byte
+    stride int
+}
+
+func (t *testReader) Read(buf []byte) (n int, err os.Error) {
+    n = t.stride
+    if n > len(t.data) {
+        n = len(t.data)
+    }
+    if n > len(buf) {
+        n = len(buf)
+    }
+    copy(buf, t.data)
+    t.data = t.data[n:]
+    if len(t.data) == 0 {
+        err = os.EOF
+    }
+    return
+}
+
+func testMDCReader(t *testing.T) {
+    mdcPlaintext, _ := hex.DecodeString(mdcPlaintextHex)
+
+    for stride := 1; stride < len(mdcPlaintext)/2; stride++ {
+        r := &testReader{data: mdcPlaintext, stride: stride}
+        mdcReader := &seMDCReader{in: r, h: sha1.New()}
+        body, err := ioutil.ReadAll(mdcReader)
+        if err != nil {
+            t.Errorf("stride: %d, error: %s", stride, err)
+            continue
+        }
+        if !bytes.Equal(body, mdcPlaintext[:len(mdcPlaintext)-22]) {
+            t.Errorf("stride: %d: bad contents %x", stride, body)
+            continue
+        }
+
+        err = mdcReader.Close()
+        if err != nil {
+            t.Errorf("stride: %d, error on Close: %s", stride, err)
+        }
+    }
+
+    mdcPlaintext[15] ^= 80
+
+    r := &testReader{data: mdcPlaintext, stride: 2}
+    mdcReader := &seMDCReader{in: r, h: sha1.New()}
+    _, err := ioutil.ReadAll(mdcReader)
+    if err != nil {
+        t.Errorf("corruption test, error: %s", err)
+        return
+    }
+    err = mdcReader.Close()
+    if err == nil {
+        t.Error("corruption: no error")
+    } else if _, ok := err.(*error.SignatureError); !ok {
+        t.Errorf("corruption: expected SignatureError, got: %s", err)
+    }
+}
+
+const mdcPlaintextHex = "a302789c3b2d93c4e0eb9aba22283539b3203335af44a134afb800c849cb4c4de10200aff40b45d31432c80cb384299a0655966d6939dfdeed1dddf980"
diff --git a/libgo/go/crypto/openpgp/packet/userid.go b/libgo/go/crypto/openpgp/packet/userid.go
new file mode 100644
index 00000000000..ed2ad777486
--- /dev/null
+++ b/libgo/go/crypto/openpgp/packet/userid.go
@@ -0,0 +1,105 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+    "io"
+    "io/ioutil"
+    "os"
+    "strings"
+)
+
+// UserId contains text that is intended to represent the name and email
+// address of the key holder. See RFC 4880, section 5.11. By convention, this
+// takes the form "Full Name (Comment) <email@example.com>"
+type UserId struct {
+    Id string // By convention, this takes the form "Full Name (Comment) <email@example.com>" which is split out in the fields below.
+
+    Name, Comment, Email string
+}
+
+func (uid *UserId) parse(r io.Reader) (err os.Error) {
+    // RFC 4880, section 5.11
+    b, err := ioutil.ReadAll(r)
+    if err != nil {
+        return
+    }
+    uid.Id = string(b)
+    uid.Name, uid.Comment, uid.Email = parseUserId(uid.Id)
+    return
+}
+
+// parseUserId extracts the name, comment and email from a user id string that
+// is formatted as "Full Name (Comment) <email@example.com>".
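+// For example, "John Smith (Comment) <john@example.com>" yields the name
+// "John Smith", the comment "Comment" and the email "john@example.com".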
+func parseUserId(id string) (name, comment, email string) { + var n, c, e struct { + start, end int + } + var state int + + for offset, rune := range id { + switch state { + case 0: + // Entering name + n.start = offset + state = 1 + fallthrough + case 1: + // In name + if rune == '(' { + state = 2 + n.end = offset + } else if rune == '<' { + state = 5 + n.end = offset + } + case 2: + // Entering comment + c.start = offset + state = 3 + fallthrough + case 3: + // In comment + if rune == ')' { + state = 4 + c.end = offset + } + case 4: + // Between comment and email + if rune == '<' { + state = 5 + } + case 5: + // Entering email + e.start = offset + state = 6 + fallthrough + case 6: + // In email + if rune == '>' { + state = 7 + e.end = offset + } + default: + // After email + } + } + switch state { + case 1: + // ended in the name + n.end = len(id) + case 3: + // ended in comment + c.end = len(id) + case 6: + // ended in email + e.end = len(id) + } + + name = strings.TrimSpace(id[n.start:n.end]) + comment = strings.TrimSpace(id[c.start:c.end]) + email = strings.TrimSpace(id[e.start:e.end]) + return +} diff --git a/libgo/go/crypto/openpgp/packet/userid_test.go b/libgo/go/crypto/openpgp/packet/userid_test.go new file mode 100644 index 00000000000..394873dc38c --- /dev/null +++ b/libgo/go/crypto/openpgp/packet/userid_test.go @@ -0,0 +1,42 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "testing" +) + +var userIdTests = []struct { + id string + name, comment, email string +}{ + {"", "", "", ""}, + {"John Smith", "John Smith", "", ""}, + {"John Smith ()", "John Smith", "", ""}, + {"John Smith () <>", "John Smith", "", ""}, + {"(comment", "", "comment", ""}, + {"(comment)", "", "comment", ""}, + {" sdfk", "", "", "email"}, + {" John Smith ( Comment ) asdkflj < email > lksdfj", "John Smith", "Comment", "email"}, + {" John Smith < email > lksdfj", "John Smith", "", "email"}, + {"( /tmp/log 2>&1 // % python parse-gnutls-cli-debug-log.py < /tmp/log diff --git a/libgo/go/crypto/tls/handshake_server.go b/libgo/go/crypto/tls/handshake_server.go index 955811ada35..809c8c15e5d 100644 --- a/libgo/go/crypto/tls/handshake_server.go +++ b/libgo/go/crypto/tls/handshake_server.go @@ -5,6 +5,7 @@ package tls import ( + "crypto" "crypto/rsa" "crypto/subtle" "crypto/x509" @@ -56,6 +57,7 @@ Curves: var suite *cipherSuite var suiteId uint16 +FindCipherSuite: for _, id := range clientHello.cipherSuites { for _, supported := range config.cipherSuites() { if id == supported { @@ -66,7 +68,7 @@ Curves: continue } suiteId = id - break + break FindCipherSuite } } } @@ -213,7 +215,7 @@ Curves: digest := make([]byte, 36) copy(digest[0:16], finishedHash.serverMD5.Sum()) copy(digest[16:36], finishedHash.serverSHA1.Sum()) - err = rsa.VerifyPKCS1v15(pub, rsa.HashMD5SHA1, digest, certVerify.signature) + err = rsa.VerifyPKCS1v15(pub, crypto.MD5SHA1, digest, certVerify.signature) if err != nil { c.sendAlert(alertBadCertificate) return os.ErrorString("could not validate signature of connection nonces: " + err.String()) diff --git a/libgo/go/crypto/tls/handshake_server_test.go b/libgo/go/crypto/tls/handshake_server_test.go index 5cf3ae0499d..6beb6a9f62b 100644 --- a/libgo/go/crypto/tls/handshake_server_test.go +++ b/libgo/go/crypto/tls/handshake_server_test.go @@ -194,7 +194,7 @@ var testPrivateKey = &rsa.PrivateKey{ // Script of interaction with gnutls implementation. 
// The values for this test are obtained by building and running in server mode: -// % gotest -match "TestRunServer" -serve +// % gotest -test.run "TestRunServer" -serve // and then: // % gnutls-cli --insecure --debug 100 -p 10443 localhost > /tmp/log 2>&1 // % python parse-gnutls-cli-debug-log.py < /tmp/log diff --git a/libgo/go/crypto/tls/key_agreement.go b/libgo/go/crypto/tls/key_agreement.go index 861c64f04bb..8edbb11900c 100644 --- a/libgo/go/crypto/tls/key_agreement.go +++ b/libgo/go/crypto/tls/key_agreement.go @@ -6,6 +6,7 @@ package tls import ( "big" + "crypto" "crypto/elliptic" "crypto/md5" "crypto/rsa" @@ -143,7 +144,7 @@ Curve: copy(serverECDHParams[4:], ecdhePublic) md5sha1 := md5SHA1Hash(clientHello.random, hello.random, serverECDHParams) - sig, err := rsa.SignPKCS1v15(config.rand(), config.Certificates[0].PrivateKey, rsa.HashMD5SHA1, md5sha1) + sig, err := rsa.SignPKCS1v15(config.rand(), config.Certificates[0].PrivateKey, crypto.MD5SHA1, md5sha1) if err != nil { return nil, os.ErrorString("failed to sign ECDHE parameters: " + err.String()) } @@ -216,7 +217,7 @@ func (ka *ecdheRSAKeyAgreement) processServerKeyExchange(config *Config, clientH sig = sig[2:] md5sha1 := md5SHA1Hash(clientHello.random, serverHello.random, serverECDHParams) - return rsa.VerifyPKCS1v15(cert.PublicKey.(*rsa.PublicKey), rsa.HashMD5SHA1, md5sha1, sig) + return rsa.VerifyPKCS1v15(cert.PublicKey.(*rsa.PublicKey), crypto.MD5SHA1, md5sha1, sig) Error: return os.ErrorString("invalid ServerKeyExchange") diff --git a/libgo/go/crypto/tls/tls.go b/libgo/go/crypto/tls/tls.go index b11d3225daa..e8290d728dd 100644 --- a/libgo/go/crypto/tls/tls.go +++ b/libgo/go/crypto/tls/tls.go @@ -124,14 +124,22 @@ func LoadX509KeyPair(certFile string, keyFile string) (cert Certificate, err os. return } - certDERBlock, _ := pem.Decode(certPEMBlock) - if certDERBlock == nil { + var certDERBlock *pem.Block + for { + certDERBlock, certPEMBlock = pem.Decode(certPEMBlock) + if certDERBlock == nil { + break + } + if certDERBlock.Type == "CERTIFICATE" { + cert.Certificate = append(cert.Certificate, certDERBlock.Bytes) + } + } + + if len(cert.Certificate) == 0 { err = os.ErrorString("crypto/tls: failed to parse certificate PEM data") return } - cert.Certificate = [][]byte{certDERBlock.Bytes} - keyPEMBlock, err := ioutil.ReadFile(keyFile) if err != nil { return @@ -153,7 +161,7 @@ func LoadX509KeyPair(certFile string, keyFile string) (cert Certificate, err os. // We don't need to parse the public key for TLS, but we so do anyway // to check that it looks sane and matches the private key. - x509Cert, err := x509.ParseCertificate(certDERBlock.Bytes) + x509Cert, err := x509.ParseCertificate(cert.Certificate[0]) if err != nil { return } diff --git a/libgo/go/crypto/x509/x509.go b/libgo/go/crypto/x509/x509.go index 6199e8db9f5..3af8ba8ca24 100644 --- a/libgo/go/crypto/x509/x509.go +++ b/libgo/go/crypto/x509/x509.go @@ -9,6 +9,7 @@ import ( "asn1" "big" "container/vector" + "crypto" "crypto/rsa" "crypto/sha1" "hash" @@ -330,6 +331,10 @@ type Certificate struct { DNSNames []string EmailAddresses []string + // Name constraints + PermittedDNSDomainsCritical bool // if true then the name constraints are marked critical. + PermittedDNSDomains []string + PolicyIdentifiers []asn1.ObjectIdentifier } @@ -374,12 +379,12 @@ func (c *Certificate) CheckSignatureFrom(parent *Certificate) (err os.Error) { // TODO(agl): don't ignore the path length constraint. 
var h hash.Hash - var hashType rsa.PKCS1v15Hash + var hashType crypto.Hash switch c.SignatureAlgorithm { case SHA1WithRSA: h = sha1.New() - hashType = rsa.HashSHA1 + hashType = crypto.SHA1 default: return UnsupportedAlgorithmError{} } @@ -474,6 +479,18 @@ type policyInformation struct { // policyQualifiers omitted } +// RFC 5280, 4.2.1.10 +type nameConstraints struct { + Permitted []generalSubtree "optional,tag:0" + Excluded []generalSubtree "optional,tag:1" +} + +type generalSubtree struct { + Name string "tag:2,optional,ia5" + Min int "optional,tag:0" + Max int "optional,tag:1" +} + func parsePublicKey(algo PublicKeyAlgorithm, asn1Data []byte) (interface{}, os.Error) { switch algo { case RSA: @@ -602,6 +619,43 @@ func parseCertificate(in *certificate) (*Certificate, os.Error) { // If we didn't parse any of the names then we // fall through to the critical check below. + case 30: + // RFC 5280, 4.2.1.10 + + // NameConstraints ::= SEQUENCE { + // permittedSubtrees [0] GeneralSubtrees OPTIONAL, + // excludedSubtrees [1] GeneralSubtrees OPTIONAL } + // + // GeneralSubtrees ::= SEQUENCE SIZE (1..MAX) OF GeneralSubtree + // + // GeneralSubtree ::= SEQUENCE { + // base GeneralName, + // minimum [0] BaseDistance DEFAULT 0, + // maximum [1] BaseDistance OPTIONAL } + // + // BaseDistance ::= INTEGER (0..MAX) + + var constraints nameConstraints + _, err := asn1.Unmarshal(e.Value, &constraints) + if err != nil { + return nil, err + } + + if len(constraints.Excluded) > 0 && e.Critical { + return out, UnhandledCriticalExtension{} + } + + for _, subtree := range constraints.Permitted { + if subtree.Min > 0 || subtree.Max > 0 || len(subtree.Name) == 0 { + if e.Critical { + return out, UnhandledCriticalExtension{} + } + continue + } + out.PermittedDNSDomains = append(out.PermittedDNSDomains, subtree.Name) + } + continue + case 35: // RFC 5280, 4.2.1.1 var a authKeyId @@ -698,10 +752,11 @@ var ( oidExtensionBasicConstraints = []int{2, 5, 29, 19} oidExtensionSubjectAltName = []int{2, 5, 29, 17} oidExtensionCertificatePolicies = []int{2, 5, 29, 32} + oidExtensionNameConstraints = []int{2, 5, 29, 30} ) func buildExtensions(template *Certificate) (ret []extension, err os.Error) { - ret = make([]extension, 6 /* maximum number of elements. */ ) + ret = make([]extension, 7 /* maximum number of elements. */ ) n := 0 if template.KeyUsage != 0 { @@ -778,6 +833,22 @@ func buildExtensions(template *Certificate) (ret []extension, err os.Error) { n++ } + if len(template.PermittedDNSDomains) > 0 { + ret[n].Id = oidExtensionNameConstraints + ret[n].Critical = template.PermittedDNSDomainsCritical + + var out nameConstraints + out.Permitted = make([]generalSubtree, len(template.PermittedDNSDomains)) + for i, permitted := range template.PermittedDNSDomains { + out.Permitted[i] = generalSubtree{Name: permitted} + } + ret[n].Value, err = asn1.Marshal(out) + if err != nil { + return + } + n++ + } + // Adding another extension here? Remember to update the maximum number // of elements in the make() at the top of the function. @@ -792,7 +863,8 @@ var ( // CreateSelfSignedCertificate creates a new certificate based on // a template. The following members of template are used: SerialNumber, // Subject, NotBefore, NotAfter, KeyUsage, BasicConstraintsValid, IsCA, -// MaxPathLen, SubjectKeyId, DNSNames. +// MaxPathLen, SubjectKeyId, DNSNames, PermittedDNSDomainsCritical, +// PermittedDNSDomains. // // The certificate is signed by parent. If parent is equal to template then the // certificate is self-signed. 
The parameter pub is the public key of the @@ -840,7 +912,7 @@ func CreateCertificate(rand io.Reader, template, parent *Certificate, pub *rsa.P h.Write(tbsCertContents) digest := h.Sum() - signature, err := rsa.SignPKCS1v15(rand, priv, rsa.HashSHA1, digest) + signature, err := rsa.SignPKCS1v15(rand, priv, crypto.SHA1, digest) if err != nil { return } diff --git a/libgo/go/crypto/x509/x509_test.go b/libgo/go/crypto/x509/x509_test.go index 2fe47fdbe59..57889e7e122 100644 --- a/libgo/go/crypto/x509/x509_test.go +++ b/libgo/go/crypto/x509/x509_test.go @@ -171,7 +171,8 @@ func TestCreateSelfSignedCertificate(t *testing.T) { IsCA: true, DNSNames: []string{"test.example.com"}, - PolicyIdentifiers: []asn1.ObjectIdentifier{[]int{1, 2, 3}}, + PolicyIdentifiers: []asn1.ObjectIdentifier{[]int{1, 2, 3}}, + PermittedDNSDomains: []string{".example.com", "example.com"}, } derBytes, err := CreateCertificate(random, &template, &template, &priv.PublicKey, priv) @@ -190,6 +191,10 @@ func TestCreateSelfSignedCertificate(t *testing.T) { t.Errorf("Failed to parse policy identifiers: got:%#v want:%#v", cert.PolicyIdentifiers, template.PolicyIdentifiers) } + if len(cert.PermittedDNSDomains) != 2 || cert.PermittedDNSDomains[0] != ".example.com" || cert.PermittedDNSDomains[1] != "example.com" { + t.Errorf("Failed to parse name constraints: %#v", cert.PermittedDNSDomains) + } + err = cert.CheckSignatureFrom(cert) if err != nil { t.Errorf("Signature verification failed: %s", err) diff --git a/libgo/go/debug/pe/file.go b/libgo/go/debug/pe/file.go index 82c02407bbe..1bcbdc5e9a5 100644 --- a/libgo/go/debug/pe/file.go +++ b/libgo/go/debug/pe/file.go @@ -57,7 +57,6 @@ type ImportDirectory struct { FirstThunk uint32 dll string - rva []uint32 } // Data reads and returns the contents of the PE section. @@ -267,34 +266,28 @@ func (f *File) ImportedSymbols() ([]string, os.Error) { } ida = append(ida, dt) } - for i, _ := range ida { + names, _ := ds.Data() + var all []string + for _, dt := range ida { + dt.dll, _ = getString(names, int(dt.Name-ds.VirtualAddress)) + d, _ = ds.Data() + // seek to OriginalFirstThunk + d = d[dt.OriginalFirstThunk-ds.VirtualAddress:] for len(d) > 0 { va := binary.LittleEndian.Uint32(d[0:4]) d = d[4:] if va == 0 { break } - ida[i].rva = append(ida[i].rva, va) - } - } - for _, _ = range ida { - for len(d) > 0 { - va := binary.LittleEndian.Uint32(d[0:4]) - d = d[4:] - if va == 0 { - break + if va&0x80000000 > 0 { // is Ordinal + // TODO add dynimport ordinal support. + //ord := va&0x0000FFFF + } else { + fn, _ := getString(names, int(va-ds.VirtualAddress+2)) + all = append(all, fn+":"+dt.dll) } } } - names, _ := ds.Data() - var all []string - for _, dt := range ida { - dt.dll, _ = getString(names, int(dt.Name-ds.VirtualAddress)) - for _, va := range dt.rva { - fn, _ := getString(names, int(va-ds.VirtualAddress+2)) - all = append(all, fn+":"+dt.dll) - } - } return all, nil } diff --git a/libgo/go/ebnf/ebnf_test.go b/libgo/go/ebnf/ebnf_test.go index bbe530c278f..69ad5fed1cf 100644 --- a/libgo/go/ebnf/ebnf_test.go +++ b/libgo/go/ebnf/ebnf_test.go @@ -15,31 +15,31 @@ var fset = token.NewFileSet() var grammars = []string{ - `Program = . - `, - - `Program = foo . - foo = "foo" . - `, - - `Program = "a" | "b" "c" . - `, - - `Program = "a" ... "z" . - `, - - `Program = Song . - Song = { Note } . - Note = Do | (Re | Mi | Fa | So | La) | Ti . - Do = "c" . - Re = "d" . - Mi = "e" . - Fa = "f" . - So = "g" . - La = "a" . - Ti = ti . - ti = "b" . - `, +`Program = . +`, + +`Program = foo . +foo = "foo" . 
+`, + +`Program = "a" | "b" "c" . +`, + +`Program = "a" ... "z" . +`, + +`Program = Song . + Song = { Note } . + Note = Do | (Re | Mi | Fa | So | La) | Ti . + Do = "c" . + Re = "d" . + Mi = "e" . + Fa = "f" . + So = "g" . + La = "a" . + Ti = ti . + ti = "b" . +`, } diff --git a/libgo/go/encoding/binary/binary.go b/libgo/go/encoding/binary/binary.go index 77ff3a9f3ef..ee2f23dbba2 100644 --- a/libgo/go/encoding/binary/binary.go +++ b/libgo/go/encoding/binary/binary.go @@ -2,8 +2,9 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// This package implements translation between -// unsigned integer values and byte sequences. +// Package binary implements translation between +// unsigned integer values and byte sequences +// and the reading and writing of fixed-size values. package binary import ( diff --git a/libgo/go/encoding/line/line.go b/libgo/go/encoding/line/line.go index 92dddcb996d..f46ce1c83a0 100644 --- a/libgo/go/encoding/line/line.go +++ b/libgo/go/encoding/line/line.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// This package implements a Reader which handles reading \r and \r\n -// deliminated lines. +// The line package implements a Reader that reads lines delimited by '\n' or ' \r\n'. package line import ( @@ -11,8 +10,7 @@ import ( "os" ) -// Reader reads lines from an io.Reader (which may use either '\n' or -// '\r\n'). +// Reader reads lines, delimited by '\n' or \r\n', from an io.Reader. type Reader struct { buf []byte consumed int @@ -20,11 +18,33 @@ type Reader struct { err os.Error } -func NewReader(in io.Reader, maxLineLength int) *Reader { +// NewReader returns a new Reader that will read successive +// lines from the input Reader. +func NewReader(input io.Reader, maxLineLength int) *Reader { return &Reader{ buf: make([]byte, 0, maxLineLength), consumed: 0, - in: in, + in: input, + } +} + +// Read reads from any buffered data past the last line read, or from the underlying +// io.Reader if the buffer is empty. +func (l *Reader) Read(p []byte) (n int, err os.Error) { + l.removeConsumedFromBuffer() + if len(l.buf) > 0 { + n = copy(p, l.buf) + l.consumed += n + return + } + return l.in.Read(p) +} + +func (l *Reader) removeConsumedFromBuffer() { + if l.consumed > 0 { + n := copy(l.buf, l.buf[l.consumed:]) + l.buf = l.buf[:n] + l.consumed = 0 } } @@ -36,11 +56,7 @@ func NewReader(in io.Reader, maxLineLength int) *Reader { // the Reader and is only valid until the next call to ReadLine. ReadLine // either returns a non-nil line or it returns an error, never both. 
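For context on the encoding/line changes in this file: NewReader bounds the line length, ReadLine returns lines with the trailing '\n' or '\r\n' removed (setting isPrefix when a line exceeds the bound), and the new Read method drains whatever follows the last line read. A small usage sketch, not from the patch, with invented input text:

package main

import (
	"bytes"
	"encoding/line"
	"fmt"
	"os"
)

func main() {
	input := bytes.NewBuffer([]byte("first line\nsecond line\r\n"))
	r := line.NewReader(input, 64) // lines longer than 64 bytes would come back with isPrefix set
	for {
		ln, isPrefix, err := r.ReadLine()
		if err == os.EOF {
			break
		}
		if err != nil {
			fmt.Println("read error:", err)
			return
		}
		fmt.Printf("%q (isPrefix=%v)\n", ln, isPrefix)
	}
}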
func (l *Reader) ReadLine() (line []byte, isPrefix bool, err os.Error) { - if l.consumed > 0 { - n := copy(l.buf, l.buf[l.consumed:]) - l.buf = l.buf[:n] - l.consumed = 0 - } + l.removeConsumedFromBuffer() if len(l.buf) == 0 && l.err != nil { err = l.err @@ -89,6 +105,9 @@ func (l *Reader) ReadLine() (line []byte, isPrefix bool, err os.Error) { l.buf = l.buf[:oldLen+n] if readErr != nil { l.err = readErr + if len(l.buf) == 0 { + return nil, false, readErr + } } } panic("unreachable") diff --git a/libgo/go/encoding/line/line_test.go b/libgo/go/encoding/line/line_test.go index 68d13b58616..ff3d51669b5 100644 --- a/libgo/go/encoding/line/line_test.go +++ b/libgo/go/encoding/line/line_test.go @@ -6,6 +6,8 @@ package line import ( "bytes" + "io" + "io/ioutil" "os" "testing" ) @@ -87,3 +89,45 @@ func TestLineTooLong(t *testing.T) { t.Errorf("bad result for third line: %x", line) } } + +func TestReadAfterLines(t *testing.T) { + line1 := "line1" + restData := "line2\nline 3\n" + inbuf := bytes.NewBuffer([]byte(line1 + "\n" + restData)) + outbuf := new(bytes.Buffer) + maxLineLength := len(line1) + len(restData)/2 + l := NewReader(inbuf, maxLineLength) + line, isPrefix, err := l.ReadLine() + if isPrefix || err != nil || string(line) != line1 { + t.Errorf("bad result for first line: isPrefix=%v err=%v line=%q", isPrefix, err, string(line)) + } + n, err := io.Copy(outbuf, l) + if int(n) != len(restData) || err != nil { + t.Errorf("bad result for Read: n=%d err=%v", n, err) + } + if outbuf.String() != restData { + t.Errorf("bad result for Read: got %q; expected %q", outbuf.String(), restData) + } +} + +func TestReadEmptyBuffer(t *testing.T) { + l := NewReader(bytes.NewBuffer(nil), 10) + line, isPrefix, err := l.ReadLine() + if err != os.EOF { + t.Errorf("expected EOF from ReadLine, got '%s' %t %s", line, isPrefix, err) + } +} + +func TestLinesAfterRead(t *testing.T) { + l := NewReader(bytes.NewBuffer([]byte("foo")), 10) + _, err := ioutil.ReadAll(l) + if err != nil { + t.Error(err) + return + } + + line, isPrefix, err := l.ReadLine() + if err != os.EOF { + t.Errorf("expected EOF from ReadLine, got '%s' %t %s", line, isPrefix, err) + } +} diff --git a/libgo/go/exec/exec.go b/libgo/go/exec/exec.go index ba9bd2472a4..80f6f3c7dd4 100644 --- a/libgo/go/exec/exec.go +++ b/libgo/go/exec/exec.go @@ -7,6 +7,7 @@ package exec import ( "os" + "strconv" ) // Arguments to Run. @@ -21,12 +22,22 @@ const ( // Stdin, Stdout, and Stderr are Files representing pipes // connected to the running command's standard input, output, and error, // or else nil, depending on the arguments to Run. -// Pid is the running command's operating system process ID. +// Process represents the underlying operating system process. type Cmd struct { - Stdin *os.File - Stdout *os.File - Stderr *os.File - Pid int + Stdin *os.File + Stdout *os.File + Stderr *os.File + Process *os.Process +} + +// PathError records the name of a binary that was not +// found on the current $PATH. +type PathError struct { + Name string +} + +func (e *PathError) String() string { + return "command " + strconv.Quote(e.Name) + " not found in $PATH" } // Given mode (DevNull, etc), return file for child @@ -77,24 +88,24 @@ func modeToFiles(mode, fd int) (*os.File, *os.File, os.Error) { // If a parameter is Pipe, then the corresponding field (Stdin, Stdout, Stderr) // of the returned Cmd is the other end of the pipe. // Otherwise the field in Cmd is nil. 
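Putting the new Cmd shape above together: Run still takes the mode constants for the three standard streams, but the child is now represented by an *os.Process rather than a bare pid. A sketch of typical use, assuming a Unix system and a hypothetical binary path; this is illustration, not code from the patch:

package main

import (
	"exec"
	"fmt"
	"os"
)

func main() {
	// Discard stdin and stderr, capture stdout through a pipe.
	c, err := exec.Run("/bin/date", []string{"date"}, os.Environ(), "",
		exec.DevNull, exec.Pipe, exec.DevNull)
	if err != nil {
		fmt.Println("run failed:", err)
		return
	}
	defer c.Close() // waits if necessary, then closes c.Stdout

	fmt.Println("child pid:", c.Process.Pid)

	buf := make([]byte, 128)
	if n, err := c.Stdout.Read(buf); err == nil {
		fmt.Printf("output: %s", buf[:n])
	}

	if w, err := c.Wait(0); err == nil {
		fmt.Println("wait status:", w)
	}
}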
-func Run(name string, argv, envv []string, dir string, stdin, stdout, stderr int) (p *Cmd, err os.Error) { - p = new(Cmd) +func Run(name string, argv, envv []string, dir string, stdin, stdout, stderr int) (c *Cmd, err os.Error) { + c = new(Cmd) var fd [3]*os.File - if fd[0], p.Stdin, err = modeToFiles(stdin, 0); err != nil { + if fd[0], c.Stdin, err = modeToFiles(stdin, 0); err != nil { goto Error } - if fd[1], p.Stdout, err = modeToFiles(stdout, 1); err != nil { + if fd[1], c.Stdout, err = modeToFiles(stdout, 1); err != nil { goto Error } if stderr == MergeWithStdout { fd[2] = fd[1] - } else if fd[2], p.Stderr, err = modeToFiles(stderr, 2); err != nil { + } else if fd[2], c.Stderr, err = modeToFiles(stderr, 2); err != nil { goto Error } // Run command. - p.Pid, err = os.ForkExec(name, argv, envv, dir, fd[0:]) + c.Process, err = os.StartProcess(name, argv, envv, dir, fd[0:]) if err != nil { goto Error } @@ -107,7 +118,7 @@ func Run(name string, argv, envv []string, dir string, stdin, stdout, stderr int if fd[2] != os.Stderr && fd[2] != fd[1] { fd[2].Close() } - return p, nil + return c, nil Error: if fd[0] != os.Stdin && fd[0] != nil { @@ -119,63 +130,67 @@ Error: if fd[2] != os.Stderr && fd[2] != nil && fd[2] != fd[1] { fd[2].Close() } - if p.Stdin != nil { - p.Stdin.Close() + if c.Stdin != nil { + c.Stdin.Close() + } + if c.Stdout != nil { + c.Stdout.Close() } - if p.Stdout != nil { - p.Stdout.Close() + if c.Stderr != nil { + c.Stderr.Close() } - if p.Stderr != nil { - p.Stderr.Close() + if c.Process != nil { + c.Process.Release() } return nil, err } -// Wait waits for the running command p, -// returning the Waitmsg returned by os.Wait and an error. -// The options are passed through to os.Wait. -// Setting options to 0 waits for p to exit; +// Wait waits for the running command c, +// returning the Waitmsg returned when the process exits. +// The options are passed to the process's Wait method. +// Setting options to 0 waits for c to exit; // other options cause Wait to return for other // process events; see package os for details. -func (p *Cmd) Wait(options int) (*os.Waitmsg, os.Error) { - if p.Pid <= 0 { +func (c *Cmd) Wait(options int) (*os.Waitmsg, os.Error) { + if c.Process == nil { return nil, os.ErrorString("exec: invalid use of Cmd.Wait") } - w, err := os.Wait(p.Pid, options) + w, err := c.Process.Wait(options) if w != nil && (w.Exited() || w.Signaled()) { - p.Pid = -1 + c.Process.Release() + c.Process = nil } return w, err } -// Close waits for the running command p to exit, +// Close waits for the running command c to exit, // if it hasn't already, and then closes the non-nil file descriptors -// p.Stdin, p.Stdout, and p.Stderr. -func (p *Cmd) Close() os.Error { - if p.Pid > 0 { +// c.Stdin, c.Stdout, and c.Stderr. +func (c *Cmd) Close() os.Error { + if c.Process != nil { // Loop on interrupt, but // ignore other errors -- maybe // caller has already waited for pid. - _, err := p.Wait(0) + _, err := c.Wait(0) for err == os.EINTR { - _, err = p.Wait(0) + _, err = c.Wait(0) } } // Close the FDs that are still open. 
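Related note: the PathError type introduced above is what LookPath (see the lp_unix.go and lp_windows.go hunks below) now returns in place of a generic *os.PathError, so callers can recover the name that was not found. A hedged sketch with an invented binary name:

package main

import (
	"exec"
	"fmt"
)

func main() {
	path, err := exec.LookPath("some-missing-tool")
	if err != nil {
		if perr, ok := err.(*exec.PathError); ok {
			fmt.Println("not found in $PATH:", perr.Name)
			return
		}
		fmt.Println("lookup failed:", err)
		return
	}
	fmt.Println("found at", path)
}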
var err os.Error - if p.Stdin != nil && p.Stdin.Fd() >= 0 { - if err1 := p.Stdin.Close(); err1 != nil { + if c.Stdin != nil && c.Stdin.Fd() >= 0 { + if err1 := c.Stdin.Close(); err1 != nil { err = err1 } } - if p.Stdout != nil && p.Stdout.Fd() >= 0 { - if err1 := p.Stdout.Close(); err1 != nil && err != nil { + if c.Stdout != nil && c.Stdout.Fd() >= 0 { + if err1 := c.Stdout.Close(); err1 != nil && err != nil { err = err1 } } - if p.Stderr != nil && p.Stderr != p.Stdout && p.Stderr.Fd() >= 0 { - if err1 := p.Stderr.Close(); err1 != nil && err != nil { + if c.Stderr != nil && c.Stderr != c.Stdout && c.Stderr.Fd() >= 0 { + if err1 := c.Stderr.Close(); err1 != nil && err != nil { err = err1 } } diff --git a/libgo/go/exec/lp_test.go b/libgo/go/exec/lp_test.go new file mode 100644 index 00000000000..54081771ecc --- /dev/null +++ b/libgo/go/exec/lp_test.go @@ -0,0 +1,33 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package exec + +import ( + "testing" +) + +var nonExistentPaths = []string{ + "some-non-existent-path", + "non-existent-path/slashed", +} + +func TestLookPathNotFound(t *testing.T) { + for _, name := range nonExistentPaths { + path, err := LookPath(name) + if err == nil { + t.Fatalf("LookPath found %q in $PATH", name) + } + if path != "" { + t.Fatalf("LookPath path == %q when err != nil", path) + } + perr, ok := err.(*PathError) + if !ok { + t.Fatal("LookPath error is not a PathError") + } + if perr.Name != name { + t.Fatalf("want PathError name %q, got %q", name, perr.Name) + } + } +} diff --git a/libgo/go/exec/lp_unix.go b/libgo/go/exec/lp_unix.go index 292e24fccdd..44f84347b99 100644 --- a/libgo/go/exec/lp_unix.go +++ b/libgo/go/exec/lp_unix.go @@ -29,7 +29,7 @@ func LookPath(file string) (string, os.Error) { if canExec(file) { return file, nil } - return "", &os.PathError{"lookpath", file, os.ENOENT} + return "", &PathError{file} } pathenv := os.Getenv("PATH") for _, dir := range strings.Split(pathenv, ":", -1) { @@ -41,5 +41,5 @@ func LookPath(file string) (string, os.Error) { return dir + "/" + file, nil } } - return "", &os.PathError{"lookpath", file, os.ENOENT} + return "", &PathError{file} } diff --git a/libgo/go/exec/lp_windows.go b/libgo/go/exec/lp_windows.go index 7b56afa8566..d357575fdbe 100644 --- a/libgo/go/exec/lp_windows.go +++ b/libgo/go/exec/lp_windows.go @@ -49,7 +49,7 @@ func LookPath(file string) (string, os.Error) { if f, ok := canExec(file, exts); ok { return f, nil } - return ``, &os.PathError{"lookpath", file, os.ENOENT} + return ``, &PathError{file} } if pathenv := os.Getenv(`PATH`); pathenv == `` { if f, ok := canExec(`.\`+file, exts); ok { @@ -62,5 +62,5 @@ func LookPath(file string) (string, os.Error) { } } } - return ``, &os.PathError{"lookpath", file, os.ENOENT} + return ``, &PathError{file} } diff --git a/libgo/go/exp/draw/x11/conn.go b/libgo/go/exp/draw/x11/conn.go index da2181536fb..e28fb217065 100644 --- a/libgo/go/exp/draw/x11/conn.go +++ b/libgo/go/exp/draw/x11/conn.go @@ -122,10 +122,13 @@ func (c *conn) writeSocket() { func (c *conn) Screen() draw.Image { return c.img } func (c *conn) FlushImage() { - // We do the send (the <- operator) in an expression context, rather than in - // a statement context, so that it does not block, and fails if the buffered - // channel is full (in which case there already is a flush request pending). - _ = c.flush <- false + select { + case c.flush <- false: + // Flush notification sent. 
+ default: + // Could not send. + // Flush notification must be pending already. + } } func (c *conn) Close() os.Error { diff --git a/libgo/go/exp/eval/stmt.go b/libgo/go/exp/eval/stmt.go index 77ff066d09c..5c5d4338a1d 100644 --- a/libgo/go/exp/eval/stmt.go +++ b/libgo/go/exp/eval/stmt.go @@ -908,7 +908,7 @@ func (a *stmtCompiler) compileBranchStmt(s *ast.BranchStmt) { return default: - log.Panic("Unexpected branch token %v", s.Tok) + log.Panicf("Unexpected branch token %v", s.Tok) } a.flow.put1(false, pc) diff --git a/libgo/go/exp/eval/stmt_test.go b/libgo/go/exp/eval/stmt_test.go index a14a288d936..4a883ef5ee7 100644 --- a/libgo/go/exp/eval/stmt_test.go +++ b/libgo/go/exp/eval/stmt_test.go @@ -217,7 +217,7 @@ var stmtTests = []test{ Val2("if false { i = 2 } else { i = 3 }; i2 = 4", "i", 3, "i2", 4), Val2("if i == i2 { i = 2 } else { i = 3 }; i2 = 4", "i", 3, "i2", 4), // Omit optional parts - Val2("if { i = 2 } else { i = 3 }; i2 = 4", "i", 2, "i2", 4), + Val2("if true { i = 2 } else { i = 3 }; i2 = 4", "i", 2, "i2", 4), Val2("if true { i = 2 }; i2 = 4", "i", 2, "i2", 4), Val2("if false { i = 2 }; i2 = 4", "i", 1, "i2", 4), // Init @@ -243,11 +243,11 @@ var stmtTests = []test{ CErr("fn1 := func() int { if true { return 1 } }", "return"), CErr("fn1 := func() int { if true { } }", "return"), Run("fn1 := func() int { if true { }; return 1 }"), - CErr("fn1 := func() int { if { } }", "return"), - CErr("fn1 := func() int { if { } else { return 2 } }", "return"), - Run("fn1 := func() int { if { return 1 } }"), - Run("fn1 := func() int { if { return 1 } else { } }"), - Run("fn1 := func() int { if { return 1 } else { } }"), + CErr("fn1 := func() int { if true { } }", "return"), + CErr("fn1 := func() int { if true { } else { return 2 } }", "return"), + Run("fn1 := func() int { if true { return 1 }; return 0 }"), + Run("fn1 := func() int { if true { return 1 } else { }; return 0 }"), + Run("fn1 := func() int { if true { return 1 } else { }; return 0 }"), // Switch Val1("switch { case false: i += 2; case true: i += 4; default: i += 8 }", "i", 1+4), diff --git a/libgo/go/exp/wingui/gui.go b/libgo/go/exp/wingui/gui.go new file mode 100644 index 00000000000..cf392934c5b --- /dev/null +++ b/libgo/go/exp/wingui/gui.go @@ -0,0 +1,153 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "fmt" + "syscall" + "os" + "unsafe" +) + +// some help functions + +func abortf(format string, a ...interface{}) { + fmt.Fprintf(os.Stdout, format, a...) + os.Exit(1) +} + +func abortErrNo(funcname string, err int) { + abortf("%s failed: %d %s\n", funcname, err, syscall.Errstr(err)) +} + +// global vars + +var ( + mh uint32 + bh uint32 +) + +// WinProc called by windows to notify us of all windows events we might be interested in. 
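The FlushImage rewrite above replaces the expression-form non-blocking send with an explicit select that has a default case. The same idiom in isolation, as a sketch with a hypothetical channel:

// notify posts a wake-up on a buffered channel without ever blocking:
// if the buffer is already full, a notification is pending and this one
// can simply be dropped.
func notify(ch chan<- bool) {
	select {
	case ch <- true:
		// notification queued
	default:
		// already pending; drop it
	}
}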
+func WndProc(hwnd, msg uint32, wparam, lparam int32) uintptr { + var rc int32 + switch msg { + case WM_CREATE: + var e int + // CreateWindowEx + bh, e = CreateWindowEx( + 0, + syscall.StringToUTF16Ptr("button"), + syscall.StringToUTF16Ptr("Quit"), + WS_CHILD|WS_VISIBLE|BS_DEFPUSHBUTTON, + 75, 70, 140, 25, + hwnd, 1, mh, 0) + if e != 0 { + abortErrNo("CreateWindowEx", e) + } + fmt.Printf("button handle is %x\n", bh) + rc = DefWindowProc(hwnd, msg, wparam, lparam) + case WM_COMMAND: + switch uint32(lparam) { + case bh: + e := PostMessage(hwnd, WM_CLOSE, 0, 0) + if e != 0 { + abortErrNo("PostMessage", e) + } + default: + rc = DefWindowProc(hwnd, msg, wparam, lparam) + } + case WM_CLOSE: + DestroyWindow(hwnd) + case WM_DESTROY: + PostQuitMessage(0) + default: + rc = DefWindowProc(hwnd, msg, wparam, lparam) + } + //fmt.Printf("WndProc(0x%08x, %d, 0x%08x, 0x%08x) (%d)\n", hwnd, msg, wparam, lparam, rc) + return uintptr(rc) +} + +func rungui() int { + var e int + + // GetModuleHandle + mh, e = GetModuleHandle(nil) + if e != 0 { + abortErrNo("GetModuleHandle", e) + } + + // Get icon we're going to use. + myicon, e := LoadIcon(0, IDI_APPLICATION) + if e != 0 { + abortErrNo("LoadIcon", e) + } + + // Get cursor we're going to use. + mycursor, e := LoadCursor(0, IDC_ARROW) + if e != 0 { + abortErrNo("LoadCursor", e) + } + + // Create callback + wproc := syscall.NewCallback(WndProc) + + // RegisterClassEx + wcname := syscall.StringToUTF16Ptr("myWindowClass") + var wc Wndclassex + wc.Size = uint32(unsafe.Sizeof(wc)) + wc.WndProc = wproc + wc.Instance = mh + wc.Icon = myicon + wc.Cursor = mycursor + wc.Background = COLOR_BTNFACE + 1 + wc.MenuName = nil + wc.ClassName = wcname + wc.IconSm = myicon + if _, e := RegisterClassEx(&wc); e != 0 { + abortErrNo("RegisterClassEx", e) + } + + // CreateWindowEx + wh, e := CreateWindowEx( + WS_EX_CLIENTEDGE, + wcname, + syscall.StringToUTF16Ptr("My window"), + WS_OVERLAPPEDWINDOW, + CW_USEDEFAULT, CW_USEDEFAULT, 300, 200, + 0, 0, mh, 0) + if e != 0 { + abortErrNo("CreateWindowEx", e) + } + fmt.Printf("main window handle is %x\n", wh) + + // ShowWindow + ShowWindow(wh, SW_SHOWDEFAULT) + + // UpdateWindow + if e := UpdateWindow(wh); e != 0 { + abortErrNo("UpdateWindow", e) + } + + // Process all windows messages until WM_QUIT. + var m Msg + for { + r, e := GetMessage(&m, 0, 0, 0) + if e != 0 { + abortErrNo("GetMessage", e) + } + if r == 0 { + // WM_QUIT received -> get out + break + } + TranslateMessage(&m) + DispatchMessage(&m) + } + return int(m.Wparam) +} + +func main() { + rc := rungui() + os.Exit(rc) +} diff --git a/libgo/go/exp/wingui/winapi.go b/libgo/go/exp/wingui/winapi.go new file mode 100644 index 00000000000..c96f452999f --- /dev/null +++ b/libgo/go/exp/wingui/winapi.go @@ -0,0 +1,148 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "syscall" + "unsafe" +) + +func loadDll(fname string) uint32 { + h, e := syscall.LoadLibrary(fname) + if e != 0 { + abortf("LoadLibrary(%s) failed with err=%d.\n", fname, e) + } + return h +} + +func getSysProcAddr(m uint32, pname string) uintptr { + p, e := syscall.GetProcAddress(m, pname) + if e != 0 { + abortf("GetProcAddress(%s) failed with err=%d.\n", pname, e) + } + return uintptr(p) +} + +type Wndclassex struct { + Size uint32 + Style uint32 + WndProc uintptr + ClsExtra int32 + WndExtra int32 + Instance uint32 + Icon uint32 + Cursor uint32 + Background uint32 + MenuName *uint16 + ClassName *uint16 + IconSm uint32 +} + +type Point struct { + X int32 + Y int32 +} + +type Msg struct { + Hwnd uint32 + Message uint32 + Wparam int32 + Lparam int32 + Time uint32 + Pt Point +} + +const ( + // Window styles + WS_OVERLAPPED = 0 + WS_POPUP = 0x80000000 + WS_CHILD = 0x40000000 + WS_MINIMIZE = 0x20000000 + WS_VISIBLE = 0x10000000 + WS_DISABLED = 0x8000000 + WS_CLIPSIBLINGS = 0x4000000 + WS_CLIPCHILDREN = 0x2000000 + WS_MAXIMIZE = 0x1000000 + WS_CAPTION = WS_BORDER | WS_DLGFRAME + WS_BORDER = 0x800000 + WS_DLGFRAME = 0x400000 + WS_VSCROLL = 0x200000 + WS_HSCROLL = 0x100000 + WS_SYSMENU = 0x80000 + WS_THICKFRAME = 0x40000 + WS_GROUP = 0x20000 + WS_TABSTOP = 0x10000 + WS_MINIMIZEBOX = 0x20000 + WS_MAXIMIZEBOX = 0x10000 + WS_TILED = WS_OVERLAPPED + WS_ICONIC = WS_MINIMIZE + WS_SIZEBOX = WS_THICKFRAME + // Common Window Styles + WS_OVERLAPPEDWINDOW = WS_OVERLAPPED | WS_CAPTION | WS_SYSMENU | WS_THICKFRAME | WS_MINIMIZEBOX | WS_MAXIMIZEBOX + WS_TILEDWINDOW = WS_OVERLAPPEDWINDOW + WS_POPUPWINDOW = WS_POPUP | WS_BORDER | WS_SYSMENU + WS_CHILDWINDOW = WS_CHILD + + WS_EX_CLIENTEDGE = 0x200 + + // Some windows messages + WM_CREATE = 1 + WM_DESTROY = 2 + WM_CLOSE = 16 + WM_COMMAND = 273 + + // Some button control styles + BS_DEFPUSHBUTTON = 1 + + // Some colour constants + COLOR_WINDOW = 5 + COLOR_BTNFACE = 15 + + // Default window position + CW_USEDEFAULT = 0x80000000 - 0x100000000 + + // Show window default style + SW_SHOWDEFAULT = 10 +) + +var ( + // Some globaly known cusrors + IDC_ARROW = MakeIntResource(32512) + IDC_IBEAM = MakeIntResource(32513) + IDC_WAIT = MakeIntResource(32514) + IDC_CROSS = MakeIntResource(32515) + + // Some globaly known icons + IDI_APPLICATION = MakeIntResource(32512) + IDI_HAND = MakeIntResource(32513) + IDI_QUESTION = MakeIntResource(32514) + IDI_EXCLAMATION = MakeIntResource(32515) + IDI_ASTERISK = MakeIntResource(32516) + IDI_WINLOGO = MakeIntResource(32517) + IDI_WARNING = IDI_EXCLAMATION + IDI_ERROR = IDI_HAND + IDI_INFORMATION = IDI_ASTERISK +) + +//sys GetModuleHandle(modname *uint16) (handle uint32, errno int) = GetModuleHandleW +//sys RegisterClassEx(wndclass *Wndclassex) (atom uint16, errno int) = user32.RegisterClassExW +//sys CreateWindowEx(exstyle uint32, classname *uint16, windowname *uint16, style uint32, x int32, y int32, width int32, height int32, wndparent uint32, menu uint32, instance uint32, param uintptr) (hwnd uint32, errno int) = user32.CreateWindowExW +//sys DefWindowProc(hwnd uint32, msg uint32, wparam int32, lparam int32) (lresult int32) = user32.DefWindowProcW +//sys DestroyWindow(hwnd uint32) (errno int) = user32.DestroyWindow +//sys PostQuitMessage(exitcode int32) = user32.PostQuitMessage +//sys ShowWindow(hwnd uint32, cmdshow int32) (wasvisible bool) = user32.ShowWindow +//sys UpdateWindow(hwnd uint32) (errno int) = user32.UpdateWindow +//sys GetMessage(msg *Msg, hwnd uint32, MsgFilterMin uint32, MsgFilterMax uint32) 
(ret int32, errno int) [failretval==-1] = user32.GetMessageW +//sys TranslateMessage(msg *Msg) (done bool) = user32.TranslateMessage +//sys DispatchMessage(msg *Msg) (ret int32) = user32.DispatchMessageW +//sys LoadIcon(instance uint32, iconname *uint16) (icon uint32, errno int) = user32.LoadIconW +//sys LoadCursor(instance uint32, cursorname *uint16) (cursor uint32, errno int) = user32.LoadCursorW +//sys SetCursor(cursor uint32) (precursor uint32, errno int) = user32.SetCursor +//sys SendMessage(hwnd uint32, msg uint32, wparam int32, lparam int32) (lresult int32) = user32.SendMessageW +//sys PostMessage(hwnd uint32, msg uint32, wparam int32, lparam int32) (errno int) = user32.PostMessageW + +func MakeIntResource(id uint16) *uint16 { + return (*uint16)(unsafe.Pointer(uintptr(id))) +} diff --git a/libgo/go/exp/wingui/zwinapi.go b/libgo/go/exp/wingui/zwinapi.go new file mode 100644 index 00000000000..60aaac6cf16 --- /dev/null +++ b/libgo/go/exp/wingui/zwinapi.go @@ -0,0 +1,211 @@ +// mksyscall_windows.sh winapi.go +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +package main + +import "unsafe" +import "syscall" + +var ( + modkernel32 = loadDll("kernel32.dll") + moduser32 = loadDll("user32.dll") + + procGetModuleHandleW = getSysProcAddr(modkernel32, "GetModuleHandleW") + procRegisterClassExW = getSysProcAddr(moduser32, "RegisterClassExW") + procCreateWindowExW = getSysProcAddr(moduser32, "CreateWindowExW") + procDefWindowProcW = getSysProcAddr(moduser32, "DefWindowProcW") + procDestroyWindow = getSysProcAddr(moduser32, "DestroyWindow") + procPostQuitMessage = getSysProcAddr(moduser32, "PostQuitMessage") + procShowWindow = getSysProcAddr(moduser32, "ShowWindow") + procUpdateWindow = getSysProcAddr(moduser32, "UpdateWindow") + procGetMessageW = getSysProcAddr(moduser32, "GetMessageW") + procTranslateMessage = getSysProcAddr(moduser32, "TranslateMessage") + procDispatchMessageW = getSysProcAddr(moduser32, "DispatchMessageW") + procLoadIconW = getSysProcAddr(moduser32, "LoadIconW") + procLoadCursorW = getSysProcAddr(moduser32, "LoadCursorW") + procSetCursor = getSysProcAddr(moduser32, "SetCursor") + procSendMessageW = getSysProcAddr(moduser32, "SendMessageW") + procPostMessageW = getSysProcAddr(moduser32, "PostMessageW") +) + +func GetModuleHandle(modname *uint16) (handle uint32, errno int) { + r0, _, e1 := syscall.Syscall(procGetModuleHandleW, 1, uintptr(unsafe.Pointer(modname)), 0, 0) + handle = uint32(r0) + if handle == 0 { + if e1 != 0 { + errno = int(e1) + } else { + errno = syscall.EINVAL + } + } else { + errno = 0 + } + return +} + +func RegisterClassEx(wndclass *Wndclassex) (atom uint16, errno int) { + r0, _, e1 := syscall.Syscall(procRegisterClassExW, 1, uintptr(unsafe.Pointer(wndclass)), 0, 0) + atom = uint16(r0) + if atom == 0 { + if e1 != 0 { + errno = int(e1) + } else { + errno = syscall.EINVAL + } + } else { + errno = 0 + } + return +} + +func CreateWindowEx(exstyle uint32, classname *uint16, windowname *uint16, style uint32, x int32, y int32, width int32, height int32, wndparent uint32, menu uint32, instance uint32, param uintptr) (hwnd uint32, errno int) { + r0, _, e1 := syscall.Syscall12(procCreateWindowExW, 12, uintptr(exstyle), uintptr(unsafe.Pointer(classname)), uintptr(unsafe.Pointer(windowname)), uintptr(style), uintptr(x), uintptr(y), uintptr(width), uintptr(height), uintptr(wndparent), uintptr(menu), uintptr(instance), uintptr(param)) + hwnd = uint32(r0) + if hwnd == 0 { + if e1 != 0 { + errno = int(e1) + } else { + errno = syscall.EINVAL + } + } else { + errno = 
0 + } + return +} + +func DefWindowProc(hwnd uint32, msg uint32, wparam int32, lparam int32) (lresult int32) { + r0, _, _ := syscall.Syscall6(procDefWindowProcW, 4, uintptr(hwnd), uintptr(msg), uintptr(wparam), uintptr(lparam), 0, 0) + lresult = int32(r0) + return +} + +func DestroyWindow(hwnd uint32) (errno int) { + r1, _, e1 := syscall.Syscall(procDestroyWindow, 1, uintptr(hwnd), 0, 0) + if int(r1) == 0 { + if e1 != 0 { + errno = int(e1) + } else { + errno = syscall.EINVAL + } + } else { + errno = 0 + } + return +} + +func PostQuitMessage(exitcode int32) { + syscall.Syscall(procPostQuitMessage, 1, uintptr(exitcode), 0, 0) + return +} + +func ShowWindow(hwnd uint32, cmdshow int32) (wasvisible bool) { + r0, _, _ := syscall.Syscall(procShowWindow, 2, uintptr(hwnd), uintptr(cmdshow), 0) + wasvisible = bool(r0 != 0) + return +} + +func UpdateWindow(hwnd uint32) (errno int) { + r1, _, e1 := syscall.Syscall(procUpdateWindow, 1, uintptr(hwnd), 0, 0) + if int(r1) == 0 { + if e1 != 0 { + errno = int(e1) + } else { + errno = syscall.EINVAL + } + } else { + errno = 0 + } + return +} + +func GetMessage(msg *Msg, hwnd uint32, MsgFilterMin uint32, MsgFilterMax uint32) (ret int32, errno int) { + r0, _, e1 := syscall.Syscall6(procGetMessageW, 4, uintptr(unsafe.Pointer(msg)), uintptr(hwnd), uintptr(MsgFilterMin), uintptr(MsgFilterMax), 0, 0) + ret = int32(r0) + if ret == -1 { + if e1 != 0 { + errno = int(e1) + } else { + errno = syscall.EINVAL + } + } else { + errno = 0 + } + return +} + +func TranslateMessage(msg *Msg) (done bool) { + r0, _, _ := syscall.Syscall(procTranslateMessage, 1, uintptr(unsafe.Pointer(msg)), 0, 0) + done = bool(r0 != 0) + return +} + +func DispatchMessage(msg *Msg) (ret int32) { + r0, _, _ := syscall.Syscall(procDispatchMessageW, 1, uintptr(unsafe.Pointer(msg)), 0, 0) + ret = int32(r0) + return +} + +func LoadIcon(instance uint32, iconname *uint16) (icon uint32, errno int) { + r0, _, e1 := syscall.Syscall(procLoadIconW, 2, uintptr(instance), uintptr(unsafe.Pointer(iconname)), 0) + icon = uint32(r0) + if icon == 0 { + if e1 != 0 { + errno = int(e1) + } else { + errno = syscall.EINVAL + } + } else { + errno = 0 + } + return +} + +func LoadCursor(instance uint32, cursorname *uint16) (cursor uint32, errno int) { + r0, _, e1 := syscall.Syscall(procLoadCursorW, 2, uintptr(instance), uintptr(unsafe.Pointer(cursorname)), 0) + cursor = uint32(r0) + if cursor == 0 { + if e1 != 0 { + errno = int(e1) + } else { + errno = syscall.EINVAL + } + } else { + errno = 0 + } + return +} + +func SetCursor(cursor uint32) (precursor uint32, errno int) { + r0, _, e1 := syscall.Syscall(procSetCursor, 1, uintptr(cursor), 0, 0) + precursor = uint32(r0) + if precursor == 0 { + if e1 != 0 { + errno = int(e1) + } else { + errno = syscall.EINVAL + } + } else { + errno = 0 + } + return +} + +func SendMessage(hwnd uint32, msg uint32, wparam int32, lparam int32) (lresult int32) { + r0, _, _ := syscall.Syscall6(procSendMessageW, 4, uintptr(hwnd), uintptr(msg), uintptr(wparam), uintptr(lparam), 0, 0) + lresult = int32(r0) + return +} + +func PostMessage(hwnd uint32, msg uint32, wparam int32, lparam int32) (errno int) { + r1, _, e1 := syscall.Syscall6(procPostMessageW, 4, uintptr(hwnd), uintptr(msg), uintptr(wparam), uintptr(lparam), 0, 0) + if int(r1) == 0 { + if e1 != 0 { + errno = int(e1) + } else { + errno = syscall.EINVAL + } + } else { + errno = 0 + } + return +} diff --git a/libgo/go/flag/flag.go b/libgo/go/flag/flag.go index 143a1061150..be972057ed7 100644 --- a/libgo/go/flag/flag.go +++ 
b/libgo/go/flag/flag.go @@ -96,7 +96,7 @@ func newIntValue(val int, p *int) *intValue { } func (i *intValue) Set(s string) bool { - v, err := strconv.Atoi(s) + v, err := strconv.Btoi64(s, 0) *i = intValue(v) return err == nil } @@ -112,7 +112,7 @@ func newInt64Value(val int64, p *int64) *int64Value { } func (i *int64Value) Set(s string) bool { - v, err := strconv.Atoi64(s) + v, err := strconv.Btoi64(s, 0) *i = int64Value(v) return err == nil } @@ -128,7 +128,7 @@ func newUintValue(val uint, p *uint) *uintValue { } func (i *uintValue) Set(s string) bool { - v, err := strconv.Atoui(s) + v, err := strconv.Btoui64(s, 0) *i = uintValue(v) return err == nil } @@ -144,7 +144,7 @@ func newUint64Value(val uint64, p *uint64) *uint64Value { } func (i *uint64Value) Set(s string) bool { - v, err := strconv.Atoui64(s) + v, err := strconv.Btoui64(s, 0) *i = uint64Value(v) return err == nil } diff --git a/libgo/go/flag/flag_test.go b/libgo/go/flag/flag_test.go index b91a8b56795..30a21e61ae2 100644 --- a/libgo/go/flag/flag_test.go +++ b/libgo/go/flag/flag_test.go @@ -106,7 +106,7 @@ func TestParse(t *testing.T) { "-bool", "-bool2=true", "--int", "22", - "--int64", "23", + "--int64", "0x23", "-uint", "24", "--uint64", "25", "-string", "hello", @@ -125,8 +125,8 @@ func TestParse(t *testing.T) { if *intFlag != 22 { t.Error("int flag should be 22, is ", *intFlag) } - if *int64Flag != 23 { - t.Error("int64 flag should be 23, is ", *int64Flag) + if *int64Flag != 0x23 { + t.Error("int64 flag should be 0x23, is ", *int64Flag) } if *uintFlag != 24 { t.Error("uint flag should be 24, is ", *uintFlag) diff --git a/libgo/go/fmt/doc.go b/libgo/go/fmt/doc.go index 191bf68b13b..77ee62bb1dd 100644 --- a/libgo/go/fmt/doc.go +++ b/libgo/go/fmt/doc.go @@ -16,6 +16,7 @@ when printing structs, the plus flag (%+v) adds field names %#v a Go-syntax representation of the value %T a Go-syntax representation of the type of the value + %% a literal percent sign; consumes no value Boolean: %t the word true or false @@ -26,8 +27,10 @@ %o base 8 %x base 16, with lower-case letters for a-f %X base 16, with upper-case letters for A-F - %U unicode format: U+1234; same as "U+%x" with 4 digits default + %U Unicode format: U+1234; same as "U+%x" with 4 digits default Floating-point and complex constituents: + %b decimalless scientific notation with exponent a power + of two, in the manner of strconv.Ftoa32, e.g. -123456p-78 %e scientific notation, e.g. -1234.456e+78 %E scientific notation, e.g. -1234.456E+78 %f decimal point but no exponent, e.g. 123.456 @@ -44,14 +47,19 @@ There is no 'u' flag. Integers are printed unsigned if they have unsigned type. Similarly, there is no need to specify the size of the operand (int8, int64). - For numeric values, the width and precision flags control - formatting; width sets the width of the field, precision the - number of places after the decimal, if appropriate. The - format %6.2f prints 123.45. The width of a field is the number - of Unicode code points in the string. This differs from C's printf where - the field width is the number of bytes. Either or both of the - flags may be replaced with the character '*', causing their values - to be obtained from the next operand, which must be of type int. + The width and precision control formatting and are in units of Unicode + code points. (This differs from C's printf where the units are numbers + of bytes.) 
Either or both of the flags may be replaced with the + character '*', causing their values to be obtained from the next + operand, which must be of type int. + + For numeric values, width sets the width of the field and precision + sets the number of places after the decimal, if appropriate. For + example, the format %6.2f prints 123.45. + + For strings, width is the minimum number of characters to output, + padding with spaces if necessary, and precision is the maximum + number of characters to output, truncating if necessary. Other flags: + always print a sign for numeric values @@ -112,7 +120,7 @@ An analogous set of functions scans formatted text to yield values. Scan, Scanf and Scanln read from os.Stdin; Fscan, Fscanf and Fscanln read from a specified os.Reader; Sscan, - Sscanf and Sscanln read from an argument string. Sscanln, + Sscanf and Sscanln read from an argument string. Scanln, Fscanln and Sscanln stop scanning at a newline and require that the items be followed by one; Sscanf, Fscanf and Sscanf require newlines in the input to match newlines in the format; the other @@ -131,6 +139,10 @@ %e %E %f %F %g %g are all equivalent and scan any floating point or complex value %s and %v on strings scan a space-delimited token + The familiar base-setting prefixes 0 (octal) and 0x + (hexadecimal) are accepted when scanning integers without a + format or with the %v verb. + Width is interpreted in the input text (%5s means at most five runes of input will be read to scan a string) but there is no syntax for scanning with a precision (no %5.2f, just @@ -152,13 +164,15 @@ All arguments to be scanned must be either pointers to basic types or implementations of the Scanner interface. - Note: Fscan etc. can read one character (rune) past the - input they return, which means that a loop calling a scan - routine may skip some of the input. This is usually a - problem only when there is no space between input values. - However, if the reader provided to Fscan implements UnreadRune, + Note: Fscan etc. can read one character (rune) past the input + they return, which means that a loop calling a scan routine + may skip some of the input. This is usually a problem only + when there is no space between input values. If the reader + provided to Fscan implements ReadRune, that method will be used + to read characters. If the reader also implements UnreadRune, that method will be used to save the character and successive - calls will not lose data. To attach an UnreadRune method - to a reader without that capability, use bufio.NewReader. + calls will not lose data. To attach ReadRune and UnreadRune + methods to a reader without that capability, use + bufio.NewReader. 
*/ package fmt diff --git a/libgo/go/fmt/fmt_test.go b/libgo/go/fmt/fmt_test.go index 3f085b72245..c8aa6090bbf 100644 --- a/libgo/go/fmt/fmt_test.go +++ b/libgo/go/fmt/fmt_test.go @@ -311,9 +311,9 @@ var fmttests = []struct { // go syntax {"%#v", A{1, 2, "a", []int{1, 2}}, `fmt_test.A{i:1, j:0x2, s:"a", x:[]int{1, 2}}`}, - {"%#v", &b, "(*uint8)(PTR)"}, - {"%#v", TestFmtInterface, "(func(*testing.T))(PTR)"}, - {"%#v", make(chan int), "(chan int)(PTR)"}, + {"%#v", &b, "(*uint8)(0xPTR)"}, + {"%#v", TestFmtInterface, "(func(*testing.T))(0xPTR)"}, + {"%#v", make(chan int), "(chan int)(0xPTR)"}, {"%#v", uint64(1<<64 - 1), "0xffffffffffffffff"}, {"%#v", 1000000000, "1000000000"}, {"%#v", map[string]int{"a": 1, "b": 2}, `map[string] int{"a":1, "b":2}`}, @@ -365,14 +365,15 @@ var fmttests = []struct { {"%6T", &intVal, " *int"}, // %p - {"p0=%p", new(int), "p0=PTR"}, + {"p0=%p", new(int), "p0=0xPTR"}, {"p1=%s", &pValue, "p1=String(p)"}, // String method... - {"p2=%p", &pValue, "p2=PTR"}, // ... not called with %p + {"p2=%p", &pValue, "p2=0xPTR"}, // ... not called with %p + {"p4=%#p", new(int), "p4=PTR"}, // %p on non-pointers - {"%p", make(chan int), "PTR"}, - {"%p", make(map[int]int), "PTR"}, - {"%p", make([]int, 1), "PTR"}, + {"%p", make(chan int), "0xPTR"}, + {"%p", make(map[int]int), "0xPTR"}, + {"%p", make([]int, 1), "0xPTR"}, {"%p", 27, "%!p(int=27)"}, // not a pointer at all // erroneous things @@ -388,8 +389,8 @@ var fmttests = []struct { func TestSprintf(t *testing.T) { for _, tt := range fmttests { s := Sprintf(tt.fmt, tt.val) - if i := strings.Index(s, "0x"); i >= 0 && strings.Contains(tt.out, "PTR") { - j := i + 2 + if i := strings.Index(tt.out, "PTR"); i >= 0 { + j := i for ; j < len(s); j++ { c := s[j] if (c < '0' || c > '9') && (c < 'a' || c > 'f') && (c < 'A' || c > 'F') { diff --git a/libgo/go/fmt/print.go b/libgo/go/fmt/print.go index 96029a8789f..4e14fdaa4a7 100644 --- a/libgo/go/fmt/print.go +++ b/libgo/go/fmt/print.go @@ -74,15 +74,42 @@ type pp struct { fmt fmt } -// A leaky bucket of reusable pp structures. -var ppFree = make(chan *pp, 100) +// A cache holds a set of reusable objects. +// The buffered channel holds the currently available objects. +// If more are needed, the cache creates them by calling new. +type cache struct { + saved chan interface{} + new func() interface{} +} + +func (c *cache) put(x interface{}) { + select { + case c.saved <- x: + // saved in cache + default: + // discard + } +} -// Allocate a new pp struct. Probably can grab the previous one from ppFree. -func newPrinter() *pp { - p, ok := <-ppFree - if !ok { - p = new(pp) +func (c *cache) get() interface{} { + select { + case x := <-c.saved: + return x // reused from cache + default: + return c.new() } + panic("not reached") +} + +func newCache(f func() interface{}) *cache { + return &cache{make(chan interface{}, 100), f} +} + +var ppFree = newCache(func() interface{} { return new(pp) }) + +// Allocate a new pp struct or grab a cached one. +func newPrinter() *pp { + p := ppFree.get().(*pp) p.fmt.init(&p.buf) return p } @@ -94,7 +121,7 @@ func (p *pp) free() { return } p.buf.Reset() - _ = ppFree <- p + ppFree.put(p) } func (p *pp) Width() (wid int, ok bool) { return p.fmt.wid, p.fmt.widPresent } @@ -321,11 +348,11 @@ func (p *pp) fmtInt64(v int64, verb int, value interface{}) { } } -// fmt0x64 formats a uint64 in hexadecimal and prefixes it with 0x by -// temporarily turning on the sharp flag. 
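The cache type added above generalizes the old leaky-bucket free lists (the ppFree channel here and the ssFree channel in scan.go below). The same pattern written out generically, as a sketch with invented names and a bytes.Buffer payload:

package main

import "bytes"

// bufCache is a fixed-capacity free list backed by a buffered channel:
// get never blocks (it allocates on a miss) and put never blocks (it
// discards when the cache is full).
type bufCache struct {
	saved chan *bytes.Buffer
}

func newBufCache() *bufCache {
	return &bufCache{saved: make(chan *bytes.Buffer, 100)}
}

func (c *bufCache) get() *bytes.Buffer {
	select {
	case b := <-c.saved:
		b.Reset()
		return b
	default:
		return new(bytes.Buffer)
	}
	panic("not reached") // compilers of this era want a terminating statement, as in print.go
}

func (c *bufCache) put(b *bytes.Buffer) {
	select {
	case c.saved <- b:
		// kept for reuse
	default:
		// cache full; let the garbage collector reclaim it
	}
}

func main() {
	c := newBufCache()
	b := c.get()
	b.WriteString("hello")
	c.put(b)
}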
-func (p *pp) fmt0x64(v uint64) { +// fmt0x64 formats a uint64 in hexadecimal and prefixes it with 0x or +// not, as requested, by temporarily setting the sharp flag. +func (p *pp) fmt0x64(v uint64, leading0x bool) { sharp := p.fmt.sharp - p.fmt.sharp = true // turn on 0x + p.fmt.sharp = leading0x p.fmt.integer(int64(v), 16, unsigned, ldigits) p.fmt.sharp = sharp } @@ -357,7 +384,7 @@ func (p *pp) fmtUint64(v uint64, verb int, goSyntax bool, value interface{}) { p.fmt.integer(int64(v), 10, unsigned, ldigits) case 'v': if goSyntax { - p.fmt0x64(v) + p.fmt0x64(v, true) } else { p.fmt.integer(int64(v), 10, unsigned, ldigits) } @@ -507,11 +534,11 @@ func (p *pp) fmtPointer(field interface{}, value reflect.Value, verb int, goSynt if u == 0 { p.buf.Write(nilBytes) } else { - p.fmt0x64(uint64(v.Get())) + p.fmt0x64(uint64(v.Get()), true) } p.add(')') } else { - p.fmt0x64(uint64(u)) + p.fmt0x64(uint64(u), !p.fmt.sharp) } } @@ -774,7 +801,7 @@ BigSwitch: if v == 0 { p.buf.Write(nilBytes) } else { - p.fmt0x64(uint64(v)) + p.fmt0x64(uint64(v), true) } p.buf.WriteByte(')') break @@ -783,7 +810,7 @@ BigSwitch: p.buf.Write(nilAngleBytes) break } - p.fmt0x64(uint64(v)) + p.fmt0x64(uint64(v), true) case uintptrGetter: p.fmtPointer(field, value, verb, goSyntax) default: diff --git a/libgo/go/fmt/scan.go b/libgo/go/fmt/scan.go index ebbb17155e4..c0f2bacb69b 100644 --- a/libgo/go/fmt/scan.go +++ b/libgo/go/fmt/scan.go @@ -7,6 +7,7 @@ package fmt import ( "bytes" "io" + "math" "os" "reflect" "strconv" @@ -15,18 +16,11 @@ import ( "utf8" ) -// readRuner is the interface to something that can read runes. If -// the object provided to Scan does not satisfy this interface, the -// object will be wrapped by a readRune object. -type readRuner interface { - ReadRune() (rune int, size int, err os.Error) -} - -// unreadRuner is the interface to something that can unread runes. +// runeUnreader is the interface to something that can unread runes. // If the object provided to Scan does not satisfy this interface, // a local buffer will be used to back up the input, but its contents // will be lost when Scan returns. -type unreadRuner interface { +type runeUnreader interface { UnreadRune() os.Error } @@ -34,23 +28,30 @@ type unreadRuner interface { // Scanners may do rune-at-a-time scanning or ask the ScanState // to discover the next space-delimited token. type ScanState interface { - // GetRune reads the next rune (Unicode code point) from the input. - GetRune() (rune int, err os.Error) - // UngetRune causes the next call to GetRune to return the rune. - UngetRune() - // Width returns the value of the width option and whether it has been set. - // The unit is Unicode code points. - Width() (wid int, ok bool) + // ReadRune reads the next rune (Unicode code point) from the input. + // If invoked during Scanln, Fscanln, or Sscanln, ReadRune() will + // return EOF after returning the first '\n' or when reading beyond + // the specified width. + ReadRune() (rune int, size int, err os.Error) + // UnreadRune causes the next call to ReadRune to return the same rune. + UnreadRune() os.Error // Token returns the next space-delimited token from the input. If // a width has been specified, the returned token will be no longer // than the width. Token() (token string, err os.Error) + // Width returns the value of the width option and whether it has been set. + // The unit is Unicode code points. 
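The user-visible effect of the fmt0x64/fmtPointer change above, matching the 0xPTR expectations in fmt_test.go: %p and %#v now print pointers with an explicit 0x prefix, and the sharp flag on %p suppresses it. For example (addresses illustrative only):

package main

import "fmt"

func main() {
	p := new(int)
	fmt.Printf("%p\n", p)  // e.g. 0xf840001234  (0x prefix now included)
	fmt.Printf("%#p\n", p) // e.g. f840001234    (sharp flag drops the prefix)
	fmt.Printf("%#v\n", p) // e.g. (*int)(0xf840001234)
}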
+ Width() (wid int, ok bool) + // Because ReadRune is implemented by the interface, Read should never be + // called by the scanning routines and a valid implementation of + // ScanState may choose always to return an error from Read. + Read(buf []byte) (n int, err os.Error) } // Scanner is implemented by any value that has a Scan method, which scans // the input for the representation of a value and stores the result in the // receiver, which must be a pointer to be useful. The Scan method is called -// for any argument to Scan or Scanln that implements it. +// for any argument to Scan, Scanf, or Scanln that implements it. type Scanner interface { Scan(state ScanState, verb int) os.Error } @@ -102,18 +103,18 @@ func Sscanf(str string, format string, a ...interface{}) (n int, err os.Error) { // returns the number of items successfully scanned. If that is less // than the number of arguments, err will report why. func Fscan(r io.Reader, a ...interface{}) (n int, err os.Error) { - s := newScanState(r, true) + s, old := newScanState(r, true, false) n, err = s.doScan(a) - s.free() + s.free(old) return } // Fscanln is similar to Fscan, but stops scanning at a newline and // after the final item there must be a newline or EOF. func Fscanln(r io.Reader, a ...interface{}) (n int, err os.Error) { - s := newScanState(r, false) + s, old := newScanState(r, false, true) n, err = s.doScan(a) - s.free() + s.free(old) return } @@ -121,9 +122,9 @@ func Fscanln(r io.Reader, a ...interface{}) (n int, err os.Error) { // values into successive arguments as determined by the format. It // returns the number of items successfully parsed. func Fscanf(r io.Reader, format string, a ...interface{}) (n int, err os.Error) { - s := newScanState(r, false) + s, old := newScanState(r, false, false) n, err = s.doScanf(format, a) - s.free() + s.free(old) return } @@ -137,53 +138,70 @@ const EOF = -1 // ss is the internal implementation of ScanState. type ss struct { - rr readRuner // where to read input - buf bytes.Buffer // token accumulator - nlIsSpace bool // whether newline counts as white space - peekRune int // one-rune lookahead - prevRune int // last rune returned by GetRune - atEOF bool // already read EOF - maxWid int // max width of field, in runes - widPresent bool // width was specified - wid int // width consumed so far; used in accept() -} - -func (s *ss) GetRune() (rune int, err os.Error) { + rr io.RuneReader // where to read input + buf bytes.Buffer // token accumulator + peekRune int // one-rune lookahead + prevRune int // last rune returned by ReadRune + count int // runes consumed so far. + atEOF bool // already read EOF + ssave +} + +// ssave holds the parts of ss that need to be +// saved and restored on recursive scans. +type ssave struct { + validSave bool // is or was a part of an actual ss. + nlIsEnd bool // whether newline terminates scan + nlIsSpace bool // whether newline counts as white space + fieldLimit int // max value of ss.count for this field; fieldLimit <= limit + limit int // max value of ss.count. + maxWid int // width of this field. +} + +// The Read method is only in ScanState so that ScanState +// satisfies io.Reader. It will never be called when used as +// intended, so there is no need to make it actually work. +func (s *ss) Read(buf []byte) (n int, err os.Error) { + return 0, os.ErrorString("ScanState's Read should not be called. 
Use ReadRune") +} + +func (s *ss) ReadRune() (rune int, size int, err os.Error) { if s.peekRune >= 0 { + s.count++ rune = s.peekRune + size = utf8.RuneLen(rune) s.prevRune = rune s.peekRune = -1 return } - rune, _, err = s.rr.ReadRune() + if s.atEOF || s.nlIsEnd && s.prevRune == '\n' || s.count >= s.fieldLimit { + err = os.EOF + return + } + + rune, size, err = s.rr.ReadRune() if err == nil { + s.count++ s.prevRune = rune + } else if err == os.EOF { + s.atEOF = true } return } func (s *ss) Width() (wid int, ok bool) { - return s.maxWid, s.widPresent + if s.maxWid == hugeWid { + return 0, false + } + return s.maxWid, true } // The public method returns an error; this private one panics. // If getRune reaches EOF, the return value is EOF (-1). func (s *ss) getRune() (rune int) { - if s.atEOF { - return EOF - } - if s.peekRune >= 0 { - rune = s.peekRune - s.prevRune = rune - s.peekRune = -1 - return - } - rune, _, err := s.rr.ReadRune() - if err == nil { - s.prevRune = rune - } else if err != nil { + rune, _, err := s.ReadRune() + if err != nil { if err == os.EOF { - s.atEOF = true return EOF } s.error(err) @@ -191,35 +209,25 @@ func (s *ss) getRune() (rune int) { return } -// mustGetRune turns os.EOF into a panic(io.ErrUnexpectedEOF). +// mustReadRune turns os.EOF into a panic(io.ErrUnexpectedEOF). // It is called in cases such as string scanning where an EOF is a // syntax error. -func (s *ss) mustGetRune() (rune int) { - if s.atEOF { +func (s *ss) mustReadRune() (rune int) { + rune = s.getRune() + if rune == EOF { s.error(io.ErrUnexpectedEOF) } - if s.peekRune >= 0 { - rune = s.peekRune - s.peekRune = -1 - return - } - rune, _, err := s.rr.ReadRune() - if err != nil { - if err == os.EOF { - err = io.ErrUnexpectedEOF - } - s.error(err) - } return } - -func (s *ss) UngetRune() { - if u, ok := s.rr.(unreadRuner); ok { +func (s *ss) UnreadRune() os.Error { + if u, ok := s.rr.(runeUnreader); ok { u.UnreadRune() } else { s.peekRune = s.prevRune } + s.count-- + return nil } func (s *ss) error(err os.Error) { @@ -246,7 +254,7 @@ func (s *ss) Token() (tok string, err os.Error) { // readRune is a structure to enable reading UTF-8 encoded code points // from an io.Reader. It is used if the Reader given to the scanner does -// not already implement ReadRuner. +// not already implement io.RuneReader. type readRune struct { reader io.Reader buf [utf8.UTFMax]byte // used only inside ReadRune @@ -303,37 +311,53 @@ func (r *readRune) ReadRune() (rune int, size int, err os.Error) { } -// A leaky bucket of reusable ss structures. -var ssFree = make(chan *ss, 100) +var ssFree = newCache(func() interface{} { return new(ss) }) -// Allocate a new ss struct. Probably can grab the previous one from ssFree. -func newScanState(r io.Reader, nlIsSpace bool) *ss { - s, ok := <-ssFree - if !ok { - s = new(ss) +// Allocate a new ss struct or grab a cached one. +func newScanState(r io.Reader, nlIsSpace, nlIsEnd bool) (s *ss, old ssave) { + // If the reader is a *ss, then we've got a recursive + // call to Scan, so re-use the scan state. 
+ s, ok := r.(*ss) + if ok { + old = s.ssave + s.limit = s.fieldLimit + s.nlIsEnd = nlIsEnd || s.nlIsEnd + s.nlIsSpace = nlIsSpace + return } - if rr, ok := r.(readRuner); ok { + + s = ssFree.get().(*ss) + if rr, ok := r.(io.RuneReader); ok { s.rr = rr } else { s.rr = &readRune{reader: r} } s.nlIsSpace = nlIsSpace + s.nlIsEnd = nlIsEnd + s.prevRune = -1 s.peekRune = -1 s.atEOF = false - s.maxWid = 0 - s.widPresent = false - return s + s.limit = hugeWid + s.fieldLimit = hugeWid + s.maxWid = hugeWid + s.validSave = true + return } // Save used ss structs in ssFree; avoid an allocation per invocation. -func (s *ss) free() { +func (s *ss) free(old ssave) { + // If it was used recursively, just restore the old state. + if old.validSave { + s.ssave = old + return + } // Don't hold on to ss structs with large buffers. if cap(s.buf.Bytes()) > 1024 { return } s.buf.Reset() s.rr = nil - _ = ssFree <- s + ssFree.put(s) } // skipSpace skips spaces and maybe newlines. @@ -354,7 +378,7 @@ func (s *ss) skipSpace(stopAtNewline bool) { return } if !unicode.IsSpace(rune) { - s.UngetRune() + s.UnreadRune() break } } @@ -366,13 +390,13 @@ func (s *ss) skipSpace(stopAtNewline bool) { func (s *ss) token() string { s.skipSpace(false) // read until white space or newline - for nrunes := 0; !s.widPresent || nrunes < s.maxWid; nrunes++ { + for { rune := s.getRune() if rune == EOF { break } if unicode.IsSpace(rune) { - s.UngetRune() + s.UnreadRune() break } s.buf.WriteRune(rune) @@ -391,28 +415,31 @@ var boolError = os.ErrorString("syntax error scanning boolean") // consume reads the next rune in the input and reports whether it is in the ok string. // If accept is true, it puts the character into the input token. func (s *ss) consume(ok string, accept bool) bool { - if s.wid >= s.maxWid { - return false - } rune := s.getRune() if rune == EOF { return false } - for i := 0; i < len(ok); i++ { - if int(ok[i]) == rune { - if accept { - s.buf.WriteRune(rune) - s.wid++ - } - return true + if strings.IndexRune(ok, rune) >= 0 { + if accept { + s.buf.WriteRune(rune) } + return true } if rune != EOF && accept { - s.UngetRune() + s.UnreadRune() } return false } +// peek reports whether the next character is in the ok string, without consuming it. +func (s *ss) peek(ok string) bool { + rune := s.getRune() + if rune != EOF { + s.UnreadRune() + } + return strings.IndexRune(ok, rune) >= 0 +} + // accept checks the next rune in the input. If it's a byte (sic) in the string, it puts it in the // buffer and returns true. Otherwise it return false. func (s *ss) accept(ok string) bool { @@ -436,7 +463,7 @@ func (s *ss) scanBool(verb int) bool { return false } // Syntax-checking a boolean is annoying. We're not fastidious about case. - switch s.mustGetRune() { + switch s.mustReadRune() { case '0': return false case '1': @@ -463,7 +490,7 @@ const ( hexadecimalDigits = "0123456789aAbBcCdDeEfF" sign = "+-" period = "." - exponent = "eE" + exponent = "eEp" ) // getBase returns the numeric base represented by the verb and its digit string. @@ -486,8 +513,8 @@ func (s *ss) getBase(verb int) (base int, digits string) { } // scanNumber returns the numerical string with specified digits starting here. 
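Since newScanState above now accepts any io.RuneReader directly (and reuses the *ss when scanning recursively), the advice added to fmt/doc.go applies here: wrap a plain reader in bufio.NewReader so that ReadRune and UnreadRune are available and a rune read past a value is not lost between calls. A small sketch reading two integers from standard input:

package main

import (
	"bufio"
	"fmt"
	"os"
)

func main() {
	in := bufio.NewReader(os.Stdin) // provides ReadRune and UnreadRune
	var a, b int
	if n, err := fmt.Fscan(in, &a, &b); err != nil {
		fmt.Println("scanned", n, "items:", err)
		return
	}
	fmt.Println("sum:", a+b)
}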
-func (s *ss) scanNumber(digits string) string { - if !s.accept(digits) { +func (s *ss) scanNumber(digits string, haveDigits bool) string { + if !haveDigits && !s.accept(digits) { s.errorString("expected integer") } for s.accept(digits) { @@ -497,7 +524,7 @@ func (s *ss) scanNumber(digits string) string { // scanRune returns the next rune value in the input. func (s *ss) scanRune(bitSize int) int64 { - rune := int64(s.mustGetRune()) + rune := int64(s.mustReadRune()) n := uint(bitSize) x := (rune << (64 - n)) >> (64 - n) if x != rune { @@ -506,22 +533,44 @@ func (s *ss) scanRune(bitSize int) int64 { return rune } +// scanBasePrefix reports whether the integer begins with a 0 or 0x, +// and returns the base, digit string, and whether a zero was found. +// It is called only if the verb is %v. +func (s *ss) scanBasePrefix() (base int, digits string, found bool) { + if !s.peek("0") { + return 10, decimalDigits, false + } + s.accept("0") + found = true // We've put a digit into the token buffer. + // Special cases for '0' && '0x' + base, digits = 8, octalDigits + if s.peek("xX") { + s.consume("xX", false) + base, digits = 16, hexadecimalDigits + } + return +} + // scanInt returns the value of the integer represented by the next // token, checking for overflow. Any error is stored in s.err. func (s *ss) scanInt(verb int, bitSize int) int64 { if verb == 'c' { return s.scanRune(bitSize) } - base, digits := s.getBase(verb) s.skipSpace(false) + base, digits := s.getBase(verb) + haveDigits := false if verb == 'U' { if !s.consume("U", false) || !s.consume("+", false) { s.errorString("bad unicode format ") } } else { s.accept(sign) // If there's a sign, it will be left in the token buffer. + if verb == 'v' { + base, digits, haveDigits = s.scanBasePrefix() + } } - tok := s.scanNumber(digits) + tok := s.scanNumber(digits, haveDigits) i, err := strconv.Btoi64(tok, base) if err != nil { s.error(err) @@ -540,14 +589,17 @@ func (s *ss) scanUint(verb int, bitSize int) uint64 { if verb == 'c' { return uint64(s.scanRune(bitSize)) } - base, digits := s.getBase(verb) s.skipSpace(false) + base, digits := s.getBase(verb) + haveDigits := false if verb == 'U' { if !s.consume("U", false) || !s.consume("+", false) { s.errorString("bad unicode format ") } + } else if verb == 'v' { + base, digits, haveDigits = s.scanBasePrefix() } - tok := s.scanNumber(digits) + tok := s.scanNumber(digits, haveDigits) i, err := strconv.Btoui64(tok, base) if err != nil { s.error(err) @@ -621,6 +673,27 @@ func (s *ss) complexTokens() (real, imag string) { // convertFloat converts the string to a float64value. func (s *ss) convertFloat(str string, n int) float64 { + if p := strings.Index(str, "p"); p >= 0 { + // Atof doesn't handle power-of-2 exponents, + // but they're easy to evaluate. + f, err := strconv.AtofN(str[:p], n) + if err != nil { + // Put full string into error. + if e, ok := err.(*strconv.NumError); ok { + e.Num = str + } + s.error(err) + } + n, err := strconv.Atoi(str[p+1:]) + if err != nil { + // Put full string into error. + if e, ok := err.(*strconv.NumError); ok { + e.Num = str + } + s.error(err) + } + return math.Ldexp(f, n) + } f, err := strconv.AtofN(str, n) if err != nil { s.error(err) @@ -667,12 +740,12 @@ func (s *ss) convertString(verb int) (str string) { // quotedString returns the double- or back-quoted string represented by the next input characters. 
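// Illustrative sketch, not taken from the patched sources: with the
// scanBasePrefix and convertFloat changes above, %v now honors 0/0x integer
// prefixes, and floating-point tokens may carry a binary ("p") exponent.
// The expected values mirror entries added to the scan tests later in this
// patch.
package main

import "fmt"

func main() {
	var i int
	var u uint
	var f float64

	fmt.Sscanf("0x44", "%v", &i)
	fmt.Println(i) // 68

	fmt.Sscanf("0377", "%v", &u)
	fmt.Println(u) // 255

	fmt.Sscan("2.3p+2", &f) // 2.3 scaled by 2**2 via math.Ldexp internally
	fmt.Println(f)          // 9.2
}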
func (s *ss) quotedString() string { - quote := s.mustGetRune() + quote := s.mustReadRune() switch quote { case '`': // Back-quoted: Anything goes until EOF or back quote. for { - rune := s.mustGetRune() + rune := s.mustReadRune() if rune == quote { break } @@ -683,13 +756,13 @@ func (s *ss) quotedString() string { // Double-quoted: Include the quotes and let strconv.Unquote do the backslash escapes. s.buf.WriteRune(quote) for { - rune := s.mustGetRune() + rune := s.mustReadRune() s.buf.WriteRune(rune) if rune == '\\' { // In a legal backslash escape, no matter how long, only the character // immediately after the escape can itself be a backslash or quote. // Thus we only need to protect the first character after the backslash. - rune := s.mustGetRune() + rune := s.mustReadRune() s.buf.WriteRune(rune) } else if rune == '"' { break @@ -728,10 +801,10 @@ func (s *ss) hexByte() (b byte, ok bool) { return } if unicode.IsSpace(rune1) { - s.UngetRune() + s.UnreadRune() return } - rune2 := s.mustGetRune() + rune2 := s.mustReadRune() return byte(s.hexDigit(rune1)<<4 | s.hexDigit(rune2)), true } @@ -751,7 +824,9 @@ func (s *ss) hexString() string { return s.buf.String() } -const floatVerbs = "eEfFgGv" +const floatVerbs = "beEfFgGv" + +const hugeWid = 1 << 30 // scanOne scans a single value, deriving the scanner from the type of the argument. func (s *ss) scanOne(verb int, field interface{}) { @@ -761,14 +836,13 @@ func (s *ss) scanOne(verb int, field interface{}) { if v, ok := field.(Scanner); ok { err = v.Scan(s, verb) if err != nil { + if err == os.EOF { + err = io.ErrUnexpectedEOF + } s.error(err) } return } - if !s.widPresent { - s.maxWid = 1 << 30 // Huge - } - s.wid = 0 switch v := field.(type) { case *bool: *v = s.scanBool(verb) @@ -869,7 +943,6 @@ func errorHandler(errp *os.Error) { } // doScan does the real work for scanning without a format string. -// At the moment, it handles only pointers to basic types. func (s *ss) doScan(a []interface{}) (numProcessed int, err os.Error) { defer errorHandler(&err) for _, field := range a { @@ -930,9 +1003,9 @@ func (s *ss) advance(format string) (i int) { s.skipSpace(true) continue } - inputc := s.mustGetRune() + inputc := s.mustReadRune() if fmtc != inputc { - s.UngetRune() + s.UnreadRune() return -1 } i += w @@ -964,7 +1037,15 @@ func (s *ss) doScanf(format string, a []interface{}) (numProcessed int, err os.E i++ // % is one byte // do we have 20 (width)? - s.maxWid, s.widPresent, i = parsenum(format, i, end) + var widPresent bool + s.maxWid, widPresent, i = parsenum(format, i, end) + if !widPresent { + s.maxWid = hugeWid + } + s.fieldLimit = s.limit + if f := s.count + s.maxWid; f < s.fieldLimit { + s.fieldLimit = f + } c, w := utf8.DecodeRuneInString(format[i:]) i += w @@ -977,6 +1058,7 @@ func (s *ss) doScanf(format string, a []interface{}) (numProcessed int, err os.E s.scanOne(c, field) numProcessed++ + s.fieldLimit = s.limit } if numProcessed < len(a) { s.errorString("too many operands") diff --git a/libgo/go/fmt/scan_test.go b/libgo/go/fmt/scan_test.go index 78b9fbb4ab0..65adb023686 100644 --- a/libgo/go/fmt/scan_test.go +++ b/libgo/go/fmt/scan_test.go @@ -6,6 +6,7 @@ package fmt_test import ( "bufio" + "bytes" . 
"fmt" "io" "math" @@ -87,21 +88,7 @@ type FloatTest struct { type Xs string func (x *Xs) Scan(state ScanState, verb int) os.Error { - var tok string - var c int - var err os.Error - wid, present := state.Width() - if !present { - tok, err = state.Token() - } else { - for i := 0; i < wid; i++ { - c, err = state.GetRune() - if err != nil { - break - } - tok += string(c) - } - } + tok, err := state.Token() if err != nil { return err } @@ -114,6 +101,26 @@ func (x *Xs) Scan(state ScanState, verb int) os.Error { var xVal Xs +// IntString accepts an integer followed immediately by a string. +// It tests the embedding of a scan within a scan. +type IntString struct { + i int + s string +} + +func (s *IntString) Scan(state ScanState, verb int) os.Error { + if _, err := Fscan(state, &s.i); err != nil { + return err + } + + if _, err := Fscan(state, &s.s); err != nil { + return err + } + return nil +} + +var intStringVal IntString + // myStringReader implements Read but not ReadRune, allowing us to test our readRune wrapper // type that creates something that can read runes given only Read(). type myStringReader struct { @@ -129,10 +136,20 @@ func newReader(s string) *myStringReader { } var scanTests = []ScanTest{ - // Numbers + // Basic types {"T\n", &boolVal, true}, // boolean test vals toggle to be sure they are written {"F\n", &boolVal, false}, // restored to zero value {"21\n", &intVal, 21}, + {"0\n", &intVal, 0}, + {"000\n", &intVal, 0}, + {"0x10\n", &intVal, 0x10}, + {"-0x10\n", &intVal, -0x10}, + {"0377\n", &intVal, 0377}, + {"-0377\n", &intVal, -0377}, + {"0\n", &uintVal, uint(0)}, + {"000\n", &uintVal, uint(0)}, + {"0x10\n", &uintVal, uint(0x10)}, + {"0377\n", &uintVal, uint(0377)}, {"22\n", &int8Val, int8(22)}, {"23\n", &int16Val, int16(23)}, {"24\n", &int32Val, int32(24)}, @@ -160,6 +177,10 @@ var scanTests = []ScanTest{ {"2.3\n", &float64Val, 2.3}, {"2.3e1\n", &float32Val, float32(2.3e1)}, {"2.3e2\n", &float64Val, 2.3e2}, + {"2.3p2\n", &float64Val, 2.3 * 4}, + {"2.3p+2\n", &float64Val, 2.3 * 4}, + {"2.3p+66\n", &float64Val, 2.3 * (1 << 32) * (1 << 32) * 4}, + {"2.3p-66\n", &float64Val, 2.3 / ((1 << 32) * (1 << 32) * 4)}, {"2.35\n", &stringVal, "2.35"}, {"2345678\n", &bytesVal, []byte("2345678")}, {"(3.4e1-2i)\n", &complex128Val, 3.4e1 - 2i}, @@ -186,8 +207,9 @@ var scanTests = []ScanTest{ {"114\n", &renamedStringVal, renamedString("114")}, {"115\n", &renamedBytesVal, renamedBytes([]byte("115"))}, - // Custom scanner. + // Custom scanners. {" vvv ", &xVal, Xs("vvv")}, + {" 1234hello", &intStringVal, IntString{1234, "hello"}}, // Fixed bugs {"2147483648\n", &int64Val, int64(2147483648)}, // was: integer overflow @@ -197,6 +219,8 @@ var scanfTests = []ScanfTest{ {"%v", "TRUE\n", &boolVal, true}, {"%t", "false\n", &boolVal, false}, {"%v", "-71\n", &intVal, -71}, + {"%v", "0377\n", &intVal, 0377}, + {"%v", "0x44\n", &intVal, 0x44}, {"%d", "72\n", &intVal, 72}, {"%c", "a\n", &intVal, 'a'}, {"%c", "\u5072\n", &intVal, 0x5072}, @@ -292,6 +316,7 @@ var f float64 var s, t string var c complex128 var x, y Xs +var z IntString var multiTests = []ScanfMultiTest{ {"", "", nil, nil, ""}, @@ -305,8 +330,9 @@ var multiTests = []ScanfMultiTest{ {"%d%s", "123abc", args(&i, &s), args(123, "abc"), ""}, {"%c%c%c", "2\u50c2X", args(&i, &j, &k), args('2', '\u50c2', 'X'), ""}, - // Custom scanner. + // Custom scanners. 
{"%2e%f", "eefffff", args(&x, &y), args(Xs("ee"), Xs("fffff")), ""}, + {"%4v%s", "12abcd", args(&z, &s), args(IntString{12, "ab"}, "cd"), ""}, // Errors {"%t", "23 18", args(&i), nil, "bad verb"}, @@ -329,7 +355,11 @@ func testScan(name string, t *testing.T, scan func(r io.Reader, a ...interface{} } n, err := scan(r, test.in) if err != nil { - t.Errorf("%s got error scanning %q: %s", name, test.text, err) + m := "" + if n > 0 { + m = Sprintf(" (%d fields ok)", n) + } + t.Errorf("%s got error scanning %q: %s%s", name, test.text, err, m) continue } if n != 1 { @@ -657,3 +687,178 @@ func TestUnreadRuneWithBufio(t *testing.T) { t.Errorf("expected αb; got %q", a) } } + +type TwoLines string + +// Attempt to read two lines into the object. Scanln should prevent this +// because it stops at newline; Scan and Scanf should be fine. +func (t *TwoLines) Scan(state ScanState, verb int) os.Error { + chars := make([]int, 0, 100) + for nlCount := 0; nlCount < 2; { + c, _, err := state.ReadRune() + if err != nil { + return err + } + chars = append(chars, c) + if c == '\n' { + nlCount++ + } + } + *t = TwoLines(string(chars)) + return nil +} + +func TestMultiLine(t *testing.T) { + input := "abc\ndef\n" + // Sscan should work + var tscan TwoLines + n, err := Sscan(input, &tscan) + if n != 1 { + t.Errorf("Sscan: expected 1 item; got %d", n) + } + if err != nil { + t.Errorf("Sscan: expected no error; got %s", err) + } + if string(tscan) != input { + t.Errorf("Sscan: expected %q; got %q", input, tscan) + } + // Sscanf should work + var tscanf TwoLines + n, err = Sscanf(input, "%s", &tscanf) + if n != 1 { + t.Errorf("Sscanf: expected 1 item; got %d", n) + } + if err != nil { + t.Errorf("Sscanf: expected no error; got %s", err) + } + if string(tscanf) != input { + t.Errorf("Sscanf: expected %q; got %q", input, tscanf) + } + // Sscanln should not work + var tscanln TwoLines + n, err = Sscanln(input, &tscanln) + if n != 0 { + t.Errorf("Sscanln: expected 0 items; got %d: %q", n, tscanln) + } + if err == nil { + t.Error("Sscanln: expected error; got none") + } else if err != io.ErrUnexpectedEOF { + t.Errorf("Sscanln: expected io.ErrUnexpectedEOF (ha!); got %s", err) + } +} + +// RecursiveInt accepts an string matching %d.%d.%d.... +// and parses it into a linked list. +// It allows us to benchmark recursive descent style scanners. +type RecursiveInt struct { + i int + next *RecursiveInt +} + +func (r *RecursiveInt) Scan(state ScanState, verb int) (err os.Error) { + _, err = Fscan(state, &r.i) + if err != nil { + return + } + next := new(RecursiveInt) + _, err = Fscanf(state, ".%v", next) + if err != nil { + if err == os.ErrorString("input does not match format") || err == io.ErrUnexpectedEOF { + err = nil + } + return + } + r.next = next + return +} + +// Perform the same scanning task as RecursiveInt.Scan +// but without recurring through scanner, so we can compare +// performance more directly. +func scanInts(r *RecursiveInt, b *bytes.Buffer) (err os.Error) { + r.next = nil + _, err = Fscan(b, &r.i) + if err != nil { + return + } + var c int + c, _, err = b.ReadRune() + if err != nil { + if err == os.EOF { + err = nil + } + return + } + if c != '.' 
{ + return + } + next := new(RecursiveInt) + err = scanInts(next, b) + if err == nil { + r.next = next + } + return +} + +func makeInts(n int) []byte { + var buf bytes.Buffer + Fprintf(&buf, "1") + for i := 1; i < n; i++ { + Fprintf(&buf, ".%d", i+1) + } + return buf.Bytes() +} + +func TestScanInts(t *testing.T) { + testScanInts(t, scanInts) + testScanInts(t, func(r *RecursiveInt, b *bytes.Buffer) (err os.Error) { + _, err = Fscan(b, r) + return + }) +} + +const intCount = 1000 + +func testScanInts(t *testing.T, scan func(*RecursiveInt, *bytes.Buffer) os.Error) { + r := new(RecursiveInt) + ints := makeInts(intCount) + buf := bytes.NewBuffer(ints) + err := scan(r, buf) + if err != nil { + t.Error("unexpected error", err) + } + i := 1 + for ; r != nil; r = r.next { + if r.i != i { + t.Fatal("bad scan: expected %d got %d", i, r.i) + } + i++ + } + if i-1 != intCount { + t.Fatal("bad scan count: expected %d got %d", intCount, i-1) + } +} + +func BenchmarkScanInts(b *testing.B) { + b.ResetTimer() + ints := makeInts(intCount) + var r RecursiveInt + for i := b.N - 1; i >= 0; i-- { + buf := bytes.NewBuffer(ints) + b.StartTimer() + scanInts(&r, buf) + b.StopTimer() + } +} + +func BenchmarkScanRecursiveInt(b *testing.B) { + b.ResetTimer() + ints := makeInts(intCount) + var r RecursiveInt + for i := b.N - 1; i >= 0; i-- { + buf := bytes.NewBuffer(ints) + b.StartTimer() + Fscan(buf, &r) + b.StopTimer() + } +} diff --git a/libgo/go/go/ast/ast.go b/libgo/go/go/ast/ast.go index cf2ce36df88..abafb5663b3 100644 --- a/libgo/go/go/ast/ast.go +++ b/libgo/go/go/ast/ast.go @@ -535,6 +535,13 @@ type ( X Expr // expression } + // A SendStmt node represents a send statement. + SendStmt struct { + Chan Expr + Arrow token.Pos // position of "<-" + Value Expr + } + // An IncDecStmt node represents an increment or decrement statement. IncDecStmt struct { X Expr @@ -590,7 +597,7 @@ type ( IfStmt struct { If token.Pos // position of "if" keyword Init Stmt // initalization statement; or nil - Cond Expr // condition; or nil + Cond Expr // condition Body *BlockStmt Else Stmt // else branch; or nil } @@ -629,11 +636,10 @@ type ( // A CommClause node represents a case of a select statement. CommClause struct { - Case token.Pos // position of "case" or "default" keyword - Tok token.Token // ASSIGN or DEFINE (valid only if Lhs != nil) - Lhs, Rhs Expr // Rhs == nil means default case - Colon token.Pos // position of ":" - Body []Stmt // statement list; or nil + Case token.Pos // position of "case" or "default" keyword + Comm Stmt // send or receive statement; nil means default case + Colon token.Pos // position of ":" + Body []Stmt // statement list; or nil } // An SelectStmt node represents a select statement. 
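// Illustrative sketch, not taken from the patched sources: with the new
// ast.SendStmt node, a send such as "c <- 1" is a statement in its own right,
// and a select case now carries a single Comm statement (an *ast.SendStmt, an
// assignment or expression wrapping a receive, or nil for default) instead of
// the old Tok/Lhs/Rhs triple. The counter type and sample source below are
// invented for this example.
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
)

// sendCounter implements ast.Visitor and counts *ast.SendStmt nodes.
type sendCounter struct{ n int }

func (c *sendCounter) Visit(node ast.Node) ast.Visitor {
	if _, ok := node.(*ast.SendStmt); ok {
		c.n++
	}
	return c
}

func main() {
	src := `package p

func f(c chan int) {
	c <- 1 // *ast.SendStmt, no longer parsed as a binary <- expression
	select {
	case c <- 2: // CommClause.Comm is an *ast.SendStmt
	case v := <-c: // CommClause.Comm is an *ast.AssignStmt around the receive
		_ = v
	default: // CommClause.Comm is nil
	}
}
`
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	counter := new(sendCounter)
	ast.Walk(counter, file)
	fmt.Println(counter.n) // 2
}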
@@ -670,6 +676,7 @@ func (s *DeclStmt) Pos() token.Pos { return s.Decl.Pos() } func (s *EmptyStmt) Pos() token.Pos { return s.Semicolon } func (s *LabeledStmt) Pos() token.Pos { return s.Label.Pos() } func (s *ExprStmt) Pos() token.Pos { return s.X.Pos() } +func (s *SendStmt) Pos() token.Pos { return s.Chan.Pos() } func (s *IncDecStmt) Pos() token.Pos { return s.X.Pos() } func (s *AssignStmt) Pos() token.Pos { return s.Lhs[0].Pos() } func (s *GoStmt) Pos() token.Pos { return s.Go } @@ -695,6 +702,7 @@ func (s *EmptyStmt) End() token.Pos { } func (s *LabeledStmt) End() token.Pos { return s.Stmt.End() } func (s *ExprStmt) End() token.Pos { return s.X.End() } +func (s *SendStmt) End() token.Pos { return s.Value.End() } func (s *IncDecStmt) End() token.Pos { return s.TokPos + 2 /* len("++") */ } @@ -753,6 +761,7 @@ func (s *DeclStmt) stmtNode() {} func (s *EmptyStmt) stmtNode() {} func (s *LabeledStmt) stmtNode() {} func (s *ExprStmt) stmtNode() {} +func (s *SendStmt) stmtNode() {} func (s *IncDecStmt) stmtNode() {} func (s *AssignStmt) stmtNode() {} func (s *GoStmt) stmtNode() {} diff --git a/libgo/go/go/ast/walk.go b/libgo/go/go/ast/walk.go index 875a92f3f49..20c337c3be9 100644 --- a/libgo/go/go/ast/walk.go +++ b/libgo/go/go/ast/walk.go @@ -195,6 +195,10 @@ func Walk(v Visitor, node Node) { case *ExprStmt: Walk(v, n.X) + case *SendStmt: + Walk(v, n.Chan) + Walk(v, n.Value) + case *IncDecStmt: Walk(v, n.X) @@ -223,9 +227,7 @@ func Walk(v Visitor, node Node) { if n.Init != nil { Walk(v, n.Init) } - if n.Cond != nil { - Walk(v, n.Cond) - } + Walk(v, n.Cond) Walk(v, n.Body) if n.Else != nil { Walk(v, n.Else) @@ -258,11 +260,8 @@ func Walk(v Visitor, node Node) { Walk(v, n.Body) case *CommClause: - if n.Lhs != nil { - Walk(v, n.Lhs) - } - if n.Rhs != nil { - Walk(v, n.Rhs) + if n.Comm != nil { + Walk(v, n.Comm) } walkStmtList(v, n.Body) diff --git a/libgo/go/go/parser/parser.go b/libgo/go/go/parser/parser.go index f1746e04055..7c5843f3637 100644 --- a/libgo/go/go/parser/parser.go +++ b/libgo/go/go/parser/parser.go @@ -1193,18 +1193,6 @@ func (p *parser) parseSimpleStmt(labelOk bool) ast.Stmt { x := p.parseExprList() switch p.tok { - case token.COLON: - // labeled statement - colon := p.pos - p.next() - if labelOk && len(x) == 1 { - if label, isIdent := x[0].(*ast.Ident); isIdent { - return &ast.LabeledStmt{label, colon, p.parseStmt()} - } - } - p.error(x[0].Pos(), "illegal label declaration") - return &ast.BadStmt{x[0].Pos(), colon + 1} - case token.DEFINE, token.ASSIGN, token.ADD_ASSIGN, token.SUB_ASSIGN, token.MUL_ASSIGN, token.QUO_ASSIGN, @@ -1218,11 +1206,29 @@ func (p *parser) parseSimpleStmt(labelOk bool) ast.Stmt { } if len(x) > 1 { - p.error(x[0].Pos(), "only one expression allowed") + p.errorExpected(x[0].Pos(), "1 expression") // continue with first expression } - if p.tok == token.INC || p.tok == token.DEC { + switch p.tok { + case token.COLON: + // labeled statement + colon := p.pos + p.next() + if label, isIdent := x[0].(*ast.Ident); labelOk && isIdent { + return &ast.LabeledStmt{label, colon, p.parseStmt()} + } + p.error(x[0].Pos(), "illegal label declaration") + return &ast.BadStmt{x[0].Pos(), colon + 1} + + case token.ARROW: + // send statement + arrow := p.pos + p.next() // consume "<-" + y := p.parseExpr() + return &ast.SendStmt{x[0], arrow, y} + + case token.INC, token.DEC: // increment or decrement s := &ast.IncDecStmt{x[0], p.pos, p.tok} p.next() // consume "++" or "--" @@ -1321,44 +1327,34 @@ func (p *parser) makeExpr(s ast.Stmt) ast.Expr { } -func (p *parser) 
parseControlClause(isForStmt bool) (s1, s2, s3 ast.Stmt) { - if p.tok != token.LBRACE { +func (p *parser) parseIfStmt() *ast.IfStmt { + if p.trace { + defer un(trace(p, "IfStmt")) + } + + pos := p.expect(token.IF) + + var s ast.Stmt + var x ast.Expr + { prevLev := p.exprLev p.exprLev = -1 - - if p.tok != token.SEMICOLON { - s1 = p.parseSimpleStmt(false) - } if p.tok == token.SEMICOLON { p.next() - if p.tok != token.LBRACE && p.tok != token.SEMICOLON { - s2 = p.parseSimpleStmt(false) - } - if isForStmt { - // for statements have a 3rd section - p.expectSemi() - if p.tok != token.LBRACE { - s3 = p.parseSimpleStmt(false) - } - } + x = p.parseExpr() } else { - s1, s2 = nil, s1 + s = p.parseSimpleStmt(false) + if p.tok == token.SEMICOLON { + p.next() + x = p.parseExpr() + } else { + x = p.makeExpr(s) + s = nil + } } - p.exprLev = prevLev } - return s1, s2, s3 -} - - -func (p *parser) parseIfStmt() *ast.IfStmt { - if p.trace { - defer un(trace(p, "IfStmt")) - } - - pos := p.expect(token.IF) - s1, s2, _ := p.parseControlClause(false) body := p.parseBlockStmt() var else_ ast.Stmt if p.tok == token.ELSE { @@ -1368,7 +1364,7 @@ func (p *parser) parseIfStmt() *ast.IfStmt { p.expectSemi() } - return &ast.IfStmt{pos, s1, p.makeExpr(s2), body, else_} + return &ast.IfStmt{pos, s, x, body, else_} } @@ -1451,7 +1447,24 @@ func (p *parser) parseSwitchStmt() ast.Stmt { } pos := p.expect(token.SWITCH) - s1, s2, _ := p.parseControlClause(false) + + var s1, s2 ast.Stmt + if p.tok != token.LBRACE { + prevLev := p.exprLev + p.exprLev = -1 + if p.tok != token.SEMICOLON { + s2 = p.parseSimpleStmt(false) + } + if p.tok == token.SEMICOLON { + p.next() + s1 = s2 + s2 = nil + if p.tok != token.LBRACE { + s2 = p.parseSimpleStmt(false) + } + } + p.exprLev = prevLev + } if isExprSwitch(s2) { lbrace := p.expect(token.LBRACE) @@ -1486,28 +1499,52 @@ func (p *parser) parseCommClause() *ast.CommClause { // CommCase pos := p.pos - var tok token.Token - var lhs, rhs ast.Expr + var comm ast.Stmt if p.tok == token.CASE { p.next() + lhs := p.parseExprList() if p.tok == token.ARROW { - // RecvExpr without assignment - rhs = p.parseExpr() + // SendStmt + if len(lhs) > 1 { + p.errorExpected(lhs[0].Pos(), "1 expression") + // continue with first expression + } + arrow := p.pos + p.next() + rhs := p.parseExpr() + comm = &ast.SendStmt{lhs[0], arrow, rhs} } else { - // SendExpr or RecvExpr - rhs = p.parseExpr() + // RecvStmt + pos := p.pos + tok := p.tok + var rhs ast.Expr if p.tok == token.ASSIGN || p.tok == token.DEFINE { - // RecvExpr with assignment - tok = p.tok + // RecvStmt with assignment + if len(lhs) > 2 { + p.errorExpected(lhs[0].Pos(), "1 or 2 expressions") + // continue with first two expressions + lhs = lhs[0:2] + } p.next() - lhs = rhs - if p.tok == token.ARROW { - rhs = p.parseExpr() - } else { - p.expect(token.ARROW) // use expect() error handling + rhs = p.parseExpr() + } else { + // rhs must be single receive operation + if len(lhs) > 1 { + p.errorExpected(lhs[0].Pos(), "1 expression") + // continue with first expression } + rhs = lhs[0] + lhs = nil // there is no lhs + } + if x, isUnary := rhs.(*ast.UnaryExpr); !isUnary || x.Op != token.ARROW { + p.errorExpected(rhs.Pos(), "send or receive operation") + rhs = &ast.BadExpr{rhs.Pos(), rhs.End()} + } + if lhs != nil { + comm = &ast.AssignStmt{lhs, pos, tok, []ast.Expr{rhs}} + } else { + comm = &ast.ExprStmt{rhs} } - // else SendExpr } } else { p.expect(token.DEFAULT) @@ -1516,7 +1553,7 @@ func (p *parser) parseCommClause() *ast.CommClause { colon := p.expect(token.COLON) 
body := p.parseStmtList() - return &ast.CommClause{pos, tok, lhs, rhs, colon, body} + return &ast.CommClause{pos, comm, colon, body} } @@ -1545,7 +1582,29 @@ func (p *parser) parseForStmt() ast.Stmt { } pos := p.expect(token.FOR) - s1, s2, s3 := p.parseControlClause(true) + + var s1, s2, s3 ast.Stmt + if p.tok != token.LBRACE { + prevLev := p.exprLev + p.exprLev = -1 + if p.tok != token.SEMICOLON { + s2 = p.parseSimpleStmt(false) + } + if p.tok == token.SEMICOLON { + p.next() + s1 = s2 + s2 = nil + if p.tok != token.SEMICOLON { + s2 = p.parseSimpleStmt(false) + } + p.expectSemi() + if p.tok != token.LBRACE { + s3 = p.parseSimpleStmt(false) + } + } + p.exprLev = prevLev + } + body := p.parseBlockStmt() p.expectSemi() @@ -1568,7 +1627,7 @@ func (p *parser) parseForStmt() ast.Stmt { } // check rhs if len(as.Rhs) != 1 { - p.errorExpected(as.Rhs[0].Pos(), "1 expressions") + p.errorExpected(as.Rhs[0].Pos(), "1 expression") return &ast.BadStmt{pos, body.End()} } if rhs, isUnary := as.Rhs[0].(*ast.UnaryExpr); isUnary && rhs.Op == token.RANGE { diff --git a/libgo/go/go/parser/parser_test.go b/libgo/go/go/parser/parser_test.go index 56bd80ef1fc..38535627a75 100644 --- a/libgo/go/go/parser/parser_test.go +++ b/libgo/go/go/parser/parser_test.go @@ -18,6 +18,9 @@ var illegalInputs = []interface{}{ 3.14, []byte(nil), "foo!", + `package p; func f() { if /* should have condition */ {} };`, + `package p; func f() { if ; /* should have condition */ {} };`, + `package p; func f() { if f(); /* should have condition */ {} };`, } @@ -32,20 +35,23 @@ func TestParseIllegalInputs(t *testing.T) { var validPrograms = []interface{}{ - "package main\n", - `package main;`, - `package main; import "fmt"; func main() { fmt.Println("Hello, World!") };`, - `package main; func main() { if f(T{}) {} };`, - `package main; func main() { _ = (<-chan int)(x) };`, - `package main; func main() { _ = (<-chan <-chan int)(x) };`, - `package main; func f(func() func() func());`, - `package main; func f(...T);`, - `package main; func f(float, ...int);`, - `package main; func f(x int, a ...int) { f(0, a...); f(1, a...,) };`, - `package main; type T []int; var a []bool; func f() { if a[T{42}[0]] {} };`, - `package main; type T []int; func g(int) bool { return true }; func f() { if g(T{42}[0]) {} };`, - `package main; type T []int; func f() { for _ = range []int{T{42}[0]} {} };`, - `package main; var a = T{{1, 2}, {3, 4}}`, + "package p\n", + `package p;`, + `package p; import "fmt"; func f() { fmt.Println("Hello, World!") };`, + `package p; func f() { if f(T{}) {} };`, + `package p; func f() { _ = (<-chan int)(x) };`, + `package p; func f() { _ = (<-chan <-chan int)(x) };`, + `package p; func f(func() func() func());`, + `package p; func f(...T);`, + `package p; func f(float, ...int);`, + `package p; func f(x int, a ...int) { f(0, a...); f(1, a...,) };`, + `package p; type T []int; var a []bool; func f() { if a[T{42}[0]] {} };`, + `package p; type T []int; func g(int) bool { return true }; func f() { if g(T{42}[0]) {} };`, + `package p; type T []int; func f() { for _ = range []int{T{42}[0]} {} };`, + `package p; var a = T{{1, 2}, {3, 4}}`, + `package p; func f() { select { case <- c: case c <- d: case c <- <- d: case <-c <- d: } };`, + `package p; func f() { if ; true {} };`, + `package p; func f() { switch ; {} };`, } diff --git a/libgo/go/go/printer/nodes.go b/libgo/go/go/printer/nodes.go index 8207996dcdc..7933c2f1820 100644 --- a/libgo/go/go/printer/nodes.go +++ b/libgo/go/go/printer/nodes.go @@ -506,12 +506,12 @@ const ( ) -func 
walkBinary(e *ast.BinaryExpr) (has5, has6 bool, maxProblem int) { +func walkBinary(e *ast.BinaryExpr) (has4, has5 bool, maxProblem int) { switch e.Op.Precedence() { + case 4: + has4 = true case 5: has5 = true - case 6: - has6 = true } switch l := e.X.(type) { @@ -521,9 +521,9 @@ func walkBinary(e *ast.BinaryExpr) (has5, has6 bool, maxProblem int) { // pretend this is an *ast.ParenExpr and do nothing. break } - h5, h6, mp := walkBinary(l) + h4, h5, mp := walkBinary(l) + has4 = has4 || h4 has5 = has5 || h5 - has6 = has6 || h6 if maxProblem < mp { maxProblem = mp } @@ -536,25 +536,25 @@ func walkBinary(e *ast.BinaryExpr) (has5, has6 bool, maxProblem int) { // pretend this is an *ast.ParenExpr and do nothing. break } - h5, h6, mp := walkBinary(r) + h4, h5, mp := walkBinary(r) + has4 = has4 || h4 has5 = has5 || h5 - has6 = has6 || h6 if maxProblem < mp { maxProblem = mp } case *ast.StarExpr: if e.Op.String() == "/" { - maxProblem = 6 + maxProblem = 5 } case *ast.UnaryExpr: switch e.Op.String() + r.Op.String() { case "/*", "&&", "&^": - maxProblem = 6 + maxProblem = 5 case "++", "--": - if maxProblem < 5 { - maxProblem = 5 + if maxProblem < 4 { + maxProblem = 4 } } } @@ -563,20 +563,20 @@ func walkBinary(e *ast.BinaryExpr) (has5, has6 bool, maxProblem int) { func cutoff(e *ast.BinaryExpr, depth int) int { - has5, has6, maxProblem := walkBinary(e) + has4, has5, maxProblem := walkBinary(e) if maxProblem > 0 { return maxProblem + 1 } - if has5 && has6 { + if has4 && has5 { if depth == 1 { - return 6 + return 5 } - return 5 + return 4 } if depth == 1 { - return 7 + return 6 } - return 5 + return 4 } @@ -603,15 +603,14 @@ func reduceDepth(depth int) int { // (Algorithm suggestion by Russ Cox.) // // The precedences are: -// 6 * / % << >> & &^ -// 5 + - | ^ -// 4 == != < <= > >= -// 3 <- +// 5 * / % << >> & &^ +// 4 + - | ^ +// 3 == != < <= > >= // 2 && // 1 || // -// The only decision is whether there will be spaces around levels 5 and 6. -// There are never spaces at level 7 (unary), and always spaces at levels 4 and below. +// The only decision is whether there will be spaces around levels 4 and 5. +// There are never spaces at level 6 (unary), and always spaces at levels 3 and below. // // To choose the cutoff, look at the whole expression but excluding primary // expressions (function calls, parenthesized exprs), and apply these rules: @@ -619,21 +618,21 @@ func reduceDepth(depth int) int { // 1) If there is a binary operator with a right side unary operand // that would clash without a space, the cutoff must be (in order): // -// /* 7 -// && 7 -// &^ 7 -// ++ 6 -// -- 6 +// /* 6 +// && 6 +// &^ 6 +// ++ 5 +// -- 5 // // (Comparison operators always have spaces around them.) // -// 2) If there is a mix of level 6 and level 5 operators, then the cutoff -// is 6 (use spaces to distinguish precedence) in Normal mode -// and 5 (never use spaces) in Compact mode. +// 2) If there is a mix of level 5 and level 4 operators, then the cutoff +// is 5 (use spaces to distinguish precedence) in Normal mode +// and 4 (never use spaces) in Compact mode. // -// 3) If there are no level 5 operators or no level 6 operators, then the -// cutoff is 7 (always use spaces) in Normal mode -// and 5 (never use spaces) in Compact mode. +// 3) If there are no level 4 operators or no level 5 operators, then the +// cutoff is 6 (always use spaces) in Normal mode +// and 4 (never use spaces) in Compact mode. // // Sets multiLine to true if the binary expression spans multiple lines. 
func (p *printer) binaryExpr(x *ast.BinaryExpr, prec1, cutoff, depth int, multiLine *bool) { @@ -1083,6 +1082,12 @@ func (p *printer) stmt(stmt ast.Stmt, nextIsRBrace bool, multiLine *bool) { const depth = 1 p.expr0(s.X, depth, multiLine) + case *ast.SendStmt: + const depth = 1 + p.expr0(s.Chan, depth, multiLine) + p.print(blank, s.Arrow, token.ARROW, blank) + p.expr0(s.Value, depth, multiLine) + case *ast.IncDecStmt: const depth = 1 p.expr0(s.X, depth+1, multiLine) @@ -1179,13 +1184,9 @@ func (p *printer) stmt(stmt ast.Stmt, nextIsRBrace bool, multiLine *bool) { *multiLine = true case *ast.CommClause: - if s.Rhs != nil { + if s.Comm != nil { p.print(token.CASE, blank) - if s.Lhs != nil { - p.expr(s.Lhs, multiLine) - p.print(blank, s.Tok, blank) - } - p.expr(s.Rhs, multiLine) + p.stmt(s.Comm, false, ignoreMultiLine) } else { p.print(token.DEFAULT) } diff --git a/libgo/go/go/printer/printer.go b/libgo/go/go/printer/printer.go index 34b0c4e2dc4..48e2af1b736 100644 --- a/libgo/go/go/printer/printer.go +++ b/libgo/go/go/printer/printer.go @@ -34,18 +34,18 @@ const ( ) +const ( + esc2 = '\xfe' // an escape byte that cannot occur in regular UTF-8 + _ = 1 / (esc2 - tabwriter.Escape) // cause compiler error if esc2 == tabwriter.Escape +) + + var ( esc = []byte{tabwriter.Escape} htab = []byte{'\t'} htabs = []byte("\t\t\t\t\t\t\t\t") newlines = []byte("\n\n\n\n\n\n\n\n") // more than the max determined by nlines formfeeds = []byte("\f\f\f\f\f\f\f\f") // more than the max determined by nlines - - esc_quot = []byte(""") // shorter than """ - esc_apos = []byte("'") // shorter than "'" - esc_amp = []byte("&") - esc_lt = []byte("<") - esc_gt = []byte(">") ) @@ -145,18 +145,20 @@ func (p *printer) nlines(n, min int) int { // write0 does not indent after newlines, and does not HTML-escape or update p.pos. // func (p *printer) write0(data []byte) { - n, err := p.output.Write(data) - p.written += n - if err != nil { - p.errors <- err - runtime.Goexit() + if len(data) > 0 { + n, err := p.output.Write(data) + p.written += n + if err != nil { + p.errors <- err + runtime.Goexit() + } } } // write interprets data and writes it to p.output. It inserts indentation -// after a line break unless in a tabwriter escape sequence, and it HTML- -// escapes characters if GenHTML is set. It updates p.pos as a side-effect. +// after a line break unless in a tabwriter escape sequence. +// It updates p.pos as a side-effect. // func (p *printer) write(data []byte) { i0 := 0 @@ -189,36 +191,6 @@ func (p *printer) write(data []byte) { // next segment start i0 = i + 1 - case '"', '\'', '&', '<', '>': - if p.Mode&GenHTML != 0 { - // write segment ending in b - p.write0(data[i0:i]) - - // write HTML-escaped b - var esc []byte - switch b { - case '"': - esc = esc_quot - case '\'': - esc = esc_apos - case '&': - esc = esc_amp - case '<': - esc = esc_lt - case '>': - esc = esc_gt - } - p.write0(esc) - - // update p.pos - d := i + 1 - i0 - p.pos.Offset += d - p.pos.Column += d - - // next segment start - i0 = i + 1 - } - case tabwriter.Escape: p.mode ^= inLiteral @@ -251,29 +223,13 @@ func (p *printer) writeNewlines(n int, useFF bool) { } -func (p *printer) writeTaggedItem(data []byte, tag HTMLTag) { - // write start tag, if any - // (no html-escaping and no p.pos update for tags - use write0) - if tag.Start != "" { - p.write0([]byte(tag.Start)) - } - p.write(data) - // write end tag, if any - if tag.End != "" { - p.write0([]byte(tag.End)) - } -} - - // writeItem writes data at position pos. 
data is the text corresponding to // a single lexical token, but may also be comment text. pos is the actual // (or at least very accurately estimated) position of the data in the original -// source text. If tags are present and GenHTML is set, the tags are written -// before and after the data. writeItem updates p.last to the position -// immediately following the data. +// source text. writeItem updates p.last to the position immediately following +// the data. // -func (p *printer) writeItem(pos token.Position, data []byte, tag HTMLTag) { - fileChanged := false +func (p *printer) writeItem(pos token.Position, data []byte) { if pos.IsValid() { // continue with previous position if we don't have a valid pos if p.last.IsValid() && p.last.Filename != pos.Filename { @@ -283,7 +239,6 @@ func (p *printer) writeItem(pos token.Position, data []byte, tag HTMLTag) { p.indent = 0 p.mode = 0 p.buffer = p.buffer[0:0] - fileChanged = true } p.pos = pos } @@ -292,18 +247,7 @@ func (p *printer) writeItem(pos token.Position, data []byte, tag HTMLTag) { _, filename := path.Split(pos.Filename) p.write0([]byte(fmt.Sprintf("[%s:%d:%d]", filename, pos.Line, pos.Column))) } - if p.Mode&GenHTML != 0 { - // write line tag if on a new line - // TODO(gri): should write line tags on each line at the start - // will be more useful (e.g. to show line numbers) - if p.Styler != nil && (pos.Line != p.lastTaggedLine || fileChanged) { - p.writeTaggedItem(p.Styler.LineTag(pos.Line)) - p.lastTaggedLine = pos.Line - } - p.writeTaggedItem(data, tag) - } else { - p.write(data) - } + p.write(data) p.last = p.pos } @@ -312,14 +256,13 @@ func (p *printer) writeItem(pos token.Position, data []byte, tag HTMLTag) { // If there is any pending whitespace, it consumes as much of // it as is likely to help position the comment nicely. // pos is the comment position, next the position of the item -// after all pending comments, isFirst indicates if this is the -// first comment in a group of comments, and isKeyword indicates -// if the next item is a keyword. +// after all pending comments, prev is the previous comment in +// a group of comments (or nil), and isKeyword indicates if the +// next item is a keyword. 
// -func (p *printer) writeCommentPrefix(pos, next token.Position, isFirst, isKeyword bool) { - if !p.last.IsValid() { - // there was no preceeding item and the comment is the - // first item to be printed - don't write any whitespace +func (p *printer) writeCommentPrefix(pos, next token.Position, prev *ast.Comment, isKeyword bool) { + if p.written == 0 { + // the comment is the first item to be printed - don't write any whitespace return } @@ -329,11 +272,12 @@ func (p *printer) writeCommentPrefix(pos, next token.Position, isFirst, isKeywor return } - if pos.IsValid() && pos.Line == p.last.Line { + if pos.Line == p.last.Line && (prev == nil || prev.Text[1] != '/') { // comment on the same line as last item: // separate with at least one separator hasSep := false - if isFirst { + if prev == nil { + // first comment of a comment group j := 0 for i, ch := range p.buffer { switch ch { @@ -370,7 +314,8 @@ func (p *printer) writeCommentPrefix(pos, next token.Position, isFirst, isKeywor } else { // comment on a different line: // separate with at least one line break - if isFirst { + if prev == nil { + // first comment of a comment group j := 0 for i, ch := range p.buffer { switch ch { @@ -402,10 +347,14 @@ func (p *printer) writeCommentPrefix(pos, next token.Position, isFirst, isKeywor } // use formfeeds to break columns before a comment; // this is analogous to using formfeeds to separate - // individual lines of /*-style comments - // (if !pos.IsValid(), pos.Line == 0, and this will - // print no newlines) - p.writeNewlines(pos.Line-p.last.Line, true) + // individual lines of /*-style comments - but make + // sure there is at least one line break if the previous + // comment was a line comment + n := pos.Line - p.last.Line // if !pos.IsValid(), pos.Line == 0, and n will be 0 + if n <= 0 && prev != nil && prev.Text[1] == '/' { + n = 1 + } + p.writeNewlines(n, true) } } @@ -413,21 +362,10 @@ func (p *printer) writeCommentPrefix(pos, next token.Position, isFirst, isKeywor func (p *printer) writeCommentLine(comment *ast.Comment, pos token.Position, line []byte) { // line must pass through unchanged, bracket it with tabwriter.Escape line = bytes.Join([][]byte{esc, line, esc}, nil) - - // apply styler, if any - var tag HTMLTag - if p.Styler != nil { - line, tag = p.Styler.Comment(comment, line) - } - - p.writeItem(pos, line, tag) + p.writeItem(pos, line) } -// TODO(gri): Similar (but not quite identical) functionality for -// comment processing can be found in go/doc/comment.go. -// Perhaps this can be factored eventually. 
- // Split comment text into lines func split(text []byte) [][]byte { // count lines (comment text never ends in a newline) @@ -680,7 +618,7 @@ func (p *printer) intersperseComments(next token.Position, tok token.Token) (dro var last *ast.Comment for ; p.commentBefore(next); p.cindex++ { for _, c := range p.comments[p.cindex].List { - p.writeCommentPrefix(p.fset.Position(c.Pos()), next, last == nil, tok.IsKeyword()) + p.writeCommentPrefix(p.fset.Position(c.Pos()), next, last, tok.IsKeyword()) p.writeComment(c) last = c } @@ -796,7 +734,6 @@ func (p *printer) print(args ...interface{}) { for _, f := range args { next := p.pos // estimated position of next item var data []byte - var tag HTMLTag var tok token.Token switch x := f.(type) { @@ -821,28 +758,31 @@ func (p *printer) print(args ...interface{}) { p.buffer = p.buffer[0 : i+1] p.buffer[i] = x case *ast.Ident: - if p.Styler != nil { - data, tag = p.Styler.Ident(x) - } else { - data = []byte(x.Name) - } + data = []byte(x.Name) tok = token.IDENT case *ast.BasicLit: - if p.Styler != nil { - data, tag = p.Styler.BasicLit(x) - } else { - data = x.Value - } // escape all literals so they pass through unchanged // (note that valid Go programs cannot contain // tabwriter.Escape bytes since they do not appear in // legal UTF-8 sequences) - escData := make([]byte, 0, len(data)+2) - escData = append(escData, tabwriter.Escape) - escData = append(escData, data...) - escData = append(escData, tabwriter.Escape) - data = escData + data = make([]byte, 0, len(x.Value)+2) + data = append(data, tabwriter.Escape) + data = append(data, x.Value...) + data = append(data, tabwriter.Escape) tok = x.Kind + // If we have a raw string that spans multiple lines and + // the opening quote (`) is on a line preceded only by + // indentation, we don't want to write that indentation + // because the following lines of the raw string are not + // indented. It's easiest to correct the output at the end + // via the trimmer (because of the complex handling of + // white space). + // Mark multi-line raw strings by replacing the opening + // quote with esc2 and have the trimmer take care of fixing + // it up. (Do this _after_ making a copy of data!) + if data[1] == '`' && bytes.IndexByte(data, '\n') > 0 { + data[1] = esc2 + } case token.Token: s := x.String() if mayCombine(p.lastTok, s[0]) { @@ -858,11 +798,7 @@ func (p *printer) print(args ...interface{}) { p.buffer = p.buffer[0:1] p.buffer[0] = ' ' } - if p.Styler != nil { - data, tag = p.Styler.Token(x) - } else { - data = []byte(s) - } + data = []byte(s) tok = x case token.Pos: if x.IsValid() { @@ -885,7 +821,7 @@ func (p *printer) print(args ...interface{}) { // before p.writeNewlines(next.Line-p.pos.Line, droppedFF) - p.writeItem(next, data, tag) + p.writeItem(next, data) } } } @@ -927,21 +863,26 @@ func (p *printer) flush(next token.Position, tok token.Token) (droppedFF bool) { // through unchanged. // type trimmer struct { - output io.Writer - space bytes.Buffer - state int + output io.Writer + state int + space bytes.Buffer + hasText bool } // trimmer is implemented as a state machine. 
// It can be in one of the following states: const ( - inSpace = iota - inEscape - inText + inSpace = iota // inside space + atEscape // inside space and the last char was an opening tabwriter.Escape + inEscape // inside text bracketed by tabwriter.Escapes + inText // inside text ) +var backquote = []byte{'`'} + + // Design note: It is tempting to eliminate extra blanks occurring in // whitespace in this function as it could simplify some // of the blanks logic in the node printing functions. @@ -949,7 +890,13 @@ const ( // the tabwriter. func (p *trimmer) Write(data []byte) (n int, err os.Error) { - m := 0 // if p.state != inSpace, data[m:n] is unwritten + // invariants: + // p.state == inSpace, atEscape: + // p.space is unwritten + // p.hasText indicates if there is any text on this line + // p.state == inEscape, inText: + // data[m:n] is unwritten + m := 0 var b byte for n, b = range data { if b == '\v' { @@ -960,37 +907,55 @@ func (p *trimmer) Write(data []byte) (n int, err os.Error) { switch b { case '\t', ' ': p.space.WriteByte(b) // WriteByte returns no errors - case '\f', '\n': + case '\n', '\f': p.space.Reset() // discard trailing space _, err = p.output.Write(newlines[0:1]) // write newline + p.hasText = false case tabwriter.Escape: - _, err = p.output.Write(p.space.Bytes()) - p.space.Reset() - p.state = inEscape - m = n + 1 // drop tabwriter.Escape + p.state = atEscape default: _, err = p.output.Write(p.space.Bytes()) - p.space.Reset() p.state = inText m = n } + case atEscape: + // discard indentation if we have a multi-line raw string + // (see printer.print for details) + if b != esc2 || p.hasText { + _, err = p.output.Write(p.space.Bytes()) + } + p.state = inEscape + m = n + if b == esc2 { + _, err = p.output.Write(backquote) // convert back + m++ + } case inEscape: if b == tabwriter.Escape { _, err = p.output.Write(data[m:n]) p.state = inSpace + p.space.Reset() + p.hasText = true } case inText: switch b { case '\t', ' ': _, err = p.output.Write(data[m:n]) p.state = inSpace + p.space.Reset() p.space.WriteByte(b) // WriteByte returns no errors - case '\f': - data[n] = '\n' // convert to newline + p.hasText = true + case '\n', '\f': + _, err = p.output.Write(data[m:n]) + p.state = inSpace + p.space.Reset() + _, err = p.output.Write(newlines[0:1]) // write newline + p.hasText = false case tabwriter.Escape: _, err = p.output.Write(data[m:n]) - p.state = inEscape - m = n + 1 // drop tabwriter.Escape + p.state = atEscape + p.space.Reset() + p.hasText = true } } if err != nil { @@ -999,9 +964,12 @@ func (p *trimmer) Write(data []byte) (n int, err os.Error) { } n = len(data) - if p.state != inSpace { + switch p.state { + case inEscape, inText: _, err = p.output.Write(data[m:n]) p.state = inSpace + p.space.Reset() + p.hasText = true } return @@ -1013,36 +981,16 @@ func (p *trimmer) Write(data []byte) (n int, err os.Error) { // General printing is controlled with these Config.Mode flags. const ( - GenHTML uint = 1 << iota // generate HTML - RawFormat // do not use a tabwriter; if set, UseSpaces is ignored + RawFormat uint = 1 << iota // do not use a tabwriter; if set, UseSpaces is ignored TabIndent // use tabs for indentation independent of UseSpaces UseSpaces // use spaces instead of tabs for alignment ) -// An HTMLTag specifies a start and end tag. -type HTMLTag struct { - Start, End string // empty if tags are absent -} - - -// A Styler specifies formatting of line tags and elementary Go words. -// A format consists of text and a (possibly empty) surrounding HTML tag. 
-// -type Styler interface { - LineTag(line int) ([]byte, HTMLTag) - Comment(c *ast.Comment, line []byte) ([]byte, HTMLTag) - BasicLit(x *ast.BasicLit) ([]byte, HTMLTag) - Ident(id *ast.Ident) ([]byte, HTMLTag) - Token(tok token.Token) ([]byte, HTMLTag) -} - - // A Config node controls the output of Fprint. type Config struct { - Mode uint // default: 0 - Tabwidth int // default: 8 - Styler Styler // default: nil + Mode uint // default: 0 + Tabwidth int // default: 8 } @@ -1070,9 +1018,6 @@ func (cfg *Config) Fprint(output io.Writer, fset *token.FileSet, node interface{ } twmode := tabwriter.DiscardEmptyColumns - if cfg.Mode&GenHTML != 0 { - twmode |= tabwriter.FilterHTML - } if cfg.Mode&TabIndent != 0 { minwidth = 0 twmode |= tabwriter.TabIndent diff --git a/libgo/go/go/printer/printer_test.go b/libgo/go/go/printer/printer_test.go index c66471b926a..565075aa20c 100644 --- a/libgo/go/go/printer/printer_test.go +++ b/libgo/go/go/printer/printer_test.go @@ -127,7 +127,7 @@ var data = []entry{ } -func Test(t *testing.T) { +func TestFiles(t *testing.T) { for _, e := range data { source := path.Join(dataDir, e.source) golden := path.Join(dataDir, e.golden) @@ -136,3 +136,38 @@ func Test(t *testing.T) { //check(t, golden, golden, e.mode); } } + + +// TestLineComments, using a simple test case, checks that consequtive line +// comments are properly terminated with a newline even if the AST position +// information is incorrect. +// +func TestLineComments(t *testing.T) { + const src = `// comment 1 + // comment 2 + // comment 3 + package main + ` + + fset := token.NewFileSet() + ast1, err1 := parser.ParseFile(fset, "", src, parser.ParseComments) + if err1 != nil { + panic(err1) + } + + var buf bytes.Buffer + fset = token.NewFileSet() // use the wrong file set + Fprint(&buf, fset, ast1) + + nlines := 0 + for _, ch := range buf.Bytes() { + if ch == '\n' { + nlines++ + } + } + + const expected = 3 + if nlines < expected { + t.Errorf("got %d, expected %d\n", nlines, expected) + } +} diff --git a/libgo/go/go/printer/testdata/expressions.golden b/libgo/go/go/printer/testdata/expressions.golden index 882c7624c01..7f18f338a63 100644 --- a/libgo/go/go/printer/testdata/expressions.golden +++ b/libgo/go/go/printer/testdata/expressions.golden @@ -248,6 +248,77 @@ they must not be removed` func _() { + // smart handling of indentation for multi-line raw strings + var _ = `` + var _ = `foo` + var _ = `foo +bar` + + var _ = `` + var _ = `foo` + var _ = + // the next line should not be indented +`foo +bar` + + var _ = // comment + `` + var _ = // comment + `foo` + var _ = // comment + // the next line should not be indented +`foo +bar` + + var _ = /* comment */ `` + var _ = /* comment */ `foo` + var _ = /* comment */ `foo +bar` + + var _ = /* comment */ + `` + var _ = /* comment */ + `foo` + var _ = /* comment */ + // the next line should not be indented +`foo +bar` + + var board = []int( +`........... +........... +....●●●.... +....●●●.... +..●●●●●●●.. +..●●●○●●●.. +..●●●●●●●.. +....●●●.... +....●●●.... +........... +........... +`) + + var state = S{ + "foo", + // the next line should not be indented +`........... +........... +....●●●.... +....●●●.... +..●●●●●●●.. +..●●●○●●●.. +..●●●●●●●.. +....●●●.... +....●●●.... +........... +........... 
+`, + "bar", + } +} + + +func _() { // one-line function literals (body is on a single line) _ = func() {} _ = func() int { return 0 } diff --git a/libgo/go/go/printer/testdata/expressions.input b/libgo/go/go/printer/testdata/expressions.input index 647706b0923..6bcd9b5f89e 100644 --- a/libgo/go/go/printer/testdata/expressions.input +++ b/libgo/go/go/printer/testdata/expressions.input @@ -244,6 +244,85 @@ they must not be removed` func _() { + // smart handling of indentation for multi-line raw strings + var _ = `` + var _ = `foo` + var _ = `foo +bar` + + +var _ = + `` +var _ = + `foo` +var _ = + // the next line should not be indented + `foo +bar` + + + var _ = // comment + `` + var _ = // comment + `foo` + var _ = // comment + // the next line should not be indented + `foo +bar` + + +var _ = /* comment */ `` +var _ = /* comment */ `foo` +var _ = /* comment */ `foo +bar` + + + var _ = /* comment */ + `` + var _ = /* comment */ + `foo` + var _ = /* comment */ + // the next line should not be indented + `foo +bar` + + +var board = []int( + `........... +........... +....●●●.... +....●●●.... +..●●●●●●●.. +..●●●○●●●.. +..●●●●●●●.. +....●●●.... +....●●●.... +........... +........... +`) + + + var state = S{ + "foo", + // the next line should not be indented + `........... +........... +....●●●.... +....●●●.... +..●●●●●●●.. +..●●●○●●●.. +..●●●●●●●.. +....●●●.... +....●●●.... +........... +........... +`, + "bar", + } +} + + +func _() { // one-line function literals (body is on a single line) _ = func() {} _ = func() int { return 0 } diff --git a/libgo/go/go/printer/testdata/expressions.raw b/libgo/go/go/printer/testdata/expressions.raw index 62be00cc301..f1944c94bb4 100644 --- a/libgo/go/go/printer/testdata/expressions.raw +++ b/libgo/go/go/printer/testdata/expressions.raw @@ -243,7 +243,77 @@ func _() { _ = `foo bar` _ = `three spaces before the end of the line starting here: -they must not be removed` +they must not be removed` } + + +func _() { + // smart handling of indentation for multi-line raw strings + var _ = `` + var _ = `foo` + var _ = `foo +bar` + + var _ = `` + var _ = `foo` + var _ = + // the next line should not be indented +`foo +bar` + + var _ = // comment + `` + var _ = // comment + `foo` + var _ = // comment + // the next line should not be indented +`foo +bar` + + var _ = /* comment */ `` + var _ = /* comment */ `foo` + var _ = /* comment */ `foo +bar` + + var _ = /* comment */ + `` + var _ = /* comment */ + `foo` + var _ = /* comment */ + // the next line should not be indented +`foo +bar` + + var board = []int( +`........... +........... +....●●●.... +....●●●.... +..●●●●●●●.. +..●●●○●●●.. +..●●●●●●●.. +....●●●.... +....●●●.... +........... +........... +`) + + var state = S{ + "foo", + // the next line should not be indented +`........... +........... +....●●●.... +....●●●.... +..●●●●●●●.. +..●●●○●●●.. +..●●●●●●●.. +....●●●.... +....●●●.... +........... +........... +`, + "bar", + } } diff --git a/libgo/go/go/printer/testdata/statements.golden b/libgo/go/go/printer/testdata/statements.golden index 5eceb7dd555..2900602699f 100644 --- a/libgo/go/go/printer/testdata/statements.golden +++ b/libgo/go/go/printer/testdata/statements.golden @@ -10,9 +10,9 @@ func use(x interface{}) {} // Formatting of if-statement headers. 
func _() { - if { + if true { } - if { + if true { } // no semicolon printed if expr { } @@ -22,7 +22,7 @@ func _() { } // no parens printed if expr { } // no semicolon and parens printed - if x := expr; { + if x := expr; true { use(x) } if x := expr; expr { @@ -354,14 +354,14 @@ func _() { func _() { - if { + if true { _ = 0 } _ = 0 // the indentation here should not be affected by the long label name AnOverlongLabel: _ = 0 - if { + if true { _ = 0 } _ = 0 diff --git a/libgo/go/go/printer/testdata/statements.input b/libgo/go/go/printer/testdata/statements.input index 7819820edef..21e61efc4f8 100644 --- a/libgo/go/go/printer/testdata/statements.input +++ b/libgo/go/go/printer/testdata/statements.input @@ -10,13 +10,13 @@ func use(x interface{}) {} // Formatting of if-statement headers. func _() { - if {} - if;{} // no semicolon printed + if true {} + if; true {} // no semicolon printed if expr{} if;expr{} // no semicolon printed if (expr){} // no parens printed if;((expr)){} // no semicolon and parens printed - if x:=expr;{ + if x:=expr;true{ use(x)} if x:=expr; expr {use(x)} } @@ -271,14 +271,14 @@ func _() { func _() { - if { + if true { _ = 0 } _ = 0 // the indentation here should not be affected by the long label name AnOverlongLabel: _ = 0 - if { + if true { _ = 0 } _ = 0 diff --git a/libgo/go/go/scanner/scanner.go b/libgo/go/go/scanner/scanner.go index 8c3205230e8..2ae296b3f15 100644 --- a/libgo/go/go/scanner/scanner.go +++ b/libgo/go/go/scanner/scanner.go @@ -8,7 +8,8 @@ // // var s Scanner // fset := token.NewFileSet() // position information is relative to fset -// s.Init(fset, filename, src, nil /* no error handler */, 0) +// file := fset.AddFile(filename, fset.Base(), len(src)) // register file +// s.Init(file, src, nil /* no error handler */, 0) // for { // pos, tok, lit := s.Scan() // if tok == token.EOF { diff --git a/libgo/go/go/scanner/scanner_test.go b/libgo/go/go/scanner/scanner_test.go index 1c3b6728c27..c622ff482f3 100644 --- a/libgo/go/go/scanner/scanner_test.go +++ b/libgo/go/go/scanner/scanner_test.go @@ -223,7 +223,7 @@ func TestScan(t *testing.T) { for _, e := range tokens { src += e.lit + whitespace } - src_linecount := newlineCount(src) + 1 + src_linecount := newlineCount(src) whitespace_linecount := newlineCount(whitespace) // verify scan @@ -241,7 +241,7 @@ func TestScan(t *testing.T) { if tok == token.EOF { lit = "" epos.Line = src_linecount - epos.Column = 1 + epos.Column = 2 } checkPos(t, lit, pos, epos) if tok != e.tok { diff --git a/libgo/go/go/token/position.go b/libgo/go/go/token/position.go index 0044a0ed77d..809e53f0aa2 100644 --- a/libgo/go/go/token/position.go +++ b/libgo/go/go/token/position.go @@ -153,7 +153,7 @@ type lineInfo struct { // AddLineInfo adds alternative file and line number information for // a given file offset. The offset must be larger than the offset for -// the previously added alternative line info and not larger than the +// the previously added alternative line info and smaller than the // file size; otherwise the information is ignored. 
// // AddLineInfo is typically used to register alternative position @@ -161,7 +161,7 @@ type lineInfo struct { // func (f *File) AddLineInfo(offset int, filename string, line int) { f.set.mutex.Lock() - if i := len(f.infos); i == 0 || f.infos[i-1].offset < offset && offset <= f.size { + if i := len(f.infos); i == 0 || f.infos[i-1].offset < offset && offset < f.size { f.infos = append(f.infos, lineInfo{offset, filename, line}) } f.set.mutex.Unlock() @@ -212,27 +212,30 @@ func (f *File) LineCount() int { // AddLine adds the line offset for a new line. // The line offset must be larger than the offset for the previous line -// and not larger than the file size; otherwise the line offset is ignored. +// and smaller than the file size; otherwise the line offset is ignored. // func (f *File) AddLine(offset int) { f.set.mutex.Lock() - if i := len(f.lines); (i == 0 || f.lines[i-1] < offset) && offset <= f.size { + if i := len(f.lines); (i == 0 || f.lines[i-1] < offset) && offset < f.size { f.lines = append(f.lines, offset) } f.set.mutex.Unlock() } -// SetLines sets all line offsets for a file and returns true if successful. +// SetLines sets the line offsets for a file and returns true if successful. +// The line offsets are the offsets of the first character of each line; +// for instance for the content "ab\nc\n" the line offsets are {0, 3}. +// An empty file has an empty line offset table. // Each line offset must be larger than the offset for the previous line -// and not larger than the file size; otherwise the SetLines fails and returns +// and smaller than the file size; otherwise SetLines fails and returns // false. // func (f *File) SetLines(lines []int) bool { // verify validity of lines table size := f.size for i, offset := range lines { - if i > 0 && offset <= lines[i-1] || size < offset { + if i > 0 && offset <= lines[i-1] || size <= offset { return false } } @@ -245,6 +248,27 @@ func (f *File) SetLines(lines []int) bool { } +// SetLinesForContent sets the line offsets for the given file content. +func (f *File) SetLinesForContent(content []byte) { + var lines []int + line := 0 + for offset, b := range content { + if line >= 0 { + lines = append(lines, line) + } + line = -1 + if b == '\n' { + line = offset + 1 + } + } + + // set lines table + f.set.mutex.Lock() + f.lines = lines + f.set.mutex.Unlock() +} + + // Pos returns the Pos value for the given file offset; // the offset must be <= f.Size(). // f.Pos(f.Offset(p)) == p. 
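// Illustrative sketch, not taken from the patched sources: exercising the new
// File.SetLinesForContent together with the fset.AddFile registration that
// the revised go/scanner example above now requires. The file name and
// content are made up; the offsets {0, 3} for "ab\nc\n" follow the SetLines
// documentation added in this change.
package main

import (
	"fmt"
	"go/token"
)

func main() {
	src := []byte("ab\nc\n")
	fset := token.NewFileSet()
	f := fset.AddFile("example.go", fset.Base(), len(src)) // register the file
	f.SetLinesForContent(src)                              // line offsets {0, 3}
	fmt.Println(f.LineCount())                             // 2

	// Map a file offset back to a position; offset 3 is the 'c' on line 2.
	fmt.Println(fset.Position(f.Pos(3))) // example.go:2:1
}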
diff --git a/libgo/go/go/token/position_test.go b/libgo/go/go/token/position_test.go index 1cffcc3c278..979c9b1e8e7 100644 --- a/libgo/go/go/token/position_test.go +++ b/libgo/go/go/token/position_test.go @@ -39,14 +39,18 @@ func TestNoPos(t *testing.T) { var tests = []struct { filename string + source []byte // may be nil size int lines []int }{ - {"a", 0, []int{}}, - {"b", 5, []int{0}}, - {"c", 10, []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}}, - {"d", 100, []int{0, 5, 10, 20, 30, 70, 71, 72, 80, 85, 90, 99}}, - {"e", 777, []int{0, 80, 100, 120, 130, 180, 267, 455, 500, 567, 620}}, + {"a", []byte{}, 0, []int{}}, + {"b", []byte("01234"), 5, []int{0}}, + {"c", []byte("\n\n\n\n\n\n\n\n\n"), 9, []int{0, 1, 2, 3, 4, 5, 6, 7, 8}}, + {"d", nil, 100, []int{0, 5, 10, 20, 30, 70, 71, 72, 80, 85, 90, 99}}, + {"e", nil, 777, []int{0, 80, 100, 120, 130, 180, 267, 455, 500, 567, 620}}, + {"f", []byte("package p\n\nimport \"fmt\""), 23, []int{0, 10, 11}}, + {"g", []byte("package p\n\nimport \"fmt\"\n"), 24, []int{0, 10, 11}}, + {"h", []byte("package p\n\nimport \"fmt\"\n "), 25, []int{0, 10, 11, 24}}, } @@ -77,10 +81,26 @@ func verifyPositions(t *testing.T, fset *FileSet, f *File, lines []int) { } +func makeTestSource(size int, lines []int) []byte { + src := make([]byte, size) + for _, offs := range lines { + if offs > 0 { + src[offs-1] = '\n' + } + } + return src +} + + func TestPositions(t *testing.T) { const delta = 7 // a non-zero base offset increment fset := NewFileSet() for _, test := range tests { + // verify consistency of test case + if test.source != nil && len(test.source) != test.size { + t.Errorf("%s: inconsistent test case: expected file size %d; got %d", test.filename, test.size, len(test.source)) + } + // add file and verify name and size f := fset.AddFile(test.filename, fset.Base()+delta, test.size) if f.Name() != test.filename { @@ -107,15 +127,26 @@ func TestPositions(t *testing.T) { verifyPositions(t, fset, f, test.lines[0:i+1]) } - // add lines at once and verify all positions - ok := f.SetLines(test.lines) - if !ok { + // add lines with SetLines and verify all positions + if ok := f.SetLines(test.lines); !ok { t.Errorf("%s: SetLines failed", f.Name()) } if f.LineCount() != len(test.lines) { t.Errorf("%s, SetLines: expected line count %d; got %d", f.Name(), len(test.lines), f.LineCount()) } verifyPositions(t, fset, f, test.lines) + + // add lines with SetLinesForContent and verify all positions + src := test.source + if src == nil { + // no test source available - create one from scratch + src = makeTestSource(test.size, test.lines) + } + f.SetLinesForContent(src) + if f.LineCount() != len(test.lines) { + t.Errorf("%s, SetLinesForContent: expected line count %d; got %d", f.Name(), len(test.lines), f.LineCount()) + } + verifyPositions(t, fset, f, test.lines) } } diff --git a/libgo/go/go/token/token.go b/libgo/go/go/token/token.go index 1bd81c1b143..2a2d3ecc4fb 100644 --- a/libgo/go/go/token/token.go +++ b/libgo/go/go/token/token.go @@ -252,8 +252,8 @@ func (tok Token) String() string { // const ( LowestPrec = 0 // non-operators - UnaryPrec = 7 - HighestPrec = 8 + UnaryPrec = 6 + HighestPrec = 7 ) @@ -267,14 +267,12 @@ func (op Token) Precedence() int { return 1 case LAND: return 2 - case ARROW: - return 3 case EQL, NEQ, LSS, LEQ, GTR, GEQ: - return 4 + return 3 case ADD, SUB, OR, XOR: - return 5 + return 4 case MUL, QUO, REM, SHL, SHR, AND, AND_NOT: - return 6 + return 5 } return LowestPrec } diff --git a/libgo/go/gob/codec_test.go b/libgo/go/gob/codec_test.go index af941c629cd..c822d6863ac 
100644 --- a/libgo/go/gob/codec_test.go +++ b/libgo/go/gob/codec_test.go @@ -58,7 +58,7 @@ func TestUintCodec(t *testing.T) { t.Errorf("encodeUint: %#x encode: expected % x got % x", tt.x, tt.b, b.Bytes()) } } - decState := newDecodeState(nil, &b) + decState := newDecodeState(nil, b) for u := uint64(0); ; u = (u + 1) * 7 { b.Reset() encState.encodeUint(u) @@ -77,7 +77,7 @@ func verifyInt(i int64, t *testing.T) { var b = new(bytes.Buffer) encState := newEncoderState(nil, b) encState.encodeInt(i) - decState := newDecodeState(nil, &b) + decState := newDecodeState(nil, b) decState.buf = make([]byte, 8) j := decState.decodeInt() if i != j { @@ -315,7 +315,7 @@ func execDec(typ string, instr *decInstr, state *decodeState, t *testing.T, p un func newDecodeStateFromData(data []byte) *decodeState { b := bytes.NewBuffer(data) - state := newDecodeState(nil, &b) + state := newDecodeState(nil, b) state.fieldnum = -1 return state } @@ -342,7 +342,7 @@ func TestScalarDecInstructions(t *testing.T) { var data struct { a int } - instr := &decInstr{decOpMap[reflect.Int], 6, 0, 0, ovfl} + instr := &decInstr{decOpTable[reflect.Int], 6, 0, 0, ovfl} state := newDecodeStateFromData(signedResult) execDec("int", instr, state, t, unsafe.Pointer(&data)) if data.a != 17 { @@ -355,7 +355,7 @@ func TestScalarDecInstructions(t *testing.T) { var data struct { a uint } - instr := &decInstr{decOpMap[reflect.Uint], 6, 0, 0, ovfl} + instr := &decInstr{decOpTable[reflect.Uint], 6, 0, 0, ovfl} state := newDecodeStateFromData(unsignedResult) execDec("uint", instr, state, t, unsafe.Pointer(&data)) if data.a != 17 { @@ -446,7 +446,7 @@ func TestScalarDecInstructions(t *testing.T) { var data struct { a uintptr } - instr := &decInstr{decOpMap[reflect.Uintptr], 6, 0, 0, ovfl} + instr := &decInstr{decOpTable[reflect.Uintptr], 6, 0, 0, ovfl} state := newDecodeStateFromData(unsignedResult) execDec("uintptr", instr, state, t, unsafe.Pointer(&data)) if data.a != 17 { @@ -511,7 +511,7 @@ func TestScalarDecInstructions(t *testing.T) { var data struct { a complex64 } - instr := &decInstr{decOpMap[reflect.Complex64], 6, 0, 0, ovfl} + instr := &decInstr{decOpTable[reflect.Complex64], 6, 0, 0, ovfl} state := newDecodeStateFromData(complexResult) execDec("complex", instr, state, t, unsafe.Pointer(&data)) if data.a != 17+19i { @@ -524,7 +524,7 @@ func TestScalarDecInstructions(t *testing.T) { var data struct { a complex128 } - instr := &decInstr{decOpMap[reflect.Complex128], 6, 0, 0, ovfl} + instr := &decInstr{decOpTable[reflect.Complex128], 6, 0, 0, ovfl} state := newDecodeStateFromData(complexResult) execDec("complex", instr, state, t, unsafe.Pointer(&data)) if data.a != 17+19i { @@ -973,18 +973,32 @@ func TestIgnoredFields(t *testing.T) { } } + +func TestBadRecursiveType(t *testing.T) { + type Rec ***Rec + var rec Rec + b := new(bytes.Buffer) + err := NewEncoder(b).Encode(&rec) + if err == nil { + t.Error("expected error; got none") + } else if strings.Index(err.String(), "recursive") < 0 { + t.Error("expected recursive type error; got", err) + } + // Can't test decode easily because we can't encode one, so we can't pass one to a Decoder. 
+} + type Bad0 struct { - ch chan int - c float64 + CH chan int + C float64 } -var nilEncoder *Encoder func TestInvalidField(t *testing.T) { var bad0 Bad0 - bad0.ch = make(chan int) + bad0.CH = make(chan int) b := new(bytes.Buffer) - err := nilEncoder.encode(b, reflect.NewValue(&bad0)) + var nilEncoder *Encoder + err := nilEncoder.encode(b, reflect.NewValue(&bad0), userType(reflect.Typeof(&bad0))) if err == nil { t.Error("expected error; got none") } else if strings.Index(err.String(), "type") < 0 { @@ -1088,11 +1102,11 @@ func (v Vector) Square() int { } type Point struct { - a, b int + X, Y int } func (p Point) Square() int { - return p.a*p.a + p.b*p.b + return p.X*p.X + p.Y*p.Y } // A struct with interfaces in it. @@ -1162,7 +1176,6 @@ func TestInterface(t *testing.T) { } } } - } // A struct with all basic types, stored in interfaces. @@ -1182,7 +1195,7 @@ func TestInterfaceBasic(t *testing.T) { int(1), int8(1), int16(1), int32(1), int64(1), uint(1), uint8(1), uint16(1), uint32(1), uint64(1), float32(1), 1.0, - complex64(0i), complex128(0i), + complex64(1i), complex128(1i), true, "hello", []byte("sailor"), @@ -1307,6 +1320,31 @@ func TestUnexportedFields(t *testing.T) { } } +var singletons = []interface{}{ + true, + 7, + 3.2, + "hello", + [3]int{11, 22, 33}, + []float32{0.5, 0.25, 0.125}, + map[string]int{"one": 1, "two": 2}, +} + +func TestDebugSingleton(t *testing.T) { + if debugFunc == nil { + return + } + b := new(bytes.Buffer) + // Accumulate a number of values and print them out all at once. + for _, x := range singletons { + err := NewEncoder(b).Encode(x) + if err != nil { + t.Fatal("encode:", err) + } + } + debugFunc(b) +} + // A type that won't be defined in the gob until we send it in an interface value. type OnTheFly struct { A int @@ -1325,7 +1363,7 @@ type DT struct { S []string } -func TestDebug(t *testing.T) { +func TestDebugStruct(t *testing.T) { if debugFunc == nil { return } diff --git a/libgo/go/gob/decode.go b/libgo/go/gob/decode.go index 2db75215c19..8f599e10041 100644 --- a/libgo/go/gob/decode.go +++ b/libgo/go/gob/decode.go @@ -30,15 +30,17 @@ type decodeState struct { dec *Decoder // The buffer is stored with an extra indirection because it may be replaced // if we load a type during decode (when reading an interface value). - b **bytes.Buffer + b *bytes.Buffer fieldnum int // the last field number read. buf []byte } -func newDecodeState(dec *Decoder, b **bytes.Buffer) *decodeState { +// We pass the bytes.Buffer separately for easier testing of the infrastructure +// without requiring a full Decoder. +func newDecodeState(dec *Decoder, buf *bytes.Buffer) *decodeState { d := new(decodeState) d.dec = dec - d.b = b + d.b = buf d.buf = make([]byte, uint64Size) return d } @@ -49,14 +51,15 @@ func overflow(name string) os.ErrorString { // decodeUintReader reads an encoded unsigned integer from an io.Reader. // Used only by the Decoder to read the message length. 
-func decodeUintReader(r io.Reader, buf []byte) (x uint64, err os.Error) { - _, err = r.Read(buf[0:1]) +func decodeUintReader(r io.Reader, buf []byte) (x uint64, width int, err os.Error) { + width = 1 + _, err = r.Read(buf[0:width]) if err != nil { return } b := buf[0] if b <= 0x7f { - return uint64(b), nil + return uint64(b), width, nil } nb := -int(int8(b)) if nb > uint64Size { @@ -75,6 +78,7 @@ func decodeUintReader(r io.Reader, buf []byte) (x uint64, err os.Error) { for i := 0; i < n; i++ { x <<= 8 x |= uint64(buf[i]) + width++ } return } @@ -405,10 +409,9 @@ func allocate(rtyp reflect.Type, p uintptr, indir int) uintptr { return *(*uintptr)(up) } -func (dec *Decoder) decodeSingle(engine *decEngine, rtyp reflect.Type, b **bytes.Buffer, p uintptr, indir int) (err os.Error) { - defer catchError(&err) - p = allocate(rtyp, p, indir) - state := newDecodeState(dec, b) +func (dec *Decoder) decodeSingle(engine *decEngine, ut *userTypeInfo, p uintptr) (err os.Error) { + p = allocate(ut.base, p, ut.indir) + state := newDecodeState(dec, &dec.buf) state.fieldnum = singletonField basep := p delta := int(state.decodeUint()) @@ -424,10 +427,13 @@ func (dec *Decoder) decodeSingle(engine *decEngine, rtyp reflect.Type, b **bytes return nil } -func (dec *Decoder) decodeStruct(engine *decEngine, rtyp *reflect.StructType, b **bytes.Buffer, p uintptr, indir int) (err os.Error) { - defer catchError(&err) - p = allocate(rtyp, p, indir) - state := newDecodeState(dec, b) +// Indir is for the value, not the type. At the time of the call it may +// differ from ut.indir, which was computed when the engine was built. +// This state cannot arise for decodeSingle, which is called directly +// from the user's value, not from the innards of an engine. +func (dec *Decoder) decodeStruct(engine *decEngine, ut *userTypeInfo, p uintptr, indir int) (err os.Error) { + p = allocate(ut.base.(*reflect.StructType), p, indir) + state := newDecodeState(dec, &dec.buf) state.fieldnum = -1 basep := p for state.b.Len() > 0 { @@ -454,9 +460,8 @@ func (dec *Decoder) decodeStruct(engine *decEngine, rtyp *reflect.StructType, b return nil } -func (dec *Decoder) ignoreStruct(engine *decEngine, b **bytes.Buffer) (err os.Error) { - defer catchError(&err) - state := newDecodeState(dec, b) +func (dec *Decoder) ignoreStruct(engine *decEngine) (err os.Error) { + state := newDecodeState(dec, &dec.buf) state.fieldnum = -1 for state.b.Len() > 0 { delta := int(state.decodeUint()) @@ -477,6 +482,18 @@ func (dec *Decoder) ignoreStruct(engine *decEngine, b **bytes.Buffer) (err os.Er return nil } +func (dec *Decoder) ignoreSingle(engine *decEngine) (err os.Error) { + state := newDecodeState(dec, &dec.buf) + state.fieldnum = singletonField + delta := int(state.decodeUint()) + if delta != 0 { + errorf("gob decode: corrupted data: non-zero delta for singleton") + } + instr := &engine.instr[singletonField] + instr.op(instr, state, unsafe.Pointer(nil)) + return nil +} + func (dec *Decoder) decodeArrayHelper(state *decodeState, p uintptr, elemOp decOp, elemWid uintptr, length, elemIndir int, ovfl os.ErrorString) { instr := &decInstr{elemOp, 0, elemIndir, 0, ovfl} for i := 0; i < length; i++ { @@ -501,7 +518,7 @@ func (dec *Decoder) decodeArray(atyp *reflect.ArrayType, state *decodeState, p u func decodeIntoValue(state *decodeState, op decOp, indir int, v reflect.Value, ovfl os.ErrorString) reflect.Value { instr := &decInstr{op, 0, indir, 0, ovfl} - up := unsafe.Pointer(v.Addr()) + up := unsafe.Pointer(v.UnsafeAddr()) if indir > 1 { up = decIndirect(up, indir) } 
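For context on the width change to decodeUintReader above: in the gob wire format an unsigned integer at or below 0x7f is a single byte, and anything larger is a negated byte count followed by the value's big-endian bytes, so 7 encodes as 07 and 256 as FE 01 00. A minimal sketch of that rule applied to a byte slice (the decodeUint name and the absence of error handling are simplifications for illustration):

    package main

    import "fmt"

    // decodeUint reads one gob-encoded unsigned integer from buf and returns
    // the value and the number of bytes consumed.
    func decodeUint(buf []byte) (x uint64, width int) {
        b := buf[0]
        if b <= 0x7f {
            return uint64(b), 1
        }
        n := -int(int8(b)) // negated byte count
        for i := 1; i <= n; i++ {
            x = x<<8 | uint64(buf[i])
        }
        return x, n + 1
    }

    func main() {
        fmt.Println(decodeUint([]byte{0x07}))             // 7 1
        fmt.Println(decodeUint([]byte{0xfe, 0x01, 0x00})) // 256 3
    }
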
@@ -612,9 +629,17 @@ func (dec *Decoder) decodeInterface(ityp *reflect.InterfaceType, state *decodeSt if !ok { errorf("gob: name not registered for interface: %q", name) } + // Read the type id of the concrete value. + concreteId := dec.decodeTypeSequence(true) + if concreteId < 0 { + error(dec.err) + } + // Byte count of value is next; we don't care what it is (it's there + // in case we want to ignore the value by skipping it completely). + state.decodeUint() // Read the concrete value. value := reflect.MakeZero(typ) - dec.decodeValueFromBuffer(value, false, true) + dec.decodeValue(concreteId, value) if dec.err != nil { error(dec.err) } @@ -637,14 +662,16 @@ func (dec *Decoder) ignoreInterface(state *decodeState) { if err != nil { error(err) } - dec.decodeValueFromBuffer(nil, true, true) - if dec.err != nil { - error(err) + id := dec.decodeTypeSequence(true) + if id < 0 { + error(dec.err) } + // At this point, the decoder buffer contains a delimited value. Just toss it. + state.b.Next(int(state.decodeUint())) } // Index by Go types. -var decOpMap = []decOp{ +var decOpTable = [...]decOp{ reflect.Bool: decBool, reflect.Int8: decInt8, reflect.Int16: decInt16, @@ -674,35 +701,43 @@ var decIgnoreOpMap = map[typeId]decOp{ // Return the decoding op for the base type under rt and // the indirection count to reach it. -func (dec *Decoder) decOpFor(wireId typeId, rt reflect.Type, name string) (decOp, int) { - typ, indir := indirect(rt) +func (dec *Decoder) decOpFor(wireId typeId, rt reflect.Type, name string, inProgress map[reflect.Type]*decOp) (*decOp, int) { + ut := userType(rt) + // If this type is already in progress, it's a recursive type (e.g. map[string]*T). + // Return the pointer to the op we're already building. + if opPtr := inProgress[rt]; opPtr != nil { + return opPtr, ut.indir + } + typ := ut.base + indir := ut.indir var op decOp k := typ.Kind() - if int(k) < len(decOpMap) { - op = decOpMap[k] + if int(k) < len(decOpTable) { + op = decOpTable[k] } if op == nil { + inProgress[rt] = &op // Special cases switch t := typ.(type) { case *reflect.ArrayType: name = "element of " + name elemId := dec.wireType[wireId].ArrayT.Elem - elemOp, elemIndir := dec.decOpFor(elemId, t.Elem(), name) + elemOp, elemIndir := dec.decOpFor(elemId, t.Elem(), name, inProgress) ovfl := overflow(name) op = func(i *decInstr, state *decodeState, p unsafe.Pointer) { - state.dec.decodeArray(t, state, uintptr(p), elemOp, t.Elem().Size(), t.Len(), i.indir, elemIndir, ovfl) + state.dec.decodeArray(t, state, uintptr(p), *elemOp, t.Elem().Size(), t.Len(), i.indir, elemIndir, ovfl) } case *reflect.MapType: name = "element of " + name keyId := dec.wireType[wireId].MapT.Key elemId := dec.wireType[wireId].MapT.Elem - keyOp, keyIndir := dec.decOpFor(keyId, t.Key(), name) - elemOp, elemIndir := dec.decOpFor(elemId, t.Elem(), name) + keyOp, keyIndir := dec.decOpFor(keyId, t.Key(), name, inProgress) + elemOp, elemIndir := dec.decOpFor(elemId, t.Elem(), name, inProgress) ovfl := overflow(name) op = func(i *decInstr, state *decodeState, p unsafe.Pointer) { up := unsafe.Pointer(p) - state.dec.decodeMap(t, state, uintptr(up), keyOp, elemOp, i.indir, keyIndir, elemIndir, ovfl) + state.dec.decodeMap(t, state, uintptr(up), *keyOp, *elemOp, i.indir, keyIndir, elemIndir, ovfl) } case *reflect.SliceType: @@ -717,10 +752,10 @@ func (dec *Decoder) decOpFor(wireId typeId, rt reflect.Type, name string) (decOp } else { elemId = dec.wireType[wireId].SliceT.Elem } - elemOp, elemIndir := dec.decOpFor(elemId, t.Elem(), name) + elemOp, elemIndir := 
dec.decOpFor(elemId, t.Elem(), name, inProgress) ovfl := overflow(name) op = func(i *decInstr, state *decodeState, p unsafe.Pointer) { - state.dec.decodeSlice(t, state, uintptr(p), elemOp, t.Elem().Size(), i.indir, elemIndir, ovfl) + state.dec.decodeSlice(t, state, uintptr(p), *elemOp, t.Elem().Size(), i.indir, elemIndir, ovfl) } case *reflect.StructType: @@ -730,8 +765,8 @@ func (dec *Decoder) decOpFor(wireId typeId, rt reflect.Type, name string) (decOp error(err) } op = func(i *decInstr, state *decodeState, p unsafe.Pointer) { - // indirect through enginePtr to delay evaluation for recursive structs - err = dec.decodeStruct(*enginePtr, t, state.b, uintptr(p), i.indir) + // indirect through enginePtr to delay evaluation for recursive structs. + err = dec.decodeStruct(*enginePtr, userType(typ), uintptr(p), i.indir) if err != nil { error(err) } @@ -745,7 +780,7 @@ func (dec *Decoder) decOpFor(wireId typeId, rt reflect.Type, name string) (decOp if op == nil { errorf("gob: decode can't handle type %s", rt.String()) } - return op, indir + return &op, indir } // Return the decoding op for a field that has no destination. @@ -796,7 +831,7 @@ func (dec *Decoder) decIgnoreOpFor(wireId typeId) decOp { } op = func(i *decInstr, state *decodeState, p unsafe.Pointer) { // indirect through enginePtr to delay evaluation for recursive structs - state.dec.ignoreStruct(*enginePtr, state.b) + state.dec.ignoreStruct(*enginePtr) } } } @@ -809,11 +844,15 @@ func (dec *Decoder) decIgnoreOpFor(wireId typeId) decOp { // Are these two gob Types compatible? // Answers the question for basic types, arrays, and slices. // Structs are considered ok; fields will be checked later. -func (dec *Decoder) compatibleType(fr reflect.Type, fw typeId) bool { - fr, _ = indirect(fr) +func (dec *Decoder) compatibleType(fr reflect.Type, fw typeId, inProgress map[reflect.Type]typeId) bool { + if rhs, ok := inProgress[fr]; ok { + return rhs == fw + } + inProgress[fr] = fw + fr = userType(fr).base switch t := fr.(type) { default: - // map, chan, etc: cannot handle. + // chan, etc: cannot handle. return false case *reflect.BoolType: return fw == tBool @@ -835,14 +874,14 @@ func (dec *Decoder) compatibleType(fr reflect.Type, fw typeId) bool { return false } array := wire.ArrayT - return t.Len() == array.Len && dec.compatibleType(t.Elem(), array.Elem) + return t.Len() == array.Len && dec.compatibleType(t.Elem(), array.Elem, inProgress) case *reflect.MapType: wire, ok := dec.wireType[fw] if !ok || wire.MapT == nil { return false } MapType := wire.MapT - return dec.compatibleType(t.Key(), MapType.Key) && dec.compatibleType(t.Elem(), MapType.Elem) + return dec.compatibleType(t.Key(), MapType.Key, inProgress) && dec.compatibleType(t.Elem(), MapType.Elem, inProgress) case *reflect.SliceType: // Is it an array of bytes? 
if t.Elem().Kind() == reflect.Uint8 { @@ -855,8 +894,8 @@ func (dec *Decoder) compatibleType(fr reflect.Type, fw typeId) bool { } else { sw = dec.wireType[fw].SliceT } - elem, _ := indirect(t.Elem()) - return sw != nil && dec.compatibleType(elem, sw.Elem) + elem := userType(t.Elem()).base + return sw != nil && dec.compatibleType(elem, sw.Elem, inProgress) case *reflect.StructType: return true } @@ -877,12 +916,22 @@ func (dec *Decoder) compileSingle(remoteId typeId, rt reflect.Type) (engine *dec engine = new(decEngine) engine.instr = make([]decInstr, 1) // one item name := rt.String() // best we can do - if !dec.compatibleType(rt, remoteId) { + if !dec.compatibleType(rt, remoteId, make(map[reflect.Type]typeId)) { return nil, os.ErrorString("gob: wrong type received for local value " + name + ": " + dec.typeString(remoteId)) } - op, indir := dec.decOpFor(remoteId, rt, name) + op, indir := dec.decOpFor(remoteId, rt, name, make(map[reflect.Type]*decOp)) ovfl := os.ErrorString(`value for "` + name + `" out of range`) - engine.instr[singletonField] = decInstr{op, singletonField, indir, 0, ovfl} + engine.instr[singletonField] = decInstr{*op, singletonField, indir, 0, ovfl} + engine.numInstr = 1 + return +} + +func (dec *Decoder) compileIgnoreSingle(remoteId typeId) (engine *decEngine, err os.Error) { + engine = new(decEngine) + engine.instr = make([]decInstr, 1) // one item + op := dec.decIgnoreOpFor(remoteId) + ovfl := overflow(dec.typeString(remoteId)) + engine.instr[0] = decInstr{op, 0, 0, 0, ovfl} engine.numInstr = 1 return } @@ -894,7 +943,6 @@ func isExported(name string) bool { } func (dec *Decoder) compileDec(remoteId typeId, rt reflect.Type) (engine *decEngine, err os.Error) { - defer catchError(&err) srt, ok := rt.(*reflect.StructType) if !ok { return dec.compileSingle(remoteId, rt) @@ -905,13 +953,18 @@ func (dec *Decoder) compileDec(remoteId typeId, rt reflect.Type) (engine *decEng if t, ok := builtinIdToType[remoteId]; ok { wireStruct, _ = t.(*structType) } else { - wireStruct = dec.wireType[remoteId].StructT + wire := dec.wireType[remoteId] + if wire == nil { + error(errBadType) + } + wireStruct = wire.StructT } if wireStruct == nil { errorf("gob: type mismatch in decoder: want struct type %s; got non-struct", rt.String()) } engine = new(decEngine) engine.instr = make([]decInstr, len(wireStruct.Field)) + seen := make(map[reflect.Type]*decOp) // Loop over the fields of the wire type. for fieldnum := 0; fieldnum < len(wireStruct.Field); fieldnum++ { wireField := wireStruct.Field[fieldnum] @@ -927,11 +980,11 @@ func (dec *Decoder) compileDec(remoteId typeId, rt reflect.Type) (engine *decEng engine.instr[fieldnum] = decInstr{op, fieldnum, 0, 0, ovfl} continue } - if !dec.compatibleType(localField.Type, wireField.Id) { + if !dec.compatibleType(localField.Type, wireField.Id, make(map[reflect.Type]typeId)) { errorf("gob: wrong type (%s) for received field %s.%s", localField.Type, wireStruct.Name, wireField.Name) } - op, indir := dec.decOpFor(wireField.Id, localField.Type, localField.Name) - engine.instr[fieldnum] = decInstr{op, fieldnum, indir, uintptr(localField.Offset), ovfl} + op, indir := dec.decOpFor(wireField.Id, localField.Type, localField.Name, seen) + engine.instr[fieldnum] = decInstr{*op, fieldnum, indir, uintptr(localField.Offset), ovfl} engine.numInstr++ } return @@ -966,7 +1019,12 @@ func (dec *Decoder) getIgnoreEnginePtr(wireId typeId) (enginePtr **decEngine, er // To handle recursive types, mark this engine as underway before compiling. 
enginePtr = new(*decEngine) dec.ignorerCache[wireId] = enginePtr - *enginePtr, err = dec.compileDec(wireId, emptyStructType) + wire := dec.wireType[wireId] + if wire != nil && wire.StructT != nil { + *enginePtr, err = dec.compileDec(wireId, emptyStructType) + } else { + *enginePtr, err = dec.compileIgnoreSingle(wireId) + } if err != nil { dec.ignorerCache[wireId] = nil, false } @@ -974,22 +1032,41 @@ func (dec *Decoder) getIgnoreEnginePtr(wireId typeId) (enginePtr **decEngine, er return } -func (dec *Decoder) decode(wireId typeId, val reflect.Value) os.Error { +func (dec *Decoder) decodeValue(wireId typeId, val reflect.Value) (err os.Error) { + defer catchError(&err) + // If the value is nil, it means we should just ignore this item. + if val == nil { + return dec.decodeIgnoredValue(wireId) + } // Dereference down to the underlying struct type. - rt, indir := indirect(val.Type()) - enginePtr, err := dec.getDecEnginePtr(wireId, rt) + ut := userType(val.Type()) + base := ut.base + indir := ut.indir + enginePtr, err := dec.getDecEnginePtr(wireId, base) if err != nil { return err } engine := *enginePtr - if st, ok := rt.(*reflect.StructType); ok { + if st, ok := base.(*reflect.StructType); ok { if engine.numInstr == 0 && st.NumField() > 0 && len(dec.wireType[wireId].StructT.Field) > 0 { - name := rt.Name() + name := base.Name() return os.ErrorString("gob: type mismatch: no fields matched compiling decoder for " + name) } - return dec.decodeStruct(engine, st, dec.state.b, uintptr(val.Addr()), indir) + return dec.decodeStruct(engine, ut, uintptr(val.UnsafeAddr()), indir) + } + return dec.decodeSingle(engine, ut, uintptr(val.UnsafeAddr())) +} + +func (dec *Decoder) decodeIgnoredValue(wireId typeId) os.Error { + enginePtr, err := dec.getIgnoreEnginePtr(wireId) + if err != nil { + return err + } + wire := dec.wireType[wireId] + if wire != nil && wire.StructT != nil { + return dec.ignoreStruct(*enginePtr) } - return dec.decodeSingle(engine, rt, dec.state.b, uintptr(val.Addr()), indir) + return dec.ignoreSingle(*enginePtr) } func init() { @@ -1004,8 +1081,8 @@ func init() { default: panic("gob: unknown size of int/uint") } - decOpMap[reflect.Int] = iop - decOpMap[reflect.Uint] = uop + decOpTable[reflect.Int] = iop + decOpTable[reflect.Uint] = uop // Finally uintptr switch reflect.Typeof(uintptr(0)).Bits() { @@ -1016,5 +1093,5 @@ func init() { default: panic("gob: unknown size of uintptr") } - decOpMap[reflect.Uintptr] = uop + decOpTable[reflect.Uintptr] = uop } diff --git a/libgo/go/gob/decoder.go b/libgo/go/gob/decoder.go index 664001a4b21..f7c994ffa78 100644 --- a/libgo/go/gob/decoder.go +++ b/libgo/go/gob/decoder.go @@ -17,14 +17,13 @@ import ( type Decoder struct { mutex sync.Mutex // each item must be received atomically r io.Reader // source of the data + buf bytes.Buffer // buffer for more efficient i/o from r wireType map[typeId]*wireType // map from remote ID to local description decoderCache map[reflect.Type]map[typeId]**decEngine // cache of compiled engines ignorerCache map[typeId]**decEngine // ditto for ignored objects - state *decodeState // reads data from in-memory buffer countState *decodeState // reads counts from wire - buf []byte - countBuf [9]byte // counts may be uint64s (unlikely!), require 9 bytes - byteBuffer *bytes.Buffer + countBuf []byte // used for decoding integers while parsing messages + tmp []byte // temporary storage for i/o; saves reallocating err os.Error } @@ -33,128 +32,160 @@ func NewDecoder(r io.Reader) *Decoder { dec := new(Decoder) dec.r = r dec.wireType = 
make(map[typeId]*wireType) - dec.state = newDecodeState(dec, &dec.byteBuffer) // buffer set in Decode() dec.decoderCache = make(map[reflect.Type]map[typeId]**decEngine) dec.ignorerCache = make(map[typeId]**decEngine) + dec.countBuf = make([]byte, 9) // counts may be uint64s (unlikely!), require 9 bytes return dec } -// recvType loads the definition of a type and reloads the Decoder's buffer. +// recvType loads the definition of a type. func (dec *Decoder) recvType(id typeId) { // Have we already seen this type? That's an error - if dec.wireType[id] != nil { + if id < firstUserId || dec.wireType[id] != nil { dec.err = os.ErrorString("gob: duplicate type received") return } // Type: wire := new(wireType) - dec.err = dec.decode(tWireType, reflect.NewValue(wire)) + dec.err = dec.decodeValue(tWireType, reflect.NewValue(wire)) if dec.err != nil { return } // Remember we've seen this type. dec.wireType[id] = wire - - // Load the next parcel. - dec.recv() } -// Decode reads the next value from the connection and stores -// it in the data represented by the empty interface value. -// The value underlying e must be the correct type for the next -// data item received, and must be a pointer. -func (dec *Decoder) Decode(e interface{}) os.Error { - value := reflect.NewValue(e) - // If e represents a value as opposed to a pointer, the answer won't - // get back to the caller. Make sure it's a pointer. - if value.Type().Kind() != reflect.Ptr { - dec.err = os.ErrorString("gob: attempt to decode into a non-pointer") - return dec.err +// recvMessage reads the next count-delimited item from the input. It is the converse +// of Encoder.writeMessage. It returns false on EOF or other error reading the message. +func (dec *Decoder) recvMessage() bool { + // Read a count. + nbytes, _, err := decodeUintReader(dec.r, dec.countBuf) + if err != nil { + dec.err = err + return false } - return dec.DecodeValue(value) + dec.readMessage(int(nbytes)) + return dec.err == nil } -// recv reads the next count-delimited item from the input. It is the converse -// of Encoder.send. -func (dec *Decoder) recv() { - // Read a count. - var nbytes uint64 - nbytes, dec.err = decodeUintReader(dec.r, dec.countBuf[0:]) - if dec.err != nil { - return - } +// readMessage reads the next nbytes bytes from the input. +func (dec *Decoder) readMessage(nbytes int) { // Allocate the buffer. - if nbytes > uint64(len(dec.buf)) { - dec.buf = make([]byte, nbytes+1000) + if cap(dec.tmp) < nbytes { + dec.tmp = make([]byte, nbytes+100) // room to grow } - dec.byteBuffer = bytes.NewBuffer(dec.buf[0:nbytes]) + dec.tmp = dec.tmp[:nbytes] // Read the data - _, dec.err = io.ReadFull(dec.r, dec.buf[0:nbytes]) + _, dec.err = io.ReadFull(dec.r, dec.tmp) if dec.err != nil { if dec.err == os.EOF { dec.err = io.ErrUnexpectedEOF } return } + dec.buf.Write(dec.tmp) } -// decodeValueFromBuffer grabs the next value from the input. The Decoder's -// buffer already contains data. If the next item in the buffer is a type -// descriptor, it may be necessary to reload the buffer, but recvType does that. -func (dec *Decoder) decodeValueFromBuffer(value reflect.Value, ignoreInterfaceValue, countPresent bool) { - for dec.state.b.Len() > 0 { - // Receive a type id. - id := typeId(dec.state.decodeInt()) +// toInt turns an encoded uint64 into an int, according to the marshaling rules. 
+func toInt(x uint64) int64 { + i := int64(x >> 1) + if x&1 != 0 { + i = ^i + } + return i +} + +func (dec *Decoder) nextInt() int64 { + n, _, err := decodeUintReader(&dec.buf, dec.countBuf) + if err != nil { + dec.err = err + } + return toInt(n) +} - // Is it a new type? - if id < 0 { // 0 is the error state, handled above - // If the id is negative, we have a type. - dec.recvType(-id) - if dec.err != nil { +func (dec *Decoder) nextUint() uint64 { + n, _, err := decodeUintReader(&dec.buf, dec.countBuf) + if err != nil { + dec.err = err + } + return n +} + +// decodeTypeSequence parses: +// TypeSequence +// (TypeDefinition DelimitedTypeDefinition*)? +// and returns the type id of the next value. It returns -1 at +// EOF. Upon return, the remainder of dec.buf is the value to be +// decoded. If this is an interface value, it can be ignored by +// simply resetting that buffer. +func (dec *Decoder) decodeTypeSequence(isInterface bool) typeId { + for dec.err == nil { + if dec.buf.Len() == 0 { + if !dec.recvMessage() { break } - continue } - - // Make sure the type has been defined already or is a builtin type (for - // top-level singleton values). - if dec.wireType[id] == nil && builtinIdToType[id] == nil { - dec.err = errBadType - break + // Receive a type id. + id := typeId(dec.nextInt()) + if id >= 0 { + // Value follows. + return id } - // An interface value is preceded by a byte count. - if countPresent { - count := int(dec.state.decodeUint()) - if ignoreInterfaceValue { - // An interface value is preceded by a byte count. Just skip that many bytes. - dec.state.b.Next(int(count)) + // Type definition for (-id) follows. + dec.recvType(-id) + // When decoding an interface, after a type there may be a + // DelimitedValue still in the buffer. Skip its count. + // (Alternatively, the buffer is empty and the byte count + // will be absorbed by recvMessage.) + if dec.buf.Len() > 0 { + if !isInterface { + dec.err = os.ErrorString("extra data in buffer") break } - // Otherwise fall through and decode it. + dec.nextUint() } - dec.err = dec.decode(id, value) - break } + return -1 +} + +// Decode reads the next value from the connection and stores +// it in the data represented by the empty interface value. +// If e is nil, the value will be discarded. Otherwise, +// the value underlying e must either be the correct type for the next +// data item received, and must be a pointer. +func (dec *Decoder) Decode(e interface{}) os.Error { + if e == nil { + return dec.DecodeValue(nil) + } + value := reflect.NewValue(e) + // If e represents a value as opposed to a pointer, the answer won't + // get back to the caller. Make sure it's a pointer. + if value.Type().Kind() != reflect.Ptr { + dec.err = os.ErrorString("gob: attempt to decode into a non-pointer") + return dec.err + } + return dec.DecodeValue(value) } // DecodeValue reads the next value from the connection and stores // it in the data represented by the reflection value. // The value must be the correct type for the next -// data item received. +// data item received, or it may be nil, which means the +// value will be discarded. func (dec *Decoder) DecodeValue(value reflect.Value) os.Error { // Make sure we're single-threaded through here. dec.mutex.Lock() defer dec.mutex.Unlock() + dec.buf.Reset() // In case data lingers from previous invocation. 
dec.err = nil - dec.recv() - if dec.err != nil { - return dec.err + id := dec.decodeTypeSequence(false) + if dec.err == nil { + dec.err = dec.decodeValue(id, value) } - dec.decodeValueFromBuffer(value, false, false) return dec.err } diff --git a/libgo/go/gob/doc.go b/libgo/go/gob/doc.go index 31253f16d09..613974a000f 100644 --- a/libgo/go/gob/doc.go +++ b/libgo/go/gob/doc.go @@ -220,6 +220,54 @@ be predefined or be defined before the value in the stream. package gob /* +Grammar: + +Tokens starting with a lower case letter are terminals; int(n) +and uint(n) represent the signed/unsigned encodings of the value n. + +GobStream: + DelimitedMessage* +DelimitedMessage: + uint(lengthOfMessage) Message +Message: + TypeSequence TypedValue +TypeSequence + (TypeDefinition DelimitedTypeDefinition*)? +DelimitedTypeDefinition: + uint(lengthOfTypeDefinition) TypeDefinition +TypedValue: + int(typeId) Value +TypeDefinition: + int(-typeId) encodingOfWireType +Value: + SingletonValue | StructValue +SingletonValue: + uint(0) FieldValue +FieldValue: + builtinValue | ArrayValue | MapValue | SliceValue | StructValue | InterfaceValue +InterfaceValue: + NilInterfaceValue | NonNilInterfaceValue +NilInterfaceValue: + uint(0) +NonNilInterfaceValue: + ConcreteTypeName TypeSequence InterfaceContents +ConcreteTypeName: + uint(lengthOfName) [already read=n] name +InterfaceContents: + int(concreteTypeId) DelimitedValue +DelimitedValue: + uint(length) Value +ArrayValue: + uint(n) FieldValue*n [n elements] +MapValue: + uint(n) (FieldValue FieldValue)*n [n (key, value) pairs] +SliceValue: + uint(n) FieldValue*n [n elements] +StructValue: + (uint(fieldDelta) FieldValue)* +*/ + +/* For implementers and the curious, here is an encoded example. Given type Point struct {x, y int} and the value diff --git a/libgo/go/gob/encode.go b/libgo/go/gob/encode.go index d286a7e00b8..e92db74ffdd 100644 --- a/libgo/go/gob/encode.go +++ b/libgo/go/gob/encode.go @@ -264,9 +264,6 @@ func encComplex128(i *encInstr, state *encoderState, p unsafe.Pointer) { } } -func encNoOp(i *encInstr, state *encoderState, p unsafe.Pointer) { -} - // Byte arrays are encoded as an unsigned count followed by the raw bytes. func encUint8Array(i *encInstr, state *encoderState, p unsafe.Pointer) { b := *(*[]byte)(p) @@ -359,7 +356,7 @@ func encodeReflectValue(state *encoderState, v reflect.Value, op encOp, indir in if v == nil { errorf("gob: encodeReflectValue: nil element") } - op(nil, state, unsafe.Pointer(v.Addr())) + op(nil, state, unsafe.Pointer(v.UnsafeAddr())) } func (enc *Encoder) encodeMap(b *bytes.Buffer, mv *reflect.MapValue, keyOp, elemOp encOp, keyIndir, elemIndir int) { @@ -387,10 +384,10 @@ func (enc *Encoder) encodeInterface(b *bytes.Buffer, iv *reflect.InterfaceValue) return } - typ, _ := indirect(iv.Elem().Type()) - name, ok := concreteTypeToName[typ] + ut := userType(iv.Elem().Type()) + name, ok := concreteTypeToName[ut.base] if !ok { - errorf("gob: type not registered for interface: %s", typ) + errorf("gob: type not registered for interface: %s", ut.base) } // Send the name. state.encodeUint(uint64(len(name))) @@ -398,22 +395,26 @@ func (enc *Encoder) encodeInterface(b *bytes.Buffer, iv *reflect.InterfaceValue) if err != nil { error(err) } - // Send (and maybe first define) the type id. - enc.sendTypeDescriptor(typ) - // Encode the value into a new buffer. + // Define the type id if necessary. + enc.sendTypeDescriptor(enc.writer(), state, ut) + // Send the type id. + enc.sendTypeId(state, ut) + // Encode the value into a new buffer. 
Any nested type definitions + // should be written to b, before the encoded value. + enc.pushWriter(b) data := new(bytes.Buffer) - err = enc.encode(data, iv.Elem()) + err = enc.encode(data, iv.Elem(), ut) if err != nil { error(err) } - state.encodeUint(uint64(data.Len())) - _, err = state.b.Write(data.Bytes()) - if err != nil { + enc.popWriter() + enc.writeMessage(b, data) + if enc.err != nil { error(err) } } -var encOpMap = []encOp{ +var encOpTable = [...]encOp{ reflect.Bool: encBool, reflect.Int: encInt, reflect.Int8: encInt8, @@ -433,16 +434,24 @@ var encOpMap = []encOp{ reflect.String: encString, } -// Return the encoding op for the base type under rt and +// Return (a pointer to) the encoding op for the base type under rt and // the indirection count to reach it. -func (enc *Encoder) encOpFor(rt reflect.Type) (encOp, int) { - typ, indir := indirect(rt) - var op encOp +func (enc *Encoder) encOpFor(rt reflect.Type, inProgress map[reflect.Type]*encOp) (*encOp, int) { + ut := userType(rt) + // If this type is already in progress, it's a recursive type (e.g. map[string]*T). + // Return the pointer to the op we're already building. + if opPtr := inProgress[rt]; opPtr != nil { + return opPtr, ut.indir + } + typ := ut.base + indir := ut.indir k := typ.Kind() - if int(k) < len(encOpMap) { - op = encOpMap[k] + var op encOp + if int(k) < len(encOpTable) { + op = encOpTable[k] } if op == nil { + inProgress[rt] = &op // Special cases switch t := typ.(type) { case *reflect.SliceType: @@ -451,25 +460,25 @@ func (enc *Encoder) encOpFor(rt reflect.Type) (encOp, int) { break } // Slices have a header; we decode it to find the underlying array. - elemOp, indir := enc.encOpFor(t.Elem()) + elemOp, indir := enc.encOpFor(t.Elem(), inProgress) op = func(i *encInstr, state *encoderState, p unsafe.Pointer) { slice := (*reflect.SliceHeader)(p) if !state.sendZero && slice.Len == 0 { return } state.update(i) - state.enc.encodeArray(state.b, slice.Data, elemOp, t.Elem().Size(), indir, int(slice.Len)) + state.enc.encodeArray(state.b, slice.Data, *elemOp, t.Elem().Size(), indir, int(slice.Len)) } case *reflect.ArrayType: // True arrays have size in the type. - elemOp, indir := enc.encOpFor(t.Elem()) + elemOp, indir := enc.encOpFor(t.Elem(), inProgress) op = func(i *encInstr, state *encoderState, p unsafe.Pointer) { state.update(i) - state.enc.encodeArray(state.b, uintptr(p), elemOp, t.Elem().Size(), indir, t.Len()) + state.enc.encodeArray(state.b, uintptr(p), *elemOp, t.Elem().Size(), indir, t.Len()) } case *reflect.MapType: - keyOp, keyIndir := enc.encOpFor(t.Key()) - elemOp, elemIndir := enc.encOpFor(t.Elem()) + keyOp, keyIndir := enc.encOpFor(t.Key(), inProgress) + elemOp, elemIndir := enc.encOpFor(t.Elem(), inProgress) op = func(i *encInstr, state *encoderState, p unsafe.Pointer) { // Maps cannot be accessed by moving addresses around the way // that slices etc. can. We must recover a full reflection value for @@ -480,7 +489,7 @@ func (enc *Encoder) encOpFor(rt reflect.Type) (encOp, int) { return } state.update(i) - state.enc.encodeMap(state.b, mv, keyOp, elemOp, keyIndir, elemIndir) + state.enc.encodeMap(state.b, mv, *keyOp, *elemOp, keyIndir, elemIndir) } case *reflect.StructType: // Generate a closure that calls out to the engine for the nested type. 
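The inProgress map threaded through encOpFor above is what lets self-referential container types (the map[string]*T case mentioned in the comment) compile without recursing forever; decOpFor gets the same treatment on the decode side. A minimal round-trip sketch in the spirit of the recursive-type tests added later in this patch (the recursiveMap name mirrors those tests; the "gob" import path matches this era's library layout and is "encoding/gob" in later releases):

    package main

    import (
        "bytes"
        "fmt"
        "gob" // "encoding/gob" in later release layouts
    )

    // recursiveMap refers to itself, the kind of type the inProgress map handles.
    type recursiveMap map[string]recursiveMap

    func main() {
        in := recursiveMap{"A": recursiveMap{"B": nil}, "C": nil}
        var buf bytes.Buffer
        if err := gob.NewEncoder(&buf).Encode(in); err != nil {
            fmt.Println("encode:", err)
            return
        }
        out := make(recursiveMap)
        if err := gob.NewDecoder(&buf).Decode(&out); err != nil {
            fmt.Println("decode:", err)
            return
        }
        fmt.Println(len(out)) // expect 2
    }
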
@@ -508,28 +517,31 @@ func (enc *Encoder) encOpFor(rt reflect.Type) (encOp, int) { if op == nil { errorf("gob enc: can't happen: encode type %s", rt.String()) } - return op, indir + return &op, indir } // The local Type was compiled from the actual value, so we know it's compatible. func (enc *Encoder) compileEnc(rt reflect.Type) *encEngine { srt, isStruct := rt.(*reflect.StructType) engine := new(encEngine) + seen := make(map[reflect.Type]*encOp) if isStruct { - engine.instr = make([]encInstr, srt.NumField()+1) // +1 for terminator - for fieldnum := 0; fieldnum < srt.NumField(); fieldnum++ { - f := srt.Field(fieldnum) - op, indir := enc.encOpFor(f.Type) + for fieldNum := 0; fieldNum < srt.NumField(); fieldNum++ { + f := srt.Field(fieldNum) if !isExported(f.Name) { - op = encNoOp + continue } - engine.instr[fieldnum] = encInstr{op, fieldnum, indir, uintptr(f.Offset)} + op, indir := enc.encOpFor(f.Type, seen) + engine.instr = append(engine.instr, encInstr{*op, fieldNum, indir, uintptr(f.Offset)}) + } + if srt.NumField() > 0 && len(engine.instr) == 0 { + errorf("type %s has no exported fields", rt) } - engine.instr[srt.NumField()] = encInstr{encStructTerminator, 0, 0, 0} + engine.instr = append(engine.instr, encInstr{encStructTerminator, 0, 0, 0}) } else { engine.instr = make([]encInstr, 1) - op, indir := enc.encOpFor(rt) - engine.instr[0] = encInstr{op, singletonField, indir, 0} // offset is zero + op, indir := enc.encOpFor(rt, seen) + engine.instr[0] = encInstr{*op, singletonField, indir, 0} // offset is zero } return engine } @@ -556,18 +568,16 @@ func (enc *Encoder) lockAndGetEncEngine(rt reflect.Type) *encEngine { return enc.getEncEngine(rt) } -func (enc *Encoder) encode(b *bytes.Buffer, value reflect.Value) (err os.Error) { +func (enc *Encoder) encode(b *bytes.Buffer, value reflect.Value, ut *userTypeInfo) (err os.Error) { defer catchError(&err) - // Dereference down to the underlying object. - rt, indir := indirect(value.Type()) - for i := 0; i < indir; i++ { + for i := 0; i < ut.indir; i++ { value = reflect.Indirect(value) } - engine := enc.lockAndGetEncEngine(rt) + engine := enc.lockAndGetEncEngine(ut.base) if value.Type().Kind() == reflect.Struct { - enc.encodeStruct(b, engine, value.Addr()) + enc.encodeStruct(b, engine, value.UnsafeAddr()) } else { - enc.encodeSingle(b, engine, value.Addr()) + enc.encodeSingle(b, engine, value.UnsafeAddr()) } return nil } diff --git a/libgo/go/gob/encoder.go b/libgo/go/gob/encoder.go index 8869b262982..92d036c11c3 100644 --- a/libgo/go/gob/encoder.go +++ b/libgo/go/gob/encoder.go @@ -16,9 +16,8 @@ import ( // other side of a connection. type Encoder struct { mutex sync.Mutex // each item must be sent atomically - w io.Writer // where to send the data + w []io.Writer // where to send the data sent map[reflect.Type]typeId // which types we've already sent - state *encoderState // so we can encode integers, strings directly countState *encoderState // stage for writing counts buf []byte // for collecting the output. err os.Error @@ -27,13 +26,27 @@ type Encoder struct { // NewEncoder returns a new encoder that will transmit on the io.Writer. 
func NewEncoder(w io.Writer) *Encoder { enc := new(Encoder) - enc.w = w + enc.w = []io.Writer{w} enc.sent = make(map[reflect.Type]typeId) - enc.state = newEncoderState(enc, new(bytes.Buffer)) enc.countState = newEncoderState(enc, new(bytes.Buffer)) return enc } +// writer() returns the innermost writer the encoder is using +func (enc *Encoder) writer() io.Writer { + return enc.w[len(enc.w)-1] +} + +// pushWriter adds a writer to the encoder. +func (enc *Encoder) pushWriter(w io.Writer) { + enc.w = append(enc.w, w) +} + +// popWriter pops the innermost writer. +func (enc *Encoder) popWriter() { + enc.w = enc.w[0 : len(enc.w)-1] +} + func (enc *Encoder) badType(rt reflect.Type) { enc.setError(os.ErrorString("gob: can't encode type " + rt.String())) } @@ -42,16 +55,14 @@ func (enc *Encoder) setError(err os.Error) { if enc.err == nil { // remember the first. enc.err = err } - enc.state.b.Reset() } -// Send the data item preceded by a unsigned count of its length. -func (enc *Encoder) send() { - // Encode the length. - enc.countState.encodeUint(uint64(enc.state.b.Len())) +// writeMessage sends the data item preceded by a unsigned count of its length. +func (enc *Encoder) writeMessage(w io.Writer, b *bytes.Buffer) { + enc.countState.encodeUint(uint64(b.Len())) // Build the buffer. countLen := enc.countState.b.Len() - total := countLen + enc.state.b.Len() + total := countLen + b.Len() if total > len(enc.buf) { enc.buf = make([]byte, total+1000) // extra for growth } @@ -59,17 +70,18 @@ func (enc *Encoder) send() { // TODO(r): avoid the extra copy here. enc.countState.b.Read(enc.buf[0:countLen]) // Now the data. - enc.state.b.Read(enc.buf[countLen:total]) + b.Read(enc.buf[countLen:total]) // Write the data. - _, err := enc.w.Write(enc.buf[0:total]) + _, err := w.Write(enc.buf[0:total]) if err != nil { enc.setError(err) } } -func (enc *Encoder) sendType(origt reflect.Type) (sent bool) { +func (enc *Encoder) sendType(w io.Writer, state *encoderState, origt reflect.Type) (sent bool) { // Drill down to the base type. - rt, _ := indirect(origt) + ut := userType(origt) + rt := ut.base switch rt := rt.(type) { default: @@ -112,10 +124,10 @@ func (enc *Encoder) sendType(origt reflect.Type) (sent bool) { } // Send the pair (-id, type) // Id: - enc.state.encodeInt(-int64(info.id)) + state.encodeInt(-int64(info.id)) // Type: - enc.encode(enc.state.b, reflect.NewValue(info.wire)) - enc.send() + enc.encode(state.b, reflect.NewValue(info.wire), wireTypeUserInfo) + enc.writeMessage(w, state.b) if enc.err != nil { return } @@ -128,10 +140,10 @@ func (enc *Encoder) sendType(origt reflect.Type) (sent bool) { switch st := rt.(type) { case *reflect.StructType: for i := 0; i < st.NumField(); i++ { - enc.sendType(st.Field(i).Type) + enc.sendType(w, state, st.Field(i).Type) } case reflect.ArrayOrSliceType: - enc.sendType(st.Elem()) + enc.sendType(w, state, st.Elem()) } return true } @@ -142,15 +154,16 @@ func (enc *Encoder) Encode(e interface{}) os.Error { return enc.EncodeValue(reflect.NewValue(e)) } -// sendTypeId makes sure the remote side knows about this type. +// sendTypeDescriptor makes sure the remote side knows about this type. // It will send a descriptor if this is the first time the type has been -// sent. Regardless, it sends the id. -func (enc *Encoder) sendTypeDescriptor(rt reflect.Type) { +// sent. +func (enc *Encoder) sendTypeDescriptor(w io.Writer, state *encoderState, ut *userTypeInfo) { // Make sure the type is known to the other side. - // First, have we already sent this type? 
- if _, alreadySent := enc.sent[rt]; !alreadySent { + // First, have we already sent this (base) type? + base := ut.base + if _, alreadySent := enc.sent[base]; !alreadySent { // No, so send it. - sent := enc.sendType(rt) + sent := enc.sendType(w, state, base) if enc.err != nil { return } @@ -159,18 +172,21 @@ func (enc *Encoder) sendTypeDescriptor(rt reflect.Type) { // need to send the type info but we do need to update enc.sent. if !sent { typeLock.Lock() - info, err := getTypeInfo(rt) + info, err := getTypeInfo(base) typeLock.Unlock() if err != nil { enc.setError(err) return } - enc.sent[rt] = info.id + enc.sent[base] = info.id } } +} +// sendTypeId sends the id, which must have already been defined. +func (enc *Encoder) sendTypeId(state *encoderState, ut *userTypeInfo) { // Identify the type of this top-level value. - enc.state.encodeInt(int64(enc.sent[rt])) + state.encodeInt(int64(enc.sent[ut.base])) } // EncodeValue transmits the data item represented by the reflection value, @@ -181,26 +197,29 @@ func (enc *Encoder) EncodeValue(value reflect.Value) os.Error { enc.mutex.Lock() defer enc.mutex.Unlock() - enc.err = nil - rt, _ := indirect(value.Type()) + // Remove any nested writers remaining due to previous errors. + enc.w = enc.w[0:1] - // Sanity check only: encoder should never come in with data present. - if enc.state.b.Len() > 0 || enc.countState.b.Len() > 0 { - enc.err = os.ErrorString("encoder: buffer not empty") - return enc.err + ut, err := validUserType(value.Type()) + if err != nil { + return err } - enc.sendTypeDescriptor(rt) + enc.err = nil + state := newEncoderState(enc, new(bytes.Buffer)) + + enc.sendTypeDescriptor(enc.writer(), state, ut) + enc.sendTypeId(state, ut) if enc.err != nil { return enc.err } // Encode the object. - err := enc.encode(enc.state.b, value) + err = enc.encode(state.b, value, ut) if err != nil { enc.setError(err) } else { - enc.send() + enc.writeMessage(enc.writer(), state.b) } return enc.err diff --git a/libgo/go/gob/encoder_test.go b/libgo/go/gob/encoder_test.go index c2309352a09..a0c713b81df 100644 --- a/libgo/go/gob/encoder_test.go +++ b/libgo/go/gob/encoder_test.go @@ -6,6 +6,7 @@ package gob import ( "bytes" + "fmt" "io" "os" "reflect" @@ -120,7 +121,7 @@ func corruptDataCheck(s string, err os.Error, t *testing.T) { dec := NewDecoder(b) err1 := dec.Decode(new(ET2)) if err1 != err { - t.Error("expected error", err, "got", err1) + t.Errorf("from %q expected error %s; got %s", s, err, err1) } } @@ -220,7 +221,7 @@ func TestSlice(t *testing.T) { func TestValueError(t *testing.T) { // Encode a *T, decode a T type Type4 struct { - a int + A int } t4p := &Type4{3} var t4 Type4 // note: not a pointer. 
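A related theme in the test changes that follow: several test types (Type4, Point, Bad0) switch to exported field names because the encoder now compiles instructions only for exported fields and reports an error for a struct that has none. A small sketch of both behaviours under that rule (the Visible and hidden type names are illustrative; the "gob" import path matches this era's layout, "encoding/gob" later):

    package main

    import (
        "bytes"
        "fmt"
        "gob" // "encoding/gob" in later release layouts
    )

    type Visible struct {
        X int // exported: transmitted
        y int // unexported: skipped by the encoder
    }

    type hidden struct {
        x int // no exported fields at all
    }

    func main() {
        var buf bytes.Buffer
        enc := gob.NewEncoder(&buf)

        if err := enc.Encode(Visible{X: 1, y: 2}); err != nil {
            fmt.Println("Visible:", err)
            return
        }
        var v Visible
        gob.NewDecoder(&buf).Decode(&v)
        fmt.Println(v.X, v.y) // 1 0: the unexported field never travels

        if err := enc.Encode(hidden{x: 3}); err != nil {
            fmt.Println("hidden:", err) // expect an error about no exported fields
        }
    }
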
@@ -248,6 +249,24 @@ func TestArray(t *testing.T) { } } +func TestRecursiveMapType(t *testing.T) { + type recursiveMap map[string]recursiveMap + r1 := recursiveMap{"A": recursiveMap{"B": nil, "C": nil}, "D": nil} + r2 := make(recursiveMap) + if err := encAndDec(r1, &r2); err != nil { + t.Error(err) + } +} + +func TestRecursiveSliceType(t *testing.T) { + type recursiveSlice []recursiveSlice + r1 := recursiveSlice{0: recursiveSlice{0: nil}, 1: nil} + r2 := make(recursiveSlice, 0) + if err := encAndDec(r1, &r2); err != nil { + t.Error(err) + } +} + // Regression test for bug: must send zero values inside arrays func TestDefaultsInArray(t *testing.T) { type Type7 struct { @@ -383,3 +402,115 @@ func TestInterfaceIndirect(t *testing.T) { t.Fatal("decode error:", err) } } + +// Now follow various tests that decode into things that can't represent the +// encoded value, all of which should be legal. + +// Also, when the ignored object contains an interface value, it may define +// types. Make sure that skipping the value still defines the types by using +// the encoder/decoder pair to send a value afterwards. If an interface +// is sent, its type in the test is always NewType0, so this checks that the +// encoder and decoder don't skew with respect to type definitions. + +type Struct0 struct { + I interface{} +} + +type NewType0 struct { + S string +} + +type ignoreTest struct { + in, out interface{} +} + +var ignoreTests = []ignoreTest{ + // Decode normal struct into an empty struct + {&struct{ A int }{23}, &struct{}{}}, + // Decode normal struct into a nil. + {&struct{ A int }{23}, nil}, + // Decode singleton string into a nil. + {"hello, world", nil}, + // Decode singleton slice into a nil. + {[]int{1, 2, 3, 4}, nil}, + // Decode struct containing an interface into a nil. + {&Struct0{&NewType0{"value0"}}, nil}, + // Decode singleton slice of interfaces into a nil. + {[]interface{}{"hi", &NewType0{"value1"}, 23}, nil}, +} + +func TestDecodeIntoNothing(t *testing.T) { + Register(new(NewType0)) + for i, test := range ignoreTests { + b := new(bytes.Buffer) + enc := NewEncoder(b) + err := enc.Encode(test.in) + if err != nil { + t.Errorf("%d: encode error %s:", i, err) + continue + } + dec := NewDecoder(b) + err = dec.Decode(test.out) + if err != nil { + t.Errorf("%d: decode error: %s", i, err) + continue + } + // Now see if the encoder and decoder are in a consistent state. + str := fmt.Sprintf("Value %d", i) + err = enc.Encode(&NewType0{str}) + if err != nil { + t.Fatalf("%d: NewType0 encode error: %s", i, err) + } + ns := new(NewType0) + err = dec.Decode(ns) + if err != nil { + t.Fatalf("%d: NewType0 decode error: %s", i, err) + } + if ns.S != str { + t.Fatalf("%d: expected %q got %q", i, str, ns.S) + } + } +} + +// Another bug from golang-nuts, involving nested interfaces. +type Bug0Outer struct { + Bug0Field interface{} +} + +type Bug0Inner struct { + A int +} + +func TestNestedInterfaces(t *testing.T) { + var buf bytes.Buffer + e := NewEncoder(&buf) + d := NewDecoder(&buf) + Register(new(Bug0Outer)) + Register(new(Bug0Inner)) + f := &Bug0Outer{&Bug0Outer{&Bug0Inner{7}}} + var v interface{} = f + err := e.Encode(&v) + if err != nil { + t.Fatal("Encode:", err) + } + err = d.Decode(&v) + if err != nil { + t.Fatal("Decode:", err) + } + // Make sure it decoded correctly. 
+ outer1, ok := v.(*Bug0Outer) + if !ok { + t.Fatalf("v not Bug0Outer: %T", v) + } + outer2, ok := outer1.Bug0Field.(*Bug0Outer) + if !ok { + t.Fatalf("v.Bug0Field not Bug0Outer: %T", outer1.Bug0Field) + } + inner, ok := outer2.Bug0Field.(*Bug0Inner) + if !ok { + t.Fatalf("v.Bug0Field.Bug0Field not Bug0Inner: %T", outer2.Bug0Field) + } + if inner.A != 7 { + t.Fatalf("final value %d; expected %d", inner.A, 7) + } +} diff --git a/libgo/go/gob/type.go b/libgo/go/gob/type.go index 22502a6e6b9..6e3f148b4e7 100644 --- a/libgo/go/gob/type.go +++ b/libgo/go/gob/type.go @@ -11,13 +11,76 @@ import ( "sync" ) -// Reflection types are themselves interface values holding structs -// describing the type. Each type has a different struct so that struct can -// be the kind. For example, if typ is the reflect type for an int8, typ is -// a pointer to a reflect.Int8Type struct; if typ is the reflect type for a -// function, typ is a pointer to a reflect.FuncType struct; we use the type -// of that pointer as the kind. +// userTypeInfo stores the information associated with a type the user has handed +// to the package. It's computed once and stored in a map keyed by reflection +// type. +type userTypeInfo struct { + user reflect.Type // the type the user handed us + base reflect.Type // the base type after all indirections + indir int // number of indirections to reach the base type +} + +var ( + // Protected by an RWMutex because we read it a lot and write + // it only when we see a new type, typically when compiling. + userTypeLock sync.RWMutex + userTypeCache = make(map[reflect.Type]*userTypeInfo) +) + +// validType returns, and saves, the information associated with user-provided type rt. +// If the user type is not valid, err will be non-nil. To be used when the error handler +// is not set up. +func validUserType(rt reflect.Type) (ut *userTypeInfo, err os.Error) { + userTypeLock.RLock() + ut = userTypeCache[rt] + userTypeLock.RUnlock() + if ut != nil { + return + } + // Now set the value under the write lock. + userTypeLock.Lock() + defer userTypeLock.Unlock() + if ut = userTypeCache[rt]; ut != nil { + // Lost the race; not a problem. + return + } + ut = new(userTypeInfo) + ut.base = rt + ut.user = rt + // A type that is just a cycle of pointers (such as type T *T) cannot + // be represented in gobs, which need some concrete data. We use a + // cycle detection algorithm from Knuth, Vol 2, Section 3.1, Ex 6, + // pp 539-540. As we step through indirections, run another type at + // half speed. If they meet up, there's a cycle. + slowpoke := ut.base // walks half as fast as ut.base + for { + pt, ok := ut.base.(*reflect.PtrType) + if !ok { + break + } + ut.base = pt.Elem() + if ut.base == slowpoke { // ut.base lapped slowpoke + // recursive pointer type. + return nil, os.ErrorString("can't represent recursive pointer type " + ut.base.String()) + } + if ut.indir%2 == 0 { + slowpoke = slowpoke.(*reflect.PtrType).Elem() + } + ut.indir++ + } + userTypeCache[rt] = ut + return +} +// userType returns, and saves, the information associated with user-provided type rt. +// If the user type is not valid, it calls error. +func userType(rt reflect.Type) *userTypeInfo { + ut, err := validUserType(rt) + if err != nil { + error(err) + } + return ut +} // A typeId represents a gob Type as an integer that can be passed on the wire. // Internally, typeIds are used as keys to a map to recover the underlying type info. 
type typeId int32 @@ -93,7 +156,7 @@ var ( tBool = bootstrapType("bool", false, 1) tInt = bootstrapType("int", int(0), 2) tUint = bootstrapType("uint", uint(0), 3) - tFloat = bootstrapType("float", 0.0, 4) + tFloat = bootstrapType("float", float64(0), 4) tBytes = bootstrapType("bytes", make([]byte, 0), 5) tString = bootstrapType("string", "", 6) tComplex = bootstrapType("complex", 0+0i, 7) @@ -110,6 +173,7 @@ var ( // Predefined because it's needed by the Decoder var tWireType = mustGetTypeInfo(reflect.Typeof(wireType{})).id +var wireTypeUserInfo *userTypeInfo // userTypeInfo of (*wireType) func init() { // Some magic numbers to make sure there are no surprises. @@ -133,6 +197,7 @@ func init() { } nextId = firstUserId registerBasics() + wireTypeUserInfo = userType(reflect.Typeof((*wireType)(nil))) } // Array type @@ -142,12 +207,18 @@ type arrayType struct { Len int } -func newArrayType(name string, elem gobType, length int) *arrayType { - a := &arrayType{CommonType{Name: name}, elem.id(), length} - setTypeId(a) +func newArrayType(name string) *arrayType { + a := &arrayType{CommonType{Name: name}, 0, 0} return a } +func (a *arrayType) init(elem gobType, len int) { + // Set our type id before evaluating the element's, in case it's our own. + setTypeId(a) + a.Elem = elem.id() + a.Len = len +} + func (a *arrayType) safeString(seen map[typeId]bool) string { if seen[a.Id] { return a.Name @@ -165,12 +236,18 @@ type mapType struct { Elem typeId } -func newMapType(name string, key, elem gobType) *mapType { - m := &mapType{CommonType{Name: name}, key.id(), elem.id()} - setTypeId(m) +func newMapType(name string) *mapType { + m := &mapType{CommonType{Name: name}, 0, 0} return m } +func (m *mapType) init(key, elem gobType) { + // Set our type id before evaluating the element's, in case it's our own. + setTypeId(m) + m.Key = key.id() + m.Elem = elem.id() +} + func (m *mapType) safeString(seen map[typeId]bool) string { if seen[m.Id] { return m.Name @@ -189,12 +266,17 @@ type sliceType struct { Elem typeId } -func newSliceType(name string, elem gobType) *sliceType { - s := &sliceType{CommonType{Name: name}, elem.id()} - setTypeId(s) +func newSliceType(name string) *sliceType { + s := &sliceType{CommonType{Name: name}, 0} return s } +func (s *sliceType) init(elem gobType) { + // Set our type id before evaluating the element's, in case it's our own. + setTypeId(s) + s.Elem = elem.id() +} + func (s *sliceType) safeString(seen map[typeId]bool) string { if seen[s.Id] { return s.Name @@ -236,26 +318,26 @@ func (s *structType) string() string { return s.safeString(make(map[typeId]bool) func newStructType(name string) *structType { s := &structType{CommonType{Name: name}, nil} + // For historical reasons we set the id here rather than init. + // Se the comment in newTypeObject for details. setTypeId(s) return s } -// Step through the indirections on a type to discover the base type. -// Return the base type and the number of indirections. -func indirect(t reflect.Type) (rt reflect.Type, count int) { - rt = t - for { - pt, ok := rt.(*reflect.PtrType) - if !ok { - break - } - rt = pt.Elem() - count++ - } - return +func (s *structType) init(field []*fieldType) { + s.Field = field } func newTypeObject(name string, rt reflect.Type) (gobType, os.Error) { + var err os.Error + var type0, type1 gobType + defer func() { + if err != nil { + types[rt] = nil, false + } + }() + // Install the top-level type before the subtypes (e.g. struct before + // fields) so recursive types can be constructed safely. 
switch t := rt.(type) { // All basic types are easy: they are predefined. case *reflect.BoolType: @@ -280,47 +362,62 @@ func newTypeObject(name string, rt reflect.Type) (gobType, os.Error) { return tInterface.gobType(), nil case *reflect.ArrayType: - gt, err := getType("", t.Elem()) + at := newArrayType(name) + types[rt] = at + type0, err = getType("", t.Elem()) if err != nil { return nil, err } - return newArrayType(name, gt, t.Len()), nil + // Historical aside: + // For arrays, maps, and slices, we set the type id after the elements + // are constructed. This is to retain the order of type id allocation after + // a fix made to handle recursive types, which changed the order in + // which types are built. Delaying the setting in this way preserves + // type ids while allowing recursive types to be described. Structs, + // done below, were already handling recursion correctly so they + // assign the top-level id before those of the field. + at.init(type0, t.Len()) + return at, nil case *reflect.MapType: - kt, err := getType("", t.Key()) + mt := newMapType(name) + types[rt] = mt + type0, err = getType("", t.Key()) if err != nil { return nil, err } - vt, err := getType("", t.Elem()) + type1, err = getType("", t.Elem()) if err != nil { return nil, err } - return newMapType(name, kt, vt), nil + mt.init(type0, type1) + return mt, nil case *reflect.SliceType: // []byte == []uint8 is a special case if t.Elem().Kind() == reflect.Uint8 { return tBytes.gobType(), nil } - gt, err := getType(t.Elem().Name(), t.Elem()) + st := newSliceType(name) + types[rt] = st + type0, err = getType(t.Elem().Name(), t.Elem()) if err != nil { return nil, err } - return newSliceType(name, gt), nil + st.init(type0) + return st, nil case *reflect.StructType: - // Install the struct type itself before the fields so recursive - // structures can be constructed safely. - strType := newStructType(name) - types[rt] = strType - idToType[strType.id()] = strType + st := newStructType(name) + types[rt] = st + idToType[st.id()] = st field := make([]*fieldType, t.NumField()) for i := 0; i < t.NumField(); i++ { f := t.Field(i) - typ, _ := indirect(f.Type) + typ := userType(f.Type).base tname := typ.Name() if tname == "" { - t, _ := indirect(f.Type) + t := userType(f.Type).base tname = t.String() } gt, err := getType(tname, f.Type) @@ -329,8 +426,8 @@ func newTypeObject(name string, rt reflect.Type) (gobType, os.Error) { } field[i] = &fieldType{f.Name, gt.id()} } - strType.Field = field - return strType, nil + st.init(field) + return st, nil default: return nil, os.ErrorString("gob NewTypeObject can't handle type: " + rt.String()) @@ -341,7 +438,7 @@ func newTypeObject(name string, rt reflect.Type) (gobType, os.Error) { // getType returns the Gob type describing the given reflect.Type. // typeLock must be held. func getType(name string, rt reflect.Type) (gobType, os.Error) { - rt, _ = indirect(rt) + rt = userType(rt).base typ, present := types[rt] if present { return typ, nil @@ -371,6 +468,7 @@ func bootstrapType(name string, e interface{}, expect typeId) typeId { types[rt] = typ setTypeId(typ) checkId(expect, nextId) + userType(rt) // might as well cache it now return nextId } @@ -381,7 +479,7 @@ func bootstrapType(name string, e interface{}, expect typeId) typeId { // For bootstrapping purposes, we assume that the recipient knows how // to decode a wireType; it is exactly the wireType struct here, interpreted // using the gob rules for sending a structure, except that we assume the -// ids for wireType and structType are known. 
The relevant pieces +// ids for wireType and structType etc. are known. The relevant pieces // are built in encode.go's init() function. // To maintain binary compatibility, if you extend this type, always put // the new fields last. @@ -473,18 +571,18 @@ func RegisterName(name string, value interface{}) { // reserved for nil panic("attempt to register empty name") } - rt, _ := indirect(reflect.Typeof(value)) + base := userType(reflect.Typeof(value)).base // Check for incompatible duplicates. - if t, ok := nameToConcreteType[name]; ok && t != rt { + if t, ok := nameToConcreteType[name]; ok && t != base { panic("gob: registering duplicate types for " + name) } - if n, ok := concreteTypeToName[rt]; ok && n != name { - panic("gob: registering duplicate names for " + rt.String()) + if n, ok := concreteTypeToName[base]; ok && n != name { + panic("gob: registering duplicate names for " + base.String()) } // Store the name and type provided by the user.... nameToConcreteType[name] = reflect.Typeof(value) // but the flattened type in the type table, since that's what decode needs. - concreteTypeToName[rt] = name + concreteTypeToName[base] = name } // Register records a type, identified by a value for that type, under its @@ -530,7 +628,7 @@ func registerBasics() { Register(uint32(0)) Register(uint64(0)) Register(float32(0)) - Register(0.0) + Register(float64(0)) Register(complex64(0i)) Register(complex128(0i)) Register(false) diff --git a/libgo/go/html/doc.go b/libgo/go/html/doc.go index c5338d0781d..4f5dee72da3 100644 --- a/libgo/go/html/doc.go +++ b/libgo/go/html/doc.go @@ -69,6 +69,9 @@ call to Next. For example, to extract an HTML page's anchor text: } } +A Tokenizer typically skips over HTML comments. To return comment tokens, set +Tokenizer.ReturnComments to true before looping over calls to Next. + Parsing is done by calling Parse with an io.Reader, which returns the root of the parse tree (the document element) as a *Node. It is the caller's responsibility to ensure that the Reader provides UTF-8 encoded HTML. For diff --git a/libgo/go/html/token.go b/libgo/go/html/token.go index d6388385051..ad03241ed92 100644 --- a/libgo/go/html/token.go +++ b/libgo/go/html/token.go @@ -25,6 +25,8 @@ const ( EndTagToken // A SelfClosingTagToken tag looks like
. SelfClosingTagToken + // A CommentToken looks like . + CommentToken ) // String returns a string representation of the TokenType. @@ -40,6 +42,8 @@ func (t TokenType) String() string { return "EndTag" case SelfClosingTagToken: return "SelfClosingTag" + case CommentToken: + return "Comment" } return "Invalid(" + strconv.Itoa(int(t)) + ")" } @@ -52,8 +56,8 @@ type Attribute struct { } // A Token consists of a TokenType and some Data (tag name for start and end -// tags, content for text). A tag Token may also contain a slice of Attributes. -// Data is unescaped for both tag and text Tokens (it looks like "a" case SelfClosingTagToken: return "<" + t.tagString() + "/>" + case CommentToken: + return "" } return "Invalid(" + strconv.Itoa(int(t.Type)) + ")" } // A Tokenizer returns a stream of HTML Tokens. type Tokenizer struct { + // If ReturnComments is set, Next returns comment tokens; + // otherwise it skips over comments (default). + ReturnComments bool + // r is the source of the HTML text. r io.Reader // tt is the TokenType of the most recently read token. If tt == Error @@ -176,6 +186,39 @@ func (z *Tokenizer) readTo(x uint8) os.Error { panic("unreachable") } +// nextMarkupDeclaration returns the next TokenType starting with ", don't just assume that it's a comment. + for i := 0; i < 2; i++ { + c, err := z.readByte() + if err != nil { + return TextToken, err + } + if c != '-' { + return z.nextText(), nil + } + } + // is a valid comment. + for dashCount := 2; ; { + c, err := z.readByte() + if err != nil { + return TextToken, err + } + switch c { + case '-': + dashCount++ + case '>': + if dashCount >= 2 { + return CommentToken, nil + } + fallthrough + default: + dashCount = 0 + } + } + panic("unreachable") +} + // nextTag returns the next TokenType starting from the tag open state. func (z *Tokenizer) nextTag() (tt TokenType, err os.Error) { c, err := z.readByte() @@ -189,7 +232,7 @@ func (z *Tokenizer) nextTag() (tt TokenType, err os.Error) { case 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z': tt = StartTagToken case c == '!': - return ErrorToken, os.NewError("html: TODO(nigeltao): implement comments") + return z.nextMarkupDeclaration() case c == '?': return ErrorToken, os.NewError("html: TODO(nigeltao): implement XML processing instructions") default: @@ -221,22 +264,8 @@ func (z *Tokenizer) nextTag() (tt TokenType, err os.Error) { panic("unreachable") } -// Next scans the next token and returns its type. -func (z *Tokenizer) Next() TokenType { - if z.err != nil { - z.tt = ErrorToken - return z.tt - } - z.p0 = z.p1 - c, err := z.readByte() - if err != nil { - z.tt, z.err = ErrorToken, err - return z.tt - } - if c == '<' { - z.tt, z.err = z.nextTag() - return z.tt - } +// nextText reads all text up until an '<'. +func (z *Tokenizer) nextText() TokenType { for { c, err := z.readByte() if err != nil { @@ -255,6 +284,31 @@ func (z *Tokenizer) Next() TokenType { panic("unreachable") } +// Next scans the next token and returns its type. +func (z *Tokenizer) Next() TokenType { + for { + if z.err != nil { + z.tt = ErrorToken + return z.tt + } + z.p0 = z.p1 + c, err := z.readByte() + if err != nil { + z.tt, z.err = ErrorToken, err + return z.tt + } + if c == '<' { + z.tt, z.err = z.nextTag() + if z.tt == CommentToken && !z.ReturnComments { + continue + } + return z.tt + } + return z.nextText() + } + panic("unreachable") +} + // trim returns the largest j such that z.buf[i:j] contains only white space, // or only white space plus the final ">" or "/>" of the raw data. 
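
The dashCount state machine in nextMarkupDeclaration above is worth seeing in isolation. The sketch below (commentEnd is a hypothetical helper, not part of the html package) scans the bytes that follow an opening "<!--" and reports where the comment ends; starting the count at 2 is what makes "<!-->" and "<!--->" complete comments, as the tokenizer's comment notes.

package main

import "fmt"

// commentEnd returns the index just past the '>' that terminates an HTML
// comment, given input positioned immediately after the opening "<!--",
// or -1 if the comment never terminates.
func commentEnd(s []byte) int {
	dashCount := 2 // the two dashes of the opening "<!--" already count
	for i, c := range s {
		switch c {
		case '-':
			dashCount++
		case '>':
			if dashCount >= 2 {
				return i + 1
			}
			dashCount = 0
		default:
			dashCount = 0
		}
	}
	return -1
}

func main() {
	fmt.Println(commentEnd([]byte(" a comment -->rest"))) // 14
	fmt.Println(commentEnd([]byte(">done")))              // 1: "<!-->" is a comment
	fmt.Println(commentEnd([]byte("never closed")))       // -1
}
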
func (z *Tokenizer) trim(i int) int { @@ -299,18 +353,33 @@ loop: return z.buf[i0:i], z.trim(i) } -// Text returns the raw data after unescaping. +// Text returns the unescaped text of a TextToken or a CommentToken. // The contents of the returned slice may change on the next call to Next. func (z *Tokenizer) Text() []byte { - s := unescape(z.Raw()) - z.p0 = z.p1 - return s + switch z.tt { + case TextToken: + s := unescape(z.Raw()) + z.p0 = z.p1 + return s + case CommentToken: + // We trim the "" from the right. + // "" is a valid comment, so the adjusted endpoints might overlap. + i0 := z.p0 + 4 + i1 := z.p1 - 3 + z.p0 = z.p1 + var s []byte + if i0 < i1 { + s = unescape(z.buf[i0:i1]) + } + return s + } + return nil } // TagName returns the lower-cased name of a tag token (the `img` out of -// ``), and whether the tag has attributes. +// ``) and whether the tag has attributes. // The contents of the returned slice may change on the next call to Next. -func (z *Tokenizer) TagName() (name []byte, remaining bool) { +func (z *Tokenizer) TagName() (name []byte, hasAttr bool) { i := z.p0 + 1 if i >= z.p1 { z.p0 = z.p1 @@ -320,14 +389,14 @@ func (z *Tokenizer) TagName() (name []byte, remaining bool) { i++ } name, z.p0 = z.lower(i) - remaining = z.p0 != z.p1 + hasAttr = z.p0 != z.p1 return } // TagAttr returns the lower-cased key and unescaped value of the next unparsed -// attribute for the current tag token, and whether there are more attributes. +// attribute for the current tag token and whether there are more attributes. // The contents of the returned slices may change on the next call to Next. -func (z *Tokenizer) TagAttr() (key, val []byte, remaining bool) { +func (z *Tokenizer) TagAttr() (key, val []byte, moreAttr bool) { key, i := z.lower(z.p0) // Get past the "=\"". if i == z.p1 || z.buf[i] != '=' { @@ -363,7 +432,7 @@ loop: } } val, z.p0 = z.buf[i:dst], z.trim(src) - remaining = z.p0 != z.p1 + moreAttr = z.p0 != z.p1 return } @@ -372,14 +441,14 @@ loop: func (z *Tokenizer) Token() Token { t := Token{Type: z.tt} switch z.tt { - case TextToken: + case TextToken, CommentToken: t.Data = string(z.Text()) case StartTagToken, EndTagToken, SelfClosingTagToken: var attr []Attribute - name, remaining := z.TagName() - for remaining { + name, moreAttr := z.TagName() + for moreAttr { var key, val []byte - key, val, remaining = z.TagAttr() + key, val, moreAttr = z.TagAttr() attr = append(attr, Attribute{string(key), string(val)}) } t.Data = string(name) diff --git a/libgo/go/html/token_test.go b/libgo/go/html/token_test.go index e07999ca5ad..5cf1f6dac30 100644 --- a/libgo/go/html/token_test.go +++ b/libgo/go/html/token_test.go @@ -7,6 +7,7 @@ package html import ( "bytes" "os" + "strings" "testing" ) @@ -15,8 +16,8 @@ type tokenTest struct { desc string // The HTML to parse. html string - // The string representations of the expected tokens. - tokens []string + // The string representations of the expected tokens, joined by '$'. + golden string } var tokenTests = []tokenTest{ @@ -25,61 +26,86 @@ var tokenTests = []tokenTest{ { "text", "foo bar", - []string{ - "foo bar", - }, + "foo bar", }, // An entity. { "entity", "one < two", - []string{ - "one < two", - }, + "one < two", }, // A start, self-closing and end tag. The tokenizer does not care if the start // and end tokens don't match; that is the job of the parser. { "tags", "bd", - []string{ - "", - "b", - "", - "d", - "", - }, + "$b$$d$", + }, + // Comments. 
+ { + "comment0", + "abcdef", + "abc$$$def", + }, + { + "comment1", + "az", + "a$z", + }, + { + "comment2", + "az", + "a$z", + }, + { + "comment3", + "az", + "a$z", + }, + { + "comment4", + "az", + "a$z", + }, + { + "comment5", + "az", + "a$<!>z", + }, + { + "comment6", + "az", + "a$<!->z", + }, + { + "comment7", + "a0; ) { + if(((xbits>>j) & (bitAllocated|bitBlockBoundary)) != 0) { + obj = (byte*)obj - (shift-j)*PtrSize; + shift = j; + bits = xbits>>shift; + goto found; } } + + // Otherwise consult span table to find beginning. + // (Manually inlined copy of MHeap_LookupMaybe.) + nlookup++; + naddrlookup++; + k = (uintptr)obj>>PageShift; + x = k; + if(sizeof(void*) == 8) + x -= (uintptr)arena_start>>PageShift; + s = runtime_mheap.map[x]; + if(s == nil || k < s->start || k - s->start >= s->npages || s->state != MSpanInUse) + continue; + p = (byte*)((uintptr)s->start<sizeclass == 0) { + obj = p; + } else { + if((byte*)obj >= (byte*)s->limit) + continue; + size = runtime_class_to_size[s->sizeclass]; + int32 i = ((byte*)obj - p)/size; + obj = p+i*size; + } + + // Now that we know the object header, reload bits. + off = (uintptr*)obj - (uintptr*)arena_start; + bitp = (uintptr*)arena_start - off/wordsPerBitmapWord - 1; + shift = off % wordsPerBitmapWord; + xbits = *bitp; + bits = xbits >> shift; + + found: + // Now we have bits, bitp, and shift correct for + // obj pointing at the base of the object. + // If not allocated or already marked, done. + if((bits & bitAllocated) == 0 || (bits & bitMarked) != 0) + continue; + *bitp |= bitMarked<= ew) { + wbuf = getempty(wbuf); + bw = (void**)wbuf->w; + w = bw; + ew = bw + nelem(wbuf->w); + } + *w++ = obj; } + + // Done scanning [b, b+n). Prepare for the next iteration of + // the loop by setting b and n to the parameters for the next block. + + // Fetch b from the work buffers. + if(w <= bw) { + // Emptied our buffer: refill. + wbuf = getfull(wbuf); + if(wbuf == nil) + break; + bw = (void**)wbuf->w; + ew = (void**)(wbuf->w + nelem(wbuf->w)); + w = bw+wbuf->nw; + } + b = *--w; + + // Figure out n = size of b. Start by loading bits for b. + off = (uintptr*)b - (uintptr*)arena_start; + bitp = (uintptr*)arena_start - off/wordsPerBitmapWord - 1; + shift = off % wordsPerBitmapWord; + xbits = *bitp; + bits = xbits >> shift; + + // Might be small; look for nearby block boundary. + // A block boundary is marked by either bitBlockBoundary + // or bitAllocated being set (see notes near their definition). + enum { + boundary = bitBlockBoundary|bitAllocated + }; + // Look for a block boundary both after and before b + // in the same bitmap word. + // + // A block boundary j words after b is indicated by + // bits>>j & boundary + // assuming shift+j < bitShift. (If shift+j >= bitShift then + // we'll be bleeding other bit types like bitMarked into our test.) + // Instead of inserting the conditional shift+j < bitShift into the loop, + // we can let j range from 1 to bitShift as long as we first + // apply a mask to keep only the bits corresponding + // to shift+j < bitShift aka j < bitShift-shift. + bits &= (boundary<<(bitShift-shift)) - boundary; + + // A block boundary j words before b is indicated by + // xbits>>(shift-j) & boundary + // (assuming shift >= j). There is no cleverness here + // avoid the test, because when j gets too large the shift + // turns negative, which is undefined in C. 
+ + for(j=1; j>j)&boundary) != 0 || (shift>=j && ((xbits>>(shift-j))&boundary) != 0)) { + n = j*PtrSize; + goto scan; + } + } + + // Fall back to asking span about size class. + // (Manually inlined copy of MHeap_Lookup.) + nlookup++; + nsizelookup++; + x = (uintptr)b>>PageShift; + if(sizeof(void*) == 8) + x -= (uintptr)arena_start>>PageShift; + s = runtime_mheap.map[x]; + if(s->sizeclass == 0) + n = s->npages<sizeclass]; + scan:; + } +} + +static struct { + Workbuf *full; + Workbuf *empty; + byte *chunk; + uintptr nchunk; +} work; + +// Get an empty work buffer off the work.empty list, +// allocating new buffers as needed. +static Workbuf* +getempty(Workbuf *b) +{ + if(b != nil) { + b->nw = nelem(b->w); + b->next = work.full; + work.full = b; + } + b = work.empty; + if(b != nil) { + work.empty = b->next; + return b; + } + + if(work.nchunk < sizeof *b) { + work.nchunk = 1<<20; + work.chunk = runtime_SysAlloc(work.nchunk); } + b = (Workbuf*)work.chunk; + work.chunk += sizeof *b; + work.nchunk -= sizeof *b; + return b; } +// Get a full work buffer off the work.full list, or return nil. +static Workbuf* +getfull(Workbuf *b) +{ + if(b != nil) { + b->nw = 0; + b->next = work.empty; + work.empty = b; + } + b = work.full; + if(b != nil) + work.full = b->next; + return b; +} + +// Scanstack calls scanblock on each of gp's stack segments. static void markfin(void *v) { uintptr size; - uint32 *refp; size = 0; - refp = nil; - if(!runtime_mlookup(v, (byte**)&v, &size, nil, &refp) || !(*refp & RefHasFinalizer)) + if(!runtime_mlookup(v, (byte**)&v, &size, nil) || !runtime_blockspecial(v)) runtime_throw("mark - finalizer inconsistency"); - + // do not mark the finalizer block itself. just mark the things it points at. scanblock(v, size); } @@ -131,32 +350,12 @@ __go_register_gc_roots (struct root_list* r) roots = r; } +// Mark static void mark(void) { - uintptr blsize, nobj; struct root_list *pl; - // Figure out how big an object stack we need. - // Get a new one if we need more than we have - // or we need significantly less than we have. - nobj = mstats.heap_objects; - if(nobj > (uintptr)(ebl - bl) || nobj < (uintptr)(ebl-bl)/4) { - if(bl != nil) - runtime_SysFree(bl, (byte*)ebl - (byte*)bl); - - // While we're allocated a new object stack, - // add 20% headroom and also round up to - // the nearest page boundary, since mmap - // will anyway. - nobj = nobj * 12/10; - blsize = nobj * sizeof *bl; - blsize = (blsize + 4095) & ~4095; - nobj = blsize / sizeof *bl; - bl = runtime_SysAlloc(blsize); - ebl = bl + nobj; - } - for(pl = roots; pl != nil; pl = pl->next) { struct root* pr = &pl->roots[0]; while(1) { @@ -179,97 +378,85 @@ mark(void) runtime_walkfintab(markfin, scanblock); } -// free RefNone, free & queue finalizers for RefNone|RefHasFinalizer, reset RefSome +// Sweep frees or calls finalizers for blocks not marked in the mark phase. +// It clears the mark bits in preparation for the next GC round. static void -sweepspan(MSpan *s) +sweep(void) { - int32 n, npages, size; + MSpan *s; + int32 cl, n, npages; + uintptr size; byte *p; - uint32 ref, *gcrefp, *gcrefep; MCache *c; Finalizer *f; - p = (byte*)(s->start << PageShift); - if(s->sizeclass == 0) { - // Large block. - ref = s->gcref0; - switch(ref & ~(RefFlags^RefHasFinalizer)) { - case RefNone: - // Free large object. 
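
getempty and getfull above manage the mark phase's pending-pointer queue as fixed-size buffers shuttled between a full list and an empty list. The Go sketch below is a loose analogue with invented names (worklist, workbuf) and a tiny buffer size; it only shows the two-list recycling pattern, not the runtime's allocator-backed version.

package main

import "fmt"

// workbuf is a fixed-size batch of pending work, chained onto either the
// full or the empty list.
type workbuf struct {
	next *workbuf
	n    int
	w    [4]int // tiny for the demo; the runtime's buffers are far larger
}

type worklist struct {
	full, empty *workbuf
}

// put appends x, retiring the current buffer to the full list when it
// fills and grabbing (or allocating) an empty one, as getempty does.
func (l *worklist) put(x int, b *workbuf) *workbuf {
	if b == nil || b.n == len(b.w) {
		if b != nil {
			b.next, l.full = l.full, b
		}
		if l.empty != nil {
			b, l.empty = l.empty, l.empty.next
		} else {
			b = new(workbuf)
		}
	}
	b.w[b.n] = x
	b.n++
	return b
}

// get pops one item, refilling from the full list when the current buffer
// drains, as getfull does; ok is false when no work remains.
func (l *worklist) get(b *workbuf) (x int, nb *workbuf, ok bool) {
	if b == nil || b.n == 0 {
		if b != nil {
			b.next, l.empty = l.empty, b
		}
		if l.full == nil {
			return 0, nil, false
		}
		b, l.full = l.full, l.full.next
	}
	b.n--
	return b.w[b.n], b, true
}

func main() {
	var l worklist
	var b *workbuf
	for i := 1; i <= 10; i++ {
		b = l.put(i, b)
	}
	sum := 0
	for {
		x, nb, ok := l.get(b)
		if !ok {
			break
		}
		b, sum = nb, sum+x
	}
	fmt.Println(sum) // 55
}
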
- mstats.alloc -= s->npages<npages<npages<gcref0 = RefFree; - runtime_MHeap_Free(&runtime_mheap, s, 1); - break; - case RefNone|RefHasFinalizer: - f = runtime_getfinalizer(p, 1); - if(f == nil) - runtime_throw("finalizer inconsistency"); - f->arg = p; - f->next = finq; - finq = f; - ref &= ~RefHasFinalizer; - // fall through - case RefSome: - case RefSome|RefHasFinalizer: - s->gcref0 = RefNone | (ref&RefFlags); - break; + for(s = runtime_mheap.allspans; s != nil; s = s->allnext) { + if(s->state != MSpanInUse) + continue; + + p = (byte*)(s->start << PageShift); + cl = s->sizeclass; + if(cl == 0) { + size = s->npages< 0; n--, p += size) { + uintptr off, *bitp, shift, bits; - // Chunk full of small blocks. - runtime_MGetSizeClassInfo(s->sizeclass, &size, &npages, &n); - gcrefp = s->gcref; - gcrefep = s->gcref + n; - for(; gcrefp < gcrefep; gcrefp++, p += size) { - ref = *gcrefp; - if(ref < RefNone) // RefFree or RefStack - continue; - switch(ref & ~(RefFlags^RefHasFinalizer)) { - case RefNone: - // Free small object. - if(ref & RefProfiled) + off = (uintptr*)p - (uintptr*)runtime_mheap.arena_start; + bitp = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1; + shift = off % wordsPerBitmapWord; + bits = *bitp>>shift; + + if((bits & bitAllocated) == 0) + continue; + + if((bits & bitMarked) != 0) { + *bitp &= ~(bitMarked<arg = p; + f->next = finq; + finq = f; + continue; + } runtime_MProf_Free(p, size); - *gcrefp = RefFree; - c = m->mcache; - if(size > (int32)sizeof(uintptr)) - ((uintptr*)p)[1] = 1; // mark as "needs to be zeroed" + } + + // Mark freed; restore block boundary bit. + *bitp = (*bitp & ~(bitMask<sizeclass == 0) { + // Free large span. + runtime_unmarkspan(p, 1<mcache; + if(size > sizeof(uintptr)) + ((uintptr*)p)[1] = 1; // mark as "needs to be zeroed" + mstats.by_size[s->sizeclass].nfree++; + runtime_MCache_Free(c, p, s->sizeclass, size); + } mstats.alloc -= size; mstats.nfree++; - mstats.by_size[s->sizeclass].nfree++; - runtime_MCache_Free(c, p, s->sizeclass, size); - break; - case RefNone|RefHasFinalizer: - f = runtime_getfinalizer(p, 1); - if(f == nil) - runtime_throw("finalizer inconsistency"); - f->arg = p; - f->next = finq; - finq = f; - ref &= ~RefHasFinalizer; - // fall through - case RefSome: - case RefSome|RefHasFinalizer: - *gcrefp = RefNone | (ref&RefFlags); - break; } } } -static void -sweep(void) -{ - MSpan *s; - - for(s = runtime_mheap.allspans; s != nil; s = s->allnext) - if(s->state == MSpanInUse) - sweepspan(s); -} - static pthread_mutex_t gcsema = PTHREAD_MUTEX_INITIALIZER; // Initialized from $GOGC. GOGC=off means no gc. 
@@ -286,7 +473,8 @@ static int32 gcpercent = -2; void runtime_gc(int32 force __attribute__ ((unused))) { - int64 t0, t1; + int64 t0, t1, t2, t3; + uint64 heap0, heap1, obj0, obj1; char *p; Finalizer *fp; @@ -309,29 +497,65 @@ runtime_gc(int32 force __attribute__ ((unused))) gcpercent = -1; else gcpercent = runtime_atoi(p); + + p = runtime_getenv("GOGCTRACE"); + if(p != nil) + gctrace = runtime_atoi(p); } if(gcpercent < 0) return; pthread_mutex_lock(&finqlock); pthread_mutex_lock(&gcsema); - m->locks++; // disable gc during the mallocs in newproc + if(!force && mstats.heap_alloc < mstats.next_gc) { + pthread_mutex_unlock(&gcsema); + pthread_mutex_unlock(&finqlock); + return; + } + t0 = runtime_nanotime(); + nlookup = 0; + nsizelookup = 0; + naddrlookup = 0; + + m->gcing = 1; runtime_stoptheworld(); - if(force || mstats.heap_alloc >= mstats.next_gc) { - __go_cachestats(); - mark(); - sweep(); - __go_stealcache(); - mstats.next_gc = mstats.heap_alloc+mstats.heap_alloc*gcpercent/100; - } + if(runtime_mheap.Lock.key != 0) + runtime_throw("runtime_mheap locked during gc"); + __go_cachestats(); + heap0 = mstats.heap_alloc; + obj0 = mstats.nmalloc - mstats.nfree; + + mark(); t1 = runtime_nanotime(); + sweep(); + t2 = runtime_nanotime(); + __go_stealcache(); + + mstats.next_gc = mstats.heap_alloc+mstats.heap_alloc*gcpercent/100; + m->gcing = 0; + + m->locks++; // disable gc during the mallocs in newproc + + heap1 = mstats.heap_alloc; + obj1 = mstats.nmalloc - mstats.nfree; + + t3 = runtime_nanotime(); + mstats.pause_ns[mstats.numgc%nelem(mstats.pause_ns)] = t3 - t0; + mstats.pause_total_ns += t3 - t0; mstats.numgc++; - mstats.pause_ns[mstats.numgc%nelem(mstats.pause_ns)] = t1 - t0; - mstats.pause_total_ns += t1 - t0; if(mstats.debuggc) - runtime_printf("pause %llu\n", (unsigned long long)t1-t0); + runtime_printf("pause %llu\n", (unsigned long long)t3-t0); + + if(gctrace) { + runtime_printf("gc%d: %llu+%llu+%llu ms %llu -> %llu MB %llu -> %llu (%llu-%llu) objects %llu pointer lookups (%llu size, %llu addr)\n", + mstats.numgc, (unsigned long long)(t1-t0)/1000000, (unsigned long long)(t2-t1)/1000000, (unsigned long long)(t3-t2)/1000000, + (unsigned long long)heap0>>20, (unsigned long long)heap1>>20, (unsigned long long)obj0, (unsigned long long)obj1, + (unsigned long long)mstats.nmalloc, (unsigned long long)mstats.nfree, + (unsigned long long)nlookup, (unsigned long long)nsizelookup, (unsigned long long)naddrlookup); + } + pthread_mutex_unlock(&gcsema); runtime_starttheworld(); @@ -350,6 +574,9 @@ runtime_gc(int32 force __attribute__ ((unused))) } m->locks--; pthread_mutex_unlock(&finqlock); + + if(gctrace > 1 && !force) + runtime_gc(1); } static void @@ -385,6 +612,202 @@ runfinq(void* dummy) } } +#define runtime_gomaxprocs 2 + +// mark the block at v of size n as allocated. +// If noptr is true, mark it as having no pointers. 
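
The pacing rule visible above is simply: after a collection, schedule the next one when the live heap has grown by gcpercent percent, with gcpercent taken from $GOGC and GOGC=off disabling collection. A small sketch of that arithmetic (nextGC is an invented name, and the default of 100 when $GOGC is unset is an assumption, since that branch is outside the hunk shown here):

package main

import (
	"fmt"
	"os"
	"strconv"
)

// nextGC returns the heap size at which the next collection should run,
// given the bytes still live after the current one, plus whether
// collection is enabled at all.
func nextGC(heapAlloc uint64) (target uint64, enabled bool) {
	gcpercent := 100 // assumed default when $GOGC is unset
	if p := os.Getenv("GOGC"); p != "" {
		if p == "off" {
			return 0, false
		}
		if n, err := strconv.Atoi(p); err == nil && n >= 0 {
			gcpercent = n
		}
	}
	return heapAlloc + heapAlloc*uint64(gcpercent)/100, true
}

func main() {
	target, on := nextGC(4 << 20)
	fmt.Println(target, on) // 8388608 true: a 4 MB live heap grows to 8 MB before the next GC
}
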
+void +runtime_markallocated(void *v, uintptr n, bool noptr) +{ + uintptr *b, obits, bits, off, shift; + + // if(0) + // runtime_printf("markallocated %p+%p\n", v, n); + + if((byte*)v+n > (byte*)runtime_mheap.arena_used || (byte*)v < runtime_mheap.arena_start) + runtime_throw("markallocated: bad pointer"); + + off = (uintptr*)v - (uintptr*)runtime_mheap.arena_start; // word offset + b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1; + shift = off % wordsPerBitmapWord; + + for(;;) { + obits = *b; + bits = (obits & ~(bitMask< 1: use atomic op + if(runtime_casp((void**)b, (void*)obits, (void*)bits)) + break; + } + } +} + +// mark the block at v of size n as freed. +void +runtime_markfreed(void *v, uintptr n) +{ + uintptr *b, obits, bits, off, shift; + + // if(0) + // runtime_printf("markallocated %p+%p\n", v, n); + + if((byte*)v+n > (byte*)runtime_mheap.arena_used || (byte*)v < runtime_mheap.arena_start) + runtime_throw("markallocated: bad pointer"); + + off = (uintptr*)v - (uintptr*)runtime_mheap.arena_start; // word offset + b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1; + shift = off % wordsPerBitmapWord; + + for(;;) { + obits = *b; + bits = (obits & ~(bitMask< 1: use atomic op + if(runtime_casp((void**)b, (void*)obits, (void*)bits)) + break; + } + } +} + +// check that the block at v of size n is marked freed. +void +runtime_checkfreed(void *v, uintptr n) +{ + uintptr *b, bits, off, shift; + + if(!runtime_checking) + return; + + if((byte*)v+n > (byte*)runtime_mheap.arena_used || (byte*)v < runtime_mheap.arena_start) + return; // not allocated, so okay + + off = (uintptr*)v - (uintptr*)runtime_mheap.arena_start; // word offset + b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1; + shift = off % wordsPerBitmapWord; + + bits = *b>>shift; + if((bits & bitAllocated) != 0) { + runtime_printf("checkfreed %p+%p: off=%p have=%p\n", + v, (void*)n, (void*)off, (void*)(bits & bitMask)); + runtime_throw("checkfreed: not freed"); + } +} + +// mark the span of memory at v as having n blocks of the given size. +// if leftover is true, there is left over space at the end of the span. +void +runtime_markspan(void *v, uintptr size, uintptr n, bool leftover) +{ + uintptr *b, off, shift; + byte *p; + + if((byte*)v+size*n > (byte*)runtime_mheap.arena_used || (byte*)v < runtime_mheap.arena_start) + runtime_throw("markspan: bad pointer"); + + p = v; + if(leftover) // mark a boundary just past end of last block too + n++; + for(; n-- > 0; p += size) { + // Okay to use non-atomic ops here, because we control + // the entire span, and each bitmap word has bits for only + // one span, so no other goroutines are changing these + // bitmap words. 
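
markallocated and markfreed above always perform the same address arithmetic: a heap word's offset from the arena start selects a bitmap word (off/wordsPerBitmapWord) and a shift within it (off%wordsPerBitmapWord), and four flag bits per heap word are spread across the bitmap word at multiples of that shift. The model below keeps only that arithmetic; the bitmap here grows upward from index 0 rather than downward from arena_start, and the flag positions are illustrative rather than copied from the runtime's headers.

package main

import "fmt"

const (
	wordsPerBitmapWord = 16 // 64-bit bitmap word, 4 flag bits per heap word
	bitAllocated       = 1 << (wordsPerBitmapWord * 0)
	bitNoPointers      = 1 << (wordsPerBitmapWord * 1)
	bitMarked          = 1 << (wordsPerBitmapWord * 2)
	bitSpecial         = 1 << (wordsPerBitmapWord * 3)
	bitMask            = bitAllocated | bitNoPointers | bitMarked | bitSpecial
)

type bitmap []uint64

// locate maps a heap-word offset to its bitmap word and flag shift.
func (b bitmap) locate(off int) (word *uint64, shift uint) {
	return &b[off/wordsPerBitmapWord], uint(off % wordsPerBitmapWord)
}

func (b bitmap) markAllocated(off int) {
	w, s := b.locate(off)
	*w = (*w &^ (bitMask << s)) | bitAllocated<<s
}

func (b bitmap) markFreed(off int) {
	w, s := b.locate(off)
	*w &^= bitMask << s
}

func (b bitmap) allocated(off int) bool {
	w, s := b.locate(off)
	return (*w>>s)&bitAllocated != 0
}

func main() {
	bm := make(bitmap, 4) // describes 64 heap words
	bm.markAllocated(21)
	fmt.Println(bm.allocated(21), bm.allocated(22)) // true false
	bm.markFreed(21)
	fmt.Println(bm.allocated(21)) // false
}
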
+ off = (uintptr*)p - (uintptr*)runtime_mheap.arena_start; // word offset + b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1; + shift = off % wordsPerBitmapWord; + *b = (*b & ~(bitMask< (byte*)runtime_mheap.arena_used || (byte*)v < runtime_mheap.arena_start) + runtime_throw("markspan: bad pointer"); + + p = v; + off = p - (uintptr*)runtime_mheap.arena_start; // word offset + if(off % wordsPerBitmapWord != 0) + runtime_throw("markspan: unaligned pointer"); + b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1; + n /= PtrSize; + if(n%wordsPerBitmapWord != 0) + runtime_throw("unmarkspan: unaligned length"); + // Okay to use non-atomic ops here, because we control + // the entire span, and each bitmap word has bits for only + // one span, so no other goroutines are changing these + // bitmap words. + n /= wordsPerBitmapWord; + while(n-- > 0) + *b-- = 0; +} + +bool +runtime_blockspecial(void *v) +{ + uintptr *b, off, shift; + + off = (uintptr*)v - (uintptr*)runtime_mheap.arena_start; + b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1; + shift = off % wordsPerBitmapWord; + + return (*b & (bitSpecial< 1: use atomic op + if(runtime_casp((void**)b, (void*)obits, (void*)bits)) + break; + } + } +} + +void +runtime_MHeap_MapBits(MHeap *h) +{ + // Caller has added extra mappings to the arena. + // Add extra mappings of bitmap words as needed. + // We allocate extra bitmap pieces in chunks of bitmapChunk. + enum { + bitmapChunk = 8192 + }; + uintptr n; + + n = (h->arena_used - h->arena_start) / wordsPerBitmapWord; + n = (n+bitmapChunk-1) & ~(bitmapChunk-1); + if(h->bitmap_mapped >= n) + return; + + runtime_SysMap(h->arena_start - n, n - h->bitmap_mapped); + h->bitmap_mapped = n; +} + void __go_enable_gc() { diff --git a/libgo/runtime/mheap.c b/libgo/runtime/mheap.c index 52c6d8c1baa..b36df258818 100644 --- a/libgo/runtime/mheap.c +++ b/libgo/runtime/mheap.c @@ -42,7 +42,6 @@ runtime_MHeap_Init(MHeap *h, void *(*alloc)(uintptr)) runtime_initlock(h); runtime_FixAlloc_Init(&h->spanalloc, sizeof(MSpan), alloc, RecordSpan, h); runtime_FixAlloc_Init(&h->cachealloc, sizeof(MCache), alloc, nil, nil); - runtime_MHeapMap_Init(&h->map, alloc); // h->mapcache needs no init for(i=0; ifree); i++) runtime_MSpanList_Init(&h->free[i]); @@ -80,6 +79,7 @@ MHeap_AllocLocked(MHeap *h, uintptr npage, int32 sizeclass) { uintptr n; MSpan *s, *t; + PageID p; // Try in fixed-size lists up to max. 
for(n=npage; n < nelem(h->free); n++) { @@ -113,18 +113,29 @@ HaveSpan: mstats.mspan_sys = h->spanalloc.sys; runtime_MSpan_Init(t, s->start + npage, s->npages - npage); s->npages = npage; - runtime_MHeapMap_Set(&h->map, t->start - 1, s); - runtime_MHeapMap_Set(&h->map, t->start, t); - runtime_MHeapMap_Set(&h->map, t->start + t->npages - 1, t); + p = t->start; + if(sizeof(void*) == 8) + p -= ((uintptr)h->arena_start>>PageShift); + if(p > 0) + h->map[p-1] = s; + h->map[p] = t; + h->map[p+t->npages-1] = t; + *(uintptr*)(t->start<start<state = MSpanInUse; MHeap_FreeLocked(h, t); } + if(*(uintptr*)(s->start<start<npages<sizeclass = sizeclass; + p = s->start; + if(sizeof(void*) == 8) + p -= ((uintptr)h->arena_start>>PageShift); for(n=0; nmap, s->start+n, s); + h->map[p+n] = s; return s; } @@ -162,6 +173,7 @@ MHeap_Grow(MHeap *h, uintptr npage) uintptr ask; void *v; MSpan *s; + PageID p; // Ask for a big chunk, to reduce the number of mappings // the operating system needs to track; also amortizes @@ -169,68 +181,72 @@ MHeap_Grow(MHeap *h, uintptr npage) // Allocate a multiple of 64kB (16 pages). npage = (npage+15)&~15; ask = npage< (uintptr)(h->arena_end - h->arena_used)) + return false; + if(ask < HeapAllocChunk && HeapAllocChunk <= h->arena_end - h->arena_used) ask = HeapAllocChunk; - v = runtime_SysAlloc(ask); + v = runtime_MHeap_SysAlloc(h, ask); if(v == nil) { if(ask > (npage<min || h->min == nil) - h->min = v; - if((byte*)v+ask > h->max) - h->max = (byte*)v+ask; - - // NOTE(rsc): In tcmalloc, if we've accumulated enough - // system allocations, the heap map gets entirely allocated - // in 32-bit mode. (In 64-bit mode that's not practical.) - if(!runtime_MHeapMap_Preallocate(&h->map, ((uintptr)v>>PageShift) - 1, (ask>>PageShift) + 2)) { - runtime_SysFree(v, ask); - return false; - } - // Create a fake "in use" span and free it, so that the // right coalescing happens. s = runtime_FixAlloc_Alloc(&h->spanalloc); mstats.mspan_inuse = h->spanalloc.inuse; mstats.mspan_sys = h->spanalloc.sys; runtime_MSpan_Init(s, (uintptr)v>>PageShift, ask>>PageShift); - runtime_MHeapMap_Set(&h->map, s->start, s); - runtime_MHeapMap_Set(&h->map, s->start + s->npages - 1, s); + p = s->start; + if(sizeof(void*) == 8) + p -= ((uintptr)h->arena_start>>PageShift); + h->map[p] = s; + h->map[p + s->npages - 1] = s; s->state = MSpanInUse; MHeap_FreeLocked(h, s); return true; } -// Look up the span at the given page number. -// Page number is guaranteed to be in map +// Look up the span at the given address. +// Address is guaranteed to be in map // and is guaranteed to be start or end of span. MSpan* -runtime_MHeap_Lookup(MHeap *h, PageID p) +runtime_MHeap_Lookup(MHeap *h, void *v) { - return runtime_MHeapMap_Get(&h->map, p); + uintptr p; + + p = (uintptr)v; + if(sizeof(void*) == 8) + p -= (uintptr)h->arena_start; + return h->map[p >> PageShift]; } -// Look up the span at the given page number. -// Page number is *not* guaranteed to be in map +// Look up the span at the given address. +// Address is *not* guaranteed to be in map // and may be anywhere in the span. // Map entries for the middle of a span are only // valid for allocated spans. Free spans may have // other garbage in their middles, so we have to // check for that. 
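
The new lookup path above replaces the old radix tree with a flat array indexed by page number: subtract the arena base, shift by the page size, and index h->map, with the span recorded in every page slot it covers. A toy Go version of that addressing (heap, span and the field names are invented; the real map is an array inside MHeap, as the nelem(h->map) check above suggests):

package main

import "fmt"

const pageShift = 12 // 4 KB pages

type span struct {
	startPage uintptr // absolute page number of the span's first page
	npages    uintptr
}

type heap struct {
	arenaStart uintptr
	pages      []*span // one entry per arena page
}

// setSpan records s in every page slot it covers, so a lookup from any
// address inside the span finds it, as MHeap_AllocLocked does above.
func (h *heap) setSpan(s *span) {
	base := s.startPage - (h.arenaStart >> pageShift)
	for i := uintptr(0); i < s.npages; i++ {
		h.pages[base+i] = s
	}
}

// lookup returns the span containing addr, or nil.
func (h *heap) lookup(addr uintptr) *span {
	if addr < h.arenaStart {
		return nil
	}
	p := (addr - h.arenaStart) >> pageShift
	if p >= uintptr(len(h.pages)) {
		return nil
	}
	return h.pages[p]
}

func main() {
	h := &heap{arenaStart: 1 << 20, pages: make([]*span, 256)}
	s := &span{startPage: (1<<20)>>pageShift + 10, npages: 3}
	h.setSpan(s)
	fmt.Println(h.lookup(1<<20 + 10<<pageShift + 100) == s) // true
	fmt.Println(h.lookup(1<<20 + 13<<pageShift) == nil)     // true: page never assigned
}
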
MSpan* -runtime_MHeap_LookupMaybe(MHeap *h, PageID p) +runtime_MHeap_LookupMaybe(MHeap *h, void *v) { MSpan *s; + PageID p, q; - s = runtime_MHeapMap_GetMaybe(&h->map, p); + if((byte*)v < h->arena_start || (byte*)v >= h->arena_used) + return nil; + p = (uintptr)v>>PageShift; + q = p; + if(sizeof(void*) == 8) + q -= (uintptr)h->arena_start >> PageShift; + s = h->map[q]; if(s == nil || p < s->start || p - s->start >= s->npages) return nil; if(s->state != MSpanInUse) @@ -259,7 +275,9 @@ runtime_MHeap_Free(MHeap *h, MSpan *s, int32 acct) static void MHeap_FreeLocked(MHeap *h, MSpan *s) { + uintptr *sp, *tp; MSpan *t; + PageID p; if(s->state != MSpanInUse || s->ref != 0) { // runtime_printf("MHeap_FreeLocked - span %p ptr %p state %d ref %d\n", s, s->start<state, s->ref); @@ -267,21 +285,30 @@ MHeap_FreeLocked(MHeap *h, MSpan *s) } s->state = MSpanFree; runtime_MSpanList_Remove(s); + sp = (uintptr*)(s->start<map, s->start - 1)) != nil && t->state != MSpanInUse) { + p = s->start; + if(sizeof(void*) == 8) + p -= (uintptr)h->arena_start >> PageShift; + if(p > 0 && (t = h->map[p-1]) != nil && t->state != MSpanInUse) { + tp = (uintptr*)(t->start<start = t->start; s->npages += t->npages; - runtime_MHeapMap_Set(&h->map, s->start, s); + p -= t->npages; + h->map[p] = s; runtime_MSpanList_Remove(t); t->state = MSpanDead; runtime_FixAlloc_Free(&h->spanalloc, t); mstats.mspan_inuse = h->spanalloc.inuse; mstats.mspan_sys = h->spanalloc.sys; } - if((t = runtime_MHeapMap_Get(&h->map, s->start + s->npages)) != nil && t->state != MSpanInUse) { + if(p+s->npages < nelem(h->map) && (t = h->map[p+s->npages]) != nil && t->state != MSpanInUse) { + tp = (uintptr*)(t->start<npages += t->npages; - runtime_MHeapMap_Set(&h->map, s->start + s->npages - 1, s); + h->map[p + s->npages - 1] = s; runtime_MSpanList_Remove(t); t->state = MSpanDead; runtime_FixAlloc_Free(&h->spanalloc, t); @@ -341,10 +368,14 @@ runtime_MSpanList_IsEmpty(MSpan *list) void runtime_MSpanList_Insert(MSpan *list, MSpan *span) { - if(span->next != nil || span->prev != nil) + if(span->next != nil || span->prev != nil) { + // runtime_printf("failed MSpanList_Insert %p %p %p\n", span, span->next, span->prev); runtime_throw("MSpanList_Insert"); + } span->next = list->next; span->prev = list; span->next->prev = span; span->prev->next = span; } + + diff --git a/libgo/runtime/mheapmap32.c b/libgo/runtime/mheapmap32.c deleted file mode 100644 index 547c602fe3f..00000000000 --- a/libgo/runtime/mheapmap32.c +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Heap map, 32-bit version -// See malloc.h and mheap.c for overview. - -#include "runtime.h" -#include "malloc.h" - -#if __SIZEOF_POINTER__ == 4 - -// 3-level radix tree mapping page ids to Span*. 
-void -runtime_MHeapMap_Init(MHeapMap *m, void *(*allocator)(uintptr)) -{ - m->allocator = allocator; -} - -MSpan* -runtime_MHeapMap_Get(MHeapMap *m, PageID k) -{ - int32 i1, i2; - - i2 = k & MHeapMap_Level2Mask; - k >>= MHeapMap_Level2Bits; - i1 = k & MHeapMap_Level1Mask; - k >>= MHeapMap_Level1Bits; - if(k != 0) - runtime_throw("MHeapMap_Get"); - - return m->p[i1]->s[i2]; -} - -MSpan* -runtime_MHeapMap_GetMaybe(MHeapMap *m, PageID k) -{ - int32 i1, i2; - MHeapMapNode2 *p2; - - i2 = k & MHeapMap_Level2Mask; - k >>= MHeapMap_Level2Bits; - i1 = k & MHeapMap_Level1Mask; - k >>= MHeapMap_Level1Bits; - if(k != 0) - runtime_throw("MHeapMap_Get"); - - p2 = m->p[i1]; - if(p2 == nil) - return nil; - return p2->s[i2]; -} - -void -runtime_MHeapMap_Set(MHeapMap *m, PageID k, MSpan *s) -{ - int32 i1, i2; - - i2 = k & MHeapMap_Level2Mask; - k >>= MHeapMap_Level2Bits; - i1 = k & MHeapMap_Level1Mask; - k >>= MHeapMap_Level1Bits; - if(k != 0) - runtime_throw("MHeapMap_Set"); - - m->p[i1]->s[i2] = s; -} - -// Allocate the storage required for entries [k, k+1, ..., k+len-1] -// so that Get and Set calls need not check for nil pointers. -bool -runtime_MHeapMap_Preallocate(MHeapMap *m, PageID k, uintptr len) -{ - uintptr end; - int32 i1; - MHeapMapNode2 *p2; - - end = k+len; - while(k < end) { - if((k >> MHeapMap_TotalBits) != 0) - return false; - i1 = (k >> MHeapMap_Level2Bits) & MHeapMap_Level1Mask; - - // first-level pointer - if(m->p[i1] == nil) { - p2 = m->allocator(sizeof *p2); - if(p2 == nil) - return false; - mstats.heapmap_sys += sizeof *p2; - m->p[i1] = p2; - } - - // advance key past this leaf node - k = ((k >> MHeapMap_Level2Bits) + 1) << MHeapMap_Level2Bits; - } - return true; -} - -#endif /* __SIZEOF_POINTER__ == 4 */ diff --git a/libgo/runtime/mheapmap32.h b/libgo/runtime/mheapmap32.h deleted file mode 100644 index 2861624690f..00000000000 --- a/libgo/runtime/mheapmap32.h +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Free(v) must be able to determine the MSpan containing v. -// The MHeapMap is a 2-level radix tree mapping page numbers to MSpans. 
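
For contrast with the flat array that replaces it, the structure being deleted here is a two-level radix tree over 20-bit page numbers, with leaf nodes allocated lazily. A compact Go rendering of the same shape (pageMap, leaf and span are invented names; the C code preallocates leaves up front via MHeapMap_Preallocate rather than on first set):

package main

import "fmt"

const (
	level1Bits = 10
	level2Bits = 10
	level1Mask = 1<<level1Bits - 1
	level2Mask = 1<<level2Bits - 1
)

type span struct{ npages uintptr }

// leaf holds the spans for one contiguous run of 1<<level2Bits pages.
type leaf struct {
	s [1 << level2Bits]*span
}

// pageMap maps a 20-bit page number to its span in two index steps.
type pageMap struct {
	p [1 << level1Bits]*leaf
}

func (m *pageMap) set(k uintptr, s *span) {
	i1 := (k >> level2Bits) & level1Mask
	i2 := k & level2Mask
	if m.p[i1] == nil {
		m.p[i1] = new(leaf)
	}
	m.p[i1].s[i2] = s
}

func (m *pageMap) get(k uintptr) *span {
	l := m.p[(k>>level2Bits)&level1Mask]
	if l == nil {
		return nil
	}
	return l.s[k&level2Mask]
}

func main() {
	var m pageMap
	s := &span{npages: 4}
	m.set(0x3f2a7, s)
	fmt.Println(m.get(0x3f2a7) == s, m.get(0x3f2a8) == nil) // true true
}
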
- -typedef struct MHeapMapNode2 MHeapMapNode2; - -enum -{ - // 32 bit address - 12 bit page size = 20 bits to map - MHeapMap_Level1Bits = 10, - MHeapMap_Level2Bits = 10, - - MHeapMap_TotalBits = - MHeapMap_Level1Bits + - MHeapMap_Level2Bits, - - MHeapMap_Level1Mask = (1<allocator = allocator; -} - -MSpan* -runtime_MHeapMap_Get(MHeapMap *m, PageID k) -{ - int32 i1, i2, i3; - - i3 = k & MHeapMap_Level3Mask; - k >>= MHeapMap_Level3Bits; - i2 = k & MHeapMap_Level2Mask; - k >>= MHeapMap_Level2Bits; - i1 = k & MHeapMap_Level1Mask; - k >>= MHeapMap_Level1Bits; - if(k != 0) - runtime_throw("MHeapMap_Get"); - - return m->p[i1]->p[i2]->s[i3]; -} - -MSpan* -runtime_MHeapMap_GetMaybe(MHeapMap *m, PageID k) -{ - int32 i1, i2, i3; - MHeapMapNode2 *p2; - MHeapMapNode3 *p3; - - i3 = k & MHeapMap_Level3Mask; - k >>= MHeapMap_Level3Bits; - i2 = k & MHeapMap_Level2Mask; - k >>= MHeapMap_Level2Bits; - i1 = k & MHeapMap_Level1Mask; - k >>= MHeapMap_Level1Bits; - if(k != 0) - runtime_throw("MHeapMap_Get"); - - p2 = m->p[i1]; - if(p2 == nil) - return nil; - p3 = p2->p[i2]; - if(p3 == nil) - return nil; - return p3->s[i3]; -} - -void -runtime_MHeapMap_Set(MHeapMap *m, PageID k, MSpan *s) -{ - int32 i1, i2, i3; - - i3 = k & MHeapMap_Level3Mask; - k >>= MHeapMap_Level3Bits; - i2 = k & MHeapMap_Level2Mask; - k >>= MHeapMap_Level2Bits; - i1 = k & MHeapMap_Level1Mask; - k >>= MHeapMap_Level1Bits; - if(k != 0) - runtime_throw("MHeapMap_Set"); - - m->p[i1]->p[i2]->s[i3] = s; -} - -// Allocate the storage required for entries [k, k+1, ..., k+len-1] -// so that Get and Set calls need not check for nil pointers. -bool -runtime_MHeapMap_Preallocate(MHeapMap *m, PageID k, uintptr len) -{ - uintptr end; - int32 i1, i2; - MHeapMapNode2 *p2; - MHeapMapNode3 *p3; - - end = k+len; - while(k < end) { - if((k >> MHeapMap_TotalBits) != 0) - return false; - i2 = (k >> MHeapMap_Level3Bits) & MHeapMap_Level2Mask; - i1 = (k >> (MHeapMap_Level3Bits + MHeapMap_Level2Bits)) & MHeapMap_Level1Mask; - - // first-level pointer - if((p2 = m->p[i1]) == nil) { - p2 = m->allocator(sizeof *p2); - if(p2 == nil) - return false; - mstats.heapmap_sys += sizeof *p2; - m->p[i1] = p2; - } - - // second-level pointer - if(p2->p[i2] == nil) { - p3 = m->allocator(sizeof *p3); - if(p3 == nil) - return false; - mstats.heapmap_sys += sizeof *p3; - p2->p[i2] = p3; - } - - // advance key past this leaf node - k = ((k >> MHeapMap_Level3Bits) + 1) << MHeapMap_Level3Bits; - } - return true; -} - -#endif /* __SIZEOF_POINTER__ == 8 */ diff --git a/libgo/runtime/mheapmap64.h b/libgo/runtime/mheapmap64.h deleted file mode 100644 index be304cb2e8b..00000000000 --- a/libgo/runtime/mheapmap64.h +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Free(v) must be able to determine the MSpan containing v. -// The MHeapMap is a 3-level radix tree mapping page numbers to MSpans. -// -// NOTE(rsc): On a 32-bit platform (= 20-bit page numbers), -// we can swap in a 2-level radix tree. -// -// NOTE(rsc): We use a 3-level tree because tcmalloc does, but -// having only three levels requires approximately 1 MB per node -// in the tree, making the minimum map footprint 3 MB. -// Using a 4-level tree would cut the minimum footprint to 256 kB. -// On the other hand, it's just virtual address space: most of -// the memory is never going to be touched, thus never paged in. 
- -typedef struct MHeapMapNode2 MHeapMapNode2; -typedef struct MHeapMapNode3 MHeapMapNode3; - -enum -{ - // 64 bit address - 12 bit page size = 52 bits to map - MHeapMap_Level1Bits = 18, - MHeapMap_Level2Bits = 18, - MHeapMap_Level3Bits = 16, - - MHeapMap_TotalBits = - MHeapMap_Level1Bits + - MHeapMap_Level2Bits + - MHeapMap_Level3Bits, - - MHeapMap_Level1Mask = (1<stk, (byte*)stk, nstk*sizeof stk[0]) == 0) return b; - b = runtime_mallocgc(sizeof *b + nstk*sizeof stk[0], RefNoProfiling, 0, 1); + b = runtime_mallocgc(sizeof *b + nstk*sizeof stk[0], FlagNoProfiling, 0, 1); bucketmem += sizeof *b + nstk*sizeof stk[0]; runtime_memmove(b->stk, stk, nstk*sizeof stk[0]); b->hash = h; @@ -134,7 +134,7 @@ setaddrbucket(uintptr addr, Bucket *b) if(ah->addr == (addr>>20)) goto found; - ah = runtime_mallocgc(sizeof *ah, RefNoProfiling, 0, 1); + ah = runtime_mallocgc(sizeof *ah, FlagNoProfiling, 0, 1); addrmem += sizeof *ah; ah->next = addrhash[h]; ah->addr = addr>>20; @@ -142,7 +142,7 @@ setaddrbucket(uintptr addr, Bucket *b) found: if((e = addrfree) == nil) { - e = runtime_mallocgc(64*sizeof *e, RefNoProfiling, 0, 0); + e = runtime_mallocgc(64*sizeof *e, FlagNoProfiling, 0, 0); addrmem += 64*sizeof *e; for(i=0; i+1<64; i++) e[i].next = &e[i+1]; diff --git a/libgo/runtime/msize.c b/libgo/runtime/msize.c index 8b021a2b6b3..6e82885bab4 100644 --- a/libgo/runtime/msize.c +++ b/libgo/runtime/msize.c @@ -57,7 +57,7 @@ runtime_SizeToClass(int32 size) void runtime_InitSizes(void) { - int32 align, sizeclass, size, osize, nextsize, n; + int32 align, sizeclass, size, nextsize, n; uint32 i; uintptr allocsize, npages; @@ -81,8 +81,7 @@ runtime_InitSizes(void) // the leftover is less than 1/8 of the total, // so wasted space is at most 12.5%. allocsize = PageSize; - osize = size + RefcountOverhead; - while(allocsize%osize > (allocsize/8)) + while(allocsize%size > allocsize/8) allocsize += PageSize; npages = allocsize >> PageShift; @@ -93,7 +92,7 @@ runtime_InitSizes(void) // different sizes. 
if(sizeclass > 1 && (int32)npages == runtime_class_to_allocnpages[sizeclass-1] - && allocsize/osize == allocsize/(runtime_class_to_size[sizeclass-1]+RefcountOverhead)) { + && allocsize/size == allocsize/runtime_class_to_size[sizeclass-1]) { runtime_class_to_size[sizeclass-1] = size; continue; } diff --git a/libgo/runtime/runtime.h b/libgo/runtime/runtime.h index 95216e4a5ca..011ba7dab6b 100644 --- a/libgo/runtime/runtime.h +++ b/libgo/runtime/runtime.h @@ -185,6 +185,7 @@ void runtime_walkfintab(void (*fn)(void*), void (*scan)(byte *, int64)); #define runtime_mmap mmap #define runtime_munmap(p, s) munmap((p), (s)) #define runtime_cas(pval, old, new) __sync_bool_compare_and_swap (pval, old, new) +#define runtime_casp(pval, old, new) __sync_bool_compare_and_swap (pval, old, new) struct __go_func_type; void reflect_call(const struct __go_func_type *, const void *, _Bool, void **, diff --git a/libgo/runtime/sigqueue.goc b/libgo/runtime/sigqueue.goc index b5f2954bc8e..7cbd739e51e 100644 --- a/libgo/runtime/sigqueue.goc +++ b/libgo/runtime/sigqueue.goc @@ -102,7 +102,7 @@ func Signame(sig int32) (name String) { s = buf; } int32 len = __builtin_strlen(s); - unsigned char *data = runtime_mallocgc(len, RefNoPointers, 0, 0); + unsigned char *data = runtime_mallocgc(len, FlagNoPointers, 0, 0); __builtin_memcpy(data, s, len); name.__data = data; name.__length = len; diff --git a/libgo/syscalls/exec.go b/libgo/syscalls/exec.go index d0f56d3b923..7a9ee2825dc 100644 --- a/libgo/syscalls/exec.go +++ b/libgo/syscalls/exec.go @@ -12,7 +12,7 @@ import "unsafe" func libc_fcntl(fd int, cmd int, arg int) int __asm__ ("fcntl") func libc_fork() Pid_t __asm__ ("fork") -func libc_chdir(name *byte) int __asm__ ("chdir"); +func libc_chdir(name *byte) int __asm__ ("chdir") func libc_dup2(int, int) int __asm__ ("dup2") func libc_execve(*byte, **byte, **byte) int __asm__ ("execve") func libc_sysexit(int) __asm__ ("_exit") @@ -27,17 +27,17 @@ func libc_wait4(Pid_t, *int, int, *Rusage) Pid_t __asm__ ("wait4") func forkAndExecInChild(argv0 *byte, argv []*byte, envv []*byte, traceme bool, dir *byte, fd []int, pipe int) (pid int, err int) { // Declare all variables at top in case any // declarations require heap allocation (e.g., err1). - var r1, r2, err1 uintptr; - var nextfd int; - var i int; + var r1, r2, err1 uintptr + var nextfd int + var i int - darwin := OS == "darwin"; + darwin := OS == "darwin" // About to call fork. // No more allocation or calls of non-assembly functions. - child := libc_fork(); + child := libc_fork() if child == -1 { - return 0, GetErrno(); + return 0, GetErrno() } if child != 0 { @@ -50,41 +50,41 @@ func forkAndExecInChild(argv0 *byte, argv []*byte, envv []*byte, traceme bool, d // Enable tracing if requested. if traceme { if libc_ptrace(_PTRACE_TRACEME, 0, 0, nil) < 0 { - goto childerror; + goto childerror } } // Chdir if dir != nil { - r := libc_chdir(dir); + r := libc_chdir(dir) if r < 0 { - goto childerror; + goto childerror } } // Pass 1: look for fd[i] < i and move those up above len(fd) // so that pass 2 won't stomp on an fd it needs later. 
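
The two passes described in the comment above exist because dup2(fd[i], i) could overwrite a descriptor that a later iteration still needs as a source. The syscall-free sketch below (shuffle and its map-based fd table are invented for illustration, and the pipe and close handling of the real code are omitted) just shows the ordering argument: pass 1 parks any source sitting below its destination above len(fd), so pass 2 can land every source on its final number safely.

package main

import "fmt"

// shuffle records, for each child descriptor number, which parent
// descriptor ends up duplicated onto it.
func shuffle(fd []int) map[int]int {
	table := map[int]int{}                          // child fd -> duplicated-from fd
	dup2 := func(from, to int) { table[to] = from } // stand-in for the real dup2

	nextfd := len(fd)
	for i := range fd {
		if fd[i] >= 0 && fd[i] < i {
			dup2(fd[i], nextfd) // pass 1: move it out of the way
			fd[i] = nextfd
			nextfd++
		}
	}
	for i := range fd {
		if fd[i] >= 0 {
			dup2(fd[i], i) // pass 2: land it on its final number
		}
	}
	return table
}

func main() {
	// Intended child layout: 0 <- parent fd 5, 1 <- parent fd 0, 2 <- parent fd 1.
	// The printed table also shows the temporary slots (3 and 4) made by pass 1.
	fmt.Println(shuffle([]int{5, 0, 1}))
}
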
- nextfd = int(len(fd)); + nextfd = int(len(fd)) if pipe < nextfd { - r := libc_dup2(pipe, nextfd); + r := libc_dup2(pipe, nextfd) if r == -1 { - goto childerror; + goto childerror } - libc_fcntl(nextfd, F_SETFD, FD_CLOEXEC); - pipe = nextfd; - nextfd++; + libc_fcntl(nextfd, F_SETFD, FD_CLOEXEC) + pipe = nextfd + nextfd++ } for i = 0; i < len(fd); i++ { if fd[i] >= 0 && fd[i] < int(i) { - r := libc_dup2(fd[i], nextfd); + r := libc_dup2(fd[i], nextfd) if r == -1 { - goto childerror; + goto childerror } - libc_fcntl(nextfd, F_SETFD, FD_CLOEXEC); - fd[i] = nextfd; - nextfd++; - if nextfd == pipe { // don't stomp on pipe - nextfd++; + libc_fcntl(nextfd, F_SETFD, FD_CLOEXEC) + fd[i] = nextfd + nextfd++ + if nextfd == pipe { // don't stomp on pipe + nextfd++ } } } @@ -92,23 +92,23 @@ func forkAndExecInChild(argv0 *byte, argv []*byte, envv []*byte, traceme bool, d // Pass 2: dup fd[i] down onto i. for i = 0; i < len(fd); i++ { if fd[i] == -1 { - libc_close(i); - continue; + libc_close(i) + continue } if fd[i] == int(i) { // dup2(i, i) won't clear close-on-exec flag on Linux, // probably not elsewhere either. - r := libc_fcntl(fd[i], F_SETFD, 0); + r := libc_fcntl(fd[i], F_SETFD, 0) if r != 0 { - goto childerror; + goto childerror } - continue; + continue } // The new fd is created NOT close-on-exec, // which is exactly what we want. - r := libc_dup2(fd[i], i); + r := libc_dup2(fd[i], i) if r == -1 { - goto childerror; + goto childerror } } @@ -117,17 +117,17 @@ func forkAndExecInChild(argv0 *byte, argv []*byte, envv []*byte, traceme bool, d // Programs that know they inherit fds >= 3 will need // to set them close-on-exec. for i = len(fd); i < 3; i++ { - libc_close(i); + libc_close(i) } // Time to exec. - libc_execve(argv0, &argv[0], &envv[0]); + libc_execve(argv0, &argv[0], &envv[0]) childerror: // send error code on pipe - var e uintptr = uintptr(GetErrno()); + var e uintptr = uintptr(GetErrno()) libc_write(pipe, (*byte)(unsafe.Pointer(&e)), - Size_t(unsafe.Sizeof(err1))); + Size_t(unsafe.Sizeof(err1))) for { libc_sysexit(253) } @@ -135,79 +135,78 @@ childerror: // Calling panic is not actually safe, // but the for loop above won't break // and this shuts up the compiler. - panic("unreached"); + panic("unreached") } func forkExec(argv0 string, argv []string, envv []string, traceme bool, dir string, fd []int) (pid int, err int) { - var p [2]int; - var r1 int; - var err1 uintptr; - var wstatus WaitStatus; + var p [2]int + var r1 int + var err1 uintptr + var wstatus WaitStatus - p[0] = -1; - p[1] = -1; + p[0] = -1 + p[1] = -1 // Convert args to C form. - argv0p := StringBytePtr(argv0); - argvp := StringArrayPtr(argv); - envvp := StringArrayPtr(envv); - var dirp *byte; + argv0p := StringBytePtr(argv0) + argvp := StringArrayPtr(argv) + envvp := StringArrayPtr(envv) + var dirp *byte if len(dir) > 0 { - dirp = StringBytePtr(dir); + dirp = StringBytePtr(dir) } // Acquire the fork lock so that no other threads // create new fds that are not yet close-on-exec // before we fork. - ForkLock.Lock(); + ForkLock.Lock() // Allocate child status pipe close on exec. if err = Pipe(p[0:]); err != 0 { - goto error; + goto error } - var val int; - if val, err = fcntl(p[0], F_SETFD, FD_CLOEXEC); err != 0 { - goto error; + if _, err = fcntl(p[0], F_SETFD, FD_CLOEXEC); err != 0 { + goto error } - if val, err = fcntl(p[1], F_SETFD, FD_CLOEXEC); err != 0 { - goto error; + if _, err = fcntl(p[1], F_SETFD, FD_CLOEXEC); err != 0 { + goto error } // Kick off child. 
- pid, err = forkAndExecInChild(argv0p, argvp, envvp, traceme, dirp, fd, p[1]); + pid, err = forkAndExecInChild(argv0p, argvp, envvp, traceme, dirp, fd, p[1]) if err != 0 { error: if p[0] >= 0 { - Close(p[0]); - Close(p[1]); + Close(p[0]) + Close(p[1]) } - ForkLock.Unlock(); + ForkLock.Unlock() return 0, err } - ForkLock.Unlock(); + ForkLock.Unlock() // Read child error status from pipe. - Close(p[1]); + Close(p[1]) n := libc_read(p[0], (*byte)(unsafe.Pointer(&err1)), - Size_t(unsafe.Sizeof(err1))); - err = 0; + Size_t(unsafe.Sizeof(err1))) + err = 0 if n < 0 { - err = GetErrno(); + err = GetErrno() } - Close(p[0]); + Close(p[0]) if err != 0 || n != 0 { if int(n) == unsafe.Sizeof(err1) { - err = int(err1); + err = int(err1) } if err == 0 { - err = EPIPE; + err = EPIPE } // Child failed; wait for it to exit, to make sure // the zombies don't accumulate. - pid1, err1 := Wait4(pid, &wstatus, 0, nil); + _, err1 := Wait4(pid, &wstatus, 0, nil) for err1 == EINTR { - pid1, err1 = Wait4(pid, &wstatus, 0, nil); + _, err1 = Wait4(pid, &wstatus, 0, nil) } return 0, err } @@ -218,31 +217,37 @@ func forkExec(argv0 string, argv []string, envv []string, traceme bool, dir stri // Combination of fork and exec, careful to be thread safe. func ForkExec(argv0 string, argv []string, envv []string, dir string, fd []int) (pid int, err int) { - return forkExec(argv0, argv, envv, false, dir, fd); + return forkExec(argv0, argv, envv, false, dir, fd) } // PtraceForkExec is like ForkExec, but starts the child in a traced state. func PtraceForkExec(argv0 string, argv []string, envv []string, dir string, fd []int) (pid int, err int) { - return forkExec(argv0, argv, envv, true, dir, fd); + return forkExec(argv0, argv, envv, true, dir, fd) } // Ordinary exec. func Exec(argv0 string, argv []string, envv []string) (err int) { - argv_arg := StringArrayPtr(argv); - envv_arg := StringArrayPtr(envv); - libc_execve(StringBytePtr(argv0), &argv_arg[0], &envv_arg[0]); - return GetErrno(); + argv_arg := StringArrayPtr(argv) + envv_arg := StringArrayPtr(envv) + libc_execve(StringBytePtr(argv0), &argv_arg[0], &envv_arg[0]) + return GetErrno() +} + +// StartProcess wraps ForkExec for package os. 
+func StartProcess(argv0 string, argv []string, envv []string, dir string, fd []int) (pid, handle int, err int) { + pid, err = forkExec(argv0, argv, envv, false, dir, fd) + return pid, 0, err } func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, errno int) { - var status int; - r := libc_wait4(Pid_t(pid), &status, options, rusage); - wpid = int(r); + var status int + r := libc_wait4(Pid_t(pid), &status, options, rusage) + wpid = int(r) if r < 0 { - errno = GetErrno(); + errno = GetErrno() } if wstatus != nil { - *wstatus = WaitStatus(status); + *wstatus = WaitStatus(status) } - return; + return } diff --git a/libgo/syscalls/socket.go b/libgo/syscalls/socket.go index 65c1916711c..e786653705b 100644 --- a/libgo/syscalls/socket.go +++ b/libgo/syscalls/socket.go @@ -232,6 +232,10 @@ func SetsockoptString(fd, level, opt int, s string) (errno int) { return setsockopt(fd, level, opt, uintptr(unsafe.Pointer(&[]byte(s)[0])), Socklen_t(len(s))) } +func SetsockoptIpMreq(fd, level, opt int, mreq *IpMreq) (errno int) { + return setsockopt(fd, level, opt, uintptr(unsafe.Pointer(mreq)), unsafe.Sizeof(*mreq)) +} + func Getsockname(fd int) (sa Sockaddr, errno int) { var rsa RawSockaddrAny; var len Socklen_t = SizeofSockaddrAny; diff --git a/libgo/testsuite/gotest b/libgo/testsuite/gotest index 28aba6e2dd8..93db3462e8a 100755 --- a/libgo/testsuite/gotest +++ b/libgo/testsuite/gotest @@ -131,6 +131,13 @@ esac # so that the tests do not have to refer to srcdir to find test data. ln -s $srcdir/* . +# Some tests refer to a ../testdata directory. +if test -e $srcdir/../testdata; then + rm -f ../testdata + abssrcdir=`cd $srcdir && pwd` + ln -s $abssrcdir/../testdata ../testdata +fi + # Copy the .go files because io/utils_test.go expects a regular file. case "x$gofiles" in x)