| @@ -0,0 +1,65 @@ | |||
| package main | |||
| import ( | |||
| "time" | |||
| "fmt" | |||
| "github.com/AFASystems/presence/internal/pkg/config" | |||
| "github.com/yosssi/gmq/mqtt" | |||
| "github.com/yosssi/gmq/mqtt/client" | |||
| "github.com/AFASystems/presence/internal/pkg/bridge/mqtthandler" | |||
| "github.com/segmentio/kafka-go" | |||
| ) | |||
| func main() { | |||
| cfg := config.Load() | |||
| cli := client.New(&client.Options{ | |||
| ErrorHandler: func(err error) { | |||
| fmt.Println("Error in initiating MQTT client: ", err) | |||
| }, | |||
| }) | |||
| defer cli.Terminate() | |||
| err := cli.Connect(&client.ConnectOptions{ | |||
| Network: "tcp", | |||
| Address: cfg.MQTTHost, | |||
| ClientID: []byte(cfg.MQTTClientID), | |||
| UserName: []byte(cfg.MQTTUser), | |||
| Password: []byte(cfg.MQTTPass), | |||
| }) | |||
| if err != nil { | |||
| fmt.Println("Error comes from here") | |||
| panic(err) | |||
| } | |||
| writer := kafkaWriter("127.0.0.1:9092", "rawbeacons") | |||
| defer writer.Close() | |||
| err = cli.Subscribe(&client.SubscribeOptions{ | |||
| SubReqs: []*client.SubReq{ | |||
| &client.SubReq{ | |||
| TopicFilter: []byte("publish_out/#"), | |||
| QoS: mqtt.QoS0, | |||
| Handler: func(topicName, message[]byte) { | |||
| mqtthandler.MqttHandler(writer, topicName, message) | |||
| }, | |||
| }, | |||
| }, | |||
| }) | |||
| if err != nil { | |||
| panic(err) | |||
| } | |||
| select {} | |||
| } | |||
| func kafkaWriter(kafkaURL, topic string) *kafka.Writer { | |||
| return &kafka.Writer{ | |||
| Addr: kafka.TCP(kafkaURL), | |||
| Topic: topic, | |||
| Balancer: &kafka.LeastBytes{}, | |||
| BatchSize: 100, | |||
| BatchTimeout: 10 * time.Millisecond, | |||
| } | |||
| } | |||
| @@ -0,0 +1,162 @@ | |||
| package main | |||
| import ( | |||
| "context" | |||
| "encoding/json" | |||
| "fmt" | |||
| "strings" | |||
| "time" | |||
| "github.com/AFASystems/presence/internal/pkg/model" | |||
| "github.com/AFASystems/presence/internal/pkg/mqttclient" | |||
| "github.com/redis/go-redis/v9" | |||
| "github.com/segmentio/kafka-go" | |||
| ) | |||
| func main() { | |||
| // Load global context to init beacons and latest list | |||
| appCtx := model.AppContext{ | |||
| Beacons: model.BeaconsList{ | |||
| Beacons: make(map[string]model.Beacon), | |||
| }, | |||
| LatestList: model.LatestBeaconsList{ | |||
| LatestList: make(map[string]model.Beacon), | |||
| }, | |||
| } | |||
| // Kafka writer idk why yet | |||
| writer := kafkaWriter("127.0.0.1:9092", "beacons") | |||
| // Kafka reader for Raw MQTT beacons | |||
| rawReader := kafkaReader("127.0.0.1:9092", "rawbeacons", "someID") | |||
| defer rawReader.Close() | |||
| // Kafka reader for API server updates | |||
| apiReader := kafkaReader("127.0.0.1:9092", "apibeacons", "someID") | |||
| defer apiReader.Close() | |||
| // Kafka reader for latest list updates | |||
| latestReader := kafkaReader("127.0.0.1:9092", "latestbeacons", "someID") | |||
| defer latestReader.Close() | |||
| defer writer.Close() | |||
| ctx := context.Background() | |||
| // Init Redis Client | |||
| client := redis.NewClient(&redis.Options{ | |||
| Addr: "127.0.0.1:6379", | |||
| Password: "", | |||
| }) | |||
| // Initialize list values from Redis | |||
| beaconsList, err := client.Get(ctx, "beaconsList").Result() | |||
| if err == redis.Nil { | |||
| fmt.Println("no beacons list, starting empty") | |||
| } else if err != nil { | |||
| panic(err) | |||
| } else { | |||
| json.Unmarshal([]byte(beaconsList), &appCtx.Beacons.Beacons) | |||
| } | |||
| // Initialize list values from Redis | |||
| latestList, err := client.Get(ctx, "latestList").Result() | |||
| if err == redis.Nil { | |||
| fmt.Println("no latest list, starting empty") | |||
| } else if err != nil { | |||
| panic(err) | |||
| } else { | |||
| json.Unmarshal([]byte(latestList), &appCtx.LatestList.LatestList) | |||
| } | |||
| // declare channel for collecting Kafka messages | |||
| chRaw := make(chan model.Incoming_json, 2000) | |||
| chApi := make(chan model.Incoming_json, 2000) | |||
| chLatest := make(chan model.Incoming_json, 2000) | |||
| go consume(rawReader, chRaw) | |||
| go consume(apiReader, chApi) | |||
| go consume(latestReader, chLatest) | |||
| for { | |||
| select { | |||
| case msg := <-chRaw: | |||
| processIncoming(msg, &appCtx) | |||
| case msg := <-chApi: | |||
| fmt.Println("api msg: ", msg) | |||
| case msg := <-chLatest: | |||
| fmt.Println("latest msg: ", msg) | |||
| } | |||
| } | |||
| } | |||
| func kafkaWriter(kafkaURL, topic string) *kafka.Writer { | |||
| return &kafka.Writer{ | |||
| Addr: kafka.TCP(kafkaURL), | |||
| Topic: topic, | |||
| Balancer: &kafka.LeastBytes{}, | |||
| BatchSize: 100, | |||
| BatchTimeout: 10 * time.Millisecond, | |||
| } | |||
| } | |||
| func kafkaReader(kafkaURL, topic, groupID string) *kafka.Reader { | |||
| brokers := strings.Split(kafkaURL, ",") | |||
| return kafka.NewReader(kafka.ReaderConfig{ | |||
| Brokers: brokers, | |||
| GroupID: groupID, | |||
| Topic: topic, | |||
| MinBytes: 1, | |||
| MaxBytes: 10e6, | |||
| }) | |||
| } | |||
| func consume(r *kafka.Reader, ch chan<- model.Incoming_json) { | |||
| for { | |||
| msg, err := r.ReadMessage(context.Background()) | |||
| if err != nil { | |||
| fmt.Println("error reading message:", err) | |||
| continue | |||
| } | |||
| var incoming model.Incoming_json | |||
| if err := json.Unmarshal(msg.Value, &incoming); err != nil { | |||
| fmt.Println("error in decoding string: ", err) | |||
| continue | |||
| } | |||
| ch <- incoming | |||
| } | |||
| } | |||
| func processIncoming(incoming model.Incoming_json, ctx *model.AppContext) { | |||
| defer func() { | |||
| if err := recover(); err != nil { | |||
| fmt.Println("work failed:", err) | |||
| } | |||
| }() | |||
| incoming = mqttclient.IncomingBeaconFilter(incoming) | |||
| id := mqttclient.GetBeaconID(incoming) | |||
| now := time.Now().Unix() | |||
| beacons := &ctx.Beacons | |||
| beacons.Lock.Lock() | |||
| defer beacons.Lock.Unlock() | |||
| latestList := &ctx.LatestList | |||
| latestList.Lock.Lock() | |||
| defer latestList.Lock.Unlock() | |||
| beacon, exists := beacons.Beacons[id] | |||
| if !exists { | |||
| fmt.Println("beacon does not yet exist") | |||
| fmt.Println("time now: ", now) | |||
| return | |||
| } | |||
| fmt.Println("Beacon does exist: ", beacon) | |||
| } | |||
| @@ -13,7 +13,7 @@ import ( | |||
| "github.com/AFASystems/presence/internal/pkg/config" | |||
| "github.com/AFASystems/presence/internal/pkg/httpserver" | |||
| "github.com/AFASystems/presence/internal/pkg/model" | |||
| "github.com/AFASystems/presence/internal/pkg/mqtt_client" | |||
| "github.com/AFASystems/presence/internal/pkg/mqttclient" | |||
| "github.com/AFASystems/presence/internal/pkg/persistence" | |||
| "github.com/boltdb/bolt" | |||
| "github.com/gorilla/websocket" | |||
| @@ -26,6 +26,8 @@ func main() { | |||
| signal.Notify(sigc, os.Interrupt) | |||
| cfg := config.Load() | |||
| fmt.Println("hello world") | |||
| db, err := bolt.Open("presence.db", 0644, nil) | |||
| if err != nil { | |||
| log.Fatal(err) | |||
| @@ -79,7 +81,7 @@ func main() { | |||
| } | |||
| persistence.LoadState(model.Db, ctx) | |||
| incomingChan := mqtt_client.IncomingMQTTProcessor(1*time.Second, cli, model.Db, ctx) | |||
| incomingChan := mqttclient.IncomingMQTTProcessor(1*time.Second, cli, model.Db, ctx) | |||
| err = cli.Subscribe(&client.SubscribeOptions{ | |||
| SubReqs: []*client.SubReq{ | |||
| @@ -96,7 +98,7 @@ func main() { | |||
| var readings []model.RawReading | |||
| err := json.Unmarshal(message, &readings) | |||
| if err != nil { | |||
| log.Printf("Errore parsing JSON: %v", err) | |||
| log.Printf("Error parsing JSON: %v", err) | |||
| return | |||
| } | |||
| @@ -9,10 +9,16 @@ require ( | |||
| github.com/gorilla/handlers v1.5.2 | |||
| github.com/gorilla/mux v1.8.1 | |||
| github.com/gorilla/websocket v1.5.3 | |||
| github.com/redis/go-redis/v9 v9.16.0 | |||
| github.com/segmentio/kafka-go v0.4.49 | |||
| github.com/yosssi/gmq v0.0.1 | |||
| ) | |||
| require ( | |||
| github.com/cespare/xxhash/v2 v2.3.0 // indirect | |||
| github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect | |||
| github.com/felixge/httpsnoop v1.0.3 // indirect | |||
| github.com/klauspost/compress v1.15.9 // indirect | |||
| github.com/pierrec/lz4/v4 v4.1.15 // indirect | |||
| golang.org/x/sys v0.37.0 // indirect | |||
| ) | |||
| @@ -1,5 +1,15 @@ | |||
| github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4= | |||
| github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= | |||
| github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= | |||
| github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= | |||
| github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= | |||
| github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= | |||
| github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= | |||
| github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= | |||
| github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= | |||
| github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= | |||
| github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= | |||
| github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= | |||
| github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= | |||
| github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= | |||
| github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE= | |||
| @@ -8,7 +18,31 @@ github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= | |||
| github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= | |||
| github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= | |||
| github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= | |||
| github.com/klauspost/compress v1.15.9 h1:wKRjX6JRtDdrE9qwa4b/Cip7ACOshUI4smpCQanqjSY= | |||
| github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= | |||
| github.com/pierrec/lz4/v4 v4.1.15 h1:MO0/ucJhngq7299dKLwIMtgTfbkoSPF6AoMYDd8Q4q0= | |||
| github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= | |||
| github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= | |||
| github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= | |||
| github.com/redis/go-redis/v9 v9.16.0 h1:OotgqgLSRCmzfqChbQyG1PHC3tLNR89DG4jdOERSEP4= | |||
| github.com/redis/go-redis/v9 v9.16.0/go.mod h1:u410H11HMLoB+TP67dz8rL9s6QW2j76l0//kSOd3370= | |||
| github.com/segmentio/kafka-go v0.4.49 h1:GJiNX1d/g+kG6ljyJEoi9++PUMdXGAxb7JGPiDCuNmk= | |||
| github.com/segmentio/kafka-go v0.4.49/go.mod h1:Y1gn60kzLEEaW28YshXyk2+VCUKbJ3Qr6DrnT3i4+9E= | |||
| github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= | |||
| github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= | |||
| github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= | |||
| github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= | |||
| github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY= | |||
| github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= | |||
| github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8= | |||
| github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= | |||
| github.com/yosssi/gmq v0.0.1 h1:GhlDVaAQoi3Mvjul/qJXXGfL4JBeE0GQwbWp3eIsja8= | |||
| github.com/yosssi/gmq v0.0.1/go.mod h1:mReykazh0U1JabvuWh1PEbzzJftqOQWsjr0Lwg5jL1Y= | |||
| golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= | |||
| golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= | |||
| golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ= | |||
| golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= | |||
| golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= | |||
| golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= | |||
| gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= | |||
| gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= | |||
| @@ -0,0 +1,105 @@ | |||
| package mqtthandler | |||
| import ( | |||
| "fmt" | |||
| "encoding/json" | |||
| "strings" | |||
| "log" | |||
| "strconv" | |||
| "os" | |||
| "context" | |||
| "time" | |||
| "github.com/AFASystems/presence/internal/pkg/model" | |||
| "github.com/segmentio/kafka-go" | |||
| ) | |||
| func MqttHandler(writer *kafka.Writer, topicName []byte, message []byte) { | |||
| hostname := strings.Split(string(topicName), "/")[1] | |||
| msgStr := string(message) | |||
| if strings.HasPrefix(msgStr, "[") { | |||
| var readings []model.RawReading | |||
| err := json.Unmarshal(message, &readings) | |||
| if err != nil { | |||
| log.Printf("Error parsing JSON: %v", err) | |||
| return | |||
| } | |||
| for _, reading := range readings { | |||
| if reading.Type == "Gateway" { | |||
| continue | |||
| } | |||
| incoming := model.Incoming_json{ | |||
| Hostname: hostname, | |||
| MAC: reading.MAC, | |||
| RSSI: int64(reading.RSSI), | |||
| Data: reading.RawData, | |||
| HB_ButtonCounter: parseButtonState(reading.RawData), | |||
| } | |||
| encodedMsg, err := json.Marshal(incoming) | |||
| if err != nil { | |||
| fmt.Println("Error in marshaling: ", err) | |||
| } | |||
| msg := kafka.Message{ | |||
| Value: encodedMsg, | |||
| } | |||
| err = writer.WriteMessages(context.Background(), msg) | |||
| if err != nil { | |||
| fmt.Println("Error in writing to Kafka: ", err) | |||
| } | |||
| fmt.Println("message sent: ", time.Now()) | |||
| } | |||
| } else { | |||
| s := strings.Split(string(message), ",") | |||
| if len(s) < 6 { | |||
| log.Printf("Messaggio CSV non valido: %s", msgStr) | |||
| return | |||
| } | |||
| rawdata := s[4] | |||
| buttonCounter := parseButtonState(rawdata) | |||
| if buttonCounter > 0 { | |||
| incoming := model.Incoming_json{} | |||
| i, _ := strconv.ParseInt(s[3], 10, 64) | |||
| incoming.Hostname = hostname | |||
| incoming.Beacon_type = "hb_button" | |||
| incoming.MAC = s[1] | |||
| incoming.RSSI = i | |||
| incoming.Data = rawdata | |||
| incoming.HB_ButtonCounter = buttonCounter | |||
| read_line := strings.TrimRight(string(s[5]), "\r\n") | |||
| it, err33 := strconv.Atoi(read_line) | |||
| if err33 != nil { | |||
| fmt.Println(it) | |||
| fmt.Println(err33) | |||
| os.Exit(2) | |||
| } | |||
| } | |||
| } | |||
| } | |||
// parseButtonState extracts the button counter from a hex advertisement
// payload (matched case-insensitively). Two frame layouts are recognised:
//
//   - frames starting "0201060303E1FF12" (length >= 38): counter is the
//     hex field at [34:38);
//   - frames starting "02010612FF590" (length >= 24): counter is the hex
//     field at [22:24).
//
// Unknown frames, short frames, or unparsable hex yield 0. The two prefixes
// are mutually exclusive, so a switch is equivalent to the original
// sequential checks.
func parseButtonState(raw string) int64 {
	raw = strings.ToUpper(raw)
	switch {
	case strings.HasPrefix(raw, "0201060303E1FF12") && len(raw) >= 38:
		if v, err := strconv.ParseInt(raw[34:38], 16, 64); err == nil {
			return v
		}
	case strings.HasPrefix(raw, "02010612FF590") && len(raw) >= 24:
		if v, err := strconv.ParseInt(raw[22:24], 16, 64); err == nil {
			return v
		}
	}
	return 0
}
| @@ -22,11 +22,11 @@ func getEnv(key, def string) string { | |||
| func Load() *Config { | |||
| return &Config{ | |||
| HTTPAddr: getEnv("HTTP_HOST_PATH", "0.0.0.0:1902"), | |||
| WSAddr: getEnv("HTTPWS_HOST_PATH", "0.0.0.0:1922"), | |||
| MQTTHost: getEnv("MQTT_HOST", "localhost:1883"), | |||
| MQTTUser: getEnv("MQTT_USERNAME", "chesnek"), | |||
| MQTTPass: getEnv("MQTT_PASSWORD", "chesnek"), | |||
| HTTPAddr: getEnv("HTTP_HOST_PATH", "0.0.0.0:8080"), | |||
| WSAddr: getEnv("HTTPWS_HOST_PATH", "0.0.0.0:8088"), | |||
| MQTTHost: getEnv("MQTT_HOST", "127.0.0.1:11883"), | |||
| MQTTUser: getEnv("MQTT_USERNAME", "user"), | |||
| MQTTPass: getEnv("MQTT_PASSWORD", "sandbox2024"), | |||
| MQTTClientID: getEnv("MQTT_CLIENT_ID", "presence-detector"), | |||
| DBPath: getEnv("DB_PATH", "/data/conf/presence/presence.db"), | |||
| } | |||
| @@ -162,12 +162,12 @@ func BeaconsDeleteHandler(beacons *model.BeaconsList, buttonsList map[string]mod | |||
| func latestBeaconsListHandler(latestList *model.LatestBeaconsList) http.HandlerFunc { | |||
| return func(w http.ResponseWriter, r *http.Request) { | |||
| latestList.LatestListLock.RLock() | |||
| latestList.Lock.RLock() | |||
| var la = make([]model.Beacon, 0) | |||
| for _, b := range latestList.LatestList { | |||
| la = append(la, b) | |||
| } | |||
| latestList.LatestListLock.RUnlock() | |||
| latestList.Lock.RUnlock() | |||
| js, err := json.Marshal(la) | |||
| if err != nil { | |||
| http.Error(w, err.Error(), http.StatusInternalServerError) | |||
| @@ -327,7 +327,7 @@ func serveLatestBeaconsWs(latestList *model.LatestBeaconsList) http.HandlerFunc | |||
| return | |||
| } | |||
| go latestBeaconWriter(ws, latestList.LatestList, &latestList.LatestListLock) | |||
| go latestBeaconWriter(ws, latestList.LatestList, &latestList.Lock) | |||
| reader(ws) | |||
| } | |||
| } | |||
| @@ -173,8 +173,8 @@ type RawReading struct { | |||
| } | |||
| type LatestBeaconsList struct { | |||
| LatestList map[string]Beacon | |||
| LatestListLock sync.RWMutex | |||
| LatestList map[string]Beacon | |||
| Lock sync.RWMutex | |||
| } | |||
| type HTTPResultsList struct { | |||
| @@ -1,4 +1,4 @@ | |||
| package mqtt_client | |||
| package mqttclient | |||
| import ( | |||
| "bytes" | |||
| @@ -14,17 +14,17 @@ import ( | |||
| "github.com/yosssi/gmq/mqtt/client" | |||
| ) | |||
| func getBeaconID(incoming model.Incoming_json) string { | |||
| func GetBeaconID(incoming model.Incoming_json) string { | |||
| unique_id := fmt.Sprintf("%s", incoming.MAC) | |||
| return unique_id | |||
| } | |||
| func updateLatestList(incoming model.Incoming_json, now int64, latestList *model.LatestBeaconsList) { | |||
| latestList.LatestListLock.Lock() | |||
| defer latestList.LatestListLock.Unlock() | |||
| latestList.Lock.Lock() | |||
| defer latestList.Lock.Unlock() | |||
| b := model.Beacon{ | |||
| Beacon_id: getBeaconID(incoming), | |||
| Beacon_id: GetBeaconID(incoming), | |||
| Beacon_type: incoming.Beacon_type, | |||
| Last_seen: now, | |||
| Incoming_JSON: incoming, | |||
| @@ -1,4 +1,4 @@ | |||
| package mqtt_client | |||
| package mqttclient | |||
| import ( | |||
| "fmt" | |||
| @@ -8,7 +8,7 @@ import ( | |||
| "github.com/AFASystems/presence/internal/pkg/model" | |||
| ) | |||
| func incomingBeaconFilter(incoming model.Incoming_json) model.Incoming_json { | |||
| func IncomingBeaconFilter(incoming model.Incoming_json) model.Incoming_json { | |||
| out_json := incoming | |||
| if incoming.Beacon_type == "hb_button" { | |||
| raw_data := incoming.Data | |||
| @@ -1,4 +1,4 @@ | |||
| package mqtt_client | |||
| package mqttclient | |||
| import ( | |||
| "encoding/json" | |||
| @@ -1,4 +1,4 @@ | |||
| package mqtt_client | |||
| package mqttclient | |||
| import ( | |||
| "fmt" | |||
| @@ -27,20 +27,20 @@ func runProcessor(ticker *time.Ticker, cl *client.Client, ch <-chan model.Incomi | |||
| case <-ticker.C: | |||
| getLikelyLocations(&ctx.Settings, ctx, cl) | |||
| case incoming := <-ch: | |||
| processIncoming(incoming, cl, ctx) | |||
| ProcessIncoming(incoming, cl, ctx) | |||
| } | |||
| } | |||
| } | |||
| func processIncoming(incoming model.Incoming_json, cl *client.Client, ctx *model.AppContext) { | |||
| func ProcessIncoming(incoming model.Incoming_json, cl *client.Client, ctx *model.AppContext) { | |||
| defer func() { | |||
| if err := recover(); err != nil { | |||
| log.Println("work failed:", err) | |||
| } | |||
| }() | |||
| incoming = incomingBeaconFilter(incoming) | |||
| id := getBeaconID(incoming) | |||
| incoming = IncomingBeaconFilter(incoming) | |||
| id := GetBeaconID(incoming) | |||
| now := time.Now().Unix() | |||
| beacons := &ctx.Beacons | |||
| @@ -53,7 +53,6 @@ func processIncoming(incoming model.Incoming_json, cl *client.Client, ctx *model | |||
| beacon, ok := beacons.Beacons[id] | |||
| if !ok { | |||
| fmt.Println("updating latest list") | |||
| updateLatestList(incoming, now, latestList) | |||
| return | |||
| } | |||
| @@ -0,0 +1,28 @@ | |||
| [Unit] | |||
| Description=Presense | |||
| PartOf=podman.service | |||
| Wants=network-online.target podman-conf-login.service | |||
| After=podman.service network-online.target podman-conf-login.service | |||
| StartLimitIntervalSec=0 | |||
| [Container] | |||
| Image=presense-go:latest | |||
| ContainerName=presense | |||
| PodmanArgs=-a stdout -a stderr | |||
| Network=sandbox.network | |||
| PublishPort=127.0.0.1:1902:8080 | |||
| Environment=HTTP_HOST_PATH=0.0.0.0:8080 | |||
| Environment=HTTPWS_HOST_PATH=0.0.0.0:8088 | |||
| Environment=MQTT_HOST=emqx:1883 | |||
| Environment=MQTT_USERNAME=sandbox | |||
| Environment=MQTT_PASSWORD=sandbox2025 | |||
| Environment=MQTT_CLIENT_ID=presence-detector | |||
| Environment=DB_PATH=.presence.db | |||
| [Service] | |||
| Restart=always | |||
| TimeoutStartSec=infinity | |||
| RestartSec=5 | |||
| [Install] | |||
| WantedBy=multi-user.target podman.service | |||
| @@ -6,7 +6,7 @@ import ( | |||
| "time" | |||
| "github.com/AFASystems/presence/internal/pkg/model" | |||
| "github.com/AFASystems/presence/internal/pkg/mqtt_client" | |||
| "github.com/AFASystems/presence/internal/pkg/mqttclient" | |||
| "github.com/AFASystems/presence/internal/pkg/persistence" | |||
| "github.com/boltdb/bolt" | |||
| ) | |||
| @@ -31,7 +31,7 @@ func TestIncomingMQTTProcessor(t *testing.T) { | |||
| persistence.LoadState(model.Db, ctx) | |||
| ch := mqtt_client.IncomingMQTTProcessor(20*time.Millisecond, nil, model.Db, ctx) | |||
| ch := mqttclient.IncomingMQTTProcessor(20*time.Millisecond, nil, model.Db, ctx) | |||
| msg := model.Incoming_json{MAC: "15:02:31", Hostname: "testHost", RSSI: -55} | |||
| ch <- msg | |||
| @@ -0,0 +1,22 @@ | |||
| Copyright (c) 2016 Caleb Spare | |||
| MIT License | |||
| Permission is hereby granted, free of charge, to any person obtaining | |||
| a copy of this software and associated documentation files (the | |||
| "Software"), to deal in the Software without restriction, including | |||
| without limitation the rights to use, copy, modify, merge, publish, | |||
| distribute, sublicense, and/or sell copies of the Software, and to | |||
| permit persons to whom the Software is furnished to do so, subject to | |||
| the following conditions: | |||
| The above copyright notice and this permission notice shall be | |||
| included in all copies or substantial portions of the Software. | |||
| THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |||
| EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |||
| MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |||
| NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE | |||
| LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION | |||
| OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION | |||
| WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | |||
| @@ -0,0 +1,74 @@ | |||
| # xxhash | |||
| [](https://pkg.go.dev/github.com/cespare/xxhash/v2) | |||
| [](https://github.com/cespare/xxhash/actions/workflows/test.yml) | |||
| xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a | |||
| high-quality hashing algorithm that is much faster than anything in the Go | |||
| standard library. | |||
| This package provides a straightforward API: | |||
| ``` | |||
| func Sum64(b []byte) uint64 | |||
| func Sum64String(s string) uint64 | |||
| type Digest struct{ ... } | |||
| func New() *Digest | |||
| ``` | |||
| The `Digest` type implements hash.Hash64. Its key methods are: | |||
| ``` | |||
| func (*Digest) Write([]byte) (int, error) | |||
| func (*Digest) WriteString(string) (int, error) | |||
| func (*Digest) Sum64() uint64 | |||
| ``` | |||
| The package is written with optimized pure Go and also contains even faster | |||
| assembly implementations for amd64 and arm64. If desired, the `purego` build tag | |||
| opts into using the Go code even on those architectures. | |||
| [xxHash]: http://cyan4973.github.io/xxHash/ | |||
| ## Compatibility | |||
| This package is in a module and the latest code is in version 2 of the module. | |||
| You need a version of Go with at least "minimal module compatibility" to use | |||
| github.com/cespare/xxhash/v2: | |||
| * 1.9.7+ for Go 1.9 | |||
| * 1.10.3+ for Go 1.10 | |||
| * Go 1.11 or later | |||
| I recommend using the latest release of Go. | |||
| ## Benchmarks | |||
| Here are some quick benchmarks comparing the pure-Go and assembly | |||
| implementations of Sum64. | |||
| | input size | purego | asm | | |||
| | ---------- | --------- | --------- | | |||
| | 4 B | 1.3 GB/s | 1.2 GB/s | | |||
| | 16 B | 2.9 GB/s | 3.5 GB/s | | |||
| | 100 B | 6.9 GB/s | 8.1 GB/s | | |||
| | 4 KB | 11.7 GB/s | 16.7 GB/s | | |||
| | 10 MB | 12.0 GB/s | 17.3 GB/s | | |||
| These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C | |||
| CPU using the following commands under Go 1.19.2: | |||
| ``` | |||
| benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$') | |||
| benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$') | |||
| ``` | |||
| ## Projects using this package | |||
| - [InfluxDB](https://github.com/influxdata/influxdb) | |||
| - [Prometheus](https://github.com/prometheus/prometheus) | |||
| - [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) | |||
| - [FreeCache](https://github.com/coocood/freecache) | |||
| - [FastCache](https://github.com/VictoriaMetrics/fastcache) | |||
| - [Ristretto](https://github.com/dgraph-io/ristretto) | |||
| - [Badger](https://github.com/dgraph-io/badger) | |||
| @@ -0,0 +1,10 @@ | |||
| #!/bin/bash | |||
| set -eu -o pipefail | |||
| # Small convenience script for running the tests with various combinations of | |||
| # arch/tags. This assumes we're running on amd64 and have qemu available. | |||
| go test ./... | |||
| go test -tags purego ./... | |||
| GOARCH=arm64 go test | |||
| GOARCH=arm64 go test -tags purego | |||
| @@ -0,0 +1,243 @@ | |||
| // Package xxhash implements the 64-bit variant of xxHash (XXH64) as described | |||
| // at http://cyan4973.github.io/xxHash/. | |||
| package xxhash | |||
| import ( | |||
| "encoding/binary" | |||
| "errors" | |||
| "math/bits" | |||
| ) | |||
| const ( | |||
| prime1 uint64 = 11400714785074694791 | |||
| prime2 uint64 = 14029467366897019727 | |||
| prime3 uint64 = 1609587929392839161 | |||
| prime4 uint64 = 9650029242287828579 | |||
| prime5 uint64 = 2870177450012600261 | |||
| ) | |||
| // Store the primes in an array as well. | |||
| // | |||
| // The consts are used when possible in Go code to avoid MOVs but we need a | |||
| // contiguous array for the assembly code. | |||
| var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5} | |||
| // Digest implements hash.Hash64. | |||
| // | |||
| // Note that a zero-valued Digest is not ready to receive writes. | |||
| // Call Reset or create a Digest using New before calling other methods. | |||
| type Digest struct { | |||
| v1 uint64 | |||
| v2 uint64 | |||
| v3 uint64 | |||
| v4 uint64 | |||
| total uint64 | |||
| mem [32]byte | |||
| n int // how much of mem is used | |||
| } | |||
| // New creates a new Digest with a zero seed. | |||
| func New() *Digest { | |||
| return NewWithSeed(0) | |||
| } | |||
| // NewWithSeed creates a new Digest with the given seed. | |||
| func NewWithSeed(seed uint64) *Digest { | |||
| var d Digest | |||
| d.ResetWithSeed(seed) | |||
| return &d | |||
| } | |||
| // Reset clears the Digest's state so that it can be reused. | |||
| // It uses a seed value of zero. | |||
| func (d *Digest) Reset() { | |||
| d.ResetWithSeed(0) | |||
| } | |||
| // ResetWithSeed clears the Digest's state so that it can be reused. | |||
| // It uses the given seed to initialize the state. | |||
| func (d *Digest) ResetWithSeed(seed uint64) { | |||
| d.v1 = seed + prime1 + prime2 | |||
| d.v2 = seed + prime2 | |||
| d.v3 = seed | |||
| d.v4 = seed - prime1 | |||
| d.total = 0 | |||
| d.n = 0 | |||
| } | |||
| // Size always returns 8 bytes. | |||
| func (d *Digest) Size() int { return 8 } | |||
| // BlockSize always returns 32 bytes. | |||
| func (d *Digest) BlockSize() int { return 32 } | |||
| // Write adds more data to d. It always returns len(b), nil. | |||
| func (d *Digest) Write(b []byte) (n int, err error) { | |||
| n = len(b) | |||
| d.total += uint64(n) | |||
| memleft := d.mem[d.n&(len(d.mem)-1):] | |||
| if d.n+n < 32 { | |||
| // This new data doesn't even fill the current block. | |||
| copy(memleft, b) | |||
| d.n += n | |||
| return | |||
| } | |||
| if d.n > 0 { | |||
| // Finish off the partial block. | |||
| c := copy(memleft, b) | |||
| d.v1 = round(d.v1, u64(d.mem[0:8])) | |||
| d.v2 = round(d.v2, u64(d.mem[8:16])) | |||
| d.v3 = round(d.v3, u64(d.mem[16:24])) | |||
| d.v4 = round(d.v4, u64(d.mem[24:32])) | |||
| b = b[c:] | |||
| d.n = 0 | |||
| } | |||
| if len(b) >= 32 { | |||
| // One or more full blocks left. | |||
| nw := writeBlocks(d, b) | |||
| b = b[nw:] | |||
| } | |||
| // Store any remaining partial block. | |||
| copy(d.mem[:], b) | |||
| d.n = len(b) | |||
| return | |||
| } | |||
| // Sum appends the current hash to b and returns the resulting slice. | |||
| func (d *Digest) Sum(b []byte) []byte { | |||
| s := d.Sum64() | |||
| return append( | |||
| b, | |||
| byte(s>>56), | |||
| byte(s>>48), | |||
| byte(s>>40), | |||
| byte(s>>32), | |||
| byte(s>>24), | |||
| byte(s>>16), | |||
| byte(s>>8), | |||
| byte(s), | |||
| ) | |||
| } | |||
| // Sum64 returns the current hash. | |||
| func (d *Digest) Sum64() uint64 { | |||
| var h uint64 | |||
| if d.total >= 32 { | |||
| v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 | |||
| h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) | |||
| h = mergeRound(h, v1) | |||
| h = mergeRound(h, v2) | |||
| h = mergeRound(h, v3) | |||
| h = mergeRound(h, v4) | |||
| } else { | |||
| h = d.v3 + prime5 | |||
| } | |||
| h += d.total | |||
| b := d.mem[:d.n&(len(d.mem)-1)] | |||
| for ; len(b) >= 8; b = b[8:] { | |||
| k1 := round(0, u64(b[:8])) | |||
| h ^= k1 | |||
| h = rol27(h)*prime1 + prime4 | |||
| } | |||
| if len(b) >= 4 { | |||
| h ^= uint64(u32(b[:4])) * prime1 | |||
| h = rol23(h)*prime2 + prime3 | |||
| b = b[4:] | |||
| } | |||
| for ; len(b) > 0; b = b[1:] { | |||
| h ^= uint64(b[0]) * prime5 | |||
| h = rol11(h) * prime1 | |||
| } | |||
| h ^= h >> 33 | |||
| h *= prime2 | |||
| h ^= h >> 29 | |||
| h *= prime3 | |||
| h ^= h >> 32 | |||
| return h | |||
| } | |||
| const ( | |||
| magic = "xxh\x06" | |||
| marshaledSize = len(magic) + 8*5 + 32 | |||
| ) | |||
| // MarshalBinary implements the encoding.BinaryMarshaler interface. | |||
| func (d *Digest) MarshalBinary() ([]byte, error) { | |||
| b := make([]byte, 0, marshaledSize) | |||
| b = append(b, magic...) | |||
| b = appendUint64(b, d.v1) | |||
| b = appendUint64(b, d.v2) | |||
| b = appendUint64(b, d.v3) | |||
| b = appendUint64(b, d.v4) | |||
| b = appendUint64(b, d.total) | |||
| b = append(b, d.mem[:d.n]...) | |||
| b = b[:len(b)+len(d.mem)-d.n] | |||
| return b, nil | |||
| } | |||
// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
//
// It rejects input that does not start with magic or whose length is
// not exactly marshaledSize, then restores v1..v4, total, and the
// buffered tail bytes.
func (d *Digest) UnmarshalBinary(b []byte) error {
	if len(b) < len(magic) || string(b[:len(magic)]) != magic {
		return errors.New("xxhash: invalid hash state identifier")
	}
	if len(b) != marshaledSize {
		return errors.New("xxhash: invalid hash state size")
	}
	b = b[len(magic):]
	b, d.v1 = consumeUint64(b)
	b, d.v2 = consumeUint64(b)
	b, d.v3 = consumeUint64(b)
	b, d.v4 = consumeUint64(b)
	b, d.total = consumeUint64(b)
	copy(d.mem[:], b)
	// The buffered-byte count is not stored explicitly: it is implied by
	// total modulo the 32-byte buffer size.
	d.n = int(d.total % uint64(len(d.mem)))
	return nil
}
// appendUint64 appends x to b in little-endian byte order and returns
// the extended slice.
func appendUint64(b []byte, x uint64) []byte {
	var scratch [8]byte
	binary.LittleEndian.PutUint64(scratch[:], x)
	return append(b, scratch[:]...)
}
| func consumeUint64(b []byte) ([]byte, uint64) { | |||
| x := u64(b) | |||
| return b[8:], x | |||
| } | |||
// u64 decodes the first 8 bytes of b as a little-endian uint64.
func u64(b []byte) uint64 {
	return binary.LittleEndian.Uint64(b[:8])
}

// u32 decodes the first 4 bytes of b as a little-endian uint32.
func u32(b []byte) uint32 {
	return binary.LittleEndian.Uint32(b[:4])
}
| func round(acc, input uint64) uint64 { | |||
| acc += input * prime2 | |||
| acc = rol31(acc) | |||
| acc *= prime1 | |||
| return acc | |||
| } | |||
| func mergeRound(acc, val uint64) uint64 { | |||
| val = round(0, val) | |||
| acc ^= val | |||
| acc = acc*prime1 + prime4 | |||
| return acc | |||
| } | |||
// Rotate-left helpers for the fixed rotation amounts used by the
// xxHash64 rounds and finalization. They are thin wrappers over
// bits.RotateLeft64 so call sites stay terse; each compiles down to a
// single rotate instruction.
func rol1(x uint64) uint64  { return bits.RotateLeft64(x, 1) }
func rol7(x uint64) uint64  { return bits.RotateLeft64(x, 7) }
func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) }
func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) }
func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) }
func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) }
func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) }
func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) }
| @@ -0,0 +1,209 @@ | |||
| //go:build !appengine && gc && !purego | |||
| // +build !appengine | |||
| // +build gc | |||
| // +build !purego | |||
| #include "textflag.h" | |||
// Registers:
#define h AX // running hash in Sum64
#define d AX // *Digest in writeBlocks (aliases h; the two are never live together)
#define p SI // pointer to advance through b
#define n DX // length of b
#define end BX // loop end
#define v1 R8 // xxHash64 accumulator 1
#define v2 R9 // xxHash64 accumulator 2
#define v3 R10 // xxHash64 accumulator 3
#define v4 R11 // xxHash64 accumulator 4
#define x R12 // scratch lane
#define prime1 R13
#define prime2 R14
#define prime4 DI

// round updates acc with one 8-byte lane x:
// acc = rol31(acc + x*prime2) * prime1. Clobbers x.
#define round(acc, x) \
	IMULQ prime2, x   \
	ADDQ  x, acc      \
	ROLQ  $31, acc    \
	IMULQ prime1, acc

// round0 performs the operation x = round(0, x).
#define round0(x) \
	IMULQ prime2, x \
	ROLQ  $31, x    \
	IMULQ prime1, x

// mergeRound applies a merge round on the two registers acc and x.
// It assumes that prime1, prime2, and prime4 have been loaded.
// Clobbers x.
#define mergeRound(acc, x) \
	round0(x)         \
	XORQ  x, acc      \
	IMULQ prime1, acc \
	ADDQ  prime4, acc

// blockLoop processes as many 32-byte blocks as possible,
// updating v1, v2, v3, and v4. It assumes that there is at least one block
// to process. On exit p points just past the last full block consumed
// (end must hold the last valid block start, i.e. len(b)-32).
#define blockLoop() \
loop:  \
	MOVQ +0(p), x  \
	round(v1, x)   \
	MOVQ +8(p), x  \
	round(v2, x)   \
	MOVQ +16(p), x \
	round(v3, x)   \
	MOVQ +24(p), x \
	round(v4, x)   \
	ADDQ $32, p    \
	CMPQ p, end    \
	JLE  loop
// func Sum64(b []byte) uint64
TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
	// Load fixed primes.
	MOVQ ·primes+0(SB), prime1
	MOVQ ·primes+8(SB), prime2
	MOVQ ·primes+24(SB), prime4

	// Load slice.
	MOVQ b_base+0(FP), p
	MOVQ b_len+8(FP), n
	LEAQ (p)(n*1), end

	// The first loop limit will be len(b)-32.
	SUBQ $32, end

	// Check whether we have at least one block.
	CMPQ n, $32
	JLT  noBlocks

	// Set up initial state (v1, v2, v3, v4).
	MOVQ prime1, v1
	ADDQ prime2, v1
	MOVQ prime2, v2
	XORQ v3, v3
	XORQ v4, v4
	SUBQ prime1, v4 // v4 = 0 - prime1

	blockLoop()

	// Merge the accumulators:
	// h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4),
	// followed by the four merge rounds.
	MOVQ v1, h
	ROLQ $1, h
	MOVQ v2, x
	ROLQ $7, x
	ADDQ x, h
	MOVQ v3, x
	ROLQ $12, x
	ADDQ x, h
	MOVQ v4, x
	ROLQ $18, x
	ADDQ x, h

	mergeRound(h, v1)
	mergeRound(h, v2)
	mergeRound(h, v3)
	mergeRound(h, v4)

	JMP afterBlocks

noBlocks:
	// Short input: start from prime5 (·primes+32).
	MOVQ ·primes+32(SB), h

afterBlocks:
	ADDQ n, h

	// Tail, 8 bytes at a time. end was len(b)-32; adjust it to len(b)-8
	// so the loop runs while at least 8 bytes remain.
	ADDQ $24, end
	CMPQ p, end
	JG   try4

loop8:
	MOVQ  (p), x
	ADDQ  $8, p
	round0(x)
	XORQ  x, h
	ROLQ  $27, h
	IMULQ prime1, h
	ADDQ  prime4, h

	CMPQ p, end
	JLE  loop8

try4:
	// One 4-byte chunk, if at least 4 bytes remain (bound = len(b)-4).
	ADDQ  $4, end
	CMPQ  p, end
	JG    try1
	MOVL  (p), x
	ADDQ  $4, p
	IMULQ prime1, x
	XORQ  x, h

	ROLQ  $23, h
	IMULQ prime2, h
	ADDQ  ·primes+16(SB), h // + prime3

try1:
	// Remaining single bytes (bound = len(b)).
	ADDQ $4, end
	CMPQ p, end
	JGE  finalize

loop1:
	MOVBQZX (p), x
	ADDQ    $1, p
	IMULQ   ·primes+32(SB), x // * prime5
	XORQ    x, h
	ROLQ    $11, h
	IMULQ   prime1, h

	CMPQ p, end
	JL   loop1

finalize:
	// Final avalanche:
	// h ^= h>>33; h *= prime2; h ^= h>>29; h *= prime3; h ^= h>>32.
	MOVQ  h, x
	SHRQ  $33, x
	XORQ  x, h
	IMULQ prime2, h
	MOVQ  h, x
	SHRQ  $29, x
	XORQ  x, h
	IMULQ ·primes+16(SB), h // * prime3
	MOVQ  h, x
	SHRQ  $32, x
	XORQ  x, h

	MOVQ h, ret+24(FP)
	RET
// func writeBlocks(d *Digest, b []byte) int
//
// Absorbs full 32-byte blocks of b into d's accumulators and returns
// the number of bytes consumed (a multiple of 32).
TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
	// Load fixed primes needed for round.
	MOVQ ·primes+0(SB), prime1
	MOVQ ·primes+8(SB), prime2

	// Load slice.
	MOVQ b_base+8(FP), p
	MOVQ b_len+16(FP), n
	LEAQ (p)(n*1), end
	SUBQ $32, end // end = last valid block start, as blockLoop expects

	// Load vN from d. Relies on v1..v4 being the first four 8-byte words
	// of Digest (offsets 0, 8, 16, 24).
	MOVQ s+0(FP), d
	MOVQ 0(d), v1
	MOVQ 8(d), v2
	MOVQ 16(d), v3
	MOVQ 24(d), v4

	// We don't need to check the loop condition here; this function is
	// always called with at least one block of data to process.
	blockLoop()

	// Copy vN back to d.
	MOVQ v1, 0(d)
	MOVQ v2, 8(d)
	MOVQ v3, 16(d)
	MOVQ v4, 24(d)

	// The number of bytes written is p minus the old base pointer.
	SUBQ b_base+8(FP), p
	MOVQ p, ret+32(FP)

	RET
| @@ -0,0 +1,183 @@ | |||
| //go:build !appengine && gc && !purego | |||
| // +build !appengine | |||
| // +build gc | |||
| // +build !purego | |||
| #include "textflag.h" | |||
// Registers:
#define digest R1 // *Digest in writeBlocks
#define h R2 // return value
#define p R3 // input pointer
#define n R4 // input length
#define nblocks R5 // n / 32
#define prime1 R7
#define prime2 R8
#define prime3 R9
#define prime4 R10
#define prime5 R11
#define v1 R12 // xxHash64 accumulator 1
#define v2 R13 // xxHash64 accumulator 2
#define v3 R14 // xxHash64 accumulator 3
#define v4 R15 // xxHash64 accumulator 4
#define x1 R20 // scratch lane 1
#define x2 R21 // scratch lane 2
#define x3 R22 // scratch lane 3
#define x4 R23 // scratch lane 4

// round updates acc with one 8-byte lane x:
// acc = rol31(acc + x*prime2) * prime1.
// (MADD computes acc + x*prime2; ROR by 64-31 is a left rotation by 31.)
#define round(acc, x) \
	MADD prime2, acc, x, acc \
	ROR  $64-31, acc         \
	MUL  prime1, acc

// round0 performs the operation x = round(0, x).
#define round0(x) \
	MUL prime2, x \
	ROR $64-31, x \
	MUL prime1, x

// mergeRound folds a finished accumulator x into acc:
// acc = (acc ^ round0(x)) * prime1 + prime4. Clobbers x.
#define mergeRound(acc, x) \
	round0(x)                     \
	EOR  x, acc                   \
	MADD acc, prime4, prime1, acc

// blockLoop processes as many 32-byte blocks as possible,
// updating v1, v2, v3, and v4. It assumes that n >= 32.
// Each iteration loads four lanes with two post-incrementing pair loads.
#define blockLoop() \
	LSR $5, n, nblocks  \
	PCALIGN $16         \
loop:  \
	LDP.P 16(p), (x1, x2) \
	LDP.P 16(p), (x3, x4) \
	round(v1, x1)         \
	round(v2, x2)         \
	round(v3, x3)         \
	round(v4, x4)         \
	SUB  $1, nblocks      \
	CBNZ nblocks, loop
// func Sum64(b []byte) uint64
TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
	LDP b_base+0(FP), (p, n)

	// Load the five primes from the ·primes array.
	LDP  ·primes+0(SB), (prime1, prime2)
	LDP  ·primes+16(SB), (prime3, prime4)
	MOVD ·primes+32(SB), prime5

	CMP  $32, n
	CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 }
	BLT  afterLoop

	// Initial accumulator state.
	ADD  prime1, prime2, v1
	MOVD prime2, v2
	MOVD $0, v3
	NEG  prime1, v4

	blockLoop()

	// Merge: h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4).
	ROR $64-1, v1, x1
	ROR $64-7, v2, x2
	ADD x1, x2
	ROR $64-12, v3, x3
	ROR $64-18, v4, x4
	ADD x3, x4
	ADD x2, x4, h

	mergeRound(h, v1)
	mergeRound(h, v2)
	mergeRound(h, v3)
	mergeRound(h, v4)

afterLoop:
	ADD n, h

	// Tail handling tests individual bits of the remaining length
	// (n mod 32): bit 4 selects a 16-byte chunk, bit 3 an 8-byte chunk,
	// and so on down to a single byte.
	TBZ   $4, n, try8
	LDP.P 16(p), (x1, x2)

	round0(x1)

	// NOTE: here and below, sequencing the EOR after the ROR (using a
	// rotated register) is worth a small but measurable speedup for small
	// inputs.
	ROR  $64-27, h
	EOR  x1 @> 64-27, h, h
	MADD h, prime4, prime1, h

	round0(x2)
	ROR  $64-27, h
	EOR  x2 @> 64-27, h, h
	MADD h, prime4, prime1, h

try8:
	TBZ    $3, n, try4
	MOVD.P 8(p), x1

	round0(x1)
	ROR  $64-27, h
	EOR  x1 @> 64-27, h, h
	MADD h, prime4, prime1, h

try4:
	TBZ     $2, n, try2
	MOVWU.P 4(p), x2

	MUL  prime1, x2
	ROR  $64-23, h
	EOR  x2 @> 64-23, h, h
	MADD h, prime3, prime2, h

try2:
	TBZ     $1, n, try1
	MOVHU.P 2(p), x3
	// Split the 2-byte load into its two bytes and mix each separately.
	AND     $255, x3, x1
	LSR     $8, x3, x2

	MUL prime5, x1
	ROR $64-11, h
	EOR x1 @> 64-11, h, h
	MUL prime1, h

	MUL prime5, x2
	ROR $64-11, h
	EOR x2 @> 64-11, h, h
	MUL prime1, h

try1:
	TBZ   $0, n, finalize
	MOVBU (p), x4

	MUL prime5, x4
	ROR $64-11, h
	EOR x4 @> 64-11, h, h
	MUL prime1, h

finalize:
	// Final avalanche:
	// h ^= h>>33; h *= prime2; h ^= h>>29; h *= prime3; h ^= h>>32.
	EOR h >> 33, h
	MUL prime2, h
	EOR h >> 29, h
	MUL prime3, h
	EOR h >> 32, h

	MOVD h, ret+24(FP)
	RET
// func writeBlocks(d *Digest, b []byte) int
//
// Absorbs full 32-byte blocks of b into d's accumulators and returns
// the number of bytes consumed (n rounded down to a multiple of 32).
TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
	LDP ·primes+0(SB), (prime1, prime2)

	// Load state. Assume v[1-4] are stored contiguously.
	MOVD d+0(FP), digest
	LDP  0(digest), (v1, v2)
	LDP  16(digest), (v3, v4)

	LDP b_base+8(FP), (p, n)

	blockLoop()

	// Store updated state.
	STP (v1, v2), 0(digest)
	STP (v3, v4), 16(digest)

	// BIC clears the low five bits: bytes consumed = n &^ 31.
	BIC  $31, n
	MOVD n, ret+32(FP)
	RET
| @@ -0,0 +1,15 @@ | |||
| //go:build (amd64 || arm64) && !appengine && gc && !purego | |||
| // +build amd64 arm64 | |||
| // +build !appengine | |||
| // +build gc | |||
| // +build !purego | |||
| package xxhash | |||
// Sum64 computes the 64-bit xxHash digest of b with a zero seed.
//
// Implemented in assembly (see the .s files for this package).
//
//go:noescape
func Sum64(b []byte) uint64

// writeBlocks absorbs full 32-byte blocks of b into d's accumulators
// and returns the number of bytes consumed. Implemented in assembly.
//
//go:noescape
func writeBlocks(d *Digest, b []byte) int
| @@ -0,0 +1,76 @@ | |||
| //go:build (!amd64 && !arm64) || appengine || !gc || purego | |||
| // +build !amd64,!arm64 appengine !gc purego | |||
| package xxhash | |||
// Sum64 computes the 64-bit xxHash digest of b with a zero seed.
//
// Pure-Go fallback used when the assembly implementations are not
// available.
func Sum64(b []byte) uint64 {
	// A simpler version would be
	//   d := New()
	//   d.Write(b)
	//   return d.Sum64()
	// but this is faster, particularly for small inputs.

	n := len(b)
	var h uint64

	if n >= 32 {
		// Four-way accumulator loop over full 32-byte blocks.
		v1 := primes[0] + prime2
		v2 := prime2
		v3 := uint64(0)
		v4 := -primes[0]
		for len(b) >= 32 {
			// The full slice expressions (low:high:len(b)) keep the
			// capacities tight — presumably to help the compiler's
			// bounds-check elimination.
			v1 = round(v1, u64(b[0:8:len(b)]))
			v2 = round(v2, u64(b[8:16:len(b)]))
			v3 = round(v3, u64(b[16:24:len(b)]))
			v4 = round(v4, u64(b[24:32:len(b)]))
			b = b[32:len(b):len(b)]
		}
		h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
		h = mergeRound(h, v1)
		h = mergeRound(h, v2)
		h = mergeRound(h, v3)
		h = mergeRound(h, v4)
	} else {
		h = prime5
	}

	h += uint64(n)

	// Tail: 8-byte chunks, then one 4-byte chunk, then single bytes.
	for ; len(b) >= 8; b = b[8:] {
		k1 := round(0, u64(b[:8]))
		h ^= k1
		h = rol27(h)*prime1 + prime4
	}
	if len(b) >= 4 {
		h ^= uint64(u32(b[:4])) * prime1
		h = rol23(h)*prime2 + prime3
		b = b[4:]
	}
	for ; len(b) > 0; b = b[1:] {
		h ^= uint64(b[0]) * prime5
		h = rol11(h) * prime1
	}

	// Final avalanche.
	h ^= h >> 33
	h *= prime2
	h ^= h >> 29
	h *= prime3
	h ^= h >> 32

	return h
}
| func writeBlocks(d *Digest, b []byte) int { | |||
| v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 | |||
| n := len(b) | |||
| for len(b) >= 32 { | |||
| v1 = round(v1, u64(b[0:8:len(b)])) | |||
| v2 = round(v2, u64(b[8:16:len(b)])) | |||
| v3 = round(v3, u64(b[16:24:len(b)])) | |||
| v4 = round(v4, u64(b[24:32:len(b)])) | |||
| b = b[32:len(b):len(b)] | |||
| } | |||
| d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4 | |||
| return n - len(b) | |||
| } | |||
| @@ -0,0 +1,16 @@ | |||
| //go:build appengine | |||
| // +build appengine | |||
| // This file contains the safe implementations of otherwise unsafe-using code. | |||
| package xxhash | |||
| // Sum64String computes the 64-bit xxHash digest of s with a zero seed. | |||
| func Sum64String(s string) uint64 { | |||
| return Sum64([]byte(s)) | |||
| } | |||
| // WriteString adds more data to d. It always returns len(s), nil. | |||
| func (d *Digest) WriteString(s string) (n int, err error) { | |||
| return d.Write([]byte(s)) | |||
| } | |||
| @@ -0,0 +1,58 @@ | |||
| //go:build !appengine | |||
| // +build !appengine | |||
| // This file encapsulates usage of unsafe. | |||
| // xxhash_safe.go contains the safe implementations. | |||
| package xxhash | |||
| import ( | |||
| "unsafe" | |||
| ) | |||
| // In the future it's possible that compiler optimizations will make these | |||
| // XxxString functions unnecessary by realizing that calls such as | |||
| // Sum64([]byte(s)) don't need to copy s. See https://go.dev/issue/2205. | |||
| // If that happens, even if we keep these functions they can be replaced with | |||
| // the trivial safe code. | |||
| // NOTE: The usual way of doing an unsafe string-to-[]byte conversion is: | |||
| // | |||
| // var b []byte | |||
| // bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) | |||
| // bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data | |||
| // bh.Len = len(s) | |||
| // bh.Cap = len(s) | |||
| // | |||
| // Unfortunately, as of Go 1.15.3 the inliner's cost model assigns a high enough | |||
| // weight to this sequence of expressions that any function that uses it will | |||
| // not be inlined. Instead, the functions below use a different unsafe | |||
| // conversion designed to minimize the inliner weight and allow both to be | |||
| // inlined. There is also a test (TestInlining) which verifies that these are | |||
| // inlined. | |||
| // | |||
| // See https://github.com/golang/go/issues/42739 for discussion. | |||
// Sum64String computes the 64-bit xxHash digest of s with a zero seed.
// It may be faster than Sum64([]byte(s)) by avoiding a copy.
func Sum64String(s string) uint64 {
	// Reinterpret s as a []byte through sliceHeader (string data pointer
	// and length, with cap set to len). This is sound only because Sum64
	// never mutates its argument. The exact expression form is kept
	// deliberately minimal so the function stays inlinable — see the
	// file-level comment above before restructuring.
	b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))
	return Sum64(b)
}
// WriteString adds more data to d. It always returns len(s), nil.
// It may be faster than Write([]byte(s)) by avoiding a copy.
//
// The unsafe reinterpretation below is sound only as long as Write
// neither mutates nor retains the slice — presumably it only copies
// into d.mem; confirm against Digest.Write if that ever changes.
func (d *Digest) WriteString(s string) (n int, err error) {
	d.Write(*(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})))
	// d.Write always returns len(s), nil.
	// Ignoring the return output and returning these fixed values buys a
	// savings of 6 in the inliner's cost model.
	return len(s), nil
}
// sliceHeader is similar to reflect.SliceHeader, but it assumes that the layout
// of the first two words is the same as the layout of a string.
type sliceHeader struct {
	s   string // supplies the data pointer and length words
	cap int    // slice capacity; callers set this to len(s)
}
| @@ -0,0 +1,21 @@ | |||
| The MIT License (MIT) | |||
| Copyright (c) 2017-2020 Damian Gryski <damian@gryski.com> | |||
| Permission is hereby granted, free of charge, to any person obtaining a copy | |||
| of this software and associated documentation files (the "Software"), to deal | |||
| in the Software without restriction, including without limitation the rights | |||
| to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | |||
| copies of the Software, and to permit persons to whom the Software is | |||
| furnished to do so, subject to the following conditions: | |||
| The above copyright notice and this permission notice shall be included in | |||
| all copies or substantial portions of the Software. | |||
| THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |||
| IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |||
| FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | |||
| AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |||
| LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | |||
| OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | |||
| THE SOFTWARE. | |||
| @@ -0,0 +1,79 @@ | |||
| package rendezvous | |||
// Rendezvous implements rendezvous (highest-random-weight) hashing over
// a set of named nodes: Lookup maps a key to the node with the highest
// combined hash score.
type Rendezvous struct {
	nodes map[string]int // node name -> index into nstr/nhash
	nstr  []string       // node names, indexed by the values in nodes
	nhash []uint64       // hash(node name), cached for Lookup
	hash  Hasher         // hash function applied to node names and keys
}
// Hasher maps a string (node name or lookup key) to a uint64 hash.
type Hasher func(s string) uint64
| func New(nodes []string, hash Hasher) *Rendezvous { | |||
| r := &Rendezvous{ | |||
| nodes: make(map[string]int, len(nodes)), | |||
| nstr: make([]string, len(nodes)), | |||
| nhash: make([]uint64, len(nodes)), | |||
| hash: hash, | |||
| } | |||
| for i, n := range nodes { | |||
| r.nodes[n] = i | |||
| r.nstr[i] = n | |||
| r.nhash[i] = hash(n) | |||
| } | |||
| return r | |||
| } | |||
| func (r *Rendezvous) Lookup(k string) string { | |||
| // short-circuit if we're empty | |||
| if len(r.nodes) == 0 { | |||
| return "" | |||
| } | |||
| khash := r.hash(k) | |||
| var midx int | |||
| var mhash = xorshiftMult64(khash ^ r.nhash[0]) | |||
| for i, nhash := range r.nhash[1:] { | |||
| if h := xorshiftMult64(khash ^ nhash); h > mhash { | |||
| midx = i + 1 | |||
| mhash = h | |||
| } | |||
| } | |||
| return r.nstr[midx] | |||
| } | |||
| func (r *Rendezvous) Add(node string) { | |||
| r.nodes[node] = len(r.nstr) | |||
| r.nstr = append(r.nstr, node) | |||
| r.nhash = append(r.nhash, r.hash(node)) | |||
| } | |||
| func (r *Rendezvous) Remove(node string) { | |||
| // find index of node to remove | |||
| nidx := r.nodes[node] | |||
| // remove from the slices | |||
| l := len(r.nstr) | |||
| r.nstr[nidx] = r.nstr[l] | |||
| r.nstr = r.nstr[:l] | |||
| r.nhash[nidx] = r.nhash[l] | |||
| r.nhash = r.nhash[:l] | |||
| // update the map | |||
| delete(r.nodes, node) | |||
| moved := r.nstr[nidx] | |||
| r.nodes[moved] = nidx | |||
| } | |||
// xorshiftMult64 scrambles x with one xorshift step (shifts 12, 25, 27)
// followed by a multiplication by a large odd constant, producing a
// well-mixed value for score comparison in Lookup.
func xorshiftMult64(x uint64) uint64 {
	const multiplier = 2685821657736338717 // 0x2545F4914F6CDD1D
	x ^= x >> 12
	x ^= x << 25
	x ^= x >> 27
	return x * multiplier
}
| @@ -0,0 +1,2 @@ | |||
| * -text | |||
| *.bin -text -diff | |||
| @@ -0,0 +1,32 @@ | |||
| # Compiled Object files, Static and Dynamic libs (Shared Objects) | |||
| *.o | |||
| *.a | |||
| *.so | |||
| # Folders | |||
| _obj | |||
| _test | |||
| # Architecture specific extensions/prefixes | |||
| *.[568vq] | |||
| [568vq].out | |||
| *.cgo1.go | |||
| *.cgo2.c | |||
| _cgo_defun.c | |||
| _cgo_gotypes.go | |||
| _cgo_export.* | |||
| _testmain.go | |||
| *.exe | |||
| *.test | |||
| *.prof | |||
| /s2/cmd/_s2sx/sfx-exe | |||
| # Linux perf files | |||
| perf.data | |||
| perf.data.old | |||
| # gdb history | |||
| .gdb_history | |||
| @@ -0,0 +1,141 @@ | |||
| # This is an example goreleaser.yaml file with some sane defaults. | |||
| # Make sure to check the documentation at http://goreleaser.com | |||
| before: | |||
| hooks: | |||
| - ./gen.sh | |||
| - go install mvdan.cc/garble@latest | |||
| builds: | |||
| - | |||
| id: "s2c" | |||
| binary: s2c | |||
| main: ./s2/cmd/s2c/main.go | |||
| flags: | |||
| - -trimpath | |||
| env: | |||
| - CGO_ENABLED=0 | |||
| goos: | |||
| - aix | |||
| - linux | |||
| - freebsd | |||
| - netbsd | |||
| - windows | |||
| - darwin | |||
| goarch: | |||
| - 386 | |||
| - amd64 | |||
| - arm | |||
| - arm64 | |||
| - ppc64 | |||
| - ppc64le | |||
| - mips64 | |||
| - mips64le | |||
| goarm: | |||
| - 7 | |||
| gobinary: garble | |||
| - | |||
| id: "s2d" | |||
| binary: s2d | |||
| main: ./s2/cmd/s2d/main.go | |||
| flags: | |||
| - -trimpath | |||
| env: | |||
| - CGO_ENABLED=0 | |||
| goos: | |||
| - aix | |||
| - linux | |||
| - freebsd | |||
| - netbsd | |||
| - windows | |||
| - darwin | |||
| goarch: | |||
| - 386 | |||
| - amd64 | |||
| - arm | |||
| - arm64 | |||
| - ppc64 | |||
| - ppc64le | |||
| - mips64 | |||
| - mips64le | |||
| goarm: | |||
| - 7 | |||
| gobinary: garble | |||
| - | |||
| id: "s2sx" | |||
| binary: s2sx | |||
| main: ./s2/cmd/_s2sx/main.go | |||
| flags: | |||
| - -modfile=s2sx.mod | |||
| - -trimpath | |||
| env: | |||
| - CGO_ENABLED=0 | |||
| goos: | |||
| - aix | |||
| - linux | |||
| - freebsd | |||
| - netbsd | |||
| - windows | |||
| - darwin | |||
| goarch: | |||
| - 386 | |||
| - amd64 | |||
| - arm | |||
| - arm64 | |||
| - ppc64 | |||
| - ppc64le | |||
| - mips64 | |||
| - mips64le | |||
| goarm: | |||
| - 7 | |||
| gobinary: garble | |||
| archives: | |||
| - | |||
| id: s2-binaries | |||
| name_template: "s2-{{ .Os }}_{{ .Arch }}_{{ .Version }}" | |||
| replacements: | |||
| aix: AIX | |||
| darwin: OSX | |||
| linux: Linux | |||
| windows: Windows | |||
| 386: i386 | |||
| amd64: x86_64 | |||
| freebsd: FreeBSD | |||
| netbsd: NetBSD | |||
| format_overrides: | |||
| - goos: windows | |||
| format: zip | |||
| files: | |||
| - unpack/* | |||
| - s2/LICENSE | |||
| - s2/README.md | |||
| checksum: | |||
| name_template: 'checksums.txt' | |||
| snapshot: | |||
| name_template: "{{ .Tag }}-next" | |||
| changelog: | |||
| sort: asc | |||
| filters: | |||
| exclude: | |||
| - '^doc:' | |||
| - '^docs:' | |||
| - '^test:' | |||
| - '^tests:' | |||
| - '^Update\sREADME.md' | |||
| nfpms: | |||
| - | |||
| file_name_template: "s2_package_{{ .Version }}_{{ .Os }}_{{ .Arch }}" | |||
| vendor: Klaus Post | |||
| homepage: https://github.com/klauspost/compress | |||
| maintainer: Klaus Post <klauspost@gmail.com> | |||
| description: S2 Compression Tool | |||
| license: BSD 3-Clause | |||
| formats: | |||
| - deb | |||
| - rpm | |||
| replacements: | |||
| darwin: Darwin | |||
| linux: Linux | |||
| freebsd: FreeBSD | |||
| amd64: x86_64 | |||
| @@ -0,0 +1,304 @@ | |||
| Copyright (c) 2012 The Go Authors. All rights reserved. | |||
| Copyright (c) 2019 Klaus Post. All rights reserved. | |||
| Redistribution and use in source and binary forms, with or without | |||
| modification, are permitted provided that the following conditions are | |||
| met: | |||
| * Redistributions of source code must retain the above copyright | |||
| notice, this list of conditions and the following disclaimer. | |||
| * Redistributions in binary form must reproduce the above | |||
| copyright notice, this list of conditions and the following disclaimer | |||
| in the documentation and/or other materials provided with the | |||
| distribution. | |||
| * Neither the name of Google Inc. nor the names of its | |||
| contributors may be used to endorse or promote products derived from | |||
| this software without specific prior written permission. | |||
| THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |||
| "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |||
| LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |||
| A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |||
| OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |||
| SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |||
| LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |||
| DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |||
| THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |||
| (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |||
| OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |||
| ------------------ | |||
| Files: gzhttp/* | |||
| Apache License | |||
| Version 2.0, January 2004 | |||
| http://www.apache.org/licenses/ | |||
| TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION | |||
| 1. Definitions. | |||
| "License" shall mean the terms and conditions for use, reproduction, | |||
| and distribution as defined by Sections 1 through 9 of this document. | |||
| "Licensor" shall mean the copyright owner or entity authorized by | |||
| the copyright owner that is granting the License. | |||
| "Legal Entity" shall mean the union of the acting entity and all | |||
| other entities that control, are controlled by, or are under common | |||
| control with that entity. For the purposes of this definition, | |||
| "control" means (i) the power, direct or indirect, to cause the | |||
| direction or management of such entity, whether by contract or | |||
| otherwise, or (ii) ownership of fifty percent (50%) or more of the | |||
| outstanding shares, or (iii) beneficial ownership of such entity. | |||
| "You" (or "Your") shall mean an individual or Legal Entity | |||
| exercising permissions granted by this License. | |||
| "Source" form shall mean the preferred form for making modifications, | |||
| including but not limited to software source code, documentation | |||
| source, and configuration files. | |||
| "Object" form shall mean any form resulting from mechanical | |||
| transformation or translation of a Source form, including but | |||
| not limited to compiled object code, generated documentation, | |||
| and conversions to other media types. | |||
| "Work" shall mean the work of authorship, whether in Source or | |||
| Object form, made available under the License, as indicated by a | |||
| copyright notice that is included in or attached to the work | |||
| (an example is provided in the Appendix below). | |||
| "Derivative Works" shall mean any work, whether in Source or Object | |||
| form, that is based on (or derived from) the Work and for which the | |||
| editorial revisions, annotations, elaborations, or other modifications | |||
| represent, as a whole, an original work of authorship. For the purposes | |||
| of this License, Derivative Works shall not include works that remain | |||
| separable from, or merely link (or bind by name) to the interfaces of, | |||
| the Work and Derivative Works thereof. | |||
| "Contribution" shall mean any work of authorship, including | |||
| the original version of the Work and any modifications or additions | |||
| to that Work or Derivative Works thereof, that is intentionally | |||
| submitted to Licensor for inclusion in the Work by the copyright owner | |||
| or by an individual or Legal Entity authorized to submit on behalf of | |||
| the copyright owner. For the purposes of this definition, "submitted" | |||
| means any form of electronic, verbal, or written communication sent | |||
| to the Licensor or its representatives, including but not limited to | |||
| communication on electronic mailing lists, source code control systems, | |||
| and issue tracking systems that are managed by, or on behalf of, the | |||
| Licensor for the purpose of discussing and improving the Work, but | |||
| excluding communication that is conspicuously marked or otherwise | |||
| designated in writing by the copyright owner as "Not a Contribution." | |||
| "Contributor" shall mean Licensor and any individual or Legal Entity | |||
| on behalf of whom a Contribution has been received by Licensor and | |||
| subsequently incorporated within the Work. | |||
| 2. Grant of Copyright License. Subject to the terms and conditions of | |||
| this License, each Contributor hereby grants to You a perpetual, | |||
| worldwide, non-exclusive, no-charge, royalty-free, irrevocable | |||
| copyright license to reproduce, prepare Derivative Works of, | |||
| publicly display, publicly perform, sublicense, and distribute the | |||
| Work and such Derivative Works in Source or Object form. | |||
| 3. Grant of Patent License. Subject to the terms and conditions of | |||
| this License, each Contributor hereby grants to You a perpetual, | |||
| worldwide, non-exclusive, no-charge, royalty-free, irrevocable | |||
| (except as stated in this section) patent license to make, have made, | |||
| use, offer to sell, sell, import, and otherwise transfer the Work, | |||
| where such license applies only to those patent claims licensable | |||
| by such Contributor that are necessarily infringed by their | |||
| Contribution(s) alone or by combination of their Contribution(s) | |||
| with the Work to which such Contribution(s) was submitted. If You | |||
| institute patent litigation against any entity (including a | |||
| cross-claim or counterclaim in a lawsuit) alleging that the Work | |||
| or a Contribution incorporated within the Work constitutes direct | |||
| or contributory patent infringement, then any patent licenses | |||
| granted to You under this License for that Work shall terminate | |||
| as of the date such litigation is filed. | |||
| 4. Redistribution. You may reproduce and distribute copies of the | |||
| Work or Derivative Works thereof in any medium, with or without | |||
| modifications, and in Source or Object form, provided that You | |||
| meet the following conditions: | |||
| (a) You must give any other recipients of the Work or | |||
| Derivative Works a copy of this License; and | |||
| (b) You must cause any modified files to carry prominent notices | |||
| stating that You changed the files; and | |||
| (c) You must retain, in the Source form of any Derivative Works | |||
| that You distribute, all copyright, patent, trademark, and | |||
| attribution notices from the Source form of the Work, | |||
| excluding those notices that do not pertain to any part of | |||
| the Derivative Works; and | |||
| (d) If the Work includes a "NOTICE" text file as part of its | |||
| distribution, then any Derivative Works that You distribute must | |||
| include a readable copy of the attribution notices contained | |||
| within such NOTICE file, excluding those notices that do not | |||
| pertain to any part of the Derivative Works, in at least one | |||
| of the following places: within a NOTICE text file distributed | |||
| as part of the Derivative Works; within the Source form or | |||
| documentation, if provided along with the Derivative Works; or, | |||
| within a display generated by the Derivative Works, if and | |||
| wherever such third-party notices normally appear. The contents | |||
| of the NOTICE file are for informational purposes only and | |||
| do not modify the License. You may add Your own attribution | |||
| notices within Derivative Works that You distribute, alongside | |||
| or as an addendum to the NOTICE text from the Work, provided | |||
| that such additional attribution notices cannot be construed | |||
| as modifying the License. | |||
| You may add Your own copyright statement to Your modifications and | |||
| may provide additional or different license terms and conditions | |||
| for use, reproduction, or distribution of Your modifications, or | |||
| for any such Derivative Works as a whole, provided Your use, | |||
| reproduction, and distribution of the Work otherwise complies with | |||
| the conditions stated in this License. | |||
| 5. Submission of Contributions. Unless You explicitly state otherwise, | |||
| any Contribution intentionally submitted for inclusion in the Work | |||
| by You to the Licensor shall be under the terms and conditions of | |||
| this License, without any additional terms or conditions. | |||
| Notwithstanding the above, nothing herein shall supersede or modify | |||
| the terms of any separate license agreement you may have executed | |||
| with Licensor regarding such Contributions. | |||
| 6. Trademarks. This License does not grant permission to use the trade | |||
| names, trademarks, service marks, or product names of the Licensor, | |||
| except as required for reasonable and customary use in describing the | |||
| origin of the Work and reproducing the content of the NOTICE file. | |||
| 7. Disclaimer of Warranty. Unless required by applicable law or | |||
| agreed to in writing, Licensor provides the Work (and each | |||
| Contributor provides its Contributions) on an "AS IS" BASIS, | |||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or | |||
| implied, including, without limitation, any warranties or conditions | |||
| of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A | |||
| PARTICULAR PURPOSE. You are solely responsible for determining the | |||
| appropriateness of using or redistributing the Work and assume any | |||
| risks associated with Your exercise of permissions under this License. | |||
| 8. Limitation of Liability. In no event and under no legal theory, | |||
| whether in tort (including negligence), contract, or otherwise, | |||
| unless required by applicable law (such as deliberate and grossly | |||
| negligent acts) or agreed to in writing, shall any Contributor be | |||
| liable to You for damages, including any direct, indirect, special, | |||
| incidental, or consequential damages of any character arising as a | |||
| result of this License or out of the use or inability to use the | |||
| Work (including but not limited to damages for loss of goodwill, | |||
| work stoppage, computer failure or malfunction, or any and all | |||
| other commercial damages or losses), even if such Contributor | |||
| has been advised of the possibility of such damages. | |||
| 9. Accepting Warranty or Additional Liability. While redistributing | |||
| the Work or Derivative Works thereof, You may choose to offer, | |||
| and charge a fee for, acceptance of support, warranty, indemnity, | |||
| or other liability obligations and/or rights consistent with this | |||
| License. However, in accepting such obligations, You may act only | |||
| on Your own behalf and on Your sole responsibility, not on behalf | |||
| of any other Contributor, and only if You agree to indemnify, | |||
| defend, and hold each Contributor harmless for any liability | |||
| incurred by, or claims asserted against, such Contributor by reason | |||
| of your accepting any such warranty or additional liability. | |||
| END OF TERMS AND CONDITIONS | |||
| APPENDIX: How to apply the Apache License to your work. | |||
| To apply the Apache License to your work, attach the following | |||
| boilerplate notice, with the fields enclosed by brackets "[]" | |||
| replaced with your own identifying information. (Don't include | |||
| the brackets!) The text should be enclosed in the appropriate | |||
| comment syntax for the file format. We also recommend that a | |||
| file or class name and description of purpose be included on the | |||
| same "printed page" as the copyright notice for easier | |||
| identification within third-party archives. | |||
| Copyright 2016-2017 The New York Times Company | |||
| Licensed under the Apache License, Version 2.0 (the "License"); | |||
| you may not use this file except in compliance with the License. | |||
| You may obtain a copy of the License at | |||
| http://www.apache.org/licenses/LICENSE-2.0 | |||
| Unless required by applicable law or agreed to in writing, software | |||
| distributed under the License is distributed on an "AS IS" BASIS, | |||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| See the License for the specific language governing permissions and | |||
| limitations under the License. | |||
| ------------------ | |||
| Files: s2/cmd/internal/readahead/* | |||
| The MIT License (MIT) | |||
| Copyright (c) 2015 Klaus Post | |||
| Permission is hereby granted, free of charge, to any person obtaining a copy | |||
| of this software and associated documentation files (the "Software"), to deal | |||
| in the Software without restriction, including without limitation the rights | |||
| to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | |||
| copies of the Software, and to permit persons to whom the Software is | |||
| furnished to do so, subject to the following conditions: | |||
| The above copyright notice and this permission notice shall be included in all | |||
| copies or substantial portions of the Software. | |||
| THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |||
| IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |||
| FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | |||
| AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |||
| LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | |||
| OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |||
| SOFTWARE. | |||
| --------------------- | |||
| Files: snappy/* | |||
| Files: internal/snapref/* | |||
| Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. | |||
| Redistribution and use in source and binary forms, with or without | |||
| modification, are permitted provided that the following conditions are | |||
| met: | |||
| * Redistributions of source code must retain the above copyright | |||
| notice, this list of conditions and the following disclaimer. | |||
| * Redistributions in binary form must reproduce the above | |||
| copyright notice, this list of conditions and the following disclaimer | |||
| in the documentation and/or other materials provided with the | |||
| distribution. | |||
| * Neither the name of Google Inc. nor the names of its | |||
| contributors may be used to endorse or promote products derived from | |||
| this software without specific prior written permission. | |||
| THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |||
| "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |||
| LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |||
| A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |||
| OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |||
| SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |||
| LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |||
| DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |||
| THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |||
| (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |||
| OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |||
| ----------------- | |||
| Files: s2/cmd/internal/filepathx/* | |||
| Copyright 2016 The filepathx Authors | |||
| Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: | |||
| The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. | |||
| THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | |||
| @@ -0,0 +1,560 @@ | |||
| # compress | |||
| This package provides various compression algorithms. | |||
| * [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression in pure Go. | |||
| * [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) is a high performance replacement for Snappy. | |||
| * Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a drop-in replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib). | |||
| * [snappy](https://github.com/klauspost/compress/tree/master/snappy) is a drop-in replacement for `github.com/golang/snappy` offering better compression and concurrent streams. | |||
| * [huff0](https://github.com/klauspost/compress/tree/master/huff0) and [FSE](https://github.com/klauspost/compress/tree/master/fse) implementations for raw entropy encoding. | |||
| * [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) Provides client and server wrappers for handling gzipped requests efficiently. | |||
| * [pgzip](https://github.com/klauspost/pgzip) is a separate package that provides a very fast parallel gzip implementation. | |||
| * [fuzz package](https://github.com/klauspost/compress-fuzz) for fuzz testing all compressors/decompressors here. | |||
| [](https://pkg.go.dev/github.com/klauspost/compress?tab=subdirectories) | |||
| [](https://github.com/klauspost/compress/actions/workflows/go.yml) | |||
| [](https://sourcegraph.com/github.com/klauspost/compress?badge) | |||
| # changelog | |||
| * July 13, 2022 (v1.15.8) | |||
| * gzip: fix stack exhaustion bug in Reader.Read https://github.com/klauspost/compress/pull/641 | |||
| * s2: Add Index header trim/restore https://github.com/klauspost/compress/pull/638 | |||
| * zstd: Optimize seqdeq amd64 asm by @greatroar in https://github.com/klauspost/compress/pull/636 | |||
| * zstd: Improve decoder memcopy https://github.com/klauspost/compress/pull/637 | |||
| * huff0: Pass a single bitReader pointer to asm by @greatroar in https://github.com/klauspost/compress/pull/634 | |||
| * zstd: Branchless getBits for amd64 w/o BMI2 by @greatroar in https://github.com/klauspost/compress/pull/640 | |||
| * gzhttp: Remove header before writing https://github.com/klauspost/compress/pull/639 | |||
| * June 29, 2022 (v1.15.7) | |||
| * s2: Fix absolute forward seeks https://github.com/klauspost/compress/pull/633 | |||
| * zip: Merge upstream https://github.com/klauspost/compress/pull/631 | |||
| * zip: Re-add zip64 fix https://github.com/klauspost/compress/pull/624 | |||
| * zstd: translate fseDecoder.buildDtable into asm by @WojciechMula in https://github.com/klauspost/compress/pull/598 | |||
| * flate: Faster histograms https://github.com/klauspost/compress/pull/620 | |||
| * deflate: Use compound hcode https://github.com/klauspost/compress/pull/622 | |||
| * June 3, 2022 (v1.15.6) | |||
| * s2: Improve coding for long, close matches https://github.com/klauspost/compress/pull/613 | |||
| * s2c: Add Snappy/S2 stream recompression https://github.com/klauspost/compress/pull/611 | |||
| * zstd: Always use configured block size https://github.com/klauspost/compress/pull/605 | |||
| * zstd: Fix incorrect hash table placement for dict encoding in default https://github.com/klauspost/compress/pull/606 | |||
| * zstd: Apply default config to ZipDecompressor without options https://github.com/klauspost/compress/pull/608 | |||
| * gzhttp: Exclude more common archive formats https://github.com/klauspost/compress/pull/612 | |||
| * s2: Add ReaderIgnoreCRC https://github.com/klauspost/compress/pull/609 | |||
| * s2: Remove sanity load on index creation https://github.com/klauspost/compress/pull/607 | |||
| * snappy: Use dedicated function for scoring https://github.com/klauspost/compress/pull/614 | |||
| * s2c+s2d: Use official snappy framed extension https://github.com/klauspost/compress/pull/610 | |||
| * May 25, 2022 (v1.15.5) | |||
| * s2: Add concurrent stream decompression https://github.com/klauspost/compress/pull/602 | |||
| * s2: Fix final emit oob read crash on amd64 https://github.com/klauspost/compress/pull/601 | |||
| * huff0: asm implementation of Decompress1X by @WojciechMula https://github.com/klauspost/compress/pull/596 | |||
| * zstd: Use 1 less goroutine for stream decoding https://github.com/klauspost/compress/pull/588 | |||
| * zstd: Copy literal in 16 byte blocks when possible https://github.com/klauspost/compress/pull/592 | |||
| * zstd: Speed up when WithDecoderLowmem(false) https://github.com/klauspost/compress/pull/599 | |||
| * zstd: faster next state update in BMI2 version of decode by @WojciechMula in https://github.com/klauspost/compress/pull/593 | |||
| * huff0: Do not check max size when reading table. https://github.com/klauspost/compress/pull/586 | |||
| * flate: Inplace hashing for level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/590 | |||
| * May 11, 2022 (v1.15.4) | |||
| * huff0: decompress directly into output by @WojciechMula in [#577](https://github.com/klauspost/compress/pull/577) | |||
| * inflate: Keep dict on stack [#581](https://github.com/klauspost/compress/pull/581) | |||
| * zstd: Faster decoding memcopy in asm [#583](https://github.com/klauspost/compress/pull/583) | |||
| * zstd: Fix ignored crc [#580](https://github.com/klauspost/compress/pull/580) | |||
| * May 5, 2022 (v1.15.3) | |||
| * zstd: Allow to ignore checksum checking by @WojciechMula [#572](https://github.com/klauspost/compress/pull/572) | |||
| * s2: Fix incorrect seek for io.SeekEnd in [#575](https://github.com/klauspost/compress/pull/575) | |||
| * Apr 26, 2022 (v1.15.2) | |||
| * zstd: Add x86-64 assembly for decompression on streams and blocks. Contributed by [@WojciechMula](https://github.com/WojciechMula). Typically 2x faster. [#528](https://github.com/klauspost/compress/pull/528) [#531](https://github.com/klauspost/compress/pull/531) [#545](https://github.com/klauspost/compress/pull/545) [#537](https://github.com/klauspost/compress/pull/537) | |||
| * zstd: Add options to ZipDecompressor and fixes [#539](https://github.com/klauspost/compress/pull/539) | |||
| * s2: Use sorted search for index [#555](https://github.com/klauspost/compress/pull/555) | |||
| * Minimum version is Go 1.16, added CI test on 1.18. | |||
| * Mar 11, 2022 (v1.15.1) | |||
| * huff0: Add x86 assembly of Decode4X by @WojciechMula in [#512](https://github.com/klauspost/compress/pull/512) | |||
| * zstd: Reuse zip decoders in [#514](https://github.com/klauspost/compress/pull/514) | |||
| * zstd: Detect extra block data and report as corrupted in [#520](https://github.com/klauspost/compress/pull/520) | |||
| * zstd: Handle zero sized frame content size stricter in [#521](https://github.com/klauspost/compress/pull/521) | |||
| * zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523) | |||
| * Mar 3, 2022 (v1.15.0) | |||
| * zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498) | |||
| * zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505) | |||
| * huff0: Prevent single blocks exceeding 16 bits by @klauspost in [#507](https://github.com/klauspost/compress/pull/507) | |||
| * flate: Inline literal emission by @klauspost in [#509](https://github.com/klauspost/compress/pull/509) | |||
| * gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400) | |||
| * gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510) | |||
| <details> | |||
| <summary>See Details</summary> | |||
| Both compression and decompression now supports "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines. | |||
| Stream decompression is now faster on asynchronous, since the goroutine allocation much more effectively splits the workload. On typical streams this will typically use 2 cores fully for decompression. When a stream has finished decoding no goroutines will be left over, so decoders can now safely be pooled and still be garbage collected. | |||
| While the release has been extensively tested, it is recommended to test when upgrading. | |||
| </details> | |||
| * Feb 22, 2022 (v1.14.4) | |||
| * flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503) | |||
| * zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502) | |||
| * zip: don't read data descriptor early by @saracen in [#501](https://github.com/klauspost/compress/pull/501) #501 | |||
| * huff0: Use static decompression buffer up to 30% faster by @klauspost in [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500) | |||
| * Feb 17, 2022 (v1.14.3) | |||
| * flate: Improve fastest levels compression speed ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494) [#478](https://github.com/klauspost/compress/pull/478) | |||
| * flate: Faster decompression speed, ~5-10%. [#483](https://github.com/klauspost/compress/pull/483) | |||
| * s2: Faster compression with Go v1.18 and amd64 microarch level 3+. [#484](https://github.com/klauspost/compress/pull/484) [#486](https://github.com/klauspost/compress/pull/486) | |||
| * Jan 25, 2022 (v1.14.2) | |||
| * zstd: improve header decoder by @dsnet [#476](https://github.com/klauspost/compress/pull/476) | |||
| * zstd: Add bigger default blocks [#469](https://github.com/klauspost/compress/pull/469) | |||
| * zstd: Remove unused decompression buffer [#470](https://github.com/klauspost/compress/pull/470) | |||
| * zstd: Fix logically dead code by @ningmingxiao [#472](https://github.com/klauspost/compress/pull/472) | |||
| * flate: Improve level 7-9 [#471](https://github.com/klauspost/compress/pull/471) [#473](https://github.com/klauspost/compress/pull/473) | |||
| * zstd: Add noasm tag for xxhash [#475](https://github.com/klauspost/compress/pull/475) | |||
| * Jan 11, 2022 (v1.14.1) | |||
| * s2: Add stream index in [#462](https://github.com/klauspost/compress/pull/462) | |||
| * flate: Speed and efficiency improvements in [#439](https://github.com/klauspost/compress/pull/439) [#461](https://github.com/klauspost/compress/pull/461) [#455](https://github.com/klauspost/compress/pull/455) [#452](https://github.com/klauspost/compress/pull/452) [#458](https://github.com/klauspost/compress/pull/458) | |||
| * zstd: Performance improvement in [#420]( https://github.com/klauspost/compress/pull/420) [#456](https://github.com/klauspost/compress/pull/456) [#437](https://github.com/klauspost/compress/pull/437) [#467](https://github.com/klauspost/compress/pull/467) [#468](https://github.com/klauspost/compress/pull/468) | |||
| * zstd: add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464) | |||
| * Add garbled for binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445) | |||
| <details> | |||
| <summary>See changes to v1.13.x</summary> | |||
| * Aug 30, 2021 (v1.13.5) | |||
| * gz/zlib/flate: Alias stdlib errors [#425](https://github.com/klauspost/compress/pull/425) | |||
| * s2: Add block support to commandline tools [#413](https://github.com/klauspost/compress/pull/413) | |||
| * zstd: pooledZipWriter should return Writers to the same pool [#426](https://github.com/klauspost/compress/pull/426) | |||
| * Removed golang/snappy as external dependency for tests [#421](https://github.com/klauspost/compress/pull/421) | |||
| * Aug 12, 2021 (v1.13.4) | |||
| * Add [snappy replacement package](https://github.com/klauspost/compress/tree/master/snappy). | |||
| * zstd: Fix incorrect encoding in "best" mode [#415](https://github.com/klauspost/compress/pull/415) | |||
| * Aug 3, 2021 (v1.13.3) | |||
| * zstd: Improve Best compression [#404](https://github.com/klauspost/compress/pull/404) | |||
| * zstd: Fix WriteTo error forwarding [#411](https://github.com/klauspost/compress/pull/411) | |||
| * gzhttp: Return http.HandlerFunc instead of http.Handler. Unlikely breaking change. [#406](https://github.com/klauspost/compress/pull/406) | |||
| * s2sx: Fix max size error [#399](https://github.com/klauspost/compress/pull/399) | |||
| * zstd: Add optional stream content size on reset [#401](https://github.com/klauspost/compress/pull/401) | |||
| * zstd: use SpeedBestCompression for level >= 10 [#410](https://github.com/klauspost/compress/pull/410) | |||
| * Jun 14, 2021 (v1.13.1) | |||
| * s2: Add full Snappy output support [#396](https://github.com/klauspost/compress/pull/396) | |||
| * zstd: Add configurable [Decoder window](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithDecoderMaxWindow) size [#394](https://github.com/klauspost/compress/pull/394) | |||
| * gzhttp: Add header to skip compression [#389](https://github.com/klauspost/compress/pull/389) | |||
| * s2: Improve speed with bigger output margin [#395](https://github.com/klauspost/compress/pull/395) | |||
| * Jun 3, 2021 (v1.13.0) | |||
| * Added [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp#gzip-handler) which allows wrapping HTTP servers and clients with GZIP compressors. | |||
| * zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382) | |||
| * zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380) | |||
| </details> | |||
| <details> | |||
| <summary>See changes to v1.12.x</summary> | |||
| * May 25, 2021 (v1.12.3) | |||
| * deflate: Better/faster Huffman encoding [#374](https://github.com/klauspost/compress/pull/374) | |||
| * deflate: Allocate less for history. [#375](https://github.com/klauspost/compress/pull/375) | |||
| * zstd: Forward read errors [#373](https://github.com/klauspost/compress/pull/373) | |||
| * Apr 27, 2021 (v1.12.2) | |||
| * zstd: Improve better/best compression [#360](https://github.com/klauspost/compress/pull/360) [#364](https://github.com/klauspost/compress/pull/364) [#365](https://github.com/klauspost/compress/pull/365) | |||
| * zstd: Add helpers to compress/decompress zstd inside zip files [#363](https://github.com/klauspost/compress/pull/363) | |||
| * deflate: Improve level 5+6 compression [#367](https://github.com/klauspost/compress/pull/367) | |||
| * s2: Improve better/best compression [#358](https://github.com/klauspost/compress/pull/358) [#359](https://github.com/klauspost/compress/pull/359) | |||
| * s2: Load after checking src limit on amd64. [#362](https://github.com/klauspost/compress/pull/362) | |||
| * s2sx: Limit max executable size [#368](https://github.com/klauspost/compress/pull/368) | |||
| * Apr 14, 2021 (v1.12.1) | |||
| * snappy package removed. Upstream added as dependency. | |||
| * s2: Better compression in "best" mode [#353](https://github.com/klauspost/compress/pull/353) | |||
| * s2sx: Add stdin input and detect pre-compressed from signature [#352](https://github.com/klauspost/compress/pull/352) | |||
| * s2c/s2d: Add http as possible input [#348](https://github.com/klauspost/compress/pull/348) | |||
| * s2c/s2d/s2sx: Always truncate when writing files [#352](https://github.com/klauspost/compress/pull/352) | |||
| * zstd: Reduce memory usage further when using [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) [#346](https://github.com/klauspost/compress/pull/346) | |||
| * s2: Fix potential problem with amd64 assembly and profilers [#349](https://github.com/klauspost/compress/pull/349) | |||
| </details> | |||
| <details> | |||
| <summary>See changes to v1.11.x</summary> | |||
| * Mar 26, 2021 (v1.11.13) | |||
| * zstd: Big speedup on small dictionary encodes [#344](https://github.com/klauspost/compress/pull/344) [#345](https://github.com/klauspost/compress/pull/345) | |||
| * zstd: Add [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) encoder option [#336](https://github.com/klauspost/compress/pull/336) | |||
| * deflate: Improve entropy compression [#338](https://github.com/klauspost/compress/pull/338) | |||
| * s2: Clean up and minor performance improvement in best [#341](https://github.com/klauspost/compress/pull/341) | |||
| * Mar 5, 2021 (v1.11.12) | |||
| * s2: Add `s2sx` binary that creates [self extracting archives](https://github.com/klauspost/compress/tree/master/s2#s2sx-self-extracting-archives). | |||
| * s2: Speed up decompression on non-assembly platforms [#328](https://github.com/klauspost/compress/pull/328) | |||
| * Mar 1, 2021 (v1.11.9) | |||
| * s2: Add ARM64 decompression assembly. Around 2x output speed. [#324](https://github.com/klauspost/compress/pull/324) | |||
| * s2: Improve "better" speed and efficiency. [#325](https://github.com/klauspost/compress/pull/325) | |||
| * s2: Fix binaries. | |||
| * Feb 25, 2021 (v1.11.8) | |||
| * s2: Fixed occasional out-of-bounds write on amd64. Upgrade recommended. | |||
| * s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315) | |||
| * s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322) | |||
| * zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314) | |||
| * zip: Fix zip64 headers. [#313](https://github.com/klauspost/compress/pull/313) | |||
| * Jan 14, 2021 (v1.11.7) | |||
| * Use Bytes() interface to get bytes across packages. [#309](https://github.com/klauspost/compress/pull/309) | |||
| * s2: Add 'best' compression option. [#310](https://github.com/klauspost/compress/pull/310) | |||
| * s2: Add ReaderMaxBlockSize, changes `s2.NewReader` signature to include varargs. [#311](https://github.com/klauspost/compress/pull/311) | |||
| * s2: Fix crash on small better buffers. [#308](https://github.com/klauspost/compress/pull/308) | |||
| * s2: Clean up decoder. [#312](https://github.com/klauspost/compress/pull/312) | |||
| * Jan 7, 2021 (v1.11.6) | |||
| * zstd: Make decoder allocations smaller [#306](https://github.com/klauspost/compress/pull/306) | |||
| * zstd: Free Decoder resources when Reset is called with a nil io.Reader [#305](https://github.com/klauspost/compress/pull/305) | |||
| * Dec 20, 2020 (v1.11.4) | |||
| * zstd: Add Best compression mode [#304](https://github.com/klauspost/compress/pull/304) | |||
| * Add header decoder [#299](https://github.com/klauspost/compress/pull/299) | |||
| * s2: Add uncompressed stream option [#297](https://github.com/klauspost/compress/pull/297) | |||
| * Simplify/speed up small blocks with known max size. [#300](https://github.com/klauspost/compress/pull/300) | |||
| * zstd: Always reset literal dict encoder [#303](https://github.com/klauspost/compress/pull/303) | |||
| * Nov 15, 2020 (v1.11.3) | |||
| * inflate: 10-15% faster decompression [#293](https://github.com/klauspost/compress/pull/293) | |||
| * zstd: Tweak DecodeAll default allocation [#295](https://github.com/klauspost/compress/pull/295) | |||
| * Oct 11, 2020 (v1.11.2) | |||
| * s2: Fix out of bounds read in "better" block compression [#291](https://github.com/klauspost/compress/pull/291) | |||
| * Oct 1, 2020 (v1.11.1) | |||
| * zstd: Set allLitEntropy true in default configuration [#286](https://github.com/klauspost/compress/pull/286) | |||
| * Sept 8, 2020 (v1.11.0) | |||
| * zstd: Add experimental compression [dictionaries](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) [#281](https://github.com/klauspost/compress/pull/281) | |||
| * zstd: Fix mixed Write and ReadFrom calls [#282](https://github.com/klauspost/compress/pull/282) | |||
| * inflate/gz: Limit variable shifts, ~5% faster decompression [#274](https://github.com/klauspost/compress/pull/274) | |||
| </details> | |||
| <details> | |||
| <summary>See changes to v1.10.x</summary> | |||
| * July 8, 2020 (v1.10.11) | |||
| * zstd: Fix extra block when compressing with ReadFrom. [#278](https://github.com/klauspost/compress/pull/278) | |||
| * huff0: Also populate compression table when reading decoding table. [#275](https://github.com/klauspost/compress/pull/275) | |||
| * June 23, 2020 (v1.10.10) | |||
| * zstd: Skip entropy compression in fastest mode when no matches. [#270](https://github.com/klauspost/compress/pull/270) | |||
| * June 16, 2020 (v1.10.9): | |||
| * zstd: API change for specifying dictionaries. See [#268](https://github.com/klauspost/compress/pull/268) | |||
| * zip: update CreateHeaderRaw to handle zip64 fields. [#266](https://github.com/klauspost/compress/pull/266) | |||
| * Fuzzit tests removed. The service has been purchased and is no longer available. | |||
| * June 5, 2020 (v1.10.8): | |||
| * 1.15x faster zstd block decompression. [#265](https://github.com/klauspost/compress/pull/265) | |||
| * June 1, 2020 (v1.10.7): | |||
| * Added zstd decompression [dictionary support](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) | |||
| * Increase zstd decompression speed up to 1.19x. [#259](https://github.com/klauspost/compress/pull/259) | |||
| * Remove internal reset call in zstd compression and reduce allocations. [#263](https://github.com/klauspost/compress/pull/263) | |||
| * May 21, 2020: (v1.10.6) | |||
| * zstd: Reduce allocations while decoding. [#258](https://github.com/klauspost/compress/pull/258), [#252](https://github.com/klauspost/compress/pull/252) | |||
| * zstd: Stricter decompression checks. | |||
| * April 12, 2020: (v1.10.5) | |||
| * s2-commands: Flush output when receiving SIGINT. [#239](https://github.com/klauspost/compress/pull/239) | |||
| * Apr 8, 2020: (v1.10.4) | |||
| * zstd: Minor/special case optimizations. [#251](https://github.com/klauspost/compress/pull/251), [#250](https://github.com/klauspost/compress/pull/250), [#249](https://github.com/klauspost/compress/pull/249), [#247](https://github.com/klauspost/compress/pull/247) | |||
| * Mar 11, 2020: (v1.10.3) | |||
| * s2: Use S2 encoder in pure Go mode for Snappy output as well. [#245](https://github.com/klauspost/compress/pull/245) | |||
| * s2: Fix pure Go block encoder. [#244](https://github.com/klauspost/compress/pull/244) | |||
| * zstd: Added "better compression" mode. [#240](https://github.com/klauspost/compress/pull/240) | |||
| * zstd: Improve speed of fastest compression mode by 5-10% [#241](https://github.com/klauspost/compress/pull/241) | |||
| * zstd: Skip creating encoders when not needed. [#238](https://github.com/klauspost/compress/pull/238) | |||
| * Feb 27, 2020: (v1.10.2) | |||
| * Close to 50% speedup in inflate (gzip/zip decompression). [#236](https://github.com/klauspost/compress/pull/236) [#234](https://github.com/klauspost/compress/pull/234) [#232](https://github.com/klauspost/compress/pull/232) | |||
| * Reduce deflate level 1-6 memory usage up to 59%. [#227](https://github.com/klauspost/compress/pull/227) | |||
| * Feb 18, 2020: (v1.10.1) | |||
| * Fix zstd crash when resetting multiple times without sending data. [#226](https://github.com/klauspost/compress/pull/226) | |||
| * deflate: Fix dictionary use on level 1-6. [#224](https://github.com/klauspost/compress/pull/224) | |||
| * Remove deflate writer reference when closing. [#224](https://github.com/klauspost/compress/pull/224) | |||
| * Feb 4, 2020: (v1.10.0) | |||
| * Add optional dictionary to [stateless deflate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc#StatelessDeflate). Breaking change, send `nil` for previous behaviour. [#216](https://github.com/klauspost/compress/pull/216) | |||
| * Fix buffer overflow on repeated small block deflate. [#218](https://github.com/klauspost/compress/pull/218) | |||
| * Allow copying content from an existing ZIP file without decompressing+compressing. [#214](https://github.com/klauspost/compress/pull/214) | |||
| * Added [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) AMD64 assembler and various optimizations. Stream speed >10GB/s. [#186](https://github.com/klauspost/compress/pull/186) | |||
| </details> | |||
| <details> | |||
| <summary>See changes prior to v1.10.0</summary> | |||
| * Jan 20,2020 (v1.9.8) Optimize gzip/deflate with better size estimates and faster table generation. [#207](https://github.com/klauspost/compress/pull/207) by [luyu6056](https://github.com/luyu6056), [#206](https://github.com/klauspost/compress/pull/206). | |||
| * Jan 11, 2020: S2 Encode/Decode will use provided buffer if capacity is big enough. [#204](https://github.com/klauspost/compress/pull/204) | |||
| * Jan 5, 2020: (v1.9.7) Fix another zstd regression in v1.9.5 - v1.9.6 removed. | |||
| * Jan 4, 2020: (v1.9.6) Regression in v1.9.5 fixed causing corrupt zstd encodes in rare cases. | |||
| * Jan 4, 2020: Faster IO in [s2c + s2d commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) compression/decompression. [#192](https://github.com/klauspost/compress/pull/192) | |||
| * Dec 29, 2019: Removed v1.9.5 since fuzz tests showed a compatibility problem with the reference zstandard decoder. | |||
| * Dec 29, 2019: (v1.9.5) zstd: 10-20% faster block compression. [#199](https://github.com/klauspost/compress/pull/199) | |||
| * Dec 29, 2019: [zip](https://godoc.org/github.com/klauspost/compress/zip) package updated with latest Go features | |||
| * Dec 29, 2019: zstd: Single segment flag conditions tweaked. [#197](https://github.com/klauspost/compress/pull/197) | |||
| * Dec 18, 2019: s2: Faster compression when ReadFrom is used. [#198](https://github.com/klauspost/compress/pull/198) | |||
| * Dec 10, 2019: s2: Fix repeat length output when just above at 16MB limit. | |||
| * Dec 10, 2019: zstd: Add function to get decoder as io.ReadCloser. [#191](https://github.com/klauspost/compress/pull/191) | |||
| * Dec 3, 2019: (v1.9.4) S2: limit max repeat length. [#188](https://github.com/klauspost/compress/pull/188) | |||
| * Dec 3, 2019: Add [WithNoEntropyCompression](https://godoc.org/github.com/klauspost/compress/zstd#WithNoEntropyCompression) to zstd [#187](https://github.com/klauspost/compress/pull/187) | |||
| * Dec 3, 2019: Reduce memory use for tests. Check for leaked goroutines. | |||
| * Nov 28, 2019 (v1.9.3) Less allocations in stateless deflate. | |||
| * Nov 28, 2019: 5-20% Faster huff0 decode. Impacts zstd as well. [#184](https://github.com/klauspost/compress/pull/184) | |||
| * Nov 12, 2019 (v1.9.2) Added [Stateless Compression](#stateless-compression) for gzip/deflate. | |||
| * Nov 12, 2019: Fixed zstd decompression of large single blocks. [#180](https://github.com/klauspost/compress/pull/180) | |||
| * Nov 11, 2019: Set default [s2c](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) block size to 4MB. | |||
| * Nov 11, 2019: Reduce inflate memory use by 1KB. | |||
| * Nov 10, 2019: Less allocations in deflate bit writer. | |||
| * Nov 10, 2019: Fix inconsistent error returned by zstd decoder. | |||
| * Oct 28, 2019 (v1.9.1) zstd: Fix crash when compressing blocks. [#174](https://github.com/klauspost/compress/pull/174) | |||
| * Oct 24, 2019 (v1.9.0) zstd: Fix rare data corruption [#173](https://github.com/klauspost/compress/pull/173) | |||
| * Oct 24, 2019 zstd: Fix huff0 out of buffer write [#171](https://github.com/klauspost/compress/pull/171) and always return errors [#172](https://github.com/klauspost/compress/pull/172) | |||
| * Oct 10, 2019: Big deflate rewrite, 30-40% faster with better compression [#105](https://github.com/klauspost/compress/pull/105) | |||
| </details> | |||
| <details> | |||
| <summary>See changes prior to v1.9.0</summary> | |||
| * Oct 10, 2019: (v1.8.6) zstd: Allow partial reads to get flushed data. [#169](https://github.com/klauspost/compress/pull/169) | |||
| * Oct 3, 2019: Fix inconsistent results on broken zstd streams. | |||
| * Sep 25, 2019: Added `-rm` (remove source files) and `-q` (no output except errors) to `s2c` and `s2d` [commands](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) | |||
| * Sep 16, 2019: (v1.8.4) Add `s2c` and `s2d` [commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools). | |||
| * Sep 10, 2019: (v1.8.3) Fix s2 decoder [Skip](https://godoc.org/github.com/klauspost/compress/s2#Reader.Skip). | |||
| * Sep 7, 2019: zstd: Added [WithWindowSize](https://godoc.org/github.com/klauspost/compress/zstd#WithWindowSize), contributed by [ianwilkes](https://github.com/ianwilkes). | |||
| * Sep 5, 2019: (v1.8.2) Add [WithZeroFrames](https://godoc.org/github.com/klauspost/compress/zstd#WithZeroFrames) which adds full zero payload block encoding option. | |||
| * Sep 5, 2019: Lazy initialization of zstandard predefined en/decoder tables. | |||
| * Aug 26, 2019: (v1.8.1) S2: 1-2% compression increase in "better" compression mode. | |||
| * Aug 26, 2019: zstd: Check maximum size of Huffman 1X compressed literals while decoding. | |||
| * Aug 24, 2019: (v1.8.0) Added [S2 compression](https://github.com/klauspost/compress/tree/master/s2#s2-compression), a high performance replacement for Snappy. | |||
| * Aug 21, 2019: (v1.7.6) Fixed minor issues found by fuzzer. One could lead to zstd not decompressing. | |||
| * Aug 18, 2019: Add [fuzzit](https://fuzzit.dev/) continuous fuzzing. | |||
| * Aug 14, 2019: zstd: Skip incompressible data 2x faster. [#147](https://github.com/klauspost/compress/pull/147) | |||
| * Aug 4, 2019 (v1.7.5): Better literal compression. [#146](https://github.com/klauspost/compress/pull/146) | |||
| * Aug 4, 2019: Faster zstd compression. [#143](https://github.com/klauspost/compress/pull/143) [#144](https://github.com/klauspost/compress/pull/144) | |||
| * Aug 4, 2019: Faster zstd decompression. [#145](https://github.com/klauspost/compress/pull/145) [#143](https://github.com/klauspost/compress/pull/143) [#142](https://github.com/klauspost/compress/pull/142) | |||
| * July 15, 2019 (v1.7.4): Fix double EOF block in rare cases on zstd encoder. | |||
| * July 15, 2019 (v1.7.3): Minor speedup/compression increase in default zstd encoder. | |||
| * July 14, 2019: zstd decoder: Fix decompression error on multiple uses with mixed content. | |||
| * July 7, 2019 (v1.7.2): Snappy update, zstd decoder potential race fix. | |||
| * June 17, 2019: zstd decompression bugfix. | |||
| * June 17, 2019: fix 32 bit builds. | |||
| * June 17, 2019: Easier use in modules (less dependencies). | |||
| * June 9, 2019: New stronger "default" [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression mode. Matches zstd default compression ratio. | |||
| * June 5, 2019: 20-40% throughput in [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and better compression. | |||
| * June 5, 2019: deflate/gzip compression: Reduce memory usage of lower compression levels. | |||
| * June 2, 2019: Added [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression! | |||
| * May 25, 2019: deflate/gzip: 10% faster bit writer, mostly visible in lower levels. | |||
| * Apr 22, 2019: [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) decompression added. | |||
| * Aug 1, 2018: Added [huff0 README](https://github.com/klauspost/compress/tree/master/huff0#huff0-entropy-compression). | |||
| * Jul 8, 2018: Added [Performance Update 2018](#performance-update-2018) below. | |||
| * Jun 23, 2018: Merged [Go 1.11 inflate optimizations](https://go-review.googlesource.com/c/go/+/102235). Go 1.9 is now required. Backwards compatible version tagged with [v1.3.0](https://github.com/klauspost/compress/releases/tag/v1.3.0). | |||
| * Apr 2, 2018: Added [huff0](https://godoc.org/github.com/klauspost/compress/huff0) en/decoder. Experimental for now, API may change. | |||
| * Mar 4, 2018: Added [FSE Entropy](https://godoc.org/github.com/klauspost/compress/fse) en/decoder. Experimental for now, API may change. | |||
| * Nov 3, 2017: Add compression [Estimate](https://godoc.org/github.com/klauspost/compress#Estimate) function. | |||
| * May 28, 2017: Reduce allocations when resetting decoder. | |||
| * Apr 02, 2017: Change back to official crc32, since changes were merged in Go 1.7. | |||
| * Jan 14, 2017: Reduce stack pressure due to array copies. See [Issue #18625](https://github.com/golang/go/issues/18625). | |||
| * Oct 25, 2016: Level 2-4 have been rewritten and now offers significantly better performance than before. | |||
| * Oct 20, 2016: Port zlib changes from Go 1.7 to fix zlib writer issue. Please update. | |||
| * Oct 16, 2016: Go 1.7 changes merged. Apples to apples this package is a few percent faster, but has a significantly better balance between speed and compression per level. | |||
| * Mar 24, 2016: Always attempt Huffman encoding on level 4-7. This improves base 64 encoded data compression. | |||
| * Mar 24, 2016: Small speedup for level 1-3. | |||
| * Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster. | |||
| * Feb 19, 2016: Handle small payloads faster in level 1-3. | |||
| * Feb 19, 2016: Added faster level 2 + 3 compression modes. | |||
| * Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5. | |||
| * Feb 14, 2016: Snappy: Merge upstream changes. | |||
| * Feb 14, 2016: Snappy: Fix aggressive skipping. | |||
| * Feb 14, 2016: Snappy: Update benchmark. | |||
| * Feb 13, 2016: Deflate: Fixed assembler problem that could lead to sub-optimal compression. | |||
| * Feb 12, 2016: Snappy: Added AMD64 SSE 4.2 optimizations to matching, which makes easy to compress material run faster. Typical speedup is around 25%. | |||
| * Feb 9, 2016: Added Snappy package fork. This version is 5-7% faster, much more on hard to compress content. | |||
| * Jan 30, 2016: Optimize level 1 to 3 by not considering static dictionary or storing uncompressed. ~4-5% speedup. | |||
| * Jan 16, 2016: Optimization on deflate level 1,2,3 compression. | |||
| * Jan 8 2016: Merge [CL 18317](https://go-review.googlesource.com/#/c/18317): fix reading, writing of zip64 archives. | |||
| * Dec 8 2015: Make level 1 and -2 deterministic even if write size differs. | |||
| * Dec 8 2015: Split encoding functions, so hashing and matching can potentially be inlined. 1-3% faster on AMD64. 5% faster on other platforms. | |||
| * Dec 8 2015: Fixed rare [one byte out-of bounds read](https://github.com/klauspost/compress/issues/20). Please update! | |||
| * Nov 23 2015: Optimization on token writer. ~2-4% faster. Contributed by [@dsnet](https://github.com/dsnet). | |||
| * Nov 20 2015: Small optimization to bit writer on 64 bit systems. | |||
| * Nov 17 2015: Fixed out-of-bound errors if the underlying Writer returned an error. See [#15](https://github.com/klauspost/compress/issues/15). | |||
| * Nov 12 2015: Added [io.WriterTo](https://golang.org/pkg/io/#WriterTo) support to gzip/inflate. | |||
| * Nov 11 2015: Merged [CL 16669](https://go-review.googlesource.com/#/c/16669/4): archive/zip: enable overriding (de)compressors per file | |||
| * Oct 15 2015: Added skipping on uncompressible data. Random data speed up >5x. | |||
| </details> | |||
| # deflate usage | |||
| The packages are drop-in replacements for standard libraries. Simply replace the import path to use them: | |||
| | old import | new import | Documentation | |||
| |--------------------|-----------------------------------------|--------------------| | |||
| | `compress/gzip` | `github.com/klauspost/compress/gzip` | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc) | |||
| | `compress/zlib` | `github.com/klauspost/compress/zlib` | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc) | |||
| | `archive/zip` | `github.com/klauspost/compress/zip` | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc) | |||
| | `compress/flate` | `github.com/klauspost/compress/flate` | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc) | |||
| * Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a dropin replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib). | |||
| You may also be interested in [pgzip](https://github.com/klauspost/pgzip), which is a drop in replacement for gzip, which support multithreaded compression on big files and the optimized [crc32](https://github.com/klauspost/crc32) package used by these packages. | |||
| The packages contains the same as the standard library, so you can use the godoc for that: [gzip](http://golang.org/pkg/compress/gzip/), [zip](http://golang.org/pkg/archive/zip/), [zlib](http://golang.org/pkg/compress/zlib/), [flate](http://golang.org/pkg/compress/flate/). | |||
| Currently there is only minor speedup on decompression (mostly CRC32 calculation). | |||
| Memory usage is typically 1MB for a Writer. stdlib is in the same range. | |||
| If you expect to have a lot of concurrently allocated Writers consider using | |||
| the stateless compress described below. | |||
| For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). | |||
| # Stateless compression | |||
| This package offers stateless compression as a special option for gzip/deflate. | |||
| It will do compression but without maintaining any state between Write calls. | |||
| This means there will be no memory kept between Write calls, but compression and speed will be suboptimal. | |||
| This is only relevant in cases where you expect to run many thousands of compressors concurrently, | |||
| but with very little activity. This is *not* intended for regular web servers serving individual requests. | |||
| Because of this, the size of actual Write calls will affect output size. | |||
| In gzip, specify level `-3` / `gzip.StatelessCompression` to enable. | |||
| For direct deflate use, NewStatelessWriter and StatelessDeflate are available. See [documentation](https://godoc.org/github.com/klauspost/compress/flate#NewStatelessWriter) | |||
| A `bufio.Writer` can of course be used to control write sizes. For example, to use a 4KB buffer: | |||
| ``` | |||
| // replace 'ioutil.Discard' with your output. | |||
| gzw, err := gzip.NewWriterLevel(ioutil.Discard, gzip.StatelessCompression) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| defer gzw.Close() | |||
| w := bufio.NewWriterSize(gzw, 4096) | |||
| defer w.Flush() | |||
| // Write to 'w' | |||
| ``` | |||
| This will only use up to 4KB in memory when the writer is idle. | |||
| Compression is almost always worse than the fastest compression level | |||
| and each write will allocate (a little) memory. | |||
| # Performance Update 2018 | |||
| It has been a while since we have been looking at the speed of this package compared to the standard library, so I thought I would re-do my tests and give some overall recommendations based on the current state. All benchmarks have been performed with Go 1.10 on my Desktop Intel(R) Core(TM) i7-2600 CPU @3.40GHz. Since I last ran the tests, I have gotten more RAM, which means tests with big files are no longer limited by my SSD. | |||
| The raw results are in my [updated spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). Due to cgo changes and upstream updates I could not get the cgo version of gzip to compile. Instead I included the [zstd](https://github.com/datadog/zstd) cgo implementation. If I get cgo gzip to work again, I might replace the results in the sheet. | |||
| The columns to take note of are: *MB/s* - the throughput. *Reduction* - the data size reduction in percent of the original. *Rel Speed* relative speed compared to the standard library at the same level. *Smaller* - how many percent smaller is the compressed output compared to stdlib. Negative means the output was bigger. *Loss* means the loss (or gain) in compression as a percentage difference of the input. | |||
| The `gzstd` (standard library gzip) and `gzkp` (this package gzip) only uses one CPU core. [`pgzip`](https://github.com/klauspost/pgzip), [`bgzf`](https://github.com/biogo/hts/tree/master/bgzf) uses all 4 cores. [`zstd`](https://github.com/DataDog/zstd) uses one core, and is a beast (but not Go, yet). | |||
| ## Overall differences. | |||
| There appears to be a roughly 5-10% speed advantage over the standard library when comparing at similar compression levels. | |||
| The biggest difference you will see is the result of [re-balancing](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) the compression levels. I wanted my library to give a smoother transition between the compression levels than the standard library. | |||
| This package attempts to provide a more smooth transition, where "1" is taking a lot of shortcuts, "5" is the reasonable trade-off and "9" is the "give me the best compression", and the values in between gives something reasonable in between. The standard library has big differences in levels 1-4, but levels 5-9 having no significant gains - often spending a lot more time than can be justified by the achieved compression. | |||
| There are links to all the test data in the [spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) in the top left field on each tab. | |||
| ## Web Content | |||
| This test set aims to emulate typical use in a web server. The test-set is 4GB data in 53k files, and is a mixture of (mostly) HTML, JS, CSS. | |||
| Since level 1 and 9 are close to being the same code, they are quite close. But looking at the levels in-between the differences are quite big. | |||
| Looking at level 6, this package is 88% faster, but will output about 6% more data. For a web server, this means you can serve 88% more data, but have to pay for 6% more bandwidth. You can draw your own conclusions on what would be the most expensive for your case. | |||
| ## Object files | |||
| This test is for typical data files stored on a server. In this case it is a collection of Go precompiled objects. They are very compressible. | |||
| The picture is similar to the web content, but with small differences since this is very compressible. Levels 2-3 offer good speed, but is sacrificing quite a bit of compression. | |||
| The standard library seems suboptimal on level 3 and 4 - offering both worse compression and speed than level 6 & 7 of this package respectively. | |||
| ## Highly Compressible File | |||
| This is a JSON file with very high redundancy. The reduction starts at 95% on level 1, so in real life terms we are dealing with something like a highly redundant stream of data, etc. | |||
| It is definitely visible that we are dealing with specialized content here, so the results are very scattered. This package does not do very well at levels 1-4, but picks up significantly at level 5 and levels 7 and 8 offering great speed for the achieved compression. | |||
| So if you know your content is extremely compressible you might want to go slightly higher than the defaults. The standard library has a huge gap between levels 3 and 4 in terms of speed (2.75x slowdown), so it offers little "middle ground". | |||
| ## Medium-High Compressible | |||
| This is a pretty common test corpus: [enwik9](http://mattmahoney.net/dc/textdata.html). It contains the first 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. This is a very good test of typical text based compression and more data heavy streams. | |||
| We see a similar picture here as in "Web Content". On equal levels some compression is sacrificed for more speed. Level 5 seems to be the best trade-off between speed and size, beating stdlib level 3 in both. | |||
| ## Medium Compressible | |||
| I will combine two test sets, one [10GB file set](http://mattmahoney.net/dc/10gb.html) and a VM disk image (~8GB). Both contain different data types and represent a typical backup scenario. | |||
| The most notable thing is how quickly the standard library drops to very low compression speeds around level 5-6 without any big gains in compression. Since this type of data is fairly common, this does not seem like good behavior. | |||
| ## Un-compressible Content | |||
| This is mainly a test of how good the algorithms are at detecting un-compressible input. The standard library only offers this feature with very conservative settings at level 1. Obviously there is no reason for the algorithms to try to compress input that cannot be compressed. The only downside is that it might skip some compressible data on false detections. | |||
| ## Huffman only compression | |||
| This compression library adds a special compression level, named `HuffmanOnly`, which allows near linear time compression. This is done by completely disabling matching of previous data, and only reduce the number of bits to represent each character. | |||
| This means that often used characters, like 'e' and ' ' (space) in text use the fewest bits to represent, and rare characters like '¤' takes more bits to represent. For more information see [wikipedia](https://en.wikipedia.org/wiki/Huffman_coding) or this nice [video](https://youtu.be/ZdooBTdW5bM). | |||
| Since this type of compression has much less variance, the compression speed is mostly unaffected by the input data, and is usually more than *180MB/s* for a single core. | |||
| The downside is that the compression ratio is usually considerably worse than even the fastest conventional compression. The compression ratio can never be better than 8:1 (12.5%). | |||
| The linear time compression can be used as a "better than nothing" mode, where you cannot risk the encoder to slow down on some content. For comparison, the size of the "Twain" text is *233460 bytes* (+29% vs. level 1) and encode speed is 144MB/s (4.5x level 1). So in this case you trade a 30% size increase for a 4 times speedup. | |||
| For more information see my blog post on [Fast Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/). | |||
| This is implemented on Go 1.7 as "Huffman Only" mode, though not exposed for gzip. | |||
| # Other packages | |||
| Here are other packages of good quality and pure Go (no cgo wrappers or autoconverted code): | |||
| * [github.com/pierrec/lz4](https://github.com/pierrec/lz4) - strong multithreaded LZ4 compression. | |||
| * [github.com/cosnicolaou/pbzip2](https://github.com/cosnicolaou/pbzip2) - multithreaded bzip2 decompression. | |||
| * [github.com/dsnet/compress](https://github.com/dsnet/compress) - brotli decompression, bzip2 writer. | |||
| # license | |||
| This code is licensed under the same conditions as the original Go code. See LICENSE file. | |||
| @@ -0,0 +1,85 @@ | |||
| package compress | |||
| import "math" | |||
| // Estimate returns a normalized compressibility estimate of block b. | |||
| // Values close to zero are likely uncompressible. | |||
| // Values above 0.1 are likely to be compressible. | |||
| // Values above 0.5 are very compressible. | |||
| // Very small lengths will return 0. | |||
| func Estimate(b []byte) float64 { | |||
| if len(b) < 16 { | |||
| return 0 | |||
| } | |||
| // Correctly predicted order 1 | |||
| hits := 0 | |||
| lastMatch := false | |||
| var o1 [256]byte | |||
| var hist [256]int | |||
| c1 := byte(0) | |||
| for _, c := range b { | |||
| if c == o1[c1] { | |||
| // We only count a hit if there was two correct predictions in a row. | |||
| if lastMatch { | |||
| hits++ | |||
| } | |||
| lastMatch = true | |||
| } else { | |||
| lastMatch = false | |||
| } | |||
| o1[c1] = c | |||
| c1 = c | |||
| hist[c]++ | |||
| } | |||
| // Use x^0.6 to give better spread | |||
| prediction := math.Pow(float64(hits)/float64(len(b)), 0.6) | |||
| // Calculate histogram distribution | |||
| variance := float64(0) | |||
| avg := float64(len(b)) / 256 | |||
| for _, v := range hist { | |||
| Δ := float64(v) - avg | |||
| variance += Δ * Δ | |||
| } | |||
| stddev := math.Sqrt(float64(variance)) / float64(len(b)) | |||
| exp := math.Sqrt(1 / float64(len(b))) | |||
| // Subtract expected stddev | |||
| stddev -= exp | |||
| if stddev < 0 { | |||
| stddev = 0 | |||
| } | |||
| stddev *= 1 + exp | |||
| // Use x^0.4 to give better spread | |||
| entropy := math.Pow(stddev, 0.4) | |||
| // 50/50 weight between prediction and histogram distribution | |||
| return math.Pow((prediction+entropy)/2, 0.9) | |||
| } | |||
// ShannonEntropyBits returns the number of bits minimum required to represent
// an entropy encoding of the input bytes.
// https://en.wiktionary.org/wiki/Shannon_entropy
func ShannonEntropyBits(b []byte) int {
	if len(b) == 0 {
		return 0
	}
	// Histogram of byte values.
	var freq [256]int
	for _, c := range b {
		freq[c]++
	}
	// Sum, per symbol, the (rounded-up) bits needed for all its occurrences:
	// -log2(p) bits per occurrence, where p is the symbol's frequency.
	bits := float64(0)
	inv := 1.0 / float64(len(b))
	for _, cnt := range freq[:] {
		if cnt == 0 {
			continue
		}
		f := float64(cnt)
		bits += math.Ceil(-math.Log2(f*inv) * f)
	}
	return int(math.Ceil(bits))
}
| @@ -0,0 +1,903 @@ | |||
| // Copyright 2009 The Go Authors. All rights reserved. | |||
| // Copyright (c) 2015 Klaus Post | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| package flate | |||
| import ( | |||
| "encoding/binary" | |||
| "fmt" | |||
| "io" | |||
| "math" | |||
| ) | |||
const (
	// User-facing compression levels, mirroring compress/flate.
	NoCompression      = 0
	BestSpeed          = 1
	BestCompression    = 9
	DefaultCompression = -1

	// HuffmanOnly disables Lempel-Ziv match searching and only performs Huffman
	// entropy encoding. This mode is useful in compressing data that has
	// already been compressed with an LZ style algorithm (e.g. Snappy or LZ4)
	// that lacks an entropy encoder. Compression gains are achieved when
	// certain bytes in the input stream occur more frequently than others.
	//
	// Note that HuffmanOnly produces a compressed output that is
	// RFC 1951 compliant. That is, any valid DEFLATE decompressor will
	// continue to be able to decompress this output.
	HuffmanOnly         = -2
	ConstantCompression = HuffmanOnly // compatibility alias.

	// Sliding-window geometry: 32 KiB history, per standard DEFLATE.
	logWindowSize    = 15
	windowSize       = 1 << logWindowSize
	windowMask       = windowSize - 1
	logMaxOffsetSize = 15  // Standard DEFLATE
	minMatchLength   = 4   // The smallest match that the compressor looks for
	maxMatchLength   = 258 // The longest match for the compressor
	minOffsetSize    = 1   // The shortest offset that makes any sense

	// The maximum number of tokens we will encode at the time.
	// Smaller sizes usually creates less optimal blocks.
	// Bigger can make context switching slow.
	// We use this for levels 7-9, so we make it big.
	maxFlateBlockTokens = 1 << 15
	maxStoreBlockSize   = 65535 // Maximum size of a stored (uncompressed) block.

	// Hash table sizing for the match-finder chains.
	hashBits  = 17 // After 17 performance degrades
	hashSize  = 1 << hashBits
	hashMask  = (1 << hashBits) - 1
	hashShift = (hashBits + minMatchLength - 1) / minMatchLength

	// hashOffset is rebased once it grows past this (see fillDeflate).
	maxHashOffset = 1 << 28

	// fastSkipHashing value meaning "never skip input while hashing".
	skipNever = math.MaxInt32

	// debugDeflate enables extra internal checks when true.
	debugDeflate = false
)
// compressionLevel holds the tuning parameters for one compression level.
// good, lazy, nice and chain control the match search used by levels 7-9
// (levels 1-6 use specialized encoders and ignore them — see levels below);
// fastSkipHashing controls input skipping, and level is the user-facing
// level number.
type compressionLevel struct {
	good, lazy, nice, chain, fastSkipHashing, level int
}
| // Compression levels have been rebalanced from zlib deflate defaults | |||
| // to give a bigger spread in speed and compression. | |||
| // See https://blog.klauspost.com/rebalancing-deflate-compression-levels/ | |||
// Compression levels have been rebalanced from zlib deflate defaults
// to give a bigger spread in speed and compression.
// See https://blog.klauspost.com/rebalancing-deflate-compression-levels/
// Indexed by the user-facing level (1-9); entry 0 is NoCompression.
var levels = []compressionLevel{
	{}, // 0
	// Level 1-6 uses specialized algorithm - values not used
	{0, 0, 0, 0, 0, 1},
	{0, 0, 0, 0, 0, 2},
	{0, 0, 0, 0, 0, 3},
	{0, 0, 0, 0, 0, 4},
	{0, 0, 0, 0, 0, 5},
	{0, 0, 0, 0, 0, 6},
	// Levels 7-9 use increasingly more lazy matching
	// and increasingly stringent conditions for "good enough".
	{8, 12, 16, 24, skipNever, 7},
	{16, 30, 40, 64, skipNever, 8},
	{32, 258, 258, 1024, skipNever, 9},
}
| // advancedState contains state for the advanced levels, with bigger hash tables, etc. | |||
// advancedState contains state for the advanced levels, with bigger hash tables, etc.
type advancedState struct {
	// deflate state
	length         int // length of the pending match candidate
	offset         int // offset of the pending match candidate
	maxInsertIndex int
	chainHead      int // head position of the current hash chain
	hashOffset     int // bias applied to positions stored in hashHead/hashPrev

	ii uint16 // position of last match, intended to overflow to reset.

	// input window: unprocessed data is window[index:windowEnd]
	index          int
	estBitsPerByte int
	hashMatch      [maxMatchLength + minMatchLength]uint32

	// Input hash chains
	// hashHead[hashValue] contains the largest inputIndex with the specified hash value
	// If hashHead[hashValue] is within the current window, then
	// hashPrev[hashHead[hashValue] & windowMask] contains the previous index
	// with the same hash value.
	hashHead [hashSize]uint32
	hashPrev [windowSize]uint32
}
// compressor drives one deflate stream: it buffers input in a sliding
// window, runs the level-dependent step function over it, and emits
// tokens/blocks through the huffman bit writer.
type compressor struct {
	compressionLevel

	h *huffmanEncoder  // reusable encoder for block emission
	w *huffmanBitWriter // destination bit writer

	// compression algorithm
	fill func(*compressor, []byte) int // copy data to window
	step func(*compressor)             // process window

	window     []byte
	windowEnd  int
	blockStart int // window index where current tokens start
	err        error

	// queued output tokens
	tokens tokens

	fast  fastEnc        // specialized encoder for levels 1-6
	state *advancedState // extra state for levels 7-9

	sync          bool // requesting flush
	byteAvailable bool // if true, still need to process window[index-1].
}
// fillDeflate copies as much of b as fits into the sliding window and
// returns the number of bytes consumed. When the window is nearly full,
// the upper half is shifted down by windowSize first, and every stored
// position (index, windowEnd, blockStart, hash chains) is rebased to match.
func (d *compressor) fillDeflate(b []byte) int {
	s := d.state
	if s.index >= 2*windowSize-(minMatchLength+maxMatchLength) {
		// shift the window by windowSize
		copy(d.window[:], d.window[windowSize:2*windowSize])
		s.index -= windowSize
		d.windowEnd -= windowSize
		if d.blockStart >= windowSize {
			d.blockStart -= windowSize
		} else {
			// blockStart pointed into the discarded half; mark it invalid.
			d.blockStart = math.MaxInt32
		}
		// Instead of rewriting the hash tables on every shift, positions are
		// biased by hashOffset, which simply grows by windowSize per shift.
		s.hashOffset += windowSize
		if s.hashOffset > maxHashOffset {
			// hashOffset has grown too large; rebase it back to 1 and shift
			// all stored positions down by the same delta. Entries that would
			// fall to zero or below are cleared (0 means "no position").
			delta := s.hashOffset - 1
			s.hashOffset -= delta
			s.chainHead -= delta

			// Iterate over slices instead of arrays to avoid copying
			// the entire table onto the stack (Issue #18625).
			for i, v := range s.hashPrev[:] {
				if int(v) > delta {
					s.hashPrev[i] = uint32(int(v) - delta)
				} else {
					s.hashPrev[i] = 0
				}
			}
			for i, v := range s.hashHead[:] {
				if int(v) > delta {
					s.hashHead[i] = uint32(int(v) - delta)
				} else {
					s.hashHead[i] = 0
				}
			}
		}
	}
	n := copy(d.window[d.windowEnd:], b)
	d.windowEnd += n
	return n
}
| func (d *compressor) writeBlock(tok *tokens, index int, eof bool) error { | |||
| if index > 0 || eof { | |||
| var window []byte | |||
| if d.blockStart <= index { | |||
| window = d.window[d.blockStart:index] | |||
| } | |||
| d.blockStart = index | |||
| //d.w.writeBlock(tok, eof, window) | |||
| d.w.writeBlockDynamic(tok, eof, window, d.sync) | |||
| return d.w.err | |||
| } | |||
| return nil | |||
| } | |||
| // writeBlockSkip writes the current block and uses the number of tokens | |||
| // to determine if the block should be stored on no matches, or | |||
| // only huffman encoded. | |||
| func (d *compressor) writeBlockSkip(tok *tokens, index int, eof bool) error { | |||
| if index > 0 || eof { | |||
| if d.blockStart <= index { | |||
| window := d.window[d.blockStart:index] | |||
| // If we removed less than a 64th of all literals | |||
| // we huffman compress the block. | |||
| if int(tok.n) > len(window)-int(tok.n>>6) { | |||
| d.w.writeBlockHuff(eof, window, d.sync) | |||
| } else { | |||
| // Write a dynamic huffman block. | |||
| d.w.writeBlockDynamic(tok, eof, window, d.sync) | |||
| } | |||
| } else { | |||
| d.w.writeBlock(tok, eof, nil) | |||
| } | |||
| d.blockStart = index | |||
| return d.w.err | |||
| } | |||
| return nil | |||
| } | |||
// fillWindow will fill the current window with the supplied
// dictionary and calculate all hashes.
// This is much faster than doing a full encode.
// Should only be used after a start/reset.
func (d *compressor) fillWindow(b []byte) {
	// Do not fill window if we are in store-only or huffman mode.
	if d.level <= 0 {
		return
	}
	if d.fast != nil {
		// encode the last data, but discard the result
		if len(b) > maxMatchOffset {
			b = b[len(b)-maxMatchOffset:]
		}
		d.fast.Encode(&d.tokens, b)
		d.tokens.Reset()
		return
	}
	s := d.state
	// If we are given too much, cut it.
	if len(b) > windowSize {
		b = b[len(b)-windowSize:]
	}
	// Add all to window.
	n := copy(d.window[d.windowEnd:], b)
	// Calculate 256 hashes at the time (more L1 cache hits)
	loops := (n + 256 - minMatchLength) / 256
	for j := 0; j < loops; j++ {
		startindex := j * 256
		// Overlap each chunk by minMatchLength-1 bytes so every 4-byte
		// window in d.window[:n] gets hashed exactly once.
		end := startindex + 256 + minMatchLength - 1
		if end > n {
			end = n
		}
		tocheck := d.window[startindex:end]
		dstSize := len(tocheck) - minMatchLength + 1
		if dstSize <= 0 {
			continue
		}
		dst := s.hashMatch[:dstSize]
		bulkHash4(tocheck, dst)
		var newH uint32
		for i, val := range dst {
			di := i + startindex
			newH = val & hashMask
			// Get previous value with the same hash.
			// Our chain should point to the previous value.
			s.hashPrev[di&windowMask] = s.hashHead[newH]
			// Set the head of the hash chain to us.
			s.hashHead[newH] = uint32(di + s.hashOffset)
		}
	}
	// Update window information.
	d.windowEnd += n
	s.index = n
}
// Try to find a match starting at index whose length is greater than prevSize.
// We only look at chainCount possibilities before giving up.
// pos = s.index, prevHead = s.chainHead-s.hashOffset, prevLength=minMatchLength-1, lookahead
//
// Two strategies are used: for short chains (d.chain < 100) the longest
// match wins; for long chains an estimated bit-gain metric (literal cost
// minus match cost) decides, using the scratch huffman coder d.h built in
// deflateLazy.
func (d *compressor) findMatch(pos int, prevHead int, lookahead int) (length, offset int, ok bool) {
	minMatchLook := maxMatchLength
	if lookahead < minMatchLook {
		minMatchLook = lookahead
	}
	win := d.window[0 : pos+minMatchLook]
	// We quit when we get a match that's at least nice long
	nice := len(win) - pos
	if d.nice < nice {
		nice = d.nice
	}
	// If we've got a match that's good enough, only look in 1/4 the chain.
	tries := d.chain
	length = minMatchLength - 1
	// wEnd is the byte just past the current best match; a candidate can
	// only beat the best if it matches at that position too.
	wEnd := win[pos+length]
	wPos := win[pos:]
	minIndex := pos - windowSize
	if minIndex < 0 {
		minIndex = 0
	}
	offset = 0
	cGain := 0
	if d.chain < 100 {
		for i := prevHead; tries > 0; tries-- {
			if wEnd == win[i+length] {
				n := matchLen(win[i:i+minMatchLook], wPos)
				if n > length {
					length = n
					offset = pos - i
					ok = true
					if n >= nice {
						// The match is good enough that we don't try to find a better one.
						break
					}
					wEnd = win[pos+n]
				}
			}
			if i <= minIndex {
				// hashPrev[i & windowMask] has already been overwritten, so stop now.
				break
			}
			i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset
			if i < minIndex {
				break
			}
		}
		return
	}
	// Some like it higher (CSV), some like it lower (JSON)
	const baseCost = 6
	// Base is 4 bytes at with an additional cost.
	// Matches must be better than this.
	for i := prevHead; tries > 0; tries-- {
		if wEnd == win[i+length] {
			n := matchLen(win[i:i+minMatchLook], wPos)
			if n > length {
				// Calculate gain. Estimate
				newGain := d.h.bitLengthRaw(wPos[:n]) - int(offsetExtraBits[offsetCode(uint32(pos-i))]) - baseCost - int(lengthExtraBits[lengthCodes[(n-3)&255]])
				//fmt.Println(n, "gain:", newGain, "prev:", cGain, "raw:", d.h.bitLengthRaw(wPos[:n]))
				if newGain > cGain {
					length = n
					offset = pos - i
					cGain = newGain
					ok = true
					if n >= nice {
						// The match is good enough that we don't try to find a better one.
						break
					}
					wEnd = win[pos+n]
				}
			}
		}
		if i <= minIndex {
			// hashPrev[i & windowMask] has already been overwritten, so stop now.
			break
		}
		i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset
		if i < minIndex {
			break
		}
	}
	return
}
| func (d *compressor) writeStoredBlock(buf []byte) error { | |||
| if d.w.writeStoredHeader(len(buf), false); d.w.err != nil { | |||
| return d.w.err | |||
| } | |||
| d.w.writeBytes(buf) | |||
| return d.w.err | |||
| } | |||
// hash4 returns a hash representation of the first 4 bytes
// of the supplied slice.
// The caller must ensure that len(b) >= 4.
func hash4(b []byte) uint32 {
	return hash4u(binary.LittleEndian.Uint32(b), hashBits)
}
| // bulkHash4 will compute hashes using the same | |||
| // algorithm as hash4 | |||
| func bulkHash4(b []byte, dst []uint32) { | |||
| if len(b) < 4 { | |||
| return | |||
| } | |||
| hb := binary.LittleEndian.Uint32(b) | |||
| dst[0] = hash4u(hb, hashBits) | |||
| end := len(b) - 4 + 1 | |||
| for i := 1; i < end; i++ { | |||
| hb = (hb >> 8) | uint32(b[i+3])<<24 | |||
| dst[i] = hash4u(hb, hashBits) | |||
| } | |||
| } | |||
| func (d *compressor) initDeflate() { | |||
| d.window = make([]byte, 2*windowSize) | |||
| d.byteAvailable = false | |||
| d.err = nil | |||
| if d.state == nil { | |||
| return | |||
| } | |||
| s := d.state | |||
| s.index = 0 | |||
| s.hashOffset = 1 | |||
| s.length = minMatchLength - 1 | |||
| s.offset = 0 | |||
| s.chainHead = -1 | |||
| } | |||
// deflateLazy is the same as deflate, but with d.fastSkipHashing == skipNever,
// meaning it always has lazy matching on.
//
// Lazy matching: before committing to a match found at position index-1, the
// encoder also searches at index; the previous match is only emitted when the
// new one is not better. Tokens are flushed to writeBlock whenever the token
// buffer fills or (on sync) the lookahead is exhausted.
func (d *compressor) deflateLazy() {
	s := d.state
	// Sanity enables additional runtime tests.
	// It's intended to be used during development
	// to supplement the currently ad-hoc unit tests.
	const sanity = debugDeflate
	// Not enough buffered input to be worth processing unless flushing.
	if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync {
		return
	}
	if d.windowEnd != s.index && d.chain > 100 {
		// Get literal huffman coder.
		if d.h == nil {
			d.h = newHuffmanEncoder(maxFlateBlockTokens)
		}
		// Build a literal frequency table over the pending input so
		// findMatch can estimate the cost of emitting bytes raw.
		var tmp [256]uint16
		for _, v := range d.window[s.index:d.windowEnd] {
			tmp[v]++
		}
		d.h.generate(tmp[:], 15)
	}
	s.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
	for {
		if sanity && s.index > d.windowEnd {
			panic("index > windowEnd")
		}
		lookahead := d.windowEnd - s.index
		if lookahead < minMatchLength+maxMatchLength {
			if !d.sync {
				return
			}
			if sanity && s.index > d.windowEnd {
				panic("index > windowEnd")
			}
			if lookahead == 0 {
				// Flush current output block if any.
				if d.byteAvailable {
					// There is still one pending token that needs to be flushed
					d.tokens.AddLiteral(d.window[s.index-1])
					d.byteAvailable = false
				}
				if d.tokens.n > 0 {
					if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
						return
					}
					d.tokens.Reset()
				}
				return
			}
		}
		if s.index < s.maxInsertIndex {
			// Update the hash
			hash := hash4(d.window[s.index:])
			ch := s.hashHead[hash]
			s.chainHead = int(ch)
			s.hashPrev[s.index&windowMask] = ch
			s.hashHead[hash] = uint32(s.index + s.hashOffset)
		}
		prevLength := s.length
		prevOffset := s.offset
		s.length = minMatchLength - 1
		s.offset = 0
		minIndex := s.index - windowSize
		if minIndex < 0 {
			minIndex = 0
		}
		if s.chainHead-s.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy {
			if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, lookahead); ok {
				s.length = newLength
				s.offset = newOffset
			}
		}
		if prevLength >= minMatchLength && s.length <= prevLength {
			// Check for better match at end...
			//
			// checkOff must be >=2 since we otherwise risk checking s.index
			// Offset of 2 seems to yield best results.
			const checkOff = 2
			prevIndex := s.index - 1
			if prevIndex+prevLength+checkOff < s.maxInsertIndex {
				end := lookahead
				if lookahead > maxMatchLength {
					end = maxMatchLength
				}
				end += prevIndex
				// Hash near the end of the previous match to see whether a
				// candidate there would extend it.
				idx := prevIndex + prevLength - (4 - checkOff)
				h := hash4(d.window[idx:])
				ch2 := int(s.hashHead[h]) - s.hashOffset - prevLength + (4 - checkOff)
				if ch2 > minIndex {
					length := matchLen(d.window[prevIndex:end], d.window[ch2:])
					// It seems like a pure length metric is best.
					if length > prevLength {
						prevLength = length
						prevOffset = prevIndex - ch2
					}
				}
			}
			// There was a match at the previous step, and the current match is
			// not better. Output the previous match.
			d.tokens.AddMatch(uint32(prevLength-3), uint32(prevOffset-minOffsetSize))
			// Insert in the hash table all strings up to the end of the match.
			// index and index-1 are already inserted. If there is not enough
			// lookahead, the last two strings are not inserted into the hash
			// table.
			newIndex := s.index + prevLength - 1
			// Calculate missing hashes
			end := newIndex
			if end > s.maxInsertIndex {
				end = s.maxInsertIndex
			}
			end += minMatchLength - 1
			startindex := s.index + 1
			if startindex > s.maxInsertIndex {
				startindex = s.maxInsertIndex
			}
			tocheck := d.window[startindex:end]
			dstSize := len(tocheck) - minMatchLength + 1
			if dstSize > 0 {
				dst := s.hashMatch[:dstSize]
				bulkHash4(tocheck, dst)
				var newH uint32
				for i, val := range dst {
					di := i + startindex
					newH = val & hashMask
					// Get previous value with the same hash.
					// Our chain should point to the previous value.
					s.hashPrev[di&windowMask] = s.hashHead[newH]
					// Set the head of the hash chain to us.
					s.hashHead[newH] = uint32(di + s.hashOffset)
				}
			}
			s.index = newIndex
			d.byteAvailable = false
			s.length = minMatchLength - 1
			if d.tokens.n == maxFlateBlockTokens {
				// The block includes the current character
				if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
					return
				}
				d.tokens.Reset()
			}
			s.ii = 0
		} else {
			// Reset, if we got a match this run.
			if s.length >= minMatchLength {
				s.ii = 0
			}
			// We have a byte waiting. Emit it.
			if d.byteAvailable {
				s.ii++
				d.tokens.AddLiteral(d.window[s.index-1])
				if d.tokens.n == maxFlateBlockTokens {
					if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
						return
					}
					d.tokens.Reset()
				}
				s.index++
				// If we have a long run of no matches, skip additional bytes
				// Resets when s.ii overflows after 64KB.
				if n := int(s.ii) - d.chain; n > 0 {
					n = 1 + int(n>>6)
					for j := 0; j < n; j++ {
						if s.index >= d.windowEnd-1 {
							break
						}
						d.tokens.AddLiteral(d.window[s.index-1])
						if d.tokens.n == maxFlateBlockTokens {
							if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
								return
							}
							d.tokens.Reset()
						}
						// Index...
						if s.index < s.maxInsertIndex {
							h := hash4(d.window[s.index:])
							ch := s.hashHead[h]
							s.chainHead = int(ch)
							s.hashPrev[s.index&windowMask] = ch
							s.hashHead[h] = uint32(s.index + s.hashOffset)
						}
						s.index++
					}
					// Flush last byte
					d.tokens.AddLiteral(d.window[s.index-1])
					d.byteAvailable = false
					// s.length = minMatchLength - 1 // not needed, since s.ii is reset above, so it should never be > minMatchLength
					if d.tokens.n == maxFlateBlockTokens {
						if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
							return
						}
						d.tokens.Reset()
					}
				}
			} else {
				s.index++
				d.byteAvailable = true
			}
		}
	}
}
| func (d *compressor) store() { | |||
| if d.windowEnd > 0 && (d.windowEnd == maxStoreBlockSize || d.sync) { | |||
| d.err = d.writeStoredBlock(d.window[:d.windowEnd]) | |||
| d.windowEnd = 0 | |||
| } | |||
| } | |||
| // fillWindow will fill the buffer with data for huffman-only compression. | |||
| // The number of bytes copied is returned. | |||
| func (d *compressor) fillBlock(b []byte) int { | |||
| n := copy(d.window[d.windowEnd:], b) | |||
| d.windowEnd += n | |||
| return n | |||
| } | |||
| // storeHuff will compress and store the currently added data, | |||
| // if enough has been accumulated or we at the end of the stream. | |||
| // Any error that occurred will be in d.err | |||
| func (d *compressor) storeHuff() { | |||
| if d.windowEnd < len(d.window) && !d.sync || d.windowEnd == 0 { | |||
| return | |||
| } | |||
| d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync) | |||
| d.err = d.w.err | |||
| d.windowEnd = 0 | |||
| } | |||
| // storeFast will compress and store the currently added data, | |||
| // if enough has been accumulated or we at the end of the stream. | |||
| // Any error that occurred will be in d.err | |||
| func (d *compressor) storeFast() { | |||
| // We only compress if we have maxStoreBlockSize. | |||
| if d.windowEnd < len(d.window) { | |||
| if !d.sync { | |||
| return | |||
| } | |||
| // Handle extremely small sizes. | |||
| if d.windowEnd < 128 { | |||
| if d.windowEnd == 0 { | |||
| return | |||
| } | |||
| if d.windowEnd <= 32 { | |||
| d.err = d.writeStoredBlock(d.window[:d.windowEnd]) | |||
| } else { | |||
| d.w.writeBlockHuff(false, d.window[:d.windowEnd], true) | |||
| d.err = d.w.err | |||
| } | |||
| d.tokens.Reset() | |||
| d.windowEnd = 0 | |||
| d.fast.Reset() | |||
| return | |||
| } | |||
| } | |||
| d.fast.Encode(&d.tokens, d.window[:d.windowEnd]) | |||
| // If we made zero matches, store the block as is. | |||
| if d.tokens.n == 0 { | |||
| d.err = d.writeStoredBlock(d.window[:d.windowEnd]) | |||
| // If we removed less than 1/16th, huffman compress the block. | |||
| } else if int(d.tokens.n) > d.windowEnd-(d.windowEnd>>4) { | |||
| d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync) | |||
| d.err = d.w.err | |||
| } else { | |||
| d.w.writeBlockDynamic(&d.tokens, false, d.window[:d.windowEnd], d.sync) | |||
| d.err = d.w.err | |||
| } | |||
| d.tokens.Reset() | |||
| d.windowEnd = 0 | |||
| } | |||
| // write will add input byte to the stream. | |||
| // Unless an error occurs all bytes will be consumed. | |||
| func (d *compressor) write(b []byte) (n int, err error) { | |||
| if d.err != nil { | |||
| return 0, d.err | |||
| } | |||
| n = len(b) | |||
| for len(b) > 0 { | |||
| if d.windowEnd == len(d.window) || d.sync { | |||
| d.step(d) | |||
| } | |||
| b = b[d.fill(d, b):] | |||
| if d.err != nil { | |||
| return 0, d.err | |||
| } | |||
| } | |||
| return n, d.err | |||
| } | |||
| func (d *compressor) syncFlush() error { | |||
| d.sync = true | |||
| if d.err != nil { | |||
| return d.err | |||
| } | |||
| d.step(d) | |||
| if d.err == nil { | |||
| d.w.writeStoredHeader(0, false) | |||
| d.w.flush() | |||
| d.err = d.w.err | |||
| } | |||
| d.sync = false | |||
| return d.err | |||
| } | |||
| func (d *compressor) init(w io.Writer, level int) (err error) { | |||
| d.w = newHuffmanBitWriter(w) | |||
| switch { | |||
| case level == NoCompression: | |||
| d.window = make([]byte, maxStoreBlockSize) | |||
| d.fill = (*compressor).fillBlock | |||
| d.step = (*compressor).store | |||
| case level == ConstantCompression: | |||
| d.w.logNewTablePenalty = 10 | |||
| d.window = make([]byte, 32<<10) | |||
| d.fill = (*compressor).fillBlock | |||
| d.step = (*compressor).storeHuff | |||
| case level == DefaultCompression: | |||
| level = 5 | |||
| fallthrough | |||
| case level >= 1 && level <= 6: | |||
| d.w.logNewTablePenalty = 7 | |||
| d.fast = newFastEnc(level) | |||
| d.window = make([]byte, maxStoreBlockSize) | |||
| d.fill = (*compressor).fillBlock | |||
| d.step = (*compressor).storeFast | |||
| case 7 <= level && level <= 9: | |||
| d.w.logNewTablePenalty = 8 | |||
| d.state = &advancedState{} | |||
| d.compressionLevel = levels[level] | |||
| d.initDeflate() | |||
| d.fill = (*compressor).fillDeflate | |||
| d.step = (*compressor).deflateLazy | |||
| default: | |||
| return fmt.Errorf("flate: invalid compression level %d: want value in range [-2, 9]", level) | |||
| } | |||
| d.level = level | |||
| return nil | |||
| } | |||
| // reset the state of the compressor. | |||
| func (d *compressor) reset(w io.Writer) { | |||
| d.w.reset(w) | |||
| d.sync = false | |||
| d.err = nil | |||
| // We only need to reset a few things for Snappy. | |||
| if d.fast != nil { | |||
| d.fast.Reset() | |||
| d.windowEnd = 0 | |||
| d.tokens.Reset() | |||
| return | |||
| } | |||
| switch d.compressionLevel.chain { | |||
| case 0: | |||
| // level was NoCompression or ConstantCompresssion. | |||
| d.windowEnd = 0 | |||
| default: | |||
| s := d.state | |||
| s.chainHead = -1 | |||
| for i := range s.hashHead { | |||
| s.hashHead[i] = 0 | |||
| } | |||
| for i := range s.hashPrev { | |||
| s.hashPrev[i] = 0 | |||
| } | |||
| s.hashOffset = 1 | |||
| s.index, d.windowEnd = 0, 0 | |||
| d.blockStart, d.byteAvailable = 0, false | |||
| d.tokens.Reset() | |||
| s.length = minMatchLength - 1 | |||
| s.offset = 0 | |||
| s.ii = 0 | |||
| s.maxInsertIndex = 0 | |||
| } | |||
| } | |||
| func (d *compressor) close() error { | |||
| if d.err != nil { | |||
| return d.err | |||
| } | |||
| d.sync = true | |||
| d.step(d) | |||
| if d.err != nil { | |||
| return d.err | |||
| } | |||
| if d.w.writeStoredHeader(0, true); d.w.err != nil { | |||
| return d.w.err | |||
| } | |||
| d.w.flush() | |||
| d.w.reset(nil) | |||
| return d.w.err | |||
| } | |||
| // NewWriter returns a new Writer compressing data at the given level. | |||
| // Following zlib, levels range from 1 (BestSpeed) to 9 (BestCompression); | |||
| // higher levels typically run slower but compress more. | |||
| // Level 0 (NoCompression) does not attempt any compression; it only adds the | |||
| // necessary DEFLATE framing. | |||
| // Level -1 (DefaultCompression) uses the default compression level. | |||
| // Level -2 (ConstantCompression) will use Huffman compression only, giving | |||
| // a very fast compression for all types of input, but sacrificing considerable | |||
| // compression efficiency. | |||
| // | |||
| // If level is in the range [-2, 9] then the error returned will be nil. | |||
| // Otherwise the error returned will be non-nil. | |||
| func NewWriter(w io.Writer, level int) (*Writer, error) { | |||
| var dw Writer | |||
| if err := dw.d.init(w, level); err != nil { | |||
| return nil, err | |||
| } | |||
| return &dw, nil | |||
| } | |||
| // NewWriterDict is like NewWriter but initializes the new | |||
| // Writer with a preset dictionary. The returned Writer behaves | |||
| // as if the dictionary had been written to it without producing | |||
| // any compressed output. The compressed data written to w | |||
| // can only be decompressed by a Reader initialized with the | |||
| // same dictionary. | |||
| func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, error) { | |||
| zw, err := NewWriter(w, level) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| zw.d.fillWindow(dict) | |||
| zw.dict = append(zw.dict, dict...) // duplicate dictionary for Reset method. | |||
| return zw, err | |||
| } | |||
// A Writer takes data written to it and writes the compressed
// form of that data to an underlying writer (see NewWriter).
type Writer struct {
	// d holds all compression state.
	d compressor
	// dict is the preset dictionary, retained so Reset can replay it
	// (set only by NewWriterDict / ResetDict).
	dict []byte
}
// Write writes data to w, which will eventually write the
// compressed form of data to its underlying writer.
// It implements io.Writer; on success n == len(data).
func (w *Writer) Write(data []byte) (n int, err error) {
	return w.d.write(data)
}
// Flush flushes any pending data to the underlying writer.
// It is useful mainly in compressed network protocols, to ensure that
// a remote reader has enough data to reconstruct a packet.
// Flush does not return until the data has been written.
// Calling Flush when there is no pending data still causes the Writer
// to emit a sync marker of at least 4 bytes.
// If the underlying writer returns an error, Flush returns that error.
//
// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH.
func (w *Writer) Flush() error {
	// For more about flushing:
	// http://www.bolet.org/~pornin/deflate-flush.html
	return w.d.syncFlush()
}
// Close flushes and closes the writer.
// It does not close the underlying io.Writer.
func (w *Writer) Close() error {
	return w.d.close()
}
| // Reset discards the writer's state and makes it equivalent to | |||
| // the result of NewWriter or NewWriterDict called with dst | |||
| // and w's level and dictionary. | |||
| func (w *Writer) Reset(dst io.Writer) { | |||
| if len(w.dict) > 0 { | |||
| // w was created with NewWriterDict | |||
| w.d.reset(dst) | |||
| if dst != nil { | |||
| w.d.fillWindow(w.dict) | |||
| } | |||
| } else { | |||
| // w was created with NewWriter | |||
| w.d.reset(dst) | |||
| } | |||
| } | |||
// ResetDict discards the writer's state and makes it equivalent to
// the result of NewWriter or NewWriterDict called with dst
// and w's level, but sets a specific dictionary.
func (w *Writer) ResetDict(dst io.Writer, dict []byte) {
	// Note: dict is retained directly (not copied) and replayed into the
	// window after reset, mirroring NewWriterDict behavior.
	w.dict = dict
	w.d.reset(dst)
	w.d.fillWindow(w.dict)
}
| @@ -0,0 +1,184 @@ | |||
| // Copyright 2016 The Go Authors. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| package flate | |||
// dictDecoder implements the LZ77 sliding dictionary as used in decompression.
// LZ77 decompresses data through sequences of two forms of commands:
//
// * Literal insertions: Runs of one or more symbols are inserted into the data
// stream as is. This is accomplished through the writeByte method for a
// single symbol, or combinations of writeSlice/writeMark for multiple symbols.
// Any valid stream must start with a literal insertion if no preset dictionary
// is used.
//
// * Backward copies: Runs of one or more symbols are copied from previously
// emitted data. Backward copies come as the tuple (dist, length) where dist
// determines how far back in the stream to copy from and length determines how
// many bytes to copy. Note that it is valid for the length to be greater than
// the distance. Since LZ77 uses forward copies, that situation is used to
// perform a form of run-length encoding on repeated runs of symbols.
// The writeCopy and tryWriteCopy are used to implement this command.
//
// For performance reasons, this implementation performs little to no sanity
// checks about the arguments. As such, the invariants documented for each
// method call must be respected.
type dictDecoder struct {
	hist []byte // Sliding window history
	// Invariant: 0 <= rdPos <= wrPos <= len(hist)
	wrPos int // Current output position in buffer
	rdPos int // Have emitted hist[:rdPos] already
	full bool // Has a full window length been written yet?
}
// init initializes dictDecoder to have a sliding window dictionary of the given
// size. If a preset dict is provided, it will initialize the dictionary with
// the contents of dict.
func (dd *dictDecoder) init(size int, dict []byte) {
	// Reset all fields but keep the allocated history buffer for reuse.
	*dd = dictDecoder{hist: dd.hist}
	if cap(dd.hist) < size {
		dd.hist = make([]byte, size)
	}
	dd.hist = dd.hist[:size]
	// An oversized dictionary is truncated to its most recent bytes.
	if len(dict) > len(dd.hist) {
		dict = dict[len(dict)-len(dd.hist):]
	}
	dd.wrPos = copy(dd.hist, dict)
	if dd.wrPos == len(dd.hist) {
		dd.wrPos = 0
		dd.full = true
	}
	dd.rdPos = dd.wrPos
}
| // histSize reports the total amount of historical data in the dictionary. | |||
| func (dd *dictDecoder) histSize() int { | |||
| if dd.full { | |||
| return len(dd.hist) | |||
| } | |||
| return dd.wrPos | |||
| } | |||
// availRead reports the number of bytes that can be flushed by readFlush.
func (dd *dictDecoder) availRead() int {
	return dd.wrPos - dd.rdPos
}
// availWrite reports the available amount of output buffer space
// before the buffer must be drained via readFlush.
func (dd *dictDecoder) availWrite() int {
	return len(dd.hist) - dd.wrPos
}
// writeSlice returns a slice of the available buffer to write data to.
// The caller fills it and then commits with writeMark.
//
// This invariant will be kept: len(s) <= availWrite()
func (dd *dictDecoder) writeSlice() []byte {
	return dd.hist[dd.wrPos:]
}
// writeMark advances the writer pointer by cnt, committing bytes the
// caller previously wrote into the slice returned by writeSlice.
//
// This invariant must be kept: 0 <= cnt <= availWrite()
func (dd *dictDecoder) writeMark(cnt int) {
	dd.wrPos += cnt
}
// writeByte writes a single byte to the dictionary.
//
// This invariant must be kept: 0 < availWrite()
func (dd *dictDecoder) writeByte(c byte) {
	dd.hist[dd.wrPos] = c
	dd.wrPos++
}
// writeCopy copies a string at a given (dist, length) to the output.
// This returns the number of bytes copied and may be less than the requested
// length if the available space in the output buffer is too small.
//
// This invariant must be kept: 0 < dist <= histSize()
func (dd *dictDecoder) writeCopy(dist, length int) int {
	dstBase := dd.wrPos
	dstPos := dstBase
	srcPos := dstPos - dist
	endPos := dstPos + length
	// Clamp to buffer end; the caller retries after a readFlush.
	if endPos > len(dd.hist) {
		endPos = len(dd.hist)
	}
	// Copy non-overlapping section after destination position.
	//
	// This section is non-overlapping in that the copy length for this section
	// is always less than or equal to the backwards distance. This can occur
	// if a distance refers to data that wraps-around in the buffer.
	// Thus, a backwards copy is performed here; that is, the exact bytes in
	// the source prior to the copy is placed in the destination.
	if srcPos < 0 {
		srcPos += len(dd.hist)
		dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:])
		srcPos = 0
	}
	// Copy possibly overlapping section before destination position.
	//
	// This section can overlap if the copy length for this section is larger
	// than the backwards distance. This is allowed by LZ77 so that repeated
	// strings can be succinctly represented using (dist, length) pairs.
	// Thus, a forwards copy is performed here; that is, the bytes copied is
	// possibly dependent on the resulting bytes in the destination as the copy
	// progresses along. This is functionally equivalent to the following:
	//
	//	for i := 0; i < endPos-dstPos; i++ {
	//		dd.hist[dstPos+i] = dd.hist[srcPos+i]
	//	}
	//	dstPos = endPos
	//
	// Each copy call doubles the copied region, so this loop runs
	// O(log(length/dist)) times rather than byte-by-byte.
	for dstPos < endPos {
		dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos])
	}
	dd.wrPos = dstPos
	return dstPos - dstBase
}
// tryWriteCopy tries to copy a string at a given (distance, length) to the
// output. This specialized version is optimized for short distances.
// It returns 0 (copy nothing) when the copy would cross the buffer start
// or end; the caller then falls back to writeCopy.
//
// This method is designed to be inlined for performance reasons.
//
// This invariant must be kept: 0 < dist <= histSize()
func (dd *dictDecoder) tryWriteCopy(dist, length int) int {
	dstPos := dd.wrPos
	endPos := dstPos + length
	if dstPos < dist || endPos > len(dd.hist) {
		return 0
	}
	dstBase := dstPos
	srcPos := dstPos - dist
	// Copy possibly overlapping section before destination position.
loop:
	dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos])
	if dstPos < endPos {
		goto loop // Avoid for-loop so that this function can be inlined
	}
	dd.wrPos = dstPos
	return dstPos - dstBase
}
| // readFlush returns a slice of the historical buffer that is ready to be | |||
| // emitted to the user. The data returned by readFlush must be fully consumed | |||
| // before calling any other dictDecoder methods. | |||
| func (dd *dictDecoder) readFlush() []byte { | |||
| toRead := dd.hist[dd.rdPos:dd.wrPos] | |||
| dd.rdPos = dd.wrPos | |||
| if dd.wrPos == len(dd.hist) { | |||
| dd.wrPos, dd.rdPos = 0, 0 | |||
| dd.full = true | |||
| } | |||
| return toRead | |||
| } | |||
| @@ -0,0 +1,233 @@ | |||
| // Copyright 2011 The Snappy-Go Authors. All rights reserved. | |||
| // Modified for deflate by Klaus Post (c) 2015. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| package flate | |||
| import ( | |||
| "encoding/binary" | |||
| "fmt" | |||
| "math/bits" | |||
| ) | |||
// fastEnc is the interface implemented by the level 1-6 encoders:
// Encode tokenizes src into dst, and Reset clears history between streams.
type fastEnc interface {
	Encode(dst *tokens, src []byte)
	Reset()
}
| func newFastEnc(level int) fastEnc { | |||
| switch level { | |||
| case 1: | |||
| return &fastEncL1{fastGen: fastGen{cur: maxStoreBlockSize}} | |||
| case 2: | |||
| return &fastEncL2{fastGen: fastGen{cur: maxStoreBlockSize}} | |||
| case 3: | |||
| return &fastEncL3{fastGen: fastGen{cur: maxStoreBlockSize}} | |||
| case 4: | |||
| return &fastEncL4{fastGen: fastGen{cur: maxStoreBlockSize}} | |||
| case 5: | |||
| return &fastEncL5{fastGen: fastGen{cur: maxStoreBlockSize}} | |||
| case 6: | |||
| return &fastEncL6{fastGen: fastGen{cur: maxStoreBlockSize}} | |||
| default: | |||
| panic("invalid level specified") | |||
| } | |||
| } | |||
// Table sizing and match limits shared by the fast encoders.
const (
	tableBits       = 15             // Bits used in the table
	tableSize       = 1 << tableBits // Size of the table
	tableShift      = 32 - tableBits // Right-shift to get the tableBits most significant bits of a uint32.
	baseMatchOffset = 1              // The smallest match offset
	baseMatchLength = 3              // The smallest match length per the RFC section 3.2.5
	maxMatchOffset  = 1 << 15        // The largest match offset

	bTableBits   = 17                                               // Bits used in the big tables
	bTableSize   = 1 << bTableBits                                  // Size of the table
	allocHistory = maxStoreBlockSize * 5                            // Size to preallocate for history.
	bufferReset  = (1 << 31) - allocHistory - maxStoreBlockSize - 1 // Reset the buffer offset when reaching this.
)

// Multiplicative-hashing primes for hashing 3 to 8 byte windows.
const (
	prime3bytes = 506832829
	prime4bytes = 2654435761
	prime5bytes = 889523592379
	prime6bytes = 227718039650203
	prime7bytes = 58295818150454627
	prime8bytes = 0xcf1bbcdcb7a56463
)
// load32 reads a little-endian uint32 from b starting at index i.
func load32(b []byte, i int) uint32 {
	// Re-slice in two steps so the compiler can prove the bounds once and
	// emit a single 4-byte read.
	b = b[i:]
	b = b[:4]
	return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
}
// load64 reads a little-endian uint64 from buf starting at offset off.
func load64(buf []byte, off int) uint64 {
	return binary.LittleEndian.Uint64(buf[off:])
}

// load3232 reads a little-endian uint32; the int32 offset avoids a
// conversion at call sites that track positions as int32.
func load3232(buf []byte, off int32) uint32 {
	return binary.LittleEndian.Uint32(buf[off:])
}

// load6432 reads a little-endian uint64; the int32 offset avoids a
// conversion at call sites that track positions as int32.
func load6432(buf []byte, off int32) uint64 {
	return binary.LittleEndian.Uint64(buf[off:])
}
// hash multiplies u by a prime and keeps the tableBits most significant
// bits, producing an index into a table of tableSize entries.
func hash(u uint32) uint32 {
	return (u * 0x1e35a7bd) >> tableShift
}
// tableEntry is a single match-table slot holding the offset of a
// previously seen position in the history.
type tableEntry struct {
	offset int32
}
// fastGen maintains the table for matches,
// and the previous byte block for level 2.
// This is the generic implementation.
type fastGen struct {
	hist []byte // concatenated history of recent input (managed by addBlock)
	cur  int32  // advanced as hist slides so stored offsets stay comparable (see addBlock/Reset)
}
// addBlock appends src to the history buffer, sliding the retained window
// down when capacity would be exceeded, and returns the position in e.hist
// where src now starts.
func (e *fastGen) addBlock(src []byte) int32 {
	// check if we have space already
	if len(e.hist)+len(src) > cap(e.hist) {
		if cap(e.hist) == 0 {
			// First use: allocate the full history buffer up front.
			e.hist = make([]byte, 0, allocHistory)
		} else {
			if cap(e.hist) < maxMatchOffset*2 {
				panic("unexpected buffer size")
			}
			// Move down: keep only the last maxMatchOffset bytes, the
			// furthest back any match may reference.
			offset := int32(len(e.hist)) - maxMatchOffset
			copy(e.hist[0:maxMatchOffset], e.hist[offset:])
			// Advance cur so offsets recorded before the slide remain
			// consistent with positions in the shifted buffer.
			e.cur += offset
			e.hist = e.hist[:maxMatchOffset]
		}
	}
	s := int32(len(e.hist))
	e.hist = append(e.hist, src...)
	return s
}
// hash4u returns the hash of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <32.
func hash4u(u uint32, h uint8) uint32 {
	return (u * prime4bytes) >> (32 - h)
}
// tableEntryPrev is a hash-chain slot of depth two: the current entry plus
// the entry it displaced.
type tableEntryPrev struct {
	Cur  tableEntry
	Prev tableEntry
}
// hash4x64 returns the hash of the lowest 4 bytes of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <32.
func hash4x64(u uint64, h uint8) uint32 {
	// The "& reg8SizeMask32" masks the shift amount; presumably this lets
	// the compiler elide its oversized-shift guard — TODO confirm.
	return (uint32(u) * prime4bytes) >> ((32 - h) & reg8SizeMask32)
}

// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <64.
func hash7(u uint64, h uint8) uint32 {
	// Shifting left by 64-56 drops the top byte so only 7 bytes contribute.
	return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & reg8SizeMask64))
}

// hash8 returns the hash of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <64.
func hash8(u uint64, h uint8) uint32 {
	return uint32((u * prime8bytes) >> ((64 - h) & reg8SizeMask64))
}

// hash6 returns the hash of the lowest 6 bytes of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <64.
func hash6(u uint64, h uint8) uint32 {
	// Shifting left by 64-48 drops the top two bytes so only 6 bytes contribute.
	return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & reg8SizeMask64))
}
// matchlen will return the match length between offsets s and t in src.
// The maximum length returned is maxMatchLength - 4.
// It is assumed that s > t, that t >=0 and s < len(src).
func (e *fastGen) matchlen(s, t int32, src []byte) int32 {
	// NOTE(review): this gates its sanity checks on debugDecode while
	// matchlenLong gates on debugDeflate — confirm the difference is intentional.
	if debugDecode {
		if t >= s {
			panic(fmt.Sprint("t >=s:", t, s))
		}
		if int(s) >= len(src) {
			panic(fmt.Sprint("s >= len(src):", s, len(src)))
		}
		if t < 0 {
			panic(fmt.Sprint("t < 0:", t))
		}
		if s-t > maxMatchOffset {
			panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")"))
		}
	}
	// Clamp the comparison window so at most maxMatchLength-4 bytes can match.
	s1 := int(s) + maxMatchLength - 4
	if s1 > len(src) {
		s1 = len(src)
	}

	// Extend the match to be as long as possible.
	return int32(matchLen(src[s:s1], src[t:]))
}
// matchlenLong will return the match length between offsets s and t in src.
// Unlike matchlen it does not cap the length at maxMatchLength - 4.
// It is assumed that s > t, that t >=0 and s < len(src).
func (e *fastGen) matchlenLong(s, t int32, src []byte) int32 {
	if debugDeflate {
		if t >= s {
			panic(fmt.Sprint("t >=s:", t, s))
		}
		if int(s) >= len(src) {
			panic(fmt.Sprint("s >= len(src):", s, len(src)))
		}
		if t < 0 {
			panic(fmt.Sprint("t < 0:", t))
		}
		if s-t > maxMatchOffset {
			panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")"))
		}
	}
	// Extend the match to be as long as possible.
	return int32(matchLen(src[s:], src[t:]))
}
// Reset the encoding table, discarding all history so offsets recorded
// before the reset can never match again.
func (e *fastGen) Reset() {
	if cap(e.hist) < allocHistory {
		e.hist = make([]byte, 0, allocHistory)
	}
	// We offset current position so everything will be out of reach.
	// If we are above the buffer reset it will be cleared anyway since len(hist) == 0.
	if e.cur <= bufferReset {
		e.cur += maxMatchOffset + int32(len(e.hist))
	}
	e.hist = e.hist[:0]
}
// matchLen returns the length of the common prefix of a and b.
// 'a' must be the shortest of the two.
func matchLen(a, b []byte) int {
	var n int
	// Compare 8 bytes at a time; the first differing bit pinpoints the
	// exact mismatch position within the word.
	for ; len(a) >= 8; n += 8 {
		if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
			return n + bits.TrailingZeros64(diff)>>3
		}
		a, b = a[8:], b[8:]
	}

	// Tail of fewer than 8 bytes: compare byte by byte.
	b = b[:len(a)]
	for i, av := range a {
		if av != b[i] {
			return n + i
		}
	}
	return n + len(a)
}
| @@ -0,0 +1,412 @@ | |||
| // Copyright 2009 The Go Authors. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| package flate | |||
| import ( | |||
| "math" | |||
| "math/bits" | |||
| ) | |||
const (
	// maxBitsLimit bounds the per-level arrays in bitCounts; maxBits passed
	// there must be strictly smaller.
	maxBitsLimit = 16
	// number of valid literals
	literalCount = 286
)
// hcode is a huffman code with a bit code and bit length.
// The low 8 bits hold the length; the remaining bits hold the code.
type hcode uint32

// len returns the bit length of the code.
func (h hcode) len() uint8 {
	return uint8(h & 0xff)
}

// code64 returns the bit code, widened to uint64 for the bit writer.
func (h hcode) code64() uint64 {
	return uint64(h) >> 8
}

// zero reports whether no code has been assigned.
func (h hcode) zero() bool {
	return h == 0
}
// huffmanEncoder holds the generated codes for one alphabet plus reusable
// scratch space for rebuilding them.
type huffmanEncoder struct {
	codes    []hcode   // code per symbol, indexed by symbol value
	bitCount [17]int32 // reusable result buffer for bitCounts
	// Allocate a reusable buffer with the longest possible frequency table.
	// Possible lengths are codegenCodeCount, offsetCodeCount and literalCount.
	// The largest of these is literalCount, so we allocate for that case.
	freqcache [literalCount + 1]literalNode
}
// literalNode pairs a symbol value with its frequency while codes are built.
type literalNode struct {
	literal uint16
	freq    uint16
}
// A levelInfo describes the state of the constructed tree for a given depth
// during the package-merge style search in bitCounts.
type levelInfo struct {
	// Our level.  for better printing
	level int32

	// The frequency of the last node at this level
	lastFreq int32

	// The frequency of the next character to add to this level
	nextCharFreq int32

	// The frequency of the next pair (from level below) to add to this level.
	// Only valid if the "needed" value of the next lower level is 0.
	nextPairFreq int32

	// The number of chains remaining to generate for this level before moving
	// up to the next level
	needed int32
}
// set sets the code and length of an hcode.
// Layout matches hcode: low 8 bits = length, upper bits = code.
func (h *hcode) set(code uint16, length uint8) {
	*h = hcode(length) | (hcode(code) << 8)
}

// newhcode builds an hcode from a code and length with the same layout as set.
func newhcode(code uint16, length uint8) hcode {
	return hcode(length) | (hcode(code) << 8)
}
// reverseBits returns the lowest bitLength bits of number in reversed bit
// order, as required for emitting Huffman codes (RFC 1951 writes codes
// most-significant bit first).
func reverseBits(number uint16, bitLength byte) uint16 {
	// Left-align the code, then reverse the whole 16-bit word.
	shift := (16 - bitLength) & 15
	return bits.Reverse16(number << shift)
}
| func maxNode() literalNode { return literalNode{math.MaxUint16, math.MaxUint16} } | |||
// newHuffmanEncoder returns an encoder for size codes, with the codes slice
// capacity rounded up to a power of two so it can be re-sliced for reuse.
func newHuffmanEncoder(size int) *huffmanEncoder {
	// Make capacity to next power of two.
	c := uint(bits.Len32(uint32(size - 1)))
	return &huffmanEncoder{codes: make([]hcode, size, 1<<c)}
}
// Generates a HuffmanCode corresponding to the fixed literal table
// defined in RFC 1951 section 3.2.6.
func generateFixedLiteralEncoding() *huffmanEncoder {
	h := newHuffmanEncoder(literalCount)
	codes := h.codes
	var ch uint16
	for ch = 0; ch < literalCount; ch++ {
		var bits uint16
		var size uint8
		switch {
		case ch < 144:
			// size 8, 00110000 .. 10111111
			bits = ch + 48
			size = 8
		case ch < 256:
			// size 9, 110010000 .. 111111111
			bits = ch + 400 - 144
			size = 9
		case ch < 280:
			// size 7, 0000000 .. 0010111
			bits = ch - 256
			size = 7
		default:
			// size 8, 11000000 .. 11000111
			bits = ch + 192 - 280
			size = 8
		}
		codes[ch] = newhcode(reverseBits(bits, size), size)
	}
	return h
}
// generateFixedOffsetEncoding generates the fixed offset table: all 30
// offset codes are 5 bits, per RFC 1951 section 3.2.6.
func generateFixedOffsetEncoding() *huffmanEncoder {
	h := newHuffmanEncoder(30)
	codes := h.codes
	for ch := range codes {
		codes[ch] = newhcode(reverseBits(uint16(ch), 5), 5)
	}
	return h
}
// Fixed Huffman tables, built once at package initialization.
var fixedLiteralEncoding = generateFixedLiteralEncoding()
var fixedOffsetEncoding = generateFixedOffsetEncoding()
| func (h *huffmanEncoder) bitLength(freq []uint16) int { | |||
| var total int | |||
| for i, f := range freq { | |||
| if f != 0 { | |||
| total += int(f) * int(h.codes[i].len()) | |||
| } | |||
| } | |||
| return total | |||
| } | |||
| func (h *huffmanEncoder) bitLengthRaw(b []byte) int { | |||
| var total int | |||
| for _, f := range b { | |||
| total += int(h.codes[f].len()) | |||
| } | |||
| return total | |||
| } | |||
| // canReuseBits returns the number of bits or math.MaxInt32 if the encoder cannot be reused. | |||
| func (h *huffmanEncoder) canReuseBits(freq []uint16) int { | |||
| var total int | |||
| for i, f := range freq { | |||
| if f != 0 { | |||
| code := h.codes[i] | |||
| if code.zero() { | |||
| return math.MaxInt32 | |||
| } | |||
| total += int(f) * int(code.len()) | |||
| } | |||
| } | |||
| return total | |||
| } | |||
// Return the number of literals assigned to each bit size in the Huffman encoding
//
// This method is only called when list.length >= 3
// The cases of 0, 1, and 2 literals are handled by special case code.
//
// list  An array of the literals with non-zero frequencies
// and their associated frequencies. The array is in order of increasing
// frequency, and has as its last element a special element with frequency
// MaxInt32
// maxBits     The maximum number of bits that should be used to encode any literal.
// Must be less than 16.
// return      An integer array in which array[i] indicates the number of literals
// that should be encoded in i bits.
func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 {
	if maxBits >= maxBitsLimit {
		panic("flate: maxBits too large")
	}
	n := int32(len(list))
	// Append the sentinel so the lookahead reads below never run off the end.
	list = list[0 : n+1]
	list[n] = maxNode()

	// The tree can't have greater depth than n - 1, no matter what. This
	// saves a little bit of work in some small cases
	if maxBits > n-1 {
		maxBits = n - 1
	}

	// Create information about each of the levels.
	// A bogus "Level 0" whose sole purpose is so that
	// level1.prev.needed==0.  This makes level1.nextPairFreq
	// be a legitimate value that never gets chosen.
	var levels [maxBitsLimit]levelInfo
	// leafCounts[i] counts the number of literals at the left
	// of ancestors of the rightmost node at level i.
	// leafCounts[i][j] is the number of literals at the left
	// of the level j ancestor.
	var leafCounts [maxBitsLimit][maxBitsLimit]int32

	// Descending to only have 1 bounds check.
	l2f := int32(list[2].freq)
	l1f := int32(list[1].freq)
	l0f := int32(list[0].freq) + int32(list[1].freq)

	for level := int32(1); level <= maxBits; level++ {
		// For every level, the first two items are the first two characters.
		// We initialize the levels as if we had already figured this out.
		levels[level] = levelInfo{
			level:        level,
			lastFreq:     l1f,
			nextCharFreq: l2f,
			nextPairFreq: l0f,
		}
		leafCounts[level][level] = 2
		if level == 1 {
			levels[level].nextPairFreq = math.MaxInt32
		}
	}

	// We need a total of 2*n - 2 items at top level and have already generated 2.
	levels[maxBits].needed = 2*n - 4

	level := uint32(maxBits)
	for level < 16 {
		l := &levels[level]
		if l.nextPairFreq == math.MaxInt32 && l.nextCharFreq == math.MaxInt32 {
			// We've run out of both leafs and pairs.
			// End all calculations for this level.
			// To make sure we never come back to this level or any lower level,
			// set nextPairFreq impossibly large.
			l.needed = 0
			levels[level+1].nextPairFreq = math.MaxInt32
			level++
			continue
		}

		prevFreq := l.lastFreq
		if l.nextCharFreq < l.nextPairFreq {
			// The next item on this row is a leaf node.
			n := leafCounts[level][level] + 1
			l.lastFreq = l.nextCharFreq
			// Lower leafCounts are the same of the previous node.
			leafCounts[level][level] = n
			e := list[n]
			if e.literal < math.MaxUint16 {
				l.nextCharFreq = int32(e.freq)
			} else {
				l.nextCharFreq = math.MaxInt32
			}
		} else {
			// The next item on this row is a pair from the previous row.
			// nextPairFreq isn't valid until we generate two
			// more values in the level below
			l.lastFreq = l.nextPairFreq
			// Take leaf counts from the lower level, except counts[level] remains the same.
			// NOTE(review): the else branch below is dead — kept, presumably,
			// as a reference alternative (partial copy vs. whole-array
			// assignment). Only the true branch ever runs.
			if true {
				save := leafCounts[level][level]
				leafCounts[level] = leafCounts[level-1]
				leafCounts[level][level] = save
			} else {
				copy(leafCounts[level][:level], leafCounts[level-1][:level])
			}
			levels[l.level-1].needed = 2
		}

		if l.needed--; l.needed == 0 {
			// We've done everything we need to do for this level.
			// Continue calculating one level up.  Fill in nextPairFreq
			// of that level with the sum of the two nodes we've just calculated on
			// this level.
			if l.level == maxBits {
				// All done!
				break
			}
			levels[l.level+1].nextPairFreq = prevFreq + l.lastFreq
			level++
		} else {
			// If we stole from below, move down temporarily to replenish it.
			for levels[level-1].needed > 0 {
				level--
			}
		}
	}

	// Something is wrong if at the end, the top level is null or hasn't used
	// all of the leaves.
	if leafCounts[maxBits][maxBits] != n {
		panic("leafCounts[maxBits][maxBits] != n")
	}

	bitCount := h.bitCount[:maxBits+1]
	bits := 1
	counts := &leafCounts[maxBits]
	for level := maxBits; level > 0; level-- {
		// chain.leafCount gives the number of literals requiring at least "bits"
		// bits to encode.
		bitCount[bits] = counts[level] - counts[level-1]
		bits++
	}
	return bitCount
}
// Look at the leaves and assign them a bit count and an encoding as specified
// in RFC 1951 3.2.2
func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalNode) {
	code := uint16(0)
	for n, bits := range bitCount {
		code <<= 1
		if n == 0 || bits == 0 {
			continue
		}
		// The literals list[len(list)-bits] .. list[len(list)-1]
		// are encoded using "bits" bits, and get the values
		// code, code + 1, ....  The code values are
		// assigned in literal order (not frequency order).
		chunk := list[len(list)-int(bits):]

		sortByLiteral(chunk)
		for _, node := range chunk {
			h.codes[node.literal] = newhcode(reverseBits(code, uint8(n)), uint8(n))
			code++
		}
		list = list[0 : len(list)-int(bits)]
	}
}
// Update this Huffman Code object to be the minimum code for the specified frequency count.
//
// freq  An array of frequencies, in which frequency[i] gives the frequency of literal i.
// maxBits  The maximum number of bits to use for any literal.
func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) {
	list := h.freqcache[:len(freq)+1]
	codes := h.codes[:len(freq)]
	// Number of non-zero literals
	count := 0
	// Set list to be the set of all non-zero literals and their frequencies
	for i, f := range freq {
		if f != 0 {
			list[count] = literalNode{uint16(i), f}
			count++
		} else {
			// Clear any stale code from a previous generate call.
			codes[i] = 0
		}
	}
	list[count] = literalNode{}

	list = list[:count]
	if count <= 2 {
		// Handle the small cases here, because they are awkward for the general case code. With
		// two or fewer literals, everything has bit length 1.
		for i, node := range list {
			// "list" is in order of increasing literal value.
			h.codes[node.literal].set(uint16(i), 1)
		}
		return
	}
	sortByFreq(list)

	// Get the number of literals for each bit count
	bitCount := h.bitCounts(list, maxBits)
	// And do the assignment
	h.assignEncodingAndSize(bitCount, list)
}
// atLeastOne clamps the result between 1 and 15.
func atLeastOne(v float32) float32 {
	switch {
	case v < 1:
		return 1
	case v > 15:
		return 15
	}
	return v
}
| func histogram(b []byte, h []uint16) { | |||
| if true && len(b) >= 8<<10 { | |||
| // Split for bigger inputs | |||
| histogramSplit(b, h) | |||
| } else { | |||
| h = h[:256] | |||
| for _, t := range b { | |||
| h[t]++ | |||
| } | |||
| } | |||
| } | |||
// histogramSplit adds the byte frequencies of b into h using four
// interleaved counting streams. h must have at least 256 entries; counts
// accumulate on top of existing values.
//
// Tested, and slightly faster than 2-way.
// Writing to separate arrays and combining is also slightly slower.
func histogramSplit(b []byte, h []uint16) {
	h = h[:256]
	// Peel off leading bytes until the remaining length is a multiple of 4.
	for len(b)&3 != 0 {
		h[b[0]]++
		b = b[1:]
	}
	n := len(b) / 4
	p0, p1, p2, p3 := b[:n], b[n:], b[n+n:], b[n+n+n:]
	p1, p2, p3 = p1[:n], p2[:n], p3[:n]
	for i := range p0 {
		h[p0[i]]++
		h[p1[i]]++
		h[p2[i]]++
		h[p3[i]]++
	}
}
| @@ -0,0 +1,178 @@ | |||
| // Copyright 2009 The Go Authors. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| package flate | |||
// sortByFreq sorts data ascending by (freq, literal).
// It makes one call to data.Len to determine n, and O(n*log(n)) calls to
// data.Less and data.Swap. The sort is not guaranteed to be stable.
func sortByFreq(data []literalNode) {
	n := len(data)
	quickSortByFreq(data, 0, n, maxDepth(n))
}
// quickSortByFreq sorts data[a:b] ascending by (freq, literal): introsort
// that falls back to heap sort past maxDepth and to a shell/insertion pass
// for slices of 12 or fewer elements.
func quickSortByFreq(data []literalNode, a, b, maxDepth int) {
	for b-a > 12 { // Use ShellSort for slices <= 12 elements
		if maxDepth == 0 {
			heapSort(data, a, b)
			return
		}
		maxDepth--
		mlo, mhi := doPivotByFreq(data, a, b)
		// Avoiding recursion on the larger subproblem guarantees
		// a stack depth of at most lg(b-a).
		if mlo-a < b-mhi {
			quickSortByFreq(data, a, mlo, maxDepth)
			a = mhi // i.e., quickSortByFreq(data, mhi, b)
		} else {
			quickSortByFreq(data, mhi, b, maxDepth)
			b = mlo // i.e., quickSortByFreq(data, a, mlo)
		}
	}
	if b-a > 1 {
		// Do ShellSort pass with gap 6
		// It could be written in this simplified form cause b-a <= 12
		for i := a + 6; i < b; i++ {
			if data[i].freq == data[i-6].freq && data[i].literal < data[i-6].literal || data[i].freq < data[i-6].freq {
				data[i], data[i-6] = data[i-6], data[i]
			}
		}
		insertionSortByFreq(data, a, b)
	}
}
// siftDownByFreq implements the heap property on data[lo, hi) using the
// (freq, literal) ordering.
// first is an offset into the array where the root of the heap lies.
func siftDownByFreq(data []literalNode, lo, hi, first int) {
	root := lo
	for {
		child := 2*root + 1
		if child >= hi {
			break
		}
		// Pick the greater of the two children under the (freq, literal) order.
		if child+1 < hi && (data[first+child].freq == data[first+child+1].freq && data[first+child].literal < data[first+child+1].literal || data[first+child].freq < data[first+child+1].freq) {
			child++
		}
		// Stop once the root is not smaller than its larger child.
		if data[first+root].freq == data[first+child].freq && data[first+root].literal > data[first+child].literal || data[first+root].freq > data[first+child].freq {
			return
		}
		data[first+root], data[first+child] = data[first+child], data[first+root]
		root = child
	}
}
// doPivotByFreq partitions data[lo:hi] around a median-of-three pivot using
// the (freq, literal) ordering, returning midlo, midhi such that elements
// equal to the pivot end up in data[midlo:midhi].
func doPivotByFreq(data []literalNode, lo, hi int) (midlo, midhi int) {
	m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow.
	if hi-lo > 40 {
		// Tukey's ``Ninther,'' median of three medians of three.
		s := (hi - lo) / 8
		medianOfThreeSortByFreq(data, lo, lo+s, lo+2*s)
		medianOfThreeSortByFreq(data, m, m-s, m+s)
		medianOfThreeSortByFreq(data, hi-1, hi-1-s, hi-1-2*s)
	}
	medianOfThreeSortByFreq(data, lo, m, hi-1)

	// Invariants are:
	//	data[lo] = pivot (set up by ChoosePivot)
	//	data[lo < i < a] < pivot
	//	data[a <= i < b] <= pivot
	//	data[b <= i < c] unexamined
	//	data[c <= i < hi-1] > pivot
	//	data[hi-1] >= pivot
	pivot := lo
	a, c := lo+1, hi-1

	for ; a < c && (data[a].freq == data[pivot].freq && data[a].literal < data[pivot].literal || data[a].freq < data[pivot].freq); a++ {
	}
	b := a
	for {
		for ; b < c && (data[pivot].freq == data[b].freq && data[pivot].literal > data[b].literal || data[pivot].freq > data[b].freq); b++ { // data[b] <= pivot
		}
		for ; b < c && (data[pivot].freq == data[c-1].freq && data[pivot].literal < data[c-1].literal || data[pivot].freq < data[c-1].freq); c-- { // data[c-1] > pivot
		}
		if b >= c {
			break
		}
		// data[b] > pivot; data[c-1] <= pivot
		data[b], data[c-1] = data[c-1], data[b]
		b++
		c--
	}
	// If hi-c<3 then there are duplicates (by property of median of nine).
	// Let's be a bit more conservative, and set border to 5.
	protect := hi-c < 5
	if !protect && hi-c < (hi-lo)/4 {
		// Lets test some points for equality to pivot
		dups := 0
		if data[pivot].freq == data[hi-1].freq && data[pivot].literal > data[hi-1].literal || data[pivot].freq > data[hi-1].freq { // data[hi-1] = pivot
			data[c], data[hi-1] = data[hi-1], data[c]
			c++
			dups++
		}
		if data[b-1].freq == data[pivot].freq && data[b-1].literal > data[pivot].literal || data[b-1].freq > data[pivot].freq { // data[b-1] = pivot
			b--
			dups++
		}
		// m-lo = (hi-lo)/2 > 6
		// b-lo > (hi-lo)*3/4-1 > 8
		// ==> m < b ==> data[m] <= pivot
		if data[m].freq == data[pivot].freq && data[m].literal > data[pivot].literal || data[m].freq > data[pivot].freq { // data[m] = pivot
			data[m], data[b-1] = data[b-1], data[m]
			b--
			dups++
		}
		// if at least 2 points are equal to pivot, assume skewed distribution
		protect = dups > 1
	}
	if protect {
		// Protect against a lot of duplicates
		// Add invariant:
		//	data[a <= i < b] unexamined
		//	data[b <= i < c] = pivot
		for {
			for ; a < b && (data[b-1].freq == data[pivot].freq && data[b-1].literal > data[pivot].literal || data[b-1].freq > data[pivot].freq); b-- { // data[b] == pivot
			}
			for ; a < b && (data[a].freq == data[pivot].freq && data[a].literal < data[pivot].literal || data[a].freq < data[pivot].freq); a++ { // data[a] < pivot
			}
			if a >= b {
				break
			}
			// data[a] == pivot; data[b-1] < pivot
			data[a], data[b-1] = data[b-1], data[a]
			a++
			b--
		}
	}
	// Swap pivot into middle
	data[pivot], data[b-1] = data[b-1], data[pivot]
	return b - 1, c
}
// insertionSortByFreq sorts data[a:b] ascending by (freq, literal) using
// insertion sort; intended for small, nearly-sorted ranges.
func insertionSortByFreq(data []literalNode, a, b int) {
	for i := a + 1; i < b; i++ {
		for j := i; j > a && (data[j].freq == data[j-1].freq && data[j].literal < data[j-1].literal || data[j].freq < data[j-1].freq); j-- {
			data[j], data[j-1] = data[j-1], data[j]
		}
	}
}
// quickSortByFreq, loosely following Bentley and McIlroy,
// ``Engineering a Sort Function,'' SP&E November 1993.

// medianOfThreeSortByFreq moves the median of the three values data[m0], data[m1], data[m2] into data[m1],
// comparing by (freq, literal) ascending.
func medianOfThreeSortByFreq(data []literalNode, m1, m0, m2 int) {
	// sort 3 elements
	if data[m1].freq == data[m0].freq && data[m1].literal < data[m0].literal || data[m1].freq < data[m0].freq {
		data[m1], data[m0] = data[m0], data[m1]
	}
	// data[m0] <= data[m1]
	if data[m2].freq == data[m1].freq && data[m2].literal < data[m1].literal || data[m2].freq < data[m1].freq {
		data[m2], data[m1] = data[m1], data[m2]
		// data[m0] <= data[m2] && data[m1] < data[m2]
		if data[m1].freq == data[m0].freq && data[m1].literal < data[m0].literal || data[m1].freq < data[m0].freq {
			data[m1], data[m0] = data[m0], data[m1]
		}
	}
	// now data[m0] <= data[m1] <= data[m2]
}
| @@ -0,0 +1,201 @@ | |||
| // Copyright 2009 The Go Authors. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| package flate | |||
// sortByLiteral sorts data ascending by literal value.
// It makes one call to data.Len to determine n, and O(n*log(n)) calls to
// data.Less and data.Swap. The sort is not guaranteed to be stable.
func sortByLiteral(data []literalNode) {
	n := len(data)
	quickSort(data, 0, n, maxDepth(n))
}
// quickSort sorts data[a:b] ascending by literal: introsort that falls back
// to heap sort past maxDepth and to a shell/insertion pass for slices of 12
// or fewer elements.
func quickSort(data []literalNode, a, b, maxDepth int) {
	for b-a > 12 { // Use ShellSort for slices <= 12 elements
		if maxDepth == 0 {
			heapSort(data, a, b)
			return
		}
		maxDepth--
		mlo, mhi := doPivot(data, a, b)
		// Avoiding recursion on the larger subproblem guarantees
		// a stack depth of at most lg(b-a).
		if mlo-a < b-mhi {
			quickSort(data, a, mlo, maxDepth)
			a = mhi // i.e., quickSort(data, mhi, b)
		} else {
			quickSort(data, mhi, b, maxDepth)
			b = mlo // i.e., quickSort(data, a, mlo)
		}
	}
	if b-a > 1 {
		// Do ShellSort pass with gap 6
		// It could be written in this simplified form cause b-a <= 12
		for i := a + 6; i < b; i++ {
			if data[i].literal < data[i-6].literal {
				data[i], data[i-6] = data[i-6], data[i]
			}
		}
		insertionSort(data, a, b)
	}
}
// heapSort sorts data[a:b] ascending by literal using an in-place binary
// heap; used as the introsort depth-limit fallback.
func heapSort(data []literalNode, a, b int) {
	first := a
	lo := 0
	hi := b - a

	// Build heap with greatest element at top.
	for i := (hi - 1) / 2; i >= 0; i-- {
		siftDown(data, i, hi, first)
	}

	// Pop elements, largest first, into end of data.
	for i := hi - 1; i >= 0; i-- {
		data[first], data[first+i] = data[first+i], data[first]
		siftDown(data, lo, i, first)
	}
}
// siftDown implements the heap property on data[lo, hi), ordering by literal.
// first is an offset into the array where the root of the heap lies.
func siftDown(data []literalNode, lo, hi, first int) {
	root := lo
	for {
		child := 2*root + 1
		if child >= hi {
			break
		}
		// Pick the greater of the two children.
		if child+1 < hi && data[first+child].literal < data[first+child+1].literal {
			child++
		}
		// Stop once the root is not smaller than its larger child.
		if data[first+root].literal > data[first+child].literal {
			return
		}
		data[first+root], data[first+child] = data[first+child], data[first+root]
		root = child
	}
}
// doPivot partitions data[lo:hi] around a median-of-three pivot ordered by
// literal, returning midlo, midhi such that elements equal to the pivot end
// up in data[midlo:midhi].
func doPivot(data []literalNode, lo, hi int) (midlo, midhi int) {
	m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow.
	if hi-lo > 40 {
		// Tukey's ``Ninther,'' median of three medians of three.
		s := (hi - lo) / 8
		medianOfThree(data, lo, lo+s, lo+2*s)
		medianOfThree(data, m, m-s, m+s)
		medianOfThree(data, hi-1, hi-1-s, hi-1-2*s)
	}
	medianOfThree(data, lo, m, hi-1)

	// Invariants are:
	//	data[lo] = pivot (set up by ChoosePivot)
	//	data[lo < i < a] < pivot
	//	data[a <= i < b] <= pivot
	//	data[b <= i < c] unexamined
	//	data[c <= i < hi-1] > pivot
	//	data[hi-1] >= pivot
	pivot := lo
	a, c := lo+1, hi-1

	for ; a < c && data[a].literal < data[pivot].literal; a++ {
	}
	b := a
	for {
		for ; b < c && data[pivot].literal > data[b].literal; b++ { // data[b] <= pivot
		}
		for ; b < c && data[pivot].literal < data[c-1].literal; c-- { // data[c-1] > pivot
		}
		if b >= c {
			break
		}
		// data[b] > pivot; data[c-1] <= pivot
		data[b], data[c-1] = data[c-1], data[b]
		b++
		c--
	}
	// If hi-c<3 then there are duplicates (by property of median of nine).
	// Let's be a bit more conservative, and set border to 5.
	protect := hi-c < 5
	if !protect && hi-c < (hi-lo)/4 {
		// Lets test some points for equality to pivot
		dups := 0
		if data[pivot].literal > data[hi-1].literal { // data[hi-1] = pivot
			data[c], data[hi-1] = data[hi-1], data[c]
			c++
			dups++
		}
		if data[b-1].literal > data[pivot].literal { // data[b-1] = pivot
			b--
			dups++
		}
		// m-lo = (hi-lo)/2 > 6
		// b-lo > (hi-lo)*3/4-1 > 8
		// ==> m < b ==> data[m] <= pivot
		if data[m].literal > data[pivot].literal { // data[m] = pivot
			data[m], data[b-1] = data[b-1], data[m]
			b--
			dups++
		}
		// if at least 2 points are equal to pivot, assume skewed distribution
		protect = dups > 1
	}
	if protect {
		// Protect against a lot of duplicates
		// Add invariant:
		//	data[a <= i < b] unexamined
		//	data[b <= i < c] = pivot
		for {
			for ; a < b && data[b-1].literal > data[pivot].literal; b-- { // data[b] == pivot
			}
			for ; a < b && data[a].literal < data[pivot].literal; a++ { // data[a] < pivot
			}
			if a >= b {
				break
			}
			// data[a] == pivot; data[b-1] < pivot
			data[a], data[b-1] = data[b-1], data[a]
			a++
			b--
		}
	}
	// Swap pivot into middle
	data[pivot], data[b-1] = data[b-1], data[pivot]
	return b - 1, c
}
| // Insertion sort | |||
| func insertionSort(data []literalNode, a, b int) { | |||
| for i := a + 1; i < b; i++ { | |||
| for j := i; j > a && data[j].literal < data[j-1].literal; j-- { | |||
| data[j], data[j-1] = data[j-1], data[j] | |||
| } | |||
| } | |||
| } | |||
// maxDepth returns a threshold at which quicksort should switch
// to heapsort. It returns 2*ceil(lg(n+1)).
func maxDepth(n int) int {
	depth := 0
	// Count the number of significant bits in n (the bit length).
	for v := n; v > 0; v >>= 1 {
		depth++
	}
	return 2 * depth
}
| // medianOfThree moves the median of the three values data[m0], data[m1], data[m2] into data[m1]. | |||
| func medianOfThree(data []literalNode, m1, m0, m2 int) { | |||
| // sort 3 elements | |||
| if data[m1].literal < data[m0].literal { | |||
| data[m1], data[m0] = data[m0], data[m1] | |||
| } | |||
| // data[m0] <= data[m1] | |||
| if data[m2].literal < data[m1].literal { | |||
| data[m2], data[m1] = data[m1], data[m2] | |||
| // data[m0] <= data[m2] && data[m1] < data[m2] | |||
| if data[m1].literal < data[m0].literal { | |||
| data[m1], data[m0] = data[m0], data[m1] | |||
| } | |||
| } | |||
| // now data[m0] <= data[m1] <= data[m2] | |||
| } | |||
| @@ -0,0 +1,793 @@ | |||
| // Copyright 2009 The Go Authors. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| // Package flate implements the DEFLATE compressed data format, described in | |||
| // RFC 1951. The gzip and zlib packages implement access to DEFLATE-based file | |||
| // formats. | |||
| package flate | |||
| import ( | |||
| "bufio" | |||
| "compress/flate" | |||
| "fmt" | |||
| "io" | |||
| "math/bits" | |||
| "sync" | |||
| ) | |||
const (
	maxCodeLen     = 16 // max length of Huffman code
	maxCodeLenMask = 15 // mask for max length of Huffman code
	// The next three numbers come from the RFC section 3.2.7, with the
	// additional proviso in section 3.2.5 which implies that distance codes
	// 30 and 31 should never occur in compressed data.
	maxNumLit  = 286
	maxNumDist = 30
	numCodes   = 19 // number of codes in Huffman meta-code
	// debugDecode gates the fmt.Println tracing scattered through the
	// decoder; it is a compile-time constant and must stay false in
	// production builds.
	debugDecode = false
)
// Value of length - 3 and extra bits.
type lengthExtra struct {
	length, extra uint8
}

// decCodeToLen maps a length code (index 0..28, i.e. symbol-257) to its base
// length minus 3 plus the count of extra bits to read; trailing entries are
// padding so the table can be indexed with 5 bits without a bounds check.
var decCodeToLen = [32]lengthExtra{{length: 0x0, extra: 0x0}, {length: 0x1, extra: 0x0}, {length: 0x2, extra: 0x0}, {length: 0x3, extra: 0x0}, {length: 0x4, extra: 0x0}, {length: 0x5, extra: 0x0}, {length: 0x6, extra: 0x0}, {length: 0x7, extra: 0x0}, {length: 0x8, extra: 0x1}, {length: 0xa, extra: 0x1}, {length: 0xc, extra: 0x1}, {length: 0xe, extra: 0x1}, {length: 0x10, extra: 0x2}, {length: 0x14, extra: 0x2}, {length: 0x18, extra: 0x2}, {length: 0x1c, extra: 0x2}, {length: 0x20, extra: 0x3}, {length: 0x28, extra: 0x3}, {length: 0x30, extra: 0x3}, {length: 0x38, extra: 0x3}, {length: 0x40, extra: 0x4}, {length: 0x50, extra: 0x4}, {length: 0x60, extra: 0x4}, {length: 0x70, extra: 0x4}, {length: 0x80, extra: 0x5}, {length: 0xa0, extra: 0x5}, {length: 0xc0, extra: 0x5}, {length: 0xe0, extra: 0x5}, {length: 0xff, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}}
// bitMask32 holds low-bit masks: bitMask32[n] == (1<<n)-1 for n in [0,31].
var bitMask32 = [32]uint32{
	0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF,
	0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF,
	0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF,
	0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF,
} // up to 32 bits

// Initialize the fixedHuffmanDecoder only once upon first use.
var fixedOnce sync.Once
var fixedHuffmanDecoder huffmanDecoder
// A CorruptInputError reports the presence of corrupt input at a given offset.
// It is an alias of the standard library's compress/flate error type so that
// callers can match errors from either package interchangeably.
type CorruptInputError = flate.CorruptInputError
// An InternalError reports an error in the flate code itself.
type InternalError string

// Error implements the error interface, prefixing the message with the
// package name.
func (e InternalError) Error() string {
	return fmt.Sprintf("flate: internal error: %s", string(e))
}
// A ReadError reports an error encountered while reading input.
// It aliases the standard library type for API compatibility.
//
// Deprecated: No longer returned.
type ReadError = flate.ReadError

// A WriteError reports an error encountered while writing output.
// It aliases the standard library type for API compatibility.
//
// Deprecated: No longer returned.
type WriteError = flate.WriteError
// Resetter resets a ReadCloser returned by NewReader or NewReaderDict
// to switch to a new underlying Reader. This permits reusing a ReadCloser
// instead of allocating a new one.
type Resetter interface {
	// Reset discards any buffered data and resets the Resetter as if it was
	// newly initialized with the given reader.
	Reset(r io.Reader, dict []byte) error
}
// The data structure for decoding Huffman tables is based on that of
// zlib. There is a lookup table of a fixed bit width (huffmanChunkBits),
// For codes smaller than the table width, there are multiple entries
// (each combination of trailing bits has the same value). For codes
// larger than the table width, the table contains a link to an overflow
// table. The width of each entry in the link table is the maximum code
// size minus the chunk width.
//
// Note that you can do a lookup in the table even without all bits
// filled. Since the extra bits are zero, and the DEFLATE Huffman codes
// have the property that shorter codes come before longer ones, the
// bit length estimate in the result is a lower bound on the actual
// number of bits.
//
// See the following:
//	http://www.gzip.org/algorithm.txt

// chunk & 15 is number of bits
// chunk >> 4 is value, including table link
const (
	huffmanChunkBits  = 9               // width of the primary lookup table, in bits
	huffmanNumChunks  = 1 << huffmanChunkBits // number of entries in the primary table
	huffmanCountMask  = 15              // low bits of a chunk: code length in bits
	huffmanValueShift = 4               // high bits of a chunk: decoded value or link index
)
// huffmanDecoder is a two-level lookup structure for decoding Huffman
// symbols, as described in the comment above. It is populated by init.
type huffmanDecoder struct {
	maxRead int                      // the maximum number of bits we can read and not overread
	chunks  *[huffmanNumChunks]uint16 // chunks as described above
	links   [][]uint16               // overflow links
	linkMask uint32                  // mask the width of the link table
}
// Initialize Huffman decoding tables from array of code lengths.
// Following this function, h is guaranteed to be initialized into a complete
// tree (i.e., neither over-subscribed nor under-subscribed). The exception is a
// degenerate case where the tree has only a single symbol with length 1. Empty
// trees are permitted.
//
// lengths[i] is the code length in bits for symbol i (0 means the symbol is
// unused). init returns false if the lengths do not describe a valid
// (complete or degenerate single-code) Huffman coding. Previously allocated
// chunk and link storage is reused across calls.
func (h *huffmanDecoder) init(lengths []int) bool {
	// Sanity enables additional runtime tests during Huffman
	// table construction. It's intended to be used during
	// development to supplement the currently ad-hoc unit tests.
	const sanity = false

	if h.chunks == nil {
		h.chunks = &[huffmanNumChunks]uint16{}
	}
	// A non-zero maxRead means h was used before; reset everything except the
	// reusable chunk/link storage.
	if h.maxRead != 0 {
		*h = huffmanDecoder{chunks: h.chunks, links: h.links}
	}

	// Count number of codes of each length,
	// compute maxRead and max length.
	var count [maxCodeLen]int
	var min, max int
	for _, n := range lengths {
		if n == 0 {
			continue
		}
		if min == 0 || n < min {
			min = n
		}
		if n > max {
			max = n
		}
		count[n&maxCodeLenMask]++
	}

	// Empty tree. The decompressor.huffSym function will fail later if the tree
	// is used. Technically, an empty tree is only valid for the HDIST tree and
	// not the HCLEN and HLIT tree. However, a stream with an empty HCLEN tree
	// is guaranteed to fail since it will attempt to use the tree to decode the
	// codes for the HLIT and HDIST trees. Similarly, an empty HLIT tree is
	// guaranteed to fail later since the compressed data section must be
	// composed of at least one symbol (the end-of-block marker).
	if max == 0 {
		return true
	}

	// Assign the canonical starting code for each length.
	code := 0
	var nextcode [maxCodeLen]int
	for i := min; i <= max; i++ {
		code <<= 1
		nextcode[i&maxCodeLenMask] = code
		code += count[i&maxCodeLenMask]
	}

	// Check that the coding is complete (i.e., that we've
	// assigned all 2-to-the-max possible bit sequences).
	// Exception: To be compatible with zlib, we also need to
	// accept degenerate single-code codings. See also
	// TestDegenerateHuffmanCoding.
	if code != 1<<uint(max) && !(code == 1 && max == 1) {
		if debugDecode {
			fmt.Println("coding failed, code, max:", code, max, code == 1<<uint(max), code == 1 && max == 1, "(one should be true)")
		}
		return false
	}

	h.maxRead = min
	chunks := h.chunks[:]
	for i := range chunks {
		chunks[i] = 0
	}

	if max > huffmanChunkBits {
		numLinks := 1 << (uint(max) - huffmanChunkBits)
		h.linkMask = uint32(numLinks - 1)

		// create link tables
		link := nextcode[huffmanChunkBits+1] >> 1
		if cap(h.links) < huffmanNumChunks-link {
			h.links = make([][]uint16, huffmanNumChunks-link)
		} else {
			h.links = h.links[:huffmanNumChunks-link]
		}
		for j := uint(link); j < huffmanNumChunks; j++ {
			// Chunks are indexed by bit-reversed code prefixes.
			reverse := int(bits.Reverse16(uint16(j)))
			reverse >>= uint(16 - huffmanChunkBits)
			off := j - uint(link)
			if sanity && h.chunks[reverse] != 0 {
				panic("impossible: overwriting existing chunk")
			}
			h.chunks[reverse] = uint16(off<<huffmanValueShift | (huffmanChunkBits + 1))
			if cap(h.links[off]) < numLinks {
				h.links[off] = make([]uint16, numLinks)
			} else {
				links := h.links[off][:0]
				h.links[off] = links[:numLinks]
			}
		}
	} else {
		h.links = h.links[:0]
	}

	for i, n := range lengths {
		if n == 0 {
			continue
		}
		code := nextcode[n]
		nextcode[n]++
		chunk := uint16(i<<huffmanValueShift | n)
		reverse := int(bits.Reverse16(uint16(code)))
		reverse >>= uint(16 - n)
		if n <= huffmanChunkBits {
			// Short code: replicate the entry for every possible suffix.
			for off := reverse; off < len(h.chunks); off += 1 << uint(n) {
				// We should never need to overwrite
				// an existing chunk. Also, 0 is
				// never a valid chunk, because the
				// lower 4 "count" bits should be
				// between 1 and 15.
				if sanity && h.chunks[off] != 0 {
					panic("impossible: overwriting existing chunk")
				}
				h.chunks[off] = chunk
			}
		} else {
			// Long code: fill the overflow table reached through the link chunk.
			j := reverse & (huffmanNumChunks - 1)
			if sanity && h.chunks[j]&huffmanCountMask != huffmanChunkBits+1 {
				// Longer codes should have been
				// associated with a link table above.
				panic("impossible: not an indirect chunk")
			}
			value := h.chunks[j] >> huffmanValueShift
			linktab := h.links[value]
			reverse >>= huffmanChunkBits
			for off := reverse; off < len(linktab); off += 1 << uint(n-huffmanChunkBits) {
				if sanity && linktab[off] != 0 {
					panic("impossible: overwriting existing chunk")
				}
				linktab[off] = chunk
			}
		}
	}

	if sanity {
		// Above we've sanity checked that we never overwrote
		// an existing entry. Here we additionally check that
		// we filled the tables completely.
		for i, chunk := range h.chunks {
			if chunk == 0 {
				// As an exception, in the degenerate
				// single-code case, we allow odd
				// chunks to be missing.
				if code == 1 && i%2 == 1 {
					continue
				}
				panic("impossible: missing chunk")
			}
		}
		for _, linktab := range h.links {
			for _, chunk := range linktab {
				if chunk == 0 {
					panic("impossible: missing chunk")
				}
			}
		}
	}

	return true
}
// The actual read interface needed by NewReader.
// If the passed in io.Reader does not also have ReadByte,
// the NewReader will introduce its own buffering.
type Reader interface {
	io.Reader
	io.ByteReader
}
// (ByteReader is required because the bit reader consumes input one byte
// at a time; see moreBits.)
// Decompress state.
// decompressor implements io.ReadCloser (and WriterTo/Resetter) over a
// DEFLATE stream. Decoding proceeds via the step function pointer, which
// acts as a resumable state machine so Read can pause mid-block when the
// output window fills.
type decompressor struct {
	// Input source.
	r       Reader
	roffset int64 // bytes consumed from r, used in CorruptInputError offsets

	// Huffman decoders for literal/length, distance.
	h1, h2 huffmanDecoder

	// Length arrays used to define Huffman codes.
	bits     *[maxNumLit + maxNumDist]int
	codebits *[numCodes]int

	// Output history, buffer.
	dict dictDecoder

	// Next step in the decompression,
	// and decompression state.
	step      func(*decompressor)
	stepState int
	err       error // sticky; once set, Read keeps returning it after draining toRead
	toRead    []byte
	hl, hd    *huffmanDecoder
	copyLen   int
	copyDist  int

	// Temporary buffer (avoids repeated allocation).
	buf [4]byte

	// Input bits, in top of b.
	b  uint32
	nb uint // number of valid bits in b
	final bool // current block had the BFINAL flag set
}
| func (f *decompressor) nextBlock() { | |||
| for f.nb < 1+2 { | |||
| if f.err = f.moreBits(); f.err != nil { | |||
| return | |||
| } | |||
| } | |||
| f.final = f.b&1 == 1 | |||
| f.b >>= 1 | |||
| typ := f.b & 3 | |||
| f.b >>= 2 | |||
| f.nb -= 1 + 2 | |||
| switch typ { | |||
| case 0: | |||
| f.dataBlock() | |||
| if debugDecode { | |||
| fmt.Println("stored block") | |||
| } | |||
| case 1: | |||
| // compressed, fixed Huffman tables | |||
| f.hl = &fixedHuffmanDecoder | |||
| f.hd = nil | |||
| f.huffmanBlockDecoder()() | |||
| if debugDecode { | |||
| fmt.Println("predefinied huffman block") | |||
| } | |||
| case 2: | |||
| // compressed, dynamic Huffman tables | |||
| if f.err = f.readHuffman(); f.err != nil { | |||
| break | |||
| } | |||
| f.hl = &f.h1 | |||
| f.hd = &f.h2 | |||
| f.huffmanBlockDecoder()() | |||
| if debugDecode { | |||
| fmt.Println("dynamic huffman block") | |||
| } | |||
| default: | |||
| // 3 is reserved. | |||
| if debugDecode { | |||
| fmt.Println("reserved data block encountered") | |||
| } | |||
| f.err = CorruptInputError(f.roffset) | |||
| } | |||
| } | |||
// Read implements io.Reader. It first drains any pending decompressed
// output in f.toRead, then drives the step state machine until more output
// or an error is produced. A deferred error (typically io.EOF) is returned
// together with the final bytes once toRead is fully consumed.
func (f *decompressor) Read(b []byte) (int, error) {
	for {
		if len(f.toRead) > 0 {
			n := copy(b, f.toRead)
			f.toRead = f.toRead[n:]
			if len(f.toRead) == 0 {
				// All pending output delivered; surface the sticky error now.
				return n, f.err
			}
			return n, nil
		}
		if f.err != nil {
			return 0, f.err
		}
		f.step(f)
		if f.err != nil && len(f.toRead) == 0 {
			f.toRead = f.dict.readFlush() // Flush what's left in case of error
		}
	}
}
// Support the io.WriteTo interface for io.Copy and friends.
// WriteTo streams all remaining decompressed data into w, returning the
// number of bytes written. io.EOF from the decoder is translated into a
// nil error, matching the io.WriterTo contract.
func (f *decompressor) WriteTo(w io.Writer) (int64, error) {
	total := int64(0)
	flushed := false
	for {
		if len(f.toRead) > 0 {
			n, err := w.Write(f.toRead)
			total += int64(n)
			if err != nil {
				f.err = err
				return total, err
			}
			if n != len(f.toRead) {
				return total, io.ErrShortWrite
			}
			f.toRead = f.toRead[:0]
		}
		if f.err != nil && flushed {
			if f.err == io.EOF {
				return total, nil
			}
			return total, f.err
		}
		if f.err == nil {
			f.step(f)
		}
		if len(f.toRead) == 0 && f.err != nil && !flushed {
			f.toRead = f.dict.readFlush() // Flush what's left in case of error
			flushed = true
		}
	}
}
| func (f *decompressor) Close() error { | |||
| if f.err == io.EOF { | |||
| return nil | |||
| } | |||
| return f.err | |||
| } | |||
// RFC 1951 section 3.2.7.
// Compression with dynamic Huffman codes

// codeOrder is the order in which code-length code lengths appear in the
// stream, per RFC 1951 section 3.2.7.
var codeOrder = [...]int{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}
| func (f *decompressor) readHuffman() error { | |||
| // HLIT[5], HDIST[5], HCLEN[4]. | |||
| for f.nb < 5+5+4 { | |||
| if err := f.moreBits(); err != nil { | |||
| return err | |||
| } | |||
| } | |||
| nlit := int(f.b&0x1F) + 257 | |||
| if nlit > maxNumLit { | |||
| if debugDecode { | |||
| fmt.Println("nlit > maxNumLit", nlit) | |||
| } | |||
| return CorruptInputError(f.roffset) | |||
| } | |||
| f.b >>= 5 | |||
| ndist := int(f.b&0x1F) + 1 | |||
| if ndist > maxNumDist { | |||
| if debugDecode { | |||
| fmt.Println("ndist > maxNumDist", ndist) | |||
| } | |||
| return CorruptInputError(f.roffset) | |||
| } | |||
| f.b >>= 5 | |||
| nclen := int(f.b&0xF) + 4 | |||
| // numCodes is 19, so nclen is always valid. | |||
| f.b >>= 4 | |||
| f.nb -= 5 + 5 + 4 | |||
| // (HCLEN+4)*3 bits: code lengths in the magic codeOrder order. | |||
| for i := 0; i < nclen; i++ { | |||
| for f.nb < 3 { | |||
| if err := f.moreBits(); err != nil { | |||
| return err | |||
| } | |||
| } | |||
| f.codebits[codeOrder[i]] = int(f.b & 0x7) | |||
| f.b >>= 3 | |||
| f.nb -= 3 | |||
| } | |||
| for i := nclen; i < len(codeOrder); i++ { | |||
| f.codebits[codeOrder[i]] = 0 | |||
| } | |||
| if !f.h1.init(f.codebits[0:]) { | |||
| if debugDecode { | |||
| fmt.Println("init codebits failed") | |||
| } | |||
| return CorruptInputError(f.roffset) | |||
| } | |||
| // HLIT + 257 code lengths, HDIST + 1 code lengths, | |||
| // using the code length Huffman code. | |||
| for i, n := 0, nlit+ndist; i < n; { | |||
| x, err := f.huffSym(&f.h1) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| if x < 16 { | |||
| // Actual length. | |||
| f.bits[i] = x | |||
| i++ | |||
| continue | |||
| } | |||
| // Repeat previous length or zero. | |||
| var rep int | |||
| var nb uint | |||
| var b int | |||
| switch x { | |||
| default: | |||
| return InternalError("unexpected length code") | |||
| case 16: | |||
| rep = 3 | |||
| nb = 2 | |||
| if i == 0 { | |||
| if debugDecode { | |||
| fmt.Println("i==0") | |||
| } | |||
| return CorruptInputError(f.roffset) | |||
| } | |||
| b = f.bits[i-1] | |||
| case 17: | |||
| rep = 3 | |||
| nb = 3 | |||
| b = 0 | |||
| case 18: | |||
| rep = 11 | |||
| nb = 7 | |||
| b = 0 | |||
| } | |||
| for f.nb < nb { | |||
| if err := f.moreBits(); err != nil { | |||
| if debugDecode { | |||
| fmt.Println("morebits:", err) | |||
| } | |||
| return err | |||
| } | |||
| } | |||
| rep += int(f.b & uint32(1<<(nb®SizeMaskUint32)-1)) | |||
| f.b >>= nb & regSizeMaskUint32 | |||
| f.nb -= nb | |||
| if i+rep > n { | |||
| if debugDecode { | |||
| fmt.Println("i+rep > n", i, rep, n) | |||
| } | |||
| return CorruptInputError(f.roffset) | |||
| } | |||
| for j := 0; j < rep; j++ { | |||
| f.bits[i] = b | |||
| i++ | |||
| } | |||
| } | |||
| if !f.h1.init(f.bits[0:nlit]) || !f.h2.init(f.bits[nlit:nlit+ndist]) { | |||
| if debugDecode { | |||
| fmt.Println("init2 failed") | |||
| } | |||
| return CorruptInputError(f.roffset) | |||
| } | |||
| // As an optimization, we can initialize the maxRead bits to read at a time | |||
| // for the HLIT tree to the length of the EOB marker since we know that | |||
| // every block must terminate with one. This preserves the property that | |||
| // we never read any extra bytes after the end of the DEFLATE stream. | |||
| if f.h1.maxRead < f.bits[endBlockMarker] { | |||
| f.h1.maxRead = f.bits[endBlockMarker] | |||
| } | |||
| if !f.final { | |||
| // If not the final block, the smallest block possible is | |||
| // a predefined table, BTYPE=01, with a single EOB marker. | |||
| // This will take up 3 + 7 bits. | |||
| f.h1.maxRead += 10 | |||
| } | |||
| return nil | |||
| } | |||
// Copy a single uncompressed data block from input to output.
// Per RFC 1951 section 3.2.4, a stored block starts at a byte boundary
// with LEN and NLEN (ones-complement of LEN) little-endian uint16s.
func (f *decompressor) dataBlock() {
	// Uncompressed.
	// Discard current half-byte.
	left := (f.nb) & 7
	f.nb -= left
	f.b >>= left

	offBytes := f.nb >> 3
	// Unfilled values will be overwritten.
	// Spill whole bytes already sitting in the bit buffer into f.buf so the
	// LEN/NLEN header can be assembled without re-reading them from f.r.
	f.buf[0] = uint8(f.b)
	f.buf[1] = uint8(f.b >> 8)
	f.buf[2] = uint8(f.b >> 16)
	f.buf[3] = uint8(f.b >> 24)

	f.roffset += int64(offBytes)
	f.nb, f.b = 0, 0

	// Length then ones-complement of length.
	nr, err := io.ReadFull(f.r, f.buf[offBytes:4])
	f.roffset += int64(nr)
	if err != nil {
		f.err = noEOF(err)
		return
	}
	n := uint16(f.buf[0]) | uint16(f.buf[1])<<8
	nn := uint16(f.buf[2]) | uint16(f.buf[3])<<8
	if nn != ^n {
		if debugDecode {
			ncomp := ^n
			fmt.Println("uint16(nn) != uint16(^n)", nn, ncomp)
		}
		f.err = CorruptInputError(f.roffset)
		return
	}

	if n == 0 {
		// Zero-length stored block: flush the window and finish.
		f.toRead = f.dict.readFlush()
		f.finishBlock()
		return
	}

	f.copyLen = int(n)
	f.copyData()
}
// copyData copies f.copyLen bytes from the underlying reader into f.hist.
// It pauses for reads when f.hist is full.
// When it pauses, it records itself as the next step so Read resumes the
// copy after the caller drains f.toRead.
func (f *decompressor) copyData() {
	buf := f.dict.writeSlice()
	if len(buf) > f.copyLen {
		buf = buf[:f.copyLen]
	}

	cnt, err := io.ReadFull(f.r, buf)
	f.roffset += int64(cnt)
	f.copyLen -= cnt
	f.dict.writeMark(cnt)
	if err != nil {
		f.err = noEOF(err)
		return
	}

	if f.dict.availWrite() == 0 || f.copyLen > 0 {
		// Window full or more to copy: emit what we have and resume here.
		f.toRead = f.dict.readFlush()
		f.step = (*decompressor).copyData
		return
	}
	f.finishBlock()
}
| func (f *decompressor) finishBlock() { | |||
| if f.final { | |||
| if f.dict.availRead() > 0 { | |||
| f.toRead = f.dict.readFlush() | |||
| } | |||
| f.err = io.EOF | |||
| } | |||
| f.step = (*decompressor).nextBlock | |||
| } | |||
// noEOF returns err, unless err == io.EOF, in which case it returns io.ErrUnexpectedEOF.
// An EOF in the middle of a block means the stream was truncated.
func noEOF(e error) error {
	if e != io.EOF {
		return e
	}
	return io.ErrUnexpectedEOF
}
// moreBits reads one byte from the input and merges it into the bit buffer
// f.b above the f.nb bits already held, advancing roffset. A truncated
// stream is reported as io.ErrUnexpectedEOF via noEOF.
func (f *decompressor) moreBits() error {
	c, err := f.r.ReadByte()
	if err != nil {
		return noEOF(err)
	}
	f.roffset++
	f.b |= uint32(c) << (f.nb & regSizeMaskUint32)
	f.nb += 8
	return nil
}
// Read the next Huffman-encoded symbol from f according to h.
// Returns the decoded symbol value, consuming exactly the bits of its code.
// A chunk with a zero bit count marks an invalid sequence and yields
// CorruptInputError.
func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) {
	// Since a huffmanDecoder can be empty or be composed of a degenerate tree
	// with single element, huffSym must error on these two edge cases. In both
	// cases, the chunks slice will be 0 for the invalid sequence, leading it
	// satisfy the n == 0 check below.
	n := uint(h.maxRead)
	// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
	// but is smart enough to keep local variables in registers, so use nb and b,
	// inline call to moreBits and reassign b,nb back to f on return.
	nb, b := f.nb, f.b
	for {
		// Refill the bit buffer until at least n bits are available.
		for nb < n {
			c, err := f.r.ReadByte()
			if err != nil {
				f.b = b
				f.nb = nb
				return 0, noEOF(err)
			}
			f.roffset++
			b |= uint32(c) << (nb & regSizeMaskUint32)
			nb += 8
		}
		// Primary table lookup; follow the overflow link for long codes.
		chunk := h.chunks[b&(huffmanNumChunks-1)]
		n = uint(chunk & huffmanCountMask)
		if n > huffmanChunkBits {
			chunk = h.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&h.linkMask]
			n = uint(chunk & huffmanCountMask)
		}
		if n <= nb {
			if n == 0 {
				f.b = b
				f.nb = nb
				if debugDecode {
					fmt.Println("huffsym: n==0")
				}
				f.err = CorruptInputError(f.roffset)
				return 0, f.err
			}
			// Consume exactly the code's bits and return the symbol.
			f.b = b >> (n & regSizeMaskUint32)
			f.nb = nb - n
			return int(chunk >> huffmanValueShift), nil
		}
	}
}
| func makeReader(r io.Reader) Reader { | |||
| if rr, ok := r.(Reader); ok { | |||
| return rr | |||
| } | |||
| return bufio.NewReader(r) | |||
| } | |||
| func fixedHuffmanDecoderInit() { | |||
| fixedOnce.Do(func() { | |||
| // These come from the RFC section 3.2.6. | |||
| var bits [288]int | |||
| for i := 0; i < 144; i++ { | |||
| bits[i] = 8 | |||
| } | |||
| for i := 144; i < 256; i++ { | |||
| bits[i] = 9 | |||
| } | |||
| for i := 256; i < 280; i++ { | |||
| bits[i] = 7 | |||
| } | |||
| for i := 280; i < 288; i++ { | |||
| bits[i] = 8 | |||
| } | |||
| fixedHuffmanDecoder.init(bits[:]) | |||
| }) | |||
| } | |||
// Reset discards the decompressor's state and re-initializes it to read
// from r with the given preset dictionary, reusing the already-allocated
// bit arrays, Huffman decoders and history window. It always returns nil
// (the error return satisfies the Resetter interface).
func (f *decompressor) Reset(r io.Reader, dict []byte) error {
	*f = decompressor{
		r:        makeReader(r),
		bits:     f.bits,
		codebits: f.codebits,
		h1:       f.h1,
		h2:       f.h2,
		dict:     f.dict,
		step:     (*decompressor).nextBlock,
	}
	f.dict.init(maxMatchOffset, dict)
	return nil
}
| // NewReader returns a new ReadCloser that can be used | |||
| // to read the uncompressed version of r. | |||
| // If r does not also implement io.ByteReader, | |||
| // the decompressor may read more data than necessary from r. | |||
| // It is the caller's responsibility to call Close on the ReadCloser | |||
| // when finished reading. | |||
| // | |||
| // The ReadCloser returned by NewReader also implements Resetter. | |||
| func NewReader(r io.Reader) io.ReadCloser { | |||
| fixedHuffmanDecoderInit() | |||
| var f decompressor | |||
| f.r = makeReader(r) | |||
| f.bits = new([maxNumLit + maxNumDist]int) | |||
| f.codebits = new([numCodes]int) | |||
| f.step = (*decompressor).nextBlock | |||
| f.dict.init(maxMatchOffset, nil) | |||
| return &f | |||
| } | |||
| // NewReaderDict is like NewReader but initializes the reader | |||
| // with a preset dictionary. The returned Reader behaves as if | |||
| // the uncompressed data stream started with the given dictionary, | |||
| // which has already been read. NewReaderDict is typically used | |||
| // to read data compressed by NewWriterDict. | |||
| // | |||
| // The ReadCloser returned by NewReader also implements Resetter. | |||
| func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser { | |||
| fixedHuffmanDecoderInit() | |||
| var f decompressor | |||
| f.r = makeReader(r) | |||
| f.bits = new([maxNumLit + maxNumDist]int) | |||
| f.codebits = new([numCodes]int) | |||
| f.step = (*decompressor).nextBlock | |||
| f.dict.init(maxMatchOffset, dict) | |||
| return &f | |||
| } | |||
| @@ -0,0 +1,240 @@ | |||
| package flate | |||
| import ( | |||
| "encoding/binary" | |||
| "fmt" | |||
| "math/bits" | |||
| ) | |||
// fastGen maintains the table for matches,
// and the previous byte block for level 2.
// This is the generic implementation.
// NOTE(review): the "level 2" wording above looks inherited from a sibling
// file; this type is used by the level-1 encoder (fastEncL1.Encode) —
// confirm against the other level implementations.
type fastEncL1 struct {
	fastGen
	// table maps hashed 4-byte sequences to their most recent offset.
	table [tableSize]tableEntry
}
| // EncodeL1 uses a similar algorithm to level 1 | |||
| func (e *fastEncL1) Encode(dst *tokens, src []byte) { | |||
| const ( | |||
| inputMargin = 12 - 1 | |||
| minNonLiteralBlockSize = 1 + 1 + inputMargin | |||
| ) | |||
| if debugDeflate && e.cur < 0 { | |||
| panic(fmt.Sprint("e.cur < 0: ", e.cur)) | |||
| } | |||
| // Protect against e.cur wraparound. | |||
| for e.cur >= bufferReset { | |||
| if len(e.hist) == 0 { | |||
| for i := range e.table[:] { | |||
| e.table[i] = tableEntry{} | |||
| } | |||
| e.cur = maxMatchOffset | |||
| break | |||
| } | |||
| // Shift down everything in the table that isn't already too far away. | |||
| minOff := e.cur + int32(len(e.hist)) - maxMatchOffset | |||
| for i := range e.table[:] { | |||
| v := e.table[i].offset | |||
| if v <= minOff { | |||
| v = 0 | |||
| } else { | |||
| v = v - e.cur + maxMatchOffset | |||
| } | |||
| e.table[i].offset = v | |||
| } | |||
| e.cur = maxMatchOffset | |||
| } | |||
| s := e.addBlock(src) | |||
| // This check isn't in the Snappy implementation, but there, the caller | |||
| // instead of the callee handles this case. | |||
| if len(src) < minNonLiteralBlockSize { | |||
| // We do not fill the token table. | |||
| // This will be picked up by caller. | |||
| dst.n = uint16(len(src)) | |||
| return | |||
| } | |||
| // Override src | |||
| src = e.hist | |||
| nextEmit := s | |||
| // sLimit is when to stop looking for offset/length copies. The inputMargin | |||
| // lets us use a fast path for emitLiteral in the main loop, while we are | |||
| // looking for copies. | |||
| sLimit := int32(len(src) - inputMargin) | |||
| // nextEmit is where in src the next emitLiteral should start from. | |||
| cv := load3232(src, s) | |||
| for { | |||
| const skipLog = 5 | |||
| const doEvery = 2 | |||
| nextS := s | |||
| var candidate tableEntry | |||
| for { | |||
| nextHash := hash(cv) | |||
| candidate = e.table[nextHash] | |||
| nextS = s + doEvery + (s-nextEmit)>>skipLog | |||
| if nextS > sLimit { | |||
| goto emitRemainder | |||
| } | |||
| now := load6432(src, nextS) | |||
| e.table[nextHash] = tableEntry{offset: s + e.cur} | |||
| nextHash = hash(uint32(now)) | |||
| offset := s - (candidate.offset - e.cur) | |||
| if offset < maxMatchOffset && cv == load3232(src, candidate.offset-e.cur) { | |||
| e.table[nextHash] = tableEntry{offset: nextS + e.cur} | |||
| break | |||
| } | |||
| // Do one right away... | |||
| cv = uint32(now) | |||
| s = nextS | |||
| nextS++ | |||
| candidate = e.table[nextHash] | |||
| now >>= 8 | |||
| e.table[nextHash] = tableEntry{offset: s + e.cur} | |||
| offset = s - (candidate.offset - e.cur) | |||
| if offset < maxMatchOffset && cv == load3232(src, candidate.offset-e.cur) { | |||
| e.table[nextHash] = tableEntry{offset: nextS + e.cur} | |||
| break | |||
| } | |||
| cv = uint32(now) | |||
| s = nextS | |||
| } | |||
| // A 4-byte match has been found. We'll later see if more than 4 bytes | |||
| // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit | |||
| // them as literal bytes. | |||
| for { | |||
| // Invariant: we have a 4-byte match at s, and no need to emit any | |||
| // literal bytes prior to s. | |||
| // Extend the 4-byte match as long as possible. | |||
| t := candidate.offset - e.cur | |||
| var l = int32(4) | |||
| if false { | |||
| l = e.matchlenLong(s+4, t+4, src) + 4 | |||
| } else { | |||
| // inlined: | |||
| a := src[s+4:] | |||
| b := src[t+4:] | |||
| for len(a) >= 8 { | |||
| if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 { | |||
| l += int32(bits.TrailingZeros64(diff) >> 3) | |||
| break | |||
| } | |||
| l += 8 | |||
| a = a[8:] | |||
| b = b[8:] | |||
| } | |||
| if len(a) < 8 { | |||
| b = b[:len(a)] | |||
| for i := range a { | |||
| if a[i] != b[i] { | |||
| break | |||
| } | |||
| l++ | |||
| } | |||
| } | |||
| } | |||
| // Extend backwards | |||
| for t > 0 && s > nextEmit && src[t-1] == src[s-1] { | |||
| s-- | |||
| t-- | |||
| l++ | |||
| } | |||
| if nextEmit < s { | |||
| if false { | |||
| emitLiteral(dst, src[nextEmit:s]) | |||
| } else { | |||
| for _, v := range src[nextEmit:s] { | |||
| dst.tokens[dst.n] = token(v) | |||
| dst.litHist[v]++ | |||
| dst.n++ | |||
| } | |||
| } | |||
| } | |||
| // Save the match found | |||
| if false { | |||
| dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) | |||
| } else { | |||
| // Inlined... | |||
| xoffset := uint32(s - t - baseMatchOffset) | |||
| xlength := l | |||
| oc := offsetCode(xoffset) | |||
| xoffset |= oc << 16 | |||
| for xlength > 0 { | |||
| xl := xlength | |||
| if xl > 258 { | |||
| if xl > 258+baseMatchLength { | |||
| xl = 258 | |||
| } else { | |||
| xl = 258 - baseMatchLength | |||
| } | |||
| } | |||
| xlength -= xl | |||
| xl -= baseMatchLength | |||
| dst.extraHist[lengthCodes1[uint8(xl)]]++ | |||
| dst.offHist[oc]++ | |||
| dst.tokens[dst.n] = token(matchType | uint32(xl)<<lengthShift | xoffset) | |||
| dst.n++ | |||
| } | |||
| } | |||
| s += l | |||
| nextEmit = s | |||
| if nextS >= s { | |||
| s = nextS + 1 | |||
| } | |||
| if s >= sLimit { | |||
| // Index first pair after match end. | |||
| if int(s+l+4) < len(src) { | |||
| cv := load3232(src, s) | |||
| e.table[hash(cv)] = tableEntry{offset: s + e.cur} | |||
| } | |||
| goto emitRemainder | |||
| } | |||
| // We could immediately start working at s now, but to improve | |||
| // compression we first update the hash table at s-2 and at s. If | |||
| // another emitCopy is not our next move, also calculate nextHash | |||
| // at s+1. At least on GOARCH=amd64, these three hash calculations | |||
| // are faster as one load64 call (with some shifts) instead of | |||
| // three load32 calls. | |||
| x := load6432(src, s-2) | |||
| o := e.cur + s - 2 | |||
| prevHash := hash(uint32(x)) | |||
| e.table[prevHash] = tableEntry{offset: o} | |||
| x >>= 16 | |||
| currHash := hash(uint32(x)) | |||
| candidate = e.table[currHash] | |||
| e.table[currHash] = tableEntry{offset: o + 2} | |||
| offset := s - (candidate.offset - e.cur) | |||
| if offset > maxMatchOffset || uint32(x) != load3232(src, candidate.offset-e.cur) { | |||
| cv = uint32(x >> 8) | |||
| s++ | |||
| break | |||
| } | |||
| } | |||
| } | |||
| emitRemainder: | |||
| if int(nextEmit) < len(src) { | |||
| // If nothing was added, don't encode literals. | |||
| if dst.n == 0 { | |||
| return | |||
| } | |||
| emitLiteral(dst, src[nextEmit:]) | |||
| } | |||
| } | |||
| @@ -0,0 +1,213 @@ | |||
| package flate | |||
| import "fmt" | |||
// fastEncL2 is the compression-level-2 encoder.
// The embedded fastGen maintains the previous byte block (history) so
// matches can span block boundaries. This is the generic implementation.
type fastEncL2 struct {
	fastGen
	// table maps a 4-byte hash to the most recent absolute offset
	// (position + e.cur) seen for that hash.
	table [bTableSize]tableEntry
}
// Encode uses a similar algorithm to level 1, but is capable
// of matching across blocks giving better compression at a small slowdown.
// Tokens (literals and length/offset matches) are appended to dst;
// src is appended to the history buffer before matching.
func (e *fastEncL2) Encode(dst *tokens, src []byte) {
	const (
		inputMargin = 12 - 1
		minNonLiteralBlockSize = 1 + 1 + inputMargin
	)
	if debugDeflate && e.cur < 0 {
		panic(fmt.Sprint("e.cur < 0: ", e.cur))
	}
	// Protect against e.cur wraparound.
	for e.cur >= bufferReset {
		if len(e.hist) == 0 {
			// No history: simply reset the table and the base offset.
			for i := range e.table[:] {
				e.table[i] = tableEntry{}
			}
			e.cur = maxMatchOffset
			break
		}
		// Shift down everything in the table that isn't already too far away.
		minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
		for i := range e.table[:] {
			v := e.table[i].offset
			if v <= minOff {
				v = 0
			} else {
				v = v - e.cur + maxMatchOffset
			}
			e.table[i].offset = v
		}
		e.cur = maxMatchOffset
	}
	// s is the position of src within the (possibly longer) history buffer.
	s := e.addBlock(src)
	// This check isn't in the Snappy implementation, but there, the caller
	// instead of the callee handles this case.
	if len(src) < minNonLiteralBlockSize {
		// We do not fill the token table.
		// This will be picked up by caller.
		dst.n = uint16(len(src))
		return
	}
	// Override src: match against the full history, not just this block.
	src = e.hist
	nextEmit := s
	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := int32(len(src) - inputMargin)
	// nextEmit is where in src the next emitLiteral should start from.
	cv := load3232(src, s)
	for {
		// When should we start skipping if we haven't found matches in a long while.
		const skipLog = 5
		const doEvery = 2
		nextS := s
		var candidate tableEntry
		// Search for a 4-byte match, accelerating (via skipLog) through
		// incompressible regions.
		for {
			nextHash := hash4u(cv, bTableBits)
			s = nextS
			nextS = s + doEvery + (s-nextEmit)>>skipLog
			if nextS > sLimit {
				goto emitRemainder
			}
			candidate = e.table[nextHash]
			now := load6432(src, nextS)
			e.table[nextHash] = tableEntry{offset: s + e.cur}
			nextHash = hash4u(uint32(now), bTableBits)
			offset := s - (candidate.offset - e.cur)
			if offset < maxMatchOffset && cv == load3232(src, candidate.offset-e.cur) {
				// Also record the position we already hashed before breaking.
				e.table[nextHash] = tableEntry{offset: nextS + e.cur}
				break
			}
			// Do one right away...
			cv = uint32(now)
			s = nextS
			nextS++
			candidate = e.table[nextHash]
			now >>= 8
			e.table[nextHash] = tableEntry{offset: s + e.cur}
			offset = s - (candidate.offset - e.cur)
			if offset < maxMatchOffset && cv == load3232(src, candidate.offset-e.cur) {
				break
			}
			cv = uint32(now)
		}
		// A 4-byte match has been found. We'll later see if more than 4 bytes
		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
		// them as literal bytes.
		// Call emitCopy, and then see if another emitCopy could be our next
		// move. Repeat until we find no match for the input immediately after
		// what was consumed by the last emitCopy call.
		//
		// If we exit this loop normally then we need to call emitLiteral next,
		// though we don't yet know how big the literal will be. We handle that
		// by proceeding to the next iteration of the main loop. We also can
		// exit this loop via goto if we get close to exhausting the input.
		for {
			// Invariant: we have a 4-byte match at s, and no need to emit any
			// literal bytes prior to s.
			// Extend the 4-byte match as long as possible.
			t := candidate.offset - e.cur
			l := e.matchlenLong(s+4, t+4, src) + 4
			// Extend backwards
			for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
				s--
				t--
				l++
			}
			if nextEmit < s {
				if false {
					emitLiteral(dst, src[nextEmit:s])
				} else {
					// Inlined literal emission (faster than emitLiteral here).
					for _, v := range src[nextEmit:s] {
						dst.tokens[dst.n] = token(v)
						dst.litHist[v]++
						dst.n++
					}
				}
			}
			dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
			s += l
			nextEmit = s
			if nextS >= s {
				s = nextS + 1
			}
			if s >= sLimit {
				// Index first pair after match end.
				if int(s+l+4) < len(src) {
					cv := load3232(src, s)
					e.table[hash4u(cv, bTableBits)] = tableEntry{offset: s + e.cur}
				}
				goto emitRemainder
			}
			// Index positions inside the match we just skipped over:
			// three entries (i, i+2, i+4) per 7-byte stride.
			for i := s - l + 2; i < s-5; i += 7 {
				x := load6432(src, i)
				nextHash := hash4u(uint32(x), bTableBits)
				e.table[nextHash] = tableEntry{offset: e.cur + i}
				// Skip one
				x >>= 16
				nextHash = hash4u(uint32(x), bTableBits)
				e.table[nextHash] = tableEntry{offset: e.cur + i + 2}
				// Skip one
				x >>= 16
				nextHash = hash4u(uint32(x), bTableBits)
				e.table[nextHash] = tableEntry{offset: e.cur + i + 4}
			}
			// We could immediately start working at s now, but to improve
			// compression we first update the hash table at s-2 to s. If
			// another emitCopy is not our next move, also calculate nextHash
			// at s+1. At least on GOARCH=amd64, these three hash calculations
			// are faster as one load64 call (with some shifts) instead of
			// three load32 calls.
			x := load6432(src, s-2)
			o := e.cur + s - 2
			prevHash := hash4u(uint32(x), bTableBits)
			prevHash2 := hash4u(uint32(x>>8), bTableBits)
			e.table[prevHash] = tableEntry{offset: o}
			e.table[prevHash2] = tableEntry{offset: o + 1}
			currHash := hash4u(uint32(x>>16), bTableBits)
			candidate = e.table[currHash]
			e.table[currHash] = tableEntry{offset: o + 2}
			offset := s - (candidate.offset - e.cur)
			if offset > maxMatchOffset || uint32(x>>16) != load3232(src, candidate.offset-e.cur) {
				// No immediate follow-up match; resume the search loop.
				cv = uint32(x >> 8)
				s++
				break
			}
		}
	}
emitRemainder:
	if int(nextEmit) < len(src) {
		// If nothing was added, don't encode literals.
		if dst.n == 0 {
			return
		}
		emitLiteral(dst, src[nextEmit:])
	}
}
| @@ -0,0 +1,240 @@ | |||
| package flate | |||
| import "fmt" | |||
// fastEncL3 is the compression-level-3 encoder.
// Unlike level 2 it keeps the two most recent offsets per hash bucket
// (tableEntryPrev), allowing it to check two candidates per position.
type fastEncL3 struct {
	fastGen
	// table maps a 16-bit hash to its current and previous offsets.
	table [1 << 16]tableEntryPrev
}
// Encode uses a similar algorithm to level 2, will check up to two candidates.
// Tokens are appended to dst; src is appended to the history buffer first.
func (e *fastEncL3) Encode(dst *tokens, src []byte) {
	const (
		inputMargin = 8 - 1
		minNonLiteralBlockSize = 1 + 1 + inputMargin
		tableBits = 16
		tableSize = 1 << tableBits
	)
	if debugDeflate && e.cur < 0 {
		panic(fmt.Sprint("e.cur < 0: ", e.cur))
	}
	// Protect against e.cur wraparound.
	for e.cur >= bufferReset {
		if len(e.hist) == 0 {
			// No history: reset both candidate slots of every bucket.
			for i := range e.table[:] {
				e.table[i] = tableEntryPrev{}
			}
			e.cur = maxMatchOffset
			break
		}
		// Shift down everything in the table that isn't already too far away.
		minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
		for i := range e.table[:] {
			v := e.table[i]
			if v.Cur.offset <= minOff {
				v.Cur.offset = 0
			} else {
				v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset
			}
			if v.Prev.offset <= minOff {
				v.Prev.offset = 0
			} else {
				v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset
			}
			e.table[i] = v
		}
		e.cur = maxMatchOffset
	}
	s := e.addBlock(src)
	// Skip if too small.
	if len(src) < minNonLiteralBlockSize {
		// We do not fill the token table.
		// This will be picked up by caller.
		dst.n = uint16(len(src))
		return
	}
	// Override src: match against the full history.
	src = e.hist
	nextEmit := s
	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := int32(len(src) - inputMargin)
	// nextEmit is where in src the next emitLiteral should start from.
	cv := load3232(src, s)
	for {
		const skipLog = 6
		nextS := s
		var candidate tableEntry
		// Find a valid 4-byte match among the two stored candidates.
		for {
			nextHash := hash4u(cv, tableBits)
			s = nextS
			nextS = s + 1 + (s-nextEmit)>>skipLog
			if nextS > sLimit {
				goto emitRemainder
			}
			candidates := e.table[nextHash]
			now := load3232(src, nextS)
			// Safe offset distance until s + 4...
			minOffset := e.cur + s - (maxMatchOffset - 4)
			e.table[nextHash] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur}}
			// Check both candidates
			candidate = candidates.Cur
			if candidate.offset < minOffset {
				cv = now
				// Previous will also be invalid, we have nothing.
				continue
			}
			if cv == load3232(src, candidate.offset-e.cur) {
				if candidates.Prev.offset < minOffset || cv != load3232(src, candidates.Prev.offset-e.cur) {
					break
				}
				// Both match and are valid, pick longest.
				offset := s - (candidate.offset - e.cur)
				o2 := s - (candidates.Prev.offset - e.cur)
				l1, l2 := matchLen(src[s+4:], src[s-offset+4:]), matchLen(src[s+4:], src[s-o2+4:])
				if l2 > l1 {
					candidate = candidates.Prev
				}
				break
			} else {
				// We only check if value mismatches.
				// Offset will always be invalid in other cases.
				candidate = candidates.Prev
				if candidate.offset > minOffset && cv == load3232(src, candidate.offset-e.cur) {
					break
				}
			}
			cv = now
		}
		// Call emitCopy, and then see if another emitCopy could be our next
		// move. Repeat until we find no match for the input immediately after
		// what was consumed by the last emitCopy call.
		//
		// If we exit this loop normally then we need to call emitLiteral next,
		// though we don't yet know how big the literal will be. We handle that
		// by proceeding to the next iteration of the main loop. We also can
		// exit this loop via goto if we get close to exhausting the input.
		for {
			// Invariant: we have a 4-byte match at s, and no need to emit any
			// literal bytes prior to s.
			// Extend the 4-byte match as long as possible.
			//
			t := candidate.offset - e.cur
			l := e.matchlenLong(s+4, t+4, src) + 4
			// Extend backwards
			for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
				s--
				t--
				l++
			}
			if nextEmit < s {
				if false {
					emitLiteral(dst, src[nextEmit:s])
				} else {
					// Inlined literal emission (faster than emitLiteral here).
					for _, v := range src[nextEmit:s] {
						dst.tokens[dst.n] = token(v)
						dst.litHist[v]++
						dst.n++
					}
				}
			}
			dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
			s += l
			nextEmit = s
			if nextS >= s {
				s = nextS + 1
			}
			if s >= sLimit {
				t += l
				// Index first pair after match end.
				if int(t+4) < len(src) && t > 0 {
					cv := load3232(src, t)
					nextHash := hash4u(cv, tableBits)
					e.table[nextHash] = tableEntryPrev{
						Prev: e.table[nextHash].Cur,
						Cur: tableEntry{offset: e.cur + t},
					}
				}
				goto emitRemainder
			}
			// Store every 5th hash in-between.
			for i := s - l + 2; i < s-5; i += 5 {
				nextHash := hash4u(load3232(src, i), tableBits)
				e.table[nextHash] = tableEntryPrev{
					Prev: e.table[nextHash].Cur,
					Cur: tableEntry{offset: e.cur + i}}
			}
			// We could immediately start working at s now, but to improve
			// compression we first update the hash table at s-2 to s.
			x := load6432(src, s-2)
			prevHash := hash4u(uint32(x), tableBits)
			e.table[prevHash] = tableEntryPrev{
				Prev: e.table[prevHash].Cur,
				Cur: tableEntry{offset: e.cur + s - 2},
			}
			x >>= 8
			prevHash = hash4u(uint32(x), tableBits)
			e.table[prevHash] = tableEntryPrev{
				Prev: e.table[prevHash].Cur,
				Cur: tableEntry{offset: e.cur + s - 1},
			}
			x >>= 8
			currHash := hash4u(uint32(x), tableBits)
			candidates := e.table[currHash]
			cv = uint32(x)
			e.table[currHash] = tableEntryPrev{
				Prev: candidates.Cur,
				Cur: tableEntry{offset: s + e.cur},
			}
			// Check both candidates
			candidate = candidates.Cur
			minOffset := e.cur + s - (maxMatchOffset - 4)
			if candidate.offset > minOffset {
				if cv == load3232(src, candidate.offset-e.cur) {
					// Found a match...
					continue
				}
				candidate = candidates.Prev
				if candidate.offset > minOffset && cv == load3232(src, candidate.offset-e.cur) {
					// Match at prev...
					continue
				}
			}
			// No immediate follow-up match; resume the search loop.
			cv = uint32(x >> 8)
			s++
			break
		}
	}
emitRemainder:
	if int(nextEmit) < len(src) {
		// If nothing was added, don't encode literals.
		if dst.n == 0 {
			return
		}
		emitLiteral(dst, src[nextEmit:])
	}
}
| @@ -0,0 +1,220 @@ | |||
| package flate | |||
| import "fmt" | |||
// fastEncL4 is the compression-level-4 encoder.
// It keeps two hash tables: a short (4-byte hash) table and a long
// (7-byte hash) table, preferring the long match when both hit.
type fastEncL4 struct {
	fastGen
	table [tableSize]tableEntry
	bTable [tableSize]tableEntry
}
// Encode appends tokens for src to dst using both a short (4-byte) and a
// long (7-byte) hash table, matching across blocks via the history buffer.
func (e *fastEncL4) Encode(dst *tokens, src []byte) {
	const (
		inputMargin = 12 - 1
		minNonLiteralBlockSize = 1 + 1 + inputMargin
	)
	if debugDeflate && e.cur < 0 {
		panic(fmt.Sprint("e.cur < 0: ", e.cur))
	}
	// Protect against e.cur wraparound.
	for e.cur >= bufferReset {
		if len(e.hist) == 0 {
			// No history: reset both tables and the base offset.
			for i := range e.table[:] {
				e.table[i] = tableEntry{}
			}
			for i := range e.bTable[:] {
				e.bTable[i] = tableEntry{}
			}
			e.cur = maxMatchOffset
			break
		}
		// Shift down everything in the table that isn't already too far away.
		minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
		for i := range e.table[:] {
			v := e.table[i].offset
			if v <= minOff {
				v = 0
			} else {
				v = v - e.cur + maxMatchOffset
			}
			e.table[i].offset = v
		}
		for i := range e.bTable[:] {
			v := e.bTable[i].offset
			if v <= minOff {
				v = 0
			} else {
				v = v - e.cur + maxMatchOffset
			}
			e.bTable[i].offset = v
		}
		e.cur = maxMatchOffset
	}
	s := e.addBlock(src)
	// This check isn't in the Snappy implementation, but there, the caller
	// instead of the callee handles this case.
	if len(src) < minNonLiteralBlockSize {
		// We do not fill the token table.
		// This will be picked up by caller.
		dst.n = uint16(len(src))
		return
	}
	// Override src: match against the full history.
	src = e.hist
	nextEmit := s
	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := int32(len(src) - inputMargin)
	// nextEmit is where in src the next emitLiteral should start from.
	cv := load6432(src, s)
	for {
		const skipLog = 6
		const doEvery = 1
		nextS := s
		var t int32
		// Search for a match, consulting the long table first.
		for {
			nextHashS := hash4x64(cv, tableBits)
			nextHashL := hash7(cv, tableBits)
			s = nextS
			nextS = s + doEvery + (s-nextEmit)>>skipLog
			if nextS > sLimit {
				goto emitRemainder
			}
			// Fetch a short+long candidate
			sCandidate := e.table[nextHashS]
			lCandidate := e.bTable[nextHashL]
			next := load6432(src, nextS)
			entry := tableEntry{offset: s + e.cur}
			e.table[nextHashS] = entry
			e.bTable[nextHashL] = entry
			t = lCandidate.offset - e.cur
			if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.offset-e.cur) {
				// We got a long match. Use that.
				break
			}
			t = sCandidate.offset - e.cur
			if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) {
				// Found a 4 match...
				lCandidate = e.bTable[hash7(next, tableBits)]
				// If the next long is a candidate, check if we should use that instead...
				lOff := nextS - (lCandidate.offset - e.cur)
				if lOff < maxMatchOffset && load3232(src, lCandidate.offset-e.cur) == uint32(next) {
					l1, l2 := matchLen(src[s+4:], src[t+4:]), matchLen(src[nextS+4:], src[nextS-lOff+4:])
					if l2 > l1 {
						s = nextS
						t = lCandidate.offset - e.cur
					}
				}
				break
			}
			cv = next
		}
		// A 4-byte match has been found. We'll later see if more than 4 bytes
		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
		// them as literal bytes.
		// Extend the 4-byte match as long as possible.
		l := e.matchlenLong(s+4, t+4, src) + 4
		// Extend backwards
		for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
			s--
			t--
			l++
		}
		if nextEmit < s {
			if false {
				emitLiteral(dst, src[nextEmit:s])
			} else {
				// Inlined literal emission (faster than emitLiteral here).
				for _, v := range src[nextEmit:s] {
					dst.tokens[dst.n] = token(v)
					dst.litHist[v]++
					dst.n++
				}
			}
		}
		// Sanity checks for debug builds only.
		if debugDeflate {
			if t >= s {
				panic("s-t")
			}
			if (s - t) > maxMatchOffset {
				panic(fmt.Sprintln("mmo", t))
			}
			if l < baseMatchLength {
				panic("bml")
			}
		}
		dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
		s += l
		nextEmit = s
		if nextS >= s {
			s = nextS + 1
		}
		if s >= sLimit {
			// Index first pair after match end.
			if int(s+8) < len(src) {
				cv := load6432(src, s)
				e.table[hash4x64(cv, tableBits)] = tableEntry{offset: s + e.cur}
				e.bTable[hash7(cv, tableBits)] = tableEntry{offset: s + e.cur}
			}
			goto emitRemainder
		}
		// Store every 3rd hash in-between
		if true {
			i := nextS
			if i < s-1 {
				cv := load6432(src, i)
				t := tableEntry{offset: i + e.cur}
				t2 := tableEntry{offset: t.offset + 1}
				e.bTable[hash7(cv, tableBits)] = t
				e.bTable[hash7(cv>>8, tableBits)] = t2
				e.table[hash4u(uint32(cv>>8), tableBits)] = t2
				i += 3
				for ; i < s-1; i += 3 {
					cv := load6432(src, i)
					t := tableEntry{offset: i + e.cur}
					t2 := tableEntry{offset: t.offset + 1}
					e.bTable[hash7(cv, tableBits)] = t
					e.bTable[hash7(cv>>8, tableBits)] = t2
					e.table[hash4u(uint32(cv>>8), tableBits)] = t2
				}
			}
		}
		// We could immediately start working at s now, but to improve
		// compression we first update the hash table at s-1 and at s.
		x := load6432(src, s-1)
		o := e.cur + s - 1
		prevHashS := hash4x64(x, tableBits)
		prevHashL := hash7(x, tableBits)
		e.table[prevHashS] = tableEntry{offset: o}
		e.bTable[prevHashL] = tableEntry{offset: o}
		cv = x >> 8
	}
emitRemainder:
	if int(nextEmit) < len(src) {
		// If nothing was added, don't encode literals.
		if dst.n == 0 {
			return
		}
		emitLiteral(dst, src[nextEmit:])
	}
}
| @@ -0,0 +1,302 @@ | |||
| package flate | |||
| import "fmt" | |||
// fastEncL5 is the compression-level-5 encoder.
// Like level 4 it uses short and long hash tables, but the long table keeps
// the two most recent offsets per bucket (tableEntryPrev) for deeper search.
type fastEncL5 struct {
	fastGen
	table [tableSize]tableEntry
	bTable [tableSize]tableEntryPrev
}
// Encode appends tokens for src to dst. It consults the long (7-byte hash)
// table first, checking both its current and previous candidates, falls
// back to the short (4-byte hash) table, and tries to improve a short match
// with the long candidate at the next position before committing.
func (e *fastEncL5) Encode(dst *tokens, src []byte) {
	const (
		inputMargin = 12 - 1
		minNonLiteralBlockSize = 1 + 1 + inputMargin
	)
	if debugDeflate && e.cur < 0 {
		panic(fmt.Sprint("e.cur < 0: ", e.cur))
	}
	// Protect against e.cur wraparound.
	for e.cur >= bufferReset {
		if len(e.hist) == 0 {
			// No history: reset both tables and the base offset.
			for i := range e.table[:] {
				e.table[i] = tableEntry{}
			}
			for i := range e.bTable[:] {
				e.bTable[i] = tableEntryPrev{}
			}
			e.cur = maxMatchOffset
			break
		}
		// Shift down everything in the table that isn't already too far away.
		minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
		for i := range e.table[:] {
			v := e.table[i].offset
			if v <= minOff {
				v = 0
			} else {
				v = v - e.cur + maxMatchOffset
			}
			e.table[i].offset = v
		}
		for i := range e.bTable[:] {
			v := e.bTable[i]
			if v.Cur.offset <= minOff {
				// Cur too old implies Prev is too old as well.
				v.Cur.offset = 0
				v.Prev.offset = 0
			} else {
				v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset
				if v.Prev.offset <= minOff {
					v.Prev.offset = 0
				} else {
					v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset
				}
			}
			e.bTable[i] = v
		}
		e.cur = maxMatchOffset
	}
	s := e.addBlock(src)
	// This check isn't in the Snappy implementation, but there, the caller
	// instead of the callee handles this case.
	if len(src) < minNonLiteralBlockSize {
		// We do not fill the token table.
		// This will be picked up by caller.
		dst.n = uint16(len(src))
		return
	}
	// Override src: match against the full history.
	src = e.hist
	nextEmit := s
	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := int32(len(src) - inputMargin)
	// nextEmit is where in src the next emitLiteral should start from.
	cv := load6432(src, s)
	for {
		const skipLog = 6
		const doEvery = 1
		nextS := s
		// l is the match length found during the search; 0 means the
		// length still has to be computed after the loop.
		var l int32
		var t int32
		for {
			nextHashS := hash4x64(cv, tableBits)
			nextHashL := hash7(cv, tableBits)
			s = nextS
			nextS = s + doEvery + (s-nextEmit)>>skipLog
			if nextS > sLimit {
				goto emitRemainder
			}
			// Fetch a short+long candidate
			sCandidate := e.table[nextHashS]
			lCandidate := e.bTable[nextHashL]
			next := load6432(src, nextS)
			entry := tableEntry{offset: s + e.cur}
			e.table[nextHashS] = entry
			eLong := &e.bTable[nextHashL]
			eLong.Cur, eLong.Prev = entry, eLong.Cur
			nextHashS = hash4x64(next, tableBits)
			nextHashL = hash7(next, tableBits)
			t = lCandidate.Cur.offset - e.cur
			if s-t < maxMatchOffset {
				if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) {
					// Store the next match
					e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
					eLong := &e.bTable[nextHashL]
					eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
					// The previous long candidate may give a longer match.
					t2 := lCandidate.Prev.offset - e.cur
					if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
						l = e.matchlen(s+4, t+4, src) + 4
						ml1 := e.matchlen(s+4, t2+4, src) + 4
						if ml1 > l {
							t = t2
							l = ml1
							break
						}
					}
					break
				}
				t = lCandidate.Prev.offset - e.cur
				if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
					// Store the next match
					e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
					eLong := &e.bTable[nextHashL]
					eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
					break
				}
			}
			t = sCandidate.offset - e.cur
			if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) {
				// Found a 4 match...
				l = e.matchlen(s+4, t+4, src) + 4
				lCandidate = e.bTable[nextHashL]
				// Store the next match
				e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
				eLong := &e.bTable[nextHashL]
				eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
				// If the next long is a candidate, use that...
				t2 := lCandidate.Cur.offset - e.cur
				if nextS-t2 < maxMatchOffset {
					if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) {
						ml := e.matchlen(nextS+4, t2+4, src) + 4
						if ml > l {
							t = t2
							s = nextS
							l = ml
							break
						}
					}
					// If the previous long is a candidate, use that...
					t2 = lCandidate.Prev.offset - e.cur
					if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) {
						ml := e.matchlen(nextS+4, t2+4, src) + 4
						if ml > l {
							t = t2
							s = nextS
							l = ml
							break
						}
					}
				}
				break
			}
			cv = next
		}
		// A 4-byte match has been found. We'll later see if more than 4 bytes
		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
		// them as literal bytes.
		if l == 0 {
			// Extend the 4-byte match as long as possible.
			l = e.matchlenLong(s+4, t+4, src) + 4
		} else if l == maxMatchLength {
			// matchlen hit its cap; continue extending past it.
			l += e.matchlenLong(s+l, t+l, src)
		}
		// Try to locate a better match by checking the end of best match...
		if sAt := s + l; l < 30 && sAt < sLimit {
			eLong := e.bTable[hash7(load6432(src, sAt), tableBits)].Cur.offset
			// Test current
			t2 := eLong - e.cur - l
			off := s - t2
			if t2 >= 0 && off < maxMatchOffset && off > 0 {
				if l2 := e.matchlenLong(s, t2, src); l2 > l {
					t = t2
					l = l2
				}
			}
		}
		// Extend backwards
		for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
			s--
			t--
			l++
		}
		if nextEmit < s {
			if false {
				emitLiteral(dst, src[nextEmit:s])
			} else {
				// Inlined literal emission (faster than emitLiteral here).
				for _, v := range src[nextEmit:s] {
					dst.tokens[dst.n] = token(v)
					dst.litHist[v]++
					dst.n++
				}
			}
		}
		// Sanity checks for debug builds only.
		if debugDeflate {
			if t >= s {
				panic(fmt.Sprintln("s-t", s, t))
			}
			if (s - t) > maxMatchOffset {
				panic(fmt.Sprintln("mmo", s-t))
			}
			if l < baseMatchLength {
				panic("bml")
			}
		}
		dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
		s += l
		nextEmit = s
		if nextS >= s {
			s = nextS + 1
		}
		if s >= sLimit {
			goto emitRemainder
		}
		// Store every 3rd hash in-between.
		if true {
			const hashEvery = 3
			i := s - l + 1
			if i < s-1 {
				cv := load6432(src, i)
				t := tableEntry{offset: i + e.cur}
				e.table[hash4x64(cv, tableBits)] = t
				eLong := &e.bTable[hash7(cv, tableBits)]
				eLong.Cur, eLong.Prev = t, eLong.Cur
				// Do an long at i+1
				cv >>= 8
				t = tableEntry{offset: t.offset + 1}
				eLong = &e.bTable[hash7(cv, tableBits)]
				eLong.Cur, eLong.Prev = t, eLong.Cur
				// We only have enough bits for a short entry at i+2
				cv >>= 8
				t = tableEntry{offset: t.offset + 1}
				e.table[hash4x64(cv, tableBits)] = t
				// Skip one - otherwise we risk hitting 's'
				i += 4
				for ; i < s-1; i += hashEvery {
					cv := load6432(src, i)
					t := tableEntry{offset: i + e.cur}
					t2 := tableEntry{offset: t.offset + 1}
					eLong := &e.bTable[hash7(cv, tableBits)]
					eLong.Cur, eLong.Prev = t, eLong.Cur
					e.table[hash4u(uint32(cv>>8), tableBits)] = t2
				}
			}
		}
		// We could immediately start working at s now, but to improve
		// compression we first update the hash table at s-1 and at s.
		x := load6432(src, s-1)
		o := e.cur + s - 1
		prevHashS := hash4x64(x, tableBits)
		prevHashL := hash7(x, tableBits)
		e.table[prevHashS] = tableEntry{offset: o}
		eLong := &e.bTable[prevHashL]
		eLong.Cur, eLong.Prev = tableEntry{offset: o}, eLong.Cur
		cv = x >> 8
	}
emitRemainder:
	if int(nextEmit) < len(src) {
		// If nothing was added, don't encode literals.
		if dst.n == 0 {
			return
		}
		emitLiteral(dst, src[nextEmit:])
	}
}
| @@ -0,0 +1,315 @@ | |||
| package flate | |||
| import "fmt" | |||
// fastEncL6 is the level-6 encoder.
// It keeps a short hash table (indexed via hash4x64) and a long hash table
// (indexed via hash7) whose buckets remember both the current and the
// previous entry, allowing two long-match candidates per probe.
type fastEncL6 struct {
	fastGen
	table  [tableSize]tableEntry     // short-hash candidates
	bTable [tableSize]tableEntryPrev // long-hash candidates (current + previous)
}
// Encode compresses src into dst at level 6.
// It probes both the short and long hash tables, additionally checks the
// previous long-bucket entry, the last repeat offset, and end-of-match
// candidates to find longer matches. History is retained in e between calls.
func (e *fastEncL6) Encode(dst *tokens, src []byte) {
	const (
		inputMargin            = 12 - 1
		minNonLiteralBlockSize = 1 + 1 + inputMargin
	)
	if debugDeflate && e.cur < 0 {
		panic(fmt.Sprint("e.cur < 0: ", e.cur))
	}

	// Protect against e.cur wraparound.
	for e.cur >= bufferReset {
		if len(e.hist) == 0 {
			// No history to preserve: clear both tables outright.
			for i := range e.table[:] {
				e.table[i] = tableEntry{}
			}
			for i := range e.bTable[:] {
				e.bTable[i] = tableEntryPrev{}
			}
			e.cur = maxMatchOffset
			break
		}
		// Shift down everything in the table that isn't already too far away.
		minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
		for i := range e.table[:] {
			v := e.table[i].offset
			if v <= minOff {
				v = 0
			} else {
				v = v - e.cur + maxMatchOffset
			}
			e.table[i].offset = v
		}
		for i := range e.bTable[:] {
			v := e.bTable[i]
			if v.Cur.offset <= minOff {
				v.Cur.offset = 0
				v.Prev.offset = 0
			} else {
				v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset
				if v.Prev.offset <= minOff {
					v.Prev.offset = 0
				} else {
					v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset
				}
			}
			e.bTable[i] = v
		}
		e.cur = maxMatchOffset
	}

	s := e.addBlock(src)

	// This check isn't in the Snappy implementation, but there, the caller
	// instead of the callee handles this case.
	if len(src) < minNonLiteralBlockSize {
		// We do not fill the token table.
		// This will be picked up by caller.
		dst.n = uint16(len(src))
		return
	}

	// Override src
	src = e.hist
	nextEmit := s

	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := int32(len(src) - inputMargin)

	// nextEmit is where in src the next emitLiteral should start from.
	cv := load6432(src, s)
	// Repeat MUST be > 1 and within range
	repeat := int32(1)
	for {
		const skipLog = 7
		const doEvery = 1

		nextS := s
		var l int32
		var t int32
		for {
			nextHashS := hash4x64(cv, tableBits)
			nextHashL := hash7(cv, tableBits)
			s = nextS
			nextS = s + doEvery + (s-nextEmit)>>skipLog
			if nextS > sLimit {
				goto emitRemainder
			}
			// Fetch a short+long candidate
			sCandidate := e.table[nextHashS]
			lCandidate := e.bTable[nextHashL]
			next := load6432(src, nextS)
			entry := tableEntry{offset: s + e.cur}
			e.table[nextHashS] = entry
			eLong := &e.bTable[nextHashL]
			eLong.Cur, eLong.Prev = entry, eLong.Cur

			// Calculate hashes of 'next'
			nextHashS = hash4x64(next, tableBits)
			nextHashL = hash7(next, tableBits)

			t = lCandidate.Cur.offset - e.cur
			if s-t < maxMatchOffset {
				if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) {
					// Long candidate matches at least 4 bytes.

					// Store the next match
					e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
					eLong := &e.bTable[nextHashL]
					eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur

					// Check the previous long candidate as well.
					t2 := lCandidate.Prev.offset - e.cur
					if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
						l = e.matchlen(s+4, t+4, src) + 4
						ml1 := e.matchlen(s+4, t2+4, src) + 4
						if ml1 > l {
							t = t2
							l = ml1
							break
						}
					}
					break
				}
				// Current value did not match, but check if previous long value does.
				t = lCandidate.Prev.offset - e.cur
				if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
					// Store the next match
					e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
					eLong := &e.bTable[nextHashL]
					eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
					break
				}
			}

			t = sCandidate.offset - e.cur
			if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) {
				// Found a 4 match...
				l = e.matchlen(s+4, t+4, src) + 4

				// Look up next long candidate (at nextS)
				lCandidate = e.bTable[nextHashL]

				// Store the next match
				e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
				eLong := &e.bTable[nextHashL]
				eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur

				// Check repeat at s + repOff
				const repOff = 1
				t2 := s - repeat + repOff
				if load3232(src, t2) == uint32(cv>>(8*repOff)) {
					ml := e.matchlen(s+4+repOff, t2+4, src) + 4
					if ml > l {
						t = t2
						l = ml
						s += repOff
						// Not worth checking more.
						break
					}
				}

				// If the next long is a candidate, use that...
				t2 = lCandidate.Cur.offset - e.cur
				if nextS-t2 < maxMatchOffset {
					if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) {
						ml := e.matchlen(nextS+4, t2+4, src) + 4
						if ml > l {
							t = t2
							s = nextS
							l = ml
							// This is ok, but check previous as well.
						}
					}
					// If the previous long is a candidate, use that...
					t2 = lCandidate.Prev.offset - e.cur
					if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) {
						ml := e.matchlen(nextS+4, t2+4, src) + 4
						if ml > l {
							t = t2
							s = nextS
							l = ml
							break
						}
					}
				}
				break
			}
			cv = next
		}

		// A 4-byte match has been found. We'll later see if more than 4 bytes
		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
		// them as literal bytes.

		// Extend the 4-byte match as long as possible.
		if l == 0 {
			l = e.matchlenLong(s+4, t+4, src) + 4
		} else if l == maxMatchLength {
			l += e.matchlenLong(s+l, t+l, src)
		}

		// Try to locate a better match by checking the end-of-match...
		if sAt := s + l; sAt < sLimit {
			eLong := &e.bTable[hash7(load6432(src, sAt), tableBits)]
			// Test current
			t2 := eLong.Cur.offset - e.cur - l
			off := s - t2
			if off < maxMatchOffset {
				if off > 0 && t2 >= 0 {
					if l2 := e.matchlenLong(s, t2, src); l2 > l {
						t = t2
						l = l2
					}
				}
				// Test next:
				t2 = eLong.Prev.offset - e.cur - l
				off := s - t2
				if off > 0 && off < maxMatchOffset && t2 >= 0 {
					if l2 := e.matchlenLong(s, t2, src); l2 > l {
						t = t2
						l = l2
					}
				}
			}
		}

		// Extend backwards
		for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
			s--
			t--
			l++
		}
		if nextEmit < s {
			if false {
				emitLiteral(dst, src[nextEmit:s])
			} else {
				// Inlined literal emission (same effect as emitLiteral).
				for _, v := range src[nextEmit:s] {
					dst.tokens[dst.n] = token(v)
					dst.litHist[v]++
					dst.n++
				}
			}
		}
		if false {
			// NOTE(review): these debug assertions are disabled here, while the
			// equivalent block in the level-5 encoder gates them on debugDeflate.
			// Consider aligning — confirm against upstream intent.
			if t >= s {
				panic(fmt.Sprintln("s-t", s, t))
			}
			if (s - t) > maxMatchOffset {
				panic(fmt.Sprintln("mmo", s-t))
			}
			if l < baseMatchLength {
				panic("bml")
			}
		}

		dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
		repeat = s - t
		s += l
		nextEmit = s
		if nextS >= s {
			s = nextS + 1
		}

		if s >= sLimit {
			// Index after match end.
			for i := nextS + 1; i < int32(len(src))-8; i += 2 {
				cv := load6432(src, i)
				e.table[hash4x64(cv, tableBits)] = tableEntry{offset: i + e.cur}
				eLong := &e.bTable[hash7(cv, tableBits)]
				eLong.Cur, eLong.Prev = tableEntry{offset: i + e.cur}, eLong.Cur
			}
			goto emitRemainder
		}

		// Store every long hash in-between and every second short.
		if true {
			for i := nextS + 1; i < s-1; i += 2 {
				cv := load6432(src, i)
				t := tableEntry{offset: i + e.cur}
				t2 := tableEntry{offset: t.offset + 1}
				eLong := &e.bTable[hash7(cv, tableBits)]
				eLong2 := &e.bTable[hash7(cv>>8, tableBits)]
				e.table[hash4x64(cv, tableBits)] = t
				eLong.Cur, eLong.Prev = t, eLong.Cur
				eLong2.Cur, eLong2.Prev = t2, eLong2.Cur
			}
		}

		// We could immediately start working at s now, but to improve
		// compression we first update the hash table at s-1 and at s.
		cv = load6432(src, s)
	}

emitRemainder:
	if int(nextEmit) < len(src) {
		// If nothing was added, don't encode literals.
		if dst.n == 0 {
			return
		}
		emitLiteral(dst, src[nextEmit:])
	}
}
| @@ -0,0 +1,37 @@ | |||
| package flate | |||
// Shift-count masks sized to the width of the shifted value (amd64 variant:
// the masks are the actual low-bit masks, which the hardware applies for free).
const (
	// Masks for shifts with register sizes of the shift value.
	// This can be used to work around the x86 design of shifting by mod register size.
	// It can be used when a variable shift is always smaller than the register size.

	// reg8SizeMaskX - shift value is 8 bits, shifted is X
	reg8SizeMask8  = 7
	reg8SizeMask16 = 15
	reg8SizeMask32 = 31
	reg8SizeMask64 = 63

	// reg16SizeMaskX - shift value is 16 bits, shifted is X
	reg16SizeMask8  = reg8SizeMask8
	reg16SizeMask16 = reg8SizeMask16
	reg16SizeMask32 = reg8SizeMask32
	reg16SizeMask64 = reg8SizeMask64

	// reg32SizeMaskX - shift value is 32 bits, shifted is X
	reg32SizeMask8  = reg8SizeMask8
	reg32SizeMask16 = reg8SizeMask16
	reg32SizeMask32 = reg8SizeMask32
	reg32SizeMask64 = reg8SizeMask64

	// reg64SizeMaskX - shift value is 64 bits, shifted is X
	reg64SizeMask8  = reg8SizeMask8
	reg64SizeMask16 = reg8SizeMask16
	reg64SizeMask32 = reg8SizeMask32
	reg64SizeMask64 = reg8SizeMask64

	// regSizeMaskUintX - shift value is uint, shifted is X
	regSizeMaskUint8  = reg8SizeMask8
	regSizeMaskUint16 = reg8SizeMask16
	regSizeMaskUint32 = reg8SizeMask32
	regSizeMaskUint64 = reg8SizeMask64
)
| @@ -0,0 +1,40 @@ | |||
| //go:build !amd64 | |||
| // +build !amd64 | |||
| package flate | |||
// Shift-count masks for non-amd64 architectures.
// Every mask here is all ones for its shift-value type, so applying the mask
// does not change the shift count — it exists only so the same masked-shift
// source compiles on all architectures.
const (
	// Masks for shifts with register sizes of the shift value.
	// This can be used to work around the x86 design of shifting by mod register size.
	// It can be used when a variable shift is always smaller than the register size.

	// reg8SizeMaskX - shift value is 8 bits, shifted is X
	reg8SizeMask8  = 0xff
	reg8SizeMask16 = 0xff
	reg8SizeMask32 = 0xff
	reg8SizeMask64 = 0xff

	// reg16SizeMaskX - shift value is 16 bits, shifted is X
	reg16SizeMask8  = 0xffff
	reg16SizeMask16 = 0xffff
	reg16SizeMask32 = 0xffff
	reg16SizeMask64 = 0xffff

	// reg32SizeMaskX - shift value is 32 bits, shifted is X
	reg32SizeMask8  = 0xffffffff
	reg32SizeMask16 = 0xffffffff
	reg32SizeMask32 = 0xffffffff
	reg32SizeMask64 = 0xffffffff

	// reg64SizeMaskX - shift value is 64 bits, shifted is X
	reg64SizeMask8  = 0xffffffffffffffff
	reg64SizeMask16 = 0xffffffffffffffff
	reg64SizeMask32 = 0xffffffffffffffff
	reg64SizeMask64 = 0xffffffffffffffff

	// regSizeMaskUintX - shift value is uint, shifted is X
	regSizeMaskUint8  = ^uint(0)
	regSizeMaskUint16 = ^uint(0)
	regSizeMaskUint32 = ^uint(0)
	regSizeMaskUint64 = ^uint(0)
)
| @@ -0,0 +1,305 @@ | |||
| package flate | |||
| import ( | |||
| "io" | |||
| "math" | |||
| "sync" | |||
| ) | |||
const (
	// maxStatelessBlock is the largest chunk compressed as one block by
	// StatelessDeflate (fits the int16 offsets used by statelessEnc).
	maxStatelessBlock = math.MaxInt16
	// dictionary will be taken from maxStatelessBlock, so limit it.
	maxStatelessDict = 8 << 10

	// Hash table sizing for statelessEnc.
	slTableBits  = 13
	slTableSize  = 1 << slTableBits
	slTableShift = 32 - slTableBits
)
// statelessWriter compresses each Write independently via StatelessDeflate,
// keeping no compression state between calls.
type statelessWriter struct {
	dst    io.Writer
	closed bool // set once Close has emitted the final EOF block
}
| func (s *statelessWriter) Close() error { | |||
| if s.closed { | |||
| return nil | |||
| } | |||
| s.closed = true | |||
| // Emit EOF block | |||
| return StatelessDeflate(s.dst, nil, true, nil) | |||
| } | |||
| func (s *statelessWriter) Write(p []byte) (n int, err error) { | |||
| err = StatelessDeflate(s.dst, p, false, nil) | |||
| if err != nil { | |||
| return 0, err | |||
| } | |||
| return len(p), nil | |||
| } | |||
| func (s *statelessWriter) Reset(w io.Writer) { | |||
| s.dst = w | |||
| s.closed = false | |||
| } | |||
| // NewStatelessWriter will do compression but without maintaining any state | |||
| // between Write calls. | |||
| // There will be no memory kept between Write calls, | |||
| // but compression and speed will be suboptimal. | |||
| // Because of this, the size of actual Write calls will affect output size. | |||
| func NewStatelessWriter(dst io.Writer) io.WriteCloser { | |||
| return &statelessWriter{dst: dst} | |||
| } | |||
// bitWriterPool contains bit writers that can be reused.
// StatelessDeflate resets a pooled writer before use and before returning it.
var bitWriterPool = sync.Pool{
	New: func() interface{} {
		return newHuffmanBitWriter(nil)
	},
}
// StatelessDeflate allows compressing directly to a Writer without retaining state.
// When returning everything will be flushed.
// Up to 8KB of an optional dictionary can be given which is presumed to precede the block.
// Longer dictionaries will be truncated and will still produce valid output.
// Sending nil dictionary is perfectly fine.
func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error {
	var dst tokens
	bw := bitWriterPool.Get().(*huffmanBitWriter)
	bw.reset(out)
	defer func() {
		// don't keep a reference to our output
		bw.reset(nil)
		bitWriterPool.Put(bw)
	}()
	if eof && len(in) == 0 {
		// Just write an EOF block.
		// Could be faster...
		bw.writeStoredHeader(0, true)
		bw.flush()
		return bw.err
	}

	// Truncate dict to at most maxStatelessDict trailing bytes.
	if len(dict) > maxStatelessDict {
		dict = dict[len(dict)-maxStatelessDict:]
	}

	for len(in) > 0 {
		todo := in
		if len(todo) > maxStatelessBlock-len(dict) {
			todo = todo[:maxStatelessBlock-len(dict)]
		}
		in = in[len(todo):]
		uncompressed := todo
		if len(dict) > 0 {
			// combine dict and source so matches can reach into the dict
			bufLen := len(todo) + len(dict)
			combined := make([]byte, bufLen)
			copy(combined, dict)
			copy(combined[len(dict):], todo)
			todo = combined
		}
		// Compress
		statelessEnc(&dst, todo, int16(len(dict)))
		isEof := eof && len(in) == 0

		if dst.n == 0 {
			// No tokens produced: store the block uncompressed.
			bw.writeStoredHeader(len(uncompressed), isEof)
			if bw.err != nil {
				return bw.err
			}
			bw.writeBytes(uncompressed)
		} else if int(dst.n) > len(uncompressed)-len(uncompressed)>>4 {
			// If we removed less than 1/16th, huffman compress the block.
			bw.writeBlockHuff(isEof, uncompressed, len(in) == 0)
		} else {
			bw.writeBlockDynamic(&dst, isEof, uncompressed, len(in) == 0)
		}
		if len(in) > 0 {
			// Retain a dict if we have more
			dict = todo[len(todo)-maxStatelessDict:]
			dst.Reset()
		}
		if bw.err != nil {
			return bw.err
		}
	}
	if !eof {
		// Align, only a stored block can do that.
		bw.writeStoredHeader(0, false)
	}
	bw.flush()
	return bw.err
}
| func hashSL(u uint32) uint32 { | |||
| return (u * 0x1e35a7bd) >> slTableShift | |||
| } | |||
// load3216 reads 4 bytes from b starting at index i as a little-endian uint32.
func load3216(b []byte, i int16) uint32 {
	// Chained reslice gives the compiler a single bounds check for all 4 reads.
	s := b[i:][:4]
	return uint32(s[3])<<24 | uint32(s[2])<<16 | uint32(s[1])<<8 | uint32(s[0])
}
// load6416 reads 8 bytes from b starting at index i as a little-endian uint64.
func load6416(b []byte, i int16) uint64 {
	// Chained reslice gives the compiler a single bounds check for all 8 reads.
	s := b[i:][:8]
	return uint64(s[7])<<56 | uint64(s[6])<<48 | uint64(s[5])<<40 | uint64(s[4])<<32 |
		uint64(s[3])<<24 | uint64(s[2])<<16 | uint64(s[1])<<8 | uint64(s[0])
}
// statelessEnc tokenizes src into dst using a small stack-allocated hash
// table. startAt is the length of the dictionary prefix of src; indexing
// covers the prefix but emission starts after it. No state survives the call.
func statelessEnc(dst *tokens, src []byte, startAt int16) {
	const (
		inputMargin            = 12 - 1
		minNonLiteralBlockSize = 1 + 1 + inputMargin
	)

	type tableEntry struct {
		offset int16
	}

	var table [slTableSize]tableEntry

	// This check isn't in the Snappy implementation, but there, the caller
	// instead of the callee handles this case.
	if len(src)-int(startAt) < minNonLiteralBlockSize {
		// We do not fill the token table.
		// This will be picked up by caller.
		dst.n = 0
		return
	}
	// Index until startAt so matches can reach into the dictionary prefix.
	if startAt > 0 {
		cv := load3232(src, 0)
		for i := int16(0); i < startAt; i++ {
			table[hashSL(cv)] = tableEntry{offset: i}
			cv = (cv >> 8) | (uint32(src[i+4]) << 24)
		}
	}

	s := startAt + 1
	nextEmit := startAt
	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := int16(len(src) - inputMargin)

	// nextEmit is where in src the next emitLiteral should start from.
	cv := load3216(src, s)

	for {
		const skipLog = 5
		const doEvery = 2

		nextS := s
		var candidate tableEntry
		for {
			nextHash := hashSL(cv)
			candidate = table[nextHash]
			nextS = s + doEvery + (s-nextEmit)>>skipLog
			// nextS <= 0 guards against int16 overflow of the position.
			if nextS > sLimit || nextS <= 0 {
				goto emitRemainder
			}

			now := load6416(src, nextS)
			table[nextHash] = tableEntry{offset: s}
			nextHash = hashSL(uint32(now))

			if cv == load3216(src, candidate.offset) {
				table[nextHash] = tableEntry{offset: nextS}
				break
			}

			// Do one right away...
			cv = uint32(now)
			s = nextS
			nextS++
			candidate = table[nextHash]
			now >>= 8
			table[nextHash] = tableEntry{offset: s}

			if cv == load3216(src, candidate.offset) {
				table[nextHash] = tableEntry{offset: nextS}
				break
			}
			cv = uint32(now)
			s = nextS
		}

		// A 4-byte match has been found. We'll later see if more than 4 bytes
		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
		// them as literal bytes.
		for {
			// Invariant: we have a 4-byte match at s, and no need to emit any
			// literal bytes prior to s.

			// Extend the 4-byte match as long as possible.
			t := candidate.offset
			l := int16(matchLen(src[s+4:], src[t+4:]) + 4)

			// Extend backwards
			for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
				s--
				t--
				l++
			}
			if nextEmit < s {
				if false {
					emitLiteral(dst, src[nextEmit:s])
				} else {
					// Inlined literal emission (same effect as emitLiteral).
					for _, v := range src[nextEmit:s] {
						dst.tokens[dst.n] = token(v)
						dst.litHist[v]++
						dst.n++
					}
				}
			}

			// Save the match found
			dst.AddMatchLong(int32(l), uint32(s-t-baseMatchOffset))
			s += l
			nextEmit = s
			if nextS >= s {
				s = nextS + 1
			}
			if s >= sLimit {
				goto emitRemainder
			}

			// We could immediately start working at s now, but to improve
			// compression we first update the hash table at s-2 and at s. If
			// another emitCopy is not our next move, also calculate nextHash
			// at s+1. At least on GOARCH=amd64, these three hash calculations
			// are faster as one load64 call (with some shifts) instead of
			// three load32 calls.
			x := load6416(src, s-2)
			o := s - 2
			prevHash := hashSL(uint32(x))
			table[prevHash] = tableEntry{offset: o}
			x >>= 16
			currHash := hashSL(uint32(x))
			candidate = table[currHash]
			table[currHash] = tableEntry{offset: o + 2}

			if uint32(x) != load3216(src, candidate.offset) {
				cv = uint32(x >> 8)
				s++
				break
			}
		}
	}

emitRemainder:
	if int(nextEmit) < len(src) {
		// If nothing was added, don't encode literals.
		if dst.n == 0 {
			return
		}
		emitLiteral(dst, src[nextEmit:])
	}
}
| @@ -0,0 +1,379 @@ | |||
| // Copyright 2009 The Go Authors. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| package flate | |||
| import ( | |||
| "bytes" | |||
| "encoding/binary" | |||
| "fmt" | |||
| "io" | |||
| "math" | |||
| ) | |||
// Token bit layout.
const (
	// bits 0-16 	xoffset = offset - MIN_OFFSET_SIZE, or literal - 16 bits
	// bits 16-22	offsetcode - 5 bits
	// bits 22-30   xlength = length - MIN_MATCH_LENGTH - 8 bits
	// bits 30-32   type   0 = literal  1=EOF  2=Match  3=Unused - 2 bits
	lengthShift = 22
	offsetMask  = 1<<lengthShift - 1
	typeMask    = 3 << 30
	literalType = 0 << 30
	matchType   = 1 << 30
	// matchOffsetOnlyMask keeps only the raw (unencoded) offset bits of a token.
	matchOffsetOnlyMask = 0xffff
)
// The length code for length X (MIN_MATCH_LENGTH <= X <= MAX_MATCH_LENGTH)
// is lengthCodes[length - MIN_MATCH_LENGTH].
var lengthCodes = [256]uint8{
	0, 1, 2, 3, 4, 5, 6, 7, 8, 8,
	9, 9, 10, 10, 11, 11, 12, 12, 12, 12,
	13, 13, 13, 13, 14, 14, 14, 14, 15, 15,
	15, 15, 16, 16, 16, 16, 16, 16, 16, 16,
	17, 17, 17, 17, 17, 17, 17, 17, 18, 18,
	18, 18, 18, 18, 18, 18, 19, 19, 19, 19,
	19, 19, 19, 19, 20, 20, 20, 20, 20, 20,
	20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
	21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
	21, 21, 21, 21, 21, 21, 22, 22, 22, 22,
	22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
	22, 22, 23, 23, 23, 23, 23, 23, 23, 23,
	23, 23, 23, 23, 23, 23, 23, 23, 24, 24,
	24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
	24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
	24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
	25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
	25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
	25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
	25, 25, 26, 26, 26, 26, 26, 26, 26, 26,
	26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
	26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
	26, 26, 26, 26, 27, 27, 27, 27, 27, 27,
	27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
	27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
	27, 27, 27, 27, 27, 28,
}
// lengthCodes1 is length codes, but starting at 1.
// Index 0 of the histogram is thereby reserved for the EOB marker.
var lengthCodes1 = [256]uint8{
	1, 2, 3, 4, 5, 6, 7, 8, 9, 9,
	10, 10, 11, 11, 12, 12, 13, 13, 13, 13,
	14, 14, 14, 14, 15, 15, 15, 15, 16, 16,
	16, 16, 17, 17, 17, 17, 17, 17, 17, 17,
	18, 18, 18, 18, 18, 18, 18, 18, 19, 19,
	19, 19, 19, 19, 19, 19, 20, 20, 20, 20,
	20, 20, 20, 20, 21, 21, 21, 21, 21, 21,
	21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
	22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
	22, 22, 22, 22, 22, 22, 23, 23, 23, 23,
	23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
	23, 23, 24, 24, 24, 24, 24, 24, 24, 24,
	24, 24, 24, 24, 24, 24, 24, 24, 25, 25,
	25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
	25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
	25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
	26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
	26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
	26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
	26, 26, 27, 27, 27, 27, 27, 27, 27, 27,
	27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
	27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
	27, 27, 27, 27, 28, 28, 28, 28, 28, 28,
	28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
	28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
	28, 28, 28, 28, 28, 29,
}
// offsetCodes maps an offset (or offset>>7, see offsetCode) to its code.
var offsetCodes = [256]uint32{
	0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,
	8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9,
	10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
	11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
	12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
	12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
	13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
	13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
	14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
	14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
	14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
	14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
	15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
	15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
	15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
	15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
}
// offsetCodes14 are offsetCodes, but with 14 added.
var offsetCodes14 = [256]uint32{
	14, 15, 16, 17, 18, 18, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21,
	22, 22, 22, 22, 22, 22, 22, 22, 23, 23, 23, 23, 23, 23, 23, 23,
	24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
	25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
	26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
	26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
	27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
	27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
	28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
	28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
	28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
	28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
	29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
	29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
	29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
	29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
}
// token packs a literal, match or EOF entry into 32 bits; the bit layout is
// described by the lengthShift/typeMask constant block.
type token uint32

// tokens accumulates the token stream for one block together with the
// histograms needed to build Huffman tables.
type tokens struct {
	extraHist [32]uint16  // codes 256->maxnumlit
	offHist   [32]uint16  // offset codes
	litHist   [256]uint16 // codes 0->255
	nFilled   int         // number of zero histogram entries bumped to 1 by Fill
	n         uint16      // Must be able to contain maxStoreBlockSize
	tokens    [maxStoreBlockSize + 1]token
}
| func (t *tokens) Reset() { | |||
| if t.n == 0 { | |||
| return | |||
| } | |||
| t.n = 0 | |||
| t.nFilled = 0 | |||
| for i := range t.litHist[:] { | |||
| t.litHist[i] = 0 | |||
| } | |||
| for i := range t.extraHist[:] { | |||
| t.extraHist[i] = 0 | |||
| } | |||
| for i := range t.offHist[:] { | |||
| t.offHist[i] = 0 | |||
| } | |||
| } | |||
| func (t *tokens) Fill() { | |||
| if t.n == 0 { | |||
| return | |||
| } | |||
| for i, v := range t.litHist[:] { | |||
| if v == 0 { | |||
| t.litHist[i] = 1 | |||
| t.nFilled++ | |||
| } | |||
| } | |||
| for i, v := range t.extraHist[:literalCount-256] { | |||
| if v == 0 { | |||
| t.nFilled++ | |||
| t.extraHist[i] = 1 | |||
| } | |||
| } | |||
| for i, v := range t.offHist[:offsetCodeCount] { | |||
| if v == 0 { | |||
| t.offHist[i] = 1 | |||
| } | |||
| } | |||
| } | |||
| func indexTokens(in []token) tokens { | |||
| var t tokens | |||
| t.indexTokens(in) | |||
| return t | |||
| } | |||
| func (t *tokens) indexTokens(in []token) { | |||
| t.Reset() | |||
| for _, tok := range in { | |||
| if tok < matchType { | |||
| t.AddLiteral(tok.literal()) | |||
| continue | |||
| } | |||
| t.AddMatch(uint32(tok.length()), tok.offset()&matchOffsetOnlyMask) | |||
| } | |||
| } | |||
| // emitLiteral writes a literal chunk and returns the number of bytes written. | |||
| func emitLiteral(dst *tokens, lit []byte) { | |||
| for _, v := range lit { | |||
| dst.tokens[dst.n] = token(v) | |||
| dst.litHist[v]++ | |||
| dst.n++ | |||
| } | |||
| } | |||
| func (t *tokens) AddLiteral(lit byte) { | |||
| t.tokens[t.n] = token(lit) | |||
| t.litHist[lit]++ | |||
| t.n++ | |||
| } | |||
// mFastLog2 is a fast approximate base-2 logarithm for float32.
// Adapted from https://stackoverflow.com/a/28730362
func mFastLog2(val float32) float32 {
	bits := int32(math.Float32bits(val))
	// Exponent gives the integer part of the logarithm.
	approx := (float32)(((bits >> 23) & 255) - 128)
	// Normalize the mantissa into [1, 2) and refine with a quadratic fit.
	bits &= -0x7f800001
	bits += 127 << 23
	m := math.Float32frombits(uint32(bits))
	approx += ((-0.34484843)*m+2.02466578)*m - 0.67487759
	return approx
}
// EstimatedBits will return a minimum size estimated by an *optimal*
// compression of the block, in bits.
// The estimate is Shannon entropy of the literal/length and offset
// histograms plus the extra bits attached to length and offset codes.
func (t *tokens) EstimatedBits() int {
	shannon := float32(0)
	bits := int(0)
	nMatches := 0
	total := int(t.n) + t.nFilled
	if total > 0 {
		invTotal := 1.0 / float32(total)
		for _, v := range t.litHist[:] {
			if v > 0 {
				n := float32(v)
				shannon += atLeastOne(-mFastLog2(n*invTotal)) * n
			}
		}
		// Just add 15 for EOB
		shannon += 15
		// Index 0 (EOB) is skipped; see lengthCodes1.
		for i, v := range t.extraHist[1 : literalCount-256] {
			if v > 0 {
				n := float32(v)
				shannon += atLeastOne(-mFastLog2(n*invTotal)) * n
				bits += int(lengthExtraBits[i&31]) * int(v)
				nMatches += int(v)
			}
		}
	}
	if nMatches > 0 {
		// Offsets are coded in a separate histogram scaled by match count.
		invTotal := 1.0 / float32(nMatches)
		for i, v := range t.offHist[:offsetCodeCount] {
			if v > 0 {
				n := float32(v)
				shannon += atLeastOne(-mFastLog2(n*invTotal)) * n
				bits += int(offsetExtraBits[i&31]) * int(v)
			}
		}
	}
	return int(shannon) + bits
}
// AddMatch adds a match to the tokens.
// xlength must include baseMatchLength; xoffset must include baseMatchOffset.
// This function is very sensitive to inlining and right on the border.
func (t *tokens) AddMatch(xlength uint32, xoffset uint32) {
	if debugDeflate {
		if xlength >= maxMatchLength+baseMatchLength {
			panic(fmt.Errorf("invalid length: %v", xlength))
		}
		if xoffset >= maxMatchOffset+baseMatchOffset {
			panic(fmt.Errorf("invalid offset: %v", xoffset))
		}
	}
	// Store the offset code in bits 16+ of the offset field.
	oCode := offsetCode(xoffset)
	xoffset |= oCode << 16
	t.extraHist[lengthCodes1[uint8(xlength)]]++
	t.offHist[oCode&31]++
	t.tokens[t.n] = token(matchType | xlength<<lengthShift | xoffset)
	t.n++
}
// AddMatchLong adds a match to the tokens, potentially longer than max match length.
// Length should NOT have the base subtracted, only offset should.
// The match is split into chunks of at most 258 bytes, each emitted as a
// separate match token with the same offset.
func (t *tokens) AddMatchLong(xlength int32, xoffset uint32) {
	if debugDeflate {
		if xoffset >= maxMatchOffset+baseMatchOffset {
			panic(fmt.Errorf("invalid offset: %v", xoffset))
		}
	}
	// Store the offset code in bits 16+ of the offset field (computed once).
	oc := offsetCode(xoffset)
	xoffset |= oc << 16
	for xlength > 0 {
		xl := xlength
		if xl > 258 {
			// We need to have at least baseMatchLength left over for next loop.
			if xl > 258+baseMatchLength {
				xl = 258
			} else {
				xl = 258 - baseMatchLength
			}
		}
		xlength -= xl
		xl -= baseMatchLength
		t.extraHist[lengthCodes1[uint8(xl)]]++
		t.offHist[oc&31]++
		t.tokens[t.n] = token(matchType | uint32(xl)<<lengthShift | xoffset)
		t.n++
	}
}
// AddEOB adds an end-of-block marker token and counts it in the
// extra-length histogram (slot 0).
func (t *tokens) AddEOB() {
	t.tokens[t.n] = token(endBlockMarker)
	t.extraHist[0]++
	t.n++
}
// Slice returns the tokens emitted so far.
func (t *tokens) Slice() []token {
	return t.tokens[:t.n]
}
| // VarInt returns the tokens as varint encoded bytes. | |||
| func (t *tokens) VarInt() []byte { | |||
| var b = make([]byte, binary.MaxVarintLen32*int(t.n)) | |||
| var off int | |||
| for _, v := range t.tokens[:t.n] { | |||
| off += binary.PutUvarint(b[off:], uint64(v)) | |||
| } | |||
| return b[:off] | |||
| } | |||
// FromVarInt restores t to the varint encoded tokens provided.
// Any data in t is removed.
// Returns any decode error other than io.EOF, which terminates the stream.
func (t *tokens) FromVarInt(b []byte) error {
	var buf = bytes.NewReader(b)
	var toks []token
	for {
		r, err := binary.ReadUvarint(buf)
		if err == io.EOF {
			// Normal end of input.
			break
		}
		if err != nil {
			return err
		}
		toks = append(toks, token(r))
	}
	// Rebuild histograms and token array from the decoded tokens.
	t.indexTokens(toks)
	return nil
}
// typ returns the type bits of a token.
func (t token) typ() uint32 { return uint32(t) & typeMask }
// literal returns the literal byte of a literal token.
func (t token) literal() uint8 { return uint8(t) }
// offset returns the offset field of a match token
// (the offset code is stored in bits 16+, see AddMatch).
func (t token) offset() uint32 { return uint32(t) & offsetMask }
// length returns the encoded (base-subtracted) match length of a match token.
func (t token) length() uint8 { return uint8(t >> lengthShift) }
// lengthCode converts a (base-subtracted) length to its length code.
func lengthCode(len uint8) uint8 { return lengthCodes[len] }
| // Returns the offset code corresponding to a specific offset | |||
| func offsetCode(off uint32) uint32 { | |||
| if false { | |||
| if off < uint32(len(offsetCodes)) { | |||
| return offsetCodes[off&255] | |||
| } else if off>>7 < uint32(len(offsetCodes)) { | |||
| return offsetCodes[(off>>7)&255] + 14 | |||
| } else { | |||
| return offsetCodes[(off>>14)&255] + 28 | |||
| } | |||
| } | |||
| if off < uint32(len(offsetCodes)) { | |||
| return offsetCodes[uint8(off)] | |||
| } | |||
| return offsetCodes14[uint8(off>>7)] | |||
| } | |||
| @@ -0,0 +1,79 @@ | |||
| # Finite State Entropy | |||
| This package provides Finite State Entropy encoding and decoding. | |||
| Finite State Entropy (also referenced as [tANS](https://en.wikipedia.org/wiki/Asymmetric_numeral_systems#tANS)) | |||
| encoding provides a fast near-optimal symbol encoding/decoding | |||
| for byte blocks as implemented in [zstandard](https://github.com/facebook/zstd). | |||
| This can be used for compressing input with a lot of similar input values to the smallest number of bytes. | |||
| This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders, | |||
but it can be used as a secondary step to compressors (like Snappy) that do not do entropy encoding.
| * [Godoc documentation](https://godoc.org/github.com/klauspost/compress/fse) | |||
| ## News | |||
| * Feb 2018: First implementation released. Consider this beta software for now. | |||
| # Usage | |||
This package provides a low-level interface that allows compressing single, independent blocks.
Each block is separate, and there are no built-in integrity checks.
| This means that the caller should keep track of block sizes and also do checksums if needed. | |||
| Compressing a block is done via the [`Compress`](https://godoc.org/github.com/klauspost/compress/fse#Compress) function. | |||
| You must provide input and will receive the output and maybe an error. | |||
| These error values can be returned: | |||
| | Error | Description | | |||
| |---------------------|-----------------------------------------------------------------------------| | |||
| | `<nil>` | Everything ok, output is returned | | |||
| | `ErrIncompressible` | Returned when input is judged to be too hard to compress | | |||
| | `ErrUseRLE` | Returned from the compressor when the input is a single byte value repeated | | |||
| | `(error)` | An internal error occurred. | | |||
| As can be seen above there are errors that will be returned even under normal operation so it is important to handle these. | |||
| To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/fse#Scratch) object | |||
| that can be re-used for successive calls. Both compression and decompression accepts a `Scratch` object, and the same | |||
| object can be used for both. | |||
| Be aware, that when re-using a `Scratch` object that the *output* buffer is also re-used, so if you are still using this | |||
| you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output. | |||
| Decompressing is done by calling the [`Decompress`](https://godoc.org/github.com/klauspost/compress/fse#Decompress) function. | |||
| You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back | |||
| your input was likely corrupted. | |||
| It is important to note that a successful decoding does *not* mean your output matches your original input. | |||
| There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid. | |||
| For more detailed usage, see examples in the [godoc documentation](https://godoc.org/github.com/klauspost/compress/fse#pkg-examples). | |||
| # Performance | |||
| A lot of factors are affecting speed. Block sizes and compressibility of the material are primary factors. | |||
| All compression functions are currently only running on the calling goroutine so only one core will be used per block. | |||
| The compressor is significantly faster if symbols are kept as small as possible. The highest byte value of the input | |||
| is used to reduce some of the processing, so if all your input is above byte value 64 for instance, it may be | |||
| beneficial to transpose all your input values down by 64. | |||
With moderate block sizes around 64k, speed is typically 200MB/s per core for compression, and
around 300MB/s for decompression.
| The same hardware typically does Huffman (deflate) encoding at 125MB/s and decompression at 100MB/s. | |||
| # Plans | |||
| At one point, more internals will be exposed to facilitate more "expert" usage of the components. | |||
| A streaming interface is also likely to be implemented. Likely compatible with [FSE stream format](https://github.com/Cyan4973/FiniteStateEntropy/blob/dev/programs/fileio.c#L261). | |||
| # Contributing | |||
| Contributions are always welcome. Be aware that adding public functions will require good justification and breaking | |||
| changes will likely not be accepted. If in doubt open an issue before writing the PR. | |||
| @@ -0,0 +1,122 @@ | |||
| // Copyright 2018 Klaus Post. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| // Based on work Copyright (c) 2013, Yann Collet, released under BSD License. | |||
| package fse | |||
| import ( | |||
| "encoding/binary" | |||
| "errors" | |||
| "io" | |||
| ) | |||
// bitReader reads a bitstream in reverse.
// The last set bit indicates the start of the stream and is used
// for aligning the input.
type bitReader struct {
	in       []byte // input buffer, consumed from the end
	off      uint   // next byte to read is at in[off - 1]
	value    uint64 // bit container, consumed from the top
	bitsRead uint8  // number of bits already consumed from value
}
// init initializes and resets the bit reader.
// It returns an error if the input is empty or the end-of-stream
// marker bit cannot be found.
func (b *bitReader) init(in []byte) error {
	if len(in) < 1 {
		return errors.New("corrupt stream: too short")
	}
	b.in = in
	b.off = uint(len(in))
	// The highest bit of the last byte indicates where to start
	v := in[len(in)-1]
	if v == 0 {
		return errors.New("corrupt stream, did not find end of stream")
	}
	b.bitsRead = 64
	b.value = 0
	if len(in) >= 8 {
		b.fillFastStart()
	} else {
		b.fill()
		b.fill()
	}
	// Skip the padding above the marker bit.
	b.bitsRead += 8 - uint8(highBits(uint32(v)))
	return nil
}
// getBits will return n bits. n can be 0.
// Returns 0 when the container is exhausted.
func (b *bitReader) getBits(n uint8) uint16 {
	if n == 0 || b.bitsRead >= 64 {
		return 0
	}
	return b.getBitsFast(n)
}
// getBitsFast requires that at least one bit is requested every time.
// There are no checks if the buffer is filled.
func (b *bitReader) getBitsFast(n uint8) uint16 {
	const regMask = 64 - 1
	// Shift the already-read bits out the top, then shift the wanted
	// bits down to the bottom. Masks keep shift counts in [0,63].
	v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask))
	b.bitsRead += n
	return v
}
// fillFast() will make sure at least 32 bits are available.
// There must be at least 4 bytes available.
func (b *bitReader) fillFast() {
	if b.bitsRead < 32 {
		return
	}
	// 2 bounds checks.
	v := b.in[b.off-4:]
	v = v[:4]
	low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
	b.value = (b.value << 32) | uint64(low)
	b.bitsRead -= 32
	b.off -= 4
}
// fill() will make sure at least 32 bits are available.
// Unlike fillFast it also handles the tail, where fewer than 4 bytes remain.
func (b *bitReader) fill() {
	if b.bitsRead < 32 {
		return
	}
	if b.off > 4 {
		v := b.in[b.off-4:]
		v = v[:4]
		low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
		b.value = (b.value << 32) | uint64(low)
		b.bitsRead -= 32
		b.off -= 4
		return
	}
	// Tail: pull the remaining bytes in one at a time.
	for b.off > 0 {
		b.value = (b.value << 8) | uint64(b.in[b.off-1])
		b.bitsRead -= 8
		b.off--
	}
}
// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read.
func (b *bitReader) fillFastStart() {
	// Do single re-slice to avoid bounds checks.
	b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
	b.bitsRead = 0
	b.off -= 8
}
// finished returns true if all bits have been read from the bit stream.
func (b *bitReader) finished() bool {
	return b.bitsRead >= 64 && b.off == 0
}
// close the bitstream and returns an error if out-of-buffer reads occurred.
// bitsRead > 64 indicates more bits were consumed than were ever filled.
func (b *bitReader) close() error {
	// Release reference.
	b.in = nil
	if b.bitsRead > 64 {
		return io.ErrUnexpectedEOF
	}
	return nil
}
| @@ -0,0 +1,168 @@ | |||
| // Copyright 2018 Klaus Post. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| // Based on work Copyright (c) 2013, Yann Collet, released under BSD License. | |||
| package fse | |||
| import "fmt" | |||
// bitWriter will write bits.
// First bit will be LSB of the first byte of output.
type bitWriter struct {
	bitContainer uint64 // pending bits, LSB-first
	nBits        uint8  // number of valid bits in bitContainer
	out          []byte // flushed output
}
// bitMask16 is bitmasks. Has extra to avoid bounds check.
var bitMask16 = [32]uint16{
	0, 1, 3, 7, 0xF, 0x1F,
	0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF,
	0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF,
	0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF,
	0xFFFF, 0xFFFF} /* up to 16 bits */
// addBits16NC will add up to 16 bits.
// It will not check if there is space for them,
// so the caller must ensure that it has flushed recently.
func (b *bitWriter) addBits16NC(value uint16, bits uint8) {
	// Mask value to the requested width; &31 and &63 elide bounds/shift checks.
	b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63)
	b.nBits += bits
}
// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated.
// It will not check if there is space for them, so the caller must ensure that it has flushed recently.
func (b *bitWriter) addBits16Clean(value uint16, bits uint8) {
	b.bitContainer |= uint64(value) << (b.nBits & 63)
	b.nBits += bits
}
// addBits16ZeroNC will add up to 16 bits.
// It will not check if there is space for them,
// so the caller must ensure that it has flushed recently.
// This is fastest if bits can be zero.
func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) {
	if bits == 0 {
		return
	}
	// Clear any bits above the requested width via shift up/down.
	value <<= (16 - bits) & 15
	value >>= (16 - bits) & 15
	b.bitContainer |= uint64(value) << (b.nBits & 63)
	b.nBits += bits
}
// flush will flush all pending full bytes.
// There will be at least 56 bits available for writing when this has been called.
// Using flush32 is faster, but leaves less space for writing.
// The switch is unrolled per byte count — presumably to avoid a loop on
// this hot path (TODO confirm against benchmarks before restructuring).
func (b *bitWriter) flush() {
	v := b.nBits >> 3
	switch v {
	case 0:
	case 1:
		b.out = append(b.out,
			byte(b.bitContainer),
		)
	case 2:
		b.out = append(b.out,
			byte(b.bitContainer),
			byte(b.bitContainer>>8),
		)
	case 3:
		b.out = append(b.out,
			byte(b.bitContainer),
			byte(b.bitContainer>>8),
			byte(b.bitContainer>>16),
		)
	case 4:
		b.out = append(b.out,
			byte(b.bitContainer),
			byte(b.bitContainer>>8),
			byte(b.bitContainer>>16),
			byte(b.bitContainer>>24),
		)
	case 5:
		b.out = append(b.out,
			byte(b.bitContainer),
			byte(b.bitContainer>>8),
			byte(b.bitContainer>>16),
			byte(b.bitContainer>>24),
			byte(b.bitContainer>>32),
		)
	case 6:
		b.out = append(b.out,
			byte(b.bitContainer),
			byte(b.bitContainer>>8),
			byte(b.bitContainer>>16),
			byte(b.bitContainer>>24),
			byte(b.bitContainer>>32),
			byte(b.bitContainer>>40),
		)
	case 7:
		b.out = append(b.out,
			byte(b.bitContainer),
			byte(b.bitContainer>>8),
			byte(b.bitContainer>>16),
			byte(b.bitContainer>>24),
			byte(b.bitContainer>>32),
			byte(b.bitContainer>>40),
			byte(b.bitContainer>>48),
		)
	case 8:
		b.out = append(b.out,
			byte(b.bitContainer),
			byte(b.bitContainer>>8),
			byte(b.bitContainer>>16),
			byte(b.bitContainer>>24),
			byte(b.bitContainer>>32),
			byte(b.bitContainer>>40),
			byte(b.bitContainer>>48),
			byte(b.bitContainer>>56),
		)
	default:
		// More than 64 bits pending indicates a caller bug (missed flush).
		panic(fmt.Errorf("bits (%d) > 64", b.nBits))
	}
	// Drop the flushed bytes; keep the remaining 0-7 bits.
	b.bitContainer >>= v << 3
	b.nBits &= 7
}
// flush32 will flush out, so there are at least 32 bits available for writing.
func (b *bitWriter) flush32() {
	if b.nBits < 32 {
		return
	}
	// Write the low 4 bytes little-endian.
	b.out = append(b.out,
		byte(b.bitContainer),
		byte(b.bitContainer>>8),
		byte(b.bitContainer>>16),
		byte(b.bitContainer>>24))
	b.nBits -= 32
	b.bitContainer >>= 32
}
// flushAlign will flush remaining full bytes and align to next byte boundary.
// Partial trailing bits are written padded with zeros.
func (b *bitWriter) flushAlign() {
	nbBytes := (b.nBits + 7) >> 3
	for i := uint8(0); i < nbBytes; i++ {
		b.out = append(b.out, byte(b.bitContainer>>(i*8)))
	}
	b.nBits = 0
	b.bitContainer = 0
}
// close will write the alignment bit and write the final byte(s)
// to the output. It always returns nil; the error return keeps the
// signature uniform with other closers.
func (b *bitWriter) close() error {
	// End mark
	b.addBits16Clean(1, 1)
	// flush until next byte.
	b.flushAlign()
	return nil
}
// reset and continue writing by appending to out.
func (b *bitWriter) reset(out []byte) {
	b.bitContainer = 0
	b.nBits = 0
	b.out = out
}
| @@ -0,0 +1,47 @@ | |||
| // Copyright 2018 Klaus Post. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| // Based on work Copyright (c) 2013, Yann Collet, released under BSD License. | |||
| package fse | |||
// byteReader provides a byte reader that reads
// little endian values from a byte stream.
// The input stream is manually advanced.
// The reader performs no bounds checks.
type byteReader struct {
	b   []byte // input buffer
	off int    // current read offset into b
}
// init will initialize the reader and set the input.
func (b *byteReader) init(in []byte) {
	b.b = in
	b.off = 0
}
// advance the stream by n bytes.
func (b *byteReader) advance(n uint) {
	b.off += int(n)
}
// Uint32 returns a little endian uint32 starting at current offset.
func (b byteReader) Uint32() uint32 {
	// Re-slice to 4 bytes so the compiler can elide later bounds checks.
	b2 := b.b[b.off:]
	b2 = b2[:4]
	v3 := uint32(b2[3])
	v2 := uint32(b2[2])
	v1 := uint32(b2[1])
	v0 := uint32(b2[0])
	return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24)
}
// unread returns the unread portion of the input.
func (b byteReader) unread() []byte {
	return b.b[b.off:]
}
// remain will return the number of bytes remaining.
func (b byteReader) remain() int {
	return len(b.b) - b.off
}
| @@ -0,0 +1,683 @@ | |||
| // Copyright 2018 Klaus Post. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| // Based on work Copyright (c) 2013, Yann Collet, released under BSD License. | |||
| package fse | |||
| import ( | |||
| "errors" | |||
| "fmt" | |||
| ) | |||
// Compress the input bytes. Input must be < 2GB.
// Provide a Scratch buffer to avoid memory allocations.
// Note that the output is also kept in the scratch buffer.
// If input is too hard to compress, ErrIncompressible is returned.
// If input is a single byte value repeated ErrUseRLE is returned.
func Compress(in []byte, s *Scratch) ([]byte, error) {
	if len(in) <= 1 {
		return nil, ErrIncompressible
	}
	if len(in) > (2<<30)-1 {
		return nil, errors.New("input too big, must be < 2GB")
	}
	s, err := s.prepare(in)
	if err != nil {
		return nil, err
	}
	// Create histogram, if none was provided.
	maxCount := s.maxCount
	if maxCount == 0 {
		maxCount = s.countSimple(in)
	}
	// Reset for next run.
	s.clearCount = true
	s.maxCount = 0
	if maxCount == len(in) {
		// One symbol, use RLE
		return nil, ErrUseRLE
	}
	if maxCount == 1 || maxCount < (len(in)>>7) {
		// Each symbol present maximum once or too well distributed.
		return nil, ErrIncompressible
	}
	// Pipeline: pick table size, normalize counts, serialize the
	// header, build the encoding table, then encode the payload.
	s.optimalTableLog()
	err = s.normalizeCount()
	if err != nil {
		return nil, err
	}
	err = s.writeCount()
	if err != nil {
		return nil, err
	}
	if false {
		// Debug-only sanity check of the normalized counts.
		err = s.validateNorm()
		if err != nil {
			return nil, err
		}
	}
	err = s.buildCTable()
	if err != nil {
		return nil, err
	}
	err = s.compress(in)
	if err != nil {
		return nil, err
	}
	s.Out = s.bw.out
	// Check if we compressed.
	if len(s.Out) >= len(in) {
		return nil, ErrIncompressible
	}
	return s.Out, nil
}
// cState contains the compression state of a stream.
type cState struct {
	bw         *bitWriter
	stateTable []uint16 // next-state table, shared from cTable
	state      uint16   // current FSE state
}
// init will initialize the compression state to the first symbol of the stream.
// NOTE(review): tableLog is currently unused by the body — confirm before removing,
// as callers pass it.
func (c *cState) init(bw *bitWriter, ct *cTable, tableLog uint8, first symbolTransform) {
	c.bw = bw
	c.stateTable = ct.stateTable
	// Derive the initial state from the first symbol's transform.
	nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16
	im := int32((nbBitsOut << 16) - first.deltaNbBits)
	lu := (im >> nbBitsOut) + first.deltaFindState
	c.state = c.stateTable[lu]
}
// encode the output symbol provided and write it to the bitstream.
func (c *cState) encode(symbolTT symbolTransform) {
	nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16
	dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState
	c.bw.addBits16NC(c.state, uint8(nbBitsOut))
	c.state = c.stateTable[dstState]
}
// encodeZero encodes the output symbol provided and writes it to the bitstream.
// Safe when the symbol may emit zero bits.
func (c *cState) encodeZero(symbolTT symbolTransform) {
	nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16
	dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState
	c.bw.addBits16ZeroNC(c.state, uint8(nbBitsOut))
	c.state = c.stateTable[dstState]
}
// flush will write the tablelog to the output and flush the remaining full bytes.
func (c *cState) flush(tableLog uint8) {
	c.bw.flush32()
	c.bw.addBits16NC(c.state, tableLog)
	c.bw.flush()
}
// compress is the main compression loop that will encode the input from the last byte to the first.
// Two interleaved states (c1, c2) each encode every second byte; the loop
// variant is chosen by table size and whether any symbol can emit 0 bits.
func (s *Scratch) compress(src []byte) error {
	if len(src) <= 2 {
		return errors.New("compress: src too small")
	}
	tt := s.ct.symbolTT[:256]
	s.bw.reset(s.Out)
	// Our two states each encodes every second byte.
	// Last byte encoded (first byte decoded) will always be encoded by c1.
	var c1, c2 cState
	// Encode so remaining size is divisible by 4.
	ip := len(src)
	if ip&1 == 1 {
		c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]])
		c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]])
		c1.encodeZero(tt[src[ip-3]])
		ip -= 3
	} else {
		c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]])
		c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]])
		ip -= 2
	}
	if ip&2 != 0 {
		c2.encodeZero(tt[src[ip-1]])
		c1.encodeZero(tt[src[ip-2]])
		ip -= 2
	}
	// Main compression loop.
	switch {
	case !s.zeroBits && s.actualTableLog <= 8:
		// We can encode 4 symbols without requiring a flush.
		// We do not need to check if any output is 0 bits.
		for ip >= 4 {
			s.bw.flush32()
			v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1]
			c2.encode(tt[v0])
			c1.encode(tt[v1])
			c2.encode(tt[v2])
			c1.encode(tt[v3])
			ip -= 4
		}
	case !s.zeroBits:
		// We do not need to check if any output is 0 bits.
		// Larger table: flush midway so the container never overflows.
		for ip >= 4 {
			s.bw.flush32()
			v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1]
			c2.encode(tt[v0])
			c1.encode(tt[v1])
			s.bw.flush32()
			c2.encode(tt[v2])
			c1.encode(tt[v3])
			ip -= 4
		}
	case s.actualTableLog <= 8:
		// We can encode 4 symbols without requiring a flush
		for ip >= 4 {
			s.bw.flush32()
			v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1]
			c2.encodeZero(tt[v0])
			c1.encodeZero(tt[v1])
			c2.encodeZero(tt[v2])
			c1.encodeZero(tt[v3])
			ip -= 4
		}
	default:
		// Zero-bit symbols possible and a large table: safest variant.
		for ip >= 4 {
			s.bw.flush32()
			v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1]
			c2.encodeZero(tt[v0])
			c1.encodeZero(tt[v1])
			s.bw.flush32()
			c2.encodeZero(tt[v2])
			c1.encodeZero(tt[v3])
			ip -= 4
		}
	}
	// Flush final state.
	// Used to initialize state when decoding.
	c2.flush(s.actualTableLog)
	c1.flush(s.actualTableLog)
	return s.bw.close()
}
// writeCount will write the normalized histogram count to header.
// This is read back by readNCount.
// Counts are bit-packed with a variable number of bits per symbol; runs
// of zero counts are run-length encoded.
func (s *Scratch) writeCount() error {
	var (
		tableLog  = s.actualTableLog
		tableSize = 1 << tableLog
		previous0 bool
		charnum   uint16
		maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3
		// Write Table Size
		bitStream = uint32(tableLog - minTablelog)
		bitCount  = uint(4)
		remaining = int16(tableSize + 1) /* +1 for extra accuracy */
		threshold = int16(tableSize)
		nbBits    = uint(tableLog + 1)
	)
	if cap(s.Out) < maxHeaderSize {
		s.Out = make([]byte, 0, s.br.remain()+maxHeaderSize)
	}
	outP := uint(0)
	out := s.Out[:maxHeaderSize]
	// stops at 1
	for remaining > 1 {
		if previous0 {
			// Run-length encode a run of zero counts.
			start := charnum
			for s.norm[charnum] == 0 {
				charnum++
			}
			// Emit full runs of 24 zeros as 0xFFFF markers.
			for charnum >= start+24 {
				start += 24
				bitStream += uint32(0xFFFF) << bitCount
				out[outP] = byte(bitStream)
				out[outP+1] = byte(bitStream >> 8)
				outP += 2
				bitStream >>= 16
			}
			// Emit runs of 3 zeros as 2-bit markers.
			for charnum >= start+3 {
				start += 3
				bitStream += 3 << bitCount
				bitCount += 2
			}
			// Remainder of the run (0-2 zeros).
			bitStream += uint32(charnum-start) << bitCount
			bitCount += 2
			if bitCount > 16 {
				out[outP] = byte(bitStream)
				out[outP+1] = byte(bitStream >> 8)
				outP += 2
				bitStream >>= 16
				bitCount -= 16
			}
		}
		count := s.norm[charnum]
		charnum++
		max := (2*threshold - 1) - remaining
		if count < 0 {
			remaining += count
		} else {
			remaining -= count
		}
		count++ // +1 for extra accuracy
		if count >= threshold {
			count += max // [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[
		}
		bitStream += uint32(count) << bitCount
		bitCount += nbBits
		if count < max {
			// Small counts need one bit fewer.
			bitCount--
		}
		previous0 = count == 1
		if remaining < 1 {
			return errors.New("internal error: remaining<1")
		}
		// Shrink the field width as the remaining total drops.
		for remaining < threshold {
			nbBits--
			threshold >>= 1
		}
		if bitCount > 16 {
			out[outP] = byte(bitStream)
			out[outP+1] = byte(bitStream >> 8)
			outP += 2
			bitStream >>= 16
			bitCount -= 16
		}
	}
	// Flush the final partial bits.
	out[outP] = byte(bitStream)
	out[outP+1] = byte(bitStream >> 8)
	outP += (bitCount + 7) / 8
	if charnum > s.symbolLen {
		return errors.New("internal error: charnum > s.symbolLen")
	}
	s.Out = out[:outP]
	return nil
}
// symbolTransform contains the state transform for a symbol.
type symbolTransform struct {
	deltaFindState int32  // offset into the state table for this symbol
	deltaNbBits    uint32 // packed bit-count delta (see buildCTable)
}
// String prints values as a human readable string.
func (s symbolTransform) String() string {
	return fmt.Sprintf("dnbits: %08x, fs:%d", s.deltaNbBits, s.deltaFindState)
}
// cTable contains tables used for compression.
type cTable struct {
	tableSymbol []byte            // symbol for each table position
	stateTable  []uint16          // next-state values, sorted by symbol
	symbolTT    []symbolTransform // per-symbol encoding transforms
}
// allocCtable will allocate tables needed for compression.
// If existing tables are big enough, they are simply re-used.
func (s *Scratch) allocCtable() {
	tableSize := 1 << s.actualTableLog
	// get tableSymbol that is big enough.
	if cap(s.ct.tableSymbol) < tableSize {
		s.ct.tableSymbol = make([]byte, tableSize)
	}
	s.ct.tableSymbol = s.ct.tableSymbol[:tableSize]
	ctSize := tableSize
	if cap(s.ct.stateTable) < ctSize {
		s.ct.stateTable = make([]uint16, ctSize)
	}
	s.ct.stateTable = s.ct.stateTable[:ctSize]
	// symbolTT is always sized for the full byte alphabet.
	if cap(s.ct.symbolTT) < 256 {
		s.ct.symbolTT = make([]symbolTransform, 256)
	}
	s.ct.symbolTT = s.ct.symbolTT[:256]
}
// buildCTable will populate the compression table so it is ready to be used.
// It computes cumulative symbol start positions, spreads symbols over the
// table, builds the next-state table, and finally the per-symbol transforms.
func (s *Scratch) buildCTable() error {
	tableSize := uint32(1 << s.actualTableLog)
	highThreshold := tableSize - 1
	var cumul [maxSymbolValue + 2]int16
	s.allocCtable()
	tableSymbol := s.ct.tableSymbol[:tableSize]
	// symbol start positions
	{
		cumul[0] = 0
		for ui, v := range s.norm[:s.symbolLen-1] {
			u := byte(ui) // one less than reference
			if v == -1 {
				// Low proba symbol
				cumul[u+1] = cumul[u] + 1
				tableSymbol[highThreshold] = u
				highThreshold--
			} else {
				cumul[u+1] = cumul[u] + v
			}
		}
		// Encode last symbol separately to avoid overflowing u
		u := int(s.symbolLen - 1)
		v := s.norm[s.symbolLen-1]
		if v == -1 {
			// Low proba symbol
			cumul[u+1] = cumul[u] + 1
			tableSymbol[highThreshold] = byte(u)
			highThreshold--
		} else {
			cumul[u+1] = cumul[u] + v
		}
		if uint32(cumul[s.symbolLen]) != tableSize {
			return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize)
		}
		cumul[s.symbolLen] = int16(tableSize) + 1
	}
	// Spread symbols
	s.zeroBits = false
	{
		step := tableStep(tableSize)
		tableMask := tableSize - 1
		var position uint32
		// if any symbol > largeLimit, we may have 0 bits output.
		largeLimit := int16(1 << (s.actualTableLog - 1))
		for ui, v := range s.norm[:s.symbolLen] {
			symbol := byte(ui)
			if v > largeLimit {
				s.zeroBits = true
			}
			for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ {
				tableSymbol[position] = symbol
				position = (position + step) & tableMask
				for position > highThreshold {
					position = (position + step) & tableMask
				} /* Low proba area */
			}
		}
		// Check if we have gone through all positions
		if position != 0 {
			return errors.New("position!=0")
		}
	}
	// Build table
	table := s.ct.stateTable
	{
		tsi := int(tableSize)
		for u, v := range tableSymbol {
			// TableU16 : sorted by symbol order; gives next state value
			table[cumul[v]] = uint16(tsi + u)
			cumul[v]++
		}
	}
	// Build Symbol Transformation Table
	{
		total := int16(0)
		symbolTT := s.ct.symbolTT[:s.symbolLen]
		tableLog := s.actualTableLog
		tl := (uint32(tableLog) << 16) - (1 << tableLog)
		for i, v := range s.norm[:s.symbolLen] {
			switch v {
			case 0:
				// Unused symbol: no transform needed.
			case -1, 1:
				symbolTT[i].deltaNbBits = tl
				symbolTT[i].deltaFindState = int32(total - 1)
				total++
			default:
				maxBitsOut := uint32(tableLog) - highBits(uint32(v-1))
				minStatePlus := uint32(v) << maxBitsOut
				symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus
				symbolTT[i].deltaFindState = int32(total - v)
				total += v
			}
		}
		if total != int16(tableSize) {
			return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize)
		}
	}
	return nil
}
| // countSimple will create a simple histogram in s.count. | |||
| // Returns the biggest count. | |||
| // Does not update s.clearCount. | |||
| func (s *Scratch) countSimple(in []byte) (max int) { | |||
| for _, v := range in { | |||
| s.count[v]++ | |||
| } | |||
| m := uint32(0) | |||
| for i, v := range s.count[:] { | |||
| if v > m { | |||
| m = v | |||
| } | |||
| if v > 0 { | |||
| s.symbolLen = uint16(i) + 1 | |||
| } | |||
| } | |||
| return int(m) | |||
| } | |||
| // minTableLog provides the minimum logSize to safely represent a distribution. | |||
| func (s *Scratch) minTableLog() uint8 { | |||
| minBitsSrc := highBits(uint32(s.br.remain()-1)) + 1 | |||
| minBitsSymbols := highBits(uint32(s.symbolLen-1)) + 2 | |||
| if minBitsSrc < minBitsSymbols { | |||
| return uint8(minBitsSrc) | |||
| } | |||
| return uint8(minBitsSymbols) | |||
| } | |||
| // optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog | |||
| func (s *Scratch) optimalTableLog() { | |||
| tableLog := s.TableLog | |||
| minBits := s.minTableLog() | |||
| maxBitsSrc := uint8(highBits(uint32(s.br.remain()-1))) - 2 | |||
| if maxBitsSrc < tableLog { | |||
| // Accuracy can be reduced | |||
| tableLog = maxBitsSrc | |||
| } | |||
| if minBits > tableLog { | |||
| tableLog = minBits | |||
| } | |||
| // Need a minimum to safely represent all symbol values | |||
| if tableLog < minTablelog { | |||
| tableLog = minTablelog | |||
| } | |||
| if tableLog > maxTableLog { | |||
| tableLog = maxTableLog | |||
| } | |||
| s.actualTableLog = tableLog | |||
| } | |||
// rtbTable holds the "rest to beat" rounding thresholds used by
// normalizeCount: for a small scaled probability (proba < 8) the fractional
// remainder must exceed vStep*rtbTable[proba] for the probability to be
// rounded up.
var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000}
// normalizeCount will normalize the count of the symbols so
// the total is equal to the table size (1 << s.actualTableLog).
// Results are written to s.norm, where -1 marks a low-probability symbol.
func (s *Scratch) normalizeCount() error {
	var (
		tableLog          = s.actualTableLog
		scale             = 62 - uint64(tableLog)              // fixed-point shift for probability math
		step              = (1 << 62) / uint64(s.br.remain())  // multiplier mapping a count to a scaled probability
		vStep             = uint64(1) << (scale - 20)          // unit used with rtbTable for rounding decisions
		stillToDistribute = int16(1 << tableLog)               // table cells not yet assigned
		largest           int
		largestP          int16
		lowThreshold      = (uint32)(s.br.remain() >> tableLog) // counts at/below this get the minimum slot
	)
	for i, cnt := range s.count[:s.symbolLen] {
		// already handled
		// if (count[s] == s.length) return 0; /* rle special case */
		if cnt == 0 {
			s.norm[i] = 0
			continue
		}
		if cnt <= lowThreshold {
			// Rare symbol: -1 encodes the minimum probability of 1.
			s.norm[i] = -1
			stillToDistribute--
		} else {
			// Scale the count into a table-sized probability (62-bit fixed point).
			proba := (int16)((uint64(cnt) * step) >> scale)
			if proba < 8 {
				// For small probabilities, round up when the fractional
				// remainder beats the per-proba threshold from rtbTable.
				restToBeat := vStep * uint64(rtbTable[proba])
				v := uint64(cnt)*step - (uint64(proba) << scale)
				if v > restToBeat {
					proba++
				}
			}
			// Track the most probable symbol; it absorbs rounding slack below.
			if proba > largestP {
				largestP = proba
				largest = i
			}
			s.norm[i] = proba
			stillToDistribute -= proba
		}
	}
	if -stillToDistribute >= (s.norm[largest] >> 1) {
		// corner case, need another normalization method
		return s.normalizeCount2()
	}
	// Give the remaining slack (possibly negative) to the most probable symbol.
	s.norm[largest] += stillToDistribute
	return nil
}
// Secondary normalization method.
// To be used when primary method fails.
// It settles zero/rare symbols first, then distributes the remaining table
// cells proportionally to the remaining counts.
func (s *Scratch) normalizeCount2() error {
	const notYetAssigned = -2
	var (
		distributed  uint32
		total        = uint32(s.br.remain())
		tableLog     = s.actualTableLog
		lowThreshold = total >> tableLog              // at/below: minimum slot (-1)
		lowOne       = (total * 3) >> (tableLog + 1)  // at/below: exactly one slot
	)
	// First pass: settle the easy symbols.
	for i, cnt := range s.count[:s.symbolLen] {
		if cnt == 0 {
			s.norm[i] = 0
			continue
		}
		if cnt <= lowThreshold {
			s.norm[i] = -1
			distributed++
			total -= cnt
			continue
		}
		if cnt <= lowOne {
			s.norm[i] = 1
			distributed++
			total -= cnt
			continue
		}
		s.norm[i] = notYetAssigned
	}
	toDistribute := (1 << tableLog) - distributed
	if (total / toDistribute) > lowOne {
		// risk of rounding to zero: raise the "one slot" threshold and try
		// to settle more symbols at probability 1.
		lowOne = (total * 3) / (toDistribute * 2)
		for i, cnt := range s.count[:s.symbolLen] {
			if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) {
				s.norm[i] = 1
				distributed++
				total -= cnt
				continue
			}
		}
		toDistribute = (1 << tableLog) - distributed
	}
	if distributed == uint32(s.symbolLen)+1 {
		// all values are pretty poor;
		// probably incompressible data (should have already been detected);
		// find max, then give all remaining points to max
		var maxV int
		var maxC uint32
		for i, cnt := range s.count[:s.symbolLen] {
			if cnt > maxC {
				maxV = i
				maxC = cnt
			}
		}
		s.norm[maxV] += int16(toDistribute)
		return nil
	}
	if total == 0 {
		// all of the symbols were low enough for the lowOne or lowThreshold;
		// hand out the remaining cells round-robin to symbols with norm > 0.
		for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) {
			if s.norm[i] > 0 {
				toDistribute--
				s.norm[i]++
			}
		}
		return nil
	}
	// Distribute remaining cells proportionally to the remaining counts,
	// using 62-bit fixed-point arithmetic.
	var (
		vStepLog = 62 - uint64(tableLog)
		mid      = uint64((1 << (vStepLog - 1)) - 1)
		rStep    = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining
		tmpTotal = mid
	)
	for i, cnt := range s.count[:s.symbolLen] {
		if s.norm[i] == notYetAssigned {
			var (
				end    = tmpTotal + uint64(cnt)*rStep
				sStart = uint32(tmpTotal >> vStepLog)
				sEnd   = uint32(end >> vStepLog)
				weight = sEnd - sStart
			)
			if weight < 1 {
				return errors.New("weight < 1")
			}
			s.norm[i] = int16(weight)
			tmpTotal = end
		}
	}
	return nil
}
| // validateNorm validates the normalized histogram table. | |||
| func (s *Scratch) validateNorm() (err error) { | |||
| var total int | |||
| for _, v := range s.norm[:s.symbolLen] { | |||
| if v >= 0 { | |||
| total += int(v) | |||
| } else { | |||
| total -= int(v) | |||
| } | |||
| } | |||
| defer func() { | |||
| if err == nil { | |||
| return | |||
| } | |||
| fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen) | |||
| for i, v := range s.norm[:s.symbolLen] { | |||
| fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v) | |||
| } | |||
| }() | |||
| if total != (1 << s.actualTableLog) { | |||
| return fmt.Errorf("warning: Total == %d != %d", total, 1<<s.actualTableLog) | |||
| } | |||
| for i, v := range s.count[s.symbolLen:] { | |||
| if v != 0 { | |||
| return fmt.Errorf("warning: Found symbol out of range, %d after cut", i) | |||
| } | |||
| } | |||
| return nil | |||
| } | |||
| @@ -0,0 +1,374 @@ | |||
| package fse | |||
| import ( | |||
| "errors" | |||
| "fmt" | |||
| ) | |||
| const ( | |||
| tablelogAbsoluteMax = 15 | |||
| ) | |||
| // Decompress a block of data. | |||
| // You can provide a scratch buffer to avoid allocations. | |||
| // If nil is provided a temporary one will be allocated. | |||
| // It is possible, but by no way guaranteed that corrupt data will | |||
| // return an error. | |||
| // It is up to the caller to verify integrity of the returned data. | |||
| // Use a predefined Scrach to set maximum acceptable output size. | |||
| func Decompress(b []byte, s *Scratch) ([]byte, error) { | |||
| s, err := s.prepare(b) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| s.Out = s.Out[:0] | |||
| err = s.readNCount() | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| err = s.buildDtable() | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| err = s.decompress() | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return s.Out, nil | |||
| } | |||
// readNCount will read the symbol distribution so decoding tables can be constructed.
// It parses the compact normalized-count header into s.norm and sets
// s.actualTableLog and s.symbolLen.
func (s *Scratch) readNCount() error {
	var (
		charnum   uint16 // next symbol index to fill
		previous0 bool   // previous count was zero: a zero run-length follows
		b         = &s.br
	)
	iend := b.remain()
	if iend < 4 {
		return errors.New("input too small")
	}
	bitStream := b.Uint32()
	nbBits := uint((bitStream & 0xF) + minTablelog) // extract tableLog
	if nbBits > tablelogAbsoluteMax {
		return errors.New("tableLog too large")
	}
	bitStream >>= 4
	bitCount := uint(4)
	s.actualTableLog = uint8(nbBits)
	// remaining counts probability points still unaccounted for (plus one).
	remaining := int32((1 << nbBits) + 1)
	// threshold is the current field size; it halves as remaining shrinks.
	threshold := int32(1 << nbBits)
	gotTotal := int32(0)
	nbBits++
	for remaining > 1 {
		if previous0 {
			// Decode a run of zero-count symbols.
			n0 := charnum
			for (bitStream & 0xFFFF) == 0xFFFF {
				// 16 consecutive one-bits: 24 more zero symbols.
				n0 += 24
				if b.off < iend-5 {
					b.advance(2)
					bitStream = b.Uint32() >> bitCount
				} else {
					// Near the end of input: shift instead of advancing.
					bitStream >>= 16
					bitCount += 16
				}
			}
			for (bitStream & 3) == 3 {
				// 2-bit value 3: 3 more zero symbols.
				n0 += 3
				bitStream >>= 2
				bitCount += 2
			}
			n0 += uint16(bitStream & 3)
			bitCount += 2
			if n0 > maxSymbolValue {
				return errors.New("maxSymbolValue too small")
			}
			for charnum < n0 {
				s.norm[charnum&0xff] = 0
				charnum++
			}
			if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 {
				// Consume whole bytes and refill the 32-bit window.
				b.advance(bitCount >> 3)
				bitCount &= 7
				bitStream = b.Uint32() >> bitCount
			} else {
				bitStream >>= 2
			}
		}
		// Decode one count with a variable-width field of size threshold.
		max := (2*(threshold) - 1) - (remaining)
		var count int32
		if (int32(bitStream) & (threshold - 1)) < max {
			// Short form: threshold-1 bits.
			count = int32(bitStream) & (threshold - 1)
			bitCount += nbBits - 1
		} else {
			// Long form: threshold bits, with the upper range folded down.
			count = int32(bitStream) & (2*threshold - 1)
			if count >= threshold {
				count -= max
			}
			bitCount += nbBits
		}
		count-- // extra accuracy
		if count < 0 {
			// -1 means +1
			remaining += count
			gotTotal -= count
		} else {
			remaining -= count
			gotTotal += count
		}
		s.norm[charnum&0xff] = int16(count)
		charnum++
		previous0 = count == 0
		for remaining < threshold {
			// Fewer points remain than the field size; shrink the field.
			nbBits--
			threshold >>= 1
		}
		if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 {
			b.advance(bitCount >> 3)
			bitCount &= 7
		} else {
			// Clamp to the last readable 4-byte window.
			bitCount -= (uint)(8 * (len(b.b) - 4 - b.off))
			b.off = len(b.b) - 4
		}
		bitStream = b.Uint32() >> (bitCount & 31)
	}
	s.symbolLen = charnum
	// Sanity checks against corrupt input.
	if s.symbolLen <= 1 {
		return fmt.Errorf("symbolLen (%d) too small", s.symbolLen)
	}
	if s.symbolLen > maxSymbolValue+1 {
		return fmt.Errorf("symbolLen (%d) too big", s.symbolLen)
	}
	if remaining != 1 {
		return fmt.Errorf("corruption detected (remaining %d != 1)", remaining)
	}
	if bitCount > 32 {
		return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount)
	}
	if gotTotal != 1<<s.actualTableLog {
		return fmt.Errorf("corruption detected (total %d != %d)", gotTotal, 1<<s.actualTableLog)
	}
	b.advance((bitCount + 7) >> 3)
	return nil
}
// decSymbol contains information about a state entry,
// Including the state offset base, the output symbol and
// the number of bits to read for the low part of the destination state.
type decSymbol struct {
	newState uint16 // base of the next state; stream bits are added to this
	symbol   uint8  // symbol emitted when this state is visited
	nbBits   uint8  // bits to read for the low part of the next state
}
| // allocDtable will allocate decoding tables if they are not big enough. | |||
| func (s *Scratch) allocDtable() { | |||
| tableSize := 1 << s.actualTableLog | |||
| if cap(s.decTable) < tableSize { | |||
| s.decTable = make([]decSymbol, tableSize) | |||
| } | |||
| s.decTable = s.decTable[:tableSize] | |||
| if cap(s.ct.tableSymbol) < 256 { | |||
| s.ct.tableSymbol = make([]byte, 256) | |||
| } | |||
| s.ct.tableSymbol = s.ct.tableSymbol[:256] | |||
| if cap(s.ct.stateTable) < 256 { | |||
| s.ct.stateTable = make([]uint16, 256) | |||
| } | |||
| s.ct.stateTable = s.ct.stateTable[:256] | |||
| } | |||
// buildDtable will build the decoding table from the normalized counts
// in s.norm. Must be called after readNCount.
func (s *Scratch) buildDtable() error {
	tableSize := uint32(1 << s.actualTableLog)
	highThreshold := tableSize - 1
	s.allocDtable()
	// symbolNext tracks, per symbol, the next state value to hand out.
	symbolNext := s.ct.stateTable[:256]
	// Init, lay down lowprob symbols
	s.zeroBits = false
	{
		largeLimit := int16(1 << (s.actualTableLog - 1))
		for i, v := range s.norm[:s.symbolLen] {
			if v == -1 {
				// Low-probability symbols fill the top of the table.
				s.decTable[highThreshold].symbol = uint8(i)
				highThreshold--
				symbolNext[i] = 1
			} else {
				if v >= largeLimit {
					// A dominant symbol means some states need 0 bits.
					s.zeroBits = true
				}
				symbolNext[i] = uint16(v)
			}
		}
	}
	// Spread symbols
	{
		tableMask := tableSize - 1
		step := tableStep(tableSize)
		position := uint32(0)
		for ss, v := range s.norm[:s.symbolLen] {
			for i := 0; i < int(v); i++ {
				s.decTable[position].symbol = uint8(ss)
				position = (position + step) & tableMask
				for position > highThreshold {
					// lowprob area
					position = (position + step) & tableMask
				}
			}
		}
		if position != 0 {
			// position must reach all cells once, otherwise normalizedCounter is incorrect
			return errors.New("corrupted input (position != 0)")
		}
	}
	// Build Decoding table
	{
		tableSize := uint16(1 << s.actualTableLog)
		for u, v := range s.decTable {
			symbol := v.symbol
			nextState := symbolNext[symbol]
			symbolNext[symbol] = nextState + 1
			nBits := s.actualTableLog - byte(highBits(uint32(nextState)))
			s.decTable[u].nbBits = nBits
			newState := (nextState << nBits) - tableSize
			if newState >= tableSize {
				return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize)
			}
			if newState == uint16(u) && nBits == 0 {
				// Seems weird that this is possible with nbits > 0.
				return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u)
			}
			s.decTable[u].newState = newState
		}
	}
	return nil
}
// decompress will decompress the bitstream.
// If the buffer is over-read an error is returned.
// Two interleaved decoder states are used, matching the encoder.
func (s *Scratch) decompress() error {
	br := &s.bits
	br.init(s.br.unread())
	var s1, s2 decoder
	// Initialize and decode first state and symbol.
	s1.init(br, s.decTable, s.actualTableLog)
	s2.init(br, s.decTable, s.actualTableLog)
	// Use temp table to avoid bound checks/append penalty.
	var tmp = s.ct.tableSymbol[:256]
	var off uint8
	// Main part
	if !s.zeroBits {
		// No 0-bit states, so the unchecked nextFast path is safe.
		for br.off >= 8 {
			br.fillFast()
			tmp[off+0] = s1.nextFast()
			tmp[off+1] = s2.nextFast()
			br.fillFast()
			tmp[off+2] = s1.nextFast()
			tmp[off+3] = s2.nextFast()
			off += 4
			// When off is 0, we have overflowed and should write.
			if off == 0 {
				s.Out = append(s.Out, tmp...)
				if len(s.Out) >= s.DecompressLimit {
					return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit)
				}
			}
		}
	} else {
		for br.off >= 8 {
			br.fillFast()
			tmp[off+0] = s1.next()
			tmp[off+1] = s2.next()
			br.fillFast()
			tmp[off+2] = s1.next()
			tmp[off+3] = s2.next()
			off += 4
			if off == 0 {
				s.Out = append(s.Out, tmp...)
				// When off is 0, we have overflowed and should write.
				if len(s.Out) >= s.DecompressLimit {
					return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit)
				}
			}
		}
	}
	// Flush whatever remains in the temp buffer.
	s.Out = append(s.Out, tmp[:off]...)
	// Final bits, a bit more expensive check
	for {
		if s1.finished() {
			s.Out = append(s.Out, s1.final(), s2.final())
			break
		}
		br.fill()
		s.Out = append(s.Out, s1.next())
		if s2.finished() {
			s.Out = append(s.Out, s2.final(), s1.final())
			break
		}
		s.Out = append(s.Out, s2.next())
		if len(s.Out) >= s.DecompressLimit {
			return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit)
		}
	}
	return br.close()
}
// decoder keeps track of the current state and updates it from the bitstream.
type decoder struct {
	state uint16      // current index into dt
	br    *bitReader  // source of the low bits of the next state
	dt    []decSymbol // decoding table, indexed by state
}
| // init will initialize the decoder and read the first state from the stream. | |||
| func (d *decoder) init(in *bitReader, dt []decSymbol, tableLog uint8) { | |||
| d.dt = dt | |||
| d.br = in | |||
| d.state = in.getBits(tableLog) | |||
| } | |||
| // next returns the next symbol and sets the next state. | |||
| // At least tablelog bits must be available in the bit reader. | |||
| func (d *decoder) next() uint8 { | |||
| n := &d.dt[d.state] | |||
| lowBits := d.br.getBits(n.nbBits) | |||
| d.state = n.newState + lowBits | |||
| return n.symbol | |||
| } | |||
| // finished returns true if all bits have been read from the bitstream | |||
| // and the next state would require reading bits from the input. | |||
| func (d *decoder) finished() bool { | |||
| return d.br.finished() && d.dt[d.state].nbBits > 0 | |||
| } | |||
| // final returns the current state symbol without decoding the next. | |||
| func (d *decoder) final() uint8 { | |||
| return d.dt[d.state].symbol | |||
| } | |||
| // nextFast returns the next symbol and sets the next state. | |||
| // This can only be used if no symbols are 0 bits. | |||
| // At least tablelog bits must be available in the bit reader. | |||
| func (d *decoder) nextFast() uint8 { | |||
| n := d.dt[d.state] | |||
| lowBits := d.br.getBitsFast(n.nbBits) | |||
| d.state = n.newState + lowBits | |||
| return n.symbol | |||
| } | |||
| @@ -0,0 +1,144 @@ | |||
| // Copyright 2018 Klaus Post. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| // Based on work Copyright (c) 2013, Yann Collet, released under BSD License. | |||
| // Package fse provides Finite State Entropy encoding and decoding. | |||
| // | |||
| // Finite State Entropy encoding provides a fast near-optimal symbol encoding/decoding | |||
| // for byte blocks as implemented in zstd. | |||
| // | |||
| // See https://github.com/klauspost/compress/tree/master/fse for more information. | |||
| package fse | |||
| import ( | |||
| "errors" | |||
| "fmt" | |||
| "math/bits" | |||
| ) | |||
const (
	/*!MEMORY_USAGE :
	 * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
	 * Increasing memory usage improves compression ratio
	 * Reduced memory usage can improve speed, due to cache effect
	 * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
	maxMemoryUsage     = 14
	defaultMemoryUsage = 13

	maxTableLog     = maxMemoryUsage - 2 // largest tablelog the coder will use
	maxTablesize    = 1 << maxTableLog
	defaultTablelog = defaultMemoryUsage - 2
	minTablelog     = 5
	maxSymbolValue  = 255 // symbols are bytes, so values span 0..255
)
var (
	// ErrIncompressible is returned when input is judged to be too hard to compress.
	ErrIncompressible = errors.New("input is not compressible")

	// ErrUseRLE is returned from the compressor when the input is a single byte value repeated.
	ErrUseRLE = errors.New("input is single value repeated")
)
// Scratch provides temporary storage for compression and decompression.
type Scratch struct {
	// Private
	count    [maxSymbolValue + 1]uint32 // symbol histogram
	norm     [maxSymbolValue + 1]int16  // normalized probabilities; -1 marks a low-probability symbol
	br       byteReader                 // input byte reader
	bits     bitReader                  // bitstream reader used by decompression
	bw       bitWriter                  // bitstream writer used by compression
	ct       cTable                     // Compression tables.
	decTable []decSymbol                // Decompression table.
	maxCount int                        // count of the most probable symbol

	// Per block parameters.
	// These can be used to override compression parameters of the block.
	// Do not touch, unless you know what you are doing.

	// Out is output buffer.
	// If the scratch is re-used before the caller is done processing the output,
	// set this field to nil.
	// Otherwise the output buffer will be re-used for next Compression/Decompression step
	// and allocation will be avoided.
	Out []byte

	// DecompressLimit limits the maximum decoded size acceptable.
	// If > 0 decompression will stop when approximately this many bytes
	// has been decoded.
	// If 0, maximum size will be 2GB.
	DecompressLimit int

	symbolLen      uint16 // Length of active part of the symbol table.
	actualTableLog uint8  // Selected tablelog.
	zeroBits       bool   // no bits has prob > 50%.
	clearCount     bool   // clear count
	// MaxSymbolValue will override the maximum symbol value of the next block.
	MaxSymbolValue uint8
	// TableLog will attempt to override the tablelog for the next block.
	TableLog uint8
}
// Histogram returns the histogram buffer (always length 256).
// It can be populated by the caller to skip the counting step of compression,
// or inspected afterwards to see the symbol distribution.
// If you populate it yourself, call HistogramFinished with the highest
// populated symbol value and the count of the most populated entry;
// those values are accepted at face value.
func (s *Scratch) Histogram() []uint32 {
	return s.count[:]
}
| // HistogramFinished can be called to indicate that the histogram has been populated. | |||
| // maxSymbol is the index of the highest set symbol of the next data segment. | |||
| // maxCount is the number of entries in the most populated entry. | |||
| // These are accepted at face value. | |||
| func (s *Scratch) HistogramFinished(maxSymbol uint8, maxCount int) { | |||
| s.maxCount = maxCount | |||
| s.symbolLen = uint16(maxSymbol) + 1 | |||
| s.clearCount = maxCount != 0 | |||
| } | |||
| // prepare will prepare and allocate scratch tables used for both compression and decompression. | |||
| func (s *Scratch) prepare(in []byte) (*Scratch, error) { | |||
| if s == nil { | |||
| s = &Scratch{} | |||
| } | |||
| if s.MaxSymbolValue == 0 { | |||
| s.MaxSymbolValue = 255 | |||
| } | |||
| if s.TableLog == 0 { | |||
| s.TableLog = defaultTablelog | |||
| } | |||
| if s.TableLog > maxTableLog { | |||
| return nil, fmt.Errorf("tableLog (%d) > maxTableLog (%d)", s.TableLog, maxTableLog) | |||
| } | |||
| if cap(s.Out) == 0 { | |||
| s.Out = make([]byte, 0, len(in)) | |||
| } | |||
| if s.clearCount && s.maxCount == 0 { | |||
| for i := range s.count { | |||
| s.count[i] = 0 | |||
| } | |||
| s.clearCount = false | |||
| } | |||
| s.br.init(in) | |||
| if s.DecompressLimit == 0 { | |||
| // Max size 2GB. | |||
| s.DecompressLimit = (2 << 30) - 1 | |||
| } | |||
| return s, nil | |||
| } | |||
// tableStep returns the increment used when spreading symbols over the
// table. The result is odd, so stepping by it modulo the power-of-two
// table size visits every cell exactly once.
func tableStep(tableSize uint32) uint32 {
	step := tableSize >> 1
	step += tableSize >> 3
	return step + 3
}
// highBits returns the index of the highest set bit in val.
// NOTE(review): for val == 0 the subtraction underflows to a very large
// uint32; callers appear to avoid passing zero — confirm before relying on it.
func highBits(val uint32) (n uint32) {
	n = uint32(bits.Len32(val) - 1)
	return n
}
| @@ -0,0 +1,4 @@ | |||
#!/bin/sh
# Run go:generate in the _s2sx command directory; abort if the directory is missing.
cd s2/cmd/_s2sx/ || exit 1
go generate .
| @@ -0,0 +1,349 @@ | |||
| // Copyright 2009 The Go Authors. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| // Package gzip implements reading and writing of gzip format compressed files, | |||
| // as specified in RFC 1952. | |||
| package gzip | |||
| import ( | |||
| "bufio" | |||
| "compress/gzip" | |||
| "encoding/binary" | |||
| "hash/crc32" | |||
| "io" | |||
| "time" | |||
| "github.com/klauspost/compress/flate" | |||
| ) | |||
const (
	gzipID1     = 0x1f // first magic byte of a gzip member
	gzipID2     = 0x8b // second magic byte
	gzipDeflate = 8    // CM (compression method): DEFLATE

	// FLG header bits (RFC 1952, section 2.3.1).
	flagText    = 1 << 0
	flagHdrCrc  = 1 << 1
	flagExtra   = 1 << 2
	flagName    = 1 << 3
	flagComment = 1 << 4
)
var (
	// ErrChecksum is returned when reading GZIP data that has an invalid checksum.
	ErrChecksum = gzip.ErrChecksum
	// ErrHeader is returned when reading GZIP data that has an invalid header.
	ErrHeader = gzip.ErrHeader
)

// le decodes the little-endian integer fields of the gzip header and trailer.
var le = binary.LittleEndian
| // noEOF converts io.EOF to io.ErrUnexpectedEOF. | |||
| func noEOF(err error) error { | |||
| if err == io.EOF { | |||
| return io.ErrUnexpectedEOF | |||
| } | |||
| return err | |||
| } | |||
// The gzip file stores a header giving metadata about the compressed file.
// That header is exposed as the fields of the Writer and Reader structs.
//
// Strings must be UTF-8 encoded and may only contain Unicode code points
// U+0001 through U+00FF, due to limitations of the GZIP file format.
type Header struct {
	Comment string    // comment (COMMENT field)
	Extra   []byte    // "extra data" (EXTRA field)
	ModTime time.Time // modification time (MTIME, seconds precision)
	Name    string    // original file name (NAME field)
	OS      byte      // operating system type (OS field)
}
// A Reader is an io.Reader that can be read to retrieve
// uncompressed data from a gzip-format compressed file.
//
// In general, a gzip file can be a concatenation of gzip files,
// each with its own header. Reads from the Reader
// return the concatenation of the uncompressed data of each.
// Only the first header is recorded in the Reader fields.
//
// Gzip files store a length and checksum of the uncompressed data.
// The Reader will return a ErrChecksum when Read
// reaches the end of the uncompressed data if it does not
// have the expected length or checksum. Clients should treat data
// returned by Read as tentative until they receive the io.EOF
// marking the end of the data.
type Reader struct {
	Header       // valid after NewReader or Reader.Reset
	r            flate.Reader
	br           *bufio.Reader // buffered wrapper created when r does not implement flate.Reader
	decompressor io.ReadCloser
	digest       uint32 // CRC-32, IEEE polynomial (section 8)
	size         uint32 // Uncompressed size (section 2.3.1)
	buf          [512]byte // scratch space for header/string reads
	err          error     // sticky error
	multistream  bool      // see Multistream
}
| // NewReader creates a new Reader reading the given reader. | |||
| // If r does not also implement io.ByteReader, | |||
| // the decompressor may read more data than necessary from r. | |||
| // | |||
| // It is the caller's responsibility to call Close on the Reader when done. | |||
| // | |||
| // The Reader.Header fields will be valid in the Reader returned. | |||
| func NewReader(r io.Reader) (*Reader, error) { | |||
| z := new(Reader) | |||
| if err := z.Reset(r); err != nil { | |||
| return nil, err | |||
| } | |||
| return z, nil | |||
| } | |||
| // Reset discards the Reader z's state and makes it equivalent to the | |||
| // result of its original state from NewReader, but reading from r instead. | |||
| // This permits reusing a Reader rather than allocating a new one. | |||
| func (z *Reader) Reset(r io.Reader) error { | |||
| *z = Reader{ | |||
| decompressor: z.decompressor, | |||
| multistream: true, | |||
| } | |||
| if rr, ok := r.(flate.Reader); ok { | |||
| z.r = rr | |||
| } else { | |||
| // Reuse if we can. | |||
| if z.br != nil { | |||
| z.br.Reset(r) | |||
| } else { | |||
| z.br = bufio.NewReader(r) | |||
| } | |||
| z.r = z.br | |||
| } | |||
| z.Header, z.err = z.readHeader() | |||
| return z.err | |||
| } | |||
// Multistream controls whether the reader supports multistream files.
//
// If enabled (the default, as set by Reset), the Reader expects the input to be a sequence
// of individually gzipped data streams, each with its own header and
// trailer, ending at EOF. The effect is that the concatenation of a sequence
// of gzipped files is treated as equivalent to the gzip of the concatenation
// of the sequence. This is standard behavior for gzip readers.
//
// Calling Multistream(false) disables this behavior; disabling the behavior
// can be useful when reading file formats that distinguish individual gzip
// data streams or mix gzip data streams with other data streams.
// In this mode, when the Reader reaches the end of the data stream,
// Read returns io.EOF. If the underlying reader implements io.ByteReader,
// it will be left positioned just after the gzip stream.
// To start the next stream, call z.Reset(r) followed by z.Multistream(false).
// If there is no next stream, z.Reset(r) will return io.EOF.
func (z *Reader) Multistream(ok bool) {
	z.multistream = ok
}
| // readString reads a NUL-terminated string from z.r. | |||
| // It treats the bytes read as being encoded as ISO 8859-1 (Latin-1) and | |||
| // will output a string encoded using UTF-8. | |||
| // This method always updates z.digest with the data read. | |||
| func (z *Reader) readString() (string, error) { | |||
| var err error | |||
| needConv := false | |||
| for i := 0; ; i++ { | |||
| if i >= len(z.buf) { | |||
| return "", ErrHeader | |||
| } | |||
| z.buf[i], err = z.r.ReadByte() | |||
| if err != nil { | |||
| return "", err | |||
| } | |||
| if z.buf[i] > 0x7f { | |||
| needConv = true | |||
| } | |||
| if z.buf[i] == 0 { | |||
| // Digest covers the NUL terminator. | |||
| z.digest = crc32.Update(z.digest, crc32.IEEETable, z.buf[:i+1]) | |||
| // Strings are ISO 8859-1, Latin-1 (RFC 1952, section 2.3.1). | |||
| if needConv { | |||
| s := make([]rune, 0, i) | |||
| for _, v := range z.buf[:i] { | |||
| s = append(s, rune(v)) | |||
| } | |||
| return string(s), nil | |||
| } | |||
| return string(z.buf[:i]), nil | |||
| } | |||
| } | |||
| } | |||
// readHeader reads the GZIP header according to section 2.3.1.
// This method does not set z.err.
func (z *Reader) readHeader() (hdr Header, err error) {
	// Fixed 10-byte prefix: ID1 ID2 CM FLG MTIME(4) XFL OS.
	if _, err = io.ReadFull(z.r, z.buf[:10]); err != nil {
		// RFC 1952, section 2.2, says the following:
		//	A gzip file consists of a series of "members" (compressed data sets).
		//
		// Other than this, the specification does not clarify whether a
		// "series" is defined as "one or more" or "zero or more". To err on the
		// side of caution, Go interprets this to mean "zero or more".
		// Thus, it is okay to return io.EOF here.
		return hdr, err
	}
	if z.buf[0] != gzipID1 || z.buf[1] != gzipID2 || z.buf[2] != gzipDeflate {
		return hdr, ErrHeader
	}
	flg := z.buf[3]
	hdr.ModTime = time.Unix(int64(le.Uint32(z.buf[4:8])), 0)
	// z.buf[8] is XFL and is currently ignored.
	hdr.OS = z.buf[9]
	// The optional header CRC (FHCRC) covers everything read so far.
	z.digest = crc32.ChecksumIEEE(z.buf[:10])
	if flg&flagExtra != 0 {
		// EXTRA field: 2-byte little-endian length followed by that many bytes.
		if _, err = io.ReadFull(z.r, z.buf[:2]); err != nil {
			return hdr, noEOF(err)
		}
		z.digest = crc32.Update(z.digest, crc32.IEEETable, z.buf[:2])
		data := make([]byte, le.Uint16(z.buf[:2]))
		if _, err = io.ReadFull(z.r, data); err != nil {
			return hdr, noEOF(err)
		}
		z.digest = crc32.Update(z.digest, crc32.IEEETable, data)
		hdr.Extra = data
	}
	var s string
	if flg&flagName != 0 {
		// NAME: NUL-terminated original file name.
		if s, err = z.readString(); err != nil {
			return hdr, err
		}
		hdr.Name = s
	}
	if flg&flagComment != 0 {
		// COMMENT: NUL-terminated free-form comment.
		if s, err = z.readString(); err != nil {
			return hdr, err
		}
		hdr.Comment = s
	}
	if flg&flagHdrCrc != 0 {
		// FHCRC: low 16 bits of the CRC-32 of the header bytes read so far.
		if _, err = io.ReadFull(z.r, z.buf[:2]); err != nil {
			return hdr, noEOF(err)
		}
		digest := le.Uint16(z.buf[:2])
		if digest != uint16(z.digest) {
			return hdr, ErrHeader
		}
	}
	// Reset the digest; from here it tracks the uncompressed payload.
	z.digest = 0
	if z.decompressor == nil {
		z.decompressor = flate.NewReader(z.r)
	} else {
		z.decompressor.(flate.Resetter).Reset(z.r, nil)
	}
	return hdr, nil
}
// Read implements io.Reader, reading uncompressed bytes from its underlying Reader.
func (z *Reader) Read(p []byte) (n int, err error) {
	if z.err != nil {
		return 0, z.err
	}
	// Loop until data is produced or a terminal condition is reached;
	// n == 0 at a member boundary may roll straight into the next member.
	for n == 0 {
		n, z.err = z.decompressor.Read(p)
		// Accumulate CRC and size of the decompressed output for the
		// trailer verification below.
		z.digest = crc32.Update(z.digest, crc32.IEEETable, p[:n])
		z.size += uint32(n)
		if z.err != io.EOF {
			// In the normal case we return here.
			return n, z.err
		}
		// Finished file; check checksum and size.
		if _, err := io.ReadFull(z.r, z.buf[:8]); err != nil {
			z.err = noEOF(err)
			return n, z.err
		}
		// Trailer: CRC-32 of the uncompressed data, then its length (mod 2^32).
		digest := le.Uint32(z.buf[:4])
		size := le.Uint32(z.buf[4:8])
		if digest != z.digest || size != z.size {
			z.err = ErrChecksum
			return n, z.err
		}
		z.digest, z.size = 0, 0
		// File is ok; check if there is another.
		if !z.multistream {
			return n, io.EOF
		}
		z.err = nil // Remove io.EOF
		if _, z.err = z.readHeader(); z.err != nil {
			return n, z.err
		}
	}
	return n, nil
}
// Support the io.WriteTo interface for io.Copy and friends.
// WriteTo streams all remaining members to w, verifying each trailer,
// and returns the total number of uncompressed bytes written.
func (z *Reader) WriteTo(w io.Writer) (int64, error) {
	total := int64(0)
	crcWriter := crc32.NewIEEE()
	for {
		if z.err != nil {
			if z.err == io.EOF {
				// A recorded io.EOF means the stream ended cleanly.
				return total, nil
			}
			return total, z.err
		}
		// We write both to output and digest.
		mw := io.MultiWriter(w, crcWriter)
		n, err := z.decompressor.(io.WriterTo).WriteTo(mw)
		total += n
		z.size += uint32(n)
		if err != nil {
			z.err = err
			return total, z.err
		}
		// Finished file; check checksum + size.
		if _, err := io.ReadFull(z.r, z.buf[0:8]); err != nil {
			if err == io.EOF {
				err = io.ErrUnexpectedEOF
			}
			z.err = err
			return total, err
		}
		z.digest = crcWriter.Sum32()
		digest := le.Uint32(z.buf[:4])
		size := le.Uint32(z.buf[4:8])
		if digest != z.digest || size != z.size {
			z.err = ErrChecksum
			return total, z.err
		}
		z.digest, z.size = 0, 0
		// File is ok; check if there is another.
		if !z.multistream {
			return total, nil
		}
		// Fresh CRC for the next member.
		crcWriter.Reset()
		z.err = nil // Remove io.EOF
		if _, z.err = z.readHeader(); z.err != nil {
			if z.err == io.EOF {
				// No further member; treat as a normal end of input.
				return total, nil
			}
			return total, z.err
		}
	}
}
| // Close closes the Reader. It does not close the underlying io.Reader. | |||
| // In order for the GZIP checksum to be verified, the reader must be | |||
| // fully consumed until the io.EOF. | |||
| func (z *Reader) Close() error { return z.decompressor.Close() } | |||
| @@ -0,0 +1,269 @@ | |||
| // Copyright 2010 The Go Authors. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| package gzip | |||
| import ( | |||
| "errors" | |||
| "fmt" | |||
| "hash/crc32" | |||
| "io" | |||
| "github.com/klauspost/compress/flate" | |||
| ) | |||
// These constants are copied from the flate package, so that code that imports
// "compress/gzip" does not also have to import "compress/flate".
const (
	NoCompression       = flate.NoCompression
	BestSpeed           = flate.BestSpeed
	BestCompression     = flate.BestCompression
	DefaultCompression  = flate.DefaultCompression
	ConstantCompression = flate.ConstantCompression
	HuffmanOnly         = flate.HuffmanOnly

	// StatelessCompression will do compression but without maintaining any state
	// between Write calls.
	// There will be no memory kept between Write calls,
	// but compression and speed will be suboptimal.
	// Because of this, the size of actual Write calls will affect output size.
	StatelessCompression = -3
)
// A Writer is an io.WriteCloser.
// Writes to a Writer are compressed and written to w.
type Writer struct {
	Header      // written at first call to Write, Flush, or Close
	w           io.Writer
	level       int           // compression level, fixed in init
	err         error         // sticky error; once set, every call returns it
	compressor  *flate.Writer // allocated lazily on first Write; stays nil in stateless mode
	digest      uint32        // CRC-32, IEEE polynomial (section 8)
	size        uint32        // Uncompressed size (section 2.3.1)
	wroteHeader bool          // header has been emitted
	closed      bool          // Close has completed; further Flush/Close are no-ops
	buf         [10]byte      // scratch space for header and trailer encoding
}
| // NewWriter returns a new Writer. | |||
| // Writes to the returned writer are compressed and written to w. | |||
| // | |||
| // It is the caller's responsibility to call Close on the WriteCloser when done. | |||
| // Writes may be buffered and not flushed until Close. | |||
| // | |||
| // Callers that wish to set the fields in Writer.Header must do so before | |||
| // the first call to Write, Flush, or Close. | |||
| func NewWriter(w io.Writer) *Writer { | |||
| z, _ := NewWriterLevel(w, DefaultCompression) | |||
| return z | |||
| } | |||
| // NewWriterLevel is like NewWriter but specifies the compression level instead | |||
| // of assuming DefaultCompression. | |||
| // | |||
| // The compression level can be DefaultCompression, NoCompression, or any | |||
| // integer value between BestSpeed and BestCompression inclusive. The error | |||
| // returned will be nil if the level is valid. | |||
| func NewWriterLevel(w io.Writer, level int) (*Writer, error) { | |||
| if level < StatelessCompression || level > BestCompression { | |||
| return nil, fmt.Errorf("gzip: invalid compression level: %d", level) | |||
| } | |||
| z := new(Writer) | |||
| z.init(w, level) | |||
| return z, nil | |||
| } | |||
| func (z *Writer) init(w io.Writer, level int) { | |||
| compressor := z.compressor | |||
| if level != StatelessCompression { | |||
| if compressor != nil { | |||
| compressor.Reset(w) | |||
| } | |||
| } | |||
| *z = Writer{ | |||
| Header: Header{ | |||
| OS: 255, // unknown | |||
| }, | |||
| w: w, | |||
| level: level, | |||
| compressor: compressor, | |||
| } | |||
| } | |||
// Reset discards the Writer z's state and makes it equivalent to the
// result of its original state from NewWriter or NewWriterLevel, but
// writing to w instead. This permits reusing a Writer rather than
// allocating a new one.
// The compression level chosen at construction time is retained.
func (z *Writer) Reset(w io.Writer) {
	z.init(w, z.level)
}
| // writeBytes writes a length-prefixed byte slice to z.w. | |||
| func (z *Writer) writeBytes(b []byte) error { | |||
| if len(b) > 0xffff { | |||
| return errors.New("gzip.Write: Extra data is too large") | |||
| } | |||
| le.PutUint16(z.buf[:2], uint16(len(b))) | |||
| _, err := z.w.Write(z.buf[:2]) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| _, err = z.w.Write(b) | |||
| return err | |||
| } | |||
| // writeString writes a UTF-8 string s in GZIP's format to z.w. | |||
| // GZIP (RFC 1952) specifies that strings are NUL-terminated ISO 8859-1 (Latin-1). | |||
| func (z *Writer) writeString(s string) (err error) { | |||
| // GZIP stores Latin-1 strings; error if non-Latin-1; convert if non-ASCII. | |||
| needconv := false | |||
| for _, v := range s { | |||
| if v == 0 || v > 0xff { | |||
| return errors.New("gzip.Write: non-Latin-1 header string") | |||
| } | |||
| if v > 0x7f { | |||
| needconv = true | |||
| } | |||
| } | |||
| if needconv { | |||
| b := make([]byte, 0, len(s)) | |||
| for _, v := range s { | |||
| b = append(b, byte(v)) | |||
| } | |||
| _, err = z.w.Write(b) | |||
| } else { | |||
| _, err = io.WriteString(z.w, s) | |||
| } | |||
| if err != nil { | |||
| return err | |||
| } | |||
| // GZIP strings are NUL-terminated. | |||
| z.buf[0] = 0 | |||
| _, err = z.w.Write(z.buf[:1]) | |||
| return err | |||
| } | |||
// Write writes a compressed form of p to the underlying io.Writer. The
// compressed bytes are not necessarily flushed until the Writer is closed.
func (z *Writer) Write(p []byte) (int, error) {
	if z.err != nil {
		return 0, z.err
	}
	var n int
	// Write the GZIP header lazily.
	if !z.wroteHeader {
		z.wroteHeader = true
		z.buf[0] = gzipID1
		z.buf[1] = gzipID2
		z.buf[2] = gzipDeflate
		z.buf[3] = 0
		if z.Extra != nil {
			z.buf[3] |= 0x04 // extra field follows
		}
		if z.Name != "" {
			z.buf[3] |= 0x08 // file name follows
		}
		if z.Comment != "" {
			z.buf[3] |= 0x10 // comment follows
		}
		le.PutUint32(z.buf[4:8], uint32(z.ModTime.Unix()))
		// XFL hint byte: 2 = maximum compression, 4 = fastest.
		if z.level == BestCompression {
			z.buf[8] = 2
		} else if z.level == BestSpeed {
			z.buf[8] = 4
		} else {
			z.buf[8] = 0
		}
		z.buf[9] = z.OS
		n, z.err = z.w.Write(z.buf[:10])
		if z.err != nil {
			return n, z.err
		}
		if z.Extra != nil {
			z.err = z.writeBytes(z.Extra)
			if z.err != nil {
				return n, z.err
			}
		}
		if z.Name != "" {
			z.err = z.writeString(z.Name)
			if z.err != nil {
				return n, z.err
			}
		}
		if z.Comment != "" {
			z.err = z.writeString(z.Comment)
			if z.err != nil {
				return n, z.err
			}
		}
		// Lazily allocate the compressor; stateless mode never needs one.
		if z.compressor == nil && z.level != StatelessCompression {
			z.compressor, _ = flate.NewWriter(z.w, z.level)
		}
	}
	// Accumulate size and CRC of the uncompressed input for the trailer.
	z.size += uint32(len(p))
	z.digest = crc32.Update(z.digest, crc32.IEEETable, p)
	if z.level == StatelessCompression {
		// Stateless mode compresses and emits each chunk immediately.
		return len(p), flate.StatelessDeflate(z.w, p, false, nil)
	}
	n, z.err = z.compressor.Write(p)
	return n, z.err
}
| // Flush flushes any pending compressed data to the underlying writer. | |||
| // | |||
| // It is useful mainly in compressed network protocols, to ensure that | |||
| // a remote reader has enough data to reconstruct a packet. Flush does | |||
| // not return until the data has been written. If the underlying | |||
| // writer returns an error, Flush returns that error. | |||
| // | |||
| // In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH. | |||
| func (z *Writer) Flush() error { | |||
| if z.err != nil { | |||
| return z.err | |||
| } | |||
| if z.closed || z.level == StatelessCompression { | |||
| return nil | |||
| } | |||
| if !z.wroteHeader { | |||
| z.Write(nil) | |||
| if z.err != nil { | |||
| return z.err | |||
| } | |||
| } | |||
| z.err = z.compressor.Flush() | |||
| return z.err | |||
| } | |||
// Close closes the Writer, flushing any unwritten data to the underlying
// io.Writer, but does not close the underlying io.Writer.
func (z *Writer) Close() error {
	if z.err != nil {
		return z.err
	}
	if z.closed {
		// Close is idempotent.
		return nil
	}
	z.closed = true
	if !z.wroteHeader {
		// Nothing was ever written; still emit a valid (empty) stream.
		z.Write(nil)
		if z.err != nil {
			return z.err
		}
	}
	if z.level == StatelessCompression {
		// Emit the final stateless block with the "last block" marker set.
		z.err = flate.StatelessDeflate(z.w, nil, true, nil)
	} else {
		z.err = z.compressor.Close()
	}
	if z.err != nil {
		return z.err
	}
	// Trailer: CRC-32 of the uncompressed data, then its size (mod 2^32).
	le.PutUint32(z.buf[:4], z.digest)
	le.PutUint32(z.buf[4:8], z.size)
	_, z.err = z.w.Write(z.buf[:8])
	return z.err
}
| @@ -0,0 +1 @@ | |||
| /huff0-fuzz.zip | |||
| @@ -0,0 +1,89 @@ | |||
| # Huff0 entropy compression | |||
| This package provides Huff0 encoding and decoding as used in zstd. | |||
| [Huff0](https://github.com/Cyan4973/FiniteStateEntropy#new-generation-entropy-coders), | |||
| a Huffman codec designed for modern CPU, featuring OoO (Out of Order) operations on multiple ALU | |||
| (Arithmetic Logic Unit), achieving extremely fast compression and decompression speeds. | |||
| This can be used for compressing input with a lot of similar input values to the smallest number of bytes. | |||
| This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders, | |||
but it can be used as a secondary step to compressors (like Snappy) that do not do entropy encoding.
| * [Godoc documentation](https://godoc.org/github.com/klauspost/compress/huff0) | |||
| ## News | |||
| This is used as part of the [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression package. | |||
| This ensures that most functionality is well tested. | |||
| # Usage | |||
| This package provides a low level interface that allows to compress single independent blocks. | |||
| Each block is separate, and there is no built in integrity checks. | |||
| This means that the caller should keep track of block sizes and also do checksums if needed. | |||
| Compressing a block is done via the [`Compress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress1X) and | |||
| [`Compress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress4X) functions. | |||
| You must provide input and will receive the output and maybe an error. | |||
| These error values can be returned: | |||
| | Error | Description | | |||
| |---------------------|-----------------------------------------------------------------------------| | |||
| | `<nil>` | Everything ok, output is returned | | |||
| | `ErrIncompressible` | Returned when input is judged to be too hard to compress | | |||
| | `ErrUseRLE` | Returned from the compressor when the input is a single byte value repeated | | |||
| `ErrTooBig`         | Returned if the input block exceeds the maximum allowed size (128 KiB)      |
| | `(error)` | An internal error occurred. | | |||
As can be seen above, some of these are errors that will be returned even under normal operation, so it is important to handle them.
| To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object | |||
| that can be re-used for successive calls. Both compression and decompression accepts a `Scratch` object, and the same | |||
| object can be used for both. | |||
| Be aware, that when re-using a `Scratch` object that the *output* buffer is also re-used, so if you are still using this | |||
| you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output. | |||
| The `Scratch` object will retain state that allows to re-use previous tables for encoding and decoding. | |||
| ## Tables and re-use | |||
| Huff0 allows for reusing tables from the previous block to save space if that is expected to give better/faster results. | |||
| The Scratch object allows you to set a [`ReusePolicy`](https://godoc.org/github.com/klauspost/compress/huff0#ReusePolicy) | |||
| that controls this behaviour. See the documentation for details. This can be altered between each block. | |||
| Do however note that this information is *not* stored in the output block and it is up to the users of the package to | |||
| record whether [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable) should be called, | |||
| based on the boolean reported back from the CompressXX call. | |||
| If you want to store the table separate from the data, you can access them as `OutData` and `OutTable` on the | |||
| [`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object. | |||
| ## Decompressing | |||
| The first part of decoding is to initialize the decoding table through [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable). | |||
| This will initialize the decoding tables. | |||
| You can supply the complete block to `ReadTable` and it will return the data part of the block | |||
| which can be given to the decompressor. | |||
| Decompressing is done by calling the [`Decompress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress1X) | |||
| or [`Decompress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress4X) function. | |||
| For concurrently decompressing content with a fixed table a stateless [`Decoder`](https://godoc.org/github.com/klauspost/compress/huff0#Decoder) can be requested which will remain correct as long as the scratch is unchanged. The capacity of the provided slice indicates the expected output size. | |||
| You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back | |||
| your input was likely corrupted. | |||
| It is important to note that a successful decoding does *not* mean your output matches your original input. | |||
| There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid. | |||
| # Contributing | |||
| Contributions are always welcome. Be aware that adding public functions will require good justification and breaking | |||
| changes will likely not be accepted. If in doubt open an issue before writing the PR. | |||
| @@ -0,0 +1,233 @@ | |||
| // Copyright 2018 Klaus Post. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| // Based on work Copyright (c) 2013, Yann Collet, released under BSD License. | |||
| package huff0 | |||
| import ( | |||
| "encoding/binary" | |||
| "errors" | |||
| "fmt" | |||
| "io" | |||
| ) | |||
// bitReaderBytes reads a bitstream in reverse.
// The last set bit indicates the start of the stream and is used
// for aligning the input.
type bitReaderBytes struct {
	in       []byte // input buffer, consumed from the end
	off      uint   // next byte to read is at in[off - 1]
	value    uint64 // bit container; unread bits sit at the top (MSB) end
	bitsRead uint8  // number of bits already consumed from value
}
| // init initializes and resets the bit reader. | |||
| func (b *bitReaderBytes) init(in []byte) error { | |||
| if len(in) < 1 { | |||
| return errors.New("corrupt stream: too short") | |||
| } | |||
| b.in = in | |||
| b.off = uint(len(in)) | |||
| // The highest bit of the last byte indicates where to start | |||
| v := in[len(in)-1] | |||
| if v == 0 { | |||
| return errors.New("corrupt stream, did not find end of stream") | |||
| } | |||
| b.bitsRead = 64 | |||
| b.value = 0 | |||
| if len(in) >= 8 { | |||
| b.fillFastStart() | |||
| } else { | |||
| b.fill() | |||
| b.fill() | |||
| } | |||
| b.advance(8 - uint8(highBit32(uint32(v)))) | |||
| return nil | |||
| } | |||
| // peekBitsFast requires that at least one bit is requested every time. | |||
| // There are no checks if the buffer is filled. | |||
| func (b *bitReaderBytes) peekByteFast() uint8 { | |||
| got := uint8(b.value >> 56) | |||
| return got | |||
| } | |||
// advance consumes n bits by shifting them out of the top of the container.
// The &63 mask bounds the shift amount; callers keep n well below 64.
func (b *bitReaderBytes) advance(n uint8) {
	b.bitsRead += n
	b.value <<= n & 63
}
| // fillFast() will make sure at least 32 bits are available. | |||
| // There must be at least 4 bytes available. | |||
| func (b *bitReaderBytes) fillFast() { | |||
| if b.bitsRead < 32 { | |||
| return | |||
| } | |||
| // 2 bounds checks. | |||
| v := b.in[b.off-4 : b.off] | |||
| v = v[:4] | |||
| low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) | |||
| b.value |= uint64(low) << (b.bitsRead - 32) | |||
| b.bitsRead -= 32 | |||
| b.off -= 4 | |||
| } | |||
// fillFastStart() assumes the bitReaderBytes is empty and there is at least 8 bytes to read.
func (b *bitReaderBytes) fillFastStart() {
	// Do single re-slice to avoid bounds checks.
	// Loads a full 64-bit container in one little-endian read.
	b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
	b.bitsRead = 0
	b.off -= 8
}
| // fill() will make sure at least 32 bits are available. | |||
| func (b *bitReaderBytes) fill() { | |||
| if b.bitsRead < 32 { | |||
| return | |||
| } | |||
| if b.off > 4 { | |||
| v := b.in[b.off-4:] | |||
| v = v[:4] | |||
| low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) | |||
| b.value |= uint64(low) << (b.bitsRead - 32) | |||
| b.bitsRead -= 32 | |||
| b.off -= 4 | |||
| return | |||
| } | |||
| for b.off > 0 { | |||
| b.value |= uint64(b.in[b.off-1]) << (b.bitsRead - 8) | |||
| b.bitsRead -= 8 | |||
| b.off-- | |||
| } | |||
| } | |||
// finished returns true if all bits have been read from the bit stream.
func (b *bitReaderBytes) finished() bool {
	// No input left and the container is fully consumed.
	return b.off == 0 && b.bitsRead >= 64
}
// remaining returns the number of unread bits left in the stream.
func (b *bitReaderBytes) remaining() uint {
	return b.off*8 + uint(64-b.bitsRead)
}
| // close the bitstream and returns an error if out-of-buffer reads occurred. | |||
| func (b *bitReaderBytes) close() error { | |||
| // Release reference. | |||
| b.in = nil | |||
| if b.remaining() > 0 { | |||
| return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) | |||
| } | |||
| if b.bitsRead > 64 { | |||
| return io.ErrUnexpectedEOF | |||
| } | |||
| return nil | |||
| } | |||
// bitReaderShifted reads a bitstream in reverse.
// The last set bit indicates the start of the stream and is used
// for aligning the input.
type bitReaderShifted struct {
	in       []byte // input buffer, consumed from the end
	off      uint   // next byte to read is at in[off - 1]
	value    uint64 // bit container; unread bits sit at the top (MSB) end
	bitsRead uint8  // number of bits already consumed from value
}
| // init initializes and resets the bit reader. | |||
| func (b *bitReaderShifted) init(in []byte) error { | |||
| if len(in) < 1 { | |||
| return errors.New("corrupt stream: too short") | |||
| } | |||
| b.in = in | |||
| b.off = uint(len(in)) | |||
| // The highest bit of the last byte indicates where to start | |||
| v := in[len(in)-1] | |||
| if v == 0 { | |||
| return errors.New("corrupt stream, did not find end of stream") | |||
| } | |||
| b.bitsRead = 64 | |||
| b.value = 0 | |||
| if len(in) >= 8 { | |||
| b.fillFastStart() | |||
| } else { | |||
| b.fill() | |||
| b.fill() | |||
| } | |||
| b.advance(8 - uint8(highBit32(uint32(v)))) | |||
| return nil | |||
| } | |||
| // peekBitsFast requires that at least one bit is requested every time. | |||
| // There are no checks if the buffer is filled. | |||
| func (b *bitReaderShifted) peekBitsFast(n uint8) uint16 { | |||
| return uint16(b.value >> ((64 - n) & 63)) | |||
| } | |||
// advance consumes n bits by shifting them out of the top of the container.
// The &63 mask bounds the shift amount; callers keep n well below 64.
func (b *bitReaderShifted) advance(n uint8) {
	b.bitsRead += n
	b.value <<= n & 63
}
| // fillFast() will make sure at least 32 bits are available. | |||
| // There must be at least 4 bytes available. | |||
| func (b *bitReaderShifted) fillFast() { | |||
| if b.bitsRead < 32 { | |||
| return | |||
| } | |||
| // 2 bounds checks. | |||
| v := b.in[b.off-4 : b.off] | |||
| v = v[:4] | |||
| low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) | |||
| b.value |= uint64(low) << ((b.bitsRead - 32) & 63) | |||
| b.bitsRead -= 32 | |||
| b.off -= 4 | |||
| } | |||
// fillFastStart() assumes the bitReaderShifted is empty and there is at least 8 bytes to read.
func (b *bitReaderShifted) fillFastStart() {
	// Do single re-slice to avoid bounds checks.
	// Loads a full 64-bit container in one little-endian read.
	b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
	b.bitsRead = 0
	b.off -= 8
}
| // fill() will make sure at least 32 bits are available. | |||
| func (b *bitReaderShifted) fill() { | |||
| if b.bitsRead < 32 { | |||
| return | |||
| } | |||
| if b.off > 4 { | |||
| v := b.in[b.off-4:] | |||
| v = v[:4] | |||
| low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) | |||
| b.value |= uint64(low) << ((b.bitsRead - 32) & 63) | |||
| b.bitsRead -= 32 | |||
| b.off -= 4 | |||
| return | |||
| } | |||
| for b.off > 0 { | |||
| b.value |= uint64(b.in[b.off-1]) << ((b.bitsRead - 8) & 63) | |||
| b.bitsRead -= 8 | |||
| b.off-- | |||
| } | |||
| } | |||
// remaining returns the number of unread bits left in the stream.
func (b *bitReaderShifted) remaining() uint {
	return b.off*8 + uint(64-b.bitsRead)
}
| // close the bitstream and returns an error if out-of-buffer reads occurred. | |||
| func (b *bitReaderShifted) close() error { | |||
| // Release reference. | |||
| b.in = nil | |||
| if b.remaining() > 0 { | |||
| return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) | |||
| } | |||
| if b.bitsRead > 64 { | |||
| return io.ErrUnexpectedEOF | |||
| } | |||
| return nil | |||
| } | |||
| @@ -0,0 +1,95 @@ | |||
| // Copyright 2018 Klaus Post. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| // Based on work Copyright (c) 2013, Yann Collet, released under BSD License. | |||
| package huff0 | |||
// bitWriter will write bits.
// First bit will be LSB of the first byte of output.
type bitWriter struct {
	bitContainer uint64 // pending bits, filled from the LSB upward
	nBits        uint8  // number of valid bits in bitContainer
	out          []byte // accumulated output bytes
}
// bitMask16 is bitmasks. Has extra to avoid bounds check.
// bitMask16[n] has the low n bits set, for n up to 16; the remaining
// entries (all 0xFFFF or zero) only exist so indexing by a uint8-derived
// value needs no bounds check.
var bitMask16 = [32]uint16{
	0, 1, 3, 7, 0xF, 0x1F,
	0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF,
	0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF,
	0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF,
	0xFFFF, 0xFFFF} /* up to 16 bits */
// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated.
// It will not check if there is space for them, so the caller must ensure that it has flushed recently.
func (b *bitWriter) addBits16Clean(value uint16, bits uint8) {
	// OR the value in at the current write position; no masking is needed
	// because the caller guarantees value has no stray high bits.
	b.bitContainer |= uint64(value) << (b.nBits & 63)
	b.nBits += bits
}
| // encSymbol will add up to 16 bits. value may not contain more set bits than indicated. | |||
| // It will not check if there is space for them, so the caller must ensure that it has flushed recently. | |||
| func (b *bitWriter) encSymbol(ct cTable, symbol byte) { | |||
| enc := ct[symbol] | |||
| b.bitContainer |= uint64(enc.val) << (b.nBits & 63) | |||
| if false { | |||
| if enc.nBits == 0 { | |||
| panic("nbits 0") | |||
| } | |||
| } | |||
| b.nBits += enc.nBits | |||
| } | |||
| // encTwoSymbols will add up to 32 bits. value may not contain more set bits than indicated. | |||
| // It will not check if there is space for them, so the caller must ensure that it has flushed recently. | |||
| func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) { | |||
| encA := ct[av] | |||
| encB := ct[bv] | |||
| sh := b.nBits & 63 | |||
| combined := uint64(encA.val) | (uint64(encB.val) << (encA.nBits & 63)) | |||
| b.bitContainer |= combined << sh | |||
| if false { | |||
| if encA.nBits == 0 { | |||
| panic("nbitsA 0") | |||
| } | |||
| if encB.nBits == 0 { | |||
| panic("nbitsB 0") | |||
| } | |||
| } | |||
| b.nBits += encA.nBits + encB.nBits | |||
| } | |||
| // flush32 will flush out, so there are at least 32 bits available for writing. | |||
| func (b *bitWriter) flush32() { | |||
| if b.nBits < 32 { | |||
| return | |||
| } | |||
| b.out = append(b.out, | |||
| byte(b.bitContainer), | |||
| byte(b.bitContainer>>8), | |||
| byte(b.bitContainer>>16), | |||
| byte(b.bitContainer>>24)) | |||
| b.nBits -= 32 | |||
| b.bitContainer >>= 32 | |||
| } | |||
| // flushAlign will flush remaining full bytes and align to next byte boundary. | |||
| func (b *bitWriter) flushAlign() { | |||
| nbBytes := (b.nBits + 7) >> 3 | |||
| for i := uint8(0); i < nbBytes; i++ { | |||
| b.out = append(b.out, byte(b.bitContainer>>(i*8))) | |||
| } | |||
| b.nBits = 0 | |||
| b.bitContainer = 0 | |||
| } | |||
// close will write the alignment bit and write the final byte(s)
// to the output. It always returns nil.
func (b *bitWriter) close() error {
	// End mark: the single set bit lets the reader find the stream start.
	b.addBits16Clean(1, 1)
	// flush until next byte.
	b.flushAlign()
	return nil
}
| @@ -0,0 +1,44 @@ | |||
| // Copyright 2018 Klaus Post. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| // Based on work Copyright (c) 2013, Yann Collet, released under BSD License. | |||
| package huff0 | |||
// byteReader provides a byte reader that reads
// little endian values from a byte stream.
// The input stream is manually advanced.
// The reader performs no bounds checks.
type byteReader struct {
	b   []byte // backing input
	off int    // current read offset into b
}
| // init will initialize the reader and set the input. | |||
| func (b *byteReader) init(in []byte) { | |||
| b.b = in | |||
| b.off = 0 | |||
| } | |||
| // Int32 returns a little endian int32 starting at current offset. | |||
| func (b byteReader) Int32() int32 { | |||
| v3 := int32(b.b[b.off+3]) | |||
| v2 := int32(b.b[b.off+2]) | |||
| v1 := int32(b.b[b.off+1]) | |||
| v0 := int32(b.b[b.off]) | |||
| return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0 | |||
| } | |||
| // Uint32 returns a little endian uint32 starting at current offset. | |||
| func (b byteReader) Uint32() uint32 { | |||
| v3 := uint32(b.b[b.off+3]) | |||
| v2 := uint32(b.b[b.off+2]) | |||
| v1 := uint32(b.b[b.off+1]) | |||
| v0 := uint32(b.b[b.off]) | |||
| return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0 | |||
| } | |||
// remain will return the number of bytes remaining.
// Value receiver: reading it never mutates the reader.
func (b byteReader) remain() int {
	return len(b.b) - b.off
}
| @@ -0,0 +1,730 @@ | |||
| package huff0 | |||
| import ( | |||
| "fmt" | |||
| "math" | |||
| "runtime" | |||
| "sync" | |||
| ) | |||
| // Compress1X will compress the input. | |||
| // The output can be decoded using Decompress1X. | |||
| // Supply a Scratch object. The scratch object contains state about re-use, | |||
| // So when sharing across independent encodes, be sure to set the re-use policy. | |||
| func Compress1X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) { | |||
| s, err = s.prepare(in) | |||
| if err != nil { | |||
| return nil, false, err | |||
| } | |||
| return compress(in, s, s.compress1X) | |||
| } | |||
| // Compress4X will compress the input. The input is split into 4 independent blocks | |||
| // and compressed similar to Compress1X. | |||
| // The output can be decoded using Decompress4X. | |||
| // Supply a Scratch object. The scratch object contains state about re-use, | |||
| // So when sharing across independent encodes, be sure to set the re-use policy. | |||
| func Compress4X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) { | |||
| s, err = s.prepare(in) | |||
| if err != nil { | |||
| return nil, false, err | |||
| } | |||
| if false { | |||
| // TODO: compress4Xp only slightly faster. | |||
| const parallelThreshold = 8 << 10 | |||
| if len(in) < parallelThreshold || runtime.GOMAXPROCS(0) == 1 { | |||
| return compress(in, s, s.compress4X) | |||
| } | |||
| return compress(in, s, s.compress4Xp) | |||
| } | |||
| return compress(in, s, s.compress4X) | |||
| } | |||
// compress is the shared driver for Compress1X/Compress4X. It builds or
// reuses a code table according to s.Reuse, invokes compressor (which
// appends encoded data to s.Out) and verifies the result actually saved
// enough space. Returns ErrIncompressible/ErrUseRLE when plain storage or
// RLE would be better.
func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)) (out []byte, reUsed bool, err error) {
	// Nuke previous table if we cannot reuse anyway.
	if s.Reuse == ReusePolicyNone {
		s.prevTable = s.prevTable[:0]
	}

	// Create histogram, if none was provided.
	maxCount := s.maxCount
	var canReuse = false
	if maxCount == 0 {
		maxCount, canReuse = s.countSimple(in)
	} else {
		canReuse = s.canUseTable(s.prevTable)
	}

	// We want the output size to be less than this:
	wantSize := len(in)
	if s.WantLogLess > 0 {
		wantSize -= wantSize >> s.WantLogLess
	}

	// Reset for next run.
	s.clearCount = true
	s.maxCount = 0
	if maxCount >= len(in) {
		if maxCount > len(in) {
			// Caller-supplied histogram claims more occurrences than bytes.
			return nil, false, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in))
		}
		if len(in) == 1 {
			return nil, false, ErrIncompressible
		}
		// One symbol, use RLE
		return nil, false, ErrUseRLE
	}
	if maxCount == 1 || maxCount < (len(in)>>7) {
		// Each symbol present maximum once or too well distributed.
		return nil, false, ErrIncompressible
	}
	if s.Reuse == ReusePolicyMust && !canReuse {
		// We must reuse, but we can't.
		return nil, false, ErrIncompressible
	}
	if (s.Reuse == ReusePolicyPrefer || s.Reuse == ReusePolicyMust) && canReuse {
		// Try the previous table: swap it in, compress, then restore the
		// current table regardless of outcome.
		keepTable := s.cTable
		keepTL := s.actualTableLog
		s.cTable = s.prevTable
		s.actualTableLog = s.prevTableLog
		s.Out, err = compressor(in)
		s.cTable = keepTable
		s.actualTableLog = keepTL
		if err == nil && len(s.Out) < wantSize {
			s.OutData = s.Out
			return s.Out, true, nil
		}
		if s.Reuse == ReusePolicyMust {
			return nil, false, ErrIncompressible
		}
		// Do not attempt to re-use later.
		s.prevTable = s.prevTable[:0]
	}

	// Calculate new table.
	err = s.buildCTable()
	if err != nil {
		return nil, false, err
	}

	// Disabled sanity check; kept for debugging.
	if false && !s.canUseTable(s.cTable) {
		panic("invalid table generated")
	}

	if s.Reuse == ReusePolicyAllow && canReuse {
		// Reuse only if the previous table beats header + new-table estimate.
		hSize := len(s.Out)
		oldSize := s.prevTable.estimateSize(s.count[:s.symbolLen])
		newSize := s.cTable.estimateSize(s.count[:s.symbolLen])
		if oldSize <= hSize+newSize || hSize+12 >= wantSize {
			// Retain cTable even if we re-use.
			keepTable := s.cTable
			keepTL := s.actualTableLog

			s.cTable = s.prevTable
			s.actualTableLog = s.prevTableLog
			s.Out, err = compressor(in)

			// Restore ctable.
			s.cTable = keepTable
			s.actualTableLog = keepTL
			if err != nil {
				return nil, false, err
			}
			if len(s.Out) >= wantSize {
				return nil, false, ErrIncompressible
			}
			s.OutData = s.Out
			return s.Out, true, nil
		}
	}

	// Use new table: serialize the table itself first.
	err = s.cTable.write(s)
	if err != nil {
		s.OutTable = nil
		return nil, false, err
	}
	s.OutTable = s.Out

	// Compress using new table
	s.Out, err = compressor(in)
	if err != nil {
		s.OutTable = nil
		return nil, false, err
	}
	if len(s.Out) >= wantSize {
		s.OutTable = nil
		return nil, false, ErrIncompressible
	}
	// Move current table into previous.
	s.prevTable, s.prevTableLog, s.cTable = s.cTable, s.actualTableLog, s.prevTable[:0]
	s.OutData = s.Out[len(s.OutTable):]
	return s.Out, false, nil
}
// EstimateSizes will estimate the data sizes: the serialized table size, the
// compressed data size with a freshly built table, and the data size when
// reusing the previous table. Sizes that cannot be determined are returned
// as -1 (reuseSz stays -1 when the previous table cannot encode the input).
func EstimateSizes(in []byte, s *Scratch) (tableSz, dataSz, reuseSz int, err error) {
	s, err = s.prepare(in)
	if err != nil {
		return 0, 0, 0, err
	}

	// Create histogram, if none was provided.
	tableSz, dataSz, reuseSz = -1, -1, -1
	maxCount := s.maxCount
	var canReuse = false
	if maxCount == 0 {
		maxCount, canReuse = s.countSimple(in)
	} else {
		canReuse = s.canUseTable(s.prevTable)
	}

	// We want the output size to be less than this:
	// NOTE(review): wantSize is computed but never read below — TODO confirm
	// whether a size check was intended here (compress() uses the same value).
	wantSize := len(in)
	if s.WantLogLess > 0 {
		wantSize -= wantSize >> s.WantLogLess
	}

	// Reset for next run.
	s.clearCount = true
	s.maxCount = 0
	if maxCount >= len(in) {
		if maxCount > len(in) {
			return 0, 0, 0, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in))
		}
		if len(in) == 1 {
			return 0, 0, 0, ErrIncompressible
		}
		// One symbol, use RLE
		return 0, 0, 0, ErrUseRLE
	}
	if maxCount == 1 || maxCount < (len(in)>>7) {
		// Each symbol present maximum once or too well distributed.
		return 0, 0, 0, ErrIncompressible
	}

	// Calculate new table.
	err = s.buildCTable()
	if err != nil {
		return 0, 0, 0, err
	}

	// Disabled sanity check; kept for debugging.
	if false && !s.canUseTable(s.cTable) {
		panic("invalid table generated")
	}

	tableSz, err = s.cTable.estTableSize(s)
	if err != nil {
		return 0, 0, 0, err
	}
	if canReuse {
		reuseSz = s.prevTable.estimateSize(s.count[:s.symbolLen])
	}
	dataSz = s.cTable.estimateSize(s.count[:s.symbolLen])

	// Restore
	return tableSz, dataSz, reuseSz, nil
}
| func (s *Scratch) compress1X(src []byte) ([]byte, error) { | |||
| return s.compress1xDo(s.Out, src) | |||
| } | |||
// compress1xDo Huffman-encodes src using the current cTable, appending the
// bitstream to dst. Symbols are written back-to-front: the trailing
// len(src)&3 bytes first, then the rest in groups of four from the end.
func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) {
	var bw = bitWriter{out: dst}

	// N is length divisible by 4.
	n := len(src)
	n -= n & 3
	cTable := s.cTable[:256]

	// Encode last bytes.
	for i := len(src) & 3; i > 0; i-- {
		bw.encSymbol(cTable, src[n+i-1])
	}
	n -= 4
	if s.actualTableLog <= 8 {
		// With codes of at most 8 bits, four symbols fit in 32 bits,
		// so one flush per group of four suffices.
		for ; n >= 0; n -= 4 {
			tmp := src[n : n+4]
			// tmp should be len 4
			bw.flush32()
			bw.encTwoSymbols(cTable, tmp[3], tmp[2])
			bw.encTwoSymbols(cTable, tmp[1], tmp[0])
		}
	} else {
		// Longer codes may overflow 32 bits after two symbols,
		// so flush between each pair.
		for ; n >= 0; n -= 4 {
			tmp := src[n : n+4]
			// tmp should be len 4
			bw.flush32()
			bw.encTwoSymbols(cTable, tmp[3], tmp[2])
			bw.flush32()
			bw.encTwoSymbols(cTable, tmp[1], tmp[0])
		}
	}
	err := bw.close()
	return bw.out, err
}
// sixZeros is the all-zero placeholder appended for the 6-byte jump table.
var sixZeros [6]byte

// compress4X splits src into 4 segments of up to segmentSize bytes, encodes
// each with compress1xDo, and prefixes the output with a 6-byte jump table
// holding the first three segments' compressed lengths as little-endian
// uint16 values (the fourth length is implied).
func (s *Scratch) compress4X(src []byte) ([]byte, error) {
	if len(src) < 12 {
		return nil, ErrIncompressible
	}
	segmentSize := (len(src) + 3) / 4

	// Add placeholder for output length
	offsetIdx := len(s.Out)
	s.Out = append(s.Out, sixZeros[:]...)

	for i := 0; i < 4; i++ {
		toDo := src
		if len(toDo) > segmentSize {
			toDo = toDo[:segmentSize]
		}
		src = src[len(toDo):]

		var err error
		idx := len(s.Out)
		s.Out, err = s.compress1xDo(s.Out, toDo)
		if err != nil {
			return nil, err
		}
		if len(s.Out)-idx > math.MaxUint16 {
			// We cannot store the size in the jump table
			return nil, ErrIncompressible
		}
		// Write compressed length as little endian before block.
		if i < 3 {
			// Last length is not written.
			length := len(s.Out) - idx
			s.Out[i*2+offsetIdx] = byte(length)
			s.Out[i*2+offsetIdx+1] = byte(length >> 8)
		}
	}

	return s.Out, nil
}
// compress4Xp will compress 4 streams using separate goroutines.
// Unlike compress4X it truncates s.Out to exactly the 6-byte jump table;
// each segment is encoded into s.tmpOut[i] and appended afterwards.
func (s *Scratch) compress4Xp(src []byte) ([]byte, error) {
	if len(src) < 12 {
		return nil, ErrIncompressible
	}
	// Add placeholder for output length
	s.Out = s.Out[:6]

	segmentSize := (len(src) + 3) / 4
	var wg sync.WaitGroup
	var errs [4]error
	wg.Add(4)
	for i := 0; i < 4; i++ {
		toDo := src
		if len(toDo) > segmentSize {
			toDo = toDo[:segmentSize]
		}
		src = src[len(toDo):]

		// Separate goroutine for each block.
		// toDo is re-declared each iteration so every goroutine captures its
		// own segment; the loop index is passed explicitly.
		go func(i int) {
			s.tmpOut[i], errs[i] = s.compress1xDo(s.tmpOut[i][:0], toDo)
			wg.Done()
		}(i)
	}
	wg.Wait()
	for i := 0; i < 4; i++ {
		if errs[i] != nil {
			return nil, errs[i]
		}
		o := s.tmpOut[i]
		if len(o) > math.MaxUint16 {
			// We cannot store the size in the jump table
			return nil, ErrIncompressible
		}
		// Write compressed length as little endian before block.
		if i < 3 {
			// Last length is not written.
			s.Out[i*2] = byte(len(o))
			s.Out[i*2+1] = byte(len(o) >> 8)
		}

		// Write output.
		s.Out = append(s.Out, o...)
	}
	return s.Out, nil
}
// countSimple will create a simple histogram in s.count.
// Returns the biggest count.
// Does not update s.clearCount.
// reuse reports whether every symbol present in the input has a code in
// s.prevTable; it is always false when no previous table exists.
func (s *Scratch) countSimple(in []byte) (max int, reuse bool) {
	reuse = true
	for _, v := range in {
		s.count[v]++
	}
	m := uint32(0)
	if len(s.prevTable) > 0 {
		// A previous table exists: find max count and symbolLen while also
		// checking every used symbol has a nonzero-length code in prevTable.
		for i, v := range s.count[:] {
			if v > m {
				m = v
			}
			if v > 0 {
				s.symbolLen = uint16(i) + 1
				if i >= len(s.prevTable) {
					reuse = false
				} else {
					if s.prevTable[i].nBits == 0 {
						reuse = false
					}
				}
			}
		}
		return int(m), reuse
	}
	// No previous table: only the max count and symbolLen are needed.
	for i, v := range s.count[:] {
		if v > m {
			m = v
		}
		if v > 0 {
			s.symbolLen = uint16(i) + 1
		}
	}
	return int(m), false
}
| func (s *Scratch) canUseTable(c cTable) bool { | |||
| if len(c) < int(s.symbolLen) { | |||
| return false | |||
| } | |||
| for i, v := range s.count[:s.symbolLen] { | |||
| if v != 0 && c[i].nBits == 0 { | |||
| return false | |||
| } | |||
| } | |||
| return true | |||
| } | |||
| //lint:ignore U1000 used for debugging | |||
| func (s *Scratch) validateTable(c cTable) bool { | |||
| if len(c) < int(s.symbolLen) { | |||
| return false | |||
| } | |||
| for i, v := range s.count[:s.symbolLen] { | |||
| if v != 0 { | |||
| if c[i].nBits == 0 { | |||
| return false | |||
| } | |||
| if c[i].nBits > s.actualTableLog { | |||
| return false | |||
| } | |||
| } | |||
| } | |||
| return true | |||
| } | |||
| // minTableLog provides the minimum logSize to safely represent a distribution. | |||
| func (s *Scratch) minTableLog() uint8 { | |||
| minBitsSrc := highBit32(uint32(s.br.remain())) + 1 | |||
| minBitsSymbols := highBit32(uint32(s.symbolLen-1)) + 2 | |||
| if minBitsSrc < minBitsSymbols { | |||
| return uint8(minBitsSrc) | |||
| } | |||
| return uint8(minBitsSymbols) | |||
| } | |||
| // optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog | |||
| func (s *Scratch) optimalTableLog() { | |||
| tableLog := s.TableLog | |||
| minBits := s.minTableLog() | |||
| maxBitsSrc := uint8(highBit32(uint32(s.br.remain()-1))) - 1 | |||
| if maxBitsSrc < tableLog { | |||
| // Accuracy can be reduced | |||
| tableLog = maxBitsSrc | |||
| } | |||
| if minBits > tableLog { | |||
| tableLog = minBits | |||
| } | |||
| // Need a minimum to safely represent all symbol values | |||
| if tableLog < minTablelog { | |||
| tableLog = minTablelog | |||
| } | |||
| if tableLog > tableLogMax { | |||
| tableLog = tableLogMax | |||
| } | |||
| s.actualTableLog = tableLog | |||
| } | |||
// cTableEntry is one symbol's Huffman code.
type cTableEntry struct {
	val   uint16 // code value assigned within its bit-length rank
	nBits uint8  // code length in bits; 0 means the symbol is unused
	// We have 8 bits extra
}

// huffNodesMask wraps node positions into the fixed-size node buffer
// (huffNodesLen is a power of two).
const huffNodesMask = huffNodesLen - 1
// buildCTable constructs the Huffman code table in s.cTable from the symbol
// histogram: it sorts symbols, builds the tree bottom-up over s.nodes,
// limits code lengths via setMaxHeight, and assigns canonical code values.
func (s *Scratch) buildCTable() error {
	s.optimalTableLog()
	s.huffSort()
	if cap(s.cTable) < maxSymbolValue+1 {
		s.cTable = make([]cTableEntry, s.symbolLen, maxSymbolValue+1)
	} else {
		s.cTable = s.cTable[:s.symbolLen]
		for i := range s.cTable {
			s.cTable[i] = cTableEntry{}
		}
	}

	var startNode = int16(s.symbolLen)
	nonNullRank := s.symbolLen - 1

	nodeNb := startNode
	huffNode := s.nodes[1 : huffNodesLen+1]

	// This overlays the slice above, but allows "-1" index lookups.
	// Different from reference implementation.
	huffNode0 := s.nodes[0 : huffNodesLen+1]

	// Skip trailing zero-count entries left by huffSort.
	for huffNode[nonNullRank].count == 0 {
		nonNullRank--
	}

	// Combine the two smallest leaves into the first internal node.
	lowS := int16(nonNullRank)
	nodeRoot := nodeNb + lowS - 1
	lowN := nodeNb
	huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count
	huffNode[lowS].parent, huffNode[lowS-1].parent = uint16(nodeNb), uint16(nodeNb)
	nodeNb++
	lowS -= 2
	for n := nodeNb; n <= nodeRoot; n++ {
		huffNode[n].count = 1 << 30
	}
	// fake entry, strong barrier
	huffNode0[0].count = 1 << 31

	// create parents
	for nodeNb <= nodeRoot {
		var n1, n2 int16
		if huffNode0[lowS+1].count < huffNode0[lowN+1].count {
			n1 = lowS
			lowS--
		} else {
			n1 = lowN
			lowN++
		}
		if huffNode0[lowS+1].count < huffNode0[lowN+1].count {
			n2 = lowS
			lowS--
		} else {
			n2 = lowN
			lowN++
		}

		huffNode[nodeNb].count = huffNode0[n1+1].count + huffNode0[n2+1].count
		huffNode0[n1+1].parent, huffNode0[n2+1].parent = uint16(nodeNb), uint16(nodeNb)
		nodeNb++
	}

	// distribute weights (unlimited tree height)
	huffNode[nodeRoot].nbBits = 0
	for n := nodeRoot - 1; n >= startNode; n-- {
		huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1
	}
	for n := uint16(0); n <= nonNullRank; n++ {
		huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1
	}
	// Enforce the maximum code length.
	s.actualTableLog = s.setMaxHeight(int(nonNullRank))
	maxNbBits := s.actualTableLog

	// fill result into tree (val, nbBits)
	if maxNbBits > tableLogMax {
		return fmt.Errorf("internal error: maxNbBits (%d) > tableLogMax (%d)", maxNbBits, tableLogMax)
	}
	var nbPerRank [tableLogMax + 1]uint16
	var valPerRank [16]uint16
	for _, v := range huffNode[:nonNullRank+1] {
		nbPerRank[v.nbBits]++
	}
	// determine starting value per rank
	{
		min := uint16(0)
		for n := maxNbBits; n > 0; n-- {
			// get starting value within each rank
			valPerRank[n] = min
			min += nbPerRank[n]
			min >>= 1
		}
	}

	// push nbBits per symbol, symbol order
	for _, v := range huffNode[:nonNullRank+1] {
		s.cTable[v.symbol].nBits = v.nbBits
	}

	// assign value within rank, symbol order
	t := s.cTable[:s.symbolLen]
	for n, val := range t {
		nbits := val.nBits & 15
		v := valPerRank[nbits]
		t[n].val = v
		valPerRank[nbits] = v + 1
	}
	return nil
}
// huffSort will sort symbols, decreasing order.
// It bucket-sorts symbols by log2(count+1) into s.nodes, then insertion-sorts
// within each bucket so the final order is by decreasing count.
func (s *Scratch) huffSort() {
	type rankPos struct {
		base    uint32 // start of this bucket in the node buffer
		current uint32 // next free slot in this bucket
	}

	// Clear nodes
	nodes := s.nodes[:huffNodesLen+1]
	s.nodes = nodes
	nodes = nodes[1 : huffNodesLen+1]

	// Sort into buckets based on length of symbol count.
	var rank [32]rankPos
	for _, v := range s.count[:s.symbolLen] {
		r := highBit32(v+1) & 31
		rank[r].base++
	}

	// maxBitLength is log2(BlockSizeMax) + 1
	const maxBitLength = 18 + 1
	// Suffix-sum the bucket sizes so rank[r].base is the end offset of bucket r.
	for n := maxBitLength; n > 0; n-- {
		rank[n-1].base += rank[n].base
	}
	for n := range rank[:maxBitLength] {
		rank[n].current = rank[n].base
	}
	for n, c := range s.count[:s.symbolLen] {
		// +1 maps a symbol to the bucket whose region starts at rank[r].base.
		r := (highBit32(c+1) + 1) & 31
		pos := rank[r].current
		rank[r].current++
		prev := nodes[(pos-1)&huffNodesMask]
		// Insertion sort within the bucket: shift smaller counts up.
		for pos > rank[r].base && c > prev.count {
			nodes[pos&huffNodesMask] = prev
			pos--
			prev = nodes[(pos-1)&huffNodesMask]
		}
		nodes[pos&huffNodesMask] = nodeElt{count: c, symbol: byte(n)}
	}
}
// setMaxHeight limits code lengths in the node buffer to s.actualTableLog
// bits, redistributing the cost of the shortened codes among shorter ranks,
// and returns the resulting maximum bit length. Appears to mirror the
// reference implementation's HUF_setMaxHeight (see HUF_MAX_TABLELOG note
// below) — structure kept intact.
func (s *Scratch) setMaxHeight(lastNonNull int) uint8 {
	maxNbBits := s.actualTableLog
	huffNode := s.nodes[1 : huffNodesLen+1]
	//huffNode = huffNode[: huffNodesLen]

	largestBits := huffNode[lastNonNull].nbBits

	// early exit : no elt > maxNbBits
	if largestBits <= maxNbBits {
		return largestBits
	}
	totalCost := int(0)
	baseCost := int(1) << (largestBits - maxNbBits)
	n := uint32(lastNonNull)

	// Clamp every over-long code to maxNbBits, accumulating the Kraft cost.
	for huffNode[n].nbBits > maxNbBits {
		totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits))
		huffNode[n].nbBits = maxNbBits
		n--
	}
	// n stops at huffNode[n].nbBits <= maxNbBits
	for huffNode[n].nbBits == maxNbBits {
		n--
	}
	// n end at index of smallest symbol using < maxNbBits

	// renorm totalCost
	totalCost >>= largestBits - maxNbBits /* note : totalCost is necessarily a multiple of baseCost */

	// repay normalized cost
	{
		const noSymbol = 0xF0F0F0F0
		var rankLast [tableLogMax + 2]uint32

		for i := range rankLast[:] {
			rankLast[i] = noSymbol
		}

		// Get pos of last (smallest) symbol per rank
		{
			currentNbBits := maxNbBits
			for pos := int(n); pos >= 0; pos-- {
				if huffNode[pos].nbBits >= currentNbBits {
					continue
				}
				currentNbBits = huffNode[pos].nbBits // < maxNbBits
				rankLast[maxNbBits-currentNbBits] = uint32(pos)
			}
		}

		for totalCost > 0 {
			nBitsToDecrease := uint8(highBit32(uint32(totalCost))) + 1

			// Find the cheapest rank whose smallest symbol can absorb one bit.
			for ; nBitsToDecrease > 1; nBitsToDecrease-- {
				highPos := rankLast[nBitsToDecrease]
				lowPos := rankLast[nBitsToDecrease-1]
				if highPos == noSymbol {
					continue
				}
				if lowPos == noSymbol {
					break
				}
				highTotal := huffNode[highPos].count
				lowTotal := 2 * huffNode[lowPos].count
				if highTotal <= lowTotal {
					break
				}
			}
			// only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !)
			// HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary
			// FIXME: try to remove
			for (nBitsToDecrease <= tableLogMax) && (rankLast[nBitsToDecrease] == noSymbol) {
				nBitsToDecrease++
			}
			totalCost -= 1 << (nBitsToDecrease - 1)
			if rankLast[nBitsToDecrease-1] == noSymbol {
				// this rank is no longer empty
				rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease]
			}
			huffNode[rankLast[nBitsToDecrease]].nbBits++
			if rankLast[nBitsToDecrease] == 0 {
				/* special case, reached largest symbol */
				rankLast[nBitsToDecrease] = noSymbol
			} else {
				rankLast[nBitsToDecrease]--
				if huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease {
					rankLast[nBitsToDecrease] = noSymbol /* this rank is now empty */
				}
			}
		}

		for totalCost < 0 { /* Sometimes, cost correction overshoot */
			if rankLast[1] == noSymbol { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */
				for huffNode[n].nbBits == maxNbBits {
					n--
				}
				huffNode[n+1].nbBits--
				rankLast[1] = n + 1
				totalCost++
				continue
			}
			huffNode[rankLast[1]+1].nbBits--
			rankLast[1]++
			totalCost++
		}
	}
	return maxNbBits
}
// nodeElt is a Huffman tree node used during table construction.
type nodeElt struct {
	count  uint32 // symbol frequency, or combined weight for internal nodes
	parent uint16 // index of the parent node within the node buffer
	symbol byte   // symbol value (meaningful for leaf nodes)
	nbBits uint8  // assigned code length in bits
}
| @@ -0,0 +1,222 @@ | |||
| //go:build amd64 && !appengine && !noasm && gc | |||
| // +build amd64,!appengine,!noasm,gc | |||
// This file contains the specialisation of Decoder.Decompress4X
// and Decoder.Decompress1X that use an asm implementation of their main loops.
| package huff0 | |||
| import ( | |||
| "errors" | |||
| "fmt" | |||
| "github.com/klauspost/compress/internal/cpuinfo" | |||
| ) | |||
// decompress4x_main_loop_x86 is an x86 assembler implementation
// of Decompress4X when tablelog > 8.
//
//go:noescape
func decompress4x_main_loop_amd64(ctx *decompress4xContext)

// decompress4x_8b_loop_x86 is an x86 assembler implementation
// of Decompress4X when tablelog <= 8 which decodes 4 entries
// per loop.
//
//go:noescape
func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext)

// fallback8BitSize is the size where using Go version is faster.
const fallback8BitSize = 800
// decompress4xContext is the argument/result block shared with the 4X
// assembly main loops. The assembly reads and writes these fields by byte
// offset, so the field order and types must not be changed.
type decompress4xContext struct {
	pbr      *[4]bitReaderShifted // the four stream readers; updated by asm
	peekBits uint8                // (64 - actualTableLog) & 63, see peekBitsFast
	out      *byte                // start of the destination buffer
	dstEvery int                  // distance between each stream's output region
	tbl      *dEntrySingle        // decode table base pointer
	decoded  int                  // set by asm: total bytes decoded
	limit    *byte                // stop address for the first stream's writes
}
// Decompress4X will decompress a 4X encoded stream.
// The length of the supplied input must match the end of a block exactly.
// The *capacity* of the dst slice must match the destination size of
// the uncompressed data exactly.
// The bulk of decoding runs in assembly; each stream's tail is finished in Go.
func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
	if len(d.dt.single) == 0 {
		return nil, errors.New("no table loaded")
	}
	if len(src) < 6+(4*1) {
		return nil, errors.New("input too small")
	}

	use8BitTables := d.actualTableLog <= 8
	if cap(dst) < fallback8BitSize && use8BitTables {
		// Small outputs are faster in the pure-Go 8-bit decoder.
		return d.decompress4X8bit(dst, src)
	}

	var br [4]bitReaderShifted
	// Decode "jump table"
	// The first 6 bytes hold three little-endian uint16 stream lengths;
	// the fourth stream runs to the end of src.
	start := 6
	for i := 0; i < 3; i++ {
		length := int(src[i*2]) | (int(src[i*2+1]) << 8)
		if start+length >= len(src) {
			return nil, errors.New("truncated input (or invalid offset)")
		}
		err := br[i].init(src[start : start+length])
		if err != nil {
			return nil, err
		}
		start += length
	}
	err := br[3].init(src[start:])
	if err != nil {
		return nil, err
	}

	// destination, offset to match first output
	dstSize := cap(dst)
	dst = dst[:dstSize]
	out := dst
	dstEvery := (dstSize + 3) / 4

	const tlSize = 1 << tableLogMax
	const tlMask = tlSize - 1
	single := d.dt.single[:tlSize]

	var decoded int

	// Only enter the assembly loop when all streams have enough input and
	// there is enough output room for its wider writes.
	if len(out) > 4*4 && !(br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4) {
		ctx := decompress4xContext{
			pbr:      &br,
			peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast()
			out:      &out[0],
			dstEvery: dstEvery,
			tbl:      &single[0],
			limit:    &out[dstEvery-4], // Always stop decoding when first buffer gets here to avoid writing OOB on last.
		}
		if use8BitTables {
			decompress4x_8b_main_loop_amd64(&ctx)
		} else {
			decompress4x_main_loop_amd64(&ctx)
		}

		decoded = ctx.decoded
		out = out[decoded/4:]
	}

	// Decode remaining.
	// Finish each stream's tail in Go, bounds-checked.
	remainBytes := dstEvery - (decoded / 4)
	for i := range br {
		offset := dstEvery * i
		endsAt := offset + remainBytes
		if endsAt > len(out) {
			endsAt = len(out)
		}
		br := &br[i]
		bitsLeft := br.remaining()
		for bitsLeft > 0 {
			br.fill()
			if offset >= endsAt {
				return nil, errors.New("corruption detected: stream overrun 4")
			}

			// Read value and increment offset.
			val := br.peekBitsFast(d.actualTableLog)
			v := single[val&tlMask].entry
			nBits := uint8(v)
			br.advance(nBits)
			bitsLeft -= uint(nBits)
			out[offset] = uint8(v >> 8)
			offset++
		}
		if offset != endsAt {
			return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
		}
		decoded += offset - dstEvery*i
		err = br.close()
		if err != nil {
			return nil, err
		}
	}
	if dstSize != decoded {
		return nil, errors.New("corruption detected: short output block")
	}
	return dst, nil
}
// decompress1x_main_loop_amd64 is an x86 assembler implementation
// of Decompress1X when tablelog > 8.
//
//go:noescape
func decompress1x_main_loop_amd64(ctx *decompress1xContext)

// decompress1x_main_loop_bmi2 is an x86 with BMI2 assembler implementation
// of Decompress1X when tablelog > 8.
//
//go:noescape
func decompress1x_main_loop_bmi2(ctx *decompress1xContext)

// decompress1xContext is the argument/result block shared with the 1X
// assembly main loops. Field order/types are read by byte offset from
// assembly; do not rearrange.
type decompress1xContext struct {
	pbr      *bitReaderShifted // stream reader; updated by asm
	peekBits uint8             // (64 - actualTableLog) & 63
	out      *byte             // start of the destination buffer
	outCap   int               // destination capacity
	tbl      *dEntrySingle     // decode table base pointer
	decoded  int               // set by asm: bytes decoded, or the error value below
}

// Error reported by asm implementations
// NOTE(review): identifier misspells "exceeded"; it is used elsewhere in this
// file, so it is left as-is here.
const error_max_decoded_size_exeeded = -1
// Decompress1X will decompress a 1X encoded stream.
// The cap of the output buffer will be the maximum decompressed size.
// The length of the supplied input must match the end of a block exactly.
// The bulk of decoding runs in assembly (BMI2 variant when available);
// the stream tail is finished in Go.
func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
	if len(d.dt.single) == 0 {
		return nil, errors.New("no table loaded")
	}
	var br bitReaderShifted
	err := br.init(src)
	if err != nil {
		return dst, err
	}
	maxDecodedSize := cap(dst)
	dst = dst[:maxDecodedSize]

	const tlSize = 1 << tableLogMax
	const tlMask = tlSize - 1

	if maxDecodedSize >= 4 {
		ctx := decompress1xContext{
			pbr:      &br,
			out:      &dst[0],
			outCap:   maxDecodedSize,
			peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast()
			tbl:      &d.dt.single[0],
		}

		if cpuinfo.HasBMI2() {
			decompress1x_main_loop_bmi2(&ctx)
		} else {
			decompress1x_main_loop_amd64(&ctx)
		}
		if ctx.decoded == error_max_decoded_size_exeeded {
			return nil, ErrMaxDecodedSizeExceeded
		}

		dst = dst[:ctx.decoded]
	}

	// br < 8, so uint8 is fine
	bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead
	for bitsLeft > 0 {
		br.fill()
		if len(dst) >= maxDecodedSize {
			br.close()
			return nil, ErrMaxDecodedSizeExceeded
		}
		v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask]
		nBits := uint8(v.entry)
		br.advance(nBits)
		bitsLeft -= nBits
		dst = append(dst, uint8(v.entry>>8))
	}
	return dst, br.close()
}
| @@ -0,0 +1,847 @@ | |||
| // Code generated by command: go run gen.go -out ../decompress_amd64.s -pkg=huff0. DO NOT EDIT. | |||
| //go:build amd64 && !appengine && !noasm && gc | |||
| // +build amd64,!appengine,!noasm,gc | |||
| // func decompress4x_main_loop_amd64(ctx *decompress4xContext) | |||
| TEXT ·decompress4x_main_loop_amd64(SB), $0-8 | |||
| XORQ DX, DX | |||
| // Preload values | |||
| MOVQ ctx+0(FP), AX | |||
| MOVBQZX 8(AX), DI | |||
| MOVQ 16(AX), SI | |||
| MOVQ 48(AX), BX | |||
| MOVQ 24(AX), R9 | |||
| MOVQ 32(AX), R10 | |||
| MOVQ (AX), R11 | |||
| // Main loop | |||
| main_loop: | |||
| MOVQ SI, R8 | |||
| CMPQ R8, BX | |||
| SETGE DL | |||
| // br0.fillFast32() | |||
| MOVQ 32(R11), R12 | |||
| MOVBQZX 40(R11), R13 | |||
| CMPQ R13, $0x20 | |||
| JBE skip_fill0 | |||
| MOVQ 24(R11), AX | |||
| SUBQ $0x20, R13 | |||
| SUBQ $0x04, AX | |||
| MOVQ (R11), R14 | |||
| // b.value |= uint64(low) << (b.bitsRead & 63) | |||
| MOVL (AX)(R14*1), R14 | |||
| MOVQ R13, CX | |||
| SHLQ CL, R14 | |||
| MOVQ AX, 24(R11) | |||
| ORQ R14, R12 | |||
| // exhausted = exhausted || (br0.off < 4) | |||
| CMPQ AX, $0x04 | |||
| SETLT AL | |||
| ORB AL, DL | |||
| skip_fill0: | |||
| // val0 := br0.peekTopBits(peekBits) | |||
| MOVQ R12, R14 | |||
| MOVQ DI, CX | |||
| SHRQ CL, R14 | |||
| // v0 := table[val0&mask] | |||
| MOVW (R10)(R14*2), CX | |||
| // br0.advance(uint8(v0.entry) | |||
| MOVB CH, AL | |||
| SHLQ CL, R12 | |||
| ADDB CL, R13 | |||
| // val1 := br0.peekTopBits(peekBits) | |||
| MOVQ DI, CX | |||
| MOVQ R12, R14 | |||
| SHRQ CL, R14 | |||
| // v1 := table[val1&mask] | |||
| MOVW (R10)(R14*2), CX | |||
| // br0.advance(uint8(v1.entry)) | |||
| MOVB CH, AH | |||
| SHLQ CL, R12 | |||
| ADDB CL, R13 | |||
| // these two writes get coalesced | |||
| // out[id * dstEvery + 0] = uint8(v0.entry >> 8) | |||
| // out[id * dstEvery + 1] = uint8(v1.entry >> 8) | |||
| MOVW AX, (R8) | |||
| // update the bitreader structure | |||
| MOVQ R12, 32(R11) | |||
| MOVB R13, 40(R11) | |||
| ADDQ R9, R8 | |||
| // br1.fillFast32() | |||
| MOVQ 80(R11), R12 | |||
| MOVBQZX 88(R11), R13 | |||
| CMPQ R13, $0x20 | |||
| JBE skip_fill1 | |||
| MOVQ 72(R11), AX | |||
| SUBQ $0x20, R13 | |||
| SUBQ $0x04, AX | |||
| MOVQ 48(R11), R14 | |||
| // b.value |= uint64(low) << (b.bitsRead & 63) | |||
| MOVL (AX)(R14*1), R14 | |||
| MOVQ R13, CX | |||
| SHLQ CL, R14 | |||
| MOVQ AX, 72(R11) | |||
| ORQ R14, R12 | |||
| // exhausted = exhausted || (br1.off < 4) | |||
| CMPQ AX, $0x04 | |||
| SETLT AL | |||
| ORB AL, DL | |||
| skip_fill1: | |||
| // val0 := br1.peekTopBits(peekBits) | |||
| MOVQ R12, R14 | |||
| MOVQ DI, CX | |||
| SHRQ CL, R14 | |||
| // v0 := table[val0&mask] | |||
| MOVW (R10)(R14*2), CX | |||
| // br1.advance(uint8(v0.entry) | |||
| MOVB CH, AL | |||
| SHLQ CL, R12 | |||
| ADDB CL, R13 | |||
| // val1 := br1.peekTopBits(peekBits) | |||
| MOVQ DI, CX | |||
| MOVQ R12, R14 | |||
| SHRQ CL, R14 | |||
| // v1 := table[val1&mask] | |||
| MOVW (R10)(R14*2), CX | |||
| // br1.advance(uint8(v1.entry)) | |||
| MOVB CH, AH | |||
| SHLQ CL, R12 | |||
| ADDB CL, R13 | |||
| // these two writes get coalesced | |||
| // out[id * dstEvery + 0] = uint8(v0.entry >> 8) | |||
| // out[id * dstEvery + 1] = uint8(v1.entry >> 8) | |||
| MOVW AX, (R8) | |||
| // update the bitreader structure | |||
| MOVQ R12, 80(R11) | |||
| MOVB R13, 88(R11) | |||
| ADDQ R9, R8 | |||
| // br2.fillFast32() | |||
| MOVQ 128(R11), R12 | |||
| MOVBQZX 136(R11), R13 | |||
| CMPQ R13, $0x20 | |||
| JBE skip_fill2 | |||
| MOVQ 120(R11), AX | |||
| SUBQ $0x20, R13 | |||
| SUBQ $0x04, AX | |||
| MOVQ 96(R11), R14 | |||
| // b.value |= uint64(low) << (b.bitsRead & 63) | |||
| MOVL (AX)(R14*1), R14 | |||
| MOVQ R13, CX | |||
| SHLQ CL, R14 | |||
| MOVQ AX, 120(R11) | |||
| ORQ R14, R12 | |||
| // exhausted = exhausted || (br2.off < 4) | |||
| CMPQ AX, $0x04 | |||
| SETLT AL | |||
| ORB AL, DL | |||
| skip_fill2: | |||
| // val0 := br2.peekTopBits(peekBits) | |||
| MOVQ R12, R14 | |||
| MOVQ DI, CX | |||
| SHRQ CL, R14 | |||
| // v0 := table[val0&mask] | |||
| MOVW (R10)(R14*2), CX | |||
| // br2.advance(uint8(v0.entry) | |||
| MOVB CH, AL | |||
| SHLQ CL, R12 | |||
| ADDB CL, R13 | |||
| // val1 := br2.peekTopBits(peekBits) | |||
| MOVQ DI, CX | |||
| MOVQ R12, R14 | |||
| SHRQ CL, R14 | |||
| // v1 := table[val1&mask] | |||
| MOVW (R10)(R14*2), CX | |||
| // br2.advance(uint8(v1.entry)) | |||
| MOVB CH, AH | |||
| SHLQ CL, R12 | |||
| ADDB CL, R13 | |||
| // these two writes get coalesced | |||
| // out[id * dstEvery + 0] = uint8(v0.entry >> 8) | |||
| // out[id * dstEvery + 1] = uint8(v1.entry >> 8) | |||
| MOVW AX, (R8) | |||
| // update the bitreader structure | |||
| MOVQ R12, 128(R11) | |||
| MOVB R13, 136(R11) | |||
| ADDQ R9, R8 | |||
| // br3.fillFast32() | |||
| MOVQ 176(R11), R12 | |||
| MOVBQZX 184(R11), R13 | |||
| CMPQ R13, $0x20 | |||
| JBE skip_fill3 | |||
| MOVQ 168(R11), AX | |||
| SUBQ $0x20, R13 | |||
| SUBQ $0x04, AX | |||
| MOVQ 144(R11), R14 | |||
| // b.value |= uint64(low) << (b.bitsRead & 63) | |||
| MOVL (AX)(R14*1), R14 | |||
| MOVQ R13, CX | |||
| SHLQ CL, R14 | |||
| MOVQ AX, 168(R11) | |||
| ORQ R14, R12 | |||
| // exhausted = exhausted || (br3.off < 4) | |||
| CMPQ AX, $0x04 | |||
| SETLT AL | |||
| ORB AL, DL | |||
| skip_fill3: | |||
| // val0 := br3.peekTopBits(peekBits) | |||
| MOVQ R12, R14 | |||
| MOVQ DI, CX | |||
| SHRQ CL, R14 | |||
| // v0 := table[val0&mask] | |||
| MOVW (R10)(R14*2), CX | |||
| // br3.advance(uint8(v0.entry) | |||
| MOVB CH, AL | |||
| SHLQ CL, R12 | |||
| ADDB CL, R13 | |||
| // val1 := br3.peekTopBits(peekBits) | |||
| MOVQ DI, CX | |||
| MOVQ R12, R14 | |||
| SHRQ CL, R14 | |||
| // v1 := table[val1&mask] | |||
| MOVW (R10)(R14*2), CX | |||
| // br3.advance(uint8(v1.entry)) | |||
| MOVB CH, AH | |||
| SHLQ CL, R12 | |||
| ADDB CL, R13 | |||
| // these two writes get coalesced | |||
| // out[id * dstEvery + 0] = uint8(v0.entry >> 8) | |||
| // out[id * dstEvery + 1] = uint8(v1.entry >> 8) | |||
| MOVW AX, (R8) | |||
| // update the bitreader structure | |||
| MOVQ R12, 176(R11) | |||
| MOVB R13, 184(R11) | |||
| ADDQ $0x02, SI | |||
| TESTB DL, DL | |||
| JZ main_loop | |||
| MOVQ ctx+0(FP), AX | |||
| SUBQ 16(AX), SI | |||
| SHLQ $0x02, SI | |||
| MOVQ SI, 40(AX) | |||
| RET | |||
// func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext)
//
// Decodes four interleaved Huffman streams, four symbols per stream per
// iteration, using a table with codes of at most 8 bits (so two decodes
// fit between refills). DL accumulates the per-stream "exhausted" flag;
// the loop exits once any stream runs low or the output limit is reached.
TEXT ·decompress4x_8b_main_loop_amd64(SB), $0-8
	XORQ DX, DX
	// Preload values
	MOVQ ctx+0(FP), CX
	MOVBQZX 8(CX), DI
	MOVQ 16(CX), BX
	MOVQ 48(CX), SI
	MOVQ 24(CX), R9
	MOVQ 32(CX), R10
	MOVQ (CX), R11
	// Main loop
main_loop:
	MOVQ BX, R8
	CMPQ R8, SI
	SETGE DL
	// br0.fillFast32()
	MOVQ 32(R11), R12
	MOVBQZX 40(R11), R13
	CMPQ R13, $0x20
	JBE skip_fill0
	MOVQ 24(R11), R14
	SUBQ $0x20, R13
	SUBQ $0x04, R14
	MOVQ (R11), R15
	// b.value |= uint64(low) << (b.bitsRead & 63)
	MOVL (R14)(R15*1), R15
	MOVQ R13, CX
	SHLQ CL, R15
	MOVQ R14, 24(R11)
	ORQ R15, R12
	// exhausted = exhausted || (br0.off < 4)
	CMPQ R14, $0x04
	SETLT AL
	ORB AL, DL
skip_fill0:
	// val0 := br0.peekTopBits(peekBits)
	MOVQ R12, R14
	MOVQ DI, CX
	SHRQ CL, R14
	// v0 := table[val0&mask]
	MOVW (R10)(R14*2), CX
	// br0.advance(uint8(v0.entry))
	MOVB CH, AL
	SHLQ CL, R12
	ADDB CL, R13
	// val1 := br0.peekTopBits(peekBits)
	MOVQ R12, R14
	MOVQ DI, CX
	SHRQ CL, R14
	// v1 := table[val1&mask]
	MOVW (R10)(R14*2), CX
	// br0.advance(uint8(v1.entry))
	MOVB CH, AH
	SHLQ CL, R12
	ADDB CL, R13
	BSWAPL AX
	// val2 := br0.peekTopBits(peekBits)
	MOVQ R12, R14
	MOVQ DI, CX
	SHRQ CL, R14
	// v2 := table[val2&mask]
	MOVW (R10)(R14*2), CX
	// br0.advance(uint8(v2.entry))
	MOVB CH, AH
	SHLQ CL, R12
	ADDB CL, R13
	// val3 := br0.peekTopBits(peekBits)
	MOVQ R12, R14
	MOVQ DI, CX
	SHRQ CL, R14
	// v3 := table[val3&mask]
	MOVW (R10)(R14*2), CX
	// br0.advance(uint8(v3.entry))
	MOVB CH, AL
	SHLQ CL, R12
	ADDB CL, R13
	BSWAPL AX
	// these four writes get coalesced
	// out[id * dstEvery + 0] = uint8(v0.entry >> 8)
	// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
	// out[id * dstEvery + 2] = uint8(v2.entry >> 8)
	// out[id * dstEvery + 3] = uint8(v3.entry >> 8)
	MOVL AX, (R8)
	// update the bitreader structure
	MOVQ R12, 32(R11)
	MOVB R13, 40(R11)
	ADDQ R9, R8
	// br1.fillFast32()
	MOVQ 80(R11), R12
	MOVBQZX 88(R11), R13
	CMPQ R13, $0x20
	JBE skip_fill1
	MOVQ 72(R11), R14
	SUBQ $0x20, R13
	SUBQ $0x04, R14
	MOVQ 48(R11), R15
	// b.value |= uint64(low) << (b.bitsRead & 63)
	MOVL (R14)(R15*1), R15
	MOVQ R13, CX
	SHLQ CL, R15
	MOVQ R14, 72(R11)
	ORQ R15, R12
	// exhausted = exhausted || (br1.off < 4)
	CMPQ R14, $0x04
	SETLT AL
	ORB AL, DL
skip_fill1:
	// val0 := br1.peekTopBits(peekBits)
	MOVQ R12, R14
	MOVQ DI, CX
	SHRQ CL, R14
	// v0 := table[val0&mask]
	MOVW (R10)(R14*2), CX
	// br1.advance(uint8(v0.entry))
	MOVB CH, AL
	SHLQ CL, R12
	ADDB CL, R13
	// val1 := br1.peekTopBits(peekBits)
	MOVQ R12, R14
	MOVQ DI, CX
	SHRQ CL, R14
	// v1 := table[val1&mask]
	MOVW (R10)(R14*2), CX
	// br1.advance(uint8(v1.entry))
	MOVB CH, AH
	SHLQ CL, R12
	ADDB CL, R13
	BSWAPL AX
	// val2 := br1.peekTopBits(peekBits)
	MOVQ R12, R14
	MOVQ DI, CX
	SHRQ CL, R14
	// v2 := table[val2&mask]
	MOVW (R10)(R14*2), CX
	// br1.advance(uint8(v2.entry))
	MOVB CH, AH
	SHLQ CL, R12
	ADDB CL, R13
	// val3 := br1.peekTopBits(peekBits)
	MOVQ R12, R14
	MOVQ DI, CX
	SHRQ CL, R14
	// v3 := table[val3&mask]
	MOVW (R10)(R14*2), CX
	// br1.advance(uint8(v3.entry))
	MOVB CH, AL
	SHLQ CL, R12
	ADDB CL, R13
	BSWAPL AX
	// these four writes get coalesced
	// out[id * dstEvery + 0] = uint8(v0.entry >> 8)
	// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
	// out[id * dstEvery + 2] = uint8(v2.entry >> 8)
	// out[id * dstEvery + 3] = uint8(v3.entry >> 8)
	MOVL AX, (R8)
	// update the bitreader structure
	MOVQ R12, 80(R11)
	MOVB R13, 88(R11)
	ADDQ R9, R8
	// br2.fillFast32()
	MOVQ 128(R11), R12
	MOVBQZX 136(R11), R13
	CMPQ R13, $0x20
	JBE skip_fill2
	MOVQ 120(R11), R14
	SUBQ $0x20, R13
	SUBQ $0x04, R14
	MOVQ 96(R11), R15
	// b.value |= uint64(low) << (b.bitsRead & 63)
	MOVL (R14)(R15*1), R15
	MOVQ R13, CX
	SHLQ CL, R15
	MOVQ R14, 120(R11)
	ORQ R15, R12
	// exhausted = exhausted || (br2.off < 4)
	CMPQ R14, $0x04
	SETLT AL
	ORB AL, DL
skip_fill2:
	// val0 := br2.peekTopBits(peekBits)
	MOVQ R12, R14
	MOVQ DI, CX
	SHRQ CL, R14
	// v0 := table[val0&mask]
	MOVW (R10)(R14*2), CX
	// br2.advance(uint8(v0.entry))
	MOVB CH, AL
	SHLQ CL, R12
	ADDB CL, R13
	// val1 := br2.peekTopBits(peekBits)
	MOVQ R12, R14
	MOVQ DI, CX
	SHRQ CL, R14
	// v1 := table[val1&mask]
	MOVW (R10)(R14*2), CX
	// br2.advance(uint8(v1.entry))
	MOVB CH, AH
	SHLQ CL, R12
	ADDB CL, R13
	BSWAPL AX
	// val2 := br2.peekTopBits(peekBits)
	MOVQ R12, R14
	MOVQ DI, CX
	SHRQ CL, R14
	// v2 := table[val2&mask]
	MOVW (R10)(R14*2), CX
	// br2.advance(uint8(v2.entry))
	MOVB CH, AH
	SHLQ CL, R12
	ADDB CL, R13
	// val3 := br2.peekTopBits(peekBits)
	MOVQ R12, R14
	MOVQ DI, CX
	SHRQ CL, R14
	// v3 := table[val3&mask]
	MOVW (R10)(R14*2), CX
	// br2.advance(uint8(v3.entry))
	MOVB CH, AL
	SHLQ CL, R12
	ADDB CL, R13
	BSWAPL AX
	// these four writes get coalesced
	// out[id * dstEvery + 0] = uint8(v0.entry >> 8)
	// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
	// out[id * dstEvery + 2] = uint8(v2.entry >> 8)
	// out[id * dstEvery + 3] = uint8(v3.entry >> 8)
	MOVL AX, (R8)
	// update the bitreader structure
	MOVQ R12, 128(R11)
	MOVB R13, 136(R11)
	ADDQ R9, R8
	// br3.fillFast32()
	MOVQ 176(R11), R12
	MOVBQZX 184(R11), R13
	CMPQ R13, $0x20
	JBE skip_fill3
	MOVQ 168(R11), R14
	SUBQ $0x20, R13
	SUBQ $0x04, R14
	MOVQ 144(R11), R15
	// b.value |= uint64(low) << (b.bitsRead & 63)
	MOVL (R14)(R15*1), R15
	MOVQ R13, CX
	SHLQ CL, R15
	MOVQ R14, 168(R11)
	ORQ R15, R12
	// exhausted = exhausted || (br3.off < 4)
	CMPQ R14, $0x04
	SETLT AL
	ORB AL, DL
skip_fill3:
	// val0 := br3.peekTopBits(peekBits)
	MOVQ R12, R14
	MOVQ DI, CX
	SHRQ CL, R14
	// v0 := table[val0&mask]
	MOVW (R10)(R14*2), CX
	// br3.advance(uint8(v0.entry))
	MOVB CH, AL
	SHLQ CL, R12
	ADDB CL, R13
	// val1 := br3.peekTopBits(peekBits)
	MOVQ R12, R14
	MOVQ DI, CX
	SHRQ CL, R14
	// v1 := table[val1&mask]
	MOVW (R10)(R14*2), CX
	// br3.advance(uint8(v1.entry))
	MOVB CH, AH
	SHLQ CL, R12
	ADDB CL, R13
	BSWAPL AX
	// val2 := br3.peekTopBits(peekBits)
	MOVQ R12, R14
	MOVQ DI, CX
	SHRQ CL, R14
	// v2 := table[val2&mask]
	MOVW (R10)(R14*2), CX
	// br3.advance(uint8(v2.entry))
	MOVB CH, AH
	SHLQ CL, R12
	ADDB CL, R13
	// val3 := br3.peekTopBits(peekBits)
	MOVQ R12, R14
	MOVQ DI, CX
	SHRQ CL, R14
	// v3 := table[val3&mask]
	MOVW (R10)(R14*2), CX
	// br3.advance(uint8(v3.entry))
	MOVB CH, AL
	SHLQ CL, R12
	ADDB CL, R13
	BSWAPL AX
	// these four writes get coalesced
	// out[id * dstEvery + 0] = uint8(v0.entry >> 8)
	// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
	// out[id * dstEvery + 2] = uint8(v2.entry >> 8)
	// out[id * dstEvery + 3] = uint8(v3.entry >> 8)
	MOVL AX, (R8)
	// update the bitreader structure
	MOVQ R12, 176(R11)
	MOVB R13, 184(R11)
	ADDQ $0x04, BX
	TESTB DL, DL
	JZ main_loop
	// Report decoded bytes back in ctx (offset 40):
	// (output cursor - output start) * 4 streams.
	MOVQ ctx+0(FP), AX
	SUBQ 16(AX), BX
	SHLQ $0x02, BX
	MOVQ BX, 40(AX)
	RET
// func decompress1x_main_loop_amd64(ctx *decompress1xContext)
//
// Decodes a single Huffman stream, four symbols per iteration.
// Registers as loaded below: DX = output cursor, BX = output limit,
// R8 = input base, R9 = input offset, R10 = bit buffer, R11 = bitsRead,
// SI = decode table, DI = peek shift.
TEXT ·decompress1x_main_loop_amd64(SB), $0-8
	MOVQ ctx+0(FP), CX
	MOVQ 16(CX), DX
	MOVQ 24(CX), BX
	CMPQ BX, $0x04
	JB error_max_decoded_size_exeeded
	LEAQ (DX)(BX*1), BX
	MOVQ (CX), SI
	MOVQ (SI), R8
	MOVQ 24(SI), R9
	MOVQ 32(SI), R10
	MOVBQZX 40(SI), R11
	MOVQ 32(CX), SI
	MOVBQZX 8(CX), DI
	JMP loop_condition
main_loop:
	// Check if we have room for 4 bytes in the output buffer
	LEAQ 4(DX), CX
	CMPQ CX, BX
	JGE error_max_decoded_size_exeeded
	// Decode 4 values
	CMPQ R11, $0x20
	JL bitReader_fillFast_1_end
	SUBQ $0x20, R11
	SUBQ $0x04, R9
	MOVL (R8)(R9*1), R12
	MOVQ R11, CX
	SHLQ CL, R12
	ORQ R12, R10
bitReader_fillFast_1_end:
	// Symbol 1 -> AL, symbol 2 -> AH; BSWAPL below fixes the byte order.
	MOVQ DI, CX
	MOVQ R10, R12
	SHRQ CL, R12
	MOVW (SI)(R12*2), CX
	MOVB CH, AL
	MOVBQZX CL, CX
	ADDQ CX, R11
	SHLQ CL, R10
	MOVQ DI, CX
	MOVQ R10, R12
	SHRQ CL, R12
	MOVW (SI)(R12*2), CX
	MOVB CH, AH
	MOVBQZX CL, CX
	ADDQ CX, R11
	SHLQ CL, R10
	BSWAPL AX
	CMPQ R11, $0x20
	JL bitReader_fillFast_2_end
	SUBQ $0x20, R11
	SUBQ $0x04, R9
	MOVL (R8)(R9*1), R12
	MOVQ R11, CX
	SHLQ CL, R12
	ORQ R12, R10
bitReader_fillFast_2_end:
	// Symbols 3 and 4, assembled the same way.
	MOVQ DI, CX
	MOVQ R10, R12
	SHRQ CL, R12
	MOVW (SI)(R12*2), CX
	MOVB CH, AH
	MOVBQZX CL, CX
	ADDQ CX, R11
	SHLQ CL, R10
	MOVQ DI, CX
	MOVQ R10, R12
	SHRQ CL, R12
	MOVW (SI)(R12*2), CX
	MOVB CH, AL
	MOVBQZX CL, CX
	ADDQ CX, R11
	SHLQ CL, R10
	BSWAPL AX
	// Store the decoded values
	MOVL AX, (DX)
	ADDQ $0x04, DX
loop_condition:
	// Keep decoding while at least 8 input bytes remain.
	CMPQ R9, $0x08
	JGE main_loop
	// Update ctx structure
	MOVQ ctx+0(FP), AX
	SUBQ 16(AX), DX
	MOVQ DX, 40(AX)
	MOVQ (AX), AX
	MOVQ R9, 24(AX)
	MOVQ R10, 32(AX)
	MOVB R11, 40(AX)
	RET
	// Report error by storing -1 as the decoded count.
	// NOTE(review): "exeeded" is a pre-existing typo in this label name;
	// renaming it would be a code change, so it is left as-is here.
error_max_decoded_size_exeeded:
	MOVQ ctx+0(FP), AX
	MOVQ $-1, CX
	MOVQ CX, 40(AX)
	RET
// func decompress1x_main_loop_bmi2(ctx *decompress1xContext)
// Requires: BMI2
//
// Same algorithm as decompress1x_main_loop_amd64, but uses the BMI2
// SHLXQ/SHRXQ instructions so shift counts need not live in CL,
// saving register moves per symbol.
TEXT ·decompress1x_main_loop_bmi2(SB), $0-8
	MOVQ ctx+0(FP), CX
	MOVQ 16(CX), DX
	MOVQ 24(CX), BX
	CMPQ BX, $0x04
	JB error_max_decoded_size_exeeded
	LEAQ (DX)(BX*1), BX
	MOVQ (CX), SI
	MOVQ (SI), R8
	MOVQ 24(SI), R9
	MOVQ 32(SI), R10
	MOVBQZX 40(SI), R11
	MOVQ 32(CX), SI
	MOVBQZX 8(CX), DI
	JMP loop_condition
main_loop:
	// Check if we have room for 4 bytes in the output buffer
	LEAQ 4(DX), CX
	CMPQ CX, BX
	JGE error_max_decoded_size_exeeded
	// Decode 4 values
	CMPQ R11, $0x20
	JL bitReader_fillFast_1_end
	SUBQ $0x20, R11
	SUBQ $0x04, R9
	MOVL (R8)(R9*1), CX
	SHLXQ R11, CX, CX
	ORQ CX, R10
bitReader_fillFast_1_end:
	// Symbol 1 -> AL, symbol 2 -> AH; BSWAPL below fixes the byte order.
	SHRXQ DI, R10, CX
	MOVW (SI)(CX*2), CX
	MOVB CH, AL
	MOVBQZX CL, CX
	ADDQ CX, R11
	SHLXQ CX, R10, R10
	SHRXQ DI, R10, CX
	MOVW (SI)(CX*2), CX
	MOVB CH, AH
	MOVBQZX CL, CX
	ADDQ CX, R11
	SHLXQ CX, R10, R10
	BSWAPL AX
	CMPQ R11, $0x20
	JL bitReader_fillFast_2_end
	SUBQ $0x20, R11
	SUBQ $0x04, R9
	MOVL (R8)(R9*1), CX
	SHLXQ R11, CX, CX
	ORQ CX, R10
bitReader_fillFast_2_end:
	// Symbols 3 and 4, assembled the same way.
	SHRXQ DI, R10, CX
	MOVW (SI)(CX*2), CX
	MOVB CH, AH
	MOVBQZX CL, CX
	ADDQ CX, R11
	SHLXQ CX, R10, R10
	SHRXQ DI, R10, CX
	MOVW (SI)(CX*2), CX
	MOVB CH, AL
	MOVBQZX CL, CX
	ADDQ CX, R11
	SHLXQ CX, R10, R10
	BSWAPL AX
	// Store the decoded values
	MOVL AX, (DX)
	ADDQ $0x04, DX
loop_condition:
	// Keep decoding while at least 8 input bytes remain.
	CMPQ R9, $0x08
	JGE main_loop
	// Update ctx structure
	MOVQ ctx+0(FP), AX
	SUBQ 16(AX), DX
	MOVQ DX, 40(AX)
	MOVQ (AX), AX
	MOVQ R9, 24(AX)
	MOVQ R10, 32(AX)
	MOVB R11, 40(AX)
	RET
	// Report error by storing -1 as the decoded count.
error_max_decoded_size_exeeded:
	MOVQ ctx+0(FP), AX
	MOVQ $-1, CX
	MOVQ CX, 40(AX)
	RET
| @@ -0,0 +1,295 @@ | |||
| //go:build !amd64 || appengine || !gc || noasm | |||
| // +build !amd64 appengine !gc noasm | |||
| // This file contains a generic implementation of Decoder.Decompress4X. | |||
| package huff0 | |||
| import ( | |||
| "errors" | |||
| "fmt" | |||
| ) | |||
// Decompress4X will decompress a 4X encoded stream.
// The length of the supplied input must match the end of a block exactly.
// The *capacity* of the dst slice must match the destination size of
// the uncompressed data exactly.
func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
	if len(d.dt.single) == 0 {
		return nil, errors.New("no table loaded")
	}
	// Minimum input: 6-byte jump table plus at least 1 byte per stream.
	if len(src) < 6+(4*1) {
		return nil, errors.New("input too small")
	}
	if use8BitTables && d.actualTableLog <= 8 {
		return d.decompress4X8bit(dst, src)
	}
	var br [4]bitReaderShifted
	// Decode "jump table": three little-endian uint16 lengths for the
	// first three streams; the fourth stream is the remainder of src.
	start := 6
	for i := 0; i < 3; i++ {
		length := int(src[i*2]) | (int(src[i*2+1]) << 8)
		if start+length >= len(src) {
			return nil, errors.New("truncated input (or invalid offset)")
		}
		err := br[i].init(src[start : start+length])
		if err != nil {
			return nil, err
		}
		start += length
	}
	err := br[3].init(src[start:])
	if err != nil {
		return nil, err
	}
	// destination, offset to match first output
	dstSize := cap(dst)
	dst = dst[:dstSize]
	out := dst
	// Each stream writes to its own quarter of the output (rounded up).
	dstEvery := (dstSize + 3) / 4
	const tlSize = 1 << tableLogMax
	const tlMask = tlSize - 1
	single := d.dt.single[:tlSize]
	// Use temp table to avoid bound checks/append penalty.
	buf := d.buffer()
	var off uint8
	var decoded int
	// Decode 2 values from each decoder/loop.
	const bufoff = 256
	for {
		// Leave the bulk loop once any stream is nearly drained; the
		// tail is decoded symbol-by-symbol below.
		if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
			break
		}
		{
			const stream = 0
			const stream2 = 1
			br[stream].fillFast()
			br[stream2].fillFast()
			val := br[stream].peekBitsFast(d.actualTableLog)
			val2 := br[stream2].peekBitsFast(d.actualTableLog)
			v := single[val&tlMask]
			v2 := single[val2&tlMask]
			br[stream].advance(uint8(v.entry))
			br[stream2].advance(uint8(v2.entry))
			buf[stream][off] = uint8(v.entry >> 8)
			buf[stream2][off] = uint8(v2.entry >> 8)
			val = br[stream].peekBitsFast(d.actualTableLog)
			val2 = br[stream2].peekBitsFast(d.actualTableLog)
			v = single[val&tlMask]
			v2 = single[val2&tlMask]
			br[stream].advance(uint8(v.entry))
			br[stream2].advance(uint8(v2.entry))
			buf[stream][off+1] = uint8(v.entry >> 8)
			buf[stream2][off+1] = uint8(v2.entry >> 8)
		}
		{
			const stream = 2
			const stream2 = 3
			br[stream].fillFast()
			br[stream2].fillFast()
			val := br[stream].peekBitsFast(d.actualTableLog)
			val2 := br[stream2].peekBitsFast(d.actualTableLog)
			v := single[val&tlMask]
			v2 := single[val2&tlMask]
			br[stream].advance(uint8(v.entry))
			br[stream2].advance(uint8(v2.entry))
			buf[stream][off] = uint8(v.entry >> 8)
			buf[stream2][off] = uint8(v2.entry >> 8)
			val = br[stream].peekBitsFast(d.actualTableLog)
			val2 = br[stream2].peekBitsFast(d.actualTableLog)
			v = single[val&tlMask]
			v2 = single[val2&tlMask]
			br[stream].advance(uint8(v.entry))
			br[stream2].advance(uint8(v2.entry))
			buf[stream][off+1] = uint8(v.entry >> 8)
			buf[stream2][off+1] = uint8(v2.entry >> 8)
		}
		off += 2
		// off is a uint8, so it wraps to 0 after 128 iterations — i.e.
		// once each stream has bufoff (256) buffered bytes: flush them.
		if off == 0 {
			if bufoff > dstEvery {
				d.bufs.Put(buf)
				return nil, errors.New("corruption detected: stream overrun 1")
			}
			copy(out, buf[0][:])
			copy(out[dstEvery:], buf[1][:])
			copy(out[dstEvery*2:], buf[2][:])
			copy(out[dstEvery*3:], buf[3][:])
			out = out[bufoff:]
			decoded += bufoff * 4
			// There must at least be 3 buffers left.
			if len(out) < dstEvery*3 {
				d.bufs.Put(buf)
				return nil, errors.New("corruption detected: stream overrun 2")
			}
		}
	}
	// Flush whatever partial buffers remain after the bulk loop.
	if off > 0 {
		ioff := int(off)
		if len(out) < dstEvery*3+ioff {
			d.bufs.Put(buf)
			return nil, errors.New("corruption detected: stream overrun 3")
		}
		copy(out, buf[0][:off])
		copy(out[dstEvery:], buf[1][:off])
		copy(out[dstEvery*2:], buf[2][:off])
		copy(out[dstEvery*3:], buf[3][:off])
		decoded += int(off) * 4
		out = out[off:]
	}
	// Decode remaining.
	remainBytes := dstEvery - (decoded / 4)
	for i := range br {
		offset := dstEvery * i
		endsAt := offset + remainBytes
		if endsAt > len(out) {
			endsAt = len(out)
		}
		br := &br[i]
		bitsLeft := br.remaining()
		for bitsLeft > 0 {
			br.fill()
			if offset >= endsAt {
				d.bufs.Put(buf)
				return nil, errors.New("corruption detected: stream overrun 4")
			}
			// Read value and increment offset.
			val := br.peekBitsFast(d.actualTableLog)
			v := single[val&tlMask].entry
			nBits := uint8(v)
			br.advance(nBits)
			bitsLeft -= uint(nBits)
			out[offset] = uint8(v >> 8)
			offset++
		}
		if offset != endsAt {
			d.bufs.Put(buf)
			return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
		}
		decoded += offset - dstEvery*i
		err = br.close()
		if err != nil {
			return nil, err
		}
	}
	d.bufs.Put(buf)
	// All streams together must fill the destination exactly.
	if dstSize != decoded {
		return nil, errors.New("corruption detected: short output block")
	}
	return dst, nil
}
// Decompress1X will decompress a 1X encoded stream.
// The cap of the output buffer will be the maximum decompressed size.
// The length of the supplied input must match the end of a block exactly.
func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
	if len(d.dt.single) == 0 {
		return nil, errors.New("no table loaded")
	}
	if use8BitTables && d.actualTableLog <= 8 {
		return d.decompress1X8Bit(dst, src)
	}
	var br bitReaderShifted
	err := br.init(src)
	if err != nil {
		return dst, err
	}
	maxDecodedSize := cap(dst)
	dst = dst[:0]
	// Avoid bounds check by always having full sized table.
	const tlSize = 1 << tableLogMax
	const tlMask = tlSize - 1
	dt := d.dt.single[:tlSize]
	// Use temp table to avoid bound checks/append penalty.
	bufs := d.buffer()
	buf := &bufs[0]
	var off uint8
	// Bulk loop: decode 4 symbols per iteration while at least 8
	// input bytes remain.
	for br.off >= 8 {
		br.fillFast()
		v := dt[br.peekBitsFast(d.actualTableLog)&tlMask]
		br.advance(uint8(v.entry))
		buf[off+0] = uint8(v.entry >> 8)
		v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
		br.advance(uint8(v.entry))
		buf[off+1] = uint8(v.entry >> 8)
		// Refill
		br.fillFast()
		v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
		br.advance(uint8(v.entry))
		buf[off+2] = uint8(v.entry >> 8)
		v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
		br.advance(uint8(v.entry))
		buf[off+3] = uint8(v.entry >> 8)
		off += 4
		// off is a uint8 and wraps to 0 once 256 bytes are buffered:
		// flush the buffer to dst, bounding the output size first.
		if off == 0 {
			if len(dst)+256 > maxDecodedSize {
				br.close()
				d.bufs.Put(bufs)
				return nil, ErrMaxDecodedSizeExceeded
			}
			dst = append(dst, buf[:]...)
		}
	}
	if len(dst)+int(off) > maxDecodedSize {
		d.bufs.Put(bufs)
		br.close()
		return nil, ErrMaxDecodedSizeExceeded
	}
	// Flush remaining buffered bytes.
	dst = append(dst, buf[:off]...)
	// br < 8, so uint8 is fine
	bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead
	for bitsLeft > 0 {
		br.fill()
		// NOTE(review): this inline refill variant is deliberately
		// disabled ("if false && ..."); br.fill() above handles the
		// refill. Confirm before removing.
		if false && br.bitsRead >= 32 {
			if br.off >= 4 {
				v := br.in[br.off-4:]
				v = v[:4]
				low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
				br.value = (br.value << 32) | uint64(low)
				br.bitsRead -= 32
				br.off -= 4
			} else {
				for br.off > 0 {
					br.value = (br.value << 8) | uint64(br.in[br.off-1])
					br.bitsRead -= 8
					br.off--
				}
			}
		}
		if len(dst) >= maxDecodedSize {
			d.bufs.Put(bufs)
			br.close()
			return nil, ErrMaxDecodedSizeExceeded
		}
		v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask]
		nBits := uint8(v.entry)
		br.advance(nBits)
		bitsLeft -= nBits
		dst = append(dst, uint8(v.entry>>8))
	}
	d.bufs.Put(bufs)
	return dst, br.close()
}
| @@ -0,0 +1,337 @@ | |||
| // Package huff0 provides fast huffman encoding as used in zstd. | |||
| // | |||
| // See README.md at https://github.com/klauspost/compress/tree/master/huff0 for details. | |||
| package huff0 | |||
| import ( | |||
| "errors" | |||
| "fmt" | |||
| "math" | |||
| "math/bits" | |||
| "sync" | |||
| "github.com/klauspost/compress/fse" | |||
| ) | |||
const (
	maxSymbolValue = 255
	// zstandard limits tablelog to 11, see:
	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#huffman-tree-description
	tableLogMax     = 11
	tableLogDefault = 11
	minTablelog     = 5
	huffNodesLen    = 512
	// BlockSizeMax is maximum input size for a single block uncompressed.
	BlockSizeMax = 1<<18 - 1
)

var (
	// ErrIncompressible is returned when input is judged to be too hard to compress.
	ErrIncompressible = errors.New("input is not compressible")
	// ErrUseRLE is returned from the compressor when the input is a single byte value repeated.
	ErrUseRLE = errors.New("input is single value repeated")
	// ErrTooBig is returned if the input is too large for a single block.
	ErrTooBig = errors.New("input too big")
	// ErrMaxDecodedSizeExceeded is returned when decoded output exceeds
	// the configured (or default) maximum decoded size.
	ErrMaxDecodedSizeExceeded = errors.New("maximum output size exceeded")
)
// ReusePolicy controls whether and how a previously generated
// compression table may be re-used for subsequent blocks.
type ReusePolicy uint8

const (
	// ReusePolicyAllow will allow reuse if it produces smaller output.
	ReusePolicyAllow ReusePolicy = iota
	// ReusePolicyPrefer will re-use aggressively if possible.
	// This will not check if a new table will produce smaller output,
	// except if the current table is impossible to use or
	// compressed output is bigger than input.
	ReusePolicyPrefer
	// ReusePolicyNone will disable re-use of tables.
	// This is slightly faster than ReusePolicyAllow but may produce larger output.
	ReusePolicyNone
	// ReusePolicyMust must allow reuse and produce smaller output.
	ReusePolicyMust
)
// Scratch holds reusable state for compression and decompression.
// Re-using a Scratch between blocks avoids repeated allocations of the
// internal tables and buffers.
type Scratch struct {
	// count holds per-symbol occurrence counts of the current block.
	count [maxSymbolValue + 1]uint32
	// Per block parameters.
	// These can be used to override compression parameters of the block.
	// Do not touch, unless you know what you are doing.
	// Out is output buffer.
	// If the scratch is re-used before the caller is done processing the output,
	// set this field to nil.
	// Otherwise the output buffer will be re-used for next Compression/Decompression step
	// and allocation will be avoided.
	Out []byte
	// OutTable will contain the table data only, if a new table has been generated.
	// Slice of the returned data.
	OutTable []byte
	// OutData will contain the compressed data.
	// Slice of the returned data.
	OutData []byte
	// MaxDecodedSize will set the maximum allowed output size.
	// This value will automatically be set to BlockSizeMax if not set.
	// Decoders will return ErrMaxDecodedSizeExceeded if this limit is exceeded.
	MaxDecodedSize int
	// br reads the current input block (initialized in prepare).
	br byteReader
	// MaxSymbolValue will override the maximum symbol value of the next block.
	MaxSymbolValue uint8
	// TableLog will attempt to override the tablelog for the next block.
	// Must be <= 11 and >= 5.
	TableLog uint8
	// Reuse will specify the reuse policy
	Reuse ReusePolicy
	// WantLogLess allows to specify a log 2 reduction that should at least be achieved,
	// otherwise the block will be returned as incompressible.
	// The reduction should then at least be (input size >> WantLogLess)
	// If WantLogLess == 0 any improvement will do.
	WantLogLess uint8
	symbolLen      uint16 // Length of active part of the symbol table.
	maxCount       int    // count of the most probable symbol
	clearCount     bool   // clear count
	actualTableLog uint8  // Selected tablelog.
	prevTableLog   uint8  // Tablelog for previous table
	prevTable      cTable // Table used for previous compression.
	cTable         cTable // compression table
	dt             dTable // decompression table
	nodes          []nodeElt // scratch nodes; sized to huffNodesLen+1 in prepare
	tmpOut         [4][]byte // per-stream temporary outputs — presumably for 4X mode; verify against callers
	fse            *fse.Scratch // scratch for FSE coding of table weights (see cTable.write)
	decPool        sync.Pool // *[4][256]byte buffers.
	huffWeight     [maxSymbolValue + 1]byte // per-symbol weights used when serializing the table
}
| // TransferCTable will transfer the previously used compression table. | |||
| func (s *Scratch) TransferCTable(src *Scratch) { | |||
| if cap(s.prevTable) < len(src.prevTable) { | |||
| s.prevTable = make(cTable, 0, maxSymbolValue+1) | |||
| } | |||
| s.prevTable = s.prevTable[:len(src.prevTable)] | |||
| copy(s.prevTable, src.prevTable) | |||
| s.prevTableLog = src.prevTableLog | |||
| } | |||
| func (s *Scratch) prepare(in []byte) (*Scratch, error) { | |||
| if len(in) > BlockSizeMax { | |||
| return nil, ErrTooBig | |||
| } | |||
| if s == nil { | |||
| s = &Scratch{} | |||
| } | |||
| if s.MaxSymbolValue == 0 { | |||
| s.MaxSymbolValue = maxSymbolValue | |||
| } | |||
| if s.TableLog == 0 { | |||
| s.TableLog = tableLogDefault | |||
| } | |||
| if s.TableLog > tableLogMax || s.TableLog < minTablelog { | |||
| return nil, fmt.Errorf(" invalid tableLog %d (%d -> %d)", s.TableLog, minTablelog, tableLogMax) | |||
| } | |||
| if s.MaxDecodedSize <= 0 || s.MaxDecodedSize > BlockSizeMax { | |||
| s.MaxDecodedSize = BlockSizeMax | |||
| } | |||
| if s.clearCount && s.maxCount == 0 { | |||
| for i := range s.count { | |||
| s.count[i] = 0 | |||
| } | |||
| s.clearCount = false | |||
| } | |||
| if cap(s.Out) == 0 { | |||
| s.Out = make([]byte, 0, len(in)) | |||
| } | |||
| s.Out = s.Out[:0] | |||
| s.OutTable = nil | |||
| s.OutData = nil | |||
| if cap(s.nodes) < huffNodesLen+1 { | |||
| s.nodes = make([]nodeElt, 0, huffNodesLen+1) | |||
| } | |||
| s.nodes = s.nodes[:0] | |||
| if s.fse == nil { | |||
| s.fse = &fse.Scratch{} | |||
| } | |||
| s.br.init(in) | |||
| return s, nil | |||
| } | |||
// cTable is the Huffman compression table, indexed by symbol value
// (write reads c[n].nBits for each symbol n).
type cTable []cTableEntry
// write serializes the Huffman table to s.Out: it first tries FSE
// compression of the symbol weights, and falls back to packing the raw
// weights as 4-bit values when FSE does not save space.
// Returns ErrIncompressible if the table cannot be represented.
func (c cTable) write(s *Scratch) error {
	var (
		// precomputed conversion table
		bitsToWeight [tableLogMax + 1]byte
		huffLog      = s.actualTableLog
		// last weight is not saved.
		maxSymbolValue = uint8(s.symbolLen - 1)
		huffWeight     = s.huffWeight[:256]
	)
	const (
		maxFSETableLog = 6
	)
	// convert to weight
	bitsToWeight[0] = 0
	for n := uint8(1); n < huffLog+1; n++ {
		bitsToWeight[n] = huffLog + 1 - n
	}
	// Acquire histogram for FSE.
	hist := s.fse.Histogram()
	hist = hist[:256]
	// Weights fit in 4 bits, so only the first 16 buckets are used.
	for i := range hist[:16] {
		hist[i] = 0
	}
	for n := uint8(0); n < maxSymbolValue; n++ {
		v := bitsToWeight[c[n].nBits] & 15
		huffWeight[n] = v
		hist[v]++
	}
	// FSE compress if feasible.
	if maxSymbolValue >= 2 {
		huffMaxCnt := uint32(0)
		huffMax := uint8(0)
		for i, v := range hist[:16] {
			if v == 0 {
				continue
			}
			huffMax = byte(i)
			if v > huffMaxCnt {
				huffMaxCnt = v
			}
		}
		s.fse.HistogramFinished(huffMax, int(huffMaxCnt))
		s.fse.TableLog = maxFSETableLog
		b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse)
		// Only use FSE when it beats the 4-bit packing below, which
		// needs symbolLen/2 bytes. Header byte < 128 signals an
		// FSE-compressed table of len(b) bytes.
		if err == nil && len(b) < int(s.symbolLen>>1) {
			s.Out = append(s.Out, uint8(len(b)))
			s.Out = append(s.Out, b...)
			return nil
		}
		// Unable to compress (RLE/incompressible)
	}
	// write raw values as 4-bits (max : 15)
	if maxSymbolValue > (256 - 128) {
		// should not happen : likely means source cannot be compressed
		return ErrIncompressible
	}
	op := s.Out
	// special case, pack weights 4 bits/weight.
	// Header byte >= 128 encodes the symbol count directly.
	op = append(op, 128|(maxSymbolValue-1))
	// be sure it doesn't cause msan issue in final combination
	huffWeight[maxSymbolValue] = 0
	for n := uint16(0); n < uint16(maxSymbolValue); n += 2 {
		op = append(op, (huffWeight[n]<<4)|huffWeight[n+1])
	}
	s.Out = op
	return nil
}
// estTableSize returns the estimated size in bytes of the serialized table
// header, mirroring the logic of cTable.write without emitting output.
func (c cTable) estTableSize(s *Scratch) (sz int, err error) {
	var (
		// precomputed conversion table
		bitsToWeight [tableLogMax + 1]byte
		huffLog      = s.actualTableLog
		// last weight is not saved.
		maxSymbolValue = uint8(s.symbolLen - 1)
		huffWeight     = s.huffWeight[:256]
	)
	const (
		maxFSETableLog = 6
	)
	// convert to weight
	bitsToWeight[0] = 0
	for n := uint8(1); n < huffLog+1; n++ {
		bitsToWeight[n] = huffLog + 1 - n
	}
	// Acquire histogram for FSE.
	hist := s.fse.Histogram()
	hist = hist[:256]
	for i := range hist[:16] {
		hist[i] = 0
	}
	for n := uint8(0); n < maxSymbolValue; n++ {
		v := bitsToWeight[c[n].nBits] & 15
		huffWeight[n] = v
		hist[v]++
	}
	// FSE compress if feasible.
	if maxSymbolValue >= 2 {
		huffMaxCnt := uint32(0)
		huffMax := uint8(0)
		for i, v := range hist[:16] {
			if v == 0 {
				continue
			}
			huffMax = byte(i)
			if v > huffMaxCnt {
				huffMaxCnt = v
			}
		}
		s.fse.HistogramFinished(huffMax, int(huffMaxCnt))
		s.fse.TableLog = maxFSETableLog
		b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse)
		if err == nil && len(b) < int(s.symbolLen>>1) {
			sz += 1 + len(b)
			return sz, nil
		}
		// Unable to compress (RLE/uncompressible)
	}
	// write raw values as 4-bits (max : 15)
	if maxSymbolValue > (256 - 128) {
		// should not happen : likely means source cannot be compressed
		return 0, ErrIncompressible
	}
	// special case, pack weights 4 bits/weight.
	// NOTE(review): write emits ceil(maxSymbolValue/2) bytes here, while this
	// uses floor — one byte low for odd maxSymbolValue. Estimate only; confirm
	// that an underestimate is acceptable to callers.
	sz += 1 + int(maxSymbolValue/2)
	return sz, nil
}
| // estimateSize returns the estimated size in bytes of the input represented in the | |||
| // histogram supplied. | |||
| func (c cTable) estimateSize(hist []uint32) int { | |||
| nbBits := uint32(7) | |||
| for i, v := range c[:len(hist)] { | |||
| nbBits += uint32(v.nBits) * hist[i] | |||
| } | |||
| return int(nbBits >> 3) | |||
| } | |||
| // minSize returns the minimum possible size considering the shannon limit. | |||
| func (s *Scratch) minSize(total int) int { | |||
| nbBits := float64(7) | |||
| fTotal := float64(total) | |||
| for _, v := range s.count[:s.symbolLen] { | |||
| n := float64(v) | |||
| if n > 0 { | |||
| nbBits += math.Log2(fTotal/n) * n | |||
| } | |||
| } | |||
| return int(nbBits) >> 3 | |||
| } | |||
// highBit32 returns the index of the highest set bit in val.
// For val == 0 the subtraction wraps to ^uint32(0), matching the original
// int-domain bits.Len32(0)-1 conversion.
func highBit32(val uint32) (n uint32) {
	n = uint32(bits.Len32(val)) - 1
	return
}
| @@ -0,0 +1,34 @@ | |||
| // Package cpuinfo gives runtime info about the current CPU. | |||
| // | |||
| // This is a very limited module meant for use internally | |||
| // in this project. For more versatile solution check | |||
| // https://github.com/klauspost/cpuid. | |||
| package cpuinfo | |||
// HasBMI1 checks whether an x86 CPU supports the BMI1 extension.
//
// The value is detected once at package init; on non-amd64 builds it
// remains false.
func HasBMI1() bool {
	return hasBMI1
}
// HasBMI2 checks whether an x86 CPU supports the BMI2 extension.
//
// The value is detected once at package init; on non-amd64 builds it
// remains false. It can be temporarily disabled via DisableBMI2.
func HasBMI2() bool {
	return hasBMI2
}
| // DisableBMI2 will disable BMI2, for testing purposes. | |||
| // Call returned function to restore previous state. | |||
| func DisableBMI2() func() { | |||
| old := hasBMI2 | |||
| hasBMI2 = false | |||
| return func() { | |||
| hasBMI2 = old | |||
| } | |||
| } | |||
| // HasBMI checks whether an x86 CPU supports both BMI1 and BMI2 extensions. | |||
| func HasBMI() bool { | |||
| return HasBMI1() && HasBMI2() | |||
| } | |||
// CPU feature flags. They default to false and are populated by the
// architecture-specific init (amd64 only).
var hasBMI1 bool
var hasBMI2 bool
| @@ -0,0 +1,11 @@ | |||
| //go:build amd64 && !appengine && !noasm && gc | |||
| // +build amd64,!appengine,!noasm,gc | |||
| package cpuinfo | |||
// x86extensions reports BMI1 and BMI2 support as detected via CPUID.
// It is implemented in assembly.
//
//go:noescape
func x86extensions() (bmi1, bmi2 bool)
// init captures the CPU's BMI capabilities once at program startup.
func init() {
	hasBMI1, hasBMI2 = x86extensions()
}
| @@ -0,0 +1,36 @@ | |||
| // +build !appengine | |||
| // +build gc | |||
| // +build !noasm | |||
| #include "textflag.h" | |||
| #include "funcdata.h" | |||
| #include "go_asm.h" | |||
// func x86extensions() (bmi1, bmi2 bool)
// Queries CPUID leaf 0 for the maximum supported leaf, then leaf 7 /
// subleaf 0 and extracts the BMI1 (EBX bit 3) and BMI2 (EBX bit 8) flags.
TEXT ·x86extensions(SB), NOSPLIT, $0
// 1. determine max EAX value
XORQ AX, AX
CPUID
CMPQ AX, $7
JB unsupported
// 2. EAX = 7, ECX = 0 --- see Table 3-8 "Information Returned by CPUID Instruction"
MOVQ $7, AX
MOVQ $0, CX
CPUID
BTQ $3, BX // bit 3 = BMI1
SETCS AL
BTQ $8, BX // bit 8 = BMI2
SETCS AH
MOVB AL, bmi1+0(FP)
MOVB AH, bmi2+1(FP)
RET
// CPUID leaf 7 unavailable: report both features absent.
unsupported:
XORQ AX, AX
MOVB AL, bmi1+0(FP)
MOVB AL, bmi2+1(FP)
RET
| @@ -0,0 +1,27 @@ | |||
| Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. | |||
| Redistribution and use in source and binary forms, with or without | |||
| modification, are permitted provided that the following conditions are | |||
| met: | |||
| * Redistributions of source code must retain the above copyright | |||
| notice, this list of conditions and the following disclaimer. | |||
| * Redistributions in binary form must reproduce the above | |||
| copyright notice, this list of conditions and the following disclaimer | |||
| in the documentation and/or other materials provided with the | |||
| distribution. | |||
| * Neither the name of Google Inc. nor the names of its | |||
| contributors may be used to endorse or promote products derived from | |||
| this software without specific prior written permission. | |||
| THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |||
| "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |||
| LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |||
| A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |||
| OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |||
| SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |||
| LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |||
| DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |||
| THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |||
| (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |||
| OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |||
| @@ -0,0 +1,264 @@ | |||
| // Copyright 2011 The Snappy-Go Authors. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| package snapref | |||
| import ( | |||
| "encoding/binary" | |||
| "errors" | |||
| "io" | |||
| ) | |||
var (
	// ErrCorrupt reports that the input is invalid.
	ErrCorrupt = errors.New("snappy: corrupt input")
	// ErrTooLarge reports that the uncompressed length is too large.
	ErrTooLarge = errors.New("snappy: decoded block is too large")
	// ErrUnsupported reports that the input isn't supported.
	ErrUnsupported = errors.New("snappy: unsupported input")
	// errUnsupportedLiteralLength reports a literal length that cannot be
	// represented (int overflow while decoding).
	errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length")
)
| // DecodedLen returns the length of the decoded block. | |||
| func DecodedLen(src []byte) (int, error) { | |||
| v, _, err := decodedLen(src) | |||
| return v, err | |||
| } | |||
| // decodedLen returns the length of the decoded block and the number of bytes | |||
| // that the length header occupied. | |||
| func decodedLen(src []byte) (blockLen, headerLen int, err error) { | |||
| v, n := binary.Uvarint(src) | |||
| if n <= 0 || v > 0xffffffff { | |||
| return 0, 0, ErrCorrupt | |||
| } | |||
| const wordSize = 32 << (^uint(0) >> 32 & 1) | |||
| if wordSize == 32 && v > 0x7fffffff { | |||
| return 0, 0, ErrTooLarge | |||
| } | |||
| return int(v), n, nil | |||
| } | |||
// Internal error codes returned by decode; Decode maps them to the
// exported error values.
const (
	decodeErrCodeCorrupt                  = 1
	decodeErrCodeUnsupportedLiteralLength = 2
)
| // Decode returns the decoded form of src. The returned slice may be a sub- | |||
| // slice of dst if dst was large enough to hold the entire decoded block. | |||
| // Otherwise, a newly allocated slice will be returned. | |||
| // | |||
| // The dst and src must not overlap. It is valid to pass a nil dst. | |||
| // | |||
| // Decode handles the Snappy block format, not the Snappy stream format. | |||
| func Decode(dst, src []byte) ([]byte, error) { | |||
| dLen, s, err := decodedLen(src) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| if dLen <= len(dst) { | |||
| dst = dst[:dLen] | |||
| } else { | |||
| dst = make([]byte, dLen) | |||
| } | |||
| switch decode(dst, src[s:]) { | |||
| case 0: | |||
| return dst, nil | |||
| case decodeErrCodeUnsupportedLiteralLength: | |||
| return nil, errUnsupportedLiteralLength | |||
| } | |||
| return nil, ErrCorrupt | |||
| } | |||
| // NewReader returns a new Reader that decompresses from r, using the framing | |||
| // format described at | |||
| // https://github.com/google/snappy/blob/master/framing_format.txt | |||
| func NewReader(r io.Reader) *Reader { | |||
| return &Reader{ | |||
| r: r, | |||
| decoded: make([]byte, maxBlockSize), | |||
| buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), | |||
| } | |||
| } | |||
// Reader is an io.Reader that can read Snappy-compressed bytes.
//
// Reader handles the Snappy stream format, not the Snappy block format.
type Reader struct {
	r       io.Reader
	err     error  // sticky: once set, all subsequent reads return it
	decoded []byte // scratch for one decompressed chunk
	buf     []byte // scratch for one encoded chunk plus its checksum
	// decoded[i:j] contains decoded bytes that have not yet been passed on.
	i, j       int
	readHeader bool // whether the stream identifier chunk has been seen
}
| // Reset discards any buffered data, resets all state, and switches the Snappy | |||
| // reader to read from r. This permits reusing a Reader rather than allocating | |||
| // a new one. | |||
| func (r *Reader) Reset(reader io.Reader) { | |||
| r.r = reader | |||
| r.err = nil | |||
| r.i = 0 | |||
| r.j = 0 | |||
| r.readHeader = false | |||
| } | |||
| func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { | |||
| if _, r.err = io.ReadFull(r.r, p); r.err != nil { | |||
| if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { | |||
| r.err = ErrCorrupt | |||
| } | |||
| return false | |||
| } | |||
| return true | |||
| } | |||
// fill reads and decodes chunks from the underlying stream until at least one
// decoded byte is pending in decoded[i:j], or an error occurs. It validates
// chunk framing, payload checksums and the stream identifier, recording any
// failure in r.err before returning it.
func (r *Reader) fill() error {
	for r.i >= r.j {
		// Each chunk starts with a 4-byte header: 1 type byte + 24-bit length.
		if !r.readFull(r.buf[:4], true) {
			return r.err
		}
		chunkType := r.buf[0]
		if !r.readHeader {
			// The stream must open with the stream identifier chunk.
			if chunkType != chunkTypeStreamIdentifier {
				r.err = ErrCorrupt
				return r.err
			}
			r.readHeader = true
		}
		chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
		if chunkLen > len(r.buf) {
			r.err = ErrUnsupported
			return r.err
		}
		// The chunk types are specified at
		// https://github.com/google/snappy/blob/master/framing_format.txt
		switch chunkType {
		case chunkTypeCompressedData:
			// Section 4.2. Compressed data (chunk type 0x00).
			if chunkLen < checksumSize {
				r.err = ErrCorrupt
				return r.err
			}
			buf := r.buf[:chunkLen]
			if !r.readFull(buf, false) {
				return r.err
			}
			// Little-endian CRC precedes the compressed payload.
			checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
			buf = buf[checksumSize:]
			n, err := DecodedLen(buf)
			if err != nil {
				r.err = err
				return r.err
			}
			if n > len(r.decoded) {
				r.err = ErrCorrupt
				return r.err
			}
			if _, err := Decode(r.decoded, buf); err != nil {
				r.err = err
				return r.err
			}
			// The CRC covers the decoded (uncompressed) bytes.
			if crc(r.decoded[:n]) != checksum {
				r.err = ErrCorrupt
				return r.err
			}
			r.i, r.j = 0, n
			continue
		case chunkTypeUncompressedData:
			// Section 4.3. Uncompressed data (chunk type 0x01).
			if chunkLen < checksumSize {
				r.err = ErrCorrupt
				return r.err
			}
			buf := r.buf[:checksumSize]
			if !r.readFull(buf, false) {
				return r.err
			}
			checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
			// Read directly into r.decoded instead of via r.buf.
			n := chunkLen - checksumSize
			if n > len(r.decoded) {
				r.err = ErrCorrupt
				return r.err
			}
			if !r.readFull(r.decoded[:n], false) {
				return r.err
			}
			if crc(r.decoded[:n]) != checksum {
				r.err = ErrCorrupt
				return r.err
			}
			r.i, r.j = 0, n
			continue
		case chunkTypeStreamIdentifier:
			// Section 4.1. Stream identifier (chunk type 0xff).
			if chunkLen != len(magicBody) {
				r.err = ErrCorrupt
				return r.err
			}
			if !r.readFull(r.buf[:len(magicBody)], false) {
				return r.err
			}
			for i := 0; i < len(magicBody); i++ {
				if r.buf[i] != magicBody[i] {
					r.err = ErrCorrupt
					return r.err
				}
			}
			continue
		}
		if chunkType <= 0x7f {
			// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
			r.err = ErrUnsupported
			return r.err
		}
		// Section 4.4 Padding (chunk type 0xfe).
		// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
		if !r.readFull(r.buf[:chunkLen], false) {
			return r.err
		}
	}
	return nil
}
| // Read satisfies the io.Reader interface. | |||
| func (r *Reader) Read(p []byte) (int, error) { | |||
| if r.err != nil { | |||
| return 0, r.err | |||
| } | |||
| if err := r.fill(); err != nil { | |||
| return 0, err | |||
| } | |||
| n := copy(p, r.decoded[r.i:r.j]) | |||
| r.i += n | |||
| return n, nil | |||
| } | |||
| // ReadByte satisfies the io.ByteReader interface. | |||
| func (r *Reader) ReadByte() (byte, error) { | |||
| if r.err != nil { | |||
| return 0, r.err | |||
| } | |||
| if err := r.fill(); err != nil { | |||
| return 0, err | |||
| } | |||
| c := r.decoded[r.i] | |||
| r.i++ | |||
| return c, nil | |||
| } | |||
| @@ -0,0 +1,113 @@ | |||
| // Copyright 2016 The Snappy-Go Authors. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| package snapref | |||
// decode writes the decoding of src to dst. It assumes that the varint-encoded
// length of the decompressed bytes has already been read, and that len(dst)
// equals that length.
//
// It returns 0 on success or a decodeErrCodeXxx error code on failure.
func decode(dst, src []byte) int {
	var d, s, offset, length int
	for s < len(src) {
		// The low two bits of each tag byte select literal vs copy encoding.
		switch src[s] & 0x03 {
		case tagLiteral:
			x := uint32(src[s] >> 2)
			// Tag values 60-63 signal that the length follows in 1-4 bytes.
			switch {
			case x < 60:
				s++
			case x == 60:
				s += 2
				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
					return decodeErrCodeCorrupt
				}
				x = uint32(src[s-1])
			case x == 61:
				s += 3
				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
					return decodeErrCodeCorrupt
				}
				x = uint32(src[s-2]) | uint32(src[s-1])<<8
			case x == 62:
				s += 4
				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
					return decodeErrCodeCorrupt
				}
				x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
			case x == 63:
				s += 5
				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
					return decodeErrCodeCorrupt
				}
				x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
			}
			length = int(x) + 1
			if length <= 0 {
				// Non-positive length means int(x)+1 overflowed (32-bit ints).
				return decodeErrCodeUnsupportedLiteralLength
			}
			if length > len(dst)-d || length > len(src)-s {
				return decodeErrCodeCorrupt
			}
			copy(dst[d:], src[s:s+length])
			d += length
			s += length
			continue
		case tagCopy1:
			s += 2
			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
				return decodeErrCodeCorrupt
			}
			length = 4 + int(src[s-2])>>2&0x7
			offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
		case tagCopy2:
			s += 3
			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
				return decodeErrCodeCorrupt
			}
			length = 1 + int(src[s-3])>>2
			offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
		case tagCopy4:
			s += 5
			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
				return decodeErrCodeCorrupt
			}
			length = 1 + int(src[s-5])>>2
			offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
		}
		// Copies must reference bytes already produced and fit in dst.
		if offset <= 0 || d < offset || length > len(dst)-d {
			return decodeErrCodeCorrupt
		}
		// Copy from an earlier sub-slice of dst to a later sub-slice.
		// If no overlap, use the built-in copy:
		if offset >= length {
			copy(dst[d:d+length], dst[d-offset:])
			d += length
			continue
		}
		// Unlike the built-in copy function, this byte-by-byte copy always runs
		// forwards, even if the slices overlap. Conceptually, this is:
		//
		// d += forwardCopy(dst[d:d+length], dst[d-offset:])
		//
		// We align the slices into a and b and show the compiler they are the same size.
		// This allows the loop to run without bounds checks.
		a := dst[d : d+length]
		b := dst[d-offset:]
		b = b[:len(a)]
		for i := range a {
			a[i] = b[i]
		}
		d += length
	}
	if d != len(dst) {
		return decodeErrCodeCorrupt
	}
	return 0
}
| @@ -0,0 +1,289 @@ | |||
| // Copyright 2011 The Snappy-Go Authors. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| package snapref | |||
| import ( | |||
| "encoding/binary" | |||
| "errors" | |||
| "io" | |||
| ) | |||
| // Encode returns the encoded form of src. The returned slice may be a sub- | |||
| // slice of dst if dst was large enough to hold the entire encoded block. | |||
| // Otherwise, a newly allocated slice will be returned. | |||
| // | |||
| // The dst and src must not overlap. It is valid to pass a nil dst. | |||
| // | |||
| // Encode handles the Snappy block format, not the Snappy stream format. | |||
| func Encode(dst, src []byte) []byte { | |||
| if n := MaxEncodedLen(len(src)); n < 0 { | |||
| panic(ErrTooLarge) | |||
| } else if len(dst) < n { | |||
| dst = make([]byte, n) | |||
| } | |||
| // The block starts with the varint-encoded length of the decompressed bytes. | |||
| d := binary.PutUvarint(dst, uint64(len(src))) | |||
| for len(src) > 0 { | |||
| p := src | |||
| src = nil | |||
| if len(p) > maxBlockSize { | |||
| p, src = p[:maxBlockSize], p[maxBlockSize:] | |||
| } | |||
| if len(p) < minNonLiteralBlockSize { | |||
| d += emitLiteral(dst[d:], p) | |||
| } else { | |||
| d += encodeBlock(dst[d:], p) | |||
| } | |||
| } | |||
| return dst[:d] | |||
| } | |||
// inputMargin is the minimum number of extra input bytes to keep, inside
// encodeBlock's inner loop. On some architectures, this margin lets us
// implement a fast path for emitLiteral, where the copy of short (<= 16 byte)
// literals can be implemented as a single load to and store from a 16-byte
// register. That literal's actual length can be as short as 1 byte, so this
// can copy up to 15 bytes too much, but that's OK as subsequent iterations of
// the encoding loop will fix up the copy overrun, and this inputMargin ensures
// that we don't overrun the dst and src buffers.
const inputMargin = 16 - 1
// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that
// could be encoded with a copy tag. This is the minimum with respect to the
// algorithm used by encodeBlock, not a minimum enforced by the file format.
//
// The encoded output must start with at least a 1 byte literal, as there are
// no previous bytes to copy. A minimal (1 byte) copy after that, generated
// from an emitCopy call in encodeBlock's main loop, would require at least
// another inputMargin bytes, for the reason above: we want any emitLiteral
// calls inside encodeBlock's main loop to use the fast path if possible, which
// requires being able to overrun by inputMargin bytes. Thus,
// minNonLiteralBlockSize equals 1 + 1 + inputMargin.
//
// The C++ code doesn't use this exact threshold, but it could, as discussed at
// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion
// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an
// optimization. It should not affect the encoded form. This is tested by
// TestSameEncodingAsCppShortCopies.
// (Currently 17 bytes.)
const minNonLiteralBlockSize = 1 + 1 + inputMargin
// MaxEncodedLen returns the maximum length of a snappy block, given its
// uncompressed length.
//
// It will return a negative value if srcLen is too large to encode.
func MaxEncodedLen(srcLen int) int {
	const maxUint32 = 0xffffffff
	n := uint64(srcLen)
	if n > maxUint32 {
		return -1
	}
	// The trailing literal sequence blows up by at most 62/60 (one tag byte
	// plus one length byte per 60 input bytes). The dominating item case is a
	// one-byte literal followed by a five-byte copy: 6 input bytes become 7
	// output bytes. Hence the n/6 term plus constant overhead:
	n += 32 + n/6
	if n > maxUint32 {
		return -1
	}
	return int(n)
}
// errClosed is the sticky error set by Close; subsequent operations fail with it.
var errClosed = errors.New("snappy: Writer is closed")
| // NewWriter returns a new Writer that compresses to w. | |||
| // | |||
| // The Writer returned does not buffer writes. There is no need to Flush or | |||
| // Close such a Writer. | |||
| // | |||
| // Deprecated: the Writer returned is not suitable for many small writes, only | |||
| // for few large writes. Use NewBufferedWriter instead, which is efficient | |||
| // regardless of the frequency and shape of the writes, and remember to Close | |||
| // that Writer when done. | |||
| func NewWriter(w io.Writer) *Writer { | |||
| return &Writer{ | |||
| w: w, | |||
| obuf: make([]byte, obufLen), | |||
| } | |||
| } | |||
| // NewBufferedWriter returns a new Writer that compresses to w, using the | |||
| // framing format described at | |||
| // https://github.com/google/snappy/blob/master/framing_format.txt | |||
| // | |||
| // The Writer returned buffers writes. Users must call Close to guarantee all | |||
| // data has been forwarded to the underlying io.Writer. They may also call | |||
| // Flush zero or more times before calling Close. | |||
| func NewBufferedWriter(w io.Writer) *Writer { | |||
| return &Writer{ | |||
| w: w, | |||
| ibuf: make([]byte, 0, maxBlockSize), | |||
| obuf: make([]byte, obufLen), | |||
| } | |||
| } | |||
// Writer is an io.Writer that can write Snappy-compressed bytes.
//
// Writer handles the Snappy stream format, not the Snappy block format.
type Writer struct {
	w   io.Writer
	err error // sticky: once set, all subsequent operations fail with it
	// ibuf is a buffer for the incoming (uncompressed) bytes.
	//
	// Its use is optional. For backwards compatibility, Writers created by the
	// NewWriter function have ibuf == nil, do not buffer incoming bytes, and
	// therefore do not need to be Flush'ed or Close'd.
	ibuf []byte
	// obuf is a buffer for the outgoing (compressed) bytes.
	obuf []byte
	// wroteStreamHeader is whether we have written the stream header.
	wroteStreamHeader bool
}
| // Reset discards the writer's state and switches the Snappy writer to write to | |||
| // w. This permits reusing a Writer rather than allocating a new one. | |||
| func (w *Writer) Reset(writer io.Writer) { | |||
| w.w = writer | |||
| w.err = nil | |||
| if w.ibuf != nil { | |||
| w.ibuf = w.ibuf[:0] | |||
| } | |||
| w.wroteStreamHeader = false | |||
| } | |||
// Write satisfies the io.Writer interface.
//
// In buffered mode (ibuf != nil) bytes are staged in ibuf and emitted in
// maxBlockSize chunks; unbuffered mode forwards directly to write.
func (w *Writer) Write(p []byte) (nRet int, errRet error) {
	if w.ibuf == nil {
		// Do not buffer incoming bytes. This does not perform or compress well
		// if the caller of Writer.Write writes many small slices. This
		// behavior is therefore deprecated, but still supported for backwards
		// compatibility with code that doesn't explicitly Flush or Close.
		return w.write(p)
	}
	// The remainder of this method is based on bufio.Writer.Write from the
	// standard library.
	for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil {
		var n int
		if len(w.ibuf) == 0 {
			// Large write, empty buffer.
			// Write directly from p to avoid copy.
			n, _ = w.write(p)
		} else {
			// Top up ibuf and flush it to make room.
			n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
			w.ibuf = w.ibuf[:len(w.ibuf)+n]
			w.Flush()
		}
		nRet += n
		p = p[n:]
	}
	if w.err != nil {
		return nRet, w.err
	}
	// The remainder of p fits in ibuf's spare capacity; stage it for a
	// later Flush or Close.
	n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
	w.ibuf = w.ibuf[:len(w.ibuf)+n]
	nRet += n
	return nRet, nil
}
// write compresses p one maxBlockSize chunk at a time and emits the framed
// chunks to the underlying writer, bypassing ibuf. It reports the number of
// uncompressed bytes consumed and records any I/O error in w.err.
func (w *Writer) write(p []byte) (nRet int, errRet error) {
	if w.err != nil {
		return 0, w.err
	}
	for len(p) > 0 {
		obufStart := len(magicChunk)
		if !w.wroteStreamHeader {
			// The stream identifier is pre-positioned at the start of obuf
			// and emitted together with the first chunk.
			w.wroteStreamHeader = true
			copy(w.obuf, magicChunk)
			obufStart = 0
		}
		var uncompressed []byte
		if len(p) > maxBlockSize {
			uncompressed, p = p[:maxBlockSize], p[maxBlockSize:]
		} else {
			uncompressed, p = p, nil
		}
		checksum := crc(uncompressed)
		// Compress the buffer, discarding the result if the improvement
		// isn't at least 12.5%.
		compressed := Encode(w.obuf[obufHeaderLen:], uncompressed)
		chunkType := uint8(chunkTypeCompressedData)
		chunkLen := 4 + len(compressed)
		obufEnd := obufHeaderLen + len(compressed)
		if len(compressed) >= len(uncompressed)-len(uncompressed)/8 {
			chunkType = chunkTypeUncompressedData
			chunkLen = 4 + len(uncompressed)
			obufEnd = obufHeaderLen
		}
		// Fill in the per-chunk header that comes before the body.
		w.obuf[len(magicChunk)+0] = chunkType
		w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0)
		w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8)
		w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16)
		w.obuf[len(magicChunk)+4] = uint8(checksum >> 0)
		w.obuf[len(magicChunk)+5] = uint8(checksum >> 8)
		w.obuf[len(magicChunk)+6] = uint8(checksum >> 16)
		w.obuf[len(magicChunk)+7] = uint8(checksum >> 24)
		if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil {
			w.err = err
			return nRet, err
		}
		if chunkType == chunkTypeUncompressedData {
			// Uncompressed payloads are written straight from p's backing
			// array rather than copied through obuf.
			if _, err := w.w.Write(uncompressed); err != nil {
				w.err = err
				return nRet, err
			}
		}
		nRet += len(uncompressed)
	}
	return nRet, nil
}
| // Flush flushes the Writer to its underlying io.Writer. | |||
| func (w *Writer) Flush() error { | |||
| if w.err != nil { | |||
| return w.err | |||
| } | |||
| if len(w.ibuf) == 0 { | |||
| return nil | |||
| } | |||
| w.write(w.ibuf) | |||
| w.ibuf = w.ibuf[:0] | |||
| return w.err | |||
| } | |||
| // Close calls Flush and then closes the Writer. | |||
| func (w *Writer) Close() error { | |||
| w.Flush() | |||
| ret := w.err | |||
| if w.err == nil { | |||
| w.err = errClosed | |||
| } | |||
| return ret | |||
| } | |||
| @@ -0,0 +1,236 @@ | |||
| // Copyright 2016 The Snappy-Go Authors. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| package snapref | |||
// load32 returns the little-endian uint32 at b[i:i+4].
func load32(b []byte, i int) uint32 {
	b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next lines.
	var v uint32
	for j := 3; j >= 0; j-- {
		v = v<<8 | uint32(b[j])
	}
	return v
}
// load64 returns the little-endian uint64 at b[i:i+8].
func load64(b []byte, i int) uint64 {
	b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next lines.
	var v uint64
	for j := 7; j >= 0; j-- {
		v = v<<8 | uint64(b[j])
	}
	return v
}
| // emitLiteral writes a literal chunk and returns the number of bytes written. | |||
| // | |||
| // It assumes that: | |||
| // dst is long enough to hold the encoded bytes | |||
| // 1 <= len(lit) && len(lit) <= 65536 | |||
| func emitLiteral(dst, lit []byte) int { | |||
| i, n := 0, uint(len(lit)-1) | |||
| switch { | |||
| case n < 60: | |||
| dst[0] = uint8(n)<<2 | tagLiteral | |||
| i = 1 | |||
| case n < 1<<8: | |||
| dst[0] = 60<<2 | tagLiteral | |||
| dst[1] = uint8(n) | |||
| i = 2 | |||
| default: | |||
| dst[0] = 61<<2 | tagLiteral | |||
| dst[1] = uint8(n) | |||
| dst[2] = uint8(n >> 8) | |||
| i = 3 | |||
| } | |||
| return i + copy(dst[i:], lit) | |||
| } | |||
// emitCopy writes a copy chunk to dst and returns the number of bytes written.
// Long copies are split into multiple ops, since a single tagCopy1/tagCopy2
// op can encode at most 64 bytes of length.
//
// It assumes that:
//	dst is long enough to hold the encoded bytes
//	1 <= offset && offset <= 65535
//	4 <= length && length <= 65535
func emitCopy(dst []byte, offset, length int) int {
	i := 0
	// The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The
	// threshold for this loop is a little higher (at 68 = 64 + 4), and the
	// length emitted down below is a little lower (at 60 = 64 - 4), because
	// it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed
	// by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as
	// a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as
	// 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a
	// tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an
	// encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1.
	for length >= 68 {
		// Emit a length 64 copy, encoded as 3 bytes.
		dst[i+0] = 63<<2 | tagCopy2
		dst[i+1] = uint8(offset)
		dst[i+2] = uint8(offset >> 8)
		i += 3
		length -= 64
	}
	if length > 64 {
		// Emit a length 60 copy, encoded as 3 bytes, leaving a remainder in
		// the tagCopy1-friendly range [5, 8].
		dst[i+0] = 59<<2 | tagCopy2
		dst[i+1] = uint8(offset)
		dst[i+2] = uint8(offset >> 8)
		i += 3
		length -= 60
	}
	if length >= 12 || offset >= 2048 {
		// Emit the remaining copy, encoded as 3 bytes (tagCopy1 cannot
		// represent this length/offset combination).
		dst[i+0] = uint8(length-1)<<2 | tagCopy2
		dst[i+1] = uint8(offset)
		dst[i+2] = uint8(offset >> 8)
		return i + 3
	}
	// Emit the remaining copy, encoded as 2 bytes: the high 3 offset bits and
	// length-4 share the tag byte.
	dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
	dst[i+1] = uint8(offset)
	return i + 2
}
// extendMatch returns the largest k such that k <= len(src) and that
// src[i:i+k-j] and src[j:k] have the same contents.
//
// It assumes that:
//
//	0 <= i && i < j && j <= len(src)
func extendMatch(src []byte, i, j int) int {
	// Walk both cursors forward while the bytes keep matching.
	for j < len(src) && src[i] == src[j] {
		i++
		j++
	}
	return j
}
// hash maps a 4-byte value u to a table index by multiplicative hashing,
// keeping the top (32 - shift) bits of the product.
func hash(u, shift uint32) uint32 {
	const multiplier = 0x1e35a7bd // constant used by the C++ snappy hash
	return (u * multiplier) >> shift
}
// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written. It returns d, the number of bytes written to dst.
//
// It also assumes that:
//	len(dst) >= MaxEncodedLen(len(src)) &&
//	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlock(dst, src []byte) (d int) {
	// Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
	// The table element type is uint16, as s < sLimit and sLimit < len(src)
	// and len(src) <= maxBlockSize and maxBlockSize == 65536.
	const (
		maxTableSize = 1 << 14
		// tableMask is redundant, but helps the compiler eliminate bounds
		// checks.
		tableMask = maxTableSize - 1
	)
	// Pick the smallest power-of-two table that covers len(src); a larger
	// shift means fewer hash bits are kept.
	shift := uint32(32 - 8)
	for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
		shift--
	}
	// In Go, all array elements are zero-initialized, so there is no advantage
	// to a smaller tableSize per se. However, it matches the C++ algorithm,
	// and in the asm versions of this code, we can get away with zeroing only
	// the first tableSize elements.
	var table [maxTableSize]uint16
	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := len(src) - inputMargin
	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := 0
	// The encoded form must start with a literal, as there are no previous
	// bytes to copy, so we start looking for hash matches at s == 1.
	s := 1
	nextHash := hash(load32(src, s), shift)
	for {
		// Copied from the C++ snappy implementation:
		//
		// Heuristic match skipping: If 32 bytes are scanned with no matches
		// found, start looking only at every other byte. If 32 more bytes are
		// scanned (or skipped), look at every third byte, etc.. When a match
		// is found, immediately go back to looking at every byte. This is a
		// small loss (~5% performance, ~0.1% density) for compressible data
		// due to more bookkeeping, but for non-compressible data (such as
		// JPEG) it's a huge win since the compressor quickly "realizes" the
		// data is incompressible and doesn't bother looking for matches
		// everywhere.
		//
		// The "skip" variable keeps track of how many bytes there are since
		// the last match; dividing it by 32 (ie. right-shifting by five) gives
		// the number of bytes to move ahead for each iteration.
		skip := 32
		nextS := s
		candidate := 0
		for {
			s = nextS
			bytesBetweenHashLookups := skip >> 5
			nextS = s + bytesBetweenHashLookups
			skip += bytesBetweenHashLookups
			if nextS > sLimit {
				goto emitRemainder
			}
			// Look up the previous position (if any) that hashed to the same
			// bucket, then record the current position in its place.
			candidate = int(table[nextHash&tableMask])
			table[nextHash&tableMask] = uint16(s)
			nextHash = hash(load32(src, nextS), shift)
			if load32(src, s) == load32(src, candidate) {
				break
			}
		}
		// A 4-byte match has been found. We'll later see if more than 4 bytes
		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
		// them as literal bytes.
		d += emitLiteral(dst[d:], src[nextEmit:s])
		// Call emitCopy, and then see if another emitCopy could be our next
		// move. Repeat until we find no match for the input immediately after
		// what was consumed by the last emitCopy call.
		//
		// If we exit this loop normally then we need to call emitLiteral next,
		// though we don't yet know how big the literal will be. We handle that
		// by proceeding to the next iteration of the main loop. We also can
		// exit this loop via goto if we get close to exhausting the input.
		for {
			// Invariant: we have a 4-byte match at s, and no need to emit any
			// literal bytes prior to s.
			base := s
			// Extend the 4-byte match as long as possible.
			//
			// This is an inlined version of:
			//	s = extendMatch(src, candidate+4, s+4)
			s += 4
			for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 {
			}
			d += emitCopy(dst[d:], base-candidate, s-base)
			nextEmit = s
			if s >= sLimit {
				goto emitRemainder
			}
			// We could immediately start working at s now, but to improve
			// compression we first update the hash table at s-1 and at s. If
			// another emitCopy is not our next move, also calculate nextHash
			// at s+1. At least on GOARCH=amd64, these three hash calculations
			// are faster as one load64 call (with some shifts) instead of
			// three load32 calls.
			x := load64(src, s-1)
			prevHash := hash(uint32(x>>0), shift)
			table[prevHash&tableMask] = uint16(s - 1)
			currHash := hash(uint32(x>>8), shift)
			candidate = int(table[currHash&tableMask])
			table[currHash&tableMask] = uint16(s)
			if uint32(x>>8) != load32(src, candidate) {
				nextHash = hash(uint32(x>>16), shift)
				s++
				break
			}
		}
	}
emitRemainder:
	// Emit any trailing bytes (including the inputMargin tail) as one final
	// literal chunk.
	if nextEmit < len(src) {
		d += emitLiteral(dst[d:], src[nextEmit:])
	}
	return d
}
| @@ -0,0 +1,98 @@ | |||
| // Copyright 2011 The Snappy-Go Authors. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| // Package snapref implements the Snappy compression format. It aims for very | |||
| // high speeds and reasonable compression. | |||
| // | |||
| // There are actually two Snappy formats: block and stream. They are related, | |||
| // but different: trying to decompress block-compressed data as a Snappy stream | |||
| // will fail, and vice versa. The block format is the Decode and Encode | |||
| // functions and the stream format is the Reader and Writer types. | |||
| // | |||
| // The block format, the more common case, is used when the complete size (the | |||
| // number of bytes) of the original data is known upfront, at the time | |||
| // compression starts. The stream format, also known as the framing format, is | |||
| // for when that isn't always true. | |||
| // | |||
| // The canonical, C++ implementation is at https://github.com/google/snappy and | |||
| // it only implements the block format. | |||
| package snapref | |||
| import ( | |||
| "hash/crc32" | |||
| ) | |||
| /* | |||
| Each encoded block begins with the varint-encoded length of the decoded data, | |||
| followed by a sequence of chunks. Chunks begin and end on byte boundaries. The | |||
| first byte of each chunk is broken into its 2 least and 6 most significant bits | |||
| called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. | |||
| Zero means a literal tag. All other values mean a copy tag. | |||
| For literal tags: | |||
| - If m < 60, the next 1 + m bytes are literal bytes. | |||
| - Otherwise, let n be the little-endian unsigned integer denoted by the next | |||
| m - 59 bytes. The next 1 + n bytes after that are literal bytes. | |||
| For copy tags, length bytes are copied from offset bytes ago, in the style of | |||
| Lempel-Ziv compression algorithms. In particular: | |||
| - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). | |||
| The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 | |||
| of the offset. The next byte is bits 0-7 of the offset. | |||
| - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). | |||
| The length is 1 + m. The offset is the little-endian unsigned integer | |||
| denoted by the next 2 bytes. | |||
| - For l == 3, this tag is a legacy format that is no longer issued by most | |||
| encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in | |||
| [1, 65). The length is 1 + m. The offset is the little-endian unsigned | |||
| integer denoted by the next 4 bytes. | |||
| */ | |||
// Chunk tags: the 2 least significant bits of the first byte of each
// block-format chunk select between a literal and the three copy encodings.
const (
	tagLiteral = 0x00
	tagCopy1   = 0x01
	tagCopy2   = 0x02
	tagCopy4   = 0x03
)
// Framing (stream) format constants.
const (
	checksumSize    = 4
	chunkHeaderSize = 4
	magicChunk      = "\xff\x06\x00\x00" + magicBody
	magicBody       = "sNaPpY"
	// maxBlockSize is the maximum size of the input to encodeBlock. It is not
	// part of the wire format per se, but some parts of the encoder assume
	// that an offset fits into a uint16.
	//
	// Also, for the framing format (Writer type instead of Encode function),
	// https://github.com/google/snappy/blob/master/framing_format.txt says
	// that "the uncompressed data in a chunk must be no longer than 65536
	// bytes".
	maxBlockSize = 65536
	// maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is
	// hard coded to be a const instead of a variable, so that obufLen can also
	// be a const. Their equivalence is confirmed by
	// TestMaxEncodedLenOfMaxBlockSize.
	maxEncodedLenOfMaxBlockSize = 76490
	// obufLen is the size of a Writer's output buffer: stream header plus one
	// maximally-encoded block.
	obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize
	obufLen       = obufHeaderLen + maxEncodedLenOfMaxBlockSize
)
// Chunk types defined by the framing format; values 0x02-0x7f are reserved
// unskippable chunks and 0x80-0xfd are reserved skippable chunks.
const (
	chunkTypeCompressedData   = 0x00
	chunkTypeUncompressedData = 0x01
	chunkTypePadding          = 0xfe
	chunkTypeStreamIdentifier = 0xff
)
// crcTable is the Castagnoli polynomial table required by the framing format.
var crcTable = crc32.MakeTable(crc32.Castagnoli)
// crc implements the masked checksum specified in section 3 of
// https://github.com/google/snappy/blob/master/framing_format.txt
func crc(b []byte) uint32 {
	// CRC-32C, then rotate right by 15 bits and add a constant so that the
	// checksum of data that is itself a CRC does not look like a CRC.
	sum := crc32.Update(0, crcTable, b)
	rotated := sum>>15 | sum<<17
	return rotated + 0xa282ead8
}
| @@ -0,0 +1,15 @@ | |||
| testdata/bench | |||
| # These explicitly listed benchmark data files are for an obsolete version of | |||
| # snappy_test.go. | |||
| testdata/alice29.txt | |||
| testdata/asyoulik.txt | |||
| testdata/fireworks.jpeg | |||
| testdata/geo.protodata | |||
| testdata/html | |||
| testdata/html_x_4 | |||
| testdata/kppkn.gtb | |||
| testdata/lcet10.txt | |||
| testdata/paper-100k.pdf | |||
| testdata/plrabn12.txt | |||
| testdata/urls.10K | |||
| @@ -0,0 +1,28 @@ | |||
| Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. | |||
| Copyright (c) 2019 Klaus Post. All rights reserved. | |||
| Redistribution and use in source and binary forms, with or without | |||
| modification, are permitted provided that the following conditions are | |||
| met: | |||
| * Redistributions of source code must retain the above copyright | |||
| notice, this list of conditions and the following disclaimer. | |||
| * Redistributions in binary form must reproduce the above | |||
| copyright notice, this list of conditions and the following disclaimer | |||
| in the documentation and/or other materials provided with the | |||
| distribution. | |||
| * Neither the name of Google Inc. nor the names of its | |||
| contributors may be used to endorse or promote products derived from | |||
| this software without specific prior written permission. | |||
| THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |||
| "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |||
| LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |||
| A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |||
| OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |||
| SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |||
| LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |||
| DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |||
| THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |||
| (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |||
| OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |||
| @@ -0,0 +1,965 @@ | |||
| # S2 Compression | |||
| S2 is an extension of [Snappy](https://github.com/google/snappy). | |||
| S2 is aimed for high throughput, which is why it features concurrent compression for bigger payloads. | |||
| Decoding is compatible with Snappy compressed content, but content compressed with S2 cannot be decompressed by Snappy. | |||
| This means that S2 can seamlessly replace Snappy without converting compressed content. | |||
| S2 can produce Snappy compatible output, faster and better than Snappy. | |||
| If you want full benefit of the changes you should use s2 without Snappy compatibility. | |||
| S2 is designed to have high throughput on content that cannot be compressed. | |||
| This is important, so you don't have to worry about spending CPU cycles on already compressed data. | |||
| ## Benefits over Snappy | |||
| * Better compression | |||
| * Adjustable compression (3 levels) | |||
| * Concurrent stream compression | |||
| * Faster decompression, even for Snappy compatible content | |||
| * Concurrent Snappy/S2 stream decompression | |||
| * Ability to quickly skip forward in compressed stream | |||
| * Random seeking with indexes | |||
| * Compatible with reading Snappy compressed content | |||
| * Smaller block size overhead on incompressible blocks | |||
| * Block concatenation | |||
| * Uncompressed stream mode | |||
| * Automatic stream size padding | |||
| * Snappy compatible block compression | |||
| ## Drawbacks over Snappy | |||
| * Not optimized for 32 bit systems | |||
| * Streams use slightly more memory due to larger blocks and concurrency (configurable) | |||
| # Usage | |||
| Installation: `go get -u github.com/klauspost/compress/s2` | |||
| Full package documentation: | |||
| [![godoc][1]][2] | |||
| [1]: https://godoc.org/github.com/klauspost/compress?status.svg | |||
| [2]: https://godoc.org/github.com/klauspost/compress/s2 | |||
| ## Compression | |||
| ```Go | |||
| func EncodeStream(src io.Reader, dst io.Writer) error { | |||
| enc := s2.NewWriter(dst) | |||
| _, err := io.Copy(enc, src) | |||
| if err != nil { | |||
| enc.Close() | |||
| return err | |||
| } | |||
| // Blocks until compression is done. | |||
| return enc.Close() | |||
| } | |||
| ``` | |||
| You should always call `enc.Close()`, otherwise you will leak resources and your encode will be incomplete. | |||
| For the best throughput, you should attempt to reuse the `Writer` using the `Reset()` method. | |||
| The Writer in S2 is always buffered, therefore `NewBufferedWriter` in Snappy can be replaced with `NewWriter` in S2. | |||
| It is possible to flush any buffered data using the `Flush()` method. | |||
| This will block until all data sent to the encoder has been written to the output. | |||
| S2 also supports the `io.ReaderFrom` interface, which will consume all input from a reader. | |||
| As a final method to compress data, if you have a single block of data you would like to have encoded as a stream, | |||
| a slightly more efficient method is to use the `EncodeBuffer` method. | |||
| This will take ownership of the buffer until the stream is closed. | |||
| ```Go | |||
| func EncodeStream(src []byte, dst io.Writer) error { | |||
| enc := s2.NewWriter(dst) | |||
| // The encoder owns the buffer until Flush or Close is called. | |||
| err := enc.EncodeBuffer(buf) | |||
| if err != nil { | |||
| enc.Close() | |||
| return err | |||
| } | |||
| // Blocks until compression is done. | |||
| return enc.Close() | |||
| } | |||
| ``` | |||
| Each call to `EncodeBuffer` will result in discrete blocks being created without buffering, | |||
| so it should only be used a single time per stream. | |||
| If you need to write several blocks, you should use the regular io.Writer interface. | |||
| ## Decompression | |||
| ```Go | |||
| func DecodeStream(src io.Reader, dst io.Writer) error { | |||
| dec := s2.NewReader(src) | |||
| _, err := io.Copy(dst, dec) | |||
| return err | |||
| } | |||
| ``` | |||
| Similar to the Writer, a Reader can be reused using the `Reset` method. | |||
| For the best possible throughput, there is a `EncodeBuffer(buf []byte)` function available. | |||
| However, it requires that the provided buffer isn't used after it is handed over to S2 and until the stream is flushed or closed. | |||
| For smaller data blocks, there is also a non-streaming interface: `Encode()`, `EncodeBetter()` and `Decode()`. | |||
Do however note that these functions (similar to Snappy) do not provide validation of data,
so data corruption may be undetected. Stream encoding provides CRC checks of data.
| It is possible to efficiently skip forward in a compressed stream using the `Skip()` method. | |||
| For big skips the decompressor is able to skip blocks without decompressing them. | |||
| ## Single Blocks | |||
| Similar to Snappy S2 offers single block compression. | |||
| Blocks do not offer the same flexibility and safety as streams, | |||
| but may be preferable for very small payloads, less than 100K. | |||
| Using a simple `dst := s2.Encode(nil, src)` will compress `src` and return the compressed result. | |||
| It is possible to provide a destination buffer. | |||
| If the buffer has a capacity of `s2.MaxEncodedLen(len(src))` it will be used. | |||
If not, a new one will be allocated.
| Alternatively `EncodeBetter`/`EncodeBest` can also be used for better, but slightly slower compression. | |||
| Similarly to decompress a block you can use `dst, err := s2.Decode(nil, src)`. | |||
| Again an optional destination buffer can be supplied. | |||
| The `s2.DecodedLen(src)` can be used to get the minimum capacity needed. | |||
| If that is not satisfied a new buffer will be allocated. | |||
Block functions always operate on a single goroutine since they should only be used for small payloads.
| # Commandline tools | |||
Some very simple commandline tools are provided; `s2c` for compression and `s2d` for decompression.
| Binaries can be downloaded on the [Releases Page](https://github.com/klauspost/compress/releases). | |||
| Installing then requires Go to be installed. To install them, use: | |||
| `go install github.com/klauspost/compress/s2/cmd/s2c@latest && go install github.com/klauspost/compress/s2/cmd/s2d@latest` | |||
| To build binaries to the current folder use: | |||
| `go build github.com/klauspost/compress/s2/cmd/s2c && go build github.com/klauspost/compress/s2/cmd/s2d` | |||
| ## s2c | |||
| ``` | |||
| Usage: s2c [options] file1 file2 | |||
| Compresses all files supplied as input separately. | |||
| Output files are written as 'filename.ext.s2' or 'filename.ext.snappy'. | |||
| By default output files will be overwritten. | |||
| Use - as the only file name to read from stdin and write to stdout. | |||
| Wildcards are accepted: testdir/*.txt will compress all files in testdir ending with .txt | |||
| Directories can be wildcards as well. testdir/*/*.txt will match testdir/subdir/b.txt | |||
| File names beginning with 'http://' and 'https://' will be downloaded and compressed. | |||
| Only http response code 200 is accepted. | |||
| Options: | |||
| -bench int | |||
| Run benchmark n times. No output will be written | |||
| -blocksize string | |||
| Max block size. Examples: 64K, 256K, 1M, 4M. Must be power of two and <= 4MB (default "4M") | |||
| -c Write all output to stdout. Multiple input files will be concatenated | |||
| -cpu int | |||
| Compress using this amount of threads (default 32) | |||
| -faster | |||
| Compress faster, but with a minor compression loss | |||
| -help | |||
| Display help | |||
| -index | |||
| Add seek index (default true) | |||
| -o string | |||
| Write output to another file. Single input file only | |||
| -pad string | |||
| Pad size to a multiple of this value, Examples: 500, 64K, 256K, 1M, 4M, etc (default "1") | |||
| -q Don't write any output to terminal, except errors | |||
| -rm | |||
| Delete source file(s) after successful compression | |||
| -safe | |||
| Do not overwrite output files | |||
| -slower | |||
| Compress more, but a lot slower | |||
| -snappy | |||
| Generate Snappy compatible output stream | |||
| -verify | |||
| Verify written files | |||
| ``` | |||
| ## s2d | |||
| ``` | |||
| Usage: s2d [options] file1 file2 | |||
| Decompresses all files supplied as input. Input files must end with '.s2' or '.snappy'. | |||
| Output file names have the extension removed. By default output files will be overwritten. | |||
| Use - as the only file name to read from stdin and write to stdout. | |||
Wildcards are accepted: testdir/*.txt will decompress all files in testdir ending with .txt
| Directories can be wildcards as well. testdir/*/*.txt will match testdir/subdir/b.txt | |||
| File names beginning with 'http://' and 'https://' will be downloaded and decompressed. | |||
| Extensions on downloaded files are ignored. Only http response code 200 is accepted. | |||
| Options: | |||
| -bench int | |||
| Run benchmark n times. No output will be written | |||
| -c Write all output to stdout. Multiple input files will be concatenated | |||
| -help | |||
| Display help | |||
| -o string | |||
| Write output to another file. Single input file only | |||
| -offset string | |||
| Start at offset. Examples: 92, 64K, 256K, 1M, 4M. Requires Index | |||
| -q Don't write any output to terminal, except errors | |||
| -rm | |||
| Delete source file(s) after successful decompression | |||
| -safe | |||
| Do not overwrite output files | |||
| -tail string | |||
| Return last of compressed file. Examples: 92, 64K, 256K, 1M, 4M. Requires Index | |||
| -verify | |||
| Verify files, but do not write output | |||
| ``` | |||
| ## s2sx: self-extracting archives | |||
| s2sx allows creating self-extracting archives with no dependencies. | |||
| By default, executables are created for the same platforms as the host os, | |||
| but this can be overridden with `-os` and `-arch` parameters. | |||
| Extracted files have 0666 permissions, except when untar option used. | |||
| ``` | |||
| Usage: s2sx [options] file1 file2 | |||
| Compresses all files supplied as input separately. | |||
| If files have '.s2' extension they are assumed to be compressed already. | |||
| Output files are written as 'filename.s2sx' and with '.exe' for windows targets. | |||
| If output is big, an additional file with ".more" is written. This must be included as well. | |||
| By default output files will be overwritten. | |||
| Wildcards are accepted: testdir/*.txt will compress all files in testdir ending with .txt | |||
| Directories can be wildcards as well. testdir/*/*.txt will match testdir/subdir/b.txt | |||
| Options: | |||
| -arch string | |||
| Destination architecture (default "amd64") | |||
| -c Write all output to stdout. Multiple input files will be concatenated | |||
| -cpu int | |||
| Compress using this amount of threads (default 32) | |||
| -help | |||
| Display help | |||
| -max string | |||
| Maximum executable size. Rest will be written to another file. (default "1G") | |||
| -os string | |||
| Destination operating system (default "windows") | |||
| -q Don't write any output to terminal, except errors | |||
| -rm | |||
| Delete source file(s) after successful compression | |||
| -safe | |||
| Do not overwrite output files | |||
| -untar | |||
| Untar on destination | |||
| ``` | |||
| Available platforms are: | |||
| * darwin-amd64 | |||
| * darwin-arm64 | |||
| * linux-amd64 | |||
| * linux-arm | |||
| * linux-arm64 | |||
| * linux-mips64 | |||
| * linux-ppc64le | |||
| * windows-386 | |||
| * windows-amd64 | |||
| By default, there is a size limit of 1GB for the output executable. | |||
| When this is exceeded the remaining file content is written to a file called | |||
output+`.more`. This file must be included and placed
alongside the executable for a successful extraction.
| This file *must* have the same name as the executable, so if the executable is renamed, | |||
| so must the `.more` file. | |||
| This functionality is disabled with stdin/stdout. | |||
| ### Self-extracting TAR files | |||
| If you wrap a TAR file you can specify `-untar` to make it untar on the destination host. | |||
| Files are extracted to the current folder with the path specified in the tar file. | |||
| Note that tar files are not validated before they are wrapped. | |||
| For security reasons files that move below the root folder are not allowed. | |||
| # Performance | |||
| This section will focus on comparisons to Snappy. | |||
| This package is solely aimed at replacing Snappy as a high speed compression package. | |||
| If you are mainly looking for better compression [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) | |||
| gives better compression, but typically at speeds slightly below "better" mode in this package. | |||
| Compression is increased compared to Snappy, mostly around 5-20% and the throughput is typically 25-40% increased (single threaded) compared to the Snappy Go implementation. | |||
| Streams are concurrently compressed. The stream will be distributed among all available CPU cores for the best possible throughput. | |||
| A "better" compression mode is also available. This allows to trade a bit of speed for a minor compression gain. | |||
| The content compressed in this mode is fully compatible with the standard decoder. | |||
| Snappy vs S2 **compression** speed on 16 core (32 thread) computer, using all threads and a single thread (1 CPU): | |||
| | File | S2 speed | S2 Throughput | S2 % smaller | S2 "better" | "better" throughput | "better" % smaller | | |||
| |-----------------------------------------------------------------------------------------------------|----------|---------------|--------------|-------------|---------------------|--------------------| | |||
| | [rawstudio-mint14.tar](https://files.klauspost.com/compress/rawstudio-mint14.7z) | 12.70x | 10556 MB/s | 7.35% | 4.15x | 3455 MB/s | 12.79% | | |||
| | (1 CPU) | 1.14x | 948 MB/s | - | 0.42x | 349 MB/s | - | | |||
| | [github-june-2days-2019.json](https://files.klauspost.com/compress/github-june-2days-2019.json.zst) | 17.13x | 14484 MB/s | 31.60% | 10.09x | 8533 MB/s | 37.71% | | |||
| | (1 CPU) | 1.33x | 1127 MB/s | - | 0.70x | 589 MB/s | - | | |||
| | [github-ranks-backup.bin](https://files.klauspost.com/compress/github-ranks-backup.bin.zst) | 15.14x | 12000 MB/s | -5.79% | 6.59x | 5223 MB/s | 5.80% | | |||
| | (1 CPU) | 1.11x | 877 MB/s | - | 0.47x | 370 MB/s | - | | |||
| | [consensus.db.10gb](https://files.klauspost.com/compress/consensus.db.10gb.zst) | 14.62x | 12116 MB/s | 15.90% | 5.35x | 4430 MB/s | 16.08% | | |||
| | (1 CPU) | 1.38x | 1146 MB/s | - | 0.38x | 312 MB/s | - | | |||
| | [adresser.json](https://files.klauspost.com/compress/adresser.json.zst) | 8.83x | 17579 MB/s | 43.86% | 6.54x | 13011 MB/s | 47.23% | | |||
| | (1 CPU) | 1.14x | 2259 MB/s | - | 0.74x | 1475 MB/s | - | | |||
| | [gob-stream](https://files.klauspost.com/compress/gob-stream.7z) | 16.72x | 14019 MB/s | 24.02% | 10.11x | 8477 MB/s | 30.48% | | |||
| | (1 CPU) | 1.24x | 1043 MB/s | - | 0.70x | 586 MB/s | - | | |||
| | [10gb.tar](http://mattmahoney.net/dc/10gb.html) | 13.33x | 9254 MB/s | 1.84% | 6.75x | 4686 MB/s | 6.72% | | |||
| | (1 CPU) | 0.97x | 672 MB/s | - | 0.53x | 366 MB/s | - | | |||
| | sharnd.out.2gb | 2.11x | 12639 MB/s | 0.01% | 1.98x | 11833 MB/s | 0.01% | | |||
| | (1 CPU) | 0.93x | 5594 MB/s | - | 1.34x | 8030 MB/s | - | | |||
| | [enwik9](http://mattmahoney.net/dc/textdata.html) | 19.34x | 8220 MB/s | 3.98% | 7.87x | 3345 MB/s | 15.82% | | |||
| | (1 CPU) | 1.06x | 452 MB/s | - | 0.50x | 213 MB/s | - | | |||
| | [silesia.tar](http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip) | 10.48x | 6124 MB/s | 5.67% | 3.76x | 2197 MB/s | 12.60% | | |||
| | (1 CPU) | 0.97x | 568 MB/s | - | 0.46x | 271 MB/s | - | | |||
| | [enwik10](https://encode.su/threads/3315-enwik10-benchmark-results) | 21.07x | 9020 MB/s | 6.36% | 6.91x | 2959 MB/s | 16.95% | | |||
| | (1 CPU) | 1.07x | 460 MB/s | - | 0.51x | 220 MB/s | - | | |||
| ### Legend | |||
| * `S2 speed`: Speed of S2 compared to Snappy, using 16 cores and 1 core. | |||
| * `S2 throughput`: Throughput of S2 in MB/s. | |||
| * `S2 % smaller`: How many percent of the Snappy output size is S2 better. | |||
| * `S2 "better"`: Speed when enabling "better" compression mode in S2 compared to Snappy. | |||
| * `"better" throughput`: Speed when enabling "better" compression mode in S2 compared to Snappy. | |||
| * `"better" % smaller`: How many percent of the Snappy output size is S2 better when using "better" compression. | |||
| There is a good speedup across the board when using a single thread and a significant speedup when using multiple threads. | |||
| Machine generated data gets by far the biggest compression boost, with size being reduced by up to 45% of Snappy size. | |||
| The "better" compression mode sees a good improvement in all cases, but usually at a performance cost. | |||
| Incompressible content (`sharnd.out.2gb`, 2GB random data) sees the smallest speedup. | |||
| This is likely dominated by synchronization overhead, which is confirmed by the fact that single threaded performance is higher (see above). | |||
| ## Decompression | |||
| S2 attempts to create content that is also fast to decompress, except in "better" mode where the smallest representation is used. | |||
| S2 vs Snappy **decompression** speed. Both operating on single core: | |||
| | File | S2 Throughput | vs. Snappy | Better Throughput | vs. Snappy | | |||
| |-----------------------------------------------------------------------------------------------------|---------------|------------|-------------------|------------| | |||
| | [rawstudio-mint14.tar](https://files.klauspost.com/compress/rawstudio-mint14.7z) | 2117 MB/s | 1.14x | 1738 MB/s | 0.94x | | |||
| | [github-june-2days-2019.json](https://files.klauspost.com/compress/github-june-2days-2019.json.zst) | 2401 MB/s | 1.25x | 2307 MB/s | 1.20x | | |||
| | [github-ranks-backup.bin](https://files.klauspost.com/compress/github-ranks-backup.bin.zst) | 2075 MB/s | 0.98x | 1764 MB/s | 0.83x | | |||
| | [consensus.db.10gb](https://files.klauspost.com/compress/consensus.db.10gb.zst) | 2967 MB/s | 1.05x | 2885 MB/s | 1.02x | | |||
| | [adresser.json](https://files.klauspost.com/compress/adresser.json.zst) | 4141 MB/s | 1.07x | 4184 MB/s | 1.08x | | |||
| | [gob-stream](https://files.klauspost.com/compress/gob-stream.7z) | 2264 MB/s | 1.12x | 2185 MB/s | 1.08x | | |||
| | [10gb.tar](http://mattmahoney.net/dc/10gb.html) | 1525 MB/s | 1.03x | 1347 MB/s | 0.91x | | |||
| | sharnd.out.2gb | 3813 MB/s | 0.79x | 3900 MB/s | 0.81x | | |||
| | [enwik9](http://mattmahoney.net/dc/textdata.html) | 1246 MB/s | 1.29x | 967 MB/s | 1.00x | | |||
| | [silesia.tar](http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip) | 1433 MB/s | 1.12x | 1203 MB/s | 0.94x | | |||
| | [enwik10](https://encode.su/threads/3315-enwik10-benchmark-results) | 1284 MB/s | 1.32x | 1010 MB/s | 1.04x | | |||
| ### Legend | |||
| * `S2 Throughput`: Decompression speed of S2 encoded content. | |||
| * `Better Throughput`: Decompression speed of S2 "better" encoded content. | |||
| * `vs Snappy`: Decompression speed of S2 "better" mode compared to Snappy and absolute speed. | |||
| While the decompression code hasn't changed, there is a significant speedup in decompression speed. | |||
| S2 prefers longer matches and will typically only find matches that are 6 bytes or longer. | |||
| While this reduces compression a bit, it improves decompression speed. | |||
| The "better" compression mode will actively look for shorter matches, which is why it has a decompression speed quite similar to Snappy. | |||
| Without assembly decompression is also very fast; single goroutine decompression speed. No assembly: | |||
| | File | S2 Throughput | S2 throughput | | |||
| |--------------------------------|--------------|---------------| | |||
| | consensus.db.10gb.s2 | 1.84x | 2289.8 MB/s | | |||
| | 10gb.tar.s2 | 1.30x | 867.07 MB/s | | |||
| | rawstudio-mint14.tar.s2 | 1.66x | 1329.65 MB/s | | |||
| | github-june-2days-2019.json.s2 | 2.36x | 1831.59 MB/s | | |||
| | github-ranks-backup.bin.s2 | 1.73x | 1390.7 MB/s | | |||
| | enwik9.s2 | 1.67x | 681.53 MB/s | | |||
| | adresser.json.s2 | 3.41x | 4230.53 MB/s | | |||
| | silesia.tar.s2 | 1.52x | 811.58 MB/s | | |||
| Even though S2 typically compresses better than Snappy, decompression speed is always better. | |||
| ### Concurrent Stream Decompression | |||
| For full stream decompression S2 offers a [DecodeConcurrent](https://pkg.go.dev/github.com/klauspost/compress/s2#Reader.DecodeConcurrent) | |||
| that will decode a full stream using multiple goroutines. | |||
| Example scaling, AMD Ryzen 3950X, 16 cores, decompression using `s2d -bench=3 <input>`, best of 3: | |||
| | Input | `-cpu=1` | `-cpu=2` | `-cpu=4` | `-cpu=8` | `-cpu=16` | | |||
| |-------------------------------------------|------------|------------|------------|------------|-------------| | |||
| | enwik10.snappy | 1098.6MB/s | 1819.8MB/s | 3625.6MB/s | 6910.6MB/s | 10818.2MB/s | | |||
| | enwik10.s2 | 1303.5MB/s | 2606.1MB/s | 4847.9MB/s | 8878.4MB/s | 9592.1MB/s | | |||
| | sofia-air-quality-dataset.tar.snappy | 1302.0MB/s | 2165.0MB/s | 4244.5MB/s | 8241.0MB/s | 12920.5MB/s | | |||
| | sofia-air-quality-dataset.tar.s2 | 1399.2MB/s | 2463.2MB/s | 5196.5MB/s | 9639.8MB/s | 11439.5MB/s | | |||
| | sofia-air-quality-dataset.tar.s2 (no asm) | 837.5MB/s | 1652.6MB/s | 3183.6MB/s | 5945.0MB/s | 9620.7MB/s | | |||
| Scaling can be expected to be pretty linear until memory bandwidth is saturated. | |||
| For now the DecodeConcurrent can only be used for full streams without seeking or combining with regular reads. | |||
| ## Block compression | |||
| When compressing blocks, no concurrent compression is performed, just as with Snappy. | |||
| This is because blocks are for smaller payloads and generally will not benefit from concurrent compression. | |||
| An important change is that incompressible blocks will not be more than at most 10 bytes bigger than the input. | |||
| In rare, worst case scenario Snappy blocks could be significantly bigger than the input. | |||
| ### Mixed content blocks | |||
| The most reliable is a wide dataset. | |||
| For this we use [`webdevdata.org-2015-01-07-subset`](https://files.klauspost.com/compress/webdevdata.org-2015-01-07-4GB-subset.7z), | |||
| 53927 files, total input size: 4,014,735,833 bytes. Single goroutine used. | |||
| | * | Input | Output | Reduction | MB/s | | |||
| |-------------------|------------|------------|-----------|--------| | |||
| | S2 | 4014735833 | 1059723369 | 73.60% | **934.34** | | |||
| | S2 Better | 4014735833 | 969670507 | 75.85% | 532.70 | | |||
| | S2 Best | 4014735833 | 906625668 | **77.85%** | 46.84 | | |||
| | Snappy | 4014735833 | 1128706759 | 71.89% | 762.59 | | |||
| | S2, Snappy Output | 4014735833 | 1093821420 | 72.75% | 908.60 | | |||
| | LZ4 | 4014735833 | 1079259294 | 73.12% | 526.94 | | |||
| S2 delivers both the best single threaded throughput with regular mode and the best compression rate with "best". | |||
| "Better" mode provides the same compression speed as LZ4 with better compression ratio. | |||
| When outputting Snappy compatible output it still delivers better throughput (150MB/s more) and better compression. | |||
| As can be seen from the other benchmarks decompression should also be easier on the S2 generated output. | |||
| Though they cannot be compared due to different decompression speeds here are the speed/size comparisons for | |||
| other Go compressors: | |||
| | * | Input | Output | Reduction | MB/s | | |||
| |-------------------|------------|------------|-----------|--------| | |||
| | Zstd Fastest (Go) | 4014735833 | 794608518 | 80.21% | 236.04 | | |||
| | Zstd Best (Go) | 4014735833 | 704603356 | 82.45% | 35.63 | | |||
| | Deflate (Go) l1 | 4014735833 | 871294239 | 78.30% | 214.04 | | |||
| | Deflate (Go) l9 | 4014735833 | 730389060 | 81.81% | 41.17 | | |||
| ### Standard block compression | |||
| Benchmarking single block performance is subject to a lot more variation since it only tests a limited number of file patterns. | |||
| So individual benchmarks should only be seen as a guideline and the overall picture is more important. | |||
| These micro-benchmarks are with data in cache and trained branch predictors. For a more realistic benchmark see the mixed content above. | |||
| Block compression. Parallel benchmark running on 16 cores, 16 goroutines. | |||
| AMD64 assembly is used for both S2 and Snappy. | |||
| | Absolute Perf | Snappy size | S2 Size | Snappy Speed | S2 Speed | Snappy dec | S2 dec | | |||
| |-----------------------|-------------|---------|--------------|-------------|-------------|-------------| | |||
| | html | 22843 | 21111 | 16246 MB/s | 17438 MB/s | 40972 MB/s | 49263 MB/s | | |||
| | urls.10K | 335492 | 287326 | 7943 MB/s | 9693 MB/s | 22523 MB/s | 26484 MB/s | | |||
| | fireworks.jpeg | 123034 | 123100 | 349544 MB/s | 273889 MB/s | 718321 MB/s | 827552 MB/s | | |||
| | fireworks.jpeg (200B) | 146 | 155 | 8869 MB/s | 17773 MB/s | 33691 MB/s | 52421 MB/s | | |||
| | paper-100k.pdf | 85304 | 84459 | 167546 MB/s | 101263 MB/s | 326905 MB/s | 291944 MB/s | | |||
| | html_x_4 | 92234 | 21113 | 15194 MB/s | 50670 MB/s | 30843 MB/s | 32217 MB/s | | |||
| | alice29.txt | 88034 | 85975 | 5936 MB/s | 6139 MB/s | 12882 MB/s | 20044 MB/s | | |||
| | asyoulik.txt | 77503 | 79650 | 5517 MB/s | 6366 MB/s | 12735 MB/s | 22806 MB/s | | |||
| | lcet10.txt | 234661 | 220670 | 6235 MB/s | 6067 MB/s | 14519 MB/s | 18697 MB/s | | |||
| | plrabn12.txt | 319267 | 317985 | 5159 MB/s | 5726 MB/s | 11923 MB/s | 19901 MB/s | | |||
| | geo.protodata | 23335 | 18690 | 21220 MB/s | 26529 MB/s | 56271 MB/s | 62540 MB/s | | |||
| | kppkn.gtb | 69526 | 65312 | 9732 MB/s | 8559 MB/s | 18491 MB/s | 18969 MB/s | | |||
| | alice29.txt (128B) | 80 | 82 | 6691 MB/s | 15489 MB/s | 31883 MB/s | 38874 MB/s | | |||
| | alice29.txt (1000B) | 774 | 774 | 12204 MB/s | 13000 MB/s | 48056 MB/s | 52341 MB/s | | |||
| | alice29.txt (10000B) | 6648 | 6933 | 10044 MB/s | 12806 MB/s | 32378 MB/s | 46322 MB/s | | |||
| | alice29.txt (20000B) | 12686 | 13574 | 7733 MB/s | 11210 MB/s | 30566 MB/s | 58969 MB/s | | |||
| | Relative Perf | Snappy size | S2 size improved | S2 Speed | S2 Dec Speed | | |||
| |-----------------------|-------------|------------------|----------|--------------| | |||
| | html | 22.31% | 7.58% | 1.07x | 1.20x | | |||
| | urls.10K | 47.78% | 14.36% | 1.22x | 1.18x | | |||
| | fireworks.jpeg | 99.95% | -0.05% | 0.78x | 1.15x | | |||
| | fireworks.jpeg (200B) | 73.00% | -6.16% | 2.00x | 1.56x | | |||
| | paper-100k.pdf | 83.30% | 0.99% | 0.60x | 0.89x | | |||
| | html_x_4 | 22.52% | 77.11% | 3.33x | 1.04x | | |||
| | alice29.txt | 57.88% | 2.34% | 1.03x | 1.56x | | |||
| | asyoulik.txt | 61.91% | -2.77% | 1.15x | 1.79x | | |||
| | lcet10.txt | 54.99% | 5.96% | 0.97x | 1.29x | | |||
| | plrabn12.txt | 66.26% | 0.40% | 1.11x | 1.67x | | |||
| | geo.protodata | 19.68% | 19.91% | 1.25x | 1.11x | | |||
| | kppkn.gtb | 37.72% | 6.06% | 0.88x | 1.03x | | |||
| | alice29.txt (128B) | 62.50% | -2.50% | 2.31x | 1.22x | | |||
| | alice29.txt (1000B) | 77.40% | 0.00% | 1.07x | 1.09x | | |||
| | alice29.txt (10000B) | 66.48% | -4.29% | 1.27x | 1.43x | | |||
| | alice29.txt (20000B) | 63.43% | -7.00% | 1.45x | 1.93x | | |||
| Speed is generally at or above Snappy. Small blocks gets a significant speedup, although at the expense of size. | |||
| Decompression speed is better than Snappy, except in one case. | |||
| Since payloads are very small the variance in terms of size is rather big, so they should only be seen as a general guideline. | |||
| Size is on average around Snappy, but varies on content type. | |||
| In cases where compression is worse, it usually is compensated by a speed boost. | |||
| ### Better compression | |||
| Benchmarking single block performance is subject to a lot more variation since it only tests a limited number of file patterns. | |||
| So individual benchmarks should only be seen as a guideline and the overall picture is more important. | |||
| | Absolute Perf | Snappy size | Better Size | Snappy Speed | Better Speed | Snappy dec | Better dec | | |||
| |-----------------------|-------------|-------------|--------------|--------------|-------------|-------------| | |||
| | html | 22843 | 19833 | 16246 MB/s | 7731 MB/s | 40972 MB/s | 40292 MB/s | | |||
| | urls.10K | 335492 | 253529 | 7943 MB/s | 3980 MB/s | 22523 MB/s | 20981 MB/s | | |||
| | fireworks.jpeg | 123034 | 123100 | 349544 MB/s | 9760 MB/s | 718321 MB/s | 823698 MB/s | | |||
| | fireworks.jpeg (200B) | 146 | 142 | 8869 MB/s | 594 MB/s | 33691 MB/s | 30101 MB/s | | |||
| | paper-100k.pdf | 85304 | 82915 | 167546 MB/s | 7470 MB/s | 326905 MB/s | 198869 MB/s | | |||
| | html_x_4 | 92234 | 19841 | 15194 MB/s | 23403 MB/s | 30843 MB/s | 30937 MB/s | | |||
| | alice29.txt | 88034 | 73218 | 5936 MB/s | 2945 MB/s | 12882 MB/s | 16611 MB/s | | |||
| | asyoulik.txt | 77503 | 66844 | 5517 MB/s | 2739 MB/s | 12735 MB/s | 14975 MB/s | | |||
| | lcet10.txt | 234661 | 190589 | 6235 MB/s | 3099 MB/s | 14519 MB/s | 16634 MB/s | | |||
| | plrabn12.txt | 319267 | 270828 | 5159 MB/s | 2600 MB/s | 11923 MB/s | 13382 MB/s | | |||
| | geo.protodata | 23335 | 18278 | 21220 MB/s | 11208 MB/s | 56271 MB/s | 57961 MB/s | | |||
| | kppkn.gtb | 69526 | 61851 | 9732 MB/s | 4556 MB/s | 18491 MB/s | 16524 MB/s | | |||
| | alice29.txt (128B) | 80 | 81 | 6691 MB/s | 529 MB/s | 31883 MB/s | 34225 MB/s | | |||
| | alice29.txt (1000B) | 774 | 748 | 12204 MB/s | 1943 MB/s | 48056 MB/s | 42068 MB/s | | |||
| | alice29.txt (10000B) | 6648 | 6234 | 10044 MB/s | 2949 MB/s | 32378 MB/s | 28813 MB/s | | |||
| | alice29.txt (20000B) | 12686 | 11584 | 7733 MB/s | 2822 MB/s | 30566 MB/s | 27315 MB/s | | |||
| | Relative Perf | Snappy size | Better size | Better Speed | Better dec | | |||
| |-----------------------|-------------|-------------|--------------|------------| | |||
| | html | 22.31% | 13.18% | 0.48x | 0.98x | | |||
| | urls.10K | 47.78% | 24.43% | 0.50x | 0.93x | | |||
| | fireworks.jpeg | 99.95% | -0.05% | 0.03x | 1.15x | | |||
| | fireworks.jpeg (200B) | 73.00% | 2.74% | 0.07x | 0.89x | | |||
| | paper-100k.pdf | 83.30% | 2.80% | 0.07x | 0.61x | | |||
| | html_x_4 | 22.52% | 78.49% | 0.04x | 1.00x | | |||
| | alice29.txt | 57.88% | 16.83% | 1.54x | 1.29x | | |||
| | asyoulik.txt | 61.91% | 13.75% | 0.50x | 1.18x | | |||
| | lcet10.txt | 54.99% | 18.78% | 0.50x | 1.15x | | |||
| | plrabn12.txt | 66.26% | 15.17% | 0.50x | 1.12x | | |||
| | geo.protodata | 19.68% | 21.67% | 0.50x | 1.03x | | |||
| | kppkn.gtb | 37.72% | 11.04% | 0.53x | 0.89x | | |||
| | alice29.txt (128B) | 62.50% | -1.25% | 0.47x | 1.07x | | |||
| | alice29.txt (1000B) | 77.40% | 3.36% | 0.08x | 0.88x | | |||
| | alice29.txt (10000B) | 66.48% | 6.23% | 0.16x | 0.89x | | |||
| | alice29.txt (20000B) | 63.43% | 8.69% | 0.29x | 0.89x | | |||
| Except for the mostly incompressible JPEG image compression is better and usually in the | |||
| double digits in terms of percentage reduction over Snappy. | |||
| The PDF sample shows a significant slowdown compared to Snappy, as this mode tries harder | |||
| to compress the data. Very small blocks are also not favorable for better compression, so throughput is way down. | |||
| This mode aims to provide better compression at the expense of performance and achieves that | |||
| without a huge performance penalty, except on very small blocks. | |||
| Decompression speed suffers a little compared to the regular S2 mode, | |||
| but still manages to be close to Snappy in spite of increased compression. | |||
| # Best compression mode | |||
| S2 offers a "best" compression mode. | |||
| This will compress as much as possible with little regard to CPU usage. | |||
| Mainly for offline compression, but where decompression speed should still | |||
| be high and compatible with other S2 compressed data. | |||
| Some examples compared on 16 core CPU, amd64 assembly used: | |||
| ``` | |||
| * enwik10 | |||
| Default... 10000000000 -> 4761467548 [47.61%]; 1.098s, 8685.6MB/s | |||
| Better... 10000000000 -> 4219438251 [42.19%]; 1.925s, 4954.2MB/s | |||
| Best... 10000000000 -> 3627364337 [36.27%]; 43.051s, 221.5MB/s | |||
| * github-june-2days-2019.json | |||
| Default... 6273951764 -> 1043196283 [16.63%]; 431ms, 13882.3MB/s | |||
| Better... 6273951764 -> 949146808 [15.13%]; 547ms, 10938.4MB/s | |||
| Best... 6273951764 -> 832855506 [13.27%]; 9.455s, 632.8MB/s | |||
| * nyc-taxi-data-10M.csv | |||
| Default... 3325605752 -> 1095998837 [32.96%]; 324ms, 9788.7MB/s | |||
| Better... 3325605752 -> 954776589 [28.71%]; 491ms, 6459.4MB/s | |||
| Best... 3325605752 -> 779098746 [23.43%]; 8.29s, 382.6MB/s | |||
| * 10gb.tar | |||
| Default... 10065157632 -> 5916578242 [58.78%]; 1.028s, 9337.4MB/s | |||
| Better... 10065157632 -> 5649207485 [56.13%]; 1.597s, 6010.6MB/s | |||
| Best... 10065157632 -> 5208719802 [51.75%]; 32.78s, 292.8MB/s | |||
| * consensus.db.10gb | |||
| Default... 10737418240 -> 4562648848 [42.49%]; 882ms, 11610.0MB/s | |||
| Better... 10737418240 -> 4542428129 [42.30%]; 1.533s, 6679.7MB/s | |||
| Best... 10737418240 -> 4244773384 [39.53%]; 42.96s, 238.4MB/s | |||
| ``` | |||
| Decompression speed should be around the same as using the 'better' compression mode. | |||
| # Snappy Compatibility | |||
| S2 now offers full compatibility with Snappy. | |||
| This means that the efficient encoders of S2 can be used to generate fully Snappy compatible output. | |||
| There is a [snappy](https://github.com/klauspost/compress/tree/master/snappy) package that can be used by | |||
| simply changing imports from `github.com/golang/snappy` to `github.com/klauspost/compress/snappy`. | |||
| This uses "better" mode for all operations. | |||
| If you would like more control, you can use the s2 package as described below: | |||
| ## Blocks | |||
| Snappy compatible blocks can be generated with the S2 encoder. | |||
| Compression and speed is typically a bit better. `MaxEncodedLen` is also smaller for smaller memory usage. Replace: | |||
| | Snappy | S2 replacement | | |||
| |----------------------------|-------------------------| | |||
| | snappy.Encode(...) | s2.EncodeSnappy(...) | | |||
| | snappy.MaxEncodedLen(...) | s2.MaxEncodedLen(...) | | |||
| `s2.EncodeSnappy` can be replaced with `s2.EncodeSnappyBetter` or `s2.EncodeSnappyBest` to get more efficiently compressed snappy compatible output. | |||
| `s2.ConcatBlocks` is compatible with snappy blocks. | |||
| Comparison of [`webdevdata.org-2015-01-07-subset`](https://files.klauspost.com/compress/webdevdata.org-2015-01-07-4GB-subset.7z), | |||
| 53927 files, total input size: 4,014,735,833 bytes. amd64, single goroutine used: | |||
| | Encoder | Size | MB/s | Reduction | | |||
| |-----------------------|------------|------------|------------ | |||
| | snappy.Encode | 1128706759 | 725.59 | 71.89% | | |||
| | s2.EncodeSnappy | 1093823291 | **899.16** | 72.75% | | |||
| | s2.EncodeSnappyBetter | 1001158548 | 578.49 | 75.06% | | |||
| | s2.EncodeSnappyBest | 944507998 | 66.00 | **76.47%**| | |||
| ## Streams | |||
| For streams, replace `enc = snappy.NewBufferedWriter(w)` with `enc = s2.NewWriter(w, s2.WriterSnappyCompat())`. | |||
| All other options are available, but note that block size limit is different for snappy. | |||
| Comparison of different streams, AMD Ryzen 3950x, 16 cores. Size and throughput: | |||
| | File | snappy.NewWriter | S2 Snappy | S2 Snappy, Better | S2 Snappy, Best | | |||
| |-----------------------------|--------------------------|---------------------------|--------------------------|-------------------------| | |||
| | nyc-taxi-data-10M.csv | 1316042016 - 539.47MB/s | 1307003093 - 10132.73MB/s | 1174534014 - 5002.44MB/s | 1115904679 - 177.97MB/s | | |||
| | enwik10 (xml) | 5088294643 - 451.13MB/s | 5175840939 - 9440.69MB/s | 4560784526 - 4487.21MB/s | 4340299103 - 158.92MB/s | | |||
| | 10gb.tar (mixed) | 6056946612 - 729.73MB/s | 6208571995 - 9978.05MB/s | 5741646126 - 4919.98MB/s | 5548973895 - 180.44MB/s | | |||
| | github-june-2days-2019.json | 1525176492 - 933.00MB/s | 1476519054 - 13150.12MB/s | 1400547532 - 5803.40MB/s | 1321887137 - 204.29MB/s | | |||
| | consensus.db.10gb (db) | 5412897703 - 1102.14MB/s | 5354073487 - 13562.91MB/s | 5335069899 - 5294.73MB/s | 5201000954 - 175.72MB/s | | |||
| # Decompression | |||
| All decompression functions map directly to equivalent s2 functions. | |||
| | Snappy | S2 replacement | | |||
| |------------------------|--------------------| | |||
| | snappy.Decode(...) | s2.Decode(...) | | |||
| | snappy.DecodedLen(...) | s2.DecodedLen(...) | | |||
| | snappy.NewReader(...) | s2.NewReader(...) | | |||
| Features like [quick forward skipping without decompression](https://pkg.go.dev/github.com/klauspost/compress/s2#Reader.Skip) | |||
| are also available for Snappy streams. | |||
| If you know you are only decompressing snappy streams, setting [`ReaderMaxBlockSize(64<<10)`](https://pkg.go.dev/github.com/klauspost/compress/s2#ReaderMaxBlockSize) | |||
| on your Reader will reduce memory consumption. | |||
| # Concatenating blocks and streams. | |||
| Concatenating streams will concatenate the output of both without recompressing them. | |||
| While this is inefficient in terms of compression it might be usable in certain scenarios. | |||
| The 10 byte 'stream identifier' of the second stream can optionally be stripped, but it is not a requirement. | |||
| Blocks can be concatenated using the `ConcatBlocks` function. | |||
| Snappy blocks/streams can safely be concatenated with S2 blocks and streams. | |||
| Streams with indexes (see below) will currently not work on concatenated streams. | |||
| # Stream Seek Index | |||
| S2 and Snappy streams can have indexes. These indexes will allow random seeking within the compressed data. | |||
| The index can either be appended to the stream as a skippable block or returned for separate storage. | |||
| When the index is appended to a stream it will be skipped by regular decoders, | |||
| so the output remains compatible with other decoders. | |||
| ## Creating an Index | |||
| To automatically add an index to a stream, add `WriterAddIndex()` option to your writer. | |||
| Then the index will be added to the stream when `Close()` is called. | |||
| ``` | |||
| // Add Index to stream... | |||
| enc := s2.NewWriter(w, s2.WriterAddIndex()) | |||
| io.Copy(enc, r) | |||
| enc.Close() | |||
| ``` | |||
| If you want to store the index separately, you can use `CloseIndex()` instead of the regular `Close()`. | |||
| This will return the index. Note that `CloseIndex()` should only be called once, and you shouldn't call `Close()`. | |||
| ``` | |||
| // Get index for separate storage... | |||
| enc := s2.NewWriter(w) | |||
| io.Copy(enc, r) | |||
| index, err := enc.CloseIndex() | |||
| ``` | |||
| The `index` can then be used without needing to read from the stream. | |||
| This means the index can be used without needing to seek to the end of the stream | |||
| or for manually forwarding streams. See below. | |||
| Finally, an existing S2/Snappy stream can be indexed using the `s2.IndexStream(r io.Reader)` function. | |||
| ## Using Indexes | |||
| To use indexes there is a `ReadSeeker(random bool, index []byte) (*ReadSeeker, error)` function available. | |||
| Calling ReadSeeker will return an [io.ReadSeeker](https://pkg.go.dev/io#ReadSeeker) compatible version of the reader. | |||
| If 'random' is specified the returned io.Seeker can be used for random seeking, otherwise only forward seeking is supported. | |||
| Enabling random seeking requires the original input to support the [io.Seeker](https://pkg.go.dev/io#Seeker) interface. | |||
| ``` | |||
| dec := s2.NewReader(r) | |||
| rs, err := dec.ReadSeeker(false, nil) | |||
| rs.Seek(wantOffset, io.SeekStart) | |||
| ``` | |||
| Get a seeker to seek forward. Since no index is provided, the index is read from the stream. | |||
| This requires that an index was added and that `r` supports the [io.Seeker](https://pkg.go.dev/io#Seeker) interface. | |||
| A custom index can be specified which will be used if supplied. | |||
| When using a custom index, it will not be read from the input stream. | |||
| ``` | |||
| dec := s2.NewReader(r) | |||
| rs, err := dec.ReadSeeker(false, index) | |||
| rs.Seek(wantOffset, io.SeekStart) | |||
| ``` | |||
| This will read the index from `index`. Since we specify non-random (forward only) seeking `r` does not have to be an io.Seeker | |||
| ``` | |||
| dec := s2.NewReader(r) | |||
| rs, err := dec.ReadSeeker(true, index) | |||
| rs.Seek(wantOffset, io.SeekStart) | |||
| ``` | |||
| Finally, since we specify that we want to do random seeking `r` must be an io.Seeker. | |||
| The returned [ReadSeeker](https://pkg.go.dev/github.com/klauspost/compress/s2#ReadSeeker) contains a shallow reference to the existing Reader, | |||
| meaning changes performed to one is reflected in the other. | |||
| To check if a stream contains an index at the end, the `(*Index).LoadStream(rs io.ReadSeeker) error` can be used. | |||
| ## Manually Forwarding Streams | |||
| Indexes can also be read outside the decoder using the [Index](https://pkg.go.dev/github.com/klauspost/compress/s2#Index) type. | |||
| This can be used for parsing indexes, either separate or in streams. | |||
| In some cases it may not be possible to serve a seekable stream. | |||
| This can for instance be an HTTP stream, where the Range request | |||
| is sent at the start of the stream. | |||
| With a little bit of extra code it is still possible to use indexes | |||
| to forward to specific offset with a single forward skip. | |||
| It is possible to load the index manually like this: | |||
| ``` | |||
| var index s2.Index | |||
| _, err = index.Load(idxBytes) | |||
| ``` | |||
| This can be used to figure out how much to offset the compressed stream: | |||
| ``` | |||
| compressedOffset, uncompressedOffset, err := index.Find(wantOffset) | |||
| ``` | |||
| The `compressedOffset` is the number of bytes that should be skipped | |||
| from the beginning of the compressed file. | |||
| The `uncompressedOffset` will then be offset of the uncompressed bytes returned | |||
| when decoding from that position. This will always be <= wantOffset. | |||
| When creating a decoder it must be specified that it should *not* expect a stream identifier | |||
| at the beginning of the stream. Assuming the io.Reader `r` has been forwarded to `compressedOffset` | |||
| we create the decoder like this: | |||
| ``` | |||
| dec := s2.NewReader(r, s2.ReaderIgnoreStreamIdentifier()) | |||
| ``` | |||
| We are not completely done. We still need to forward the stream the uncompressed bytes we didn't want. | |||
| This is done using the regular "Skip" function: | |||
| ``` | |||
| err = dec.Skip(wantOffset - uncompressedOffset) | |||
| ``` | |||
| This will ensure that we are at exactly the offset we want, and reading from `dec` will start at the requested offset. | |||
| ## Index Format: | |||
| Each block is structured as a snappy skippable block, with the chunk ID 0x99. | |||
| The block can be read from the front, but contains information so it can be read from the back as well. | |||
| Numbers are stored as fixed size little endian values or [zigzag encoded](https://developers.google.com/protocol-buffers/docs/encoding#signed_integers) [base 128 varints](https://developers.google.com/protocol-buffers/docs/encoding), | |||
| with un-encoded value length of 64 bits, unless other limits are specified. | |||
| | Content | Format | | |||
| |---------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------| | |||
| | ID, `[1]byte` | Always 0x99. | | |||
| | Data Length, `[3]byte` | 3 byte little-endian length of the chunk in bytes, following this. | | |||
| | Header `[6]byte` | Header, must be `[115, 50, 105, 100, 120, 0]` or in text: "s2idx\x00". | | |||
| | UncompressedSize, Varint | Total Uncompressed size. | | |||
| | CompressedSize, Varint | Total Compressed size if known. Should be -1 if unknown. | | |||
| | EstBlockSize, Varint | Block Size, used for guessing uncompressed offsets. Must be >= 0. | | |||
| | Entries, Varint | Number of Entries in index, must be < 65536 and >=0. | | |||
| | HasUncompressedOffsets `byte` | 0 if no uncompressed offsets are present, 1 if present. Other values are invalid. | | |||
| | UncompressedOffsets, [Entries]VarInt | Uncompressed offsets. See below how to decode. | | |||
| | CompressedOffsets, [Entries]VarInt | Compressed offsets. See below how to decode. | | |||
| | Block Size, `[4]byte` | Little Endian total encoded size (including header and trailer). Can be used for searching backwards to start of block. | | |||
| | Trailer `[6]byte` | Trailer, must be `[0, 120, 100, 105, 50, 115]` or in text: "\x00xdi2s". Can be used for identifying block from end of stream. | | |||
| For regular streams the uncompressed offsets are fully predictable, | |||
| so `HasUncompressedOffsets` allows specifying that compressed blocks all have | |||
| exactly `EstBlockSize` bytes of uncompressed content. | |||
| Entries *must* be in order, starting with the lowest offset, | |||
| and there *must* be no uncompressed offset duplicates. | |||
| Entries *may* point to the start of a skippable block, | |||
| but it is then not allowed to also have an entry for the next block since | |||
| that would give an uncompressed offset duplicate. | |||
| There is no requirement for all blocks to be represented in the index. | |||
| In fact there is a maximum of 65536 block entries in an index. | |||
| The writer can use any method to reduce the number of entries. | |||
| An implicit block start at 0,0 can be assumed. | |||
| ### Decoding entries: | |||
| ``` | |||
| // Read Uncompressed entries. | |||
| // Each assumes EstBlockSize delta from previous. | |||
| for each entry { | |||
| uOff = 0 | |||
| if HasUncompressedOffsets == 1 { | |||
| uOff = ReadVarInt // Read value from stream | |||
| } | |||
| // Except for the first entry, use previous values. | |||
| if entryNum == 0 { | |||
| entry[entryNum].UncompressedOffset = uOff | |||
| continue | |||
| } | |||
| // Uncompressed uses previous offset and adds EstBlockSize | |||
| entry[entryNum].UncompressedOffset = entry[entryNum-1].UncompressedOffset + EstBlockSize + uOff | |||
| } | |||
| // Guess that the first block will be 50% of uncompressed size. | |||
| // Integer truncating division must be used. | |||
| CompressGuess := EstBlockSize / 2 | |||
| // Read Compressed entries. | |||
| // Each assumes CompressGuess delta from previous. | |||
| // CompressGuess is adjusted for each value. | |||
| for each entry { | |||
| cOff = ReadVarInt // Read value from stream | |||
| // Except for the first entry, use previous values. | |||
| if entryNum == 0 { | |||
| entry[entryNum].CompressedOffset = cOff | |||
| continue | |||
| } | |||
| // Compressed uses previous and our estimate. | |||
| entry[entryNum].CompressedOffset = entry[entryNum-1].CompressedOffset + CompressGuess + cOff | |||
| // Adjust compressed offset for next loop, integer truncating division must be used. | |||
| CompressGuess += cOff/2 | |||
| } | |||
| ``` | |||
| To decode from any given uncompressed offset `(wantOffset)`: | |||
| * Iterate entries until `entry[n].UncompressedOffset > wantOffset`. | |||
| * Start decoding from `entry[n-1].CompressedOffset`. | |||
| * Discard `entry[n-1].UncompressedOffset - wantOffset` bytes from the decoded stream. | |||
| See [using indexes](https://github.com/klauspost/compress/tree/master/s2#using-indexes) for functions that perform the operations with a simpler interface. | |||
| # Format Extensions | |||
| * Frame [Stream identifier](https://github.com/google/snappy/blob/master/framing_format.txt#L68) changed from `sNaPpY` to `S2sTwO`. | |||
| * [Framed compressed blocks](https://github.com/google/snappy/blob/master/format_description.txt) can be up to 4MB (up from 64KB). | |||
| * Compressed blocks can have an offset of `0`, which indicates to repeat the last seen offset. | |||
| Repeat offsets must be encoded as a [2.2.1. Copy with 1-byte offset (01)](https://github.com/google/snappy/blob/master/format_description.txt#L89), where the offset is 0. | |||
| The length is specified by reading the 3-bit length specified in the tag and decode using this table: | |||
| | Length | Actual Length | | |||
| |--------|----------------------| | |||
| | 0 | 4 | | |||
| | 1 | 5 | | |||
| | 2 | 6 | | |||
| | 3 | 7 | | |||
| | 4 | 8 | | |||
| | 5 | 8 + read 1 byte | | |||
| | 6 | 260 + read 2 bytes | | |||
| | 7 | 65540 + read 3 bytes | | |||
| This allows any repeat offset + length to be represented by 2 to 5 bytes. | |||
| Lengths are stored as little endian values. | |||
| The first copy of a block cannot be a repeat offset and the offset is not carried across blocks in streams. | |||
| Default streaming block size is 1MB. | |||
| # LICENSE | |||
| This code is based on the [Snappy-Go](https://github.com/golang/snappy) implementation. | |||
| Use of this source code is governed by a BSD-style license that can be found in the LICENSE file. | |||
| @@ -0,0 +1,568 @@ | |||
| // Copyright 2016 The Go Authors. All rights reserved. | |||
| // Copyright (c) 2019 Klaus Post. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| // +build !appengine | |||
| // +build gc | |||
| // +build !noasm | |||
| #include "textflag.h" | |||
| #define R_TMP0 AX | |||
| #define R_TMP1 BX | |||
| #define R_LEN CX | |||
| #define R_OFF DX | |||
| #define R_SRC SI | |||
| #define R_DST DI | |||
| #define R_DBASE R8 | |||
| #define R_DLEN R9 | |||
| #define R_DEND R10 | |||
| #define R_SBASE R11 | |||
| #define R_SLEN R12 | |||
| #define R_SEND R13 | |||
| #define R_TMP2 R14 | |||
| #define R_TMP3 R15 | |||
| // The asm code generally follows the pure Go code in decode_other.go, except | |||
| // where marked with a "!!!". | |||
| // func decode(dst, src []byte) int | |||
| // | |||
| // All local variables fit into registers. The non-zero stack size is only to | |||
| // spill registers and push args when issuing a CALL. The register allocation: | |||
| // - R_TMP0 scratch | |||
| // - R_TMP1 scratch | |||
| // - R_LEN length or x (shared) | |||
| // - R_OFF offset | |||
| // - R_SRC &src[s] | |||
| // - R_DST &dst[d] | |||
| // + R_DBASE dst_base | |||
| // + R_DLEN dst_len | |||
| // + R_DEND dst_base + dst_len | |||
| // + R_SBASE src_base | |||
| // + R_SLEN src_len | |||
| // + R_SEND src_base + src_len | |||
| // - R_TMP2 used by doCopy | |||
| // - R_TMP3 used by doCopy | |||
| // | |||
| // The registers R_DBASE-R_SEND (marked with a "+") are set at the start of the | |||
| // function, and after a CALL returns, and are not otherwise modified. | |||
| // | |||
| // The d variable is implicitly R_DST - R_DBASE, and len(dst)-d is R_DEND - R_DST. | |||
| // The s variable is implicitly R_SRC - R_SBASE, and len(src)-s is R_SEND - R_SRC. | |||
| TEXT ·s2Decode(SB), NOSPLIT, $48-56 | |||
| // Initialize R_SRC, R_DST and R_DBASE-R_SEND. | |||
| MOVQ dst_base+0(FP), R_DBASE | |||
| MOVQ dst_len+8(FP), R_DLEN | |||
| MOVQ R_DBASE, R_DST | |||
| MOVQ R_DBASE, R_DEND | |||
| ADDQ R_DLEN, R_DEND | |||
| MOVQ src_base+24(FP), R_SBASE | |||
| MOVQ src_len+32(FP), R_SLEN | |||
| MOVQ R_SBASE, R_SRC | |||
| MOVQ R_SBASE, R_SEND | |||
| ADDQ R_SLEN, R_SEND | |||
| XORQ R_OFF, R_OFF | |||
| loop: | |||
| // for s < len(src) | |||
| CMPQ R_SRC, R_SEND | |||
| JEQ end | |||
| // R_LEN = uint32(src[s]) | |||
| // | |||
| // switch src[s] & 0x03 | |||
| MOVBLZX (R_SRC), R_LEN | |||
| MOVL R_LEN, R_TMP1 | |||
| ANDL $3, R_TMP1 | |||
| CMPL R_TMP1, $1 | |||
| JAE tagCopy | |||
| // ---------------------------------------- | |||
| // The code below handles literal tags. | |||
| // case tagLiteral: | |||
| // x := uint32(src[s] >> 2) | |||
| // switch | |||
| SHRL $2, R_LEN | |||
| CMPL R_LEN, $60 | |||
| JAE tagLit60Plus | |||
| // case x < 60: | |||
| // s++ | |||
| INCQ R_SRC | |||
| doLit: | |||
| // This is the end of the inner "switch", when we have a literal tag. | |||
| // | |||
| // We assume that R_LEN == x and x fits in a uint32, where x is the variable | |||
| // used in the pure Go decode_other.go code. | |||
| // length = int(x) + 1 | |||
| // | |||
| // Unlike the pure Go code, we don't need to check if length <= 0 because | |||
| // R_LEN can hold 64 bits, so the increment cannot overflow. | |||
| INCQ R_LEN | |||
| // Prepare to check if copying length bytes will run past the end of dst or | |||
| // src. | |||
| // | |||
| // R_TMP0 = len(dst) - d | |||
| // R_TMP1 = len(src) - s | |||
| MOVQ R_DEND, R_TMP0 | |||
| SUBQ R_DST, R_TMP0 | |||
| MOVQ R_SEND, R_TMP1 | |||
| SUBQ R_SRC, R_TMP1 | |||
| // !!! Try a faster technique for short (16 or fewer bytes) copies. | |||
| // | |||
| // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { | |||
| // goto callMemmove // Fall back on calling runtime·memmove. | |||
| // } | |||
| // | |||
| // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s | |||
| // against 21 instead of 16, because it cannot assume that all of its input | |||
| // is contiguous in memory and so it needs to leave enough source bytes to | |||
| // read the next tag without refilling buffers, but Go's Decode assumes | |||
| // contiguousness (the src argument is a []byte). | |||
| CMPQ R_LEN, $16 | |||
| JGT callMemmove | |||
| CMPQ R_TMP0, $16 | |||
| JLT callMemmove | |||
| CMPQ R_TMP1, $16 | |||
| JLT callMemmove | |||
| // !!! Implement the copy from src to dst as a 16-byte load and store. | |||
| // (Decode's documentation says that dst and src must not overlap.) | |||
| // | |||
| // This always copies 16 bytes, instead of only length bytes, but that's | |||
| // OK. If the input is a valid Snappy encoding then subsequent iterations | |||
| // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a | |||
| // non-nil error), so the overrun will be ignored. | |||
| // | |||
| // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or | |||
| // 16-byte loads and stores. This technique probably wouldn't be as | |||
| // effective on architectures that are fussier about alignment. | |||
| MOVOU 0(R_SRC), X0 | |||
| MOVOU X0, 0(R_DST) | |||
| // d += length | |||
| // s += length | |||
| ADDQ R_LEN, R_DST | |||
| ADDQ R_LEN, R_SRC | |||
| JMP loop | |||
| callMemmove: | |||
| // if length > len(dst)-d || length > len(src)-s { etc } | |||
| CMPQ R_LEN, R_TMP0 | |||
| JGT errCorrupt | |||
| CMPQ R_LEN, R_TMP1 | |||
| JGT errCorrupt | |||
| // copy(dst[d:], src[s:s+length]) | |||
| // | |||
| // This means calling runtime·memmove(&dst[d], &src[s], length), so we push | |||
| // R_DST, R_SRC and R_LEN as arguments. Coincidentally, we also need to spill those | |||
| // three registers to the stack, to save local variables across the CALL. | |||
| MOVQ R_DST, 0(SP) | |||
| MOVQ R_SRC, 8(SP) | |||
| MOVQ R_LEN, 16(SP) | |||
| MOVQ R_DST, 24(SP) | |||
| MOVQ R_SRC, 32(SP) | |||
| MOVQ R_LEN, 40(SP) | |||
| MOVQ R_OFF, 48(SP) | |||
| CALL runtime·memmove(SB) | |||
| // Restore local variables: unspill registers from the stack and | |||
| // re-calculate R_DBASE-R_SEND. | |||
| MOVQ 24(SP), R_DST | |||
| MOVQ 32(SP), R_SRC | |||
| MOVQ 40(SP), R_LEN | |||
| MOVQ 48(SP), R_OFF | |||
| MOVQ dst_base+0(FP), R_DBASE | |||
| MOVQ dst_len+8(FP), R_DLEN | |||
| MOVQ R_DBASE, R_DEND | |||
| ADDQ R_DLEN, R_DEND | |||
| MOVQ src_base+24(FP), R_SBASE | |||
| MOVQ src_len+32(FP), R_SLEN | |||
| MOVQ R_SBASE, R_SEND | |||
| ADDQ R_SLEN, R_SEND | |||
| // d += length | |||
| // s += length | |||
| ADDQ R_LEN, R_DST | |||
| ADDQ R_LEN, R_SRC | |||
| JMP loop | |||
| tagLit60Plus: | |||
| // !!! This fragment does the | |||
| // | |||
| // s += x - 58; if uint(s) > uint(len(src)) { etc } | |||
| // | |||
| // checks. In the asm version, we code it once instead of once per switch case. | |||
| ADDQ R_LEN, R_SRC | |||
| SUBQ $58, R_SRC | |||
| CMPQ R_SRC, R_SEND | |||
| JA errCorrupt | |||
| // case x == 60: | |||
| CMPL R_LEN, $61 | |||
| JEQ tagLit61 | |||
| JA tagLit62Plus | |||
| // x = uint32(src[s-1]) | |||
| MOVBLZX -1(R_SRC), R_LEN | |||
| JMP doLit | |||
| tagLit61: | |||
| // case x == 61: | |||
| // x = uint32(src[s-2]) | uint32(src[s-1])<<8 | |||
| MOVWLZX -2(R_SRC), R_LEN | |||
| JMP doLit | |||
| tagLit62Plus: | |||
| CMPL R_LEN, $62 | |||
| JA tagLit63 | |||
| // case x == 62: | |||
| // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 | |||
| // We read one byte, safe to read one back, since we are just reading tag. | |||
| // x = binary.LittleEndian.Uint32(src[s-1:]) >> 8 | |||
| MOVL -4(R_SRC), R_LEN | |||
| SHRL $8, R_LEN | |||
| JMP doLit | |||
| tagLit63: | |||
| // case x == 63: | |||
| // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 | |||
| MOVL -4(R_SRC), R_LEN | |||
| JMP doLit | |||
| // The code above handles literal tags. | |||
| // ---------------------------------------- | |||
| // The code below handles copy tags. | |||
| tagCopy4: | |||
| // case tagCopy4: | |||
| // s += 5 | |||
| ADDQ $5, R_SRC | |||
| // if uint(s) > uint(len(src)) { etc } | |||
| CMPQ R_SRC, R_SEND | |||
| JA errCorrupt | |||
| // length = 1 + int(src[s-5])>>2 | |||
| SHRQ $2, R_LEN | |||
| INCQ R_LEN | |||
| // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) | |||
| MOVLQZX -4(R_SRC), R_OFF | |||
| JMP doCopy | |||
| tagCopy2: | |||
| // case tagCopy2: | |||
| // s += 3 | |||
| ADDQ $3, R_SRC | |||
| // if uint(s) > uint(len(src)) { etc } | |||
| CMPQ R_SRC, R_SEND | |||
| JA errCorrupt | |||
| // length = 1 + int(src[s-3])>>2 | |||
| SHRQ $2, R_LEN | |||
| INCQ R_LEN | |||
| // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) | |||
| MOVWQZX -2(R_SRC), R_OFF | |||
| JMP doCopy | |||
| tagCopy: | |||
| // We have a copy tag. We assume that: | |||
| // - R_TMP1 == src[s] & 0x03 | |||
| // - R_LEN == src[s] | |||
| CMPQ R_TMP1, $2 | |||
| JEQ tagCopy2 | |||
| JA tagCopy4 | |||
| // case tagCopy1: | |||
| // s += 2 | |||
| ADDQ $2, R_SRC | |||
| // if uint(s) > uint(len(src)) { etc } | |||
| CMPQ R_SRC, R_SEND | |||
| JA errCorrupt | |||
| // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) | |||
| // length = 4 + int(src[s-2])>>2&0x7 | |||
| MOVBQZX -1(R_SRC), R_TMP1 | |||
| MOVQ R_LEN, R_TMP0 | |||
| SHRQ $2, R_LEN | |||
| ANDQ $0xe0, R_TMP0 | |||
| ANDQ $7, R_LEN | |||
| SHLQ $3, R_TMP0 | |||
| ADDQ $4, R_LEN | |||
| ORQ R_TMP1, R_TMP0 | |||
| // check if repeat code, ZF set by ORQ. | |||
| JZ repeatCode | |||
| // This is a regular copy, transfer our temporary value to R_OFF (length) | |||
| MOVQ R_TMP0, R_OFF | |||
| JMP doCopy | |||
| // This is a repeat code. | |||
| repeatCode: | |||
| // If length < 9, reuse last offset, with the length already calculated. | |||
| CMPQ R_LEN, $9 | |||
| JL doCopyRepeat | |||
| // Read additional bytes for length. | |||
| JE repeatLen1 | |||
| // Rare, so the extra branch shouldn't hurt too much. | |||
| CMPQ R_LEN, $10 | |||
| JE repeatLen2 | |||
| JMP repeatLen3 | |||
| // Read repeat lengths. | |||
| repeatLen1: | |||
| // s ++ | |||
| ADDQ $1, R_SRC | |||
| // if uint(s) > uint(len(src)) { etc } | |||
| CMPQ R_SRC, R_SEND | |||
| JA errCorrupt | |||
| // length = src[s-1] + 8 | |||
| MOVBQZX -1(R_SRC), R_LEN | |||
| ADDL $8, R_LEN | |||
| JMP doCopyRepeat | |||
| repeatLen2: | |||
| // s +=2 | |||
| ADDQ $2, R_SRC | |||
| // if uint(s) > uint(len(src)) { etc } | |||
| CMPQ R_SRC, R_SEND | |||
| JA errCorrupt | |||
| // length = uint32(src[s-2]) | (uint32(src[s-1])<<8) + (1 << 8) | |||
| MOVWQZX -2(R_SRC), R_LEN | |||
| ADDL $260, R_LEN | |||
| JMP doCopyRepeat | |||
| repeatLen3: | |||
| // s +=3 | |||
| ADDQ $3, R_SRC | |||
| // if uint(s) > uint(len(src)) { etc } | |||
| CMPQ R_SRC, R_SEND | |||
| JA errCorrupt | |||
| // length = uint32(src[s-3]) | (uint32(src[s-2])<<8) | (uint32(src[s-1])<<16) + (1 << 16) | |||
| // Read one byte further back (just part of the tag, shifted out) | |||
| MOVL -4(R_SRC), R_LEN | |||
| SHRL $8, R_LEN | |||
| ADDL $65540, R_LEN | |||
| JMP doCopyRepeat | |||
| doCopy: | |||
| // This is the end of the outer "switch", when we have a copy tag. | |||
| // | |||
| // We assume that: | |||
| // - R_LEN == length && R_LEN > 0 | |||
| // - R_OFF == offset | |||
| // if d < offset { etc } | |||
| MOVQ R_DST, R_TMP1 | |||
| SUBQ R_DBASE, R_TMP1 | |||
| CMPQ R_TMP1, R_OFF | |||
| JLT errCorrupt | |||
| // Repeat values can skip the test above, since any offset > 0 will be in dst. | |||
| doCopyRepeat: | |||
| // if offset <= 0 { etc } | |||
| CMPQ R_OFF, $0 | |||
| JLE errCorrupt | |||
| // if length > len(dst)-d { etc } | |||
| MOVQ R_DEND, R_TMP1 | |||
| SUBQ R_DST, R_TMP1 | |||
| CMPQ R_LEN, R_TMP1 | |||
| JGT errCorrupt | |||
| // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length | |||
| // | |||
| // Set: | |||
| // - R_TMP2 = len(dst)-d | |||
| // - R_TMP3 = &dst[d-offset] | |||
| MOVQ R_DEND, R_TMP2 | |||
| SUBQ R_DST, R_TMP2 | |||
| MOVQ R_DST, R_TMP3 | |||
| SUBQ R_OFF, R_TMP3 | |||
| // !!! Try a faster technique for short (16 or fewer bytes) forward copies. | |||
| // | |||
| // First, try using two 8-byte load/stores, similar to the doLit technique | |||
| // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is | |||
| // still OK if offset >= 8. Note that this has to be two 8-byte load/stores | |||
| // and not one 16-byte load/store, and the first store has to be before the | |||
| // second load, due to the overlap if offset is in the range [8, 16). | |||
| // | |||
| // if length > 16 || offset < 8 || len(dst)-d < 16 { | |||
| // goto slowForwardCopy | |||
| // } | |||
| // copy 16 bytes | |||
| // d += length | |||
| CMPQ R_LEN, $16 | |||
| JGT slowForwardCopy | |||
| CMPQ R_OFF, $8 | |||
| JLT slowForwardCopy | |||
| CMPQ R_TMP2, $16 | |||
| JLT slowForwardCopy | |||
| MOVQ 0(R_TMP3), R_TMP0 | |||
| MOVQ R_TMP0, 0(R_DST) | |||
| MOVQ 8(R_TMP3), R_TMP1 | |||
| MOVQ R_TMP1, 8(R_DST) | |||
| ADDQ R_LEN, R_DST | |||
| JMP loop | |||
| slowForwardCopy: | |||
| // !!! If the forward copy is longer than 16 bytes, or if offset < 8, we | |||
| // can still try 8-byte load stores, provided we can overrun up to 10 extra | |||
| // bytes. As above, the overrun will be fixed up by subsequent iterations | |||
| // of the outermost loop. | |||
| // | |||
| // The C++ snappy code calls this technique IncrementalCopyFastPath. Its | |||
| // commentary says: | |||
| // | |||
| // ---- | |||
| // | |||
| // The main part of this loop is a simple copy of eight bytes at a time | |||
| // until we've copied (at least) the requested amount of bytes. However, | |||
| // if d and d-offset are less than eight bytes apart (indicating a | |||
| // repeating pattern of length < 8), we first need to expand the pattern in | |||
| // order to get the correct results. For instance, if the buffer looks like | |||
| // this, with the eight-byte <d-offset> and <d> patterns marked as | |||
| // intervals: | |||
| // | |||
| // abxxxxxxxxxxxx | |||
| // [------] d-offset | |||
| // [------] d | |||
| // | |||
| // a single eight-byte copy from <d-offset> to <d> will repeat the pattern | |||
| // once, after which we can move <d> two bytes without moving <d-offset>: | |||
| // | |||
| // ababxxxxxxxxxx | |||
| // [------] d-offset | |||
| // [------] d | |||
| // | |||
| // and repeat the exercise until the two no longer overlap. | |||
| // | |||
| // This allows us to do very well in the special case of one single byte | |||
| // repeated many times, without taking a big hit for more general cases. | |||
| // | |||
| // The worst case of extra writing past the end of the match occurs when | |||
| // offset == 1 and length == 1; the last copy will read from byte positions | |||
| // [0..7] and write to [4..11], whereas it was only supposed to write to | |||
| // position 1. Thus, ten excess bytes. | |||
| // | |||
| // ---- | |||
| // | |||
| // That "10 byte overrun" worst case is confirmed by Go's | |||
| // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy | |||
| // and finishSlowForwardCopy algorithm. | |||
| // | |||
| // if length > len(dst)-d-10 { | |||
| // goto verySlowForwardCopy | |||
| // } | |||
| SUBQ $10, R_TMP2 | |||
| CMPQ R_LEN, R_TMP2 | |||
| JGT verySlowForwardCopy | |||
| // We want to keep the offset, so we use R_TMP2 from here. | |||
| MOVQ R_OFF, R_TMP2 | |||
| makeOffsetAtLeast8: | |||
| // !!! As above, expand the pattern so that offset >= 8 and we can use | |||
| // 8-byte load/stores. | |||
| // | |||
| // for offset < 8 { | |||
| // copy 8 bytes from dst[d-offset:] to dst[d:] | |||
| // length -= offset | |||
| // d += offset | |||
| // offset += offset | |||
| // // The two previous lines together means that d-offset, and therefore | |||
| // // R_TMP3, is unchanged. | |||
| // } | |||
| CMPQ R_TMP2, $8 | |||
| JGE fixUpSlowForwardCopy | |||
| MOVQ (R_TMP3), R_TMP1 | |||
| MOVQ R_TMP1, (R_DST) | |||
| SUBQ R_TMP2, R_LEN | |||
| ADDQ R_TMP2, R_DST | |||
| ADDQ R_TMP2, R_TMP2 | |||
| JMP makeOffsetAtLeast8 | |||
| fixUpSlowForwardCopy: | |||
| // !!! Add length (which might be negative now) to d (implied by R_DST being | |||
| // &dst[d]) so that d ends up at the right place when we jump back to the | |||
| // top of the loop. Before we do that, though, we save R_DST to R_TMP0 so that, if | |||
| // length is positive, copying the remaining length bytes will write to the | |||
| // right place. | |||
| MOVQ R_DST, R_TMP0 | |||
| ADDQ R_LEN, R_DST | |||
| finishSlowForwardCopy: | |||
| // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative | |||
| // length means that we overrun, but as above, that will be fixed up by | |||
| // subsequent iterations of the outermost loop. | |||
| CMPQ R_LEN, $0 | |||
| JLE loop | |||
| MOVQ (R_TMP3), R_TMP1 | |||
| MOVQ R_TMP1, (R_TMP0) | |||
| ADDQ $8, R_TMP3 | |||
| ADDQ $8, R_TMP0 | |||
| SUBQ $8, R_LEN | |||
| JMP finishSlowForwardCopy | |||
| verySlowForwardCopy: | |||
| // verySlowForwardCopy is a simple implementation of forward copy. In C | |||
| // parlance, this is a do/while loop instead of a while loop, since we know | |||
| // that length > 0. In Go syntax: | |||
| // | |||
| // for { | |||
| // dst[d] = dst[d - offset] | |||
| // d++ | |||
| // length-- | |||
| // if length == 0 { | |||
| // break | |||
| // } | |||
| // } | |||
| MOVB (R_TMP3), R_TMP1 | |||
| MOVB R_TMP1, (R_DST) | |||
| INCQ R_TMP3 | |||
| INCQ R_DST | |||
| DECQ R_LEN | |||
| JNZ verySlowForwardCopy | |||
| JMP loop | |||
| // The code above handles copy tags. | |||
| // ---------------------------------------- | |||
| end: | |||
| // This is the end of the "for s < len(src)". | |||
| // | |||
| // if d != len(dst) { etc } | |||
| CMPQ R_DST, R_DEND | |||
| JNE errCorrupt | |||
| // return 0 | |||
| MOVQ $0, ret+48(FP) | |||
| RET | |||
| errCorrupt: | |||
| // return decodeErrCodeCorrupt | |||
| MOVQ $1, ret+48(FP) | |||
| RET | |||
| @@ -0,0 +1,574 @@ | |||
| // Copyright 2020 The Go Authors. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| // +build !appengine | |||
| // +build gc | |||
| // +build !noasm | |||
| #include "textflag.h" | |||
| #define R_TMP0 R2 | |||
| #define R_TMP1 R3 | |||
| #define R_LEN R4 | |||
| #define R_OFF R5 | |||
| #define R_SRC R6 | |||
| #define R_DST R7 | |||
| #define R_DBASE R8 | |||
| #define R_DLEN R9 | |||
| #define R_DEND R10 | |||
| #define R_SBASE R11 | |||
| #define R_SLEN R12 | |||
| #define R_SEND R13 | |||
| #define R_TMP2 R14 | |||
| #define R_TMP3 R15 | |||
| // TEST_SRC will check if R_SRC is <= SRC_END | |||
| #define TEST_SRC() \ | |||
| CMP R_SEND, R_SRC \ | |||
| BGT errCorrupt | |||
| // MOVD R_SRC, R_TMP1 | |||
| // SUB R_SBASE, R_TMP1, R_TMP1 | |||
| // CMP R_SLEN, R_TMP1 | |||
| // BGT errCorrupt | |||
| // The asm code generally follows the pure Go code in decode_other.go, except | |||
| // where marked with a "!!!". | |||
| // func decode(dst, src []byte) int | |||
| // | |||
| // All local variables fit into registers. The non-zero stack size is only to | |||
| // spill registers and push args when issuing a CALL. The register allocation: | |||
| // - R_TMP0 scratch | |||
| // - R_TMP1 scratch | |||
| // - R_LEN length or x | |||
| // - R_OFF offset | |||
| // - R_SRC &src[s] | |||
| // - R_DST &dst[d] | |||
| // + R_DBASE dst_base | |||
| // + R_DLEN dst_len | |||
| // + R_DEND dst_base + dst_len | |||
| // + R_SBASE src_base | |||
| // + R_SLEN src_len | |||
| // + R_SEND src_base + src_len | |||
| // - R_TMP2 used by doCopy | |||
| // - R_TMP3 used by doCopy | |||
| // | |||
| // The registers R_DBASE-R_SEND (marked with a "+") are set at the start of the | |||
| // function, and after a CALL returns, and are not otherwise modified. | |||
| // | |||
| // The d variable is implicitly R_DST - R_DBASE, and len(dst)-d is R_DEND - R_DST. | |||
| // The s variable is implicitly R_SRC - R_SBASE, and len(src)-s is R_SEND - R_SRC. | |||
| TEXT ·s2Decode(SB), NOSPLIT, $56-64 | |||
| // Initialize R_SRC, R_DST and R_DBASE-R_SEND. | |||
| MOVD dst_base+0(FP), R_DBASE | |||
| MOVD dst_len+8(FP), R_DLEN | |||
| MOVD R_DBASE, R_DST | |||
| MOVD R_DBASE, R_DEND | |||
| ADD R_DLEN, R_DEND, R_DEND | |||
| MOVD src_base+24(FP), R_SBASE | |||
| MOVD src_len+32(FP), R_SLEN | |||
| MOVD R_SBASE, R_SRC | |||
| MOVD R_SBASE, R_SEND | |||
| ADD R_SLEN, R_SEND, R_SEND | |||
| MOVD $0, R_OFF | |||
| loop: | |||
| // for s < len(src) | |||
| CMP R_SEND, R_SRC | |||
| BEQ end | |||
| // R_LEN = uint32(src[s]) | |||
| // | |||
| // switch src[s] & 0x03 | |||
| MOVBU (R_SRC), R_LEN | |||
| MOVW R_LEN, R_TMP1 | |||
| ANDW $3, R_TMP1 | |||
| MOVW $1, R1 | |||
| CMPW R1, R_TMP1 | |||
| BGE tagCopy | |||
| // ---------------------------------------- | |||
| // The code below handles literal tags. | |||
| // case tagLiteral: | |||
| // x := uint32(src[s] >> 2) | |||
| // switch | |||
| MOVW $60, R1 | |||
| LSRW $2, R_LEN, R_LEN | |||
| CMPW R_LEN, R1 | |||
| BLS tagLit60Plus | |||
| // case x < 60: | |||
| // s++ | |||
| ADD $1, R_SRC, R_SRC | |||
| doLit: | |||
| // This is the end of the inner "switch", when we have a literal tag. | |||
| // | |||
| // We assume that R_LEN == x and x fits in a uint32, where x is the variable | |||
| // used in the pure Go decode_other.go code. | |||
| // length = int(x) + 1 | |||
| // | |||
| // Unlike the pure Go code, we don't need to check if length <= 0 because | |||
| // R_LEN can hold 64 bits, so the increment cannot overflow. | |||
| ADD $1, R_LEN, R_LEN | |||
| // Prepare to check if copying length bytes will run past the end of dst or | |||
| // src. | |||
| // | |||
| // R_TMP0 = len(dst) - d | |||
| // R_TMP1 = len(src) - s | |||
| MOVD R_DEND, R_TMP0 | |||
| SUB R_DST, R_TMP0, R_TMP0 | |||
| MOVD R_SEND, R_TMP1 | |||
| SUB R_SRC, R_TMP1, R_TMP1 | |||
| // !!! Try a faster technique for short (16 or fewer bytes) copies. | |||
| // | |||
| // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { | |||
| // goto callMemmove // Fall back on calling runtime·memmove. | |||
| // } | |||
| // | |||
| // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s | |||
| // against 21 instead of 16, because it cannot assume that all of its input | |||
| // is contiguous in memory and so it needs to leave enough source bytes to | |||
| // read the next tag without refilling buffers, but Go's Decode assumes | |||
| // contiguousness (the src argument is a []byte). | |||
| CMP $16, R_LEN | |||
| BGT callMemmove | |||
| CMP $16, R_TMP0 | |||
| BLT callMemmove | |||
| CMP $16, R_TMP1 | |||
| BLT callMemmove | |||
| // !!! Implement the copy from src to dst as a 16-byte load and store. | |||
| // (Decode's documentation says that dst and src must not overlap.) | |||
| // | |||
| // This always copies 16 bytes, instead of only length bytes, but that's | |||
| // OK. If the input is a valid Snappy encoding then subsequent iterations | |||
| // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a | |||
| // non-nil error), so the overrun will be ignored. | |||
| // | |||
| // Note that on arm64, it is legal and cheap to issue unaligned 8-byte or | |||
| // 16-byte loads and stores. This technique probably wouldn't be as | |||
| // effective on architectures that are fussier about alignment. | |||
| LDP 0(R_SRC), (R_TMP2, R_TMP3) | |||
| STP (R_TMP2, R_TMP3), 0(R_DST) | |||
| // d += length | |||
| // s += length | |||
| ADD R_LEN, R_DST, R_DST | |||
| ADD R_LEN, R_SRC, R_SRC | |||
| B loop | |||
| callMemmove: | |||
| // if length > len(dst)-d || length > len(src)-s { etc } | |||
| CMP R_TMP0, R_LEN | |||
| BGT errCorrupt | |||
| CMP R_TMP1, R_LEN | |||
| BGT errCorrupt | |||
| // copy(dst[d:], src[s:s+length]) | |||
| // | |||
| // This means calling runtime·memmove(&dst[d], &src[s], length), so we push | |||
| // R_DST, R_SRC and R_LEN as arguments. Coincidentally, we also need to spill those | |||
| // three registers to the stack, to save local variables across the CALL. | |||
| MOVD R_DST, 8(RSP) | |||
| MOVD R_SRC, 16(RSP) | |||
| MOVD R_LEN, 24(RSP) | |||
| MOVD R_DST, 32(RSP) | |||
| MOVD R_SRC, 40(RSP) | |||
| MOVD R_LEN, 48(RSP) | |||
| MOVD R_OFF, 56(RSP) | |||
| CALL runtime·memmove(SB) | |||
| // Restore local variables: unspill registers from the stack and | |||
| // re-calculate R_DBASE-R_SEND. | |||
| MOVD 32(RSP), R_DST | |||
| MOVD 40(RSP), R_SRC | |||
| MOVD 48(RSP), R_LEN | |||
| MOVD 56(RSP), R_OFF | |||
| MOVD dst_base+0(FP), R_DBASE | |||
| MOVD dst_len+8(FP), R_DLEN | |||
| MOVD R_DBASE, R_DEND | |||
| ADD R_DLEN, R_DEND, R_DEND | |||
| MOVD src_base+24(FP), R_SBASE | |||
| MOVD src_len+32(FP), R_SLEN | |||
| MOVD R_SBASE, R_SEND | |||
| ADD R_SLEN, R_SEND, R_SEND | |||
| // d += length | |||
| // s += length | |||
| ADD R_LEN, R_DST, R_DST | |||
| ADD R_LEN, R_SRC, R_SRC | |||
| B loop | |||
| tagLit60Plus: | |||
| // !!! This fragment does the | |||
| // | |||
| // s += x - 58; if uint(s) > uint(len(src)) { etc } | |||
| // | |||
| // checks. In the asm version, we code it once instead of once per switch case. | |||
| ADD R_LEN, R_SRC, R_SRC | |||
| SUB $58, R_SRC, R_SRC | |||
| TEST_SRC() | |||
| // case x == 60: | |||
| MOVW $61, R1 | |||
| CMPW R1, R_LEN | |||
| BEQ tagLit61 | |||
| BGT tagLit62Plus | |||
| // x = uint32(src[s-1]) | |||
| MOVBU -1(R_SRC), R_LEN | |||
| B doLit | |||
| tagLit61: | |||
| // case x == 61: | |||
| // x = uint32(src[s-2]) | uint32(src[s-1])<<8 | |||
| MOVHU -2(R_SRC), R_LEN | |||
| B doLit | |||
| tagLit62Plus: | |||
| CMPW $62, R_LEN | |||
| BHI tagLit63 | |||
| // case x == 62: | |||
| // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 | |||
| MOVHU -3(R_SRC), R_LEN | |||
| MOVBU -1(R_SRC), R_TMP1 | |||
| ORR R_TMP1<<16, R_LEN | |||
| B doLit | |||
| tagLit63: | |||
| // case x == 63: | |||
| // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 | |||
| MOVWU -4(R_SRC), R_LEN | |||
| B doLit | |||
| // The code above handles literal tags. | |||
| // ---------------------------------------- | |||
| // The code below handles copy tags. | |||
| tagCopy4: | |||
| // case tagCopy4: | |||
| // s += 5 | |||
| ADD $5, R_SRC, R_SRC | |||
| // if uint(s) > uint(len(src)) { etc } | |||
| MOVD R_SRC, R_TMP1 | |||
| SUB R_SBASE, R_TMP1, R_TMP1 | |||
| CMP R_SLEN, R_TMP1 | |||
| BGT errCorrupt | |||
| // length = 1 + int(src[s-5])>>2 | |||
| MOVD $1, R1 | |||
| ADD R_LEN>>2, R1, R_LEN | |||
| // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) | |||
| MOVWU -4(R_SRC), R_OFF | |||
| B doCopy | |||
| tagCopy2: | |||
| // case tagCopy2: | |||
| // s += 3 | |||
| ADD $3, R_SRC, R_SRC | |||
| // if uint(s) > uint(len(src)) { etc } | |||
| TEST_SRC() | |||
| // length = 1 + int(src[s-3])>>2 | |||
| MOVD $1, R1 | |||
| ADD R_LEN>>2, R1, R_LEN | |||
| // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) | |||
| MOVHU -2(R_SRC), R_OFF | |||
| B doCopy | |||
| tagCopy: | |||
| // We have a copy tag. We assume that: | |||
| // - R_TMP1 == src[s] & 0x03 | |||
| // - R_LEN == src[s] | |||
| CMP $2, R_TMP1 | |||
| BEQ tagCopy2 | |||
| BGT tagCopy4 | |||
| // case tagCopy1: | |||
| // s += 2 | |||
| ADD $2, R_SRC, R_SRC | |||
| // if uint(s) > uint(len(src)) { etc } | |||
| TEST_SRC() | |||
| // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) | |||
| // Calculate offset in R_TMP0 in case it is a repeat. | |||
| MOVD R_LEN, R_TMP0 | |||
| AND $0xe0, R_TMP0 | |||
| MOVBU -1(R_SRC), R_TMP1 | |||
| ORR R_TMP0<<3, R_TMP1, R_TMP0 | |||
| // length = 4 + int(src[s-2])>>2&0x7 | |||
| MOVD $7, R1 | |||
| AND R_LEN>>2, R1, R_LEN | |||
| ADD $4, R_LEN, R_LEN | |||
| // check if repeat code with offset 0. | |||
| CMP $0, R_TMP0 | |||
| BEQ repeatCode | |||
| // This is a regular copy, transfer our temporary value to R_OFF (offset) | |||
| MOVD R_TMP0, R_OFF | |||
| B doCopy | |||
| // This is a repeat code. | |||
| repeatCode: | |||
| // If length < 9, reuse last offset, with the length already calculated. | |||
| CMP $9, R_LEN | |||
| BLT doCopyRepeat | |||
| BEQ repeatLen1 | |||
| CMP $10, R_LEN | |||
| BEQ repeatLen2 | |||
| repeatLen3: | |||
| // s +=3 | |||
| ADD $3, R_SRC, R_SRC | |||
| // if uint(s) > uint(len(src)) { etc } | |||
| TEST_SRC() | |||
| // length = uint32(src[s-3]) | (uint32(src[s-2])<<8) | (uint32(src[s-1])<<16) + 65540 | |||
| MOVBU -1(R_SRC), R_TMP0 | |||
| MOVHU -3(R_SRC), R_LEN | |||
| ORR R_TMP0<<16, R_LEN, R_LEN | |||
| ADD $65540, R_LEN, R_LEN | |||
| B doCopyRepeat | |||
| repeatLen2: | |||
| // s +=2 | |||
| ADD $2, R_SRC, R_SRC | |||
| // if uint(s) > uint(len(src)) { etc } | |||
| TEST_SRC() | |||
| // length = uint32(src[s-2]) | (uint32(src[s-1])<<8) + 260 | |||
| MOVHU -2(R_SRC), R_LEN | |||
| ADD $260, R_LEN, R_LEN | |||
| B doCopyRepeat | |||
| repeatLen1: | |||
| // s +=1 | |||
| ADD $1, R_SRC, R_SRC | |||
| // if uint(s) > uint(len(src)) { etc } | |||
| TEST_SRC() | |||
| // length = src[s-1] + 8 | |||
| MOVBU -1(R_SRC), R_LEN | |||
| ADD $8, R_LEN, R_LEN | |||
| B doCopyRepeat | |||
| doCopy: | |||
| // This is the end of the outer "switch", when we have a copy tag. | |||
| // | |||
| // We assume that: | |||
| // - R_LEN == length && R_LEN > 0 | |||
| // - R_OFF == offset | |||
| // if d < offset { etc } | |||
| MOVD R_DST, R_TMP1 | |||
| SUB R_DBASE, R_TMP1, R_TMP1 | |||
| CMP R_OFF, R_TMP1 | |||
| BLT errCorrupt | |||
| // Repeat values can skip the test above, since any offset > 0 will be in dst. | |||
| doCopyRepeat: | |||
| // if offset <= 0 { etc } | |||
| CMP $0, R_OFF | |||
| BLE errCorrupt | |||
| // if length > len(dst)-d { etc } | |||
| MOVD R_DEND, R_TMP1 | |||
| SUB R_DST, R_TMP1, R_TMP1 | |||
| CMP R_TMP1, R_LEN | |||
| BGT errCorrupt | |||
| // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length | |||
| // | |||
| // Set: | |||
| // - R_TMP2 = len(dst)-d | |||
| // - R_TMP3 = &dst[d-offset] | |||
| MOVD R_DEND, R_TMP2 | |||
| SUB R_DST, R_TMP2, R_TMP2 | |||
| MOVD R_DST, R_TMP3 | |||
| SUB R_OFF, R_TMP3, R_TMP3 | |||
| // !!! Try a faster technique for short (16 or fewer bytes) forward copies. | |||
| // | |||
| // First, try using two 8-byte load/stores, similar to the doLit technique | |||
| // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is | |||
| // still OK if offset >= 8. Note that this has to be two 8-byte load/stores | |||
| // and not one 16-byte load/store, and the first store has to be before the | |||
| // second load, due to the overlap if offset is in the range [8, 16). | |||
| // | |||
| // if length > 16 || offset < 8 || len(dst)-d < 16 { | |||
| // goto slowForwardCopy | |||
| // } | |||
| // copy 16 bytes | |||
| // d += length | |||
| CMP $16, R_LEN | |||
| BGT slowForwardCopy | |||
| CMP $8, R_OFF | |||
| BLT slowForwardCopy | |||
| CMP $16, R_TMP2 | |||
| BLT slowForwardCopy | |||
| MOVD 0(R_TMP3), R_TMP0 | |||
| MOVD R_TMP0, 0(R_DST) | |||
| MOVD 8(R_TMP3), R_TMP1 | |||
| MOVD R_TMP1, 8(R_DST) | |||
| ADD R_LEN, R_DST, R_DST | |||
| B loop | |||
| slowForwardCopy: | |||
| // !!! If the forward copy is longer than 16 bytes, or if offset < 8, we | |||
| // can still try 8-byte load stores, provided we can overrun up to 10 extra | |||
| // bytes. As above, the overrun will be fixed up by subsequent iterations | |||
| // of the outermost loop. | |||
| // | |||
| // The C++ snappy code calls this technique IncrementalCopyFastPath. Its | |||
| // commentary says: | |||
| // | |||
| // ---- | |||
| // | |||
| // The main part of this loop is a simple copy of eight bytes at a time | |||
| // until we've copied (at least) the requested amount of bytes. However, | |||
| // if d and d-offset are less than eight bytes apart (indicating a | |||
| // repeating pattern of length < 8), we first need to expand the pattern in | |||
| // order to get the correct results. For instance, if the buffer looks like | |||
| // this, with the eight-byte <d-offset> and <d> patterns marked as | |||
| // intervals: | |||
| // | |||
| // abxxxxxxxxxxxx | |||
| // [------] d-offset | |||
| // [------] d | |||
| // | |||
| // a single eight-byte copy from <d-offset> to <d> will repeat the pattern | |||
| // once, after which we can move <d> two bytes without moving <d-offset>: | |||
| // | |||
| // ababxxxxxxxxxx | |||
| // [------] d-offset | |||
| // [------] d | |||
| // | |||
| // and repeat the exercise until the two no longer overlap. | |||
| // | |||
| // This allows us to do very well in the special case of one single byte | |||
| // repeated many times, without taking a big hit for more general cases. | |||
| // | |||
| // The worst case of extra writing past the end of the match occurs when | |||
| // offset == 1 and length == 1; the last copy will read from byte positions | |||
| // [0..7] and write to [4..11], whereas it was only supposed to write to | |||
| // position 1. Thus, ten excess bytes. | |||
| // | |||
| // ---- | |||
| // | |||
| // That "10 byte overrun" worst case is confirmed by Go's | |||
| // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy | |||
| // and finishSlowForwardCopy algorithm. | |||
| // | |||
| // if length > len(dst)-d-10 { | |||
| // goto verySlowForwardCopy | |||
| // } | |||
| SUB $10, R_TMP2, R_TMP2 | |||
| CMP R_TMP2, R_LEN | |||
| BGT verySlowForwardCopy | |||
| // We want to keep the offset, so we use R_TMP2 from here. | |||
| MOVD R_OFF, R_TMP2 | |||
| makeOffsetAtLeast8: | |||
| // !!! As above, expand the pattern so that offset >= 8 and we can use | |||
| // 8-byte load/stores. | |||
| // | |||
| // for offset < 8 { | |||
| // copy 8 bytes from dst[d-offset:] to dst[d:] | |||
| // length -= offset | |||
| // d += offset | |||
| // offset += offset | |||
| // // The two previous lines together means that d-offset, and therefore | |||
| // // R_TMP3, is unchanged. | |||
| // } | |||
| CMP $8, R_TMP2 | |||
| BGE fixUpSlowForwardCopy | |||
| MOVD (R_TMP3), R_TMP1 | |||
| MOVD R_TMP1, (R_DST) | |||
| SUB R_TMP2, R_LEN, R_LEN | |||
| ADD R_TMP2, R_DST, R_DST | |||
| ADD R_TMP2, R_TMP2, R_TMP2 | |||
| B makeOffsetAtLeast8 | |||
| fixUpSlowForwardCopy: | |||
| // !!! Add length (which might be negative now) to d (implied by R_DST being | |||
| // &dst[d]) so that d ends up at the right place when we jump back to the | |||
| // top of the loop. Before we do that, though, we save R_DST to R_TMP0 so that, if | |||
| // length is positive, copying the remaining length bytes will write to the | |||
| // right place. | |||
| MOVD R_DST, R_TMP0 | |||
| ADD R_LEN, R_DST, R_DST | |||
| finishSlowForwardCopy: | |||
| // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative | |||
| // length means that we overrun, but as above, that will be fixed up by | |||
| // subsequent iterations of the outermost loop. | |||
| MOVD $0, R1 | |||
| CMP R1, R_LEN | |||
| BLE loop | |||
| MOVD (R_TMP3), R_TMP1 | |||
| MOVD R_TMP1, (R_TMP0) | |||
| ADD $8, R_TMP3, R_TMP3 | |||
| ADD $8, R_TMP0, R_TMP0 | |||
| SUB $8, R_LEN, R_LEN | |||
| B finishSlowForwardCopy | |||
| verySlowForwardCopy: | |||
| // verySlowForwardCopy is a simple implementation of forward copy. In C | |||
| // parlance, this is a do/while loop instead of a while loop, since we know | |||
| // that length > 0. In Go syntax: | |||
| // | |||
| // for { | |||
| // dst[d] = dst[d - offset] | |||
| // d++ | |||
| // length-- | |||
| // if length == 0 { | |||
| // break | |||
| // } | |||
| // } | |||
| MOVB (R_TMP3), R_TMP1 | |||
| MOVB R_TMP1, (R_DST) | |||
| ADD $1, R_TMP3, R_TMP3 | |||
| ADD $1, R_DST, R_DST | |||
| SUB $1, R_LEN, R_LEN | |||
| CBNZ R_LEN, verySlowForwardCopy | |||
| B loop | |||
| // The code above handles copy tags. | |||
| // ---------------------------------------- | |||
| end: | |||
| // This is the end of the "for s < len(src)". | |||
| // | |||
| // if d != len(dst) { etc } | |||
| CMP R_DEND, R_DST | |||
| BNE errCorrupt | |||
| // return 0 | |||
| MOVD $0, ret+48(FP) | |||
| RET | |||
| errCorrupt: | |||
| // return decodeErrCodeCorrupt | |||
| MOVD $1, R_TMP0 | |||
| MOVD R_TMP0, ret+48(FP) | |||
| RET | |||
| @@ -0,0 +1,17 @@ | |||
| // Copyright 2016 The Snappy-Go Authors. All rights reserved. | |||
| // Copyright (c) 2019 Klaus Post. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| //go:build (amd64 || arm64) && !appengine && gc && !noasm | |||
| // +build amd64 arm64 | |||
| // +build !appengine | |||
| // +build gc | |||
| // +build !noasm | |||
| package s2 | |||
// decode has the same semantics as in decode_other.go.
//
// s2Decode is only declared here; its body is provided in assembly for the
// platforms selected by the build constraints above. The //go:noescape
// directive asserts that dst and src do not escape through this call.
//
//go:noescape
func s2Decode(dst, src []byte) int
| @@ -0,0 +1,267 @@ | |||
| // Copyright 2016 The Snappy-Go Authors. All rights reserved. | |||
| // Copyright (c) 2019 Klaus Post. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| //go:build (!amd64 && !arm64) || appengine || !gc || noasm | |||
| // +build !amd64,!arm64 appengine !gc noasm | |||
| package s2 | |||
| import ( | |||
| "fmt" | |||
| "strconv" | |||
| ) | |||
// decode writes the decoding of src to dst. It assumes that the varint-encoded
// length of the decompressed bytes has already been read, and that len(dst)
// equals that length.
//
// It returns 0 on success or a decodeErrCodeXxx error code on failure.
func s2Decode(dst, src []byte) int {
	const debug = false
	if debug {
		fmt.Println("Starting decode, dst len:", len(dst))
	}
	// d and s are the write position in dst and the read position in src.
	// length and offset describe the operation decoded from the current tag.
	var d, s, length int
	offset := 0
	// As long as we can read at least 5 bytes...
	// (5 bytes covers the largest tag encoding handled below, so this fast
	// loop can read tag bytes without per-case bounds checks.)
	for s < len(src)-5 {
		switch src[s] & 0x03 {
		case tagLiteral:
			x := uint32(src[s] >> 2)
			switch {
			case x < 60:
				// Literal length fits in the tag byte itself.
				s++
			case x == 60:
				// Length-1 is in the following 1 byte.
				s += 2
				x = uint32(src[s-1])
			case x == 61:
				// Length-1 is in the following 2 bytes, little-endian.
				s += 3
				x = uint32(src[s-2]) | uint32(src[s-1])<<8
			case x == 62:
				// Length-1 is in the following 3 bytes, little-endian.
				s += 4
				x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
			case x == 63:
				// Length-1 is in the following 4 bytes, little-endian.
				s += 5
				x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
			}
			length = int(x) + 1
			// The strconv.IntSize term guards against int(x)+1 overflowing
			// on 32-bit platforms.
			if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) {
				return decodeErrCodeCorrupt
			}
			if debug {
				fmt.Println("literals, length:", length, "d-after:", d+length)
			}
			copy(dst[d:], src[s:s+length])
			d += length
			s += length
			continue
		case tagCopy1:
			s += 2
			length = int(src[s-2]) >> 2 & 0x7
			// Offset: high 3 bits come from the tag byte, low 8 bits from
			// the next byte.
			toffset := int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
			if toffset == 0 {
				if debug {
					fmt.Print("(repeat) ")
				}
				// keep last offset
				// Offset 0 marks a repeat: reuse the previous offset.
				// Length codes 5-7 pull 1-3 extra length bytes.
				switch length {
				case 5:
					s += 1
					length = int(uint32(src[s-1])) + 4
				case 6:
					s += 2
					length = int(uint32(src[s-2])|(uint32(src[s-1])<<8)) + (1 << 8)
				case 7:
					s += 3
					length = int(uint32(src[s-3])|(uint32(src[s-2])<<8)|(uint32(src[s-1])<<16)) + (1 << 16)
				default: // 0-> 4
				}
			} else {
				offset = toffset
			}
			length += 4
		case tagCopy2:
			// 1-byte length, 2-byte little-endian offset.
			s += 3
			length = 1 + int(src[s-3])>>2
			offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
		case tagCopy4:
			// 1-byte length, 4-byte little-endian offset.
			s += 5
			length = 1 + int(src[s-5])>>2
			offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
		}
		// A copy must read from the already-written prefix of dst and fit in
		// the remaining output.
		if offset <= 0 || d < offset || length > len(dst)-d {
			return decodeErrCodeCorrupt
		}
		if debug {
			fmt.Println("copy, length:", length, "offset:", offset, "d-after:", d+length)
		}
		// Copy from an earlier sub-slice of dst to a later sub-slice.
		// If no overlap, use the built-in copy:
		if offset > length {
			copy(dst[d:d+length], dst[d-offset:])
			d += length
			continue
		}
		// Unlike the built-in copy function, this byte-by-byte copy always runs
		// forwards, even if the slices overlap. Conceptually, this is:
		//
		// d += forwardCopy(dst[d:d+length], dst[d-offset:])
		//
		// We align the slices into a and b and show the compiler they are the same size.
		// This allows the loop to run without bounds checks.
		a := dst[d : d+length]
		b := dst[d-offset:]
		b = b[:len(a)]
		for i := range a {
			a[i] = b[i]
		}
		d += length
	}
	// Remaining with extra checks...
	// Same state machine as above, but fewer than 5 bytes may remain, so
	// every advance of s is bounds-checked before the tag bytes are read.
	for s < len(src) {
		switch src[s] & 0x03 {
		case tagLiteral:
			x := uint32(src[s] >> 2)
			switch {
			case x < 60:
				s++
			case x == 60:
				s += 2
				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
					return decodeErrCodeCorrupt
				}
				x = uint32(src[s-1])
			case x == 61:
				s += 3
				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
					return decodeErrCodeCorrupt
				}
				x = uint32(src[s-2]) | uint32(src[s-1])<<8
			case x == 62:
				s += 4
				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
					return decodeErrCodeCorrupt
				}
				x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
			case x == 63:
				s += 5
				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
					return decodeErrCodeCorrupt
				}
				x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
			}
			length = int(x) + 1
			if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) {
				return decodeErrCodeCorrupt
			}
			if debug {
				fmt.Println("literals, length:", length, "d-after:", d+length)
			}
			copy(dst[d:], src[s:s+length])
			d += length
			s += length
			continue
		case tagCopy1:
			s += 2
			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
				return decodeErrCodeCorrupt
			}
			length = int(src[s-2]) >> 2 & 0x7
			toffset := int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
			if toffset == 0 {
				if debug {
					fmt.Print("(repeat) ")
				}
				// keep last offset
				switch length {
				case 5:
					s += 1
					if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
						return decodeErrCodeCorrupt
					}
					length = int(uint32(src[s-1])) + 4
				case 6:
					s += 2
					if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
						return decodeErrCodeCorrupt
					}
					length = int(uint32(src[s-2])|(uint32(src[s-1])<<8)) + (1 << 8)
				case 7:
					s += 3
					if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
						return decodeErrCodeCorrupt
					}
					length = int(uint32(src[s-3])|(uint32(src[s-2])<<8)|(uint32(src[s-1])<<16)) + (1 << 16)
				default: // 0-> 4
				}
			} else {
				offset = toffset
			}
			length += 4
		case tagCopy2:
			s += 3
			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
				return decodeErrCodeCorrupt
			}
			length = 1 + int(src[s-3])>>2
			offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
		case tagCopy4:
			s += 5
			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
				return decodeErrCodeCorrupt
			}
			length = 1 + int(src[s-5])>>2
			offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
		}
		if offset <= 0 || d < offset || length > len(dst)-d {
			return decodeErrCodeCorrupt
		}
		if debug {
			fmt.Println("copy, length:", length, "offset:", offset, "d-after:", d+length)
		}
		// Copy from an earlier sub-slice of dst to a later sub-slice.
		// If no overlap, use the built-in copy:
		if offset > length {
			copy(dst[d:d+length], dst[d-offset:])
			d += length
			continue
		}
		// Unlike the built-in copy function, this byte-by-byte copy always runs
		// forwards, even if the slices overlap. Conceptually, this is:
		//
		// d += forwardCopy(dst[d:d+length], dst[d-offset:])
		//
		// We align the slices into a and b and show the compiler they are the same size.
		// This allows the loop to run without bounds checks.
		a := dst[d : d+length]
		b := dst[d-offset:]
		b = b[:len(a)]
		for i := range a {
			a[i] = b[i]
		}
		d += length
	}
	// The output must be exactly filled, or the stream was corrupt.
	if d != len(dst) {
		return decodeErrCodeCorrupt
	}
	return 0
}
| @@ -0,0 +1,456 @@ | |||
| // Copyright 2016 The Snappy-Go Authors. All rights reserved. | |||
| // Copyright (c) 2019 Klaus Post. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| package s2 | |||
| import ( | |||
| "bytes" | |||
| "encoding/binary" | |||
| "math/bits" | |||
| ) | |||
// load32 returns the little-endian uint32 formed by the four bytes
// starting at b[i]. It panics if fewer than four bytes are available.
func load32(b []byte, i int) uint32 {
	v := b[i : i+4]
	return uint32(v[0]) | uint32(v[1])<<8 | uint32(v[2])<<16 | uint32(v[3])<<24
}
// load64 returns the little-endian uint64 formed by the eight bytes
// starting at b[i]. It panics if fewer than eight bytes are available.
func load64(b []byte, i int) uint64 {
	v := b[i : i+8]
	return uint64(v[0]) | uint64(v[1])<<8 | uint64(v[2])<<16 | uint64(v[3])<<24 |
		uint64(v[4])<<32 | uint64(v[5])<<40 | uint64(v[6])<<48 | uint64(v[7])<<56
}
// hash6 returns the hash of the lowest 6 bytes of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <64.
func hash6(u uint64, h uint8) uint32 {
	const prime6bytes = 227718039650203
	// Shift the two high bytes out so only the low 48 bits contribute,
	// then multiply-shift down to h bits.
	v := (u << 16) * prime6bytes
	return uint32(v >> ((64 - h) & 63))
}
| func encodeGo(dst, src []byte) []byte { | |||
| if n := MaxEncodedLen(len(src)); n < 0 { | |||
| panic(ErrTooLarge) | |||
| } else if len(dst) < n { | |||
| dst = make([]byte, n) | |||
| } | |||
| // The block starts with the varint-encoded length of the decompressed bytes. | |||
| d := binary.PutUvarint(dst, uint64(len(src))) | |||
| if len(src) == 0 { | |||
| return dst[:d] | |||
| } | |||
| if len(src) < minNonLiteralBlockSize { | |||
| d += emitLiteral(dst[d:], src) | |||
| return dst[:d] | |||
| } | |||
| n := encodeBlockGo(dst[d:], src) | |||
| if n > 0 { | |||
| d += n | |||
| return dst[:d] | |||
| } | |||
| // Not compressible | |||
| d += emitLiteral(dst[d:], src) | |||
| return dst[:d] | |||
| } | |||
// encodeBlockGo encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
//
// It also assumes that:
//
//	len(dst) >= MaxEncodedLen(len(src)) &&
//	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlockGo(dst, src []byte) (d int) {
	// Initialize the hash table.
	const (
		tableBits    = 14
		maxTableSize = 1 << tableBits
		debug        = false
	)
	// table maps hash6 values to the most recent src position where that
	// hashed sequence was seen (zero value doubles as "position 0").
	var table [maxTableSize]uint32
	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := len(src) - inputMargin
	// Bail if we can't compress to at least this.
	dstLimit := len(src) - len(src)>>5 - 5
	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := 0
	// The encoded form must start with a literal, as there are no previous
	// bytes to copy, so we start looking for hash matches at s == 1.
	s := 1
	cv := load64(src, s)
	// We search for a repeat at -1, but don't output repeats when nextEmit == 0
	repeat := 1
	for {
		candidate := 0
		for {
			// Next src position to check
			// The (s-nextEmit)>>6 term makes the scan skip ahead faster the
			// longer we go without finding a match.
			nextS := s + (s-nextEmit)>>6 + 4
			if nextS > sLimit {
				goto emitRemainder
			}
			// Probe two adjacent positions per iteration.
			hash0 := hash6(cv, tableBits)
			hash1 := hash6(cv>>8, tableBits)
			candidate = int(table[hash0])
			candidate2 := int(table[hash1])
			table[hash0] = uint32(s)
			table[hash1] = uint32(s + 1)
			hash2 := hash6(cv>>16, tableBits)
			// Check repeat at offset checkRep.
			const checkRep = 1
			if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) {
				base := s + checkRep
				// Extend back
				for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
					i--
					base--
				}
				d += emitLiteral(dst[d:], src[nextEmit:base])
				// Extend forward
				candidate := s - repeat + 4 + checkRep
				s += 4 + checkRep
				for s <= sLimit {
					if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
						s += bits.TrailingZeros64(diff) >> 3
						break
					}
					s += 8
					candidate += 8
				}
				if debug {
					// Validate match.
					if s <= candidate {
						panic("s <= candidate")
					}
					a := src[base:s]
					b := src[base-repeat : base-repeat+(s-base)]
					if !bytes.Equal(a, b) {
						panic("mismatch")
					}
				}
				if nextEmit > 0 {
					// same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset.
					d += emitRepeat(dst[d:], repeat, s-base)
				} else {
					// First match, cannot be repeat.
					d += emitCopy(dst[d:], repeat, s-base)
				}
				nextEmit = s
				if s >= sLimit {
					goto emitRemainder
				}
				cv = load64(src, s)
				continue
			}
			if uint32(cv) == load32(src, candidate) {
				break
			}
			candidate = int(table[hash2])
			if uint32(cv>>8) == load32(src, candidate2) {
				table[hash2] = uint32(s + 2)
				candidate = candidate2
				s++
				break
			}
			table[hash2] = uint32(s + 2)
			if uint32(cv>>16) == load32(src, candidate) {
				s += 2
				break
			}
			cv = load64(src, nextS)
			s = nextS
		}
		// Extend backwards.
		// The top bytes will be rechecked to get the full match.
		for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] {
			candidate--
			s--
		}
		// Bail if we exceed the maximum size.
		if d+(s-nextEmit) > dstLimit {
			return 0
		}
		// A 4-byte match has been found. We'll later see if more than 4 bytes
		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
		// them as literal bytes.
		d += emitLiteral(dst[d:], src[nextEmit:s])
		// Call emitCopy, and then see if another emitCopy could be our next
		// move. Repeat until we find no match for the input immediately after
		// what was consumed by the last emitCopy call.
		//
		// If we exit this loop normally then we need to call emitLiteral next,
		// though we don't yet know how big the literal will be. We handle that
		// by proceeding to the next iteration of the main loop. We also can
		// exit this loop via goto if we get close to exhausting the input.
		for {
			// Invariant: we have a 4-byte match at s, and no need to emit any
			// literal bytes prior to s.
			base := s
			repeat = base - candidate
			// Extend the 4-byte match as long as possible.
			s += 4
			candidate += 4
			for s <= len(src)-8 {
				if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
					s += bits.TrailingZeros64(diff) >> 3
					break
				}
				s += 8
				candidate += 8
			}
			d += emitCopy(dst[d:], repeat, s-base)
			if debug {
				// Validate match.
				if s <= candidate {
					panic("s <= candidate")
				}
				a := src[base:s]
				b := src[base-repeat : base-repeat+(s-base)]
				if !bytes.Equal(a, b) {
					panic("mismatch")
				}
			}
			nextEmit = s
			if s >= sLimit {
				goto emitRemainder
			}
			if d > dstLimit {
				// Do we have space for more, if not bail.
				return 0
			}
			// Check for an immediate match, otherwise start search at s+1
			x := load64(src, s-2)
			m2Hash := hash6(x, tableBits)
			currHash := hash6(x>>16, tableBits)
			candidate = int(table[currHash])
			table[m2Hash] = uint32(s - 2)
			table[currHash] = uint32(s)
			if debug && s == candidate {
				panic("s == candidate")
			}
			if uint32(x>>16) != load32(src, candidate) {
				cv = load64(src, s+1)
				s++
				break
			}
		}
	}
emitRemainder:
	if nextEmit < len(src) {
		// Bail if we exceed the maximum size.
		if d+len(src)-nextEmit > dstLimit {
			return 0
		}
		d += emitLiteral(dst[d:], src[nextEmit:])
	}
	return d
}
// encodeBlockSnappyGo encodes a non-empty src to a guaranteed-large-enough
// dst. It mirrors encodeBlockGo but emits all copies via emitCopyNoRepeat
// (presumably for Snappy-compatible output, which the name suggests — no
// repeat tags are produced). It assumes that the varint-encoded length of
// the decompressed bytes has already been written.
//
// NOTE(review): same preconditions as encodeBlockGo are assumed:
//
//	len(dst) >= MaxEncodedLen(len(src)) &&
//	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlockSnappyGo(dst, src []byte) (d int) {
	// Initialize the hash table.
	const (
		tableBits    = 14
		maxTableSize = 1 << tableBits
	)
	// table maps hash6 values to the most recent src position where that
	// hashed sequence was seen.
	var table [maxTableSize]uint32
	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := len(src) - inputMargin
	// Bail if we can't compress to at least this.
	dstLimit := len(src) - len(src)>>5 - 5
	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := 0
	// The encoded form must start with a literal, as there are no previous
	// bytes to copy, so we start looking for hash matches at s == 1.
	s := 1
	cv := load64(src, s)
	// We search for a repeat at -1, but don't output repeats when nextEmit == 0
	repeat := 1
	for {
		candidate := 0
		for {
			// Next src position to check
			// The (s-nextEmit)>>6 term makes the scan skip ahead faster the
			// longer we go without finding a match.
			nextS := s + (s-nextEmit)>>6 + 4
			if nextS > sLimit {
				goto emitRemainder
			}
			hash0 := hash6(cv, tableBits)
			hash1 := hash6(cv>>8, tableBits)
			candidate = int(table[hash0])
			candidate2 := int(table[hash1])
			table[hash0] = uint32(s)
			table[hash1] = uint32(s + 1)
			hash2 := hash6(cv>>16, tableBits)
			// Check repeat at offset checkRep.
			const checkRep = 1
			if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) {
				base := s + checkRep
				// Extend back
				for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
					i--
					base--
				}
				d += emitLiteral(dst[d:], src[nextEmit:base])
				// Extend forward
				candidate := s - repeat + 4 + checkRep
				s += 4 + checkRep
				for s <= sLimit {
					if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
						s += bits.TrailingZeros64(diff) >> 3
						break
					}
					s += 8
					candidate += 8
				}
				// Unlike encodeBlockGo, always emit a full copy (no repeat tag).
				d += emitCopyNoRepeat(dst[d:], repeat, s-base)
				nextEmit = s
				if s >= sLimit {
					goto emitRemainder
				}
				cv = load64(src, s)
				continue
			}
			if uint32(cv) == load32(src, candidate) {
				break
			}
			candidate = int(table[hash2])
			if uint32(cv>>8) == load32(src, candidate2) {
				table[hash2] = uint32(s + 2)
				candidate = candidate2
				s++
				break
			}
			table[hash2] = uint32(s + 2)
			if uint32(cv>>16) == load32(src, candidate) {
				s += 2
				break
			}
			cv = load64(src, nextS)
			s = nextS
		}
		// Extend backwards
		for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] {
			candidate--
			s--
		}
		// Bail if we exceed the maximum size.
		if d+(s-nextEmit) > dstLimit {
			return 0
		}
		// A 4-byte match has been found. We'll later see if more than 4 bytes
		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
		// them as literal bytes.
		d += emitLiteral(dst[d:], src[nextEmit:s])
		// Call emitCopy, and then see if another emitCopy could be our next
		// move. Repeat until we find no match for the input immediately after
		// what was consumed by the last emitCopy call.
		//
		// If we exit this loop normally then we need to call emitLiteral next,
		// though we don't yet know how big the literal will be. We handle that
		// by proceeding to the next iteration of the main loop. We also can
		// exit this loop via goto if we get close to exhausting the input.
		for {
			// Invariant: we have a 4-byte match at s, and no need to emit any
			// literal bytes prior to s.
			base := s
			repeat = base - candidate
			// Extend the 4-byte match as long as possible.
			s += 4
			candidate += 4
			for s <= len(src)-8 {
				if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
					s += bits.TrailingZeros64(diff) >> 3
					break
				}
				s += 8
				candidate += 8
			}
			d += emitCopyNoRepeat(dst[d:], repeat, s-base)
			if false {
				// Validate match.
				a := src[base:s]
				b := src[base-repeat : base-repeat+(s-base)]
				if !bytes.Equal(a, b) {
					panic("mismatch")
				}
			}
			nextEmit = s
			if s >= sLimit {
				goto emitRemainder
			}
			if d > dstLimit {
				// Do we have space for more, if not bail.
				return 0
			}
			// Check for an immediate match, otherwise start search at s+1
			x := load64(src, s-2)
			m2Hash := hash6(x, tableBits)
			currHash := hash6(x>>16, tableBits)
			candidate = int(table[currHash])
			table[m2Hash] = uint32(s - 2)
			table[currHash] = uint32(s)
			if uint32(x>>16) != load32(src, candidate) {
				cv = load64(src, s+1)
				s++
				break
			}
		}
	}
emitRemainder:
	if nextEmit < len(src) {
		// Bail if we exceed the maximum size.
		if d+len(src)-nextEmit > dstLimit {
			return 0
		}
		d += emitLiteral(dst[d:], src[nextEmit:])
	}
	return d
}
| @@ -0,0 +1,142 @@ | |||
| //go:build !appengine && !noasm && gc | |||
| // +build !appengine,!noasm,gc | |||
| package s2 | |||
| // encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It | |||
| // assumes that the varint-encoded length of the decompressed bytes has already | |||
| // been written. | |||
| // | |||
| // It also assumes that: | |||
| // len(dst) >= MaxEncodedLen(len(src)) && | |||
| // minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize | |||
| func encodeBlock(dst, src []byte) (d int) { | |||
| const ( | |||
| // Use 12 bit table when less than... | |||
| limit12B = 16 << 10 | |||
| // Use 10 bit table when less than... | |||
| limit10B = 4 << 10 | |||
| // Use 8 bit table when less than... | |||
| limit8B = 512 | |||
| ) | |||
| if len(src) >= 4<<20 { | |||
| return encodeBlockAsm(dst, src) | |||
| } | |||
| if len(src) >= limit12B { | |||
| return encodeBlockAsm4MB(dst, src) | |||
| } | |||
| if len(src) >= limit10B { | |||
| return encodeBlockAsm12B(dst, src) | |||
| } | |||
| if len(src) >= limit8B { | |||
| return encodeBlockAsm10B(dst, src) | |||
| } | |||
| if len(src) < minNonLiteralBlockSize { | |||
| return 0 | |||
| } | |||
| return encodeBlockAsm8B(dst, src) | |||
| } | |||
| // encodeBlockBetter encodes a non-empty src to a guaranteed-large-enough dst. It | |||
| // assumes that the varint-encoded length of the decompressed bytes has already | |||
| // been written. | |||
| // | |||
| // It also assumes that: | |||
| // len(dst) >= MaxEncodedLen(len(src)) && | |||
| // minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize | |||
| func encodeBlockBetter(dst, src []byte) (d int) { | |||
| const ( | |||
| // Use 12 bit table when less than... | |||
| limit12B = 16 << 10 | |||
| // Use 10 bit table when less than... | |||
| limit10B = 4 << 10 | |||
| // Use 8 bit table when less than... | |||
| limit8B = 512 | |||
| ) | |||
| if len(src) > 4<<20 { | |||
| return encodeBetterBlockAsm(dst, src) | |||
| } | |||
| if len(src) >= limit12B { | |||
| return encodeBetterBlockAsm4MB(dst, src) | |||
| } | |||
| if len(src) >= limit10B { | |||
| return encodeBetterBlockAsm12B(dst, src) | |||
| } | |||
| if len(src) >= limit8B { | |||
| return encodeBetterBlockAsm10B(dst, src) | |||
| } | |||
| if len(src) < minNonLiteralBlockSize { | |||
| return 0 | |||
| } | |||
| return encodeBetterBlockAsm8B(dst, src) | |||
| } | |||
| // encodeBlockSnappy encodes a non-empty src to a guaranteed-large-enough dst. It | |||
| // assumes that the varint-encoded length of the decompressed bytes has already | |||
| // been written. | |||
| // | |||
| // It also assumes that: | |||
| // len(dst) >= MaxEncodedLen(len(src)) && | |||
| // minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize | |||
| func encodeBlockSnappy(dst, src []byte) (d int) { | |||
| const ( | |||
| // Use 12 bit table when less than... | |||
| limit12B = 16 << 10 | |||
| // Use 10 bit table when less than... | |||
| limit10B = 4 << 10 | |||
| // Use 8 bit table when less than... | |||
| limit8B = 512 | |||
| ) | |||
| if len(src) >= 64<<10 { | |||
| return encodeSnappyBlockAsm(dst, src) | |||
| } | |||
| if len(src) >= limit12B { | |||
| return encodeSnappyBlockAsm64K(dst, src) | |||
| } | |||
| if len(src) >= limit10B { | |||
| return encodeSnappyBlockAsm12B(dst, src) | |||
| } | |||
| if len(src) >= limit8B { | |||
| return encodeSnappyBlockAsm10B(dst, src) | |||
| } | |||
| if len(src) < minNonLiteralBlockSize { | |||
| return 0 | |||
| } | |||
| return encodeSnappyBlockAsm8B(dst, src) | |||
| } | |||
// encodeBlockBetterSnappy encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
//
// It also assumes that:
//	len(dst) >= MaxEncodedLen(len(src)) &&
//	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlockBetterSnappy(dst, src []byte) (d int) {
	const (
		// Use 12 bit table when less than...
		limit12B = 16 << 10
		// Use 10 bit table when less than...
		limit10B = 4 << 10
		// Use 8 bit table when less than...
		limit8B = 512
	)
	// Dispatch to the assembler routine whose hash-table size fits the input.
	if len(src) >= 64<<10 {
		return encodeSnappyBetterBlockAsm(dst, src)
	}
	if len(src) >= limit12B {
		return encodeSnappyBetterBlockAsm64K(dst, src)
	}
	if len(src) >= limit10B {
		return encodeSnappyBetterBlockAsm12B(dst, src)
	}
	if len(src) >= limit8B {
		return encodeSnappyBetterBlockAsm10B(dst, src)
	}
	if len(src) < minNonLiteralBlockSize {
		// Too small to contain anything but literals.
		return 0
	}
	return encodeSnappyBetterBlockAsm8B(dst, src)
}
| @@ -0,0 +1,630 @@ | |||
| // Copyright 2016 The Snappy-Go Authors. All rights reserved. | |||
| // Copyright (c) 2019 Klaus Post. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| package s2 | |||
| import ( | |||
| "fmt" | |||
| "math/bits" | |||
| ) | |||
// encodeBlockBest encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
//
// It also assumes that:
//	len(dst) >= MaxEncodedLen(len(src)) &&
//	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlockBest(dst, src []byte) (d int) {
	// Initialize the hash tables.
	const (
		// Long hash matches.
		lTableBits    = 19
		maxLTableSize = 1 << lTableBits

		// Short hash matches.
		sTableBits    = 16
		maxSTableSize = 1 << sTableBits

		inputMargin = 8 + 2
	)

	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := len(src) - inputMargin
	if len(src) < minNonLiteralBlockSize {
		return 0
	}

	// Each table entry packs two candidate positions: the current one in the
	// low 32 bits and the previous (older) one in the high 32 bits.
	var lTable [maxLTableSize]uint64
	var sTable [maxSTableSize]uint64

	// Bail if we can't compress to at least this.
	dstLimit := len(src) - 5

	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := 0

	// The encoded form must start with a literal, as there are no previous
	// bytes to copy, so we start looking for hash matches at s == 1.
	s := 1
	cv := load64(src, s)

	// We search for a repeat at -1, but don't output repeats when nextEmit == 0
	repeat := 1
	const lowbitMask = 0xffffffff
	getCur := func(x uint64) int {
		return int(x & lowbitMask)
	}
	getPrev := func(x uint64) int {
		return int(x >> 32)
	}
	const maxSkip = 64

	for {
		type match struct {
			offset int
			s      int
			length int
			score  int
			rep    bool
		}
		var best match
		for {
			// Next src position to check
			nextS := (s-nextEmit)>>8 + 1
			if nextS > maxSkip {
				nextS = s + maxSkip
			} else {
				nextS += s
			}
			if nextS > sLimit {
				goto emitRemainder
			}
			hashL := hash8(cv, lTableBits)
			hashS := hash4(cv, sTableBits)
			candidateL := lTable[hashL]
			candidateS := sTable[hashS]

			// score rates a match by the bytes it saves over emitting the
			// covered span as literals, minus the cost of its encoding.
			score := func(m match) int {
				// Matches that are longer forward are penalized since we must emit it as a literal.
				score := m.length - m.s
				if nextEmit == m.s {
					// If we do not have to emit literals, we save 1 byte
					score++
				}
				offset := m.s - m.offset
				if m.rep {
					return score - emitRepeatSize(offset, m.length)
				}
				return score - emitCopySize(offset, m.length)
			}
			// matchAt extends a candidate at 'offset' seen from position 's'
			// and scores it; a zero-length result means "no usable match".
			matchAt := func(offset, s int, first uint32, rep bool) match {
				if best.length != 0 && best.s-best.offset == s-offset {
					// Don't retest if we have the same offset.
					return match{offset: offset, s: s}
				}
				if load32(src, offset) != first {
					return match{offset: offset, s: s}
				}
				// length temporarily holds the end position on the candidate
				// side; it is converted to a true length below.
				m := match{offset: offset, s: s, length: 4 + offset, rep: rep}
				s += 4
				// Extend the match 8 bytes at a time while within sLimit.
				for s <= sLimit {
					if diff := load64(src, s) ^ load64(src, m.length); diff != 0 {
						m.length += bits.TrailingZeros64(diff) >> 3
						break
					}
					s += 8
					m.length += 8
				}
				m.length -= offset
				m.score = score(m)
				if m.score <= -m.s {
					// Eliminate if no savings, we might find a better one.
					m.length = 0
				}
				return m
			}
			// bestOf compares two scored matches, normalizing their scores to
			// the same starting position before comparing.
			bestOf := func(a, b match) match {
				if b.length == 0 {
					return a
				}
				if a.length == 0 {
					return b
				}
				as := a.score + b.s
				bs := b.score + a.s
				if as >= bs {
					return a
				}
				return b
			}

			// Try both stored candidates from the long and short tables.
			best = bestOf(matchAt(getCur(candidateL), s, uint32(cv), false), matchAt(getPrev(candidateL), s, uint32(cv), false))
			best = bestOf(best, matchAt(getCur(candidateS), s, uint32(cv), false))
			best = bestOf(best, matchAt(getPrev(candidateS), s, uint32(cv), false))
			{
				// Try a repeat match one byte ahead.
				best = bestOf(best, matchAt(s-repeat+1, s+1, uint32(cv>>8), true))
				if best.length > 0 {
					// s+1
					nextShort := sTable[hash4(cv>>8, sTableBits)]
					s := s + 1
					cv := load64(src, s)
					nextLong := lTable[hash8(cv, lTableBits)]
					best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv), false))
					best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv), false))
					best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv), false))
					best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv), false))
					// Repeat at + 2
					best = bestOf(best, matchAt(s-repeat+1, s+1, uint32(cv>>8), true))
					// s+2
					if true {
						nextShort = sTable[hash4(cv>>8, sTableBits)]
						s++
						cv = load64(src, s)
						nextLong = lTable[hash8(cv, lTableBits)]
						best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv), false))
						best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv), false))
						best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv), false))
						best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv), false))
					}
					// Search for a match at best match end, see if that is better.
					if sAt := best.s + best.length; sAt < sLimit {
						sBack := best.s
						backL := best.length
						// Load initial values
						cv = load64(src, sBack)
						// Search for mismatch
						next := lTable[hash8(load64(src, sAt), lTableBits)]
						//next := sTable[hash4(load64(src, sAt), sTableBits)]

						if checkAt := getCur(next) - backL; checkAt > 0 {
							best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false))
						}
						if checkAt := getPrev(next) - backL; checkAt > 0 {
							best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false))
						}
					}
				}
			}

			// Update table: new position in low bits, previous moved to high bits.
			lTable[hashL] = uint64(s) | candidateL<<32
			sTable[hashS] = uint64(s) | candidateS<<32

			if best.length > 0 {
				break
			}

			cv = load64(src, nextS)
			s = nextS
		}

		// Extend backwards, not needed for repeats...
		s = best.s
		if !best.rep {
			for best.offset > 0 && s > nextEmit && src[best.offset-1] == src[s-1] {
				best.offset--
				best.length++
				s--
			}
		}
		// Debug-only sanity check, deliberately disabled.
		if false && best.offset >= s {
			panic(fmt.Errorf("t %d >= s %d", best.offset, s))
		}
		// Bail if we exceed the maximum size.
		if d+(s-nextEmit) > dstLimit {
			return 0
		}

		base := s
		offset := s - best.offset

		s += best.length

		if offset > 65535 && s-base <= 5 && !best.rep {
			// Bail if the match is equal or worse to the encoding.
			s = best.s + 1
			if s >= sLimit {
				goto emitRemainder
			}
			cv = load64(src, s)
			continue
		}

		d += emitLiteral(dst[d:], src[nextEmit:base])
		if best.rep {
			if nextEmit > 0 {
				// same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset.
				d += emitRepeat(dst[d:], offset, best.length)
			} else {
				// First match, cannot be repeat.
				d += emitCopy(dst[d:], offset, best.length)
			}
		} else {
			d += emitCopy(dst[d:], offset, best.length)
		}
		repeat = offset

		nextEmit = s
		if s >= sLimit {
			goto emitRemainder
		}

		if d > dstLimit {
			// Do we have space for more, if not bail.
			return 0
		}
		// Fill tables with every position covered by the match.
		for i := best.s + 1; i < s; i++ {
			cv0 := load64(src, i)
			long0 := hash8(cv0, lTableBits)
			short0 := hash4(cv0, sTableBits)
			lTable[long0] = uint64(i) | lTable[long0]<<32
			sTable[short0] = uint64(i) | sTable[short0]<<32
		}
		cv = load64(src, s)
	}

emitRemainder:
	if nextEmit < len(src) {
		// Bail if we exceed the maximum size.
		if d+len(src)-nextEmit > dstLimit {
			return 0
		}
		d += emitLiteral(dst[d:], src[nextEmit:])
	}
	return d
}
// encodeBlockBestSnappy encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
//
// It also assumes that:
//	len(dst) >= MaxEncodedLen(len(src)) &&
//	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlockBestSnappy(dst, src []byte) (d int) {
	// Initialize the hash tables.
	const (
		// Long hash matches.
		lTableBits    = 19
		maxLTableSize = 1 << lTableBits

		// Short hash matches.
		sTableBits    = 16
		maxSTableSize = 1 << sTableBits

		inputMargin = 8 + 2
	)

	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := len(src) - inputMargin
	if len(src) < minNonLiteralBlockSize {
		return 0
	}

	// Each table entry packs two candidate positions: the current one in the
	// low 32 bits and the previous (older) one in the high 32 bits.
	var lTable [maxLTableSize]uint64
	var sTable [maxSTableSize]uint64

	// Bail if we can't compress to at least this.
	dstLimit := len(src) - 5

	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := 0

	// The encoded form must start with a literal, as there are no previous
	// bytes to copy, so we start looking for hash matches at s == 1.
	s := 1
	cv := load64(src, s)

	// We search for a repeat at -1, but don't output repeats when nextEmit == 0
	repeat := 1
	const lowbitMask = 0xffffffff
	getCur := func(x uint64) int {
		return int(x & lowbitMask)
	}
	getPrev := func(x uint64) int {
		return int(x >> 32)
	}
	const maxSkip = 64

	for {
		type match struct {
			offset int
			s      int
			length int
			score  int
		}
		var best match
		for {
			// Next src position to check
			nextS := (s-nextEmit)>>8 + 1
			if nextS > maxSkip {
				nextS = s + maxSkip
			} else {
				nextS += s
			}
			if nextS > sLimit {
				goto emitRemainder
			}
			hashL := hash8(cv, lTableBits)
			hashS := hash4(cv, sTableBits)
			candidateL := lTable[hashL]
			candidateS := sTable[hashS]

			// score rates a match by the bytes it saves over emitting the
			// covered span as literals. Snappy has no repeat codes, so the
			// plain copy cost is always used.
			score := func(m match) int {
				// Matches that are longer forward are penalized since we must emit it as a literal.
				score := m.length - m.s
				if nextEmit == m.s {
					// If we do not have to emit literals, we save 1 byte
					score++
				}
				offset := m.s - m.offset
				return score - emitCopyNoRepeatSize(offset, m.length)
			}
			// matchAt extends a candidate at 'offset' seen from position 's'
			// and scores it; a zero-length result means "no usable match".
			matchAt := func(offset, s int, first uint32) match {
				if best.length != 0 && best.s-best.offset == s-offset {
					// Don't retest if we have the same offset.
					return match{offset: offset, s: s}
				}
				if load32(src, offset) != first {
					return match{offset: offset, s: s}
				}
				// length temporarily holds the end position on the candidate
				// side; it is converted to a true length below.
				m := match{offset: offset, s: s, length: 4 + offset}
				s += 4
				for s <= sLimit {
					if diff := load64(src, s) ^ load64(src, m.length); diff != 0 {
						m.length += bits.TrailingZeros64(diff) >> 3
						break
					}
					s += 8
					m.length += 8
				}
				m.length -= offset
				m.score = score(m)
				if m.score <= -m.s {
					// Eliminate if no savings, we might find a better one.
					m.length = 0
				}
				return m
			}
			// bestOf compares two scored matches, normalizing their scores to
			// the same starting position before comparing.
			bestOf := func(a, b match) match {
				if b.length == 0 {
					return a
				}
				if a.length == 0 {
					return b
				}
				as := a.score + b.s
				bs := b.score + a.s
				if as >= bs {
					return a
				}
				return b
			}

			best = bestOf(matchAt(getCur(candidateL), s, uint32(cv)), matchAt(getPrev(candidateL), s, uint32(cv)))
			best = bestOf(best, matchAt(getCur(candidateS), s, uint32(cv)))
			best = bestOf(best, matchAt(getPrev(candidateS), s, uint32(cv)))
			{
				best = bestOf(best, matchAt(s-repeat+1, s+1, uint32(cv>>8)))
				if best.length > 0 {
					// s+1
					nextShort := sTable[hash4(cv>>8, sTableBits)]
					s := s + 1
					cv := load64(src, s)
					nextLong := lTable[hash8(cv, lTableBits)]
					best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv)))
					best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv)))
					best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv)))
					best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv)))
					// Repeat at + 2
					best = bestOf(best, matchAt(s-repeat+1, s+1, uint32(cv>>8)))
					// s+2
					if true {
						nextShort = sTable[hash4(cv>>8, sTableBits)]
						s++
						cv = load64(src, s)
						nextLong = lTable[hash8(cv, lTableBits)]
						best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv)))
						best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv)))
						best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv)))
						best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv)))
					}
					// Search for a match at best match end, see if that is better.
					if sAt := best.s + best.length; sAt < sLimit {
						sBack := best.s
						backL := best.length
						// Load initial values
						cv = load64(src, sBack)
						// Search for mismatch
						next := lTable[hash8(load64(src, sAt), lTableBits)]
						//next := sTable[hash4(load64(src, sAt), sTableBits)]

						if checkAt := getCur(next) - backL; checkAt > 0 {
							best = bestOf(best, matchAt(checkAt, sBack, uint32(cv)))
						}
						if checkAt := getPrev(next) - backL; checkAt > 0 {
							best = bestOf(best, matchAt(checkAt, sBack, uint32(cv)))
						}
					}
				}
			}

			// Update table: new position in low bits, previous moved to high bits.
			lTable[hashL] = uint64(s) | candidateL<<32
			sTable[hashS] = uint64(s) | candidateS<<32

			if best.length > 0 {
				break
			}

			cv = load64(src, nextS)
			s = nextS
		}

		// Extend backwards, not needed for repeats...
		s = best.s
		if true {
			for best.offset > 0 && s > nextEmit && src[best.offset-1] == src[s-1] {
				best.offset--
				best.length++
				s--
			}
		}
		// Debug-only sanity check, deliberately disabled.
		if false && best.offset >= s {
			panic(fmt.Errorf("t %d >= s %d", best.offset, s))
		}
		// Bail if we exceed the maximum size.
		if d+(s-nextEmit) > dstLimit {
			return 0
		}

		base := s
		offset := s - best.offset

		s += best.length

		if offset > 65535 && s-base <= 5 {
			// Bail if the match is equal or worse to the encoding.
			s = best.s + 1
			if s >= sLimit {
				goto emitRemainder
			}
			cv = load64(src, s)
			continue
		}

		d += emitLiteral(dst[d:], src[nextEmit:base])
		d += emitCopyNoRepeat(dst[d:], offset, best.length)
		repeat = offset

		nextEmit = s
		if s >= sLimit {
			goto emitRemainder
		}

		if d > dstLimit {
			// Do we have space for more, if not bail.
			return 0
		}
		// Fill tables with every position covered by the match.
		for i := best.s + 1; i < s; i++ {
			cv0 := load64(src, i)
			long0 := hash8(cv0, lTableBits)
			short0 := hash4(cv0, sTableBits)
			lTable[long0] = uint64(i) | lTable[long0]<<32
			sTable[short0] = uint64(i) | sTable[short0]<<32
		}
		cv = load64(src, s)
	}

emitRemainder:
	if nextEmit < len(src) {
		// Bail if we exceed the maximum size.
		if d+len(src)-nextEmit > dstLimit {
			return 0
		}
		d += emitLiteral(dst[d:], src[nextEmit:])
	}
	return d
}
| // emitCopySize returns the size to encode the offset+length | |||
| // | |||
| // It assumes that: | |||
| // 1 <= offset && offset <= math.MaxUint32 | |||
| // 4 <= length && length <= 1 << 24 | |||
| func emitCopySize(offset, length int) int { | |||
| if offset >= 65536 { | |||
| i := 0 | |||
| if length > 64 { | |||
| length -= 64 | |||
| if length >= 4 { | |||
| // Emit remaining as repeats | |||
| return 5 + emitRepeatSize(offset, length) | |||
| } | |||
| i = 5 | |||
| } | |||
| if length == 0 { | |||
| return i | |||
| } | |||
| return i + 5 | |||
| } | |||
| // Offset no more than 2 bytes. | |||
| if length > 64 { | |||
| if offset < 2048 { | |||
| // Emit 8 bytes, then rest as repeats... | |||
| return 2 + emitRepeatSize(offset, length-8) | |||
| } | |||
| // Emit remaining as repeats, at least 4 bytes remain. | |||
| return 3 + emitRepeatSize(offset, length-60) | |||
| } | |||
| if length >= 12 || offset >= 2048 { | |||
| return 3 | |||
| } | |||
| // Emit the remaining copy, encoded as 2 bytes. | |||
| return 2 | |||
| } | |||
// emitCopyNoRepeatSize returns the number of bytes needed to encode the
// offset+length pair without using repeat codes (Snappy-compatible output).
//
// It assumes that:
//	1 <= offset && offset <= math.MaxUint32
//	4 <= length && length <= 1 << 24
func emitCopyNoRepeatSize(offset, length int) int {
	if offset >= 65536 {
		// Each 5-byte copy covers up to 64 bytes.
		return 5 + 5*(length/64)
	}

	// Offset fits in 2 bytes from here on.
	switch {
	case length > 64:
		// 3-byte copies, each covering up to 60 bytes.
		return 3 + 3*(length/60)
	case length >= 12 || offset >= 2048:
		return 3
	default:
		// Short copy, encoded as 2 bytes.
		return 2
	}
}
// emitRepeatSize returns the number of bytes required to encode a repeat.
// Length must be at least 4 and < 1<<24
func emitRepeatSize(offset, length int) int {
	// Cheap 2-byte form for short repeats (near offsets get a bit more room).
	if length <= 8 || (length < 12 && offset < 2048) {
		return 2
	}
	if length < 256+8 {
		return 3
	}
	if length < (1<<16)+260 {
		return 4
	}
	const maxRepeat = (1 << 24) - 1
	length -= (1 << 16) - 4
	if length > maxRepeat {
		// Split into one maximal 5-byte repeat plus the remainder.
		return 5 + emitRepeatSize(offset, length-maxRepeat+4)
	}
	return 5
}
| @@ -0,0 +1,431 @@ | |||
| // Copyright 2016 The Snappy-Go Authors. All rights reserved. | |||
| // Copyright (c) 2019 Klaus Post. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| package s2 | |||
| import ( | |||
| "math/bits" | |||
| ) | |||
// hash4 returns the hash of the lowest 4 bytes of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <32.
func hash4(u uint64, h uint8) uint32 {
	const prime4bytes = 2654435761
	x := uint32(u) * prime4bytes
	return x >> ((32 - h) & 31)
}
// hash5 returns the hash of the lowest 5 bytes of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <64.
func hash5(u uint64, h uint8) uint32 {
	const prime5bytes = 889523592379
	v := u << (64 - 40) // keep only the low 5 bytes
	return uint32((v * prime5bytes) >> ((64 - h) & 63))
}
// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <64.
func hash7(u uint64, h uint8) uint32 {
	const prime7bytes = 58295818150454627
	v := u << (64 - 56) // keep only the low 7 bytes
	return uint32((v * prime7bytes) >> ((64 - h) & 63))
}
// hash8 returns the hash of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <64.
func hash8(u uint64, h uint8) uint32 {
	const prime8bytes = 0xcf1bbcdcb7a56463
	x := u * prime8bytes
	return uint32(x >> ((64 - h) & 63))
}
// encodeBlockBetterGo encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
//
// It also assumes that:
//	len(dst) >= MaxEncodedLen(len(src)) &&
//	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlockBetterGo(dst, src []byte) (d int) {
	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := len(src) - inputMargin
	if len(src) < minNonLiteralBlockSize {
		return 0
	}

	// Initialize the hash tables.
	const (
		// Long hash matches.
		lTableBits    = 16
		maxLTableSize = 1 << lTableBits

		// Short hash matches.
		sTableBits    = 14
		maxSTableSize = 1 << sTableBits
	)

	var lTable [maxLTableSize]uint32
	var sTable [maxSTableSize]uint32

	// Bail if we can't compress to at least this.
	dstLimit := len(src) - len(src)>>5 - 6

	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := 0

	// The encoded form must start with a literal, as there are no previous
	// bytes to copy, so we start looking for hash matches at s == 1.
	s := 1
	cv := load64(src, s)

	// We initialize repeat to 0, so we never match on first attempt
	repeat := 0

	for {
		candidateL := 0
		nextS := 0
		for {
			// Next src position to check
			nextS = s + (s-nextEmit)>>7 + 1
			if nextS > sLimit {
				goto emitRemainder
			}
			hashL := hash7(cv, lTableBits)
			hashS := hash4(cv, sTableBits)
			candidateL = int(lTable[hashL])
			candidateS := int(sTable[hashS])
			lTable[hashL] = uint32(s)
			sTable[hashS] = uint32(s)

			// Check repeat at offset checkRep.
			// NOTE(review): this whole repeat path is deliberately disabled
			// via `if false &&`; it is kept as dead code only.
			const checkRep = 1
			if false && uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) {
				base := s + checkRep
				// Extend back
				for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
					i--
					base--
				}
				d += emitLiteral(dst[d:], src[nextEmit:base])

				// Extend forward
				candidate := s - repeat + 4 + checkRep
				s += 4 + checkRep
				for s < len(src) {
					if len(src)-s < 8 {
						if src[s] == src[candidate] {
							s++
							candidate++
							continue
						}
						break
					}
					if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
						s += bits.TrailingZeros64(diff) >> 3
						break
					}
					s += 8
					candidate += 8
				}
				if nextEmit > 0 {
					// same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset.
					d += emitRepeat(dst[d:], repeat, s-base)
				} else {
					// First match, cannot be repeat.
					d += emitCopy(dst[d:], repeat, s-base)
				}
				nextEmit = s
				if s >= sLimit {
					goto emitRemainder
				}

				cv = load64(src, s)
				continue
			}

			// Prefer the long-table candidate.
			if uint32(cv) == load32(src, candidateL) {
				break
			}

			// Check our short candidate
			if uint32(cv) == load32(src, candidateS) {
				// Try a long candidate at s+1
				hashL = hash7(cv>>8, lTableBits)
				candidateL = int(lTable[hashL])
				lTable[hashL] = uint32(s + 1)
				if uint32(cv>>8) == load32(src, candidateL) {
					s++
					break
				}
				// Use our short candidate.
				candidateL = candidateS
				break
			}

			cv = load64(src, nextS)
			s = nextS
		}

		// Extend backwards
		for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] {
			candidateL--
			s--
		}

		// Bail if we exceed the maximum size.
		if d+(s-nextEmit) > dstLimit {
			return 0
		}

		base := s
		offset := base - candidateL

		// Extend the 4-byte match as long as possible.
		s += 4
		candidateL += 4
		for s < len(src) {
			if len(src)-s < 8 {
				// Tail: compare byte by byte.
				if src[s] == src[candidateL] {
					s++
					candidateL++
					continue
				}
				break
			}
			if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 {
				s += bits.TrailingZeros64(diff) >> 3
				break
			}
			s += 8
			candidateL += 8
		}

		if offset > 65535 && s-base <= 5 && repeat != offset {
			// Bail if the match is equal or worse to the encoding.
			s = nextS + 1
			if s >= sLimit {
				goto emitRemainder
			}
			cv = load64(src, s)
			continue
		}

		d += emitLiteral(dst[d:], src[nextEmit:base])
		if repeat == offset {
			// Same offset as the previous copy: use the cheaper repeat code.
			d += emitRepeat(dst[d:], offset, s-base)
		} else {
			d += emitCopy(dst[d:], offset, s-base)
			repeat = offset
		}

		nextEmit = s
		if s >= sLimit {
			goto emitRemainder
		}

		if d > dstLimit {
			// Do we have space for more, if not bail.
			return 0
		}

		// Index match start+1 (long) and start+2 (short)
		index0 := base + 1
		// Index match end-2 (long) and end-1 (short)
		index1 := s - 2

		cv0 := load64(src, index0)
		cv1 := load64(src, index1)
		cv = load64(src, s)
		lTable[hash7(cv0, lTableBits)] = uint32(index0)
		lTable[hash7(cv0>>8, lTableBits)] = uint32(index0 + 1)
		lTable[hash7(cv1, lTableBits)] = uint32(index1)
		lTable[hash7(cv1>>8, lTableBits)] = uint32(index1 + 1)
		sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)
		sTable[hash4(cv0>>16, sTableBits)] = uint32(index0 + 2)
		sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
	}

emitRemainder:
	if nextEmit < len(src) {
		// Bail if we exceed the maximum size.
		if d+len(src)-nextEmit > dstLimit {
			return 0
		}
		d += emitLiteral(dst[d:], src[nextEmit:])
	}
	return d
}
// encodeBlockBetterSnappyGo encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
//
// It also assumes that:
//	len(dst) >= MaxEncodedLen(len(src)) &&
//	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlockBetterSnappyGo(dst, src []byte) (d int) {
	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := len(src) - inputMargin
	if len(src) < minNonLiteralBlockSize {
		return 0
	}

	// Initialize the hash tables.
	const (
		// Long hash matches.
		lTableBits    = 16
		maxLTableSize = 1 << lTableBits

		// Short hash matches.
		sTableBits    = 14
		maxSTableSize = 1 << sTableBits
	)

	var lTable [maxLTableSize]uint32
	var sTable [maxSTableSize]uint32

	// Bail if we can't compress to at least this.
	dstLimit := len(src) - len(src)>>5 - 6

	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := 0

	// The encoded form must start with a literal, as there are no previous
	// bytes to copy, so we start looking for hash matches at s == 1.
	s := 1
	cv := load64(src, s)

	// We initialize repeat to 0, so we never match on first attempt
	repeat := 0
	const maxSkip = 100

	for {
		candidateL := 0
		nextS := 0
		for {
			// Next src position to check
			nextS = (s-nextEmit)>>7 + 1
			if nextS > maxSkip {
				nextS = s + maxSkip
			} else {
				nextS += s
			}

			if nextS > sLimit {
				goto emitRemainder
			}
			hashL := hash7(cv, lTableBits)
			hashS := hash4(cv, sTableBits)
			candidateL = int(lTable[hashL])
			candidateS := int(sTable[hashS])
			lTable[hashL] = uint32(s)
			sTable[hashS] = uint32(s)

			// Prefer the long-table candidate.
			if uint32(cv) == load32(src, candidateL) {
				break
			}

			// Check our short candidate
			if uint32(cv) == load32(src, candidateS) {
				// Try a long candidate at s+1
				hashL = hash7(cv>>8, lTableBits)
				candidateL = int(lTable[hashL])
				lTable[hashL] = uint32(s + 1)
				if uint32(cv>>8) == load32(src, candidateL) {
					s++
					break
				}
				// Use our short candidate.
				candidateL = candidateS
				break
			}

			cv = load64(src, nextS)
			s = nextS
		}

		// Extend backwards
		for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] {
			candidateL--
			s--
		}

		// Bail if we exceed the maximum size.
		if d+(s-nextEmit) > dstLimit {
			return 0
		}

		base := s
		offset := base - candidateL

		// Extend the 4-byte match as long as possible.
		s += 4
		candidateL += 4
		for s < len(src) {
			if len(src)-s < 8 {
				// Tail: compare byte by byte.
				if src[s] == src[candidateL] {
					s++
					candidateL++
					continue
				}
				break
			}
			if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 {
				s += bits.TrailingZeros64(diff) >> 3
				break
			}
			s += 8
			candidateL += 8
		}

		if offset > 65535 && s-base <= 5 && repeat != offset {
			// Bail if the match is equal or worse to the encoding.
			s = nextS + 1
			if s >= sLimit {
				goto emitRemainder
			}
			cv = load64(src, s)
			continue
		}

		d += emitLiteral(dst[d:], src[nextEmit:base])
		// Snappy output: plain copies only, no repeat codes.
		d += emitCopyNoRepeat(dst[d:], offset, s-base)
		repeat = offset

		nextEmit = s
		if s >= sLimit {
			goto emitRemainder
		}

		if d > dstLimit {
			// Do we have space for more, if not bail.
			return 0
		}

		// Index match start+1 (long) and start+2 (short)
		index0 := base + 1
		// Index match end-2 (long) and end-1 (short)
		index1 := s - 2

		cv0 := load64(src, index0)
		cv1 := load64(src, index1)
		cv = load64(src, s)
		lTable[hash7(cv0, lTableBits)] = uint32(index0)
		lTable[hash7(cv0>>8, lTableBits)] = uint32(index0 + 1)
		lTable[hash7(cv1, lTableBits)] = uint32(index1)
		lTable[hash7(cv1>>8, lTableBits)] = uint32(index1 + 1)
		sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)
		sTable[hash4(cv0>>16, sTableBits)] = uint32(index0 + 2)
		sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
	}

emitRemainder:
	if nextEmit < len(src) {
		// Bail if we exceed the maximum size.
		if d+len(src)-nextEmit > dstLimit {
			return 0
		}
		d += emitLiteral(dst[d:], src[nextEmit:])
	}
	return d
}
| @@ -0,0 +1,307 @@ | |||
| //go:build !amd64 || appengine || !gc || noasm | |||
| // +build !amd64 appengine !gc noasm | |||
| package s2 | |||
| import ( | |||
| "math/bits" | |||
| ) | |||
| // encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It | |||
| // assumes that the varint-encoded length of the decompressed bytes has already | |||
| // been written. | |||
| // | |||
| // It also assumes that: | |||
| // len(dst) >= MaxEncodedLen(len(src)) | |||
| func encodeBlock(dst, src []byte) (d int) { | |||
| if len(src) < minNonLiteralBlockSize { | |||
| return 0 | |||
| } | |||
| return encodeBlockGo(dst, src) | |||
| } | |||
| // encodeBlockBetter encodes a non-empty src to a guaranteed-large-enough dst. It | |||
| // assumes that the varint-encoded length of the decompressed bytes has already | |||
| // been written. | |||
| // | |||
| // It also assumes that: | |||
| // len(dst) >= MaxEncodedLen(len(src)) | |||
| func encodeBlockBetter(dst, src []byte) (d int) { | |||
| return encodeBlockBetterGo(dst, src) | |||
| } | |||
// encodeBlockBetterSnappy encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
//
// It also assumes that:
//	len(dst) >= MaxEncodedLen(len(src))
func encodeBlockBetterSnappy(dst, src []byte) (d int) {
	return encodeBlockBetterSnappyGo(dst, src)
}
// encodeBlockSnappy encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
//
// It also assumes that:
//	len(dst) >= MaxEncodedLen(len(src))
func encodeBlockSnappy(dst, src []byte) (d int) {
	// Inputs below the minimum size are never compressed; the caller
	// emits them as literals.
	if len(src) < minNonLiteralBlockSize {
		return 0
	}
	return encodeBlockSnappyGo(dst, src)
}
| // emitLiteral writes a literal chunk and returns the number of bytes written. | |||
| // | |||
| // It assumes that: | |||
| // dst is long enough to hold the encoded bytes | |||
| // 0 <= len(lit) && len(lit) <= math.MaxUint32 | |||
| func emitLiteral(dst, lit []byte) int { | |||
| if len(lit) == 0 { | |||
| return 0 | |||
| } | |||
| const num = 63<<2 | tagLiteral | |||
| i, n := 0, uint(len(lit)-1) | |||
| switch { | |||
| case n < 60: | |||
| dst[0] = uint8(n)<<2 | tagLiteral | |||
| i = 1 | |||
| case n < 1<<8: | |||
| dst[1] = uint8(n) | |||
| dst[0] = 60<<2 | tagLiteral | |||
| i = 2 | |||
| case n < 1<<16: | |||
| dst[2] = uint8(n >> 8) | |||
| dst[1] = uint8(n) | |||
| dst[0] = 61<<2 | tagLiteral | |||
| i = 3 | |||
| case n < 1<<24: | |||
| dst[3] = uint8(n >> 16) | |||
| dst[2] = uint8(n >> 8) | |||
| dst[1] = uint8(n) | |||
| dst[0] = 62<<2 | tagLiteral | |||
| i = 4 | |||
| default: | |||
| dst[4] = uint8(n >> 24) | |||
| dst[3] = uint8(n >> 16) | |||
| dst[2] = uint8(n >> 8) | |||
| dst[1] = uint8(n) | |||
| dst[0] = 63<<2 | tagLiteral | |||
| i = 5 | |||
| } | |||
| return i + copy(dst[i:], lit) | |||
| } | |||
// emitRepeat writes a repeat chunk and returns the number of bytes written.
// Length must be at least 4 and < 1<<24
//
// A repeat reuses the most recently emitted copy offset, so only the length
// has to be encoded. The bit layouts below follow the S2 extension of the
// Snappy tag format (tagCopy1 with special length codes).
func emitRepeat(dst []byte, offset, length int) int {
	// Repeat offset, make length cheaper
	length -= 4
	if length <= 4 {
		// Shortest form: 2 bytes, length stored directly in the tag.
		dst[0] = uint8(length)<<2 | tagCopy1
		dst[1] = 0
		return 2
	}
	if length < 8 && offset < 2048 {
		// Encode WITH offset: a plain 2-byte copy is no larger than a
		// repeat here, so emit that instead.
		dst[1] = uint8(offset)
		dst[0] = uint8(offset>>8)<<5 | uint8(length)<<2 | tagCopy1
		return 2
	}
	if length < (1<<8)+4 {
		// 3-byte form: one extra length byte. The length is reduced a
		// second time because the shorter forms above cover the first
		// values (the stored byte is biased by 4).
		length -= 4
		dst[2] = uint8(length)
		dst[1] = 0
		dst[0] = 5<<2 | tagCopy1
		return 3
	}
	if length < (1<<16)+(1<<8) {
		// 4-byte form: two extra length bytes, biased by 1<<8.
		length -= 1 << 8
		dst[3] = uint8(length >> 8)
		dst[2] = uint8(length >> 0)
		dst[1] = 0
		dst[0] = 6<<2 | tagCopy1
		return 4
	}
	// 5-byte form: three extra length bytes, biased by 1<<16.
	const maxRepeat = (1 << 24) - 1
	length -= 1 << 16
	left := 0
	if length > maxRepeat {
		// Anything beyond the maximum encodable length is emitted as a
		// follow-up repeat chunk below.
		left = length - maxRepeat + 4
		length = maxRepeat - 4
	}
	dst[4] = uint8(length >> 16)
	dst[3] = uint8(length >> 8)
	dst[2] = uint8(length >> 0)
	dst[1] = 0
	dst[0] = 7<<2 | tagCopy1
	if left > 0 {
		return 5 + emitRepeat(dst[5:], offset, left)
	}
	return 5
}
// emitCopy writes a copy chunk and returns the number of bytes written.
//
// It assumes that:
//	dst is long enough to hold the encoded bytes
//	1 <= offset && offset <= math.MaxUint32
//	4 <= length && length <= 1 << 24
//
// Long matches are split into an initial copy plus repeat chunks, since a
// repeat does not need to re-encode the offset.
func emitCopy(dst []byte, offset, length int) int {
	if offset >= 65536 {
		// Offset needs 4 bytes (tagCopy4).
		i := 0
		if length > 64 {
			// Emit a length 64 copy, encoded as 5 bytes.
			dst[4] = uint8(offset >> 24)
			dst[3] = uint8(offset >> 16)
			dst[2] = uint8(offset >> 8)
			dst[1] = uint8(offset)
			dst[0] = 63<<2 | tagCopy4
			length -= 64
			if length >= 4 {
				// Emit remaining as repeats
				return 5 + emitRepeat(dst[5:], offset, length)
			}
			// Remainder too short for a repeat; fall through and emit
			// it as a second tagCopy4 below.
			i = 5
		}
		if length == 0 {
			return i
		}
		// Emit a copy, offset encoded as 4 bytes.
		dst[i+0] = uint8(length-1)<<2 | tagCopy4
		dst[i+1] = uint8(offset)
		dst[i+2] = uint8(offset >> 8)
		dst[i+3] = uint8(offset >> 16)
		dst[i+4] = uint8(offset >> 24)
		return i + 5
	}
	// Offset no more than 2 bytes.
	if length > 64 {
		off := 3
		if offset < 2048 {
			// emit 8 bytes as tagCopy1, rest as repeats.
			dst[1] = uint8(offset)
			dst[0] = uint8(offset>>8)<<5 | uint8(8-4)<<2 | tagCopy1
			length -= 8
			off = 2
		} else {
			// Emit a length 60 copy, encoded as 3 bytes.
			// Emit remaining as repeat value (minimum 4 bytes).
			dst[2] = uint8(offset >> 8)
			dst[1] = uint8(offset)
			dst[0] = 59<<2 | tagCopy2
			length -= 60
		}
		// Emit remaining as repeats, at least 4 bytes remain.
		return off + emitRepeat(dst[off:], offset, length)
	}
	if length >= 12 || offset >= 2048 {
		// Emit the remaining copy, encoded as 3 bytes (tagCopy2).
		dst[2] = uint8(offset >> 8)
		dst[1] = uint8(offset)
		dst[0] = uint8(length-1)<<2 | tagCopy2
		return 3
	}
	// Emit the remaining copy, encoded as 2 bytes (tagCopy1):
	// 3 offset bits share the tag byte, length stored biased by 4.
	dst[1] = uint8(offset)
	dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
	return 2
}
// emitCopyNoRepeat writes a copy chunk and returns the number of bytes written.
// Unlike emitCopy it never emits repeat chunks: long matches are split into
// multiple plain copies (Snappy-compatible output).
//
// It assumes that:
//	dst is long enough to hold the encoded bytes
//	1 <= offset && offset <= math.MaxUint32
//	4 <= length && length <= 1 << 24
func emitCopyNoRepeat(dst []byte, offset, length int) int {
	if offset >= 65536 {
		// Offset needs 4 bytes (tagCopy4).
		i := 0
		if length > 64 {
			// Emit a length 64 copy, encoded as 5 bytes.
			dst[4] = uint8(offset >> 24)
			dst[3] = uint8(offset >> 16)
			dst[2] = uint8(offset >> 8)
			dst[1] = uint8(offset)
			dst[0] = 63<<2 | tagCopy4
			length -= 64
			if length >= 4 {
				// Emit remainder as further plain copies (never repeats).
				return 5 + emitCopyNoRepeat(dst[5:], offset, length)
			}
			i = 5
		}
		if length == 0 {
			return i
		}
		// Emit a copy, offset encoded as 4 bytes.
		dst[i+0] = uint8(length-1)<<2 | tagCopy4
		dst[i+1] = uint8(offset)
		dst[i+2] = uint8(offset >> 8)
		dst[i+3] = uint8(offset >> 16)
		dst[i+4] = uint8(offset >> 24)
		return i + 5
	}
	// Offset no more than 2 bytes.
	if length > 64 {
		// Emit a length 60 copy, encoded as 3 bytes.
		// The remainder (minimum 4 bytes) is emitted recursively below.
		dst[2] = uint8(offset >> 8)
		dst[1] = uint8(offset)
		dst[0] = 59<<2 | tagCopy2
		length -= 60
		// Emit remainder as further plain copies, at least 4 bytes remain.
		return 3 + emitCopyNoRepeat(dst[3:], offset, length)
	}
	if length >= 12 || offset >= 2048 {
		// Emit the remaining copy, encoded as 3 bytes (tagCopy2).
		dst[2] = uint8(offset >> 8)
		dst[1] = uint8(offset)
		dst[0] = uint8(length-1)<<2 | tagCopy2
		return 3
	}
	// Emit the remaining copy, encoded as 2 bytes (tagCopy1):
	// 3 offset bits share the tag byte, length stored biased by 4.
	dst[1] = uint8(offset)
	dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
	return 2
}
| // matchLen returns how many bytes match in a and b | |||
| // | |||
| // It assumes that: | |||
| // len(a) <= len(b) | |||
| // | |||
| func matchLen(a []byte, b []byte) int { | |||
| b = b[:len(a)] | |||
| var checked int | |||
| if len(a) > 4 { | |||
| // Try 4 bytes first | |||
| if diff := load32(a, 0) ^ load32(b, 0); diff != 0 { | |||
| return bits.TrailingZeros32(diff) >> 3 | |||
| } | |||
| // Switch to 8 byte matching. | |||
| checked = 4 | |||
| a = a[4:] | |||
| b = b[4:] | |||
| for len(a) >= 8 { | |||
| b = b[:len(a)] | |||
| if diff := load64(a, 0) ^ load64(b, 0); diff != 0 { | |||
| return checked + (bits.TrailingZeros64(diff) >> 3) | |||
| } | |||
| checked += 8 | |||
| a = a[8:] | |||
| b = b[8:] | |||
| } | |||
| } | |||
| b = b[:len(a)] | |||
| for i := range a { | |||
| if a[i] != b[i] { | |||
| return int(i) + checked | |||
| } | |||
| } | |||
| return len(a) + checked | |||
| } | |||
| @@ -0,0 +1,191 @@ | |||
| // Code generated by command: go run gen.go -out ../encodeblock_amd64.s -stubs ../encodeblock_amd64.go -pkg=s2. DO NOT EDIT. | |||
| //go:build !appengine && !noasm && gc && !noasm | |||
| // +build !appengine,!noasm,gc,!noasm | |||
| package s2 | |||
// _dummy_ has no Go body; like every declaration below it is implemented in
// the generated assembly file (encodeblock_amd64.s). This file is generated —
// do not hand-edit the declarations.
func _dummy_()

// encodeBlockAsm encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 4294967295 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
func encodeBlockAsm(dst []byte, src []byte) int

// encodeBlockAsm4MB encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 4194304 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
func encodeBlockAsm4MB(dst []byte, src []byte) int

// encodeBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 16383 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
func encodeBlockAsm12B(dst []byte, src []byte) int

// encodeBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 4095 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
func encodeBlockAsm10B(dst []byte, src []byte) int

// encodeBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 511 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
func encodeBlockAsm8B(dst []byte, src []byte) int

// encodeBetterBlockAsm encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 4294967295 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
func encodeBetterBlockAsm(dst []byte, src []byte) int

// encodeBetterBlockAsm4MB encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 4194304 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
func encodeBetterBlockAsm4MB(dst []byte, src []byte) int

// encodeBetterBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 16383 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
func encodeBetterBlockAsm12B(dst []byte, src []byte) int

// encodeBetterBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 4095 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
func encodeBetterBlockAsm10B(dst []byte, src []byte) int

// encodeBetterBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 511 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
func encodeBetterBlockAsm8B(dst []byte, src []byte) int

// encodeSnappyBlockAsm encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 4294967295 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
func encodeSnappyBlockAsm(dst []byte, src []byte) int

// encodeSnappyBlockAsm64K encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 65535 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
func encodeSnappyBlockAsm64K(dst []byte, src []byte) int

// encodeSnappyBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 16383 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
func encodeSnappyBlockAsm12B(dst []byte, src []byte) int

// encodeSnappyBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 4095 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
func encodeSnappyBlockAsm10B(dst []byte, src []byte) int

// encodeSnappyBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 511 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
func encodeSnappyBlockAsm8B(dst []byte, src []byte) int

// encodeSnappyBetterBlockAsm encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 4294967295 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
func encodeSnappyBetterBlockAsm(dst []byte, src []byte) int

// encodeSnappyBetterBlockAsm64K encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 65535 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
func encodeSnappyBetterBlockAsm64K(dst []byte, src []byte) int

// encodeSnappyBetterBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 16383 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
func encodeSnappyBetterBlockAsm12B(dst []byte, src []byte) int

// encodeSnappyBetterBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 4095 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
func encodeSnappyBetterBlockAsm10B(dst []byte, src []byte) int

// encodeSnappyBetterBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 511 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
func encodeSnappyBetterBlockAsm8B(dst []byte, src []byte) int

// emitLiteral writes a literal chunk and returns the number of bytes written.
//
// It assumes that:
//	dst is long enough to hold the encoded bytes with margin of 0 bytes
//	0 <= len(lit) && len(lit) <= math.MaxUint32
//
//go:noescape
func emitLiteral(dst []byte, lit []byte) int

// emitRepeat writes a repeat chunk and returns the number of bytes written.
// Length must be at least 4 and < 1<<32
//
//go:noescape
func emitRepeat(dst []byte, offset int, length int) int

// emitCopy writes a copy chunk and returns the number of bytes written.
//
// It assumes that:
//	dst is long enough to hold the encoded bytes
//	1 <= offset && offset <= math.MaxUint32
//	4 <= length && length <= 1 << 24
//
//go:noescape
func emitCopy(dst []byte, offset int, length int) int

// emitCopyNoRepeat writes a copy chunk and returns the number of bytes written.
//
// It assumes that:
//	dst is long enough to hold the encoded bytes
//	1 <= offset && offset <= math.MaxUint32
//	4 <= length && length <= 1 << 24
//
//go:noescape
func emitCopyNoRepeat(dst []byte, offset int, length int) int

// matchLen returns how many bytes match in a and b
//
// It assumes that:
//	len(a) <= len(b)
//
//go:noescape
func matchLen(a []byte, b []byte) int
| @@ -0,0 +1,598 @@ | |||
| // Copyright (c) 2022+ Klaus Post. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| package s2 | |||
| import ( | |||
| "bytes" | |||
| "encoding/binary" | |||
| "encoding/json" | |||
| "fmt" | |||
| "io" | |||
| "sort" | |||
| ) | |||
const (
	// S2IndexHeader is the magic string that starts an index chunk.
	S2IndexHeader = "s2idx\x00"
	// S2IndexTrailer is the magic string that ends an index chunk —
	// the header reversed, so the index can be located by scanning
	// backwards from the end of a stream.
	S2IndexTrailer = "\x00xdi2s"
	// maxIndexEntries is the maximum number of entries an index may hold.
	maxIndexEntries = 1 << 16
)
// Index represents an S2/Snappy index.
type Index struct {
	TotalUncompressed int64 // Total Uncompressed size if known. Will be -1 if unknown.
	TotalCompressed   int64 // Total Compressed size if known. Will be -1 if unknown.
	// info holds one entry per indexed block, ordered by offset.
	info []struct {
		compressedOffset   int64
		uncompressedOffset int64
	}
	// estBlockUncomp is the estimated uncompressed size of each block.
	estBlockUncomp int64
}
| func (i *Index) reset(maxBlock int) { | |||
| i.estBlockUncomp = int64(maxBlock) | |||
| i.TotalCompressed = -1 | |||
| i.TotalUncompressed = -1 | |||
| if len(i.info) > 0 { | |||
| i.info = i.info[:0] | |||
| } | |||
| } | |||
| // allocInfos will allocate an empty slice of infos. | |||
| func (i *Index) allocInfos(n int) { | |||
| if n > maxIndexEntries { | |||
| panic("n > maxIndexEntries") | |||
| } | |||
| i.info = make([]struct { | |||
| compressedOffset int64 | |||
| uncompressedOffset int64 | |||
| }, 0, n) | |||
| } | |||
| // add an uncompressed and compressed pair. | |||
| // Entries must be sent in order. | |||
| func (i *Index) add(compressedOffset, uncompressedOffset int64) error { | |||
| if i == nil { | |||
| return nil | |||
| } | |||
| lastIdx := len(i.info) - 1 | |||
| if lastIdx >= 0 { | |||
| latest := i.info[lastIdx] | |||
| if latest.uncompressedOffset == uncompressedOffset { | |||
| // Uncompressed didn't change, don't add entry, | |||
| // but update start index. | |||
| latest.compressedOffset = compressedOffset | |||
| i.info[lastIdx] = latest | |||
| return nil | |||
| } | |||
| if latest.uncompressedOffset > uncompressedOffset { | |||
| return fmt.Errorf("internal error: Earlier uncompressed received (%d > %d)", latest.uncompressedOffset, uncompressedOffset) | |||
| } | |||
| if latest.compressedOffset > compressedOffset { | |||
| return fmt.Errorf("internal error: Earlier compressed received (%d > %d)", latest.uncompressedOffset, uncompressedOffset) | |||
| } | |||
| } | |||
| i.info = append(i.info, struct { | |||
| compressedOffset int64 | |||
| uncompressedOffset int64 | |||
| }{compressedOffset: compressedOffset, uncompressedOffset: uncompressedOffset}) | |||
| return nil | |||
| } | |||
// Find the offset at or before the wanted (uncompressed) offset.
// If offset is 0 or positive it is the offset from the beginning of the file.
// If the uncompressed size is known, the offset must be within the file.
// If an offset outside the file is requested io.ErrUnexpectedEOF is returned.
// If the offset is negative, it is interpreted as the distance from the end of the file,
// where -1 represents the last byte.
// If the total uncompressed size is unknown, ErrCorrupt is returned.
// NOTE(review): earlier documentation said ErrUnsupported for the
// unknown-size case, but the code returns ErrCorrupt — confirm intent.
func (i *Index) Find(offset int64) (compressedOff, uncompressedOff int64, err error) {
	if i.TotalUncompressed < 0 {
		return 0, 0, ErrCorrupt
	}
	if offset < 0 {
		// Negative offsets count back from the end of the file.
		offset = i.TotalUncompressed + offset
		if offset < 0 {
			return 0, 0, io.ErrUnexpectedEOF
		}
	}
	if offset > i.TotalUncompressed {
		return 0, 0, io.ErrUnexpectedEOF
	}
	if len(i.info) > 200 {
		// Large index: binary search for the first entry past offset,
		// then return its predecessor.
		n := sort.Search(len(i.info), func(n int) bool {
			return i.info[n].uncompressedOffset > offset
		})
		if n == 0 {
			n = 1
		}
		return i.info[n-1].compressedOffset, i.info[n-1].uncompressedOffset, nil
	}
	// Small index: linear scan, keeping the last entry not beyond offset.
	for _, info := range i.info {
		if info.uncompressedOffset > offset {
			break
		}
		compressedOff = info.compressedOffset
		uncompressedOff = info.uncompressedOffset
	}
	return compressedOff, uncompressedOff, nil
}
// reduce decimates the entries to stay below maxIndexEntries, and to keep
// the estimated per-entry block size at roughly 1MB or more.
func (i *Index) reduce() {
	if len(i.info) < maxIndexEntries && i.estBlockUncomp >= 1<<20 {
		// Already small enough, with big-enough blocks.
		return
	}
	// Algorithm, keep 1, remove removeN entries...
	removeN := (len(i.info) + 1) / maxIndexEntries
	src := i.info
	j := 0
	// Each block should be at least 1MB, but don't reduce below 1000 entries.
	for i.estBlockUncomp*(int64(removeN)+1) < 1<<20 && len(i.info)/(removeN+1) > 1000 {
		removeN++
	}
	// Keep every (removeN+1)th entry: idx advances by 1 in the loop header
	// and by removeN here, compacting the kept entries in place.
	for idx := 0; idx < len(src); idx++ {
		i.info[j] = src[idx]
		j++
		idx += removeN
	}
	i.info = i.info[:j]
	// Update maxblock estimate: each kept entry now spans removeN+1 blocks.
	i.estBlockUncomp += i.estBlockUncomp * int64(removeN)
}
// appendTo appends the binary-encoded index to b and returns the result.
// The index is wrapped in a skippable chunk: chunk type + 3-byte length,
// header magic, varint totals, optional uncompressed-offset deltas,
// compressed-offset deltas, a fixed 4-byte total size and the trailer
// magic, so it can also be parsed backwards from the end of a stream.
func (i *Index) appendTo(b []byte, uncompTotal, compTotal int64) []byte {
	i.reduce()
	var tmp [binary.MaxVarintLen64]byte
	initSize := len(b)
	// We make the start a skippable header+size.
	b = append(b, ChunkTypeIndex, 0, 0, 0)
	b = append(b, []byte(S2IndexHeader)...)
	// Total Uncompressed size
	n := binary.PutVarint(tmp[:], uncompTotal)
	b = append(b, tmp[:n]...)
	// Total Compressed size
	n = binary.PutVarint(tmp[:], compTotal)
	b = append(b, tmp[:n]...)
	// Put EstBlockUncomp size
	n = binary.PutVarint(tmp[:], i.estBlockUncomp)
	b = append(b, tmp[:n]...)
	// Put length
	n = binary.PutVarint(tmp[:], int64(len(i.info)))
	b = append(b, tmp[:n]...)
	// Check if we should add uncompressed offsets.
	// They are omitted entirely when every entry is exactly estBlockUncomp
	// apart, since Load can then reconstruct them.
	var hasUncompressed byte
	for idx, info := range i.info {
		if idx == 0 {
			if info.uncompressedOffset != 0 {
				hasUncompressed = 1
				break
			}
			continue
		}
		if info.uncompressedOffset != i.info[idx-1].uncompressedOffset+i.estBlockUncomp {
			hasUncompressed = 1
			break
		}
	}
	b = append(b, hasUncompressed)
	// Add each entry
	if hasUncompressed == 1 {
		for idx, info := range i.info {
			uOff := info.uncompressedOffset
			if idx > 0 {
				// Store the delta from the predicted offset
				// (previous entry + estimated block size).
				prev := i.info[idx-1]
				uOff -= prev.uncompressedOffset + (i.estBlockUncomp)
			}
			n = binary.PutVarint(tmp[:], uOff)
			b = append(b, tmp[:n]...)
		}
	}
	// Initial compressed size estimate.
	cPredict := i.estBlockUncomp / 2
	for idx, info := range i.info {
		cOff := info.compressedOffset
		if idx > 0 {
			// Store the delta from the running prediction.
			prev := i.info[idx-1]
			cOff -= prev.compressedOffset + cPredict
			// Update compressed size prediction, with half the error.
			cPredict += cOff / 2
		}
		n = binary.PutVarint(tmp[:], cOff)
		b = append(b, tmp[:n]...)
	}
	// Add Total Size.
	// Stored as fixed size for easier reading.
	binary.LittleEndian.PutUint32(tmp[:], uint32(len(b)-initSize+4+len(S2IndexTrailer)))
	b = append(b, tmp[:4]...)
	// Trailer
	b = append(b, []byte(S2IndexTrailer)...)
	// Update size in the skippable chunk header written first.
	chunkLen := len(b) - initSize - skippableFrameHeader
	b[initSize+1] = uint8(chunkLen >> 0)
	b[initSize+2] = uint8(chunkLen >> 8)
	b[initSize+3] = uint8(chunkLen >> 16)
	//fmt.Printf("chunklen: 0x%x Uncomp:%d, Comp:%d\n", chunkLen, uncompTotal, compTotal)
	return b
}
// Load a binary index.
// A zero value Index can be used or a previous one can be reused.
// It parses the format produced by appendTo and returns the remaining
// bytes after the index, or an error (ErrCorrupt / ErrUnsupported /
// io.ErrUnexpectedEOF).
func (i *Index) Load(b []byte) ([]byte, error) {
	if len(b) <= 4+len(S2IndexHeader)+len(S2IndexTrailer) {
		return b, io.ErrUnexpectedEOF
	}
	if b[0] != ChunkTypeIndex {
		return b, ErrCorrupt
	}
	// 3-byte little-endian chunk length from the skippable header.
	chunkLen := int(b[1]) | int(b[2])<<8 | int(b[3])<<16
	b = b[4:]
	// Validate we have enough...
	if len(b) < chunkLen {
		return b, io.ErrUnexpectedEOF
	}
	if !bytes.Equal(b[:len(S2IndexHeader)], []byte(S2IndexHeader)) {
		return b, ErrUnsupported
	}
	b = b[len(S2IndexHeader):]
	// Total Uncompressed
	if v, n := binary.Varint(b); n <= 0 || v < 0 {
		return b, ErrCorrupt
	} else {
		i.TotalUncompressed = v
		b = b[n:]
	}
	// Total Compressed
	if v, n := binary.Varint(b); n <= 0 {
		return b, ErrCorrupt
	} else {
		i.TotalCompressed = v
		b = b[n:]
	}
	// Read EstBlockUncomp
	if v, n := binary.Varint(b); n <= 0 {
		return b, ErrCorrupt
	} else {
		if v < 0 {
			return b, ErrCorrupt
		}
		i.estBlockUncomp = v
		b = b[n:]
	}
	// Entry count, bounded by maxIndexEntries.
	var entries int
	if v, n := binary.Varint(b); n <= 0 {
		return b, ErrCorrupt
	} else {
		if v < 0 || v > maxIndexEntries {
			return b, ErrCorrupt
		}
		entries = int(v)
		b = b[n:]
	}
	if cap(i.info) < entries {
		i.allocInfos(entries)
	}
	i.info = i.info[:entries]
	if len(b) < 1 {
		return b, io.ErrUnexpectedEOF
	}
	// Flag byte: 1 if per-entry uncompressed deltas follow, 0 if the
	// offsets are implied by estBlockUncomp. Any other value is invalid.
	hasUncompressed := b[0]
	b = b[1:]
	if hasUncompressed&1 != hasUncompressed {
		return b, ErrCorrupt
	}
	// Add each uncompressed entry
	for idx := range i.info {
		var uOff int64
		if hasUncompressed != 0 {
			// Load delta
			if v, n := binary.Varint(b); n <= 0 {
				return b, ErrCorrupt
			} else {
				uOff = v
				b = b[n:]
			}
		}
		if idx > 0 {
			// Reconstruct the absolute offset from the prediction
			// (previous entry + estimated block size) plus delta.
			prev := i.info[idx-1].uncompressedOffset
			uOff += prev + (i.estBlockUncomp)
			if uOff <= prev {
				return b, ErrCorrupt
			}
		}
		if uOff < 0 {
			return b, ErrCorrupt
		}
		i.info[idx].uncompressedOffset = uOff
	}
	// Initial compressed size estimate.
	cPredict := i.estBlockUncomp / 2
	// Add each compressed entry
	for idx := range i.info {
		var cOff int64
		if v, n := binary.Varint(b); n <= 0 {
			return b, ErrCorrupt
		} else {
			cOff = v
			b = b[n:]
		}
		if idx > 0 {
			// Update compressed size prediction, with half the error.
			cPredictNew := cPredict + cOff/2
			prev := i.info[idx-1].compressedOffset
			cOff += prev + cPredict
			if cOff <= prev {
				return b, ErrCorrupt
			}
			cPredict = cPredictNew
		}
		if cOff < 0 {
			return b, ErrCorrupt
		}
		i.info[idx].compressedOffset = cOff
	}
	if len(b) < 4+len(S2IndexTrailer) {
		return b, io.ErrUnexpectedEOF
	}
	// Skip size...
	b = b[4:]
	// Check trailer...
	if !bytes.Equal(b[:len(S2IndexTrailer)], []byte(S2IndexTrailer)) {
		return b, ErrCorrupt
	}
	return b[len(S2IndexTrailer):], nil
}
| // LoadStream will load an index from the end of the supplied stream. | |||
| // ErrUnsupported will be returned if the signature cannot be found. | |||
| // ErrCorrupt will be returned if unexpected values are found. | |||
| // io.ErrUnexpectedEOF is returned if there are too few bytes. | |||
| // IO errors are returned as-is. | |||
| func (i *Index) LoadStream(rs io.ReadSeeker) error { | |||
| // Go to end. | |||
| _, err := rs.Seek(-10, io.SeekEnd) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| var tmp [10]byte | |||
| _, err = io.ReadFull(rs, tmp[:]) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| // Check trailer... | |||
| if !bytes.Equal(tmp[4:4+len(S2IndexTrailer)], []byte(S2IndexTrailer)) { | |||
| return ErrUnsupported | |||
| } | |||
| sz := binary.LittleEndian.Uint32(tmp[:4]) | |||
| if sz > maxChunkSize+skippableFrameHeader { | |||
| return ErrCorrupt | |||
| } | |||
| _, err = rs.Seek(-int64(sz), io.SeekEnd) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| // Read index. | |||
| buf := make([]byte, sz) | |||
| _, err = io.ReadFull(rs, buf) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| _, err = i.Load(buf) | |||
| return err | |||
| } | |||
// IndexStream will return an index for a stream.
// The stream structure will be checked, but
// data within blocks is not verified.
// The returned index can either be appended to the end of the stream
// or stored separately.
func IndexStream(r io.Reader) ([]byte, error) {
	var i Index
	var buf [maxChunkSize]byte
	var readHeader bool
	for {
		// Each chunk starts with a 4-byte header: 1 byte type + 3 bytes length.
		_, err := io.ReadFull(r, buf[:4])
		if err != nil {
			if err == io.EOF {
				// Clean end of stream: serialize the index gathered so far.
				return i.appendTo(nil, i.TotalUncompressed, i.TotalCompressed), nil
			}
			return nil, err
		}
		// Start of this chunk (compressed offset before the header bytes).
		startChunk := i.TotalCompressed
		i.TotalCompressed += 4
		chunkType := buf[0]
		if !readHeader {
			// The very first chunk must be the stream identifier.
			if chunkType != chunkTypeStreamIdentifier {
				return nil, ErrCorrupt
			}
			readHeader = true
		}
		// 3-byte little-endian chunk length from the header.
		chunkLen := int(buf[1]) | int(buf[2])<<8 | int(buf[3])<<16
		if chunkLen < checksumSize {
			return nil, ErrCorrupt
		}
		i.TotalCompressed += int64(chunkLen)
		// Read the whole chunk body; any failure here (including real IO
		// errors) is reported as an unexpected EOF.
		_, err = io.ReadFull(r, buf[:chunkLen])
		if err != nil {
			return nil, io.ErrUnexpectedEOF
		}
		// The chunk types are specified at
		// https://github.com/google/snappy/blob/master/framing_format.txt
		switch chunkType {
		case chunkTypeCompressedData:
			// Section 4.2. Compressed data (chunk type 0x00).
			// Skip checksum.
			dLen, err := DecodedLen(buf[checksumSize:])
			if err != nil {
				return nil, err
			}
			if dLen > maxBlockSize {
				return nil, ErrCorrupt
			}
			if i.estBlockUncomp == 0 {
				// Use first block for estimate...
				i.estBlockUncomp = int64(dLen)
			}
			// Record chunk start against the current uncompressed offset.
			err = i.add(startChunk, i.TotalUncompressed)
			if err != nil {
				return nil, err
			}
			i.TotalUncompressed += int64(dLen)
			continue
		case chunkTypeUncompressedData:
			// Section 4.3. Uncompressed data (chunk type 0x01).
			// Payload length excluding the leading checksum.
			n2 := chunkLen - checksumSize
			if n2 > maxBlockSize {
				return nil, ErrCorrupt
			}
			if i.estBlockUncomp == 0 {
				// Use first block for estimate...
				i.estBlockUncomp = int64(n2)
			}
			err = i.add(startChunk, i.TotalUncompressed)
			if err != nil {
				return nil, err
			}
			i.TotalUncompressed += int64(n2)
			continue
		case chunkTypeStreamIdentifier:
			// Section 4.1. Stream identifier (chunk type 0xff).
			if chunkLen != len(magicBody) {
				return nil, ErrCorrupt
			}
			// Accept both the s2 and the original snappy magic bodies.
			if string(buf[:len(magicBody)]) != magicBody {
				if string(buf[:len(magicBody)]) != magicBodySnappy {
					return nil, ErrCorrupt
				}
			}
			continue
		}
		if chunkType <= 0x7f {
			// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
			return nil, ErrUnsupported
		}
		if chunkLen > maxChunkSize {
			return nil, ErrUnsupported
		}
		// Section 4.4 Padding (chunk type 0xfe).
		// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
	}
}
| // JSON returns the index as JSON text. | |||
| func (i *Index) JSON() []byte { | |||
| x := struct { | |||
| TotalUncompressed int64 `json:"total_uncompressed"` // Total Uncompressed size if known. Will be -1 if unknown. | |||
| TotalCompressed int64 `json:"total_compressed"` // Total Compressed size if known. Will be -1 if unknown. | |||
| Offsets []struct { | |||
| CompressedOffset int64 `json:"compressed"` | |||
| UncompressedOffset int64 `json:"uncompressed"` | |||
| } `json:"offsets"` | |||
| EstBlockUncomp int64 `json:"est_block_uncompressed"` | |||
| }{ | |||
| TotalUncompressed: i.TotalUncompressed, | |||
| TotalCompressed: i.TotalCompressed, | |||
| EstBlockUncomp: i.estBlockUncomp, | |||
| } | |||
| for _, v := range i.info { | |||
| x.Offsets = append(x.Offsets, struct { | |||
| CompressedOffset int64 `json:"compressed"` | |||
| UncompressedOffset int64 `json:"uncompressed"` | |||
| }{CompressedOffset: v.compressedOffset, UncompressedOffset: v.uncompressedOffset}) | |||
| } | |||
| b, _ := json.MarshalIndent(x, "", " ") | |||
| return b | |||
| } | |||
| // RemoveIndexHeaders will trim all headers and trailers from a given index. | |||
| // This is expected to save 20 bytes. | |||
| // These can be restored using RestoreIndexHeaders. | |||
| // This removes a layer of security, but is the most compact representation. | |||
| // Returns nil if headers contains errors. | |||
| // The returned slice references the provided slice. | |||
| func RemoveIndexHeaders(b []byte) []byte { | |||
| const save = 4 + len(S2IndexHeader) + len(S2IndexTrailer) + 4 | |||
| if len(b) <= save { | |||
| return nil | |||
| } | |||
| if b[0] != ChunkTypeIndex { | |||
| return nil | |||
| } | |||
| chunkLen := int(b[1]) | int(b[2])<<8 | int(b[3])<<16 | |||
| b = b[4:] | |||
| // Validate we have enough... | |||
| if len(b) < chunkLen { | |||
| return nil | |||
| } | |||
| b = b[:chunkLen] | |||
| if !bytes.Equal(b[:len(S2IndexHeader)], []byte(S2IndexHeader)) { | |||
| return nil | |||
| } | |||
| b = b[len(S2IndexHeader):] | |||
| if !bytes.HasSuffix(b, []byte(S2IndexTrailer)) { | |||
| return nil | |||
| } | |||
| b = bytes.TrimSuffix(b, []byte(S2IndexTrailer)) | |||
| if len(b) < 4 { | |||
| return nil | |||
| } | |||
| return b[:len(b)-4] | |||
| } | |||
| // RestoreIndexHeaders will index restore headers removed by RemoveIndexHeaders. | |||
| // No error checking is performed on the input. | |||
| // If a 0 length slice is sent, it is returned without modification. | |||
| func RestoreIndexHeaders(in []byte) []byte { | |||
| if len(in) == 0 { | |||
| return in | |||
| } | |||
| b := make([]byte, 0, 4+len(S2IndexHeader)+len(in)+len(S2IndexTrailer)+4) | |||
| b = append(b, ChunkTypeIndex, 0, 0, 0) | |||
| b = append(b, []byte(S2IndexHeader)...) | |||
| b = append(b, in...) | |||
| var tmp [4]byte | |||
| binary.LittleEndian.PutUint32(tmp[:], uint32(len(b)+4+len(S2IndexTrailer))) | |||
| b = append(b, tmp[:4]...) | |||
| // Trailer | |||
| b = append(b, []byte(S2IndexTrailer)...) | |||
| chunkLen := len(b) - skippableFrameHeader | |||
| b[1] = uint8(chunkLen >> 0) | |||
| b[2] = uint8(chunkLen >> 8) | |||
| b[3] = uint8(chunkLen >> 16) | |||
| return b | |||
| } | |||