Commit d400838e authored by Mislav Marohnić

Remove obsolete self-updating functionality

We might bring back the self-updater at some point, but right now it's
dead code and it's not clear to me how to implement it in a fashion
where it also updates dependent files such as shell completion scripts
and man pages.
Parent 24392286
@@ -10,7 +10,6 @@ import (
"github.com/github/hub/cmd"
"github.com/github/hub/git"
"github.com/github/hub/ui"
"github.com/github/hub/utils"
"github.com/kballard/go-shellquote"
flag "github.com/ogier/pflag"
)
@@ -85,10 +84,6 @@ func (r *Runner) Execute() ExecError {
forceFail = true
}
updater := NewUpdater()
err := updater.PromptForUpdate()
utils.Check(err)
git.GlobalFlags = args.GlobalFlags // preserve git global flags
if !isBuiltInHubCommand(args.Command) {
expandAlias(args)
@@ -106,7 +101,7 @@ func (r *Runner) Execute() ExecError {
gitArgs := []string{args.Command}
gitArgs = append(gitArgs, args.Params...)
err = git.Run(gitArgs...)
err := git.Run(gitArgs...)
return newExecError(err)
}
......
package commands
import (
"os"
"github.com/github/hub/utils"
)
var cmdSelfupdate = &Command{
Run: update,
Usage: "selfupdate",
Long: "Update hub to the latest version.",
}
func init() {
CmdRunner.Use(cmdSelfupdate)
}
func update(cmd *Command, args *Args) {
updater := NewUpdater()
err := updater.Update()
utils.Check(err)
os.Exit(0)
}
package commands
import (
"archive/zip"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net/http"
"os"
"path/filepath"
"runtime"
"strings"
"time"
"github.com/github/hub/git"
"github.com/github/hub/github"
"github.com/github/hub/ui"
"github.com/github/hub/utils"
"github.com/github/hub/version"
goupdate "github.com/inconshreveable/go-update"
)
const (
hubAutoUpdateConfig = "hub.autoUpdate"
)
var EnableAutoUpdate = false
func NewUpdater() *Updater {
ver := os.Getenv("HUB_VERSION")
if ver == "" {
ver = version.Version
}
timestampPath := filepath.Join(os.Getenv("HOME"), ".config", "hub-update")
return &Updater{
Host: github.DefaultGitHubHost(),
CurrentVersion: ver,
timestampPath: timestampPath,
}
}
type Updater struct {
Host string
CurrentVersion string
timestampPath string
}
func (updater *Updater) timeToUpdate() bool {
if updater.CurrentVersion == "dev" || readTime(updater.timestampPath).After(time.Now()) {
return false
}
// the next update is in about 14 days
wait := 13*24*time.Hour + randDuration(24*time.Hour)
return writeTime(updater.timestampPath, time.Now().Add(wait))
}
func (updater *Updater) PromptForUpdate() (err error) {
config := autoUpdateConfig()
if config == "never" || !updater.timeToUpdate() {
return
}
releaseName, version := updater.latestReleaseNameAndVersion()
if version != "" && version != updater.CurrentVersion {
switch config {
case "always":
err = updater.updateTo(releaseName, version)
default:
ui.Println("There is a newer version of hub available.")
ui.Printf("Would you like to update? ([Y]es/[N]o/[A]lways/N[e]ver): ")
var confirm string
fmt.Scan(&confirm)
always := utils.IsOption(confirm, "a", "always")
if always || utils.IsOption(confirm, "y", "yes") {
err = updater.updateTo(releaseName, version)
}
saveAutoUpdateConfiguration(confirm, always)
}
}
return
}
func (updater *Updater) Update() (err error) {
config := autoUpdateConfig()
if config == "never" {
ui.Println("Update is disabled")
return
}
releaseName, version := updater.latestReleaseNameAndVersion()
if version == "" {
ui.Println("There is no newer version of hub available.")
return
}
if version == updater.CurrentVersion {
ui.Printf("You're already on the latest version: %s\n", version)
} else {
err = updater.updateTo(releaseName, version)
}
return
}
func (updater *Updater) latestReleaseNameAndVersion() (name, version string) {
// Create Client with a stub Host
c := github.Client{Host: &github.Host{Host: updater.Host}}
name, _ = c.GhLatestTagName()
version = strings.TrimPrefix(name, "v")
return
}
func (updater *Updater) updateTo(releaseName, version string) (err error) {
ui.Printf("Updating gh to %s...\n", version)
downloadURL := fmt.Sprintf("https://%s/github/hub/releases/download/%s/hub%s_%s_%s.zip", updater.Host, releaseName, version, runtime.GOOS, runtime.GOARCH)
path, err := downloadFile(downloadURL)
if err != nil {
return
}
exec, err := unzipExecutable(path)
if err != nil {
return
}
err, _ = goupdate.New().FromFile(exec)
if err == nil {
ui.Println("Done!")
}
return
}
func unzipExecutable(path string) (exec string, err error) {
rc, err := zip.OpenReader(path)
if err != nil {
err = fmt.Errorf("Can't open zip file %s: %s", path, err)
return
}
defer rc.Close()
for _, file := range rc.File {
if !strings.HasPrefix(file.Name, "gh") {
continue
}
dir := filepath.Dir(path)
exec, err = unzipFile(file, dir)
break
}
if exec == "" && err == nil {
err = fmt.Errorf("No gh executable is found in %s", path)
}
return
}
func unzipFile(file *zip.File, to string) (exec string, err error) {
frc, err := file.Open()
if err != nil {
err = fmt.Errorf("Can't open zip entry %s when reading: %s", file.Name, err)
return
}
defer frc.Close()
dest := filepath.Join(to, filepath.Base(file.Name))
f, err := os.Create(dest)
if err != nil {
return
}
defer f.Close()
copied, err := io.Copy(f, frc)
if err != nil {
return
}
if uint32(copied) != file.UncompressedSize {
err = fmt.Errorf("Zip entry %s is corrupted", file.Name)
return
}
exec = f.Name()
return
}
func downloadFile(url string) (path string, err error) {
dir, err := ioutil.TempDir("", "gh-update")
if err != nil {
return
}
resp, err := http.Get(url)
if err != nil {
return
}
defer resp.Body.Close()
if resp.StatusCode >= 300 || resp.StatusCode < 200 {
err = fmt.Errorf("Can't download %s: %d", url, resp.StatusCode)
return
}
file, err := os.Create(filepath.Join(dir, filepath.Base(url)))
if err != nil {
return
}
defer file.Close()
_, err = io.Copy(file, resp.Body)
if err != nil {
return
}
path = file.Name()
return
}
func randDuration(n time.Duration) time.Duration {
return time.Duration(rand.Int63n(int64(n)))
}
func readTime(path string) time.Time {
p, err := ioutil.ReadFile(path)
if os.IsNotExist(err) {
return time.Time{}
}
if err != nil {
return time.Now().Add(1000 * time.Hour)
}
t, err := time.Parse(time.RFC3339, strings.TrimSpace(string(p)))
if err != nil {
return time.Time{}
}
return t
}
func writeTime(path string, t time.Time) bool {
return ioutil.WriteFile(path, []byte(t.Format(time.RFC3339)), 0644) == nil
}
func saveAutoUpdateConfiguration(confirm string, always bool) {
if always {
git.SetGlobalConfig(hubAutoUpdateConfig, "always")
} else if utils.IsOption(confirm, "e", "never") {
git.SetGlobalConfig(hubAutoUpdateConfig, "never")
}
}
func autoUpdateConfig() (opt string) {
if EnableAutoUpdate {
opt, _ = git.GlobalConfig(hubAutoUpdateConfig)
} else {
opt = "never"
}
return
}
package commands
import (
"fmt"
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"os"
"path/filepath"
"testing"
"github.com/bmizerany/assert"
"github.com/github/hub/fixtures"
)
func TestUpdater_downloadFile(t *testing.T) {
mux := http.NewServeMux()
server := httptest.NewServer(mux)
defer server.Close()
mux.HandleFunc("/gh.zip", func(w http.ResponseWriter, r *http.Request) {
assert.Equal(t, "GET", r.Method)
fmt.Fprint(w, "1234")
})
path, err := downloadFile(fmt.Sprintf("%s/gh.zip", server.URL))
assert.Equal(t, nil, err)
content, err := ioutil.ReadFile(path)
assert.Equal(t, nil, err)
assert.Equal(t, "1234", string(content))
assert.Equal(t, "gh.zip", filepath.Base(path))
}
func TestUpdater_unzipExecutable(t *testing.T) {
target, _ := ioutil.TempFile("", "unzip-test")
defer target.Close()
source, _ := os.Open(filepath.Join("..", "fixtures", "gh.zip"))
defer source.Close()
_, err := io.Copy(target, source)
assert.Equal(t, nil, err)
exec, err := unzipExecutable(target.Name())
assert.Equal(t, nil, err)
assert.Equal(t, "gh", filepath.Base(exec))
}
func TestUpdater_timeToUpdate(t *testing.T) {
// file doesn't exist
timestampDir, _ := ioutil.TempDir("", "timestampDir-test")
timestampPath := filepath.Join(timestampDir, "gh-update")
updater := Updater{timestampPath: timestampPath}
assert.T(t, updater.timeToUpdate())
timestamp, err := ioutil.ReadFile(timestampPath)
assert.Equal(t, nil, err)
assert.NotEqual(t, "", string(timestamp))
// invalid timestamp format
timestampFile, _ := ioutil.TempFile("", "timestampFile-test")
updater = Updater{timestampPath: timestampFile.Name()}
assert.T(t, updater.timeToUpdate())
timestamp, err = ioutil.ReadFile(timestampFile.Name())
assert.Equal(t, nil, err)
assert.NotEqual(t, "", string(timestamp))
// dev version
updater = Updater{CurrentVersion: "dev"}
assert.T(t, !updater.timeToUpdate())
}
func TestSaveAlwaysAutoUpdateOption(t *testing.T) {
checkSavedAutoUpdateOption(t, true, "a", "always")
checkSavedAutoUpdateOption(t, true, "always", "always")
}
func TestSaveNeverAutoUpdateOption(t *testing.T) {
checkSavedAutoUpdateOption(t, false, "e", "never")
checkSavedAutoUpdateOption(t, false, "never", "never")
}
func TestDoesntSaveYesAutoUpdateOption(t *testing.T) {
checkSavedAutoUpdateOption(t, false, "y", "")
checkSavedAutoUpdateOption(t, false, "yes", "")
}
func TestDoesntSaveNoAutoUpdateOption(t *testing.T) {
checkSavedAutoUpdateOption(t, false, "n", "")
checkSavedAutoUpdateOption(t, false, "no", "")
}
func checkSavedAutoUpdateOption(t *testing.T, always bool, confirm, expected string) {
EnableAutoUpdate = true
repo := fixtures.SetupTestRepo()
defer repo.TearDown()
saveAutoUpdateConfiguration(confirm, always)
assert.Equal(t, expected, autoUpdateConfig())
EnableAutoUpdate = false
}
@@ -535,23 +535,6 @@ func (client *Client) UpdateIssue(project *Project, issueNumber int, params map[
return
}
func (client *Client) GhLatestTagName() (tagName string, err error) {
releases, err := client.FetchReleases(&Project{Owner: "jingweno", Name: "gh"})
if err != nil {
err = FormatError("getting gh releases", err)
return
}
if len(releases) == 0 {
err = fmt.Errorf("No gh release is available")
return
}
tagName = releases[0].TagName
return
}
func (client *Client) CurrentUser() (user *octokit.User, err error) {
url, err := octokit.CurrentUserURL.Expand(nil)
if err != nil {
......
Copyright 2014 Alan Shreve
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
# go-update: Automatically update Go programs from the internet
go-update allows a program to update itself by replacing its executable file
with a new version. It provides the flexibility to implement different updating user experiences
like auto-updating, or manual user-initiated updates. It also boasts
advanced features like binary patching and code signing verification.
Updating your program to a new version is as easy as:
err, errRecover := update.New().FromUrl("http://release.example.com/2.0/myprogram")
if err != nil {
fmt.Printf("Update failed: %v\n", err)
}
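The features listed below follow the same chainable style; as a further illustration, here is a minimal sketch (hypothetical file path, with expectedChecksum assumed to come from trusted release metadata) that verifies a sha256 checksum before applying an update from a local file:

	// expectedChecksum: sha256 of the new binary, obtained out of band from
	// trusted release metadata (placeholder here)
	err, errRecover := update.New().VerifyChecksum(expectedChecksum).FromFile("/tmp/myprogram-2.0")
	if err != nil {
		fmt.Printf("Update failed: %v\n", err)
		if errRecover != nil {
			fmt.Printf("Failed to recover bad update: %v\n", errRecover)
		}
	}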
## Documentation and API Reference
Comprehensive API documentation and code examples are available on godoc.org:
[![GoDoc](https://godoc.org/github.com/inconshreveable/go-update?status.svg)](https://godoc.org/github.com/inconshreveable/go-update)
## Features
- Cross platform support (Windows too!)
- Binary patch application
- Checksum verification
- Code signing verification
- Support for updating arbitrary files
## [equinox.io](https://equinox.io)
go-update provides the primitives for building self-updating applications, but there are a number of other challenges
involved in a complete updating solution such as hosting, code signing, update channels, gradual rollout,
dynamically computing binary patches, tracking update metrics like versions and failures, plus more.
I provide this service, a complete solution, free for open source projects, at [equinox.io](https://equinox.io).
## License
Apache
package check
import (
"bytes"
_ "crypto/sha512" // for tls cipher support
"encoding/hex"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"runtime"
"github.com/inconshreveable/go-update"
"github.com/kardianos/osext"
)
type Initiative string
const (
INITIATIVE_NEVER Initiative = "never"
INITIATIVE_AUTO = "auto"
INITIATIVE_MANUAL = "manual"
)
var NoUpdateAvailable error = fmt.Errorf("No update available")
type Params struct {
// protocol version
Version int `json:"version"`
// identifier of the application to update
AppId string `json:"app_id"`
// version of the application updating itself
AppVersion string `json:"app_version"`
// operating system of target platform
OS string `json:"-"`
// hardware architecture of target platform
Arch string `json:"-"`
// application-level user identifier
UserId string `json:"user_id"`
// checksum of the binary to replace (used for returning diff patches)
Checksum string `json:"checksum"`
// release channel (empty string means 'stable')
Channel string `json:"-"`
// tags for custom update channels
Tags map[string]string `json:"tags"`
}
type Result struct {
up *update.Update
// should the update be applied automatically/manually
Initiative Initiative `json:"initiative"`
// url where to download the updated application
Url string `json:"url"`
// a URL to a patch to apply
PatchUrl string `json:"patch_url"`
// the patch format (only bsdiff supported at the moment)
PatchType update.PatchType `json:"patch_type"`
// version of the new application
Version string `json:"version"`
// expected checksum of the new application
Checksum string `json:"checksum"`
// signature for verifying update authenticity
Signature string `json:"signature"`
}
// CheckForUpdate makes an HTTP post to a URL with the JSON serialized
// representation of Params. It returns the deserialized result object
// returned by the remote endpoint or an error. If you do not set
// OS/Arch, CheckForUpdate will populate them for you. Similarly, if
// Version is 0, it will be set to 1. Lastly, if Checksum is the empty
// string, it will automatically be computed for the running program's
// executable file.
func (p *Params) CheckForUpdate(url string, up *update.Update) (*Result, error) {
if p.Tags == nil {
p.Tags = make(map[string]string)
}
if p.Channel == "" {
p.Channel = "stable"
}
if p.OS == "" {
p.OS = runtime.GOOS
}
if p.Arch == "" {
p.Arch = runtime.GOARCH
}
if p.Version == 0 {
p.Version = 1
}
// ignore errors auto-populating the checksum
// if it fails, you just won't be able to patch
if up.TargetPath == "" {
p.Checksum = defaultChecksum()
} else {
checksum, err := update.ChecksumForFile(up.TargetPath)
if err != nil {
return nil, err
}
p.Checksum = hex.EncodeToString(checksum)
}
p.Tags["os"] = p.OS
p.Tags["arch"] = p.Arch
p.Tags["channel"] = p.Channel
body, err := json.Marshal(p)
if err != nil {
return nil, err
}
resp, err := http.Post(url, "application/json", bytes.NewReader(body))
if err != nil {
return nil, err
}
// no content means no available update
if resp.StatusCode == 204 {
return nil, NoUpdateAvailable
}
defer resp.Body.Close()
respBytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
result := &Result{up: up}
if err := json.Unmarshal(respBytes, result); err != nil {
return nil, err
}
return result, nil
}
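// Illustrative sketch only (not part of the original package): how a
// hypothetical client might use CheckForUpdate and apply the result. The
// endpoint URL and AppId below are placeholders.
//
//	params := &check.Params{
//		AppId:      "ap_example",
//		AppVersion: "1.0.0",
//	}
//	result, err := params.CheckForUpdate("https://updates.example.com/check", update.New())
//	if err == check.NoUpdateAvailable {
//		return // already up to date
//	} else if err != nil {
//		log.Fatal(err)
//	}
//	if err, errRecover := result.Update(); err != nil {
//		log.Printf("update failed: %v (recovery error: %v)", err, errRecover)
//	}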
func (p *Params) CheckAndApplyUpdate(url string, up *update.Update) (result *Result, err error, errRecover error) {
// check for an update
result, err = p.CheckForUpdate(url, up)
if err != nil {
return
}
// run the available update
err, errRecover = result.Update()
return
}
func (r *Result) Update() (err error, errRecover error) {
if r.Checksum != "" {
r.up.Checksum, err = hex.DecodeString(r.Checksum)
if err != nil {
return
}
}
if r.Signature != "" {
r.up.Signature, err = hex.DecodeString(r.Signature)
if err != nil {
return
}
}
if r.PatchType != "" {
r.up.PatchType = r.PatchType
}
if r.Url == "" && r.PatchUrl == "" {
err = fmt.Errorf("Result does not contain an update url or patch update url")
return
}
if r.PatchUrl != "" {
err, errRecover = r.up.FromUrl(r.PatchUrl)
if err == nil {
// success!
return
} else {
// failed to update from patch URL, try with the whole thing
if r.Url == "" || errRecover != nil {
// we can't try updating from a URL with the full contents
// in these cases, so fail
return
} else {
r.up.PatchType = update.PATCHTYPE_NONE
}
}
}
// try updating from a URL with the full contents
return r.up.FromUrl(r.Url)
}
func defaultChecksum() string {
path, err := osext.Executable()
if err != nil {
return ""
}
checksum, err := update.ChecksumForFile(path)
if err != nil {
return ""
}
return hex.EncodeToString(checksum)
}
package download
import (
"bytes"
"compress/gzip"
"fmt"
"io"
"net/http"
"os"
"runtime"
)
type roundTripper struct {
RoundTripFn func(*http.Request) (*http.Response, error)
}
func (rt *roundTripper) RoundTrip(r *http.Request) (*http.Response, error) {
return rt.RoundTripFn(r)
}
// Download encapsulates the state and parameters to download content
// from a URL which:
//
// - Publishes the percentage of the download completed to a channel.
// - May resume a previous download that was partially completed.
//
// Create an instance with the New() factory function.
type Download struct {
// net/http.Client to use when downloading the update.
// If nil, a default http.Client is used
HttpClient *http.Client
// As bytes are downloaded, they are written to Target.
// Download also uses the Target's Seek method to determine
// the size of partial-downloads so that it may properly
// request the remaining bytes to resume the download.
Target Target
// Progress returns the percentage of the download
// completed as an integer between 0 and 100
Progress chan (int)
// HTTP Method to use in the download request. Default is "GET"
Method string
// HTTP URL to issue the download request to
Url string
}
// New initializes a new Download object which will download
// the content from url into target.
func New(url string, target Target) *Download {
return &Download{
HttpClient: new(http.Client),
Progress: make(chan int),
Method: "GET",
Url: url,
Target: target,
}
}
// Get() downloads the content of a url to a target destination.
//
// Only HTTP/1.1 servers that implement the Range header support resuming a
// partially completed download.
//
// On success, the server must return 200 and the content, or 206 when resuming a partial download.
// If the HTTP server returns a 3XX redirect, it will be followed according to d.HttpClient's redirect policy.
//
func (d *Download) Get() (err error) {
// Close the progress channel whenever this function completes
defer close(d.Progress)
// determine the size of the download target to determine if we're resuming a partial download
offset, err := d.Target.Size()
if err != nil {
return
}
// create the download request
req, err := http.NewRequest(d.Method, d.Url, nil)
if err != nil {
return
}
// we have to add headers like this so they get used across redirects
trans := d.HttpClient.Transport
if trans == nil {
trans = http.DefaultTransport
}
d.HttpClient.Transport = &roundTripper{
RoundTripFn: func(r *http.Request) (*http.Response, error) {
// add header for download continuation
if offset > 0 {
r.Header.Add("Range", fmt.Sprintf("%d-", offset))
}
// ask for gzipped content so that net/http won't unzip it for us
// and destroy the content length header we need for progress calculations
r.Header.Add("Accept-Encoding", "gzip")
return trans.RoundTrip(r)
},
}
// issue the download request
resp, err := d.HttpClient.Do(req)
if err != nil {
return
}
defer resp.Body.Close()
switch resp.StatusCode {
// ok
case 200, 206:
// server error
default:
err = fmt.Errorf("Non 2XX response when downloading update: %s", resp.Status)
return
}
// Determine how much we have to download
// net/http sets this to -1 when it is unknown
clength := resp.ContentLength
// Read the content from the response body
rd := resp.Body
// meter the rate at which we download content for
// progress reporting if we know how much to expect
if clength > 0 {
rd = &meteredReader{rd: rd, totalSize: clength, progress: d.Progress}
}
// Decompress the content if necessary
if resp.Header.Get("Content-Encoding") == "gzip" {
rd, err = gzip.NewReader(rd)
if err != nil {
return
}
}
// Download the update
_, err = io.Copy(d.Target, rd)
if err != nil {
return
}
return
}
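// Illustrative sketch only (not part of the original package): downloading a
// URL into a file while reporting progress. The URL and path are placeholders.
//
//	f, err := os.Create("/tmp/release.zip")
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer f.Close()
//	d := download.New("https://example.com/release.zip", &download.FileTarget{File: f})
//	go func() {
//		for pct := range d.Progress { // closed when Get returns
//			fmt.Printf("\rdownloaded %d%%", pct)
//		}
//	}()
//	if err := d.Get(); err != nil {
//		log.Fatal(err)
//	}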
// meteredReader wraps a ReadCloser. Calls to a meteredReader's Read() method
// publish updates to a progress channel with the percentage read so far.
type meteredReader struct {
rd io.ReadCloser
totalSize int64
progress chan int
totalRead int64
ticks int64
}
func (m *meteredReader) Close() error {
return m.rd.Close()
}
func (m *meteredReader) Read(b []byte) (n int, err error) {
chunkSize := (m.totalSize / 100) + 1
lenB := int64(len(b))
var nChunk int
for start := int64(0); start < lenB; start += int64(nChunk) {
end := start + chunkSize
if end > lenB {
end = lenB
}
nChunk, err = m.rd.Read(b[start:end])
n += nChunk
m.totalRead += int64(nChunk)
if m.totalRead > (m.ticks * chunkSize) {
m.ticks += 1
// try to send on channel, but don't block if it's full
select {
case m.progress <- int(m.ticks + 1):
default:
}
// give the progress channel consumer a chance to run
runtime.Gosched()
}
if err != nil {
return
}
}
return
}
// A Target is what you can supply to Download;
// it's just an io.Writer with a Size() method so that
// a Download can "resume" an interrupted download
type Target interface {
io.Writer
Size() (int, error)
}
type FileTarget struct {
*os.File
}
func (t *FileTarget) Size() (int, error) {
if fi, err := t.File.Stat(); err != nil {
return 0, err
} else {
return int(fi.Size()), nil
}
}
type MemoryTarget struct {
bytes.Buffer
}
func (t *MemoryTarget) Size() (int, error) {
return t.Buffer.Len(), nil
}
// +build !windows
package update
func hideFile(path string) error {
return nil
}
package update
import (
"syscall"
"unsafe"
)
func hideFile(path string) error {
kernel32 := syscall.NewLazyDLL("kernel32.dll")
setFileAttributes := kernel32.NewProc("SetFileAttributesW")
r1, _, err := setFileAttributes.Call(uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(path))), 2)
if r1 == 0 {
return err
} else {
return nil
}
}
/*
go-update allows a program to update itself by replacing its executable file
with a new version. It provides the flexibility to implement different updating user experiences
like auto-updating, or manual user-initiated updates. It also boasts
advanced features like binary patching and code signing verification.
Updating your program to a new version is as easy as:
err, errRecover := update.New().FromUrl("http://release.example.com/2.0/myprogram")
if err != nil {
fmt.Printf("Update failed: %v\n", err)
}
You may also choose to update from other data sources such as a file or an io.Reader:
err, errRecover := update.New().FromFile("/path/to/update")
Binary Diff Patching
Binary diff updates are supported and easy to use:
up := update.New().ApplyPatch(update.PATCHTYPE_BSDIFF)
err, errRecover := up.FromUrl("http://release.example.com/2.0/mypatch")
Checksum Verification
You should also verify the checksum of new updates as well as verify
the digital signature of an update. Note that even when you choose to apply
a patch, the checksum is verified against the complete update after that patch
has been applied.
up := update.New().ApplyPatch(update.PATCHTYPE_BSDIFF).VerifyChecksum(checksum)
err, errRecover := up.FromUrl("http://release.example.com/2.0/mypatch")
Updating other files
Updating arbitrary files is also supported. You may update files which are
not the currently running program:
up := update.New().Target("/usr/local/bin/some-program")
err, errRecover := up.FromUrl("http://release.example.com/2.0/some-program")
Code Signing
Truly secure updates use code signing to verify that the update was issued by a trusted party.
To do this, you'll need to generate a public/private key pair. You can do this with openssl,
or the equinox.io client (https://equinox.io/client) can easily generate one for you:
# with equinox client
equinox genkey --private-key=private.pem --public-key=public.pem
# with openssl
openssl genrsa -out private.pem 2048
openssl rsa -in private.pem -out public.pem -pubout
Once you have your key pair, you can instruct your program to validate its updates
with the public key:
const publicKey = `-----BEGIN PUBLIC KEY-----
...
-----END PUBLIC KEY-----`
up, err := update.New().VerifySignatureWithPEM(publicKey)
if err != nil {
return fmt.Errorf("Bad public key: '%v': %v", publicKey, err)
}
Once you've configured your program this way, it will disallow all updates unless they
are properly signed. You must now pass in the signature to verify with:
up.VerifySignature(signature).FromUrl("http://dl.example.com/update")
Error Handling and Recovery
To perform an update, the process must be able to read its executable file and to write
to the directory that contains its executable file. It can be useful to check whether the process
has the necessary permissions to perform an update before trying to apply one. Use the
CanUpdate call to provide a useful message to the user if the update can't proceed without
elevated permissions:
up := update.New().Target("/etc/hosts")
err := up.CanUpdate()
if err != nil {
fmt.Printf("Can't update because: '%v'. Try as root or Administrator\n", err)
return
}
err, errRecover := up.FromUrl("https://example.com/new/hosts")
Although exceedingly unlikely, the update operation itself is not atomic and can fail
in such a way that a user's computer is left in an inconsistent state. If that happens,
go-update attempts to recover to leave the system in a good state. If the recovery step
fails (even more unlikely), a second error, referred to as "errRecover" will be non-nil
so that you may inform your users of the bad news. You should handle this case as shown
here:
err, errRecover := up.FromUrl("https://example.com/update")
if err != nil {
fmt.Printf("Update failed: %v\n", err)
if errRecover != nil {
fmt.Printf("Failed to recover bad update: %v!\n", errRecover)
fmt.Printf("Program exectuable may be missing!\n")
}
}
Subpackages
Sub-package check contains the client functionality for a simple protocol for negotiating
whether a new update is available, where it is, and the metadata needed for verifying it.
Sub-package download contains functionality for downloading from an HTTP endpoint
while outputting a progress meter and supports resuming partial downloads.
*/
package update
import (
"bytes"
"crypto"
"crypto/rsa"
"crypto/sha256"
_ "crypto/sha512" // for tls cipher support
"crypto/x509"
"encoding/pem"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"github.com/inconshreveable/go-update/download"
"github.com/kardianos/osext"
"github.com/kr/binarydist"
)
// The type of a binary patch, if any. Only bsdiff is supported
type PatchType string
const (
PATCHTYPE_BSDIFF PatchType = "bsdiff"
PATCHTYPE_NONE = ""
)
type Update struct {
// empty string means "path of the current executable"
TargetPath string
// type of patch to apply. PATCHTYPE_NONE means "not a patch"
PatchType
// sha256 checksum of the new binary to verify against
Checksum []byte
// public key to use for signature verification
PublicKey *rsa.PublicKey
// signature to use for signature verification
Signature []byte
}
func (u *Update) getPath() (string, error) {
if u.TargetPath == "" {
return osext.Executable()
} else {
return u.TargetPath, nil
}
}
// New creates a new Update object.
// A default update object assumes the complete binary
// content will be used for update (not a patch) and that
// the intended target is the running executable.
//
// Use this as the start of a chain of calls on the Update
// object to build up your configuration. Example:
//
// up := update.New().ApplyPatch(update.PATCHTYPE_BSDIFF).VerifyChecksum(checksum)
//
func New() *Update {
return &Update{
TargetPath: "",
PatchType: PATCHTYPE_NONE,
}
}
// Target configures the update to update the file at the given path.
// The empty string means 'the executable file of the running program'.
func (u *Update) Target(path string) *Update {
u.TargetPath = path
return u
}
// ApplyPatch configures the update to treat the contents of the update
// as a patch to apply to the existing target. You must specify the
// format of the patch. Only PATCHTYPE_BSDIFF is supported at the moment.
func (u *Update) ApplyPatch(patchType PatchType) *Update {
u.PatchType = patchType
return u
}
// VerifyChecksum configures the update to verify that the
// update has the given sha256 checksum.
func (u *Update) VerifyChecksum(checksum []byte) *Update {
u.Checksum = checksum
return u
}
// VerifySignature configures the update to verify the given
// signature of the update. You must also call one of the
// VerifySignatureWith* functions to specify a public key
// to use for verification.
func (u *Update) VerifySignature(signature []byte) *Update {
u.Signature = signature
return u
}
// VerifySignatureWith configures the update to use the given RSA
// public key to verify the update's signature. You must also call
// VerifySignature() with a signature to check.
//
// You'll probably want to use VerifySignatureWithPEM instead of
// parsing the public key yourself.
func (u *Update) VerifySignatureWith(publicKey *rsa.PublicKey) *Update {
u.PublicKey = publicKey
return u
}
// VerifySignatureWithPEM configures the update to use the given PEM-formatted
// RSA public key to verify the update's signature. You must also call
// VerifySignature() with a signature to check.
//
// A PEM formatted public key typically begins with
// -----BEGIN PUBLIC KEY-----
func (u *Update) VerifySignatureWithPEM(publicKeyPEM []byte) (*Update, error) {
block, _ := pem.Decode(publicKeyPEM)
if block == nil {
return u, fmt.Errorf("Couldn't parse PEM data")
}
pub, err := x509.ParsePKIXPublicKey(block.Bytes)
if err != nil {
return u, err
}
var ok bool
u.PublicKey, ok = pub.(*rsa.PublicKey)
if !ok {
return u, fmt.Errorf("Public key isn't an RSA public key")
}
return u, nil
}
// FromUrl updates the target with the contents of the given URL.
func (u *Update) FromUrl(url string) (err error, errRecover error) {
target := new(download.MemoryTarget)
err = download.New(url, target).Get()
if err != nil {
return
}
return u.FromStream(target)
}
// FromFile updates the target with the contents of the given file.
func (u *Update) FromFile(path string) (err error, errRecover error) {
// open the new updated contents
fp, err := os.Open(path)
if err != nil {
return
}
defer fp.Close()
// do the update
return u.FromStream(fp)
}
// FromStream updates the target file with the contents of the supplied io.Reader.
//
// FromStream performs the following actions to ensure a safe cross-platform update:
//
// 1. If configured, applies the contents of the io.Reader as a binary patch.
//
// 2. If configured, computes the sha256 checksum and verifies it matches.
//
// 3. If configured, verifies the RSA signature with a public key.
//
// 4. Creates a new file, /path/to/.target.new with mode 0755 with the contents of the updated file
//
// 5. Renames /path/to/target to /path/to/.target.old
//
// 6. Renames /path/to/.target.new to /path/to/target
//
// 7. If the rename is successful, deletes /path/to/.target.old, returns no error
//
// 8. If the rename fails, attempts to rename /path/to/.target.old back to /path/to/target
// If this operation fails, it is reported in the errRecover return value so as not to
// mask the original error that caused the recovery attempt.
//
// On Windows, the removal of /path/to/.target.old always fails, so
// we just make the old file hidden instead.
func (u *Update) FromStream(updateWith io.Reader) (err error, errRecover error) {
updatePath, err := u.getPath()
if err != nil {
return
}
var newBytes []byte
// apply a patch if requested
switch u.PatchType {
case PATCHTYPE_BSDIFF:
newBytes, err = applyPatch(updateWith, updatePath)
if err != nil {
return
}
case PATCHTYPE_NONE:
// no patch to apply, go on through
newBytes, err = ioutil.ReadAll(updateWith)
if err != nil {
return
}
default:
err = fmt.Errorf("Unrecognized patch type: %s", u.PatchType)
return
}
// verify checksum if requested
if u.Checksum != nil {
if err = verifyChecksum(newBytes, u.Checksum); err != nil {
return
}
}
// verify signature if requested
if u.Signature != nil || u.PublicKey != nil {
if u.Signature == nil {
err = fmt.Errorf("No public key specified to verify signature")
return
}
if u.PublicKey == nil {
err = fmt.Errorf("No signature to verify!")
return
}
if err = verifySignature(newBytes, u.Signature, u.PublicKey); err != nil {
return
}
}
// get the directory the executable exists in
updateDir := filepath.Dir(updatePath)
filename := filepath.Base(updatePath)
// Copy the contents of newBytes to the new executable file
newPath := filepath.Join(updateDir, fmt.Sprintf(".%s.new", filename))
fp, err := os.OpenFile(newPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0755)
if err != nil {
return
}
defer fp.Close()
_, err = io.Copy(fp, bytes.NewReader(newBytes))
// if we don't call fp.Close(), windows won't let us move the new executable
// because the file will still be "in use"
fp.Close()
// this is where we'll move the executable to so that we can swap in the updated replacement
oldPath := filepath.Join(updateDir, fmt.Sprintf(".%s.old", filename))
// delete any existing old exec file - this is necessary on Windows for two reasons:
// 1. after a successful update, Windows can't remove the .old file because the process is still running
// 2. windows rename operations fail if the destination file already exists
_ = os.Remove(oldPath)
// move the existing executable to a new file in the same directory
err = os.Rename(updatePath, oldPath)
if err != nil {
return
}
// move the new executable in to become the new program
err = os.Rename(newPath, updatePath)
if err != nil {
// copy unsuccessful
errRecover = os.Rename(oldPath, updatePath)
} else {
// copy successful, remove the old binary
errRemove := os.Remove(oldPath)
// windows has trouble with removing old binaries, so hide it instead
if errRemove != nil {
_ = hideFile(oldPath)
}
}
return
}
// CanUpdate() determines whether the process has the correct permissions to
// perform the requested update. If the update can proceed, it returns nil, otherwise
// it returns the error that would occur if an update were attempted.
func (u *Update) CanUpdate() (err error) {
// get the directory the file exists in
path, err := u.getPath()
if err != nil {
return
}
fileDir := filepath.Dir(path)
fileName := filepath.Base(path)
// attempt to open a file in the file's directory
newPath := filepath.Join(fileDir, fmt.Sprintf(".%s.new", fileName))
fp, err := os.OpenFile(newPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0755)
if err != nil {
return
}
fp.Close()
_ = os.Remove(newPath)
return
}
func applyPatch(patch io.Reader, updatePath string) ([]byte, error) {
// open the file to update
old, err := os.Open(updatePath)
if err != nil {
return nil, err
}
defer old.Close()
// apply the patch
applied := new(bytes.Buffer)
if err = binarydist.Patch(old, applied, patch); err != nil {
return nil, err
}
return applied.Bytes(), nil
}
func verifyChecksum(updated []byte, expectedChecksum []byte) error {
checksum, err := ChecksumForBytes(updated)
if err != nil {
return err
}
if !bytes.Equal(expectedChecksum, checksum) {
return fmt.Errorf("Updated file has wrong checksum. Expected: %x, got: %x", expectedChecksum, checksum)
}
return nil
}
// ChecksumForFile returns the sha256 checksum for the given file
func ChecksumForFile(path string) ([]byte, error) {
f, err := os.Open(path)
if err != nil {
return nil, err
}
defer f.Close()
return ChecksumForReader(f)
}
// ChecksumForReader returns the sha256 checksum for the entire
// contents of the given reader.
func ChecksumForReader(rd io.Reader) ([]byte, error) {
h := sha256.New()
if _, err := io.Copy(h, rd); err != nil {
return nil, err
}
return h.Sum(nil), nil
}
// ChecksumForBytes returns the sha256 checksum for the given bytes
func ChecksumForBytes(source []byte) ([]byte, error) {
return ChecksumForReader(bytes.NewReader(source))
}
func verifySignature(source, signature []byte, publicKey *rsa.PublicKey) error {
checksum, err := ChecksumForBytes(source)
if err != nil {
return err
}
return rsa.VerifyPKCS1v15(publicKey, crypto.SHA256, checksum, signature)
}
package update
import (
"bytes"
"crypto"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"encoding/pem"
"github.com/kr/binarydist"
"io/ioutil"
"net"
"net/http"
"os"
"testing"
)
var (
oldFile = []byte{0xDE, 0xAD, 0xBE, 0xEF}
newFile = []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06}
)
func cleanup(path string) {
os.Remove(path)
}
// we write with a separate name for each test so that we can run them in parallel
func writeOldFile(path string, t *testing.T) {
if err := ioutil.WriteFile(path, oldFile, 0777); err != nil {
t.Fatalf("Failed to write file for testing preparation: %v", err)
}
}
func validateUpdate(path string, err error, t *testing.T) {
if err != nil {
t.Fatalf("Failed to update: %v", err)
}
buf, err := ioutil.ReadFile(path)
if err != nil {
t.Fatalf("Failed to read file post-update: %v", err)
}
if !bytes.Equal(buf, newFile) {
t.Fatalf("File was not updated! Bytes read: %v, Bytes expected: %v", buf, newFile)
}
}
func TestFromStream(t *testing.T) {
t.Parallel()
fName := "TestFromStream"
defer cleanup(fName)
writeOldFile(fName, t)
err, _ := New().Target(fName).FromStream(bytes.NewReader(newFile))
validateUpdate(fName, err, t)
}
func TestFromFile(t *testing.T) {
t.Parallel()
fName := "TestFromFile"
newFName := "NewTestFromFile"
defer cleanup(fName)
defer cleanup(newFName)
writeOldFile(fName, t)
if err := ioutil.WriteFile(newFName, newFile, 0777); err != nil {
t.Fatalf("Failed to write file to update from: %v", err)
}
err, _ := New().Target(fName).FromFile(newFName)
validateUpdate(fName, err, t)
}
func TestFromUrl(t *testing.T) {
t.Parallel()
fName := "TestFromUrl"
defer cleanup(fName)
writeOldFile(fName, t)
l, err := net.Listen("tcp", ":0")
if err != nil {
t.Fatalf("Couldn't bind listener: %v", err)
}
addr := l.Addr().String()
go http.Serve(l, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Write(newFile)
}))
err, _ = New().Target(fName).FromUrl("http://" + addr)
validateUpdate(fName, err, t)
}
func TestVerifyChecksum(t *testing.T) {
t.Parallel()
fName := "TestVerifyChecksum"
defer cleanup(fName)
writeOldFile(fName, t)
checksum, err := ChecksumForBytes(newFile)
if err != nil {
t.Fatalf("Failed to compute checksum: %v", err)
}
err, _ = New().Target(fName).VerifyChecksum(checksum).FromStream(bytes.NewReader(newFile))
validateUpdate(fName, err, t)
}
func TestVerifyChecksumNegative(t *testing.T) {
t.Parallel()
fName := "TestVerifyChecksumNegative"
defer cleanup(fName)
writeOldFile(fName, t)
badChecksum := []byte{0x0A, 0x0B, 0x0C, 0xFF}
err, _ := New().Target(fName).VerifyChecksum(badChecksum).FromStream(bytes.NewReader(newFile))
if err == nil {
t.Fatalf("Failed to detect bad checksum!")
}
}
func TestApplyPatch(t *testing.T) {
t.Parallel()
fName := "TestApplyPatch"
defer cleanup(fName)
writeOldFile(fName, t)
patch := new(bytes.Buffer)
err := binarydist.Diff(bytes.NewReader(oldFile), bytes.NewReader(newFile), patch)
if err != nil {
t.Fatalf("Failed to create patch: %v", err)
}
up := New().Target(fName).ApplyPatch(PATCHTYPE_BSDIFF)
err, _ = up.FromStream(bytes.NewReader(patch.Bytes()))
validateUpdate(fName, err, t)
}
func TestCorruptPatch(t *testing.T) {
t.Parallel()
fName := "TestCorruptPatch"
defer cleanup(fName)
writeOldFile(fName, t)
badPatch := []byte{0x44, 0x38, 0x86, 0x3c, 0x4f, 0x8d, 0x26, 0x54, 0xb, 0x11, 0xce, 0xfe, 0xc1, 0xc0, 0xf8, 0x31, 0x38, 0xa0, 0x12, 0x1a, 0xa2, 0x57, 0x2a, 0xe1, 0x3a, 0x48, 0x62, 0x40, 0x2b, 0x81, 0x12, 0xb1, 0x21, 0xa5, 0x16, 0xed, 0x73, 0xd6, 0x54, 0x84, 0x29, 0xa6, 0xd6, 0xb2, 0x1b, 0xfb, 0xe6, 0xbe, 0x7b, 0x70}
up := New().Target(fName).ApplyPatch(PATCHTYPE_BSDIFF)
err, _ := up.FromStream(bytes.NewReader(badPatch))
if err == nil {
t.Fatalf("Failed to detect corrupt patch!")
}
}
func TestVerifyChecksumPatchNegative(t *testing.T) {
t.Parallel()
fName := "TestVerifyChecksumPatchNegative"
defer cleanup(fName)
writeOldFile(fName, t)
checksum, err := ChecksumForBytes(newFile)
if err != nil {
t.Fatalf("Failed to compute checksum: %v", err)
}
patch := new(bytes.Buffer)
anotherFile := []byte{0x77, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66}
err = binarydist.Diff(bytes.NewReader(oldFile), bytes.NewReader(anotherFile), patch)
if err != nil {
t.Fatalf("Failed to create patch: %v", err)
}
up := New().Target(fName).ApplyPatch(PATCHTYPE_BSDIFF).VerifyChecksum(checksum)
err, _ = up.FromStream(bytes.NewReader(patch.Bytes()))
if err == nil {
t.Fatalf("Failed to detect patch to wrong file!")
}
}
const publicKey = `-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxSWmu7trWKAwDFjiCN2D
Tk2jj2sgcr/CMlI4cSSiIOHrXCFxP1I8i9PvQkd4hasXQrLbT5WXKrRGv1HKUKab
b9ead+kD0kxk7i2bFYvKX43oq66IW0mOLTQBO7I9UyT4L7svcMD+HUQ2BqHoaQe4
y20C59dPr9Dpcz8DZkdLsBV6YKF6Ieb3iGk8oRLMWNaUqPa8f1BGgxAkvPHcqDjT
x4xRnjgTRRRlZvRtALHMUkIChgxDOhoEzKpGiqnX7HtMJfrhV6h0PAXNA4h9Kjv5
5fhJ08Rz7mmZmtH5JxTK5XTquo59sihSajR4bSjZbbkQ1uLkeFlY3eli3xdQ7Nrf
fQIDAQAB
-----END PUBLIC KEY-----`
const privateKey = `-----BEGIN RSA PRIVATE KEY-----
MIIEogIBAAKCAQEAxSWmu7trWKAwDFjiCN2DTk2jj2sgcr/CMlI4cSSiIOHrXCFx
P1I8i9PvQkd4hasXQrLbT5WXKrRGv1HKUKabb9ead+kD0kxk7i2bFYvKX43oq66I
W0mOLTQBO7I9UyT4L7svcMD+HUQ2BqHoaQe4y20C59dPr9Dpcz8DZkdLsBV6YKF6
Ieb3iGk8oRLMWNaUqPa8f1BGgxAkvPHcqDjTx4xRnjgTRRRlZvRtALHMUkIChgxD
OhoEzKpGiqnX7HtMJfrhV6h0PAXNA4h9Kjv55fhJ08Rz7mmZmtH5JxTK5XTquo59
sihSajR4bSjZbbkQ1uLkeFlY3eli3xdQ7NrffQIDAQABAoIBAAkN+6RvrTR61voa
Mvd5RQiZpEN4Bht/Fyo8gH8h0Zh1B9xJZOwlmMZLS5fdtHlfLEhR8qSrGDBL61vq
I8KkhEsUufF78EL+YzxVN+Q7cWYGHIOWFokqza7hzpSxUQO6lPOMQ1eIZaNueJTB
Zu07/47ISPPg/bXzgGVcpYlTCPTjUwKjtfyMqvX9AD7fIyYRm6zfE7EHj1J2sBFt
Yz1OGELg6HfJwXfpnPfBvftD0hWGzJ78Bp71fPJe6n5gnqmSqRvrcXNWFnH/yqkN
d6vPIxD6Z3LjvyZpkA7JillLva2L/zcIFhg4HZvQnWd8/PpDnUDonu36hcj4SC5j
W4aVPLkCgYEA4XzNKWxqYcajzFGZeSxlRHupSAl2MT7Cc5085MmE7dd31wK2T8O4
n7N4bkm/rjTbX85NsfWdKtWb6mpp8W3VlLP0rp4a/12OicVOkg4pv9LZDmY0sRlE
YuDJk1FeCZ50UrwTZI3rZ9IhZHhkgVA6uWAs7tYndONkxNHG0pjqs4sCgYEA39MZ
JwMqo3qsPntpgP940cCLflEsjS9hYNO3+Sv8Dq3P0HLVhBYajJnotf8VuU0fsQZG
grmtVn1yThFbMq7X1oY4F0XBA+paSiU18c4YyUnwax2u4sw9U/Q9tmQUZad5+ueT
qriMBwGv+ewO+nQxqvAsMUmemrVzrfwA5Oct+hcCgYAfiyXoNZJsOy2O15twqBVC
j0oPGcO+/9iT89sg5lACNbI+EdMPNYIOVTzzsL1v0VUfAe08h++Enn1BPcG0VHkc
ZFBGXTfJoXzfKQrkw7ZzbzuOGB4m6DH44xlP0oIlNlVvfX/5ASF9VJf3RiBJNsAA
TsP6ZVr/rw/ZuL7nlxy+IQKBgDhL/HOXlE3yOQiuOec8WsNHTs7C1BXe6PtVxVxi
988pYK/pclL6zEq5G5NLSceF4obAMVQIJ9UtUGbabrncyGUo9UrFPLsjYvprSZo8
YHegpVwL50UcYgCP2kXZ/ldjPIcjYDz8lhvdDMor2cidGTEJn9P11HLNWP9V91Ob
4jCZAoGAPNRSC5cC8iP/9j+s2/kdkfWJiNaolPYAUrmrkL6H39PYYZM5tnhaIYJV
Oh9AgABamU0eb3p3vXTISClVgV7ifq1HyZ7BSUhMfaY2Jk/s3sUHCWFxPZe9sgEG
KinIY/373KIkIV/5g4h2v1w330IWcfptxKcY/Er3DJr38f695GE=
-----END RSA PRIVATE KEY-----`
func sign(privatePEM string, source []byte, t *testing.T) []byte {
block, _ := pem.Decode([]byte(privatePEM))
if block == nil {
t.Fatalf("Failed to parse private key PEM")
}
priv, err := x509.ParsePKCS1PrivateKey(block.Bytes)
if err != nil {
t.Fatalf("Failed to parse private key DER")
}
checksum, err := ChecksumForBytes(source)
if err != nil {
t.Fatalf("Failed to make checksum")
}
sig, err := rsa.SignPKCS1v15(rand.Reader, priv, crypto.SHA256, checksum)
if err != nil {
t.Fatalf("Failed to sign: %v", sig)
}
return sig
}
func TestVerifySignature(t *testing.T) {
t.Parallel()
fName := "TestVerifySignature"
defer cleanup(fName)
writeOldFile(fName, t)
up, err := New().Target(fName).VerifySignatureWithPEM([]byte(publicKey))
if err != nil {
t.Fatalf("Could not parse public key: %v", err)
}
signature := sign(privateKey, newFile, t)
err, _ = up.VerifySignature(signature).FromStream(bytes.NewReader(newFile))
validateUpdate(fName, err, t)
}
func TestVerifyFailBadSignature(t *testing.T) {
t.Parallel()
fName := "TestVerifyFailBadSignature"
defer cleanup(fName)
writeOldFile(fName, t)
up, err := New().Target(fName).VerifySignatureWithPEM([]byte(publicKey))
if err != nil {
t.Fatalf("Could not parse public key: %v", err)
}
badSig := []byte{0xFF, 0xEE, 0xDD, 0xCC, 0xBB, 0xAA}
err, _ = up.VerifySignature(badSig).FromStream(bytes.NewReader(newFile))
if err == nil {
t.Fatalf("Did not fail with bad signature")
}
}
func TestVerifyFailNoSignature(t *testing.T) {
t.Parallel()
fName := "TestVerifySignatureWithPEM"
defer cleanup(fName)
writeOldFile(fName, t)
up, err := New().Target(fName).VerifySignatureWithPEM([]byte(publicKey))
if err != nil {
t.Fatalf("Could not parse public key: %v", err)
}
err, _ = up.VerifySignature([]byte{}).FromStream(bytes.NewReader(newFile))
if err == nil {
t.Fatalf("Did not fail with empty signature")
}
}
const wrongKey = `-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEArKqjT+xOFJILe0CX7lKfQy52YwWLF9devYtLeUHTbPOueGLy
6CjrXJBrWIxNBxRd53y4dtgiMqCX6Gmmvuy8HnfbBuJjR2mcdEYo8UDy+aSVBQ6T
/ND7Fd7KSzOruEFFzl2QFnZ/SrW/nsXdGyuF8l+YIwjZJRyV6StZkZ4ydOzOqUk9
FXTeIkhX/Q7/jTETw7L3wxMyLgJAlV3lxDsPkMjxymngtbAIjwEjLsVeU+Prcz2e
Ww34SZQ8qwzAdXieuDPryMwEsCcgc5NAKJFNL8TppYGDXOHI7CRXqHfNiJq2R+kQ
LdxRvmfx8/iu4xM2hBzk4uSDS6TTn2AnWBm+cQIDAQABAoIBAFp//aUwaCRj/9yU
GI3zhEJEIgz4pNTUL3YNgnuFwvlCJ9o1kreYavRTRdBdiSoCxM1GE7FGy3XZsoVA
iwNbNaaKj6RmGD8f3b8b3u3EaxXp66mA4JQMPO5TnZgY9xJWM+5cH9+GMGXKKStg
7ekFwOkuraD/TEElYHWcIRAv6KZbc/YOIa6YDKi+1Gc7u0MeIvwqN7nwaBAoJKUE
ZrJIfYKIViD/ZrCpgWN47C9x8w3ne7iiDrYoYct+0reC9LFlqwVBtDnyVx/q3upW
zzczbNQagu3w0QgprDGhy0ZhDNxuylV3XBWTB+xBrFQgz6rD3LzUPywlbt0N7ZmD
936MVSECgYEA1IElCahF/+hC/OxFgy98DubAUDGmrvxWeZF3bvTseWZQp/gzxVS+
SYumYyd2Ysx5+UjXQlVgR6BbDG13+DpSpZm6+MeWHBAR+KA2qCg009SDFv7l26/d
xMT7lvIWz7ckQDb/+jvhF9HL2llyTN1Zex+n3XBeAMKNrPaubdEBFsUCgYEA0AIO
tZMtzOpioAR1lGbwIguq04msDdrJNaY2TKrLeviJuQUw94fgL+3ULAPsiyxaU/Gv
vln11R7aIp1SJ09T2UoFRbty+6SGRC56+Wh0pn5VnAi7aT6qdkYWhEjhqRHuXosf
PYboXBuMwA0FBUTxWQL/lux2PZgvBkniYh5jI70CgYEAk9KmhhpFX2gdOT3OeRxO
CzufaemwDqfAK97yGwBLg4OV9dJliQ6TNCvt+amY489jxfJSs3UafZjh3TpFKyq/
FS1kb+y+0hSnu7EPdFhLr1N0QUndcb3b4iY48V7EWYgHspfP5y1CPsSVLvXr2eZc
eZaiuhqReavczAXpfsDWJhUCgYEAwmUp2gfyhc+G3IVOXaLWSPseaxP+9/PAl6L+
nCgCgqpEC+YOHUee/SwHXhtMtcR9pnX5CKyKUuLCehcM8C/y7N+AjerhSsw3rwDB
bNVyLydiWrDOdU1bga1+3aI/QwK/AxyB1b5+6ZXVtKZ2SrZj2Aw1UZcr6eSQDhB+
wbQkcwECgYBF13FMA6OOon992t9H3I+4KDgmz6G6mz3bVXSoFWfO1p/yXP04BzJl
jtLFvFVTZdMs2o/wTd4SL6gYjx9mlOWwM8FblmjfiNSUVIyye33fRntEAr1n+FYI
Xhv6aVnNdaGehGIqQxXFoGyiJxG3RYNkSwaTOamxY1V+ceLuO26n2Q==
-----END RSA PRIVATE KEY-----`
func TestVerifyFailWrongSignature(t *testing.T) {
t.Parallel()
fName := "TestVerifyFailWrongSignature"
defer cleanup(fName)
writeOldFile(fName, t)
up, err := New().Target(fName).VerifySignatureWithPEM([]byte(publicKey))
if err != nil {
t.Fatalf("Could not parse public key: %v", err)
}
signature := sign(wrongKey, newFile, t)
err, _ = up.VerifySignature(signature).FromStream(bytes.NewReader(newFile))
if err == nil {
t.Fatalf("Verified an update that was signed by an untrusted key!")
}
}
func TestSignatureButNoPublicKey(t *testing.T) {
t.Parallel()
fName := "TestSignatureButNoPublicKey"
defer cleanup(fName)
writeOldFile(fName, t)
sig := sign(privateKey, newFile, t)
err, _ := New().Target(fName).VerifySignature(sig).FromStream(bytes.NewReader(newFile))
if err == nil {
t.Fatalf("Allowed an update with a signautre verification when no public key was specified!")
}
}
func TestPublicKeyButNoSignature(t *testing.T) {
t.Parallel()
fName := "TestPublicKeyButNoSignature"
defer cleanup(fName)
writeOldFile(fName, t)
up, err := New().Target(fName).VerifySignatureWithPEM([]byte(publicKey))
if err != nil {
t.Fatalf("Could not parse public key: %v", err)
}
err, _ = up.FromStream(bytes.NewReader(newFile))
if err == nil {
t.Fatalf("Allowed an update with no signautre when a public key was specified!")
}
}
Copyright (c) 2012 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
### Extensions to the "os" package.
## Find the current Executable and ExecutableFolder.
There is sometimes utility in finding the current executable file
that is running. This can be used for upgrading the current executable
or finding resources located relative to the executable file. Both
the working directory and the os.Args[0] value are arbitrary and cannot
be relied on; os.Args[0] can be "faked".
Multi-platform and supports:
* Linux
* OS X
* Windows
* Plan 9
* BSDs.
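A minimal sketch of typical usage (the import path matches how this package is vendored above; output paths depend on the running binary):

	package main

	import (
		"fmt"
		"log"

		"github.com/kardianos/osext"
	)

	func main() {
		// Absolute path of the running executable, independent of os.Args[0].
		exe, err := osext.Executable()
		if err != nil {
			log.Fatal(err)
		}
		// Directory containing the executable, useful for locating bundled resources.
		dir, err := osext.ExecutableFolder()
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("executable:", exe)
		fmt.Println("folder:", dir)
	}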
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Extensions to the standard "os" package.
package osext
import "path/filepath"
// Executable returns an absolute path that can be used to
// re-invoke the current program.
// It may not be valid after the current program exits.
func Executable() (string, error) {
p, err := executable()
return filepath.Clean(p), err
}
// ExecutableFolder returns the directory of the path returned by Executable,
// excluding the executable name and any trailing slash.
func ExecutableFolder() (string, error) {
p, err := Executable()
if err != nil {
return "", err
}
return filepath.Dir(p), nil
}
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package osext
import (
"os"
"strconv"
"syscall"
)
func executable() (string, error) {
f, err := os.Open("/proc/" + strconv.Itoa(os.Getpid()) + "/text")
if err != nil {
return "", err
}
defer f.Close()
return syscall.Fd2path(int(f.Fd()))
}
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux netbsd openbsd solaris dragonfly
package osext
import (
"errors"
"fmt"
"os"
"runtime"
"strings"
)
func executable() (string, error) {
switch runtime.GOOS {
case "linux":
const deletedTag = " (deleted)"
execpath, err := os.Readlink("/proc/self/exe")
if err != nil {
return execpath, err
}
execpath = strings.TrimSuffix(execpath, deletedTag)
execpath = strings.TrimPrefix(execpath, deletedTag)
return execpath, nil
case "netbsd":
return os.Readlink("/proc/curproc/exe")
case "openbsd", "dragonfly":
return os.Readlink("/proc/curproc/file")
case "solaris":
return os.Readlink(fmt.Sprintf("/proc/%d/path/a.out", os.Getpid()))
}
return "", errors.New("ExecPath not implemented for " + runtime.GOOS)
}
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin freebsd
package osext
import (
"os"
"path/filepath"
"runtime"
"syscall"
"unsafe"
)
var initCwd, initCwdErr = os.Getwd()
func executable() (string, error) {
var mib [4]int32
switch runtime.GOOS {
case "freebsd":
mib = [4]int32{1 /* CTL_KERN */, 14 /* KERN_PROC */, 12 /* KERN_PROC_PATHNAME */, -1}
case "darwin":
mib = [4]int32{1 /* CTL_KERN */, 38 /* KERN_PROCARGS */, int32(os.Getpid()), -1}
}
n := uintptr(0)
// Get length.
_, _, errNum := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, 0, uintptr(unsafe.Pointer(&n)), 0, 0)
if errNum != 0 {
return "", errNum
}
if n == 0 { // This shouldn't happen.
return "", nil
}
buf := make([]byte, n)
_, _, errNum = syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, uintptr(unsafe.Pointer(&buf[0])), uintptr(unsafe.Pointer(&n)), 0, 0)
if errNum != 0 {
return "", errNum
}
if n == 0 { // This shouldn't happen.
return "", nil
}
for i, v := range buf {
if v == 0 {
buf = buf[:i]
break
}
}
var err error
execPath := string(buf)
// execPath will not be empty due to above checks.
// Try to get the absolute path if the execPath is not rooted.
if execPath[0] != '/' {
execPath, err = getAbs(execPath)
if err != nil {
return execPath, err
}
}
// For darwin KERN_PROCARGS may return the path to a symlink rather than the
// actual executable.
if runtime.GOOS == "darwin" {
if execPath, err = filepath.EvalSymlinks(execPath); err != nil {
return execPath, err
}
}
return execPath, nil
}
func getAbs(execPath string) (string, error) {
if initCwdErr != nil {
return execPath, initCwdErr
}
// The execPath may begin with a "../" or a "./" so clean it first.
// Join the two paths, trailing and starting slashes undetermined, so use
// the generic Join function.
return filepath.Join(initCwd, filepath.Clean(execPath)), nil
}
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin linux freebsd netbsd windows
package osext
import (
"bytes"
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
"runtime"
"testing"
)
const (
executableEnvVar = "OSTEST_OUTPUT_EXECUTABLE"
executableEnvValueMatch = "match"
executableEnvValueDelete = "delete"
)
func TestPrintExecutable(t *testing.T) {
ef, err := Executable()
if err != nil {
t.Fatalf("Executable failed: %v", err)
}
t.Log("Executable:", ef)
}
func TestPrintExecutableFolder(t *testing.T) {
ef, err := ExecutableFolder()
if err != nil {
t.Fatalf("ExecutableFolder failed: %v", err)
}
t.Log("Executable Folder:", ef)
}
func TestExecutableFolder(t *testing.T) {
ef, err := ExecutableFolder()
if err != nil {
t.Fatalf("ExecutableFolder failed: %v", err)
}
if ef[len(ef)-1] == filepath.Separator {
t.Fatal("ExecutableFolder ends with a trailing slash.")
}
}
func TestExecutableMatch(t *testing.T) {
ep, err := Executable()
if err != nil {
t.Fatalf("Executable failed: %v", err)
}
// fullpath to be of the form "dir/prog".
dir := filepath.Dir(filepath.Dir(ep))
fullpath, err := filepath.Rel(dir, ep)
if err != nil {
t.Fatalf("filepath.Rel: %v", err)
}
// Make child start with a relative program path.
// Alter argv[0] for child to verify getting real path without argv[0].
cmd := &exec.Cmd{
Dir: dir,
Path: fullpath,
Env: []string{fmt.Sprintf("%s=%s", executableEnvVar, executableEnvValueMatch)},
}
out, err := cmd.CombinedOutput()
if err != nil {
t.Fatalf("exec(self) failed: %v", err)
}
outs := string(out)
if !filepath.IsAbs(outs) {
t.Fatalf("Child returned %q, want an absolute path", out)
}
if !sameFile(outs, ep) {
t.Fatalf("Child returned %q, not the same file as %q", out, ep)
}
}
func TestExecutableDelete(t *testing.T) {
if runtime.GOOS != "linux" {
t.Skip()
}
fpath, err := Executable()
if err != nil {
t.Fatalf("Executable failed: %v", err)
}
r, w := io.Pipe()
stderrBuff := &bytes.Buffer{}
stdoutBuff := &bytes.Buffer{}
cmd := &exec.Cmd{
Path: fpath,
Env: []string{fmt.Sprintf("%s=%s", executableEnvVar, executableEnvValueDelete)},
Stdin: r,
Stderr: stderrBuff,
Stdout: stdoutBuff,
}
err = cmd.Start()
if err != nil {
t.Fatalf("exec(self) start failed: %v", err)
}
tempPath := fpath + "_copy"
_ = os.Remove(tempPath)
err = copyFile(tempPath, fpath)
if err != nil {
t.Fatalf("copy file failed: %v", err)
}
err = os.Remove(fpath)
if err != nil {
t.Fatalf("remove running test file failed: %v", err)
}
err = os.Rename(tempPath, fpath)
if err != nil {
t.Fatalf("rename copy to previous name failed: %v", err)
}
w.Write([]byte{0})
w.Close()
err = cmd.Wait()
if err != nil {
t.Fatalf("exec wait failed: %v", err)
}
childPath := stderrBuff.String()
if !filepath.IsAbs(childPath) {
t.Fatalf("Child returned %q, want an absolute path", childPath)
}
if !sameFile(childPath, fpath) {
t.Fatalf("Child returned %q, not the same file as %q", childPath, fpath)
}
}
func sameFile(fn1, fn2 string) bool {
fi1, err := os.Stat(fn1)
if err != nil {
return false
}
fi2, err := os.Stat(fn2)
if err != nil {
return false
}
return os.SameFile(fi1, fi2)
}
func copyFile(dest, src string) error {
df, err := os.Create(dest)
if err != nil {
return err
}
defer df.Close()
sf, err := os.Open(src)
if err != nil {
return err
}
defer sf.Close()
_, err = io.Copy(df, sf)
return err
}
func TestMain(m *testing.M) {
env := os.Getenv(executableEnvVar)
switch env {
case "":
os.Exit(m.Run())
case executableEnvValueMatch:
// First chdir to another path.
dir := "/"
if runtime.GOOS == "windows" {
dir = filepath.VolumeName(".")
}
os.Chdir(dir)
if ep, err := Executable(); err != nil {
fmt.Fprint(os.Stderr, "ERROR: ", err)
} else {
fmt.Fprint(os.Stderr, ep)
}
case executableEnvValueDelete:
bb := make([]byte, 1)
var err error
n, err := os.Stdin.Read(bb)
if err != nil {
fmt.Fprint(os.Stderr, "ERROR: ", err)
os.Exit(2)
}
if n != 1 {
fmt.Fprint(os.Stderr, "ERROR: n != 1, n == ", n)
os.Exit(2)
}
if ep, err := Executable(); err != nil {
fmt.Fprint(os.Stderr, "ERROR: ", err)
} else {
fmt.Fprint(os.Stderr, ep)
}
}
os.Exit(0)
}
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package osext
import (
"syscall"
"unicode/utf16"
"unsafe"
)
var (
kernel = syscall.MustLoadDLL("kernel32.dll")
getModuleFileNameProc = kernel.MustFindProc("GetModuleFileNameW")
)
// GetModuleFileName() with hModule = NULL
func executable() (exePath string, err error) {
return getModuleFileName()
}
func getModuleFileName() (string, error) {
var n uint32
b := make([]uint16, syscall.MAX_PATH)
size := uint32(len(b))
r0, _, e1 := getModuleFileNameProc.Call(0, uintptr(unsafe.Pointer(&b[0])), uintptr(size))
n = uint32(r0)
if n == 0 {
return "", e1
}
return string(utf16.Decode(b[0:n])), nil
}
Copyright 2012 Keith Rarick
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
# binarydist
Package binarydist implements binary diff and patch as described on
<http://www.daemonology.net/bsdiff/>. It reads and writes files
compatible with the tools there.
Documentation at <http://go.pkgdoc.org/github.com/kr/binarydist>.
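For example, a minimal round trip through the public API might look like the sketch below. It is only a sketch: the byte slices are placeholders, and Diff needs a bzip2 binary on PATH because compression is delegated to an external process.

package main

import (
	"bytes"
	"log"

	"github.com/kr/binarydist"
)

func main() {
	oldData := []byte("the quick brown fox")
	newData := []byte("the quick red fox")

	// Produce a BSDIFF40 patch that turns oldData into newData.
	var patch bytes.Buffer
	if err := binarydist.Diff(bytes.NewReader(oldData), bytes.NewReader(newData), &patch); err != nil {
		log.Fatal(err)
	}

	// Apply the patch to the old data and check we get the new data back.
	var rebuilt bytes.Buffer
	if err := binarydist.Patch(bytes.NewReader(oldData), &rebuilt, &patch); err != nil {
		log.Fatal(err)
	}
	if !bytes.Equal(rebuilt.Bytes(), newData) {
		log.Fatal("round trip mismatch")
	}
}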
package binarydist
import (
"io"
"os/exec"
)
type bzip2Writer struct {
c *exec.Cmd
w io.WriteCloser
}
func (w bzip2Writer) Write(b []byte) (int, error) {
return w.w.Write(b)
}
func (w bzip2Writer) Close() error {
if err := w.w.Close(); err != nil {
return err
}
return w.c.Wait()
}
// Package compress/bzip2 implements only decompression,
// so we'll fake it by running bzip2 in another process.
func newBzip2Writer(w io.Writer) (wc io.WriteCloser, err error) {
var bw bzip2Writer
bw.c = exec.Command("bzip2", "-c")
bw.c.Stdout = w
if bw.w, err = bw.c.StdinPipe(); err != nil {
return nil, err
}
if err = bw.c.Start(); err != nil {
return nil, err
}
return bw, nil
}
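As a sketch of how this wrapper might be used inside the package (assuming "bytes" is imported and a bzip2 binary is on PATH), a hypothetical helper could compress a byte slice like this:

// compressBzip2 is a hypothetical in-package helper illustrating the wrapper.
func compressBzip2(data []byte) ([]byte, error) {
	var out bytes.Buffer
	w, err := newBzip2Writer(&out)
	if err != nil {
		return nil, err
	}
	if _, err := w.Write(data); err != nil {
		w.Close()
		return nil, err
	}
	// Close shuts the child's stdin and waits for the bzip2 process to exit,
	// so out holds a complete bzip2 stream afterwards.
	if err := w.Close(); err != nil {
		return nil, err
	}
	return out.Bytes(), nil
}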
package binarydist
import (
"crypto/rand"
"io"
"io/ioutil"
"os"
)
func mustOpen(path string) *os.File {
f, err := os.Open(path)
if err != nil {
panic(err)
}
return f
}
func mustReadAll(r io.Reader) []byte {
b, err := ioutil.ReadAll(r)
if err != nil {
panic(err)
}
return b
}
func fileCmp(a, b *os.File) int64 {
sa, err := a.Seek(0, 2)
if err != nil {
panic(err)
}
sb, err := b.Seek(0, 2)
if err != nil {
panic(err)
}
if sa != sb {
return sa
}
_, err = a.Seek(0, 0)
if err != nil {
panic(err)
}
_, err = b.Seek(0, 0)
if err != nil {
panic(err)
}
pa, err := ioutil.ReadAll(a)
if err != nil {
panic(err)
}
pb, err := ioutil.ReadAll(b)
if err != nil {
panic(err)
}
for i := range pa {
if pa[i] != pb[i] {
return int64(i)
}
}
return -1
}
func mustWriteRandFile(path string, size int) *os.File {
p := make([]byte, size)
_, err := rand.Read(p)
if err != nil {
panic(err)
}
f, err := os.Create(path)
if err != nil {
panic(err)
}
_, err = f.Write(p)
if err != nil {
panic(err)
}
_, err = f.Seek(0, 0)
if err != nil {
panic(err)
}
return f
}
package binarydist
import (
"bytes"
"encoding/binary"
"io"
"io/ioutil"
)
func swap(a []int, i, j int) { a[i], a[j] = a[j], a[i] }
func split(I, V []int, start, length, h int) {
var i, j, k, x, jj, kk int
if length < 16 {
for k = start; k < start+length; k += j {
j = 1
x = V[I[k]+h]
for i = 1; k+i < start+length; i++ {
if V[I[k+i]+h] < x {
x = V[I[k+i]+h]
j = 0
}
if V[I[k+i]+h] == x {
swap(I, k+i, k+j)
j++
}
}
for i = 0; i < j; i++ {
V[I[k+i]] = k + j - 1
}
if j == 1 {
I[k] = -1
}
}
return
}
x = V[I[start+length/2]+h]
jj = 0
kk = 0
for i = start; i < start+length; i++ {
if V[I[i]+h] < x {
jj++
}
if V[I[i]+h] == x {
kk++
}
}
jj += start
kk += jj
i = start
j = 0
k = 0
for i < jj {
if V[I[i]+h] < x {
i++
} else if V[I[i]+h] == x {
swap(I, i, jj+j)
j++
} else {
swap(I, i, kk+k)
k++
}
}
for jj+j < kk {
if V[I[jj+j]+h] == x {
j++
} else {
swap(I, jj+j, kk+k)
k++
}
}
if jj > start {
split(I, V, start, jj-start, h)
}
for i = 0; i < kk-jj; i++ {
V[I[jj+i]] = kk - 1
}
if jj == kk-1 {
I[jj] = -1
}
if start+length > kk {
split(I, V, kk, start+length-kk, h)
}
}
func qsufsort(obuf []byte) []int {
var buckets [256]int
var i, h int
I := make([]int, len(obuf)+1)
V := make([]int, len(obuf)+1)
for _, c := range obuf {
buckets[c]++
}
for i = 1; i < 256; i++ {
buckets[i] += buckets[i-1]
}
copy(buckets[1:], buckets[:])
buckets[0] = 0
for i, c := range obuf {
buckets[c]++
I[buckets[c]] = i
}
I[0] = len(obuf)
for i, c := range obuf {
V[i] = buckets[c]
}
V[len(obuf)] = 0
for i = 1; i < 256; i++ {
if buckets[i] == buckets[i-1]+1 {
I[buckets[i]] = -1
}
}
I[0] = -1
for h = 1; I[0] != -(len(obuf) + 1); h += h {
var n int
for i = 0; i < len(obuf)+1; {
if I[i] < 0 {
n -= I[i]
i -= I[i]
} else {
if n != 0 {
I[i-n] = -n
}
n = V[I[i]] + 1 - i
split(I, V, i, n, h)
i += n
n = 0
}
}
if n != 0 {
I[i-n] = -n
}
}
for i = 0; i < len(obuf)+1; i++ {
I[V[i]] = i
}
return I
}
func matchlen(a, b []byte) (i int) {
for i < len(a) && i < len(b) && a[i] == b[i] {
i++
}
return i
}
func search(I []int, obuf, nbuf []byte, st, en int) (pos, n int) {
if en-st < 2 {
x := matchlen(obuf[I[st]:], nbuf)
y := matchlen(obuf[I[en]:], nbuf)
if x > y {
return I[st], x
} else {
return I[en], y
}
}
x := st + (en-st)/2
if bytes.Compare(obuf[I[x]:], nbuf) < 0 {
return search(I, obuf, nbuf, x, en)
} else {
return search(I, obuf, nbuf, st, x)
}
panic("unreached")
}
// Diff computes the difference between old and new, according to the bsdiff
// algorithm, and writes the result to patch.
func Diff(old, new io.Reader, patch io.Writer) error {
obuf, err := ioutil.ReadAll(old)
if err != nil {
return err
}
nbuf, err := ioutil.ReadAll(new)
if err != nil {
return err
}
pbuf, err := diffBytes(obuf, nbuf)
if err != nil {
return err
}
_, err = patch.Write(pbuf)
return err
}
func diffBytes(obuf, nbuf []byte) ([]byte, error) {
var patch seekBuffer
err := diff(obuf, nbuf, &patch)
if err != nil {
return nil, err
}
return patch.buf, nil
}
func diff(obuf, nbuf []byte, patch io.WriteSeeker) error {
var lenf int
I := qsufsort(obuf)
db := make([]byte, len(nbuf))
eb := make([]byte, len(nbuf))
var dblen, eblen int
var hdr header
hdr.Magic = magic
hdr.NewSize = int64(len(nbuf))
err := binary.Write(patch, signMagLittleEndian{}, &hdr)
if err != nil {
return err
}
// Compute the differences, writing ctrl as we go
pfbz2, err := newBzip2Writer(patch)
if err != nil {
return err
}
var scan, pos, length int
var lastscan, lastpos, lastoffset int
for scan < len(nbuf) {
var oldscore int
scan += length
for scsc := scan; scan < len(nbuf); scan++ {
pos, length = search(I, obuf, nbuf[scan:], 0, len(obuf))
for ; scsc < scan+length; scsc++ {
if scsc+lastoffset < len(obuf) &&
obuf[scsc+lastoffset] == nbuf[scsc] {
oldscore++
}
}
if (length == oldscore && length != 0) || length > oldscore+8 {
break
}
if scan+lastoffset < len(obuf) && obuf[scan+lastoffset] == nbuf[scan] {
oldscore--
}
}
if length != oldscore || scan == len(nbuf) {
var s, Sf int
lenf = 0
for i := 0; lastscan+i < scan && lastpos+i < len(obuf); {
if obuf[lastpos+i] == nbuf[lastscan+i] {
s++
}
i++
if s*2-i > Sf*2-lenf {
Sf = s
lenf = i
}
}
lenb := 0
if scan < len(nbuf) {
var s, Sb int
for i := 1; (scan >= lastscan+i) && (pos >= i); i++ {
if obuf[pos-i] == nbuf[scan-i] {
s++
}
if s*2-i > Sb*2-lenb {
Sb = s
lenb = i
}
}
}
if lastscan+lenf > scan-lenb {
overlap := (lastscan + lenf) - (scan - lenb)
s := 0
Ss := 0
lens := 0
for i := 0; i < overlap; i++ {
if nbuf[lastscan+lenf-overlap+i] == obuf[lastpos+lenf-overlap+i] {
s++
}
if nbuf[scan-lenb+i] == obuf[pos-lenb+i] {
s--
}
if s > Ss {
Ss = s
lens = i + 1
}
}
lenf += lens - overlap
lenb -= lens
}
for i := 0; i < lenf; i++ {
db[dblen+i] = nbuf[lastscan+i] - obuf[lastpos+i]
}
for i := 0; i < (scan-lenb)-(lastscan+lenf); i++ {
eb[eblen+i] = nbuf[lastscan+lenf+i]
}
dblen += lenf
eblen += (scan - lenb) - (lastscan + lenf)
err = binary.Write(pfbz2, signMagLittleEndian{}, int64(lenf))
if err != nil {
pfbz2.Close()
return err
}
val := (scan - lenb) - (lastscan + lenf)
err = binary.Write(pfbz2, signMagLittleEndian{}, int64(val))
if err != nil {
pfbz2.Close()
return err
}
val = (pos - lenb) - (lastpos + lenf)
err = binary.Write(pfbz2, signMagLittleEndian{}, int64(val))
if err != nil {
pfbz2.Close()
return err
}
lastscan = scan - lenb
lastpos = pos - lenb
lastoffset = pos - scan
}
}
err = pfbz2.Close()
if err != nil {
return err
}
// Compute size of compressed ctrl data
l64, err := patch.Seek(0, 1)
if err != nil {
return err
}
hdr.CtrlLen = int64(l64 - 32)
// Write compressed diff data
pfbz2, err = newBzip2Writer(patch)
if err != nil {
return err
}
n, err := pfbz2.Write(db[:dblen])
if err != nil {
pfbz2.Close()
return err
}
if n != dblen {
pfbz2.Close()
return io.ErrShortWrite
}
err = pfbz2.Close()
if err != nil {
return err
}
// Compute size of compressed diff data
n64, err := patch.Seek(0, 1)
if err != nil {
return err
}
hdr.DiffLen = n64 - l64
// Write compressed extra data
pfbz2, err = newBzip2Writer(patch)
if err != nil {
return err
}
n, err = pfbz2.Write(eb[:eblen])
if err != nil {
pfbz2.Close()
return err
}
if n != eblen {
pfbz2.Close()
return io.ErrShortWrite
}
err = pfbz2.Close()
if err != nil {
return err
}
// Seek to the beginning, write the header, and close the file
_, err = patch.Seek(0, 0)
if err != nil {
return err
}
err = binary.Write(patch, signMagLittleEndian{}, &hdr)
if err != nil {
return err
}
return nil
}
package binarydist
import (
"bytes"
"io/ioutil"
"os"
"os/exec"
"testing"
)
var diffT = []struct {
old *os.File
new *os.File
}{
{
old: mustWriteRandFile("test.old", 1e3),
new: mustWriteRandFile("test.new", 1e3),
},
{
old: mustOpen("testdata/sample.old"),
new: mustOpen("testdata/sample.new"),
},
}
func TestDiff(t *testing.T) {
for _, s := range diffT {
got, err := ioutil.TempFile("/tmp", "bspatch.")
if err != nil {
panic(err)
}
os.Remove(got.Name())
exp, err := ioutil.TempFile("/tmp", "bspatch.")
if err != nil {
panic(err)
}
cmd := exec.Command("bsdiff", s.old.Name(), s.new.Name(), exp.Name())
cmd.Stdout = os.Stdout
err = cmd.Run()
os.Remove(exp.Name())
if err != nil {
panic(err)
}
err = Diff(s.old, s.new, got)
if err != nil {
t.Fatal("err", err)
}
_, err = got.Seek(0, 0)
if err != nil {
panic(err)
}
gotBuf := mustReadAll(got)
expBuf := mustReadAll(exp)
if !bytes.Equal(gotBuf, expBuf) {
t.Fail()
t.Logf("diff %s %s", s.old.Name(), s.new.Name())
t.Logf("%s: len(got) = %d", got.Name(), len(gotBuf))
t.Logf("%s: len(exp) = %d", exp.Name(), len(expBuf))
i := matchlen(gotBuf, expBuf)
t.Logf("produced different output at pos %d; %d != %d", i, gotBuf[i], expBuf[i])
}
}
}
// Package binarydist implements binary diff and patch as described on
// http://www.daemonology.net/bsdiff/. It reads and writes files
// compatible with the tools there.
package binarydist
var magic = [8]byte{'B', 'S', 'D', 'I', 'F', 'F', '4', '0'}
// File format:
// 0 8 "BSDIFF40"
// 8 8 X
// 16 8 Y
// 24 8 sizeof(newfile)
// 32 X bzip2(control block)
// 32+X Y bzip2(diff block)
// 32+X+Y ??? bzip2(extra block)
// with control block a set of triples (x,y,z) meaning "add x bytes
// from oldfile to x bytes from the diff block; copy y bytes from the
// extra block; seek forwards in oldfile by z bytes".
type header struct {
Magic [8]byte
CtrlLen int64
DiffLen int64
NewSize int64
}
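For orientation, here is a minimal in-package sketch of decoding just this 32-byte header, mirroring the first steps of Patch; signMagLittleEndian and ErrCorrupt are defined elsewhere in this package, and "encoding/binary" and "io" are assumed to be imported:

// readHeader is a hypothetical helper that decodes and validates the
// fixed-size patch header from the start of a BSDIFF40 stream.
func readHeader(r io.Reader) (header, error) {
	var hdr header
	if err := binary.Read(r, signMagLittleEndian{}, &hdr); err != nil {
		return hdr, err
	}
	if hdr.Magic != magic {
		return hdr, ErrCorrupt
	}
	return hdr, nil
}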
package binarydist
// signMagLittleEndian is the numeric encoding used by the bsdiff tools.
// It implements binary.ByteOrder using a sign-magnitude format
// and little-endian byte order. Only the Uint64, PutUint64, and String
// methods have been written; the rest panic.
type signMagLittleEndian struct{}
func (signMagLittleEndian) Uint16(b []byte) uint16 { panic("unimplemented") }
func (signMagLittleEndian) PutUint16(b []byte, v uint16) { panic("unimplemented") }
func (signMagLittleEndian) Uint32(b []byte) uint32 { panic("unimplemented") }
func (signMagLittleEndian) PutUint32(b []byte, v uint32) { panic("unimplemented") }
func (signMagLittleEndian) Uint64(b []byte) uint64 {
y := int64(b[0]) |
int64(b[1])<<8 |
int64(b[2])<<16 |
int64(b[3])<<24 |
int64(b[4])<<32 |
int64(b[5])<<40 |
int64(b[6])<<48 |
int64(b[7]&0x7f)<<56
if b[7]&0x80 != 0 {
y = -y
}
return uint64(y)
}
func (signMagLittleEndian) PutUint64(b []byte, v uint64) {
x := int64(v)
neg := x < 0
if neg {
x = -x
}
b[0] = byte(x)
b[1] = byte(x >> 8)
b[2] = byte(x >> 16)
b[3] = byte(x >> 24)
b[4] = byte(x >> 32)
b[5] = byte(x >> 40)
b[6] = byte(x >> 48)
b[7] = byte(x >> 56)
if neg {
b[7] |= 0x80
}
}
func (signMagLittleEndian) String() string { return "signMagLittleEndian" }
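As a concrete illustration of the encoding (a small in-package sketch; the value -5 is only an example):

// roundTripSignMag shows how a negative int64 is laid out on the wire.
func roundTripSignMag() {
	var b [8]byte
	signMagLittleEndian{}.PutUint64(b[:], uint64(int64(-5)))
	// b == [8]byte{0x05, 0, 0, 0, 0, 0, 0, 0x80}:
	// the magnitude 5 in little-endian order, with the sign carried
	// in the top bit of the last byte.
	v := int64(signMagLittleEndian{}.Uint64(b[:]))
	_ = v // v == -5 again
}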
package binarydist
import (
"bytes"
"compress/bzip2"
"encoding/binary"
"errors"
"io"
"io/ioutil"
)
var ErrCorrupt = errors.New("corrupt patch")
// Patch applies patch to old, according to the bspatch algorithm,
// and writes the result to new.
func Patch(old io.Reader, new io.Writer, patch io.Reader) error {
var hdr header
err := binary.Read(patch, signMagLittleEndian{}, &hdr)
if err != nil {
return err
}
if hdr.Magic != magic {
return ErrCorrupt
}
if hdr.CtrlLen < 0 || hdr.DiffLen < 0 || hdr.NewSize < 0 {
return ErrCorrupt
}
ctrlbuf := make([]byte, hdr.CtrlLen)
_, err = io.ReadFull(patch, ctrlbuf)
if err != nil {
return err
}
cpfbz2 := bzip2.NewReader(bytes.NewReader(ctrlbuf))
diffbuf := make([]byte, hdr.DiffLen)
_, err = io.ReadFull(patch, diffbuf)
if err != nil {
return err
}
dpfbz2 := bzip2.NewReader(bytes.NewReader(diffbuf))
// The entire rest of the file is the extra block.
epfbz2 := bzip2.NewReader(patch)
obuf, err := ioutil.ReadAll(old)
if err != nil {
return err
}
nbuf := make([]byte, hdr.NewSize)
var oldpos, newpos int64
for newpos < hdr.NewSize {
var ctrl struct{ Add, Copy, Seek int64 }
err = binary.Read(cpfbz2, signMagLittleEndian{}, &ctrl)
if err != nil {
return err
}
// Sanity-check
if newpos+ctrl.Add > hdr.NewSize {
return ErrCorrupt
}
// Read diff string
_, err = io.ReadFull(dpfbz2, nbuf[newpos:newpos+ctrl.Add])
if err != nil {
return ErrCorrupt
}
// Add old data to diff string
for i := int64(0); i < ctrl.Add; i++ {
if oldpos+i >= 0 && oldpos+i < int64(len(obuf)) {
nbuf[newpos+i] += obuf[oldpos+i]
}
}
// Adjust pointers
newpos += ctrl.Add
oldpos += ctrl.Add
// Sanity-check
if newpos+ctrl.Copy > hdr.NewSize {
return ErrCorrupt
}
// Read extra string
_, err = io.ReadFull(epfbz2, nbuf[newpos:newpos+ctrl.Copy])
if err != nil {
return ErrCorrupt
}
// Adjust pointers
newpos += ctrl.Copy
oldpos += ctrl.Seek
}
// Write the new file
for len(nbuf) > 0 {
n, err := new.Write(nbuf)
if err != nil {
return err
}
nbuf = nbuf[n:]
}
return nil
}
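A hedged sketch of applying a patch between files on disk (the helper and its paths are assumptions, not part of this package), showing how ErrCorrupt can be told apart from ordinary I/O errors; "fmt" and "os" are assumed to be imported:

// applyPatch is a hypothetical in-package helper wrapping Patch for files.
func applyPatch(oldPath, newPath, patchPath string) error {
	oldF, err := os.Open(oldPath)
	if err != nil {
		return err
	}
	defer oldF.Close()
	patchF, err := os.Open(patchPath)
	if err != nil {
		return err
	}
	defer patchF.Close()
	newF, err := os.Create(newPath)
	if err != nil {
		return err
	}
	defer newF.Close()
	if err := Patch(oldF, newF, patchF); err != nil {
		if err == ErrCorrupt {
			return fmt.Errorf("%s is not a valid BSDIFF40 patch", patchPath)
		}
		return err
	}
	return nil
}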
package binarydist
import (
"io/ioutil"
"os"
"os/exec"
"testing"
)
func TestPatch(t *testing.T) {
mustWriteRandFile("test.old", 1e3)
mustWriteRandFile("test.new", 1e3)
got, err := ioutil.TempFile("/tmp", "bspatch.")
if err != nil {
panic(err)
}
os.Remove(got.Name())
err = exec.Command("bsdiff", "test.old", "test.new", "test.patch").Run()
if err != nil {
panic(err)
}
err = Patch(mustOpen("test.old"), got, mustOpen("test.patch"))
if err != nil {
t.Fatal("err", err)
}
ref, err := got.Seek(0, 2)
if err != nil {
panic(err)
}
t.Logf("got %d bytes", ref)
if n := fileCmp(got, mustOpen("test.new")); n > -1 {
t.Fatalf("produced different output at pos %d", n)
}
}
func TestPatchHk(t *testing.T) {
got, err := ioutil.TempFile("/tmp", "bspatch.")
if err != nil {
panic(err)
}
os.Remove(got.Name())
err = Patch(mustOpen("testdata/sample.old"), got, mustOpen("testdata/sample.patch"))
if err != nil {
t.Fatal("err", err)
}
ref, err := got.Seek(0, 2)
if err != nil {
panic(err)
}
t.Logf("got %d bytes", ref)
if n := fileCmp(got, mustOpen("testdata/sample.new")); n > -1 {
t.Fatalf("produced different output at pos %d", n)
}
}
package binarydist
import (
"errors"
)
type seekBuffer struct {
buf []byte
pos int
}
func (b *seekBuffer) Write(p []byte) (n int, err error) {
n = copy(b.buf[b.pos:], p)
if n == len(p) {
b.pos += n
return n, nil
}
b.buf = append(b.buf, p[n:]...)
b.pos += len(p)
return len(p), nil
}
func (b *seekBuffer) Seek(offset int64, whence int) (ret int64, err error) {
var abs int64
switch whence {
case 0:
abs = offset
case 1:
abs = int64(b.pos) + offset
case 2:
abs = int64(len(b.buf)) + offset
default:
return 0, errors.New("binarydist: invalid whence")
}
if abs < 0 {
return 0, errors.New("binarydist: negative position")
}
if abs >= 1<<31 {
return 0, errors.New("binarydist: position out of range")
}
b.pos = int(abs)
return abs, nil
}
package binarydist
import (
"bytes"
"crypto/rand"
"testing"
)
var sortT = [][]byte{
mustRandBytes(1000),
mustReadAll(mustOpen("test.old")),
[]byte("abcdefabcdef"),
}
func TestQsufsort(t *testing.T) {
for _, s := range sortT {
I := qsufsort(s)
for i := 1; i < len(I); i++ {
if bytes.Compare(s[I[i-1]:], s[I[i]:]) > 0 {
t.Fatalf("unsorted at %d", i)
}
}
}
}
func mustRandBytes(n int) []byte {
b := make([]byte, n)
_, err := rand.Read(b)
if err != nil {
panic(err)
}
return b
}