package signappx

import (
	"archive/zip"
	"crypto"
	"crypto/hmac"
	"encoding/base64"
	"encoding/xml"
	"errors"
	"fmt"
	"io"
	"strings"

	"github.com/sassoftware/relic/lib/zipslicer"
)

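// blockMapSize is the block granularity of an AppX block map: each file's
// uncompressed data is digested in 64 KiB chunks.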
const blockMapSize = 64 * 1024

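// hashAlgs maps the supported digest algorithms to the XML digest method URIs
// used in the block map's HashMethod attribute.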
var hashAlgs = map[crypto.Hash]string{
	crypto.SHA256: "http://www.w3.org/2001/04/xmlenc#sha256",
	crypto.SHA384: "http://www.w3.org/2001/04/xmldsig-more#sha384",
	crypto.SHA512: "http://www.w3.org/2001/04/xmlenc#sha512",
}

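// noHashFiles lists the package metadata files (the signature, code integrity
// catalog, content types, and the block map itself) that do not get entries
// in the block map.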
var noHashFiles = map[string]bool{
	appxSignature:     true,
	appxCodeIntegrity: true,
	appxContentTypes:  true,
	appxBlockMap:      true,
}

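// blockMap models the AppxBlockMap.xml document, which records a digest of
// every 64 KiB block of uncompressed data for each payload file in the
// package.
//
// A rough sketch of how the pieces below fit together when rebuilding a block
// map (the surrounding variable names are illustrative only):
//
//	bm := new(blockMap)
//	_ = bm.SetHash(crypto.SHA256)    // choose the digest algorithm
//	for _, f := range payloadFiles { // hypothetical []*zipslicer.File
//		_ = bm.AddFile(f, rawOut, nil)
//	}
//	_ = bm.CopySizes(oldBlockMapXML) // restore compressed block sizes
//	blob, _ := bm.Marshal()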
type blockMap struct {
	XMLName    xml.Name `xml:"http://schemas.microsoft.com/appx/2010/blockmap BlockMap"`
	HashMethod string   `xml:",attr"`
	File       []blockFile

	Hash            crypto.Hash `xml:"-"`
	unverifiedSizes bool
}

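// blockFile is a single file entry in the block map. Name uses DOS-style
// backslash separators, Size is the uncompressed file size, and LfhSize is
// the length of the file's zip local file header.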
type blockFile struct {
	Name    string `xml:",attr"`
	Size    uint64 `xml:",attr"`
	LfhSize int    `xml:",attr"`
	Block   []block
}

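// block holds the base64-encoded digest of one 64 KiB chunk of uncompressed
// data. Size is the compressed size of the chunk and is omitted for files
// stored without compression.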
type block struct {
	Hash string `xml:",attr"`
	Size uint64 `xml:",attr,omitempty"`
}

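// verifyBlockMap parses AppxBlockMap.xml from the package and checks that
// every payload file in the zip appears in it, in order, with the expected
// uncompressed size and block count. Unless skipDigests is set, it also
// re-hashes each 64 KiB block of uncompressed data and compares the result
// against the recorded digest.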
func verifyBlockMap(inz *zip.Reader, files zipFiles, skipDigests bool) error {
	isBundle := files[bundleManifestFile] != nil
	zf := files[appxBlockMap]
	if zf == nil {
		return errors.New("missing block map")
	}
	blob, err := readZipFile(zf)
	if err != nil {
		return err
	}
	var bm blockMap
	if err := xml.Unmarshal(blob, &bm); err != nil {
		return fmt.Errorf("error parsing block map: %s", err)
	}
	var hash crypto.Hash
	for hash2, alg := range hashAlgs {
		if alg == bm.HashMethod {
			hash = hash2
			break
		}
	}
	if hash == 0 {
		return errors.New("unsupported hash in block map")
	}
	bm.Hash = hash
	// Walk the zip entries and the block map entries in parallel; every
	// payload file must appear in the block map in the same order.
	bmfiles := bm.File
	for _, zf := range inz.File {
		if noHashFiles[zf.Name] || (isBundle && strings.HasSuffix(zf.Name, ".appx")) {
			continue
		}
		if len(bmfiles) == 0 {
			return fmt.Errorf("blockmap: unhashed zip file %s", zf.Name)
		}
		bmf := bmfiles[0]
		bmfiles = bmfiles[1:]
		name := zipToDos(zf.Name)
		if bmf.Name != name {
			return fmt.Errorf("blockmap: file mismatch: %s != %s", bmf.Name, name)
		} else if bmf.Size != zf.UncompressedSize64 {
			return fmt.Errorf("blockmap: file mismatch: %s: size %d != %d", name, bmf.Size, zf.UncompressedSize64)
		}
		if len(bmf.Block) != int((zf.UncompressedSize64+blockMapSize-1)/blockMapSize) {
			return fmt.Errorf("blockmap: file mismatch: %s: unexpected block count", name)
		}
		if skipDigests {
			continue
		}
		// Re-hash the uncompressed contents in 64 KiB blocks and compare
		// against the digests recorded in the block map.
		r, err := zf.Open()
		if err != nil {
			return err
		}
		remaining := zf.UncompressedSize64
		for i, block := range bmf.Block {
			count := remaining
			if count > blockMapSize {
				count = blockMapSize
			}
			remaining -= count
			d := hash.New()
			if _, err := io.CopyN(d, r, int64(count)); err != nil {
				return err
			}
			calc := d.Sum(nil)
			expected, err := base64.StdEncoding.DecodeString(block.Hash)
			if err != nil {
				return fmt.Errorf("blockmap: %s", err)
			}
			if !hmac.Equal(calc, expected) {
				return fmt.Errorf("blockmap: digest mismatch for %s block %d: calculated %x != found %x", name, i, calc, expected)
			}
		}
		if err := r.Close(); err != nil {
			return err
		}
		if remaining > 0 {
			return fmt.Errorf("blockmap: file mismatch: %s: size does not match blocks", name)
		}
	}
	return nil
}

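// SetHash selects the digest algorithm used for block map entries and records
// the corresponding XML digest method URI in HashMethod.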
func (b *blockMap) SetHash(hash crypto.Hash) error {
	alg := hashAlgs[hash]
	if alg == "" {
		return errors.New("unsupported hash algorithm")
	}
	b.HashMethod = alg
	b.Hash = hash
	return nil
}
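// CopySizes copies the per-block compressed sizes from a previously generated
// block map (blob) into this one, so that entries for files that were copied
// verbatim keep valid Size attributes. On success the unverifiedSizes flag is
// cleared, allowing the block map to be marshalled.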
func (b *blockMap) CopySizes(blob []byte) error {
	var orig blockMap
	if err := xml.Unmarshal(blob, &orig); err != nil {
		return fmt.Errorf("error parsing block map: %s", err)
	}
	for i, oldf := range orig.File {
		zipName := dosToZip(oldf.Name)
		if zipName == appxManifest || zipName == bundleManifestFile {
			// The manifest is rewritten as part of signing, so the sizes from
			// the original block map don't apply to it.
			continue
		} else if i >= len(b.File) {
			return errors.New("old block map has too many files")
		}
		newf := &b.File[i]
		if newf.Name != oldf.Name {
			return fmt.Errorf("old block map doesn't match new: %s", oldf.Name)
		}
		for j, oldblock := range oldf.Block {
			newf.Block[j].Size = oldblock.Size
		}
	}
	b.unverifiedSizes = false
	return nil
}

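// AddFile digests one file from the input package and appends a matching
// entry to the block map. The file's raw (compressed) bytes, including its
// local file header and data descriptor, are copied to raw, and the
// uncompressed contents are copied to cooked; either writer may be nil.
// Metadata files listed in noHashFiles, and nested .appx files within a
// bundle, are still hashed through but do not get block map entries.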
func (b *blockMap) AddFile(f *zipslicer.File, raw, cooked io.Writer) error {
	bmf := blockFile{Name: zipToDos(f.Name)}
	lfh, err := f.GetLocalHeader()
	if err != nil {
		return fmt.Errorf("hashing zip metadata: %s", err)
	}
	bmf.LfhSize = len(lfh)
	if raw != nil {
		raw.Write(lfh)
	}
	rc, err := f.OpenAndTeeRaw(raw)
	if err != nil {
		return fmt.Errorf("hashing zip metadata: %s", err)
	}
	// Digest the uncompressed contents in 64 KiB blocks, optionally copying
	// them to the cooked writer as they are read.
	for {
		d := b.Hash.New()
		w := io.Writer(d)
		if cooked != nil {
			w = io.MultiWriter(d, cooked)
		}
		n, err := io.CopyN(w, rc, blockMapSize)
		if n > 0 {
			bmf.Size += uint64(n)
			hash := base64.StdEncoding.EncodeToString(d.Sum(nil))
			bmf.Block = append(bmf.Block, block{Hash: hash})
		}
		if err == io.EOF {
			break
		} else if err != nil {
			return err
		}
	}
	if err := rc.Close(); err != nil {
		return err
	}
	dd, err := f.GetDataDescriptor()
	if err != nil {
		return fmt.Errorf("hashing zip metadata: %s", err)
	}
	if raw != nil {
		raw.Write(dd)
	}
	if !(noHashFiles[f.Name] || strings.HasSuffix(f.Name, ".appx")) {
		if f.Method != zip.Store {
			// Compressed block sizes aren't known here; require CopySizes to
			// fill them in before the block map can be marshalled.
			b.unverifiedSizes = true
		}
		b.File = append(b.File, bmf)
	}
	return nil
}

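// Marshal serializes the block map to XML. It fails if compressed files were
// added whose block sizes have not yet been filled in via CopySizes.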
func (b *blockMap) Marshal() ([]byte, error) {
	if b.unverifiedSizes {
		return nil, errors.New("found compressed files not already in blockmap")
	}
	return marshalXML(b, false)
}

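// zipToDos converts a zip path to the backslash-separated form used for block
// map file names.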
func zipToDos(name string) string {
	return strings.Replace(name, "/", "\\", -1)
}

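// dosToZip converts a backslash-separated block map name back to a zip path.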
func dosToZip(name string) string {
	return strings.Replace(name, "\\", "/", -1)
}