Rietveld Code Review Tool

Delta Between Two Patch Sets: sha3/sha3_test.go

Issue 7760044: go.crypto/sha3: new package
Left Patch Set: diff -r 8dd5caec1eae https://code.google.com/p/go.crypto/ Created 11 years ago
Right Patch Set: diff -r 8dd5caec1eae https://code.google.com/p/go.crypto/ Created 11 years ago
LEFT | RIGHT
1 // Copyright 2013 The Go Authors. All rights reserved. 1 // Copyright 2013 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style 2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file. 3 // license that can be found in the LICENSE file.
4 4
5 package sha3 5 package sha3
6 6
7 // These tests are a subset of those provided by the Keccak web site(http://keccak.noekeon.org/). 7 // These tests are a subset of those provided by the Keccak web site(http://keccak.noekeon.org/).
8 8
9 import ( 9 import (
10 "bytes" 10 "bytes"
11 "encoding/hex" 11 "encoding/hex"
12 "fmt" 12 "fmt"
13 "hash" 13 "hash"
14 "strings" 14 "strings"
15 "testing" 15 "testing"
16 ) 16 )
17 17
18 // testDigests is a helper map storing a list of digests of each standard size. 18 // testDigests maintains a digest state of each standard type.
19 var testDigests = map[int]*digest{ 19 var testDigests = map[string]*digest{
20 » 224: {outputSize: 224 / 8}, 20 » "Keccak224": {outputSize: 224 / 8, capacity: 2 * 224 / 8},
21 » 256: {outputSize: 256 / 8}, 21 » "Keccak256": {outputSize: 256 / 8, capacity: 2 * 256 / 8},
22 » 384: {outputSize: 384 / 8}, 22 » "Keccak384": {outputSize: 384 / 8, capacity: 2 * 384 / 8},
23 » 512: {outputSize: 512 / 8}, 23 » "Keccak512": {outputSize: 512 / 8, capacity: 2 * 512 / 8},
24 } 24 }
25 25
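The capacities in the right-hand map follow the usual Keccak convention of capacity = 2 × output size, so the number of bytes absorbed per block (the rate) is whatever remains of the 1600-bit state. A minimal sketch of that arithmetic, assuming a 200-byte Keccak-f[1600] state and that the package's rate() is simply state size minus capacity (rateFor below is an illustrative helper, not part of the patch):

    package main

    import "fmt"

    const stateBytes = 200 // Keccak-f[1600]: 25 lanes of 8 bytes each.

    // rateFor mirrors the assumed rate = stateSize - capacity relationship,
    // with capacity = 2 * outputSize as in testDigests above.
    func rateFor(outputSize int) int {
    	return stateBytes - 2*outputSize
    }

    func main() {
    	for _, bits := range []int{224, 256, 384, 512} {
    		fmt.Printf("Keccak%d: rate = %d bytes per block\n", bits, rateFor(bits/8))
    	}
    	// Prints 144, 136, 104 and 72 bytes respectively.
    }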
26 // hashTest represents a test vector inputs and expected outputs at multiple sizes. 26 // testVector represents a test input and expected outputs from multiple algorithm variants.
27 type hashTest struct { 27 type testVector struct {
28 » input string 28 » desc string
29 » repeatCount int // input will be concatenated the input this many times. 29 » input []byte
30 » inputHex bool // If true, the input string is interpreted in hex. 30 » repeat int // input will be concatenated the input this many times.
31 » expectedOutputs map[int]string 31 » want map[string]string
32 } 32 }
33 33
34 // decodeHex converts an hex-encoded string into a raw byte string. 34 // decodeHex converts an hex-encoded string into a raw byte string.
35 func decodeHex(s string) []byte { 35 func decodeHex(s string) []byte {
36 b, err := hex.DecodeString(s) 36 b, err := hex.DecodeString(s)
37 if err != nil { 37 if err != nil {
38 panic(err) 38 panic(err)
39 } 39 }
40 return b 40 return b
41 } 41 }
42 42
43 // Test computes a test given a hash implementation. 43 // shortTestVectors stores a series of short testVectors.
44 // It returns true or false based on the test passing, plus an error string for printing. 44 // Inputs of 8, 248, and 264 bits from http://keccak.noekeon.org/ are included below.
45 func (t *hashTest) Test(testDigests map[int]*digest) error { 45 // The standard defines additional test inputs of all sizes between 0 and 2047 bits.
46 46 // Because the current implementation can only handle an integral number of bytes,
47 » // Reset all digests before testing 47 // most of the standard test inputs can't be used.
48 » for size := range t.expectedOutputs { 48 var shortKeccakTestVectors = []testVector{
49 » » testDigests[size].Reset() 49 » {
50 » } 50 » » desc: "short-8b",
51 51 » » input: decodeHex("CC"),
52 » // Convert input data to a byte array, decoding Hex if necessary 52 » » repeat: 1,
53 » var encoded []byte 53 » » want: map[string]string{
54 » if t.inputHex { 54 » » » "Keccak224": "A9CAB59EB40A10B246290F2D6086E32E3689FAF1D26B470C899F2802",
55 » » encoded = decodeHex(t.input) 55 » » » "Keccak256": "EEAD6DBFC7340A56CAEDC044696A168870549A6A7F6F56961E84A54BD9970B8A",
56 » } else { 56 » » » "Keccak384": "1B84E62A46E5A201861754AF5DC95C4A1A69CAF4A796AE405680161E29572641F5FA1E8641D7958336EE7B11C58F73E9",
57 » » encoded = []byte(t.input) 57 » » » "Keccak512": "8630C13CBD066EA74BBE7FE468FEC1DEE10EDC1254FB4C1B7C5FD69B646E44160B8CE01D05A0908CA790DFB080F4B513BC3B6225ECE7A810371441A5AC666EB9",
58 » } 58 » » },
59 » // Write input data each digests, based on the test specification t. 59 » },
60 » for i := 0; i < t.repeatCount || i == 0; i++ { 60 » {
61 » » for size := range t.expectedOutputs { 61 » » desc: "short-248b",
62 » » » testDigests[size].Write(encoded) 62 » » input: decodeHex("84FB51B517DF6C5ACCB5D022F8F28DA09B10232D42320FFC32DBECC3835B29"),
63 » » } 63 » » repeat: 1,
64 » } 64 » » want: map[string]string{
65 65 » » » "Keccak224": "81AF3A7A5BD4C1F948D6AF4B96F93C3B0CF9C0E7A6DA6FCD71EEC7F6",
66 » // Verify that each output size produced the expected output. 66 » » » "Keccak256": "D477FB02CAAA95B3280EC8EE882C29D9E8A654B21EF178E0F97571BF9D4D3C1C",
67 » for size, want := range t.expectedOutputs { 67 » » » "Keccak384": "503DCAA4ADDA5A9420B2E436DD62D9AB2E0254295C2982EF67FCE40F117A2400AB492F7BD5D133C6EC2232268BC27B42",
68 » » got := strings.ToLower(hex.EncodeToString(testDigests[size].Sum(nil))) 68 » » » "Keccak512": "9D8098D8D6EDBBAA2BCFC6FB2F89C3EAC67FEC25CDFE75AA7BD570A648E8C8945FF2EC280F6DCF73386109155C5BBC444C707BB42EAB873F5F7476657B1BC1A8",
69 » » if got != strings.ToLower(want) { 69 » » },
70 » » » errorInput := t.input 70 » },
71 » » » if !t.inputHex { 71 » {
72 » » » » errorInput = "\"" + errorInput + "\"" 72 » » desc: "short-264b",
73 » » » } else { 73 » » input: decodeHex("DE8F1B3FAA4B7040ED4563C3B8E598253178E87E4D0DF75E4FF2F2DEDD5A0BE046"),
74 » » » » errorInput = "0x" + strings.ToLower(errorInput) 74 » » repeat: 1,
75 » » want: map[string]string{
76 » » » "Keccak224": "F217812E362EC64D4DC5EACFABC165184BFA456E5C32C2C7900253D0",
77 » » » "Keccak256": "E78C421E6213AFF8DE1F025759A4F2C943DB62BBDE359C8737E19B3776ED2DD2",
78 » » » "Keccak384": "CF38764973F1EC1C34B5433AE75A3AAD1AAEF6AB197850C56C8617BCD6A882F6666883AC17B2DCCDBAA647075D0972B5",
79 » » » "Keccak512": "9A7688E31AAF40C15575FC58C6B39267AAD3722E696E518A9945CF7F7C0FEA84CB3CB2E9F0384A6B5DC671ADE7FB4D2B27011173F3EEEAF17CB451CF26542031",
80 » » },
81 » },
82 }
83
84 // longTestVectors stores a single 64 MiB long testVector.
85 // This is a prefix of the "very long" 1 GiB test vector defined in the Keccak standard at
86 // http://keccak.noekeon.org/. We truncate here to keep the running time on the order of seconds.
87 var longKeccakTestVectors = []testVector{
88 » {
89 » » desc: "long-64MiB",
90 » » input: []byte("abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmno"),
91 » » repeat: 1024 * 1024,
92 » » want: map[string]string{
93 » » » "Keccak224": "50E35E40980FEEFF1EA490957B0E970257F75EA0D410EE0F0B8A7A58",
94 » » » "Keccak256": "5015A4935F0B51E091C6550A94DCD262C08998232CCAA22E7F0756DEAC0DC0D0",
95 » » » "Keccak384": "7907A8D0FAA7BC6A90FE14C6C958C956A0877E751455D8F13ACDB96F144B5896E716C06EC0CB56557A94EF5C3355F6F3",
96 » » » "Keccak512": "3EC327D6759F769DEB74E80CA70C831BC29CAB048A4BF4190E4A1DD5C6507CF2B4B58937FDE81D36014E7DFE1B1DD8B0F27CB7614F9A645FEC114F1DAAEFC056",
97 » » },
98 » },
99 }
100
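The size claims in the comment are easy to verify: the 64-byte pattern repeated 1024 × 1024 times is exactly 64 MiB, while the 16 × 1024 × 1024 repetitions used in the left patch set come to 1 GiB. A quick standalone check (a sketch, not part of the patch):

    package main

    import "fmt"

    func main() {
    	pattern := "abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmno"
    	fmt.Println(len(pattern) * 1024 * 1024)      // 67108864 bytes   = 64 MiB
    	fmt.Println(len(pattern) * 16 * 1024 * 1024) // 1073741824 bytes = 1 GiB
    }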
101 // TestKeccakVectors checks that correct output is produced for a set of known testVectors.
102 func TestKeccakVectors(t *testing.T) {
103 » testCases := append([]testVector{}, shortKeccakTestVectors...)
104 » if !testing.Short() {
105 » » testCases = append(testCases, longKeccakTestVectors...)
106 » }
107 » for _, tc := range testCases {
108 » » for alg, want := range tc.want {
109 » » » testDigests[alg].Reset()
110 » » » // Write input data each digests, based on the test specification t.
111 » » » for i := 0; i < tc.repeat; i++ {
112 » » » » testDigests[alg].Write(tc.input)
75 } 113 }
76 » » » if t.repeatCount > 0 { 114 » » » // Verify that each algorithm version produced the expected output.
77 » » » » errorInput += fmt.Sprintf("×%d", t.repeatCount) 115 » » » got := strings.ToUpper(hex.EncodeToString(testDigests[alg].Sum(nil)))
116 » » » if got != want {
117 » » » » t.Errorf("%s, alg=%s\ngot %q, want %q", tc.desc, alg, got, want)
78 } 118 }
79 » » » return fmt.Errorf("SHA3[%d](%s) = %q, wanted %q", testDigests[size].Size()*8, errorInput, got, strings.ToLower(want)) 119 » » }
80 » » } 120 » }
81 » } 121 }
82 » return nil 122
83 } 123 // dumpState is a debugging function to pretty-print the internal state of the hash.
84
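For a reader following along, the same flow can be exercised through the exported constructors that the benchmarks below rely on. This is only a sketch of an equivalent check for the first short vector, using the file's existing hex, strings, and testing imports and assuming NewKeccak256 satisfies hash.Hash as it does in benchmarkBulkHash; it is not part of the patch:

    // Hypothetical extra test, shown for illustration only.
    func TestKeccak256SingleVector(t *testing.T) {
    	d := NewKeccak256()
    	d.Write(decodeHex("CC"))
    	got := strings.ToUpper(hex.EncodeToString(d.Sum(nil)))
    	const want = "EEAD6DBFC7340A56CAEDC044696A168870549A6A7F6F56961E84A54BD9970B8A"
    	if got != want {
    		t.Errorf("Keccak256(0xCC) = %s, want %s", got, want)
    	}
    }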
85 func (d *digest) dumpState() { 124 func (d *digest) dumpState() {
86 » fmt.Printf("%d B SHA3 hash, %d input B absorbed\n", d.outputSize, d.totalAbsorbed) 125 » fmt.Printf("SHA3 hash, %d B output, %d B capacity (%d B rate)\n", d.outputSize, d.capacity, d.rate())
87 » fmt.Printf("%d B capacity, %d B data rate\n", d.capacity(), d.rate()) 126 » fmt.Printf("Internal state after absorbing %d B:\n", d.absorbed)
88 127
89 for x := 0; x < sliceSize; x++ { 128 for x := 0; x < sliceSize; x++ {
90 for y := 0; y < sliceSize; y++ { 129 for y := 0; y < sliceSize; y++ {
91 fmt.Printf("%v, ", d.a[x*sliceSize+y]) 130 fmt.Printf("%v, ", d.a[x*sliceSize+y])
92 } 131 }
93 fmt.Println("") 132 fmt.Println("")
94 } 133 }
95 } 134 }
96 135
97 // TestUnalignedWrite tests that writing data in an arbitrary pattern with small input buffers 136 // TestUnalignedWrite tests that writing data in an arbitrary pattern with small input buffers.
98 func TestUnalignedWrite(t *testing.T) { 137 func TestUnalignedWrite(t *testing.T) {
99 buf := sequentialBytes(0x10000) 138 buf := sequentialBytes(0x10000)
100 » for size := range testDigests { 139 » for alg, d := range testDigests {
101 » » testDigests[size].Reset() 140 » » d.Reset()
102 » » testDigests[size].Write(buf) 141 » » d.Write(buf)
103 » » want := testDigests[size].Sum(nil) 142 » » want := d.Sum(nil)
104 » » testDigests[size].Reset() 143 » » d.Reset()
105 » » i := 0 144 » » for i := 0; i < len(buf); {
106 » » for i < len(buf) {
107 // Cycle through offsets which make a 137 byte sequence. 145 // Cycle through offsets which make a 137 byte sequence.
108 // Because 137 is prime this sequence should exercise all corner cases. 146 // Because 137 is prime this sequence should exercise all corner cases.
109 offsets := [17]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1} 147 offsets := [17]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1}
110 for _, j := range offsets { 148 for _, j := range offsets {
111 j = minInt(j, len(buf)-i) 149 j = minInt(j, len(buf)-i)
112 » » » » testDigests[size].Write(buf[i : i+j]) 150 » » » » d.Write(buf[i : i+j])
113 i += j 151 i += j
114 } 152 }
115 } 153 }
116 » » got := testDigests[size].Sum(nil) 154 » » got := d.Sum(nil)
117 » » if !bytes.Equal(want, got) { 155 » » if !bytes.Equal(got, want) {
118 » » » t.Fatalf(fmt.Sprintf("Unalighned SHA3[%d] = wanted %x, got %x", testDigests[size].Size()*8, want, got)) 156 » » » t.Errorf("Unaligned writes, alg=%s\ngot %q, want %q", alg, got, want)
119 } 157 }
120 } 158 }
121 } 159 }
122 160
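The offset cycle deserves a note: 1 + 2 + … + 16 plus the trailing 1 is 137 bytes per pass, and 137 is coprime to every data rate used here (assuming the 144/136/104/72-byte rates implied by the capacities above), so over the 64 KiB buffer the small writes eventually straddle a block boundary at every possible position. A standalone sketch of that check:

    package main

    import "fmt"

    func gcd(a, b int) int {
    	for b != 0 {
    		a, b = b, a%b
    	}
    	return a
    }

    func main() {
    	sum := 1 // the trailing 1 in the offsets array
    	for j := 1; j <= 16; j++ {
    		sum += j
    	}
    	fmt.Println(sum) // 137
    	for _, rate := range []int{144, 136, 104, 72} { // assumed rates in bytes
    		fmt.Println(rate, gcd(137, rate)) // gcd is 1 in every case
    	}
    }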
123 // sequentialBytes produces a buffer of size consecutive bytes 0x00, 0x01, ..., used for testing. 161 // sequentialBytes produces a buffer of size consecutive bytes 0x00, 0x01, ..., used for testing.
124 func sequentialBytes(size int) []byte { 162 func sequentialBytes(size int) []byte {
125 result := make([]byte, size) 163 result := make([]byte, size)
126 for i := range result { 164 for i := range result {
127 result[i] = byte(i) 165 result[i] = byte(i)
128 } 166 }
129 return result 167 return result
130 } 168 }
131 169
132 // benchmarkBlockWrite tests the speed of writing data and never calling the permutation function. 170 // benchmarkBlockWrite tests the speed of writing data and never calling the permutation function.
133 func benchmarkBlockWrite(b *testing.B, d *digest) { 171 func benchmarkBlockWrite(b *testing.B, d *digest) {
134 b.StopTimer() 172 b.StopTimer()
135 d.Reset() 173 d.Reset()
136 // Write all but the last byte of a block, to ensure that the permutation is not called. 174 // Write all but the last byte of a block, to ensure that the permutation is not called.
137 data := sequentialBytes(d.rate() - 1) 175 data := sequentialBytes(d.rate() - 1)
138 » b.SetBytes(int64(len(data)) - 1) 176 » b.SetBytes(int64(len(data)))
139 » b.StartTimer() 177 » b.StartTimer()
140 » for i := 0; i < b.N; i++ { 178 » for i := 0; i < b.N; i++ {
141 » » d.totalAbsorbed = 0 // Reset totalAbsorbed to avoid ever calling the permutation function 179 » » d.absorbed = 0 // Reset absorbed to avoid ever calling the permutation function
142 d.Write(data) 180 d.Write(data)
143 } 181 }
144 b.StopTimer() 182 b.StopTimer()
145 d.Reset() 183 d.Reset()
146 } 184 }
147 185
148 // BenchmarkPermutationFunction measures the speed of the permutation function with no input data. 186 // BenchmarkPermutationFunction measures the speed of the permutation function with no input data.
149 func BenchmarkPermutationFunction(b *testing.B) { 187 func BenchmarkPermutationFunction(b *testing.B) {
150 b.StopTimer() 188 b.StopTimer()
151 » d := testDigests[512] 189 » d := testDigests["Keccak512"]
152 » d.Reset() 190 » d.Reset()
191 » b.SetBytes(int64(stateSize))
153 b.StartTimer() 192 b.StartTimer()
154 for i := 0; i < b.N; i++ { 193 for i := 0; i < b.N; i++ {
155 d.keccakF() 194 d.keccakF()
156 } 195 }
157 b.StopTimer() 196 b.StopTimer()
158 d.Reset() 197 d.Reset()
159 } 198 }
160 199
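Because SetBytes is given the full state size, the figure this benchmark reports is a ceiling for the bulk numbers below: each permutation call absorbs only rate bytes of input, so, ignoring XOR and copy overhead, a Keccak512 bulk hash can run at no more than 72/200 ≈ 36 % of the permutation benchmark's throughput, and Keccak224 at no more than 144/200 = 72 %. This assumes stateSize is the 200-byte Keccak-f[1600] state and the rates derived earlier; a tiny sketch of the ratio:

    package main

    import "fmt"

    func main() {
    	const stateBytes = 200.0 // assumed size behind stateSize
    	rates := []struct {
    		alg  string
    		rate float64
    	}{{"Keccak224", 144}, {"Keccak256", 136}, {"Keccak384", 104}, {"Keccak512", 72}}
    	for _, r := range rates {
    		fmt.Printf("%s: at most %.0f%% of raw keccakF throughput\n", r.alg, 100*r.rate/stateBytes)
    	}
    }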
161 // BenchmarkSingleByteWrite tests the latency from writing a single byte 200 // BenchmarkSingleByteWrite tests the latency from writing a single byte
162 func BenchmarkSingleByteWrite(b *testing.B) { 201 func BenchmarkSingleByteWrite(b *testing.B) {
163 b.StopTimer() 202 b.StopTimer()
164 » d := testDigests[512] 203 » d := testDigests["Keccak512"]
165 » d.Reset() 204 » d.Reset()
166
167 data := sequentialBytes(1) //1 byte buffer 205 data := sequentialBytes(1) //1 byte buffer
168 b.SetBytes(int64(d.rate()) - 1) 206 b.SetBytes(int64(d.rate()) - 1)
169 b.StartTimer() 207 b.StartTimer()
170 for i := 0; i < b.N; i++ { 208 for i := 0; i < b.N; i++ {
171 » » d.totalAbsorbed = 0 // rest the written count to avoid ever calling the permutation function 209 » » d.absorbed = 0 // Reset absorbed to avoid ever calling the permutation function
172 210
173 // Write all but the last byte of a block, one byte at a time. 211 // Write all but the last byte of a block, one byte at a time.
174 for j := 0; j < d.rate()-1; j++ { 212 for j := 0; j < d.rate()-1; j++ {
175 d.Write(data) 213 d.Write(data)
176 } 214 }
177 } 215 }
178 b.StopTimer() 216 b.StopTimer()
179 d.Reset() 217 d.Reset()
180 } 218 }
181 219
182 // BenchmarkSingleByteX measures the block write speed for each size of the digest. 220 // BenchmarkSingleByteX measures the block write speed for each size of the digest.
183 func BenchmarkBlockWrite512(b *testing.B) { benchmarkBlockWrite(b, testDigests[512]) } 221 func BenchmarkBlockWrite512(b *testing.B) { benchmarkBlockWrite(b, testDigests["Keccak512"]) }
184 func BenchmarkBlockWrite384(b *testing.B) { benchmarkBlockWrite(b, testDigests[384]) } 222 func BenchmarkBlockWrite384(b *testing.B) { benchmarkBlockWrite(b, testDigests["Keccak384"]) }
185 func BenchmarkBlockWrite256(b *testing.B) { benchmarkBlockWrite(b, testDigests[256]) } 223 func BenchmarkBlockWrite256(b *testing.B) { benchmarkBlockWrite(b, testDigests["Keccak256"]) }
186 func BenchmarkBlockWrite224(b *testing.B) { benchmarkBlockWrite(b, testDigests[224]) } 224 func BenchmarkBlockWrite224(b *testing.B) { benchmarkBlockWrite(b, testDigests["Keccak224"]) }
187 225
188 // benchmarkBulkHash tests the speed to hash a 16 KiB buffer. 226 // benchmarkBulkHash tests the speed to hash a 16 KiB buffer.
189 func benchmarkBulkHash(b *testing.B, h hash.Hash) { 227 func benchmarkBulkHash(b *testing.B, h hash.Hash) {
190 b.StopTimer() 228 b.StopTimer()
191 h.Reset() 229 h.Reset()
192 size := 1 << 14 230 size := 1 << 14
193 data := sequentialBytes(size) 231 data := sequentialBytes(size)
194 b.SetBytes(int64(size)) 232 b.SetBytes(int64(size))
195 b.StartTimer() 233 b.StartTimer()
196 234
197 for i := 0; i < b.N; i++ { 235 for i := 0; i < b.N; i++ {
198 h.Write(data) 236 h.Write(data)
199 h.Sum(nil) 237 h.Sum(nil)
200 } 238 }
201 b.StopTimer() 239 b.StopTimer()
202 h.Reset() 240 h.Reset()
203 } 241 }
204 242
205 // benchmarkBulkHashSHA3_X test the speed to hash a 16 KiB buffer by calling benchmarkBulkHash. 243 // benchmarkBulkKeccakX test the speed to hash a 16 KiB buffer by calling benchmarkBulkHash.
206 func BenchmarkBulkHashSHA3_512(b *testing.B) { benchmarkBulkHash(b, New512()) } 244 func BenchmarkBulkKeccak512(b *testing.B) { benchmarkBulkHash(b, NewKeccak512()) }
207 func BenchmarkBulkHashSHA3_384(b *testing.B) { benchmarkBulkHash(b, New384()) } 245 func BenchmarkBulkKeccak384(b *testing.B) { benchmarkBulkHash(b, NewKeccak384()) }
208 func BenchmarkBulkHashSHA3_256(b *testing.B) { benchmarkBulkHash(b, New256()) } 246 func BenchmarkBulkKeccak256(b *testing.B) { benchmarkBulkHash(b, NewKeccak256()) }
209 func BenchmarkBulkHashSHA3_224(b *testing.B) { benchmarkBulkHash(b, New224()) } 247 func BenchmarkBulkKeccak224(b *testing.B) { benchmarkBulkHash(b, NewKeccak224()) }
210
211 // longTestVectors stores a lengthy test vector for each output size.
212 // The computed test vector is about 413 MiB long, taken from http://keccak.noekeon.org/.
213 var longTestVectors = []hashTest{
214 » {
215 » » input: "abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmno",
216 » » repeatCount: 16 * 1024 * 1024,
217 » » inputHex: false,
218 » » expectedOutputs: map[int]string{
219 » » » 224: "C42E4AEE858E1A8AD2976896B9D23DD187F64436EE15969AFDBC68C5",
220 » » » 256: "5F313C39963DCF792B5470D4ADE9F3A356A3E4021748690A958372E2B06F82A4",
221 » » » 384: "9B7168B4494A80A86408E6B9DC4E5A1837C85DD8FF452ED410F2832959C08C8C0D040A892EB9A755776372D4A8732315",
222 » » » 512: "3E122EDAF37398231CFACA4C7C216C9D66D5B899EC1D7AC617C40C7261906A45FC01617A021E5DA3BD8D4182695B5CB785A28237CBB167590E34718E56D8AAB8",
223 » » },
224 » },
225 }
226
227 // TestLongVectors tests a series of very long test vectors stored in longTestVectors.
228 // This test is disabled for short testing mode as it is relatively time-consuming.
229 func TestLongVectors(t *testing.T) {
230 » if testing.Short() {
231 » » return
232 » }
233
234 » testVector(t, longTestVectors)
235 }
236
237 // shortTestVectors stores a series of short test vector for each output size.
238 // Test vectors of 8, 248, and 264 bits from http://keccak.noekeon.org/ are included below.
239 // The standard defines additional test vectors for all bit sizes between the range of 0 and 2047.
240 // Because the current implementation can only handle an integral number of bytes,
241 // most of the standard test vectors can't be computed.
242 var shortTestVectors = []hashTest{
243 » {
244 » » input: "CC",
245 » » repeatCount: 1,
246 » » inputHex: true,
247 » » expectedOutputs: map[int]string{
248 » » » 224: "A9CAB59EB40A10B246290F2D6086E32E3689FAF1D26B470C899F2802",
249 » » » 256: "EEAD6DBFC7340A56CAEDC044696A168870549A6A7F6F56961E84A54BD9970B8A",
250 » » » 384: "1B84E62A46E5A201861754AF5DC95C4A1A69CAF4A796AE405680161E29572641F5FA1E8641D7958336EE7B11C58F73E9",
251 » » » 512: "8630C13CBD066EA74BBE7FE468FEC1DEE10EDC1254FB4C1B7C5FD69B646E44160B8CE01D05A0908CA790DFB080F4B513BC3B6225ECE7A810371441A5AC666EB9",
252 » » },
253 » },
254 » {
255 » » input: "84FB51B517DF6C5ACCB5D022F8F28DA09B10232D42320FFC32DBECC3835B29",
256 » » repeatCount: 1,
257 » » inputHex: true,
258 » » expectedOutputs: map[int]string{
259 » » » 224: "81AF3A7A5BD4C1F948D6AF4B96F93C3B0CF9C0E7A6DA6FCD71EEC7F6",
260 » » » 256: "D477FB02CAAA95B3280EC8EE882C29D9E8A654B21EF178E0F97571BF9D4D3C1C",
261 » » » 384: "503DCAA4ADDA5A9420B2E436DD62D9AB2E0254295C2982EF67FCE40F117A2400AB492F7BD5D133C6EC2232268BC27B42",
262 » » » 512: "9D8098D8D6EDBBAA2BCFC6FB2F89C3EAC67FEC25CDFE75AA7BD570A648E8C8945FF2EC280F6DCF73386109155C5BBC444C707BB42EAB873F5F7476657B1BC1A8",
263 » » },
264 » },
265 » {
266 » » input: "DE8F1B3FAA4B7040ED4563C3B8E598253178E87E4D0DF75E4FF2F2DEDD5A0BE046",
267 » » repeatCount: 1,
268 » » inputHex: true,
269 » » expectedOutputs: map[int]string{
270 » » » 224: "F217812E362EC64D4DC5EACFABC165184BFA456E5C32C2C7900253D0",
271 » » » 256: "E78C421E6213AFF8DE1F025759A4F2C943DB62BBDE359C8737E19B3776ED2DD2",
272 » » » 384: "CF38764973F1EC1C34B5433AE75A3AAD1AAEF6AB197850C56C8617BCD6A882F6666883AC17B2DCCDBAA647075D0972B5",
273 » » » 512: "9A7688E31AAF40C15575FC58C6B39267AAD3722E696E518A9945CF7F7C0FEA84CB3CB2E9F0384A6B5DC671ADE7FB4D2B27011173F3EEEAF17CB451CF26542031",
274 » » },
275 » },
276 }
277
278 // TestShortVectors tests a series of short (non-repeated) test vectors for each output size.
279 func TestShortVectors(t *testing.T) {
280 » testVector(t, shortTestVectors)
281 }
282
283 // testVector is a helper function to compute tests on each size of SHA3.
284 func testVector(t *testing.T, v []hashTest) {
285 » for _, test := range v {
286 » » error := test.Test(testDigests)
287 » » if error != nil {
288 » » » t.Fatalf(error.Error())
289 » » }
290 » }
291 }