M4RI  1.0.1
xor_template.h
1 #include <m4ri/m4ri_config.h>
2 #include <m4ri/misc.h>
3 
12 static inline void __M4RI_TEMPLATE_NAME(_mzd_combine)(word *m, word const *t[N], wi_t wide) {
13  assert(1 <= N && N <= 8);
14 
15 #if __M4RI_HAVE_SSE2
16 
17  assert( (__M4RI_ALIGNMENT(m,16) == 8) | (__M4RI_ALIGNMENT(m,16) == 0) );
18 
19  switch(N) { /* we rely on the compiler to optimise this switch away, it reads nicer than #if */
20  case 8: assert(__M4RI_ALIGNMENT(m,16) == __M4RI_ALIGNMENT(t[7],16));
21  case 7: assert(__M4RI_ALIGNMENT(m,16) == __M4RI_ALIGNMENT(t[6],16));
22  case 6: assert(__M4RI_ALIGNMENT(m,16) == __M4RI_ALIGNMENT(t[5],16));
23  case 5: assert(__M4RI_ALIGNMENT(m,16) == __M4RI_ALIGNMENT(t[4],16));
24  case 4: assert(__M4RI_ALIGNMENT(m,16) == __M4RI_ALIGNMENT(t[3],16));
25  case 3: assert(__M4RI_ALIGNMENT(m,16) == __M4RI_ALIGNMENT(t[2],16));
26  case 2: assert(__M4RI_ALIGNMENT(m,16) == __M4RI_ALIGNMENT(t[1],16));
27  case 1: assert(__M4RI_ALIGNMENT(m,16) == __M4RI_ALIGNMENT(t[0],16));
28  };
29 
30  if (__M4RI_UNLIKELY(__M4RI_ALIGNMENT(m,16) == 8)) {
31  switch(N) { /* we rely on the compiler to optimise this switch away, it reads nicer than #if */
32  case 8: *m++ ^= *t[0]++ ^ *t[1]++ ^ *t[2]++ ^ *t[3]++ ^ *t[4]++ ^ *t[5]++ ^ *t[6]++ ^ *t[7]++; break;
33  case 7: *m++ ^= *t[0]++ ^ *t[1]++ ^ *t[2]++ ^ *t[3]++ ^ *t[4]++ ^ *t[5]++ ^ *t[6]++; break;
34  case 6: *m++ ^= *t[0]++ ^ *t[1]++ ^ *t[2]++ ^ *t[3]++ ^ *t[4]++ ^ *t[5]++; break;
35  case 5: *m++ ^= *t[0]++ ^ *t[1]++ ^ *t[2]++ ^ *t[3]++ ^ *t[4]++; break;
36  case 4: *m++ ^= *t[0]++ ^ *t[1]++ ^ *t[2]++ ^ *t[3]++; break;
37  case 3: *m++ ^= *t[0]++ ^ *t[1]++ ^ *t[2]++; break;
38  case 2: *m++ ^= *t[0]++ ^ *t[1]++; break;
39  case 1: *m++ ^= *t[0]++; break;
40  };
41  wide--;
42  }
43 
44  __m128i *m__ = (__m128i*)m;
45  __m128i *t__[N];
46 
47  switch(N) { /* we rely on the compiler to optimise this switch away, it reads nicer than #if */
48  case 8: t__[N-8] = (__m128i*)t[N-8];
49  case 7: t__[N-7] = (__m128i*)t[N-7];
50  case 6: t__[N-6] = (__m128i*)t[N-6];
51  case 5: t__[N-5] = (__m128i*)t[N-5];
52  case 4: t__[N-4] = (__m128i*)t[N-4];
53  case 3: t__[N-3] = (__m128i*)t[N-3];
54  case 2: t__[N-2] = (__m128i*)t[N-2];
55  case 1: t__[N-1] = (__m128i*)t[N-1];
56  };
57 
58  __m128i xmm0, xmm1, xmm2, xmm3;
59 
60  for(wi_t i=0; i< (wide>>1); i++) {
61 
62  switch(N) { /* we rely on the compiler to optimise this switch away, it reads nicer than #if */
63  case 8:
64  xmm0 = _mm_xor_si128(*t__[0]++, *t__[1]++); xmm1 = _mm_xor_si128(*t__[2]++, *t__[3]++);
65  xmm2 = _mm_xor_si128(*t__[4]++, *t__[5]++); xmm3 = _mm_xor_si128(*t__[6]++, *t__[7]++);
66  xmm0 = _mm_xor_si128(xmm0, xmm1); xmm2 = _mm_xor_si128(xmm2, xmm3);
67  xmm0 = _mm_xor_si128(xmm0, xmm2); xmm0 = _mm_xor_si128(*m__, xmm0);
68  break;
69  case 7:
70  xmm0 = _mm_xor_si128(*t__[0]++, *t__[1]++); xmm1 = _mm_xor_si128(*t__[2]++, *t__[3]++);
71  xmm0 = _mm_xor_si128(xmm0, *t__[4]++); xmm1 = _mm_xor_si128(xmm1, *t__[5]++);
72  xmm0 = _mm_xor_si128(xmm0, *t__[6]++); xmm0 = _mm_xor_si128(xmm0, xmm1);
73  xmm0 = _mm_xor_si128(*m__, xmm0);
74  break;
75  case 6:
76  xmm0 = _mm_xor_si128(*t__[0]++, *t__[1]++); xmm1 = _mm_xor_si128(*t__[2]++, *t__[3]++);
77  xmm0 = _mm_xor_si128(xmm0, *t__[4]++); xmm1 = _mm_xor_si128(xmm1, *t__[5]++);
78  xmm0 = _mm_xor_si128(xmm0, xmm1); xmm0 = _mm_xor_si128(*m__, xmm0);
79  break;
80  case 5:
81  xmm0 = _mm_xor_si128(*t__[0]++, *t__[1]++); xmm1 = _mm_xor_si128(*t__[2]++, *t__[3]++);
82  xmm0 = _mm_xor_si128(xmm0, *t__[4]++); xmm0 = _mm_xor_si128(xmm0, xmm1);
83  xmm0 = _mm_xor_si128(*m__, xmm0);
84  break;
85  case 4:
86  xmm0 = _mm_xor_si128(*t__[0]++, *t__[1]++); xmm1 = _mm_xor_si128(*t__[2]++, *t__[3]++);
87  xmm0 = _mm_xor_si128(xmm0, xmm1); xmm0 = _mm_xor_si128(*m__, xmm0);
88  break;
89  case 3:
90  xmm0 = _mm_xor_si128(*t__[0]++, *t__[1]++); xmm1 = _mm_xor_si128(*m__, *t__[2]++);
91  xmm0 = _mm_xor_si128(xmm0, xmm1);
92  break;
93  case 2:
94  xmm0 = _mm_xor_si128(*t__[0]++, *t__[1]++); xmm0 = _mm_xor_si128(*m__, xmm0);
95  break;
96  case 1:
97  xmm0 = _mm_xor_si128(*m__, *t__[0]++);
98  break;
99  };
100  *m__++ = xmm0;
101  }
102 
103  if(wide & 0x1) {
104  m = (word*)m__;
105  switch(N) { /* we rely on the compiler to optimise this switch away, it reads nicer than #if */
106  case 8: t[N-8] = (word*)t__[N-8];
107  case 7: t[N-7] = (word*)t__[N-7];
108  case 6: t[N-6] = (word*)t__[N-6];
109  case 5: t[N-5] = (word*)t__[N-5];
110  case 4: t[N-4] = (word*)t__[N-4];
111  case 3: t[N-3] = (word*)t__[N-3];
112  case 2: t[N-2] = (word*)t__[N-2];
113  case 1: t[N-1] = (word*)t__[N-1];
114  }
115 
116  switch(N) { /* we rely on the compiler to optimise this switch away, it reads nicer than #if */
117  case 8: *m++ ^= *t[0]++ ^ *t[1]++ ^ *t[2]++ ^ *t[3]++ ^ *t[4]++ ^ *t[5]++ ^ *t[6]++ ^ *t[7]++; break;
118  case 7: *m++ ^= *t[0]++ ^ *t[1]++ ^ *t[2]++ ^ *t[3]++ ^ *t[4]++ ^ *t[5]++ ^ *t[6]++; break;
119  case 6: *m++ ^= *t[0]++ ^ *t[1]++ ^ *t[2]++ ^ *t[3]++ ^ *t[4]++ ^ *t[5]++; break;
120  case 5: *m++ ^= *t[0]++ ^ *t[1]++ ^ *t[2]++ ^ *t[3]++ ^ *t[4]++; break;
121  case 4: *m++ ^= *t[0]++ ^ *t[1]++ ^ *t[2]++ ^ *t[3]++; break;
122  case 3: *m++ ^= *t[0]++ ^ *t[1]++ ^ *t[2]++; break;
123  case 2: *m++ ^= *t[0]++ ^ *t[1]++; break;
124  case 1: *m++ ^= *t[0]++; break;
125  }
126  }
127  return;
128 #else
129 
130  for(wi_t i=0; i< wide; i++) {
131  switch(N) { /* we rely on the compiler to optimise this switch away, it reads nicer than #if */
132  case 8: *m++ ^= *t[0]++ ^ *t[1]++ ^ *t[2]++ ^ *t[3]++ ^ *t[4]++ ^ *t[5]++ ^ *t[6]++ ^ *t[7]++; break;
133  case 7: *m++ ^= *t[0]++ ^ *t[1]++ ^ *t[2]++ ^ *t[3]++ ^ *t[4]++ ^ *t[5]++ ^ *t[6]++; break;
134  case 6: *m++ ^= *t[0]++ ^ *t[1]++ ^ *t[2]++ ^ *t[3]++ ^ *t[4]++ ^ *t[5]++; break;
135  case 5: *m++ ^= *t[0]++ ^ *t[1]++ ^ *t[2]++ ^ *t[3]++ ^ *t[4]++; break;
136  case 4: *m++ ^= *t[0]++ ^ *t[1]++ ^ *t[2]++ ^ *t[3]++; break;
137  case 3: *m++ ^= *t[0]++ ^ *t[1]++ ^ *t[2]++; break;
138  case 2: *m++ ^= *t[0]++ ^ *t[1]++; break;
139  case 1: *m++ ^= *t[0]++; break;
140  }
141  }
142 
143  return;
144 #endif // __M4RI_HAVE_SSE2
145 }
146 
/*
 * _mzd_combine_u: m ^= t[0] ^ ... ^ t[N-1] over `wide` words, specialised
 * for the case where m (and every t[i]) is misaligned by exactly 8 bytes
 * with respect to a 16-byte boundary ("_u" = unaligned).
 *
 * Unlike _mzd_combine, the leading scalar word is processed
 * unconditionally — the 8-byte misalignment is a precondition here, not a
 * runtime test.  NOTE(review): this implies wide >= 1 in the SSE2 path —
 * confirm against callers.
 *
 * N (1..8) is a compile-time template macro; the switch(N) statements fall
 * through INTENTIONALLY and are resolved at compile time.
 */
155 static inline void __M4RI_TEMPLATE_NAME(_mzd_combine_u)(word *m, word const *t[N], wi_t wide) {
156  assert(1 <= N && N <= 8);
157 
158 #if __M4RI_HAVE_SSE2
159 
160  assert( (__M4RI_ALIGNMENT(m,16) == 8) );
161 
 /* Alignment checks: cases fall through so t[0..N-1] are all verified. */
162  switch(N) { /* we rely on the compiler to optimise this switch away, it reads nicer than #if */
163  case 8: assert(__M4RI_ALIGNMENT(m,16) == __M4RI_ALIGNMENT(t[7],16));
164  case 7: assert(__M4RI_ALIGNMENT(m,16) == __M4RI_ALIGNMENT(t[6],16));
165  case 6: assert(__M4RI_ALIGNMENT(m,16) == __M4RI_ALIGNMENT(t[5],16));
166  case 5: assert(__M4RI_ALIGNMENT(m,16) == __M4RI_ALIGNMENT(t[4],16));
167  case 4: assert(__M4RI_ALIGNMENT(m,16) == __M4RI_ALIGNMENT(t[3],16));
168  case 3: assert(__M4RI_ALIGNMENT(m,16) == __M4RI_ALIGNMENT(t[2],16));
169  case 2: assert(__M4RI_ALIGNMENT(m,16) == __M4RI_ALIGNMENT(t[1],16));
170  case 1: assert(__M4RI_ALIGNMENT(m,16) == __M4RI_ALIGNMENT(t[0],16));
171  };
172 
 /* XOR one leading word with scalar code so the pointers become 16-byte
  * aligned for the vector loop below. */
173  switch(N) { /* we rely on the compiler to optimise this switch away, it reads nicer than #if */
174  case 8: *m++ ^= *t[0]++ ^ *t[1]++ ^ *t[2]++ ^ *t[3]++ ^ *t[4]++ ^ *t[5]++ ^ *t[6]++ ^ *t[7]++; break;
175  case 7: *m++ ^= *t[0]++ ^ *t[1]++ ^ *t[2]++ ^ *t[3]++ ^ *t[4]++ ^ *t[5]++ ^ *t[6]++; break;
176  case 6: *m++ ^= *t[0]++ ^ *t[1]++ ^ *t[2]++ ^ *t[3]++ ^ *t[4]++ ^ *t[5]++; break;
177  case 5: *m++ ^= *t[0]++ ^ *t[1]++ ^ *t[2]++ ^ *t[3]++ ^ *t[4]++; break;
178  case 4: *m++ ^= *t[0]++ ^ *t[1]++ ^ *t[2]++ ^ *t[3]++; break;
179  case 3: *m++ ^= *t[0]++ ^ *t[1]++ ^ *t[2]++; break;
180  case 2: *m++ ^= *t[0]++ ^ *t[1]++; break;
181  case 1: *m++ ^= *t[0]++; break;
182  };
183  wide--;
184 
 /* Reinterpret the now 16-byte-aligned pointers as 128-bit vectors. */
185  __m128i *m__ = (__m128i*)m;
186  __m128i *t__[N];
187 
 /* Fallthrough: one cast per source operand.  The cast drops the const
  * qualifier, but the data is only ever read through t__. */
188  switch(N) { /* we rely on the compiler to optimise this switch away, it reads nicer than #if */
189  case 8: t__[N-8] = (__m128i*)t[N-8];
190  case 7: t__[N-7] = (__m128i*)t[N-7];
191  case 6: t__[N-6] = (__m128i*)t[N-6];
192  case 5: t__[N-5] = (__m128i*)t[N-5];
193  case 4: t__[N-4] = (__m128i*)t[N-4];
194  case 3: t__[N-3] = (__m128i*)t[N-3];
195  case 2: t__[N-2] = (__m128i*)t[N-2];
196  case 1: t__[N-1] = (__m128i*)t[N-1];
197  };
198 
199  __m128i xmm1;
200 
 /* Main vector loop: two words (128 bits) per iteration.  xmm1 starts as
  * m ^ t[0]; the fallthrough switch then folds in t[1..N-1] as a linear
  * chain (case k falls into case k-1). */
201  for(wi_t i=0; i< (wide>>1); i++) {
202  xmm1 = _mm_xor_si128(*m__, *t__[0]++);
203 
204  switch(N) { /* we rely on the compiler to optimise this switch away, it reads nicer than #if */
205  case 8: xmm1 = _mm_xor_si128(xmm1, *t__[N-7]++);
206  case 7: xmm1 = _mm_xor_si128(xmm1, *t__[N-6]++);
207  case 6: xmm1 = _mm_xor_si128(xmm1, *t__[N-5]++);
208  case 5: xmm1 = _mm_xor_si128(xmm1, *t__[N-4]++);
209  case 4: xmm1 = _mm_xor_si128(xmm1, *t__[N-3]++);
210  case 3: xmm1 = _mm_xor_si128(xmm1, *t__[N-2]++);
211  case 2: xmm1 = _mm_xor_si128(xmm1, *t__[N-1]++);
212  case 1: break;
213  };
214  *m__++ = xmm1;
215  }
216 
 /* Odd tail: one scalar word remains after the 2-word vector loop. */
217  if(wide & 0x1) {
218  m = (word*)m__;
 /* Convert the advanced vector pointers back to word pointers. */
219  switch(N) { /* we rely on the compiler to optimise this switch away, it reads nicer than #if */
220  case 8: t[N-8] = (word*)t__[N-8];
221  case 7: t[N-7] = (word*)t__[N-7];
222  case 6: t[N-6] = (word*)t__[N-6];
223  case 5: t[N-5] = (word*)t__[N-5];
224  case 4: t[N-4] = (word*)t__[N-4];
225  case 3: t[N-3] = (word*)t__[N-3];
226  case 2: t[N-2] = (word*)t__[N-2];
227  case 1: t[N-1] = (word*)t__[N-1];
228  }
229 
230  switch(N) { /* we rely on the compiler to optimise this switch away, it reads nicer than #if */
231  case 8: *m++ ^= *t[0]++ ^ *t[1]++ ^ *t[2]++ ^ *t[3]++ ^ *t[4]++ ^ *t[5]++ ^ *t[6]++ ^ *t[7]++; break;
232  case 7: *m++ ^= *t[0]++ ^ *t[1]++ ^ *t[2]++ ^ *t[3]++ ^ *t[4]++ ^ *t[5]++ ^ *t[6]++; break;
233  case 6: *m++ ^= *t[0]++ ^ *t[1]++ ^ *t[2]++ ^ *t[3]++ ^ *t[4]++ ^ *t[5]++; break;
234  case 5: *m++ ^= *t[0]++ ^ *t[1]++ ^ *t[2]++ ^ *t[3]++ ^ *t[4]++; break;
235  case 4: *m++ ^= *t[0]++ ^ *t[1]++ ^ *t[2]++ ^ *t[3]++; break;
236  case 3: *m++ ^= *t[0]++ ^ *t[1]++ ^ *t[2]++; break;
237  case 2: *m++ ^= *t[0]++ ^ *t[1]++; break;
238  case 1: *m++ ^= *t[0]++; break;
239  }
240  }
241  return;
242 #else
243 
 /* Portable scalar fallback: one word per iteration, no alignment
  * requirements. */
244  for(wi_t i=0; i< wide; i++) {
245  switch(N) { /* we rely on the compiler to optimise this switch away, it reads nicer than #if */
246  case 8: *m++ ^= *t[0]++ ^ *t[1]++ ^ *t[2]++ ^ *t[3]++ ^ *t[4]++ ^ *t[5]++ ^ *t[6]++ ^ *t[7]++; break;
247  case 7: *m++ ^= *t[0]++ ^ *t[1]++ ^ *t[2]++ ^ *t[3]++ ^ *t[4]++ ^ *t[5]++ ^ *t[6]++; break;
248  case 6: *m++ ^= *t[0]++ ^ *t[1]++ ^ *t[2]++ ^ *t[3]++ ^ *t[4]++ ^ *t[5]++; break;
249  case 5: *m++ ^= *t[0]++ ^ *t[1]++ ^ *t[2]++ ^ *t[3]++ ^ *t[4]++; break;
250  case 4: *m++ ^= *t[0]++ ^ *t[1]++ ^ *t[2]++ ^ *t[3]++; break;
251  case 3: *m++ ^= *t[0]++ ^ *t[1]++ ^ *t[2]++; break;
252  case 2: *m++ ^= *t[0]++ ^ *t[1]++; break;
253  case 1: *m++ ^= *t[0]++; break;
254  }
255  }
256 
257  return;
258 #endif // __M4RI_HAVE_SSE2
259 }
260 
/*
 * _mzd_combine_a: m ^= t[0] ^ ... ^ t[N-1] over `wide` words, specialised
 * for the case where m (and every t[i]) is 16-byte aligned ("_a" =
 * aligned).  No scalar prefix is needed; the vector loop starts at the
 * first word.
 *
 * N (1..8) is a compile-time template macro; the switch(N) statements fall
 * through INTENTIONALLY and are resolved at compile time.
 */
269 static inline void __M4RI_TEMPLATE_NAME(_mzd_combine_a)(word *m, word const *t[N], wi_t wide) {
270  assert(1 <= N && N <= 8);
271 
272 #if __M4RI_HAVE_SSE2
273 
274  assert( __M4RI_ALIGNMENT(m,16) == 0 );
275 
 /* Alignment checks: cases fall through so t[0..N-1] are all verified. */
276  switch(N) { /* we rely on the compiler to optimise this switch away, it reads nicer than #if */
277  case 8: assert(__M4RI_ALIGNMENT(m,16) == __M4RI_ALIGNMENT(t[7],16));
278  case 7: assert(__M4RI_ALIGNMENT(m,16) == __M4RI_ALIGNMENT(t[6],16));
279  case 6: assert(__M4RI_ALIGNMENT(m,16) == __M4RI_ALIGNMENT(t[5],16));
280  case 5: assert(__M4RI_ALIGNMENT(m,16) == __M4RI_ALIGNMENT(t[4],16));
281  case 4: assert(__M4RI_ALIGNMENT(m,16) == __M4RI_ALIGNMENT(t[3],16));
282  case 3: assert(__M4RI_ALIGNMENT(m,16) == __M4RI_ALIGNMENT(t[2],16));
283  case 2: assert(__M4RI_ALIGNMENT(m,16) == __M4RI_ALIGNMENT(t[1],16));
284  case 1: assert(__M4RI_ALIGNMENT(m,16) == __M4RI_ALIGNMENT(t[0],16));
285  };
286 
 /* Reinterpret the 16-byte-aligned pointers as 128-bit vectors. */
287  __m128i *m__ = (__m128i*)m;
288  __m128i *t__[N];
289 
 /* Fallthrough: one cast per source operand.  The cast drops the const
  * qualifier, but the data is only ever read through t__. */
290  switch(N) { /* we rely on the compiler to optimise this switch away, it reads nicer than #if */
291  case 8: t__[N-8] = (__m128i*)t[N-8];
292  case 7: t__[N-7] = (__m128i*)t[N-7];
293  case 6: t__[N-6] = (__m128i*)t[N-6];
294  case 5: t__[N-5] = (__m128i*)t[N-5];
295  case 4: t__[N-4] = (__m128i*)t[N-4];
296  case 3: t__[N-3] = (__m128i*)t[N-3];
297  case 2: t__[N-2] = (__m128i*)t[N-2];
298  case 1: t__[N-1] = (__m128i*)t[N-1];
299  };
300 
301  __m128i xmm1;
302 
 /* Main vector loop: two words (128 bits) per iteration.  xmm1 starts as
  * m ^ t[0]; the fallthrough switch then folds in t[1..N-1] as a linear
  * chain (case k falls into case k-1). */
303  for(wi_t i=0; i< (wide>>1); i++) {
304  xmm1 = _mm_xor_si128(*m__, *t__[0]++);
305 
306  switch(N) { /* we rely on the compiler to optimise this switch away, it reads nicer than #if */
307  case 8: xmm1 = _mm_xor_si128(xmm1, *t__[N-7]++);
308  case 7: xmm1 = _mm_xor_si128(xmm1, *t__[N-6]++);
309  case 6: xmm1 = _mm_xor_si128(xmm1, *t__[N-5]++);
310  case 5: xmm1 = _mm_xor_si128(xmm1, *t__[N-4]++);
311  case 4: xmm1 = _mm_xor_si128(xmm1, *t__[N-3]++);
312  case 3: xmm1 = _mm_xor_si128(xmm1, *t__[N-2]++);
313  case 2: xmm1 = _mm_xor_si128(xmm1, *t__[N-1]++);
314  case 1: break;
315  };
316  *m__++ = xmm1;
317  }
318 
 /* Odd tail: one scalar word remains after the 2-word vector loop. */
319  if(wide & 0x1) {
320  m = (word*)m__;
 /* Convert the advanced vector pointers back to word pointers. */
321  switch(N) { /* we rely on the compiler to optimise this switch away, it reads nicer than #if */
322  case 8: t[N-8] = (word*)t__[N-8];
323  case 7: t[N-7] = (word*)t__[N-7];
324  case 6: t[N-6] = (word*)t__[N-6];
325  case 5: t[N-5] = (word*)t__[N-5];
326  case 4: t[N-4] = (word*)t__[N-4];
327  case 3: t[N-3] = (word*)t__[N-3];
328  case 2: t[N-2] = (word*)t__[N-2];
329  case 1: t[N-1] = (word*)t__[N-1];
330  }
331 
332  switch(N) { /* we rely on the compiler to optimise this switch away, it reads nicer than #if */
333  case 8: *m++ ^= *t[0]++ ^ *t[1]++ ^ *t[2]++ ^ *t[3]++ ^ *t[4]++ ^ *t[5]++ ^ *t[6]++ ^ *t[7]++; break;
334  case 7: *m++ ^= *t[0]++ ^ *t[1]++ ^ *t[2]++ ^ *t[3]++ ^ *t[4]++ ^ *t[5]++ ^ *t[6]++; break;
335  case 6: *m++ ^= *t[0]++ ^ *t[1]++ ^ *t[2]++ ^ *t[3]++ ^ *t[4]++ ^ *t[5]++; break;
336  case 5: *m++ ^= *t[0]++ ^ *t[1]++ ^ *t[2]++ ^ *t[3]++ ^ *t[4]++; break;
337  case 4: *m++ ^= *t[0]++ ^ *t[1]++ ^ *t[2]++ ^ *t[3]++; break;
338  case 3: *m++ ^= *t[0]++ ^ *t[1]++ ^ *t[2]++; break;
339  case 2: *m++ ^= *t[0]++ ^ *t[1]++; break;
340  case 1: *m++ ^= *t[0]++; break;
341  }
342  }
343  return;
344 #else
345 
 /* Portable scalar fallback: one word per iteration, no alignment
  * requirements. */
346  for(wi_t i=0; i< wide; i++) {
347  switch(N) { /* we rely on the compiler to optimise this switch away, it reads nicer than #if */
348  case 8: *m++ ^= *t[0]++ ^ *t[1]++ ^ *t[2]++ ^ *t[3]++ ^ *t[4]++ ^ *t[5]++ ^ *t[6]++ ^ *t[7]++; break;
349  case 7: *m++ ^= *t[0]++ ^ *t[1]++ ^ *t[2]++ ^ *t[3]++ ^ *t[4]++ ^ *t[5]++ ^ *t[6]++; break;
350  case 6: *m++ ^= *t[0]++ ^ *t[1]++ ^ *t[2]++ ^ *t[3]++ ^ *t[4]++ ^ *t[5]++; break;
351  case 5: *m++ ^= *t[0]++ ^ *t[1]++ ^ *t[2]++ ^ *t[3]++ ^ *t[4]++; break;
352  case 4: *m++ ^= *t[0]++ ^ *t[1]++ ^ *t[2]++ ^ *t[3]++; break;
353  case 3: *m++ ^= *t[0]++ ^ *t[1]++ ^ *t[2]++; break;
354  case 2: *m++ ^= *t[0]++ ^ *t[1]++; break;
355  case 1: *m++ ^= *t[0]++; break;
356  }
357  }
358 
359  return;
360 #endif // __M4RI_HAVE_SSE2
361 }
Helper functions.
#define __M4RI_ALIGNMENT(addr, n)
Return alignment of addr w.r.t. n. For example the address 17 would be 1 aligned w.r.t. 16.
Definition: misc.h:421
#define __M4RI_UNLIKELY(cond)
Macro to help with branch prediction.
Definition: misc.h:449
uint64_t word
A word is the typical packed data structure to represent packed bits.
Definition: misc.h:87
int wi_t
Type of word indexes.
Definition: misc.h:80