@@ -1473,11 +1473,17 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> {
1473
1473
self . fatal ( "dynamic alloca not supported yet" )
1474
1474
}
1475
1475
1476
- fn load ( & mut self , ty : Self :: Type , ptr : Self :: Value , _align : Align ) -> Self :: Value {
1476
+ fn load ( & mut self , ty : Self :: Type , ptr : Self :: Value , align : Align ) -> Self :: Value {
1477
1477
let ( ptr, access_ty) = self . adjust_pointer_for_typed_access ( ptr, ty) ;
1478
1478
let loaded_val = ptr. const_fold_load ( self ) . unwrap_or_else ( || {
1479
1479
self . emit ( )
1480
- . load ( access_ty, None , ptr. def ( self ) , None , empty ( ) )
1480
+ . load (
1481
+ access_ty,
1482
+ None ,
1483
+ ptr. def ( self ) ,
1484
+ Some ( MemoryAccess :: ALIGNED ) ,
1485
+ std:: iter:: once ( Operand :: LiteralBit32 ( align. bytes ( ) as _ ) ) ,
1486
+ )
1481
1487
. unwrap ( )
1482
1488
. with_type ( access_ty)
1483
1489
} ) ;
@@ -1599,12 +1605,17 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> {
1599
1605
// ignore
1600
1606
}
1601
1607
1602
- fn store ( & mut self , val : Self :: Value , ptr : Self :: Value , _align : Align ) -> Self :: Value {
1608
+ fn store ( & mut self , val : Self :: Value , ptr : Self :: Value , align : Align ) -> Self :: Value {
1603
1609
let ( ptr, access_ty) = self . adjust_pointer_for_typed_access ( ptr, val. ty ) ;
1604
1610
let val = self . bitcast ( val, access_ty) ;
1605
1611
1606
1612
self . emit ( )
1607
- . store ( ptr. def ( self ) , val. def ( self ) , None , empty ( ) )
1613
+ . store (
1614
+ ptr. def ( self ) ,
1615
+ val. def ( self ) ,
1616
+ Some ( MemoryAccess :: ALIGNED ) ,
1617
+ std:: iter:: once ( Operand :: LiteralBit32 ( align. bytes ( ) as _ ) ) ,
1618
+ )
1608
1619
. unwrap ( ) ;
1609
1620
// FIXME(eddyb) this is meant to be a handle to the store instruction itself.
1610
1621
val
@@ -2262,9 +2273,9 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> {
2262
2273
fn memcpy (
2263
2274
& mut self ,
2264
2275
dst : Self :: Value ,
2265
- _dst_align : Align ,
2276
+ dst_align : Align ,
2266
2277
src : Self :: Value ,
2267
- _src_align : Align ,
2278
+ src_align : Align ,
2268
2279
size : Self :: Value ,
2269
2280
flags : MemFlags ,
2270
2281
) {
@@ -2302,12 +2313,29 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> {
2302
2313
}
2303
2314
} ) ;
2304
2315
2316
+ // Pass all operands as `additional_params` since rspirv doesn't allow specifying
2317
+ // extra operands after the first `MemoryAccess`
2318
+ let mut ops: SmallVec < [ _ ; 4 ] > = Default :: default ( ) ;
2319
+ ops. push ( Operand :: MemoryAccess ( MemoryAccess :: ALIGNED ) ) ;
2320
+ if src_align != dst_align {
2321
+ if self . emit ( ) . version ( ) . unwrap ( ) > ( 1 , 3 ) {
2322
+ ops. push ( Operand :: LiteralBit32 ( dst_align. bytes ( ) as _ ) ) ;
2323
+ ops. push ( Operand :: MemoryAccess ( MemoryAccess :: ALIGNED ) ) ;
2324
+ ops. push ( Operand :: LiteralBit32 ( src_align. bytes ( ) as _ ) ) ;
2325
+ } else {
2326
+ let align = dst_align. min ( src_align) ;
2327
+ ops. push ( Operand :: LiteralBit32 ( align. bytes ( ) as _ ) ) ;
2328
+ }
2329
+ } else {
2330
+ ops. push ( Operand :: LiteralBit32 ( dst_align. bytes ( ) as _ ) ) ;
2331
+ }
2332
+
2305
2333
if let Some ( ( dst, src) ) = typed_copy_dst_src {
2306
2334
if let Some ( const_value) = src. const_fold_load ( self ) {
2307
2335
self . store ( const_value, dst, Align :: from_bytes ( 0 ) . unwrap ( ) ) ;
2308
2336
} else {
2309
2337
self . emit ( )
2310
- . copy_memory ( dst. def ( self ) , src. def ( self ) , None , None , empty ( ) )
2338
+ . copy_memory ( dst. def ( self ) , src. def ( self ) , None , None , ops )
2311
2339
. unwrap ( ) ;
2312
2340
}
2313
2341
} else {
@@ -2318,7 +2346,7 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> {
2318
2346
size. def ( self ) ,
2319
2347
None ,
2320
2348
None ,
2321
- empty ( ) ,
2349
+ ops ,
2322
2350
)
2323
2351
. unwrap ( ) ;
2324
2352
self . zombie ( dst. def ( self ) , "cannot memcpy dynamically sized data" ) ;
0 commit comments