@@ -1067,30 +1067,32 @@ static void mlx5_ldev_add_netdev(struct mlx5_lag *ldev,
 				 struct net_device *netdev)
 {
 	unsigned int fn = mlx5_get_dev_index(dev);
+	unsigned long flags;
 
 	if (fn >= ldev->ports)
 		return;
 
-	spin_lock(&lag_lock);
+	spin_lock_irqsave(&lag_lock, flags);
 	ldev->pf[fn].netdev = netdev;
 	ldev->tracker.netdev_state[fn].link_up = 0;
 	ldev->tracker.netdev_state[fn].tx_enabled = 0;
-	spin_unlock(&lag_lock);
+	spin_unlock_irqrestore(&lag_lock, flags);
 }
 
 static void mlx5_ldev_remove_netdev(struct mlx5_lag *ldev,
 				    struct net_device *netdev)
 {
+	unsigned long flags;
 	int i;
 
-	spin_lock(&lag_lock);
+	spin_lock_irqsave(&lag_lock, flags);
 	for (i = 0; i < ldev->ports; i++) {
 		if (ldev->pf[i].netdev == netdev) {
 			ldev->pf[i].netdev = NULL;
 			break;
 		}
 	}
-	spin_unlock(&lag_lock);
+	spin_unlock_irqrestore(&lag_lock, flags);
 }
 
 static void mlx5_ldev_add_mdev(struct mlx5_lag *ldev,
@@ -1246,12 +1248,13 @@ void mlx5_lag_add_netdev(struct mlx5_core_dev *dev,
 bool mlx5_lag_is_roce(struct mlx5_core_dev *dev)
 {
 	struct mlx5_lag *ldev;
+	unsigned long flags;
 	bool res;
 
-	spin_lock(&lag_lock);
+	spin_lock_irqsave(&lag_lock, flags);
 	ldev = mlx5_lag_dev(dev);
 	res = ldev && __mlx5_lag_is_roce(ldev);
-	spin_unlock(&lag_lock);
+	spin_unlock_irqrestore(&lag_lock, flags);
 
 	return res;
 }
@@ -1260,12 +1263,13 @@ EXPORT_SYMBOL(mlx5_lag_is_roce);
 bool mlx5_lag_is_active(struct mlx5_core_dev *dev)
 {
 	struct mlx5_lag *ldev;
+	unsigned long flags;
 	bool res;
 
-	spin_lock(&lag_lock);
+	spin_lock_irqsave(&lag_lock, flags);
 	ldev = mlx5_lag_dev(dev);
 	res = ldev && __mlx5_lag_is_active(ldev);
-	spin_unlock(&lag_lock);
+	spin_unlock_irqrestore(&lag_lock, flags);
 
 	return res;
 }
@@ -1274,13 +1278,14 @@ EXPORT_SYMBOL(mlx5_lag_is_active);
 bool mlx5_lag_is_master(struct mlx5_core_dev *dev)
 {
 	struct mlx5_lag *ldev;
+	unsigned long flags;
 	bool res;
 
-	spin_lock(&lag_lock);
+	spin_lock_irqsave(&lag_lock, flags);
 	ldev = mlx5_lag_dev(dev);
 	res = ldev && __mlx5_lag_is_active(ldev) &&
 	      dev == ldev->pf[MLX5_LAG_P1].dev;
-	spin_unlock(&lag_lock);
+	spin_unlock_irqrestore(&lag_lock, flags);
 
 	return res;
 }
@@ -1289,12 +1294,13 @@ EXPORT_SYMBOL(mlx5_lag_is_master);
 bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev)
 {
 	struct mlx5_lag *ldev;
+	unsigned long flags;
 	bool res;
 
-	spin_lock(&lag_lock);
+	spin_lock_irqsave(&lag_lock, flags);
 	ldev = mlx5_lag_dev(dev);
 	res = ldev && __mlx5_lag_is_sriov(ldev);
-	spin_unlock(&lag_lock);
+	spin_unlock_irqrestore(&lag_lock, flags);
 
 	return res;
 }
@@ -1303,13 +1309,14 @@ EXPORT_SYMBOL(mlx5_lag_is_sriov);
 bool mlx5_lag_is_shared_fdb(struct mlx5_core_dev *dev)
 {
 	struct mlx5_lag *ldev;
+	unsigned long flags;
 	bool res;
 
-	spin_lock(&lag_lock);
+	spin_lock_irqsave(&lag_lock, flags);
 	ldev = mlx5_lag_dev(dev);
 	res = ldev && __mlx5_lag_is_sriov(ldev) &&
 	      test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags);
-	spin_unlock(&lag_lock);
+	spin_unlock_irqrestore(&lag_lock, flags);
 
 	return res;
 }
@@ -1352,9 +1359,10 @@ struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
 {
 	struct net_device *ndev = NULL;
 	struct mlx5_lag *ldev;
+	unsigned long flags;
 	int i;
 
-	spin_lock(&lag_lock);
+	spin_lock_irqsave(&lag_lock, flags);
 	ldev = mlx5_lag_dev(dev);
 
 	if (!(ldev && __mlx5_lag_is_roce(ldev)))
@@ -1373,7 +1381,7 @@ struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
 		dev_hold(ndev);
 
 unlock:
-	spin_unlock(&lag_lock);
+	spin_unlock_irqrestore(&lag_lock, flags);
 
 	return ndev;
 }
@@ -1383,10 +1391,11 @@ u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
 			   struct net_device *slave)
 {
 	struct mlx5_lag *ldev;
+	unsigned long flags;
 	u8 port = 0;
 	int i;
 
-	spin_lock(&lag_lock);
+	spin_lock_irqsave(&lag_lock, flags);
 	ldev = mlx5_lag_dev(dev);
 	if (!(ldev && __mlx5_lag_is_roce(ldev)))
 		goto unlock;
@@ -1401,7 +1410,7 @@ u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
 	port = ldev->v2p_map[port * ldev->buckets];
 
 unlock:
-	spin_unlock(&lag_lock);
+	spin_unlock_irqrestore(&lag_lock, flags);
 	return port;
 }
 EXPORT_SYMBOL(mlx5_lag_get_slave_port);
@@ -1422,8 +1431,9 @@ struct mlx5_core_dev *mlx5_lag_get_peer_mdev(struct mlx5_core_dev *dev)
 {
 	struct mlx5_core_dev *peer_dev = NULL;
 	struct mlx5_lag *ldev;
+	unsigned long flags;
 
-	spin_lock(&lag_lock);
+	spin_lock_irqsave(&lag_lock, flags);
 	ldev = mlx5_lag_dev(dev);
 	if (!ldev)
 		goto unlock;
@@ -1433,7 +1443,7 @@ struct mlx5_core_dev *mlx5_lag_get_peer_mdev(struct mlx5_core_dev *dev)
 			   ldev->pf[MLX5_LAG_P1].dev;
 
 unlock:
-	spin_unlock(&lag_lock);
+	spin_unlock_irqrestore(&lag_lock, flags);
 	return peer_dev;
 }
 EXPORT_SYMBOL(mlx5_lag_get_peer_mdev);
@@ -1446,6 +1456,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
 	int outlen = MLX5_ST_SZ_BYTES(query_cong_statistics_out);
 	struct mlx5_core_dev **mdev;
 	struct mlx5_lag *ldev;
+	unsigned long flags;
 	int num_ports;
 	int ret, i, j;
 	void *out;
@@ -1462,7 +1473,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
 
 	memset(values, 0, sizeof(*values) * num_counters);
 
-	spin_lock(&lag_lock);
+	spin_lock_irqsave(&lag_lock, flags);
 	ldev = mlx5_lag_dev(dev);
 	if (ldev && __mlx5_lag_is_active(ldev)) {
 		num_ports = ldev->ports;
@@ -1472,7 +1483,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
 		num_ports = 1;
 		mdev[MLX5_LAG_P1] = dev;
 	}
-	spin_unlock(&lag_lock);
+	spin_unlock_irqrestore(&lag_lock, flags);
 
 	for (i = 0; i < num_ports; ++i) {
 		u32 in[MLX5_ST_SZ_DW(query_cong_statistics_in)] = {};
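Note: the conversion above follows the standard kernel pattern for a spinlock that may be taken from a context where interrupts are, or must be, disabled: spin_lock_irqsave() saves the caller's interrupt state into flags and disables local interrupts, and spin_unlock_irqrestore() puts that exact state back. Below is a minimal, self-contained sketch of that pattern. It is not part of this patch, and every name in it (demo_lock, demo_counter, demo_update, demo_irq_handler) is hypothetical, for illustration only.

/*
 * Sketch of the spin_lock_irqsave()/spin_unlock_irqrestore() pattern.
 * A lock shared between process context and interrupt context must be
 * taken with local interrupts disabled in the process-context path,
 * otherwise an interrupt arriving while the lock is held would deadlock
 * trying to take it again. The irqsave/irqrestore pair additionally
 * preserves the caller's prior interrupt state, so it is safe even when
 * interrupts were already off on entry.
 */
#include <linux/spinlock.h>
#include <linux/interrupt.h>

static DEFINE_SPINLOCK(demo_lock);
static int demo_counter;

/* Process-context path: makes no assumption about the IRQ state. */
static void demo_update(int val)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);	/* save state, disable IRQs */
	demo_counter = val;
	spin_unlock_irqrestore(&demo_lock, flags); /* restore saved state */
}

/* Interrupt-context path: can take the same lock without deadlocking,
 * because any process-context holder runs with local IRQs disabled. */
static irqreturn_t demo_irq_handler(int irq, void *data)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	demo_counter++;
	spin_unlock_irqrestore(&demo_lock, flags);
	return IRQ_HANDLED;
}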