@@ -36,7 +36,7 @@ def inference(self, g, device, batch_size, num_workers, buffer_device=None):
         # example is that the intermediate results can also benefit from prefetching.
         g.ndata['h'] = g.ndata['feat']
         sampler = dgl.dataloading.MultiLayerFullNeighborSampler(1, prefetch_node_feats=['h'])
-        dataloader = dgl.dataloading.NodeDataLoader(
+        dataloader = dgl.dataloading.DataLoader(
             g, torch.arange(g.num_nodes()).to(g.device), sampler, device=device,
             batch_size=1000, shuffle=False, drop_last=False, num_workers=num_workers,
             persistent_workers=(num_workers > 0))
@@ -77,7 +77,7 @@ def train(rank, world_size, graph, num_classes, split_idx):
         graph, train_idx, sampler,
         device='cuda', batch_size=1000, shuffle=True, drop_last=False,
         num_workers=0, use_ddp=True, use_uva=True)
-    valid_dataloader = dgl.dataloading.NodeDataLoader(
+    valid_dataloader = dgl.dataloading.DataLoader(
         graph, valid_idx, sampler, device='cuda', batch_size=1024, shuffle=True,
         drop_last=False, num_workers=0, use_uva=True)
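For context, a minimal runnable sketch (not part of this diff) of the renamed dgl.dataloading.DataLoader, driven by the same MultiLayerFullNeighborSampler and keyword arguments the example uses; the toy graph, feature dimension, and CPU device below are illustrative assumptions, not taken from the repository.

# Minimal sketch, assuming a toy random graph and 16-dim features.
import dgl
import torch

g = dgl.rand_graph(1000, 5000)              # assumed toy graph
g.ndata['feat'] = torch.randn(1000, 16)     # assumed node features
g.ndata['h'] = g.ndata['feat']

sampler = dgl.dataloading.MultiLayerFullNeighborSampler(1, prefetch_node_feats=['h'])
dataloader = dgl.dataloading.DataLoader(    # formerly dgl.dataloading.NodeDataLoader
    g, torch.arange(g.num_nodes()), sampler, device='cpu',
    batch_size=1000, shuffle=False, drop_last=False, num_workers=0)

for input_nodes, output_nodes, blocks in dataloader:
    # 'h' is available on the sampled block because of prefetch_node_feats=['h']
    h = blocks[0].srcdata['h']
    print(input_nodes.shape, output_nodes.shape, h.shape)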