==== Create modules file ====

**PYTHON**

  cd /...
**CUDA**

  vi /...

<code>
#%Module1.0
proc ModulesHelp { } {
    global dotversion

    puts stderr "..."
}

module-whatis "..."

set          ...

setenv       ...
prepend-path ...
prepend-path ...
</code>
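Once the modulefile is in place, it can be checked and loaded from a shell on any node. A minimal sketch, assuming the module is named ''cuda'':

<code>
# the new module should appear in the listing
module avail

# load it and verify that the CUDA compiler is on the PATH
module load cuda
nvcc --version
</code>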
===== GCC =====
===== Examples =====

==== Example mnist ====

A simple example that uses an NVIDIA GPU.

The example consists of the following files:

  * README.md
  * requirements.txt
  * main.job
  * main.py

Create a folder ''mnist'' and place the four files in it.
  mkdir mnist

  cat README.md

<code>
# Basic MNIST Example

```bash
pip install -r requirements.txt
python main.py
# CUDA_VISIBLE_DEVICES=2 python main.py
```
</code>
  cat requirements.txt

<code>
torch
torchvision
</code>
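The Python that runs the job needs torch and torchvision installed. A minimal sketch, assuming the ''miniconda3'' module provides ''pip'' and that user-level installs are allowed:

<code>
# on slurm-ctrl, inside the mnist folder
ml load miniconda3
pip install --user -r requirements.txt
</code>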
  cat main.job
<code>
#!/bin/bash

#SBATCH --mail-type=ALL
#SBATCH --mail-user=<your-email>

ml load miniconda3
python3 main.py
</code>
Replace ''<your-email>'' with your own e-mail address before submitting.
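Depending on how the cluster partitions are set up, a job normally also requests a GPU explicitly and names its output file. A sketch of a fuller ''main.job''; the job name, output pattern, and GPU request are illustrative assumptions, not values from this wiki:

<code>
#!/bin/bash
#SBATCH --job-name=mnist           # illustrative name, pick your own
#SBATCH --output=mnist-%j.out      # %j expands to the job ID
#SBATCH --gres=gpu:1               # request one GPU (if gres is configured)
#SBATCH --mail-type=ALL
#SBATCH --mail-user=<your-email>

ml load miniconda3
python3 main.py
</code>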
  cat main.py

<code>
from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, 3, 1)
        self.conv2 = nn.Conv2d(32, 64, 3, 1)
        self.dropout1 = nn.Dropout2d(0.25)
        self.dropout2 = nn.Dropout2d(0.5)
        self.fc1 = nn.Linear(9216, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        x = self.conv1(x)
        x = F.relu(x)
        x = self.conv2(x)
        x = F.max_pool2d(x, 2)
        x = self.dropout1(x)
        x = torch.flatten(x, 1)
        x = self.fc1(x)
        x = F.relu(x)
        x = self.dropout2(x)
        x = self.fc2(x)
        output = F.log_softmax(x, dim=1)
        return output


def train(args, model, device, train_loader, optimizer, epoch):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))


def test(args, model, device, test_loader):
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item()  # sum up batch loss
            pred = output.argmax(dim=1, keepdim=True)  # index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()

    test_loss /= len(test_loader.dataset)

    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))


def main():
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs', type=int, default=14, metavar='N',
                        help='number of epochs to train (default: 14)')
    parser.add_argument('--lr', type=float, default=1.0, metavar='LR',
                        help='learning rate (default: 1.0)')
    parser.add_argument('--gamma', type=float, default=0.7, metavar='M',
                        help='Learning rate step gamma (default: 0.7)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')

    parser.add_argument('--save-model', action='store_true', default=False,
                        help='For Saving the current Model')
    args = parser.parse_args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()

    torch.manual_seed(args.seed)

    device = torch.device("cuda" if use_cuda else "cpu")

    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST('../data', train=True, download=True,
                       transform=transforms.Compose([
                           transforms.ToTensor(),
                           transforms.Normalize((0.1307,), (0.3081,))
                       ])),
        batch_size=args.batch_size, shuffle=True, **kwargs)
    test_loader = torch.utils.data.DataLoader(
        datasets.MNIST('../data', train=False,
                       transform=transforms.Compose([
                           transforms.ToTensor(),
                           transforms.Normalize((0.1307,), (0.3081,))
                       ])),
        batch_size=args.test_batch_size, shuffle=True, **kwargs)

    model = Net().to(device)
    optimizer = optim.Adadelta(model.parameters(), lr=args.lr)

    scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
    for epoch in range(1, args.epochs + 1):
        train(args, model, device, train_loader, optimizer, epoch)
        test(args, model, device, test_loader)
        scheduler.step()

    if args.save_model:
        torch.save(model.state_dict(), "mnist_cnn.pt")


if __name__ == '__main__':
    main()
</code>
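To try the script interactively on a GPU node, the arguments defined above can shorten the run; for example:

<code>
# one epoch only, and keep the trained weights
python3 main.py --epochs 1 --save-model
</code>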
Once you have all the files in place, launch this command on slurm-ctrl:

  sbatch main.job

Check your job with:

  squeue
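SLURM writes the job's output to a file in the submission directory, by default ''slurm-<jobid>.out''. A quick way to follow a run:

<code>
# only your own jobs
squeue -u $USER

# follow the output; <jobid> is printed by sbatch
tail -f slurm-<jobid>.out
</code>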
----

===== CUDA NVIDIA TESLA Infos =====

=== nvidia-smi ===

  root@gpu02:~# watch nvidia-smi
<code>
Every 2.0s: nvidia-smi

Mon Jun 22 17:49:14 2020
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 440.64.00    Driver Version: 440.64.00    CUDA Version: 10.2     |
|-------------------------------+----------------------+----------------------+
| GPU  Name        Persistence-M| Bus-Id        Disp.A | Volatile Uncorr. ECC |
| Fan  Temp  Perf  Pwr:Usage/Cap|         Memory-Usage | GPU-Util  Compute M. |
|===============================+======================+======================|
|   0  Tesla V100-PCIE...  ...  | 00000000:3B:00.0 Off |                    0 |
| N/A   ...   ...    ... / 250W |      ... / 32510MiB  |    ...       Default |
+-------------------------------+----------------------+----------------------+
|   1  Tesla V100-PCIE...  ...  | 00000000:AF:00.0 Off |                    0 |
| N/A   ...   ...    ... / 250W |      ... / 32510MiB  |    ...       Default |
+-------------------------------+----------------------+----------------------+

+-----------------------------------------------------------------------------+
| Processes:                                                       GPU Memory |
|  GPU       PID   Type   Process name                             Usage      |
|=============================================================================|
|    0      8627      C   /...                                           ...  |
+-----------------------------------------------------------------------------+
</code>
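For scripting or quick checks, the same information is available without the interactive watch view; a small sketch using nvidia-smi's query mode:

<code>
# one CSV line per GPU: name, memory in use, total memory, utilization
nvidia-smi --query-gpu=name,memory.used,memory.total,utilization.gpu --format=csv
</code>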
=== deviceQuery ===

To run deviceQuery it is necessary to build it first. On gpu03, change into the ''deviceQuery'' directory of the CUDA samples and run:

  make

Add the PATH to the system-wide environment:

  vi /etc/environment

Add this to the end:

  /...

Next enable/source it:

  source /etc/environment
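To check that the new PATH entry is active in the current shell (the exact directory depends on where the samples were built); the full output of a run is shown below:

<code>
echo $PATH | tr ':' '\n' | grep -i cuda
which deviceQuery
</code>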
<code>
root@gpu03:~# deviceQuery
deviceQuery Starting...

 CUDA Device Query (Runtime API) version (CUDART static linking)

Detected 2 CUDA Capable device(s)

Device 0: "Tesla V100-PCIE-32GB"
  CUDA Driver Version / Runtime Version          10.2 / 10.2
  CUDA Capability Major/Minor version number:    7.0
  Total amount of global memory:                 32510 MBytes (34089730048 bytes)
  (80) Multiprocessors, ( 64) CUDA Cores/MP:     5120 CUDA Cores
  GPU Max Clock rate:                            1380 MHz (1.38 GHz)
  Memory Clock rate:                             877 Mhz
  Memory Bus Width:                              4096-bit
  L2 Cache Size:                                 6291456 bytes
  Maximum Texture Dimension Size (x,y,z)         1D=(131072), 2D=(131072, 65536), 3D=(16384, 16384, 16384)
  Maximum Layered 1D Texture Size, (num) layers  1D=(32768), 2048 layers
  Maximum Layered 2D Texture Size, (num) layers  2D=(32768, 32768), 2048 layers
  Total amount of constant memory:               65536 bytes
  Total amount of shared memory per block:       49152 bytes
  Total number of registers available per block: 65536
  Warp size:                                     32
  Maximum number of threads per multiprocessor:  2048
  Maximum number of threads per block:           1024
  Max dimension size of a thread block (x,y,z): (1024, 1024, 64)
  Max dimension size of a grid size    (x,y,z): (2147483647, 65535, 65535)
  Maximum memory pitch:                          2147483647 bytes
  Texture alignment:                             512 bytes
  Concurrent copy and kernel execution:          Yes with 7 copy engine(s)
  Run time limit on kernels:                     No
  Integrated GPU sharing Host Memory:            No
  Support host page-locked memory mapping:       Yes
  Alignment requirement for Surfaces:            Yes
  Device has ECC support:                        Enabled
  Device supports Unified Addressing (UVA):      Yes
  Device supports Compute Preemption:            Yes
  Supports Cooperative Kernel Launch:            Yes
  Supports MultiDevice Co-op Kernel Launch:      Yes
  Device PCI Domain ID / Bus ID / location ID:   0 / 59 / 0
  Compute Mode:
     < Default (multiple host threads can use ::cudaSetDevice() with device simultaneously) >

Device 1: "Tesla V100-PCIE-32GB"
  CUDA Driver Version / Runtime Version          10.2 / 10.2
  CUDA Capability Major/Minor version number:    7.0
  Total amount of global memory:                 32510 MBytes (34089730048 bytes)
  (80) Multiprocessors, ( 64) CUDA Cores/MP:     5120 CUDA Cores
  GPU Max Clock rate:                            1380 MHz (1.38 GHz)
  Memory Clock rate:                             877 Mhz
  Memory Bus Width:                              4096-bit
  L2 Cache Size:                                 6291456 bytes
  Maximum Texture Dimension Size (x,y,z)         1D=(131072), 2D=(131072, 65536), 3D=(16384, 16384, 16384)
  Maximum Layered 1D Texture Size, (num) layers  1D=(32768), 2048 layers
  Maximum Layered 2D Texture Size, (num) layers  2D=(32768, 32768), 2048 layers
  Total amount of constant memory:               65536 bytes
  Total amount of shared memory per block:       49152 bytes
  Total number of registers available per block: 65536
  Warp size:                                     32
  Maximum number of threads per multiprocessor:  2048
  Maximum number of threads per block:           1024
  Max dimension size of a thread block (x,y,z): (1024, 1024, 64)
  Max dimension size of a grid size    (x,y,z): (2147483647, 65535, 65535)
  Maximum memory pitch:                          2147483647 bytes
  Texture alignment:                             512 bytes
  Concurrent copy and kernel execution:          Yes with 7 copy engine(s)
  Run time limit on kernels:                     No
  Integrated GPU sharing Host Memory:            No
  Support host page-locked memory mapping:       Yes
  Alignment requirement for Surfaces:            Yes
  Device has ECC support:                        Enabled
  Device supports Unified Addressing (UVA):      Yes
  Device supports Compute Preemption:            Yes
  Supports Cooperative Kernel Launch:            Yes
  Supports MultiDevice Co-op Kernel Launch:      Yes
  Device PCI Domain ID / Bus ID / location ID:   0 / 175 / 0
  Compute Mode:
     < Default (multiple host threads can use ::cudaSetDevice() with device simultaneously) >
> Peer access from Tesla V100-PCIE-32GB (GPU0) -> Tesla V100-PCIE-32GB (GPU1) : Yes
> Peer access from Tesla V100-PCIE-32GB (GPU1) -> Tesla V100-PCIE-32GB (GPU0) : Yes

deviceQuery, CUDA Driver = CUDART, CUDA Driver Version = 10.2, CUDA Runtime Version = 10.2, NumDevs = 2
Result = PASS
</code>
===== Links =====