2023-02-09 11:26:50 +00:00
|
|
|
import enum
|
2023-03-22 04:45:42 +08:00
|
|
|
import random
|
|
|
|
|
|
|
|
import numpy as np
|
|
|
|
import torch
|
|
|
|
|
|
|
|
from cacheflow.parallel_utils.parallel_state import model_parallel_is_initialized
|
|
|
|
from cacheflow.parallel_utils.tensor_parallel import model_parallel_cuda_manual_seed
|
2023-02-09 11:26:50 +00:00
|
|
|
|
|
|
|
|
|
|
|
class Device(enum.Enum):
    """Physical placement of a block of memory: on the GPU or in host RAM."""

    # Explicit values match what enum.auto() would assign (1, 2).
    GPU = 1
    CPU = 2
|
|
|
|
|
|
|
|
|
|
|
|
class Counter:
    """Monotonically increasing integer counter.

    Each call to ``next(counter)`` returns the current value and then
    advances it by one. Useful for generating unique sequential IDs.
    """

    def __init__(self, start: int = 0) -> None:
        # The next value that __next__ will return.
        self.counter = start

    def __next__(self) -> int:
        # Renamed from `id` to avoid shadowing the builtin id().
        value = self.counter
        self.counter += 1
        return value

    def reset(self) -> None:
        """Restart the sequence from 0 (not from the original `start`)."""
        self.counter = 0
|
2023-03-22 04:45:42 +08:00
|
|
|
|
|
|
|
def set_random_seed(seed: int) -> None:
    """Seed every RNG source for reproducibility.

    Seeds Python's ``random`` module, NumPy, and PyTorch (including all
    visible CUDA devices when CUDA is available). If tensor model
    parallelism has been initialized, the per-rank model-parallel CUDA
    RNG state is seeded as well.

    Args:
        seed: The seed value to apply to every generator.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)

    # Only touch the model-parallel RNG tracker once it exists; seeding it
    # before initialization would raise.
    if model_parallel_is_initialized():
        model_parallel_cuda_manual_seed(seed)
|