Skip to content
Closed
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
38 changes: 19 additions & 19 deletions arize/pandas/embeddings/base_generators.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,13 +32,7 @@ class BaseEmbeddingGenerator(ABC):
def __init__(self, use_case: Enum, model_name: str, batch_size: int = 100, **kwargs):
self.__use_case = self._parse_use_case(use_case=use_case)
self.__model_name = model_name
self.__device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if self.device == torch.device("cpu"):
logger.warning(
"No available GPU has been detected. The use of GPU acceleration is "
"strongly recommended. You can check for GPU availability by running "
"`torch.cuda.is_available()`"
)
self.__device = self.select_device()
self.__batch_size = batch_size
logger.info(f"Downloading pre-trained model '{self.model_name}'")
try:
Expand All @@ -48,6 +42,20 @@ def __init__(self, use_case: Enum, model_name: str, batch_size: int = 100, **kwa
except Exception as e:
raise e

def select_device(self) -> torch.device:
    """Select the best available accelerator for embedding generation.

    Preference order: CUDA GPU, then Apple-silicon MPS, then CPU. When only
    the CPU is available a warning is logged, since embedding generation is
    significantly slower without hardware acceleration.

    Returns:
        torch.device: the device subsequent model/batch tensors are moved to.
    """
    if torch.cuda.is_available():
        return torch.device("cuda")
    # Guard the attribute lookup: `torch.backends.mps` does not exist on
    # torch builds older than 1.12, where a plain access would raise
    # AttributeError instead of falling through to CPU.
    mps_backend = getattr(torch.backends, "mps", None)
    if mps_backend is not None and mps_backend.is_available():
        return torch.device("mps")
    logger.warning(
        "No available GPU has been detected. The use of GPU acceleration is "
        "strongly recommended. You can check for GPU availability by running "
        "`torch.cuda.is_available()` or `torch.backends.mps.is_available()`."
    )
    return torch.device("cpu")

@property
def use_case(self) -> str:
    """Read-only use-case identifier, e.g. "AREA.TASK".

    Set once in ``__init__`` via ``_parse_use_case`` (which joins the enum's
    area and task names with a dot); exposed through a property so callers
    cannot rebind it after construction.
    """
    return self.__use_case
Expand Down Expand Up @@ -85,9 +93,7 @@ def _parse_use_case(use_case: Enum) -> str:
uc_task = use_case.name
return f"{uc_area}.{uc_task}"

def _get_embedding_vector(
self, batch: Dict[str, torch.Tensor], method
) -> Dict[str, torch.Tensor]:
def _get_embedding_vector(self, batch: Dict[str, torch.Tensor], method) -> Dict[str, torch.Tensor]:
with torch.no_grad():
outputs = self.model(**batch)
# (batch_size, seq_length/or/num_tokens, hidden_size)
Expand Down Expand Up @@ -126,16 +132,12 @@ def __repr__(self) -> str:
)

def __init__(self, use_case: Enum, model_name: str, tokenizer_max_length: int = 512, **kwargs):
    """Initialize an NLP embedding generator.

    Args:
        use_case: enum identifying the NLP task; parsed/validated by the
            parent ``BaseEmbeddingGenerator``.
        model_name: pre-trained model identifier, forwarded to the parent
            (which downloads the model) and used here to fetch the tokenizer.
        tokenizer_max_length: maximum sequence length the tokenizer will
            produce (default 512).
        **kwargs: forwarded to ``BaseEmbeddingGenerator.__init__``.
    """
    # NOTE: this span of the pasted diff contained BOTH the pre- and
    # post-change forms of the super() and tokenizer calls; only the
    # post-change (single-call) form is kept — calling super().__init__
    # twice would download the model twice.
    super().__init__(use_case=use_case, model_name=model_name, **kwargs)
    self.__tokenizer_max_length = tokenizer_max_length
    # We don't check for the tokenizer's existence since it is coupled with the corresponding model
    # We check the model's existence in `BaseEmbeddingGenerator`
    logger.info(f"Downloading tokenizer for '{self.model_name}'")
    self.__tokenizer = AutoTokenizer.from_pretrained(self.model_name, model_max_length=self.tokenizer_max_length)

@property
def tokenizer(self):
Expand Down Expand Up @@ -168,9 +170,7 @@ def __repr__(self) -> str:
)

def __init__(self, use_case: Enum, model_name: str, **kwargs):
super(CVEmbeddingGenerator, self).__init__(
use_case=use_case, model_name=model_name, **kwargs
)
super(CVEmbeddingGenerator, self).__init__(use_case=use_case, model_name=model_name, **kwargs)
logger.info("Downloading image processor")
# We don't check for the image processor's existence since it is coupled with the corresponding model
# We check the model's existence in `BaseEmbeddingGenerator`
Expand Down