wenjin_lee
committed on
Commit
·
cd705e3
1
Parent(s):
d9aaf5e
Remove logging statements
Browse files — modeling_zeranker.py +0 -7
modeling_zeranker.py
CHANGED
|
@@ -20,11 +20,6 @@ from transformers.models.qwen3.modeling_qwen3 import Qwen3ForCausalLM
|
|
| 20 |
from transformers.tokenization_utils_base import BatchEncoding
|
| 21 |
from transformers.tokenization_utils_fast import PreTrainedTokenizerFast
|
| 22 |
|
| 23 |
-
import logging
|
| 24 |
-
|
| 25 |
-
logger = logging.getLogger(__name__)
|
| 26 |
-
print("Running code of HF Model")
|
| 27 |
-
|
| 28 |
# pyright: reportUnknownMemberType=false
|
| 29 |
# pyright: reportUnknownVariableType=false
|
| 30 |
|
|
@@ -131,9 +126,7 @@ def predict(
|
|
| 131 |
query_documents = [[sentence[0], sentence[1]] for sentence in sentences]
|
| 132 |
|
| 133 |
if not hasattr(self, "inner_model"):
|
| 134 |
-
logger.info(f"Memory reserved [Within Model File] Before Loading Model: {torch.cuda.memory_reserved()}")
|
| 135 |
self.inner_tokenizer, self.inner_model = load_model(global_device)
|
| 136 |
-
logger.info(f"Memory reserved [Within Model File] After Loading Model: {torch.cuda.memory_reserved()}")
|
| 137 |
self.inner_model.eval()
|
| 138 |
self.inner_model.gradient_checkpointing_disable()
|
| 139 |
self.inner_yes_token_id = self.inner_tokenizer.encode(
|
|
|
|
| 20 |
from transformers.tokenization_utils_base import BatchEncoding
|
| 21 |
from transformers.tokenization_utils_fast import PreTrainedTokenizerFast
|
| 22 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 23 |
# pyright: reportUnknownMemberType=false
|
| 24 |
# pyright: reportUnknownVariableType=false
|
| 25 |
|
|
|
|
| 126 |
query_documents = [[sentence[0], sentence[1]] for sentence in sentences]
|
| 127 |
|
| 128 |
if not hasattr(self, "inner_model"):
|
|
|
|
| 129 |
self.inner_tokenizer, self.inner_model = load_model(global_device)
|
|
|
|
| 130 |
self.inner_model.eval()
|
| 131 |
self.inner_model.gradient_checkpointing_disable()
|
| 132 |
self.inner_yes_token_id = self.inner_tokenizer.encode(
|