@@ -14,6 +14,7 @@ from llama_recipes.inference.chat_utils import read_dialogs_from_file, format_to
 from llama_recipes.inference.model_utils import load_model, load_peft_model
 from llama_recipes.inference.safety_utils import get_safety_checker
+from accelerate.utils import is_xpu_available
 def main(
     model_name,