{ "cells": [ { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [], "source": [ "# Import des librairies\n", "from PIL import Image\n", "import cv2\n", "import os\n", "import logging\n", "from datetime import datetime" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [], "source": [ "# Import de Pytorch : https://pytorch.org/hub/ultralytics_yolov5/\n", "import torch" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [], "source": [ "# Configuration de la journalisation pour suivre le processus et les erreurs\n", "logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "Using cache found in /home/heuzef/.var/app/com.vscodium.codium/cache/torch/hub/ultralytics_yolov5_master\n", "YOLOv5 🚀 2024-7-4 Python-3.11.9 torch-2.3.1+cu121 CPU\n", "\n", "Fusing layers... \n", "YOLOv5s summary: 213 layers, 7225885 parameters, 0 gradients, 16.4 GFLOPs\n", "Adding AutoShape... \n" ] } ], "source": [ "# Chargement du modèle YOLOv5 pré-entraîné depuis le hub de modèles de PyTorch\n", "model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [], "source": [ "def process_images(base_path, html_path, out_path):\n", " # Je crée une liste pour stocker les entrées de données pour le fichier HTML\n", " html_data = []\n", " \n", " # Je parcours chaque dossier représentant une espèce dans le répertoire de base\n", " for species_folder in os.listdir(base_path):\n", " species_path = os.path.join(base_path, species_folder)\n", " dest_path = os.path.join(out_path, species_folder)\n", " if os.path.isdir(species_path):\n", " for image_file in os.listdir(species_path):\n", " try:\n", " image_path = os.path.join(species_path, image_file)\n", " # Je charge l'image à partir du disque\n", " image = Image.open(image_path)\n", " # Je convertis l'image en format attendu par le modèle\n", " results = model(image)\n", " # Je récupère les boîtes englobantes des prédictions du modèle\n", " boxes = results.xyxy[0] # Coordonnées des boîtes sous forme de Tensor\n", " if len(boxes) > 0:\n", " box = boxes[0]\n", " # Je convertis les coordonnées des boîtes en entiers pour le recadrage\n", " x1, y1, x2, y2 = int(box[0].item()), int(box[1].item()), int(box[2].item()), int(box[3].item())\n", " # Je recadre l'image selon la boîte englobante\n", " cropped_image = image.crop((x1, y1, x2, y2))\n", " # Je redimensionne l'image pour l'analyse standard en vision par ordinateur\n", " cropped_image = cropped_image.resize((224, 224))\n", " # Je construis le nouveau chemin de l'image avec des informations détaillées\n", " # new_image_path = f'{species_path}/{image_file[:-4]}_cropped_{x1}_{y1}_{x2}_{y2}.jpg'\n", " new_image_path = f'{dest_path}/{image_file[:-4]}.jpg'\n", " cropped_image.save(new_image_path)\n", " logging.info(f'Image processed and saved: {new_image_path}')\n", " # Je stocke les informations pour le fichier HTML\n", " html_data.append((species_folder, image_file, new_image_path, x1, y1, x2, y2))\n", " else:\n", " logging.warning(f'No bounding box found for image: {image_path}')\n", " except Exception as e:\n", " logging.error(f'Error processing image {image_path}: {e}')\n", " \n", " # Je génère un fichier HTML pour visualiser les résultats\n", " generate_html(html_data, html_path)\n", "\n", "def 
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def generate_html(data, file_path):\n",
    "    # Create an HTML file to display the images and their information\n",
    "    with open(file_path, 'w') as file:\n",
    "        file.write('<html><body><table border=\"1\">\\n')\n",
    "        file.write('<tr><th>Species</th><th>Original Image</th><th>Processed Image</th><th>Box Coordinates</th></tr>\\n')\n",
    "        for species, image_file, new_image_path, x1, y1, x2, y2 in data:\n",
    "            file.write(f'<tr><td>{species}</td>'\n",
    "                       f'<td>{image_file}</td>'\n",
    "                       f'<td><img src=\"{new_image_path}\" width=\"224\"></td>'\n",
    "                       f'<td>({x1}, {y1}, {x2}, {y2})</td></tr>\\n')\n",
    "        file.write('</table></body></html>\\n')"
   ]
  },
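  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "A hedged usage example: the paths below (`./data/birds`, `./output`, `report.html`) are hypothetical placeholders, assuming `base_path` holds one sub-folder per species as `process_images` expects."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hypothetical paths; adjust to your dataset layout.\n",
    "base_path = './data/birds'  # one sub-folder per species\n",
    "out_path = './output'       # receives the crops, mirroring the species folders\n",
    "html_path = 'report.html'   # visual report of species, files, and box coordinates\n",
    "\n",
    "os.makedirs(out_path, exist_ok=True)\n",
    "process_images(base_path, html_path, out_path)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "name": "python",
   "version": "3.11.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}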