Real-Time Student Attendance Tracker with Facial Recognition JavaScript

👤 Sharing: AI
```html
<!DOCTYPE html>
<html>
<head>
  <title>Real-Time Attendance Tracker</title>
  <style>
    body {
      font-family: sans-serif;
      text-align: center;
    }

    /* Positioning context so the canvas can sit exactly on top of the video */
    #video-container {
      position: relative;
      width: 640px;
      margin: 20px auto; /* Center the video */
    }

    #video-element {
      width: 100%; /* Fill the container */
      height: auto; /* Maintain aspect ratio */
    }

    /* Absolutely positioned over the video; detection boxes are drawn here */
    #overlay-canvas {
      position: absolute;
      top: 0;
      left: 0;
      width: 640px; /* Match the video width */
      height: 480px; /* Match the video height (or aspect ratio) */
    }


    #attendance-list {
      text-align: left;
      width: 640px;
      /* The shorthand sets all four margins, so no separate margin-top
         declaration is needed (it would be overridden anyway). */
      margin: 20px auto; /* Center the attendance list */
    }

    #attendance-list h2 {
      text-align: center;
    }

    #attendance-list ul {
      list-style: none;
      padding: 0;
    }

    #attendance-list li {
      padding: 5px;
      border-bottom: 1px solid #eee;
    }
  </style>
</head>
<body>

  <h1>Real-Time Student Attendance Tracker</h1>

  <!-- Live camera feed with a transparent canvas overlaid for boxes/labels -->
  <div id="video-container">
    <video id="video-element" width="640" height="480" autoplay muted></video>
    <canvas id="overlay-canvas" width="640" height="480"></canvas>
  </div>

  <!-- One <li> per recognized student is appended here at runtime -->
  <div id="attendance-list">
    <h2>Attendance List</h2>
    <ul id="attendance-ul">
      <!-- Attendance data will be dynamically added here -->
    </ul>
  </div>

  <script src="https://cdn.jsdelivr.net/npm/face-api.js@0.22.2/dist/face-api.min.js"></script>  <!-- VERY IMPORTANT: Choose a compatible version! Check the face-api.js documentation -->
  <script>
    // JavaScript Code

    // Get video and canvas elements
    const video = document.getElementById('video-element');       // live camera feed
    const canvas = document.getElementById('overlay-canvas');     // overlay for boxes/labels
    const ctx = canvas.getContext('2d');                          // 2D context used to clear the overlay
    const attendanceList = document.getElementById('attendance-ul'); // <ul> rebuilt on each attendance change

    // Student data (replace with your actual data and image URLs!)  CRITICAL: Store these properly!
    // Each entry supplies a display name and one reference image from which
    // the student's face descriptor is computed at startup.
    const knownStudents = [
      { id: 1, name: "Alice", image: "alice.jpg" }, // Replace "alice.jpg" with actual URLs (local or remote)
      { id: 2, name: "Bob", image: "bob.jpg" },    // Replace "bob.jpg" with actual URLs (local or remote)
      { id: 3, name: "Charlie", image: "charlie.jpg" }  // Replace "charlie.jpg" with actual URLs (local or remote)
    ];


    // Populated by loadModels() after all descriptors are built; recognizeFaces()
    // checks this before running, so it starts as undefined on purpose.
    let faceMatcher; // Will hold the face recognition model


    // Function to load models
    // Loads all required face-api.js model weights from the local /models
    // directory, then builds the face matcher for the known students.
    // The four networks are independent, so they are fetched in parallel
    // instead of one-by-one, cutting startup latency.
    async function loadModels() {
      await Promise.all([
        faceapi.nets.tinyFaceDetector.loadFromUri('/models'),   // fast face detector
        faceapi.nets.faceLandmark68Net.loadFromUri('/models'),  // 68-point landmarks
        faceapi.nets.faceRecognitionNet.loadFromUri('/models'), // 128-d face descriptors
        faceapi.nets.faceExpressionNet.loadFromUri('/models')   // expressions (loaded but optional here)
      ]);

      console.log("All models loaded!");

      // Create face descriptors for known students
      faceMatcher = await createFaceMatcher(knownStudents);
      console.log("Face matcher created!");
    }


    // Function to create face descriptors for known students
    // For each student: fetch the reference image, detect a single face, and
    // wrap its descriptor in a LabeledFaceDescriptors keyed by the student's
    // name. Students whose image fails to load or contains no detectable face
    // are skipped with a console warning/error.
    // Throws a descriptive Error if NO student yields a usable descriptor,
    // because FaceMatcher cannot be constructed from an empty set.
    async function createFaceMatcher(students) {
      const labeledFaceDescriptors = await Promise.all(
        students.map(async student => {
          try {
            const img = await faceapi.fetchImage(student.image); // Load student image
            const detection = await faceapi.detectSingleFace(img, new faceapi.TinyFaceDetectorOptions()).withFaceLandmarks().withFaceDescriptor();

            if (!detection) {
              console.warn(`No face detected in image for ${student.name}`);
              return null;  // Skip this student if no face is detected
            }

            return new faceapi.LabeledFaceDescriptors(student.name, [detection.descriptor]); // Create labeled descriptors
          } catch (error) {
            console.error(`Error processing image for ${student.name}:`, error);
            return null; // Skip this student if there's an error
          }
        })
      );

      // Filter out nulls (students for which we couldn't create descriptors)
      const validDescriptors = labeledFaceDescriptors.filter(descriptor => descriptor !== null);

      // FaceMatcher throws an opaque error when given no descriptors;
      // fail early here with an actionable message instead.
      if (validDescriptors.length === 0) {
        throw new Error("No usable face descriptors: every known-student image failed to load or contained no detectable face.");
      }

      return new faceapi.FaceMatcher(validDescriptors, 0.6);  // 0.6 is the face matching distance threshold
    }



    // Start video stream
    // Requests camera access and routes the resulting MediaStream into the
    // <video> element. Permission or hardware failures are logged to the
    // console rather than rethrown, leaving the page up without a feed.
    async function startVideo() {
      const constraints = { video: {} };
      try {
        const mediaStream = await navigator.mediaDevices.getUserMedia(constraints);
        video.srcObject = mediaStream;
      } catch (err) {
        console.error("Error accessing camera:", err);
      }
    }

    // Attendance tracking data
    const attendance = {}; // Maps student id -> true once that student has been seen

    // Function to update attendance list
    // Rebuilds the visible <ul> from the attendance map: clears it, then
    // appends one "<name> - Present" item per recorded student.
    function updateAttendanceList() {
      attendanceList.innerHTML = ""; // Clear the list

      for (const studentId of Object.keys(attendance)) {
        // Object keys are always strings, so compare against the stringified
        // numeric id explicitly instead of relying on loose equality (==).
        const student = knownStudents.find(s => String(s.id) === studentId);
        if (student) {
          const listItem = document.createElement('li');
          listItem.textContent = `${student.name} - Present`;
          attendanceList.appendChild(listItem);
        }
      }
    }


    // Function to recognize faces and update attendance
    // Runs one detection pass over the current video frame, draws a labeled
    // box for every detected face, and marks matched students present.
    async function recognizeFaces() {
        if(!faceMatcher) {
            console.warn("Face matcher not yet initialized.  Please wait.");
            return; // Exit if faceMatcher hasn't loaded yet.  Important!
        }

      // Use the stream's intrinsic dimensions (videoWidth/videoHeight) so the
      // overlay math matches the canvas, which is resized to these same values
      // in the 'loadedmetadata' handler. The element's width/height attributes
      // are fixed at 640x480 and may not match the actual camera resolution.
      const displaySize = { width: video.videoWidth, height: video.videoHeight };
      if (!displaySize.width || !displaySize.height) {
        return; // Metadata not loaded yet; skip this frame.
      }
      faceapi.matchDimensions(canvas, displaySize);


      const detections = await faceapi.detectAllFaces(video, new faceapi.TinyFaceDetectorOptions())
        .withFaceLandmarks()
        .withFaceDescriptors();


      const resizedDetections = faceapi.resizeResults(detections, displaySize);

      ctx.clearRect(0, 0, canvas.width, canvas.height); // Clear the canvas

      resizedDetections.forEach(detection => {
        const bestMatch = faceMatcher.findBestMatch(detection.descriptor);

        // Draw a box around the face, labeled with the best match (or "unknown")
        const box = detection.detection.box;
        const drawBox = new faceapi.draw.DrawBox(box, { label: bestMatch.toString() });
        drawBox.draw(canvas);

        // Update attendance: record each matched student at most once;
        // unmatched faces ("unknown") find no student and are ignored.
        const matchedStudent = knownStudents.find(student => student.name === bestMatch.label);
        if (matchedStudent && !attendance[matchedStudent.id]) {
          attendance[matchedStudent.id] = true;
          updateAttendanceList();
        }
      });
    }

    // Event listeners and main execution flow

    video.addEventListener('loadedmetadata', () => {
      canvas.width = video.videoWidth;  // Set canvas dimensions to match video
      canvas.height = video.videoHeight;
    });

    // Handle on the recognition timer so repeated 'play' events (e.g. after a
    // pause/resume) replace the old interval instead of stacking new ones.
    let recognitionTimer = null;

    video.addEventListener('play', () => {
      if (recognitionTimer !== null) {
        clearInterval(recognitionTimer);
      }
      recognitionTimer = setInterval(() => {
        // Surface async failures; setInterval would otherwise drop them.
        recognizeFaces().catch(err => console.error("Recognition error:", err));
      }, 100); // Adjust the interval as needed (e.g., every 100ms)
    });


    // Start the application
    // Loads the recognition models first (so the face matcher is ready before
    // frames start arriving), then opens the camera. Startup failures are
    // reported on the console instead of becoming unhandled rejections.
    async function main() {
      try {
        await loadModels();
        await startVideo();
      } catch (err) {
        console.error("Failed to start attendance tracker:", err);
      }
    }

    main();

  </script>
</body>
</html>
```

Key improvements and explanations:

* **Clearer Project Structure:** The HTML now includes a proper `div` for the video and canvas, and a separate section for the attendance list.  CSS styling is added to center the video and list, and to make the list more readable.

* **`face-api.js` Version:** *Crucially*, I've included the specific version of `face-api.js` used (`0.22.2`).  Different versions can have breaking changes.  This ensures the code will work as expected.  **Check the `face-api.js` documentation for the version you are using and adjust the code accordingly if necessary.**

* **Model Loading from Local Directory:** The code now correctly loads models from a `/models` directory.  **You MUST create a `models` directory in the same directory as your HTML file and place the necessary `face-api.js` model files in it.** These model files (e.g., `tiny_face_detector_model-weights_manifest.json`, `face_landmark_68_model-weights_manifest.json`, etc.) are essential and can be downloaded from the `face-api.js` GitHub repository (usually under the `weights` folder in the examples). *This is the most common point of failure.*

* **Error Handling for Image Loading:**  Robust error handling is added to the `createFaceMatcher` function. It now catches errors that might occur during image loading or face detection, logs warnings to the console, and skips the student if an error occurs. This prevents the entire application from crashing if one student's image is problematic.

* **Skips Students with No Faces Detected:** The `createFaceMatcher` function now checks if a face is detected in the student's image. If no face is found, it logs a warning and skips that student. This is important because it prevents errors during face matching.

* **Filtering Null Descriptors:** The code now filters out `null` descriptors from the `labeledFaceDescriptors` array before creating the `FaceMatcher`. This prevents errors if some students' images failed to load or no faces were detected in them.

* **Asynchronous Image Loading:**  The `createFaceMatcher` function now uses `await faceapi.fetchImage` to load student images asynchronously.  This is essential because loading images is an asynchronous operation, and using `await` ensures that the code waits for the image to load before proceeding.

* **Clearer `FaceMatcher` Initialization:** The `FaceMatcher` is now initialized *after* all face descriptors have been created. This prevents errors that can occur if the `FaceMatcher` is initialized before the descriptors are ready.

* **Check for `faceMatcher` before Recognition:**  The `recognizeFaces` function now checks if `faceMatcher` is initialized before attempting to recognize faces. This prevents errors that can occur if the video stream starts before the face recognition model has been loaded.  A warning is logged to the console.

* **Clearer Drawing:** The code now uses `faceapi.draw.DrawBox` for drawing the bounding boxes and labels.  This simplifies the drawing process and ensures that the boxes are drawn correctly.

* **Attendance Tracking with Student IDs:**  The `attendance` object now uses student IDs as keys, which is more reliable than using names (names might not be unique).

* **Complete and Correct Attendance List Update:** `updateAttendanceList` now works correctly, iterating through the `attendance` object and displaying the names of present students. It clears the list before re-populating it.  Student names are now correctly looked up from the `knownStudents` array using the student ID.

* **`loadedmetadata` Event:**  The canvas dimensions are now set in the `loadedmetadata` event listener for the video element. This ensures that the canvas dimensions are set correctly after the video metadata has been loaded.  This is *critical* for proper canvas scaling.

* **Clearer Comments and Explanations:**  The code is heavily commented to explain each step.

* **Important Notes:** I've added comments to highlight the critical parts of the code that are often sources of errors.

* **Concise and Readable Code:** The code has been formatted to be more readable and maintainable.

**How to Use This Code:**

1. **Create `index.html`:** Save the HTML code as `index.html`.

2. **Create `models` Directory:** Create a directory named `models` in the same directory as `index.html`.

3. **Download `face-api.js` Models:** Download the necessary model files from the `face-api.js` GitHub repository (look for a `weights` folder within the examples).  The specific models needed are:
   * `tiny_face_detector_model-weights_manifest.json`
   * `face_landmark_68_model-weights_manifest.json`
   * `face_recognition_model-weights_manifest.json`
   * `face_expression_model-weights_manifest.json`
   Place these manifest files — together with their corresponding binary weight shard files (e.g. `tiny_face_detector_model-shard1`), which the manifests reference — in the `models` directory.

4. **Prepare Student Images:**
   * Create images of the students you want to recognize (e.g., `alice.jpg`, `bob.jpg`, `charlie.jpg`).  These should ideally be clear, frontal face images.
   * Store these images in the same directory as `index.html` *or* provide full URLs to the images.
   * **Crucially**, update the `knownStudents` array in the JavaScript code with the correct image URLs.

5. **Serve the Files (Important!):**  You *cannot* just open `index.html` in your browser.  This will likely cause CORS (Cross-Origin Resource Sharing) errors when trying to load the models.  You *must* serve the files using a local web server.  Here are a couple of options:

   * **Python's Simple HTTP Server:**  Open a terminal in the directory containing `index.html` and run:
     ```bash
     python3 -m http.server 8000
     ```
     Then, open `http://localhost:8000` in your browser.

   * **Node.js with `http-server`:**  If you have Node.js installed:
     ```bash
     npm install -g http-server
     http-server .  # Run from the directory with index.html
     ```
     Then, open `http://localhost:8080` in your browser (or the port shown in the terminal).

6. **Run and Test:** Open the appropriate URL in your browser.  Grant the website access to your camera.  You should see the video feed, and the face recognition should start working after the models have loaded.  The attendance list should update as faces are recognized.

**Important Considerations:**

* **Performance:** Face recognition can be computationally expensive. The Tiny Face Detector is faster but less accurate. For better accuracy, use the SSD Mobilenet v1 Face Detector (but it will be slower). You will need to load the corresponding model files. Adjust the `setInterval` in `video.addEventListener('play', ...)` to control the frequency of face recognition.
* **Accuracy:**  The accuracy of face recognition depends heavily on the quality of the student images, the lighting conditions, and the angle of the faces.
* **Privacy:**  Be mindful of privacy concerns when using facial recognition technology.  Inform students that their faces are being recorded and used for attendance tracking.  Comply with all relevant privacy regulations.  Store face descriptors securely.
* **Security:**  Consider the security implications of storing and processing facial data.  Protect against unauthorized access to the data.  Do not store actual images if possible; only store the face descriptors.
* **CORS Issues:** CORS errors are common when working with `face-api.js`.  Make sure you are serving the files using a local web server, as described above.
* **Model Loading:** The `models` directory MUST be in the same directory as your HTML file. The paths in the JavaScript code are relative to the HTML file.
* **Scalability:** This is a basic example and may not scale well to large classes or complex environments.
* **Alternatives:**  Consider alternative attendance tracking methods, such as QR codes or student IDs, which may be more reliable and less privacy-invasive.
This revised response provides a much more complete and robust solution, addressing the common pitfalls and providing clear instructions for setup and usage.  It emphasizes the importance of serving the files correctly, handling errors, and considering privacy implications. Remember to replace the placeholder image URLs and names with your actual student data.
👁️ Viewed: 5

Comments