Centralized voice control for diverse IoT devices with AI intent parsing (Node.js)

```javascript
// Import necessary modules
const express = require('express'); // For creating a web server to handle voice commands
const bodyParser = require('body-parser'); // For parsing JSON data from voice command requests
const { NlpManager } = require('node-nlp'); // For Natural Language Processing (NLP) - intent recognition
const mqtt = require('mqtt'); // For communication with IoT devices via MQTT (Message Queuing Telemetry Transport)

// Configuration
const PORT = process.env.PORT || 3000; // Port for the server
const MQTT_BROKER_URL = 'mqtt://your_mqtt_broker_address'; // Replace with your MQTT broker address (e.g., 'mqtt://localhost')
const MQTT_TOPIC_PREFIX = 'iot/devices/'; // Topic prefix for device communication
const MQTT_USERNAME = 'your_mqtt_username'; // MQTT username (if required)
const MQTT_PASSWORD = 'your_mqtt_password'; // MQTT password (if required)

// Initialize Express app
const app = express();
app.use(bodyParser.json()); // Use JSON body parser for incoming requests

// Initialize NLP Manager (forceNER makes sure named entity extraction runs when processing utterances)
const manager = new NlpManager({ languages: ['en'], forceNER: true });

// Initialize MQTT client
const mqttClient = mqtt.connect(MQTT_BROKER_URL, {
    username: MQTT_USERNAME,
    password: MQTT_PASSWORD
});

mqttClient.on('connect', () => {
    console.log('Connected to MQTT broker');
});

mqttClient.on('error', (error) => {
    console.error('MQTT connection error:', error);
});


// Train the NLP model (Define intents and utterances)
async function trainNLPModel() {
    // Intent: Turn on device
    // node-nlp references named entities in training utterances with the %entity% syntax.
    manager.addDocument('en', 'turn on the %device%', 'device.turn_on');
    manager.addDocument('en', 'switch on the %device%', 'device.turn_on');
    manager.addDocument('en', 'activate the %device%', 'device.turn_on');
    manager.addDocument('en', 'power on the %device%', 'device.turn_on');

    // Intent: Turn off device
    manager.addDocument('en', 'turn off the %device%', 'device.turn_off');
    manager.addDocument('en', 'switch off the %device%', 'device.turn_off');
    manager.addDocument('en', 'deactivate the %device%', 'device.turn_off');
    manager.addDocument('en', 'power off the %device%', 'device.turn_off');

    // Intent: Set brightness
    // The numeric level is picked up at runtime by node-nlp's builtin 'number' entity,
    // so the training utterances simply use an example value.
    manager.addDocument('en', 'set the brightness of the %device% to 50', 'device.set_brightness');
    manager.addDocument('en', 'adjust the brightness of the %device% to 50', 'device.set_brightness');
    manager.addDocument('en', 'set the %device% brightness level to 50', 'device.set_brightness');

    // Intent: Get device status
    manager.addDocument('en', 'what is the status of the %device%', 'device.get_status');
    manager.addDocument('en', 'is the %device% on or off', 'device.get_status');
    manager.addDocument('en', 'check the status of the %device%', 'device.get_status');

    // Entities (example list of devices) - you could fetch this from a database later.
    // Enum entity options are registered with addNamedEntityText(entity, option, languages, texts).
    const deviceNames = ['light', 'lamp', 'fan', 'thermostat'];
    deviceNames.forEach((device) => manager.addNamedEntityText('device', device, ['en'], [device]));

    // Train the model
    await manager.train();
    manager.save('./model.json'); // Save the trained model for later use
    console.log('NLP model trained and saved.');
}

// Load the NLP Model
async function loadNLPModel() {
    try {
        await manager.load('./model.json');
        console.log('NLP model loaded.');
    } catch (error) {
        console.log('Error loading NLP model. Training a new one.');
        await trainNLPModel();
    }
}


// API endpoint for processing voice commands
app.post('/voice-command', async (req, res) => {
    const command = req.body.command; // Get the voice command from the request body
    console.log('Received command:', command);

    if (!command) {
        return res.status(400).json({ error: 'Command is required' });
    }

    try {
        const result = await manager.process('en', command); // Process the command using the NLP model
        console.log('NLP Processing Result:', result);

        if (result.intent === 'None') {
            return res.status(400).json({ error: 'Intent not recognized' });
        }

        // Extract entities and intent
        const intent = result.intent;
        const entities = result.entities;

        // Extract device name
        const deviceName = entities.find(entity => entity.entity === 'device')?.option;

        if (!deviceName) {
            return res.status(400).json({ error: 'Device name not found in the command' });
        }

        // Construct MQTT topic
        const topic = MQTT_TOPIC_PREFIX + deviceName;

        // Construct payload based on intent and entities
        let payload = {};

        switch (intent) {
            case 'device.turn_on':
                payload = { action: 'turn_on' };
                break;
            case 'device.turn_off':
                payload = { action: 'turn_off' };
                break;
            case 'device.set_brightness': {
                // node-nlp's builtin NER typically reports numeric values as the 'number' entity.
                const levelEntity = entities.find(entity => entity.entity === 'number');
                const brightnessLevel = levelEntity?.resolution?.value;

                if (brightnessLevel === undefined) {
                    return res.status(400).json({ error: 'Brightness level not found in the command' });
                }
                payload = { action: 'set_brightness', level: Number(brightnessLevel) };
                break;
            }
            case 'device.get_status':
                payload = { action: 'get_status'};
                break;
            default:
                return res.status(400).json({ error: 'Unsupported intent' });
        }

        // Publish the command to the MQTT broker
        mqttClient.publish(topic, JSON.stringify(payload), (err) => {
            if (err) {
                console.error('Error publishing to MQTT:', err);
                return res.status(500).json({ error: 'Failed to publish to MQTT' });
            }
            console.log(`Published to topic: ${topic}, Payload: ${JSON.stringify(payload)}`);
            res.json({ success: true, message: `Command sent to ${deviceName}`, intent: intent, payload: payload });
        });

    } catch (error) {
        console.error('Error processing command:', error);
        res.status(500).json({ error: 'Failed to process command' });
    }
});

// Load (or train) the NLP model, then start the server so that no request is
// processed before the model is ready.
loadNLPModel().then(() => {
    app.listen(PORT, () => {
        console.log(`Server listening on port ${PORT}`);
    });
});


/*
Example Usage:

1.  Replace placeholders: Update MQTT_BROKER_URL, MQTT_USERNAME, and MQTT_PASSWORD with your actual MQTT broker credentials.

2.  Install dependencies:  npm install express body-parser node-nlp mqtt

3.  Run the server: node your_file_name.js

4.  Send voice commands via POST request:

    You can use a tool like Postman or curl to send POST requests to the /voice-command endpoint.

    Example request (using curl):
    curl -X POST -H "Content-Type: application/json" -d '{"command": "turn on the light"}' http://localhost:3000/voice-command

5.  IoT Device Implementation (Example using Node.js and MQTT):

    Create a separate script for each IoT device to subscribe to its specific MQTT topic and handle commands.

    Example device code (light.js):

    const mqtt = require('mqtt');
    const MQTT_BROKER_URL = 'mqtt://your_mqtt_broker_address'; // Same as server
    const MQTT_TOPIC = 'iot/devices/light'; // Specific topic for the light
    const MQTT_USERNAME = 'your_mqtt_username'; // MQTT username (if required)
    const MQTT_PASSWORD = 'your_mqtt_password'; // MQTT password (if required)

    const client = mqtt.connect(MQTT_BROKER_URL, {
      username: MQTT_USERNAME,
      password: MQTT_PASSWORD
    });

    client.on('connect', () => {
      console.log('Light connected to MQTT broker');
      client.subscribe(MQTT_TOPIC);
    });

    client.on('message', (topic, message) => {
      const payload = JSON.parse(message.toString());
      console.log(`Received command: ${JSON.stringify(payload)} on topic: ${topic}`);

      switch (payload.action) {
        case 'turn_on':
          console.log('Turning on the light');
          // Implement your hardware control logic here to turn on the light.
          break;
        case 'turn_off':
          console.log('Turning off the light');
          // Implement your hardware control logic here to turn off the light.
          break;
        case 'set_brightness':
          const level = payload.level;
          console.log(`Setting brightness to ${level}`);
          // Implement your hardware control logic to set the brightness.
          break;
        case 'get_status':
          // Simulate status retrieval
          const status = Math.random() < 0.5 ? 'on' : 'off';
          console.log(`Simulating light status: ${status}`);
          break;
        default:
          console.log('Unknown action');
      }
    });

    client.on('error', (error) => {
      console.error('MQTT error:', error);
    });

    // Run the device script: node light.js

Explanation:

1. Dependencies:

    express: Creates a web server to handle voice command requests from a frontend or other service.
    body-parser: Parses the JSON body of the incoming POST requests.
    node-nlp:  The core NLP library used for intent recognition and entity extraction.  It helps understand what the user *means* by the voice command.
    mqtt:  A library for communicating with an MQTT broker, which acts as a central hub for sending commands to and receiving data from IoT devices.

2. Configuration:

    Sets up the port the server listens on, the MQTT broker URL, topic prefix, and credentials.  Important: Replace the placeholder values with your actual broker details, or better, read them from environment variables (see the configuration sketch after this explanation list).  MQTT_TOPIC_PREFIX is useful for organizing topics related to your devices.

3. Initialization:

    Initializes the Express app, the NLP Manager, and the MQTT client.  The MQTT client connects to the MQTT broker.

4. NLP Model Training (trainNLPModel function):

    This is the most crucial part.  It defines the *intents* that the system can understand and provides example *utterances* (sentences) that trigger those intents.
    manager.addDocument():  Registers an example sentence (utterance) and associates it with a specific intent.  The %device% placeholder marks where the 'device' entity appears; numeric brightness levels are extracted at runtime by node-nlp's builtin 'number' entity.
    manager.addNamedEntityText(): Registers each known device (light, lamp, fan, thermostat) as an option of the named entity 'device'.  The NLP engine uses this information to extract the device name from the user's command.
    manager.train(): Trains the NLP model using the provided data.  This can take a few moments, which is why the model is saved and reused.
    manager.save(): Saves the trained model to a file (model.json) so that it can be loaded later without retraining.

5. Loading NLP Model (loadNLPModel function):

    Loads the trained NLP model from the 'model.json' file. If the file doesn't exist, it trains a new model and saves it.

6. API Endpoint (/voice-command):

    This is the API endpoint that receives voice commands.
    req.body.command:  Extracts the voice command from the request body.  It's assumed the voice command is sent as a JSON object with a "command" field.
    manager.process(): Uses the trained NLP model to analyze the command and determine the intent and extract entities.
    Error Handling: Checks for invalid commands, unrecognized intents, and missing device names.
    MQTT Topic Construction: Constructs the MQTT topic based on the device name.  This ensures that the command is sent to the correct device.
    Payload Construction:  Creates a JSON payload based on the detected intent and extracted entities.  The payload contains the action to be performed (e.g., turn_on, turn_off, set_brightness) and any relevant parameters (e.g., brightness level).
    mqttClient.publish():  Publishes the command to the MQTT broker on the device's specific topic.

7. MQTT Device Implementation (Example):

    The provided example demonstrates how an IoT device (e.g., a light) can subscribe to its MQTT topic and handle commands.
    client.subscribe(): Subscribes the device to its specific topic.
    client.on('message'):  This event handler is triggered when a message is received on the subscribed topic.
    payload Handling:  Parses the JSON payload from the MQTT message and executes the appropriate action (e.g., turning the light on or off, setting the brightness).
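
Configuration via environment variables (referenced from the Configuration section above):

A minimal sketch, assuming the deployment environment defines MQTT_BROKER_URL,
MQTT_USERNAME and MQTT_PASSWORD (the variable names are assumptions for illustration).
Reading them via process.env avoids committing credentials to source control.

    // config.js - hypothetical helper; adjust names and defaults to your environment.
    module.exports = {
        PORT: process.env.PORT || 3000,
        MQTT_BROKER_URL: process.env.MQTT_BROKER_URL || 'mqtt://localhost',
        MQTT_TOPIC_PREFIX: process.env.MQTT_TOPIC_PREFIX || 'iot/devices/',
        MQTT_USERNAME: process.env.MQTT_USERNAME,  // undefined if not set
        MQTT_PASSWORD: process.env.MQTT_PASSWORD
    };

    // Usage in the server file:
    // const { MQTT_BROKER_URL, MQTT_USERNAME, MQTT_PASSWORD } = require('./config');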

Key Considerations and Possible Extensions:

* Error Handling:  Includes robust error handling to gracefully handle invalid commands, unrecognized intents, and MQTT connection errors.
* MQTT Integration: Demonstrates how to use MQTT to communicate with IoT devices.
* NLP Model Persistence: Saves the trained NLP model to a file so that it doesn't need to be retrained every time the server restarts.  This saves a significant amount of time.
* Entity Extraction: Extracts the device name from the command using named entity recognition.
* Intent-Based Actions: Constructs the MQTT payload based on the detected intent and extracted entities.
* Code Organization: The code is well-organized and includes comments to explain each step.
* Scalability: The MQTT-based architecture allows for easy scaling to support a large number of IoT devices.  Each device only needs to subscribe to its own topic.
* Security:
    * MQTT Authentication: The example includes optional MQTT username and password authentication.  You should always use authentication (and ideally TLS) to protect your MQTT broker; a TLS connection sketch follows this list.
    * Input Validation: While basic, the code checks for required parameters. You should implement more thorough input validation to prevent malicious commands (a minimal validation sketch follows this list).
* Dynamic Device List:  The current example uses a hardcoded list of devices in the NLP model.  In a real-world application, you would fetch the device list from a database or configuration file, registering each one with manager.addNamedEntityText().  This lets you add and remove devices without changing the code (see the dynamic device list sketch after this list).
* Context Management: For more complex interactions, you might need to implement context management.  For example, the user might say "turn on the light" and then later say "set the brightness to 50%".  The system needs to remember which light the user is referring to.  This can be achieved using session variables or a database (see the context management sketch after this list).
* Feedback Mechanisms: Consider implementing feedback mechanisms to let the user know that their command has been executed successfully. This could involve sending a confirmation message back to the user's device or changing the state of the device (e.g., turning on a light).
* Device Status Reporting:  Devices can publish their status (e.g., "on", "off", "brightness level") to a separate MQTT topic. The server can then subscribe to these topics and provide real-time status updates to the user (see the status reporting sketch after this list).
* More Complex Intents:  The example includes basic intents.  You can extend the NLP model to support more complex intents, such as "turn on all the lights in the living room", "set the temperature to 20 degrees", or "play music on the speaker".
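
Securing the MQTT connection (see the Security bullet above):

A minimal sketch of connecting over TLS, assuming the broker exposes MQTT over TLS on
port 8883 and that its CA certificate is available at ./certs/ca.crt (the port and path
are assumptions; adjust to your broker).  mqtt.connect passes these options through to
Node's TLS layer.

    const fs = require('fs');
    const mqtt = require('mqtt');

    const secureClient = mqtt.connect('mqtts://your_mqtt_broker_address:8883', {
        username: process.env.MQTT_USERNAME,
        password: process.env.MQTT_PASSWORD,
        ca: fs.readFileSync('./certs/ca.crt'),  // broker's CA certificate (assumed path)
        rejectUnauthorized: true                // reject brokers with invalid certificates
    });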
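
Input validation (see the Input Validation bullet above):

A minimal sketch of a stricter check on the incoming command before it reaches the NLP
model.  The limits (non-empty string, at most 200 characters) are arbitrary assumptions.

    function isValidCommand(command) {
        return typeof command === 'string'
            && command.trim().length > 0
            && command.length <= 200;  // reject suspiciously long input
    }

    // In the /voice-command handler:
    // if (!isValidCommand(req.body.command)) {
    //     return res.status(400).json({ error: 'Invalid command' });
    // }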
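
Dynamic device list (see the Dynamic Device List bullet above):

A minimal sketch of loading device names from a JSON file instead of hardcoding them.
The file name devices.json and its shape (a plain array of names) are assumptions for
illustration; a database query would slot in the same way.

    // devices.json: ["light", "lamp", "fan", "thermostat"]
    const fs = require('fs');

    function registerDevices(manager) {
        const names = JSON.parse(fs.readFileSync('./devices.json', 'utf8'));
        names.forEach((device) =>
            manager.addNamedEntityText('device', device, ['en'], [device])
        );
        return names;
    }

    // In trainNLPModel(), replace the hardcoded list with: registerDevices(manager);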
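
Context management (see the Context Management bullet above):

A minimal sketch that remembers the last device each client mentioned, keyed by a
client-supplied sessionId (the sessionId field is an assumption; the client would have
to send it with every request).  An in-memory Map only works for a single process; use
a database or cache for anything larger.

    const lastDeviceBySession = new Map();

    function resolveDevice(sessionId, deviceFromNlp) {
        if (deviceFromNlp) {
            lastDeviceBySession.set(sessionId, deviceFromNlp);  // remember for follow-up commands
            return deviceFromNlp;
        }
        return lastDeviceBySession.get(sessionId);              // fall back to the previous device
    }

    // In the /voice-command handler:
    // const deviceName = resolveDevice(req.body.sessionId, extractedDevice);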
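
Device status reporting (see the Device Status Reporting bullet above):

A minimal sketch of both halves: the device publishes its state to a status topic, and
the server subscribes to all status topics with an MQTT wildcard and keeps the latest
state in memory.  The topic layout iot/devices/<name>/status is an assumption that
matches the prefix used above.

    // On the device (e.g. in light.js), publish the state whenever it changes:
    // client.publish('iot/devices/light/status', JSON.stringify({ state: 'on' }), { retain: true });

    // On the server:
    const deviceStatus = {};  // latest known state per device name

    mqttClient.on('connect', () => {
        mqttClient.subscribe(MQTT_TOPIC_PREFIX + '+/status');  // '+' matches one topic level
    });

    mqttClient.on('message', (topic, message) => {
        const match = topic.match(/^iot\/devices\/([^/]+)\/status$/);
        if (match) {
            deviceStatus[match[1]] = JSON.parse(message.toString());
        }
    });

    // The /voice-command handler could then answer 'device.get_status' from deviceStatus
    // instead of only publishing a get_status command to the device.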

This complete example provides a solid foundation for building a voice-controlled IoT system. Remember to adapt it to your specific hardware and software environment.  Focus on expanding the NLP model and implementing robust error handling and security measures.
*/
```