# -*- coding: utf-8 -*-
"""Train and evaluate a small feed-forward classifier on the Iris dataset.

Originally generated by Colab:
https://colab.research.google.com/drive/1p0-6f2W12BK0UkzE_Pmi-_unUccbz-w-
"""
import numpy as np
import tensorflow as tf
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder


def load_data(test_size=0.2, random_state=42):
    """Load Iris, one-hot encode the labels, and return a train/test split.

    Returns:
        (X_train, X_test, y_train, y_test) where X arrays have 4 feature
        columns and y arrays are one-hot encoded over the 3 classes.
    """
    iris = load_iris()
    X = iris.data                    # 4 features: sepal/petal length & width
    y = iris.target.reshape(-1, 1)   # class ids 0, 1, 2 as a column vector

    # sparse_output=False yields a dense ndarray Keras can consume directly.
    encoder = OneHotEncoder(sparse_output=False)
    y_one_hot = encoder.fit_transform(y)

    # Stratify on the original class ids so both splits keep the
    # 3-class balance (plain random splits can skew small datasets).
    return train_test_split(
        X,
        y_one_hot,
        test_size=test_size,
        random_state=random_state,
        stratify=iris.target,
    )


def build_model(n_features, n_classes=3, hidden_units=10):
    """Build and compile a one-hidden-layer softmax classifier.

    Args:
        n_features: number of input features per sample.
        n_classes: number of output classes (softmax width).
        hidden_units: size of the single ReLU hidden layer.

    Returns:
        A compiled tf.keras.Sequential model.
    """
    model = tf.keras.Sequential([
        # tf.keras.Input(shape=...) is the portable form; the old
        # InputLayer(input_shape=...) argument is deprecated and
        # rejected by Keras 3.
        tf.keras.Input(shape=(n_features,)),
        tf.keras.layers.Dense(hidden_units, activation='relu'),
        tf.keras.layers.Dense(n_classes, activation='softmax'),
    ])
    model.compile(
        optimizer='adam',
        loss='categorical_crossentropy',  # matches one-hot labels
        metrics=['accuracy'],
    )
    return model


def main():
    """Run the full train/evaluate/predict pipeline and print results."""
    X_train, X_test, y_train, y_test = load_data()

    model = build_model(n_features=X_train.shape[1])

    # History object is not inspected afterwards, so it is not kept.
    model.fit(
        X_train,
        y_train,
        epochs=100,
        batch_size=16,
        validation_split=0.1,
        verbose=1,
    )

    loss, accuracy = model.evaluate(X_test, y_test)
    print(f"Test Loss: {loss:.4f}")
    print(f"Test Accuracy: {accuracy:.4f}")

    predictions = model.predict(X_test)
    predicted_classes = np.argmax(predictions, axis=1)
    true_classes = np.argmax(y_test, axis=1)
    print(f"True Labels: {true_classes}")
    print(f"Predicted Labels: {predicted_classes}")


if __name__ == "__main__":
    main()