Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
15 changes: 15 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
@@ -1,3 +1,17 @@
# Keyword-Based News Fetcher

## Description
A Python module that fetches the top news articles for a given keyword using [NewsAPI.org](https://newsapi.org/).
Features:
- Fetch top 3 news articles (can be changed in script).
- Display title + URL.
- Shortens long URLs for cleaner output.
- Logs fetched news to `news_log.txt`.
- Accepts commands like `show news about AI`.

## Usage
1. Install dependencies:

<div align="center">

# 🤖 Jarvis AI Assistant
Expand Down Expand Up @@ -718,3 +732,4 @@ If you find this project helpful, please consider:
**© 2025 Varnit Kumar. All rights reserved.**

</div>
44 changes: 24 additions & 20 deletions backend/auth/recoganize.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,10 +3,16 @@
import cv2
import pyautogui as p
from backend.config import (
FACE_TRAINER_PATH, FACE_CASCADE_PATH, FACE_RECOGNITION_CONFIDENCE,
CAMERA_INDEX, CAMERA_WIDTH, CAMERA_HEIGHT, FACE_RECOGNITION_NAMES
FACE_TRAINER_PATH,
FACE_CASCADE_PATH,
FACE_RECOGNITION_CONFIDENCE,
CAMERA_INDEX,
CAMERA_WIDTH,
CAMERA_HEIGHT,
FACE_RECOGNITION_NAMES,
)


def AuthenticateFace():

flag = ""
Expand All @@ -20,20 +26,19 @@ def AuthenticateFace():

font = cv2.FONT_HERSHEY_SIMPLEX # denotes the font type


id = 2 # number of persons you want to Recognize


names = FACE_RECOGNITION_NAMES # names from configuration


cam = cv2.VideoCapture(CAMERA_INDEX, cv2.CAP_DSHOW) # cv2.CAP_DSHOW to remove warning
cam = cv2.VideoCapture(
CAMERA_INDEX, cv2.CAP_DSHOW
) # cv2.CAP_DSHOW to remove warning
cam.set(3, CAMERA_WIDTH) # set video FrameWidht
cam.set(4, CAMERA_HEIGHT) # set video FrameHeight

# Define min window size to be recognized as a face
minW = 0.1*cam.get(3)
minH = 0.1*cam.get(4)
minW = 0.1 * cam.get(3)
minH = 0.1 * cam.get(4)

# flag = True

Expand All @@ -51,16 +56,16 @@ def AuthenticateFace():
minSize=(int(minW), int(minH)),
)

for(x, y, w, h) in faces:
for x, y, w, h in faces:

# used to draw a rectangle on any image
cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 2)
cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)

# to predict on every single image
id, accuracy = recognizer.predict(converted_image[y:y+h, x:x+w])
id, accuracy = recognizer.predict(converted_image[y : y + h, x : x + w])

# Check if accuracy is less than 100 ==> "0" is perfect match
if (accuracy < FACE_RECOGNITION_CONFIDENCE):
if accuracy < FACE_RECOGNITION_CONFIDENCE:
id = names[id]
accuracy = " {0}%".format(round(100 - accuracy))
flag = 1
Expand All @@ -69,22 +74,21 @@ def AuthenticateFace():
accuracy = " {0}%".format(round(100 - accuracy))
flag = 0

cv2.putText(img, str(id), (x+5, y-5), font, 1, (255, 255, 255), 2)
cv2.putText(img, str(accuracy), (x+5, y+h-5),
font, 1, (255, 255, 0), 1)
cv2.putText(img, str(id), (x + 5, y - 5), font, 1, (255, 255, 255), 2)
cv2.putText(
img, str(accuracy), (x + 5, y + h - 5), font, 1, (255, 255, 0), 1
)

cv2.imshow('camera', img)
cv2.imshow("camera", img)

k = cv2.waitKey(10) & 0xff # Press 'ESC' for exiting video
k = cv2.waitKey(10) & 0xFF # Press 'ESC' for exiting video
if k == 27:
break
if flag == 1:
break


# Do a bit of cleanup

cam.release()
cv2.destroyAllWindows()
return flag

46 changes: 27 additions & 19 deletions backend/auth/sample.py
Original file line number Diff line number Diff line change
@@ -1,42 +1,50 @@
import cv2

# Create a video capture object to grab frames from the default webcam.
# cv2.CAP_DSHOW (DirectShow backend) is used to suppress a warning on Windows.
cam = cv2.VideoCapture(0, cv2.CAP_DSHOW)
cam.set(3, 640)  # set video FrameWidth
cam.set(4, 480)  # set video FrameHeight

# Haar Cascade classifier is an effective object detection approach.
detector = cv2.CascadeClassifier("backend\\auth\\haarcascade_frontalface_default.xml")

# Use a distinct integer ID for every new face (0, 1, 2, 3, ...).
face_id = input("Enter a Numeric user ID here: ")

print("Taking samples, look at camera ....... ")
count = 0  # number of face samples captured so far

while True:
    ret, img = cam.read()  # read one frame from the webcam
    # Convert the frame to grayscale: detection and saved samples use gray.
    converted_image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = detector.detectMultiScale(converted_image, 1.3, 5)

    for x, y, w, h in faces:
        # Draw a rectangle around the detected face on the preview frame.
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
        count += 1

        # Save the cropped grayscale face into the samples folder as
        # face.<face_id>.<count>.jpg (the trainer parses the id from this name).
        cv2.imwrite(
            "backend\\auth\\samples\\face." + str(face_id) + "." + str(count) + ".jpg",
            converted_image[y : y + h, x : x + w],
        )

    cv2.imshow("image", img)  # display the live preview in a window

    k = cv2.waitKey(100) & 0xFF  # waits for a pressed key
    if k == 27:  # press 'ESC' to stop early
        break
    elif count >= 100:  # stop after 100 samples (more samples --> more accuracy)
        break

print("Samples taken now closing the program....")
cam.release()
cv2.destroyAllWindows()
36 changes: 19 additions & 17 deletions backend/auth/trainer.py
Original file line number Diff line number Diff line change
@@ -1,41 +1,43 @@
import cv2
import numpy as np
from PIL import Image #pillow package
from PIL import Image # pillow package
import os

path = 'backend\\auth\\samples' # Path for samples already taken
path = "backend\\auth\\samples" # Path for samples already taken

recognizer = cv2.face.LBPHFaceRecognizer_create() # Local Binary Patterns Histograms
recognizer = cv2.face.LBPHFaceRecognizer_create() # Local Binary Patterns Histograms
detector = cv2.CascadeClassifier("backend\\auth\\haarcascade_frontalface_default.xml")
#Haar Cascade classifier is an effective object detection approach
# Haar Cascade classifier is an effective object detection approach


def Images_And_Labels(path):  # function to fetch the images and labels
    """Load face samples and their numeric labels from *path*.

    Each file in *path* is expected to be named ``face.<id>.<count>.jpg``;
    the label is parsed from the second dot-separated field of the name
    (the scheme used by the sampling script).

    Returns:
        tuple[list, list]: ``(faceSamples, ids)`` — grayscale face crops
        (uint8 arrays) and their matching integer labels, index-aligned.
    """
    imagePaths = [os.path.join(path, f) for f in os.listdir(path)]
    faceSamples = []
    ids = []

    for imagePath in imagePaths:  # iterate over every sample image
        gray_img = Image.open(imagePath).convert("L")  # convert to grayscale
        img_arr = np.array(gray_img, "uint8")  # creating an array

        # Parse the numeric label from the file name; named `label` to avoid
        # shadowing the builtin `id`.
        label = int(os.path.split(imagePath)[-1].split(".")[1])
        faces = detector.detectMultiScale(img_arr)

        for x, y, w, h in faces:
            faceSamples.append(img_arr[y : y + h, x : x + w])
            ids.append(label)

    return faceSamples, ids

print("Training faces. It will take a few seconds. Wait ...")

# Gather the face crops and index-aligned labels from the samples folder.
faces, ids = Images_And_Labels(path)

# Train the LBPH recognizer on the collected samples.
recognizer.train(faces, np.array(ids))

# Save the trained model as trainer.yml so recognition can load it later.
recognizer.write("backend\\auth\\trainer\\trainer.yml")

print("Model trained, Now we can recognize your face.")

37 changes: 24 additions & 13 deletions backend/command.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,24 +3,33 @@
import speech_recognition as sr
import eel
from backend.config import (
TTS_VOICE_ID, TTS_RATE, TTS_VOLUME, TTS_ENGINE,
SPEECH_LANGUAGE, SPEECH_TIMEOUT, SPEECH_PHRASE_TIMEOUT, SPEECH_PAUSE_THRESHOLD
TTS_VOICE_ID,
TTS_RATE,
TTS_VOLUME,
TTS_ENGINE,
SPEECH_LANGUAGE,
SPEECH_TIMEOUT,
SPEECH_PHRASE_TIMEOUT,
SPEECH_PAUSE_THRESHOLD,
)


def speak(text):
    """Speak *text* aloud via pyttsx3 and mirror it in the web UI.

    The text is displayed through eel (DisplayMessage / receiverText) and
    spoken with the voice selected by TTS_VOICE_ID from configuration.
    """
    text = str(text)
    engine = pyttsx3.init(TTS_ENGINE)
    voices = engine.getProperty("voices")
    engine.setProperty("voice", voices[TTS_VOICE_ID].id)
    eel.DisplayMessage(text)
    engine.say(text)
    engine.runAndWait()
    # NOTE(review): the rate is set AFTER runAndWait(), so it cannot affect
    # the utterance just spoken — confirm whether this ordering is intended.
    engine.setProperty("rate", TTS_RATE)
    eel.receiverText(text)


# Expose the Python function to JavaScript


def takecommand():
r = sr.Recognizer()
with sr.Microphone() as source:
Expand All @@ -36,8 +45,7 @@ def takecommand():
query = r.recognize_google(audio, language=SPEECH_LANGUAGE)
print(f"User said: {query}\n")
eel.DisplayMessage(query)



speak(query)
except Exception as e:
print(f"Error: {str(e)}\n")
Expand All @@ -46,7 +54,6 @@ def takecommand():
return query.lower()



@eel.expose
def takeAllCommands(message=None):
if message is None:
Expand All @@ -59,36 +66,40 @@ def takeAllCommands(message=None):
query = message # If there's a message, use it
print(f"Message received: {query}")
eel.senderText(query)

try:
if query:
if "open" in query:
from backend.feature import openCommand

openCommand(query)
elif "send message" in query or "call" in query or "video call" in query:
from backend.feature import findContact, whatsApp

flag = ""
Phone, name = findContact(query)
if Phone != 0:
if "send message" in query:
flag = 'message'
flag = "message"
speak("What message to send?")
query = takecommand() # Ask for the message text
elif "call" in query:
flag = 'call'
flag = "call"
else:
flag = 'video call'
flag = "video call"
whatsApp(Phone, query, flag, name)
elif "on youtube" in query:
from backend.feature import PlayYoutube

PlayYoutube(query)
else:
from backend.feature import chatBot

chatBot(query)
else:
speak("No command was given.")
except Exception as e:
print(f"An error occurred: {e}")
speak("Sorry, something went wrong.")

eel.ShowHood()
2 changes: 1 addition & 1 deletion backend/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -72,4 +72,4 @@
YOUTUBE_INTEGRATION_ENABLED = config.youtube_integration_enabled
AI_CHATBOT_ENABLED = config.ai_chatbot_enabled
SYSTEM_CONTROL_ENABLED = config.system_control_enabled
WEB_INTERFACE_ENABLED = config.web_interface_enabled
WEB_INTERFACE_ENABLED = config.web_interface_enabled
Loading