-rw-r--r--  .devcontainer/Dockerfile         32
-rw-r--r--  .devcontainer/devcontainer.json  56
-rw-r--r--  README.md                         6
3 files changed, 3 insertions(+), 91 deletions(-)
diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile
deleted file mode 100644
index 9bcb4d7..0000000
--- a/.devcontainer/Dockerfile
+++ /dev/null
@@ -1,32 +0,0 @@
-FROM osrf/ros:noetic-desktop-full
-
-# Add vscode user with same UID and GID as your host system
-# (copied from https://code.visualstudio.com/remote/advancedcontainers/add-nonroot-user#_creating-a-nonroot-user)
-ARG USERNAME=vscode
-ARG USER_UID=1000
-ARG USER_GID=$USER_UID
-RUN groupadd --gid $USER_GID $USERNAME \
- && useradd -s /bin/bash --uid $USER_UID --gid $USER_GID -m $USERNAME \
- && apt-get update \
- && apt-get install -y sudo \
- && echo $USERNAME ALL=\(root\) NOPASSWD:ALL > /etc/sudoers.d/$USERNAME \
- && chmod 0440 /etc/sudoers.d/$USERNAME
-# Switch from root to user
-USER $USERNAME
-
-# Add user to video group to allow access to webcam
-RUN sudo usermod --append --groups video $USERNAME
-
-# Update all packages
-RUN sudo apt update && sudo apt upgrade -y
-
-# Install Git and pip for Python 3
-RUN sudo apt install -y git python3-pip
-
-# Rosdep update
-RUN rosdep update
-
-RUN pip3 install jinja2 ollama geocoder requests python-dotenv parsimonious SpeechRecognition
-
-# Source the ROS setup file
-RUN echo "source /opt/ros/${ROS_DISTRO}/setup.bash" >> ~/.bashrc
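
As background for the `ollama` client that this Dockerfile pip-installs, here is a minimal sketch of the kind of chat call the wrapper builds on. It assumes a locally running ollama server, and the model name is illustrative rather than taken from this repository:

```python
# Hedged sketch only: assumes the standard `ollama` Python client and a locally
# running ollama server; the model name "llama3" is illustrative, not from this repo.
import ollama

response = ollama.chat(
    model="llama3",
    messages=[{"role": "user", "content": "add 2 and 2 for me"}],
)
print(response["message"]["content"])
```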
diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
deleted file mode 100644
index 53bcc43..0000000
--- a/.devcontainer/devcontainer.json
+++ /dev/null
@@ -1,56 +0,0 @@
-{
- "name": "noetic-llama devcontainer",
- "dockerFile": "Dockerfile",
- "runArgs": [
- "--privileged",
- "--network=host",
- "-v=/dev:/dev",
- "--privileged",
- "--runtime=nvidia",
- "--device-cgroup-rule" "a *:* rmw",
- "--cap-add=SYS_PTRACE",
- "--security-opt=seccomp:unconfined",
- "--security-opt=apparmor:unconfined",
- "--volume=/tmp/.X11-unix:/tmp/.X11-unix",
- "--volume=/home/agilex/.Xauthority:/home/ros/.Xauthority",
- "--gpus=all",
- "--env-file","apikeys.env"
- ],
- "containerEnv": {
- "DISPLAY": ":0",
- "LIBGL_ALWAYS_SOFTWARE": "1" // Needed for software rendering of opengl
- },
- "workspaceMount": "source=${localWorkspaceFolder},target=/${localWorkspaceFolderBasename},type=bind",
- "workspaceFolder": "/${localWorkspaceFolderBasename}",
- "mounts": [
- "source=${localEnv:HOME}${localEnv:USERPROFILE}/.bash_history,target=/home/vscode/.bash_history,type=bind"
- ],
- "features": {
- "ghcr.io/devcontainers/features/python:1": {}
- },
- "customizations": {
- "vscode": {
- "extensions": [
- "dotjoshjohnson.xml",
- "zachflower.uncrustify",
- "ms-azuretools.vscode-docker",
- "ms-iot.vscode-ros",
- "ms-python.python",
- "ms-vscode.cpptools",
- "redhat.vscode-yaml",
- "smilerobotics.urdf",
- "streetsidesoftware.code-spell-checker",
- "twxs.cmake",
- "yzhang.markdown-all-in-one"
- ]
- },
- "settings": {
- "terminal.integrated.profiles.linux": {
- "bash": {
- "path": "bash"
- }
- },
- "terminal.integrated.defaultProfile.linux": "bash"
- }
- }
-}
diff --git a/README.md b/README.md
index e955c56..4301e26 100644
--- a/README.md
+++ b/README.md
@@ -1,8 +1,9 @@
# noetic-llama
-ROS Noetic Wrapper for ollama, allowing for function calling
+Docker containers (managed with `docker-compose`) required for LCASTOR intent recognition and transcription with Whisper.
+Currently, `whisperwrapper` has to be run outside of Docker because of problems passing the microphone through to the container, which is why it is symlinked here.
-Given a python library [`capabilities`](/noetic-llama/src/ollamawrapper/src/capabilities) containing only functions, it inspects this library to generate an initial prompt to tell the
+Given a Python library `capabilities` containing only functions, the wrapper inspects this library to generate an initial prompt that tells the
LLM the possible functions. Then, with a ROS service call, these functions are called:
`rosrun ollamawrapper ollamawrapper`
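
The prompt-generation step described above (inspecting a library of plain functions) could look roughly like the following sketch. The stand-in module, the example `add` function, and the prompt wording are assumptions for illustration, not the wrapper's actual code:

```python
# Sketch: build the initial prompt by inspecting a module of capability functions.
# The stand-in module, example function and prompt wording are illustrative only.
import inspect
import types

# Stand-in for the real `capabilities` package, which is just a set of plain functions.
capabilities = types.ModuleType("capabilities")

def add(num1=0, num2=1):
    """Add two numbers together."""
    return num1 + num2

capabilities.add = add

def build_function_prompt(module):
    lines = ["You can call only the following functions:"]
    for name, func in inspect.getmembers(module, inspect.isfunction):
        signature = inspect.signature(func)
        summary = (inspect.getdoc(func) or "no description").splitlines()[0]
        lines.append(f"{name}{signature}: {summary}")
    return "\n".join(lines)

print(build_function_prompt(capabilities))
# You can call only the following functions:
# add(num1=0, num2=1): Add two numbers together.
```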
@@ -25,4 +26,3 @@ add(num1=0, num2=1):
## TODOs
- [ ] Make a proper parser for the function calls returned by ollama instead of just using `exec()`; this will let us fetch the functions' return values, and it is also much safer. A grammar has already been written (see [ollamafunctiongrammar.ppeg](/noetic-llama/src/ollamawrapper/src/ollamafunctiongrammar.ppeg)); the abstract syntax tree parsing still needs finishing (see [parser.py](/noetic-llama/src/ollamawrapper/src/parser.py)). A rough sketch of the approach is included below.
- - [ ] Fix the docker container. I've been working in an Ubuntu 20.04 VM so far
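
For the first TODO, a minimal sketch of parsing a returned call such as `add(num1=0, num2=1)` with `parsimonious` instead of `exec()`. The grammar below is a simplified stand-in for ollamafunctiongrammar.ppeg (keyword arguments with integer values only), and the capability table is illustrative:

```python
# Sketch of parsing a call like add(num1=0, num2=1) with parsimonious and
# dispatching it against a whitelist of functions, instead of exec().
# Simplified stand-in grammar and capability table; not the repo's real parser.
from parsimonious.grammar import Grammar
from parsimonious.nodes import NodeVisitor

grammar = Grammar(r"""
    call      = name "(" arglist ")"
    arglist   = arg more_args*
    more_args = "," sp arg
    arg       = name "=" number
    name      = ~"[A-Za-z_][A-Za-z0-9_]*"
    number    = ~"-?[0-9]+"
    sp        = ~" *"
""")

class CallVisitor(NodeVisitor):
    """Turn the parse tree into (function_name, kwargs)."""

    def visit_call(self, node, visited_children):
        name, _, kwargs, _ = visited_children
        return name, kwargs

    def visit_arglist(self, node, visited_children):
        first, rest = visited_children
        args = [first]
        if isinstance(rest, list):          # one or more ", name=value" tails
            args.extend(rest)
        return dict(args)

    def visit_more_args(self, node, visited_children):
        return visited_children[2]          # keep only the arg tuple

    def visit_arg(self, node, visited_children):
        name, _, value = visited_children
        return name, value

    def visit_name(self, node, visited_children):
        return node.text

    def visit_number(self, node, visited_children):
        return int(node.text)

    def generic_visit(self, node, visited_children):
        return visited_children or node

# Illustrative capability table; the real functions live in the capabilities package.
CAPABILITIES = {"add": lambda num1=0, num2=1: num1 + num2}

name, kwargs = CallVisitor().visit(grammar.parse("add(num1=0, num2=1)"))
print(name, kwargs, CAPABILITIES[name](**kwargs))   # add {'num1': 0, 'num2': 1} 1
```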