Automated Deployment and Compilation of Llama.cpp

The Vagrantfile below automates building the latest llama.cpp tools for use with local GGUF-based models. This is worth documenting because the training tools have been removed from the current llama.cpp build; they are simple to restore if you know which revision to check out, and I will leave sourcing that llama.cpp version to the reader. The training tools stopped working at a certain point in development and are not required for most workflows, since models are rarely trained from scratch. Should you like to train from scratch, please reach out.
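
Provisioning is a single command once the file below is saved as Vagrantfile. A minimal sketch, assuming the vagrant-libvirt plugin is installed on the host and the remote libvirt host named in the provider block is reachable:

# Install the libvirt provider plugin if it is not already present
vagrant plugin install vagrant-libvirt

# Create and provision the VM defined below
vagrant up ubuntu_llama --provider=libvirt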

# -*- mode: ruby -*-
# vi: set ft=ruby :

# All Vagrant configuration is done below. The "2" in Vagrant.configure
# configures the configuration version (we support older styles for
# backwards compatibility). Please don't change it unless you know what
# you're doing.
Vagrant.configure("2") do |config|
  config.vm.define :ubuntu_llama do |ubuntu_llama|
    ubuntu_llama.vm.box = "generic-x64/ubuntu2310"
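    # The libvirt provider below connects over SSH to a remote hypervisor
    # rather than the local machine; adjust the host, username, and key path
    # to match your environment.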
    ubuntu_llama.vm.provider :libvirt do |libvirt|
      libvirt.id_ssh_key_file = "/home/jason/projects/kali_remote/id_ssh.key"
      #libvirt.uri = "qemu+ssh://10.130.10.50/system"
      libvirt.host = '10.130.10.50'
      libvirt.connect_via_ssh = true
      libvirt.username = "jason"
      libvirt.password = ''
      libvirt.memory = 2048
      libvirt.cpus = 2
    end
  end
 
  # Default false
  config.ssh.forward_agent = true
  config.ssh.forward_x11 = true
 
  # Default true
  config.ssh.keep_alive = false
 
  # Default 300s
  config.vm.boot_timeout = 900

  config.vm.provision "file", source: "~/projects/gm-com-chain.pem", destination: "~/"
  
  config.vm.provision "shell", inline: <<-SHELL
    echo "Installing the certificate chain and enabling X11 forwarding"
    # Make the uploaded certificate chain available system-wide
    sudo cp /home/vagrant/gm-com-chain.pem /etc/ssl/certs/
    # Enable X11 forwarding in sshd and restart it so the change takes effect
    sudo sed -i '/#X11Forwarding no/s//X11Forwarding yes/' /etc/ssh/sshd_config
    sudo systemctl restart ssh
    # Clear any stale X authority files for root
    sudo rm -rf /root/.Xauthority
    sudo rm -rf /root/.serverauth.*
  SHELL

  config.vm.provision "shell", privileged: false, inline: <<-SHELL
    echo "Extending PATH for the vagrant user"
    # Persist the extended PATH for both interactive and login shells
    echo export PATH="$PATH:$HOME/.cargo/bin:$HOME/veilid/target/release:$HOME/.local/bin" >> ~/.bashrc
    echo export PATH="$PATH:$HOME/.cargo/bin:$HOME/veilid/target/release:$HOME/.local/bin" >> ~/.bash_profile
    echo $PATH
  SHELL

  config.vm.provision "file", source: "~/projects/crt.pem", destination: "~/"
  config.vm.provision "shell", inline: <<-SHELL
    echo "Trusting the uploaded CA certificate"
    # Register the uploaded certificate with the system trust store
    sudo cp /home/vagrant/crt.pem /usr/local/share/ca-certificates/crt.crt
    sudo update-ca-certificates
  SHELL

  config.vm.provision "shell", privileged: false, inline: <<-SHELL
    echo "Installing build dependencies and compiling llama.cpp"
    echo $PATH

    # Toolchain and Python prerequisites
    sudo apt-get update
    sudo apt-get install -y git cmake python3-pip python3-dev python3-venv gcc g++ make jq

    # Clone and build llama.cpp; the compiled binaries land in build/bin
    git clone https://github.com/ggerganov/llama.cpp.git
    cd llama.cpp
    cmake -B build
    cmake --build build --config Release

    # Create a virtualenv and install the llama-cpp-python server bindings
    python3 -m venv --system-site-packages ~/llama
    source ~/llama/bin/activate
    pip3 install 'llama-cpp-python[server]'

    # Legacy Makefile build; only useful on older llama.cpp trees that still
    # ship the training tools mentioned above
    make
    echo "llama.cpp build complete"
    
  SHELL


  config.vm.synced_folder ".", "/vagrant", type: "rsync",
    rsync__exclude: [".git/", ".vagrant/"]
end
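
Once provisioning finishes, the server can be exercised from inside the guest. A minimal sketch, assuming a GGUF model has already been copied into the VM at a hypothetical path such as ~/models/model.gguf:

# SSH into the provisioned VM
vagrant ssh ubuntu_llama

# Inside the guest, activate the virtualenv created during provisioning
source ~/llama/bin/activate

# Start the OpenAI-compatible llama-cpp-python server against the local GGUF model
python3 -m llama_cpp.server --model ~/models/model.gguf --host 0.0.0.0 --port 8000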