/*
List here all the headers you want to expose in the Python bindings,
then run `python regenerate.py` (see details in README.md)
*/
#include "ggml.h"
#include "ggml-metal.h"
#include "ggml-opencl.h"
// Headers below are currently only present in the llama.cpp repository, comment them out if you don't have them.
#include "k_quants.h"
#include "ggml-alloc.h"
#include "ggml-cuda.h"
#include "ggml-mpi.h" |