Dataset Viewer
Auto-converted to Parquet

Columns (name: type, observed range):

blob_id: string, length 40 to 40
directory_id: string, length 40 to 40
path: string, length 4 to 721
content_id: string, length 40 to 40
detected_licenses: list, length 0 to 57
license_type: string, 2 values
repo_name: string, length 5 to 91
snapshot_id: string, length 40 to 40
revision_id: string, length 40 to 40
branch_name: string, 321 values
visit_date: timestamp[ns], 2016-08-12 09:31:09 to 2023-09-06 10:45:07
revision_date: timestamp[ns], 2010-09-28 14:01:40 to 2023-09-06 06:22:19
committer_date: timestamp[ns], 2010-09-28 14:01:40 to 2023-09-06 06:22:19
github_id: int64, 426 to 681M
star_events_count: int64, 101 to 243k
fork_events_count: int64, 0 to 110k
gha_license_id: string, 23 values
gha_event_created_at: timestamp[ns], 2012-06-28 18:51:49 to 2023-09-14 21:59:16
gha_created_at: timestamp[ns], 2008-02-11 22:55:26 to 2023-08-10 11:14:58
gha_language: string, 147 values
src_encoding: string, 26 values
language: string, 2 values
is_vendor: bool, 2 classes
is_generated: bool, 2 classes
length_bytes: int64, 6 to 10.2M
extension: string, 115 values
filename: string, length 3 to 113
content: string, length 6 to 10.2M
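To make the schema concrete, here is a minimal sketch of streaming and filtering rows with the Hugging Face datasets library. The repository id "org/dataset-name" is a placeholder, not the real id of this dataset; the column names used below come from the schema above.

from datasets import load_dataset

# Placeholder repository id; substitute the actual dataset id of this page.
ds = load_dataset("org/dataset-name", split="train", streaming=True)

# Keep permissively licensed files that are neither vendored nor auto-generated.
filtered = ds.filter(
    lambda row: row["license_type"] == "permissive"
    and not row["is_vendor"]
    and not row["is_generated"]
)

# Peek at a few rows without downloading the whole Parquet conversion.
for row in filtered.take(3):
    print(row["repo_name"], row["path"], row["length_bytes"])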
Row 1
blob_id: 9b6962fbcc1fabcd0fbea42ea3a77ef1adf79a83
directory_id: 40b83dec47e6e24acf12f2c57fbd34a92205aa02
path: /cpu-cycles/libcpucycles/cpucycles/arm64-vct.c
content_id: a04b59e108edf6d1ba1e61cc82371f27b69be12a
detected_licenses: [ "CC0-1.0", "Apache-2.0" ]
license_type: permissive
repo_name: nymtech/nym
snapshot_id: 803133d223108773dfd65838707f00b23c83bdc2
revision_id: a50b4ad211261a3d37a53eafd67770d57568dbeb
branch_name: refs/heads/develop
visit_date: 2023-08-30T07:03:32.700000
revision_date: 2023-08-29T15:18:36
committer_date: 2023-08-29T15:18:36
github_id: 232,312,458
star_events_count: 896
fork_events_count: 254
gha_license_id: null
gha_event_created_at: 2023-09-14T15:54:13
gha_created_at: 2020-01-07T11:42:53
gha_language: Rust
src_encoding: UTF-8
language: C
is_vendor: false
is_generated: false
length_bytes: 358
extension: c
filename: arm64-vct.c
content:

// version 20230105
// public domain
// djb
// adapted from supercop/cpucycles/vct.c

#include "cpucycles_internal.h"

long long ticks(void)
{
  long long result;
  asm volatile("mrs %0, CNTVCT_EL0" : "=r" (result));
  return result;
}

long long ticks_setup(void)
{
  if (!cpucycles_works(ticks)) return cpucycles_SKIP;
  return cpucycles_FINDMULTIPLIER;
}
Row 2
blob_id: 06491a5a27a2ddef41c62fc33d0230cefb6016fc
directory_id: 7eaf54a78c9e2117247cb2ab6d3a0c20719ba700
path: /SOFTWARE/A64-TERES/linux-a64/drivers/ata/libata-acpi.c
content_id: cf4e7020adacde5e69881a21adb0c578d31d7a3e
detected_licenses: [ "Linux-syscall-note", "GPL-2.0-only", "GPL-1.0-or-later", "LicenseRef-scancode-free-unknown", "Apache-2.0" ]
license_type: permissive
repo_name: OLIMEX/DIY-LAPTOP
snapshot_id: ae82f4ee79c641d9aee444db9a75f3f6709afa92
revision_id: a3fafd1309135650bab27f5eafc0c32bc3ca74ee
branch_name: refs/heads/rel3
visit_date: 2023-08-04T01:54:19.483000
revision_date: 2023-04-03T07:18:12
committer_date: 2023-04-03T07:18:12
github_id: 80,094,055
star_events_count: 507
fork_events_count: 92
gha_license_id: Apache-2.0
gha_event_created_at: 2023-04-03T07:05:59
gha_created_at: 2017-01-26T07:25:50
gha_language: C
src_encoding: UTF-8
language: C
is_vendor: false
is_generated: false
length_bytes: 28,836
extension: c
filename: libata-acpi.c
content:
/* * libata-acpi.c * Provides ACPI support for PATA/SATA. * * Copyright (C) 2006 Intel Corp. * Copyright (C) 2006 Randy Dunlap */ #include <linux/module.h> #include <linux/ata.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/acpi.h> #include <linux/libata.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/pm_runtime.h> #include <scsi/scsi_device.h> #include "libata.h" #include <acpi/acpi_bus.h> unsigned int ata_acpi_gtf_filter = ATA_ACPI_FILTER_DEFAULT; module_param_named(acpi_gtf_filter, ata_acpi_gtf_filter, int, 0644); MODULE_PARM_DESC(acpi_gtf_filter, "filter mask for ACPI _GTF commands, set to filter out (0x1=set xfermode, 0x2=lock/freeze lock, 0x4=DIPM, 0x8=FPDMA non-zero offset, 0x10=FPDMA DMA Setup FIS auto-activate)"); #define NO_PORT_MULT 0xffff #define SATA_ADR(root, pmp) (((root) << 16) | (pmp)) #define REGS_PER_GTF 7 struct ata_acpi_gtf { u8 tf[REGS_PER_GTF]; /* regs. 0x1f1 - 0x1f7 */ } __packed; /* * Helper - belongs in the PCI layer somewhere eventually */ static int is_pci_dev(struct device *dev) { return (dev->bus == &pci_bus_type); } static void ata_acpi_clear_gtf(struct ata_device *dev) { kfree(dev->gtf_cache); dev->gtf_cache = NULL; } /** * ata_ap_acpi_handle - provide the acpi_handle for an ata_port * @ap: the acpi_handle returned will correspond to this port * * Returns the acpi_handle for the ACPI namespace object corresponding to * the ata_port passed into the function, or NULL if no such object exists */ acpi_handle ata_ap_acpi_handle(struct ata_port *ap) { if (ap->flags & ATA_FLAG_ACPI_SATA) return NULL; return ap->scsi_host ? DEVICE_ACPI_HANDLE(&ap->scsi_host->shost_gendev) : NULL; } EXPORT_SYMBOL(ata_ap_acpi_handle); /** * ata_dev_acpi_handle - provide the acpi_handle for an ata_device * @dev: the acpi_device returned will correspond to this port * * Returns the acpi_handle for the ACPI namespace object corresponding to * the ata_device passed into the function, or NULL if no such object exists */ acpi_handle ata_dev_acpi_handle(struct ata_device *dev) { acpi_integer adr; struct ata_port *ap = dev->link->ap; if (libata_noacpi || dev->flags & ATA_DFLAG_ACPI_DISABLED) return NULL; if (ap->flags & ATA_FLAG_ACPI_SATA) { if (!sata_pmp_attached(ap)) adr = SATA_ADR(ap->port_no, NO_PORT_MULT); else adr = SATA_ADR(ap->port_no, dev->link->pmp); return acpi_get_child(DEVICE_ACPI_HANDLE(ap->host->dev), adr); } else return acpi_get_child(ata_ap_acpi_handle(ap), dev->devno); } EXPORT_SYMBOL(ata_dev_acpi_handle); /* @ap and @dev are the same as ata_acpi_handle_hotplug() */ static void ata_acpi_detach_device(struct ata_port *ap, struct ata_device *dev) { if (dev) dev->flags |= ATA_DFLAG_DETACH; else { struct ata_link *tlink; struct ata_device *tdev; ata_for_each_link(tlink, ap, EDGE) ata_for_each_dev(tdev, tlink, ALL) tdev->flags |= ATA_DFLAG_DETACH; } ata_port_schedule_eh(ap); } /** * ata_acpi_handle_hotplug - ACPI event handler backend * @ap: ATA port ACPI event occurred * @dev: ATA device ACPI event occurred (can be NULL) * @event: ACPI event which occurred * * All ACPI bay / device realted events end up in this function. If * the event is port-wide @dev is NULL. If the event is specific to a * device, @dev points to it. * * Hotplug (as opposed to unplug) notification is always handled as * port-wide while unplug only kills the target device on device-wide * event. * * LOCKING: * ACPI notify handler context. May sleep. 
*/ static void ata_acpi_handle_hotplug(struct ata_port *ap, struct ata_device *dev, u32 event) { struct ata_eh_info *ehi = &ap->link.eh_info; int wait = 0; unsigned long flags; spin_lock_irqsave(ap->lock, flags); /* * When dock driver calls into the routine, it will always use * ACPI_NOTIFY_BUS_CHECK/ACPI_NOTIFY_DEVICE_CHECK for add and * ACPI_NOTIFY_EJECT_REQUEST for remove */ switch (event) { case ACPI_NOTIFY_BUS_CHECK: case ACPI_NOTIFY_DEVICE_CHECK: ata_ehi_push_desc(ehi, "ACPI event"); ata_ehi_hotplugged(ehi); ata_port_freeze(ap); break; case ACPI_NOTIFY_EJECT_REQUEST: ata_ehi_push_desc(ehi, "ACPI event"); ata_acpi_detach_device(ap, dev); wait = 1; break; } spin_unlock_irqrestore(ap->lock, flags); if (wait) { ata_port_wait_eh(ap); flush_work(&ap->hotplug_task.work); } } static void ata_acpi_dev_notify_dock(acpi_handle handle, u32 event, void *data) { struct ata_device *dev = data; ata_acpi_handle_hotplug(dev->link->ap, dev, event); } static void ata_acpi_ap_notify_dock(acpi_handle handle, u32 event, void *data) { struct ata_port *ap = data; ata_acpi_handle_hotplug(ap, NULL, event); } static void ata_acpi_uevent(struct ata_port *ap, struct ata_device *dev, u32 event) { struct kobject *kobj = NULL; char event_string[20]; char *envp[] = { event_string, NULL }; if (dev) { if (dev->sdev) kobj = &dev->sdev->sdev_gendev.kobj; } else kobj = &ap->dev->kobj; if (kobj) { snprintf(event_string, 20, "BAY_EVENT=%d", event); kobject_uevent_env(kobj, KOBJ_CHANGE, envp); } } static void ata_acpi_ap_uevent(acpi_handle handle, u32 event, void *data) { ata_acpi_uevent(data, NULL, event); } static void ata_acpi_dev_uevent(acpi_handle handle, u32 event, void *data) { struct ata_device *dev = data; ata_acpi_uevent(dev->link->ap, dev, event); } static const struct acpi_dock_ops ata_acpi_dev_dock_ops = { .handler = ata_acpi_dev_notify_dock, .uevent = ata_acpi_dev_uevent, }; static const struct acpi_dock_ops ata_acpi_ap_dock_ops = { .handler = ata_acpi_ap_notify_dock, .uevent = ata_acpi_ap_uevent, }; void ata_acpi_hotplug_init(struct ata_host *host) { int i; for (i = 0; i < host->n_ports; i++) { struct ata_port *ap = host->ports[i]; acpi_handle handle; struct ata_device *dev; if (!ap) continue; handle = ata_ap_acpi_handle(ap); if (handle) { /* we might be on a docking station */ register_hotplug_dock_device(handle, &ata_acpi_ap_dock_ops, ap, NULL, NULL); } ata_for_each_dev(dev, &ap->link, ALL) { handle = ata_dev_acpi_handle(dev); if (!handle) continue; /* we might be on a docking station */ register_hotplug_dock_device(handle, &ata_acpi_dev_dock_ops, dev, NULL, NULL); } } } /** * ata_acpi_dissociate - dissociate ATA host from ACPI objects * @host: target ATA host * * This function is called during driver detach after the whole host * is shut down. * * LOCKING: * EH context. */ void ata_acpi_dissociate(struct ata_host *host) { int i; /* Restore initial _GTM values so that driver which attaches * afterward can use them too. 
*/ for (i = 0; i < host->n_ports; i++) { struct ata_port *ap = host->ports[i]; const struct ata_acpi_gtm *gtm = ata_acpi_init_gtm(ap); if (ata_ap_acpi_handle(ap) && gtm) ata_acpi_stm(ap, gtm); } } static int __ata_acpi_gtm(struct ata_port *ap, acpi_handle handle, struct ata_acpi_gtm *gtm) { struct acpi_buffer output = { .length = ACPI_ALLOCATE_BUFFER }; union acpi_object *out_obj; acpi_status status; int rc = 0; status = acpi_evaluate_object(handle, "_GTM", NULL, &output); rc = -ENOENT; if (status == AE_NOT_FOUND) goto out_free; rc = -EINVAL; if (ACPI_FAILURE(status)) { ata_port_err(ap, "ACPI get timing mode failed (AE 0x%x)\n", status); goto out_free; } out_obj = output.pointer; if (out_obj->type != ACPI_TYPE_BUFFER) { ata_port_warn(ap, "_GTM returned unexpected object type 0x%x\n", out_obj->type); goto out_free; } if (out_obj->buffer.length != sizeof(struct ata_acpi_gtm)) { ata_port_err(ap, "_GTM returned invalid length %d\n", out_obj->buffer.length); goto out_free; } memcpy(gtm, out_obj->buffer.pointer, sizeof(struct ata_acpi_gtm)); rc = 0; out_free: kfree(output.pointer); return rc; } /** * ata_acpi_gtm - execute _GTM * @ap: target ATA port * @gtm: out parameter for _GTM result * * Evaluate _GTM and store the result in @gtm. * * LOCKING: * EH context. * * RETURNS: * 0 on success, -ENOENT if _GTM doesn't exist, -errno on failure. */ int ata_acpi_gtm(struct ata_port *ap, struct ata_acpi_gtm *gtm) { if (ata_ap_acpi_handle(ap)) return __ata_acpi_gtm(ap, ata_ap_acpi_handle(ap), gtm); else return -EINVAL; } EXPORT_SYMBOL_GPL(ata_acpi_gtm); /** * ata_acpi_stm - execute _STM * @ap: target ATA port * @stm: timing parameter to _STM * * Evaluate _STM with timing parameter @stm. * * LOCKING: * EH context. * * RETURNS: * 0 on success, -ENOENT if _STM doesn't exist, -errno on failure. */ int ata_acpi_stm(struct ata_port *ap, const struct ata_acpi_gtm *stm) { acpi_status status; struct ata_acpi_gtm stm_buf = *stm; struct acpi_object_list input; union acpi_object in_params[3]; in_params[0].type = ACPI_TYPE_BUFFER; in_params[0].buffer.length = sizeof(struct ata_acpi_gtm); in_params[0].buffer.pointer = (u8 *)&stm_buf; /* Buffers for id may need byteswapping ? */ in_params[1].type = ACPI_TYPE_BUFFER; in_params[1].buffer.length = 512; in_params[1].buffer.pointer = (u8 *)ap->link.device[0].id; in_params[2].type = ACPI_TYPE_BUFFER; in_params[2].buffer.length = 512; in_params[2].buffer.pointer = (u8 *)ap->link.device[1].id; input.count = 3; input.pointer = in_params; status = acpi_evaluate_object(ata_ap_acpi_handle(ap), "_STM", &input, NULL); if (status == AE_NOT_FOUND) return -ENOENT; if (ACPI_FAILURE(status)) { ata_port_err(ap, "ACPI set timing mode failed (status=0x%x)\n", status); return -EINVAL; } return 0; } EXPORT_SYMBOL_GPL(ata_acpi_stm); /** * ata_dev_get_GTF - get the drive bootup default taskfile settings * @dev: target ATA device * @gtf: output parameter for buffer containing _GTF taskfile arrays * * This applies to both PATA and SATA drives. * * The _GTF method has no input parameters. * It returns a variable number of register set values (registers * hex 1F1..1F7, taskfiles). * The <variable number> is not known in advance, so have ACPI-CA * allocate the buffer as needed and return it, then free it later. * * LOCKING: * EH context. * * RETURNS: * Number of taskfiles on success, 0 if _GTF doesn't exist. -EINVAL * if _GTF is invalid. 
*/ static int ata_dev_get_GTF(struct ata_device *dev, struct ata_acpi_gtf **gtf) { struct ata_port *ap = dev->link->ap; acpi_status status; struct acpi_buffer output; union acpi_object *out_obj; int rc = 0; /* if _GTF is cached, use the cached value */ if (dev->gtf_cache) { out_obj = dev->gtf_cache; goto done; } /* set up output buffer */ output.length = ACPI_ALLOCATE_BUFFER; output.pointer = NULL; /* ACPI-CA sets this; save/free it later */ if (ata_msg_probe(ap)) ata_dev_dbg(dev, "%s: ENTER: port#: %d\n", __func__, ap->port_no); /* _GTF has no input parameters */ status = acpi_evaluate_object(ata_dev_acpi_handle(dev), "_GTF", NULL, &output); out_obj = dev->gtf_cache = output.pointer; if (ACPI_FAILURE(status)) { if (status != AE_NOT_FOUND) { ata_dev_warn(dev, "_GTF evaluation failed (AE 0x%x)\n", status); rc = -EINVAL; } goto out_free; } if (!output.length || !output.pointer) { if (ata_msg_probe(ap)) ata_dev_dbg(dev, "%s: Run _GTF: length or ptr is NULL (0x%llx, 0x%p)\n", __func__, (unsigned long long)output.length, output.pointer); rc = -EINVAL; goto out_free; } if (out_obj->type != ACPI_TYPE_BUFFER) { ata_dev_warn(dev, "_GTF unexpected object type 0x%x\n", out_obj->type); rc = -EINVAL; goto out_free; } if (out_obj->buffer.length % REGS_PER_GTF) { ata_dev_warn(dev, "unexpected _GTF length (%d)\n", out_obj->buffer.length); rc = -EINVAL; goto out_free; } done: rc = out_obj->buffer.length / REGS_PER_GTF; if (gtf) { *gtf = (void *)out_obj->buffer.pointer; if (ata_msg_probe(ap)) ata_dev_dbg(dev, "%s: returning gtf=%p, gtf_count=%d\n", __func__, *gtf, rc); } return rc; out_free: ata_acpi_clear_gtf(dev); return rc; } /** * ata_acpi_gtm_xfermode - determine xfermode from GTM parameter * @dev: target device * @gtm: GTM parameter to use * * Determine xfermask for @dev from @gtm. * * LOCKING: * None. * * RETURNS: * Determined xfermask. */ unsigned long ata_acpi_gtm_xfermask(struct ata_device *dev, const struct ata_acpi_gtm *gtm) { unsigned long xfer_mask = 0; unsigned int type; int unit; u8 mode; /* we always use the 0 slot for crap hardware */ unit = dev->devno; if (!(gtm->flags & 0x10)) unit = 0; /* PIO */ mode = ata_timing_cycle2mode(ATA_SHIFT_PIO, gtm->drive[unit].pio); xfer_mask |= ata_xfer_mode2mask(mode); /* See if we have MWDMA or UDMA data. We don't bother with * MWDMA if UDMA is available as this means the BIOS set UDMA * and our error changedown if it works is UDMA to PIO anyway. */ if (!(gtm->flags & (1 << (2 * unit)))) type = ATA_SHIFT_MWDMA; else type = ATA_SHIFT_UDMA; mode = ata_timing_cycle2mode(type, gtm->drive[unit].dma); xfer_mask |= ata_xfer_mode2mask(mode); return xfer_mask; } EXPORT_SYMBOL_GPL(ata_acpi_gtm_xfermask); /** * ata_acpi_cbl_80wire - Check for 80 wire cable * @ap: Port to check * @gtm: GTM data to use * * Return 1 if the @gtm indicates the BIOS selected an 80wire mode. 
*/ int ata_acpi_cbl_80wire(struct ata_port *ap, const struct ata_acpi_gtm *gtm) { struct ata_device *dev; ata_for_each_dev(dev, &ap->link, ENABLED) { unsigned long xfer_mask, udma_mask; xfer_mask = ata_acpi_gtm_xfermask(dev, gtm); ata_unpack_xfermask(xfer_mask, NULL, NULL, &udma_mask); if (udma_mask & ~ATA_UDMA_MASK_40C) return 1; } return 0; } EXPORT_SYMBOL_GPL(ata_acpi_cbl_80wire); static void ata_acpi_gtf_to_tf(struct ata_device *dev, const struct ata_acpi_gtf *gtf, struct ata_taskfile *tf) { ata_tf_init(dev, tf); tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; tf->protocol = ATA_PROT_NODATA; tf->feature = gtf->tf[0]; /* 0x1f1 */ tf->nsect = gtf->tf[1]; /* 0x1f2 */ tf->lbal = gtf->tf[2]; /* 0x1f3 */ tf->lbam = gtf->tf[3]; /* 0x1f4 */ tf->lbah = gtf->tf[4]; /* 0x1f5 */ tf->device = gtf->tf[5]; /* 0x1f6 */ tf->command = gtf->tf[6]; /* 0x1f7 */ } static int ata_acpi_filter_tf(struct ata_device *dev, const struct ata_taskfile *tf, const struct ata_taskfile *ptf) { if (dev->gtf_filter & ATA_ACPI_FILTER_SETXFER) { /* libata doesn't use ACPI to configure transfer mode. * It will only confuse device configuration. Skip. */ if (tf->command == ATA_CMD_SET_FEATURES && tf->feature == SETFEATURES_XFER) return 1; } if (dev->gtf_filter & ATA_ACPI_FILTER_LOCK) { /* BIOS writers, sorry but we don't wanna lock * features unless the user explicitly said so. */ /* DEVICE CONFIGURATION FREEZE LOCK */ if (tf->command == ATA_CMD_CONF_OVERLAY && tf->feature == ATA_DCO_FREEZE_LOCK) return 1; /* SECURITY FREEZE LOCK */ if (tf->command == ATA_CMD_SEC_FREEZE_LOCK) return 1; /* SET MAX LOCK and SET MAX FREEZE LOCK */ if ((!ptf || ptf->command != ATA_CMD_READ_NATIVE_MAX) && tf->command == ATA_CMD_SET_MAX && (tf->feature == ATA_SET_MAX_LOCK || tf->feature == ATA_SET_MAX_FREEZE_LOCK)) return 1; } if (tf->command == ATA_CMD_SET_FEATURES && tf->feature == SETFEATURES_SATA_ENABLE) { /* inhibit enabling DIPM */ if (dev->gtf_filter & ATA_ACPI_FILTER_DIPM && tf->nsect == SATA_DIPM) return 1; /* inhibit FPDMA non-zero offset */ if (dev->gtf_filter & ATA_ACPI_FILTER_FPDMA_OFFSET && (tf->nsect == SATA_FPDMA_OFFSET || tf->nsect == SATA_FPDMA_IN_ORDER)) return 1; /* inhibit FPDMA auto activation */ if (dev->gtf_filter & ATA_ACPI_FILTER_FPDMA_AA && tf->nsect == SATA_FPDMA_AA) return 1; } return 0; } /** * ata_acpi_run_tf - send taskfile registers to host controller * @dev: target ATA device * @gtf: raw ATA taskfile register set (0x1f1 - 0x1f7) * * Outputs ATA taskfile to standard ATA host controller. * Writes the control, feature, nsect, lbal, lbam, and lbah registers. * Optionally (ATA_TFLAG_LBA48) writes hob_feature, hob_nsect, * hob_lbal, hob_lbam, and hob_lbah. * * This function waits for idle (!BUSY and !DRQ) after writing * registers. If the control register has a new value, this * function also waits for idle after writing control and before * writing the remaining registers. * * LOCKING: * EH context. * * RETURNS: * 1 if command is executed successfully. 0 if ignored, rejected or * filtered out, -errno on other errors. 
*/ static int ata_acpi_run_tf(struct ata_device *dev, const struct ata_acpi_gtf *gtf, const struct ata_acpi_gtf *prev_gtf) { struct ata_taskfile *pptf = NULL; struct ata_taskfile tf, ptf, rtf; unsigned int err_mask; const char *level; const char *descr; char msg[60]; int rc; if ((gtf->tf[0] == 0) && (gtf->tf[1] == 0) && (gtf->tf[2] == 0) && (gtf->tf[3] == 0) && (gtf->tf[4] == 0) && (gtf->tf[5] == 0) && (gtf->tf[6] == 0)) return 0; ata_acpi_gtf_to_tf(dev, gtf, &tf); if (prev_gtf) { ata_acpi_gtf_to_tf(dev, prev_gtf, &ptf); pptf = &ptf; } if (!ata_acpi_filter_tf(dev, &tf, pptf)) { rtf = tf; err_mask = ata_exec_internal(dev, &rtf, NULL, DMA_NONE, NULL, 0, 0); switch (err_mask) { case 0: level = KERN_DEBUG; snprintf(msg, sizeof(msg), "succeeded"); rc = 1; break; case AC_ERR_DEV: level = KERN_INFO; snprintf(msg, sizeof(msg), "rejected by device (Stat=0x%02x Err=0x%02x)", rtf.command, rtf.feature); rc = 0; break; default: level = KERN_ERR; snprintf(msg, sizeof(msg), "failed (Emask=0x%x Stat=0x%02x Err=0x%02x)", err_mask, rtf.command, rtf.feature); rc = -EIO; break; } } else { level = KERN_INFO; snprintf(msg, sizeof(msg), "filtered out"); rc = 0; } descr = ata_get_cmd_descript(tf.command); ata_dev_printk(dev, level, "ACPI cmd %02x/%02x:%02x:%02x:%02x:%02x:%02x (%s) %s\n", tf.command, tf.feature, tf.nsect, tf.lbal, tf.lbam, tf.lbah, tf.device, (descr ? descr : "unknown"), msg); return rc; } /** * ata_acpi_exec_tfs - get then write drive taskfile settings * @dev: target ATA device * @nr_executed: out parameter for the number of executed commands * * Evaluate _GTF and execute returned taskfiles. * * LOCKING: * EH context. * * RETURNS: * Number of executed taskfiles on success, 0 if _GTF doesn't exist. * -errno on other errors. */ static int ata_acpi_exec_tfs(struct ata_device *dev, int *nr_executed) { struct ata_acpi_gtf *gtf = NULL, *pgtf = NULL; int gtf_count, i, rc; /* get taskfiles */ rc = ata_dev_get_GTF(dev, &gtf); if (rc < 0) return rc; gtf_count = rc; /* execute them */ for (i = 0; i < gtf_count; i++, gtf++) { rc = ata_acpi_run_tf(dev, gtf, pgtf); if (rc < 0) break; if (rc) { (*nr_executed)++; pgtf = gtf; } } ata_acpi_clear_gtf(dev); if (rc < 0) return rc; return 0; } /** * ata_acpi_push_id - send Identify data to drive * @dev: target ATA device * * _SDD ACPI object: for SATA mode only * Must be after Identify (Packet) Device -- uses its data * ATM this function never returns a failure. It is an optional * method and if it fails for whatever reason, we should still * just keep going. * * LOCKING: * EH context. * * RETURNS: * 0 on success, -ENOENT if _SDD doesn't exist, -errno on failure. */ static int ata_acpi_push_id(struct ata_device *dev) { struct ata_port *ap = dev->link->ap; acpi_status status; struct acpi_object_list input; union acpi_object in_params[1]; if (ata_msg_probe(ap)) ata_dev_dbg(dev, "%s: ix = %d, port#: %d\n", __func__, dev->devno, ap->port_no); /* Give the drive Identify data to the drive via the _SDD method */ /* _SDD: set up input parameters */ input.count = 1; input.pointer = in_params; in_params[0].type = ACPI_TYPE_BUFFER; in_params[0].buffer.length = sizeof(dev->id[0]) * ATA_ID_WORDS; in_params[0].buffer.pointer = (u8 *)dev->id; /* Output buffer: _SDD has no output */ /* It's OK for _SDD to be missing too. 
*/ swap_buf_le16(dev->id, ATA_ID_WORDS); status = acpi_evaluate_object(ata_dev_acpi_handle(dev), "_SDD", &input, NULL); swap_buf_le16(dev->id, ATA_ID_WORDS); if (status == AE_NOT_FOUND) return -ENOENT; if (ACPI_FAILURE(status)) { ata_dev_warn(dev, "ACPI _SDD failed (AE 0x%x)\n", status); return -EIO; } return 0; } /** * ata_acpi_on_suspend - ATA ACPI hook called on suspend * @ap: target ATA port * * This function is called when @ap is about to be suspended. All * devices are already put to sleep but the port_suspend() callback * hasn't been executed yet. Error return from this function aborts * suspend. * * LOCKING: * EH context. * * RETURNS: * 0 on success, -errno on failure. */ int ata_acpi_on_suspend(struct ata_port *ap) { /* nada */ return 0; } /** * ata_acpi_on_resume - ATA ACPI hook called on resume * @ap: target ATA port * * This function is called when @ap is resumed - right after port * itself is resumed but before any EH action is taken. * * LOCKING: * EH context. */ void ata_acpi_on_resume(struct ata_port *ap) { const struct ata_acpi_gtm *gtm = ata_acpi_init_gtm(ap); struct ata_device *dev; if (ata_ap_acpi_handle(ap) && gtm) { /* _GTM valid */ /* restore timing parameters */ ata_acpi_stm(ap, gtm); /* _GTF should immediately follow _STM so that it can * use values set by _STM. Cache _GTF result and * schedule _GTF. */ ata_for_each_dev(dev, &ap->link, ALL) { ata_acpi_clear_gtf(dev); if (ata_dev_enabled(dev) && ata_dev_get_GTF(dev, NULL) >= 0) dev->flags |= ATA_DFLAG_ACPI_PENDING; } } else { /* SATA _GTF needs to be evaulated after _SDD and * there's no reason to evaluate IDE _GTF early * without _STM. Clear cache and schedule _GTF. */ ata_for_each_dev(dev, &ap->link, ALL) { ata_acpi_clear_gtf(dev); if (ata_dev_enabled(dev)) dev->flags |= ATA_DFLAG_ACPI_PENDING; } } } static int ata_acpi_choose_suspend_state(struct ata_device *dev, bool runtime) { int d_max_in = ACPI_STATE_D3_COLD; if (!runtime) goto out; /* * For ATAPI, runtime D3 cold is only allowed * for ZPODD in zero power ready state */ if (dev->class == ATA_DEV_ATAPI && !(zpodd_dev_enabled(dev) && zpodd_zpready(dev))) d_max_in = ACPI_STATE_D3_HOT; out: return acpi_pm_device_sleep_state(&dev->sdev->sdev_gendev, NULL, d_max_in); } static void sata_acpi_set_state(struct ata_port *ap, pm_message_t state) { bool runtime = PMSG_IS_AUTO(state); struct ata_device *dev; acpi_handle handle; int acpi_state; ata_for_each_dev(dev, &ap->link, ENABLED) { handle = ata_dev_acpi_handle(dev); if (!handle) continue; if (!(state.event & PM_EVENT_RESUME)) { acpi_state = ata_acpi_choose_suspend_state(dev, runtime); if (acpi_state == ACPI_STATE_D0) continue; if (runtime && zpodd_dev_enabled(dev) && acpi_state == ACPI_STATE_D3_COLD) zpodd_enable_run_wake(dev); acpi_bus_set_power(handle, acpi_state); } else { if (runtime && zpodd_dev_enabled(dev)) zpodd_disable_run_wake(dev); acpi_bus_set_power(handle, ACPI_STATE_D0); } } } /* ACPI spec requires _PS0 when IDE power on and _PS3 when power off */ static void pata_acpi_set_state(struct ata_port *ap, pm_message_t state) { struct ata_device *dev; acpi_handle port_handle; port_handle = ata_ap_acpi_handle(ap); if (!port_handle) return; /* channel first and then drives for power on and vica versa for power off */ if (state.event & PM_EVENT_RESUME) acpi_bus_set_power(port_handle, ACPI_STATE_D0); ata_for_each_dev(dev, &ap->link, ENABLED) { acpi_handle dev_handle = ata_dev_acpi_handle(dev); if (!dev_handle) continue; acpi_bus_set_power(dev_handle, state.event & PM_EVENT_RESUME ? 
ACPI_STATE_D0 : ACPI_STATE_D3); } if (!(state.event & PM_EVENT_RESUME)) acpi_bus_set_power(port_handle, ACPI_STATE_D3); } /** * ata_acpi_set_state - set the port power state * @ap: target ATA port * @state: state, on/off * * This function sets a proper ACPI D state for the device on * system and runtime PM operations. */ void ata_acpi_set_state(struct ata_port *ap, pm_message_t state) { if (ap->flags & ATA_FLAG_ACPI_SATA) sata_acpi_set_state(ap, state); else pata_acpi_set_state(ap, state); } /** * ata_acpi_on_devcfg - ATA ACPI hook called on device donfiguration * @dev: target ATA device * * This function is called when @dev is about to be configured. * IDENTIFY data might have been modified after this hook is run. * * LOCKING: * EH context. * * RETURNS: * Positive number if IDENTIFY data needs to be refreshed, 0 if not, * -errno on failure. */ int ata_acpi_on_devcfg(struct ata_device *dev) { struct ata_port *ap = dev->link->ap; struct ata_eh_context *ehc = &ap->link.eh_context; int acpi_sata = ap->flags & ATA_FLAG_ACPI_SATA; int nr_executed = 0; int rc; if (!ata_dev_acpi_handle(dev)) return 0; /* do we need to do _GTF? */ if (!(dev->flags & ATA_DFLAG_ACPI_PENDING) && !(acpi_sata && (ehc->i.flags & ATA_EHI_DID_HARDRESET))) return 0; /* do _SDD if SATA */ if (acpi_sata) { rc = ata_acpi_push_id(dev); if (rc && rc != -ENOENT) goto acpi_err; } /* do _GTF */ rc = ata_acpi_exec_tfs(dev, &nr_executed); if (rc) goto acpi_err; dev->flags &= ~ATA_DFLAG_ACPI_PENDING; /* refresh IDENTIFY page if any _GTF command has been executed */ if (nr_executed) { rc = ata_dev_reread_id(dev, 0); if (rc < 0) { ata_dev_err(dev, "failed to IDENTIFY after ACPI commands\n"); return rc; } } return 0; acpi_err: /* ignore evaluation failure if we can continue safely */ if (rc == -EINVAL && !nr_executed && !(ap->pflags & ATA_PFLAG_FROZEN)) return 0; /* fail and let EH retry once more for unknown IO errors */ if (!(dev->flags & ATA_DFLAG_ACPI_FAILED)) { dev->flags |= ATA_DFLAG_ACPI_FAILED; return rc; } dev->flags |= ATA_DFLAG_ACPI_DISABLED; ata_dev_warn(dev, "ACPI: failed the second time, disabled\n"); /* We can safely continue if no _GTF command has been executed * and port is not frozen. */ if (!nr_executed && !(ap->pflags & ATA_PFLAG_FROZEN)) return 0; return rc; } /** * ata_acpi_on_disable - ATA ACPI hook called when a device is disabled * @dev: target ATA device * * This function is called when @dev is about to be disabled. * * LOCKING: * EH context. 
*/ void ata_acpi_on_disable(struct ata_device *dev) { ata_acpi_clear_gtf(dev); } static int compat_pci_ata(struct ata_port *ap) { struct device *dev = ap->tdev.parent; struct pci_dev *pdev; if (!is_pci_dev(dev)) return 0; pdev = to_pci_dev(dev); if ((pdev->class >> 8) != PCI_CLASS_STORAGE_SATA && (pdev->class >> 8) != PCI_CLASS_STORAGE_IDE) return 0; return 1; } static int ata_acpi_bind_host(struct ata_port *ap, acpi_handle *handle) { if (libata_noacpi || ap->flags & ATA_FLAG_ACPI_SATA) return -ENODEV; *handle = acpi_get_child(DEVICE_ACPI_HANDLE(ap->tdev.parent), ap->port_no); if (!*handle) return -ENODEV; if (__ata_acpi_gtm(ap, *handle, &ap->__acpi_init_gtm) == 0) ap->pflags |= ATA_PFLAG_INIT_GTM_VALID; return 0; } static int ata_acpi_bind_device(struct ata_port *ap, struct scsi_device *sdev, acpi_handle *handle) { struct ata_device *ata_dev; if (ap->flags & ATA_FLAG_ACPI_SATA) { if (!sata_pmp_attached(ap)) ata_dev = &ap->link.device[sdev->id]; else ata_dev = &ap->pmp_link[sdev->channel].device[sdev->id]; } else { ata_dev = &ap->link.device[sdev->id]; } *handle = ata_dev_acpi_handle(ata_dev); if (!*handle) return -ENODEV; return 0; } static int is_ata_port(const struct device *dev) { return dev->type == &ata_port_type; } static struct ata_port *dev_to_ata_port(struct device *dev) { while (!is_ata_port(dev)) { if (!dev->parent) return NULL; dev = dev->parent; } return to_ata_port(dev); } static int ata_acpi_find_device(struct device *dev, acpi_handle *handle) { struct ata_port *ap = dev_to_ata_port(dev); if (!ap) return -ENODEV; if (!compat_pci_ata(ap)) return -ENODEV; if (scsi_is_host_device(dev)) return ata_acpi_bind_host(ap, handle); else if (scsi_is_sdev_device(dev)) { struct scsi_device *sdev = to_scsi_device(dev); return ata_acpi_bind_device(ap, sdev, handle); } else return -ENODEV; } static struct acpi_bus_type ata_acpi_bus = { .name = "ATA", .find_device = ata_acpi_find_device, }; int ata_acpi_register(void) { return scsi_register_acpi_bus_type(&ata_acpi_bus); } void ata_acpi_unregister(void) { scsi_unregister_acpi_bus_type(&ata_acpi_bus); }
Row 3
blob_id: 2c46152f1f756de53a2675017404209e5c59d290
directory_id: 5ff4b6986e6799bc0e143e060bafc14369030d8b
path: /toolchain/riscv-isa-sim/riscv/insns/vwsub_wx.h
content_id: f72341ba8089e65f75d81736fd4140f0748d8ef6
detected_licenses: [ "MIT", "LicenseRef-scancode-unknown-license-reference", "GPL-1.0-or-later", "LLVM-exception", "Apache-2.0", "BSD-3-Clause", "LicenseRef-scancode-bsd-3-clause-jtag", "GPL-3.0-or-later" ]
license_type: permissive
repo_name: pulp-platform/mempool
snapshot_id: 7583204b2436cfc12ed95599463e51ad4df51557
revision_id: c98fb3ada4f255623eaf9b09861f397a60c3d96b
branch_name: refs/heads/main
visit_date: 2023-08-08T09:07:56.696000
revision_date: 2023-07-27T17:24:38
committer_date: 2023-07-27T17:24:38
github_id: 223,218,149
star_events_count: 178
fork_events_count: 28
gha_license_id: Apache-2.0
gha_event_created_at: 2023-07-27T17:24:39
gha_created_at: 2019-11-21T16:34:37
gha_language: C
src_encoding: UTF-8
language: C
is_vendor: false
is_generated: false
length_bytes: 100
extension: h
filename: vwsub_wx.h
content:

// vwsub.wx vd, vs2, rs1
VI_CHECK_DDS(false);
VI_VX_LOOP_WIDEN
({
  VI_WIDE_WVX_OP(rs1, -, int);
})
Row 4
blob_id: 582a6be3045da41d01f0156b34d1546ab97310e7
directory_id: 28d0f8c01599f8f6c711bdde0b59f9c2cd221203
path: /sys/arch/evbarm/stand/bootimx23/power_prep.c
content_id: 6ea9e175e2fe6cce77285e4c506986b1c3b932f6
detected_licenses: []
license_type: no_license
repo_name: NetBSD/src
snapshot_id: 1a9cbc22ed778be638b37869ed4fb5c8dd616166
revision_id: 23ee83f7c0aea0777bd89d8ebd7f0cde9880d13c
branch_name: refs/heads/trunk
visit_date: 2023-08-31T13:24:58.105000
revision_date: 2023-08-27T15:50:47
committer_date: 2023-08-27T15:50:47
github_id: 88,439,547
star_events_count: 656
fork_events_count: 348
gha_license_id: null
gha_event_created_at: 2023-07-20T20:07:24
gha_created_at: 2017-04-16T20:03:43
gha_language: null
src_encoding: UTF-8
language: C
is_vendor: false
is_generated: false
length_bytes: 11,314
extension: c
filename: power_prep.c
content:
/* $Id: power_prep.c,v 1.5 2016/08/17 22:04:51 skrll Exp $ */ /* * Copyright (c) 2012 The NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation * by Petri Laakso. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include <sys/cdefs.h> #include <sys/param.h> #include <sys/types.h> #include <arm/imx/imx23_powerreg.h> #include <lib/libkern/libkern.h> #include <lib/libsa/stand.h> #include "common.h" #define PWR_CTRL (HW_POWER_BASE + HW_POWER_CTRL) #define PWR_CTRL_S (HW_POWER_BASE + HW_POWER_CTRL_SET) #define PWR_CTRL_C (HW_POWER_BASE + HW_POWER_CTRL_CLR) #define PWR_5VCTRL (HW_POWER_BASE + HW_POWER_5VCTRL) #define PWR_5VCTRL_S (HW_POWER_BASE + HW_POWER_5VCTRL_SET) #define PWR_5VCTRL_C (HW_POWER_BASE + HW_POWER_5VCTRL_CLR) #define PWR_MINPWR (HW_POWER_BASE + HW_POWER_MINPWR) #define PWR_MINPWR_S (HW_POWER_BASE + HW_POWER_MINPWR_SET) #define PWR_MINPWR_C (HW_POWER_BASE + HW_POWER_MINPWR_CLR) #define PWR_CHARGE (HW_POWER_BASE + HW_POWER_CHARGE) #define PWR_CHARGE_S (HW_POWER_BASE + HW_POWER_CHARGE_SET) #define PWR_CHARGE_C (HW_POWER_BASE + HW_POWER_CHARGE_CLR) #define PWR_VDDDCTRL (HW_POWER_BASE + HW_POWER_VDDDCTRL) #define PWR_VDDACTRL (HW_POWER_BASE + HW_POWER_VDDACTRL) #define PWR_VDDIOCTRL (HW_POWER_BASE + HW_POWER_VDDIOCTRL) #define PWR_VDDMEMCTRL (HW_POWER_BASE + HW_POWER_VDDMEMCTRL) #define PWR_DCDC4P2 (HW_POWER_BASE + HW_POWER_DCDC4P2) #define PWR_MISC (HW_POWER_BASE + HW_POWER_MISC) #define PWR_DCLIMITS (HW_POWER_BASE + HW_POWER_DCLIMITS) #define PWR_LOOPCTRL (HW_POWER_BASE + HW_POWER_LOOPCTRL) #define PWR_LOOPCTRL_S (HW_POWER_BASE + HW_POWER_LOOPCTRL_SET) #define PWR_LOOPCTRL_C (HW_POWER_BASE + HW_POWER_LOOPCTRL_CLR) #define PWR_STATUS (HW_POWER_BASE + HW_POWER_STS) #define PWR_SPEED (HW_POWER_BASE + HW_POWER_SPEED) #define PWR_BATTMONITOR (HW_POWER_BASE + HW_POWER_BATTMONITOR) #define PWR_RESET (HW_POWER_BASE + HW_POWER_RESET) #define PWR_DEBUG (HW_POWER_BASE + HW_POWER_DEBUG) #define PWR_SPECIAL (HW_POWER_BASE + HW_POWER_SPECIAL) #define PWR_VERSION (HW_POWER_BASE + HW_POWER_VERSION) #define VBUSVALID_TRSH 5 /* 4.4V */ #define CHARGE_4P2_ILIMIT_MAX 0x3f #define CMPTRIP 0x1f /* DCDC_4P2 pin >= 1.05 * BATTERY pin. */ #define DROPOUT_CTRL 0xa /* BO 100mV, DCDC selects higher. 
*/ void en_vbusvalid(void); int vbusvalid(void); void power_tune(void); void en_4p2_reg(void); void en_4p2_to_dcdc(void); void power_vddd_from_dcdc(int, int); void power_vdda_from_dcdc(int, int); void power_vddio_from_dcdc(int, int); void power_vddmem(int); /* * Configure the DCDC control logic 5V detection to use VBUSVALID. */ void en_vbusvalid(void) { uint32_t tmp_r; tmp_r = REG_RD(PWR_5VCTRL); tmp_r &= ~HW_POWER_5VCTRL_VBUSVALID_TRSH; tmp_r |= __SHIFTIN(VBUSVALID_TRSH, HW_POWER_5VCTRL_VBUSVALID_TRSH); REG_WR(PWR_5VCTRL, tmp_r); REG_WR(PWR_5VCTRL_S, HW_POWER_5VCTRL_PWRUP_VBUS_CMPS); delay(1000); REG_WR(PWR_5VCTRL_S, HW_POWER_5VCTRL_VBUSVALID_5VDETECT); return; } /* * Test VBUSVALID. */ int vbusvalid(void) { if (REG_RD(PWR_STATUS) & HW_POWER_STS_VBUSVALID) return 1; else return 0; } /* * Set various registers. */ void power_tune(void) { uint32_t tmp_r; REG_WR(PWR_LOOPCTRL_S, HW_POWER_LOOPCTRL_TOGGLE_DIF | HW_POWER_LOOPCTRL_EN_CM_HYST | HW_POWER_LOOPCTRL_EN_DF_HYST | HW_POWER_LOOPCTRL_RCSCALE_THRESH | __SHIFTIN(3, HW_POWER_LOOPCTRL_EN_RCSCALE)); REG_WR(PWR_MINPWR_S, HW_POWER_MINPWR_DOUBLE_FETS); REG_WR(PWR_5VCTRL_S, __SHIFTIN(4, HW_POWER_5VCTRL_HEADROOM_ADJ)); tmp_r = REG_RD(PWR_DCLIMITS); tmp_r &= ~HW_POWER_DCLIMITS_POSLIMIT_BUCK; tmp_r |= __SHIFTIN(0x30, HW_POWER_DCLIMITS_POSLIMIT_BUCK); REG_WR(PWR_DCLIMITS, tmp_r); return; } /* * AN3883.pdf 2.1.3.1 Enabling the 4P2 LinReg */ void en_4p2_reg(void) { uint32_t tmp_r; int ilimit; /* TRG is 4.2V by default. */ tmp_r = REG_RD(PWR_DCDC4P2); tmp_r |= HW_POWER_DCDC4P2_ENABLE_4P2; REG_WR(PWR_DCDC4P2, tmp_r); REG_WR(PWR_CHARGE_S, HW_POWER_CHARGE_ENABLE_LOAD); /* Set CHARGE_4P2_ILIMIT to minimum. */ REG_WR(PWR_5VCTRL_C, HW_POWER_5VCTRL_CHARGE_4P2_ILIMIT); REG_WR(PWR_5VCTRL_S, __SHIFTIN(1, HW_POWER_5VCTRL_CHARGE_4P2_ILIMIT)); /* Power up 4.2V regulation circuit. */ REG_WR(PWR_5VCTRL_C, HW_POWER_5VCTRL_PWD_CHARGE_4P2); /* Ungate path from 4P2 reg to DCDC. */ tmp_r = REG_RD(PWR_DCDC4P2); tmp_r |= HW_POWER_DCDC4P2_ENABLE_DCDC; REG_WR(PWR_DCDC4P2, tmp_r); delay(10000); /* Charge 4P2 capacitance. */ tmp_r = REG_RD(PWR_5VCTRL); for (ilimit = 2; ilimit <= CHARGE_4P2_ILIMIT_MAX; ilimit++) { tmp_r &= ~HW_POWER_5VCTRL_CHARGE_4P2_ILIMIT; tmp_r |= __SHIFTIN(ilimit, HW_POWER_5VCTRL_CHARGE_4P2_ILIMIT); REG_WR(PWR_5VCTRL, tmp_r); delay(10000); } return; } /* * AN3883.pdf 2.1.3.3 Enabling 4P2 Input to DC-DC */ void en_4p2_to_dcdc(void) { uint32_t tmp_r; tmp_r = REG_RD(PWR_DCDC4P2); tmp_r &= ~HW_POWER_DCDC4P2_CMPTRIP; tmp_r |= __SHIFTIN(CMPTRIP, HW_POWER_DCDC4P2_CMPTRIP); tmp_r &= ~HW_POWER_DCDC4P2_DROPOUT_CTRL; tmp_r |= __SHIFTIN(DROPOUT_CTRL, HW_POWER_DCDC4P2_DROPOUT_CTRL); REG_WR(PWR_DCDC4P2, tmp_r); REG_WR(PWR_5VCTRL_C, HW_POWER_5VCTRL_DCDC_XFER); /* Enabling DCDC triggers 5V brownout. */ REG_WR(PWR_5VCTRL_C, HW_POWER_5VCTRL_PWDN_5VBRNOUT); REG_WR(PWR_5VCTRL_S, HW_POWER_5VCTRL_ENABLE_DCDC); delay(10000); REG_WR(PWR_5VCTRL_S, HW_POWER_5VCTRL_PWDN_5VBRNOUT); /* Now DCDC is using 4P2 so I can remove extra temporary load. */ REG_WR(PWR_CHARGE_C, HW_POWER_CHARGE_ENABLE_LOAD); return; } /* * Configure VDDD to source power from DCDC. */ void power_vddd_from_dcdc(int target, int brownout) { uint32_t tmp_r; /* BO_OFFSET must be within 800mV - 1475mV */ if (brownout > 1475) brownout = 1475; else if (brownout < 800) brownout = 800; /* Set LINREG_OFFSET one step below TRG. 
*/ tmp_r = REG_RD(PWR_VDDDCTRL); tmp_r &= ~HW_POWER_VDDDCTRL_LINREG_OFFSET; tmp_r |= __SHIFTIN(2, HW_POWER_VDDDCTRL_LINREG_OFFSET); REG_WR(PWR_VDDDCTRL, tmp_r); delay(10000); /* Enable VDDD switching converter output. */ tmp_r = REG_RD(PWR_VDDDCTRL); tmp_r &= ~HW_POWER_VDDDCTRL_DISABLE_FET; REG_WR(PWR_VDDDCTRL, tmp_r); delay(10000); /* Disable linear regulator output. */ tmp_r = REG_RD(PWR_VDDDCTRL); tmp_r &= ~HW_POWER_VDDDCTRL_ENABLE_LINREG; REG_WR(PWR_VDDDCTRL, tmp_r); delay(10000); /* Set target voltage and brownout level. */ tmp_r = REG_RD(PWR_VDDDCTRL); tmp_r &= ~(HW_POWER_VDDDCTRL_BO_OFFSET | HW_POWER_VDDDCTRL_TRG); tmp_r |= __SHIFTIN(((target - brownout) / 25), HW_POWER_VDDDCTRL_BO_OFFSET); tmp_r |= __SHIFTIN(((target - 800) / 25), HW_POWER_VDDDCTRL_TRG); REG_WR(PWR_VDDDCTRL, tmp_r); delay(10000); /* Enable PWDN_BRNOUT. */ REG_WR(PWR_CTRL_C, HW_POWER_CTRL_VDDD_BO_IRQ); tmp_r = REG_RD(PWR_VDDDCTRL); tmp_r |= HW_POWER_VDDDCTRL_PWDN_BRNOUT; REG_WR(PWR_VDDDCTRL, tmp_r); return; } /* * Configure VDDA to source power from DCDC. */ void power_vdda_from_dcdc(int target, int brownout) { uint32_t tmp_r; /* BO_OFFSET must be within 1400mV - 2175mV */ if (brownout > 2275) brownout = 2275; else if (brownout < 1400) brownout = 1400; /* Set LINREG_OFFSET one step below TRG. */ tmp_r = REG_RD(PWR_VDDACTRL); tmp_r &= ~HW_POWER_VDDACTRL_LINREG_OFFSET; tmp_r |= __SHIFTIN(2, HW_POWER_VDDACTRL_LINREG_OFFSET); REG_WR(PWR_VDDACTRL, tmp_r); delay(10000); /* Enable VDDA switching converter output. */ tmp_r = REG_RD(PWR_VDDACTRL); tmp_r &= ~HW_POWER_VDDACTRL_DISABLE_FET; REG_WR(PWR_VDDACTRL, tmp_r); delay(10000); /* Disable linear regulator output. */ tmp_r = REG_RD(PWR_VDDACTRL); tmp_r &= ~HW_POWER_VDDACTRL_ENABLE_LINREG; REG_WR(PWR_VDDACTRL, tmp_r); delay(10000); /* Set target voltage and brownout level. */ tmp_r = REG_RD(PWR_VDDACTRL); tmp_r &= ~(HW_POWER_VDDACTRL_BO_OFFSET | HW_POWER_VDDACTRL_TRG); tmp_r |= __SHIFTIN(((target - brownout) / 25), HW_POWER_VDDACTRL_BO_OFFSET); tmp_r |= __SHIFTIN(((target - 1500) / 25), HW_POWER_VDDACTRL_TRG); REG_WR(PWR_VDDACTRL, tmp_r); delay(10000); /* Enable PWDN_BRNOUT. */ REG_WR(PWR_CTRL_C, HW_POWER_CTRL_VDDA_BO_IRQ); tmp_r = REG_RD(PWR_VDDACTRL); tmp_r |= HW_POWER_VDDACTRL_PWDN_BRNOUT; REG_WR(PWR_VDDACTRL, tmp_r); return; } /* * Configure VDDIO to source power from DCDC. */ void power_vddio_from_dcdc(int target, int brownout) { uint32_t tmp_r; /* BO_OFFSET must be within 2700mV - 3475mV */ if (brownout > 3475) brownout = 3475; else if (brownout < 2700) brownout = 2700; /* Set LINREG_OFFSET one step below TRG. */ tmp_r = REG_RD(PWR_VDDIOCTRL); tmp_r &= ~HW_POWER_VDDIOCTRL_LINREG_OFFSET; tmp_r |= __SHIFTIN(2, HW_POWER_VDDIOCTRL_LINREG_OFFSET); REG_WR(PWR_VDDIOCTRL, tmp_r); delay(10000); /* Enable VDDIO switching converter output. */ tmp_r = REG_RD(PWR_VDDIOCTRL); tmp_r &= ~HW_POWER_VDDIOCTRL_DISABLE_FET; REG_WR(PWR_VDDIOCTRL, tmp_r); delay(10000); /* Set target voltage and brownout level. */ tmp_r = REG_RD(PWR_VDDIOCTRL); tmp_r &= ~(HW_POWER_VDDIOCTRL_BO_OFFSET | HW_POWER_VDDIOCTRL_TRG); tmp_r |= __SHIFTIN(((target - brownout) / 25), HW_POWER_VDDIOCTRL_BO_OFFSET); tmp_r |= __SHIFTIN(((target - 2800) / 25), HW_POWER_VDDIOCTRL_TRG); REG_WR(PWR_VDDIOCTRL, tmp_r); delay(10000); /* Enable PWDN_BRNOUT. 
*/ REG_WR(PWR_CTRL_C, HW_POWER_CTRL_VDDIO_BO_IRQ); tmp_r = REG_RD(PWR_VDDIOCTRL); tmp_r |= HW_POWER_VDDIOCTRL_PWDN_BRNOUT; REG_WR(PWR_VDDIOCTRL, tmp_r); return; } /* * AN3883.pdf 2.3.1.2 Setting VDDMEM Target Voltage */ void power_vddmem(int target) { uint32_t tmp_r; /* Set target voltage. */ tmp_r = REG_RD(PWR_VDDMEMCTRL); tmp_r &= ~(HW_POWER_VDDMEMCTRL_TRG); tmp_r |= __SHIFTIN(((target - 1700) / 50), HW_POWER_VDDMEMCTRL_TRG); REG_WR(PWR_VDDMEMCTRL, tmp_r); delay(10000); tmp_r = REG_RD(PWR_VDDMEMCTRL); tmp_r |= (HW_POWER_VDDMEMCTRL_PULLDOWN_ACTIVE | HW_POWER_VDDMEMCTRL_ENABLE_ILIMIT | HW_POWER_VDDMEMCTRL_ENABLE_LINREG); REG_WR(PWR_VDDMEMCTRL, tmp_r); delay(1000); tmp_r = REG_RD(PWR_VDDMEMCTRL); tmp_r &= ~(HW_POWER_VDDMEMCTRL_PULLDOWN_ACTIVE | HW_POWER_VDDMEMCTRL_ENABLE_ILIMIT); REG_WR(PWR_VDDMEMCTRL, tmp_r); return; }
Row 5
blob_id: 92313c0c0dd46f485465cf4c1df170b643d79b58
directory_id: 4caa4cbd5b06a3ceeebb04c077a1baf65375c8e9
path: /src/fs/iwfsmfile.c
content_id: 75cc4d594991a7c88e7060b27553ce237e96ef18
detected_licenses: [ "MIT" ]
license_type: permissive
repo_name: Softmotions/iowow
snapshot_id: 22cdfa56944fd38d106d603a41a14541f1d96ffb
revision_id: a233a246577f5ba02a643a23b417c7bac7feaf06
branch_name: refs/heads/master
visit_date: 2023-08-08T13:19:12.913000
revision_date: 2023-08-06T05:21:28
committer_date: 2023-08-06T05:21:28
github_id: 40,618,114
star_events_count: 274
fork_events_count: 25
gha_license_id: MIT
gha_event_created_at: 2023-08-06T04:13:54
gha_created_at: 2015-08-12T18:44:57
gha_language: C
src_encoding: UTF-8
language: C
is_vendor: false
is_generated: false
length_bytes: 59,849
extension: c
filename: iwfsmfile.c
content:
/************************************************************************************************** * IOWOW library * * MIT License * * Copyright (c) 2012-2022 Softmotions Ltd <[email protected]> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. *************************************************************************************************/ #include "iwfsmfile.h" #include "iwavl.h" #include "iwbits.h" #include "iwlog.h" #include "iwp.h" #include "iwutils.h" #include "iwcfg.h" #include <pthread.h> void iwfs_fsmdbg_dump_fsm_tree(IWFS_FSM *f, const char *hdr); /** * Free-space blocks-tree key. */ struct bkey { uint32_t off; uint32_t len; }; struct bkey_node { struct bkey key; struct iwavl_node node; }; #define BKEY(nptr_) iwavl_entry(nptr_, struct bkey_node, node)->key /** Additional options for `_fsm_set_bit_status_lw` routine */ typedef uint8_t fsm_bmopts_t; /** No options. */ #define FSM_BM_NONE ((fsm_bmopts_t) 0x00U) /** Do not modify bitmap. */ #define FSM_BM_DRY_RUN ((fsm_bmopts_t) 0x01U) /** Perform strict checking of bitmap consistency */ #define FSM_BM_STRICT ((fsm_bmopts_t) 0x02U) /* Maximum size of block: 1Mb */ #define FSM_MAX_BLOCK_POW 20 /* Maximum number of records used in allocation statistics */ #define FSM_MAX_STATS_COUNT 0x0000ffff #define FSM_ENSURE_OPEN(impl_) \ if (!(impl_) || !(impl_)->f) return IW_ERROR_INVALID_STATE; #define FSM_ENSURE_OPEN2(f_) \ if (!(f_) || !(f_)->impl) return IW_ERROR_INVALID_STATE; #define FSMBK_OFFSET(b_) ((b_)->off) #define FSMBK_LENGTH(b_) ((b_)->len) //////////////////////////////////////////////////////////////////////////////////////////////////// struct fsm { IWFS_EXT pool; /**< Underlying rwl file. */ uint64_t bmlen; /**< Free-space bitmap block length in bytes. */ uint64_t bmoff; /**< Free-space bitmap block offset in bytes. */ uint64_t lfbkoff; /**< Offset in blocks of free block chunk with the largest offset. */ uint64_t lfbklen; /**< Length in blocks of free block chunk with the largest offset. */ uint64_t crzsum; /**< Cumulative sum all allocated blocks */ uint64_t crzvar; /**< Record sizes standard variance (deviation^2 * N) */ uint32_t hdrlen; /**< Length of custom file header */ uint32_t crznum; /**< Number of all allocated continuous areas acquired by `allocate` */ uint32_t fsmnum; /**< Number of records in fsm */ IWFS_FSM *f; /**< Self reference. */ IWDLSNR *dlsnr; /**< Data events listener */ struct iwavl_node *root; /**< Free-space tree */ pthread_rwlock_t *ctlrwlk; /**< Methods RW lock */ size_t aunit; /**< System allocation unit size. 
- Page size on *NIX - Minimal allocation unit for WIN32 */ iwfs_fsm_openflags oflags; /**< Operation mode flags. */ iwfs_omode omode; /**< Open mode. */ uint8_t bpow; /**< Block size power for 2 */ bool mmap_all; /**< Mmap all file data */ iwfs_ext_mmap_opts_t mmap_opts; /**< Defaul mmap options used in `add_mmap` */ }; static iwrc _fsm_ensure_size_lw(struct fsm *fsm, off_t size); //////////////////////////////////////////////////////////////////////////////////////////////////// IW_INLINE int _fsm_cmp_key(const struct bkey *a, const struct bkey *b) { int ret = ((FSMBK_LENGTH(b) < FSMBK_LENGTH(a)) - (FSMBK_LENGTH(a) < FSMBK_LENGTH(b))); if (ret) { return ret; } else { return ((FSMBK_OFFSET(b) < FSMBK_OFFSET(a)) - (FSMBK_OFFSET(a) < FSMBK_OFFSET(b))); } } IW_INLINE int _fsm_cmp_node(const struct iwavl_node *an, const struct iwavl_node *bn) { const struct bkey *ak = &BKEY(an); const struct bkey *bk = &BKEY(bn); return _fsm_cmp_key(ak, bk); } IW_INLINE int _fsm_cmp_ctx(const void *ctx, const struct iwavl_node *bn) { const struct bkey *ak = ctx; const struct bkey *bk = &BKEY(bn); return _fsm_cmp_key(ak, bk); } IW_INLINE iwrc _fsm_ctrl_wlock(struct fsm *fsm) { int rci = fsm->ctlrwlk ? pthread_rwlock_wrlock(fsm->ctlrwlk) : 0; return (rci ? iwrc_set_errno(IW_ERROR_THREADING_ERRNO, rci) : 0); } IW_INLINE iwrc _fsm_ctrl_rlock(struct fsm *fsm) { int rci = fsm->ctlrwlk ? pthread_rwlock_rdlock(fsm->ctlrwlk) : 0; return (rci ? iwrc_set_errno(IW_ERROR_THREADING_ERRNO, rci) : 0); } IW_INLINE iwrc _fsm_ctrl_unlock(struct fsm *fsm) { int rci = fsm->ctlrwlk ? pthread_rwlock_unlock(fsm->ctlrwlk) : 0; return (rci ? iwrc_set_errno(IW_ERROR_THREADING_ERRNO, rci) : 0); } IW_INLINE iwrc _fsm_bmptr(struct fsm *fsm, uint64_t **bmptr) { size_t sp; uint8_t *mm; *bmptr = 0; // get mmap pointer without locked iwrc rc = fsm->pool.probe_mmap(&fsm->pool, fsm->mmap_all ? 
0 : fsm->bmoff, &mm, &sp); RCRET(rc); if (fsm->mmap_all) { if (sp < fsm->bmoff + fsm->bmlen) { return IWFS_ERROR_NOT_MMAPED; } *bmptr = (uint64_t*) (mm + fsm->bmoff); } else { if (sp < fsm->bmlen) { return IWFS_ERROR_NOT_MMAPED; } *bmptr = (uint64_t*) mm; } return 0; } IW_INLINE WUR iwrc _fsm_init_bkey_node(struct bkey_node *n, uint64_t offset_blk, uint64_t len_blk) { if (offset_blk > (uint32_t) -1 || len_blk > (uint32_t) -1) { return IW_ERROR_OVERFLOW; } n->key.off = (uint32_t) offset_blk; n->key.len = (uint32_t) len_blk; return 0; } IW_INLINE iwrc _fsm_init_bkey(struct bkey *k, uint64_t offset_blk, uint64_t len_blk) { if (offset_blk > (uint32_t) -1 || len_blk > (uint32_t) -1) { return IW_ERROR_OVERFLOW; } k->off = (uint32_t) offset_blk; k->len = (uint32_t) len_blk; return 0; } IW_INLINE void _fsm_del_fbk2(struct fsm *fsm, struct iwavl_node *n) { iwavl_remove(&fsm->root, n), --fsm->fsmnum; struct bkey_node *bk = iwavl_entry(n, struct bkey_node, node); if (bk->key.off == fsm->lfbkoff) { fsm->lfbkoff = 0; fsm->lfbklen = 0; } free(bk); } IW_INLINE void _fsm_del_fbk(struct fsm *fsm, uint64_t offset_blk, uint64_t length_blk) { struct bkey bkey; if (!_fsm_init_bkey(&bkey, offset_blk, length_blk)) { struct iwavl_node *n = iwavl_lookup(fsm->root, &bkey, _fsm_cmp_ctx); assert(n); if (n) { _fsm_del_fbk2(fsm, n); } } } IW_INLINE iwrc _fsm_put_fbk(struct fsm *fsm, uint64_t offset_blk, uint64_t length_blk) { iwrc rc = 0; struct bkey_node *bk; RCB(finish, bk = malloc(sizeof(*bk))); RCC(rc, finish, _fsm_init_bkey_node(bk, offset_blk, length_blk)); if (iwavl_insert(&fsm->root, &bk->node, _fsm_cmp_node)) { free(bk); } else { ++fsm->fsmnum; if (offset_blk + length_blk >= fsm->lfbkoff + fsm->lfbklen) { fsm->lfbkoff = offset_blk; fsm->lfbklen = length_blk; } } finish: if (rc) { free(bk); } return rc; } IW_INLINE const struct iwavl_node* _fsm_find_matching_fblock_lw( struct fsm *fsm, uint64_t offset_blk, uint64_t length_blk, iwfs_fsm_aflags opts ) { struct bkey bk; const struct iwavl_node *ub, *lb; if (_fsm_init_bkey(&bk, offset_blk, length_blk)) { return 0; } iwavl_lookup_bounds(fsm->root, &bk, _fsm_cmp_ctx, &lb, &ub); struct bkey *uk = ub ? &BKEY(ub) : 0; struct bkey *lk = lb ? &BKEY(lb) : 0; uint64_t lklength = lk ? FSMBK_LENGTH(lk) : 0; uint64_t uklength = uk ? FSMBK_LENGTH(uk) : 0; if (lklength == length_blk) { return lb; } else if (uklength == length_blk) { return ub; } if (lklength > length_blk) { return lb; } else if (uklength > length_blk) { return ub; } return 0; } /** * @brief Set the allocation bits in the fsm bitmap. * * @param fms * @param offset_bits Bit offset in the bitmap. 
* @param length_bits Number of bits to set * @param bit_status If `1` bits will be set to `1` otherwise `0` * @param opts Operation options */ static iwrc _fsm_set_bit_status_lw( struct fsm *fsm, const uint64_t offset_bits, const uint64_t length_bits_, const int bit_status, const fsm_bmopts_t opts ) { iwrc rc; size_t sp; uint8_t *mm; register int64_t length_bits = length_bits_; register uint64_t *p, set_mask; uint64_t bend = offset_bits + length_bits; int set_bits; if (bend < offset_bits) { // overflow return IW_ERROR_OUT_OF_BOUNDS; } assert(fsm->bmlen * 8 >= offset_bits + length_bits); if (fsm->bmlen * 8 < offset_bits + length_bits) { return IWFS_ERROR_FSM_SEGMENTATION; } if (fsm->mmap_all) { rc = fsm->pool.probe_mmap(&fsm->pool, 0, &mm, &sp); RCRET(rc); if (sp < fsm->bmoff + fsm->bmlen) { return IWFS_ERROR_NOT_MMAPED; } else { mm += fsm->bmoff; } } else { rc = fsm->pool.probe_mmap(&fsm->pool, fsm->bmoff, &mm, &sp); RCRET(rc); if (sp < fsm->bmlen) { return IWFS_ERROR_NOT_MMAPED; } } p = ((uint64_t*) mm) + offset_bits / 64; set_bits = 64 - (offset_bits & (64 - 1)); // NOLINT set_mask = (~((uint64_t) 0) << (offset_bits & (64 - 1))); #ifdef IW_BIGENDIAN while (length_bits - set_bits >= 0) { uint64_t pv = *p; pv = IW_ITOHLL(pv); if (bit_status) { if ((opts & FSM_BM_STRICT) && (pv & set_mask)) { rc = IWFS_ERROR_FSM_SEGMENTATION; } if ((opts & FSM_BM_DRY_RUN) == 0) { pv |= set_mask; *p = IW_HTOILL(pv); } } else { if ((opts & FSM_BM_STRICT) && ((pv & set_mask) != set_mask)) { rc = IWFS_ERROR_FSM_SEGMENTATION; } if ((opts & FSM_BM_DRY_RUN) == 0) { pv &= ~set_mask; *p = IW_HTOILL(pv); } } length_bits -= set_bits; set_bits = 64; set_mask = ~((uint64_t) 0); ++p; } if (length_bits) { uint64_t pv = *p; pv = IW_ITOHLL(pv); set_mask &= (bend & (64 - 1)) ? ((((uint64_t) 1) << (bend & (64 - 1))) - 1) : ~((uint64_t) 0); if (bit_status) { if ((opts & FSM_BM_STRICT) && (pv & set_mask)) { rc = IWFS_ERROR_FSM_SEGMENTATION; } if ((opts & FSM_BM_DRY_RUN) == 0) { pv |= set_mask; *p = IW_HTOILL(pv); } } else { if ((opts & FSM_BM_STRICT) && ((pv & set_mask) != set_mask)) { rc = IWFS_ERROR_FSM_SEGMENTATION; } if ((opts & FSM_BM_DRY_RUN) == 0) { pv &= ~set_mask; *p = IW_HTOILL(pv); } } } #else while (length_bits - set_bits >= 0) { if (bit_status) { if ((opts & FSM_BM_STRICT) && (*p & set_mask)) { rc = IWFS_ERROR_FSM_SEGMENTATION; } if ((opts & FSM_BM_DRY_RUN) == 0) { *p |= set_mask; } } else { if ((opts & FSM_BM_STRICT) && ((*p & set_mask) != set_mask)) { rc = IWFS_ERROR_FSM_SEGMENTATION; } if ((opts & FSM_BM_DRY_RUN) == 0) { *p &= ~set_mask; } } length_bits -= set_bits; set_bits = 64; set_mask = ~((uint64_t) 0); ++p; } if (length_bits) { set_mask &= (bend & (64 - 1)) ? ((((uint64_t) 1) << (bend & (64 - 1))) - 1) : ~((uint64_t) 0); if (bit_status) { if ((opts & FSM_BM_STRICT) && (*p & set_mask)) { rc = IWFS_ERROR_FSM_SEGMENTATION; } if ((opts & FSM_BM_DRY_RUN) == 0) { *p |= set_mask; } } else { if ((opts & FSM_BM_STRICT) && ((*p & set_mask) != set_mask)) { rc = IWFS_ERROR_FSM_SEGMENTATION; } if ((opts & FSM_BM_DRY_RUN) == 0) { *p &= ~set_mask; } } } #endif if (!rc && fsm->dlsnr) { uint64_t so = offset_bits / 8; uint64_t lb = length_bits_ + offset_bits % 8; uint64_t dl = lb / 8; if (lb % 8) { ++dl; } rc = fsm->dlsnr->onwrite(fsm->dlsnr, fsm->bmoff + so, mm + so, dl, 0); } return rc; } /** * @brief Allocate a continuous segment of blocks with page aligned offset. * * @param fsm `struct fsm` * @param length_blk Desired segment length in blocks. 
* @param [in,out] offset_blk Allocated segment offset in blocks will be stored into. It also specified the desired segment offset to provide * allocation locality. * @param [out] olength_blk Assigned segment length in blocks. * @param max_offset_blk Maximal offset of allocated block. * @param opts Allocation options. */ static iwrc _fsm_blk_allocate_aligned_lw( struct fsm *fsm, const uint64_t length_blk, uint64_t *offset_blk, uint64_t *olength_blk, const uint64_t max_offset_blk, const iwfs_fsm_aflags opts ) { fsm_bmopts_t bopts = FSM_BM_NONE; size_t aunit_blk = (fsm->aunit >> fsm->bpow); assert(fsm && length_blk > 0); if (fsm->oflags & IWFSM_STRICT) { bopts |= FSM_BM_STRICT; } *olength_blk = 0; *offset_blk = 0; /* First attempt */ const struct iwavl_node *nn = _fsm_find_matching_fblock_lw(fsm, 0, length_blk + aunit_blk, opts); if (!nn) { nn = _fsm_find_matching_fblock_lw(fsm, 0, length_blk, opts); if (!nn) { return IWFS_ERROR_NO_FREE_SPACE; } } struct bkey *nk = &BKEY(nn); uint64_t akoff = FSMBK_OFFSET(nk); uint64_t aklen = FSMBK_LENGTH(nk); uint64_t noff = IW_ROUNDUP(akoff, aunit_blk); if ((noff <= max_offset_blk) && (noff < aklen + akoff) && (aklen - (noff - akoff) >= length_blk)) { _fsm_del_fbk(fsm, akoff, aklen); aklen = aklen - (noff - akoff); if (noff > akoff) { _fsm_put_fbk(fsm, akoff, noff - akoff); } if (aklen > length_blk) { _fsm_put_fbk(fsm, noff + length_blk, aklen - length_blk); } *offset_blk = noff; *olength_blk = length_blk; return _fsm_set_bit_status_lw(fsm, noff, length_blk, 1, bopts); } aklen = 0; akoff = UINT64_MAX; // full scan for (struct iwavl_node *n = iwavl_first_in_order(fsm->root); n; n = iwavl_next_in_order(n)) { struct bkey *k = &BKEY(n); uint64_t koff = FSMBK_OFFSET(k); uint64_t klen = FSMBK_LENGTH(k); if (koff < akoff) { noff = IW_ROUNDUP(koff, aunit_blk); if (noff <= max_offset_blk && (noff < klen + koff) && (klen - (noff - koff) >= length_blk)) { akoff = koff; aklen = klen; } } } if (akoff == UINT64_MAX) { return IWFS_ERROR_NO_FREE_SPACE; } _fsm_del_fbk(fsm, akoff, aklen); noff = IW_ROUNDUP(akoff, aunit_blk); aklen = aklen - (noff - akoff); if (noff > akoff) { _fsm_put_fbk(fsm, akoff, noff - akoff); } if (aklen > length_blk) { _fsm_put_fbk(fsm, noff + length_blk, aklen - length_blk); } *offset_blk = noff; *olength_blk = length_blk; return _fsm_set_bit_status_lw(fsm, noff, length_blk, 1, bopts); } static void _fsm_node_destroy(struct iwavl_node *root) { for (struct iwavl_node *n = iwavl_first_in_postorder(root), *p; n && (p = iwavl_get_parent(n), 1); n = iwavl_next_in_postorder(n, p)) { struct bkey_node *bk = iwavl_entry(n, struct bkey_node, node); free(bk); } } /** * @brief Load existing bitmap area into free-space search tree. * @param fsm `struct fsm` * @param bm Bitmap area start ptr * @param len Bitmap area length in bytes. 
*/ static void _fsm_load_fsm_lw(struct fsm *fsm, const uint8_t *bm, uint64_t len) { uint64_t cbnum = 0, fbklength = 0, fbkoffset = 0; _fsm_node_destroy(fsm->root); fsm->root = 0; fsm->fsmnum = 0; for (uint64_t b = 0; b < len; ++b) { register uint8_t bb = bm[b]; if (bb == 0) { fbklength += 8; cbnum += 8; } else if (bb == 0xffU) { if (fbklength) { fbkoffset = cbnum - fbklength; _fsm_put_fbk(fsm, fbkoffset, fbklength); fbklength = 0; } cbnum += 8; } else { for (int i = 0; i < 8; ++i, ++cbnum) { if (bb & (1U << i)) { if (fbklength) { fbkoffset = cbnum - fbklength; _fsm_put_fbk(fsm, fbkoffset, fbklength); fbklength = 0; } } else { ++fbklength; } } } } if (fbklength > 0) { fbkoffset = len * 8 - fbklength; _fsm_put_fbk(fsm, fbkoffset, fbklength); } } /** * @brief Flush a current `iwfsmfile` metadata into the file header. * @param fsm * @param is_sync If `1` perform mmap sync. * @return */ static iwrc _fsm_write_meta_lw(struct fsm *fsm) { uint64_t llv; size_t wlen; uint32_t sp = 0, lv; uint8_t hdr[IWFSM_CUSTOM_HDR_DATA_OFFSET] = { 0 }; /* [FSM_CTL_MAGICK u32][block pow u8] [bmoffset u64][bmlength u64] [u64 crzsum][u32 crznum][u64 crszvar][u256 reserved] [custom header size u32][custom header data...] [fsm data...] */ /* magic */ lv = IW_HTOIL(IWFSM_MAGICK); assert(sp + sizeof(lv) <= IWFSM_CUSTOM_HDR_DATA_OFFSET); memcpy(hdr + sp, &lv, sizeof(lv)); sp += sizeof(lv); /* block pow */ static_assert(sizeof(fsm->bpow) == 1, "sizeof(fms->bpow) == 1"); assert(sp + sizeof(fsm->bpow) <= IWFSM_CUSTOM_HDR_DATA_OFFSET); memcpy(hdr + sp, &fsm->bpow, sizeof(fsm->bpow)); sp += sizeof(fsm->bpow); /* fsm bitmap block offset */ llv = fsm->bmoff; llv = IW_HTOILL(llv); assert(sp + sizeof(llv) <= IWFSM_CUSTOM_HDR_DATA_OFFSET); memcpy(hdr + sp, &llv, sizeof(llv)); sp += sizeof(llv); /* fsm bitmap block length */ llv = fsm->bmlen; llv = IW_HTOILL(llv); assert(sp + sizeof(llv) <= IWFSM_CUSTOM_HDR_DATA_OFFSET); memcpy(hdr + sp, &llv, sizeof(llv)); sp += sizeof(llv); /* Cumulative sum of record sizes acquired by `allocate` */ llv = fsm->crzsum; llv = IW_HTOILL(llv); assert(sp + sizeof(llv) <= IWFSM_CUSTOM_HDR_DATA_OFFSET); memcpy(hdr + sp, &llv, sizeof(llv)); sp += sizeof(llv); /* Cumulative number of records acquired by `allocated` */ lv = fsm->crznum; lv = IW_HTOIL(lv); assert(sp + sizeof(lv) <= IWFSM_CUSTOM_HDR_DATA_OFFSET); memcpy(hdr + sp, &lv, sizeof(lv)); sp += sizeof(lv); /* Record sizes standard variance (deviation^2 * N) */ llv = fsm->crzvar; llv = IW_HTOILL(llv); assert(sp + sizeof(lv) <= IWFSM_CUSTOM_HDR_DATA_OFFSET); memcpy(hdr + sp, &llv, sizeof(llv)); sp += sizeof(llv); /* Reserved */ sp += 32; /* Size of header */ lv = fsm->hdrlen; lv = IW_HTOIL(lv); assert(sp + sizeof(lv) <= IWFSM_CUSTOM_HDR_DATA_OFFSET); memcpy(hdr + sp, &lv, sizeof(lv)); sp += sizeof(lv); assert(sp == IWFSM_CUSTOM_HDR_DATA_OFFSET); return fsm->pool.write(&fsm->pool, 0, hdr, IWFSM_CUSTOM_HDR_DATA_OFFSET, &wlen); } /** * @brief Search for the first next set bit position * starting from the specified offset bit (INCLUDED). 
*/ static uint64_t _fsm_find_next_set_bit( const uint64_t *addr, register uint64_t offset_bit, const uint64_t max_offset_bit, int *found ) { *found = 0; register uint64_t bit, size; register const uint64_t *p = addr + offset_bit / 64; if (offset_bit >= max_offset_bit) { return 0; } bit = offset_bit & (64 - 1); offset_bit -= bit; size = max_offset_bit - offset_bit; #ifdef IW_BIGENDIAN uint64_t pv = *p; if (bit) { pv = IW_ITOHLL(pv) & (~((uint64_t) 0) << bit); if (pv) { pv = iwbits_find_first_sbit64(pv); if (pv >= size) { return 0; } else { *found = 1; return offset_bit + pv; } } if (size <= 64) { return 0; } offset_bit += 64; size -= 64; ++p; } while (size & ~(64 - 1)) { pv = *(p++); if (pv) { *found = 1; return offset_bit + iwbits_find_first_sbit64(IW_ITOHLL(pv)); } offset_bit += 64; size -= 64; } if (!size) { return 0; } pv = *p; pv = IW_ITOHLL(pv) & (~((uint64_t) 0) >> (64 - size)); if (pv) { *found = 1; return offset_bit + iwbits_find_first_sbit64(pv); } else { return 0; } #else register uint64_t tmp; if (bit) { tmp = *p & (~((uint64_t) 0) << bit); if (tmp) { tmp = iwbits_find_first_sbit64(tmp); if (tmp >= size) { return 0; } else { *found = 1; return offset_bit + tmp; } } if (size <= 64) { return 0; } offset_bit += 64; size -= 64; ++p; } while (size & ~(64 - 1)) { if ((tmp = *(p++))) { *found = 1; return offset_bit + iwbits_find_first_sbit64(tmp); } offset_bit += 64; size -= 64; } if (!size) { return 0; } tmp = (*p) & (~((uint64_t) 0) >> (64 - size)); if (tmp) { *found = 1; return offset_bit + iwbits_find_first_sbit64(tmp); } else { return 0; } #endif } /** * @brief Search for the first previous set bit position * starting from the specified offset_bit (EXCLUDED). */ static uint64_t _fsm_find_prev_set_bit( const uint64_t *addr, register uint64_t offset_bit, const uint64_t min_offset_bit, int *found ) { register const uint64_t *p; register uint64_t tmp, bit, size; *found = 0; if (min_offset_bit >= offset_bit) { return 0; } size = offset_bit - min_offset_bit; bit = offset_bit & (64 - 1); p = addr + offset_bit / 64; #ifdef IW_BIGENDIAN uint64_t pv; if (bit) { pv = *p; pv = (iwbits_reverse_64(IW_ITOHLL(pv)) >> (64 - bit)); if (pv) { pv = iwbits_find_first_sbit64(pv); if (pv >= size) { return 0; } else { *found = 1; assert(offset_bit > pv); return offset_bit > pv ? offset_bit - pv - 1 : 0; } } offset_bit -= bit; size -= bit; } while (size & ~(64 - 1)) { if (*(--p)) { pv = *p; *found = 1; tmp = iwbits_find_first_sbit64(iwbits_reverse_64(IW_ITOHLL(pv))); assert(offset_bit > tmp); return offset_bit > tmp ? offset_bit - tmp - 1 : 0; } offset_bit -= 64; size -= 64; } if (size == 0) { return 0; } pv = *(--p); tmp = iwbits_reverse_64(IW_ITOHLL(pv)) & ((((uint64_t) 1) << size) - 1); #else if (bit) { tmp = (iwbits_reverse_64(*p) >> (64 - bit)); if (tmp) { tmp = iwbits_find_first_sbit64(tmp); if (tmp >= size) { return 0; } else { *found = 1; assert(offset_bit > tmp); return offset_bit > tmp ? offset_bit - tmp - 1 : 0; } } offset_bit -= bit; size -= bit; } while (size & ~(64 - 1)) { if (*(--p)) { *found = 1; tmp = iwbits_find_first_sbit64(iwbits_reverse_64(*p)); assert(offset_bit > tmp); return offset_bit > tmp ? offset_bit - tmp - 1 : 0; } offset_bit -= 64; size -= 64; } if (size == 0) { return 0; } tmp = iwbits_reverse_64(*(--p)) & ((((uint64_t) 1) << size) - 1); #endif if (tmp) { uint64_t tmp2; *found = 1; tmp2 = iwbits_find_first_sbit64(tmp); assert(offset_bit > tmp2); return offset_bit > tmp2 ? 
offset_bit - tmp2 - 1 : 0; } else { return 0; } } /** * @brief Return a previously allocated blocks * back into the free-blocks pool. * * @param fms `struct fsm` * @param offset_blk Starting block number of the specified range. * @param length_blk Range size in blocks. */ static iwrc _fsm_blk_deallocate_lw( struct fsm *fsm, const uint64_t offset_blk, const uint64_t length_blk ) { iwrc rc; uint64_t *bmptr; uint64_t left, right; int hasleft = 0, hasright = 0; uint64_t key_offset = offset_blk, key_length = length_blk; uint64_t rm_offset = 0, rm_length = 0; uint64_t lfbkoff = fsm->lfbkoff; uint64_t end_offset_blk = offset_blk + length_blk; fsm_bmopts_t bopts = FSM_BM_NONE; if (fsm->oflags & IWFSM_STRICT) { bopts |= FSM_BM_STRICT; } rc = _fsm_set_bit_status_lw(fsm, offset_blk, length_blk, 0, bopts); RCRET(rc); rc = _fsm_bmptr(fsm, &bmptr); RCRET(rc); /* Merge with neighborhoods */ left = _fsm_find_prev_set_bit(bmptr, offset_blk, 0, &hasleft); if (lfbkoff && (lfbkoff == end_offset_blk)) { right = lfbkoff + fsm->lfbklen; hasright = 1; } else { uint64_t maxoff = lfbkoff ? lfbkoff : (fsm->bmlen << 3); right = _fsm_find_next_set_bit(bmptr, end_offset_blk, maxoff, &hasright); } if (hasleft) { if (offset_blk > left + 1) { left += 1; rm_offset = left; rm_length = offset_blk - left; _fsm_del_fbk(fsm, rm_offset, rm_length); key_offset = rm_offset; key_length += rm_length; } } else if (offset_blk > 0) { /* zero start */ rm_offset = 0; rm_length = offset_blk; _fsm_del_fbk(fsm, rm_offset, rm_length); key_offset = rm_offset; key_length += rm_length; } if (hasright && (right > end_offset_blk)) { rm_offset = end_offset_blk; rm_length = right - end_offset_blk; _fsm_del_fbk(fsm, rm_offset, rm_length); key_length += rm_length; } IWRC(_fsm_put_fbk(fsm, key_offset, key_length), rc); return rc; } /** * @brief Initialize a new free-space bitmap area. * * If bitmap exists, its content will be moved into newly created area. * Blocks from the previous bitmap are will disposed and deallocated. * * @param fsm `struct fsm * @param bmoff Byte offset of the new bitmap. Value must be page aligned. * @param bmlen Byte length of the new bitmap. Value must be page aligned. Its length must not be lesser than length of old bitmap. 
*/ static iwrc _fsm_init_lw(struct fsm *fsm, uint64_t bmoff, uint64_t bmlen) { iwrc rc; uint8_t *mm, *mm2; size_t sp, sp2; uint64_t old_bmoff, old_bmlen; IWFS_EXT *pool = &fsm->pool; if ((bmlen & ((1U << fsm->bpow) - 1)) || (bmoff & ((1U << fsm->bpow) - 1)) || (bmoff & (fsm->aunit - 1))) { return IWFS_ERROR_RANGE_NOT_ALIGNED; } if (bmlen < fsm->bmlen) { rc = IW_ERROR_INVALID_ARGS; iwlog_ecode_error(rc, "Length of the newly initiated bitmap area (bmlen): %" PRIu64 " must not be lesser than the current bitmap area length %" PRIu64 "", bmlen, fsm->bmlen); return rc; } if (bmlen * 8 < ((bmoff + bmlen) >> fsm->bpow) + 1) { rc = IW_ERROR_INVALID_ARGS; iwlog_ecode_error(rc, "Length of the newly initiated bitmap area (bmlen): %" PRIu64 " is not enough to handle bitmap itself and the file header area.", bmlen); return rc; } rc = _fsm_ensure_size_lw(fsm, bmoff + bmlen); RCRET(rc); if (fsm->mmap_all) { // get mmap area without locking, since we ensured what pool file will not be remapped rc = pool->probe_mmap(pool, 0, &mm, &sp); RCRET(rc); if (sp < bmoff + bmlen) { return IWFS_ERROR_NOT_MMAPED; } else { mm += bmoff; } } else { // get mmap area without locking, since we ensured what pool file will not be remapped rc = pool->probe_mmap(pool, bmoff, &mm, &sp); RCRET(rc); if (sp < bmlen) { return IWFS_ERROR_NOT_MMAPED; } } if (fsm->bmlen) { /* We have an old active bitmap. Lets copy its content to the new location.*/ if (IW_RANGES_OVERLAP(fsm->bmoff, fsm->bmoff + fsm->bmlen, bmoff, bmoff + bmlen)) { iwlog_ecode_error2(rc, "New and old bitmap areas are overlaped"); return IW_ERROR_INVALID_ARGS; } if (fsm->mmap_all) { mm2 = mm - bmoff + fsm->bmoff; } else { rc = pool->probe_mmap(pool, fsm->bmoff, &mm2, &sp2); if (!rc && (sp2 < fsm->bmlen)) { rc = IWFS_ERROR_NOT_MMAPED; } if (rc) { iwlog_ecode_error2(rc, "Old bitmap area is not mmaped"); return rc; } } assert(!((fsm->bmlen - bmlen) & ((1U << fsm->bpow) - 1))); if (fsm->dlsnr) { rc = fsm->dlsnr->onwrite(fsm->dlsnr, bmoff, mm2, fsm->bmlen, 0); RCRET(rc); } memcpy(mm, mm2, fsm->bmlen); if (bmlen > fsm->bmlen) { memset(mm + fsm->bmlen, 0, bmlen - fsm->bmlen); if (fsm->dlsnr) { rc = fsm->dlsnr->onset(fsm->dlsnr, bmoff + fsm->bmlen, 0, bmlen - fsm->bmlen, 0); RCRET(rc); } } } else { mm2 = 0; memset(mm, 0, bmlen); if (fsm->dlsnr) { rc = fsm->dlsnr->onset(fsm->dlsnr, bmoff, 0, bmlen, 0); RCRET(rc); } } /* Backup the previous bitmap range */ old_bmlen = fsm->bmlen; old_bmoff = fsm->bmoff; fsm->bmoff = bmoff; fsm->bmlen = bmlen; RCC(rc, rollback, _fsm_set_bit_status_lw(fsm, (bmoff >> fsm->bpow), (bmlen >> fsm->bpow), 1, FSM_BM_NONE)); if (!old_bmlen) { /* First time initialization */ /* Header allocation */ RCC(rc, rollback, _fsm_set_bit_status_lw(fsm, 0, (fsm->hdrlen >> fsm->bpow), 1, FSM_BM_NONE)); } /* Reload fsm tree */ _fsm_load_fsm_lw(fsm, mm, bmlen); /* Flush new meta */ RCC(rc, rollback, _fsm_write_meta_lw(fsm)); RCC(rc, rollback, pool->sync(pool, IWFS_FDATASYNC)); if (old_bmlen) { /* Now we are save to deallocate the old bitmap */ rc = _fsm_blk_deallocate_lw(fsm, (old_bmoff >> fsm->bpow), (old_bmlen >> fsm->bpow)); if (!fsm->mmap_all) { pool->remove_mmap(pool, old_bmoff); } } return rc; rollback: /* try to rollback previous bitmap state */ fsm->bmoff = old_bmoff; fsm->bmlen = old_bmlen; if (old_bmlen && mm2) { _fsm_load_fsm_lw(fsm, mm2, old_bmlen); } pool->sync(pool, IWFS_FDATASYNC); return rc; } /** * @brief Resize bitmap area. * @param fsm `structfsm * @param size New size of bitmap area in bytes. 
*/ static iwrc _fsm_resize_fsm_bitmap_lw(struct fsm *fsm, uint64_t size) { iwrc rc; uint64_t bmoffset = 0, bmlen, sp; IWFS_EXT *pool = &fsm->pool; if (fsm->bmlen >= size) { return 0; } bmlen = IW_ROUNDUP(size, fsm->aunit); /* align to the system page size. */ rc = _fsm_blk_allocate_aligned_lw( fsm, (bmlen >> fsm->bpow), &bmoffset, &sp, UINT64_MAX, IWFSM_ALLOC_NO_STATS | IWFSM_ALLOC_NO_EXTEND | IWFSM_ALLOC_NO_OVERALLOCATE); if (!rc) { bmoffset = bmoffset << fsm->bpow; bmlen = sp << fsm->bpow; } else if (rc == IWFS_ERROR_NO_FREE_SPACE) { bmoffset = fsm->bmlen * (1 << fsm->bpow) * 8; bmoffset = IW_ROUNDUP(bmoffset, fsm->aunit); } if (!fsm->mmap_all) { rc = pool->add_mmap(pool, bmoffset, bmlen, fsm->mmap_opts); RCRET(rc); } rc = _fsm_init_lw(fsm, bmoffset, bmlen); if (rc && !fsm->mmap_all) { pool->remove_mmap(pool, bmoffset); } return rc; } /** * @brief Allocate a continuous segment of blocks. * * @param fsm `struct fsm * @param length_blk Desired segment length in blocks. * @param [in,out] offset_blk Allocated segment offset in blocks will be stored into. * It also specified the desired segment offset to provide allocation locality. * @param [out] olength_blk Assigned segment length in blocks. * @param opts */ static iwrc _fsm_blk_allocate_lw( struct fsm *fsm, uint64_t length_blk, uint64_t *offset_blk, uint64_t *olength_blk, iwfs_fsm_aflags opts ) { iwrc rc; struct iwavl_node *nn; fsm_bmopts_t bopts = FSM_BM_NONE; if (opts & IWFSM_ALLOC_PAGE_ALIGNED) { while (1) { rc = _fsm_blk_allocate_aligned_lw(fsm, length_blk, offset_blk, olength_blk, UINT64_MAX, opts); if (rc == IWFS_ERROR_NO_FREE_SPACE) { if (opts & IWFSM_ALLOC_NO_EXTEND) { return IWFS_ERROR_NO_FREE_SPACE; } rc = _fsm_resize_fsm_bitmap_lw(fsm, fsm->bmlen << 1); RCRET(rc); continue; } if (!rc && (opts & IWFSM_SOLID_ALLOCATED_SPACE)) { uint64_t bs = *offset_blk; int64_t bl = *olength_blk; rc = _fsm_ensure_size_lw(fsm, (bs << fsm->bpow) + (bl << fsm->bpow)); } return rc; } } *olength_blk = length_blk; start: nn = (struct iwavl_node*) _fsm_find_matching_fblock_lw(fsm, *offset_blk, length_blk, opts); if (nn) { /* use existing free space block */ const struct bkey *nk = &BKEY(nn); uint64_t nlength = FSMBK_LENGTH(nk); *offset_blk = FSMBK_OFFSET(nk); _fsm_del_fbk2(fsm, nn); if (nlength > length_blk) { /* re-save rest of free-space */ if (!(opts & IWFSM_ALLOC_NO_OVERALLOCATE) && fsm->crznum) { /* todo use lognormal distribution? 
*/ double_t d = ((double_t) fsm->crzsum / (double_t) fsm->crznum) /*avg*/ - (double) (nlength - length_blk); /*rest blk size*/ double_t s = ((double_t) fsm->crzvar / (double_t) fsm->crznum) * 6.0; /* blk size dispersion * 6 */ if ((s > 1) && (d > 0) && (d * d > s)) { /* its better to attach rest of block to the record */ *olength_blk = nlength; } else { _fsm_put_fbk(fsm, (*offset_blk + length_blk), (nlength - length_blk)); } } else { _fsm_put_fbk(fsm, (*offset_blk + length_blk), (nlength - length_blk)); } } } else { if (opts & IWFSM_ALLOC_NO_EXTEND) { return IWFS_ERROR_NO_FREE_SPACE; } rc = _fsm_resize_fsm_bitmap_lw(fsm, fsm->bmlen << 1); RCRET(rc); goto start; } if (fsm->oflags & IWFSM_STRICT) { bopts |= FSM_BM_STRICT; } rc = _fsm_set_bit_status_lw(fsm, *offset_blk, *olength_blk, 1, bopts); if (!rc && !(opts & IWFSM_ALLOC_NO_STATS)) { double_t avg; /* Update allocation statistics */ if (fsm->crznum > FSM_MAX_STATS_COUNT) { fsm->crznum = 0; fsm->crzsum = 0; fsm->crzvar = 0; } ++fsm->crznum; fsm->crzsum += length_blk; avg = (double_t) fsm->crzsum / (double_t) fsm->crznum; /* average */ fsm->crzvar += (uint64_t) (((double_t) length_blk - avg) * ((double_t) length_blk - avg) + 0.5L); /* variance */ } if (!rc && (opts & IWFSM_SOLID_ALLOCATED_SPACE)) { uint64_t bs = *offset_blk; int64_t bl = *olength_blk; rc = _fsm_ensure_size_lw(fsm, (bs << fsm->bpow) + (bl << fsm->bpow)); } if (!rc && (opts & IWFSM_SYNC_BMAP)) { uint64_t *bmptr; if (!_fsm_bmptr(fsm, &bmptr)) { IWFS_EXT *pool = &fsm->pool; rc = pool->sync_mmap(pool, fsm->bmoff, IWFS_SYNCDEFAULT); } } return rc; } /** * @brief Remove all free blocks from the and of file and trim its size. */ static iwrc _fsm_trim_tail_lw(struct fsm *fsm) { iwrc rc; int hasleft; uint64_t length, lastblk, *bmptr; IWFS_EXT_STATE fstate; uint64_t offset = 0; if (!(fsm->omode & IWFS_OWRITE)) { return 0; } /* find free space for fsm with lesser offset than actual */ rc = _fsm_blk_allocate_aligned_lw( fsm, (fsm->bmlen >> fsm->bpow), &offset, &length, (fsm->bmoff >> fsm->bpow), IWFSM_ALLOC_NO_EXTEND | IWFSM_ALLOC_NO_OVERALLOCATE | IWFSM_ALLOC_NO_STATS); if (rc && (rc != IWFS_ERROR_NO_FREE_SPACE)) { return rc; } if (rc) { rc = 0; } else if ((offset << fsm->bpow) < fsm->bmoff) { offset = offset << fsm->bpow; length = length << fsm->bpow; assert(offset != fsm->bmoff); fsm->pool.add_mmap(&fsm->pool, offset, length, fsm->mmap_opts); RCC(rc, finish, _fsm_init_lw(fsm, offset, length)); } else { /* shoud never be reached */ assert(0); RCC(rc, finish, _fsm_blk_deallocate_lw(fsm, offset, length)); } RCC(rc, finish, _fsm_bmptr(fsm, &bmptr)); // -V519 lastblk = (fsm->bmoff + fsm->bmlen) >> fsm->bpow; offset = _fsm_find_prev_set_bit(bmptr, (fsm->bmlen << 3), lastblk, &hasleft); if (hasleft) { lastblk = offset + 1; } rc = fsm->pool.state(&fsm->pool, &fstate); if (!rc && (fstate.fsize > (lastblk << fsm->bpow))) { rc = fsm->pool.truncate(&fsm->pool, lastblk << fsm->bpow); } finish: return rc; } static iwrc _fsm_init_impl(struct fsm *fsm, const IWFS_FSM_OPTS *opts) { fsm->oflags = opts->oflags; fsm->aunit = iwp_alloc_unit(); fsm->bpow = opts->bpow; fsm->mmap_all = opts->mmap_all; if (!fsm->bpow) { fsm->bpow = 6; // 64bit block } else if (fsm->bpow > FSM_MAX_BLOCK_POW) { return IWFS_ERROR_INVALID_BLOCK_SIZE; } else if ((1U << fsm->bpow) > fsm->aunit) { return IWFS_ERROR_PLATFORM_PAGE; } return 0; } static iwrc _fsm_init_locks(struct fsm *fsm, const IWFS_FSM_OPTS *opts) { if (opts->oflags & IWFSM_NOLOCKS) { fsm->ctlrwlk = 0; return 0; } fsm->ctlrwlk = calloc(1, sizeof(*fsm->ctlrwlk)); if 
(!fsm->ctlrwlk) { return iwrc_set_errno(IW_ERROR_ALLOC, errno); } int rci = pthread_rwlock_init(fsm->ctlrwlk, 0); if (rci) { free(fsm->ctlrwlk); fsm->ctlrwlk = 0; return iwrc_set_errno(IW_ERROR_THREADING_ERRNO, rci); } return 0; } static iwrc _fsm_destroy_locks(struct fsm *fsm) { if (!fsm->ctlrwlk) { return 0; } iwrc rc = 0; int rci = pthread_rwlock_destroy(fsm->ctlrwlk); if (rci) { IWRC(iwrc_set_errno(IW_ERROR_THREADING_ERRNO, rci), rc); } free(fsm->ctlrwlk); fsm->ctlrwlk = 0; return rc; } static iwrc _fsm_read_meta_lr(struct fsm *fsm) { iwrc rc; uint32_t lv; uint64_t llv; size_t sp, rp = 0; uint8_t hdr[IWFSM_CUSTOM_HDR_DATA_OFFSET] = { 0 }; /* [FSM_CTL_MAGICK u32][block pow u8] [bmoffset u64][bmlength u64] [u64 crzsum][u32 crznum][u64 crszvar][u256 reserved] [custom header size u32][custom header data...] [fsm data...] */ rc = fsm->pool.read(&fsm->pool, 0, hdr, IWFSM_CUSTOM_HDR_DATA_OFFSET, &sp); if (rc) { iwlog_ecode_error3(rc); return rc; } /* Magic */ memcpy(&lv, hdr + rp, sizeof(lv)); // -V512 lv = IW_ITOHL(lv); if (lv != IWFSM_MAGICK) { rc = IWFS_ERROR_INVALID_FILEMETA; iwlog_ecode_error2(rc, "Invalid file magic number"); return rc; } rp += sizeof(lv); /* Block pow */ memcpy(&fsm->bpow, hdr + rp, sizeof(fsm->bpow)); rp += sizeof(fsm->bpow); if (fsm->bpow > FSM_MAX_BLOCK_POW) { rc = IWFS_ERROR_INVALID_FILEMETA; iwlog_ecode_error(rc, "Invalid file blocks pow: %u", fsm->bpow); return rc; } if ((1U << fsm->bpow) > fsm->aunit) { rc = IWFS_ERROR_PLATFORM_PAGE; iwlog_ecode_error(rc, "Block size: %u must not be greater than system page size: %zu", (1U << fsm->bpow), fsm->aunit); } /* Free-space bitmap offset */ memcpy(&llv, hdr + rp, sizeof(llv)); llv = IW_ITOHLL(llv); fsm->bmoff = llv; rp += sizeof(llv); /* Free-space bitmap length */ memcpy(&llv, hdr + rp, sizeof(llv)); llv = IW_ITOHLL(llv); fsm->bmlen = llv; if (llv & (64 - 1)) { rc = IWFS_ERROR_INVALID_FILEMETA; iwlog_ecode_error(rc, "Free-space bitmap length is not 64bit aligned: %" PRIuMAX "", fsm->bmlen); } rp += sizeof(llv); /* Cumulative sum of record sizes acquired by `allocate` */ memcpy(&llv, hdr + rp, sizeof(llv)); llv = IW_ITOHLL(llv); fsm->crzsum = llv; rp += sizeof(llv); /* Cumulative number of records acquired by `allocated` */ memcpy(&lv, hdr + rp, sizeof(lv)); lv = IW_ITOHL(lv); fsm->crznum = lv; rp += sizeof(lv); /* Record sizes standard variance (deviation^2 * N) */ memcpy(&llv, hdr + rp, sizeof(llv)); llv = IW_ITOHLL(llv); fsm->crzvar = llv; rp += sizeof(llv); /* Reserved */ rp += 32; /* Header size */ memcpy(&lv, hdr + rp, sizeof(lv)); lv = IW_ITOHL(lv); fsm->hdrlen = lv; rp += sizeof(lv); assert(rp == IWFSM_CUSTOM_HDR_DATA_OFFSET); return rc; } static iwrc _fsm_init_new_lw(struct fsm *fsm, const IWFS_FSM_OPTS *opts) { FSM_ENSURE_OPEN(fsm); iwrc rc; uint64_t bmlen, bmoff; IWFS_EXT *pool = &fsm->pool; assert(fsm->aunit && fsm->bpow); fsm->hdrlen = opts->hdrlen + IWFSM_CUSTOM_HDR_DATA_OFFSET; fsm->hdrlen = IW_ROUNDUP(fsm->hdrlen, 1ULL << fsm->bpow); bmlen = opts->bmlen > 0 ? 
IW_ROUNDUP(opts->bmlen, fsm->aunit) : fsm->aunit; bmoff = IW_ROUNDUP(fsm->hdrlen, fsm->aunit); if (fsm->mmap_all) { /* mmap whole file */ rc = pool->add_mmap(pool, 0, SIZE_T_MAX, fsm->mmap_opts); RCRET(rc); } else { /* mmap header */ rc = pool->add_mmap(pool, 0, fsm->hdrlen, fsm->mmap_opts); RCRET(rc); /* mmap the fsm bitmap index */ rc = pool->add_mmap(pool, bmoff, bmlen, fsm->mmap_opts); RCRET(rc); } return _fsm_init_lw(fsm, bmoff, bmlen); } static iwrc _fsm_init_existing_lw(struct fsm *fsm) { FSM_ENSURE_OPEN(fsm); iwrc rc; size_t sp; uint8_t *mm; IWFS_EXT *pool = &fsm->pool; RCC(rc, finish, _fsm_read_meta_lr(fsm)); if (fsm->mmap_all) { /* mmap the whole file */ RCC(rc, finish, pool->add_mmap(pool, 0, SIZE_T_MAX, fsm->mmap_opts)); RCC(rc, finish, pool->probe_mmap(pool, 0, &mm, &sp)); if (sp < fsm->bmoff + fsm->bmlen) { rc = IWFS_ERROR_NOT_MMAPED; goto finish; } else { mm += fsm->bmoff; } } else { /* mmap the header of file */ RCC(rc, finish, pool->add_mmap(pool, 0, fsm->hdrlen, fsm->mmap_opts)); /* mmap the fsm bitmap index */ RCC(rc, finish, pool->add_mmap(pool, fsm->bmoff, fsm->bmlen, fsm->mmap_opts)); RCC(rc, finish, pool->probe_mmap(pool, fsm->bmoff, &mm, &sp)); if (sp < fsm->bmlen) { rc = IWFS_ERROR_NOT_MMAPED; goto finish; } } _fsm_load_fsm_lw(fsm, mm, fsm->bmlen); finish: return rc; } /** * @brief Check if all blocks within the specified range have been `allocated`. * * @param fsm `struct fsm` * @param offset_blk Starting block number of the specified range. * @param length_blk Range size in blocks. * @param [out] ret Checking result. */ static iwrc _fsm_is_fully_allocated_lr(struct fsm *fsm, uint64_t offset_blk, uint64_t length_blk, int *ret) { uint64_t end = offset_blk + length_blk; *ret = 1; if ((length_blk < 1) || (end < offset_blk) || (end > (fsm->bmlen << 3))) { *ret = 0; return 0; } iwrc rc = _fsm_set_bit_status_lw(fsm, offset_blk, length_blk, 0, FSM_BM_DRY_RUN | FSM_BM_STRICT); if (rc == IWFS_ERROR_FSM_SEGMENTATION) { *ret = 0; return 0; } return rc; } /************************************************************************************************* * Public API * *************************************************************************************************/ static iwrc _fsm_write(struct IWFS_FSM *f, off_t off, const void *buf, size_t siz, size_t *sp) { FSM_ENSURE_OPEN2(f); struct fsm *fsm = f->impl; iwrc rc = _fsm_ctrl_rlock(fsm); RCRET(rc); if (fsm->oflags & IWFSM_STRICT) { int allocated = 0; IWRC(_fsm_is_fully_allocated_lr(fsm, (uint64_t) off >> fsm->bpow, IW_ROUNDUP(siz, 1ULL << fsm->bpow) >> fsm->bpow, &allocated), rc); if (!rc) { if (!allocated) { rc = IWFS_ERROR_FSM_SEGMENTATION; } else { rc = fsm->pool.write(&fsm->pool, off, buf, siz, sp); } } } else { rc = fsm->pool.write(&fsm->pool, off, buf, siz, sp); } _fsm_ctrl_unlock(fsm); return rc; } static iwrc _fsm_read(struct IWFS_FSM *f, off_t off, void *buf, size_t siz, size_t *sp) { FSM_ENSURE_OPEN2(f); struct fsm *fsm = f->impl; iwrc rc = _fsm_ctrl_rlock(fsm); RCRET(rc); if (fsm->oflags & IWFSM_STRICT) { int allocated = 0; IWRC(_fsm_is_fully_allocated_lr(fsm, (uint64_t) off >> fsm->bpow, IW_ROUNDUP(siz, 1ULL << fsm->bpow) >> fsm->bpow, &allocated), rc); if (!rc) { if (!allocated) { rc = IWFS_ERROR_FSM_SEGMENTATION; } else { rc = fsm->pool.read(&fsm->pool, off, buf, siz, sp); } } } else { rc = fsm->pool.read(&fsm->pool, off, buf, siz, sp); } _fsm_ctrl_unlock(fsm); return rc; } static iwrc _fsm_sync(struct IWFS_FSM *f, iwfs_sync_flags flags) { FSM_ENSURE_OPEN2(f); iwrc rc = _fsm_ctrl_rlock(f->impl); RCRET(rc); 
IWRC(_fsm_write_meta_lw(f->impl), rc); IWRC(f->impl->pool.sync(&f->impl->pool, flags), rc); IWRC(_fsm_ctrl_unlock(f->impl), rc); return rc; } static iwrc _fsm_close(struct IWFS_FSM *f) { if (!f || !f->impl) { return 0; } iwrc rc = 0; struct fsm *fsm = f->impl; IWRC(_fsm_ctrl_wlock(fsm), rc); if (fsm->root && (fsm->omode & IWFS_OWRITE)) { if (!(fsm->oflags & IWFSM_NO_TRIM_ON_CLOSE)) { IWRC(_fsm_trim_tail_lw(fsm), rc); } IWRC(_fsm_write_meta_lw(fsm), rc); if (!fsm->dlsnr) { IWRC(fsm->pool.sync(&fsm->pool, IWFS_SYNCDEFAULT), rc); } } IWRC(fsm->pool.close(&fsm->pool), rc); _fsm_node_destroy(fsm->root); IWRC(_fsm_ctrl_unlock(fsm), rc); IWRC(_fsm_destroy_locks(fsm), rc); f->impl = 0; free(fsm); return rc; } IW_INLINE iwrc _fsm_ensure_size_lw(struct fsm *fsm, off_t size) { return fsm->pool.ensure_size(&fsm->pool, size); } static iwrc _fsm_ensure_size(struct IWFS_FSM *f, off_t size) { FSM_ENSURE_OPEN2(f); iwrc rc = _fsm_ctrl_rlock(f->impl); RCRET(rc); if (f->impl->bmoff + f->impl->bmlen > size) { rc = IWFS_ERROR_RESIZE_FAIL; goto finish; } rc = _fsm_ensure_size_lw(f->impl, size); finish: IWRC(_fsm_ctrl_unlock(f->impl), rc); return rc; } static iwrc _fsm_add_mmap(struct IWFS_FSM *f, off_t off, size_t maxlen, iwfs_ext_mmap_opts_t opts) { FSM_ENSURE_OPEN2(f); return f->impl->pool.add_mmap(&f->impl->pool, off, maxlen, opts); } static iwrc _fsm_remap_all(struct IWFS_FSM *f) { FSM_ENSURE_OPEN2(f); return f->impl->pool.remap_all(&f->impl->pool); } iwrc _fsm_acquire_mmap(struct IWFS_FSM *f, off_t off, uint8_t **mm, size_t *sp) { return f->impl->pool.acquire_mmap(&f->impl->pool, off, mm, sp); } iwrc _fsm_release_mmap(struct IWFS_FSM *f) { return f->impl->pool.release_mmap(&f->impl->pool); } static iwrc _fsm_probe_mmap(struct IWFS_FSM *f, off_t off, uint8_t **mm, size_t *sp) { FSM_ENSURE_OPEN2(f); return f->impl->pool.probe_mmap(&f->impl->pool, off, mm, sp); } static iwrc _fsm_remove_mmap(struct IWFS_FSM *f, off_t off) { FSM_ENSURE_OPEN2(f); return f->impl->pool.remove_mmap(&f->impl->pool, off); } static iwrc _fsm_sync_mmap(struct IWFS_FSM *f, off_t off, iwfs_sync_flags flags) { FSM_ENSURE_OPEN2(f); return f->impl->pool.sync_mmap(&f->impl->pool, off, flags); } static iwrc _fsm_allocate(struct IWFS_FSM *f, off_t len, off_t *oaddr, off_t *olen, iwfs_fsm_aflags opts) { FSM_ENSURE_OPEN2(f); iwrc rc; uint64_t sbnum, nlen; struct fsm *fsm = f->impl; *olen = 0; if (!(fsm->omode & IWFS_OWRITE)) { return IW_ERROR_READONLY; } if (len <= 0) { return IW_ERROR_INVALID_ARGS; } /* Required blocks number */ sbnum = (uint64_t) *oaddr >> fsm->bpow; len = IW_ROUNDUP(len, 1ULL << fsm->bpow); rc = _fsm_ctrl_wlock(fsm); RCRET(rc); rc = _fsm_blk_allocate_lw(f->impl, (uint64_t) len >> fsm->bpow, &sbnum, &nlen, opts); if (!rc) { *olen = (nlen << fsm->bpow); *oaddr = (sbnum << fsm->bpow); } IWRC(_fsm_ctrl_unlock(fsm), rc); return rc; } static iwrc _fsm_reallocate(struct IWFS_FSM *f, off_t nlen, off_t *oaddr, off_t *olen, iwfs_fsm_aflags opts) { FSM_ENSURE_OPEN2(f); iwrc rc; struct fsm *fsm = f->impl; if (!(fsm->omode & IWFS_OWRITE)) { return IW_ERROR_READONLY; } if ((*oaddr & ((1ULL << fsm->bpow) - 1)) || (*olen & ((1ULL << fsm->bpow) - 1))) { return IWFS_ERROR_RANGE_NOT_ALIGNED; } uint64_t sp; uint64_t nlen_blk = IW_ROUNDUP((uint64_t) nlen, 1ULL << fsm->bpow) >> fsm->bpow; uint64_t olen_blk = (uint64_t) *olen >> fsm->bpow; uint64_t oaddr_blk = (uint64_t) *oaddr >> fsm->bpow; uint64_t naddr_blk = oaddr_blk; if (nlen_blk == olen_blk) { return 0; } rc = _fsm_ctrl_wlock(fsm); RCRET(rc); if (nlen_blk < olen_blk) { rc = 
_fsm_blk_deallocate_lw(fsm, oaddr_blk + nlen_blk, olen_blk - nlen_blk); if (!rc) { *oaddr = oaddr_blk << fsm->bpow; *olen = nlen_blk << fsm->bpow; } } else { RCC(rc, finish, _fsm_blk_allocate_lw(fsm, nlen_blk, &naddr_blk, &sp, opts)); if (naddr_blk != oaddr_blk) { RCC(rc, finish, fsm->pool.copy(&fsm->pool, *oaddr, (size_t) *olen, naddr_blk << fsm->bpow)); } RCC(rc, finish, _fsm_blk_deallocate_lw(fsm, oaddr_blk, olen_blk)); *oaddr = naddr_blk << fsm->bpow; *olen = sp << fsm->bpow; } finish: IWRC(_fsm_ctrl_unlock(fsm), rc); return rc; } static iwrc _fsm_deallocate(struct IWFS_FSM *f, off_t addr, off_t len) { FSM_ENSURE_OPEN2(f); iwrc rc; struct fsm *fsm = f->impl; off_t offset_blk = (uint64_t) addr >> fsm->bpow; off_t length_blk = (uint64_t) len >> fsm->bpow; if (!(fsm->omode & IWFS_OWRITE)) { return IW_ERROR_READONLY; } if (addr & ((1ULL << fsm->bpow) - 1)) { return IWFS_ERROR_RANGE_NOT_ALIGNED; } rc = _fsm_ctrl_wlock(fsm); RCRET(rc); if ( IW_RANGES_OVERLAP(offset_blk, offset_blk + length_blk, 0, (fsm->hdrlen >> fsm->bpow)) || IW_RANGES_OVERLAP(offset_blk, offset_blk + length_blk, (fsm->bmoff >> fsm->bpow), (fsm->bmoff >> fsm->bpow) + (fsm->bmlen >> fsm->bpow))) { // Deny deallocations in header or free-space bitmap itself IWRC(_fsm_ctrl_unlock(fsm), rc); return IWFS_ERROR_FSM_SEGMENTATION; } rc = _fsm_blk_deallocate_lw(fsm, (uint64_t) offset_blk, (uint64_t) length_blk); IWRC(_fsm_ctrl_unlock(fsm), rc); return rc; } static iwrc _fsm_check_allocation_status(struct IWFS_FSM *f, off_t addr, off_t len, bool allocated) { struct fsm *fsm = f->impl; if ((addr & ((1ULL << fsm->bpow) - 1)) || (len & ((1ULL << fsm->bpow) - 1))) { return IWFS_ERROR_RANGE_NOT_ALIGNED; } iwrc rc = _fsm_ctrl_rlock(fsm); RCRET(rc); off_t offset_blk = (uint64_t) addr >> fsm->bpow; off_t length_blk = (uint64_t) len >> fsm->bpow; if ( IW_RANGES_OVERLAP(offset_blk, offset_blk + length_blk, 0, (fsm->hdrlen >> fsm->bpow)) || IW_RANGES_OVERLAP(offset_blk, offset_blk + length_blk, (fsm->bmoff >> fsm->bpow), (fsm->bmoff >> fsm->bpow) + (fsm->bmlen >> fsm->bpow))) { IWRC(_fsm_ctrl_unlock(fsm), rc); return IWFS_ERROR_FSM_SEGMENTATION; } rc = _fsm_set_bit_status_lw(fsm, (uint64_t) offset_blk, (uint64_t) length_blk, allocated ? 
0 : 1, FSM_BM_DRY_RUN | FSM_BM_STRICT); IWRC(_fsm_ctrl_unlock(fsm), rc); return rc; } static iwrc _fsm_writehdr(struct IWFS_FSM *f, off_t off, const void *buf, off_t siz) { FSM_ENSURE_OPEN2(f); iwrc rc; uint8_t *mm; if (siz < 1) { return 0; } struct fsm *fsm = f->impl; if ((IWFSM_CUSTOM_HDR_DATA_OFFSET + off + siz) > fsm->hdrlen) { return IW_ERROR_OUT_OF_BOUNDS; } rc = fsm->pool.acquire_mmap(&fsm->pool, 0, &mm, 0); if (!rc) { if (fsm->dlsnr) { rc = fsm->dlsnr->onwrite(fsm->dlsnr, IWFSM_CUSTOM_HDR_DATA_OFFSET + off, buf, siz, 0); } memmove(mm + IWFSM_CUSTOM_HDR_DATA_OFFSET + off, buf, (size_t) siz); IWRC(fsm->pool.release_mmap(&fsm->pool), rc); } return rc; } static iwrc _fsm_readhdr(struct IWFS_FSM *f, off_t off, void *buf, off_t siz) { FSM_ENSURE_OPEN2(f); iwrc rc; uint8_t *mm; if (siz < 1) { return 0; } struct fsm *fsm = f->impl; if ((IWFSM_CUSTOM_HDR_DATA_OFFSET + off + siz) > fsm->hdrlen) { return IW_ERROR_OUT_OF_BOUNDS; } rc = fsm->pool.acquire_mmap(&fsm->pool, 0, &mm, 0); if (!rc) { memmove(buf, mm + IWFSM_CUSTOM_HDR_DATA_OFFSET + off, (size_t) siz); rc = fsm->pool.release_mmap(&fsm->pool); } return rc; } static iwrc _fsm_clear(struct IWFS_FSM *f, iwfs_fsm_clrfalgs clrflags) { FSM_ENSURE_OPEN2(f); struct fsm *fsm = f->impl; uint64_t bmoff, bmlen; iwrc rc = _fsm_ctrl_wlock(fsm); bmlen = fsm->bmlen; if (!bmlen) { goto finish; } if (!fsm->mmap_all && fsm->bmoff) { IWRC(fsm->pool.remove_mmap(&fsm->pool, fsm->bmoff), rc); } bmoff = IW_ROUNDUP(fsm->hdrlen, fsm->aunit); if (!fsm->mmap_all) { IWRC(fsm->pool.add_mmap(&fsm->pool, bmoff, bmlen, fsm->mmap_opts), rc); } RCGO(rc, finish); fsm->bmlen = 0; fsm->bmoff = 0; rc = _fsm_init_lw(fsm, bmoff, bmlen); if (!rc && (clrflags & IWFSM_CLEAR_TRIM)) { rc = _fsm_trim_tail_lw(fsm); } finish: IWRC(_fsm_ctrl_unlock(fsm), rc); return rc; } static iwrc _fsm_extfile(struct IWFS_FSM *f, IWFS_EXT **ext) { FSM_ENSURE_OPEN2(f); *ext = &f->impl->pool; return 0; } static iwrc _fsm_state(struct IWFS_FSM *f, IWFS_FSM_STATE *state) { FSM_ENSURE_OPEN2(f); struct fsm *fsm = f->impl; iwrc rc = _fsm_ctrl_rlock(fsm); memset(state, 0, sizeof(*state)); IWRC(fsm->pool.state(&fsm->pool, &state->exfile), rc); state->block_size = 1U << fsm->bpow; state->oflags = fsm->oflags; state->hdrlen = fsm->hdrlen; state->blocks_num = fsm->bmlen << 3; state->free_segments_num = fsm->fsmnum; state->avg_alloc_size = fsm->crznum > 0 ? (double_t) fsm->crzsum / (double_t) fsm->crznum : 0; state->alloc_dispersion = fsm->crznum > 0 ? 
(double_t) fsm->crzvar / (double_t) fsm->crznum : 0; IWRC(_fsm_ctrl_unlock(fsm), rc); return rc; } iwrc iwfs_fsmfile_open(IWFS_FSM *f, const IWFS_FSM_OPTS *opts) { assert(f && opts); iwrc rc = 0; IWFS_EXT_STATE fstate = { 0 }; const char *path = opts->exfile.file.path; memset(f, 0, sizeof(*f)); RCC(rc, finish, iwfs_fsmfile_init()); f->write = _fsm_write; f->read = _fsm_read; f->close = _fsm_close; f->sync = _fsm_sync; f->state = _fsm_state; f->ensure_size = _fsm_ensure_size; f->add_mmap = _fsm_add_mmap; f->remap_all = _fsm_remap_all; f->acquire_mmap = _fsm_acquire_mmap; f->probe_mmap = _fsm_probe_mmap; f->release_mmap = _fsm_release_mmap; f->remove_mmap = _fsm_remove_mmap; f->sync_mmap = _fsm_sync_mmap; f->allocate = _fsm_allocate; f->reallocate = _fsm_reallocate; f->deallocate = _fsm_deallocate; f->check_allocation_status = _fsm_check_allocation_status; f->writehdr = _fsm_writehdr; f->readhdr = _fsm_readhdr; f->clear = _fsm_clear; f->extfile = _fsm_extfile; if (!path) { return IW_ERROR_INVALID_ARGS; } struct fsm *fsm = f->impl = calloc(1, sizeof(*f->impl)); if (!fsm) { return iwrc_set_errno(IW_ERROR_ALLOC, errno); } fsm->f = f; fsm->dlsnr = opts->exfile.file.dlsnr; // Copy data changes listener address fsm->mmap_opts = opts->mmap_opts; IWFS_EXT_OPTS rwl_opts = opts->exfile; rwl_opts.use_locks = !(opts->oflags & IWFSM_NOLOCKS); RCC(rc, finish, _fsm_init_impl(fsm, opts)); RCC(rc, finish, _fsm_init_locks(fsm, opts)); RCC(rc, finish, iwfs_exfile_open(&fsm->pool, &rwl_opts)); RCC(rc, finish, fsm->pool.state(&fsm->pool, &fstate)); fsm->omode = fstate.file.opts.omode; if (fstate.file.ostatus & IWFS_OPEN_NEW) { rc = _fsm_init_new_lw(fsm, opts); } else { rc = _fsm_init_existing_lw(fsm); } finish: if (rc) { if (f->impl) { IWRC(_fsm_destroy_locks(f->impl), rc); // we are not locked IWRC(_fsm_close(f), rc); } } return rc; } static const char* _fsmfile_ecodefn(locale_t locale, uint32_t ecode) { if (!((ecode > _IWFS_FSM_ERROR_START) && (ecode < _IWFS_FSM_ERROR_END))) { return 0; } switch (ecode) { case IWFS_ERROR_NO_FREE_SPACE: return "No free space. (IWFS_ERROR_NO_FREE_SPACE)"; case IWFS_ERROR_INVALID_BLOCK_SIZE: return "Invalid block size specified. (IWFS_ERROR_INVALID_BLOCK_SIZE)"; case IWFS_ERROR_RANGE_NOT_ALIGNED: return "Specified range/offset is not aligned with page/block. " "(IWFS_ERROR_RANGE_NOT_ALIGNED)"; case IWFS_ERROR_FSM_SEGMENTATION: return "Free-space map segmentation error. (IWFS_ERROR_FSM_SEGMENTATION)"; case IWFS_ERROR_INVALID_FILEMETA: return "Invalid file metadata. (IWFS_ERROR_INVALID_FILEMETA)"; case IWFS_ERROR_PLATFORM_PAGE: return "The block size incompatible with platform page size, data " "migration required. 
(IWFS_ERROR_PLATFORM_PAGE)"; case IWFS_ERROR_RESIZE_FAIL: return "Failed to resize file, " "conflicting with free-space map location (IWFS_ERROR_RESIZE_FAIL)"; default: break; } return 0; } iwrc iwfs_fsmfile_init(void) { static int _fsmfile_initialized = 0; iwrc rc = iw_init(); RCRET(rc); if (!__sync_bool_compare_and_swap(&_fsmfile_initialized, 0, 1)) { return 0; // initialized already } return iwlog_register_ecodefn(_fsmfile_ecodefn); } /************************************************************************************************* * Debug API * *************************************************************************************************/ uint64_t iwfs_fsmdbg_number_of_free_areas(IWFS_FSM *f) { struct fsm *fsm = f->impl; return fsm->fsmnum; } uint64_t iwfs_fsmdbg_find_next_set_bit( const uint64_t *addr, uint64_t offset_bit, uint64_t max_offset_bit, int *found ) { return _fsm_find_next_set_bit(addr, offset_bit, max_offset_bit, found); } uint64_t iwfs_fsmdbg_find_prev_set_bit( const uint64_t *addr, uint64_t offset_bit, uint64_t min_offset_bit, int *found ) { return _fsm_find_prev_set_bit(addr, offset_bit, min_offset_bit, found); } void iwfs_fsmdbg_dump_fsm_tree(IWFS_FSM *f, const char *hdr) { assert(f); struct fsm *fsm = f->impl; fprintf(stderr, "FSM TREE: %s\n", hdr); if (!fsm->root) { fprintf(stderr, "NONE\n"); return; } for (struct iwavl_node *n = iwavl_first_in_order(fsm->root); n; n = iwavl_next_in_order(n)) { struct bkey *k = &BKEY(n); uint64_t koff = FSMBK_OFFSET(k); uint64_t klen = FSMBK_LENGTH(k); fprintf(stderr, "[%" PRIu64 " %" PRIu64 "]\n", koff, klen); } } const char* byte_to_binary(int x) { static char b[9]; b[0] = '\0'; int z; for (z = 1; z <= 128; z <<= 1) { strcat(b, ((x & z) == z) ? "1" : "0"); } return b; } iwrc iwfs_fsmdb_dump_fsm_bitmap(IWFS_FSM *f) { assert(f); size_t sp; uint8_t *mm; struct fsm *fsm = f->impl; iwrc rc; if (fsm->mmap_all) { rc = fsm->pool.probe_mmap(&fsm->pool, 0, &mm, &sp); if (!rc) { if (sp <= fsm->bmoff) { rc = IWFS_ERROR_NOT_MMAPED; } else { mm += fsm->bmoff; sp = sp - fsm->bmoff; } } } else { rc = fsm->pool.probe_mmap(&fsm->pool, fsm->bmoff, &mm, &sp); } if (rc) { iwlog_ecode_error3(rc); return rc; } int i = ((fsm->hdrlen >> fsm->bpow) >> 3); // if (impl->bmoff == impl->aunit) { // i += ((impl->bmlen >> impl->bpow) >> 3); // } for ( ; i < sp && i < fsm->bmlen; ++i) { uint8_t b = *(mm + i); fprintf(stderr, "%s", byte_to_binary(b)); } printf("\n"); return 0; } iwrc iwfs_fsmdbg_state(IWFS_FSM *f, IWFS_FSMDBG_STATE *d) { FSM_ENSURE_OPEN2(f); struct fsm *fsm = f->impl; iwrc rc = _fsm_ctrl_rlock(fsm); memset(d, 0, sizeof(*d)); IWRC(fsm->pool.state(&fsm->pool, &d->state.exfile), rc); d->state.block_size = 1U << fsm->bpow; d->state.oflags = fsm->oflags; d->state.hdrlen = fsm->hdrlen; d->state.blocks_num = fsm->bmlen << 3; d->state.free_segments_num = fsm->fsmnum; d->state.avg_alloc_size = fsm->crznum > 0 ? (double_t) fsm->crzsum / (double_t) fsm->crznum : 0; d->state.alloc_dispersion = fsm->crznum > 0 ? (double_t) fsm->crzvar / (double_t) fsm->crznum : 0; d->bmoff = fsm->bmoff; d->bmlen = fsm->bmlen; d->lfbkoff = fsm->lfbkoff; d->lfbklen = fsm->lfbklen; IWRC(_fsm_ctrl_unlock(fsm), rc); return rc; }
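The file content above implements a free-space-map (FSM) allocator over a memory-mapped file. As an orientation aid, here is a minimal, hypothetical usage sketch of the public entry points the file wires up (iwfs_fsmfile_open plus the allocate/deallocate/close function pointers). The header name, the zero allocation-flags value, and the open-mode defaults are assumptions made for illustration, not details taken from the row itself.

#include <sys/types.h>              /* off_t */
#include "iwfsmfile.h"              /* assumed header exporting IWFS_FSM and iwfs_fsmfile_open */

int main(void) {
  IWFS_FSM fsm;
  IWFS_FSM_OPTS opts = {
    .exfile = { .file = { .path = "example.fsm" } },  /* backing file; other open options left at defaults */
    .bpow   = 6                                       /* 64-byte blocks, the default the code falls back to */
  };
  off_t addr = 0, len = 0;
  iwrc rc = iwfs_fsmfile_open(&fsm, &opts);
  if (rc) return 1;
  /* Request 1000 bytes; the allocator rounds up to whole blocks and reports
     the actual offset/length of the reserved segment. 0 = no allocation flags (assumed valid). */
  rc = fsm.allocate(&fsm, 1000, &addr, &len, 0);
  if (!rc) {
    /* ... fsm.write(&fsm, addr, buf, siz, &sp) would store a payload here ... */
    rc = fsm.deallocate(&fsm, addr, len);             /* return the segment to the free-space bitmap */
  }
  fsm.close(&fsm);
  return rc != 0;
}

As the implementation above shows, allocate treats *oaddr on input as a locality hint, and deallocate rejects ranges overlapping the header or the bitmap area itself.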
b4fb189e29d3cad9c4cc86fda367846d95dd9d99
2d07a646d50c6cc1547b069ecd27c512623d8574
/src/core/os.h
4a26c2c8fc5aeb21fa29b70a144247c7922f17b0
[ "MIT", "LicenseRef-scancode-unknown-license-reference" ]
permissive
bjornbytes/lovr
da40e59eb9c42debbc6e22356d55194283740323
072452a4dafb466e8af9a4bc546b60ae077f8566
refs/heads/master
2023-08-16T13:42:30.581000
2023-07-29T10:37:18
2023-07-29T10:37:18
62,519,414
1,699
164
MIT
2023-09-13T22:21:32
2016-07-03T23:36:45
C
UTF-8
C
false
false
3,311
h
os.h
#include <stdint.h> #include <stdbool.h> #include <stddef.h> #pragma once typedef struct os_window_config { uint32_t width; uint32_t height; bool fullscreen; bool resizable; const char* title; struct { void* data; uint32_t width; uint32_t height; } icon; } os_window_config; typedef enum { MOUSE_LEFT, MOUSE_RIGHT } os_mouse_button; typedef enum { MOUSE_MODE_NORMAL, MOUSE_MODE_GRABBED } os_mouse_mode; typedef enum { KEY_A, KEY_B, KEY_C, KEY_D, KEY_E, KEY_F, KEY_G, KEY_H, KEY_I, KEY_J, KEY_K, KEY_L, KEY_M, KEY_N, KEY_O, KEY_P, KEY_Q, KEY_R, KEY_S, KEY_T, KEY_U, KEY_V, KEY_W, KEY_X, KEY_Y, KEY_Z, KEY_0, KEY_1, KEY_2, KEY_3, KEY_4, KEY_5, KEY_6, KEY_7, KEY_8, KEY_9, KEY_SPACE, KEY_ENTER, KEY_TAB, KEY_ESCAPE, KEY_BACKSPACE, KEY_UP, KEY_DOWN, KEY_LEFT, KEY_RIGHT, KEY_HOME, KEY_END, KEY_PAGE_UP, KEY_PAGE_DOWN, KEY_INSERT, KEY_DELETE, KEY_F1, KEY_F2, KEY_F3, KEY_F4, KEY_F5, KEY_F6, KEY_F7, KEY_F8, KEY_F9, KEY_F10, KEY_F11, KEY_F12, KEY_BACKTICK, KEY_MINUS, KEY_EQUALS, KEY_LEFT_BRACKET, KEY_RIGHT_BRACKET, KEY_BACKSLASH, KEY_SEMICOLON, KEY_APOSTROPHE, KEY_COMMA, KEY_PERIOD, KEY_SLASH, KEY_LEFT_CONTROL, KEY_LEFT_SHIFT, KEY_LEFT_ALT, KEY_LEFT_OS, KEY_RIGHT_CONTROL, KEY_RIGHT_SHIFT, KEY_RIGHT_ALT, KEY_RIGHT_OS, KEY_CAPS_LOCK, KEY_SCROLL_LOCK, KEY_NUM_LOCK, KEY_COUNT } os_key; typedef enum { BUTTON_PRESSED, BUTTON_RELEASED } os_button_action; typedef enum { OS_PERMISSION_AUDIO_CAPTURE } os_permission; typedef void fn_gl_proc(void); typedef void fn_quit(void); typedef void fn_focus(bool focused); typedef void fn_resize(uint32_t width, uint32_t height); typedef void fn_key(os_button_action action, os_key key, uint32_t scancode, bool repeat); typedef void fn_text(uint32_t codepoint); typedef void fn_permission(os_permission permission, bool granted); bool os_init(void); void os_destroy(void); const char* os_get_name(void); uint32_t os_get_core_count(void); void os_open_console(void); double os_get_time(void); void os_sleep(double seconds); void os_request_permission(os_permission permission); void* os_vm_init(size_t size); bool os_vm_free(void* p, size_t size); bool os_vm_commit(void* p, size_t size); bool os_vm_release(void* p, size_t size); void os_poll_events(void); void os_on_quit(fn_quit* callback); void os_on_focus(fn_focus* callback); void os_on_resize(fn_resize* callback); void os_on_key(fn_key* callback); void os_on_text(fn_text* callback); void os_on_permission(fn_permission* callback); bool os_window_open(const os_window_config* config); bool os_window_is_open(void); void os_window_get_size(uint32_t* width, uint32_t* height); float os_window_get_pixel_density(void); size_t os_get_home_directory(char* buffer, size_t size); size_t os_get_data_directory(char* buffer, size_t size); size_t os_get_working_directory(char* buffer, size_t size); size_t os_get_executable_path(char* buffer, size_t size); size_t os_get_bundle_path(char* buffer, size_t size, const char** root); void os_get_mouse_position(double* x, double* y); void os_set_mouse_mode(os_mouse_mode mode); bool os_is_mouse_down(os_mouse_button button); bool os_is_key_down(os_key key);
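The os.h header above is a plain C platform-abstraction API (window, input, timing, virtual memory). Below is a small, hypothetical event-loop sketch using only functions declared in that header; the window size, title, and 60 Hz pacing are illustrative choices, and it assumes os_poll_events is what drives the registered callbacks.

#include <stdbool.h>
#include "os.h"                      /* the header shown above (src/core/os.h) */

static bool running = true;
static void on_quit(void) { running = false; }

int main(void) {
  if (!os_init()) return 1;
  os_on_quit(on_quit);               /* fn_quit callback: flag the loop to stop */

  os_window_config config = {
    .width = 1280, .height = 720,
    .fullscreen = false, .resizable = true,
    .title = "demo",
  };
  if (!os_window_open(&config)) { os_destroy(); return 1; }

  while (running && os_window_is_open()) {
    os_poll_events();                /* assumed to dispatch quit/key/resize callbacks */
    os_sleep(1.0 / 60.0);            /* crude frame pacing for this sketch */
  }

  os_destroy();
  return 0;
}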
62a2b38927916f52b9f9d0a7aef6c3a6c56d962c
6d162c19c9f1dc1d03f330cad63d0dcde1df082d
/qrenderdoc/3rdparty/qt/include/QtWidgets/qtwidgets-config.h
3229c5626be0e4705b873ce9acd7b09975b41bcf
[ "MIT", "LicenseRef-scancode-unknown-license-reference", "LGPL-2.0-or-later", "LGPL-3.0-only", "GPL-3.0-only", "Python-2.0", "LicenseRef-scancode-other-copyleft", "LicenseRef-scancode-mit-old-style", "LGPL-2.1-or-later", "LGPL-2.1-only", "LicenseRef-scancode-warranty-disclaimer", "LicenseRef-scancode-other-permissive", "bzip2-1.0.6", "Bison-exception-2.2", "MIT-open-group", "X11", "blessing", "BSD-3-Clause", "BSD-4.3TAHOE", "GPL-2.0-only", "LicenseRef-scancode-free-unknown", "IJG", "xlock", "HPND", "LicenseRef-scancode-xfree86-1.0", "LicenseRef-scancode-pcre", "Libpng", "FTL", "Zlib", "GPL-1.0-or-later", "libtiff", "LicenseRef-scancode-ietf", "LicenseRef-scancode-cavium-malloc", "LicenseRef-scancode-public-domain", "HPND-sell-variant", "ICU", "BSD-2-Clause", "LicenseRef-scancode-lcs-telegraphics", "dtoa", "LicenseRef-scancode-mit-veillard-variant", "LicenseRef-scancode-public-domain-disclaimer", "GFDL-1.1-or-later", "CC-BY-SA-4.0", "CC-BY-SA-3.0", "GFDL-1.3-or-later", "OpenSSL" ]
permissive
baldurk/renderdoc
24efbb84446a9d443bb9350013f3bfab9e9c5923
a214ffcaf38bf5319b2b23d3d014cf3772cda3c6
refs/heads/v1.x
2023-08-16T21:20:43.886000
2023-07-28T22:34:10
2023-08-15T09:09:40
17,253,131
7,729
1,358
MIT
2023-09-13T09:36:53
2014-02-27T15:16:30
C++
UTF-8
C
false
false
2,499
h
qtwidgets-config.h
#define QT_FEATURE_abstractbutton 1 #define QT_FEATURE_abstractslider 1 #define QT_FEATURE_groupbox 1 #define QT_FEATURE_buttongroup 1 #define QT_FEATURE_label 1 #define QT_FEATURE_pushbutton 1 #define QT_FEATURE_menu 1 #define QT_FEATURE_lineedit 1 #define QT_FEATURE_spinbox 1 #define QT_FEATURE_slider 1 #define QT_FEATURE_scrollbar 1 #define QT_FEATURE_scrollarea 1 #define QT_FEATURE_itemviews 1 #define QT_FEATURE_tableview 1 #define QT_FEATURE_toolbutton 1 #define QT_FEATURE_calendarwidget 1 #define QT_FEATURE_checkbox 1 #define QT_FEATURE_dialog 1 #define QT_FEATURE_dialogbuttonbox 1 #define QT_FEATURE_colordialog 1 #define QT_FEATURE_listview 1 #define QT_FEATURE_columnview 1 #define QT_FEATURE_combobox 1 #define QT_FEATURE_commandlinkbutton 1 #define QT_FEATURE_completer 1 #define QT_FEATURE_contextmenu 1 #define QT_FEATURE_datawidgetmapper 1 #define QT_FEATURE_datetimeedit 1 #define QT_FEATURE_dial 1 #define QT_FEATURE_filesystemmodel 1 #define QT_FEATURE_dirmodel 1 #define QT_FEATURE_resizehandler 1 #define QT_FEATURE_mainwindow 1 #define QT_FEATURE_dockwidget 1 #define QT_FEATURE_textedit 1 #define QT_FEATURE_errormessage 1 #define QT_FEATURE_splitter 1 #define QT_FEATURE_stackedwidget 1 #define QT_FEATURE_treeview 1 #define QT_FEATURE_filedialog 1 #define QT_FEATURE_fontcombobox 1 #define QT_FEATURE_fontdialog 1 #define QT_FEATURE_formlayout 1 #define QT_FEATURE_fscompleter 1 #define QT_FEATURE_graphicsview 1 #define QT_FEATURE_graphicseffect 1 #define QT_FEATURE_inputdialog 1 #define QT_FEATURE_keysequenceedit 1 #define QT_FEATURE_lcdnumber 1 #define QT_FEATURE_listwidget 1 #define QT_FEATURE_mdiarea 1 #define QT_FEATURE_menubar 1 #define QT_FEATURE_messagebox 1 #define QT_FEATURE_paint_debug 1 #define QT_FEATURE_progressbar 1 #define QT_FEATURE_progressdialog 1 #define QT_FEATURE_radiobutton 1 #define QT_FEATURE_rubberband 1 #define QT_FEATURE_scroller 1 #define QT_FEATURE_sizegrip 1 #define QT_FEATURE_splashscreen 1 #define QT_FEATURE_statusbar 1 #define QT_FEATURE_statustip 1 #define QT_FEATURE_style_stylesheet 1 #define QT_FEATURE_syntaxhighlighter 1 #define QT_FEATURE_tabbar 1 #define QT_FEATURE_tablewidget 1 #define QT_FEATURE_tabwidget 1 #define QT_FEATURE_textbrowser 1 #define QT_FEATURE_toolbar 1 #define QT_FEATURE_toolbox 1 #define QT_FEATURE_tooltip 1 #define QT_FEATURE_treewidget 1 #define QT_FEATURE_undocommand 1 #define QT_FEATURE_undostack 1 #define QT_FEATURE_undogroup 1 #define QT_FEATURE_undoview 1 #define QT_FEATURE_wizard 1
fdd4fdc47684f6d501ad3c21167636b8932ceba9
e65a4dbfbfb0e54e59787ba7741efee12f7687f3
/www/qt5-webengine/files/patch-src_3rdparty_chromium_third__party_libXNVCtrl_NVCtrl.c
ac5f5048d107a87d963811870fb709de6f37d907
[ "BSD-2-Clause" ]
permissive
freebsd/freebsd-ports
86f2e89d43913412c4f6b2be3e255bc0945eac12
605a2983f245ac63f5420e023e7dce56898ad801
refs/heads/main
2023-08-30T21:46:28.720000
2023-08-30T19:33:44
2023-08-30T19:33:44
1,803,961
916
918
NOASSERTION
2023-09-08T04:06:26
2011-05-26T11:15:35
null
UTF-8
C
false
false
601
c
patch-src_3rdparty_chromium_third__party_libXNVCtrl_NVCtrl.c
--- src/3rdparty/chromium/third_party/libXNVCtrl/NVCtrl.c.orig 2018-11-13 18:25:11 UTC +++ src/3rdparty/chromium/third_party/libXNVCtrl/NVCtrl.c @@ -27,10 +27,6 @@ * libXNVCtrl library properly protects the Display connection. */ -#if !defined(XTHREADS) -#define XTHREADS -#endif /* XTHREADS */ - #define NEED_EVENTS #define NEED_REPLIES #include <stdint.h> @@ -39,6 +35,11 @@ #include <X11/Xutil.h> #include <X11/extensions/Xext.h> #include <X11/extensions/extutil.h> + +#if !defined(XTHREADS) +#define XTHREADS +#endif /* XTHREADS */ + #include "NVCtrlLib.h" #include "nv_control.h"
b209ec7aecd16f73c178dc1236356753ab94e756
98ee5bc94e754d9b1802d66d7b5c5fcf184a6c90
/lib/include/snow3g_submit.h
1f7293115a012087d15475d4addc8aef67c5dbf0
[ "BSD-3-Clause" ]
permissive
intel/intel-ipsec-mb
f180701ca3dbdc26f310c5706cb3e8577defa2df
9e17d6cad1f99e64f3534053a3ff096c46646058
refs/heads/main
2023-08-30T13:20:47.709000
2023-08-28T13:17:46
2023-08-29T13:14:00
73,856,328
255
89
BSD-3-Clause
2023-08-30T08:42:45
2016-11-15T21:22:17
C
UTF-8
C
false
false
2,958
h
snow3g_submit.h
/******************************************************************************* Copyright (c) 2012-2023, Intel Corporation Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *******************************************************************************/ #ifndef SNOW3G_SUBMIT_H #define SNOW3G_SUBMIT_H #include "intel-ipsec-mb.h" static inline IMB_JOB *def_submit_snow3g_uea2_job(IMB_MGR *state, IMB_JOB *job) { const snow3g_key_schedule_t *key = job->enc_keys; const uint32_t bitlen = (uint32_t) job->msg_len_to_cipher_in_bits; const uint32_t bitoff = (uint32_t) job->cipher_start_offset_in_bits; /* Use bit length API if * - msg length is not a multiple of bytes * - bit offset is not a multiple of bytes */ if ((bitlen & 0x07) || (bitoff & 0x07)) { IMB_SNOW3G_F8_1_BUFFER_BIT(state, key, job->iv, job->src, job->dst, bitlen, bitoff); } else { const uint32_t bytelen = bitlen >> 3; const uint32_t byteoff = bitoff >> 3; const void *src = job->src + byteoff; void *dst = job->dst + byteoff; IMB_SNOW3G_F8_1_BUFFER(state, key, job->iv, src, dst, bytelen); } job->status |= IMB_STATUS_COMPLETED_CIPHER; return job; } static inline IMB_JOB *def_flush_snow3g_uea2_job(IMB_MGR *state) { (void) state; return NULL; } #endif /* SNOW3G_SUBMIT_H */
73bf6a4d7da1e572b123305cd73a8327c7b96c1c
3499b1145f0827498625ec0ac71ba82bbbbda4ed
/board-package-source/libraries/Arduboy/src/ab_logo.c
3718e5cde969f5bb9c3970dd3d85dca135af7bd7
[ "BSD-3-Clause", "BSD-2-Clause", "CC0-1.0" ]
permissive
MrBlinky/Arduboy-homemade-package
09120974a9c6a9ad1871ac68cbf852bb253bbd8e
3b71be313e1a4daaa745a15cdf2b58c92b101441
refs/heads/master
2023-07-22T18:36:15.664000
2023-07-15T22:08:51
2023-07-15T22:08:51
121,283,656
104
31
CC0-1.0
2023-01-02T08:24:54
2018-02-12T18:16:15
C++
UTF-8
C
false
false
1,246
c
ab_logo.c
#include <avr/pgmspace.h> #ifndef ARDUBOY_LOGO_CREATED #define ARDUBOY_LOGO_CREATED // arduboy_logo.png // 88x16 PROGMEM const unsigned char arduboy_logo[] = { 0xF0, 0xF8, 0x9C, 0x8E, 0x87, 0x83, 0x87, 0x8E, 0x9C, 0xF8, 0xF0, 0x00, 0x00, 0xFE, 0xFF, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x0E, 0xFC, 0xF8, 0x00, 0x00, 0xFE, 0xFF, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x0E, 0xFC, 0xF8, 0x00, 0x00, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x00, 0x00, 0xFE, 0xFF, 0x83, 0x83, 0x83, 0x83, 0x83, 0xC7, 0xEE, 0x7C, 0x38, 0x00, 0x00, 0xF8, 0xFC, 0x0E, 0x07, 0x03, 0x03, 0x03, 0x07, 0x0E, 0xFC, 0xF8, 0x00, 0x00, 0x3F, 0x7F, 0xE0, 0xC0, 0x80, 0x80, 0xC0, 0xE0, 0x7F, 0x3F, 0xFF, 0xFF, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0xFF, 0xFF, 0x00, 0x00, 0xFF, 0xFF, 0x0C, 0x0C, 0x0C, 0x0C, 0x1C, 0x3E, 0x77, 0xE3, 0xC1, 0x00, 0x00, 0x7F, 0xFF, 0xC0, 0xC0, 0xC0, 0xC0, 0xC0, 0xE0, 0x70, 0x3F, 0x1F, 0x00, 0x00, 0x1F, 0x3F, 0x70, 0xE0, 0xC0, 0xC0, 0xC0, 0xE0, 0x70, 0x3F, 0x1F, 0x00, 0x00, 0x7F, 0xFF, 0xC1, 0xC1, 0xC1, 0xC1, 0xC1, 0xE3, 0x77, 0x3E, 0x1C, 0x00, 0x00, 0x1F, 0x3F, 0x70, 0xE0, 0xC0, 0xC0, 0xC0, 0xE0, 0x70, 0x3F, 0x1F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0x01, 0x00, 0x00, 0x00 }; #endif
ccc9dee273b29502ee66d57654c14f21c731fe35
a990004e8263ed825eb4ee21c7eb842dd886cde2
/src/lib/crypto/test/iso8859.c
bed724491092ca4f2aae0a77314378aac318f6ca
[ "BSD-2-Clause" ]
permissive
opendnssec/SoftHSMv2
8233f100f47c62f3b091c00706d9d13bfd1486bc
f4661af6680e187f220921c0a91d17d71a680345
refs/heads/develop
2023-08-04T23:48:24.514000
2023-08-02T20:51:58
2023-08-02T20:51:58
10,314,787
643
327
NOASSERTION
2023-09-01T07:23:24
2013-05-27T12:54:47
C++
UTF-8
C
false
false
878
c
iso8859.c
/* This code was taken from http://www.fourmilab.ch/random/ where it states that: This software is in the public domain. Permission to use, copy, modify, and distribute this software and its documentation for any purpose and without fee is hereby granted, without any conditions or restrictions. This software is provided “as is” without express or implied warranty. */ /* ISO 8859/1 Latin-1 alphabetic and upper and lower case bit vector tables. */ /* LINTLIBRARY */ unsigned char isoalpha[32] = { 0,0,0,0,0,0,0,0,127,255,255,224,127,255,255,224,0,0,0,0,0,0,0,0,255,255, 254,255,255,255,254,255 }; unsigned char isoupper[32] = { 0,0,0,0,0,0,0,0,127,255,255,224,0,0,0,0,0,0,0,0,0,0,0,0,255,255,254,254, 0,0,0,0 }; unsigned char isolower[32] = { 0,0,0,0,0,0,0,0,0,0,0,0,127,255,255,224,0,0,0,0,0,0,0,0,0,0,0,1,255,255, 254,255 };