Mirror of https://git.FreeBSD.org/src.git (synced 2024-12-20 11:11:24 +00:00)
nvme: replace NVME_CEILING macro with howmany()
Suggested by:	rpokala
MFC after:	3 days
commit	9c6b5d40eb
parent	50dea2da12
Notes:
svn2git
2020-12-20 02:59:44 +00:00
svn path=/head/; revision=293354
@@ -42,12 +42,6 @@ __FBSDID("$FreeBSD$");
 
 #include "nvme_private.h"
 
-/*
- * Used for calculating number of CPUs to assign to each core and number of I/O
- * queues to allocate per controller.
- */
-#define NVME_CEILING(num, div)	((((num) - 1) / (div)) + 1)
-
 static void nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
 						struct nvme_async_event_request *aer);
 static void nvme_ctrlr_setup_interrupts(struct nvme_controller *ctrlr);
@@ -152,7 +146,7 @@ nvme_ctrlr_construct_io_qpairs(struct nvme_controller *ctrlr)
 	 * a controller could theoretically support fewer I/O queues than
 	 * MSI-X vectors.  So calculate again here just to be safe.
 	 */
-	ctrlr->num_cpus_per_ioq = NVME_CEILING(mp_ncpus, ctrlr->num_io_queues);
+	ctrlr->num_cpus_per_ioq = howmany(mp_ncpus, ctrlr->num_io_queues);
 
 	ctrlr->ioq = malloc(ctrlr->num_io_queues * sizeof(struct nvme_qpair),
 	    M_NVME, M_ZERO | M_WAITOK);
@@ -1029,9 +1023,9 @@ nvme_ctrlr_setup_interrupts(struct nvme_controller *ctrlr)
 	 * admin queue.
 	 */
 	ctrlr->num_cpus_per_ioq = max(min_cpus_per_ioq,
-	    NVME_CEILING(mp_ncpus, num_vectors_available - 1));
+	    howmany(mp_ncpus, num_vectors_available - 1));
 
-	ctrlr->num_io_queues = NVME_CEILING(mp_ncpus, ctrlr->num_cpus_per_ioq);
+	ctrlr->num_io_queues = howmany(mp_ncpus, ctrlr->num_cpus_per_ioq);
 	num_vectors_requested = ctrlr->num_io_queues + 1;
 	num_vectors_allocated = num_vectors_requested;
 
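For reference, howmany() is the ceiling-division macro from <sys/param.h>, defined as (((x)+((y)-1))/(y)); for the operand values used here (mp_ncpus and the queue counts are always at least 1) it yields the same rounded-up quotient as the removed NVME_CEILING macro. A minimal userland sketch, not part of the commit, illustrating the equivalence (NVME_CEILING is re-declared locally only for comparison):

/* cc -o howmany_check howmany_check.c */
#include <sys/param.h>	/* howmany() */
#include <assert.h>
#include <stdio.h>

/* The macro removed by this commit, reproduced for comparison. */
#define NVME_CEILING(num, div)	((((num) - 1) / (div)) + 1)

int
main(void)
{
	/* e.g. 8 CPUs spread over 3 I/O queues -> 3 CPUs per queue */
	assert(NVME_CEILING(8, 3) == howmany(8, 3));
	assert(howmany(8, 3) == 3);
	printf("howmany(8, 3) = %d\n", howmany(8, 3));
	return (0);
}

The two macros expand differently, but they agree whenever the dividend is at least 1, which always holds in these call sites.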