You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
 
 
 
 

674 lines
18 KiB

  1. #include "shm.h"
  2. #include <stdio.h>
  3. #include <string.h>
  4. #include <assert.h>
  5. #include <errno.h>
  6. #include <unistd.h>
  7. #include <limits.h>
  8. #include <sys/types.h>
  9. #include <sys/mman.h>
  10. #include <sys/time.h>
  11. #include <linux/mman.h>
  12. #include <linux/memfd.h>
  13. #include <fcntl.h>
  14. #include <pixman.h>
  15. #include <fcft/stride.h>
  16. #include <tllist.h>
  17. #define LOG_MODULE "shm"
  18. #define LOG_ENABLE_DBG 0
  19. #include "log.h"
  20. #include "macros.h"
  21. #include "xmalloc.h"
  22. #if !defined(MAP_UNINITIALIZED)
  23. #define MAP_UNINITIALIZED 0
  24. #endif
  25. #define TIME_SCROLL 0
  26. /*
  27. * Maximum memfd size allowed.
  28. *
  29. * On 64-bit, we could in theory use up to 2GB (wk_shm_create_pool()
  30. * is limited to int32_t), since we never mmap() the entire region.
  31. *
 * The compositor is a different matter - it needs to mmap() the entire
 * range, and *keep* the mapping for as long as it has buffers
 * referencing it (thus - always). And if we open multiple terminals,
 * then the required address space multiplies...
  36. *
  37. * That said, 128TB (the total amount of available user address space
  38. * on 64-bit) is *a lot*; we can fit 67108864 2GB memfds into
  39. * that. But, let's be conservative for now.
  40. *
  41. * On 32-bit the available address space is too small and SHM
  42. * scrolling is disabled.
  43. *
  44. * Note: this is the _default_ size. It can be overridden by calling
  45. * shm_set_max_pool_size();
  46. */
/* Default memfd size for scrollable buffers; overridable via
 * shm_set_max_pool_size() (see comment above) */
static off_t max_pool_size = 512 * 1024 * 1024;

/* Global cache of all buffers, across all cookies */
static tll(struct buffer) buffers;

/* Whether fallocate(FALLOC_FL_PUNCH_HOLE) works on memfds; probed
 * lazily, on the first buffer allocation */
static bool can_punch_hole = false;
static bool can_punch_hole_initialized = false;

#undef MEASURE_SHM_ALLOCS
#if defined(MEASURE_SHM_ALLOCS)
/* High-water mark of total SHM bytes allocated (reported in shm_fini()) */
static size_t max_alloced = 0;
#endif
  55. void
  56. shm_set_max_pool_size(off_t _max_pool_size)
  57. {
  58. max_pool_size = _max_pool_size;
  59. }
  60. static void
  61. buffer_destroy_dont_close(struct buffer *buf)
  62. {
  63. if (buf->pix != NULL) {
  64. for (size_t i = 0; i < buf->pix_instances; i++)
  65. if (buf->pix[i] != NULL)
  66. pixman_image_unref(buf->pix[i]);
  67. }
  68. if (buf->wl_buf != NULL)
  69. wl_buffer_destroy(buf->wl_buf);
  70. free(buf->pix);
  71. buf->pix = NULL;
  72. buf->wl_buf = NULL;
  73. buf->mmapped = NULL;
  74. }
  75. static void
  76. buffer_destroy(struct buffer *buf)
  77. {
  78. buffer_destroy_dont_close(buf);
  79. if (buf->real_mmapped != MAP_FAILED)
  80. munmap(buf->real_mmapped, buf->mmap_size);
  81. if (buf->pool != NULL)
  82. wl_shm_pool_destroy(buf->pool);
  83. if (buf->fd >= 0)
  84. close(buf->fd);
  85. buf->real_mmapped = MAP_FAILED;
  86. buf->pool = NULL;
  87. buf->fd = -1;
  88. }
  89. void
  90. shm_fini(void)
  91. {
  92. tll_foreach(buffers, it) {
  93. buffer_destroy(&it->item);
  94. tll_remove(buffers, it);
  95. }
  96. #if defined(MEASURE_SHM_ALLOCS) && MEASURE_SHM_ALLOCS
  97. LOG_INFO("max total allocations was: %zu MB", max_alloced / 1024 / 1024);
  98. #endif
  99. }
  100. static void
  101. buffer_release(void *data, struct wl_buffer *wl_buffer)
  102. {
  103. struct buffer *buffer = data;
  104. LOG_DBG("release: cookie=%lx (buf=%p)", buffer->cookie, (void *)buffer);
  105. assert(buffer->wl_buf == wl_buffer);
  106. assert(buffer->busy);
  107. buffer->busy = false;
  108. }
  109. static const struct wl_buffer_listener buffer_listener = {
  110. .release = &buffer_release,
  111. };
  112. static size_t
  113. page_size(void)
  114. {
  115. static size_t size = 0;
  116. if (size == 0) {
  117. long n = sysconf(_SC_PAGE_SIZE);
  118. if (n <= 0) {
  119. LOG_ERRNO("failed to get page size");
  120. size = 4096;
  121. } else {
  122. size = (size_t)n;
  123. }
  124. }
  125. assert(size > 0);
  126. return size;
  127. }
/*
 * (Re-)creates the client-facing objects of 'buf' at byte offset
 * 'new_offset' within the already-mmapped memfd: the raw pixel
 * pointer, the wl_buffer, and 'pix_instances' pixman images.
 *
 * Pre-condition: the buffer is "closed but not destroyed" -
 * fd/pool/real_mmapped valid, mmapped/wl_buf/pix all NULL (the state
 * buffer_destroy_dont_close() leaves behind).
 *
 * Note: the error path cleans up and then abort():s, so the 'false'
 * return is effectively unreachable.
 */
static bool
instantiate_offset(struct wl_shm *shm, struct buffer *buf, off_t new_offset)
{
    assert(buf->fd >= 0);
    assert(buf->mmapped == NULL);
    assert(buf->wl_buf == NULL);
    assert(buf->pix == NULL);

    void *mmapped = MAP_FAILED;
    struct wl_buffer *wl_buf = NULL;
    pixman_image_t **pix = xcalloc(buf->pix_instances, sizeof(*pix));

    /* No new mmap() - just point into the existing full-pool mapping */
    mmapped = (uint8_t *)buf->real_mmapped + new_offset;

    wl_buf = wl_shm_pool_create_buffer(
        buf->pool, new_offset, buf->width, buf->height, buf->stride, WL_SHM_FORMAT_ARGB8888);
    if (wl_buf == NULL) {
        LOG_ERR("failed to create SHM buffer");
        goto err;
    }

    /* One pixman image for each worker thread (do we really need multiple?) */
    for (size_t i = 0; i < buf->pix_instances; i++) {
        pix[i] = pixman_image_create_bits_no_clear(
            PIXMAN_a8r8g8b8, buf->width, buf->height, (uint32_t *)mmapped, buf->stride);
        if (pix[i] == NULL) {
            LOG_ERR("failed to create pixman image");
            goto err;
        }
    }

    buf->offset = new_offset;
    buf->mmapped = mmapped;
    buf->wl_buf = wl_buf;
    buf->pix = pix;

    wl_buffer_add_listener(wl_buf, &buffer_listener, buf);
    return true;

err:
    if (pix != NULL) {
        for (size_t i = 0; i < buf->pix_instances; i++)
            if (pix[i] != NULL)
                pixman_image_unref(pix[i]);
    }
    free(pix);
    if (wl_buf != NULL)
        wl_buffer_destroy(wl_buf);

    abort();
    return false;
}
/*
 * Returns a width x height SHM buffer tagged with 'cookie', marked
 * busy. An idle cached buffer of matching size and cookie is re-used
 * when possible; otherwise a new memfd-backed buffer is allocated.
 * 'pix_instances' pixman images are created (one per worker thread).
 * 'scrollable' requests an oversized pool for offset-based scrolling;
 * it is silently downgraded if FALLOC_FL_PUNCH_HOLE is unsupported.
 */
struct buffer *
shm_get_buffer(struct wl_shm *shm, int width, int height, unsigned long cookie, bool scrollable, size_t pix_instances)
{
    /* Purge buffers marked for purging */
    tll_foreach(buffers, it) {
        if (it->item.cookie != cookie)
            continue;

        if (!it->item.purge)
            continue;

        assert(!it->item.busy);

        LOG_DBG("cookie=%lx: purging buffer %p (width=%d, height=%d): %zu KB",
                cookie, (void *)&it->item, it->item.width, it->item.height,
                it->item.size / 1024);

        buffer_destroy(&it->item);
        tll_remove(buffers, it);
    }

    /* Try to re-use an idle buffer of the exact same size */
    tll_foreach(buffers, it) {
        if (it->item.width != width)
            continue;
        if (it->item.height != height)
            continue;
        if (it->item.cookie != cookie)
            continue;

        if (!it->item.busy) {
            LOG_DBG("cookie=%lx: re-using buffer from cache (buf=%p)",
                    cookie, (void *)&it->item);
            it->item.busy = true;
            it->item.purge = false;
            assert(it->item.pix_instances == pix_instances);
            return &it->item;
        }
    }

    /* Purge old buffers associated with this cookie (mismatching
     * sizes; actually freed on the next call) */
    tll_foreach(buffers, it) {
        if (it->item.cookie != cookie)
            continue;

        if (it->item.busy)
            continue;

        if (it->item.width == width && it->item.height == height)
            continue;

        LOG_DBG("cookie=%lx: marking buffer %p for purging", cookie, (void *)&it->item);
        it->item.purge = true;
    }

    /*
     * No existing buffer available. Create a new one by:
     *
     * 1. open a memory backed "file" with memfd_create()
     * 2. mmap() the memory file, to be used by the pixman image
     * 3. create a wayland shm buffer for the same memory file
     *
     * The pixman image and the wayland buffer are now sharing memory.
     */

    int pool_fd = -1;
    const int stride = stride_for_format_and_width(PIXMAN_a8r8g8b8, width);
    const size_t size = stride * height;

    void *real_mmapped = MAP_FAILED;
    struct wl_shm_pool *pool = NULL;

    LOG_DBG("cookie=%lx: allocating new buffer: %zu KB", cookie, size / 1024);

    /* Backing memory for SHM */
    pool_fd = memfd_create("foot-wayland-shm-buffer-pool", MFD_CLOEXEC | MFD_ALLOW_SEALING);
    if (pool_fd == -1) {
        LOG_ERRNO("failed to create SHM backing memory file");
        goto err;
    }

#if defined(__i386__)
    /* 32-bit: no SHM scrolling; pool is exactly one buffer large */
    off_t initial_offset = 0;
    off_t memfd_size = size;
#else
    /* Scrollable buffers start a quarter into an oversized pool
     * (rounded down to a page boundary), leaving room to scroll in
     * both directions before wrapping */
    off_t initial_offset = scrollable && max_pool_size > 0 ? (max_pool_size / 4) & ~(page_size() - 1) : 0;
    off_t memfd_size = scrollable && max_pool_size > 0 ? max_pool_size : size;
#endif

    LOG_DBG("memfd-size: %lu, initial offset: %lu", memfd_size, initial_offset);

    if (ftruncate(pool_fd, memfd_size) == -1) {
        LOG_ERRNO("failed to set size of SHM backing memory file");
        goto err;
    }

    /* One-time probe: SHM scrolling requires FALLOC_FL_PUNCH_HOLE */
    if (!can_punch_hole_initialized) {
        can_punch_hole_initialized = true;
#if defined(__x86_64__)
        can_punch_hole = fallocate(
            pool_fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, 1) == 0;

        if (!can_punch_hole) {
            LOG_WARN(
                "fallocate(FALLOC_FL_PUNCH_HOLE) not "
                "supported (%s): expect lower performance", strerror(errno));
        }
#else
        /* This is mostly to make sure we skip the warning issued
         * above */
        can_punch_hole = false;
#endif
    }

    if (scrollable && !can_punch_hole) {
        /* Downgrade to a non-scrollable, exactly-sized pool */
        initial_offset = 0;
        memfd_size = size;
        scrollable = false;

        if (ftruncate(pool_fd, memfd_size) < 0) {
            LOG_ERRNO("failed to set size of SHM backing memory file");
            goto err;
        }
    }

    real_mmapped = mmap(
        NULL, memfd_size, PROT_READ | PROT_WRITE,
        MAP_SHARED | MAP_UNINITIALIZED, pool_fd, 0);

    if (real_mmapped == MAP_FAILED) {
        LOG_ERRNO("failed to mmap SHM backing memory file");
        goto err;
    }

    /* Seal file - we no longer allow any kind of resizing */
    /* TODO: wayland mmaps(PROT_WRITE), for some unknown reason, hence we cannot use F_SEAL_FUTURE_WRITE */
    if (fcntl(pool_fd, F_ADD_SEALS,
              F_SEAL_GROW | F_SEAL_SHRINK | /*F_SEAL_FUTURE_WRITE |*/ F_SEAL_SEAL) < 0)
    {
        LOG_ERRNO("failed to seal SHM backing memory file");
        /* This is not a fatal error */
    }

    pool = wl_shm_create_pool(shm, pool_fd, memfd_size);
    if (pool == NULL) {
        LOG_ERR("failed to create SHM pool");
        goto err;
    }

    /* Push to list of available buffers, but marked as 'busy' */
    tll_push_back(
        buffers,
        ((struct buffer){
            .cookie = cookie,
            .width = width,
            .height = height,
            .stride = stride,
            .busy = true,
            .size = size,
            .pix_instances = pix_instances,
            .fd = pool_fd,
            .pool = pool,
            .scrollable = scrollable,
            .real_mmapped = real_mmapped,
            .mmap_size = memfd_size,
            .offset = 0}
            )
        );

    struct buffer *ret = &tll_back(buffers);
    if (!instantiate_offset(shm, ret, initial_offset))
        goto err;

#if defined(MEASURE_SHM_ALLOCS) && MEASURE_SHM_ALLOCS
    {
        size_t currently_alloced = 0;
        tll_foreach(buffers, it)
            currently_alloced += it->item.size;
        if (currently_alloced > max_alloced)
            max_alloced = currently_alloced;
    }
#endif

    return ret;

err:
    if (pool != NULL)
        wl_shm_pool_destroy(pool);
    if (real_mmapped != MAP_FAILED)
        munmap(real_mmapped, memfd_size);
    if (pool_fd != -1)
        close(pool_fd);

    /* We don't handle this */
    abort();
    return NULL;
}
  336. bool
  337. shm_can_scroll(const struct buffer *buf)
  338. {
  339. #if defined(__i386__)
  340. /* Not enough virtual address space in 32-bit */
  341. return false;
  342. #else
  343. return can_punch_hole && max_pool_size > 0 && buf->scrollable;
  344. #endif
  345. }
/*
 * Relocates the buffer content to 'new_offset' within the memfd:
 * copies the pixels there, punches a hole over the now-unused region,
 * and re-instantiates the wl_buffer/pixman objects at the new offset.
 * Used when scrolling runs off either end of the pool.
 *
 * The new and old regions must not overlap (memcpy() is used, not
 * memmove()).
 */
static bool
wrap_buffer(struct wl_shm *shm, struct buffer *buf, off_t new_offset)
{
    /* We don't allow overlapping offsets */
    off_t UNUSED diff =
        new_offset < buf->offset ? buf->offset - new_offset : new_offset - buf->offset;
    assert(diff > buf->size);

    memcpy((uint8_t *)buf->real_mmapped + new_offset, buf->mmapped, buf->size);

    off_t trim_ofs, trim_len;
    if (new_offset > buf->offset) {
        /* Trim everything *before* the new offset */
        trim_ofs = 0;
        trim_len = new_offset;
    } else {
        /* Trim everything *after* the new buffer location */
        trim_ofs = new_offset + buf->size;
        trim_len = buf->mmap_size - trim_ofs;
    }

    /* Return the unused region's pages to the kernel */
    if (fallocate(
            buf->fd,
            FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
            trim_ofs, trim_len) < 0)
    {
        LOG_ERRNO("failed to trim SHM backing memory file");
        return false;
    }

    /* Re-instantiate pixman+wl_buffer+raw pointers */
    buffer_destroy_dont_close(buf);
    return instantiate_offset(shm, buf, new_offset);
}
  376. static bool
  377. shm_scroll_forward(struct wl_shm *shm, struct buffer *buf, int rows,
  378. int top_margin, int top_keep_rows,
  379. int bottom_margin, int bottom_keep_rows)
  380. {
  381. assert(can_punch_hole);
  382. assert(buf->busy);
  383. assert(buf->pix);
  384. assert(buf->wl_buf);
  385. assert(buf->fd >= 0);
  386. LOG_DBG("scrolling %d rows (%d bytes)", rows, rows * buf->stride);
  387. const off_t diff = rows * buf->stride;
  388. assert(rows > 0);
  389. assert(diff < buf->size);
  390. if (buf->offset + diff + buf->size > max_pool_size) {
  391. LOG_DBG("memfd offset wrap around");
  392. if (!wrap_buffer(shm, buf, 0))
  393. goto err;
  394. }
  395. off_t new_offset = buf->offset + diff;
  396. assert(new_offset > buf->offset);
  397. assert(new_offset + buf->size <= max_pool_size);
  398. #if TIME_SCROLL
  399. struct timeval time1;
  400. gettimeofday(&time1, NULL);
  401. struct timeval time2 = time1;
  402. #endif
  403. if (top_keep_rows > 0) {
  404. /* Copy current 'top' region to its new location */
  405. memmove(
  406. (uint8_t *)buf->mmapped + (top_margin + rows) * buf->stride,
  407. (uint8_t *)buf->mmapped + (top_margin + 0) * buf->stride,
  408. top_keep_rows * buf->stride);
  409. #if TIME_SCROLL
  410. gettimeofday(&time2, NULL);
  411. timersub(&time2, &time1, &tot);
  412. LOG_INFO("memmove (top region): %lds %ldus", tot.tv_sec, tot.tv_usec);
  413. #endif
  414. }
  415. /* Destroy old objects (they point to the old offset) */
  416. buffer_destroy_dont_close(buf);
  417. /* Free unused memory - everything up until the new offset */
  418. const off_t trim_ofs = 0;
  419. const off_t trim_len = new_offset;
  420. if (fallocate(
  421. buf->fd,
  422. FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
  423. trim_ofs, trim_len) < 0)
  424. {
  425. LOG_ERRNO("failed to trim SHM backing memory file");
  426. goto err;
  427. }
  428. #if TIME_SCROLL
  429. struct timeval time3;
  430. gettimeofday(&time3, NULL);
  431. timersub(&time3, &time2, &tot);
  432. LOG_INFO("PUNCH HOLE: %lds %ldus", tot.tv_sec, tot.tv_usec);
  433. #endif
  434. /* Re-instantiate pixman+wl_buffer+raw pointersw */
  435. bool ret = instantiate_offset(shm, buf, new_offset);
  436. #if TIME_SCROLL
  437. struct timeval time4;
  438. gettimeofday(&time4, NULL);
  439. timersub(&time4, &time3, &tot);
  440. LOG_INFO("instantiate offset: %lds %ldus", tot.tv_sec, tot.tv_usec);
  441. #endif
  442. if (ret && bottom_keep_rows > 0) {
  443. /* Copy 'bottom' region to its new location */
  444. memmove(
  445. (uint8_t *)buf->mmapped + buf->size - (bottom_margin + bottom_keep_rows) * buf->stride,
  446. (uint8_t *)buf->mmapped + buf->size - (bottom_margin + rows + bottom_keep_rows) * buf->stride,
  447. bottom_keep_rows * buf->stride);
  448. #if TIME_SCROLL
  449. struct timeval time5;
  450. gettimeofday(&time5, NULL);
  451. timersub(&time5, &time4, &tot);
  452. LOG_INFO("memmove (bottom region): %lds %ldus", tot.tv_sec, tot.tv_usec);
  453. #endif
  454. }
  455. return ret;
  456. err:
  457. abort();
  458. return false;
  459. }
  460. static bool
  461. shm_scroll_reverse(struct wl_shm *shm, struct buffer *buf, int rows,
  462. int top_margin, int top_keep_rows,
  463. int bottom_margin, int bottom_keep_rows)
  464. {
  465. assert(rows > 0);
  466. const off_t diff = rows * buf->stride;
  467. if (diff > buf->offset) {
  468. LOG_DBG("memfd offset reverse wrap-around");
  469. if (!wrap_buffer(shm, buf, (max_pool_size - buf->size) & ~(page_size() - 1)))
  470. goto err;
  471. }
  472. off_t new_offset = buf->offset - diff;
  473. assert(new_offset < buf->offset);
  474. assert(new_offset <= max_pool_size);
  475. #if TIME_SCROLL
  476. struct timeval time0;
  477. gettimeofday(&time0, NULL);
  478. struct timeval tot;
  479. struct timeval time1 = time0;
  480. #endif
  481. if (bottom_keep_rows > 0) {
  482. /* Copy 'bottom' region to its new location */
  483. memmove(
  484. (uint8_t *)buf->mmapped + buf->size - (bottom_margin + rows + bottom_keep_rows) * buf->stride,
  485. (uint8_t *)buf->mmapped + buf->size - (bottom_margin + bottom_keep_rows) * buf->stride,
  486. bottom_keep_rows * buf->stride);
  487. #if TIME_SCROLL
  488. gettimeofday(&time1, NULL);
  489. timersub(&time1, &time0, &tot);
  490. LOG_INFO("memmove (bottom region): %lds %ldus", tot.tv_sec, tot.tv_usec);
  491. #endif
  492. }
  493. /* Destroy old objects (they point to the old offset) */
  494. buffer_destroy_dont_close(buf);
  495. /* Free unused memory - everything after the relocated buffer */
  496. const off_t trim_ofs = new_offset + buf->size;
  497. const off_t trim_len = buf->mmap_size - trim_ofs;
  498. if (fallocate(
  499. buf->fd,
  500. FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
  501. trim_ofs, trim_len) < 0)
  502. {
  503. LOG_ERRNO("failed to trim SHM backing memory");
  504. goto err;
  505. }
  506. #if TIME_SCROLL
  507. struct timeval time2;
  508. gettimeofday(&time2, NULL);
  509. timersub(&time2, &time1, &tot);
  510. LOG_INFO("fallocate: %lds %ldus", tot.tv_sec, tot.tv_usec);
  511. #endif
  512. /* Re-instantiate pixman+wl_buffer+raw pointers */
  513. bool ret = instantiate_offset(shm, buf, new_offset);
  514. #if TIME_SCROLL
  515. struct timeval time3;
  516. gettimeofday(&time3, NULL);
  517. timersub(&time3, &time2, &tot);
  518. LOG_INFO("instantiate offset: %lds %ldus", tot.tv_sec, tot.tv_usec);
  519. #endif
  520. if (ret && top_keep_rows > 0) {
  521. /* Copy current 'top' region to its new location */
  522. memmove(
  523. (uint8_t *)buf->mmapped + (top_margin + 0) * buf->stride,
  524. (uint8_t *)buf->mmapped + (top_margin + rows) * buf->stride,
  525. top_keep_rows * buf->stride);
  526. #if TIME_SCROLL
  527. struct timeval time4;
  528. gettimeofday(&time4, NULL);
  529. timersub(&time4, &time2, &tot);
  530. LOG_INFO("memmove (top region): %lds %ldus", tot.tv_sec, tot.tv_usec);
  531. #endif
  532. }
  533. return ret;
  534. err:
  535. abort();
  536. return false;
  537. }
  538. bool
  539. shm_scroll(struct wl_shm *shm, struct buffer *buf, int rows,
  540. int top_margin, int top_keep_rows,
  541. int bottom_margin, int bottom_keep_rows)
  542. {
  543. if (!shm_can_scroll(buf))
  544. return false;
  545. assert(rows != 0);
  546. return rows > 0
  547. ? shm_scroll_forward(shm, buf, rows, top_margin, top_keep_rows, bottom_margin, bottom_keep_rows)
  548. : shm_scroll_reverse(shm, buf, -rows, top_margin, top_keep_rows, bottom_margin, bottom_keep_rows);
  549. }
  550. void
  551. shm_purge(struct wl_shm *shm, unsigned long cookie)
  552. {
  553. LOG_DBG("cookie=%lx: purging all buffers", cookie);
  554. /* Purge old buffers associated with this cookie */
  555. tll_foreach(buffers, it) {
  556. if (it->item.cookie != cookie)
  557. continue;
  558. assert(!it->item.busy);
  559. buffer_destroy(&it->item);
  560. tll_remove(buffers, it);
  561. }
  562. }