Compare commits

...

385 Commits

Author SHA1 Message Date
vazmin 4adf6b3227 gh actions: upgrade to 1.2.11-1 2025-11-23 10:48:22 +00:00
vazmin f4a799402e gh actions: upgrade to 1.2.11-1 2025-11-23 10:00:56 +00:00
vazmin 27510e9641 gh actions: upgrade to 1.2.11-1 2025-11-23 09:06:43 +00:00
YuQing 848077797b upgrade version to 1.2.11 2025-11-16 17:01:06 +08:00
YuQing d22f9da49c bugfixed: MUST call sf_hold_task in sf_nio_notify for rare case 2025-11-16 15:29:38 +08:00
YuQing 5495455fa7 do NOT call task->finish_callback 2025-11-15 11:14:14 +08:00
YuQing 4da0ff251c upgrade version to 1.2.10 2025-11-11 09:57:18 +08:00
YuQing 2444eac6ce declare use_send_zc field anyway 2025-11-11 09:52:42 +08:00
YuQing a52cc2d5d4 check sf_context->use_io_uring more 2025-11-05 09:58:37 +08:00
YuQing c4af33a497 declare use_io_uring correctly 2025-11-04 15:55:33 +08:00
YuQing fa32972052 move use_io_uring and use_send_zc to struct sf_context 2025-11-04 15:40:00 +08:00
YuQing 688211fbcd correct compile error 2025-11-03 15:22:45 +08:00
YuQing 1b2f521b99 uring cancel callback release task correctly 2025-11-03 14:56:29 +08:00
YuQing ddc528d69d restore function sf_client_sock_in_read_stage 2025-11-02 15:02:54 +08:00
YuQing 32d443b497 MUST call set_read_event anyway after socket send done 2025-10-27 12:28:24 +08:00
YuQing 53dd39500f upgrade version to 1.2.9 2025-10-26 12:27:49 +08:00
YuQing 772a9a6895 Merge remote-tracking branch 'origin/use_iouring' 2025-10-26 12:26:53 +08:00
YuQing 932751d392 send zc done notify callback for recycling buffer 2025-10-20 10:34:47 +08:00
YuQing 817ff547da set alloc_conn_once and max_entries gracefully 2025-10-12 12:28:11 +08:00
YuQing 926cd40114 ioevent_init: set max entries for io_uring gracefully 2025-10-12 10:24:35 +08:00
YuQing b688973cf9 change use_send_zc's default value to true 2025-10-09 14:01:32 +08:00
YuQing b16526e8f7 bug fixed: check_task process correctly 2025-10-06 20:55:29 +08:00
YuQing 68079fc468 IOEventCallback: change event type from short to int 2025-10-05 16:53:21 +08:00
YuQing 3dcc1c570d call sf_proto_init_task_magic when task->shrinked 2025-10-03 21:06:58 +08:00
YuQing cf0950ea62 sf_set_read_event just skipped when use_io_uring is true 2025-10-03 11:33:26 +08:00
YuQing 263171c4fe async_connect use io_uring 2025-09-30 11:26:11 +08:00
YuQing a2ab8a0c01 adapt Linux io_uring OK 2025-09-27 15:41:56 +08:00
YuQing 0f75b039f6 sf_logger_set_schedule_entry change to sf_logger_set_schedule_entries 2025-09-26 19:57:03 +08:00
YuQing ecee21f289 socket send and recv adapt to io_uring 2025-09-25 15:54:38 +08:00
YuQing bc5af8a58b struct sf_network_handler add field use_iouring 2025-09-24 15:59:27 +08:00
YuQing f0ee6ce73f struct sf_context remove field: remove_from_ready_list 2025-09-21 15:08:08 +08:00
vazmin aef9d803d1 gh actions: upgrade to 1.2.8-1 2025-08-16 16:32:03 +00:00
YuQing 6d0465acc4 upgrade version to 1.2.8 2025-08-14 09:44:39 +08:00
YuQing 2e5258045d change SF_BINLOG_FILE_PREFIX to SF_BINLOG_FILE_PREFIX_STR 2025-08-10 12:03:25 +08:00
YuQing fc689a02ba rename fc_combine_two_string to fc_combine_two_strings 2025-08-09 15:22:04 +08:00
YuQing 7b3f6d620a use fc_safe_strcpy and fc_combine_two_string 2025-08-08 21:52:08 +08:00
YuQing 03f64998ce performance opt.: replace snprintf 2025-08-07 19:53:38 +08:00
YuQing 0b4936bd8f performance opt.: replace sprintf again 2025-08-05 18:05:56 +08:00
YuQing ba87f8e1ea performance opt.: replace sprintf 2025-08-04 16:57:35 +08:00
YuQing da2ddc7441 sf_log_config for client correctly 2025-04-23 15:17:23 +08:00
vazmin b83189f021 gh actions: upgrade to 1.2.7-1 2025-04-06 16:56:40 +00:00
YuQing 4ad53f7ee3 correct libserverframe release version 2025-04-01 17:21:01 +08:00
YuQing 003cc74b96 upgrade version to 1.2.7 2025-04-01 16:49:38 +08:00
YuQing 3815c0b2ce export function sf_file_writer_direct_write 2025-03-15 10:10:01 +08:00
YuQing 7ee7e7b535 add function sf_file_writer_get_last_line 2025-03-10 17:01:03 +08:00
YuQing e095ce45c2 add function sf_file_writer_flush_buffer 2025-03-05 20:05:56 +08:00
YuQing f9d8c3d070 add function write_to_binlog_index_file 2025-02-18 15:49:00 +08:00
YuQing fc9c23fb95 call flush_writer_files before rotate file 2025-02-17 10:54:07 +08:00
YuQing 755296bb68 add function sf_proto_send_buf2 2025-02-12 21:59:04 +08:00
YuQing 14a783fd6f sf_binlog_writer_rotate_file can skip empty file 2025-02-10 18:34:04 +08:00
YuQing e9e33883bf Merge branch 'master' of gitee.com:fastdfs100/libserverframe 2025-02-09 09:30:42 +08:00
YuQing 318640572f sf_file_writer.[hc]: support write done callback 2025-02-09 09:26:34 +08:00
YuQing f6e2de5668 upgrade version to 1.2.6 2025-01-27 20:50:18 +08:00
YuQing b6e24d0548 sf_connection_manager.[hc]: get connection functions add parameter shared 2025-01-27 10:59:57 +08:00
YuQing 03974ff302 explicit cast for fast_mblock_alloc_object 2024-12-08 09:29:16 +08:00
YuQing a3e1956128 change macro sf_log_network_error_for_update_ex 2024-10-29 09:52:45 +08:00
YuQing 75e8aacfd9 sf_binlog_writer.[hc] add parameter write_interval_ms for high performance 2024-10-07 09:21:19 +08:00
vazmin be4bad4ee1 gh actions: upgrade to 1.2.5-1 2024-09-29 15:24:39 +00:00
YuQing 3993b79a81 sf_connection_manager_init: set htable_capacity gracefully 2024-09-22 12:21:06 +08:00
YuQing 42c6e2c6b7 struct sf_context add field is_client for RDMA callback alloc_pd 2024-09-20 14:16:59 +08:00
YuQing 6a60a49c32 upgrade version to 1.2.5 2024-09-17 11:24:01 +08:00
YuQing 14d0a1c014 task init callback support extra argument 2024-09-15 12:06:25 +08:00
vazmin a01ccf66dc gh actions: upgrade to 1.2.4-1 2024-06-15 14:45:44 +00:00
YuQing 63d43fc9cc upgrade version to 1.2.4 2024-06-11 09:39:08 +08:00
YuQing e83be7356d change default values of connect timeout and network timeout 2024-04-28 16:20:59 +08:00
vazmin 3dfdb8ace6 gh actions: upgrade to 1.2.3-1 2024-03-17 15:11:04 +00:00
YuQing 35d9be16ee upgrade version to V1.2.3 2024-03-11 11:18:42 +08:00
YuQing 231e2610e5 log square quoted IPv6 address 2024-03-05 18:07:34 +08:00
YuQing 1c796ab819 sf_file_writer_init add parameter: max_record_size 2024-02-29 11:58:05 +08:00
YuQing 7f6ad6dcba sf_load_context_from_config_ex support max_pkg_size_min_value 2024-02-26 16:17:41 +08:00
YuQing 78d65ba2c6 net buffer config for each sf_context instance 2024-02-20 09:53:52 +08:00
YuQing d5a9f40a66 correct macros for struct SFBinlogBuffer 2024-02-15 15:13:40 +08:00
YuQing 9d3a92d7da fixed macro SF_BINLOG_BUFFER_LENGTH 2024-02-05 17:05:39 +08:00
YuQing cab9ce4c4f set flags of protocol header 2024-02-04 09:22:56 +08:00
vazmin 8ea4e28f73 gh actions: upgrade to 1.2.2-1 2024-01-31 12:00:10 +00:00
YuQing 930784191e upgrade version to 1.2.2 2024-01-30 10:49:58 +08:00
YuQing e20a2c04c2 set header flags in sf_proto_init_task_context 2024-01-29 11:08:16 +08:00
vazmin 782697414d gh actions: upgrade to 1.2.1-1 2024-01-01 11:24:45 +00:00
YuQing c861b1cf67 upgrade version to 1.2.1 2023-12-08 15:23:56 +08:00
YuQing c2e7b6e711 add function sf_set_address_family_by_ip 2023-12-05 08:16:54 +08:00
YuQing a969a0df07 support address family IPv4, IPv6 and both 2023-11-29 18:52:02 +08:00
YuQing 5618afabbb code adjust for pull request #6 2023-11-23 16:16:00 +08:00
YuQing 0e1fcdafce
Merge pull request #6 from sunqiangwei1988/master
Added: IPv6 support
2023-11-23 15:58:43 +08:00
YuQing 951d010512
Merge branch 'master' into master 2023-11-23 15:58:26 +08:00
vazmin 9b6c64e346 gh actions: upgrade to 1.2.0-3 2023-11-21 14:36:16 +00:00
YuQing 413f6eef76 add function sf_global_init 2023-11-21 15:54:24 +08:00
vazmin 153905dc44 gh actions: upgrade to 1.2.0-2 2023-11-20 13:24:02 +00:00
vazmin 960e84e2b5 gh actions: upgrade to 1.2.0-1 2023-11-19 14:46:16 +00:00
YuQing a9f4447686 log more info for sf_proto_set_body_length 2023-11-16 10:42:24 +08:00
YuQing cf9088fb0c use task->pending_send_count to prevent re-entry 2023-11-06 10:54:50 +08:00
sunqiangwei1988 c619a5336d Added: IPv6 support
1. Add functions to detect whether the host has an IPv4 address and/or an IPv6 address configured.
2. Modify the _socket_server function in sf_service.c to support both IPv4 and IPv6 addresses; when the server is dual-stack, prefer the IPv4 address.
2023-11-02 10:27:22 +08:00
YuQing a8867a19c4 sf_send_data_callback and sf_send_done_callback changed 2023-10-26 10:48:22 +08:00
YuQing 89a451b8ce call set_read_event in func sf_nio_deal_task 2023-10-25 10:02:47 +08:00
YuQing db00a7add8 set task recv offset and length correctly 2023-10-24 21:22:56 +08:00
YuQing c3f7254838 add inited variable for run_by struct 2023-10-18 17:20:14 +08:00
YuQing 7f6e7b12b4 remove useless variable: length 2023-09-29 15:01:23 +08:00
YuQing 71e7066c81 invoke send done callback correctly 2023-09-29 14:59:58 +08:00
YuQing 5f5db2b998 support explicit post recv for RDMA 2023-09-28 22:20:52 +08:00
YuQing 60d6b49998 rdma callback get_buffer rename to get_recv_buffer 2023-09-27 11:23:50 +08:00
YuQing a0fe474198 add functions: sf_xxxx_task_send/recv_buffer 2023-09-26 15:51:26 +08:00
YuQing 17c99cdd55 sf_nio_task_is_idle rename to sf_nio_task_send_done 2023-09-26 15:26:42 +08:00
YuQing f8e3fcdc55 adapt to the newest struct fast_task_info from libfastcommon 2023-09-25 18:37:53 +08:00
YuQing cd1920872a sf_recv_response_header check recv length for rdma 2023-09-24 14:31:37 +08:00
YuQing fee1e28348 SF_NIO_STAGE_CONNECT stage should call inc_connection_current_count 2023-09-22 18:44:32 +08:00
YuQing be9b71422f nio support callback connect_done for client 2023-09-22 18:27:12 +08:00
YuQing 3a413408ad add func sf_alloc_init_task_ex to specify reffer_count 2023-09-21 09:03:45 +08:00
YuQing 3c7ebd14d9 MUST call fast_timer_remove after ioevent_detach 2023-09-19 09:37:17 +08:00
YuQing 9fad04f3f9 nio threads support busy_polling_callback 2023-09-18 16:19:10 +08:00
YuQing 9731e736df idempotency support RDMA 2023-09-15 10:39:03 +08:00
YuQing 0eb842dc09 sf_nio.c: check_task adapt to RDMA 2023-09-14 09:50:49 +08:00
YuQing 435ae6bb84 remove quotes for macro LOAD_API 2023-09-13 21:23:47 +08:00
YuQing c6d4612862 send and recv data adapt for RDMA 2023-09-12 16:03:22 +08:00
YuQing fca50e6d49 sf_load_config support fixed_buff_size 2023-09-11 11:36:30 +08:00
YuQing 2463725570 use the newest conn_pool_init_ex1 from libfastcommon 2023-09-10 20:55:46 +08:00
YuQing 12637bf181 set rdma handler listen port 2023-09-08 07:58:46 +08:00
YuQing dedc023235 add parameter comm_type when load from config 2023-09-07 09:38:21 +08:00
YuQing b3334d2ad5 add function sf_set_body_length 2023-09-05 16:45:51 +08:00
YuQing e22400fa1c struct fast_task_info support padding_size for RDMA connection 2023-09-05 09:21:45 +08:00
YuQing 5a29dffc50 load RDMA APIs from library 2023-09-05 07:27:15 +08:00
YuQing 2839183433 move type SFNetworkType to libfastcommon as FCNetworkType 2023-09-04 11:01:36 +08:00
YuQing 36e4922440 callbacks impl. for socket 2023-09-03 18:35:31 +08:00
YuQing 96c7bc9a42 function prototype for socket and rdma both 2023-09-03 11:50:50 +08:00
vazmin 58a796e169 gh actions: upgrade to 1.1.29-1 2023-08-06 07:22:46 +00:00
YuQing c21cc936ef upgrade version to 1.1.29 2023-08-06 09:04:32 +08:00
YuQing f9f7b0f159 add function sf_serializer_pack_id_name_skiplist 2023-08-05 20:48:40 +08:00
YuQing e440273f35 sf_binlog_writer.c: flow control more rigorously 2023-07-30 10:11:00 +08:00
YuQing 27a7696867 bugfixed: sf_binlog_index.c call parse only when row_count > 0 2023-07-29 09:48:25 +08:00
YuQing 024a6b0e8a improve robustness of binlog writer flow control 2023-07-27 10:54:59 +08:00
YuQing 6ce1a711f9 add inline function: sf_binlog_writer_get_waiting_count etc. 2023-07-26 10:35:58 +08:00
vazmin 3e3162c825 gh actions: upgrade to 1.1.28-1 2023-07-23 14:28:20 +00:00
YuQing 8fdb8599c9 upgrade version to 1.1.28 2023-07-07 08:26:32 +08:00
YuQing 566c055f27 use libfastcommon V1.68 2023-07-05 18:09:53 +08:00
YuQing 5e8535db9c function sf_push_to_binlog_write_queue changed 2023-06-30 10:40:05 +08:00
YuQing 1abf7402ca log info when flow ctrol waiting time > 0 gracefully 2023-06-30 10:29:18 +08:00
YuQing d006954ceb sf_binlog_writer_init support call_fsync parameter 2023-06-27 18:19:36 +08:00
YuQing 6e071410dc log warning when flow ctrol waiting time > 0 2023-06-27 16:39:37 +08:00
vazmin e34cc12ae5 gh actions: upgrade to 1.1.27-1 2023-06-24 06:51:30 +00:00
YuQing b15faf68f4 upgrade version to 1.1.27 2023-06-17 15:22:06 +08:00
YuQing a95f4cc725 sf_binlog_writer.[hc]: use config max_delay for flow control 2023-06-16 17:59:29 +08:00
YuQing c9fba3b9a7 Merge branch 'master' of github.com:happyfish100/libserverframe 2023-06-10 14:48:29 +08:00
YuQing 2a245a06aa sf_file_writer.[hc] support config call_fsync for performance 2023-06-10 14:32:00 +08:00
vazmin 6f60ff5825 gh actions: upgrade to 1.1.26-1 2023-06-04 10:52:11 +00:00
YuQing d5139804f9 adapt newest fast_mblock_init_ex2 2023-05-19 11:21:15 +08:00
YuQing 0989cc02fe remove debug info in request_metadata.c 2023-05-18 20:20:27 +08:00
YuQing 5786b0383f request_metadata.c: set thread name in Linux 2023-05-18 16:10:51 +08:00
YuQing 3b946778dd upgrade version to 1.1.26 2023-05-16 09:41:33 +08:00
YuQing 32706b6275 add function sf_socket_close 2023-05-10 20:29:14 +08:00
YuQing 7c6673f78a connection manager support exclude server_id for server side 2023-05-05 16:22:04 +08:00
YuQing 90e144920a use new sorted queue with double link chain for quick push 2023-05-04 20:07:23 +08:00
YuQing c5d64a0d54 change field lc_pair to lcp 2023-03-27 16:27:57 +08:00
YuQing 2272bf2707 add macro func: sf_file_writer_get_last_version_silence 2023-03-27 15:32:25 +08:00
YuQing 0328b32766 sf_file_writer_get_last_version support log_level 2023-03-14 09:48:22 +08:00
YuQing 404f374397 remove useless field: tag 2023-03-12 11:29:56 +08:00
YuQing dfc14de25d add type SFBlockSliceKeyInfo for libdiskallocator 2023-03-10 19:11:16 +08:00
YuQing 30ebb55c27 add type SFBlockKey and SFSliceSize for libdiskallocator and faststore 2023-03-05 08:48:36 +08:00
YuQing 077154f75f add macro func sf_log_network_error_for_delete_ex 2023-03-02 11:25:53 +08:00
YuQing 8bedbb6f27 code simplification for last commit 2023-02-23 10:51:46 +08:00
YuQing 92fbcab0f4 bugfixed: fastdfs issue #620
set notify.stage to SF_NIO_STAGE_NONE before deal_notified_task
2023-02-23 10:30:40 +08:00
vazmin ac923ebaf8 gh actions: upgrade to 1.1.25-1 2023-02-18 05:44:50 +00:00
YuQing a9ebe20b5b upgrade version to 1.1.25 2023-02-15 21:04:04 +08:00
YuQing 13990e3747 code simplification for epoll edge trigger 2023-02-12 20:04:01 +08:00
YuQing 294ad5e636 use field notify_next for notify queue of nio thread 2023-02-12 19:47:31 +08:00
YuQing 7f758fd293 init epoll_edge_trigger to false 2023-02-12 12:20:06 +08:00
YuQing c1ae024da5 enable epoll edge trigger by global variable epoll_edge_trigger 2023-02-12 10:38:46 +08:00
vazmin 1dd9ac656f gh actions: upgrade to 1.1.24-1 2023-01-15 13:50:15 +00:00
YuQing 5a8452721d upgrade version to 1.1.24 2023-01-14 08:40:47 +08:00
YuQing 91f0564158 change log level to debug for hash entry reclaim 2023-01-13 14:25:30 +08:00
YuQing 69f117c956 check socket connected on unexpected stage 2022-12-30 17:23:18 +08:00
YuQing ca3f14df6e get_leader_connection: failover on multi ip addresses 2022-12-23 09:35:01 +08:00
YuQing a6c8c65371 log address count when make_connection fail 2022-12-22 16:04:32 +08:00
YuQing 3ccec6eb36 add function sf_load_data_path_config_ex 2022-12-21 15:45:03 +08:00
YuQing f3afc0af6e show patch part of version info anyway 2022-11-25 16:04:13 +08:00
vazmin e54f2d413e gh actions: upgrade to 1.1.22-1 2022-11-21 14:55:50 +00:00
vazmin 777713e0e4 debian: installation dir changes 2022-11-21 22:36:29 +08:00
YuQing a2dc31dc88 upgrade version to 1.1.23 2022-11-21 08:16:56 +08:00
YuQing f262e60259 make.sh set LIB_VERSION to lib for Ubuntu and Debian 2022-11-20 17:00:35 +08:00
YuQing 3578c0f0af Makefile.in: force symlink library 2022-11-13 17:16:39 +08:00
YuQing 5ca1f6dda6 use newest function normalize_path from libfastcommon 2022-11-07 08:30:49 +08:00
YuQing 53fea21135 make.sh auto create symlink for include 2022-11-07 08:30:09 +08:00
YuQing 3191d01e38 convert errnos: ENOLINK, ENOTEMPTY and ELOOP 2022-11-03 11:11:49 +08:00
YuQing f2bfe72a4f requires libfastcommon 1.0.63 2022-10-26 09:58:16 +08:00
YuQing be38181f71 upgrade version to 1.1.22 2022-10-26 09:53:47 +08:00
YuQing 0b89c09371 SFProtoGetServerStatusReq add field: auth_enabled 2022-10-25 09:24:55 +08:00
YuQing 5522165e5c add macro SF_PROTO_CLIENT_SET_REQ_EX 2022-10-24 20:46:53 +08:00
vazmin 97f9db7a17 gh actions: upgrade to 1.1.21-1 2022-10-08 13:28:40 +00:00
YuQing 8e42e9640d upgrade version to 1.1.21 2022-10-08 09:30:01 +08:00
YuQing 3b5d580b36 set last_versions.done after write 2022-09-30 15:37:05 +08:00
YuQing d4676e9d71 sf_binlog_writer.[hc]: support passive write 2022-09-29 11:44:02 +08:00
vazmin 230250d2f3 gh actions: upgrade to 1.1.20-1 2022-09-22 12:22:39 +00:00
YuQing 16be02e8fd upgrade version to 1.1.20 2022-09-22 09:14:21 +08:00
YuQing 4a30dfe844 add macro: sf_log_network_error_for_update_ex 2022-09-22 08:40:03 +08:00
YuQing 4f3cde053c auto create base_path when it not exist 2022-09-21 11:37:51 +08:00
YuQing 45531cf0c8 output refine for receipt_recv_timeout_callback 2022-09-20 20:39:55 +08:00
YuQing 99078203c0 upgrade version to V1.1.19 2022-09-15 10:41:32 +08:00
YuQing 61d2762411 Merge branch 'master' of github.com:happyfish100/libserverframe 2022-09-14 10:41:12 +08:00
YuQing b7b346ea7f sf_load_global_config_ex add params: max_pkg_size_item_name and need_set_run_by 2022-09-14 10:40:44 +08:00
YuQing cf4856e04b support send_done_callback for FastDFS 2022-09-14 10:38:38 +08:00
vazmin e64bf7f15e gh actions: upgrade to 1.1.18-1 2022-09-07 13:36:38 +00:00
YuQing 78337ec4a3 upgrade version to 1.1.18 2022-09-04 13:51:29 +08:00
YuQing 5ee8ce8fe7 correct macro function sf_service_init 2022-09-03 10:03:09 +08:00
YuQing a0f16319e0 struct sf_shared_mbuffer: Must move buff to last 2022-08-30 11:07:31 +08:00
YuQing 8b22655352 add function sf_release_task_shared_mbuffer 2022-08-28 17:25:50 +08:00
YuQing c27cb2a9af add files: sf_shared_mbuffer.[hc] 2022-08-27 21:39:31 +08:00
YuQing 2ebb51dcfd support alloc_recv_buffer callback 2022-08-25 18:22:16 +08:00
YuQing 3257a5f842 function sf_get_base_path_from_conf_file impl. 2022-08-20 09:59:36 +08:00
vazmin f63843765a gh actions: upgrade to 1.1.17-1 2022-08-15 13:31:54 +00:00
YuQing f08b81b3b9 upgrade version to 1.1.17 2022-08-15 15:19:28 +08:00
YuQing dc9267188d replication quorum support smart mode 2022-08-02 16:22:54 +08:00
YuQing 2d01d91b87 sf_connection_manager.c log module name 2022-07-29 16:40:41 +08:00
vazmin f433589d05 gh actions: upgrade to 1.1.16-1 2022-07-25 13:52:09 +00:00
YuQing 809a1bf997 upgrade version to V1.1.16 2022-07-24 14:58:09 +08:00
YuQing 94ee91d37d bugfixed: sf_file_writer_get_last_lines deal correctly when cross files 2022-07-21 18:34:11 +08:00
YuQing 5da65a172c request_metadata.c: check data_version > 0 for performance 2022-07-08 11:23:32 +08:00
YuQing 803d3cb626 IdempotencyRequestMetadata add field n for integer argument 2022-07-05 19:25:55 +08:00
YuQing a966d1bf4d sf_synchronize_finished_notify_no_lock impl. 2022-07-05 09:01:37 +08:00
YuQing 7cfb8dc89d add functions sf_synchronize_finished_notify/wait 2022-07-04 11:01:24 +08:00
YuQing d95e3ed679 remove debug info 2022-06-30 17:59:44 +08:00
YuQing f63ede788e generate seq_id only once per RPC 2022-06-29 15:02:38 +08:00
YuQing 990ef2d173 request_metadata.[hc] v2 impl. 2022-06-27 22:17:17 +08:00
YuQing 92613c765f request_metadata.[hc] first verson finished 2022-06-27 17:17:57 +08:00
YuQing b364a875c2 add files idempotency/server/request_metadata.[hc] 2022-06-27 11:30:01 +08:00
YuQing c6300318c8 use macro FC_SET_CLOEXEC from libfastcommon 2022-06-25 11:24:59 +08:00
YuQing dfc58be3ec add func sf_nio_add_to_deleted_list 2022-06-25 09:21:02 +08:00
YuQing 8824c35975 open file with flag O_CLOEXEC 2022-06-24 18:56:28 +08:00
YuQing 56ccde45ba idempotency seq_id includes server id and channel id for global unique 2022-06-24 10:37:10 +08:00
YuQing 7f7ba8d835 support set next version when order_mode is VARY 2022-06-24 07:46:04 +08:00
YuQing 22ffe6841d change default values of log_file_rotate_everyday and log_file_keep_days 2022-06-18 18:19:35 +08:00
YuQing 6dd3bfbb22 sf_replication_quorum_check changed 2022-06-17 11:24:31 +08:00
YuQing e8e6cfc64a add replication quorum type and functions 2022-06-16 16:01:01 +08:00
vazmin e344feb092 gh actions: upgrade to 1.1.15-1 2022-06-15 14:26:27 +00:00
YuQing 98c85ba7eb libserverframe.spec: upgrade version 2022-06-06 20:34:57 +08:00
YuQing 1d1d4c9f00 sf_file_writer_get_binlog_indexes ignore file not exist 2022-06-03 15:32:07 +08:00
YuQing bcd1120617 sf_file_writer support specifying file prefix 2022-05-31 21:19:15 +08:00
YuQing 39e5dd419e custom define binlog rotate file size 2022-05-30 11:24:05 +08:00
YuQing 464573f9ff sf_file_writer_set_indexes impl. 2022-05-26 20:14:00 +08:00
YuQing f490366c03 sf_binlog_writer_change_write_index impl. 2022-05-19 18:14:33 +08:00
YuQing 353dde7059 add macro SF_ERROR_EINPROGRESS 2022-05-18 10:39:23 +08:00
YuQing 89a39e85d3 add macro SF_CLUSTER_ERROR_BINLOG_MISSED 2022-05-17 14:58:15 +08:00
YuQing c717646593 sf_file_writer_get_indexes impl. 2022-05-14 16:27:09 +08:00
YuQing c611b9b30c sf_file_writer.[hc] support start_index 2022-05-14 14:21:05 +08:00
YuQing 077a68a974 add two macros for vote node 2022-05-09 16:20:21 +08:00
YuQing 09839f9bf4 log service_name field when connect or communicate error 2022-05-08 10:44:55 +08:00
YuQing 00faf7e637 add function sf_load_cluster_config1 2022-05-06 15:12:19 +08:00
YuQing 07bbf65847 sf_proto_get_server_status_pack use struct 2022-05-06 09:48:49 +08:00
YuQing b38bf00a28 proto get_server_status remove field service_id 2022-04-30 10:20:30 +08:00
YuQing 7f92190c87 add SF_CLUSTER_PROTO_GET_SERVER_STATUS_REQ/RESP 2022-04-29 14:37:59 +08:00
vazmin 312b7752ef gh actions: upgrade to 1.1.14-1 2022-04-28 11:54:26 +00:00
YuQing 3734e68e0b sf_binlog_writer_finish check thread running 2022-04-24 10:57:19 +08:00
YuQing dcd024019b Merge branch 'recovery_and_balance' 2022-04-24 08:26:18 +08:00
YuQing a29ac30f67 upgrade version to V1.1.14 2022-04-22 14:57:26 +08:00
YuQing 78e321f4ad election quorum support sf_election_quorum_auto 2022-04-21 11:29:43 +08:00
YuQing 613c31fcf3 sf_binlog_writer_change_order_by check if versioned writer 2022-04-18 08:59:07 +08:00
YuQing 952647cbc9 order_by feature belongs to writer instead of thread 2022-04-17 18:18:18 +08:00
YuQing a57709de93 sf_connection_manager.c: make_master_connection refined 2022-04-15 16:58:42 +08:00
YuQing 7259eaf6ac log retry count when get connection fail 2022-04-11 10:24:21 +08:00
vazmin 3dd9313dc2 debian update substvars format 2022-04-05 00:37:34 +08:00
vazmin 9e77dac94b feat use debian/substvars 2022-04-04 14:35:52 +08:00
YuQing de943f684a add function sf_load_quorum_config 2022-03-30 21:22:34 +08:00
YuQing b4aaf69962 sf_buffered_writer.h: compile OK. 2022-03-25 15:30:14 +08:00
YuQing 68d41aa690 rename to sf_file_writer_deal_versioned_buffer 2022-03-22 08:23:03 +08:00
YuQing 1a03fec1f6 add function sf_file_writer_get_binlog_index 2022-03-19 16:36:11 +08:00
YuQing a727f382bc add function: sf_binlog_writer_notify_exit 2022-03-18 16:48:26 +08:00
YuQing a265bbbbea add function sf_binlog_writer_destroy 2022-03-17 20:52:41 +08:00
YuQing e061a3dfad add file src/sf_buffered_writer.h 2022-03-16 11:48:00 +08:00
vazmin 246ff83225 debian: add changelog 1.1.13-1 2022-03-13 17:19:01 +08:00
YuQing d129c6151e add function sf_binlog_writer_get_index_filename 2022-03-08 17:13:29 +08:00
YuQing 25ca590416 make.sh: change DEBUG_FLAG to 0 2022-03-06 19:41:16 +08:00
YuQing 2bcf2428e1 upgrade version to 1.1.13 2022-03-03 10:16:00 +08:00
YuQing 8de3678e86 sf_load_global_config_ex: server_name can be NULL 2022-03-03 10:00:29 +08:00
YuQing 6549172c67 support function sf_sharding_htable_delete 2022-02-27 15:57:32 +08:00
YuQing 3e4ddce4a2 add function sf_load_global_base_path 2022-02-26 10:05:25 +08:00
YuQing 1ba160b6d7 change log level to debug 2022-02-25 09:45:40 +08:00
YuQing 9159d9c24b simple_hash rename to fc_simple_hash 2022-02-09 22:39:40 +08:00
YuQing 2d177ab262 sf_iov.[hc] add function sf_iova_memcpy_ex 2022-02-14 10:26:25 +08:00
YuQing 52e34ca393 sf_iova_memset_ex: add const modifier 2022-02-04 15:51:08 +08:00
YuQing fa9e00f3b8 sf_iova_memset_ex for iov and iovcnt 2022-02-04 15:37:59 +08:00
YuQing 5796655ce0 sf_iova_memset impl. 2022-02-04 15:22:11 +08:00
YuQing 23ff87dea0 iova_slice error detect 2022-02-04 10:59:00 +08:00
YuQing a46945b6cd add files: sf_iov.[hc] 2022-02-03 22:30:19 +08:00
vazmin 1adfb10c63 upgrade version to 1.1.12 2022-01-15 20:16:06 +08:00
YuQing cfd7690f4e upgrade version to 1.1.12 2022-01-13 10:07:41 +08:00
YuQing 16f5b42b95 sf_synchronize_counter_wait: check SF_G_CONTINUE_FLAG 2022-01-12 07:05:27 +08:00
vazmin 1a06ea13e7 update debian package version 2021-12-27 21:50:18 +08:00
YuQing 62846f21ff upgrade version to 1.1.11 2021-12-23 11:25:19 +08:00
YuQing 15fc77703a function sf_serializer_read_message changed 2021-11-18 11:19:32 +08:00
YuQing 320f344b3e sf_serializer.[hc]: support id_name_array 2021-11-16 10:08:40 +08:00
YuQing 97b64c67fb sf_synchronize_counter_xxx use mutex lock all 2021-11-08 11:06:30 +08:00
YuQing f4bfe9ad25 move/set void pointer correctly 2021-11-04 20:51:33 +08:00
YuQing 255754eb5f add function sf_synchronize_counter_notify 2021-10-28 16:19:02 +08:00
YuQing b9b466c364 set last_versions.done when binlog_writer_set_next_version 2021-10-17 21:51:28 +08:00
YuQing cccb1c6d23 add function sf_synchronize_counter_wait 2021-10-12 10:54:30 +08:00
YuQing 0eb483f68c add function sf_serializer_pack_buffer 2021-10-09 10:20:31 +08:00
YuQing 1901189515 add struct SFSynchronizeContext 2021-10-05 20:03:05 +08:00
YuQing e35a3ca104 sf_serializer.[hc]: support string array 2021-10-01 09:13:46 +08:00
YuQing fd9d59dbd2 add function sf_binlog_index_expand_array 2021-09-27 17:43:56 +08:00
YuQing ae832465a1 sf_ordered_writer.[hc] impl 2021-09-22 10:53:38 +08:00
YuQing d2b828bd7a add files: sf_ordered_writer.[hc] 2021-09-21 17:07:11 +08:00
YuQing fb7ffa0b1d sf_binlog_writer removes global variable: g_sf_binlog_data_path 2021-09-21 09:26:10 +08:00
YuQing db858fc048 sf_binlog_writer uses sf_file_writer 2021-09-20 21:30:24 +08:00
YuQing 66fe9767f7 add files: sf_file_writer.[hc] 2021-09-20 20:55:23 +08:00
YuQing 8a1f905b2a rename function sf_serializer_read to sf_serializer_read_message 2021-09-20 10:42:47 +08:00
YuQing 8344c8309f sf_serializer.[hc] add function sf_serializer_read 2021-09-17 09:30:15 +08:00
YuQing edf9d58909 add function sf_serializer_pack_integer 2021-09-07 08:21:50 +08:00
YuQing d79310674e rename sf_serialize.[hc] to sf_serializer.[hc] 2021-09-06 10:18:05 +08:00
YuQing 88a0f0a267 add function sf_binlog_writer_get_last_version 2021-09-01 21:13:57 +08:00
YuQing 024c148700 add files: sf_binlog_index.[hc] 2021-08-21 16:12:03 +08:00
YuQing cc5f215a3a sf_serialize_next return array and map correctly 2021-08-20 17:36:20 +08:00
YuQing 62c874bc4f sf_serialize_unpack array and map impl 2021-08-20 11:06:12 +08:00
YuQing c529c5ab8c sf_serialize_unpack integer and string impl 2021-08-19 21:05:42 +08:00
YuQing 186d41fafe add files: sf_serialize.[hc] 2021-08-19 16:49:07 +08:00
zhiming 93882e447b
feat debian package (#1)
* debian packaging

* update deb depends

* update deb depend

* update the shared library install dirs in deb

* update deb rules
2021-08-03 08:39:56 +08:00
YuQing 2d71c389f6 fast_mblock_init_ex1: unify obj name 2021-07-20 20:27:02 +08:00
YuQing 6a40a92725 upgrade version to V1.1.10 2021-07-06 21:24:17 +08:00
YuQing 10ce86d879 add macro: SF_CLUSTER_ERROR_NOT_LEADER 2021-07-05 16:07:11 +08:00
YuQing 77b3938a80 change default network timeout from 30 to 10 2021-07-04 18:26:44 +08:00
YuQing 837e35ccc8 change default connect timeout from 10 to 2 2021-07-04 18:21:32 +08:00
YuQing 65876c51d7 section names use minus(-) such as error-log and slow-log 2021-07-04 10:11:45 +08:00
YuQing 2a57961b59 support error handler callback 2021-06-30 21:22:57 +08:00
YuQing 414f0f1efe add types: SFListLimitInfo and SFProtoLimitInfo 2021-06-29 22:42:02 +08:00
YuQing a42f9c6376 sf_usage_ex add action: status 2021-06-26 11:02:01 +08:00
YuQing c960975d65 sf_sharding_htable.c: correct function name 2021-06-17 15:58:52 +08:00
YuQing 4480a4a39c sf_sharding_htable.[hc]: use ms instead of second 2021-06-15 10:23:48 +08:00
YuQing 5e1444ef71 upgrade version to V1.1.9 2021-06-10 17:27:42 +08:00
YuQing b8f7a86060 avoid writev iovcnt overflow 2021-06-08 15:40:49 +08:00
YuQing d11243964b call writev for iovec array 2021-06-08 14:19:11 +08:00
YuQing ba70c63e80 add type SFMemoryWatermark 2021-06-04 14:48:06 +08:00
YuQing f72295e103 use fc_queue_try_pop_to_queue from libfastcommon 2021-06-01 17:09:57 +08:00
YuQing c1f6fb797b change macro SF_G_BASE_PATH to SF_G_BASE_PATH_STR 2021-05-27 07:58:26 +08:00
YuQing 5befb9d616 add function sf_set_global_base_path 2021-05-25 18:48:33 +08:00
YuQing 561b7c8d7c upgrade version to V1.1.8 2021-05-25 18:32:08 +08:00
YuQing 02eaad6a89 check required argument 2021-05-20 16:49:53 +08:00
YuQing af7b8010f8 correct type from bool to int :( 2021-05-24 21:11:56 +08:00
YuQing c701e4b1c3 correct macro define: sf_parse_daemon_mode_and_action 2021-05-24 20:53:33 +08:00
YuQing 667fd14abb parse cmd options strictly 2021-05-24 20:33:56 +08:00
YuQing d5fb83a140 change -n to -N for option --no-daemon 2021-05-24 14:40:06 +08:00
YuQing c841d6c1b0 change default values 2021-05-23 21:36:43 +08:00
YuQing 662e2036f6 add function sf_load_cluster_config_by_file 2021-05-22 17:12:27 +08:00
YuQing 4d1ef0ba1a add function sf_parse_cmd_option_bool 2021-05-20 09:55:02 +08:00
YuQing 89cef5e19d sf_usage support print other options 2021-05-19 11:50:09 +08:00
YuQing 808984bf1b sf_parse_daemon_mode_and_action enhancement 2021-05-14 16:57:38 +08:00
YuQing e93f6aa506 set max_pkg_size correctly
output size parameters more gracefully
2021-05-11 12:11:37 +08:00
YuQing e3a37b4e41 Merge branch 'config_simplify' 2021-05-10 20:29:36 +08:00
YuQing 01c2beed8a do NOT set thread name in main thread 2021-04-30 15:04:40 +08:00
YuQing e97d85046e upgrade version to V1.1.7 2021-04-30 11:00:53 +08:00
YuQing 592fb2b866 sf_binlog_writer: change thread name for unity 2021-04-30 10:13:08 +08:00
YuQing eea7adde7c set thread name as necessary 2021-04-29 21:33:05 +08:00
YuQing 7d5e5d2340 merge cluster.conf and servers.conf 2021-04-28 20:25:55 +08:00
YuQing 03b6d7b20a change output of sf_log_network_error_ex1 2021-04-24 18:11:33 +08:00
YuQing 6a624ab719 upgrade version to V1.1.6 2021-04-20 10:54:32 +08:00
YuQing 0d79aaf870 add macro SF_SESSION_ERROR_NOT_EXIST 2021-04-20 10:15:56 +08:00
YuQing 94bcf26518 bugfixed: sf_server_update_prepare_and_check MUST use request->body 2021-04-19 21:43:19 +08:00
YuQing e4d529ab29 define macro SF_CLUSTER_CONFIG_SIGN_LEN 2021-04-19 15:29:28 +08:00
YuQing 1443288d29 add macros SF_PROTO_CLIENT_SET_REQ etc. 2021-04-19 08:58:51 +08:00
YuQing 13a21d5908 add types and macros 2021-04-16 21:44:40 +08:00
YuQing 2792b2f275 sf_load_cluster_config_ex: add param full_cluster_filename 2021-04-12 15:29:46 +08:00
YuQing ee179a16f7 sf_recv_response_header: call SF_PROTO_CHECK_MAGIC 2021-04-12 15:29:01 +08:00
YuQing 68db56790c add function sf_notify_all_threads_ex 2021-04-09 21:26:57 +08:00
YuQing f563bb17ef SFProtoRecvBuffer enhanced (add SFProtoRBufferFixedWrapper) 2021-04-07 14:57:52 +08:00
YuQing b49a3370f5 add sf_cluster_cfg.[hc] 2021-03-31 11:00:03 +08:00
YuQing 9f232770da add sf_recv_vary_response and sf_send_and_recv_vary_response 2021-03-19 18:53:59 +08:00
YuQing a7a8f5af4b change connection_manager extra pointer 2021-03-17 14:32:51 +08:00
YuQing f60fcf0d82 server_expect_body_length etc.: remove useless task parameter 2021-03-16 19:05:50 +08:00
YuQing aa4f66c578 add macro server_expect_body_length etc. 2021-03-11 15:16:43 +08:00
YuQing 86017c46ac add functions sf_proto_init_task_context, sf_proto_deal_task_done etc. 2021-03-10 11:15:09 +08:00
YuQing ae600238bc add ENODATA convert 2021-03-05 11:25:21 +08:00
YuQing 221ae1727d add type SFKeyValueArray 2021-03-03 20:57:00 +08:00
YuQing 0b3c1d189f upgrade version to 1.1.5 2021-02-24 11:19:10 +08:00
YuQing 3616fde9e0 push_to_detect_queue when two sptr arrays equal 2021-02-22 21:50:49 +08:00
YuQing 89dd6733b6 split to prepare and start due to daemon_init 2021-02-22 21:21:25 +08:00
YuQing f3e24601d2 connection manager: support detect server for alive 2021-02-22 19:37:57 +08:00
YuQing f7ac526284 connection_manager support option: bg_thread_enabled 2021-02-21 21:20:36 +08:00
YuQing 47ed8fb46c faststore use this connection manager 2021-02-20 12:49:11 +08:00
YuQing 4aeec5385a sf_connection_manager impl all interfaces 2021-02-19 21:05:38 +08:00
YuQing 85f76e2f47 sf_connection_manager impl get_connection etc. 2021-02-19 16:47:45 +08:00
YuQing 3dc8efde5c impl proto_get_group_servers client side 2021-02-18 16:36:33 +08:00
YuQing 54b464e0f5 add get_group_servers protocol 2021-02-17 20:40:33 +08:00
YuQing 25dacd361f impl connection_manager init and add 2021-02-17 11:18:56 +08:00
YuQing 42715c9be0 add sf_connection_manager.[hc] 2021-02-15 13:26:51 +08:00
YuQing 67b412fb88 bugfixed: MUST set stage to SF_NIO_STAGE_NONE first for re-entry 2021-02-10 21:38:45 +08:00
YuQing 3a4a7069b9 add type SFBinlogWriterStat 2021-02-10 11:33:48 +08:00
YuQing 11165cbb20 log NULL continue_callback with SF_NIO_STAGE_CONTINUE 2021-02-09 22:25:32 +08:00
YuQing 1b4e0ad870 bugfixed: do NOT check order_by in sf_binlog_writer_change_next_version 2021-02-08 17:30:21 +08:00
YuQing 6a5079e0a9 upgrade version to 1.1.4 2021-02-01 10:57:08 +08:00
YuQing 87b2e19766 add macro SF_CLUSTER_ERROR_LEADER_VERSION_INCONSISTENT 2021-01-25 16:21:18 +08:00
YuQing 3659542eba sf_binlog_writer: change order_by gracefully 2021-01-24 22:34:19 +08:00
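
Note on the IPv6 commit above (c619a5336d by sunqiangwei1988): it describes two changes, detecting whether the host has IPv4 and/or IPv6 addresses configured, and making _socket_server accept both families while preferring IPv4 on a dual-stack host. The following is a minimal, hypothetical C sketch of that selection logic using the standard POSIX getaddrinfo() API; it is not the actual sf_service.c implementation, and the helper name choose_address_family is invented for illustration.

/* Hypothetical sketch (NOT the real sf_service.c code): choose the address
 * family for a listening socket, preferring IPv4 when the host is
 * dual-stack, as described in commit c619a5336d. */
#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netdb.h>

/* Returns AF_INET if an IPv4 address is available for host/port,
 * AF_INET6 if only IPv6 is available, or AF_UNSPEC on lookup failure. */
static int choose_address_family(const char *host, const char *port)
{
    struct addrinfo hints;
    struct addrinfo *res, *p;
    int have_v4 = 0, have_v6 = 0;

    memset(&hints, 0, sizeof(hints));
    hints.ai_family = AF_UNSPEC;      /* ask for both IPv4 and IPv6 results */
    hints.ai_socktype = SOCK_STREAM;
    hints.ai_flags = AI_PASSIVE;      /* suitable for bind()/listen() */

    if (getaddrinfo(host, port, &hints, &res) != 0) {
        return AF_UNSPEC;
    }
    for (p = res; p != NULL; p = p->ai_next) {
        if (p->ai_family == AF_INET) {
            have_v4 = 1;
        } else if (p->ai_family == AF_INET6) {
            have_v6 = 1;
        }
    }
    freeaddrinfo(res);

    return have_v4 ? AF_INET : (have_v6 ? AF_INET6 : AF_UNSPEC);
}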
65 changed files with 11789 additions and 1815 deletions

.gitignore (1 line changed)

@@ -31,3 +31,4 @@ src/Makefile
# other
*.swp
*.swo

debian/README.Debian (new file, 3 lines)

@@ -0,0 +1,3 @@
this network service framework library extract from FastDFS

debian/changelog (new file, 198 lines)

@@ -0,0 +1,198 @@
libserverframe (1.2.11-1) unstable; urgency=medium
* upgrade to 1.2.11-1
-- YuQing <384681@qq.com> Sun, 23 Nov 2025 10:48:22 +0000
libserverframe (1.2.11-1) unstable; urgency=medium
* upgrade to 1.2.11-1
-- YuQing <384681@qq.com> Sun, 23 Nov 2025 10:00:56 +0000
libserverframe (1.2.11-1) unstable; urgency=medium
* upgrade to 1.2.11-1
-- YuQing <384681@qq.com> Sun, 23 Nov 2025 09:06:43 +0000
libserverframe (1.2.8-1) unstable; urgency=medium
* upgrade to 1.2.8-1
-- YuQing <384681@qq.com> Sat, 16 Aug 2025 16:32:03 +0000
libserverframe (1.2.7-1) unstable; urgency=medium
* upgrade to 1.2.7-1
-- YuQing <384681@qq.com> Sun, 06 Apr 2025 16:56:40 +0000
libserverframe (1.2.5-1) unstable; urgency=medium
* upgrade to 1.2.5-1
-- YuQing <384681@qq.com> Sun, 29 Sep 2024 15:24:39 +0000
libserverframe (1.2.4-1) unstable; urgency=medium
* upgrade to 1.2.4-1
-- YuQing <384681@qq.com> Sat, 15 Jun 2024 14:45:44 +0000
libserverframe (1.2.3-1) unstable; urgency=medium
* upgrade to 1.2.3-1
-- YuQing <384681@qq.com> Sun, 17 Mar 2024 15:11:04 +0000
libserverframe (1.2.2-1) unstable; urgency=medium
* upgrade to 1.2.2-1
-- YuQing <384681@qq.com> Wed, 31 Jan 2024 12:00:10 +0000
libserverframe (1.2.1-1) unstable; urgency=medium
* upgrade to 1.2.1-1
-- YuQing <384681@qq.com> Mon, 01 Jan 2024 11:24:45 +0000
libserverframe (1.2.0-3) unstable; urgency=medium
* upgrade to 1.2.0-3
-- YuQing <384681@qq.com> Tue, 21 Nov 2023 14:36:16 +0000
libserverframe (1.2.0-2) unstable; urgency=medium
* upgrade to 1.2.0-2
-- YuQing <384681@qq.com> Mon, 20 Nov 2023 13:24:02 +0000
libserverframe (1.2.0-1) unstable; urgency=medium
* upgrade to 1.2.0-1
-- YuQing <384681@qq.com> Sun, 19 Nov 2023 14:46:16 +0000
libserverframe (1.1.29-1) unstable; urgency=medium
* upgrade to 1.1.29-1
-- YuQing <384681@qq.com> Sun, 06 Aug 2023 07:22:46 +0000
libserverframe (1.1.28-1) unstable; urgency=medium
* upgrade to 1.1.28-1
-- YuQing <384681@qq.com> Sun, 23 Jul 2023 14:28:20 +0000
libserverframe (1.1.27-1) unstable; urgency=medium
* upgrade to 1.1.27-1
-- YuQing <384681@qq.com> Sat, 24 Jun 2023 06:51:30 +0000
libserverframe (1.1.26-1) unstable; urgency=medium
* upgrade to 1.1.26-1
-- YuQing <384681@qq.com> Sun, 04 Jun 2023 10:52:11 +0000
libserverframe (1.1.25-1) unstable; urgency=medium
* upgrade to 1.1.25-1
-- YuQing <384681@qq.com> Sat, 18 Feb 2023 05:44:50 +0000
libserverframe (1.1.24-1) unstable; urgency=medium
* upgrade to 1.1.24-1
-- YuQing <384681@qq.com> Sun, 15 Jan 2023 13:50:15 +0000
libserverframe (1.1.22-1) unstable; urgency=medium
* upgrade to 1.1.22-1
-- YuQing <384681@qq.com> Mon, 21 Nov 2022 14:55:50 +0000
libserverframe (1.1.21-1) unstable; urgency=medium
* upgrade to 1.1.21-1
-- YuQing <384681@qq.com> Sat, 08 Oct 2022 13:28:40 +0000
libserverframe (1.1.20-1) unstable; urgency=medium
* upgrade to 1.1.20-1
-- YuQing <384681@qq.com> Thu, 22 Sep 2022 12:22:39 +0000
libserverframe (1.1.18-1) unstable; urgency=medium
* upgrade to 1.1.18-1
-- YuQing <384681@qq.com> Wed, 07 Sep 2022 13:36:38 +0000
libserverframe (1.1.17-1) unstable; urgency=medium
* upgrade to 1.1.17-1
-- YuQing <384681@qq.com> Mon, 15 Aug 2022 13:31:54 +0000
libserverframe (1.1.16-1) unstable; urgency=medium
* upgrade to 1.1.16-1
-- YuQing <384681@qq.com> Mon, 25 Jul 2022 13:52:09 +0000
libserverframe (1.1.15-1) unstable; urgency=medium
* upgrade to 1.1.15-1
-- YuQing <384681@qq.com> Wed, 15 Jun 2022 14:26:27 +0000
libserverframe (1.1.14-1) unstable; urgency=medium
* upgrade to 1.1.14-1
-- YuQing <384681@qq.com> Thu, 28 Apr 2022 11:54:26 +0000
libserverframe (1.1.13-1) unstable; urgency=medium
* add files: sf_iov.[hc]
* iova_slice error detect
* sf_iova_memset impl.
* sf_iova_memset_ex for iov and iovcnt
* sf_iova_memset_ex: add const modifier
* sf_iov.[hc] add function sf_iova_memcpy_ex
* simple_hash rename to fc_simple_hash
* change log level to debug
* add function sf_load_global_base_path
* support function sf_sharding_htable_delete
* sf_load_global_config_ex: server_name can be NULL
* upgrade version to 1.1.13
* make.sh: change DEBUG_FLAG to 0
* add function sf_binlog_writer_get_index_filename
-- YuQing <384681@qq.com> Sun, 13 Mar 2022 16:46:17 +0800
libserverframe (1.1.12-1) unstable; urgency=medium
* upgrade version to 1.1.12
-- YuQing <384681@qq.com> Sat, 15 Jan 2022 20:00:21 +0800
libserverframe (1.1.11-1) unstable; urgency=medium
* upgrade version to 1.1.11
-- YuQing <384681@qq.com> Sun, 26 Dec 2021 21:02:05 +0800
libserverframe (1.1.10-1) unstable; urgency=medium
* fixed somebugs
-- YuQing <384681@qq.com> Sat, 10 Jul 2021 22:29:13 +0800

debian/compat (new file, 1 line)

@@ -0,0 +1 @@
11

debian/control (new file, 25 lines)

@@ -0,0 +1,25 @@
Source: libserverframe
Section: net
Priority: optional
Maintainer: YuQing <384681@qq.com>
Build-Depends: debhelper (>=11~)
 , libfastcommon-dev (>= 1.0.56)
Standards-Version: 4.1.4
Homepage: https://github.com/happyfish100/libserverframe

Package: libserverframe-dev
Section: net
Architecture: any
Multi-Arch: same
Pre-Depends: ${misc:Pre-Depends}
Depends: ${misc:Depends}, libserverframe (= ${binary:Version})
Description: libserverframe (development files)
 This package contains header files.

Package: libserverframe
Section: net
Architecture: any
Multi-Arch: same
Pre-Depends: ${misc:Pre-Depends}
Depends: ${misc:Depends}, ${shlibs:Depends}, libfastcommon (>= ${libfastcommon:Version})
Description: this network service framework library extract from FastDFS

debian/copyright (new file, 680 lines)

@@ -0,0 +1,680 @@
Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
Upstream-Name: libserverframe
Source: https://github.com/happyfish100/libserverframe
Files: *
Copyright: 2020 YuQing <384681@qq.com>
License: AGPL-3.0+
This program is free software: you can use, redistribute, and/or modify
it under the terms of the GNU Affero General Public License, version 3
or later ("AGPL"), as published by the Free Software Foundation.
.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.
.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
# License file: LICENSE
GNU AFFERO GENERAL PUBLIC LICENSE
Version 3, 19 November 2007
.
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
.
Preamble
.
The GNU Affero General Public License is a free, copyleft license for
software and other kinds of works, specifically designed to ensure
cooperation with the community in the case of network server software.
.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
our General Public Licenses are intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users.
.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
.
Developers that use our General Public Licenses protect your rights
with two steps: (1) assert copyright on the software, and (2) offer
you this License which gives you legal permission to copy, distribute
and/or modify the software.
.
A secondary benefit of defending all users' freedom is that
improvements made in alternate versions of the program, if they
receive widespread use, become available for other developers to
incorporate. Many developers of free software are heartened and
encouraged by the resulting cooperation. However, in the case of
software used on network servers, this result may fail to come about.
The GNU General Public License permits making a modified version and
letting the public access it on a server without ever releasing its
source code to the public.
.
The GNU Affero General Public License is designed specifically to
ensure that, in such cases, the modified source code becomes available
to the community. It requires the operator of a network server to
provide the source code of the modified version running there to the
users of that server. Therefore, public use of a modified version, on
a publicly accessible server, gives the public access to the source
code of the modified version.
.
An older license, called the Affero General Public License and
published by Affero, was designed to accomplish similar goals. This is
a different license, not a version of the Affero GPL, but Affero has
released a new version of the Affero GPL which permits relicensing under
this license.
.
The precise terms and conditions for copying, distribution and
modification follow.
.
TERMS AND CONDITIONS
.
0. Definitions.
.
"This License" refers to version 3 of the GNU Affero General Public License.
.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
.
A "covered work" means either the unmodified Program or a work based
on the Program.
.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
.
1. Source Code.
.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
.
The Corresponding Source for a work in source code form is that
same work.
.
2. Basic Permissions.
.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
.
4. Conveying Verbatim Copies.
.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
.
5. Conveying Modified Source Versions.
.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
.
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
.
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
.
6. Conveying Non-Source Forms.
.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
.
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
.
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
.
7. Additional Terms.
.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
.
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
.
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
.
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
.
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
.
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
.
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
.
8. Termination.
.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
.
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
.
9. Acceptance Not Required for Having Copies.
.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
.
10. Automatic Licensing of Downstream Recipients.
.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
.
11. Patents.
.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
.
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
.
12. No Surrender of Others' Freedom.
.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
.
13. Remote Network Interaction; Use with the GNU General Public License.
.
Notwithstanding any other provision of this License, if you modify the
Program, your modified version must prominently offer all users
interacting with it remotely through a computer network (if your version
supports such interaction) an opportunity to receive the Corresponding
Source of your version by providing access to the Corresponding Source
from a network server at no charge, through some standard or customary
means of facilitating copying of software. This Corresponding Source
shall include the Corresponding Source for any work covered by version 3
of the GNU General Public License that is incorporated pursuant to the
following paragraph.
.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the work with which it is combined will remain governed by version
3 of the GNU General Public License.
.
14. Revised Versions of this License.
.
The Free Software Foundation may publish revised and/or new versions of
the GNU Affero General Public License from time to time. Such new versions
will be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU Affero General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU Affero General Public License, you may choose any version ever published
by the Free Software Foundation.
.
If the Program specifies that a proxy can decide which future
versions of the GNU Affero General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
.
15. Disclaimer of Warranty.
.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
.
16. Limitation of Liability.
.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
.
17. Interpretation of Sections 15 and 16.
.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
.
END OF TERMS AND CONDITIONS
.
How to Apply These Terms to Your New Programs
.
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
.
Also add information on how to contact you by electronic and paper mail.
.
If your software can interact with users remotely through a computer
network, you should also make sure that it provides a way for users to
get its source. For example, if your program is a web application, its
interface could display a "Source" link that leads users to an archive
of the code. There are many ways you could offer source, and different
solutions will be better for different programs; see section 13 for the
specific requirements.
.
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU AGPL, see
<https://www.gnu.org/licenses/>.
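
As a concrete illustration of attaching these notices, a C project would normally place them as a comment block at the top of each source file. The sketch below is hypothetical: the file name, year, and author are placeholders, not taken from this repository.

/*
 * example_server.c -- hypothetical example source file
 *
 * Copyright (C) 2025  Example Author
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Affero General Public License for more details.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program.  If not, see <https://www.gnu.org/licenses/>.
 */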

debian/libserverframe-dev.install vendored Normal file

@ -0,0 +1 @@
usr/include/sf/*

debian/libserverframe.install vendored Normal file

@ -0,0 +1 @@
usr/lib/libserverframe.so*

debian/rules vendored Executable file

@ -0,0 +1,17 @@
#!/usr/bin/make -f
export DESTDIR = $(CURDIR)/debian/tmp
%:
dh $@
override_dh_auto_build:
./make.sh clean && ./make.sh
override_dh_auto_install:
./make.sh install
dh_auto_install
.PHONY: override_dh_gencontrol
override_dh_gencontrol:
dh_gencontrol -- -Tdebian/substvars

debian/source/format vendored Normal file

@ -0,0 +1 @@
3.0 (quilt)

debian/substvars vendored Normal file

@ -0,0 +1 @@
libfastcommon:Version=1.0.83

debian/watch vendored Normal file

@ -0,0 +1,3 @@
version=3
opts="mode=git" https://github.com/happyfish100/libserverframe.git \
refs/tags/v([\d\.]+) debian uupdate


@ -1,9 +1,8 @@
%define LibserverframeDevel libserverframe-devel
%define LibserverframeDebuginfo libserverframe-debuginfo
%define CommitVersion %(echo $COMMIT_VERSION)
Name: libserverframe
Version: 1.1.3
Version: 1.2.11
Release: 1%{?dist}
Summary: network framework library
License: AGPL v3.0
@ -11,11 +10,11 @@ Group: Arch/Tech
URL: http://github.com/happyfish100/libserverframe/
Source: http://github.com/happyfish100/libserverframe/%{name}-%{version}.tar.gz
BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n)
BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n)
BuildRequires: libfastcommon-devel >= 1.0.47
BuildRequires: libfastcommon-devel >= 1.0.83
Requires: %__cp %__mv %__chmod %__grep %__mkdir %__install %__id
Requires: libfastcommon >= 1.0.47
Requires: libfastcommon >= 1.0.83
%description
common framework library

make.sh

@ -5,18 +5,32 @@ TARGET_PREFIX=$DESTDIR/usr
TARGET_CONF_PATH=$DESTDIR/etc
LIB_VERSION=lib64
DEBUG_FLAG=1
DEBUG_FLAG=0
if [ -f /usr/include/fastcommon/_os_define.h ]; then
OS_BITS=$(fgrep OS_BITS /usr/include/fastcommon/_os_define.h | awk '{print $NF;}')
OS_BITS=$(grep -F OS_BITS /usr/include/fastcommon/_os_define.h | awk '{print $NF;}')
USE_URING=$(grep -F IOEVENT_USE_URING /usr/include/fastcommon/_os_define.h | awk '{print $NF;}')
elif [ -f /usr/local/include/fastcommon/_os_define.h ]; then
OS_BITS=$(fgrep OS_BITS /usr/local/include/fastcommon/_os_define.h | awk '{print $NF;}')
OS_BITS=$(grep -F OS_BITS /usr/local/include/fastcommon/_os_define.h | awk '{print $NF;}')
USE_URING=$(grep -F IOEVENT_USE_URING /usr/local/include/fastcommon/_os_define.h | awk '{print $NF;}')
else
OS_BITS=64
USE_URING=''
fi
uname=$(uname)
if [ "$OS_BITS" -eq 64 ]; then
LIB_VERSION=lib64
if [ $uname = 'Linux' ]; then
osname=$(cat /etc/os-release | grep -w NAME | awk -F '=' '{print $2;}' | \
awk -F '"' '{if (NF==3) {print $2} else {print $1}}' | awk '{print $1}')
if [ $osname = 'Ubuntu' -o $osname = 'Debian' ]; then
LIB_VERSION=lib
else
LIB_VERSION=lib64
fi
else
LIB_VERSION=lib
fi
else
LIB_VERSION=lib
fi
@ -31,13 +45,16 @@ CFLAGS="$CFLAGS -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE"
if [ "$DEBUG_FLAG" = "1" ]; then
CFLAGS="$CFLAGS -g -O1 -DDEBUG_FLAG"
else
CFLAGS="$CFLAGS -O3"
CFLAGS="$CFLAGS -g -O3"
fi
LIBS=''
uname=$(uname)
if [ "$uname" = "Linux" ]; then
CFLAGS="$CFLAGS"
if [ -n "$USE_URING" ]; then
LIBS="$LIBS -luring"
fi
elif [ "$uname" = "FreeBSD" ] || [ "$uname" = "Darwin" ]; then
CFLAGS="$CFLAGS"
if [ "$uname" = "Darwin" ]; then
@ -66,19 +83,19 @@ elif [ "$uname" = "HP-UX" ]; then
fi
elif [ "$uname" = "FreeBSD" ]; then
if [ -f /usr/lib/libc_r.so ]; then
line=$(nm -D /usr/lib/libc_r.so | grep pthread_create | grep -w T)
line=$(nm -D /usr/lib/libc_r.so | grep -F pthread_create | grep -w T)
if [ $? -eq 0 ]; then
LIBS="$LIBS -lc_r"
have_pthread=1
fi
elif [ -f /lib64/libc_r.so ]; then
line=$(nm -D /lib64/libc_r.so | grep pthread_create | grep -w T)
line=$(nm -D /lib64/libc_r.so | grep -F pthread_create | grep -w T)
if [ $? -eq 0 ]; then
LIBS="$LIBS -lc_r"
have_pthread=1
fi
elif [ -f /usr/lib64/libc_r.so ]; then
line=$(nm -D /usr/lib64/libc_r.so | grep pthread_create | grep -w T)
line=$(nm -D /usr/lib64/libc_r.so | grep -F pthread_create | grep -w T)
if [ $? -eq 0 ]; then
LIBS="$LIBS -lc_r"
have_pthread=1
@ -87,7 +104,7 @@ elif [ "$uname" = "FreeBSD" ]; then
fi
if [ $have_pthread -eq 0 ] && [ "$uname" = "Linux" ]; then
/sbin/ldconfig -p | fgrep libpthread.so > /dev/null
/sbin/ldconfig -p | grep -w libpthread.so > /dev/null
if [ $? -eq 0 ]; then
LIBS="$LIBS -lpthread"
else
@ -113,8 +130,15 @@ sed_replace()
fi
}
cd src
cd src/include
link=$(readlink sf)
if [ $? -ne 0 ] || [ "$link" != '..' -a "$link" != '../' ]; then
ln -sf .. sf
fi
cd ..
cp Makefile.in Makefile
sed_replace "s#\\\$(CC)#gcc#g" Makefile
sed_replace "s#\\\$(CFLAGS)#$CFLAGS#g" Makefile
sed_replace "s#\\\$(LIBS)#$LIBS#g" Makefile
sed_replace "s#\\\$(TARGET_PREFIX)#$TARGET_PREFIX#g" Makefile


@ -6,14 +6,20 @@ LIB_PATH = $(LIBS) -lfastcommon
TARGET_LIB = $(TARGET_PREFIX)/$(LIB_VERSION)
TOP_HEADERS = sf_types.h sf_global.h sf_define.h sf_nio.h sf_service.h \
sf_func.h sf_util.h sf_configs.h sf_proto.h sf_binlog_writer.h \
sf_sharding_htable.h
sf_func.h sf_util.h sf_configs.h sf_proto.h sf_cluster_cfg.h \
sf_sharding_htable.h sf_connection_manager.h sf_serializer.h \
sf_binlog_index.h sf_file_writer.h sf_binlog_writer.h \
sf_ordered_writer.h sf_buffered_writer.h sf_iov.h \
sf_shared_mbuffer.h
IDEMP_COMMON_HEADER = idempotency/common/idempotency_types.h
IDEMP_SERVER_HEADER = idempotency/server/server_types.h \
idempotency/server/server_channel.h \
idempotency/server/request_htable.h \
idempotency/server/channel_htable.h \
idempotency/server/server_handler.h
idempotency/server/server_handler.h \
idempotency/server/request_metadata.h
IDEMP_CLIENT_HEADER = idempotency/client/client_types.h \
idempotency/client/receipt_handler.h \
@ -22,14 +28,19 @@ IDEMP_CLIENT_HEADER = idempotency/client/client_types.h \
ALL_HEADERS = $(TOP_HEADERS) $(IDEMP_SERVER_HEADER) $(IDEMP_CLIENT_HEADER)
SHARED_OBJS = sf_nio.lo sf_service.lo sf_global.lo \
SHARED_OBJS = sf_nio.lo sf_iov.lo sf_service.lo sf_global.lo \
sf_func.lo sf_util.lo sf_configs.lo sf_proto.lo \
sf_binlog_writer.lo sf_sharding_htable.lo \
idempotency/server/server_channel.lo \
idempotency/server/request_htable.lo \
idempotency/server/channel_htable.lo \
idempotency/server/server_handler.lo \
idempotency/client/receipt_handler.lo \
sf_sharding_htable.lo sf_cluster_cfg.lo \
sf_connection_manager.lo sf_serializer.lo \
sf_binlog_index.lo sf_file_writer.lo \
sf_binlog_writer.lo sf_ordered_writer.lo \
sf_shared_mbuffer.lo \
idempotency/server/server_channel.lo \
idempotency/server/request_htable.lo \
idempotency/server/channel_htable.lo \
idempotency/server/server_handler.lo \
idempotency/server/request_metadata.lo \
idempotency/client/receipt_handler.lo \
idempotency/client/client_channel.lo
ALL_OBJS = $(SHARED_OBJS)
@ -50,16 +61,18 @@ libserverframe.so: $(SHARED_OBJS)
install:
mkdir -p $(TARGET_LIB)
mkdir -p $(TARGET_PREFIX)/lib
mkdir -p $(TARGET_PREFIX)/include/sf/idempotency/common
mkdir -p $(TARGET_PREFIX)/include/sf/idempotency/server
mkdir -p $(TARGET_PREFIX)/include/sf/idempotency/client
install -m 755 $(ALL_LIBS) $(TARGET_LIB)
cp -f $(TOP_HEADERS) $(TARGET_PREFIX)/include/sf
cp -f $(IDEMP_COMMON_HEADER) $(TARGET_PREFIX)/include/sf/idempotency/common
cp -f $(IDEMP_SERVER_HEADER) $(TARGET_PREFIX)/include/sf/idempotency/server
cp -f $(IDEMP_CLIENT_HEADER) $(TARGET_PREFIX)/include/sf/idempotency/client
@BUILDROOT=$$(echo "$(TARGET_PREFIX)" | grep BUILDROOT); \
if [ -z "$$BUILDROOT" ] && [ ! -e $(TARGET_PREFIX)/lib/libserverframe.so ]; then ln -s $(TARGET_LIB)/libserverframe.so $(TARGET_PREFIX)/lib/libserverframe.so; fi
if [ -z "$$BUILDROOT" ] && [ "$(TARGET_LIB)" != "$(TARGET_PREFIX)/lib" ]; then ln -sf $(TARGET_LIB)/libserverframe.so $(TARGET_PREFIX)/lib/libserverframe.so; fi
clean:
rm -f $(ALL_OBJS) $(ALL_LIBS) $(ALL_PRGS)


@ -31,11 +31,11 @@
#include "fastcommon/pthread_func.h"
#include "fastcommon/sched_thread.h"
#include "fastcommon/fc_queue.h"
#include "../../sf_util.h"
#include "../../sf_func.h"
#include "../../sf_nio.h"
#include "../../sf_global.h"
#include "../../sf_service.h"
#include "sf/sf_util.h"
#include "sf/sf_func.h"
#include "sf/sf_nio.h"
#include "sf/sf_global.h"
#include "sf/sf_service.h"
#include "client_channel.h"
typedef struct {
@ -129,13 +129,13 @@ static int idempotency_channel_alloc_init(void *element, void *args)
channel = (IdempotencyClientChannel *)element;
if ((result=fast_mblock_init_ex1(&channel->receipt_allocator,
"idempotency_receipt", sizeof(IdempotencyClientReceipt),
"idempotency-receipt", sizeof(IdempotencyClientReceipt),
1024, 0, NULL, NULL, true)) != 0)
{
return result;
}
if ((result=init_pthread_lock_cond_pair(&channel->lc_pair)) != 0) {
if ((result=init_pthread_lock_cond_pair(&channel->lcp)) != 0) {
return result;
}
@ -153,7 +153,7 @@ int client_channel_init(IniFullContext *ini_ctx)
}
if ((result=fast_mblock_init_ex1(&channel_context.channel_allocator,
"channel_info", sizeof(IdempotencyClientChannel),
"channel-info", sizeof(IdempotencyClientChannel),
64, 0, idempotency_channel_alloc_init, NULL, true)) != 0)
{
return result;
@ -171,17 +171,30 @@ void client_channel_destroy()
}
static struct fast_task_info *alloc_channel_task(IdempotencyClientChannel
*channel, const uint32_t hash_code, const char *server_ip,
const uint16_t port, int *err_no)
*channel, const uint32_t hash_code, const FCCommunicationType comm_type,
const char *server_ip, const uint16_t port, int *err_no)
{
struct fast_task_info *task;
SFAddressFamilyHandler *fh;
SFNetworkHandler *handler;
if ((task=sf_alloc_init_task(&g_sf_context, -1)) == NULL) {
if (is_ipv6_addr(server_ip)) {
fh = g_sf_context.handlers + SF_IPV6_ADDRESS_FAMILY_INDEX;
} else {
fh = g_sf_context.handlers + SF_IPV4_ADDRESS_FAMILY_INDEX;
}
if (comm_type == fc_comm_type_sock) {
handler = fh->handlers + SF_SOCKET_NETWORK_HANDLER_INDEX;
} else {
handler = fh->handlers + SF_RDMACM_NETWORK_HANDLER_INDEX;
}
if ((task=sf_alloc_init_task(handler, -1)) == NULL) {
*err_no = ENOMEM;
return NULL;
}
snprintf(task->server_ip, sizeof(task->server_ip), "%s", server_ip);
fc_safe_strcpy(task->server_ip, server_ip);
task->port = port;
task->arg = channel;
task->thread_data = g_sf_context.thread_data +
@ -190,7 +203,8 @@ static struct fast_task_info *alloc_channel_task(IdempotencyClientChannel
channel->last_connect_time = g_current_time;
if ((*err_no=sf_nio_notify(task, SF_NIO_STAGE_CONNECT)) != 0) {
channel->in_ioevent = 0; //rollback
sf_release_task(task);
__sync_sub_and_fetch(&task->reffer_count, 1);
free_queue_push(task);
return NULL;
}
return task;
@ -200,6 +214,15 @@ int idempotency_client_channel_check_reconnect(
IdempotencyClientChannel *channel)
{
int result;
char formatted_ip[FORMATTED_IP_SIZE];
#if IOEVENT_USE_URING
struct fast_task_info *task;
task = channel->task;
if (SF_CTX->use_io_uring && FC_ATOMIC_GET(task->reffer_count) > 1) {
return 0;
}
#endif
if (!__sync_bool_compare_and_swap(&channel->in_ioevent, 0, 1)) {
return 0;
@ -210,11 +233,16 @@ int idempotency_client_channel_check_reconnect(
channel->last_connect_time = g_current_time;
}
logDebug("file: "__FILE__", line: %d, "
"trigger connect to server %s:%u",
__LINE__, channel->task->server_ip,
channel->task->port);
if (FC_LOG_BY_LEVEL(LOG_DEBUG)) {
format_ip_address(channel->task->server_ip, formatted_ip);
logDebug("file: "__FILE__", line: %d, "
"trigger connect to server %s:%u", __LINE__,
formatted_ip, channel->task->port);
}
if (channel->task->event.fd >= 0) {
channel->task->handler->close_connection(channel->task);
}
__sync_bool_compare_and_swap(&channel->task->canceled, 1, 0);
if ((result=sf_nio_notify(channel->task, SF_NIO_STAGE_CONNECT)) == 0) {
channel->last_connect_time = g_current_time;
@ -226,8 +254,8 @@ int idempotency_client_channel_check_reconnect(
}
struct idempotency_client_channel *idempotency_client_channel_get(
const char *server_ip, const uint16_t server_port,
const int timeout, int *err_no)
const FCCommunicationType comm_type, const char *server_ip,
const uint16_t server_port, const int timeout, int *err_no)
{
int r;
int key_len;
@ -239,8 +267,11 @@ struct idempotency_client_channel *idempotency_client_channel_get(
IdempotencyClientChannel *current;
IdempotencyClientChannel *channel;
key_len = snprintf(key, sizeof(key), "%s_%u", server_ip, server_port);
hash_code = simple_hash(key, key_len);
key_len = strlen(server_ip);
memcpy(key, server_ip, key_len);
*(key + key_len++) = '-';
key_len += fc_itoa(server_port, key + key_len);
hash_code = fc_simple_hash(key, key_len);
bucket = channel_context.htable.buckets +
hash_code % channel_context.htable.capacity;
previous = NULL;
@ -277,8 +308,8 @@ struct idempotency_client_channel *idempotency_client_channel_get(
break;
}
channel->task = alloc_channel_task(channel,
hash_code, server_ip, server_port, err_no);
channel->task = alloc_channel_task(channel, hash_code,
comm_type, server_ip, server_port, err_no);
if (channel->task == NULL) {
fast_mblock_free_object(&channel_context.
channel_allocator, channel);
@ -323,8 +354,8 @@ int idempotency_client_channel_push(struct idempotency_client_channel *channel,
receipt->req_id = req_id;
fc_queue_push_ex(&channel->queue, receipt, &notify);
if (notify) {
if (__sync_add_and_fetch(&channel->in_ioevent, 0)) {
if (__sync_add_and_fetch(&channel->established, 0)) {
if (FC_ATOMIC_GET(channel->in_ioevent)) {
if (FC_ATOMIC_GET(channel->established)) {
sf_nio_notify(channel->task, SF_NIO_STAGE_CONTINUE);
}
} else {


@ -22,6 +22,7 @@
#include "fastcommon/pthread_func.h"
#include "fastcommon/sched_thread.h"
#include "fastcommon/fc_atomic.h"
#include "sf/sf_types.h"
#include "client_types.h"
#ifdef __cplusplus
@ -40,13 +41,14 @@ void idempotency_client_channel_config_to_string_ex(
char *output, const int size, const bool add_comma);
struct idempotency_client_channel *idempotency_client_channel_get(
const char *server_ip, const uint16_t server_port,
const int timeout, int *err_no);
const FCCommunicationType comm_type, const char *server_ip,
const uint16_t server_port, const int timeout, int *err_no);
static inline uint64_t idempotency_client_channel_next_seq_id(
struct idempotency_client_channel *channel)
{
return __sync_add_and_fetch(&channel->next_req_id, 1);
return SF_IDEMPOTENCY_NEXT_REQ_ID(channel->server_id,
channel->id, FC_ATOMIC_INC(channel->next_seq));
}
int idempotency_client_channel_push(struct idempotency_client_channel *channel,
@ -74,13 +76,28 @@ static inline void idempotency_client_channel_set_id_key(
static inline int idempotency_client_channel_check_wait_ex(
struct idempotency_client_channel *channel, const int timeout)
{
if (__sync_add_and_fetch(&channel->established, 0)) {
if (FC_ATOMIC_GET(channel->established)) {
return 0;
}
idempotency_client_channel_check_reconnect(channel);
lcp_timedwait_sec(&channel->lc_pair, timeout);
return __sync_add_and_fetch(&channel->established, 0) ? 0 : ETIMEDOUT;
lcp_timedwait_sec(&channel->lcp, timeout);
if (FC_ATOMIC_GET(channel->established)) {
return 0;
} else {
/*
char formatted_ip[FORMATTED_IP_SIZE];
format_ip_address(channel->task->server_ip, formatted_ip);
logInfo("file: "__FILE__", line: %d, "
"channel_check fail, server %s:%u, in_ioevent: %d, "
"canceled: %d, req count: %"PRId64, __LINE__,
formatted_ip, channel->task->port,
__sync_add_and_fetch(&channel->in_ioevent, 0),
__sync_add_and_fetch(&channel->task->canceled, 0),
channel->task->req_count);
*/
return ETIMEDOUT;
}
}
#ifdef __cplusplus


@ -21,6 +21,7 @@
#include "fastcommon/fast_mblock.h"
#include "fastcommon/fc_list.h"
#include "fastcommon/fc_queue.h"
#include "sf/idempotency/common/idempotency_types.h"
typedef struct idempotency_client_config {
bool enabled;
@ -40,11 +41,12 @@ typedef struct idempotency_client_channel {
volatile char in_ioevent;
volatile char established;
int buffer_size; //the min task size of the server and mine
uint32_t server_id;
volatile uint32_t next_seq;
time_t last_connect_time; //for connect frequency control
time_t last_pkg_time; //last communication time
time_t last_report_time; //last report time for rpc receipt
pthread_lock_cond_pair_t lc_pair; //for channel valid check and notify
volatile uint64_t next_req_id;
pthread_lock_cond_pair_t lcp; //for channel valid check and notify
struct fast_mblock_man receipt_allocator;
struct fast_task_info *task;
struct fc_queue queue;
@ -61,6 +63,14 @@ typedef struct idempotency_receipt_thread_context {
} last_check_times;
} IdempotencyReceiptThreadContext;
typedef struct idempotency_receipt_global_vars {
struct {
int task_padding_size;
sf_init_connection_callback init_connection;
} rdma;
IdempotencyReceiptThreadContext *thread_contexts;
} IdempotencyReceiptGlobalVars;
#ifdef __cplusplus
extern "C" {
#endif


@ -41,23 +41,35 @@
#include "client_channel.h"
#include "receipt_handler.h"
static IdempotencyReceiptThreadContext *receipt_thread_contexts = NULL;
static IdempotencyReceiptGlobalVars receipt_global_vars;
static int receipt_init_task(struct fast_task_info *task)
#define RECEIPT_THREAD_CONTEXTS receipt_global_vars.thread_contexts
#define TASK_PADDING_SIZE receipt_global_vars.rdma.task_padding_size
#define RDMA_INIT_CONNECTION receipt_global_vars.rdma.init_connection
static int receipt_init_task(struct fast_task_info *task, void *arg)
{
task->connect_timeout = SF_G_CONNECT_TIMEOUT; //for client side
task->network_timeout = SF_G_NETWORK_TIMEOUT;
return 0;
#if IOEVENT_USE_URING
FC_URING_IS_CLIENT(task) = true;
#endif
if (RDMA_INIT_CONNECTION != NULL) {
return RDMA_INIT_CONNECTION(task, arg);
} else {
return 0;
}
}
static int receipt_recv_timeout_callback(struct fast_task_info *task)
{
IdempotencyClientChannel *channel;
char formatted_ip[FORMATTED_IP_SIZE];
format_ip_address(task->server_ip, formatted_ip);
if (SF_NIO_TASK_STAGE_FETCH(task) == SF_NIO_STAGE_CONNECT) {
logError("file: "__FILE__", line: %d, "
"connect to server %s:%u timeout",
__LINE__, task->server_ip, task->port);
__LINE__, formatted_ip, task->port);
return ETIMEDOUT;
}
@ -65,11 +77,13 @@ static int receipt_recv_timeout_callback(struct fast_task_info *task)
if (channel->waiting_resp_qinfo.head != NULL) {
logError("file: "__FILE__", line: %d, "
"waiting receipt response from server %s:%u timeout",
__LINE__, task->server_ip, task->port);
__LINE__, formatted_ip, task->port);
} else {
logError("file: "__FILE__", line: %d, "
"communication with server %s:%u timeout",
__LINE__, task->server_ip, task->port);
"%s server %s:%u timeout, channel established: %d",
__LINE__, task->nio_stages.current == SF_NIO_STAGE_SEND ?
"send to" : "recv from", formatted_ip, task->port,
FC_ATOMIC_GET(channel->established));
}
return ETIMEDOUT;
@ -78,22 +92,27 @@ static int receipt_recv_timeout_callback(struct fast_task_info *task)
static void receipt_task_finish_cleanup(struct fast_task_info *task)
{
IdempotencyClientChannel *channel;
char formatted_ip[FORMATTED_IP_SIZE];
if (task->event.fd >= 0) {
sf_task_detach_thread(task);
close(task->event.fd);
task->event.fd = -1;
}
channel = (IdempotencyClientChannel *)task->arg;
sf_nio_reset_task_length(task);
task->req_count = 0;
task->pending_send_count = 0;
channel = (IdempotencyClientChannel *)task->arg;
fc_list_del_init(&channel->dlink);
__sync_bool_compare_and_swap(&channel->established, 1, 0);
__sync_bool_compare_and_swap(&channel->in_ioevent, 1, 0);
logDebug("file: "__FILE__", line: %d, "
"receipt task for server %s:%u exit",
__LINE__, task->server_ip, task->port);
if (FC_LOG_BY_LEVEL(LOG_DEBUG)) {
format_ip_address(task->server_ip, formatted_ip);
logDebug("file: "__FILE__", line: %d, "
"receipt task for server %s:%u exit",
__LINE__, formatted_ip, task->port);
}
}
static void setup_channel_request(struct fast_task_info *task)
@ -103,14 +122,15 @@ static void setup_channel_request(struct fast_task_info *task)
SFProtoSetupChannelReq *req;
channel = (IdempotencyClientChannel *)task->arg;
header = (SFCommonProtoHeader *)task->data;
header = (SFCommonProtoHeader *)task->send.ptr->data;
req = (SFProtoSetupChannelReq *)(header + 1);
int2buff(__sync_add_and_fetch(&channel->id, 0), req->channel_id);
int2buff(__sync_add_and_fetch(&channel->key, 0), req->key);
SF_PROTO_SET_HEADER(header, SF_SERVICE_PROTO_SETUP_CHANNEL_REQ,
sizeof(SFProtoSetupChannelReq));
task->length = sizeof(SFCommonProtoHeader) + sizeof(SFProtoSetupChannelReq);
task->send.ptr->length = sizeof(SFCommonProtoHeader) +
sizeof(SFProtoSetupChannelReq);
sf_send_add_event(task);
}
@ -131,15 +151,16 @@ static int check_report_req_receipt(struct fast_task_info *task)
return 0;
}
fc_queue_pop_to_queue(&channel->queue, &channel->waiting_resp_qinfo);
fc_queue_try_pop_to_queue(&channel->queue,
&channel->waiting_resp_qinfo);
if (channel->waiting_resp_qinfo.head == NULL) {
return 0;
}
header = (SFCommonProtoHeader *)task->data;
header = (SFCommonProtoHeader *)task->send.ptr->data;
rheader = (SFProtoReportReqReceiptHeader *)(header + 1);
rbody = rstart = (SFProtoReportReqReceiptBody *)(rheader + 1);
buff_end = task->data + channel->buffer_size;
buff_end = task->send.ptr->data + channel->buffer_size;
last = NULL;
receipt = channel->waiting_resp_qinfo.head;
do {
@ -169,8 +190,9 @@ static int check_report_req_receipt(struct fast_task_info *task)
count = rbody - rstart;
int2buff(count, rheader->count);
task->length = (char *)rbody - task->data;
int2buff(task->length - sizeof(SFCommonProtoHeader), header->body_len);
task->send.ptr->length = (char *)rbody - task->send.ptr->data;
int2buff(task->send.ptr->length - sizeof(SFCommonProtoHeader),
header->body_len);
header->cmd = SF_SERVICE_PROTO_REPORT_REQ_RECEIPT_REQ;
sf_send_add_event(task);
return count;
@ -184,18 +206,18 @@ static void close_channel_request(struct fast_task_info *task)
channel = (IdempotencyClientChannel *)task->arg;
idempotency_client_channel_set_id_key(channel, 0, 0);
header = (SFCommonProtoHeader *)task->data;
header = (SFCommonProtoHeader *)task->send.ptr->data;
SF_PROTO_SET_HEADER(header, SF_SERVICE_PROTO_CLOSE_CHANNEL_REQ, 0);
task->length = sizeof(SFCommonProtoHeader);
task->send.ptr->length = sizeof(SFCommonProtoHeader);
sf_send_add_event(task);
}
static void active_test_request(struct fast_task_info *task)
{
SFCommonProtoHeader *header;
header = (SFCommonProtoHeader *)task->data;
header = (SFCommonProtoHeader *)task->send.ptr->data;
SF_PROTO_SET_HEADER(header, SF_PROTO_ACTIVE_TEST_REQ, 0);
task->length = sizeof(SFCommonProtoHeader);
task->send.ptr->length = sizeof(SFCommonProtoHeader);
sf_send_add_event(task);
}
@ -223,17 +245,22 @@ static void report_req_receipt_request(struct fast_task_info *task,
if (update_lru) {
update_lru_chain(task);
}
task->pending_send_count++;
}
}
static inline int receipt_expect_body_length(struct fast_task_info *task,
const int expect_body_len)
{
if ((int)(task->length - sizeof(SFCommonProtoHeader)) != expect_body_len) {
int body_len;
char formatted_ip[FORMATTED_IP_SIZE];
body_len = task->recv.ptr->length - sizeof(SFCommonProtoHeader);
if (body_len != expect_body_len) {
format_ip_address(task->server_ip, formatted_ip);
logError("file: "__FILE__", line: %d, "
"server %s:%u, response body length: %d != %d",
__LINE__, task->server_ip, task->port, (int)(task->length -
sizeof(SFCommonProtoHeader)), expect_body_len);
"server %s:%u, response body length: %d != %d", __LINE__,
formatted_ip, task->port, body_len, expect_body_len);
return EINVAL;
}
@ -246,6 +273,7 @@ static int deal_setup_channel_response(struct fast_task_info *task)
IdempotencyReceiptThreadContext *thread_ctx;
SFProtoSetupChannelResp *resp;
IdempotencyClientChannel *channel;
char formatted_ip[FORMATTED_IP_SIZE];
int channel_id;
int channel_key;
int buffer_size;
@ -257,28 +285,30 @@ static int deal_setup_channel_response(struct fast_task_info *task)
}
channel = (IdempotencyClientChannel *)task->arg;
if (__sync_add_and_fetch(&channel->established, 0)) {
if (FC_ATOMIC_GET(channel->established)) {
format_ip_address(task->server_ip, formatted_ip);
logWarning("file: "__FILE__", line: %d, "
"response from server %s:%u, unexpected cmd: "
"SETUP_CHANNEL_RESP, ignore it!",
__LINE__, task->server_ip, task->port);
__LINE__, formatted_ip, task->port);
return 0;
}
resp = (SFProtoSetupChannelResp *)(task->data + sizeof(SFCommonProtoHeader));
resp = (SFProtoSetupChannelResp *)SF_PROTO_RECV_BODY(task);
channel_id = buff2int(resp->channel_id);
channel_key = buff2int(resp->key);
buffer_size = buff2int(resp->buffer_size);
channel->server_id = buff2int(resp->server_id);
idempotency_client_channel_set_id_key(channel, channel_id, channel_key);
if (__sync_bool_compare_and_swap(&channel->established, 0, 1)) {
thread_ctx = (IdempotencyReceiptThreadContext *)task->thread_data->arg;
fc_list_add_tail(&channel->dlink, &thread_ctx->head);
}
channel->buffer_size = FC_MIN(buffer_size, task->size);
channel->buffer_size = FC_MIN(buffer_size, task->send.ptr->size);
PTHREAD_MUTEX_LOCK(&channel->lc_pair.lock);
pthread_cond_broadcast(&channel->lc_pair.cond);
PTHREAD_MUTEX_UNLOCK(&channel->lc_pair.lock);
PTHREAD_MUTEX_LOCK(&channel->lcp.lock);
pthread_cond_broadcast(&channel->lcp.cond);
PTHREAD_MUTEX_UNLOCK(&channel->lcp.lock);
if (channel->waiting_resp_qinfo.head != NULL) {
bool notify;
@ -297,6 +327,7 @@ static inline int deal_report_req_receipt_response(struct fast_task_info *task)
IdempotencyClientChannel *channel;
IdempotencyClientReceipt *current;
IdempotencyClientReceipt *deleted;
char formatted_ip[FORMATTED_IP_SIZE];
if ((result=receipt_expect_body_length(task, 0)) != 0) {
return result;
@ -304,13 +335,15 @@ static inline int deal_report_req_receipt_response(struct fast_task_info *task)
channel = (IdempotencyClientChannel *)task->arg;
if (channel->waiting_resp_qinfo.head == NULL) {
format_ip_address(task->server_ip, formatted_ip);
logWarning("file: "__FILE__", line: %d, "
"response from server %s:%u, unexpect cmd: "
"REPORT_REQ_RECEIPT_RESP", __LINE__,
task->server_ip, task->port);
return 0;
formatted_ip, task->port);
return EINVAL;
}
task->pending_send_count--;
current = channel->waiting_resp_qinfo.head;
do {
deleted = current;
@ -327,40 +360,46 @@ static inline int deal_report_req_receipt_response(struct fast_task_info *task)
static int receipt_deal_task(struct fast_task_info *task, const int stage)
{
int result;
SFCommonProtoHeader *header;
char formatted_ip[FORMATTED_IP_SIZE];
do {
if (stage == SF_NIO_STAGE_HANDSHAKE) {
setup_channel_request(task);
result = 0;
break;
} else if (stage == SF_NIO_STAGE_CONTINUE && task->length == 0) {
if (((IdempotencyClientChannel *)task->arg)->established) {
report_req_receipt_request(task, true);
} else {
sf_set_read_event(task); //trigger read event
} else if (stage == SF_NIO_STAGE_CONTINUE) {
if (task->pending_send_count == 0) {
if (((IdempotencyClientChannel *)task->arg)->established) {
report_req_receipt_request(task, true);
} else if (task->req_count > 0) {
sf_set_read_event(task); //trigger read event
}
}
result = 0;
break;
}
result = buff2short(((SFCommonProtoHeader *)task->data)->status);
header = (SFCommonProtoHeader *)task->recv.ptr->data;
result = buff2short(header->status);
if (result != 0) {
int msg_len;
char *message;
msg_len = task->length - sizeof(SFCommonProtoHeader);
message = task->data + sizeof(SFCommonProtoHeader);
msg_len = SF_RECV_BODY_LENGTH(task);
message = SF_PROTO_RECV_BODY(task);
format_ip_address(task->server_ip, formatted_ip);
logError("file: "__FILE__", line: %d, "
"response from server %s:%u, cmd: %d (%s), "
"status: %d, error info: %.*s",
__LINE__, task->server_ip, task->port,
((SFCommonProtoHeader *)task->data)->cmd,
sf_get_cmd_caption(((SFCommonProtoHeader *)task->data)->cmd),
"status: %d, error info: %.*s", __LINE__,
formatted_ip, task->port, header->cmd,
sf_get_cmd_caption(header->cmd),
result, msg_len, message);
break;
}
switch (((SFCommonProtoHeader *)task->data)->cmd) {
switch (header->cmd) {
case SF_SERVICE_PROTO_SETUP_CHANNEL_RESP:
result = deal_setup_channel_response(task);
break;
@ -368,28 +407,35 @@ static int receipt_deal_task(struct fast_task_info *task, const int stage)
result = deal_report_req_receipt_response(task);
break;
case SF_PROTO_ACTIVE_TEST_RESP:
task->pending_send_count--;
result = 0;
break;
case SF_SERVICE_PROTO_CLOSE_CHANNEL_RESP:
result = ECONNRESET; //force to close socket
logDebug("file: "__FILE__", line: %d, "
"close channel to server %s:%u !!!",
__LINE__, task->server_ip, task->port);
if (FC_LOG_BY_LEVEL(LOG_DEBUG)) {
format_ip_address(task->server_ip, formatted_ip);
logDebug("file: "__FILE__", line: %d, "
"close channel to server %s:%u !!!",
__LINE__, formatted_ip, task->port);
}
break;
default:
format_ip_address(task->server_ip, formatted_ip);
logError("file: "__FILE__", line: %d, "
"response from server %s:%u, unexpect cmd: %d (%s)",
__LINE__, task->server_ip, task->port,
((SFCommonProtoHeader *)task->data)->cmd,
sf_get_cmd_caption(((SFCommonProtoHeader *)task->data)->cmd));
__LINE__, formatted_ip, task->port, header->cmd,
sf_get_cmd_caption(header->cmd));
result = EINVAL;
break;
}
if (result == 0) {
update_lru_chain(task);
task->offset = task->length = 0;
report_req_receipt_request(task, false);
task->recv.ptr->length = 0;
task->recv.ptr->offset = 0;
if (task->pending_send_count == 0) {
report_req_receipt_request(task, false);
}
}
} while (0);
@ -409,9 +455,10 @@ static void receipt_thread_check_heartbeat(
break;
}
if (sf_nio_task_is_idle(channel->task)) {
if (channel->task->pending_send_count == 0) {
channel->last_pkg_time = g_current_time;
active_test_request(channel->task);
channel->task->pending_send_count++;
}
}
}
@ -421,18 +468,22 @@ static void receipt_thread_close_idle_channel(
{
IdempotencyClientChannel *channel;
IdempotencyClientChannel *tmp;
char formatted_ip[FORMATTED_IP_SIZE];
fc_list_for_each_entry_safe(channel, tmp, &thread_ctx->head, dlink) {
if (!sf_nio_task_is_idle(channel->task)) {
if (channel->task->pending_send_count > 0) {
continue;
}
if (g_current_time - channel->last_report_time >
g_idempotency_client_cfg.channel_max_idle_time)
{
logDebug("file: "__FILE__", line: %d, "
"close channel to server %s:%u because idle too long",
__LINE__, channel->task->server_ip, channel->task->port);
if (FC_LOG_BY_LEVEL(LOG_DEBUG)) {
format_ip_address(channel->task->server_ip, formatted_ip);
logDebug("file: "__FILE__", line: %d, "
"close channel to server %s:%u because idle too long",
__LINE__, formatted_ip, channel->task->port);
}
close_channel_request(channel->task);
}
}
@ -463,40 +514,61 @@ static void *receipt_alloc_thread_extra_data(const int thread_index)
{
IdempotencyReceiptThreadContext *ctx;
ctx = receipt_thread_contexts + thread_index;
ctx = RECEIPT_THREAD_CONTEXTS + thread_index;
FC_INIT_LIST_HEAD(&ctx->head);
return ctx;
}
static int do_init()
static int do_init(FCAddressPtrArray *address_array)
{
const int task_arg_size = 0;
const bool double_buffers = false;
const bool need_shrink_task_buffer = false;
const bool explicit_post_recv = false;
int result;
int bytes;
SFNetworkHandler *rdma_handler;
struct ibv_pd *pd;
bytes = sizeof(IdempotencyReceiptThreadContext) * SF_G_WORK_THREADS;
receipt_thread_contexts = (IdempotencyReceiptThreadContext *)
RECEIPT_THREAD_CONTEXTS = (IdempotencyReceiptThreadContext *)
fc_malloc(bytes);
if (receipt_thread_contexts == NULL) {
if (RECEIPT_THREAD_CONTEXTS == NULL) {
return ENOMEM;
}
memset(receipt_thread_contexts, 0, bytes);
memset(RECEIPT_THREAD_CONTEXTS, 0, bytes);
return sf_service_init_ex2(&g_sf_context,
if ((rdma_handler=sf_get_rdma_network_handler(&g_sf_context)) != NULL) {
if ((result=sf_alloc_rdma_pd(&g_sf_context, address_array)) != 0) {
return result;
}
TASK_PADDING_SIZE = rdma_handler->get_connection_size();
RDMA_INIT_CONNECTION = rdma_handler->init_connection;
pd = rdma_handler->pd;
} else {
TASK_PADDING_SIZE = 0;
RDMA_INIT_CONNECTION = NULL;
pd = NULL;
}
return sf_service_init_ex2(&g_sf_context, "idemp-receipt",
receipt_alloc_thread_extra_data, receipt_thread_loop_callback,
NULL, sf_proto_set_body_length, receipt_deal_task,
NULL, sf_proto_set_body_length, NULL, NULL, receipt_deal_task,
receipt_task_finish_cleanup, receipt_recv_timeout_callback,
1000, sizeof(SFCommonProtoHeader), 0, receipt_init_task);
1000, sizeof(SFCommonProtoHeader), TASK_PADDING_SIZE,
task_arg_size, double_buffers, need_shrink_task_buffer,
explicit_post_recv, receipt_init_task, pd, NULL);
}
int receipt_handler_init()
int receipt_handler_init(FCAddressPtrArray *address_array)
{
int result;
if ((result=do_init()) != 0) {
if ((result=do_init(address_array)) != 0) {
return result;
}
sf_enable_thread_notify(true);
sf_set_remove_from_ready_list(false);
fc_sleep_ms(100);
return 0;


@ -24,7 +24,7 @@
extern "C" {
#endif
int receipt_handler_init();
int receipt_handler_init(FCAddressPtrArray *address_array);
int receipt_handler_destroy();
#ifdef __cplusplus


@ -19,7 +19,7 @@
#include "../../sf_configs.h"
#define SF_CLIENT_IDEMPOTENCY_UPDATE_WRAPPER(client_ctx, \
#define SF_CLIENT_IDEMPOTENCY_UPDATE_WRAPPER(client_ctx, conn_manager, \
GET_MASTER_CONNECTION, get_conn_arg1, update_callback, ...) \
ConnectionInfo *conn; \
IdempotencyClientChannel *old_channel; \
@ -30,28 +30,28 @@
uint64_t req_id; \
SFNetRetryIntervalContext net_retry_ctx; \
\
if ((conn=GET_MASTER_CONNECTION(client_ctx, \
if ((conn=GET_MASTER_CONNECTION(conn_manager, \
get_conn_arg1, &result)) == NULL) \
{ \
return SF_UNIX_ERRNO(result, EIO); \
} \
connection_params = client_ctx->conn_manager. \
get_connection_params(client_ctx, conn); \
connection_params = (conn_manager)->ops. \
get_connection_params(conn_manager, conn); \
idempotency_enabled = client_ctx->idempotency_enabled && \
connection_params != NULL; \
\
sf_init_net_retry_interval_context(&net_retry_ctx, \
&client_ctx->net_retry_cfg.interval_mm, \
&client_ctx->net_retry_cfg.network); \
&client_ctx->common_cfg.net_retry_cfg.interval_mm, \
&client_ctx->common_cfg.net_retry_cfg.network); \
\
if (idempotency_enabled) { \
req_id = idempotency_client_channel_next_seq_id( \
connection_params->channel); \
} else { \
req_id = 0; \
} \
\
while (1) { \
if (idempotency_enabled) { \
req_id = idempotency_client_channel_next_seq_id( \
connection_params->channel); \
} else { \
req_id = 0; \
} \
\
old_channel = connection_params != NULL ? \
connection_params->channel : NULL; \
i = 0; \
@ -79,9 +79,10 @@
connection_params->channel) == 0) \
{ \
if ((conn_result=sf_proto_rebind_idempotency_channel( \
conn, connection_params->channel->id, \
connection_params->channel->key, \
client_ctx->network_timeout)) == 0) \
conn, (conn_manager)->module_name, \
connection_params->channel->id, \
connection_params->channel->key, \
client_ctx->common_cfg.network_timeout)) == 0) \
{ \
continue; \
} \
@ -89,21 +90,21 @@
} \
\
SF_NET_RETRY_CHECK_AND_SLEEP(net_retry_ctx, client_ctx-> \
net_retry_cfg.network.times, ++i, result); \
common_cfg.net_retry_cfg.network.times, ++i, result); \
/* \
logInfo("file: "__FILE__", line: %d, func: %s, " \
"net retry result: %d, retry count: %d", \
__LINE__, __FUNCTION__, result, i); \
*/ \
SF_CLIENT_RELEASE_CONNECTION(client_ctx, conn, conn_result); \
if ((conn=GET_MASTER_CONNECTION(client_ctx, \
SF_CLIENT_RELEASE_CONNECTION(conn_manager, conn, conn_result); \
if ((conn=GET_MASTER_CONNECTION(conn_manager, \
get_conn_arg1, &result)) == NULL) \
{ \
return SF_UNIX_ERRNO(result, EIO); \
} \
\
connection_params = client_ctx->conn_manager. \
get_connection_params(client_ctx, conn); \
connection_params = (conn_manager)->ops. \
get_connection_params(conn_manager, conn); \
if (connection_params != NULL && connection_params->channel != \
old_channel) \
{ \
@ -124,26 +125,26 @@
break; \
} \
\
SF_CLIENT_RELEASE_CONNECTION(client_ctx, conn, result); \
SF_CLIENT_RELEASE_CONNECTION(conn_manager, conn, result); \
return SF_UNIX_ERRNO(result, EIO)
#define SF_CLIENT_IDEMPOTENCY_QUERY_WRAPPER(client_ctx, \
#define SF_CLIENT_IDEMPOTENCY_QUERY_WRAPPER(client_ctx, conn_manager, \
GET_READABLE_CONNECTION, get_conn_arg1, query_callback, ...) \
ConnectionInfo *conn; \
int result; \
int i; \
SFNetRetryIntervalContext net_retry_ctx; \
\
if ((conn=GET_READABLE_CONNECTION(client_ctx, \
if ((conn=GET_READABLE_CONNECTION(conn_manager, \
get_conn_arg1, &result)) == NULL) \
{ \
return SF_UNIX_ERRNO(result, EIO); \
} \
\
sf_init_net_retry_interval_context(&net_retry_ctx, \
&client_ctx->net_retry_cfg.interval_mm, \
&client_ctx->net_retry_cfg.network); \
&client_ctx->common_cfg.net_retry_cfg.interval_mm, \
&client_ctx->common_cfg.net_retry_cfg.network); \
i = 0; \
while (1) { \
if ((result=query_callback(client_ctx, \
@ -152,21 +153,21 @@
break; \
} \
SF_NET_RETRY_CHECK_AND_SLEEP(net_retry_ctx, client_ctx-> \
net_retry_cfg.network.times, ++i, result); \
common_cfg.net_retry_cfg.network.times, ++i, result); \
/* \
logInfo("file: "__FILE__", line: %d, func: %s, " \
"net retry result: %d, retry count: %d", \
__LINE__, __FUNCTION__, result, i); \
*/ \
SF_CLIENT_RELEASE_CONNECTION(client_ctx, conn, result); \
if ((conn=GET_READABLE_CONNECTION(client_ctx, \
SF_CLIENT_RELEASE_CONNECTION(conn_manager, conn, result); \
if ((conn=GET_READABLE_CONNECTION(conn_manager, \
get_conn_arg1, &result)) == NULL) \
{ \
return SF_UNIX_ERRNO(result, EIO); \
} \
} \
\
SF_CLIENT_RELEASE_CONNECTION(client_ctx, conn, result); \
SF_CLIENT_RELEASE_CONNECTION(conn_manager, conn, result); \
return SF_UNIX_ERRNO(result, EIO)


@ -0,0 +1,46 @@
/*
* Copyright (c) 2020 YuQing <384681@qq.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef _IDEMPOTENCY_COMMON_TYPES_H
#define _IDEMPOTENCY_COMMON_TYPES_H
#include "fastcommon/common_define.h"
#define SF_IDEMPOTENCY_CHANNEL_ID_BITS 16
#define SF_IDEMPOTENCY_REQUEST_ID_BITS (64 - SF_IDEMPOTENCY_CHANNEL_ID_BITS)
#define SF_IDEMPOTENCY_MAX_CHANNEL_COUNT ((1 << SF_IDEMPOTENCY_CHANNEL_ID_BITS) - 1)
#define SF_IDEMPOTENCY_MAX_CHANNEL_ID SF_IDEMPOTENCY_MAX_CHANNEL_COUNT
#define SF_IDEMPOTENCY_SERVER_ID_OFFSET 48
#define SF_IDEMPOTENCY_CHANNEL_ID_OFFSET 32
#define SF_IDEMPOTENCY_NEXT_REQ_ID(server_id, channel_id, seq) \
(((int64_t)server_id) << SF_IDEMPOTENCY_SERVER_ID_OFFSET) | \
(((int64_t)channel_id) << SF_IDEMPOTENCY_CHANNEL_ID_OFFSET) | \
(int64_t)seq
#define SF_IDEMPOTENCY_EXTRACT_SERVER_ID(req_id) \
(int)((req_id >> SF_IDEMPOTENCY_SERVER_ID_OFFSET) & 0xFFFF)
#ifdef __cplusplus
extern "C" {
#endif
#ifdef __cplusplus
}
#endif
#endif
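
The header above packs a 64-bit request id as: bits 63..48 = server id, bits 47..32 = channel id, bits 31..0 = sequence number. The following is a minimal standalone sketch of how these macros combine and extract the fields; the macro bodies are restated from the header so the snippet compiles on its own, and the numeric values are purely illustrative.

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/* restated from idempotency_types.h above so this sketch is self-contained */
#define SF_IDEMPOTENCY_SERVER_ID_OFFSET  48
#define SF_IDEMPOTENCY_CHANNEL_ID_OFFSET 32
#define SF_IDEMPOTENCY_NEXT_REQ_ID(server_id, channel_id, seq) \
    (((int64_t)server_id) << SF_IDEMPOTENCY_SERVER_ID_OFFSET) | \
    (((int64_t)channel_id) << SF_IDEMPOTENCY_CHANNEL_ID_OFFSET) | \
    (int64_t)seq
#define SF_IDEMPOTENCY_EXTRACT_SERVER_ID(req_id) \
    (int)((req_id >> SF_IDEMPOTENCY_SERVER_ID_OFFSET) & 0xFFFF)

int main(void)
{
    int server_id = 3;     /* illustrative values only */
    int channel_id = 101;
    uint32_t seq = 7;
    int64_t req_id;

    /* pack: server id in the top 16 bits, channel id in the next 16,
       sequence number in the low 32 bits */
    req_id = SF_IDEMPOTENCY_NEXT_REQ_ID(server_id, channel_id, seq);
    printf("req_id = 0x%016"PRIX64"\n", (uint64_t)req_id);

    /* unpack the server id again */
    printf("server_id = %d\n", SF_IDEMPOTENCY_EXTRACT_SERVER_ID(req_id));
    return 0;
}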


@ -0,0 +1,250 @@
/*
* Copyright (c) 2020 YuQing <384681@qq.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include <limits.h>
#include <sys/stat.h>
#include "fastcommon/shared_func.h"
#include "fastcommon/logger.h"
#include "fastcommon/fc_atomic.h"
#include "sf/sf_global.h"
#include "request_metadata.h"
static struct {
int process_interval_ms;
int master_side_timeout; //in seconds
struct {
IdempotencyRequestMetadataContext *head;
IdempotencyRequestMetadataContext *tail;
} list;
} g_request_metadata = {1000, 300, {NULL, NULL}};
#define CHECK_MASTER_METADATA(meta) \
(meta != NULL && g_current_time - (long)meta->enqueue_time > \
g_request_metadata.master_side_timeout)
static void process_master_side(IdempotencyRequestMetadataContext *ctx)
{
struct fast_mblock_chain chain;
struct fast_mblock_node *node;
chain.head = chain.tail = NULL;
PTHREAD_MUTEX_LOCK(&ctx->lock);
if (CHECK_MASTER_METADATA(ctx->list.head)) {
do {
node = fast_mblock_to_node_ptr(ctx->list.head);
if (chain.head == NULL) {
chain.head = node;
} else {
chain.tail->next = node;
}
chain.tail = node;
ctx->list.head = ctx->list.head->next;
} while (CHECK_MASTER_METADATA(ctx->list.head));
if (ctx->list.head == NULL) {
ctx->list.tail = NULL;
}
chain.tail->next = NULL;
}
if (chain.head != NULL) {
fast_mblock_batch_free(&ctx->allocator, &chain);
}
PTHREAD_MUTEX_UNLOCK(&ctx->lock);
}
#define CHECK_SLAVE_METADATA(meta, dv) \
(meta != NULL && meta->data_version <= dv)
static void process_slave_side(IdempotencyRequestMetadataContext *ctx,
const int64_t data_version)
{
struct fast_mblock_chain chain;
struct fast_mblock_node *node;
chain.head = chain.tail = NULL;
PTHREAD_MUTEX_LOCK(&ctx->lock);
if (CHECK_SLAVE_METADATA(ctx->list.head, data_version)) {
do {
node = fast_mblock_to_node_ptr(ctx->list.head);
if (chain.head == NULL) {
chain.head = node;
} else {
chain.tail->next = node;
}
chain.tail = node;
ctx->list.head = ctx->list.head->next;
} while (CHECK_SLAVE_METADATA(ctx->list.head, data_version));
if (ctx->list.head == NULL) {
ctx->list.tail = NULL;
}
chain.tail->next = NULL;
}
if (chain.head != NULL) {
fast_mblock_batch_free(&ctx->allocator, &chain);
}
PTHREAD_MUTEX_UNLOCK(&ctx->lock);
}
static void *thread_run(void *arg)
{
IdempotencyRequestMetadataContext *ctx;
int64_t data_version;
#ifdef OS_LINUX
prctl(PR_SET_NAME, "idemp-req-meta");
#endif
ctx = g_request_metadata.list.head;
while (SF_G_CONTINUE_FLAG) {
fc_sleep_ms(g_request_metadata.process_interval_ms);
if (ctx->is_master_callback.func(ctx->is_master_callback.
arg, &data_version))
{
process_master_side(ctx);
} else if (data_version > 0) {
process_slave_side(ctx, data_version);
}
ctx = ctx->next;
if (ctx == NULL) {
ctx = g_request_metadata.list.head;
}
}
return NULL;
}
int idempotency_request_metadata_init(IdempotencyRequestMetadataContext
*ctx, sf_is_master_callback is_master_callback, void *arg)
{
int result;
if ((result=fast_mblock_init_ex1(&ctx->allocator, "req-metadata-info",
sizeof(IdempotencyRequestMetadata), 8192, 0,
NULL, NULL, false)) != 0)
{
return result;
}
if ((result=init_pthread_lock(&ctx->lock)) != 0) {
return result;
}
ctx->is_master_callback.func = is_master_callback;
ctx->is_master_callback.arg = arg;
ctx->list.head = ctx->list.tail = NULL;
ctx->next = NULL;
if (g_request_metadata.list.head == NULL) {
g_request_metadata.list.head = ctx;
} else {
g_request_metadata.list.tail->next = ctx;
}
g_request_metadata.list.tail = ctx;
return 0;
}
int idempotency_request_metadata_start(const int process_interval_ms,
const int master_side_timeout)
{
pthread_t tid;
if (g_request_metadata.list.head == NULL) {
logError("file: "__FILE__", line: %d, "
"list is empty!", __LINE__);
return ENOENT;
}
if (process_interval_ms <= 0) {
logError("file: "__FILE__", line: %d, "
"invalid process interval: %d!",
__LINE__, process_interval_ms);
return EINVAL;
}
if (master_side_timeout <= 0) {
logError("file: "__FILE__", line: %d, "
"invalid master side timeout: %d!",
__LINE__, master_side_timeout);
return EINVAL;
}
g_request_metadata.process_interval_ms = process_interval_ms;
g_request_metadata.master_side_timeout = master_side_timeout;
return fc_create_thread(&tid, thread_run, NULL,
SF_G_THREAD_STACK_SIZE);
}
int idempotency_request_metadata_add(IdempotencyRequestMetadataContext
*ctx, const SFRequestMetadata *metadata, const int n)
{
IdempotencyRequestMetadata *idemp_meta;
PTHREAD_MUTEX_LOCK(&ctx->lock);
do {
if ((idemp_meta=fast_mblock_alloc_object(&ctx->allocator)) == NULL) {
break;
}
idemp_meta->req_id = metadata->req_id;
idemp_meta->data_version = metadata->data_version;
idemp_meta->n = n;
idemp_meta->enqueue_time = g_current_time;
idemp_meta->next = NULL;
if (ctx->list.head == NULL) {
ctx->list.head = idemp_meta;
} else {
ctx->list.tail->next = idemp_meta;
}
ctx->list.tail = idemp_meta;
} while (0);
PTHREAD_MUTEX_UNLOCK(&ctx->lock);
return (idemp_meta != NULL ? 0 : ENOMEM);
}
int idempotency_request_metadata_get(IdempotencyRequestMetadataContext
*ctx, const int64_t req_id, int64_t *data_version, int *n)
{
int result;
IdempotencyRequestMetadata *meta;
result = ENOENT;
PTHREAD_MUTEX_LOCK(&ctx->lock);
meta = ctx->list.head;
while (meta != NULL) {
if (req_id == meta->req_id) {
result = 0;
*data_version = meta->data_version;
if (n != NULL) {
*n = meta->n;
}
break;
}
meta = meta->next;
}
PTHREAD_MUTEX_UNLOCK(&ctx->lock);
return result;
}


@ -0,0 +1,66 @@
/*
* Copyright (c) 2020 YuQing <384681@qq.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef _SF_IDEMPOTENCY_REQUEST_METADATA_H
#define _SF_IDEMPOTENCY_REQUEST_METADATA_H
#include "server_types.h"
typedef bool (*sf_is_master_callback)(void *arg, int64_t *data_version);
typedef struct idempotency_request_metadata {
int64_t req_id;
int64_t data_version;
int n; //integer argument
uint32_t enqueue_time;
struct idempotency_request_metadata *next;
} IdempotencyRequestMetadata;
typedef struct idempotency_request_metadata_context {
struct {
sf_is_master_callback func;
void *arg;
} is_master_callback;
struct fast_mblock_man allocator; //element: IdempotencyRequestMetadata
pthread_mutex_t lock;
struct {
IdempotencyRequestMetadata *head;
IdempotencyRequestMetadata *tail;
} list;
struct idempotency_request_metadata_context *next;
} IdempotencyRequestMetadataContext;
#ifdef __cplusplus
extern "C" {
#endif
int idempotency_request_metadata_init(IdempotencyRequestMetadataContext
*ctx, sf_is_master_callback is_master_callback, void *arg);
int idempotency_request_metadata_start(const int process_interval_ms,
const int master_side_timeout);
int idempotency_request_metadata_add(IdempotencyRequestMetadataContext
*ctx, const SFRequestMetadata *metadata, const int n);
int idempotency_request_metadata_get(IdempotencyRequestMetadataContext
*ctx, const int64_t req_id, int64_t *data_version, int *n);
#ifdef __cplusplus
}
#endif
#endif
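A hedged usage sketch of the API declared above: register one context with an is_master callback, start the background cleaner once, then record req_id/data_version pairs as updates are applied. The callback body, context name, and sample values are illustrative, not part of this diff.

#include "request_metadata.h"   /* path as used by the .c above */

static IdempotencyRequestMetadataContext meta_ctx;   /* illustrative */

/* hypothetical role callback: return true on the master; on a slave,
 * report the last applied data version so old entries can be pruned */
static bool my_is_master(void *arg, int64_t *data_version)
{
    *data_version = 0;    /* the application fills this in on a slave */
    return true;          /* assume this node is currently master */
}

static int setup_request_metadata(void)
{
    int result;
    SFRequestMetadata metadata;

    if ((result=idempotency_request_metadata_init(&meta_ctx,
                    my_is_master, NULL)) != 0)
    {
        return result;
    }
    /* scan every second; keep master-side entries for 300 seconds */
    if ((result=idempotency_request_metadata_start(1000, 300)) != 0) {
        return result;
    }

    metadata.req_id = 1;           /* illustrative values */
    metadata.data_version = 100;
    return idempotency_request_metadata_add(&meta_ctx, &metadata, 0);
}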


@ -100,7 +100,7 @@ int idempotency_channel_init(const uint32_t max_channel_id,
element_size = sizeof(IdempotencyChannel) + sizeof(IdempotencyRequest *) *
request_htable_capacity;
if ((result=fast_mblock_init_ex1(&channel_context.channel_allocator,
"channel_info", element_size, 1024, max_channel_id,
"channel-info", element_size, 1024, max_channel_id,
idempotency_channel_alloc_init, NULL, true)) != 0)
{
return result;


@ -37,12 +37,9 @@
#include "server_channel.h"
#include "server_handler.h"
#define SF_TASK_BODY_LENGTH(task) \
(task->length - sizeof(SFCommonProtoHeader))
int sf_server_deal_setup_channel(struct fast_task_info *task,
int *task_type, IdempotencyChannel **channel,
SFResponseInfo *response)
int *task_type, const int server_id, IdempotencyChannel
**channel, SFResponseInfo *response)
{
int result;
SFProtoSetupChannelReq *req;
@ -52,13 +49,13 @@ int sf_server_deal_setup_channel(struct fast_task_info *task,
response->header.cmd = SF_SERVICE_PROTO_SETUP_CHANNEL_RESP;
if ((result=sf_server_expect_body_length(response,
SF_TASK_BODY_LENGTH(task),
SF_RECV_BODY_LENGTH(task),
sizeof(SFProtoSetupChannelReq))) != 0)
{
return result;
}
req = (SFProtoSetupChannelReq *)(task->data + sizeof(SFCommonProtoHeader));
req = (SFProtoSetupChannelReq *)SF_PROTO_RECV_BODY(task);
channel_id = buff2int(req->channel_id);
key = buff2int(req->key);
if (*channel != NULL) {
@ -74,14 +71,13 @@ int sf_server_deal_setup_channel(struct fast_task_info *task,
"alloc channel fail, hint channel id: %d", channel_id);
return ENOMEM;
}
*task_type = SF_SERVER_TASK_TYPE_CHANNEL_HOLDER;
resp = (SFProtoSetupChannelResp *)(task->data +
sizeof(SFCommonProtoHeader));
resp = (SFProtoSetupChannelResp *)SF_PROTO_SEND_BODY(task);
int2buff((*channel)->id, resp->channel_id);
int2buff((*channel)->key, resp->key);
int2buff(task->size, resp->buffer_size);
int2buff(server_id, resp->server_id);
int2buff(task->send.ptr->size, resp->buffer_size);
response->header.body_len = sizeof(SFProtoSetupChannelResp);
return 0;
}
@ -135,19 +131,19 @@ int sf_server_deal_report_req_receipt(struct fast_task_info *task,
SFProtoReportReqReceiptBody *body_part;
SFProtoReportReqReceiptBody *body_end;
response->header.cmd = SF_SERVICE_PROTO_REPORT_REQ_RECEIPT_RESP;
if ((result=check_holder_channel(task_type, channel, response)) != 0) {
return result;
}
body_len = SF_TASK_BODY_LENGTH(task);
body_len = SF_RECV_BODY_LENGTH(task);
if ((result=sf_server_check_min_body_length(response, body_len,
sizeof(SFProtoReportReqReceiptHeader))) != 0)
{
return result;
}
body_header = (SFProtoReportReqReceiptHeader *)
(task->data + sizeof(SFCommonProtoHeader));
body_header = (SFProtoReportReqReceiptHeader *)SF_PROTO_RECV_BODY(task);
count = buff2int(body_header->count);
calc_body_len = sizeof(SFProtoReportReqReceiptHeader) +
sizeof(SFProtoReportReqReceiptBody) * count;
@ -169,12 +165,11 @@ int sf_server_deal_report_req_receipt(struct fast_task_info *task,
}
//logInfo("receipt count: %d, success: %d", count, success);
response->header.cmd = SF_SERVICE_PROTO_REPORT_REQ_RECEIPT_RESP;
return 0;
}
IdempotencyRequest *sf_server_update_prepare_and_check(
struct fast_task_info *task, struct fast_mblock_man *
SFRequestInfo *req, struct fast_mblock_man *
request_allocator, IdempotencyChannel *channel,
SFResponseInfo *response, int *result)
{
@ -188,9 +183,8 @@ IdempotencyRequest *sf_server_update_prepare_and_check(
return NULL;
}
adheader = (SFProtoIdempotencyAdditionalHeader *)
(task->data + sizeof(SFCommonProtoHeader));
request = (IdempotencyRequest *)fast_mblock_alloc_object(request_allocator);
adheader = (SFProtoIdempotencyAdditionalHeader *)req->body;
request = fast_mblock_alloc_object(request_allocator);
if (request == NULL) {
*result = ENOMEM;
return NULL;
@ -221,7 +215,7 @@ int sf_server_deal_rebind_channel(struct fast_task_info *task,
SFProtoRebindChannelReq *req;
if ((result=sf_server_expect_body_length(response,
SF_TASK_BODY_LENGTH(task),
SF_RECV_BODY_LENGTH(task),
sizeof(SFProtoRebindChannelReq))) != 0)
{
return result;
@ -241,7 +235,7 @@ int sf_server_deal_rebind_channel(struct fast_task_info *task,
}
idempotency_channel_release(*channel, false);
req = (SFProtoRebindChannelReq *)(task->data + sizeof(SFCommonProtoHeader));
req = (SFProtoRebindChannelReq *)SF_PROTO_RECV_BODY(task);
channel_id = buff2int(req->channel_id);
key = buff2int(req->key);
*channel = idempotency_channel_find_and_hold(channel_id, key, &result);


@ -25,8 +25,8 @@ extern "C" {
#endif
int sf_server_deal_setup_channel(struct fast_task_info *task,
int *task_type, IdempotencyChannel **channel,
SFResponseInfo *response);
int *task_type, const int server_id, IdempotencyChannel
**channel, SFResponseInfo *response);
int sf_server_deal_close_channel(struct fast_task_info *task,
int *task_type, IdempotencyChannel **channel,
@ -37,7 +37,7 @@ int sf_server_deal_report_req_receipt(struct fast_task_info *task,
SFResponseInfo *response);
IdempotencyRequest *sf_server_update_prepare_and_check(
struct fast_task_info *task, struct fast_mblock_man *
SFRequestInfo *req, struct fast_mblock_man *
request_allocator, IdempotencyChannel *channel,
SFResponseInfo *response, int *result);


@ -19,11 +19,7 @@
#include "fastcommon/fast_mblock.h"
#include "fastcommon/fast_timer.h"
#define SF_IDEMPOTENCY_CHANNEL_ID_BITS 16
#define SF_IDEMPOTENCY_REQUEST_ID_BITS (64 - SF_IDEMPOTENCY_CHANNEL_ID_BITS)
#define SF_IDEMPOTENCY_MAX_CHANNEL_COUNT ((1 << SF_IDEMPOTENCY_CHANNEL_ID_BITS) - 1)
#define SF_IDEMPOTENCY_MAX_CHANNEL_ID SF_IDEMPOTENCY_MAX_CHANNEL_COUNT
#include "sf/idempotency/common/idempotency_types.h"
#define SF_IDEMPOTENCY_DEFAULT_REQUEST_HINT_CAPACITY 1023
#define SF_IDEMPOTENCY_DEFAULT_CHANNEL_RESERVE_INTERVAL 600
@ -32,14 +28,14 @@
typedef struct idempotency_request_result {
short rsize; //response size defined by application
short flags; //for application
int result;
void *response;
volatile int result;
void * volatile response;
} IdempotencyRequestResult;
typedef struct idempotency_request {
uint64_t req_id;
volatile int ref_count;
bool finished;
volatile char finished;
IdempotencyRequestResult output;
struct fast_mblock_man *allocator; //for free
struct idempotency_request *next;
@ -61,6 +57,17 @@ typedef struct idempotency_channel {
struct idempotency_channel *next;
} IdempotencyChannel;
typedef struct sf_request_metadata {
int64_t req_id;
int64_t data_version;
} SFRequestMetadata;
typedef struct sf_request_metadata_array {
SFRequestMetadata *elts;
int count;
int alloc;
} SFRequestMetadataArray;
#ifdef __cplusplus
extern "C" {
#endif

src/sf_binlog_index.c (new file, 274 lines)

@ -0,0 +1,274 @@
/*
* Copyright (c) 2020 YuQing <384681@qq.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include "fastcommon/logger.h"
#include "fastcommon/shared_func.h"
#include "fastcommon/fc_memory.h"
#include "sf_binlog_index.h"
#define SF_BINLOG_HEADER_FIELD_COUNT 2
#define SF_BINLOG_HEADER_FIELD_INDEX_RECORD_COUNT 0
#define SF_BINLOG_HEADER_FIELD_INDEX_LAST_VERSION 1
void sf_binlog_index_init(SFBinlogIndexContext *ctx, const char *name,
const char *filename, const int record_max_size,
const int array_elt_size, pack_record_func pack_record,
unpack_record_func unpack_record)
{
memset(ctx, 0, sizeof(SFBinlogIndexContext));
ctx->name = name;
ctx->filename = fc_strdup(filename);
ctx->record_max_size = record_max_size;
ctx->array_elt_size = array_elt_size;
ctx->pack_record = pack_record;
ctx->unpack_record = unpack_record;
}
static int parse_header(const string_t *line, int *record_count,
int64_t *last_version, char *error_info)
{
int count;
char *endptr;
string_t cols[SF_BINLOG_HEADER_FIELD_COUNT];
count = split_string_ex(line, ' ', cols,
SF_BINLOG_HEADER_FIELD_COUNT, false);
if (count != SF_BINLOG_HEADER_FIELD_COUNT) {
sprintf(error_info, "field count: %d != %d",
count, SF_BINLOG_HEADER_FIELD_COUNT);
return EINVAL;
}
SF_BINLOG_PARSE_INT_SILENCE(*record_count, "record count",
SF_BINLOG_HEADER_FIELD_INDEX_RECORD_COUNT, ' ', 0);
SF_BINLOG_PARSE_INT_SILENCE(*last_version, "last version",
SF_BINLOG_HEADER_FIELD_INDEX_LAST_VERSION, '\n', 0);
return 0;
}
static int parse(SFBinlogIndexContext *ctx, const string_t *lines,
const int row_count)
{
int result;
int record_count;
char error_info[256];
const string_t *line;
const string_t *end;
void *bindex;
if ((result=parse_header(lines, &record_count, &ctx->
last_version, error_info)) != 0)
{
logError("file: "__FILE__", line: %d, "
"%s index file: %s, parse header fail, error info: %s",
__LINE__, ctx->name, ctx->filename, error_info);
return result;
}
if (row_count != record_count + 1) {
logError("file: "__FILE__", line: %d, "
"%s index file: %s, line count: %d != record count: "
"%d + 1", __LINE__, ctx->name, ctx->filename,
row_count, record_count + 1);
return EINVAL;
}
ctx->index_array.alloc = 64;
while (ctx->index_array.alloc < record_count) {
ctx->index_array.alloc *= 2;
}
ctx->index_array.indexes = fc_malloc(ctx->array_elt_size *
ctx->index_array.alloc);
if (ctx->index_array.indexes == NULL) {
return ENOMEM;
}
end = lines + row_count;
bindex = ctx->index_array.indexes;
for (line=lines+1; line<end; line++) {
if ((result=ctx->unpack_record(line, bindex, error_info)) != 0) {
logError("file: "__FILE__", line: %d, "
"%s index file: %s, parse line #%d fail, error "
"info: %s", __LINE__, ctx->name, ctx->filename,
(int)(line - lines) + 1, error_info);
return result;
}
bindex = (char *)bindex + ctx->array_elt_size;
}
ctx->index_array.count = row_count - 1;
return 0;
}
static int load(SFBinlogIndexContext *ctx)
{
int result;
int row_count;
int64_t file_size;
string_t context;
string_t *lines;
if ((result=getFileContent(ctx->filename, &context.str,
&file_size)) != 0)
{
return result;
}
context.len = file_size;
row_count = getOccurCount(context.str, '\n');
lines = (string_t *)fc_malloc(sizeof(string_t) * row_count);
if (lines == NULL) {
free(context.str);
return ENOMEM;
}
row_count = split_string_ex(&context, '\n', lines, row_count, true);
if (row_count > 0) {
result = parse(ctx, lines, row_count);
}
free(lines);
free(context.str);
return result;
}
int sf_binlog_index_load(SFBinlogIndexContext *ctx)
{
int result;
if (access(ctx->filename, F_OK) == 0) {
return load(ctx);
} else if (errno == ENOENT) {
return 0;
} else {
result = errno != 0 ? errno : EPERM;
logError("file: "__FILE__", line: %d, "
"access file %s fail, "
"errno: %d, error info: %s", __LINE__,
ctx->filename, result, STRERROR(result));
return result;
}
}
static int save(SFBinlogIndexContext *ctx, const char *filename)
{
char buff[16 * 1024];
char *bend;
void *index;
char *p;
int fd;
int len;
int i;
int result;
if ((fd=open(filename, O_WRONLY | O_CREAT | O_TRUNC |
O_CLOEXEC, 0644)) < 0)
{
result = errno != 0 ? errno : EIO;
logError("file: "__FILE__", line: %d, "
"open file %s fail, errno: %d, error info: %s",
__LINE__, filename, result, STRERROR(result));
return result;
}
result = 0;
p = buff;
bend = buff + sizeof(buff);
p += fc_itoa(ctx->index_array.count, p);
*p++ = ' ';
p += fc_itoa(ctx->last_version, p);
*p++ = '\n';
index = ctx->index_array.indexes;
for (i=0; i<ctx->index_array.count; i++) {
if (bend - p < ctx->record_max_size) {
len = p - buff;
if (fc_safe_write(fd, buff, len) != len) {
result = errno != 0 ? errno : EIO;
logError("file: "__FILE__", line: %d, "
"write file %s fail, errno: %d, error info: %s",
__LINE__, filename, result, STRERROR(result));
break;
}
p = buff;
}
p += ctx->pack_record(p, index);
index = (char *)index + ctx->array_elt_size;
}
if (result == 0) {
len = p - buff;
if (len > 0 && fc_safe_write(fd, buff, len) != len) {
result = errno != 0 ? errno : EIO;
logError("file: "__FILE__", line: %d, "
"write file %s fail, errno: %d, error info: %s",
__LINE__, filename, result, STRERROR(result));
}
}
close(fd);
return result;
}
int sf_binlog_index_save(SFBinlogIndexContext *ctx)
{
int result;
char tmp_filename[PATH_MAX];
fc_combine_two_strings(ctx->filename, "tmp", '.', tmp_filename);
if ((result=save(ctx, tmp_filename)) != 0) {
return result;
}
if (rename(tmp_filename, ctx->filename) != 0) {
result = errno != 0 ? errno : EIO;
logError("file: "__FILE__", line: %d, "
"rename file \"%s\" to \"%s\" fail, "
"errno: %d, error info: %s",
__LINE__, tmp_filename, ctx->filename,
result, STRERROR(result));
return result;
}
return 0;
}
int sf_binlog_index_expand_array(SFBinlogIndexArray *array,
const int elt_size)
{
int alloc;
void *indexes;
if (array->alloc == 0) {
alloc = 1024;
} else {
alloc = array->alloc * 2;
}
indexes = fc_malloc(elt_size * alloc);
if (indexes == NULL) {
return ENOMEM;
}
if (array->count > 0) {
memcpy(indexes, array->indexes, elt_size * array->count);
free(array->indexes);
}
array->indexes = indexes;
array->alloc = alloc;
return 0;
}

src/sf_binlog_index.h (new file, 100 lines)

@ -0,0 +1,100 @@
/*
* Copyright (c) 2020 YuQing <384681@qq.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
//sf_binlog_index.h
#ifndef _SF_BINLOG_INDEX_H_
#define _SF_BINLOG_INDEX_H_
#include "fastcommon/common_define.h"
#define SF_BINLOG_PARSE_INT_SILENCE(var, caption, index, endchr, min_val) \
do { \
var = strtoll(cols[index].str, &endptr, 10); \
if (*endptr != endchr || var < min_val) { \
sprintf(error_info, "invalid %s: %.*s", \
caption, cols[index].len, cols[index].str); \
return EINVAL; \
} \
} while (0)
#define SF_BINLOG_PARSE_INT_SILENCE2(var, caption, index, echr1, echr2, min_val) \
do { \
var = strtoll(cols[index].str, &endptr, 10); \
if (!(*endptr == echr1 || *endptr == echr2) || (var < min_val)) { \
sprintf(error_info, "invalid %s: %.*s", \
caption, cols[index].len, cols[index].str); \
return EINVAL; \
} \
} while (0)
typedef int (*pack_record_func)(char *buff, void *record);
typedef int (*unpack_record_func)(const string_t *line,
void *record, char *error_info);
typedef struct sf_binlog_index_array {
void *indexes;
int alloc;
int count;
} SFBinlogIndexArray;
typedef struct sf_binlog_index_context {
const char *name;
char *filename;
int record_max_size;
int array_elt_size;
pack_record_func pack_record;
unpack_record_func unpack_record;
SFBinlogIndexArray index_array;
int64_t last_version;
} SFBinlogIndexContext;
#ifdef __cplusplus
extern "C" {
#endif
void sf_binlog_index_init(SFBinlogIndexContext *ctx, const char *name,
const char *filename, const int record_max_size,
const int array_elt_size, pack_record_func pack_record,
unpack_record_func unpack_record);
int sf_binlog_index_load(SFBinlogIndexContext *ctx);
int sf_binlog_index_save(SFBinlogIndexContext *ctx);
int sf_binlog_index_expand_array(SFBinlogIndexArray *array,
const int elt_size);
static inline int sf_binlog_index_expand(SFBinlogIndexContext *ctx)
{
return sf_binlog_index_expand_array(&ctx->
index_array, ctx->array_elt_size);
}
static inline void sf_binlog_index_free(SFBinlogIndexContext *ctx)
{
if (ctx->index_array.indexes != NULL) {
free(ctx->index_array.indexes);
ctx->index_array.indexes = NULL;
ctx->index_array.alloc = ctx->index_array.count = 0;
}
}
#ifdef __cplusplus
}
#endif
#endif
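A sketch of how the callback-driven index API above is wired up, assuming a hypothetical record format of one "id offset" pair per line; the record struct, field names, and the 64-byte record size are illustrative and only reuse the macros and functions declared in this header.

#include <stdio.h>
#include <errno.h>
#include <inttypes.h>
#include "fastcommon/shared_func.h"
#include "sf_binlog_index.h"   /* path as used by the .c above */

/* hypothetical index record: one "id offset" pair per line */
typedef struct {
    int64_t id;
    int64_t offset;
} DemoBinlogIndex;

static int demo_pack_record(char *buff, void *record)
{
    DemoBinlogIndex *idx = (DemoBinlogIndex *)record;
    return sprintf(buff, "%"PRId64" %"PRId64"\n", idx->id, idx->offset);
}

static int demo_unpack_record(const string_t *line,
        void *record, char *error_info)
{
    DemoBinlogIndex *idx = (DemoBinlogIndex *)record;
    string_t cols[2];
    char *endptr;
    int count;

    count = split_string_ex(line, ' ', cols, 2, false);
    if (count != 2) {
        sprintf(error_info, "field count: %d != 2", count);
        return EINVAL;
    }
    SF_BINLOG_PARSE_INT_SILENCE(idx->id, "id", 0, ' ', 0);
    SF_BINLOG_PARSE_INT_SILENCE(idx->offset, "offset", 1, '\n', 0);
    return 0;
}

static SFBinlogIndexContext demo_index_ctx;   /* illustrative */

static int demo_index_open(const char *index_filename)
{
    /* 64: assumed max packed record size; load is a no-op
       when the index file does not exist yet */
    sf_binlog_index_init(&demo_index_ctx, "demo", index_filename,
            64, sizeof(DemoBinlogIndex),
            demo_pack_record, demo_unpack_record);
    return sf_binlog_index_load(&demo_index_ctx);
}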

File diff suppressed because it is too large


@ -19,24 +19,25 @@
#define _SF_BINLOG_WRITER_H_
#include "fastcommon/fc_queue.h"
#include "fastcommon/fc_atomic.h"
#include "sf_types.h"
#include "sf_file_writer.h"
#define SF_BINLOG_THREAD_ORDER_MODE_FIXED 0
#define SF_BINLOG_THREAD_ORDER_MODE_VARY 1
#define SF_BINLOG_THREAD_TYPE_ORDER_BY_NONE 0
#define SF_BINLOG_THREAD_TYPE_ORDER_BY_VERSION 1
#define SF_BINLOG_WRITER_TYPE_ORDER_BY_NONE 0
#define SF_BINLOG_WRITER_TYPE_ORDER_BY_VERSION 1
#define SF_BINLOG_BUFFER_TYPE_WRITE_TO_FILE 0 //default type, must be 0
#define SF_BINLOG_BUFFER_TYPE_SET_NEXT_VERSION 1
#define SF_BINLOG_SUBDIR_NAME_SIZE 128
#define SF_BINLOG_FILE_MAX_SIZE (1024 * 1024 * 1024) //for binlog rotating by size
#define SF_BINLOG_FILE_PREFIX "binlog"
#define SF_BINLOG_FILE_EXT_FMT ".%06d"
#define SF_BINLOG_BUFFER_LENGTH(buffer) ((buffer).end - (buffer).buff)
#define SF_BINLOG_BUFFER_REMAIN(buffer) ((buffer).end - (buffer).current)
#define SF_BINLOG_BUFFER_TYPE_WRITE_TO_FILE 0 //default type, must be 0
#define SF_BINLOG_BUFFER_TYPE_SET_NEXT_VERSION 1
#define SF_BINLOG_BUFFER_TYPE_CHANGE_ORDER_TYPE 2
#define SF_BINLOG_BUFFER_TYPE_CHANGE_PASSIVE_WRITE 3
#define SF_BINLOG_BUFFER_TYPE_CHANGE_CALL_FSYNC 4
#define SF_BINLOG_BUFFER_TYPE_SET_WRITE_INDEX 5
#define SF_BINLOG_BUFFER_TYPE_ROTATE_FILE 6
#define SF_BINLOG_BUFFER_TYPE_NOTIFY_EXIT 7
#define SF_BINLOG_BUFFER_TYPE_FLUSH_FILE 8
#define SF_BINLOG_BUFFER_SET_VERSION(buffer, ver) \
(buffer)->version.first = (buffer)->version.last = ver
@ -46,8 +47,8 @@ struct sf_binlog_writer_info;
typedef struct sf_binlog_writer_buffer {
SFVersionRange version;
BufferInfo bf;
int64_t tag;
int type; //for versioned writer
int type;
uint32_t timestamp; //for flow ctrol
struct sf_binlog_writer_info *writer;
struct sf_binlog_writer_buffer *next;
} SFBinlogWriterBuffer;
@ -66,10 +67,18 @@ typedef struct sf_binlog_writer_buffer_ring {
typedef struct binlog_writer_thread {
struct fast_mblock_man mblock;
struct fc_queue queue;
bool running;
char name[64];
volatile bool running;
bool use_fixed_buffer_size;
short order_mode;
short order_by;
bool passive_write;
char order_mode;
int write_interval_ms;
struct {
int max_delay; //in seconds
volatile uint32_t last_timestamp;
int waiting_count;
pthread_lock_cond_pair_t lcp;
} flow_ctrol;
struct {
struct sf_binlog_writer_info *head;
struct sf_binlog_writer_info *tail;
@ -77,29 +86,16 @@ typedef struct binlog_writer_thread {
} SFBinlogWriterThread;
typedef struct sf_binlog_writer_info {
struct {
char subdir_name[SF_BINLOG_SUBDIR_NAME_SIZE];
int max_record_size;
} cfg;
SFFileWriterInfo fw;
struct {
int index;
int compress_index;
} binlog;
struct {
int fd;
int64_t size;
char *name;
} file;
int64_t total_count;
struct {
SFBinlogWriterBufferRing ring;
int64_t next;
volatile int64_t next;
int64_t change_count; //version change count
} version_ctx;
SFBinlogBuffer binlog_buffer;
SFBinlogWriterThread *thread;
short order_by;
struct {
bool in_queue;
struct sf_binlog_writer_info *next;
@ -115,58 +111,187 @@ typedef struct sf_binlog_writer_context {
extern "C" {
#endif
extern char *g_sf_binlog_data_path;
int sf_binlog_writer_init_normal_ex(SFBinlogWriterInfo *writer,
const char *data_path, const char *subdir_name,
const char *file_prefix, const int max_record_size,
const int buffer_size, const int64_t file_rotate_size,
const bool call_fsync);
int sf_binlog_writer_init_normal(SFBinlogWriterInfo *writer,
const char *subdir_name, const int buffer_size);
int sf_binlog_writer_init_by_version(SFBinlogWriterInfo *writer,
const char *subdir_name, const uint64_t next_version,
const int buffer_size, const int ring_size);
int sf_binlog_writer_init_by_version_ex(SFBinlogWriterInfo *writer,
const char *data_path, const char *subdir_name,
const char *file_prefix, const int max_record_size,
const uint64_t next_version, const int buffer_size,
const int ring_size, const int64_t file_rotate_size,
const bool call_fsync);
int sf_binlog_writer_init_thread_ex(SFBinlogWriterThread *thread,
SFBinlogWriterInfo *writer, const short order_mode,
const short order_by, const int max_record_size,
const int writer_count, const bool use_fixed_buffer_size);
const char *name, SFBinlogWriterInfo *writer, const short order_mode,
const int write_interval_ms, const int max_delay,
const int max_record_size, const bool use_fixed_buffer_size,
const bool passive_write);
#define sf_binlog_writer_init_thread(thread, \
writer, order_by, max_record_size) \
sf_binlog_writer_init_thread_ex(thread, writer, \
SF_BINLOG_THREAD_ORDER_MODE_FIXED, \
order_by, max_record_size, 1, true)
#define sf_binlog_writer_init_normal(writer, data_path, \
subdir_name, max_record_size, buffer_size) \
sf_binlog_writer_init_normal_ex(writer, data_path, subdir_name, \
SF_BINLOG_FILE_PREFIX_STR, max_record_size, buffer_size, \
SF_BINLOG_DEFAULT_ROTATE_SIZE, true)
static inline int sf_binlog_writer_init(SFBinlogWriterContext *context,
const char *subdir_name, const int buffer_size,
const int max_record_size)
#define sf_binlog_writer_init_by_version(writer, data_path, subdir_name, \
max_record_size, next_version, buffer_size, ring_size) \
sf_binlog_writer_init_by_version_ex(writer, data_path, subdir_name, \
SF_BINLOG_FILE_PREFIX_STR, max_record_size, next_version, \
buffer_size, ring_size, SF_BINLOG_DEFAULT_ROTATE_SIZE, true)
#define sf_binlog_writer_init_thread(thread, name, \
writer, write_interval_ms, max_delay, max_record_size) \
sf_binlog_writer_init_thread_ex(thread, name, writer, \
SF_BINLOG_THREAD_ORDER_MODE_FIXED, write_interval_ms, \
max_delay, max_record_size, true, false)
static inline int sf_binlog_writer_init_ex(SFBinlogWriterContext *context,
const char *data_path, const char *subdir_name,
const char *file_prefix, const int buffer_size,
const int write_interval_ms, const int max_delay,
const int max_record_size, const bool call_fsync)
{
int result;
if ((result=sf_binlog_writer_init_normal(&context->writer,
subdir_name, buffer_size)) != 0)
if ((result=sf_binlog_writer_init_normal_ex(&context->writer, data_path,
subdir_name, file_prefix, max_record_size, buffer_size,
SF_BINLOG_DEFAULT_ROTATE_SIZE, call_fsync)) != 0)
{
return result;
}
return sf_binlog_writer_init_thread(&context->thread, &context->writer,
SF_BINLOG_THREAD_TYPE_ORDER_BY_NONE, max_record_size);
return sf_binlog_writer_init_thread(&context->thread, subdir_name,
&context->writer, write_interval_ms, max_delay, max_record_size);
}
int sf_binlog_writer_change_order_by(SFBinlogWriterThread *thread,
#define sf_binlog_writer_init(context, data_path, subdir_name, \
buffer_size, write_interval_ms, max_delay, max_record_size) \
sf_binlog_writer_init_ex(context, data_path, subdir_name, \
SF_BINLOG_FILE_PREFIX_STR, buffer_size, write_interval_ms, \
max_delay, max_record_size, true)
void sf_binlog_writer_finish(SFBinlogWriterInfo *writer);
static inline void sf_binlog_writer_destroy_writer(
SFBinlogWriterInfo *writer)
{
sf_file_writer_destroy(&writer->fw);
if (writer->version_ctx.ring.slots != NULL) {
free(writer->version_ctx.ring.slots);
writer->version_ctx.ring.slots = NULL;
}
}
static inline void sf_binlog_writer_destroy_thread(
SFBinlogWriterThread *thread)
{
fast_mblock_destroy(&thread->mblock);
fc_queue_destroy(&thread->queue);
}
static inline void sf_binlog_writer_destroy(
SFBinlogWriterContext *context)
{
sf_binlog_writer_finish(&context->writer);
sf_binlog_writer_destroy_writer(&context->writer);
sf_binlog_writer_destroy_thread(&context->thread);
}
int sf_binlog_writer_change_order_by(SFBinlogWriterInfo *writer,
const short order_by);
int sf_binlog_writer_change_passive_write(SFBinlogWriterInfo *writer,
const bool passive_write);
int sf_binlog_writer_change_call_fsync(SFBinlogWriterInfo *writer,
const bool call_fsync);
int sf_binlog_writer_change_next_version(SFBinlogWriterInfo *writer,
const int64_t next_version);
void sf_binlog_writer_finish(SFBinlogWriterInfo *writer);
static inline int64_t sf_binlog_writer_get_next_version(
SFBinlogWriterInfo *writer)
{
return writer->version_ctx.next;
}
int sf_binlog_get_current_write_index(SFBinlogWriterInfo *writer);
static inline int sf_binlog_writer_get_waiting_count(
SFBinlogWriterInfo *writer)
{
return writer->version_ctx.ring.waiting_count;
}
void sf_binlog_get_current_write_position(SFBinlogWriterInfo *writer,
SFBinlogFilePosition *position);
static inline int sf_binlog_writer_get_thread_waiting_count(
SFBinlogWriterThread *thread)
{
int waiting_count;
PTHREAD_MUTEX_LOCK(&thread->flow_ctrol.lcp.lock);
waiting_count = thread->flow_ctrol.waiting_count;
PTHREAD_MUTEX_UNLOCK(&thread->flow_ctrol.lcp.lock);
return waiting_count;
}
int sf_binlog_writer_rotate_file_ex(SFBinlogWriterInfo *writer,
const bool skip_empty_file);
static inline int sf_binlog_writer_rotate_file(SFBinlogWriterInfo *writer)
{
const bool skip_empty_file = false;
return sf_binlog_writer_rotate_file_ex(writer, skip_empty_file);
}
int sf_binlog_writer_flush_file(SFBinlogWriterInfo *writer);
int sf_binlog_writer_change_write_index(SFBinlogWriterInfo *writer,
const int write_index);
int sf_binlog_writer_notify_exit(SFBinlogWriterInfo *writer);
#define sf_binlog_writer_set_flags(writer, flags) \
sf_file_writer_set_flags(&(writer)->fw, flags)
#define sf_binlog_writer_set_write_done_callback(writer, callback, args) \
sf_file_writer_set_write_done_callback(&(writer)->fw, callback, args)
#define sf_binlog_writer_get_last_version_ex(writer, log_level) \
sf_file_writer_get_last_version_ex(&(writer)->fw, log_level)
#define sf_binlog_writer_get_last_version(writer) \
sf_file_writer_get_last_version(&(writer)->fw)
#define sf_binlog_writer_get_last_version_silence(writer) \
sf_file_writer_get_last_version_silence(&(writer)->fw)
#define sf_binlog_get_indexes(writer, start_index, last_index) \
sf_file_writer_get_indexes(&(writer)->fw, start_index, last_index)
#define sf_binlog_get_start_index(writer) \
sf_file_writer_get_start_index(&(writer)->fw)
#define sf_binlog_get_last_index(writer) \
sf_file_writer_get_last_index(&(writer)->fw)
#define sf_binlog_get_current_write_index(writer) \
sf_file_writer_get_current_write_index(&(writer)->fw)
#define sf_binlog_get_current_write_position(writer, position) \
sf_file_writer_get_current_position(&(writer)->fw, position)
static inline SFBinlogWriterBuffer *sf_binlog_writer_alloc_buffer(
SFBinlogWriterThread *thread)
{
return (SFBinlogWriterBuffer *)fast_mblock_alloc_object(&thread->mblock);
SFBinlogWriterBuffer *buffer;
if ((buffer=(SFBinlogWriterBuffer *)fast_mblock_alloc_object(
&thread->mblock)) != NULL)
{
buffer->type = SF_BINLOG_BUFFER_TYPE_WRITE_TO_FILE;
}
return buffer;
}
#define sf_binlog_writer_alloc_one_version_buffer(writer, version) \
@ -183,6 +308,7 @@ static inline SFBinlogWriterBuffer *sf_binlog_writer_alloc_versioned_buffer_ex(
const int64_t last_version, const int type)
{
SFBinlogWriterBuffer *buffer;
buffer = (SFBinlogWriterBuffer *)fast_mblock_alloc_object(
&writer->thread->mblock);
if (buffer != NULL) {
@ -194,38 +320,55 @@ static inline SFBinlogWriterBuffer *sf_binlog_writer_alloc_versioned_buffer_ex(
return buffer;
}
static inline const char *sf_binlog_writer_get_filepath(const char *subdir_name,
char *filename, const int size)
{
snprintf(filename, size, "%s/%s", g_sf_binlog_data_path, subdir_name);
return filename;
}
void sf_push_to_binlog_write_queue(SFBinlogWriterInfo *writer,
SFBinlogWriterBuffer *buffer);
static inline const char *sf_binlog_writer_get_filename(const char *subdir_name,
const int binlog_index, char *filename, const int size)
{
snprintf(filename, size, "%s/%s/%s"SF_BINLOG_FILE_EXT_FMT,
g_sf_binlog_data_path, subdir_name,
SF_BINLOG_FILE_PREFIX, binlog_index);
return filename;
}
#define sf_binlog_writer_get_filepath(data_path, subdir_name, filepath, size) \
sf_file_writer_get_filepath(data_path, subdir_name, filepath, size)
int sf_binlog_writer_set_binlog_index(SFBinlogWriterInfo *writer,
const int binlog_index);
#define sf_binlog_writer_get_filename_ex(data_path, subdir_name, \
file_prefix, binlog_index, filename, size) \
sf_file_writer_get_filename_ex(data_path, subdir_name, \
file_prefix, binlog_index, filename, size)
#define sf_push_to_binlog_thread_queue(thread, buffer) \
fc_queue_push(&(thread)->queue, buffer)
#define sf_binlog_writer_get_filename(data_path, \
subdir_name, binlog_index, filename, size) \
sf_file_writer_get_filename(data_path, subdir_name, \
binlog_index, filename, size)
static inline void sf_push_to_binlog_write_queue(SFBinlogWriterInfo *writer,
SFBinlogWriterBuffer *buffer)
{
buffer->type = SF_BINLOG_BUFFER_TYPE_WRITE_TO_FILE;
fc_queue_push(&writer->thread->queue, buffer);
}
#define sf_binlog_writer_get_index_filename(data_path, \
subdir_name, filename, size) \
sf_file_writer_get_index_filename(data_path, \
subdir_name, filename, size)
int sf_binlog_writer_get_last_lines(const char *subdir_name,
const int current_write_index, char *buff,
const int buff_size, int *count, int *length);
#define sf_binlog_writer_get_binlog_indexes(data_path, \
subdir_name, start_index, last_index) \
sf_file_writer_get_binlog_indexes(data_path, \
subdir_name, start_index, last_index)
#define sf_binlog_writer_get_binlog_start_index(data_path, \
subdir_name, start_index) \
sf_file_writer_get_binlog_start_index(data_path, \
subdir_name, start_index)
#define sf_binlog_writer_get_binlog_last_index(data_path, \
subdir_name, last_index) \
sf_file_writer_get_binlog_last_index(data_path, \
subdir_name, last_index)
#define sf_binlog_set_indexes(writer, start_index, last_index) \
sf_file_writer_set_indexes(&(writer)->fw, start_index, last_index)
#define sf_binlog_writer_set_binlog_start_index(writer, start_index) \
sf_file_writer_set_binlog_start_index(&(writer)->fw, start_index)
#define sf_binlog_writer_set_binlog_write_index(writer, last_index) \
sf_file_writer_set_binlog_write_index(&(writer)->fw, last_index)
#define sf_binlog_writer_get_last_lines(data_path, subdir_name, \
current_write_index, buff, buff_size, count, length) \
sf_file_writer_get_last_lines(data_path, subdir_name, \
current_write_index, buff, buff_size, count, length)
#ifdef __cplusplus
}
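A cautious sketch of the reworked writer API declared above: initialize a plain writer, allocate a buffer from its thread, and push it to the write queue. The include path, the assumption that bf.buff is pre-sized to max_record_size by the writer's allocator, and all sizes are assumptions, not confirmed by this diff.

#include <stdio.h>
#include <errno.h>
#include "sf_binlog_writer.h"   /* assumed include path */

static SFBinlogWriterContext binlog_ctx;   /* illustrative */

/* sketch: plain (non-versioned) writer under <data_path>/demo */
static int demo_binlog_append(const char *data_path)
{
    SFBinlogWriterBuffer *wbuffer;
    int result;

    /* buffer_size 64 KB, write_interval_ms 0, max_delay 60 s,
       max_record_size 256 -- all values are illustrative */
    if ((result=sf_binlog_writer_init(&binlog_ctx, data_path,
                    "demo", 64 * 1024, 0, 60, 256)) != 0)
    {
        return result;
    }

    if ((wbuffer=sf_binlog_writer_alloc_buffer(&binlog_ctx.thread)) == NULL) {
        return ENOMEM;
    }
    /* assumption: bf.buff points to a record buffer of at least
       max_record_size bytes owned by the writer's allocator */
    wbuffer->bf.length = sprintf(wbuffer->bf.buff, "demo record\n");
    sf_push_to_binlog_write_queue(&binlog_ctx.writer, wbuffer);

    sf_binlog_writer_destroy(&binlog_ctx);   /* finish, then free resources */
    return 0;
}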

src/sf_buffered_writer.h (new file, 106 lines)

@ -0,0 +1,106 @@
/*
* Copyright (c) 2020 YuQing <384681@qq.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
//sf_buffered_writer.h
#ifndef _SF_BUFFERED_WRITER_H_
#define _SF_BUFFERED_WRITER_H_
#include "sf_types.h"
#include "sf_func.h"
typedef struct {
int fd;
const char *filename;
SFBinlogBuffer buffer;
} SFBufferedWriter;
#define sf_buffered_writer_init(writer, filename) \
sf_buffered_writer_init_ex(writer, filename, 1024 * 1024)
#define SF_BUFFERED_WRITER_LENGTH(bw) \
SF_BINLOG_BUFFER_PRODUCER_DATA_LENGTH((bw).buffer)
#define SF_BUFFERED_WRITER_REMAIN(bw) \
SF_BINLOG_BUFFER_PRODUCER_BUFF_REMAIN((bw).buffer)
#ifdef __cplusplus
extern "C" {
#endif
static inline int sf_buffered_writer_init_ex(SFBufferedWriter *writer,
const char *filename, const int buffer_size)
{
int result;
writer->filename = filename;
writer->fd = open(filename, O_WRONLY | O_CREAT |
O_TRUNC | O_CLOEXEC, 0644);
if (writer->fd < 0) {
result = errno != 0 ? errno : EIO;
logError("file: "__FILE__", line: %d, "
"open file %s fail, errno: %d, error info: %s",
__LINE__, filename, result, STRERROR(result));
return result;
}
if ((result=sf_binlog_buffer_init(&writer->buffer, buffer_size)) != 0) {
return result;
}
return 0;
}
static inline int sf_buffered_writer_save(SFBufferedWriter *writer)
{
int result;
int length;
length = writer->buffer.data_end - writer->buffer.buff;
if (fc_safe_write(writer->fd, writer->buffer.buff, length) != length) {
result = errno != 0 ? errno : EIO;
logError("file: "__FILE__", line: %d, "
"write to file %s fail, errno: %d, error info: %s",
__LINE__, writer->filename, result, STRERROR(result));
return result;
}
writer->buffer.data_end = writer->buffer.buff;
return 0;
}
static inline int sf_buffered_writer_destroy(SFBufferedWriter *writer)
{
int result;
if (writer->fd >= 0) {
if (fsync(writer->fd) != 0) {
result = errno != 0 ? errno : EIO;
logError("file: "__FILE__", line: %d, "
"fsync to file %s fail, errno: %d, error info: %s",
__LINE__, writer->filename, result, STRERROR(result));
return result;
}
close(writer->fd);
writer->fd = -1;
}
sf_binlog_buffer_destroy(&writer->buffer);
return 0;
}
#ifdef __cplusplus
}
#endif
#endif
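A usage sketch of this buffered writer, assuming records are appended at buffer.data_end (the convention sf_buffered_writer_save relies on); the flush threshold, record format, and names are illustrative.

#include <stdio.h>
#include "sf_buffered_writer.h"   /* assumed include path */

/* sketch: dump 1000 numbered lines through the default 1 MB buffer,
 * flushing whenever fewer than 64 bytes remain */
static int demo_dump(const char *filename)
{
    SFBufferedWriter writer;
    int result;
    int i;

    if ((result=sf_buffered_writer_init(&writer, filename)) != 0) {
        return result;
    }
    for (i=0; i<1000; i++) {
        if (SF_BUFFERED_WRITER_REMAIN(writer) < 64) {
            if ((result=sf_buffered_writer_save(&writer)) != 0) {
                return result;
            }
        }
        /* append at data_end and advance it, the convention
           sf_buffered_writer_save expects */
        writer.buffer.data_end += sprintf(writer.buffer.data_end,
                "record %d\n", i);
    }
    if (SF_BUFFERED_WRITER_LENGTH(writer) > 0) {
        if ((result=sf_buffered_writer_save(&writer)) != 0) {
            return result;
        }
    }
    return sf_buffered_writer_destroy(&writer);
}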

src/sf_cluster_cfg.c (new file, 139 lines)

@ -0,0 +1,139 @@
/*
* Copyright (c) 2020 YuQing <384681@qq.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include <limits.h>
#include "fastcommon/fast_buffer.h"
#include "fastcommon/md5.h"
#include "sf_cluster_cfg.h"
static int calc_cluster_config_sign(SFClusterConfig *cluster)
{
FastBuffer buffer;
int result;
if ((result=fast_buffer_init1(&buffer, 1024)) != 0) {
return result;
}
fc_server_to_config_string(&cluster->server_cfg, &buffer);
my_md5_buffer(buffer.data, buffer.length, cluster->md5_digest);
/*
{
char hex_buff[2 * sizeof(cluster->md5_digest) + 1];
logInfo("cluster config length: %d, sign: %s", buffer.length,
bin2hex((const char *)cluster->md5_digest,
sizeof(cluster->md5_digest), hex_buff));
}
*/
fast_buffer_destroy(&buffer);
return 0;
}
static int find_group_indexes_in_cluster_config(SFClusterConfig *cluster,
const char *filename)
{
cluster->cluster_group_index = fc_server_get_group_index(
&cluster->server_cfg, "cluster");
if (cluster->cluster_group_index < 0) {
logError("file: "__FILE__", line: %d, "
"cluster config file: %s, cluster group "
"not configurated", __LINE__, filename);
return ENOENT;
}
cluster->service_group_index = fc_server_get_group_index(
&cluster->server_cfg, "service");
if (cluster->service_group_index < 0) {
logError("file: "__FILE__", line: %d, "
"cluster config file: %s, service group "
"not configurated", __LINE__, filename);
return ENOENT;
}
return 0;
}
static int load_server_cfg(SFClusterConfig *cluster,
const char *cluster_filename, const int default_port,
const bool share_between_groups)
{
IniContext ini_context;
const int min_hosts_each_group = 1;
int result;
if ((result=iniLoadFromFile(cluster_filename, &ini_context)) != 0) {
logError("file: "__FILE__", line: %d, "
"load conf file \"%s\" fail, ret code: %d",
__LINE__, cluster_filename, result);
return result;
}
result = fc_server_load_from_ini_context_ex(&cluster->server_cfg,
&ini_context, cluster_filename, default_port,
min_hosts_each_group, share_between_groups);
iniFreeContext(&ini_context);
return result;
}
int sf_load_cluster_config_by_file(SFClusterConfig *cluster,
const char *full_cluster_filename, const int default_port,
const bool share_between_groups, const bool calc_sign)
{
int result;
if ((result=load_server_cfg(cluster, full_cluster_filename,
default_port, share_between_groups)) != 0)
{
return result;
}
if ((result=find_group_indexes_in_cluster_config(cluster,
full_cluster_filename)) != 0)
{
return result;
}
if (calc_sign) {
if ((result=calc_cluster_config_sign(cluster)) != 0) {
return result;
}
}
return 0;
}
int sf_load_cluster_config_ex1(SFClusterConfig *cluster,
IniFullContext *ini_ctx, const char *cluster_config_item_name,
const int default_port, char *full_cluster_filename, const int size)
{
const bool share_between_groups = true;
char *cluster_config_filename;
cluster_config_filename = iniGetStrValue(ini_ctx->section_name,
cluster_config_item_name, ini_ctx->context);
if (cluster_config_filename == NULL || *cluster_config_filename == '\0') {
logError("file: "__FILE__", line: %d, "
"config file: %s, item \"%s\" not exist or empty",
__LINE__, cluster_config_item_name, ini_ctx->filename);
return ENOENT;
}
resolve_path(ini_ctx->filename, cluster_config_filename,
full_cluster_filename, size);
return sf_load_cluster_config_by_file(cluster, full_cluster_filename,
default_port, share_between_groups, true);
}

src/sf_cluster_cfg.h (new file, 66 lines)

@ -0,0 +1,66 @@
/*
* Copyright (c) 2020 YuQing <384681@qq.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef _SF_CLUSTER_CFG_H
#define _SF_CLUSTER_CFG_H
#include "sf_types.h"
#ifdef __cplusplus
extern "C" {
#endif
int sf_load_cluster_config_ex1(SFClusterConfig *cluster,
IniFullContext *ini_ctx, const char *cluster_config_item_name,
const int default_port, char *full_cluster_filename, const int size);
static inline int sf_load_cluster_config_ex(SFClusterConfig *cluster,
IniFullContext *ini_ctx, const int default_port,
char *full_cluster_filename, const int size)
{
const char *cluster_config_item_name = "cluster_config_filename";
return sf_load_cluster_config_ex1(cluster, ini_ctx,
cluster_config_item_name, default_port,
full_cluster_filename, PATH_MAX);
}
static inline int sf_load_cluster_config1(SFClusterConfig *cluster,
IniFullContext *ini_ctx, const char *cluster_config_item_name,
const int default_port)
{
char full_cluster_filename[PATH_MAX];
return sf_load_cluster_config_ex1(cluster, ini_ctx,
cluster_config_item_name, default_port,
full_cluster_filename, PATH_MAX);
}
static inline int sf_load_cluster_config(SFClusterConfig *cluster,
IniFullContext *ini_ctx, const int default_port)
{
char full_cluster_filename[PATH_MAX];
return sf_load_cluster_config_ex(cluster, ini_ctx, default_port,
full_cluster_filename, PATH_MAX);
}
int sf_load_cluster_config_by_file(SFClusterConfig *cluster,
const char *full_cluster_filename, const int default_port,
const bool share_between_groups, const bool calc_sign);
#ifdef __cplusplus
}
#endif
#endif
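A sketch of loading a cluster configuration through this header, assuming the IniFullContext fields used elsewhere in this diff (filename, section_name, context); the config file name and default port are placeholders.

#include "fastcommon/ini_file_reader.h"
#include "sf_cluster_cfg.h"   /* assumed include path */

/* sketch: load the cluster config referenced by the
 * "cluster_config_filename" item of a server config file */
static int demo_load_cluster(SFClusterConfig *cluster,
        const char *server_config_filename)
{
    const int default_port = 11000;   /* assumed default */
    IniContext ini_context;
    IniFullContext ini_ctx;
    int result;

    if ((result=iniLoadFromFile(server_config_filename,
                    &ini_context)) != 0)
    {
        return result;
    }
    ini_ctx.filename = server_config_filename;
    ini_ctx.section_name = NULL;    /* global section */
    ini_ctx.context = &ini_context;

    result = sf_load_cluster_config(cluster, &ini_ctx, default_port);
    iniFreeContext(&ini_context);
    return result;
}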


@ -25,10 +25,10 @@
#include "fastcommon/logger.h"
#include "sf_configs.h"
#define DEFAULT_RETRY_MAX_INTERVAL_MS 5000
#define DEFAULT_CONNECT_RETRY_TIMES 10
#define DEFAULT_RETRY_MAX_INTERVAL_MS 3000
#define DEFAULT_CONNECT_RETRY_TIMES 200
#define DEFAULT_CONNECT_RETRY_INTERVAL_MS 100
#define DEFAULT_NETWORK_RETRY_TIMES 10
#define DEFAULT_NETWORK_RETRY_TIMES 200
#define DEFAULT_NETWORK_RETRY_INTERVAL_MS 100
int sf_load_net_retry_config(SFNetRetryConfig *net_retry_cfg,
@ -94,13 +94,14 @@ void sf_net_retry_config_to_string(SFNetRetryConfig *net_retry_cfg,
net_retry_cfg->network.interval_ms);
}
void sf_load_read_rule_config_ex(SFDataReadRule *rule,
int sf_load_read_rule_config_ex(SFDataReadRule *rule,
IniFullContext *ini_ctx, const SFDataReadRule def_rule)
{
char *read_rule;
read_rule = iniGetStrValueEx(ini_ctx->section_name,
"read_rule", ini_ctx->context, true);
if (read_rule == NULL || *read_rule == '\0') {
if (read_rule == NULL) {
*rule = def_rule;
} else if (strncasecmp(read_rule, "any", 3) == 0) {
*rule = sf_data_read_rule_any_available;
@ -110,8 +111,62 @@ void sf_load_read_rule_config_ex(SFDataReadRule *rule,
*rule = sf_data_read_rule_master_only;
} else {
logError("file: "__FILE__", line: %d, "
"config file: %s, unkown read_rule: %s, set to any",
"config file: %s, unkown read_rule: %s",
__LINE__, ini_ctx->filename, read_rule);
*rule = sf_data_read_rule_any_available;
return EINVAL;
}
return 0;
}
int sf_load_election_quorum_config_ex(SFElectionQuorum *quorum,
IniFullContext *ini_ctx, const SFElectionQuorum def_quorum)
{
char *str;
str = iniGetStrValue(ini_ctx->section_name,
"quorum", ini_ctx->context);
if (str == NULL) {
*quorum = def_quorum;
} else if (strncasecmp(str, "auto", 4) == 0) {
*quorum = sf_election_quorum_auto;
} else if (strncasecmp(str, "any", 3) == 0) {
*quorum = sf_election_quorum_any;
} else if (strncasecmp(str, "majority", 8) == 0) {
*quorum = sf_election_quorum_majority;
} else {
logError("file: "__FILE__", line: %d, "
"config file: %s, unkown quorum: %s",
__LINE__, ini_ctx->filename, str);
return EINVAL;
}
return 0;
}
int sf_load_replication_quorum_config_ex(SFReplicationQuorum *quorum,
IniFullContext *ini_ctx, const SFReplicationQuorum def_quorum)
{
char *str;
str = iniGetStrValue(ini_ctx->section_name,
"quorum", ini_ctx->context);
if (str == NULL) {
*quorum = def_quorum;
} else if (strncasecmp(str, "auto", 4) == 0) {
*quorum = sf_replication_quorum_auto;
} else if (strncasecmp(str, "any", 3) == 0) {
*quorum = sf_replication_quorum_any;
} else if (strncasecmp(str, "majority", 8) == 0) {
*quorum = sf_replication_quorum_majority;
} else if (strncasecmp(str, "smart", 5) == 0) {
*quorum = sf_replication_quorum_smart;
} else {
logError("file: "__FILE__", line: %d, "
"config file: %s, unkown quorum: %s",
__LINE__, ini_ctx->filename, str);
return EINVAL;
}
return 0;
}


@ -23,39 +23,12 @@
#include "sf_define.h"
#include "sf_types.h"
typedef enum sf_net_retry_interval_mode {
sf_net_retry_interval_mode_fixed,
sf_net_retry_interval_mode_multiple
} SFNetRetryIntervalMode;
typedef struct sf_net_retry_interval_mode_max_pair {
SFNetRetryIntervalMode mode;
int max_interval_ms;
} SFNetRetryIntervalModeMaxPair;
typedef struct sf_net_retry_times_interval_pair {
int times;
int interval_ms;
} SFNetRetryTimesIntervalPair;
typedef struct sf_net_retry_config {
SFNetRetryIntervalModeMaxPair interval_mm;
SFNetRetryTimesIntervalPair connect;
SFNetRetryTimesIntervalPair network;
} SFNetRetryConfig;
typedef struct sf_net_retry_interval_context {
SFNetRetryIntervalModeMaxPair *mm;
SFNetRetryTimesIntervalPair *ti;
const SFNetRetryIntervalModeMaxPair *mm;
const SFNetRetryTimesIntervalPair *ti;
int interval_ms;
} SFNetRetryIntervalContext;
typedef enum sf_data_read_rule {
sf_data_read_rule_any_available,
sf_data_read_rule_slave_first,
sf_data_read_rule_master_only,
} SFDataReadRule;
#ifdef __cplusplus
extern "C" {
#endif
@ -72,8 +45,8 @@ static inline void sf_reset_net_retry_interval(SFNetRetryIntervalContext *ctx)
}
static inline void sf_init_net_retry_interval_context(
SFNetRetryIntervalContext *ctx, SFNetRetryIntervalModeMaxPair *mm,
SFNetRetryTimesIntervalPair *ti)
SFNetRetryIntervalContext *ctx, const SFNetRetryIntervalModeMaxPair *mm,
const SFNetRetryTimesIntervalPair *ti)
{
ctx->mm = mm;
ctx->ti = ti;
@ -94,7 +67,7 @@ static inline int sf_calc_next_retry_interval(SFNetRetryIntervalContext *ctx)
return ctx->interval_ms;
}
void sf_load_read_rule_config_ex(SFDataReadRule *rule,
int sf_load_read_rule_config_ex(SFDataReadRule *rule,
IniFullContext *ini_ctx, const SFDataReadRule def_rule);
static inline const char *sf_get_read_rule_caption(
@ -112,9 +85,113 @@ static inline const char *sf_get_read_rule_caption(
}
}
int sf_load_election_quorum_config_ex(SFElectionQuorum *quorum,
IniFullContext *ini_ctx, const SFElectionQuorum def_quorum);
static inline const char *sf_get_election_quorum_caption(
const SFElectionQuorum quorum)
{
switch (quorum) {
case sf_election_quorum_auto:
return "auto";
case sf_election_quorum_any:
return "any";
case sf_election_quorum_majority:
return "majority";
default:
return "unknown";
}
}
static inline bool sf_election_quorum_check(const SFElectionQuorum quorum,
const bool vote_node_enabled, const int total_count,
const int active_count)
{
switch (quorum) {
case sf_election_quorum_any:
return active_count > 0;
case sf_election_quorum_auto:
if (total_count % 2 == 0 && !vote_node_enabled) {
return active_count > 0; //same as sf_election_quorum_any
}
//continue
case sf_election_quorum_majority:
if (active_count == total_count) {
return true;
} else {
return active_count > total_count / 2;
}
}
}
int sf_load_replication_quorum_config_ex(SFReplicationQuorum *quorum,
IniFullContext *ini_ctx, const SFReplicationQuorum def_quorum);
static inline const char *sf_get_replication_quorum_caption(
const SFReplicationQuorum quorum)
{
switch (quorum) {
case sf_replication_quorum_auto:
return "auto";
case sf_replication_quorum_any:
return "any";
case sf_replication_quorum_majority:
return "majority";
case sf_replication_quorum_smart:
return "smart";
default:
return "unknown";
}
}
#define SF_REPLICATION_QUORUM_MAJORITY(server_count, success_count) \
((success_count == server_count) || (success_count > server_count / 2))
static inline bool sf_replication_quorum_check(const SFReplicationQuorum
quorum, const int server_count, const int success_count)
{
switch (quorum) {
case sf_replication_quorum_any:
return true;
case sf_replication_quorum_auto:
if (server_count % 2 == 0) {
return true; //same as sf_replication_quorum_any
}
//continue
case sf_replication_quorum_smart:
case sf_replication_quorum_majority:
return SF_REPLICATION_QUORUM_MAJORITY(
server_count, success_count);
}
}
#define sf_load_read_rule_config(rule, ini_ctx) \
sf_load_read_rule_config_ex(rule, ini_ctx, sf_data_read_rule_master_only)
#define sf_load_election_quorum_config(quorum, ini_ctx) \
sf_load_election_quorum_config_ex(quorum, ini_ctx, sf_election_quorum_auto)
#define sf_load_replication_quorum_config(quorum, ini_ctx) \
sf_load_replication_quorum_config_ex(quorum, ini_ctx, \
sf_replication_quorum_auto)
#define SF_ELECTION_QUORUM_NEED_REQUEST_VOTE_NODE(quorum, \
vote_node_enabled, server_count, active_count) \
(active_count < server_count && vote_node_enabled && \
quorum != sf_election_quorum_any && server_count % 2 == 0)
#define SF_ELECTION_QUORUM_NEED_CHECK_VOTE_NODE(quorum, \
vote_node_enabled, server_count) \
(vote_node_enabled && quorum != sf_election_quorum_any \
&& server_count % 2 == 0)
#define SF_REPLICATION_QUORUM_NEED_MAJORITY(quorum, server_count) \
(server_count > 1 && (quorum != sf_replication_quorum_any))
#define SF_REPLICATION_QUORUM_NEED_DETECT(quorum, server_count) \
(server_count % 2 == 0 && (quorum == sf_replication_quorum_smart || \
quorum == sf_replication_quorum_auto))
#define SF_NET_RETRY_FINISHED(retry_times, counter, result) \
!((SF_IS_RETRIABLE_ERROR(result) && ((retry_times > 0 && \
counter <= retry_times) || (retry_times < 0))))
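To make the quorum semantics above concrete, a small sketch for a three-replica group with one node down; the include path and values are illustrative.

#include <stdio.h>
#include "sf_configs.h"   /* assumed include path */

/* sketch: a three-replica group with one node down */
static void demo_quorum_checks(void)
{
    const int server_count = 3;
    const int active_count = 2;          /* one replica offline */
    const bool vote_node_enabled = false;

    /* majority election: 2 of 3 active nodes is enough */
    printf("election ok: %d\n", sf_election_quorum_check(
            sf_election_quorum_majority, vote_node_enabled,
            server_count, active_count));

    /* majority replication: 2 of 3 successful writes is enough */
    printf("replication ok: %d\n", sf_replication_quorum_check(
            sf_replication_quorum_majority, server_count, active_count));
}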

src/sf_connection_manager.c (new file, 971 lines)

@ -0,0 +1,971 @@
/*
* Copyright (c) 2020 YuQing <384681@qq.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <errno.h>
#include "sf_global.h"
#include "sf_configs.h"
#include "sf_proto.h"
#include "fastcommon/shared_func.h"
#include "fastcommon/logger.h"
#include "fastcommon/fc_atomic.h"
#include "sf_connection_manager.h"
static int get_group_servers(SFConnectionManager *cm,
SFCMConnGroupEntry *group);
static ConnectionInfo *get_spec_connection(SFConnectionManager *cm,
const ConnectionInfo *target, const bool shared, int *err_no)
{
return conn_pool_get_connection_ex(&cm->cpool,
target, cm->module_name, shared, err_no);
}
static ConnectionInfo *make_connection(SFConnectionManager *cm,
FCAddressPtrArray *addr_array, const bool shared, int *err_no)
{
FCAddressInfo **current;
FCAddressInfo **addr;
FCAddressInfo **end;
ConnectionInfo *conn;
if (addr_array->count <= 0) {
*err_no = ENOENT;
return NULL;
}
current = addr_array->addrs + addr_array->index;
if ((conn=get_spec_connection(cm, &(*current)->conn,
shared, err_no)) != NULL)
{
return conn;
}
if (addr_array->count == 1) {
return NULL;
}
end = addr_array->addrs + addr_array->count;
for (addr=addr_array->addrs; addr<end; addr++) {
if (addr == current) {
continue;
}
if ((conn=get_spec_connection(cm, &(*addr)->conn,
shared, err_no)) != NULL)
{
addr_array->index = addr - addr_array->addrs;
return conn;
}
}
return NULL;
}
static ConnectionInfo *get_server_connection(SFConnectionManager *cm,
FCServerInfo *server, const bool shared, int *err_no)
{
FCAddressPtrArray *addr_array;
ConnectionInfo *conn;
addr_array = &server->group_addrs[cm->server_group_index].address_array;
if ((conn=make_connection(cm, addr_array, shared, err_no)) == NULL) {
logError("file: "__FILE__", line: %d, "
"%s server id: %d, address count: %d, get_server_connection fail",
__LINE__, cm->module_name, server->id, addr_array->count);
}
return conn;
}
static ConnectionInfo *get_connection(SFConnectionManager *cm,
const int group_index, const bool shared, int *err_no)
{
SFCMServerArray *server_array;
ConnectionInfo *conn;
uint32_t server_hash_code;
int server_index;
int i;
server_array = &cm->groups.entries[group_index].all;
server_hash_code = rand();
server_index = server_hash_code % server_array->count;
if ((conn=make_connection(cm, server_array->servers[server_index].
addr_array, shared, err_no)) != NULL)
{
return conn;
}
if (server_array->count > 1) {
for (i=0; i<server_array->count; i++) {
if (i == server_index) {
continue;
}
if ((conn=make_connection(cm, server_array->servers[i].
addr_array, shared, err_no)) != NULL)
{
return conn;
}
}
}
logError("file: "__FILE__", line: %d, "
"%s data group index: %d, get_connection fail, "
"configured server count: %d", __LINE__, cm->module_name,
group_index, server_array->count);
return NULL;
}
static inline void set_connection_params(ConnectionInfo *conn,
SFCMServerEntry *server, SFCMServerPtrArray *old_alives)
{
SFConnectionParameters *cparam;
cparam = (SFConnectionParameters *)conn->args;
cparam->cm.sentry = server;
cparam->cm.old_alives = old_alives;
}
static inline int push_to_detect_queue(SFConnectionManager *cm,
SFCMConnGroupEntry *group, SFCMServerPtrArray *alives)
{
if (!cm->alive_detect.bg_thread_enabled) {
return 0;
}
if (alives->count < group->all.count) {
if (__sync_bool_compare_and_swap(&group->in_queue, 0, 1)) {
return common_blocked_queue_push(&cm->alive_detect.queue, group);
}
}
return 0;
}
static inline bool alive_array_cas(SFConnectionManager *cm,
SFCMConnGroupEntry *group, SFCMServerPtrArray *old_alives,
SFCMServerPtrArray *new_alives)
{
if (__sync_bool_compare_and_swap(&group->alives,
old_alives, new_alives))
{
logDebug("file: "__FILE__", line: %d, "
"[%s] group_id: %d, old alive server count: %d, "
"new alive server count: %d", __LINE__, cm->module_name,
group->id, old_alives->count, new_alives->count);
push_to_detect_queue(cm, group, new_alives);
fast_mblock_delay_free_object(&cm->sptr_array_allocator, old_alives,
(cm->common_cfg->connect_timeout + cm->common_cfg->
network_timeout) * group->all.count * 4);
return true;
} else {
fast_mblock_free_object(&cm->sptr_array_allocator, new_alives);
return false;
}
}
static int remove_from_alives(SFConnectionManager *cm,
SFCMConnGroupEntry *group, SFCMServerPtrArray *old_alives,
SFCMServerEntry *server)
{
SFCMServerPtrArray *new_alives;
SFCMServerEntry **pp;
SFCMServerEntry **dest;
SFCMServerEntry **end;
new_alives = (SFCMServerPtrArray *)FC_ATOMIC_GET(group->alives);
if (new_alives != old_alives) {
return 0;
}
new_alives = (SFCMServerPtrArray *)fast_mblock_alloc_object(
&cm->sptr_array_allocator);
if (new_alives == NULL) {
return ENOMEM;
}
dest = new_alives->servers;
end = old_alives->servers + old_alives->count;
for (pp=old_alives->servers; pp<end; pp++) {
if (*pp != server) {
*dest++ = *pp;
}
}
new_alives->count = dest - new_alives->servers;
if (alive_array_cas(cm, group, old_alives, new_alives)) {
SFCMServerEntry *master;
master = (SFCMServerEntry *)FC_ATOMIC_GET(group->master);
if (master == server) {
__sync_bool_compare_and_swap(&group->master, master, NULL);
}
}
return 0;
}
static inline ConnectionInfo *make_master_connection(SFConnectionManager *cm,
SFCMConnGroupEntry *group, const bool shared, int *err_no)
{
SFCMServerEntry *master;
ConnectionInfo *conn;
SFCMServerPtrArray *alives;
master = (SFCMServerEntry *)FC_ATOMIC_GET(group->master);
if (master != NULL) {
if ((conn=make_connection(cm, master->addr_array,
shared, err_no)) != NULL)
{
alives = (SFCMServerPtrArray *)FC_ATOMIC_GET(group->alives);
set_connection_params(conn, master, alives);
return conn;
} else {
alives = (SFCMServerPtrArray *)FC_ATOMIC_GET(group->alives);
if (alives != NULL) {
remove_from_alives(cm, group, alives, master);
}
__sync_bool_compare_and_swap(&group->master, master, NULL);
}
}
*err_no = SF_RETRIABLE_ERROR_NO_SERVER;
return NULL;
}
static inline ConnectionInfo *make_readable_connection(SFConnectionManager *cm,
SFCMConnGroupEntry *group, SFCMServerPtrArray *alives,
const int index, const bool shared, int *err_no)
{
ConnectionInfo *conn;
if ((conn=make_connection(cm, alives->servers[index]->
addr_array, shared, err_no)) == NULL)
{
remove_from_alives(cm, group, alives, alives->servers[index]);
} else {
set_connection_params(conn, alives->servers[index], alives);
}
return conn;
}
static ConnectionInfo *get_master_connection(SFConnectionManager *cm,
const int group_index, const bool shared, int *err_no)
{
SFCMConnGroupEntry *group;
ConnectionInfo *conn;
SFNetRetryIntervalContext net_retry_ctx;
int retry_count;
group = cm->groups.entries + group_index;
sf_init_net_retry_interval_context(&net_retry_ctx,
&cm->common_cfg->net_retry_cfg.interval_mm,
&cm->common_cfg->net_retry_cfg.connect);
retry_count = 0;
while (1) {
if ((conn=make_master_connection(cm, group, shared, err_no)) != NULL) {
return conn;
}
/*
logInfo("file: "__FILE__", line: %d, "
"retry_count: %d, interval_ms: %d, data group id: %d, "
"master: %p, alive count: %d, all count: %d", __LINE__,
retry_count, net_retry_ctx.interval_ms, group->id,
FC_ATOMIC_GET(group->master), ((SFCMServerPtrArray *)
FC_ATOMIC_GET(group->alives))->count, group->all.count);
*/
*err_no = get_group_servers(cm, group);
if (*err_no == 0) {
*err_no = SF_RETRIABLE_ERROR_NO_SERVER; //for try again
}
SF_NET_RETRY_CHECK_AND_SLEEP(net_retry_ctx,
cm->common_cfg->net_retry_cfg.
connect.times, ++retry_count, *err_no);
}
logError("file: "__FILE__", line: %d, "
"%s get_master_connection fail, group id: %d, "
"retry count: %d, " "errno: %d", __LINE__,
cm->module_name, group->id, retry_count, *err_no);
return NULL;
}
static ConnectionInfo *get_readable_connection(SFConnectionManager *cm,
const int group_index, const bool shared, int *err_no)
{
SFCMConnGroupEntry *group;
SFCMServerPtrArray *alives;
ConnectionInfo *conn;
SFNetRetryIntervalContext net_retry_ctx;
uint32_t index;
int retry_count;
group = cm->groups.entries + group_index;
if ((cm->common_cfg->read_rule == sf_data_read_rule_master_only) ||
(group->all.count == 1))
{
return get_master_connection(cm, group_index, shared, err_no);
}
sf_init_net_retry_interval_context(&net_retry_ctx,
&cm->common_cfg->net_retry_cfg.interval_mm,
&cm->common_cfg->net_retry_cfg.connect);
retry_count = 0;
while (1) {
alives = (SFCMServerPtrArray *)FC_ATOMIC_GET(group->alives);
if (alives->count > 0) {
index = rand() % alives->count;
if ((conn=make_readable_connection(cm, group, alives,
index, shared, err_no)) != NULL)
{
return conn;
}
}
if (cm->common_cfg->read_rule == sf_data_read_rule_slave_first) {
if ((conn=make_master_connection(cm, group, shared, err_no)) != NULL) {
return conn;
}
}
*err_no = get_group_servers(cm, group);
if (*err_no == 0) {
*err_no = SF_RETRIABLE_ERROR_NO_SERVER; //for try again
}
SF_NET_RETRY_CHECK_AND_SLEEP(net_retry_ctx,
cm->common_cfg->net_retry_cfg.
connect.times, ++retry_count, *err_no);
}
logError("file: "__FILE__", line: %d, "
"%s get_readable_connection fail, retry count: %d, errno: %d",
__LINE__, cm->module_name, retry_count, *err_no);
return NULL;
}
static void release_connection(SFConnectionManager *cm,
ConnectionInfo *conn)
{
SFConnectionParameters *cparam;
cparam = (SFConnectionParameters *)conn->args;
if (cparam->cm.sentry != NULL) {
cparam->cm.sentry = NULL;
cparam->cm.old_alives = NULL;
}
conn_pool_close_connection_ex(&cm->cpool, conn, false);
}
static void close_connection(SFConnectionManager *cm, ConnectionInfo *conn)
{
SFConnectionParameters *cparam;
SFCMServerEntry *server;
SFCMConnGroupEntry *group;
cparam = (SFConnectionParameters *)conn->args;
if (cparam->cm.sentry != NULL) {
server = cparam->cm.sentry;
group = cm->groups.entries + server->group_index;
if (cparam->cm.old_alives != NULL) {
remove_from_alives(cm, group, cparam->cm.old_alives, server);
cparam->cm.old_alives = NULL;
}
__sync_bool_compare_and_swap(&group->master, server, NULL);
cparam->cm.sentry = NULL;
}
conn_pool_close_connection_ex(&cm->cpool, conn, true);
}
static ConnectionInfo *get_leader_connection(SFConnectionManager *cm,
FCServerInfo *server, const bool shared, int *err_no)
{
ConnectionInfo *conn;
SFClientServerEntry leader;
SFNetRetryIntervalContext net_retry_ctx;
char formatted_ip[FORMATTED_IP_SIZE];
int i;
int connect_fails;
sf_init_net_retry_interval_context(&net_retry_ctx,
&cm->common_cfg->net_retry_cfg.interval_mm,
&cm->common_cfg->net_retry_cfg.connect);
i = connect_fails = 0;
while (1) {
do {
if ((conn=get_server_connection(cm, server,
shared, err_no)) == NULL)
{
connect_fails++;
break;
}
if ((*err_no=sf_proto_get_leader(conn, cm->module_name, cm->
common_cfg->network_timeout, &leader)) != 0)
{
close_connection(cm, conn);
break;
}
if (FC_CONNECTION_SERVER_EQUAL1(*conn, leader.conn)) {
return conn;
}
release_connection(cm, conn);
if ((conn=get_spec_connection(cm, &leader.conn,
shared, err_no)) == NULL)
{
if (cm->server_cfg != NULL) {
FCServerInfo *ls;
if ((ls=fc_server_get_by_id(cm->server_cfg,
leader.server_id)) != NULL)
{
if (ls->group_addrs[cm->server_group_index].
address_array.count > 1)
{
if ((conn=get_server_connection(cm, ls,
shared, err_no)) != NULL)
{
return conn;
}
}
}
}
break;
}
return conn;
} while (0);
if (connect_fails == 2) {
break;
}
SF_NET_RETRY_CHECK_AND_SLEEP(net_retry_ctx,
cm->common_cfg->net_retry_cfg.
connect.times, ++i, *err_no);
}
format_ip_address(server->group_addrs[cm->server_group_index].
address_array.addrs[0]->conn.ip_addr, formatted_ip);
logWarning("file: "__FILE__", line: %d, "
"%s get_leader_connection fail, server id: %d, %s:%u, errno: %d",
__LINE__, cm->module_name, server->id, formatted_ip,
server->group_addrs[cm->server_group_index].address_array.
addrs[0]->conn.port, *err_no);
return NULL;
}
const struct sf_connection_parameters *sf_cm_get_connection_params(
SFConnectionManager *cm, ConnectionInfo *conn)
{
return (SFConnectionParameters *)conn->args;
}
int sf_cm_validate_connection_callback(ConnectionInfo *conn, void *args)
{
SFConnectionManager *cm;
SFResponseInfo response;
int result;
cm = (SFConnectionManager *)args;
if ((result=sf_active_test(conn, &response, cm->common_cfg->
network_timeout)) != 0)
{
sf_log_network_error(&response, conn, cm->module_name, result);
}
return result;
}
static int init_group_array(SFConnectionManager *cm,
SFCMConnGroupArray *garray, const int group_count)
{
int bytes;
bytes = sizeof(SFCMConnGroupEntry) * group_count;
garray->entries = (SFCMConnGroupEntry *)fc_malloc(bytes);
if (garray->entries == NULL) {
return ENOMEM;
}
memset(garray->entries, 0, bytes);
garray->count = group_count;
return 0;
}
int sf_connection_manager_init_ex(SFConnectionManager *cm,
const char *module_name, const SFClientCommonConfig *common_cfg,
const int group_count, const int server_group_index,
const int server_count, const int max_count_per_entry,
const int max_idle_time, fc_connection_callback_func
connect_done_callback, void *args, FCServerConfig *server_cfg,
const bool bg_thread_enabled)
{
struct {
ConnectionExtraParams holder;
ConnectionExtraParams *ptr;
} extra_params;
FCServerGroupInfo *server_group;
int htable_capacity;
int result;
if (server_count <= 4) {
htable_capacity = 16;
} else if (server_count <= 16) {
htable_capacity = 64;
} else if (server_count <= 32) {
htable_capacity = 128;
} else if (server_count < 64) {
htable_capacity = 256;
} else {
htable_capacity = 4 * server_count;
}
if ((server_group=fc_server_get_group_by_index(server_cfg,
server_group_index)) == NULL)
{
return ENOENT;
}
if (server_group->comm_type == fc_comm_type_sock) {
extra_params.ptr = NULL;
} else {
if ((result=conn_pool_set_rdma_extra_params(&extra_params.holder,
server_cfg, server_group_index)) != 0)
{
return result;
}
extra_params.ptr = &extra_params.holder;
}
if ((result=conn_pool_init_ex1(&cm->cpool, common_cfg->connect_timeout,
max_count_per_entry, max_idle_time, htable_capacity,
connect_done_callback, args,
sf_cm_validate_connection_callback, cm,
sizeof(SFConnectionParameters),
extra_params.ptr)) != 0)
{
return result;
}
if ((result=init_group_array(cm, &cm->groups, group_count)) != 0) {
return result;
}
if (bg_thread_enabled) {
if ((result=common_blocked_queue_init(&cm->
alive_detect.queue)) != 0)
{
return result;
}
}
cm->server_group_index = server_group_index;
cm->module_name = module_name;
cm->common_cfg = common_cfg;
cm->server_cfg = server_cfg;
cm->alive_detect.bg_thread_enabled = bg_thread_enabled;
cm->max_servers_per_group = 0;
cm->extra = NULL;
cm->exclude_server_id = 0;
cm->ops.get_connection = get_connection;
cm->ops.get_server_connection = get_server_connection;
cm->ops.get_spec_connection = get_spec_connection;
cm->ops.get_master_connection = get_master_connection;
cm->ops.get_readable_connection = get_readable_connection;
cm->ops.get_leader_connection = get_leader_connection;
cm->ops.release_connection = release_connection;
cm->ops.close_connection = close_connection;
cm->ops.get_connection_params = sf_cm_get_connection_params;
return 0;
}
int sf_connection_manager_add(SFConnectionManager *cm, const int group_id,
FCServerInfo **servers, const int count)
{
SFCMConnGroupEntry *group;
FCServerInfo **server;
FCServerInfo **end;
SFCMServerEntry *entry;
int group_index;
if (group_id < 1) {
logError("file: "__FILE__", line: %d, "
"invalid group id: %d < 1",
__LINE__, group_id);
return EINVAL;
}
if (group_id > cm->groups.count) {
logError("file: "__FILE__", line: %d, "
"invalid group id: %d > group count: %d",
__LINE__, group_id, cm->groups.count);
return EINVAL;
}
group_index = group_id - 1;
group = cm->groups.entries + group_index;
group->id = group_id;
group->all.servers = (SFCMServerEntry *)fc_malloc(
sizeof(SFCMServerEntry) * count);
if (group->all.servers == NULL) {
return ENOMEM;
}
group->all.count = count;
end = servers + count;
for (entry=group->all.servers, server=servers;
server<end; entry++, server++)
{
entry->id = (*server)->id;
entry->group_index = group_index;
entry->addr_array = &(*server)->group_addrs[
cm->server_group_index].address_array;
}
if (count > cm->max_servers_per_group) {
cm->max_servers_per_group = count;
}
return 0;
}
static SFCMServerEntry *get_server_by_id(SFCMConnGroupEntry *group,
const int server_id)
{
SFCMServerEntry *server;
SFCMServerEntry *end;
end = group->all.servers + group->all.count;
for (server=group->all.servers; server<end; server++) {
if (server->id == server_id) {
return server;
}
}
return NULL;
}
static SFCMServerPtrArray *convert_to_sptr_array(SFConnectionManager *cm,
SFCMConnGroupEntry *group, SFGroupServerArray *sarray, int *err_no)
{
SFCMServerPtrArray *alives;
SFGroupServerInfo *server;
SFGroupServerInfo *end;
SFCMServerEntry *sentry;
if (sarray->count > cm->max_servers_per_group) {
logError("file: "__FILE__", line: %d, "
"group id: %d, response server count: %d > "
"max count: %d!", __LINE__, group->id,
sarray->count, cm->max_servers_per_group);
*err_no = EOVERFLOW;
return NULL;
}
alives = (SFCMServerPtrArray *)fast_mblock_alloc_object(
&cm->sptr_array_allocator);
if (alives == NULL) {
*err_no = ENOMEM;
return NULL;
}
alives->count = 0;
end = sarray->servers + sarray->count;
for (server=sarray->servers; server<end; server++) {
if ((sentry=get_server_by_id(group, server->id)) == NULL) {
logError("file: "__FILE__", line: %d, "
"group id: %d, response server count: %d > "
"max count: %d!", __LINE__, group->id,
sarray->count, cm->max_servers_per_group);
*err_no = ENOENT;
fast_mblock_free_object(&cm->sptr_array_allocator, alives);
return NULL;
}
if (server->is_master) {
FC_ATOMIC_SET(group->master, sentry);
if (cm->common_cfg->read_rule != sf_data_read_rule_slave_first) {
alives->servers[alives->count++] = sentry;
}
} else if (server->is_active) {
alives->servers[alives->count++] = sentry;
}
}
*err_no = 0;
return alives;
}
static int sptr_array_compare(SFCMServerPtrArray *a1,
SFCMServerPtrArray *a2)
{
int sub;
int i;
if ((sub=(a1->count - a2->count)) != 0) {
return sub;
}
for (i = 0; i < a1->count; i++) {
if ((sub=(a1->servers[i]->id - a2->servers[i]->id)) != 0) {
return sub;
}
}
return 0;
}
static int do_get_group_servers(SFConnectionManager *cm,
SFCMConnGroupEntry *group, ConnectionInfo *conn)
{
#define MAX_GROUP_SERVER_COUNT 128
int result;
SFGroupServerInfo fixed_servers[MAX_GROUP_SERVER_COUNT];
SFGroupServerArray sarray;
SFCMServerPtrArray *old_alives;
SFCMServerPtrArray *new_alives;
sarray.alloc = MAX_GROUP_SERVER_COUNT;
sarray.count = 0;
sarray.servers = fixed_servers;
if ((result=sf_proto_get_group_servers(conn, cm->module_name, cm->
common_cfg->network_timeout, group->id, &sarray)) != 0)
{
return result;
}
if ((new_alives=convert_to_sptr_array(cm, group,
&sarray, &result)) == NULL)
{
return result;
}
old_alives = (SFCMServerPtrArray *)FC_ATOMIC_GET(group->alives);
if (sptr_array_compare(old_alives, new_alives) == 0) {
push_to_detect_queue(cm, group, new_alives);
fast_mblock_free_object(&cm->sptr_array_allocator, new_alives);
return 0;
}
alive_array_cas(cm, group, old_alives, new_alives);
return 0;
}
static int get_group_servers_by_active(SFConnectionManager *cm,
SFCMConnGroupEntry *group)
{
const bool shared = true;
SFCMServerPtrArray *alives;
SFCMServerEntry **server;
SFCMServerEntry **end;
ConnectionInfo *conn;
int result;
result = ENOENT;
alives = (SFCMServerPtrArray *)FC_ATOMIC_GET(group->alives);
if (alives->count == 0) {
return result;
}
end = alives->servers + alives->count;
for (server=alives->servers; server<end; server++) {
if ((conn=make_connection(cm, (*server)->addr_array,
shared, &result)) == NULL)
{
continue;
}
result = do_get_group_servers(cm, group, conn);
conn_pool_close_connection_ex(&cm->cpool, conn, result != 0);
if (result == 0) {
return 0;
}
}
return result;
}
static int get_group_servers_by_all(SFConnectionManager *cm,
SFCMConnGroupEntry *group)
{
const bool shared = true;
SFCMServerEntry *server;
SFCMServerEntry *end;
ConnectionInfo *conn;
int result;
result = ENOENT;
if (group->all.count == 0) {
return result;
}
end = group->all.servers + group->all.count;
for (server=group->all.servers; server<end; server++) {
if (server->id == cm->exclude_server_id) {
continue;
}
if ((conn=make_connection(cm, server->addr_array,
shared, &result)) == NULL)
{
continue;
}
result = do_get_group_servers(cm, group, conn);
conn_pool_close_connection_ex(&cm->cpool, conn, result != 0);
if (result == 0) {
return 0;
}
}
return result;
}
static int get_group_servers(SFConnectionManager *cm,
SFCMConnGroupEntry *group)
{
int result;
if ((result=get_group_servers_by_active(cm, group)) == 0) {
return 0;
}
return get_group_servers_by_all(cm, group);
}
static void deal_nodes(SFConnectionManager *cm,
struct common_blocked_node *node)
{
SFCMConnGroupEntry *group;
SFCMServerPtrArray *alives;
do {
group = (SFCMConnGroupEntry *)node->data;
__sync_bool_compare_and_swap(&group->in_queue, 1, 0);
alives = (SFCMServerPtrArray *)FC_ATOMIC_GET(group->alives);
if (alives->count < group->all.count) {
logDebug("file: "__FILE__", line: %d, "
"[%s] group_id: %d, alive server count: %d, "
"all server count: %d", __LINE__, cm->module_name,
group->id, alives->count, group->all.count);
if (get_group_servers(cm, group) != 0) {
push_to_detect_queue(cm, group, (SFCMServerPtrArray *)
FC_ATOMIC_GET(group->alives));
}
}
node = node->next;
} while (node != NULL);
}
static void *connection_manager_thread_func(void *arg)
{
SFConnectionManager *cm;
struct common_blocked_node *head;
#ifdef OS_LINUX
prctl(PR_SET_NAME, "cm-alive-detect");
#endif
cm = (SFConnectionManager *)arg;
logDebug("file: "__FILE__", line: %d, "
"[%s] connection manager thread start",
__LINE__, cm->module_name);
while (1) {
sleep(1);
if ((head=common_blocked_queue_pop_all_nodes(&cm->
alive_detect.queue)) == NULL)
{
continue;
}
deal_nodes(cm, head);
common_blocked_queue_free_all_nodes(&cm->alive_detect.queue, head);
}
return NULL;
}
static int sptr_array_alloc_init(void *element, void *args)
{
SFCMServerPtrArray *sptr_array;
sptr_array = (SFCMServerPtrArray *)element;
sptr_array->servers = (SFCMServerEntry **)(sptr_array + 1);
return 0;
}
int sf_connection_manager_prepare(SFConnectionManager *cm)
{
int result;
int element_size;
SFCMConnGroupEntry *group;
SFCMConnGroupEntry *end;
SFCMServerPtrArray *sptr_array;
element_size = sizeof(SFCMServerPtrArray) +
sizeof(SFCMServerEntry *) * cm->max_servers_per_group;
if ((result=fast_mblock_init_ex1(&cm->sptr_array_allocator,
"server-ptr-array", element_size, 4 * 1024, 0,
sptr_array_alloc_init, NULL, true)) != 0)
{
return result;
}
end = cm->groups.entries + cm->groups.count;
for (group=cm->groups.entries; group<end; group++) {
if (group->all.count == 0) {
logError("file: "__FILE__", line: %d, "
"group id: %d, no servers!",
__LINE__, group->id);
return ENOENT;
}
sptr_array = (SFCMServerPtrArray *)fast_mblock_alloc_object(
&cm->sptr_array_allocator);
if (sptr_array == NULL) {
return ENOMEM;
}
__sync_bool_compare_and_swap(&group->alives, NULL, sptr_array);
}
return 0;
}
int sf_connection_manager_start(SFConnectionManager *cm)
{
pthread_t tid;
if (cm->alive_detect.bg_thread_enabled) {
return fc_create_thread(&tid, connection_manager_thread_func,
cm, SF_G_THREAD_STACK_SIZE);
} else {
return 0;
}
}

src/sf_connection_manager.h (new file, 173 lines)

@ -0,0 +1,173 @@
/*
* Copyright (c) 2020 YuQing <384681@qq.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
//sf_connection_manager.h
#ifndef _SF_CONNECTION_MANAGER_H
#define _SF_CONNECTION_MANAGER_H
#include "fastcommon/common_blocked_queue.h"
#include "fastcommon/server_id_func.h"
#include "fastcommon/connection_pool.h"
#include "sf_types.h"
struct sf_connection_manager;
typedef ConnectionInfo *(*sf_get_connection_func)(
struct sf_connection_manager *cm, const int group_index,
const bool shared, int *err_no);
typedef ConnectionInfo *(*sf_get_server_connection_func)(
struct sf_connection_manager *cm,
FCServerInfo *server, const bool shared, int *err_no);
typedef ConnectionInfo *(*sf_get_spec_connection_func)(
struct sf_connection_manager *cm,
const ConnectionInfo *target,
const bool shared, int *err_no);
typedef void (*sf_release_connection_func)(
struct sf_connection_manager *cm, ConnectionInfo *conn);
typedef void (*sf_close_connection_func)(
struct sf_connection_manager *cm, ConnectionInfo *conn);
typedef const struct sf_connection_parameters * (*sf_get_connection_parameters)(
struct sf_connection_manager *cm, ConnectionInfo *conn);
typedef struct sf_cm_server_entry {
int id;
int group_index;
FCAddressPtrArray *addr_array;
} SFCMServerEntry;
typedef struct sf_cm_server_array {
SFCMServerEntry *servers;
int count;
} SFCMServerArray;
typedef struct sf_cm_server_ptr_array {
SFCMServerEntry **servers;
int count;
} SFCMServerPtrArray;
typedef struct sf_cm_conn_group_entry {
int id;
volatile char in_queue; //whether in the alive detect queue
SFCMServerArray all;
volatile SFCMServerEntry *master;
volatile SFCMServerPtrArray *alives;
} SFCMConnGroupEntry;
typedef struct sf_cm_conn_group_array {
SFCMConnGroupEntry *entries;
int count;
} SFCMConnGroupArray;
typedef struct sf_cm_operations {
/* get the specified connection by ip and port */
sf_get_spec_connection_func get_spec_connection;
/* get one connection of the configured servers by data group */
sf_get_connection_func get_connection;
/* get one connection of the server */
sf_get_server_connection_func get_server_connection;
/* get the master connection from the server */
sf_get_connection_func get_master_connection;
/* get one readable connection from the server */
sf_get_connection_func get_readable_connection;
/* get the leader connection from the server */
sf_get_server_connection_func get_leader_connection;
/* push back to the connection pool when a connection pool is used */
sf_release_connection_func release_connection;
/* disconnect the connection on network error */
sf_close_connection_func close_connection;
sf_get_connection_parameters get_connection_params;
} SFCMOperations;
typedef struct sf_connection_manager {
int exclude_server_id; //for server side
uint16_t max_servers_per_group;
uint8_t server_group_index;
struct {
bool bg_thread_enabled;
struct common_blocked_queue queue;
} alive_detect;
const char *module_name;
const SFClientCommonConfig *common_cfg;
SFCMConnGroupArray groups;
ConnectionPool cpool;
struct fast_mblock_man sptr_array_allocator; //element: SFCMServerPtrArray
SFCMOperations ops;
FCServerConfig *server_cfg;
void *extra; //for simple connection manager
} SFConnectionManager;
#ifdef __cplusplus
extern "C" {
#endif
int sf_connection_manager_init_ex(SFConnectionManager *cm,
const char *module_name, const SFClientCommonConfig *common_cfg,
const int group_count, const int server_group_index,
const int server_count, const int max_count_per_entry,
const int max_idle_time, fc_connection_callback_func
connect_done_callback, void *args, FCServerConfig *server_cfg,
const bool bg_thread_enabled);
static inline int sf_connection_manager_init(SFConnectionManager *cm,
const char *module_name, const SFClientCommonConfig *common_cfg,
const int group_count, const int server_group_index,
const int server_count, const int max_count_per_entry,
const int max_idle_time, fc_connection_callback_func
connect_done_callback, void *args)
{
const bool bg_thread_enabled = true;
return sf_connection_manager_init_ex(cm, module_name,
common_cfg, group_count, server_group_index,
server_count, max_count_per_entry, max_idle_time,
connect_done_callback, args, NULL, bg_thread_enabled);
}
static inline void sf_connection_manager_set_exclude_server_id(
SFConnectionManager *cm, const int exclude_server_id)
{
cm->exclude_server_id = exclude_server_id;
}
int sf_connection_manager_add(SFConnectionManager *cm, const int group_id,
FCServerInfo **servers, const int count);
int sf_connection_manager_prepare(SFConnectionManager *cm);
//start thread
int sf_connection_manager_start(SFConnectionManager *cm);
int sf_cm_validate_connection_callback(ConnectionInfo *conn, void *args);
const struct sf_connection_parameters *sf_cm_get_connection_params(
SFConnectionManager *cm, ConnectionInfo *conn);
#ifdef __cplusplus
}
#endif
#endif
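A minimal client-side sketch of the API declared above (illustrative, not from the repository): the module name "demo", the group/server counts, the idle time, and the NULL connect-done callback are placeholders, and common_cfg / server_cfg are assumed to have been loaded elsewhere.
#include "sf_connection_manager.h"

static int demo_setup_and_fetch(SFConnectionManager *cm,
        const SFClientCommonConfig *common_cfg, FCServerConfig *server_cfg,
        FCServerInfo **servers, const int server_count)
{
    const int group_count = 1;            /* one data group for brevity */
    const int server_group_index = 0;
    const int max_count_per_entry = 0;    /* assumed: 0 means no limit */
    const int max_idle_time = 3600;
    const bool bg_thread_enabled = true;
    ConnectionInfo *conn;
    int result;
    int err_no;

    if ((result=sf_connection_manager_init_ex(cm, "demo", common_cfg,
            group_count, server_group_index, server_count,
            max_count_per_entry, max_idle_time, NULL, NULL,
            server_cfg, bg_thread_enabled)) != 0)
    {
        return result;
    }

    /* register the servers of data group 1, allocate the alive arrays,
     * then start the background alive-detect thread */
    if ((result=sf_connection_manager_add(cm, 1,
                    servers, server_count)) != 0 ||
        (result=sf_connection_manager_prepare(cm)) != 0 ||
        (result=sf_connection_manager_start(cm)) != 0)
    {
        return result;
    }

    /* data group 1 has group index 0 */
    if ((conn=cm->ops.get_master_connection(cm, 0, true, &err_no)) == NULL) {
        return err_no;
    }
    /* ... send a request on conn ... */
    cm->ops.release_connection(cm, conn);  /* close_connection on net error */
    return 0;
}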


@ -20,13 +20,17 @@
#include "fastcommon/sockopt.h"
#define SF_DEF_THREAD_STACK_SIZE (64 * 1024)
#define SF_DEFAULT_CONNECT_TIMEOUT 10
#define SF_DEFAULT_NETWORK_TIMEOUT 60
#define SF_DEF_THREAD_STACK_SIZE (256 * 1024)
#define SF_MIN_THREAD_STACK_SIZE (64 * 1024)
#define SF_MAX_THREAD_STACK_SIZE (2 * 1024 * 1024 * 1024LL)
#define SF_DEF_MAX_PACKAGE_SIZE (64 * 1024)
#define SF_DEF_MAX_PACKAGE_SIZE (256 * 1024)
#define SF_DEF_MIN_BUFF_SIZE (64 * 1024)
#define SF_DEF_MAX_BUFF_SIZE (64 * 1024)
#define SF_DEF_MAX_BUFF_SIZE (256 * 1024)
#define SF_MAX_NETWORK_BUFF_SIZE (2 * 1024 * 1024 * 1024LL)
#define SF_DEF_SYNC_LOG_BUFF_INTERVAL 1
#define SF_NIO_STAGE_NONE 0
#define SF_NIO_STAGE_INIT 1 //set ioevent
@ -40,6 +44,10 @@
#define SF_NIO_TASK_STAGE_FETCH(task) task->nio_stages.current
#define SF_SESSION_ERROR_NOT_EXIST 9992
#define SF_CLUSTER_ERROR_NOT_LEADER 9995
#define SF_CLUSTER_ERROR_LEADER_VERSION_INCONSISTENT 9996
#define SF_CLUSTER_ERROR_BINLOG_MISSED 9997
#define SF_CLUSTER_ERROR_BINLOG_INCONSISTENT 9998
#define SF_CLUSTER_ERROR_LEADER_INCONSISTENT 9999
#define SF_CLUSTER_ERROR_MASTER_INCONSISTENT SF_CLUSTER_ERROR_LEADER_INCONSISTENT
@ -57,8 +65,13 @@
#define SF_ERROR_EBUSY 8816
#define SF_ERROR_EINVAL 8822
#define SF_ERROR_EAGAIN 8835
#define SF_ERROR_EINPROGRESS 8836
#define SF_ERROR_EOVERFLOW 8884
#define SF_ERROR_EOPNOTSUPP 8895
#define SF_ERROR_ENOLINK 8867
#define SF_ERROR_ENODATA 8861
#define SF_ERROR_ENOTEMPTY 8839
#define SF_ERROR_ELOOP 8840
#define SF_FORCE_CLOSE_CONNECTION_ERROR_MIN SF_RETRIABLE_ERROR_NOT_MASTER
#define SF_FORCE_CLOSE_CONNECTION_ERROR_MAX SF_RETRIABLE_ERROR_MAX
@ -83,6 +96,8 @@
#define SF_BINLOG_SOURCE_USER 'U' //by user call
#define SF_BINLOG_SOURCE_REPLAY 'R' //by binlog replay
#define SF_LOG_SCHEDULE_ENTRIES_COUNT 3
#ifdef __cplusplus
extern "C" {
#endif

src/sf_file_writer.c (new file, 707 lines)

@ -0,0 +1,707 @@
/*
* Copyright (c) 2020 YuQing <384681@qq.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <limits.h>
#include <fcntl.h>
#include <pthread.h>
#include "fastcommon/logger.h"
#include "fastcommon/sockopt.h"
#include "fastcommon/shared_func.h"
#include "fastcommon/pthread_func.h"
#include "fastcommon/sched_thread.h"
#include "sf_global.h"
#include "sf_func.h"
#include "sf_file_writer.h"
#define BINLOG_INDEX_ITEM_START_INDEX_STR "start_index"
#define BINLOG_INDEX_ITEM_START_INDEX_LEN \
(sizeof(BINLOG_INDEX_ITEM_START_INDEX_STR) - 1)
#define BINLOG_INDEX_ITEM_CURRENT_WRITE_STR "current_write"
#define BINLOG_INDEX_ITEM_CURRENT_WRITE_LEN \
(sizeof(BINLOG_INDEX_ITEM_CURRENT_WRITE_STR) - 1)
#define BINLOG_INDEX_ITEM_CURRENT_COMPRESS_STR "current_compress"
#define BINLOG_INDEX_ITEM_CURRENT_COMPRESS_LEN \
(sizeof(BINLOG_INDEX_ITEM_CURRENT_COMPRESS_STR) - 1)
static inline void sf_file_writer_get_binlog_filename(SFFileWriterInfo *writer)
{
sf_file_writer_get_filename_ex(
writer->cfg.data_path, writer->cfg.subdir_name,
writer->cfg.file_prefix, writer->binlog.last_index,
writer->file.name.str, writer->file.name.size);
}
static inline void sf_file_writer_get_index_filename_ex(
const char *data_path, const char *subdir_name,
const char *file_prefix, const int file_prefix_len,
char *filename, const int size)
{
#define INDEX_FILENAME_AFFIX_STR "_index.dat"
#define INDEX_FILENAME_AFFIX_LEN (sizeof(INDEX_FILENAME_AFFIX_STR) - 1)
char *p;
int data_path_len;
int subdir_name_len;
data_path_len = strlen(data_path);
subdir_name_len = strlen(subdir_name);
if (data_path_len + 1 + subdir_name_len + 1 + file_prefix_len +
INDEX_FILENAME_AFFIX_LEN >= size)
{
*filename = '\0';
return;
}
memcpy(filename, data_path, data_path_len);
p = filename + data_path_len;
*p++ = '/';
memcpy(p, subdir_name, subdir_name_len);
p += subdir_name_len;
*p++ = '/';
memcpy(p, file_prefix, file_prefix_len);
p += file_prefix_len;
memcpy(p, INDEX_FILENAME_AFFIX_STR, INDEX_FILENAME_AFFIX_LEN);
p += INDEX_FILENAME_AFFIX_LEN;
*p = '\0';
}
const char *sf_file_writer_get_index_filename(const char *data_path,
const char *subdir_name, char *filename, const int size)
{
sf_file_writer_get_index_filename_ex(data_path, subdir_name,
SF_BINLOG_FILE_PREFIX_STR, SF_BINLOG_FILE_PREFIX_LEN,
filename, size);
return filename;
}
int sf_file_writer_write_to_binlog_index_file_ex(const char *data_path,
const char *subdir_name, const char *file_prefix,
const int start_index, const int last_index,
const int compress_index)
{
char filename[PATH_MAX];
char buff[256];
char *p;
int result;
int len;
sf_file_writer_get_index_filename_ex(data_path, subdir_name, file_prefix,
strlen(file_prefix), filename, sizeof(filename));
p = buff;
memcpy(p, BINLOG_INDEX_ITEM_START_INDEX_STR,
BINLOG_INDEX_ITEM_START_INDEX_LEN);
p += BINLOG_INDEX_ITEM_START_INDEX_LEN;
*p++ = '=';
p += fc_itoa(start_index, p);
*p++ = '\n';
memcpy(p, BINLOG_INDEX_ITEM_CURRENT_WRITE_STR,
BINLOG_INDEX_ITEM_CURRENT_WRITE_LEN);
p += BINLOG_INDEX_ITEM_CURRENT_WRITE_LEN;
*p++ = '=';
p += fc_itoa(last_index, p);
*p++ = '\n';
memcpy(p, BINLOG_INDEX_ITEM_CURRENT_COMPRESS_STR,
BINLOG_INDEX_ITEM_CURRENT_COMPRESS_LEN);
p += BINLOG_INDEX_ITEM_CURRENT_COMPRESS_LEN;
*p++ = '=';
p += fc_itoa(compress_index, p);
*p++ = '\n';
len = p - buff;
if ((result=safeWriteToFile(filename, buff, len)) != 0) {
logError("file: "__FILE__", line: %d, "
"write to file \"%s\" fail, errno: %d, error info: %s",
__LINE__, filename, result, STRERROR(result));
}
return result;
}
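/* Example (illustrative paths and values, not part of the source): the call
 * below produces /opt/data/mybinlog/binlog_index.dat containing three
 * INI-style lines:
 *   start_index=0
 *   current_write=5
 *   current_compress=2
 */
static inline int demo_write_index_file(void)
{
    return sf_file_writer_write_to_binlog_index_file_ex(
            "/opt/data", "mybinlog", "binlog", 0, 5, 2);
}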
static inline int write_to_binlog_index_file(SFFileWriterInfo *writer)
{
return sf_file_writer_write_to_binlog_index_file_ex(
writer->cfg.data_path, writer->cfg.subdir_name,
writer->cfg.file_prefix, writer->binlog.start_index,
writer->binlog.last_index, writer->binlog.compress_index);
}
static int get_binlog_info_from_file(const char *data_path,
const char *subdir_name, int *start_index,
int *last_index, int *compress_index)
{
char full_filename[PATH_MAX];
IniContext ini_context;
int result;
sf_file_writer_get_index_filename_ex(data_path, subdir_name,
SF_BINLOG_FILE_PREFIX_STR, SF_BINLOG_FILE_PREFIX_LEN,
full_filename, sizeof(full_filename));
if (access(full_filename, F_OK) != 0) {
return errno != 0 ? errno : EPERM;
}
if ((result=iniLoadFromFile(full_filename, &ini_context)) != 0) {
logError("file: "__FILE__", line: %d, "
"load from file \"%s\" fail, error code: %d",
__LINE__, full_filename, result);
return result;
}
*start_index = iniGetIntValue(NULL,
BINLOG_INDEX_ITEM_START_INDEX_STR,
&ini_context, 0);
*last_index = iniGetIntValue(NULL,
BINLOG_INDEX_ITEM_CURRENT_WRITE_STR,
&ini_context, 0);
*compress_index = iniGetIntValue(NULL,
BINLOG_INDEX_ITEM_CURRENT_COMPRESS_STR,
&ini_context, 0);
iniFreeContext(&ini_context);
return 0;
}
int sf_file_writer_get_binlog_indexes(const char *data_path,
const char *subdir_name, int *start_index, int *last_index)
{
int result;
int compress_index;
result = get_binlog_info_from_file(data_path, subdir_name,
start_index, last_index, &compress_index);
if (result == ENOENT) {
*start_index = *last_index = 0;
return 0;
} else {
return result;
}
}
static inline int get_binlog_index_from_file(SFFileWriterInfo *writer)
{
int result;
result = get_binlog_info_from_file(writer->cfg.data_path,
writer->cfg.subdir_name, &writer->binlog.start_index,
&writer->binlog.last_index, &writer->binlog.compress_index);
if (result == ENOENT) {
writer->binlog.start_index = 0;
writer->binlog.last_index = 0;
writer->binlog.compress_index = 0;
if (writer->cfg.file_rotate_size > 0) {
return write_to_binlog_index_file(writer);
} else {
return 0;
}
} else {
return result;
}
}
static int open_writable_binlog(SFFileWriterInfo *writer)
{
if (writer->file.fd >= 0) {
close(writer->file.fd);
}
sf_file_writer_get_binlog_filename(writer);
writer->file.fd = open(writer->file.name.str, O_WRONLY |
O_CREAT | O_APPEND | O_CLOEXEC, 0644);
if (writer->file.fd < 0) {
logError("file: "__FILE__", line: %d, "
"open file \"%s\" fail, "
"errno: %d, error info: %s",
__LINE__, writer->file.name.str,
errno, STRERROR(errno));
return errno != 0 ? errno : EACCES;
}
writer->file.size = lseek(writer->file.fd, 0, SEEK_END);
if (writer->file.size < 0) {
logError("file: "__FILE__", line: %d, "
"lseek file \"%s\" fail, "
"errno: %d, error info: %s",
__LINE__, writer->file.name.str,
errno, STRERROR(errno));
return errno != 0 ? errno : EIO;
}
return 0;
}
static int open_next_binlog(SFFileWriterInfo *writer)
{
sf_file_writer_get_binlog_filename(writer);
if (access(writer->file.name.str, F_OK) == 0) {
char bak_filename[PATH_MAX];
char date_str[32];
formatDatetime(g_current_time, "%Y%m%d%H%M%S",
date_str, sizeof(date_str));
fc_combine_two_strings(writer->file.name.str,
date_str, '.', bak_filename);
if (rename(writer->file.name.str, bak_filename) == 0) {
logWarning("file: "__FILE__", line: %d, "
"binlog file %s exist, rename to %s",
__LINE__, writer->file.name.str, bak_filename);
} else {
logError("file: "__FILE__", line: %d, "
"rename binlog %s to backup %s fail, "
"errno: %d, error info: %s",
__LINE__, writer->file.name.str, bak_filename,
errno, STRERROR(errno));
return errno != 0 ? errno : EPERM;
}
}
return open_writable_binlog(writer);
}
static int do_write_to_file(SFFileWriterInfo *writer,
char *buff, const int len)
{
int result;
if (fc_safe_write(writer->file.fd, buff, len) != len) {
result = errno != 0 ? errno : EIO;
logError("file: "__FILE__", line: %d, "
"write to binlog file \"%s\" fail, "
"errno: %d, error info: %s",
__LINE__, writer->file.name.str,
result, STRERROR(result));
return result;
}
if (writer->cfg.call_fsync) {
if (fsync(writer->file.fd) != 0) {
result = errno != 0 ? errno : EIO;
logError("file: "__FILE__", line: %d, "
"fsync to binlog file \"%s\" fail, errno: %d, "
"error info: %s", __LINE__, writer->file.name.str,
result, STRERROR(result));
return result;
}
}
writer->file.size += len;
if (writer->write_done_callback.func != NULL) {
writer->write_done_callback.func(writer,
writer->write_done_callback.args);
}
return 0;
}
int sf_file_writer_direct_write(SFFileWriterInfo *writer,
char *buff, const int len)
{
int result;
if ((writer->cfg.file_rotate_size <= 0) || (writer->file.size
+ len <= writer->cfg.file_rotate_size))
{
return do_write_to_file(writer, buff, len);
}
writer->binlog.last_index++; //binlog rotate
if ((result=write_to_binlog_index_file(writer)) == 0) {
result = open_next_binlog(writer);
}
if (result != 0) {
logError("file: "__FILE__", line: %d, "
"open binlog file \"%s\" fail",
__LINE__, writer->file.name.str);
return result;
}
return do_write_to_file(writer, buff, len);
}
int sf_file_writer_flush(SFFileWriterInfo *writer)
{
int result;
int len;
len = SF_BINLOG_BUFFER_PRODUCER_DATA_LENGTH(writer->binlog_buffer);
if (len == 0) {
return 0;
}
if ((result=sf_file_writer_direct_write(writer, writer->
binlog_buffer.buff, len)) == 0)
{
if (writer->flags & SF_FILE_WRITER_FLAGS_WANT_DONE_VERSION) {
writer->last_versions.done = writer->last_versions.pending;
}
}
writer->binlog_buffer.data_end = writer->binlog_buffer.buff;
return result;
}
int sf_file_writer_fsync(SFFileWriterInfo *writer)
{
int result;
if ((result=sf_file_writer_flush(writer)) != 0) {
return result;
}
if (fsync(writer->file.fd) == 0) {
return 0;
} else {
result = errno != 0 ? errno : EIO;
logError("file: "__FILE__", line: %d, "
"fsync to binlog file \"%s\" fail, errno: %d, "
"error info: %s", __LINE__, writer->file.name.str,
result, STRERROR(result));
return result;
}
}
int sf_file_writer_get_indexes(SFFileWriterInfo *writer,
int *start_index, int *last_index)
{
int result;
if (writer == NULL) { //for data recovery
*start_index = *last_index = 0;
return 0;
}
if (writer->binlog.last_index < 0) {
if ((result=get_binlog_index_from_file(writer)) != 0) {
*start_index = *last_index = -1;
return result;
}
}
*start_index = writer->binlog.start_index;
*last_index = writer->binlog.last_index;
return 0;
}
int sf_file_writer_deal_versioned_buffer(SFFileWriterInfo *writer,
BufferInfo *buffer, const int64_t version)
{
int result;
if (buffer->length >= writer->binlog_buffer.size / 4) {
if (SF_BINLOG_BUFFER_PRODUCER_DATA_LENGTH(writer->binlog_buffer) > 0) {
if ((result=sf_file_writer_flush(writer)) != 0) {
return result;
}
}
if ((result=sf_file_writer_direct_write(writer, buffer->
buff, buffer->length)) == 0)
{
if (writer->flags & SF_FILE_WRITER_FLAGS_WANT_DONE_VERSION) {
writer->last_versions.pending = version;
writer->last_versions.done = version;
}
}
return result;
}
if (writer->cfg.file_rotate_size > 0 && writer->file.size +
SF_BINLOG_BUFFER_PRODUCER_DATA_LENGTH(writer->binlog_buffer) +
buffer->length > writer->cfg.file_rotate_size)
{
if ((result=sf_file_writer_flush(writer)) != 0) {
return result;
}
} else if (SF_BINLOG_BUFFER_PRODUCER_BUFF_REMAIN(
writer->binlog_buffer) < buffer->length)
{
if ((result=sf_file_writer_flush(writer)) != 0) {
return result;
}
}
if (writer->flags & SF_FILE_WRITER_FLAGS_WANT_DONE_VERSION) {
writer->last_versions.pending = version;
}
memcpy(writer->binlog_buffer.data_end, buffer->buff, buffer->length);
writer->binlog_buffer.data_end += buffer->length;
return 0;
}
int sf_file_writer_save_buffer_ex(SFFileWriterInfo *writer,
const int length, const bool flush)
{
int result;
if (writer->cfg.file_rotate_size > 0 && writer->file.size +
SF_BINLOG_BUFFER_PRODUCER_DATA_LENGTH(writer->binlog_buffer) +
length > writer->cfg.file_rotate_size)
{
if ((result=sf_file_writer_flush(writer)) != 0) {
return result;
}
}
writer->binlog_buffer.data_end += length;
if (flush || SF_BINLOG_BUFFER_PRODUCER_BUFF_REMAIN(writer->
binlog_buffer) < writer->cfg.max_record_size)
{
return sf_file_writer_flush(writer);
} else {
return 0;
}
}
int sf_file_writer_init(SFFileWriterInfo *writer, const char *data_path,
const char *subdir_name, const char *file_prefix,
const int max_record_size, const int buffer_size,
const int64_t file_rotate_size, const bool call_fsync)
{
int result;
int path_len;
bool create;
char filepath[PATH_MAX];
writer->total_count = 0;
writer->last_versions.pending = 0;
writer->last_versions.done = 0;
writer->flags = 0;
sf_file_writer_set_write_done_callback(writer, NULL, NULL);
if ((result=sf_binlog_buffer_init(&writer->
binlog_buffer, buffer_size)) != 0)
{
return result;
}
writer->cfg.max_record_size = max_record_size;
writer->cfg.call_fsync = call_fsync;
writer->cfg.file_rotate_size = file_rotate_size;
writer->cfg.data_path = data_path;
path_len = fc_combine_full_filepath(data_path, subdir_name, filepath);
if ((result=fc_check_mkdir_ex(filepath, 0775, &create)) != 0) {
return result;
}
if (create) {
SF_CHOWN_TO_RUNBY_RETURN_ON_ERROR(filepath);
}
writer->file.fd = -1;
fc_safe_strcpy(writer->cfg.subdir_name, subdir_name);
fc_safe_strcpy(writer->cfg.file_prefix, file_prefix);
writer->file.name.size = path_len + 32;
writer->file.name.str = (char *)fc_malloc(writer->file.name.size);
if (writer->file.name.str == NULL) {
return ENOMEM;
}
if ((result=get_binlog_index_from_file(writer)) != 0) {
return result;
}
if ((result=open_writable_binlog(writer)) != 0) {
return result;
}
return 0;
}
void sf_file_writer_destroy(SFFileWriterInfo *writer)
{
if (writer->file.fd >= 0) {
close(writer->file.fd);
writer->file.fd = -1;
}
if (writer->file.name.str != NULL) {
free(writer->file.name.str);
writer->file.name.str = NULL;
}
sf_binlog_buffer_destroy(&writer->binlog_buffer);
}
int sf_file_writer_set_indexes(SFFileWriterInfo *writer,
const int start_index, const int last_index)
{
int result;
if (writer->binlog.start_index != start_index ||
writer->binlog.last_index != last_index)
{
writer->binlog.start_index = start_index;
writer->binlog.last_index = last_index;
if ((result=write_to_binlog_index_file(writer)) != 0) {
return result;
}
}
return 0;
}
int sf_file_writer_set_binlog_start_index(SFFileWriterInfo *writer,
const int start_index)
{
int result;
if (writer->binlog.start_index != start_index) {
writer->binlog.start_index = start_index;
if ((result=write_to_binlog_index_file(writer)) != 0) {
return result;
}
}
return 0;
}
int sf_file_writer_set_binlog_write_index(SFFileWriterInfo *writer,
const int last_index)
{
int result;
if (writer->binlog.last_index != last_index) {
writer->binlog.last_index = last_index;
if ((result=write_to_binlog_index_file(writer)) != 0) {
return result;
}
}
return open_writable_binlog(writer);
}
int sf_file_writer_get_last_lines(const char *data_path,
const char *subdir_name, const int current_write_index,
char *buff, const int buff_size, int *count, int *length)
{
int result;
int target_count;
int count1;
char filename[PATH_MAX];
string_t lines;
target_count = *count;
sf_file_writer_get_filename(data_path, subdir_name,
current_write_index, filename, sizeof(filename));
if (access(filename, F_OK) == 0) {
if ((result=fc_get_last_lines(filename, buff, buff_size,
&lines, count)) != 0)
{
if (result != ENOENT) {
return result;
}
}
if (*count >= target_count || current_write_index == 0) {
memmove(buff, lines.str, lines.len);
*length = lines.len;
return 0;
}
} else {
result = errno != 0 ? errno : EPERM;
if (result == ENOENT) {
*count = 0;
*length = 0;
return 0;
} else {
logError("file: "__FILE__", line: %d, "
"stat file %s fail, errno: %d, error info: %s",
__LINE__, filename, result, STRERROR(result));
*count = 0;
*length = 0;
return result;
}
}
sf_file_writer_get_filename(data_path, subdir_name,
current_write_index - 1, filename, sizeof(filename));
if (access(filename, F_OK) != 0) {
result = errno != 0 ? errno : EPERM;
if (result == ENOENT) {
memmove(buff, lines.str, lines.len);
*length = lines.len;
return 0;
} else {
logError("file: "__FILE__", line: %d, "
"stat file %s fail, errno: %d, error info: %s",
__LINE__, filename, result, STRERROR(result));
*count = 0;
*length = 0;
return result;
}
}
count1 = target_count - *count;
if ((result=fc_get_last_lines(filename, buff,
buff_size, &lines, &count1)) != 0)
{
*count = 0;
*length = 0;
return result;
}
memmove(buff, lines.str, lines.len);
*length = lines.len;
if (*count == 0) {
*count = count1;
} else {
sf_file_writer_get_filename(data_path, subdir_name,
current_write_index, filename, sizeof(filename));
if ((result=fc_get_first_lines(filename, buff + (*length),
buff_size - (*length), &lines, count)) != 0)
{
*count = 0;
*length = 0;
return result;
}
*count += count1;
*length += lines.len;
}
return 0;
}
int sf_file_writer_get_last_line(const char *data_path,
const char *subdir_name, char *buff,
const int buff_size, int *length)
{
int result;
int last_index;
int count = 1;
if ((result=sf_file_writer_get_binlog_last_index(data_path,
subdir_name, &last_index)) != 0)
{
*length = 0;
return result;
}
return sf_file_writer_get_last_lines(data_path, subdir_name,
last_index, buff, buff_size, &count, length);
}

src/sf_file_writer.h (new file, 323 lines)

@ -0,0 +1,323 @@
/*
* Copyright (c) 2020 YuQing <384681@qq.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
//sf_file_writer.h
#ifndef _SF_FILE_WRITER_H_
#define _SF_FILE_WRITER_H_
#include "fastcommon/fc_queue.h"
#include "sf_types.h"
#define SF_FILE_WRITER_FLAGS_WANT_DONE_VERSION 1
#define SF_BINLOG_SUBDIR_NAME_SIZE 128
#define SF_BINLOG_FILE_PREFIX_STR_SIZE 64
#define SF_BINLOG_DEFAULT_ROTATE_SIZE (1024 * 1024 * 1024)
#define SF_BINLOG_NEVER_ROTATE_FILE 0
#define SF_BINLOG_FILE_PREFIX_STR "binlog"
#define SF_BINLOG_FILE_PREFIX_LEN (sizeof(SF_BINLOG_FILE_PREFIX_STR) - 1)
#define SF_BINLOG_FILE_EXT_LEN 6
#define SF_BINLOG_FILE_EXT_FMT ".%0"FC_MACRO_TOSTRING(SF_BINLOG_FILE_EXT_LEN)"d"
struct sf_file_writer_info;
typedef void (*sf_file_write_done_callback)(
struct sf_file_writer_info *writer, void *args);
typedef struct sf_file_writer_info {
struct {
const char *data_path;
char subdir_name[SF_BINLOG_SUBDIR_NAME_SIZE];
char file_prefix[SF_BINLOG_FILE_PREFIX_STR_SIZE];
int64_t file_rotate_size;
int max_record_size;
bool call_fsync;
} cfg;
struct {
int start_index; //for read only
int last_index; //for write
int compress_index;
} binlog;
struct {
int fd;
int64_t size; //file size
struct {
char *str;
int size;
} name;
} file;
int64_t total_count;
SFBinlogBuffer binlog_buffer;
short flags;
struct {
int64_t pending;
volatile int64_t done;
} last_versions;
struct {
sf_file_write_done_callback func;
void *args;
} write_done_callback;
} SFFileWriterInfo;
#ifdef __cplusplus
extern "C" {
#endif
int sf_file_writer_init(SFFileWriterInfo *writer, const char *data_path,
const char *subdir_name, const char *file_prefix,
const int max_record_size, const int buffer_size,
const int64_t file_rotate_size, const bool call_fsync);
void sf_file_writer_destroy(SFFileWriterInfo *writer);
int sf_file_writer_direct_write(SFFileWriterInfo *writer,
char *buff, const int len);
int sf_file_writer_deal_versioned_buffer(SFFileWriterInfo *writer,
BufferInfo *buffer, const int64_t version);
#define sf_file_writer_deal_buffer(writer, buffer) \
sf_file_writer_deal_versioned_buffer(writer, buffer, 0)
int sf_file_writer_flush(SFFileWriterInfo *writer);
int sf_file_writer_fsync(SFFileWriterInfo *writer);
#define SF_FILE_WRITER_DATA_END_BUFF(writer) (writer)->binlog_buffer.data_end
#define SF_FILE_WRITER_CURRENT_DATA_VERSION(writer) \
(writer)->last_versions.pending
#define SF_FILE_WRITER_NEXT_DATA_VERSION(writer) \
++((writer)->last_versions.pending)
int sf_file_writer_save_buffer_ex(SFFileWriterInfo *writer,
const int length, const bool flush);
static inline int sf_file_writer_save_buffer(
SFFileWriterInfo *writer, const int length)
{
const bool flush = false;
return sf_file_writer_save_buffer_ex(writer, length, flush);
}
static inline int sf_file_writer_flush_buffer(
SFFileWriterInfo *writer, const int length)
{
const bool flush = true;
return sf_file_writer_save_buffer_ex(writer, length, flush);
}
static inline void sf_file_writer_set_flags(
SFFileWriterInfo *writer, const short flags)
{
writer->flags = flags;
}
static inline void sf_file_writer_set_call_fsync(
SFFileWriterInfo *writer, const bool call_fsync)
{
writer->cfg.call_fsync = call_fsync;
}
static inline void sf_file_writer_set_write_done_callback (
SFFileWriterInfo *writer, sf_file_write_done_callback callback,
void *args)
{
writer->write_done_callback.func = callback;
writer->write_done_callback.args = args;
}
static inline int64_t sf_file_writer_get_last_version_ex(
SFFileWriterInfo *writer, const int log_level)
{
if (writer->flags & SF_FILE_WRITER_FLAGS_WANT_DONE_VERSION) {
return writer->last_versions.done;
} else {
if (FC_LOG_BY_LEVEL(log_level)) {
log_it_ex(&g_log_context, log_level, "file: %s, line: %d, "
"writer: %s, should set writer flags to %d!",
__FILE__, __LINE__, writer->cfg.subdir_name,
SF_FILE_WRITER_FLAGS_WANT_DONE_VERSION);
}
return -1;
}
}
#define sf_file_writer_get_last_version(writer) \
sf_file_writer_get_last_version_ex(writer, LOG_ERR)
#define sf_file_writer_get_last_version_silence(writer) \
sf_file_writer_get_last_version_ex(writer, LOG_NOTHING)
int sf_file_writer_get_binlog_indexes(const char *data_path,
const char *subdir_name, int *start_index, int *last_index);
static inline int sf_file_writer_get_binlog_start_index(
const char *data_path, const char *subdir_name,
int *start_index)
{
int last_index;
return sf_file_writer_get_binlog_indexes(data_path,
subdir_name, start_index, &last_index);
}
static inline int sf_file_writer_get_binlog_last_index(
const char *data_path, const char *subdir_name,
int *last_index)
{
int start_index;
return sf_file_writer_get_binlog_indexes(data_path,
subdir_name, &start_index, last_index);
}
int sf_file_writer_set_indexes(SFFileWriterInfo *writer,
const int start_index, const int last_index);
int sf_file_writer_get_indexes(SFFileWriterInfo *writer,
int *start_index, int *last_index);
static inline int sf_file_writer_get_start_index(SFFileWriterInfo *writer)
{
int start_index;
int last_index;
sf_file_writer_get_indexes(writer, &start_index, &last_index);
return start_index;
}
static inline int sf_file_writer_get_last_index(SFFileWriterInfo *writer)
{
int start_index;
int last_index;
sf_file_writer_get_indexes(writer, &start_index, &last_index);
return last_index;
}
#define sf_file_writer_get_current_write_index(writer) \
sf_file_writer_get_last_index(writer)
static inline void sf_file_writer_get_current_position(
SFFileWriterInfo *writer, SFBinlogFilePosition *position)
{
position->index = writer->binlog.last_index;
position->offset = writer->file.size;
}
static inline const char *sf_file_writer_get_filepath(
const char *data_path, const char *subdir_name,
char *filepath, const int size)
{
fc_get_full_filepath_ex(data_path, strlen(data_path),
subdir_name, strlen(subdir_name), filepath, size);
return filepath;
}
static inline const char *sf_file_writer_get_filename_ex(
const char *data_path, const char *subdir_name,
const char *file_prefix, const int binlog_index,
char *filename, const int size)
{
char *p;
int data_path_len;
int subdir_name_len;
int file_prefix_len;
data_path_len = strlen(data_path);
subdir_name_len = strlen(subdir_name);
file_prefix_len = strlen(file_prefix);
if (data_path_len + subdir_name_len + file_prefix_len +
4 + SF_BINLOG_FILE_EXT_LEN >= size)
{
snprintf(filename, size, "%s/%s/%s"SF_BINLOG_FILE_EXT_FMT,
data_path, subdir_name, file_prefix, binlog_index);
return filename;
}
p = filename;
memcpy(p, data_path, data_path_len);
p += data_path_len;
*p++ = '/';
memcpy(p, subdir_name, subdir_name_len);
p += subdir_name_len;
*p++ = '/';
memcpy(p, file_prefix, file_prefix_len);
p += file_prefix_len;
*p++ = '.';
fc_ltostr_ex(binlog_index, p, SF_BINLOG_FILE_EXT_LEN);
return filename;
}
#define sf_file_writer_get_filename(data_path, subdir_name, \
binlog_index, filename, size) \
sf_file_writer_get_filename_ex(data_path, subdir_name, \
SF_BINLOG_FILE_PREFIX_STR, binlog_index, filename, size)
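/* Example (illustrative path): with the default prefix "binlog" and
 * SF_BINLOG_FILE_EXT_LEN == 6,
 *   sf_file_writer_get_filename("/opt/data", "mybinlog", 3,
 *           filename, sizeof(filename))
 * yields "/opt/data/mybinlog/binlog.000003". */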
const char *sf_file_writer_get_index_filename(const char *data_path,
const char *subdir_name, char *filename, const int size);
int sf_file_writer_set_binlog_start_index(SFFileWriterInfo *writer,
const int start_index);
int sf_file_writer_set_binlog_write_index(SFFileWriterInfo *writer,
const int last_index);
static inline int sf_file_writer_rotate_file(SFFileWriterInfo *writer)
{
int last_index;
last_index = sf_file_writer_get_current_write_index(writer);
return sf_file_writer_set_binlog_write_index(writer, last_index + 1);
}
int sf_file_writer_get_last_lines(const char *data_path,
const char *subdir_name, const int current_write_index,
char *buff, const int buff_size, int *count, int *length);
static inline int sf_file_writer_get_last_line_ex(const char *data_path,
const char *subdir_name, const int current_write_index,
char *buff, const int buff_size, int *length)
{
int count = 1;
return sf_file_writer_get_last_lines(data_path, subdir_name,
current_write_index, buff, buff_size, &count, length);
}
int sf_file_writer_get_last_line(const char *data_path,
const char *subdir_name, char *buff,
const int buff_size, int *length);
int sf_file_writer_write_to_binlog_index_file_ex(const char *data_path,
const char *subdir_name, const char *file_prefix,
const int start_index, const int last_index,
const int compress_index);
#define sf_file_writer_write_to_binlog_index_file(data_path, \
subdir_name, start_index, last_index) \
sf_file_writer_write_to_binlog_index_file_ex(data_path, subdir_name, \
SF_BINLOG_FILE_PREFIX_STR, start_index, last_index, 0)
#ifdef __cplusplus
}
#endif
#endif
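A minimal usage sketch of the writer API above (illustrative, not from the repository): the data path, subdir name, buffer and record sizes are placeholders, and the record is assumed to be much shorter than the producer buffer.
#include <string.h>
#include "sf_file_writer.h"

static int demo_append_record(const char *record, const int len)
{
    SFFileWriterInfo writer;
    int result;

    /* 64 KB producer buffer, rotate at the default 1 GB, no fsync per write */
    if ((result=sf_file_writer_init(&writer, "/opt/data", "mybinlog",
            SF_BINLOG_FILE_PREFIX_STR, 1024, 64 * 1024,
            SF_BINLOG_DEFAULT_ROTATE_SIZE, false)) != 0)
    {
        return result;
    }

    /* copy into the buffer tail, then account for it; flush_buffer
     * (flush == true) pushes the buffered data to the binlog file */
    memcpy(SF_FILE_WRITER_DATA_END_BUFF(&writer), record, len);
    result = sf_file_writer_flush_buffer(&writer, len);

    sf_file_writer_destroy(&writer);
    return result;
}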


@ -38,16 +38,17 @@ int sf_connect_to_server(const char *ip_addr, const int port, int *sock)
if(*sock < 0) {
return errno != 0 ? errno : ENOMEM;
}
tcpsetserveropt(*sock, g_sf_global_vars.network_timeout);
tcpsetserveropt(*sock, g_sf_global_vars.net_buffer_cfg.network_timeout);
if ((result=tcpsetnonblockopt(*sock)) != 0) {
close(*sock);
*sock = -1;
return result;
}
FC_SET_CLOEXEC(*sock);
if ((result=connectserverbyip_nb(*sock, ip_addr, port,
g_sf_global_vars.connect_timeout)) != 0)
if ((result=connectserverbyip_nb(*sock, ip_addr, port, g_sf_global_vars.
net_buffer_cfg.connect_timeout)) != 0)
{
close(*sock);
*sock = -1;


@ -18,7 +18,8 @@
#ifndef _SF_FUNC_H
#define _SF_FUNC_H
#include "fastcommon/common_define.h"
#include "fastcommon/pthread_func.h"
#include "fastcommon/fc_atomic.h"
#include "sf_types.h"
#include "sf_global.h"
@ -36,7 +37,7 @@ static inline void sf_terminate_myself_ex(const char *file,
{
g_sf_global_vars.continue_flag = false;
if (kill(getpid(), SIGQUIT) == 0) { //signal myself to quit
logInfo("file: "__FILE__", line: %d, "
logWarning("file: "__FILE__", line: %d, "
"kill myself from caller {file: %s, line: %d, func: %s}",
__LINE__, file, line, func);
} else {
@ -55,7 +56,8 @@ static inline int sf_binlog_buffer_init(SFBinlogBuffer *buffer, const int size)
return ENOMEM;
}
buffer->current = buffer->end = buffer->buff;
buffer->current = buffer->data_end = buffer->buff;
buffer->buff_end = buffer->buff + size;
buffer->size = size;
return 0;
}
@ -64,11 +66,87 @@ static inline void sf_binlog_buffer_destroy(SFBinlogBuffer *buffer)
{
if (buffer->buff != NULL) {
free(buffer->buff);
buffer->current = buffer->end = buffer->buff = NULL;
buffer->current = buffer->buff = NULL;
buffer->data_end = buffer->buff_end = NULL;
buffer->size = 0;
}
}
static inline int sf_synchronize_ctx_init(SFSynchronizeContext *sctx)
{
sctx->waiting_count = 0;
return init_pthread_lock_cond_pair(&sctx->lcp);
}
static inline void sf_synchronize_ctx_destroy(SFSynchronizeContext *sctx)
{
destroy_pthread_lock_cond_pair(&sctx->lcp);
}
static inline void sf_synchronize_counter_add(
SFSynchronizeContext *sctx, const int count)
{
PTHREAD_MUTEX_LOCK(&sctx->lcp.lock);
sctx->waiting_count += count;
PTHREAD_MUTEX_UNLOCK(&sctx->lcp.lock);
}
static inline void sf_synchronize_counter_sub(
SFSynchronizeContext *sctx, const int count)
{
PTHREAD_MUTEX_LOCK(&sctx->lcp.lock);
sctx->waiting_count -= count;
PTHREAD_MUTEX_UNLOCK(&sctx->lcp.lock);
}
static inline void sf_synchronize_counter_notify(
SFSynchronizeContext *sctx, const int count)
{
PTHREAD_MUTEX_LOCK(&sctx->lcp.lock);
sctx->waiting_count -= count;
if (sctx->waiting_count == 0) {
pthread_cond_signal(&sctx->lcp.cond);
}
PTHREAD_MUTEX_UNLOCK(&sctx->lcp.lock);
}
static inline void sf_synchronize_counter_wait(SFSynchronizeContext *sctx)
{
PTHREAD_MUTEX_LOCK(&sctx->lcp.lock);
while (sctx->waiting_count != 0 && SF_G_CONTINUE_FLAG) {
pthread_cond_wait(&sctx->lcp.cond, &sctx->lcp.lock);
}
PTHREAD_MUTEX_UNLOCK(&sctx->lcp.lock);
}
#define sf_synchronize_finished_notify_no_lock(sctx, err_no) \
(sctx)->finished = true; \
(sctx)->result = err_no; \
pthread_cond_signal(&(sctx)->lcp.cond)
static inline void sf_synchronize_finished_notify(
SFSynchronizeContext *sctx, const int result)
{
PTHREAD_MUTEX_LOCK(&sctx->lcp.lock);
sf_synchronize_finished_notify_no_lock(sctx, result);
PTHREAD_MUTEX_UNLOCK(&sctx->lcp.lock);
}
static inline int sf_synchronize_finished_wait(SFSynchronizeContext *sctx)
{
int result;
PTHREAD_MUTEX_LOCK(&sctx->lcp.lock);
while (!sctx->finished && SF_G_CONTINUE_FLAG) {
pthread_cond_wait(&sctx->lcp.cond, &sctx->lcp.lock);
}
result = sctx->result;
sctx->finished = false; //for next notify
PTHREAD_MUTEX_UNLOCK(&sctx->lcp.lock);
return result;
}
#ifdef __cplusplus
}
#endif
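
The SFSynchronizeContext helpers added above amount to a small counting barrier: add the number of outstanding jobs, let each worker notify as it finishes, and wait until the counter drains. A hedged sketch of the dispatcher side; struct my_job, jobs and submit_job() are hypothetical placeholders, only the sf_synchronize_* calls come from this header:

static int dispatch_and_wait(struct my_job *jobs, const int job_count)
{
    SFSynchronizeContext sctx;
    int result;
    int i;

    if ((result=sf_synchronize_ctx_init(&sctx)) != 0) {
        return result;
    }
    sf_synchronize_counter_add(&sctx, job_count);
    for (i=0; i<job_count; i++) {
        /* each worker is expected to call
           sf_synchronize_counter_notify(&sctx, 1) when its job completes */
        submit_job(jobs + i, &sctx);   //hypothetical helper
    }
    sf_synchronize_counter_wait(&sctx);  //returns once waiting_count reaches 0
    sf_synchronize_ctx_destroy(&sctx);
    return 0;
}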

File diff suppressed because it is too large

View File

@ -30,27 +30,36 @@ typedef struct sf_connection_stat {
} SFConnectionStat;
typedef struct sf_global_variables {
int connect_timeout;
int network_timeout;
char base_path[MAX_PATH_SIZE];
struct {
char str[MAX_PATH_SIZE];
int len;
bool inited;
bool created;
} base_path;
volatile bool continue_flag;
bool tcp_quick_ack;
int max_connections;
int max_pkg_size;
int min_buff_size;
int max_buff_size;
bool epoll_edge_trigger;
SFNetBufferConfig net_buffer_cfg;
int task_buffer_extra_size;
int thread_stack_size;
time_t up_time;
gid_t run_by_gid;
uid_t run_by_uid;
char run_by_group[32];
char run_by_user[32];
struct {
bool inited;
gid_t gid;
uid_t uid;
char group[32];
char user[32];
} run_by;
SFLogConfig error_log;
SFConnectionStat connection_stat;
sf_error_handler_callback error_handler;
string_t empty;
volatile time_t last_binlog_writer_log_timestamp;
} SFGlobalVariables;
typedef struct sf_context_ini_config {
@ -58,6 +67,9 @@ typedef struct sf_context_ini_config {
int default_inner_port;
int default_outer_port;
int default_work_threads;
int max_pkg_size_min_value;
FCCommunicationType comm_type;
const char *max_pkg_size_item_name;
} SFContextIniConfig;
#ifdef __cplusplus
@ -67,96 +79,189 @@ extern "C" {
extern SFGlobalVariables g_sf_global_vars;
extern SFContext g_sf_context;
#define SF_G_BASE_PATH g_sf_global_vars.base_path
#define SF_G_BASE_PATH_STR g_sf_global_vars.base_path.str
#define SF_G_BASE_PATH_LEN g_sf_global_vars.base_path.len
#define SF_G_BASE_PATH_INITED g_sf_global_vars.base_path.inited
#define SF_G_BASE_PATH_CREATED g_sf_global_vars.base_path.created
#define SF_G_CONTINUE_FLAG g_sf_global_vars.continue_flag
#define SF_G_CONNECT_TIMEOUT g_sf_global_vars.connect_timeout
#define SF_G_NETWORK_TIMEOUT g_sf_global_vars.network_timeout
#define SF_G_MAX_CONNECTIONS g_sf_global_vars.max_connections
#define SF_G_CONNECT_TIMEOUT g_sf_global_vars.net_buffer_cfg.connect_timeout
#define SF_G_NETWORK_TIMEOUT g_sf_global_vars.net_buffer_cfg.network_timeout
#define SF_G_MAX_CONNECTIONS g_sf_global_vars.net_buffer_cfg.max_connections
#define SF_G_THREAD_STACK_SIZE g_sf_global_vars.thread_stack_size
#define SF_G_UP_TIME g_sf_global_vars.up_time
#define SF_G_SOCK_HANDLER (g_sf_context.handlers \
[SF_IPV4_ADDRESS_FAMILY_INDEX].handlers + \
SF_SOCKET_NETWORK_HANDLER_INDEX)
#define SF_G_OUTER_PORT SF_G_SOCK_HANDLER->outer.port
#define SF_G_INNER_PORT SF_G_SOCK_HANDLER->inner.port
#define SF_G_OUTER_BIND_ADDR4 g_sf_context.handlers \
[SF_IPV4_ADDRESS_FAMILY_INDEX].outer_bind_addr
#define SF_G_INNER_BIND_ADDR4 g_sf_context.handlers \
[SF_IPV4_ADDRESS_FAMILY_INDEX].inner_bind_addr
#define SF_G_OUTER_BIND_ADDR6 g_sf_context.handlers \
[SF_IPV6_ADDRESS_FAMILY_INDEX].outer_bind_addr
#define SF_G_INNER_BIND_ADDR6 g_sf_context.handlers \
[SF_IPV6_ADDRESS_FAMILY_INDEX].inner_bind_addr
#define SF_G_IPV4_ENABLED (g_sf_context.handlers \
[SF_IPV4_ADDRESS_FAMILY_INDEX].af == AF_INET)
#define SF_G_IPV6_ENABLED (g_sf_context.handlers \
[SF_IPV6_ADDRESS_FAMILY_INDEX].af == AF_INET6)
#define SF_G_ACCEPT_THREADS g_sf_context.accept_threads
#define SF_G_WORK_THREADS g_sf_context.work_threads
#define SF_G_ALIVE_THREAD_COUNT g_sf_context.thread_count
#define SF_G_THREAD_INDEX(tdata) (int)(tdata - g_sf_context.thread_data)
#define SF_G_CONN_CURRENT_COUNT g_sf_global_vars.connection_stat.current_count
#define SF_G_CONN_MAX_COUNT g_sf_global_vars.connection_stat.max_count
#define SF_WORK_THREADS(sf_context) sf_context.work_threads
#define SF_ALIVE_THREAD_COUNT(sf_context) sf_context.thread_count
#define SF_THREAD_INDEX(sf_context, tdata) (int)(tdata - sf_context.thread_data)
#define SF_G_ERROR_HANDLER g_sf_global_vars.error_handler
#define SF_G_EMPTY_STRING g_sf_global_vars.empty
#define LAST_BINLOG_WRITER_LOG_TIMESTAMP g_sf_global_vars. \
last_binlog_writer_log_timestamp
#define SF_G_EPOLL_EDGE_TRIGGER g_sf_global_vars.epoll_edge_trigger
#define SF_WORK_THREADS(sf_context) (sf_context).work_threads
#define SF_ALIVE_THREAD_COUNT(sf_context) (sf_context).thread_count
#define SF_THREAD_INDEX(sf_context, tdata) (int)(tdata - (sf_context).thread_data)
#define SF_IPV4_ENABLED(sf_context) ((sf_context).handlers \
[SF_IPV4_ADDRESS_FAMILY_INDEX].af == AF_INET)
#define SF_IPV6_ENABLED(sf_context) ((sf_context).handlers \
[SF_IPV6_ADDRESS_FAMILY_INDEX].af == AF_INET6)
#define SF_CHOWN_RETURN_ON_ERROR(path, current_uid, current_gid) \
do { \
if (!(g_sf_global_vars.run_by_gid == current_gid && \
g_sf_global_vars.run_by_uid == current_uid)) \
{ \
if (chown(path, g_sf_global_vars.run_by_uid, \
g_sf_global_vars.run_by_gid) != 0) \
if (g_sf_global_vars.run_by.inited && !(g_sf_global_vars. \
run_by.gid == current_gid && g_sf_global_vars. \
run_by.uid == current_uid)) \
{ \
logError("file: "__FILE__", line: %d, " \
"chown \"%s\" fail, " \
"errno: %d, error info: %s", \
__LINE__, path, errno, STRERROR(errno)); \
return errno != 0 ? errno : EPERM; \
if (chown(path, g_sf_global_vars.run_by.uid, \
g_sf_global_vars.run_by.gid) != 0) \
{ \
logError("file: "__FILE__", line: %d, " \
"chown \"%s\" fail, " \
"errno: %d, error info: %s", \
__LINE__, path, errno, STRERROR(errno)); \
return errno != 0 ? errno : EPERM; \
} \
} \
} \
} while (0)
#define SF_SET_CONTEXT_INI_CONFIG(config, filename, pIniContext, \
section_name, def_inner_port, def_outer_port, def_work_threads) \
#define SF_CHOWN_TO_RUNBY_RETURN_ON_ERROR(path) \
SF_CHOWN_RETURN_ON_ERROR(path, geteuid(), getegid())
#define SF_FCHOWN_RETURN_ON_ERROR(fd, path, current_uid, current_gid) \
do { \
if (g_sf_global_vars.run_by.inited && !(g_sf_global_vars. \
run_by.gid == current_gid && g_sf_global_vars. \
run_by.uid == current_uid)) \
{ \
if (fchown(fd, g_sf_global_vars.run_by.uid, \
g_sf_global_vars.run_by.gid) != 0) \
{ \
logError("file: "__FILE__", line: %d, " \
"fchown \"%s\" fail, " \
"errno: %d, error info: %s", \
__LINE__, path, errno, STRERROR(errno)); \
return errno != 0 ? errno : EPERM; \
} \
} \
} while (0)
#define SF_FCHOWN_TO_RUNBY_RETURN_ON_ERROR(fd, path) \
SF_FCHOWN_RETURN_ON_ERROR(fd, path, geteuid(), getegid())
#define SF_SET_CONTEXT_INI_CONFIG_EX(config, the_comm_type, filename, \
pIniContext, section_name, def_inner_port, def_outer_port, \
def_work_threads, max_pkg_size_item_nm, max_pkg_size_min_val) \
do { \
FAST_INI_SET_FULL_CTX_EX(config.ini_ctx, filename, \
section_name, pIniContext); \
config.comm_type = the_comm_type; \
config.default_inner_port = def_inner_port; \
config.default_outer_port = def_outer_port; \
config.default_work_threads = def_work_threads; \
config.max_pkg_size_item_name = max_pkg_size_item_nm; \
config.max_pkg_size_min_value = max_pkg_size_min_val; \
} while (0)
int sf_load_global_config_ex(const char *server_name,
IniFullContext *ini_ctx, const bool load_network_params,
const int task_buffer_extra_size);
#define SF_SET_CONTEXT_INI_CONFIG(config, the_comm_type, \
filename, pIniContext, section_name, def_inner_port, \
def_outer_port, def_work_threads) \
SF_SET_CONTEXT_INI_CONFIG_EX(config, the_comm_type, filename, \
pIniContext, section_name, def_inner_port, def_outer_port, \
def_work_threads, "max_pkg_size", 0)
static inline int sf_load_global_config(const char *server_name,
int sf_load_global_config_ex(const char *log_filename_prefix,
IniFullContext *ini_ctx, const bool load_network_params,
const char *max_pkg_size_item_nm, const int fixed_buff_size,
const int task_buffer_extra_size, const bool need_set_run_by);
static inline int sf_load_global_config(const char *log_filename_prefix,
IniFullContext *ini_ctx)
{
const bool load_network_params = true;
const char *max_pkg_size_item_nm = "max_pkg_size";
const int fixed_buff_size = 0;
const int task_buffer_extra_size = 0;
const bool need_set_run_by = true;
return sf_load_global_config_ex(server_name, ini_ctx,
load_network_params, task_buffer_extra_size);
return sf_load_global_config_ex(log_filename_prefix, ini_ctx,
load_network_params, max_pkg_size_item_nm, fixed_buff_size,
task_buffer_extra_size, need_set_run_by);
}
int sf_load_config_ex(const char *server_name,
SFContextIniConfig *config, const int task_buffer_extra_size);
int sf_load_config_ex(const char *log_filename_prefix,
SFContextIniConfig *config, const int fixed_buff_size,
const int task_buffer_extra_size, const bool need_set_run_by);
static inline int sf_load_config(const char *server_name,
static inline int sf_load_config(const char *log_filename_prefix,
const FCCommunicationType comm_type,
const char *filename, IniContext *pIniContext,
const char *section_name, const int default_inner_port,
const int default_outer_port, const int task_buffer_extra_size)
const int default_outer_port, const int fixed_buff_size,
const int task_buffer_extra_size)
{
const bool need_set_run_by = true;
SFContextIniConfig config;
SF_SET_CONTEXT_INI_CONFIG(config, filename, pIniContext,
SF_SET_CONTEXT_INI_CONFIG(config, comm_type, filename, pIniContext,
section_name, default_inner_port, default_outer_port,
DEFAULT_WORK_THREADS);
return sf_load_config_ex(server_name, &config, task_buffer_extra_size);
return sf_load_config_ex(log_filename_prefix, &config, fixed_buff_size,
task_buffer_extra_size, need_set_run_by);
}
int sf_load_context_from_config_ex(SFContext *sf_context,
SFContextIniConfig *config);
SFContextIniConfig *config, const int fixed_buff_size,
const int task_buffer_extra_size);
static inline int sf_load_context_from_config(SFContext *sf_context,
const FCCommunicationType comm_type,
const char *filename, IniContext *pIniContext,
const char *section_name, const int default_inner_port,
const int default_outer_port)
const int default_outer_port, const int fixed_buff_size,
const int task_buffer_extra_size)
{
SFContextIniConfig config;
SF_SET_CONTEXT_INI_CONFIG(config, filename, pIniContext,
SF_SET_CONTEXT_INI_CONFIG(config, comm_type, filename, pIniContext,
section_name, default_inner_port, default_outer_port,
DEFAULT_WORK_THREADS);
return sf_load_context_from_config_ex(sf_context, &config);
return sf_load_context_from_config_ex(sf_context, &config,
fixed_buff_size, task_buffer_extra_size);
}
int sf_alloc_rdma_pd(SFContext *sf_context,
FCAddressPtrArray *address_array);
void sf_set_address_family_by_ip(SFContext *sf_context,
FCAddressPtrArray *address_array);
int sf_load_log_config(IniFullContext *ini_ctx, LogContext *log_ctx,
SFLogConfig *log_cfg);
@ -169,7 +274,7 @@ static inline int sf_load_slow_log_config(const char *config_file,
{
IniFullContext ini_ctx;
FAST_INI_SET_FULL_CTX_EX(ini_ctx, config_file, "slow_log", ini_context);
FAST_INI_SET_FULL_CTX_EX(ini_ctx, config_file, "slow-log", ini_context);
return sf_load_slow_log_config_ex(&ini_ctx, log_ctx, slow_log_cfg);
}
@ -181,7 +286,14 @@ void sf_log_config_to_string_ex(SFLogConfig *log_cfg, const char *caption,
void sf_slow_log_config_to_string(SFSlowLogConfig *slow_log_cfg,
const char *caption, char *output, const int size);
void sf_global_config_to_string(char *output, const int size);
void sf_global_config_to_string_ex(const char *max_pkg_size_item_nm,
char *output, const int size);
static inline void sf_global_config_to_string(char *output, const int size)
{
const char *max_pkg_size_item_nm = "max_pkg_size";
sf_global_config_to_string_ex(max_pkg_size_item_nm, output, size);
}
void sf_context_config_to_string(const SFContext *sf_context,
char *output, const int size);
@ -193,6 +305,31 @@ void sf_log_config_ex(const char *other_config);
#define sf_log_config_to_string(log_cfg, caption, output, size) \
sf_log_config_to_string_ex(log_cfg, caption, NULL, output, size)
int sf_get_base_path_from_conf_file(const char *config_filename);
int sf_load_global_base_path(IniFullContext *ini_ctx);
int sf_load_data_path_config_ex(IniFullContext *ini_ctx,
const char *item_name, const char *default_value, string_t *path);
#define sf_load_data_path_config(ini_ctx, path) \
sf_load_data_path_config_ex(ini_ctx, "data_path", "data", path)
static inline void sf_set_global_base_path(const char *base_path)
{
string_t path_string;
FC_SET_STRING(path_string, (char *)base_path);
SF_G_BASE_PATH_LEN = normalize_path(NULL, &path_string,
SF_G_BASE_PATH_STR, sizeof(SF_G_BASE_PATH_STR));
SF_G_BASE_PATH_INITED = true;
}
static inline void sf_set_error_handler(
sf_error_handler_callback error_handler)
{
SF_G_ERROR_HANDLER = error_handler;
}
#ifdef __cplusplus
}
#endif
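
The reworked loaders thread the communication type, the max_pkg_size item name and the fixed buffer size through a single call. A hedged sketch of a server start-up path built on sf_load_config() as declared above; the log prefix, section name and ports are illustrative, and fc_comm_type_sock is assumed to be the socket member of fastcommon's FCCommunicationType:

static int load_server_config(const char *config_filename)
{
    IniContext ini_context;
    int result;

    if ((result=iniLoadFromFile(config_filename, &ini_context)) != 0) {
        return result;
    }
    /* log prefix "myserver", socket transport, section [service],
     * default inner/outer port 11011, no fixed buffer size and no
     * extra task buffer bytes */
    result = sf_load_config("myserver", fc_comm_type_sock,
            config_filename, &ini_context, "service",
            11011, 11011, 0, 0);
    iniFreeContext(&ini_context);
    return result;
}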

197
src/sf_iov.c Normal file
View File

@ -0,0 +1,197 @@
/*
* Copyright (c) 2020 YuQing <384681@qq.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include "fastcommon/logger.h"
#include "fastcommon/sockopt.h"
#include "sf_define.h"
#include "sf_iov.h"
int sf_iova_consume(SFDynamicIOVArray *iova, const int consume_len)
{
struct iovec *iob;
struct iovec *end;
int sum_bytes;
int remain_len;
int result;
if (iova->cnt <= 0) {
logError("file: "__FILE__", line: %d, "
"invalid iov count: %d", __LINE__, iova->cnt);
return EINVAL;
}
if ((result=sf_iova_check_alloc(iova)) != 0) {
return result;
}
end = iova->iov + iova->cnt;
iob = iova->iov;
sum_bytes = iob->iov_len;
for (iob=iob + 1; sum_bytes <= consume_len && iob < end; iob++) {
sum_bytes += iob->iov_len;
}
if (sum_bytes < consume_len) {
logError("file: "__FILE__", line: %d, "
"iov length: %d < consume length: %d",
__LINE__, sum_bytes, consume_len);
return EOVERFLOW;
}
iova->cnt -= (iob - iova->iov);
iova->iov = iob;
if (iova->cnt == 0) {
struct iovec *last;
/* update the last iov for next slice */
last = iob - 1;
last->iov_base = (char *)last->iov_base + last->iov_len;
last->iov_len = 0;
} else {
/* adjust the first element */
remain_len = sum_bytes - consume_len;
if (remain_len < iob->iov_len) {
iob->iov_base = (char *)iob->iov_base +
(iob->iov_len - remain_len);
iob->iov_len = remain_len;
}
}
return 0;
}
static inline int iova_slice(SFDynamicIOVArray *iova, const int slice_len)
{
struct iovec *iob;
struct iovec *end;
int sum_bytes;
int exceed_len;
sum_bytes = 0;
end = iova->ptr + iova->input.cnt;
for (iob=iova->iov; iob<end; iob++) {
sum_bytes += iob->iov_len;
if (sum_bytes > slice_len) {
exceed_len = sum_bytes - slice_len;
iob->iov_len -= exceed_len;
break;
} else if (sum_bytes == slice_len) {
break;
}
}
if (iob < end) {
iova->cnt = (iob - iova->iov) + 1;
return 0;
} else {
logError("file: "__FILE__", line: %d, "
"iov remain bytes: %d < slice length: %d",
__LINE__, sum_bytes, slice_len);
iova->cnt = 0;
return EOVERFLOW;
}
}
int sf_iova_first_slice(SFDynamicIOVArray *iova, const int slice_len)
{
int result;
if ((result=sf_iova_check_alloc(iova)) != 0) {
return result;
}
return iova_slice(iova, slice_len);
}
int sf_iova_next_slice(SFDynamicIOVArray *iova,
const int consume_len, const int slice_len)
{
struct iovec *last;
const struct iovec *origin;
int remain_len;
int result;
if ((result=sf_iova_consume(iova, consume_len)) != 0) {
return result;
}
last = iova->iov + iova->cnt - 1;
origin = iova->input.iov + (last - iova->ptr);
remain_len = ((char *)origin->iov_base + origin->iov_len) -
(char *)last->iov_base;
if (last->iov_len != remain_len) {
last->iov_len = remain_len;
if (iova->cnt == 0) {
iova->iov = last;
}
}
return iova_slice(iova, slice_len);
}
int sf_iova_memset_ex(const struct iovec *iov, const int iovcnt,
int c, const int offset, const int length)
{
const struct iovec *iob;
const struct iovec *end;
int sum_bytes;
int remain_len;
int left_bytes;
char *start;
if (length == 0) {
return 0;
}
sum_bytes = 0;
end = iov + iovcnt;
for (iob=iov; iob<end; iob++) {
sum_bytes += iob->iov_len;
if (sum_bytes > offset) {
break;
}
}
if (iob == end) {
logError("file: "__FILE__", line: %d, "
"iov length: %d < (offset: %d + length: %d)",
__LINE__, sum_bytes, offset, length);
return EOVERFLOW;
}
remain_len = sum_bytes - offset;
start = (char *)iob->iov_base + (iob->iov_len - remain_len);
if (length <= remain_len) {
memset(start, c, length);
return 0;
}
memset(start, c, remain_len);
left_bytes = length - remain_len;
while (++iob < end) {
if (left_bytes <= iob->iov_len) {
memset(iob->iov_base, c, left_bytes);
return 0;
}
memset(iob->iov_base, c, iob->iov_len);
left_bytes -= iob->iov_len;
}
logError("file: "__FILE__", line: %d, "
"iov length is too short, overflow bytes: %d",
__LINE__, left_bytes);
return EOVERFLOW;
}

116
src/sf_iov.h Normal file
View File

@ -0,0 +1,116 @@
/*
* Copyright (c) 2020 YuQing <384681@qq.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef _SF_IOV_H
#define _SF_IOV_H
#include "fastcommon/shared_func.h"
#include "sf_types.h"
#define SF_IOV_FIXED_SIZE 256
typedef struct sf_dynamic_iov_array {
struct iovec holder[SF_IOV_FIXED_SIZE];
struct iovec *ptr;
struct {
const struct iovec *iov;
int cnt;
} input;
struct iovec *iov;
int cnt;
} SFDynamicIOVArray;
#define sf_iova_init(iova, _iov, _cnt) \
(iova).input.iov = _iov; \
(iova).iov = (struct iovec *)_iov; \
(iova).cnt = (iova).input.cnt = _cnt
#define sf_iova_destroy(iova) \
if ((iova).iov != (struct iovec *)(iova).input.iov && \
(iova).ptr != (iova).holder) \
free((iova).ptr)
#ifdef __cplusplus
extern "C" {
#endif
static inline int sf_iova_check_alloc(SFDynamicIOVArray *iova)
{
if (iova->iov == (struct iovec *)iova->input.iov) {
if (iova->input.cnt <= SF_IOV_FIXED_SIZE) {
iova->ptr = iova->holder;
} else {
iova->ptr = fc_malloc(iova->input.cnt *
sizeof(struct iovec));
if (iova->ptr == NULL) {
return ENOMEM;
}
}
memcpy(iova->ptr, iova->input.iov, iova->input.cnt *
sizeof(struct iovec));
iova->iov = iova->ptr;
}
return 0;
}
int sf_iova_consume(SFDynamicIOVArray *iova, const int consume_len);
int sf_iova_first_slice(SFDynamicIOVArray *iova, const int slice_len);
int sf_iova_next_slice(SFDynamicIOVArray *iova,
const int consume_len, const int slice_len);
int sf_iova_memset_ex(const struct iovec *iov, const int iovcnt,
int c, const int offset, const int length);
#define sf_iova_memset(iova, c, offset, length) \
sf_iova_memset_ex((iova).iov, (iova).cnt, c, offset, length)
static inline void sf_iova_memcpy_ex(const struct iovec *iov,
const int iovcnt, const char *buff, const int length)
{
const struct iovec *iob;
const struct iovec *end;
const char *current;
int remain;
int bytes;
current = buff;
remain = length;
end = iov + iovcnt;
for (iob=iov; iob<end; iob++) {
bytes = FC_MIN(remain, iob->iov_len);
memcpy(iob->iov_base, current, bytes);
remain -= bytes;
if (remain == 0) {
break;
}
current += bytes;
}
}
#define sf_iova_memcpy(iova, buff, length) \
sf_iova_memcpy_ex((iova).iov, (iova).cnt, buff, length)
#ifdef __cplusplus
}
#endif
#endif
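
SFDynamicIOVArray keeps the caller's iovec array untouched (input.iov) and clones it on demand so the consume/slice helpers can adjust pointers and lengths freely. A hedged sketch using only the helpers declared above; the caller-provided iov/iovcnt, header and length values are illustrative:

static int fill_scattered_buffer(const struct iovec *iov, const int iovcnt,
        const char *header, const int header_len, const int total_len)
{
    SFDynamicIOVArray iova;
    int result;

    sf_iova_init(iova, iov, iovcnt);

    /* zero the payload area [header_len, total_len) across the iovec
       entries, then copy the flat header into the first header_len bytes */
    if ((result=sf_iova_memset(iova, 0, header_len,
                    total_len - header_len)) == 0)
    {
        sf_iova_memcpy(iova, header, header_len);
    }

    sf_iova_destroy(iova);
    return result;
}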

File diff suppressed because it is too large

View File

@ -22,8 +22,13 @@
#include <stdlib.h>
#include <string.h>
#include "fastcommon/fast_task_queue.h"
#include "fastcommon/ioevent_loop.h"
#include "sf_define.h"
#include "sf_types.h"
#include "sf_global.h"
#define SF_CTX (task->handler->fh->ctx)
#define SF_NET_BUFFER_CFG SF_CTX->net_buffer_cfg
#ifdef __cplusplus
extern "C" {
@ -31,49 +36,63 @@ extern "C" {
void sf_set_parameters_ex(SFContext *sf_context, const int header_size,
sf_set_body_length_callback set_body_length_func,
sf_deal_task_func deal_func, TaskCleanUpCallback cleanup_func,
sf_recv_timeout_callback timeout_callback);
sf_alloc_recv_buffer_callback alloc_recv_buffer_func,
sf_send_done_callback send_done_callback,
sf_deal_task_callback deal_func, TaskCleanUpCallback cleanup_func,
sf_recv_timeout_callback timeout_callback, sf_release_buffer_callback
release_buffer_callback);
#define sf_set_parameters(header_size, set_body_length_func, \
deal_func, cleanup_func, timeout_callback) \
alloc_recv_buffer_func, deal_func, cleanup_func, timeout_callback) \
sf_set_parameters_ex(&g_sf_context, header_size, \
set_body_length_func, deal_func, \
cleanup_func, timeout_callback)
set_body_length_func, alloc_recv_buffer_func, \
deal_func, cleanup_func, timeout_callback, NULL)
static inline void sf_set_deal_task_func_ex(SFContext *sf_context,
sf_deal_task_func deal_func)
static inline void sf_set_deal_task_callback_ex(SFContext *sf_context,
sf_deal_task_callback deal_func)
{
sf_context->deal_task = deal_func;
sf_context->callbacks.deal_task = deal_func;
}
#define sf_set_deal_task_func(deal_func) \
sf_set_deal_task_func_ex(&g_sf_context, deal_func)
#define sf_set_deal_task_callback(deal_func) \
sf_set_deal_task_callback_ex(&g_sf_context, deal_func)
static inline void sf_set_remove_from_ready_list_ex(SFContext *sf_context,
const bool enabled)
static inline void sf_set_connect_done_callback_ex(SFContext *sf_context,
sf_connect_done_callback done_callback)
{
sf_context->remove_from_ready_list = enabled;
sf_context->callbacks.connect_done = done_callback;
}
#define sf_set_remove_from_ready_list(enabled) \
sf_set_remove_from_ready_list_ex(&g_sf_context, enabled);
#define sf_set_connect_done_callback(done_callback) \
sf_set_connect_done_callback_ex(&g_sf_context, done_callback)
static inline TaskCleanUpCallback sf_get_task_cleanup_func_ex(
static inline TaskCleanUpCallback sf_get_task_cleanup_callback_ex(
SFContext *sf_context)
{
return sf_context->task_cleanup_func;
return sf_context->callbacks.task_cleanup;
}
#define sf_get_task_cleanup_func() \
sf_get_task_cleanup_func_ex(&g_sf_context)
#define sf_get_task_cleanup_callback() \
sf_get_task_cleanup_callback_ex(&g_sf_context)
#define sf_nio_task_is_idle(task) \
(task->offset == 0 && task->length == 0)
#define sf_nio_task_send_done(task) \
(task->send.ptr->offset == 0 && task->send.ptr->length == 0)
void sf_recv_notify_read(int sock, short event, void *arg);
static inline void sf_nio_reset_task_length(struct fast_task_info *task)
{
task->send.ptr->length = 0;
task->send.ptr->offset = 0;
if (task->recv.ptr != task->send.ptr) {
task->recv.ptr->length = 0;
task->recv.ptr->offset = 0;
}
}
void sf_socket_close_connection(struct fast_task_info *task);
void sf_recv_notify_read(int sock, const int event, void *arg);
int sf_send_add_event(struct fast_task_info *task);
int sf_client_sock_write(int sock, short event, void *arg);
int sf_client_sock_read(int sock, short event, void *arg);
void sf_task_finish_clean_up(struct fast_task_info *task);
@ -86,6 +105,42 @@ void sf_task_switch_thread(struct fast_task_info *task,
void sf_task_detach_thread(struct fast_task_info *task);
static inline int sf_set_body_length(struct fast_task_info *task)
{
if (SF_CTX->callbacks.set_body_length(task) != 0) {
return -1;
}
if (task->recv.ptr->length < 0) {
logError("file: "__FILE__", line: %d, "
"client ip: %s, pkg length: %d < 0",
__LINE__, task->client_ip,
task->recv.ptr->length);
return -1;
}
task->recv.ptr->length += SF_CTX->header_size;
if (task->recv.ptr->length > SF_NET_BUFFER_CFG.max_pkg_size) {
logError("file: "__FILE__", line: %d, "
"client ip: %s, pkg length: %d > "
"max pkg size: %d", __LINE__,
task->client_ip, task->recv.ptr->length,
SF_NET_BUFFER_CFG.max_pkg_size);
return -1;
}
return 0;
}
int sf_socket_async_connect_server(struct fast_task_info *task);
int sf_socket_async_connect_check(struct fast_task_info *task);
ssize_t sf_socket_send_data(struct fast_task_info *task,
SFCommAction *action, bool *send_done);
ssize_t sf_socket_recv_data(struct fast_task_info *task,
const bool call_post_recv, SFCommAction *action);
int sf_rdma_busy_polling_callback(struct nio_thread_data *thread_data);
static inline int sf_nio_forward_request(struct fast_task_info *task,
const int new_thread_index)
{
@ -93,11 +148,18 @@ static inline int sf_nio_forward_request(struct fast_task_info *task,
return sf_nio_notify(task, SF_NIO_STAGE_FORWARDED);
}
static inline bool sf_client_sock_in_read_stage(struct fast_task_info *task)
static inline void sf_nio_add_to_deleted_list(struct nio_thread_data
*thread_data, struct fast_task_info *task)
{
return (task->event.callback == (IOEventCallback)sf_client_sock_read);
if (task->thread_data == thread_data) {
ioevent_add_to_deleted_list(task);
} else {
sf_nio_notify(task, SF_NIO_STAGE_CLOSE);
}
}
bool sf_client_sock_in_read_stage(struct fast_task_info *task);
#ifdef __cplusplus
}
#endif
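
sf_nio_forward_request() re-dispatches a task to another work thread through the nio notify machinery. A hedged sketch of how a deal-task callback might shard requests by hash; task_hash and the sharding rule are illustrative, only the SF_G_* macros and sf_nio_forward_request come from the headers above:

static int route_by_hash(struct fast_task_info *task, const uint32_t task_hash)
{
    int new_index;

    /* route the request to the worker owning this hash bucket */
    new_index = (int)(task_hash % SF_G_WORK_THREADS);
    if (new_index != SF_G_THREAD_INDEX(task->thread_data)) {
        /* hand over: the target thread is notified and resumes the task
           in SF_NIO_STAGE_FORWARDED */
        return sf_nio_forward_request(task, new_index);
    }
    return 0;   //already on the owning thread; handle in place
}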

260
src/sf_ordered_writer.c Normal file
View File

@ -0,0 +1,260 @@
/*
* Copyright (c) 2020 YuQing <384681@qq.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <limits.h>
#include <fcntl.h>
#include <pthread.h>
#include "fastcommon/logger.h"
#include "fastcommon/sockopt.h"
#include "fastcommon/shared_func.h"
#include "fastcommon/pthread_func.h"
#include "fastcommon/sched_thread.h"
#include "sf_global.h"
#include "sf_func.h"
#include "sf_ordered_writer.h"
#define deal_binlog_one_record(writer, wb) \
sf_file_writer_deal_versioned_buffer(&(writer)->fw, &wb->bf, wb->version)
static inline int flush_writer_files(SFOrderedWriterInfo *writer)
{
int result;
if ((result=sf_file_writer_flush(&writer->fw)) != 0) {
return result;
}
if (writer->fw.flags & SF_FILE_WRITER_FLAGS_WANT_DONE_VERSION) {
writer->fw.last_versions.done = writer->fw.last_versions.pending;
}
return 0;
}
static inline int deal_versioned_binlog(SFOrderedWriterContext *context)
{
SFOrderedWriterBuffer *wb;
int result;
while (1) {
if ((wb=sorted_queue_pop(&context->thread.queues.buffer,
&context->thread.waiting)) != NULL)
{
context->writer.fw.total_count++;
result = deal_binlog_one_record(&context->writer, wb);
fast_mblock_free_object(&context->thread.allocators.buffer, wb);
return result;
}
}
return 0;
}
static int deal_version_chain(SFOrderedWriterContext *context,
struct fc_queue_info *qinfo)
{
int result;
SFWriterVersionEntry *current_ver;
struct fast_mblock_node *prev_node;
struct fast_mblock_node *curr_node;
struct fast_mblock_chain node_chain;
current_ver = qinfo->head;
prev_node = NULL;
do {
curr_node = fast_mblock_to_node_ptr(current_ver);
if (prev_node != NULL) {
prev_node->next = curr_node;
}
prev_node = curr_node;
context->thread.waiting.version = current_ver->version;
if ((result=deal_versioned_binlog(context)) != 0) {
return result;
}
} while ((current_ver=current_ver->next) != NULL);
node_chain.head = fast_mblock_to_node_ptr(qinfo->head);
node_chain.tail = prev_node;
prev_node->next = NULL;
fast_mblock_batch_free(&context->thread.allocators.version, &node_chain);
return flush_writer_files(&context->writer);
}
void sf_ordered_writer_finish(SFOrderedWriterContext *ctx)
{
int count;
if (ctx->writer.fw.file.name.str != NULL) {
fc_queue_terminate(&ctx->thread.queues.version);
count = 0;
while (ctx->thread.running && ++count < 300) {
fc_sleep_ms(10);
}
if (ctx->thread.running) {
logWarning("file: "__FILE__", line: %d, "
"%s binlog write thread still running, exit anyway!",
__LINE__, ctx->writer.fw.cfg.subdir_name);
}
free(ctx->writer.fw.file.name.str);
ctx->writer.fw.file.name.str = NULL;
}
if (ctx->writer.fw.file.fd >= 0) {
close(ctx->writer.fw.file.fd);
ctx->writer.fw.file.fd = -1;
}
}
static void *binlog_writer_func(void *arg)
{
SFOrderedWriterContext *context;
SFOrderedWriterThread *thread;
struct fc_queue_info qinfo;
context = (SFOrderedWriterContext *)arg;
thread = &context->thread;
#ifdef OS_LINUX
{
char thread_name[64];
fc_combine_two_strings(thread->name, "writer", '-', thread_name);
prctl(PR_SET_NAME, thread_name);
}
#endif
thread->running = true;
while (SF_G_CONTINUE_FLAG) {
fc_queue_pop_to_queue(&thread->queues.version, &qinfo);
if (qinfo.head == NULL) {
continue;
}
if (deal_version_chain(context, &qinfo) != 0) {
logCrit("file: "__FILE__", line: %d, "
"deal_version_chain fail, "
"program exit!", __LINE__);
sf_terminate_myself();
}
}
thread->running = false;
return NULL;
}
static int binlog_wbuffer_alloc_init(void *element, void *args)
{
SFOrderedWriterBuffer *wbuffer;
SFOrderedWriterInfo *writer;
wbuffer = (SFOrderedWriterBuffer *)element;
writer = (SFOrderedWriterInfo *)args;
wbuffer->bf.alloc_size = writer->fw.cfg.max_record_size;
wbuffer->bf.buff = (char *)(wbuffer + 1);
return 0;
}
static int push_compare_buffer_version(const SFOrderedWriterBuffer *entry1,
const SFOrderedWriterBuffer *entry2)
{
return fc_compare_int64(entry1->version, entry2->version);
}
static int pop_compare_buffer_version(const SFOrderedWriterBuffer *entry,
const SFOrderedWriterBuffer *less_equal, void *arg)
{
return fc_compare_int64(entry->version, less_equal->version);
}
static int sf_ordered_writer_init_thread(SFOrderedWriterContext *context,
const char *name, const int max_record_size)
{
const int alloc_elements_once = 1024;
SFOrderedWriterThread *thread;
SFOrderedWriterInfo *writer;
int element_size;
pthread_t tid;
int result;
thread = &context->thread;
writer = &context->writer;
fc_safe_strcpy(thread->name, name);
writer->fw.cfg.max_record_size = max_record_size;
writer->thread = thread;
if ((result=fast_mblock_init_ex1(&thread->allocators.version,
"writer-ver-info", sizeof(SFWriterVersionEntry),
8 * 1024, 0, NULL, NULL, true)) != 0)
{
return result;
}
element_size = sizeof(SFOrderedWriterBuffer) + max_record_size;
if ((result=fast_mblock_init_ex1(&thread->allocators.buffer,
"sorted-wbuffer", element_size, alloc_elements_once,
0, binlog_wbuffer_alloc_init, writer, true)) != 0)
{
return result;
}
if ((result=fc_queue_init(&thread->queues.version, (unsigned long)
(&((SFWriterVersionEntry *)NULL)->next))) != 0)
{
return result;
}
if ((result=sorted_queue_init(&thread->queues.buffer, (unsigned long)
(&((SFOrderedWriterBuffer *)NULL)->dlink),
(int (*)(const void *, const void *))
push_compare_buffer_version,
(int (*)(const void *, const void *, void *arg))
pop_compare_buffer_version, NULL)) != 0)
{
return result;
}
return fc_create_thread(&tid, binlog_writer_func,
context, SF_G_THREAD_STACK_SIZE);
}
int sf_ordered_writer_init_ex(SFOrderedWriterContext *context,
const char *data_path, const char *subdir_name,
const char *file_prefix, const int buffer_size,
const int max_record_size, const int64_t file_rotate_size,
const bool call_fsync)
{
int result;
if ((result=sf_file_writer_init(&context->writer.fw, data_path,
subdir_name, file_prefix, max_record_size,
buffer_size, file_rotate_size, call_fsync)) != 0)
{
return result;
}
return sf_ordered_writer_init_thread(context,
subdir_name, max_record_size);
}

142
src/sf_ordered_writer.h Normal file
View File

@ -0,0 +1,142 @@
/*
* Copyright (c) 2020 YuQing <384681@qq.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
//sf_ordered_writer.h
#ifndef _SF_ORDERED_WRITER_H_
#define _SF_ORDERED_WRITER_H_
#include "fastcommon/sorted_queue.h"
#include "sf_file_writer.h"
typedef struct sf_writer_version_entry {
int64_t version;
struct sf_writer_version_entry *next;
} SFWriterVersionEntry;
typedef struct sf_ordered_writer_buffer {
int64_t version;
BufferInfo bf;
struct fc_list_head dlink;
} SFOrderedWriterBuffer;
typedef struct sf_orderd_writer_thread {
struct {
struct fast_mblock_man version;
struct fast_mblock_man buffer;
} allocators;
struct {
struct fc_queue version;
struct sorted_queue buffer;
} queues;
char name[64];
volatile bool running;
SFOrderedWriterBuffer waiting; //compare bound: pop buffers whose version is less than or equal to this
} SFOrderedWriterThread;
typedef struct sf_ordered_writer_info {
SFFileWriterInfo fw;
SFBinlogBuffer binlog_buffer;
SFOrderedWriterThread *thread;
} SFOrderedWriterInfo;
typedef struct sf_ordered_writer_context {
SFOrderedWriterInfo writer;
SFOrderedWriterThread thread;
} SFOrderedWriterContext;
#ifdef __cplusplus
extern "C" {
#endif
int sf_ordered_writer_init_ex(SFOrderedWriterContext *context,
const char *data_path, const char *subdir_name,
const char *file_prefix, const int buffer_size,
const int max_record_size, const int64_t file_rotate_size,
const bool call_fsync);
#define sf_ordered_writer_init(context, data_path, \
subdir_name, buffer_size, max_record_size) \
sf_ordered_writer_init_ex(context, data_path, subdir_name, \
SF_BINLOG_FILE_PREFIX_STR, buffer_size, max_record_size, \
SF_BINLOG_DEFAULT_ROTATE_SIZE, true)
#define sf_ordered_writer_set_flags(ctx, flags) \
sf_file_writer_set_flags(&(ctx)->writer.fw, flags)
#define sf_ordered_writer_get_last_version(ctx) \
sf_file_writer_get_last_version(&(ctx)->writer.fw)
void sf_ordered_writer_finish(SFOrderedWriterContext *ctx);
#define sf_ordered_writer_get_current_index(ctx) \
sf_file_writer_get_current_index(&(ctx)->writer.fw)
#define sf_ordered_writer_get_current_position(ctx, position) \
sf_file_writer_get_current_position(&(ctx)->writer.fw, position)
static inline int sf_ordered_writer_alloc_versions(
SFOrderedWriterContext *ctx, const int count,
struct fc_queue_info *chain)
{
return fc_queue_alloc_chain(&ctx->thread.queues.version,
&ctx->thread.allocators.version, count, chain);
}
static inline void sf_ordered_writer_push_versions(
SFOrderedWriterContext *ctx, struct fc_queue_info *chain)
{
fc_queue_push_queue_to_tail(&ctx->thread.queues.version, chain);
}
static inline SFOrderedWriterBuffer *sf_ordered_writer_alloc_buffer(
SFOrderedWriterContext *ctx, const int64_t version)
{
SFOrderedWriterBuffer *buffer;
buffer = (SFOrderedWriterBuffer *)fast_mblock_alloc_object(
&ctx->thread.allocators.buffer);
if (buffer != NULL) {
buffer->version = version;
}
return buffer;
}
#define sf_ordered_writer_get_filepath(data_path, subdir_name, filename, size) \
sf_file_writer_get_filepath(data_path, subdir_name, filename, size)
#define sf_ordered_writer_get_filename(data_path, \
subdir_name, binlog_index, filename, size) \
sf_file_writer_get_filename(data_path, subdir_name, \
binlog_index, filename, size)
#define sf_ordered_writer_set_binlog_index(ctx, binlog_index) \
sf_file_writer_set_binlog_index(&(ctx)->writer.fw, binlog_index)
#define sf_ordered_writer_push_to_thread_queue(ctx, buffer) \
sorted_queue_push(&(ctx)->thread.queues.buffer, buffer)
static inline void sf_ordered_writer_push_to_queue(
SFOrderedWriterContext *ctx,
SFOrderedWriterBuffer *buffer)
{
sorted_queue_push(&ctx->thread.queues.buffer, buffer);
}
#ifdef __cplusplus
}
#endif
#endif
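
The ordered writer pairs a version queue with a sorted buffer queue so that records produced out of order still reach the binlog in version order. A hedged sketch of the buffer-producing side only, assuming the matching version entries are reserved and pushed separately via sf_ordered_writer_alloc_versions()/sf_ordered_writer_push_versions(); the version, rec and rec_len arguments are illustrative:

static int append_record(SFOrderedWriterContext *ctx, const int64_t version,
        const char *rec, const int rec_len)
{
    SFOrderedWriterBuffer *wbuffer;

    if ((wbuffer=sf_ordered_writer_alloc_buffer(ctx, version)) == NULL) {
        return ENOMEM;
    }
    /* bf.buff was pre-sized to max_record_size by the allocator init */
    memcpy(wbuffer->bf.buff, rec, rec_len);
    wbuffer->bf.length = rec_len;
    sf_ordered_writer_push_to_queue(ctx, wbuffer);  //kept sorted by version
    return 0;
}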

View File

@ -14,27 +14,36 @@
*/
#include <errno.h>
#include "fastcommon/shared_func.h"
#include "sf_util.h"
#include "sf_nio.h"
#include "sf_proto.h"
static SFHandlerContext sf_handler_ctx = {NULL, {NULL, NULL}};
static int64_t log_slower_than_us = 0;
#define GET_CMD_CAPTION(cmd) sf_handler_ctx.callbacks.get_cmd_caption(cmd)
#define GET_CMD_LOG_LEVEL(cmd) sf_handler_ctx.callbacks.get_cmd_log_level(cmd)
int sf_proto_set_body_length(struct fast_task_info *task)
{
SFCommonProtoHeader *header;
char formatted_ip[FORMATTED_IP_SIZE];
header = (SFCommonProtoHeader *)task->data;
header = (SFCommonProtoHeader *)task->recv.ptr->data;
if (!SF_PROTO_CHECK_MAGIC(header->magic)) {
format_ip_address(task->client_ip, formatted_ip);
logError("file: "__FILE__", line: %d, "
"peer %s:%u, magic "SF_PROTO_MAGIC_FORMAT
" is invalid, expect: "SF_PROTO_MAGIC_FORMAT,
__LINE__, task->client_ip, task->port,
"%s peer %s:%u, magic "SF_PROTO_MAGIC_FORMAT" is invalid, "
"expect: "SF_PROTO_MAGIC_FORMAT", cmd: %d, body length: %d",
__LINE__, (task->handler != NULL ? task->handler->fh->ctx->
name : ""), formatted_ip, task->port,
SF_PROTO_MAGIC_PARAMS(header->magic),
SF_PROTO_MAGIC_EXPECT_PARAMS);
SF_PROTO_MAGIC_EXPECT_PARAMS, header->cmd,
buff2int(header->body_len));
return EINVAL;
}
task->length = buff2int(header->body_len); //set body length
task->recv.ptr->length = buff2int(header->body_len); //set body length
return 0;
}
@ -63,8 +72,14 @@ int sf_check_response(ConnectionInfo *conn, SFResponseInfo *response,
response->error.length = response->header.body_len;
}
if ((result=tcprecvdata_nb_ex(conn->sock, response->error.message,
response->error.length, network_timeout, &recv_bytes)) == 0)
if (conn->comm_type == fc_comm_type_rdma) {
memcpy(response->error.message, G_RDMA_CONNECTION_CALLBACKS.
get_recv_buffer(conn)->buff + sizeof(SFCommonProtoHeader),
response->error.length);
response->error.message[response->error.length] = '\0';
} else if ((result=tcprecvdata_nb_ex(conn->sock, response->
error.message, response->error.length,
network_timeout, &recv_bytes)) == 0)
{
response->error.message[response->error.length] = '\0';
} else {
@ -85,32 +100,66 @@ int sf_check_response(ConnectionInfo *conn, SFResponseInfo *response,
return response->header.status;
}
static inline int sf_recv_response_header(ConnectionInfo *conn,
SFResponseInfo *response, const int network_timeout)
{
int result;
BufferInfo *buffer;
SFCommonProtoHeader header_proto;
if (conn->comm_type == fc_comm_type_rdma) {
buffer = G_RDMA_CONNECTION_CALLBACKS.get_recv_buffer(conn);
if (buffer->length < sizeof(SFCommonProtoHeader)) {
response->error.length = sprintf(response->error.message,
"recv pkg length: %d < header size: %d",
buffer->length, (int)sizeof(SFCommonProtoHeader));
return EINVAL;
}
if ((result=sf_proto_parse_header((SFCommonProtoHeader *)
buffer->buff, response)) != 0)
{
return result;
}
if (buffer->length != (sizeof(SFCommonProtoHeader) +
response->header.body_len))
{
response->error.length = snprintf(response->error.message,
sizeof(response->error.message),
"recv package length: %d != calculate: %d",
buffer->length, (int)(sizeof(SFCommonProtoHeader) +
response->header.body_len));
return EINVAL;
}
return 0;
} else {
if ((result=tcprecvdata_nb(conn->sock, &header_proto,
sizeof(SFCommonProtoHeader), network_timeout)) != 0)
{
response->error.length = snprintf(response->error.message,
sizeof(response->error.message),
"recv data fail, errno: %d, error info: %s",
result, STRERROR(result));
return result;
}
return sf_proto_parse_header(&header_proto, response);
}
}
int sf_send_and_recv_response_header(ConnectionInfo *conn, char *data,
const int len, SFResponseInfo *response, const int network_timeout)
{
int result;
SFCommonProtoHeader header_proto;
if ((result=tcpsenddata_nb(conn->sock, data, len, network_timeout)) != 0) {
response->error.length = snprintf(response->error.message,
sizeof(response->error.message),
"send data fail, errno: %d, error info: %s",
result, STRERROR(result));
return result;
}
if ((result=tcprecvdata_nb(conn->sock, &header_proto,
sizeof(SFCommonProtoHeader), network_timeout)) != 0)
if ((result=sf_proto_send_buf1(conn, data, len,
response, network_timeout)) != 0)
{
response->error.length = snprintf(response->error.message,
sizeof(response->error.message),
"recv data fail, errno: %d, error info: %s",
result, STRERROR(result));
return result;
}
sf_proto_extract_header(&header_proto, &response->header);
return 0;
return sf_recv_response_header(conn, response, network_timeout);
}
int sf_send_and_recv_response_ex(ConnectionInfo *conn, char *send_data,
@ -169,7 +218,10 @@ int sf_send_and_recv_response_ex(ConnectionInfo *conn, char *send_data,
return 0;
}
if ((result=tcprecvdata_nb_ex(conn->sock, recv_data, response->
if (conn->comm_type == fc_comm_type_rdma) {
memcpy(recv_data, G_RDMA_CONNECTION_CALLBACKS.get_recv_buffer(conn)->
buff + sizeof(SFCommonProtoHeader), response->header.body_len);
} else if ((result=tcprecvdata_nb_ex(conn->sock, recv_data, response->
header.body_len, network_timeout, &recv_bytes)) != 0)
{
response->error.length = snprintf(response->error.message,
@ -209,7 +261,11 @@ int sf_send_and_recv_response_ex1(ConnectionInfo *conn, char *send_data,
return EOVERFLOW;
}
if ((result=tcprecvdata_nb_ex(conn->sock, recv_data, response->
if (conn->comm_type == fc_comm_type_rdma) {
memcpy(recv_data, G_RDMA_CONNECTION_CALLBACKS.get_recv_buffer(conn)->
buff + sizeof(SFCommonProtoHeader), response->header.body_len);
*body_len = response->header.body_len;
} else if ((result=tcprecvdata_nb_ex(conn->sock, recv_data, response->
header.body_len, network_timeout, body_len)) != 0)
{
response->error.length = snprintf(response->error.message,
@ -227,19 +283,12 @@ int sf_recv_response(ConnectionInfo *conn, SFResponseInfo *response,
{
int result;
int recv_bytes;
SFCommonProtoHeader header_proto;
if ((result=tcprecvdata_nb(conn->sock, &header_proto,
sizeof(SFCommonProtoHeader), network_timeout)) != 0)
if ((result=sf_recv_response_header(conn, response,
network_timeout)) != 0)
{
response->error.length = snprintf(response->error.message,
sizeof(response->error.message),
"recv data fail, errno: %d, error info: %s",
result, STRERROR(result));
return result;
}
sf_proto_extract_header(&header_proto, &response->header);
if ((result=sf_check_response(conn, response, network_timeout,
expect_cmd)) != 0)
{
@ -257,8 +306,11 @@ int sf_recv_response(ConnectionInfo *conn, SFResponseInfo *response,
return 0;
}
if ((result=tcprecvdata_nb_ex(conn->sock, recv_data,
expect_body_len, network_timeout, &recv_bytes)) != 0)
if (conn->comm_type == fc_comm_type_rdma) {
memcpy(recv_data, G_RDMA_CONNECTION_CALLBACKS.get_recv_buffer(conn)->
buff + sizeof(SFCommonProtoHeader), response->header.body_len);
} else if ((result=tcprecvdata_nb_ex(conn->sock, recv_data, expect_body_len,
network_timeout, &recv_bytes)) != 0)
{
response->error.length = snprintf(response->error.message,
sizeof(response->error.message),
@ -271,6 +323,94 @@ int sf_recv_response(ConnectionInfo *conn, SFResponseInfo *response,
return result;
}
int sf_recv_vary_response(ConnectionInfo *conn, SFResponseInfo *response,
const int network_timeout, const unsigned char expect_cmd,
SFProtoRecvBuffer *buffer, const int min_body_len)
{
int result;
int recv_bytes;
if ((result=sf_recv_response_header(conn, response,
network_timeout)) != 0)
{
return result;
}
if ((result=sf_check_response(conn, response, network_timeout,
expect_cmd)) != 0)
{
return result;
}
if (response->header.body_len < min_body_len) {
response->error.length = sprintf(response->error.message,
"response body length: %d < %d",
response->header.body_len, min_body_len);
return EINVAL;
}
if (response->header.body_len <= buffer->alloc_size) {
if (response->header.body_len == 0) {
return 0;
}
} else {
int alloc_size;
char *buff;
if (buffer->alloc_size > 0) {
alloc_size = 2 * buffer->alloc_size;
} else {
alloc_size = 64 * 1024;
}
while (alloc_size < response->header.body_len) {
alloc_size *= 2;
}
buff = (char *)fc_malloc(alloc_size);
if (buff == NULL) {
return ENOMEM;
}
if (buffer->buff != buffer->fixed && buffer->buff != NULL) {
free(buffer->buff);
}
buffer->buff = buff;
buffer->alloc_size = alloc_size;
}
if (conn->comm_type == fc_comm_type_rdma) {
memcpy(buffer->buff, G_RDMA_CONNECTION_CALLBACKS.get_recv_buffer(conn)->
buff + sizeof(SFCommonProtoHeader), response->header.body_len);
} else if ((result=tcprecvdata_nb_ex(conn->sock, buffer->buff, response->
header.body_len, network_timeout, &recv_bytes)) != 0)
{
response->error.length = snprintf(response->error.message,
sizeof(response->error.message),
"recv body fail, recv bytes: %d, expect body length: %d, "
"errno: %d, error info: %s", recv_bytes,
response->header.body_len,
result, STRERROR(result));
}
return result;
}
int sf_send_and_recv_vary_response(ConnectionInfo *conn,
char *send_data, const int send_len, SFResponseInfo *response,
const int network_timeout, const unsigned char expect_cmd,
SFProtoRecvBuffer *buffer, const int min_body_len)
{
int result;
if ((result=sf_proto_send_buf1(conn, send_data, send_len,
response, network_timeout)) != 0)
{
return result;
}
return sf_recv_vary_response(conn, response, network_timeout,
expect_cmd, buffer, min_body_len);
}
const char *sf_get_cmd_caption(const int cmd)
{
switch (cmd) {
@ -296,6 +436,18 @@ const char *sf_get_cmd_caption(const int cmd)
return "REPORT_REQ_RECEIPT_REQ";
case SF_SERVICE_PROTO_REPORT_REQ_RECEIPT_RESP:
return "REPORT_REQ_RECEIPT_RESP";
case SF_SERVICE_PROTO_GET_GROUP_SERVERS_REQ:
return "GET_GROUP_SERVERS_REQ";
case SF_SERVICE_PROTO_GET_GROUP_SERVERS_RESP:
return "GET_GROUP_SERVERS_RESP";
case SF_SERVICE_PROTO_GET_LEADER_REQ:
return "GET_LEADER_REQ";
case SF_SERVICE_PROTO_GET_LEADER_RESP:
return "GET_LEADER_RESP";
case SF_CLUSTER_PROTO_GET_SERVER_STATUS_REQ:
return "GET_SERVER_STATUS_REQ";
case SF_CLUSTER_PROTO_GET_SERVER_STATUS_RESP:
return "GET_SERVER_STATUS_RESP";
default:
return "UNKOWN";
}
@ -304,14 +456,17 @@ const char *sf_get_cmd_caption(const int cmd)
int sf_proto_deal_ack(struct fast_task_info *task,
SFRequestInfo *request, SFResponseInfo *response)
{
char formatted_ip[FORMATTED_IP_SIZE];
if (request->header.status != 0) {
if (request->header.body_len > 0) {
int remain_size;
int len;
format_ip_address(task->client_ip, formatted_ip);
response->error.length = sprintf(response->error.message,
"message from peer %s:%u => ",
task->client_ip, task->port);
formatted_ip, task->port);
remain_size = sizeof(response->error.message) -
response->error.length;
if (request->header.body_len >= remain_size) {
@ -339,7 +494,8 @@ int sf_proto_deal_ack(struct fast_task_info *task,
}
int sf_proto_rebind_idempotency_channel(ConnectionInfo *conn,
const uint32_t channel_id, const int key, const int network_timeout)
const char *service_name, const uint32_t channel_id,
const int key, const int network_timeout)
{
char out_buff[sizeof(SFCommonProtoHeader) +
sizeof(SFProtoRebindChannelReq)];
@ -359,8 +515,215 @@ int sf_proto_rebind_idempotency_channel(ConnectionInfo *conn,
sizeof(out_buff), &response, network_timeout,
SF_SERVICE_PROTO_REBIND_CHANNEL_RESP)) != 0)
{
sf_log_network_error(&response, conn, result);
sf_log_network_error(&response, conn, service_name, result);
}
return result;
}
int sf_proto_get_group_servers(ConnectionInfo *conn,
const char *service_name, const int network_timeout,
const int group_id, SFGroupServerArray *sarray)
{
char out_buff[sizeof(SFCommonProtoHeader) +
sizeof(SFProtoGetGroupServersReq)];
char in_buff[1024];
char formatted_ip[FORMATTED_IP_SIZE];
SFCommonProtoHeader *header;
SFProtoGetGroupServersReq *req;
SFProtoGetGroupServersRespBodyHeader *body_header;
SFProtoGetGroupServersRespBodyPart *body_part;
SFGroupServerInfo *server;
SFGroupServerInfo *end;
SFResponseInfo response;
int result;
int body_len;
int count;
header = (SFCommonProtoHeader *)out_buff;
req = (SFProtoGetGroupServersReq *)(header + 1);
int2buff(group_id, req->group_id);
SF_PROTO_SET_HEADER(header, SF_SERVICE_PROTO_GET_GROUP_SERVERS_REQ,
sizeof(SFProtoGetGroupServersReq));
response.error.length = 0;
if ((result=sf_send_and_recv_response_ex1(conn, out_buff,
sizeof(out_buff), &response, network_timeout,
SF_SERVICE_PROTO_GET_GROUP_SERVERS_RESP, in_buff,
sizeof(in_buff), &body_len)) != 0)
{
sf_log_network_error(&response, conn, service_name, result);
return result;
}
if (body_len < sizeof(SFProtoGetGroupServersRespBodyHeader)) {
format_ip_address(conn->ip_addr, formatted_ip);
logError("file: "__FILE__", line: %d, "
"server %s:%u response body length: %d < %d",
__LINE__, formatted_ip, conn->port, body_len,
(int)sizeof(SFProtoGetGroupServersRespBodyHeader));
return EINVAL;
}
body_header = (SFProtoGetGroupServersRespBodyHeader *)in_buff;
count = buff2int(body_header->count);
if (count <= 0) {
format_ip_address(conn->ip_addr, formatted_ip);
logError("file: "__FILE__", line: %d, "
"server %s:%u response server count: %d <= 0",
__LINE__, formatted_ip, conn->port, count);
return EINVAL;
}
if (count > sarray->alloc) {
format_ip_address(conn->ip_addr, formatted_ip);
logError("file: "__FILE__", line: %d, "
"server %s:%u response server count: %d is too large, "
"exceeds %d", __LINE__, formatted_ip, conn->port,
count, sarray->alloc);
return EOVERFLOW;
}
sarray->count = count;
body_part = (SFProtoGetGroupServersRespBodyPart *)(body_header + 1);
end = sarray->servers + sarray->count;
for (server=sarray->servers; server<end; server++, body_part++) {
server->id = buff2int(body_part->server_id);
server->is_master = body_part->is_master;
server->is_active = body_part->is_active;
}
return 0;
}
int sf_proto_get_leader(ConnectionInfo *conn, const char *service_name,
const int network_timeout, SFClientServerEntry *leader)
{
int result;
SFCommonProtoHeader *header;
SFResponseInfo response;
SFProtoGetServerResp server_resp;
char out_buff[sizeof(SFCommonProtoHeader)];
header = (SFCommonProtoHeader *)out_buff;
SF_PROTO_SET_HEADER(header, SF_SERVICE_PROTO_GET_LEADER_REQ,
sizeof(out_buff) - sizeof(SFCommonProtoHeader));
if ((result=sf_send_and_recv_response(conn, out_buff,
sizeof(out_buff), &response, network_timeout,
SF_SERVICE_PROTO_GET_LEADER_RESP, (char *)&server_resp,
sizeof(SFProtoGetServerResp))) != 0)
{
sf_log_network_error(&response, conn, service_name, result);
} else {
leader->server_id = buff2int(server_resp.server_id);
memcpy(leader->conn.ip_addr, server_resp.ip_addr, IP_ADDRESS_SIZE);
*(leader->conn.ip_addr + IP_ADDRESS_SIZE - 1) = '\0';
leader->conn.port = buff2short(server_resp.port);
leader->conn.comm_type = conn->comm_type;
}
return result;
}
void sf_proto_set_handler_context(const SFHandlerContext *ctx)
{
sf_handler_ctx = *ctx;
log_slower_than_us = ctx->slow_log->cfg.log_slower_than_ms * 1000;
}
int sf_proto_deal_task_done(struct fast_task_info *task,
const char *service_name, SFCommonTaskContext *ctx)
{
SFCommonProtoHeader *proto_header;
int status;
int r;
int64_t time_used;
int log_level;
char formatted_ip[FORMATTED_IP_SIZE];
char time_buff[32];
if (ctx->log_level != LOG_NOTHING && ctx->response.error.length > 0) {
log_it_ex(&g_log_context, ctx->log_level,
"file: "__FILE__", line: %d, %s "
"peer %s:%u, cmd: %d (%s), req body length: %d, "
"resp status: %d, %s", __LINE__, service_name,
format_ip_address(task->client_ip, formatted_ip),
task->port, ctx->request.header.cmd,
GET_CMD_CAPTION(ctx->request.header.cmd),
ctx->request.header.body_len, ctx->response.header.status,
ctx->response.error.message);
}
if (!ctx->need_response) {
if (sf_handler_ctx.callbacks.get_cmd_log_level != NULL) {
time_used = get_current_time_us() - ctx->req_start_time;
log_level = GET_CMD_LOG_LEVEL(ctx->request.header.cmd);
log_it_ex(&g_log_context, log_level, "file: "__FILE__", line: %d, "
"%s client %s:%u, req cmd: %d (%s), req body_len: %d, "
"resp status: %d, time used: %s us", __LINE__, service_name,
format_ip_address(task->client_ip, formatted_ip),
task->port, ctx->request.header.cmd,
GET_CMD_CAPTION(ctx->request.header.cmd),
ctx->request.header.body_len, ctx->response.header.status,
long_to_comma_str(time_used, time_buff));
}
if (ctx->response.header.status == 0) {
return sf_set_read_event(task);
} else {
return FC_NEGATIVE(ctx->response.header.status);
}
}
proto_header = (SFCommonProtoHeader *)task->send.ptr->data;
if (!ctx->response_done) {
ctx->response.header.body_len = ctx->response.error.length;
if (ctx->response.error.length > 0) {
memcpy(task->send.ptr->data + sizeof(SFCommonProtoHeader),
ctx->response.error.message, ctx->response.error.length);
}
}
status = sf_unify_errno(FC_ABS(ctx->response.header.status));
short2buff(status, proto_header->status);
short2buff(ctx->response.header.flags, proto_header->flags);
proto_header->cmd = ctx->response.header.cmd;
int2buff(ctx->response.header.body_len, proto_header->body_len);
task->send.ptr->length = sizeof(SFCommonProtoHeader) +
ctx->response.header.body_len;
r = sf_send_add_event(task);
time_used = get_current_time_us() - ctx->req_start_time;
if ((sf_handler_ctx.slow_log != NULL) && (sf_handler_ctx.slow_log->
cfg.enabled && time_used > log_slower_than_us))
{
char buff[256];
int blen;
blen = sprintf(buff, "timed used: %s us, %s client %s:%u, "
"req cmd: %d (%s), req body len: %d, resp cmd: %d (%s), "
"status: %d, resp body len: %d", long_to_comma_str(time_used,
time_buff), service_name, format_ip_address(task->
client_ip, formatted_ip), task->port, ctx->request.
header.cmd, GET_CMD_CAPTION(ctx->request.header.cmd),
ctx->request.header.body_len, ctx->response.header.cmd,
GET_CMD_CAPTION(ctx->response.header.cmd),
ctx->response.header.status, ctx->response.header.body_len);
log_it_ex2(&sf_handler_ctx.slow_log->ctx, NULL, buff, blen, false, true);
}
if (sf_handler_ctx.callbacks.get_cmd_log_level != NULL) {
log_level = GET_CMD_LOG_LEVEL(ctx->request.header.cmd);
log_it_ex(&g_log_context, log_level, "file: "__FILE__", line: %d, "
"%s client %s:%u, req cmd: %d (%s), req body_len: %d, "
"resp cmd: %d (%s), status: %d, resp body_len: %d, "
"time used: %s us", __LINE__, service_name,
format_ip_address(task->client_ip, formatted_ip),
task->port, ctx->request.header.cmd,
GET_CMD_CAPTION(ctx->request.header.cmd),
ctx->request.header.body_len, ctx->response.header.cmd,
GET_CMD_CAPTION(ctx->response.header.cmd),
ctx->response.header.status, ctx->response.header.body_len,
long_to_comma_str(time_used, time_buff));
}
return r == 0 ? ctx->response.header.status : r;
}
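
The new client helpers follow one pattern: build a fixed request header, exchange it with sf_send_and_recv_response(), then unpack the body. A hedged sketch calling sf_proto_get_leader() on an already established connection; the service name "myservice" is illustrative and is only used for error logging:

static int query_leader(ConnectionInfo *conn, SFClientServerEntry *leader)
{
    int result;

    if ((result=sf_proto_get_leader(conn, "myservice",
                    SF_G_NETWORK_TIMEOUT, leader)) != 0)
    {
        return result;   //the helper already logged the network error
    }
    logInfo("leader server id: %d, address %s:%u", leader->server_id,
            leader->conn.ip_addr, leader->conn.port);
    return 0;
}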

View File

@ -27,6 +27,12 @@
#include "sf_types.h"
#include "sf_util.h"
//for connection manager
#define SF_SERVICE_PROTO_GET_GROUP_SERVERS_REQ 111
#define SF_SERVICE_PROTO_GET_GROUP_SERVERS_RESP 112
#define SF_SERVICE_PROTO_GET_LEADER_REQ 113
#define SF_SERVICE_PROTO_GET_LEADER_RESP 114
#define SF_PROTO_ACK 116
#define SF_PROTO_ACTIVE_TEST_REQ 117
@ -42,6 +48,10 @@
#define SF_SERVICE_PROTO_REPORT_REQ_RECEIPT_REQ 125
#define SF_SERVICE_PROTO_REPORT_REQ_RECEIPT_RESP 126
#define SF_CLUSTER_PROTO_GET_SERVER_STATUS_REQ 201
#define SF_CLUSTER_PROTO_GET_SERVER_STATUS_RESP 202
#define SF_PROTO_MAGIC_CHAR '@'
#define SF_PROTO_SET_MAGIC(m) \
m[0] = m[1] = m[2] = m[3] = SF_PROTO_MAGIC_CHAR
@ -58,19 +68,17 @@
#define SF_PROTO_MAGIC_PARAMS(m) \
m[0], m[1], m[2], m[3]
#define SF_PROTO_SET_HEADER(header, _cmd, _body_len) \
#define SF_PROTO_SET_HEADER_EX(header, _cmd, _flags, _body_len) \
do { \
SF_PROTO_SET_MAGIC((header)->magic); \
(header)->cmd = _cmd; \
(header)->status[0] = (header)->status[1] = 0; \
short2buff(_flags, (header)->flags); \
int2buff(_body_len, (header)->body_len); \
} while (0)
#define SF_PROTO_SET_HEADER_EX(header, _cmd, _flags, _body_len) \
do { \
SF_PROTO_SET_HEADER(header, _cmd, _body_len); \
short2buff(_flags, (header)->flags); \
} while (0)
#define SF_PROTO_SET_HEADER(header, _cmd, _body_len) \
SF_PROTO_SET_HEADER_EX(header, _cmd, 0, _body_len)
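
With the flags field now handled by SF_PROTO_SET_HEADER_EX, the plain SF_PROTO_SET_HEADER becomes the zero-flags case. A hedged sketch building an ACTIVE_TEST request header (assumed to carry no body) in a stack buffer:

char out_buff[sizeof(SFCommonProtoHeader)];
SFCommonProtoHeader *header;

header = (SFCommonProtoHeader *)out_buff;
SF_PROTO_SET_HEADER(header, SF_PROTO_ACTIVE_TEST_REQ, 0);  //flags default to 0
/* out_buff can now be sent as-is, e.g. with sf_proto_send_buf1() */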
#define SF_PROTO_SET_RESPONSE_HEADER(proto_header, resp_header) \
do { \
@ -80,6 +88,52 @@
} while (0)
#define SF_PROTO_SEND_BODY(task) \
(task->send.ptr->data + sizeof(SFCommonProtoHeader))
#define SF_PROTO_RECV_BODY(task) \
(task->recv.ptr->data + sizeof(SFCommonProtoHeader))
#define SF_RECV_BODY_LENGTH(task) \
(task->recv.ptr->length - sizeof(SFCommonProtoHeader))
#define SF_SEND_BUFF_END(task) (task->send.ptr->data + task->send.ptr->size)
#define SF_RECV_BUFF_END(task) (task->recv.ptr->data + task->recv.ptr->size)
#define SF_PROTO_UPDATE_EXTRA_BODY_SIZE \
sizeof(SFProtoIdempotencyAdditionalHeader) + FCFS_AUTH_SESSION_ID_LEN
#define SF_PROTO_QUERY_EXTRA_BODY_SIZE FCFS_AUTH_SESSION_ID_LEN
#define SF_PROTO_CLIENT_SET_REQ_EX(client_ctx, auth_enabled, \
out_buff, header, req, the_req_id, out_bytes) \
do { \
char *the_req_start; \
header = (SFCommonProtoHeader *)out_buff; \
the_req_start = (char *)(header + 1); \
out_bytes = sizeof(SFCommonProtoHeader) + sizeof(*req); \
if (auth_enabled) { \
out_bytes += FCFS_AUTH_SESSION_ID_LEN; \
memcpy(the_req_start, client_ctx->auth.ctx-> \
session.id, FCFS_AUTH_SESSION_ID_LEN); \
the_req_start += FCFS_AUTH_SESSION_ID_LEN; \
} \
if (the_req_id > 0) { \
long2buff(the_req_id, ((SFProtoIdempotencyAdditionalHeader *)\
the_req_start)->req_id); \
out_bytes += sizeof(SFProtoIdempotencyAdditionalHeader); \
req = (typeof(req))(the_req_start + \
sizeof(SFProtoIdempotencyAdditionalHeader)); \
} else { \
req = (typeof(req))the_req_start; \
} \
} while (0)
#define SF_PROTO_CLIENT_SET_REQ(client_ctx, out_buff, \
header, req, the_req_id, out_bytes) \
SF_PROTO_CLIENT_SET_REQ_EX(client_ctx, client_ctx->auth.enabled, \
out_buff, header, req, the_req_id, out_bytes)
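/*
 * Usage sketch for the request-building macros above.  SFClientContext is
 * assumed here as the application's client context type carrying the
 * auth.enabled and auth.ctx members the macro dereferences; the buffer,
 * the GET_GROUP_SERVERS command and req_id == 0 (no idempotency header)
 * are illustrative choices only.
 */
static inline int demo_pack_get_group_servers_req(SFClientContext *client_ctx,
        const int group_id, char *out_buff, int *out_bytes)
{
    SFCommonProtoHeader *header;
    SFProtoGetGroupServersReq *req;
    const int64_t req_id = 0;   /* 0: do NOT prepend the idempotency header */
    int bytes;
    SF_PROTO_CLIENT_SET_REQ(client_ctx, out_buff, header, req, req_id, bytes);
    int2buff(group_id, req->group_id);
    memset(req->padding, 0, sizeof(req->padding));
    SF_PROTO_SET_HEADER(header, SF_SERVICE_PROTO_GET_GROUP_SERVERS_REQ,
            bytes - sizeof(SFCommonProtoHeader));
    *out_bytes = bytes;
    return 0;
}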
typedef struct sf_common_proto_header {
unsigned char magic[4]; //magic number
char body_len[4]; //body length
@@ -89,6 +143,39 @@ typedef struct sf_common_proto_header {
char padding[3];
} SFCommonProtoHeader;
typedef struct sf_proto_limit_info {
char offset[4];
char count[4];
} SFProtoLimitInfo;
typedef struct sf_proto_get_group_servers_req {
char group_id[4];
char padding[4];
} SFProtoGetGroupServersReq;
typedef struct sf_proto_get_group_servers_resp_body_header {
char count[4];
char padding[4];
} SFProtoGetGroupServersRespBodyHeader;
typedef struct sf_proto_get_group_servers_resp_body_part {
char server_id[4];
char is_master;
char is_active;
char padding[2];
} SFProtoGetGroupServersRespBodyPart;
typedef struct sf_proto_get_server_resp {
char ip_addr[IP_ADDRESS_SIZE];
char server_id[4];
char port[2];
char padding[2];
} SFProtoGetServerResp;
typedef struct sf_proto_empty_body_req {
char nothing[0];
} SFProtoEmptyBodyReq;
typedef struct sf_proto_idempotency_additional_header {
char req_id[8];
} SFProtoIdempotencyAdditionalHeader;
@@ -101,8 +188,8 @@ typedef struct sf_proto_setup_channel_req {
typedef struct sf_proto_setup_channel_resp {
char channel_id[4];
char key[4];
char server_id[4];
char buffer_size[4];
char padding[4];
} SFProtoSetupChannelResp;
typedef struct sf_proto_rebind_channel_req {
@@ -119,52 +206,281 @@ typedef struct sf_proto_report_req_receipt_body {
char req_id[8];
} SFProtoReportReqReceiptBody;
typedef struct {
unsigned char servers[SF_CLUSTER_CONFIG_SIGN_LEN];
unsigned char cluster[SF_CLUSTER_CONFIG_SIGN_LEN];
} SFProtoConfigSigns;
typedef struct sf_proto_get_server_status_req {
SFProtoConfigSigns config_signs;
char server_id[4]; //my server id
union {
char is_leader;
char is_master;
};
char auth_enabled;
char padding[2];
} SFProtoGetServerStatusReq;
typedef struct sf_get_server_status_request {
const unsigned char *servers_sign;
const unsigned char *cluster_sign;
int server_id; //my server id
union {
bool is_leader;
bool is_master;
};
bool auth_enabled;
} SFGetServerStatusRequest;
typedef struct sf_group_server_info {
int id;
bool is_leader;
bool is_master;
bool is_active;
char padding[1];
} SFGroupServerInfo;
typedef struct sf_group_server_array {
SFGroupServerInfo *servers;
int alloc;
int count;
} SFGroupServerArray;
typedef struct sf_client_server_entry {
int server_id;
ConnectionInfo conn;
} SFClientServerEntry;
typedef const char *(*sf_get_cmd_caption_func)(const int cmd);
typedef int (*sf_get_cmd_log_level_func)(const int cmd);
typedef struct {
int alloc_size;
int fixed_size;
char *fixed;
char *buff;
} SFProtoRecvBuffer;
typedef struct {
char fixed[64 * 1024];
SFProtoRecvBuffer buffer;
} SFProtoRBufferFixedWrapper;
typedef struct {
sf_get_cmd_caption_func get_cmd_caption;
sf_get_cmd_log_level_func get_cmd_log_level;
} SFCommandCallbacks;
typedef struct {
SFSlowLogContext *slow_log;
SFCommandCallbacks callbacks;
} SFHandlerContext;
#ifdef __cplusplus
extern "C" {
#endif
void sf_proto_set_handler_context(const SFHandlerContext *ctx);
int sf_proto_set_body_length(struct fast_task_info *task);
const char *sf_get_cmd_caption(const int cmd);
int sf_proto_deal_task_done(struct fast_task_info *task,
const char *service_name, SFCommonTaskContext *ctx);
static inline void sf_proto_init_task_magic(struct fast_task_info *task)
{
SF_PROTO_SET_MAGIC(((SFCommonProtoHeader *)
task->send.ptr->data)->magic);
if (task->recv.ptr != task->send.ptr) {
SF_PROTO_SET_MAGIC(((SFCommonProtoHeader *)
task->recv.ptr->data)->magic);
}
}
static inline void sf_proto_init_task_context(struct fast_task_info *task,
SFCommonTaskContext *ctx)
{
ctx->req_start_time = get_current_time_us();
ctx->response.header.cmd = SF_PROTO_ACK;
ctx->response.header.body_len = 0;
ctx->response.header.status = 0;
ctx->response.header.flags = 0;
ctx->response.error.length = 0;
ctx->response.error.message[0] = '\0';
ctx->log_level = LOG_ERR;
ctx->response_done = false;
ctx->need_response = true;
ctx->request.header.cmd = ((SFCommonProtoHeader *)
task->recv.ptr->data)->cmd;
ctx->request.header.body_len = SF_RECV_BODY_LENGTH(task);
ctx->request.header.status = buff2short(((SFCommonProtoHeader *)
task->recv.ptr->data)->status);
ctx->request.header.flags = buff2short(((SFCommonProtoHeader *)
task->recv.ptr->data)->flags);
if (task->recv_body != NULL) {
ctx->request.body = task->recv_body;
} else {
ctx->request.body = SF_PROTO_RECV_BODY(task);
}
}
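/*
 * Sketch of a server-side request handler built on the two helpers above.
 * The dispatch, the "demo" service name and the single-argument signature
 * are illustrative only; the real callback matches whatever the application
 * registered through its sf_service setup.
 */
static inline int demo_deal_task(struct fast_task_info *task)
{
    int result;
    SFCommonTaskContext ctx;
    sf_proto_init_task_context(task, &ctx);
    switch (ctx.request.header.cmd) {
        case SF_PROTO_ACTIVE_TEST_REQ:
            result = 0;   /* response cmd already defaults to SF_PROTO_ACK */
            break;
        default:
            ctx.response.error.length = snprintf(ctx.response.error.message,
                    sizeof(ctx.response.error.message),
                    "unknown cmd: %d", ctx.request.header.cmd);
            result = EINVAL;
            break;
    }
    ctx.response.header.status = result;
    return sf_proto_deal_task_done(task, "demo", &ctx);
}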
/* task send and recv buffer operations */
static inline int sf_set_task_send_buffer_size(
struct fast_task_info *task, const int expect_size)
{
int result;
if ((result=free_queue_set_buffer_size(task, task->send.ptr,
expect_size)) == 0)
{
SF_PROTO_SET_MAGIC(((SFCommonProtoHeader *)
task->send.ptr->data)->magic);
}
return result;
}
static inline int sf_set_task_recv_buffer_size(
struct fast_task_info *task, const int expect_size)
{
int result;
if ((result=free_queue_set_buffer_size(task, task->recv.ptr,
expect_size)) == 0)
{
SF_PROTO_SET_MAGIC(((SFCommonProtoHeader *)
task->recv.ptr->data)->magic);
}
return result;
}
static inline int sf_set_task_send_max_buffer_size(
struct fast_task_info *task)
{
int result;
if ((result=free_queue_set_max_buffer_size(task, task->send.ptr)) == 0) {
SF_PROTO_SET_MAGIC(((SFCommonProtoHeader *)
task->send.ptr->data)->magic);
}
return result;
}
static inline int sf_set_task_recv_max_buffer_size(
struct fast_task_info *task)
{
int result;
if ((result=free_queue_set_max_buffer_size(task, task->recv.ptr)) == 0) {
SF_PROTO_SET_MAGIC(((SFCommonProtoHeader *)
task->recv.ptr->data)->magic);
}
return result;
}
static inline int sf_realloc_task_send_buffer(
struct fast_task_info *task, const int expect_size)
{
int result;
if ((result=free_queue_realloc_buffer(task, task->send.ptr,
expect_size)) == 0)
{
SF_PROTO_SET_MAGIC(((SFCommonProtoHeader *)
task->send.ptr->data)->magic);
}
return result;
}
static inline int sf_realloc_task_recv_buffer(
struct fast_task_info *task, const int expect_size)
{
int result;
if ((result=free_queue_realloc_buffer(task, task->recv.ptr,
expect_size)) == 0)
{
SF_PROTO_SET_MAGIC(((SFCommonProtoHeader *)
task->recv.ptr->data)->magic);
}
return result;
}
static inline int sf_realloc_task_send_max_buffer(
struct fast_task_info *task)
{
int result;
if ((result=free_queue_realloc_max_buffer(task, task->send.ptr)) == 0) {
SF_PROTO_SET_MAGIC(((SFCommonProtoHeader *)
task->send.ptr->data)->magic);
}
return result;
}
static inline int sf_realloc_task_recv_max_buffer(
struct fast_task_info *task)
{
int result;
if ((result=free_queue_realloc_max_buffer(task, task->recv.ptr)) == 0) {
SF_PROTO_SET_MAGIC(((SFCommonProtoHeader *)
task->recv.ptr->data)->magic);
}
return result;
}
static inline void sf_log_network_error_ex1(SFResponseInfo *response,
const ConnectionInfo *conn, const int result,
const int log_level, const char *file, const int line)
const ConnectionInfo *conn, const char *service_name,
const int result, const int log_level,
const char *file, const int line)
{
if (response->error.length > 0) {
log_it_ex(&g_log_context, log_level,
"file: %s, line: %d, "
"server %s:%u, %s", file, line,
log_it_ex(&g_log_context, log_level, "file: %s, line: %d, "
"%s%sserver %s:%u response message: %s", file, line,
(service_name != NULL ? service_name : ""),
(service_name != NULL ? " ": ""),
conn->ip_addr, conn->port,
response->error.message);
} else {
log_it_ex(&g_log_context, log_level,
"file: %s, line: %d, "
"communicate with server %s:%u fail, "
log_it_ex(&g_log_context, log_level, "file: %s, line: %d, "
"communicate with %s%sserver %s:%u fail, "
"errno: %d, error info: %s", file, line,
(service_name != NULL ? service_name : ""),
(service_name != NULL ? " ": ""),
conn->ip_addr, conn->port,
result, STRERROR(result));
}
}
#define sf_log_network_error_ex(response, conn, result, log_level) \
sf_log_network_error_ex1(response, conn, result, \
log_level, __FILE__, __LINE__)
#define sf_log_network_error_ex(response, conn, \
service_name, result, log_level) \
sf_log_network_error_ex1(response, conn, service_name, \
result, log_level, __FILE__, __LINE__)
#define sf_log_network_error(response, conn, result) \
sf_log_network_error_ex1(response, conn, result, \
#define sf_log_network_error(response, conn, service_name, result) \
sf_log_network_error_ex1(response, conn, service_name, result, \
LOG_ERR, __FILE__, __LINE__)
#define sf_log_network_error_for_update(response, conn, result) \
sf_log_network_error_ex(response, conn, result, \
(result == SF_RETRIABLE_ERROR_CHANNEL_INVALID) ? \
LOG_DEBUG : LOG_ERR)
#define sf_log_network_error_for_update_ex(response, conn, \
service_name, result, enoent_log_level, file, line) \
sf_log_network_error_ex1(response, conn, service_name, result, \
(result == SF_RETRIABLE_ERROR_CHANNEL_INVALID) ? \
LOG_DEBUG : ((result == ENOENT || result == ENODATA) ? \
enoent_log_level : LOG_ERR), file, line)
#define sf_log_network_error_for_delete(response, \
conn, result, enoent_log_level) \
sf_log_network_error_ex(response, conn, result, \
#define sf_log_network_error_for_update(response, conn, service_name, result) \
sf_log_network_error_for_update_ex(response, conn, service_name, \
result, LOG_ERR, __FILE__, __LINE__)
#define sf_log_network_error_for_delete_ex(response, conn, \
service_name, result, enoent_log_level, file, line) \
sf_log_network_error_ex1(response, conn, service_name, result, \
(result == SF_RETRIABLE_ERROR_CHANNEL_INVALID) ? \
LOG_DEBUG : ((result == ENOENT) ? enoent_log_level : LOG_ERR))
LOG_DEBUG : ((result == ENOENT || result == ENODATA) ? \
enoent_log_level : LOG_ERR), file, line)
#define sf_log_network_error_for_delete(response, \
conn, service_name, result, enoent_log_level) \
sf_log_network_error_for_delete_ex(response, conn, service_name, \
result, enoent_log_level, __FILE__, __LINE__)
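/*
 * Call-site sketch for the logging macros above with the service_name
 * parameter added in this change; "fdir" stands in for whatever label the
 * caller wants tagged in its log lines (sf_recv_response is declared
 * further down in this header).
 */
static inline int demo_recv_and_log(ConnectionInfo *conn,
        SFResponseInfo *response, const int network_timeout,
        const unsigned char expect_cmd, char *recv_data,
        const int expect_body_len)
{
    int result;
    if ((result=sf_recv_response(conn, response, network_timeout,
                    expect_cmd, recv_data, expect_body_len)) != 0)
    {
        sf_log_network_error(response, conn, "fdir", result);
    }
    return result;
}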
static inline int sf_server_expect_body_length(SFResponseInfo *response,
const int body_length, const int expect_body_len)
@@ -222,6 +538,23 @@ static inline int sf_server_check_body_length(
body_length, max_body_length);
}
#define server_expect_body_length(expect_body_len) \
sf_server_expect_body_length(&RESPONSE, REQUEST.header.body_len, \
expect_body_len)
#define server_check_min_body_length(min_body_length) \
sf_server_check_min_body_length(&RESPONSE, REQUEST.header.body_len, \
min_body_length)
#define server_check_max_body_length(max_body_length) \
sf_server_check_max_body_length(&RESPONSE, REQUEST.header.body_len, \
max_body_length)
#define server_check_body_length(min_body_length, max_body_length) \
sf_server_check_body_length(&RESPONSE, REQUEST.header.body_len, \
min_body_length, max_body_length)
int sf_check_response(ConnectionInfo *conn, SFResponseInfo *response,
const int network_timeout, const unsigned char expect_cmd);
@@ -229,6 +562,100 @@ int sf_recv_response(ConnectionInfo *conn, SFResponseInfo *response,
const int network_timeout, const unsigned char expect_cmd,
char *recv_data, const int expect_body_len);
static inline int sf_recv_none_body_response(ConnectionInfo *conn,
SFResponseInfo *response, const int network_timeout,
const unsigned char expect_cmd)
{
char *recv_data = NULL;
const int expect_body_len = 0;
return sf_recv_response(conn, response, network_timeout,
expect_cmd, recv_data, expect_body_len);
}
int sf_recv_vary_response(ConnectionInfo *conn, SFResponseInfo *response,
const int network_timeout, const unsigned char expect_cmd,
SFProtoRecvBuffer *buffer, const int min_body_len);
static inline void sf_init_recv_buffer_by_wrapper(
SFProtoRBufferFixedWrapper *wrapper)
{
wrapper->buffer.fixed_size = sizeof(wrapper->fixed);
wrapper->buffer.alloc_size = sizeof(wrapper->fixed);
wrapper->buffer.fixed = wrapper->fixed;
wrapper->buffer.buff = wrapper->fixed;
}
static inline int sf_init_recv_buffer(SFProtoRecvBuffer *buffer,
const int init_size)
{
buffer->alloc_size = init_size;
buffer->fixed_size = 0;
buffer->fixed = NULL;
buffer->buff = (char *)fc_malloc(init_size);
return buffer->buff != NULL ? 0 : ENOMEM;
}
static inline void sf_free_recv_buffer(SFProtoRecvBuffer *buffer)
{
if (buffer->buff != buffer->fixed) {
if (buffer->buff != NULL) {
free(buffer->buff);
}
buffer->alloc_size = buffer->fixed_size;
buffer->buff = buffer->fixed;
}
}
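/*
 * Sketch: receiving a variable-length body through the fixed wrapper above;
 * the 64 KB stack area serves small bodies, and sf_recv_vary_response is
 * expected to switch the buffer to the heap for larger ones.  expect_cmd
 * and min_body_len = 0 are illustrative.
 */
static inline int demo_recv_large_reply(ConnectionInfo *conn,
        SFResponseInfo *response, const int network_timeout,
        const unsigned char expect_cmd)
{
    int result;
    SFProtoRBufferFixedWrapper wrapper;
    sf_init_recv_buffer_by_wrapper(&wrapper);
    result = sf_recv_vary_response(conn, response, network_timeout,
            expect_cmd, &wrapper.buffer, 0);
    if (result == 0) {
        /* wrapper.buffer.buff now holds response->header.body_len bytes */
    }
    sf_free_recv_buffer(&wrapper.buffer);
    return result;
}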
static inline int sf_proto_send_buf1(ConnectionInfo *conn, char *data,
const int len, SFResponseInfo *response, const int network_timeout)
{
int result;
if (conn->comm_type == fc_comm_type_rdma) {
result = G_RDMA_CONNECTION_CALLBACKS.request_by_buf1(
conn, data, len, network_timeout * 1000);
} else {
result = tcpsenddata_nb(conn->sock, data, len, network_timeout);
}
if (result != 0) {
response->error.length = snprintf(response->error.message,
sizeof(response->error.message),
"send data fail, errno: %d, error info: %s",
result, STRERROR(result));
}
return result;
}
static inline int sf_proto_send_buf2(ConnectionInfo *conn, char *buff1,
const int length1, char *buff2, const int length2,
SFResponseInfo *response, const int network_timeout)
{
int result;
if (conn->comm_type == fc_comm_type_rdma) {
result = G_RDMA_CONNECTION_CALLBACKS.request_by_buf2(
conn, buff1, length1, buff2, length2,
network_timeout * 1000);
} else {
if ((result=tcpsenddata_nb(conn->sock, buff1, length1,
network_timeout)) == 0)
{
result = tcpsenddata_nb(conn->sock, buff2, length2,
network_timeout);
}
}
if (result != 0) {
response->error.length = snprintf(response->error.message,
sizeof(response->error.message),
"send data fail, errno: %d, error info: %s",
result, STRERROR(result));
}
return result;
}
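/*
 * Sketch: sending a header built on the stack and a body kept in its own
 * buffer as one request; cmd and body are whatever the caller prepared.
 */
static inline int demo_send_header_and_body(ConnectionInfo *conn,
        const unsigned char cmd, char *body, const int body_len,
        SFResponseInfo *response, const int network_timeout)
{
    SFCommonProtoHeader header;
    SF_PROTO_SET_HEADER(&header, cmd, body_len);
    return sf_proto_send_buf2(conn, (char *)&header, sizeof(header),
            body, body_len, response, network_timeout);
}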
int sf_send_and_recv_response_header(ConnectionInfo *conn, char *data,
const int len, SFResponseInfo *response, const int network_timeout);
@@ -284,16 +711,46 @@ static inline int sf_send_and_recv_none_body_response(ConnectionInfo *conn,
network_timeout, expect_cmd, recv_data, expect_body_len);
}
static inline void sf_proto_extract_header(SFCommonProtoHeader *header_proto,
SFHeaderInfo *header_info)
int sf_send_and_recv_vary_response(ConnectionInfo *conn,
char *send_data, const int send_len, SFResponseInfo *response,
const int network_timeout, const unsigned char expect_cmd,
SFProtoRecvBuffer *buffer, const int min_body_len);
static inline int sf_proto_parse_header(const SFCommonProtoHeader
*header_proto, SFResponseInfo *response)
{
header_info->cmd = header_proto->cmd;
header_info->body_len = buff2int(header_proto->body_len);
header_info->flags = buff2short(header_proto->flags);
header_info->status = buff2short(header_proto->status);
if (header_info->status > 255) {
header_info->status = sf_localize_errno(header_info->status);
if (!SF_PROTO_CHECK_MAGIC(header_proto->magic)) {
response->error.length = snprintf(response->error.message,
sizeof(response->error.message),
"magic "SF_PROTO_MAGIC_FORMAT" is invalid, "
"expect: "SF_PROTO_MAGIC_FORMAT,
SF_PROTO_MAGIC_PARAMS(header_proto->magic),
SF_PROTO_MAGIC_EXPECT_PARAMS);
return EINVAL;
}
response->header.cmd = header_proto->cmd;
response->header.body_len = buff2int(header_proto->body_len);
response->header.flags = buff2short(header_proto->flags);
response->header.status = buff2short(header_proto->status);
if (response->header.status > 255) {
response->header.status = sf_localize_errno(response->header.status);
}
return 0;
}
static inline void sf_proto_pack_limit(const SFListLimitInfo
*limit_info, SFProtoLimitInfo *limit_proto)
{
int2buff(limit_info->offset, limit_proto->offset);
int2buff(limit_info->count, limit_proto->count);
}
static inline void sf_proto_extract_limit(const SFProtoLimitInfo
*limit_proto, SFListLimitInfo *limit_info)
{
limit_info->offset = buff2int(limit_proto->offset);
limit_info->count = buff2int(limit_proto->count);
}
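/*
 * Sketch: filling the paging window of a list-type request; offset 0 and
 * count 100 are arbitrary example values, and SFListLimitInfo is assumed
 * to expose plain int offset/count members as sf_proto_extract_limit implies.
 */
static inline void demo_fill_limit(SFProtoLimitInfo *limit_proto)
{
    SFListLimitInfo limit;
    limit.offset = 0;
    limit.count = 100;
    sf_proto_pack_limit(&limit, limit_proto);
}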
static inline int sf_active_test(ConnectionInfo *conn,
@@ -318,18 +775,43 @@ int sf_proto_deal_ack(struct fast_task_info *task,
SFRequestInfo *request, SFResponseInfo *response);
int sf_proto_rebind_idempotency_channel(ConnectionInfo *conn,
const uint32_t channel_id, const int key, const int network_timeout);
const char *service_name, const uint32_t channel_id,
const int key, const int network_timeout);
#define SF_CLIENT_RELEASE_CONNECTION(client_ctx, conn, result) \
do { \
int sf_proto_get_group_servers(ConnectionInfo *conn,
const char *service_name, const int network_timeout,
const int group_id, SFGroupServerArray *sarray);
int sf_proto_get_leader(ConnectionInfo *conn, const char *service_name,
const int network_timeout, SFClientServerEntry *leader);
static inline void sf_proto_get_server_status_pack(
const SFGetServerStatusRequest *r,
SFProtoGetServerStatusReq *req)
{
int2buff(r->server_id, req->server_id);
req->is_leader = (r->is_leader ? 1 : 0);
req->auth_enabled = (r->auth_enabled ? 1 : 0);
memcpy(req->config_signs.servers, r->servers_sign,
SF_CLUSTER_CONFIG_SIGN_LEN);
if (r->cluster_sign != NULL) {
memcpy(req->config_signs.cluster, r->cluster_sign,
SF_CLUSTER_CONFIG_SIGN_LEN);
} else {
memset(req->config_signs.cluster, 0,
SF_CLUSTER_CONFIG_SIGN_LEN);
}
}
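/*
 * Sketch: preparing a GET_SERVER_STATUS request from locally known state;
 * the servers sign buffer is whatever config digest the caller maintains,
 * and passing NULL for cluster_sign zero-fills that half of config_signs.
 */
static inline void demo_pack_server_status(const unsigned char *servers_sign,
        const int my_server_id, SFProtoGetServerStatusReq *req)
{
    SFGetServerStatusRequest r;
    r.servers_sign = servers_sign;
    r.cluster_sign = NULL;
    r.server_id = my_server_id;
    r.is_leader = false;
    r.auth_enabled = false;
    sf_proto_get_server_status_pack(&r, req);
}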
#define SF_CLIENT_RELEASE_CONNECTION(cm, conn, result) \
do { \
if (SF_FORCE_CLOSE_CONNECTION_ERROR(result)) { \
client_ctx->conn_manager.close_connection(client_ctx, conn); \
} else if (client_ctx->conn_manager.release_connection != NULL) { \
client_ctx->conn_manager.release_connection(client_ctx, conn); \
(cm)->ops.close_connection(cm, conn); \
} else if ((cm)->ops.release_connection != NULL) { \
(cm)->ops.release_connection(cm, conn); \
} \
} while (0)
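/*
 * Sketch: releasing a pooled connection right after a request, using the
 * macro above; SFConnectionManager is assumed to be the manager type whose
 * ops the macro dispatches to, and the leader query is only an example.
 */
static inline int demo_get_leader(SFConnectionManager *cm,
        ConnectionInfo *conn, const char *service_name,
        const int network_timeout, SFClientServerEntry *leader)
{
    int result;
    result = sf_proto_get_leader(conn, service_name,
            network_timeout, leader);
    SF_CLIENT_RELEASE_CONNECTION(cm, conn, result);
    return result;
}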
#ifdef __cplusplus
}
#endif

521  src/sf_serializer.c  Normal file

@@ -0,0 +1,521 @@
/*
* Copyright (c) 2020 YuQing <384681@qq.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <netinet/in.h>
#include <errno.h>
#include "fastcommon/shared_func.h"
#include "fastcommon/logger.h"
#include "sf_serializer.h"
#define FIELD_ID_AND_TYPE_FORMAT "fid: %d, type: %s"
#define FIELD_ID_AND_TYPE_PARAMS it->field.fid, \
value_type_configs[it->field.type].name
typedef struct {
const char *name;
int min_size;
int elt_size;
} SFSerializerTypeConfig;
static SFSerializerTypeConfig value_type_configs[SF_SERIALIZER_VALUE_TYPE_COUNT] =
{
{"int8", sizeof(SFSerializerPackFieldInt8), 0},
{"int16", sizeof(SFSerializerPackFieldInt16), 0},
{"int32", sizeof(SFSerializerPackFieldInt32), 0},
{"int64", sizeof(SFSerializerPackFieldInt64), 0},
{"string", sizeof(SFSerializerPackStringValue), 0},
{"int8_array", sizeof(SFSerializerPackFieldArray), 1},
{"int16_array", sizeof(SFSerializerPackFieldArray), 2},
{"int32_array", sizeof(SFSerializerPackFieldArray), 4},
{"int64_array", sizeof(SFSerializerPackFieldArray), 8},
{"string_array", sizeof(SFSerializerPackFieldArray),
sizeof(SFSerializerPackStringValue)},
{"id_name_array", sizeof(SFSerializerPackFieldArray),
sizeof(int64_t) + sizeof(SFSerializerPackStringValue)},
{"map", sizeof(SFSerializerPackFieldArray), 2 *
sizeof(SFSerializerPackStringValue)}
};
int sf_serializer_unpack(SFSerializerIterator *it, const string_t *content)
{
SFSerializerPackHeader *header;
int length;
int calc_crc32;
int header_crc32;
if (content->len < sizeof(SFSerializerPackHeader)) {
snprintf(it->error_info, sizeof(it->error_info),
"content length: %d is too small which < %d",
content->len, (int)sizeof(SFSerializerPackHeader));
return EINVAL;
}
header = (SFSerializerPackHeader *)content->str;
length = buff2int(header->length);
if (content->len != length + sizeof(SFSerializerPackHeader)) {
snprintf(it->error_info, sizeof(it->error_info),
"content length: %d != %d", content->len,
(int)(length + sizeof(SFSerializerPackHeader)));
return EINVAL;
}
calc_crc32 = CRC32(header + 1, length);
header_crc32 = buff2int(header->crc32);
if (header_crc32 != calc_crc32) {
snprintf(it->error_info, sizeof(it->error_info),
"header crc32: %d != calculated: %d",
header_crc32, calc_crc32);
return EINVAL;
}
it->p = (const char *)(header + 1);
it->end = content->str + content->len;
return 0;
}
static int check_field_type(SFSerializerIterator *it,
const int remain_len, const SFSerializerValueType type)
{
if (!(type >= 0 && type < SF_SERIALIZER_VALUE_TYPE_COUNT)) {
snprintf(it->error_info, sizeof(it->error_info),
"fid: %d, unknown type: %d", it->field.fid, type);
return EINVAL;
}
if (remain_len < value_type_configs[type].min_size) {
snprintf(it->error_info, sizeof(it->error_info),
FIELD_ID_AND_TYPE_FORMAT", remain length: %d "
"is too small which < %d", FIELD_ID_AND_TYPE_PARAMS,
remain_len, value_type_configs[type].min_size);
return EINVAL;
}
return 0;
}
static inline int check_string_value(SFSerializerIterator *it,
const int remain_len, const string_t *s)
{
if (s->len < 0) {
snprintf(it->error_info, sizeof(it->error_info),
FIELD_ID_AND_TYPE_FORMAT", invalid string length: %d < 0",
FIELD_ID_AND_TYPE_PARAMS, s->len);
return EINVAL;
}
if (s->len > remain_len) {
snprintf(it->error_info, sizeof(it->error_info),
FIELD_ID_AND_TYPE_FORMAT", string length: %d is too "
"large > remain length: %d", FIELD_ID_AND_TYPE_PARAMS,
s->len, remain_len);
return EINVAL;
}
return 0;
}
static inline int unpack_array_count(SFSerializerIterator *it,
const int remain_len, int *count)
{
int min_size;
*count = buff2int(((SFSerializerPackFieldArray *)it->p)->value.count);
if (*count < 0) {
snprintf(it->error_info, sizeof(it->error_info),
FIELD_ID_AND_TYPE_FORMAT", invalid array count: %d < 0",
FIELD_ID_AND_TYPE_PARAMS, *count);
return EINVAL;
}
min_size = value_type_configs[it->field.type].elt_size * (*count);
if (remain_len < min_size) {
snprintf(it->error_info, sizeof(it->error_info),
FIELD_ID_AND_TYPE_FORMAT", remain length: %d is too "
"small < array min bytes: %d", FIELD_ID_AND_TYPE_PARAMS,
remain_len, min_size);
return EINVAL;
}
return 0;
}
static int array_expand(SFSerializerIterator *it, void_array_t *array,
const int elt_size, const int target_count, int *alloc_size)
{
int new_alloc;
void *new_elts;
if (*alloc_size == 0) {
new_alloc = 256;
} else {
new_alloc = (*alloc_size) * 2;
}
while (new_alloc < target_count) {
new_alloc *= 2;
}
new_elts = fc_malloc(elt_size * new_alloc);
if (new_elts == NULL) {
snprintf(it->error_info, sizeof(it->error_info),
FIELD_ID_AND_TYPE_FORMAT", malloc %d bytes fail",
FIELD_ID_AND_TYPE_PARAMS, elt_size * new_alloc);
return ENOMEM;
}
if (array->elts != NULL) {
free(array->elts);
}
array->elts = new_elts;
*alloc_size = new_alloc;
return 0;
}
static inline int unpack_string(SFSerializerIterator *it, const int remain_len,
SFSerializerPackStringValue *input, string_t *output)
{
if (remain_len < sizeof(SFSerializerPackStringValue)) {
snprintf(it->error_info, sizeof(it->error_info),
FIELD_ID_AND_TYPE_FORMAT", remain length: %d "
"is too small < %d", FIELD_ID_AND_TYPE_PARAMS,
remain_len, (int)sizeof(SFSerializerPackStringValue));
return EINVAL;
}
output->len = buff2int(input->len);
output->str = input->str;
it->p += sizeof(SFSerializerPackStringValue) + output->len;
return check_string_value(it, remain_len -
sizeof(SFSerializerPackStringValue), output);
}
static int unpack_integer_array(SFSerializerIterator *it, const int remain_len)
{
int result;
int count;
int64_t *pn;
int64_t *end;
if ((result=unpack_array_count(it, remain_len, &count)) != 0) {
return result;
}
if (count > it->int_array_alloc) {
if ((result=array_expand(it, (void_array_t *)&it->int_array,
sizeof(int64_t), count, &it->int_array_alloc)) != 0)
{
return result;
}
}
it->p += sizeof(SFSerializerPackFieldArray);
end = it->int_array.elts + count;
for (pn=it->int_array.elts; pn<end; pn++) {
switch (it->field.type) {
case sf_serializer_value_type_int8_array:
*pn = *it->p;
break;
case sf_serializer_value_type_int16_array:
*pn = buff2short(it->p);
break;
case sf_serializer_value_type_int32_array:
*pn = buff2int(it->p);
break;
default:
*pn = buff2long(it->p);
break;
}
it->p += value_type_configs[it->field.type].elt_size;
}
it->int_array.count = count;
return 0;
}
static int unpack_string_array(SFSerializerIterator *it, const int remain_len)
{
int result;
int count;
string_t *str;
string_t *end;
if ((result=unpack_array_count(it, remain_len, &count)) != 0) {
return result;
}
if (count > it->str_array_alloc) {
if ((result=array_expand(it, (void_array_t *)&it->str_array,
sizeof(string_t), count, &it->str_array_alloc)) != 0)
{
return result;
}
}
it->p += sizeof(SFSerializerPackFieldArray);
end = it->str_array.strings + count;
for (str=it->str_array.strings; str<end; str++) {
if ((result=unpack_string(it, it->end - it->p,
(SFSerializerPackStringValue *)
it->p, str)) != 0)
{
return result;
}
}
it->str_array.count = count;
return 0;
}
static int unpack_id_name_array(SFSerializerIterator *it, const int remain_len)
{
int result;
int count;
id_name_pair_t *pair;
id_name_pair_t *end;
if ((result=unpack_array_count(it, remain_len, &count)) != 0) {
return result;
}
if (count > it->id_name_array_alloc) {
if ((result=array_expand(it, (void_array_t *)&it->id_name_array,
sizeof(id_name_pair_t), count,
&it->id_name_array_alloc)) != 0)
{
return result;
}
}
it->p += sizeof(SFSerializerPackFieldArray);
end = it->id_name_array.elts + count;
for (pair=it->id_name_array.elts; pair<end; pair++) {
if ((it->end - it->p) < (sizeof(int64_t) +
sizeof(SFSerializerPackStringValue)))
{
snprintf(it->error_info, sizeof(it->error_info),
FIELD_ID_AND_TYPE_FORMAT", remain length: %d "
"is too small < %d", FIELD_ID_AND_TYPE_PARAMS,
(int)(it->end - it->p), (int)(sizeof(int64_t) +
sizeof(SFSerializerPackStringValue)));
return EINVAL;
}
pair->id = buff2long(it->p);
it->p += sizeof(int64_t);
if ((result=unpack_string(it, it->end - it->p,
(SFSerializerPackStringValue *)it->p,
&pair->name)) != 0)
{
return result;
}
}
it->id_name_array.count = count;
return 0;
}
static int unpack_map(SFSerializerIterator *it, const int remain_len)
{
int result;
int count;
key_value_pair_t *pair;
key_value_pair_t *end;
if ((result=unpack_array_count(it, remain_len, &count)) != 0) {
return result;
}
if (count > it->kv_array_alloc) {
if ((result=array_expand(it, (void_array_t *)&it->kv_array,
sizeof(key_value_pair_t), count,
&it->kv_array_alloc)) != 0)
{
return result;
}
}
it->p += sizeof(SFSerializerPackFieldArray);
end = it->kv_array.kv_pairs + count;
for (pair=it->kv_array.kv_pairs; pair<end; pair++) {
if ((result=unpack_string(it, it->end - it->p,
(SFSerializerPackStringValue *)it->p,
&pair->key)) != 0)
{
return result;
}
if ((result=unpack_string(it, it->end - it->p,
(SFSerializerPackStringValue *)it->p,
&pair->value)) != 0)
{
return result;
}
}
it->kv_array.count = count;
return 0;
}
const SFSerializerFieldValue *sf_serializer_next(SFSerializerIterator *it)
{
int remain_len;
SFSerializerPackFieldInfo *field;
SFSerializerPackFieldString *fs;
remain_len = it->end - it->p;
if (remain_len == 0) {
return NULL;
}
if (remain_len <= sizeof(SFSerializerPackFieldInfo)) {
snprintf(it->error_info, sizeof(it->error_info),
"remain length: %d is too small which <= %d",
remain_len, (int)sizeof(SFSerializerPackFieldInfo));
it->error_no = EINVAL;
return NULL;
}
field = (SFSerializerPackFieldInfo *)it->p;
it->field.fid = field->id;
it->field.type = field->type;
if ((it->error_no=check_field_type(it, remain_len, field->type)) != 0) {
return NULL;
}
switch (field->type) {
case sf_serializer_value_type_int8:
it->field.value.n = ((SFSerializerPackFieldInt8 *)it->p)->value;
it->p += sizeof(SFSerializerPackFieldInt8);
break;
case sf_serializer_value_type_int16:
it->field.value.n = buff2short(
((SFSerializerPackFieldInt16 *)
it->p)->value);
it->p += sizeof(SFSerializerPackFieldInt16);
break;
case sf_serializer_value_type_int32:
it->field.value.n = buff2int(
((SFSerializerPackFieldInt32 *)
it->p)->value);
it->p += sizeof(SFSerializerPackFieldInt32);
break;
case sf_serializer_value_type_int64:
it->field.value.n = buff2long(
((SFSerializerPackFieldInt64 *)
it->p)->value);
it->p += sizeof(SFSerializerPackFieldInt64);
break;
case sf_serializer_value_type_string:
fs = (SFSerializerPackFieldString *)it->p;
it->p += sizeof(SFSerializerPackFieldInfo);
if ((it->error_no=unpack_string(it, remain_len -
sizeof(SFSerializerPackFieldInfo),
&fs->value, &it->field.value.s)) != 0)
{
return NULL;
}
break;
case sf_serializer_value_type_int8_array:
case sf_serializer_value_type_int16_array:
case sf_serializer_value_type_int32_array:
case sf_serializer_value_type_int64_array:
if ((it->error_no=unpack_integer_array(it, remain_len -
sizeof(SFSerializerPackFieldArray))) != 0)
{
return NULL;
}
it->field.value.int_array = it->int_array;
break;
case sf_serializer_value_type_string_array:
if ((it->error_no=unpack_string_array(it, remain_len - sizeof(
SFSerializerPackFieldArray))) != 0)
{
return NULL;
}
it->field.value.str_array = it->str_array;
break;
case sf_serializer_value_type_id_name_array:
if ((it->error_no=unpack_id_name_array(it, remain_len -
sizeof(SFSerializerPackFieldArray))) != 0)
{
return NULL;
}
it->field.value.id_name_array = it->id_name_array;
break;
case sf_serializer_value_type_map:
if ((it->error_no=unpack_map(it, remain_len - sizeof(
SFSerializerPackFieldArray))) != 0)
{
return NULL;
}
it->field.value.kv_array = it->kv_array;
break;
}
return &it->field;
}
int sf_serializer_read_message(int fd, BufferInfo *buffer,
const int max_size)
{
SFSerializerPackHeader *header;
char *new_buff;
int new_alloc;
int length;
int total_bytes;
if (fc_safe_read(fd, buffer->buff, sizeof(*header)) != sizeof(*header)) {
return ENODATA;
}
header = (SFSerializerPackHeader *)buffer->buff;
length = buff2int(header->length);
if (length <= 0 || length > max_size) {
return EINVAL;
}
total_bytes = sizeof(*header) + length;
if (buffer->alloc_size < total_bytes) {
new_alloc = buffer->alloc_size * 2;
while (new_alloc < total_bytes) {
new_alloc *= 2;
}
new_buff = (char *)fc_malloc(new_alloc);
if (new_buff == NULL) {
return ENOMEM;
}
memcpy(new_buff, buffer->buff, sizeof(*header));
free(buffer->buff);
buffer->buff = new_buff;
buffer->alloc_size = new_alloc;
}
if (fc_safe_read(fd, buffer->buff + sizeof(*header),
length) != length)
{
return ENODATA;
}
buffer->length = total_bytes;
return 0;
}
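/*
 * Round-trip sketch for the serializer added in this file: pack two fields
 * into a FastBuffer, then walk them back with the iterator.  Return-code
 * checks for the pack calls are trimmed for brevity, and fast_buffer_init_ex
 * / fast_buffer_destroy are assumed fastcommon helpers.
 */
static int demo_serializer_round_trip(void)
{
    FastBuffer buffer;
    SFSerializerIterator it;
    const SFSerializerFieldValue *fv;
    string_t name;
    string_t content;
    int field_count;
    int result;
    if ((result=fast_buffer_init_ex(&buffer, 1024)) != 0) {
        return result;
    }
    sf_serializer_pack_begin(&buffer);
    sf_serializer_pack_integer(&buffer, 1, 12345);   /* fid 1: integer */
    FC_SET_STRING(name, "hello");
    sf_serializer_pack_string(&buffer, 2, &name);    /* fid 2: string  */
    sf_serializer_pack_end(&buffer);                 /* fills length and crc32 */
    sf_serializer_iterator_init(&it);
    FC_SET_STRING_EX(content, buffer.data, buffer.length);
    if ((result=sf_serializer_unpack(&it, &content)) == 0) {
        field_count = 0;
        while ((fv=sf_serializer_next(&it)) != NULL) {
            ++field_count;   /* fv->fid, fv->type and fv->value ready here */
        }
        result = it.error_no;   /* 0 on a clean end of content */
        if (result == 0 && field_count != 2) {
            result = EINVAL;
        }
    }
    sf_serializer_iterator_destroy(&it);
    fast_buffer_destroy(&buffer);
    return result;
}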

563  src/sf_serializer.h  Normal file

@@ -0,0 +1,563 @@
/*
* Copyright (c) 2020 YuQing <384681@qq.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
//sf_serializer.h
#ifndef _SF_SERIALIZER_H
#define _SF_SERIALIZER_H
#include "fastcommon/common_define.h"
#include "fastcommon/shared_func.h"
#include "fastcommon/fast_buffer.h"
#include "fastcommon/uniq_skiplist.h"
#include "fastcommon/hash.h"
#define SF_SERIALIZER_VALUE_TYPE_COUNT 12
typedef enum {
sf_serializer_value_type_int8 = 0,
sf_serializer_value_type_int16,
sf_serializer_value_type_int32,
sf_serializer_value_type_int64,
sf_serializer_value_type_string,
sf_serializer_value_type_int8_array,
sf_serializer_value_type_int16_array,
sf_serializer_value_type_int32_array,
sf_serializer_value_type_int64_array,
sf_serializer_value_type_string_array,
sf_serializer_value_type_id_name_array,
sf_serializer_value_type_map
} SFSerializerValueType;
typedef struct sf_serializer_pack_header {
char length[4];
char crc32[4];
} SFSerializerPackHeader;
typedef struct sf_serializer_pack_field_info {
unsigned char id;
unsigned char type;
} SFSerializerPackFieldInfo;
typedef struct sf_serializer_pack_field_int8 {
SFSerializerPackFieldInfo field;
char value;
} SFSerializerPackFieldInt8;
typedef struct sf_serializer_pack_field_int16 {
SFSerializerPackFieldInfo field;
char value[2];
} SFSerializerPackFieldInt16;
typedef struct sf_serializer_pack_field_int32 {
SFSerializerPackFieldInfo field;
char value[4];
} SFSerializerPackFieldInt32;
typedef struct sf_serializer_pack_field_int64 {
SFSerializerPackFieldInfo field;
char value[8];
} SFSerializerPackFieldInt64;
typedef struct sf_serializer_pack_string_value {
char len[4];
char str[0];
} SFSerializerPackStringValue;
typedef struct sf_serializer_pack_field_string {
SFSerializerPackFieldInfo field;
SFSerializerPackStringValue value;
} SFSerializerPackFieldString;
typedef struct sf_serializer_pack_field_array {
SFSerializerPackFieldInfo field;
struct {
char count[4];
char ptr[0];
} value;
} SFSerializerPackFieldArray;
typedef struct sf_serializer_field_value {
unsigned char fid;
SFSerializerValueType type;
union {
int64_t n;
string_t s;
int64_array_t int_array;
string_array_t str_array;
id_name_array_t id_name_array;
key_value_array_t kv_array;
} value;
} SFSerializerFieldValue;
typedef struct sf_serializer_iterator {
const char *p;
const char *end;
int64_array_t int_array; //int64_t array holder
string_array_t str_array; //string_t array holder
id_name_array_t id_name_array; //id name array holder
key_value_array_t kv_array; //key value array holder
int int_array_alloc;
int str_array_alloc;
int id_name_array_alloc;
int kv_array_alloc;
SFSerializerFieldValue field;
int error_no;
char error_info[256];
} SFSerializerIterator;
#ifdef __cplusplus
extern "C" {
#endif
static inline void sf_serializer_pack_begin(FastBuffer *buffer)
{
buffer->length = sizeof(SFSerializerPackHeader);
}
static inline int sf_serializer_pack_int8(FastBuffer *buffer,
const unsigned char fid, const int8_t value)
{
int result;
SFSerializerPackFieldInt8 *obj;
if ((result=fast_buffer_check_inc_size(buffer,
sizeof(SFSerializerPackFieldInt8))) != 0)
{
return result;
}
obj = (SFSerializerPackFieldInt8 *)(buffer->data + buffer->length);
obj->field.id = fid;
obj->field.type = sf_serializer_value_type_int8;
obj->value = value;
buffer->length += sizeof(SFSerializerPackFieldInt8);
return 0;
}
static inline int sf_serializer_pack_int16(FastBuffer *buffer,
const unsigned char fid, const int16_t value)
{
int result;
SFSerializerPackFieldInt16 *obj;
if ((result=fast_buffer_check_inc_size(buffer,
sizeof(SFSerializerPackFieldInt16))) != 0)
{
return result;
}
obj = (SFSerializerPackFieldInt16 *)(buffer->data + buffer->length);
obj->field.id = fid;
obj->field.type = sf_serializer_value_type_int16;
short2buff(value, obj->value);
buffer->length += sizeof(SFSerializerPackFieldInt16);
return 0;
}
static inline int sf_serializer_pack_int32(FastBuffer *buffer,
const unsigned char fid, const int32_t value)
{
int result;
SFSerializerPackFieldInt32 *obj;
if ((result=fast_buffer_check_inc_size(buffer,
sizeof(SFSerializerPackFieldInt32))) != 0)
{
return result;
}
obj = (SFSerializerPackFieldInt32 *)(buffer->data + buffer->length);
obj->field.id = fid;
obj->field.type = sf_serializer_value_type_int32;
int2buff(value, obj->value);
buffer->length += sizeof(SFSerializerPackFieldInt32);
return 0;
}
static inline int sf_serializer_pack_int64(FastBuffer *buffer,
const unsigned char fid, const int64_t value)
{
int result;
SFSerializerPackFieldInt64 *obj;
if ((result=fast_buffer_check_inc_size(buffer,
sizeof(SFSerializerPackFieldInt64))) != 0)
{
return result;
}
obj = (SFSerializerPackFieldInt64 *)(buffer->data + buffer->length);
obj->field.id = fid;
obj->field.type = sf_serializer_value_type_int64;
long2buff(value, obj->value);
buffer->length += sizeof(SFSerializerPackFieldInt64);
return 0;
}
static inline int sf_serializer_pack_integer(FastBuffer *buffer,
const unsigned char fid, const int64_t value)
{
if (value >= INT16_MIN && value <= INT16_MAX) {
if (value >= INT8_MIN && value <= INT8_MAX) {
return sf_serializer_pack_int8(buffer, fid, value);
} else {
return sf_serializer_pack_int16(buffer, fid, value);
}
} else {
if (value >= INT32_MIN && value <= INT32_MAX) {
return sf_serializer_pack_int32(buffer, fid, value);
} else {
return sf_serializer_pack_int64(buffer, fid, value);
}
}
}
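/* For example, 100 fits int8 and packs into 3 bytes (2-byte field info +
 * 1-byte value), 1000 packs as a 4-byte int16 field, 100000 as a 6-byte
 * int32 field, and anything outside 32 bits as a 10-byte int64 field. */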
#define SF_SERIALIZER_PACK_STRING(ps, value) \
int2buff((value)->len, (ps)->len); \
memcpy((ps)->str, (value)->str, (value)->len)
#define SF_SERIALIZER_PACK_STRING_AND_MOVE_PTR(p, value) \
SF_SERIALIZER_PACK_STRING((SFSerializerPackStringValue *)p, value); \
p += (sizeof(SFSerializerPackStringValue) + (value)->len)
static inline int sf_serializer_pack_string(FastBuffer *buffer,
const unsigned char fid, const string_t *value)
{
int result;
int length;
SFSerializerPackFieldString *obj;
length = sizeof(SFSerializerPackFieldString) + value->len;
if ((result=fast_buffer_check_inc_size(buffer, length)) != 0) {
return result;
}
obj = (SFSerializerPackFieldString *)(buffer->data + buffer->length);
obj->field.id = fid;
obj->field.type = sf_serializer_value_type_string;
SF_SERIALIZER_PACK_STRING(&obj->value, value);
buffer->length += length;
return 0;
}
static inline int sf_serializer_pack_buffer(FastBuffer *buffer,
const unsigned char fid, const FastBuffer *value)
{
string_t str;
FC_SET_STRING_EX(str, value->data, value->length);
return sf_serializer_pack_string(buffer, fid, &str);
}
static inline int sf_serializer_pack_int8_array(FastBuffer *buffer,
const unsigned char fid, const int8_t *array, const int count)
{
int result;
int length;
SFSerializerPackFieldArray *obj;
const int8_t *pn;
const int8_t *end;
char *ps;
length = sizeof(SFSerializerPackFieldArray) + count * 1;
if ((result=fast_buffer_check_inc_size(buffer, length)) != 0) {
return result;
}
obj = (SFSerializerPackFieldArray *)(buffer->data + buffer->length);
obj->field.id = fid;
obj->field.type = sf_serializer_value_type_int8_array;
int2buff(count, obj->value.count);
end = array + count;
for (pn=array, ps=obj->value.ptr; pn<end; pn++) {
*ps++ = *pn;
}
buffer->length += length;
return 0;
}
static inline int sf_serializer_pack_int16_array(FastBuffer *buffer,
const unsigned char fid, const int16_t *array, const int count)
{
int result;
int length;
SFSerializerPackFieldArray *obj;
const int16_t *pn;
const int16_t *end;
char *ps;
length = sizeof(SFSerializerPackFieldArray) + count * 2;
if ((result=fast_buffer_check_inc_size(buffer, length)) != 0) {
return result;
}
obj = (SFSerializerPackFieldArray *)(buffer->data + buffer->length);
obj->field.id = fid;
obj->field.type = sf_serializer_value_type_int16_array;
int2buff(count, obj->value.count);
end = array + count;
for (pn=array, ps=obj->value.ptr; pn<end; pn++, ps+=2) {
short2buff(*pn, ps);
}
buffer->length += length;
return 0;
}
static inline int sf_serializer_pack_int32_array(FastBuffer *buffer,
const unsigned char fid, const int32_t *array, const int count)
{
int result;
int length;
SFSerializerPackFieldArray *obj;
const int32_t *pn;
const int32_t *end;
char *ps;
length = sizeof(SFSerializerPackFieldArray) + count * 4;
if ((result=fast_buffer_check_inc_size(buffer, length)) != 0) {
return result;
}
obj = (SFSerializerPackFieldArray *)(buffer->data + buffer->length);
obj->field.id = fid;
obj->field.type = sf_serializer_value_type_int32_array;
int2buff(count, obj->value.count);
end = array + count;
for (pn=array, ps=obj->value.ptr; pn<end; pn++, ps+=4) {
int2buff(*pn, ps);
}
buffer->length += length;
return 0;
}
static inline int sf_serializer_pack_int64_array(FastBuffer *buffer,
const unsigned char fid, const int64_t *array, const int count)
{
int result;
int length;
SFSerializerPackFieldArray *obj;
const int64_t *pn;
const int64_t *end;
char *ps;
length = sizeof(SFSerializerPackFieldArray) + count * 8;
if ((result=fast_buffer_check_inc_size(buffer, length)) != 0) {
return result;
}
obj = (SFSerializerPackFieldArray *)(buffer->data + buffer->length);
obj->field.id = fid;
obj->field.type = sf_serializer_value_type_int64_array;
int2buff(count, obj->value.count);
end = array + count;
for (pn=array, ps=obj->value.ptr; pn<end; pn++, ps+=8) {
long2buff(*pn, ps);
}
buffer->length += length;
return 0;
}
static inline int sf_serializer_pack_string_array(FastBuffer *buffer,
const unsigned char fid, const string_t *strings, const int count)
{
int result;
int length;
SFSerializerPackFieldArray *obj;
const string_t *str;
const string_t *end;
char *p;
length = sizeof(SFSerializerPackFieldArray);
end = strings + count;
for (str=strings; str<end; str++) {
length += sizeof(SFSerializerPackStringValue) + str->len;
}
if ((result=fast_buffer_check_inc_size(buffer, length)) != 0) {
return result;
}
obj = (SFSerializerPackFieldArray *)(buffer->data + buffer->length);
obj->field.id = fid;
obj->field.type = sf_serializer_value_type_string_array;
int2buff(count, obj->value.count);
p = obj->value.ptr;
for (str=strings; str<end; str++) {
SF_SERIALIZER_PACK_STRING_AND_MOVE_PTR(p, str);
}
buffer->length += length;
return 0;
}
static inline int sf_serializer_pack_id_name_array(FastBuffer *buffer,
const unsigned char fid, const id_name_pair_t *in_pairs,
const int count)
{
int result;
int length;
SFSerializerPackFieldArray *obj;
const id_name_pair_t *pair;
const id_name_pair_t *end;
char *p;
length = sizeof(SFSerializerPackFieldArray);
end = in_pairs + count;
for (pair=in_pairs; pair<end; pair++) {
length += sizeof(int64_t) + pair->name.len +
sizeof(SFSerializerPackStringValue);
}
if ((result=fast_buffer_check_inc_size(buffer, length)) != 0) {
return result;
}
obj = (SFSerializerPackFieldArray *)(buffer->data + buffer->length);
obj->field.id = fid;
obj->field.type = sf_serializer_value_type_id_name_array;
int2buff(count, obj->value.count);
p = obj->value.ptr;
for (pair=in_pairs; pair<end; pair++) {
long2buff(pair->id, p);
p += sizeof(int64_t);
SF_SERIALIZER_PACK_STRING_AND_MOVE_PTR(p, &pair->name);
}
buffer->length += length;
return 0;
}
static inline int sf_serializer_pack_id_name_skiplist(
FastBuffer *buffer, const unsigned char fid,
UniqSkiplist *sl)
{
int result;
int length;
SFSerializerPackFieldArray *obj;
const id_name_pair_t *pair;
UniqSkiplistIterator it;
char *p;
length = sizeof(SFSerializerPackFieldArray);
uniq_skiplist_iterator(sl, &it);
while ((pair=uniq_skiplist_next(&it)) != NULL) {
length += sizeof(int64_t) + pair->name.len +
sizeof(SFSerializerPackStringValue);
}
if ((result=fast_buffer_check_inc_size(buffer, length)) != 0) {
return result;
}
obj = (SFSerializerPackFieldArray *)(buffer->data + buffer->length);
obj->field.id = fid;
obj->field.type = sf_serializer_value_type_id_name_array;
int2buff(uniq_skiplist_count(sl), obj->value.count);
p = obj->value.ptr;
uniq_skiplist_iterator(sl, &it);
while ((pair=uniq_skiplist_next(&it)) != NULL) {
long2buff(pair->id, p);
p += sizeof(int64_t);
SF_SERIALIZER_PACK_STRING_AND_MOVE_PTR(p, &pair->name);
}
buffer->length += length;
return 0;
}
static inline int sf_serializer_pack_map(FastBuffer *buffer,
const unsigned char fid, const key_value_pair_t *kv_pairs,
const int count)
{
int result;
int length;
SFSerializerPackFieldArray *obj;
const key_value_pair_t *pair;
const key_value_pair_t *end;
char *p;
length = sizeof(SFSerializerPackFieldArray);
end = kv_pairs + count;
for (pair=kv_pairs; pair<end; pair++) {
length += sizeof(SFSerializerPackStringValue) * 2 +
pair->key.len + pair->value.len;
}
if ((result=fast_buffer_check_inc_size(buffer, length)) != 0) {
return result;
}
obj = (SFSerializerPackFieldArray *)(buffer->data + buffer->length);
obj->field.id = fid;
obj->field.type = sf_serializer_value_type_map;
int2buff(count, obj->value.count);
p = obj->value.ptr;
for (pair=kv_pairs; pair<end; pair++) {
SF_SERIALIZER_PACK_STRING_AND_MOVE_PTR(p, &pair->key);
SF_SERIALIZER_PACK_STRING_AND_MOVE_PTR(p, &pair->value);
}
buffer->length += length;
return 0;
}
static inline void sf_serializer_pack_end(FastBuffer *buffer)
{
SFSerializerPackHeader *header;
int length;
int crc32;
header = (SFSerializerPackHeader *)buffer->data;
length = buffer->length - sizeof(SFSerializerPackHeader);
crc32 = CRC32(header + 1, length);
int2buff(length, header->length);
int2buff(crc32, header->crc32);
}
static inline void sf_serializer_iterator_init(SFSerializerIterator *it)
{
memset(it, 0, sizeof(SFSerializerIterator));
}
static inline void sf_serializer_iterator_destroy(SFSerializerIterator *it)
{
if (it->int_array.elts != NULL) {
free(it->int_array.elts);
it->int_array_alloc = 0;
}
if (it->kv_array.kv_pairs != NULL) {
free(it->kv_array.kv_pairs);
it->kv_array_alloc = 0;
}
if (it->str_array.strings != NULL) {
free(it->str_array.strings);
it->str_array_alloc = 0;
}
}
int sf_serializer_unpack(SFSerializerIterator *it, const string_t *content);
const SFSerializerFieldValue *sf_serializer_next(SFSerializerIterator *it);
int sf_serializer_read_message(int fd, BufferInfo *buffer,
const int max_size);
#ifdef __cplusplus
}
#endif
#endif


@@ -25,6 +25,7 @@
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <ifaddrs.h>
#include "fastcommon/logger.h"
#include "fastcommon/sockopt.h"
#include "fastcommon/shared_func.h"
@@ -32,9 +33,8 @@
#include "fastcommon/sched_thread.h"
#include "fastcommon/ioevent_loop.h"
#include "fastcommon/fc_memory.h"
#include "sf_nio.h"
#include "sf_proto.h"
#include "sf_util.h"
#include "sf_global.h"
#include "sf_service.h"
#if defined(OS_LINUX)
@@ -57,77 +57,69 @@ struct worker_thread_context {
struct nio_thread_data *thread_data;
};
struct accept_thread_context {
SFContext *sf_context;
int server_sock;
};
int sf_init_task(struct fast_task_info *task)
{
task->connect_timeout = SF_G_CONNECT_TIMEOUT; //for client side
task->network_timeout = SF_G_NETWORK_TIMEOUT;
return 0;
}
static void *worker_thread_entrance(void *arg);
static int sf_init_free_queues(const int task_arg_size,
TaskInitCallback init_callback)
static int sf_init_free_queue(SFContext *sf_context, const char *name,
const bool double_buffers, const bool need_shrink_task_buffer,
const int task_padding_size, const int task_arg_size,
TaskInitCallback init_callback, void *init_arg)
{
#define ALLOC_CONNECTIONS_ONCE 1024
static bool sf_inited = false;
int result;
int buffer_size;
int m;
int init_connections;
int max_m;
int alloc_conn_once;
if (sf_inited) {
return 0;
}
sf_inited = true;
if ((result=set_rand_seed()) != 0) {
logCrit("file: "__FILE__", line: %d, "
"set_rand_seed fail, program exit!", __LINE__);
return result;
}
m = g_sf_global_vars.min_buff_size / (64 * 1024);
if (strcmp(name, "cluster") == 0 || strcmp(name, "replica") == 0) {
buffer_size = FC_MAX(4 * 1024 * 1024, sf_context->
net_buffer_cfg.max_buff_size);
max_m = 64;
} else {
buffer_size = sf_context->net_buffer_cfg.min_buff_size;
max_m = 16;
}
m = buffer_size / (64 * 1024);
if (m == 0) {
m = 1;
} else if (m > 16) {
m = 16;
}
alloc_conn_once = ALLOC_CONNECTIONS_ONCE / m;
init_connections = g_sf_global_vars.max_connections < alloc_conn_once ?
g_sf_global_vars.max_connections : alloc_conn_once;
if ((result=free_queue_init_ex2(g_sf_global_vars.max_connections,
init_connections, alloc_conn_once, g_sf_global_vars.
min_buff_size, g_sf_global_vars.max_buff_size,
task_arg_size, init_callback != NULL ?
init_callback : sf_init_task)) != 0)
{
return result;
} else if (m > max_m) {
m = max_m;
}
alloc_conn_once = 256 / m;
return 0;
return free_queue_init_ex2(&sf_context->free_queue, name, double_buffers,
need_shrink_task_buffer, sf_context->net_buffer_cfg.max_connections,
alloc_conn_once, sf_context->net_buffer_cfg.min_buff_size,
sf_context->net_buffer_cfg.max_buff_size, task_padding_size,
task_arg_size, init_callback, init_arg);
}
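/*
 * Worked example of the sizing above (numbers illustrative): a "cluster"
 * context with max_buff_size <= 4 MB uses buffer_size = 4 MB, giving
 * m = 64 (already at max_m) and alloc_conn_once = 256 / 64 = 4 tasks per
 * allocation; a service context with min_buff_size = 256 KB gives m = 4
 * and alloc_conn_once = 64.
 */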
int sf_service_init_ex2(SFContext *sf_context,
int sf_service_init_ex2(SFContext *sf_context, const char *name,
sf_alloc_thread_extra_data_callback
alloc_thread_extra_data_callback,
ThreadLoopCallback thread_loop_callback,
sf_accept_done_callback accept_done_callback,
sf_set_body_length_callback set_body_length_func,
sf_deal_task_func deal_func, TaskCleanUpCallback task_cleanup_func,
sf_alloc_recv_buffer_callback alloc_recv_buffer_func,
sf_send_done_callback send_done_callback,
sf_deal_task_callback deal_func, TaskCleanUpCallback task_cleanup_func,
sf_recv_timeout_callback timeout_callback, const int net_timeout_ms,
const int proto_header_size, const int task_arg_size,
TaskInitCallback init_callback)
const int proto_header_size, const int task_padding_size,
const int task_arg_size, const bool double_buffers,
const bool need_shrink_task_buffer, const bool explicit_post_recv,
TaskInitCallback init_callback, void *init_arg,
sf_release_buffer_callback release_buffer_callback)
{
int result;
int bytes;
int extra_events;
int max_entries;
int i;
struct worker_thread_context *thread_contexts;
struct worker_thread_context *thread_ctx;
struct nio_thread_data *thread_data;
@@ -135,13 +127,26 @@ int sf_service_init_ex2(SFContext *sf_context,
pthread_t tid;
pthread_attr_t thread_attr;
sf_context->realloc_task_buffer = g_sf_global_vars.
min_buff_size < g_sf_global_vars.max_buff_size;
sf_context->accept_done_func = accept_done_callback;
sf_set_parameters_ex(sf_context, proto_header_size, set_body_length_func,
deal_func, task_cleanup_func, timeout_callback);
fc_safe_strcpy(sf_context->name, name);
sf_context->connect_need_log = true;
sf_context->realloc_task_buffer = sf_context->net_buffer_cfg.
min_buff_size < sf_context->net_buffer_cfg.max_buff_size;
sf_context->callbacks.accept_done = accept_done_callback;
sf_set_parameters_ex(sf_context, proto_header_size,
set_body_length_func, alloc_recv_buffer_func,
send_done_callback, deal_func, task_cleanup_func,
timeout_callback, release_buffer_callback);
if (explicit_post_recv) {
for (i=0; i<SF_ADDRESS_FAMILY_COUNT; i++) {
sf_context->handlers[i].handlers[SF_RDMACM_NETWORK_HANDLER_INDEX].
explicit_post_recv = true;
}
}
if ((result=sf_init_free_queues(task_arg_size, init_callback)) != 0) {
if ((result=sf_init_free_queue(sf_context, name, double_buffers,
need_shrink_task_buffer, task_padding_size,
task_arg_size, init_callback, init_arg)) != 0)
{
return result;
}
@@ -166,11 +171,71 @@ int sf_service_init_ex2(SFContext *sf_context,
return ENOMEM;
}
if (SF_G_EPOLL_EDGE_TRIGGER) {
#ifdef OS_LINUX
#if IOEVENT_USE_EPOLL
extra_events = EPOLLET;
#else
extra_events = 0;
#endif
#elif defined(OS_FREEBSD)
extra_events = EV_CLEAR;
#else
extra_events = 0;
#endif
} else {
extra_events = 0;
}
max_entries = (sf_context->net_buffer_cfg.max_connections +
sf_context->work_threads - 1) / sf_context->work_threads;
if (strcmp(sf_context->name, "cluster") == 0 ||
strcmp(sf_context->name, "replica") == 0)
{
if (max_entries < 1024) {
max_entries += 8;
} else {
max_entries = 1024;
}
} else {
if (max_entries < 4 * 1024) {
max_entries = max_entries * 2;
} else if (max_entries < 8 * 1024) {
max_entries = (max_entries * 3) / 2;
} else if (max_entries < 16 * 1024) {
max_entries = (max_entries * 5) / 4;
} else if (max_entries < 32 * 1024) {
max_entries = (max_entries * 6) / 5;
} else if (max_entries < 64 * 1024) {
max_entries = (max_entries * 11) / 10;
} else if (max_entries < 128 * 1024) {
max_entries = (max_entries * 21) / 20;
}
#if IOEVENT_USE_URING
if (sf_context->use_io_uring) {
if (max_entries > 32 * 1024) {
max_entries = 32 * 1024;
}
}
#endif
}
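/*
 * Worked example of the sizing above (numbers illustrative): with
 * max_connections = 10240 and work_threads = 4, max_entries starts at
 * 2560; a service context (< 4 * 1024) doubles it to 5120 entries per
 * thread, while a "cluster"/"replica" context caps the same figure at
 * 1024, and the io_uring build additionally caps service contexts at
 * 32 * 1024.
 */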
g_current_time = time(NULL);
sf_context->thread_count = 0;
data_end = sf_context->thread_data + sf_context->work_threads;
for (thread_data=sf_context->thread_data,thread_ctx=thread_contexts;
thread_data<data_end; thread_data++,thread_ctx++)
{
thread_data->timeout_ms = net_timeout_ms;
FC_INIT_LIST_HEAD(&thread_data->polling_queue);
if (sf_context->smart_polling.enabled) {
thread_data->busy_polling_callback =
sf_rdma_busy_polling_callback;
} else {
thread_data->busy_polling_callback = NULL;
}
thread_data->thread_loop_callback = thread_loop_callback;
if (alloc_thread_extra_data_callback != NULL) {
thread_data->arg = alloc_thread_extra_data_callback(
@@ -180,32 +245,60 @@ int sf_service_init_ex2(SFContext *sf_context,
thread_data->arg = NULL;
}
if (ioevent_init(&thread_data->ev_puller,
g_sf_global_vars.max_connections + 2, net_timeout_ms, 0) != 0)
if ((result=ioevent_init(&thread_data->ev_puller, sf_context->
name, sf_context->use_io_uring, max_entries,
net_timeout_ms, extra_events)) != 0)
{
result = errno != 0 ? errno : ENOMEM;
char prompt[256];
#if IOEVENT_USE_URING
if (sf_context->use_io_uring) {
if (result == EPERM) {
strcpy(prompt, " make sure kernel."
"io_uring_disabled set to 0");
} else if (result == EINVAL) {
sprintf(prompt, " maybe max_connections: %d is too large"
" or [%s]'s work_threads: %d is too small",
sf_context->net_buffer_cfg.max_connections,
sf_context->name, sf_context->work_threads);
} else {
*prompt = '\0';
}
} else {
#endif
*prompt = '\0';
#if IOEVENT_USE_URING
}
#endif
logError("file: "__FILE__", line: %d, "
"ioevent_init fail, "
"errno: %d, error info: %s",
__LINE__, result, strerror(result));
"ioevent_init fail, errno: %d, error info: %s.%s"
, __LINE__, result, strerror(result), prompt);
return result;
}
result = fast_timer_init(&thread_data->timer,
2 * g_sf_global_vars.network_timeout, g_current_time);
#if IOEVENT_USE_URING
if (sf_context->use_io_uring && send_done_callback != NULL) {
ioevent_set_send_zc_done_notify(&thread_data->ev_puller, true);
}
#endif
result = fast_timer_init(&thread_data->timer, 2 * sf_context->
net_buffer_cfg.network_timeout, g_current_time);
if (result != 0) {
logError("file: "__FILE__", line: %d, "
"fast_timer_init fail, "
"errno: %d, error info: %s",
__LINE__, result, strerror(result));
"fast_timer_init fail, errno: %d, error info: %s",
__LINE__, result, strerror(result));
return result;
}
if ((result=init_pthread_lock(&thread_data->waiting_queue.lock)) != 0) {
if ((result=init_pthread_lock(&thread_data->
waiting_queue.lock)) != 0)
{
return result;
}
#if defined(OS_LINUX)
FC_NOTIFY_READ_FD(thread_data) = eventfd(0, EFD_NONBLOCK);
FC_NOTIFY_READ_FD(thread_data) = eventfd(0,
EFD_NONBLOCK | EFD_CLOEXEC);
if (FC_NOTIFY_READ_FD(thread_data) < 0) {
result = errno != 0 ? errno : EPERM;
logError("file: "__FILE__", line: %d, "
@@ -229,12 +322,14 @@ int sf_service_init_ex2(SFContext *sf_context,
{
break;
}
FC_SET_CLOEXEC(FC_NOTIFY_READ_FD(thread_data));
FC_SET_CLOEXEC(FC_NOTIFY_WRITE_FD(thread_data));
#endif
thread_ctx->sf_context = sf_context;
thread_ctx->thread_data = thread_data;
if ((result=pthread_create(&tid, &thread_attr,
worker_thread_entrance, thread_ctx)) != 0)
worker_thread_entrance, thread_ctx)) != 0)
{
logError("file: "__FILE__", line: %d, "
"create thread failed, startup threads: %d, "
@@ -253,7 +348,7 @@ int sf_service_destroy_ex(SFContext *sf_context)
{
struct nio_thread_data *data_end, *thread_data;
free_queue_destroy();
free_queue_destroy(&sf_context->free_queue);
data_end = sf_context->thread_data + sf_context->work_threads;
for (thread_data=sf_context->thread_data; thread_data<data_end;
thread_data++)
@@ -284,6 +379,17 @@ static void *worker_thread_entrance(void *arg)
int thread_count;
thread_ctx = (struct worker_thread_context *)arg;
#ifdef OS_LINUX
{
char thread_name[32];
snprintf(thread_name, sizeof(thread_name), "%s-net[%d]",
thread_ctx->sf_context->name, (int)(thread_ctx->
thread_data - thread_ctx->sf_context->thread_data));
prctl(PR_SET_NAME, thread_name);
}
#endif
thread_count = __sync_add_and_fetch(&thread_ctx->
sf_context->thread_count, 1);
@@ -295,7 +401,7 @@ static void *worker_thread_entrance(void *arg)
ioevent_loop(thread_ctx->thread_data,
sf_recv_notify_read,
thread_ctx->sf_context->task_cleanup_func,
thread_ctx->sf_context->callbacks.task_cleanup,
&g_sf_global_vars.continue_flag);
ioevent_destroy(&thread_ctx->thread_data->ev_puller);
@@ -310,15 +416,20 @@ static void *worker_thread_entrance(void *arg)
return NULL;
}
int sf_socket_create_server(SFListener *listener,
int af, const char *bind_addr)
{
int result;
*sock = socketServer(bind_addr, port, &result);
if (*sock < 0) {
listener->sock = socketServer2(af, bind_addr,
listener->port, &result);
if (listener->sock < 0) {
return result;
}
if ((result=tcpsetserveropt(listener->sock, listener->handler->
fh->ctx->net_buffer_cfg.network_timeout)) != 0)
{
return result;
}
@ -328,101 +439,215 @@ static int _socket_server(const char *bind_addr, int port, int *sock)
int sf_socket_server_ex(SFContext *sf_context)
{
int result;
int i;
bool dual_ports;
const char *bind_addr;
SFAddressFamilyHandler *fh;
SFNetworkHandler *handler;
SFNetworkHandler *end;
for (i=0; i<SF_ADDRESS_FAMILY_COUNT; i++) {
fh = sf_context->handlers + i;
if (fh->af == AF_UNSPEC) {
continue;
}
end = fh->handlers + SF_NETWORK_HANDLER_COUNT;
for (handler=fh->handlers; handler<end; handler++) {
if (!handler->enabled) {
continue;
}
handler->inner.enabled = false;
handler->outer.enabled = false;
if (handler->outer.port == handler->inner.port) {
if (*fh->outer_bind_addr == '\0' ||
*fh->inner_bind_addr == '\0')
{
bind_addr = "";
if ((result=handler->create_server(&handler->
outer, fh->af, bind_addr)) != 0)
{
return result;
}
handler->outer.enabled = true;
dual_ports = false;
} else if (strcmp(fh->outer_bind_addr,
fh->inner_bind_addr) == 0)
{
bind_addr = fh->outer_bind_addr;
if (is_private_ip(bind_addr)) {
if ((result=handler->create_server(&handler->
inner, fh->af, bind_addr)) != 0)
{
return result;
}
handler->inner.enabled = true;
} else {
if ((result=handler->create_server(&handler->
outer, fh->af, bind_addr)) != 0)
{
return result;
}
handler->outer.enabled = true;
}
dual_ports = false;
} else {
dual_ports = true;
}
} else {
dual_ports = true;
}
if (dual_ports) {
if ((result=handler->create_server(&handler->outer,
fh->af, fh->outer_bind_addr)) != 0)
{
return result;
}
if ((result=handler->create_server(&handler->inner,
fh->af, fh->inner_bind_addr)) != 0)
{
return result;
}
handler->inner.enabled = true;
handler->outer.enabled = true;
}
/*
logInfo("%p [%d] inner {port: %d, enabled: %d}, "
"outer {port: %d, enabled: %d}", sf_context,
(int)(handler-sf_context->handlers),
handler->inner.port, handler->inner.enabled,
handler->outer.port, handler->outer.enabled);
*/
}
}
return 0;
}
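/*
 * Illustrative sketch, not part of this diff: the listener setup above boils
 * down to a small decision rule. A standalone, hypothetical helper with the
 * same logic could look like this (names are examples only).
 */
static bool sf_need_dual_listeners(const int inner_port, const int outer_port,
        const char *inner_bind_addr, const char *outer_bind_addr)
{
    if (inner_port != outer_port) {
        return true;    //different ports always need two listeners
    }
    if (*inner_bind_addr == '\0' || *outer_bind_addr == '\0') {
        return false;   //a wildcard bind address serves both roles
    }
    //same port: a single listener is enough only when both addresses match
    return strcmp(inner_bind_addr, outer_bind_addr) != 0;
}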
void sf_socket_close_server(SFListener *listener)
{
if (listener->sock >= 0) {
close(listener->sock);
listener->sock = -1;
}
}
struct fast_task_info *sf_socket_accept_connection(SFListener *listener)
{
int incomesock;
int port;
socklen_t sockaddr_len;
struct fast_task_info *task;
sockaddr_len = sizeof(listener->inaddr);
incomesock = accept(listener->sock, (struct sockaddr *)
&listener->inaddr, &sockaddr_len);
if (incomesock < 0) { //error
if (!(errno == EINTR || errno == EAGAIN)) {
logError("file: "__FILE__", line: %d, "
"accept fail, errno: %d, error info: %s",
__LINE__, errno, strerror(errno));
}
return NULL;
}
if (tcpsetnonblockopt(incomesock) != 0) {
close(incomesock);
return NULL;
}
FC_SET_CLOEXEC(incomesock);
if ((task=sf_alloc_init_server_task(listener->handler,
incomesock)) == NULL)
{
close(incomesock);
return NULL;
}
getPeerIpAddPort(incomesock, task->client_ip,
sizeof(task->client_ip), &port);
task->port = port;
return task;
}
void sf_socket_close_ex(SFContext *sf_context)
{
int i;
SFNetworkHandler *handler;
SFNetworkHandler *end;
for (i=0; i<SF_ADDRESS_FAMILY_COUNT; i++) {
if (sf_context->handlers[i].af == AF_UNSPEC) {
continue;
}
end = sf_context->handlers[i].handlers + SF_NETWORK_HANDLER_COUNT;
for (handler=sf_context->handlers[i].handlers; handler<end; handler++) {
if (!handler->enabled) {
continue;
}
if (handler->outer.enabled) {
handler->close_server(&handler->outer);
}
if (handler->inner.enabled) {
handler->close_server(&handler->inner);
}
}
}
}
static void accept_run(SFListener *listener)
{
struct fast_task_info *task;
while (g_sf_global_vars.continue_flag) {
if ((task=listener->handler->accept_connection(listener)) == NULL) {
continue;
}
task->thread_data = listener->handler->fh->ctx->thread_data +
task->event.fd % listener->handler->fh->ctx->work_threads;
if (listener->handler->fh->ctx->callbacks.accept_done != NULL) {
if (listener->handler->fh->ctx->callbacks.accept_done(task,
listener->inaddr.sin_addr.s_addr,
listener->is_inner) != 0)
{
listener->handler->close_connection(task);
sf_release_task(task);
continue;
}
}
if (sf_nio_notify(task, SF_NIO_STAGE_INIT) != 0) {
listener->handler->close_connection(task);
sf_release_task(task);
}
}
}
static void *accept_thread_entrance(SFListener *listener)
{
#ifdef OS_LINUX
{
char thread_name[32];
snprintf(thread_name, sizeof(thread_name), "%s-%s-listen",
listener->handler->comm_type == fc_comm_type_sock ?
"sock" : "rdma", listener->handler->fh->ctx->name);
prctl(PR_SET_NAME, thread_name);
}
#endif
accept_run(listener);
return NULL;
}
int _accept_loop(SFListener *listener, const int accept_threads)
{
pthread_t tid;
pthread_attr_t thread_attr;
@ -430,7 +655,7 @@ void _accept_loop(struct accept_thread_context *accept_context,
int i;
if (accept_threads <= 0) {
return 0;
}
if ((result=init_pthread_attr(&thread_attr, g_sf_global_vars.
@ -438,68 +663,83 @@ void _accept_loop(struct accept_thread_context *accept_context,
{
logWarning("file: "__FILE__", line: %d, "
"init_pthread_attr fail!", __LINE__);
return result;
}
for (i=0; i<accept_threads; i++) {
if ((result=pthread_create(&tid, &thread_attr,
(void * (*)(void *))accept_thread_entrance,
listener)) != 0)
{
logError("file: "__FILE__", line: %d, "
"create thread failed, startup threads: %d, "
"errno: %d, error info: %s",
__LINE__, i, result, strerror(result));
return result;
}
}
pthread_attr_destroy(&thread_attr);
return 0;
}
int sf_accept_loop_ex(SFContext *sf_context, const bool blocked)
{
int i;
SFNetworkHandler *handler;
SFNetworkHandler *hend;
SFListener *listeners[SF_ADDRESS_FAMILY_COUNT *
SF_NETWORK_HANDLER_COUNT * 2];
SFListener **listener;
SFListener **last;
SFListener **lend;
listener = listeners;
for (i=0; i<SF_ADDRESS_FAMILY_COUNT; i++) {
if (sf_context->handlers[i].af == AF_UNSPEC) {
continue;
}
hend = sf_context->handlers[i].handlers + SF_NETWORK_HANDLER_COUNT;
for (handler=sf_context->handlers[i].handlers;
handler<hend; handler++)
{
if (!handler->enabled) {
continue;
}
if (handler->inner.enabled) {
*listener++ = &handler->inner;
}
if (handler->outer.enabled) {
*listener++ = &handler->outer;
}
}
}
if (listener == listeners) {
logError("file: "__FILE__", line: %d, "
"no listener!", __LINE__);
return ENOENT;
}
last = listener - 1;
if (blocked) {
lend = listener - 1;
} else {
lend = listener;
}
for (listener=listeners; listener<lend; listener++) {
_accept_loop(*listener, sf_context->accept_threads);
}
if (blocked) {
_accept_loop(*last, sf_context->accept_threads - 1);
accept_run(*last);
}
return 0;
}
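/*
 * Illustrative usage sketch, not part of this diff: a typical server creates
 * the configured listeners first and then enters the accept loop; with
 * blocked set to true the calling thread runs the last listener's
 * accept_run() itself. The wrapper name is hypothetical and error handling
 * is abbreviated.
 */
static int example_start_network(SFContext *sf_context)
{
    int result;

    if ((result=sf_socket_server_ex(sf_context)) != 0) {
        return result;
    }
    return sf_accept_loop_ex(sf_context, true);  //blocks in accept_run()
}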
#if defined(DEBUG_FLAG)
@ -515,7 +755,7 @@ static void sigDumpHandler(int sig)
bDumpFlag = true;
snprintf(filename, sizeof(filename),
"%s/logs/sf_dump.log", g_sf_global_vars.base_path);
"%s/logs/sf_dump.log", SF_G_BASE_PATH_STR);
//manager_dump_global_vars_to_file(filename);
bDumpFlag = false;
@ -613,15 +853,13 @@ int sf_setup_signal_handler()
return 0;
}
int sf_startup_schedule(pthread_t *schedule_tid)
{
ScheduleArray scheduleArray;
ScheduleEntry scheduleEntries[SF_LOG_SCHEDULE_ENTRIES_COUNT];
scheduleArray.entries = scheduleEntries;
sf_setup_schedule(&g_log_context, &g_sf_global_vars.error_log,
sf_logger_setup_schedule(&g_log_context, &g_sf_global_vars.error_log,
&scheduleArray);
return sched_start(&scheduleArray, schedule_tid,
g_sf_global_vars.thread_stack_size, (bool * volatile)
@ -632,7 +870,7 @@ int sf_add_slow_log_schedule(SFSlowLogContext *slowlog_ctx)
{
int result;
ScheduleArray scheduleArray;
ScheduleEntry scheduleEntries[SF_LOG_SCHEDULE_ENTRIES_COUNT];
if (!slowlog_ctx->cfg.enabled) {
return 0;
@ -645,8 +883,8 @@ int sf_add_slow_log_schedule(SFSlowLogContext *slowlog_ctx)
}
scheduleArray.entries = scheduleEntries;
sf_setup_schedule(&slowlog_ctx->ctx, &slowlog_ctx->cfg.log_cfg,
&scheduleArray);
sf_logger_setup_schedule(&slowlog_ctx->ctx, &slowlog_ctx->
cfg.log_cfg, &scheduleArray);
return sched_add_entries(&scheduleArray);
}
@ -657,6 +895,12 @@ void sf_set_current_time()
srand(g_sf_global_vars.up_time);
}
int sf_global_init(const char *log_filename_prefix)
{
sf_set_current_time();
return log_set_prefix(SF_G_BASE_PATH_STR, log_filename_prefix);
}
void sf_enable_thread_notify_ex(SFContext *sf_context, const bool enabled)
{
struct nio_thread_data *thread_data;
@ -678,6 +922,17 @@ struct nio_thread_data *sf_get_random_thread_data_ex(SFContext *sf_context)
return sf_context->thread_data + index;
}
void sf_notify_all_threads_ex(SFContext *sf_context)
{
struct nio_thread_data *tdata;
struct nio_thread_data *tend;
tend = sf_context->thread_data + sf_context->work_threads;
for (tdata=sf_context->thread_data; tdata<tend; tdata++) {
ioevent_notify_thread(tdata);
}
}
void sf_set_sig_quit_handler(sf_sig_quit_handler quit_handler)
{
sig_quit_handler = quit_handler;


@ -25,6 +25,9 @@
#include "fastcommon/ioevent.h"
#include "fastcommon/fast_task_queue.h"
#include "sf_types.h"
#include "sf_proto.h"
#include "sf_global.h"
#include "sf_nio.h"
typedef void* (*sf_alloc_thread_extra_data_callback)(const int thread_index);
typedef void (*sf_sig_quit_handler)(int sig);
@ -33,34 +36,41 @@ typedef void (*sf_sig_quit_handler)(int sig);
extern "C" {
#endif
int sf_service_init_ex2(SFContext *sf_context, const char *name,
sf_alloc_thread_extra_data_callback
alloc_thread_extra_data_callback,
ThreadLoopCallback thread_loop_callback,
sf_accept_done_callback accept_done_callback,
sf_set_body_length_callback set_body_length_func,
sf_alloc_recv_buffer_callback alloc_recv_buffer_func,
sf_send_done_callback send_done_callback,
sf_deal_task_callback deal_func, TaskCleanUpCallback task_cleanup_func,
sf_recv_timeout_callback timeout_callback, const int net_timeout_ms,
const int proto_header_size, const int task_padding_size,
const int task_arg_size, const bool double_buffers,
const bool need_shrink_task_buffer, const bool explicit_post_recv,
TaskInitCallback init_callback, void *init_arg,
sf_release_buffer_callback release_buffer_callback);
#define sf_service_init_ex(sf_context, name, alloc_thread_extra_data_callback,\
thread_loop_callback, accept_done_callback, set_body_length_func, \
send_done_callback, deal_func, task_cleanup_func, timeout_callback, \
net_timeout_ms, proto_header_size, task_arg_size) \
sf_service_init_ex2(sf_context, name, alloc_thread_extra_data_callback, \
thread_loop_callback, accept_done_callback, set_body_length_func, \
NULL, send_done_callback, deal_func, task_cleanup_func, \
timeout_callback, net_timeout_ms, proto_header_size, \
0, task_arg_size, false, true, false, NULL, NULL, NULL)
#define sf_service_init(name, alloc_thread_extra_data_callback, \
thread_loop_callback, accept_done_callback, set_body_length_func, \
send_done_callback, deal_func, task_cleanup_func, timeout_callback, \
net_timeout_ms, proto_header_size, task_arg_size) \
sf_service_init_ex2(&g_sf_context, name, alloc_thread_extra_data_callback, \
thread_loop_callback, accept_done_callback, set_body_length_func, NULL,\
send_done_callback, deal_func, task_cleanup_func, timeout_callback, \
net_timeout_ms, proto_header_size, 0, task_arg_size, false, true, \
false, NULL, NULL, NULL)
int sf_service_destroy_ex(SFContext *sf_context);
@ -72,17 +82,43 @@ void sf_service_set_thread_loop_callback_ex(SFContext *sf_context,
#define sf_service_set_thread_loop_callback(thread_loop_callback) \
sf_service_set_thread_loop_callback_ex(&g_sf_context, thread_loop_callback)
static inline void sf_service_set_smart_polling_ex(SFContext *sf_context,
const FCSmartPollingConfig *smart_polling)
{
sf_context->smart_polling = *smart_polling;
}
#define sf_service_set_smart_polling(smart_polling) \
sf_service_set_smart_polling_ex(&g_sf_context, smart_polling)
static inline void sf_service_set_connect_need_log_ex(
SFContext *sf_context, const bool need_log)
{
sf_context->connect_need_log = need_log;
}
#define sf_service_set_connect_need_log(need_log) \
sf_service_set_connect_need_log_ex(&g_sf_context, need_log)
int sf_setup_signal_handler();
int sf_startup_schedule(pthread_t *schedule_tid);
int sf_add_slow_log_schedule(SFSlowLogContext *slowlog_ctx);
void sf_set_current_time();
int sf_global_init(const char *log_filename_prefix);
int sf_socket_create_server(SFListener *listener,
int af, const char *bind_addr);
void sf_socket_close_server(SFListener *listener);
struct fast_task_info *sf_socket_accept_connection(SFListener *listener);
int sf_socket_server_ex(SFContext *sf_context);
#define sf_socket_server() sf_socket_server_ex(&g_sf_context)
void sf_socket_close_ex(SFContext *sf_context);
#define sf_socket_close() sf_socket_close_ex(&g_sf_context)
int sf_accept_loop_ex(SFContext *sf_context, const bool blocked);
#define sf_accept_loop() sf_accept_loop_ex(&g_sf_context, true)
@ -105,16 +141,22 @@ struct nio_thread_data *sf_get_random_thread_data_ex(SFContext *sf_context);
#define sf_get_random_thread_data() \
sf_get_random_thread_data_ex(&g_sf_context)
void sf_notify_all_threads_ex(SFContext *sf_context);
#define sf_notify_all_threads() \
sf_notify_all_threads_ex(&g_sf_context)
void sf_set_sig_quit_handler(sf_sig_quit_handler quit_handler);
int sf_init_task(struct fast_task_info *task);
static inline struct fast_task_info *sf_alloc_init_task_ex(
SFNetworkHandler *handler, const int fd,
const int reffer_count)
{
struct fast_task_info *task;
task = free_queue_pop(&handler->fh->ctx->free_queue);
if (task == NULL) {
logError("file: "__FILE__", line: %d, "
"malloc task buff failed, you should "
@ -122,19 +164,57 @@ static inline struct fast_task_info *sf_alloc_init_task(
__LINE__);
return NULL;
}
__sync_add_and_fetch(&task->reffer_count, 1);
if (task->shrinked) {
task->shrinked = false;
sf_proto_init_task_magic(task);
}
__sync_add_and_fetch(&task->reffer_count, reffer_count);
__sync_bool_compare_and_swap(&task->canceled, 1, 0);
task->handler = handler;
task->event.fd = fd;
return task;
}
#define sf_hold_task_ex(task, inc_count) fc_hold_task_ex(task, inc_count)
#define sf_hold_task(task) fc_hold_task(task)
#define sf_alloc_init_task(handler, fd) sf_alloc_init_task_ex(handler, fd, 1)
static inline struct fast_task_info *sf_alloc_init_server_task(
SFNetworkHandler *handler, const int fd)
{
const int reffer_count = 1;
struct fast_task_info *task;
if ((task=sf_alloc_init_task_ex(handler, fd, reffer_count)) != NULL) {
#if IOEVENT_USE_URING
FC_URING_IS_CLIENT(task) = false;
#endif
}
return task;
}
#define sf_hold_task(task) __sync_add_and_fetch(&task->reffer_count, 1)
static inline struct fast_task_info *sf_alloc_init_client_task(
SFNetworkHandler *handler)
{
const int fd = -1;
const int reffer_count = 1;
struct fast_task_info *task;
if ((task=sf_alloc_init_task_ex(handler, fd, reffer_count)) != NULL) {
#if IOEVENT_USE_URING
FC_URING_IS_CLIENT(task) = true;
#endif
}
return task;
}
static inline void sf_release_task(struct fast_task_info *task)
{
//int reffer_count;
if (__sync_sub_and_fetch(&task->reffer_count, 1) == 0) {
/*
int free_count = free_queue_count();
@ -145,13 +225,99 @@ static inline void sf_release_task(struct fast_task_info *task)
alloc_count, alloc_count - free_count, free_count);
*/
#if IOEVENT_USE_URING
if (SF_CTX->use_io_uring) {
task->handler->close_connection(task);
__sync_fetch_and_sub(&g_sf_global_vars.
connection_stat.current_count, 1);
}
#endif
free_queue_push(task);
} else {
/*
logInfo("file: "__FILE__", line: %d, "
"release task %p, current reffer: %d",
__LINE__, task, reffer_count);
*/
}
}
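/*
 * Illustrative sketch, not part of this diff: every sf_hold_task() must be
 * balanced by exactly one sf_release_task(); the task is pushed back to the
 * context's free queue only when the reference count reaches zero. The
 * function name below is hypothetical.
 */
static inline void example_defer_task_work(struct fast_task_info *task)
{
    sf_hold_task(task);     //keep the task alive across an async step
    /* ... hand the task to another thread or queue here ... */
    sf_release_task(task);  //balanced release once the step completes
}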
static inline SFNetworkHandler *sf_get_first_network_handler_ex(
SFContext *sf_context)
{
int i;
SFNetworkHandler *handler;
SFNetworkHandler *end;
for (i=0; i<SF_ADDRESS_FAMILY_COUNT; i++) {
if (sf_context->handlers[i].af == AF_UNSPEC) {
continue;
}
end = sf_context->handlers[i].handlers + SF_NETWORK_HANDLER_COUNT;
for (handler=sf_context->handlers[i].handlers; handler<end; handler++) {
if (handler->enabled) {
return handler;
}
}
}
return NULL;
}
#define sf_get_first_network_handler() \
sf_get_first_network_handler_ex(&g_sf_context)
static inline SFNetworkHandler *sf_get_rdma_network_handler(
SFContext *sf_context)
{
int i;
SFNetworkHandler *handler;
for (i=0; i<SF_ADDRESS_FAMILY_COUNT; i++) {
if (sf_context->handlers[i].af != AF_UNSPEC) {
handler = sf_context->handlers[i].handlers +
SF_RDMACM_NETWORK_HANDLER_INDEX;
if (handler->enabled) {
return handler;
}
}
}
return NULL;
}
static inline SFNetworkHandler *sf_get_rdma_network_handler2(
SFContext *sf_context1, SFContext *sf_context2)
{
SFNetworkHandler *handler;
if ((handler=sf_get_rdma_network_handler(sf_context1)) != NULL) {
return handler;
}
return sf_get_rdma_network_handler(sf_context2);
}
static inline SFNetworkHandler *sf_get_rdma_network_handler3(
SFContext *sf_context1, SFContext *sf_context2,
SFContext *sf_context3)
{
SFNetworkHandler *handler;
if ((handler=sf_get_rdma_network_handler(sf_context1)) != NULL) {
return handler;
}
if ((handler=sf_get_rdma_network_handler(sf_context2)) != NULL) {
return handler;
}
return sf_get_rdma_network_handler(sf_context3);
}
static inline bool sf_get_double_buffers_flag(FCServerGroupInfo *server_group)
{
if (server_group->comm_type == fc_comm_type_sock) {
#if IOEVENT_USE_URING
return true;
#else
return false;
#endif
} else { //RDMA
return true;
}
}


@ -15,6 +15,7 @@
#include <stdlib.h>
#include "fastcommon/shared_func.h"
#include "fastcommon/fc_atomic.h"
#include "sf_sharding_htable.h"
static int init_allocators(SFHtableShardingContext *sharding_ctx,
@ -44,7 +45,7 @@ static int init_allocators(SFHtableShardingContext *sharding_ctx,
end = sharding_ctx->allocators.elts + allocator_count;
for (pa=sharding_ctx->allocators.elts; pa<end; pa++) {
if ((result=fast_mblock_init_ex1(pa, "sharding_hkey", element_size,
if ((result=fast_mblock_init_ex1(pa, "sharding-hkey", element_size,
alloc_elts_once, 0, NULL, NULL, true)) != 0)
{
return result;
@ -78,7 +79,7 @@ static int init_sharding(SFHtableSharding *sharding,
sharding->hashtable.capacity = per_capacity;
sharding->element_count = 0;
sharding->last_reclaim_time_ms = 1000LL * (int64_t)get_current_time();
FC_INIT_LIST_HEAD(&sharding->lru);
return 0;
}
@ -114,15 +115,16 @@ static int init_sharding_array(SFHtableShardingContext *sharding_ctx,
return 0;
}
int sf_sharding_htable_init_ex(SFHtableShardingContext *sharding_ctx,
const SFShardingHtableKeyType key_type,
sf_sharding_htable_insert_callback insert_callback,
sf_sharding_htable_find_callback find_callback,
sf_sharding_htable_delete_callback delete_callback,
sf_sharding_htable_accept_reclaim_callback reclaim_callback,
const int sharding_count, const int64_t htable_capacity,
const int allocator_count, const int element_size,
int64_t element_limit, const int64_t min_ttl_ms,
const int64_t max_ttl_ms, const double low_water_mark_ratio)
{
int result;
int64_t per_elt_limit;
@ -131,6 +133,7 @@ int sf_sharding_htable_init(SFHtableShardingContext *sharding_ctx,
if (element_limit <= 0) {
element_limit = 1000 * 1000;
}
if ((result=init_allocators(sharding_ctx, allocator_count,
element_size, element_limit)) != 0)
{
@ -148,19 +151,23 @@ int sf_sharding_htable_init(SFHtableShardingContext *sharding_ctx,
sharding_ctx->key_type = key_type;
sharding_ctx->insert_callback = insert_callback;
sharding_ctx->find_callback = find_callback;
sharding_ctx->delete_callback = delete_callback;
sharding_ctx->accept_reclaim_callback = reclaim_callback;
sharding_ctx->sharding_reclaim.enabled = (delete_callback == NULL);
sharding_ctx->sharding_reclaim.elt_water_mark =
per_elt_limit * low_water_mark_ratio;
sharding_ctx->sharding_reclaim.min_ttl_ms = min_ttl_ms;
sharding_ctx->sharding_reclaim.max_ttl_ms = max_ttl_ms;
sharding_ctx->sharding_reclaim.elt_ttl_ms = (double)(sharding_ctx->
sharding_reclaim.max_ttl_ms - sharding_ctx->
sharding_reclaim.min_ttl_ms) / per_elt_limit;
/*
logInfo("per_elt_limit: %"PRId64", elt_water_mark: %d, "
"elt_ttl_sec: %.2f", per_elt_limit, (int)sharding_ctx->
"elt_ttl_ms: %.2f", per_elt_limit, (int)sharding_ctx->
sharding_reclaim.elt_water_mark, sharding_ctx->
sharding_reclaim.elt_ttl_ms);
*/
return 0;
}
@ -220,9 +227,10 @@ static inline void htable_insert(SFHtableShardingContext *sharding_ctx,
fc_list_add_internal(&entry->dlinks.htable, previous, previous->next);
}
static SFShardingHashEntry *hash_entry_reclaim(SFHtableSharding *sharding)
{
int64_t current_time_ms;
int64_t reclaim_ttl_ms;
int64_t delta;
int64_t reclaim_count;
int64_t reclaim_limit;
@ -232,7 +240,14 @@ static SFShardingHashEntry *otid_entry_reclaim(SFHtableSharding *sharding)
if (sharding->element_count <= sharding->element_limit) {
delta = sharding->element_count;
if (sharding->ctx->sharding_reclaim.elt_water_mark > 0) {
reclaim_count = sharding->element_count - sharding->ctx->
sharding_reclaim.elt_water_mark;
reclaim_limit = FC_MIN(reclaim_count, sharding->ctx->
sharding_reclaim.elt_water_mark);
} else {
reclaim_limit = sharding->element_count;
}
} else {
delta = sharding->element_limit;
reclaim_limit = (sharding->element_count - sharding->element_limit) +
@ -241,12 +256,11 @@ static SFShardingHashEntry *otid_entry_reclaim(SFHtableSharding *sharding)
first = NULL;
reclaim_count = 0;
current_time_ms = 1000LL * (int64_t)get_current_time();
reclaim_ttl_ms = (int64_t)(sharding->ctx->sharding_reclaim.max_ttl_ms -
sharding->ctx->sharding_reclaim.elt_ttl_ms * delta);
fc_list_for_each_entry_safe(entry, tmp, &sharding->lru, dlinks.lru) {
if (current_time_ms - entry->last_update_time_ms <= reclaim_ttl_ms) {
break;
}
@ -271,28 +285,37 @@ static SFShardingHashEntry *otid_entry_reclaim(SFHtableSharding *sharding)
}
if (reclaim_count > 0) {
logInfo("sharding index: %d, element_count: %"PRId64", "
"reclaim_ttl_sec: %"PRId64" ms, reclaim_count: %"PRId64", "
logDebug("sharding index: %d, element_count: %"PRId64", "
"reclaim_ttl_ms: %"PRId64" ms, reclaim_count: %"PRId64", "
"reclaim_limit: %"PRId64, (int)(sharding - sharding->ctx->
sharding_array.entries), sharding->element_count,
reclaim_ttl_ms, reclaim_count, reclaim_limit);
}
return first;
}
static inline SFShardingHashEntry *htable_entry_alloc(
SFHtableShardingContext *sharding_ctx,
SFHtableSharding *sharding)
{
SFShardingHashEntry *entry;
int64_t current_time_ms;
int64_t last_reclaim_time_ms;
if (sharding_ctx->sharding_reclaim.enabled &&
(sharding->element_count > sharding->ctx->
sharding_reclaim.elt_water_mark))
{
current_time_ms = 1000LL * (int64_t)get_current_time();
last_reclaim_time_ms = FC_ATOMIC_GET(sharding->last_reclaim_time_ms);
if (current_time_ms - last_reclaim_time_ms > 100 &&
__sync_bool_compare_and_swap(&sharding->last_reclaim_time_ms,
last_reclaim_time_ms, current_time_ms))
{
if ((entry=hash_entry_reclaim(sharding)) != NULL) {
return entry;
}
}
}
@ -309,7 +332,6 @@ static inline SFShardingHashEntry *htable_entry_alloc(
#define SET_SHARDING_AND_BUCKET(sharding_ctx, key) \
SFHtableSharding *sharding; \
struct fc_list_head *bucket; \
SFShardingHashEntry *entry; \
uint64_t hash_code; \
\
hash_code = sf_sharding_htable_key_ids_one == sharding_ctx-> \
@ -324,6 +346,7 @@ void *sf_sharding_htable_find(SFHtableShardingContext
*sharding_ctx, const SFTwoIdsHashKey *key, void *arg)
{
void *data;
SFShardingHashEntry *entry;
SET_SHARDING_AND_BUCKET(sharding_ctx, key);
PTHREAD_MUTEX_LOCK(&sharding->lock);
@ -338,9 +361,43 @@ void *sf_sharding_htable_find(SFHtableShardingContext
return data;
}
int sf_sharding_htable_delete(SFHtableShardingContext
*sharding_ctx, const SFTwoIdsHashKey *key, void *arg)
{
int result;
SFShardingHashEntry *entry;
if (sharding_ctx->delete_callback != NULL) {
SET_SHARDING_AND_BUCKET(sharding_ctx, key);
PTHREAD_MUTEX_LOCK(&sharding->lock);
entry = htable_find(sharding_ctx, key, bucket);
if (entry != NULL) {
if (sharding_ctx->delete_callback(entry, arg)) {
fc_list_del_init(&entry->dlinks.htable);
if (sharding_ctx->sharding_reclaim.enabled) {
fc_list_del_init(&entry->dlinks.lru);
}
fast_mblock_free_object(sharding->allocator, entry);
sharding->element_count--;
}
result = 0;
} else {
result = ENOENT;
}
PTHREAD_MUTEX_UNLOCK(&sharding->lock);
} else {
logError("file: "__FILE__", line: %d, "
"delete callback is NULL!", __LINE__);
result = EINVAL;
}
return result;
}
int sf_sharding_htable_insert(SFHtableShardingContext
*sharding_ctx, const SFTwoIdsHashKey *key, void *arg)
{
SFShardingHashEntry *entry;
bool new_create;
int result;
SET_SHARDING_AND_BUCKET(sharding_ctx, key);
@ -348,7 +405,7 @@ int sf_sharding_htable_insert(SFHtableShardingContext
PTHREAD_MUTEX_LOCK(&sharding->lock);
do {
if ((entry=htable_find(sharding_ctx, key, bucket)) == NULL) {
if ((entry=htable_entry_alloc(sharding_ctx, sharding)) == NULL) {
result = ENOMEM;
break;
}
@ -356,13 +413,17 @@ int sf_sharding_htable_insert(SFHtableShardingContext
new_create = true;
entry->key = *key;
htable_insert(sharding_ctx, entry, bucket);
fc_list_add_tail(&entry->dlinks.lru, &sharding->lru);
if (sharding_ctx->sharding_reclaim.enabled) {
fc_list_add_tail(&entry->dlinks.lru, &sharding->lru);
}
} else {
new_create = false;
fc_list_move_tail(&entry->dlinks.lru, &sharding->lru);
if (sharding_ctx->sharding_reclaim.enabled) {
fc_list_move_tail(&entry->dlinks.lru, &sharding->lru);
}
}
entry->last_update_time_ms = 1000LL * (int64_t)get_current_time();
result = sharding_ctx->insert_callback(
entry, arg, new_create);
} while (0);


@ -18,7 +18,6 @@
#include <limits.h>
#include <sys/types.h>
#include <sys/stat.h>
#include "fastcommon/common_define.h"
#include "fastcommon/fc_list.h"
#include "fastcommon/pthread_func.h"
@ -37,6 +36,9 @@ typedef int (*sf_sharding_htable_insert_callback)
typedef void *(*sf_sharding_htable_find_callback)
(struct sf_sharding_hash_entry *entry, void *arg);
typedef bool (*sf_sharding_htable_delete_callback)
(struct sf_sharding_hash_entry *entry, void *arg);
typedef bool (*sf_sharding_htable_accept_reclaim_callback)
(struct sf_sharding_hash_entry *entry);
@ -59,7 +61,7 @@ typedef struct sf_sharding_hash_entry {
struct fc_list_head htable; //for hashtable
struct fc_list_head lru; //for LRU chain
} dlinks;
int64_t last_update_time_ms;
struct sf_htable_sharding *sharding; //hold for lock
} SFShardingHashEntry;
@ -76,7 +78,7 @@ typedef struct sf_htable_sharding {
SFDlinkHashtable hashtable;
int64_t element_count;
int64_t element_limit;
volatile int64_t last_reclaim_time_ms;
struct sf_htable_sharding_context *ctx;
} SFHtableSharding;
@ -87,20 +89,22 @@ typedef struct sf_htable_sharding_array {
typedef struct sf_htable_sharding_context {
struct {
int64_t min_ttl_ms;
int64_t max_ttl_ms;
double elt_ttl_ms;
int elt_water_mark; //trigger reclaim when elements exceeds water mark
bool enabled;
} sharding_reclaim;
struct {
int count;
struct fast_mblock_man *elts;
} allocators; //shared allocators
SFShardingHtableKeyType key_type; //id count in the hash entry
sf_sharding_htable_insert_callback insert_callback;
sf_sharding_htable_find_callback find_callback;
sf_sharding_htable_delete_callback delete_callback;
sf_sharding_htable_accept_reclaim_callback accept_reclaim_callback;
SFHtableShardingArray sharding_array;
} SFHtableShardingContext;
@ -109,15 +113,35 @@ typedef struct sf_htable_sharding_context {
extern "C" {
#endif
int sf_sharding_htable_init_ex(SFHtableShardingContext *sharding_ctx,
const SFShardingHtableKeyType key_type,
sf_sharding_htable_insert_callback insert_callback,
sf_sharding_htable_find_callback find_callback,
sf_sharding_htable_delete_callback delete_callback,
sf_sharding_htable_accept_reclaim_callback reclaim_callback,
const int sharding_count, const int64_t htable_capacity,
const int allocator_count, const int element_size,
int64_t element_limit, const int64_t min_ttl_ms,
const int64_t max_ttl_ms, const double low_water_mark_ratio);
static inline int sf_sharding_htable_init(SFHtableShardingContext
*sharding_ctx, const SFShardingHtableKeyType key_type,
sf_sharding_htable_insert_callback insert_callback,
sf_sharding_htable_find_callback find_callback,
sf_sharding_htable_delete_callback delete_callback,
sf_sharding_htable_accept_reclaim_callback reclaim_callback,
const int sharding_count, const int64_t htable_capacity,
const int allocator_count, const int element_size,
int64_t element_limit, const int64_t min_ttl_ms,
const int64_t max_ttl_ms)
{
const double low_water_mark_ratio = 0.10;
return sf_sharding_htable_init_ex(sharding_ctx, key_type,
insert_callback, find_callback, delete_callback,
reclaim_callback, sharding_count, htable_capacity,
allocator_count, element_size, element_limit,
min_ttl_ms, max_ttl_ms, low_water_mark_ratio);
}
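/*
 * Illustrative usage sketch, not part of this diff: an application is
 * assumed to embed SFShardingHashEntry as the first member of its own entry
 * struct and to size element_size for the whole struct. All example names
 * and numbers below are hypothetical.
 */
typedef struct example_htable_entry {
    SFShardingHashEntry hentry;  //assumed to come first
    int64_t value;
} ExampleHtableEntry;

static int example_insert_callback(SFShardingHashEntry *entry,
        void *arg, const bool new_create)
{
    ((ExampleHtableEntry *)entry)->value = *(int64_t *)arg;
    return 0;
}

static int example_htable_setup(SFHtableShardingContext *ctx)
{
    return sf_sharding_htable_init(ctx, sf_sharding_htable_key_ids_one,
            example_insert_callback, NULL, NULL, NULL, 16, 10240, 4,
            sizeof(ExampleHtableEntry), 100000, 60 * 1000LL, 300 * 1000LL);
}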
int sf_sharding_htable_insert(SFHtableShardingContext
*sharding_ctx, const SFTwoIdsHashKey *key, void *arg);
@ -125,6 +149,9 @@ extern "C" {
void *sf_sharding_htable_find(SFHtableShardingContext
*sharding_ctx, const SFTwoIdsHashKey *key, void *arg);
int sf_sharding_htable_delete(SFHtableShardingContext
*sharding_ctx, const SFTwoIdsHashKey *key, void *arg);
#ifdef __cplusplus
}
#endif

src/sf_shared_mbuffer.c (new file)

@ -0,0 +1,94 @@
/*
* Copyright (c) 2020 YuQing <384681@qq.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the Lesser GNU General Public License, version 3
* or later ("LGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the Lesser GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include "sf_shared_mbuffer.h"
static int sf_shared_mbuffer_alloc_init(void *element, void *args)
{
SFSharedMBuffer *buffer;
buffer = (SFSharedMBuffer *)((char *)element +
sizeof(struct fast_allocator_wrapper));
buffer->ctx = (SFSharedMBufferContext *)args;
return 0;
}
int sf_shared_mbuffer_init_ex(SFSharedMBufferContext *context,
const char *name_prefix, const int buff_extra_size,
const int min_buff_size, const int max_buff_size,
const int min_alloc_once, const int64_t memory_limit,
const bool need_lock)
{
const double expect_usage_ratio = 0.75;
const int reclaim_interval = 1;
struct fast_region_info regions[32];
struct fast_mblock_object_callbacks object_callbacks;
int count;
int start;
int end;
int alloc_once;
int buff_size;
int i;
alloc_once = (4 * 1024 * 1024) / max_buff_size;
if (alloc_once == 0) {
alloc_once = min_alloc_once;
} else {
i = min_alloc_once;
while (i < alloc_once) {
i *= 2;
}
alloc_once = i;
}
count = 1;
buff_size = min_buff_size;
while (buff_size < max_buff_size) {
buff_size *= 2;
++count;
alloc_once *= 2;
}
buff_size = min_buff_size;
start = 0;
end = buff_extra_size + buff_size;
FAST_ALLOCATOR_INIT_REGION(regions[0], start, end,
end - start, alloc_once);
//logInfo("[1] start: %d, end: %d, alloc_once: %d", start, end, alloc_once);
start = end;
for (i=1; i<count; i++) {
buff_size *= 2;
alloc_once /= 2;
end = buff_extra_size + buff_size;
FAST_ALLOCATOR_INIT_REGION(regions[i], start, end,
end - start, alloc_once);
//logInfo("[%d] start: %d, end: %d, alloc_once: %d", i + 1, start, end, alloc_once);
start = end;
}
object_callbacks.init_func = sf_shared_mbuffer_alloc_init;
object_callbacks.destroy_func = NULL;
object_callbacks.args = context;
return fast_allocator_init_ex(&context->allocator, name_prefix,
sizeof(SFSharedMBuffer), &object_callbacks, regions, count,
memory_limit, expect_usage_ratio, reclaim_interval, need_lock);
}
void sf_shared_mbuffer_destroy(SFSharedMBufferContext *context)
{
fast_allocator_destroy(&context->allocator);
}
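/*
 * Worked example, not part of this diff: with min_buff_size = 4 KB and
 * max_buff_size = 256 KB the loop above creates 7 size classes
 * (4, 8, 16, 32, 64, 128, 256 KB). Assuming min_alloc_once is no larger
 * than 16, alloc_once starts from 4 MB / 256 KB = 16, is rounded up by
 * doubling, then is doubled once per size class and halved again for each
 * larger region, so every region pre-allocates roughly 4 MB per batch
 * (1024 x 4 KB == 16 x 256 KB == 4 MB).
 */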

src/sf_shared_mbuffer.h (new file)

@ -0,0 +1,114 @@
/*
* Copyright (c) 2020 YuQing <384681@qq.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the Lesser GNU General Public License, version 3
* or later ("LGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the Lesser GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef _SF_SHARED_MBUFFER_H__
#define _SF_SHARED_MBUFFER_H__
#include "fastcommon/fc_list.h"
#include "fastcommon/fast_task_queue.h"
#include "fastcommon/shared_func.h"
#include "fastcommon/logger.h"
#include "fastcommon/fast_allocator.h"
typedef struct sf_shared_mbuffer_context {
struct fast_allocator_context allocator;
} SFSharedMBufferContext;
typedef struct sf_shared_mbuffer {
int length;
volatile int reffer_count;
SFSharedMBufferContext *ctx;
char buff[0]; //must be last
} SFSharedMBuffer;
#ifdef __cplusplus
extern "C" {
#endif
#define sf_shared_mbuffer_init(context, name_prefix, buff_extra_size, \
min_buff_size, max_buff_size, min_alloc_once, memory_limit) \
sf_shared_mbuffer_init_ex(context, name_prefix, buff_extra_size, \
min_buff_size, max_buff_size, min_alloc_once, memory_limit, true)
int sf_shared_mbuffer_init_ex(SFSharedMBufferContext *context,
const char *name_prefix, const int buff_extra_size,
const int min_buff_size, const int max_buff_size,
const int min_alloc_once, const int64_t memory_limit,
const bool need_lock);
void sf_shared_mbuffer_destroy(SFSharedMBufferContext *context);
#define sf_shared_mbuffer_alloc(context, buffer_size) \
sf_shared_mbuffer_alloc_ex(context, buffer_size, 1)
static inline SFSharedMBuffer *sf_shared_mbuffer_alloc_ex(
SFSharedMBufferContext *context, const int buffer_size,
const int init_reffer_count)
{
SFSharedMBuffer *buffer;
int sleep_ms;
sleep_ms = 5;
while ((buffer=fast_allocator_alloc(&context->allocator,
buffer_size)) == NULL)
{
if (sleep_ms < 100) {
sleep_ms *= 2;
}
fc_sleep_ms(sleep_ms);
}
if (init_reffer_count > 0) {
__sync_add_and_fetch(&buffer->reffer_count, init_reffer_count);
}
/*
logInfo("file: "__FILE__", line: %d, "
"alloc shared buffer: %p, buff: %p, reffer_count: %d",
__LINE__, buffer, buffer->buff, __sync_add_and_fetch(&buffer->reffer_count, 0));
*/
return buffer;
}
static inline void sf_shared_mbuffer_hold(SFSharedMBuffer *buffer)
{
__sync_add_and_fetch(&buffer->reffer_count, 1);
}
static inline void sf_shared_mbuffer_release(SFSharedMBuffer *buffer)
{
if (__sync_sub_and_fetch(&buffer->reffer_count, 1) == 0) {
/*
logInfo("file: "__FILE__", line: %d, "
"free shared buffer: %p", __LINE__, buffer);
*/
fast_allocator_free(&buffer->ctx->allocator, buffer);
}
}
static inline void sf_release_task_shared_mbuffer(struct fast_task_info *task)
{
SFSharedMBuffer *mbuffer;
mbuffer = fc_list_entry(task->recv_body, SFSharedMBuffer, buff);
sf_shared_mbuffer_release(mbuffer);
task->recv_body = NULL;
}
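/*
 * Illustrative lifecycle sketch, not part of this diff: the producer
 * allocates a buffer holding one reference, takes an extra hold for each
 * additional consumer, and every owner releases exactly once; the buffer
 * returns to the allocator when the count reaches zero. The function name
 * and buffer size are hypothetical.
 */
static inline void example_mbuffer_roundtrip(SFSharedMBufferContext *context)
{
    SFSharedMBuffer *mbuffer;

    mbuffer = sf_shared_mbuffer_alloc(context, 4 * 1024);  //reffer_count == 1
    sf_shared_mbuffer_hold(mbuffer);                        //second owner
    /* ... both owners may read mbuffer->buff here ... */
    sf_shared_mbuffer_release(mbuffer);                     //first owner done
    sf_shared_mbuffer_release(mbuffer);                     //freed here
}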
#ifdef __cplusplus
}
#endif
#endif


@ -25,47 +25,185 @@
#include <time.h>
#include "fastcommon/connection_pool.h"
#include "fastcommon/fast_task_queue.h"
#include "fastcommon/server_id_func.h"
#define SF_ERROR_INFO_SIZE 256
#define SF_CLUSTER_CONFIG_SIGN_LEN 16
#define SF_SERVER_TASK_TYPE_NONE 0
#define SF_SERVER_TASK_TYPE_CHANNEL_HOLDER 101 //for request idempotency
#define SF_SERVER_TASK_TYPE_CHANNEL_USER 102 //for request idempotency
#define SF_ADDRESS_FAMILY_COUNT 2
#define SF_IPV4_ADDRESS_FAMILY_INDEX 0
#define SF_IPV6_ADDRESS_FAMILY_INDEX 1
#define SF_NETWORK_HANDLER_COUNT 2
#define SF_SOCKET_NETWORK_HANDLER_INDEX 0
#define SF_RDMACM_NETWORK_HANDLER_INDEX 1
#define SF_BINLOG_BUFFER_PRODUCER_DATA_LENGTH(bf) ((bf).data_end - (bf).buff)
#define SF_BINLOG_BUFFER_PRODUCER_BUFF_REMAIN(bf) ((bf).buff_end - (bf).data_end)
#define SF_BINLOG_BUFFER_CONSUMER_DATA_LENGTH(bf) ((bf).current - (bf).buff)
#define SF_BINLOG_BUFFER_CONSUMER_DATA_REMAIN(bf) ((bf).data_end - (bf).current)
typedef int (*sf_accept_done_callback)(struct fast_task_info *task,
const in_addr_64_t client_addr, const bool bInnerPort);
typedef int (*sf_set_body_length_callback)(struct fast_task_info *task);
typedef char *(*sf_alloc_recv_buffer_callback)(struct fast_task_info *task,
const int buff_size, bool *new_alloc);
typedef int (*sf_deal_task_callback)(struct fast_task_info *task, const int stage);
typedef int (*sf_recv_timeout_callback)(struct fast_task_info *task);
typedef int (*sf_send_done_callback)(struct fast_task_info *task,
const int length, int *next_stage);
typedef void (*sf_connect_done_callback)(struct fast_task_info *task,
const int err_no);
/* calback for release iovec buffer */
typedef void (*sf_release_buffer_callback)(struct fast_task_info *task);
typedef int (*sf_error_handler_callback)(const int errnum);
typedef enum {
sf_comm_action_continue = 'c',
sf_comm_action_break = 'b',
sf_comm_action_finish = 'f'
} SFCommAction;
typedef enum {
sf_address_family_auto = 0,
sf_address_family_ipv4 = 1,
sf_address_family_ipv6 = 2,
sf_address_family_both = 3
} SFAddressFamily;
struct ibv_pd;
struct sf_listener;
typedef int (*sf_get_connection_size_callback)();
typedef int (*sf_init_connection_callback)(
struct fast_task_info *task, void *arg);
#define sf_alloc_pd_callback fc_alloc_pd_callback
typedef int (*sf_create_server_callback)(struct sf_listener
*listener, int af, const char *bind_addr);
typedef void (*sf_close_server_callback)(struct sf_listener *listener);
typedef struct fast_task_info * (*sf_accept_connection_callback)(
struct sf_listener *listener);
typedef int (*sf_async_connect_server_callback)(struct fast_task_info *task);
typedef int (*sf_async_connect_check_callback)(struct fast_task_info *task);
typedef void (*sf_close_connection_callback)(struct fast_task_info *task);
typedef ssize_t (*sf_send_data_callback)(struct fast_task_info *task,
SFCommAction *action, bool *send_done);
typedef ssize_t (*sf_recv_data_callback)(struct fast_task_info *task,
const bool call_post_recv, SFCommAction *action);
typedef int (*sf_post_recv_callback)(struct fast_task_info *task);
struct sf_network_handler;
typedef struct sf_listener {
struct sf_network_handler *handler;
int port;
bool enabled;
bool is_inner;
union {
int sock; //for socket
void *id; //for rdma_cm
};
struct sockaddr_in inaddr; //for accept
} SFListener;
struct sf_context;
struct sf_address_family_handler;
typedef struct sf_network_handler {
bool enabled;
bool explicit_post_recv;
FCCommunicationType comm_type;
struct sf_address_family_handler *fh;
struct ibv_pd *pd;
SFListener inner;
SFListener outer;
/* for server side */
sf_get_connection_size_callback get_connection_size;
sf_init_connection_callback init_connection;
sf_alloc_pd_callback alloc_pd;
sf_create_server_callback create_server;
sf_close_server_callback close_server;
sf_accept_connection_callback accept_connection;
/* for client side */
sf_async_connect_server_callback async_connect_server;
sf_async_connect_check_callback async_connect_check;
/* server and client both */
sf_close_connection_callback close_connection;
sf_send_data_callback send_data;
sf_recv_data_callback recv_data;
sf_post_recv_callback post_recv; //for rdma
} SFNetworkHandler;
typedef struct sf_nio_callbacks {
TaskCleanUpCallback task_cleanup;
sf_deal_task_callback deal_task;
sf_set_body_length_callback set_body_length;
sf_alloc_recv_buffer_callback alloc_recv_buffer;
sf_accept_done_callback accept_done;
sf_connect_done_callback connect_done;
sf_send_done_callback send_done;
sf_recv_timeout_callback task_timeout;
sf_release_buffer_callback release_buffer;
} SFNIOCallbacks;
typedef struct sf_address_family_handler {
int af; //AF_UNSPEC for disabled
SFNetworkHandler handlers[SF_NETWORK_HANDLER_COUNT];
char inner_bind_addr[IP_ADDRESS_SIZE];
char outer_bind_addr[IP_ADDRESS_SIZE];
struct sf_context *ctx;
} SFAddressFamilyHandler;
typedef struct sf_net_buffer_config {
int connect_timeout;
int network_timeout;
int max_connections;
int max_pkg_size;
int min_buff_size;
int max_buff_size;
} SFNetBufferConfig;
typedef struct sf_context {
char name[64];
struct nio_thread_data *thread_data;
volatile int thread_count;
bool is_client; //since v1.2.5
bool use_io_uring; //since v1.2.9
bool use_send_zc; //since v1.2.9
SFAddressFamily address_family;
SFAddressFamilyHandler handlers[SF_ADDRESS_FAMILY_COUNT];
SFNetBufferConfig net_buffer_cfg;
int accept_threads;
int work_threads;
int header_size;
bool realloc_task_buffer;
bool connect_need_log; //for client connect
FCSmartPollingConfig smart_polling;
SFNIOCallbacks callbacks;
struct fast_task_queue free_queue;
} SFContext;
typedef struct {
int body_len; //body length
short flags;
volatile short status;
unsigned char cmd; //command
} SFHeaderInfo;
@ -84,16 +222,26 @@ typedef struct {
SFErrorInfo error;
} SFResponseInfo;
typedef struct {
int64_t req_start_time; //unit: microsecond (us)
SFRequestInfo request;
SFResponseInfo response;
bool response_done;
char log_level; //level for error log
bool need_response;
} SFCommonTaskContext;
typedef struct sf_binlog_file_position {
int index; //current binlog file
int64_t offset; //current file offset
} SFBinlogFilePosition;
typedef struct server_binlog_buffer {
char *buff; //the buffer pointer
char *current; //for the consumer
char *data_end; //data end ptr
char *buff_end; //buffer end ptr
int size; //the buffer size (capacity)
} SFBinlogBuffer;
typedef struct sf_space_stat {
@ -102,6 +250,13 @@ typedef struct sf_space_stat {
int64_t used;
} SFSpaceStat;
typedef struct sf_binlog_writer_stat {
int64_t total_count;
int64_t next_version;
int waiting_count;
int max_waitings;
} SFBinlogWriterStat;
typedef struct sf_version_range {
int64_t first; //including
int64_t last; //including
@ -130,4 +285,123 @@ typedef struct sf_slow_log_context {
LogContext ctx;
} SFSlowLogContext;
typedef enum sf_data_read_rule {
sf_data_read_rule_any_available,
sf_data_read_rule_slave_first,
sf_data_read_rule_master_only
} SFDataReadRule;
typedef enum sf_net_retry_interval_mode {
sf_net_retry_interval_mode_fixed,
sf_net_retry_interval_mode_multiple
} SFNetRetryIntervalMode;
typedef struct sf_net_retry_interval_mode_max_pair {
SFNetRetryIntervalMode mode;
int max_interval_ms;
} SFNetRetryIntervalModeMaxPair;
typedef struct sf_net_retry_times_interval_pair {
int times;
int interval_ms;
} SFNetRetryTimesIntervalPair;
typedef struct sf_net_retry_config {
SFNetRetryIntervalModeMaxPair interval_mm;
SFNetRetryTimesIntervalPair connect;
SFNetRetryTimesIntervalPair network;
} SFNetRetryConfig;
typedef struct sf_client_common_config {
SFDataReadRule read_rule; //the rule for read
int connect_timeout;
int network_timeout;
SFNetRetryConfig net_retry_cfg;
} SFClientCommonConfig;
struct sf_cm_server_entry;
struct sf_cm_server_ptr_array;
typedef struct sf_connection_parameters {
int buffer_size;
struct {
struct sf_cm_server_entry *sentry;
struct sf_cm_server_ptr_array *old_alives;
} cm; //for connection manager
struct idempotency_client_channel *channel;
} SFConnectionParameters;
typedef struct sf_key_value_array {
key_value_pair_t *elts;
int count;
int alloc;
} SFKeyValueArray;
typedef struct sf_cmd_option {
string_t name;
int val;
bool has_arg;
const char *desc;
} SFCMDOption;
typedef struct sf_memory_watermark {
int64_t low;
int64_t high;
} SFMemoryWatermark;
typedef struct sf_list_limit_info {
int offset;
int count;
} SFListLimitInfo;
typedef enum sf_server_group_index_type {
sf_server_group_index_type_cluster = 1,
sf_server_group_index_type_service
} SFServerGroupIndexType;
typedef struct sf_cluster_config {
FCServerConfig server_cfg;
unsigned char md5_digest[SF_CLUSTER_CONFIG_SIGN_LEN];
int cluster_group_index;
int service_group_index;
} SFClusterConfig;
typedef struct sf_synchronize_context {
pthread_lock_cond_pair_t lcp;
int result;
union {
bool finished;
bool ready;
int waiting_count;
};
} SFSynchronizeContext;
typedef enum sf_election_quorum {
sf_election_quorum_auto,
sf_election_quorum_any,
sf_election_quorum_majority
} SFElectionQuorum;
typedef enum sf_replication_quorum {
sf_replication_quorum_auto,
sf_replication_quorum_any,
sf_replication_quorum_majority,
sf_replication_quorum_smart
} SFReplicationQuorum;
typedef struct sf_block_key {
int64_t oid; //object id
int64_t offset; //aligned by block size
uint64_t hash_code;
} SFBlockKey;
typedef struct sf_slice_size {
int offset; //offset within the block
int length; //slice length
} SFSliceSize;
typedef struct sf_block_slice_key_info {
SFBlockKey block;
SFSliceSize slice;
} SFBlockSliceKeyInfo;
#endif


@ -22,7 +22,6 @@
#include <stdio.h>
#include <string.h>
#include <pthread.h>
#include "sf_global.h"
#include "sf_define.h"
#include "sf_util.h"
@ -93,32 +92,175 @@ int sf_printbuffer(char* buffer,int32_t len)
return(0);
}
void sf_usage_ex(const char *program, const SFCMDOption *other_options)
{
fprintf(stderr, "Usage: %s <config_file> [--without-daemon | --no-daemon] "
"[start | stop | restart]\n", program);
}
fprintf(stderr, "\nUsage: %s [options] <config_file> "
"[start | stop | restart | status]\n\noptions:\n", program);
if (other_options != NULL) {
const SFCMDOption *option;
option = other_options;
while (option->name.str != NULL) {
fprintf(stderr, "\t%s\n", option->desc);
option++;
}
}
fprintf(stderr, "\t-N | --no-daemon: run in foreground\n"
"\t-V | --version: show version info\n"
"\t-h | --help: for this usage\n\n");
}
static int match_option(const char *str, const SFCMDOption *option)
{
const char *start;
const char *end;
if (str[1] == '-') {
start = str + 2;
while (option->name.str != NULL) {
if (strncmp(option->name.str, start,
option->name.len) == 0)
{
end = start + option->name.len;
if (*end == '\0') {
return option->has_arg ? 2 : 1;
} else if (*end == '=') {
return 1;
}
}
option++;
}
} else {
while (option->name.str != NULL) {
if (option->val == str[1]) {
if (str[2] == '\0') {
return option->has_arg ? 2 : 1;
} else {
return 1;
}
}
option++;
}
}
return 0;
}
const char *sf_parse_daemon_mode_and_action_ex(int argc, char *argv[],
const Version *version, bool *daemon_mode, char **action,
const char *default_action, const SFCMDOption *other_options)
{
#define CMD_NORMAL_ARG_COUNT 2
int i;
int inc;
struct {
int argc;
char *argv[CMD_NORMAL_ARG_COUNT];
} normal;
const char *config_filepath;
normal.argc = 0;
*daemon_mode = true;
i = 1;
while (i < argc) {
if (argv[i][0] != '-') {
if (normal.argc == CMD_NORMAL_ARG_COUNT) {
fprintf(stderr, "\nError: too many arguments!\n");
sf_usage_ex(argv[0], other_options);
return NULL;
}
normal.argv[normal.argc++] = argv[i++];
continue;
}
if (other_options != NULL) {
inc = match_option(argv[i], other_options);
if (inc > 0) {
i += inc;
if (i > argc) {
fprintf(stderr, "\nError: expect argument!\n");
sf_usage_ex(argv[0], other_options);
return NULL;
}
continue;
}
}
if (strcmp(argv[i], "-V") == 0 ||
strcmp(argv[i], "--version") == 0)
{
char *last_slash;
char *proc_name;
if ((last_slash=strrchr(argv[0], '/')) != NULL) {
proc_name = last_slash + 1;
} else {
proc_name = argv[0];
}
printf("\n%s V%d.%d.%d\n\n", proc_name, version->major,
version->minor, version->patch);
return NULL;
}
if (strcmp(argv[i], "-h") == 0 ||
strcmp(argv[i], "--help") == 0)
{
sf_usage_ex(argv[0], other_options);
return NULL;
}
if (strcmp(argv[i], "-N") == 0 ||
strcmp(argv[i], "--no-daemon") == 0)
{
*daemon_mode = false;
i++;
} else {
fprintf(stderr, "\nError: unrecognized option: %s\n", argv[i]);
sf_usage_ex(argv[0], other_options);
return NULL;
}
}
if (normal.argc == 0) {
fprintf(stderr, "\nError: expect config file!\n");
sf_usage_ex(argv[0], other_options);
return NULL;
}
config_filepath = normal.argv[0];
if (normal.argc > 1) {
*action = normal.argv[1];
} else {
*action = (char *)default_action;
}
return config_filepath;
}
void sf_parse_cmd_option_bool(int argc, char *argv[],
const string_t *short_option, const string_t *long_option,
bool *value)
{
char **pp;
char **end;
int len;
*value = false;
end = argv + argc;
for (pp=argv + 1; pp<end; pp++) {
if (**pp != '-') {
continue;
}
len = strlen(*pp);
if (fc_string_equals2(short_option, *pp, len) ||
fc_string_equals2(long_option, *pp, len))
{
*value = true;
break;
}
}
}
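/*
 * Illustrative usage sketch, not part of this diff: a typical main() passes
 * its version triple plus any extra options, then acts on the returned
 * config path and action string. The Version initializer is assumed to
 * follow fastcommon's {major, minor, patch} layout; the function name is
 * hypothetical.
 */
static int example_parse_args(int argc, char *argv[])
{
    const Version version = {1, 2, 11};
    bool daemon_mode;
    char *action;
    const char *config_file;

    config_file = sf_parse_daemon_mode_and_action_ex(argc, argv, &version,
            &daemon_mode, &action, "start", NULL);
    if (config_file == NULL) {  //usage or version info already printed
        return EINVAL;
    }
    /* ... load config_file, then dispatch on action ... */
    return 0;
}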
int sf_logger_init(LogContext *pContext, const char *filename_prefix)
@ -128,7 +270,7 @@ int sf_logger_init(LogContext *pContext, const char *filename_prefix)
return result;
}
if ((result=log_set_prefix_ex(pContext, SF_G_BASE_PATH_STR,
filename_prefix)) != 0)
{
return result;
@ -139,7 +281,7 @@ int sf_logger_init(LogContext *pContext, const char *filename_prefix)
return 0;
}
ScheduleEntry *sf_logger_set_schedule_entries(struct log_context *pContext,
SFLogConfig *log_cfg, ScheduleEntry *pScheduleEntry)
{
INIT_SCHEDULE_ENTRY(*pScheduleEntry, sched_generate_next_id(),
@ -169,6 +311,8 @@ ScheduleEntry *sf_logger_set_schedule_entry(struct log_context *pContext,
const char *sf_strerror(const int errnum)
{
switch (errnum) {
case SF_CLUSTER_ERROR_BINLOG_MISSED:
return "binlog missed";
case SF_CLUSTER_ERROR_BINLOG_INCONSISTENT:
return "binlog inconsistent";
case SF_CLUSTER_ERROR_LEADER_INCONSISTENT:
@@ -187,8 +331,12 @@ const char *sf_strerror(const int errnum)
return STRERROR(EINVAL);
case SF_ERROR_EAGAIN:
return STRERROR(EAGAIN);
case SF_ERROR_EINPROGRESS:
return STRERROR(EINPROGRESS);
case SF_ERROR_EOVERFLOW:
return STRERROR(EOVERFLOW);
case SF_ERROR_ENODATA:
return STRERROR(ENODATA);
default:
return STRERROR(errnum);
}
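
The added cases keep sf_strerror() usable as a single mapping for both SF_* protocol codes and plain errno values. A one-line logging sketch, assuming fastcommon's logError macro is available in the caller:

logError("file: "__FILE__", line: %d, "
        "recovery failed: %s", __LINE__,
        sf_strerror(SF_CLUSTER_ERROR_BINLOG_MISSED));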


@@ -18,10 +18,12 @@
#ifndef _SF_UTIL_H_
#define _SF_UTIL_H_
#include <getopt.h>
#include "fastcommon/logger.h"
#include "fastcommon/sched_thread.h"
#include "sf_define.h"
#include "sf_types.h"
#include "sf_global.h"
#ifdef DEBUG_FLAG /*only for format check*/
@@ -58,34 +60,50 @@ __FILE__, eln, eres, emsg, strerror(eres))
#define dszoffset(cls, mem) ((char*)&((cls*)0)->mem - ((char*)0))
#define sf_parse_daemon_mode_and_action(argc, argv, daemon_mode, action) \
sf_parse_daemon_mode_and_action_ex(argc, argv, daemon_mode, action, "start")
#define sf_usage(program) sf_usage_ex(program, NULL)
#define sf_parse_daemon_mode_and_action(argc, argv, \
version, daemon_mode, action) \
sf_parse_daemon_mode_and_action_ex(argc, argv, \
version, daemon_mode, action, "start", NULL)
#define SF_COMMON_OPT_STRING "NVh"
#define SF_COMMON_LONG_OPTIONS \
{"no-daemon", no_argument, NULL, 'N'}, \
{"version", no_argument, NULL, 'V'}, \
{"help", no_argument, NULL, 'h'}
#ifdef __cplusplus
extern "C" {
#endif
int64_t getticks() ;
int64_t getticks();
void log_plus(const int priority, const char* file, int line, const char* fmt, ...);
void log_plus(const int priority, const char *file,
int line, const char *fmt, ...);
int sf_printbuffer(char* buffer,int32_t len);
int sf_printbuffer(char *buffer,int32_t len);
void sf_usage(const char *program);
void sf_usage_ex(const char *program, const SFCMDOption *other_options);
void sf_parse_daemon_mode_and_action_ex(int argc, char *argv[],
bool *daemon_mode, char **action, const char *default_action);
const char *sf_parse_daemon_mode_and_action_ex(int argc, char *argv[],
const Version *version, bool *daemon_mode, char **action,
const char *default_action, const SFCMDOption *other_options);
void sf_parse_cmd_option_bool(int argc, char *argv[],
const string_t *short_option, const string_t *long_option,
bool *value);
int sf_logger_init(LogContext *pContext, const char *filename_prefix);
ScheduleEntry *sf_logger_set_schedule_entry(struct log_context *pContext,
ScheduleEntry *sf_logger_set_schedule_entries(struct log_context *pContext,
SFLogConfig *log_cfg, ScheduleEntry *pScheduleEntry);
static inline void sf_setup_schedule(struct log_context *pContext,
static inline void sf_logger_setup_schedule(struct log_context *pContext,
SFLogConfig *log_cfg, ScheduleArray *scheduleArray)
{
ScheduleEntry *scheduleEntry;
scheduleEntry = sf_logger_set_schedule_entry(pContext,
scheduleEntry = sf_logger_set_schedule_entries(pContext,
log_cfg, scheduleArray->entries);
scheduleArray->count = scheduleEntry - scheduleArray->entries;
}
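
The plural rename suggests the helper may now fill more than one entry, which is why the wrapper derives count from the returned cursor. A caller sketch, assuming fastcommon's global g_log_context and an arbitrary bound of 8 slots:

/* Illustrative: build the logger's schedule entries in one call. */
static void setup_logger_schedule(SFLogConfig *log_cfg)
{
    static ScheduleEntry entries[8];   /* 8 is an arbitrary bound for this sketch */
    ScheduleArray schedule_array;

    schedule_array.entries = entries;
    sf_logger_setup_schedule(&g_log_context, log_cfg, &schedule_array);
    /* schedule_array.count now says how many of the entries were used */
}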
@@ -99,17 +117,31 @@ static inline int sf_unify_errno(const int errnum)
return SF_ERROR_EINVAL;
case EAGAIN:
return SF_ERROR_EAGAIN;
case EINPROGRESS:
return SF_ERROR_EINPROGRESS;
case EOVERFLOW:
return SF_ERROR_EOVERFLOW;
case EOPNOTSUPP:
return SF_ERROR_EOPNOTSUPP;
case ENODATA:
return SF_ERROR_ENODATA;
case ENOLINK:
return SF_ERROR_ENOLINK;
case ENOTEMPTY:
return SF_ERROR_ENOTEMPTY;
case ELOOP:
return SF_ERROR_ELOOP;
default:
return errnum;
}
}
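
sf_unify_errno() and sf_localize_errno() (declared just below) look like inverses: the first maps a platform errno to a stable SF_ERROR_* code, presumably so a platform-independent value can cross the wire, and the second maps it back, now after first consulting SF_G_ERROR_HANDLER when one is installed. A round-trip sketch, assuming no custom handler and that both functions are in scope:

static int demo_errno_round_trip(void)
{
    /* sender side: normalize a platform errno to a stable SF_ERROR_* code */
    int wire_code = sf_unify_errno(EOVERFLOW);    /* -> SF_ERROR_EOVERFLOW */

    /* receiver side: map the wire code back to a local errno */
    return sf_localize_errno(wire_code);          /* -> EOVERFLOW */
}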
static inline int sf_localize_errno(const int errnum)
static inline int sf_localize_errno(int errnum)
{
if (SF_G_ERROR_HANDLER != NULL) {
errnum = SF_G_ERROR_HANDLER(errnum);
}
switch (errnum) {
case SF_ERROR_EBUSY:
return EBUSY;
@@ -117,10 +149,22 @@ static inline int sf_localize_errno(const int errnum)
return EINVAL;
case SF_ERROR_EAGAIN:
return EAGAIN;
case SF_ERROR_EINPROGRESS:
return EINPROGRESS;
case SF_ERROR_EOVERFLOW:
return EOVERFLOW;
case SF_ERROR_EOPNOTSUPP:
return EOPNOTSUPP;
case SF_ERROR_ENODATA:
return ENODATA;
case SF_SESSION_ERROR_NOT_EXIST:
return EPERM;
case SF_ERROR_ENOLINK:
return ENOLINK;
case SF_ERROR_ENOTEMPTY:
return ENOTEMPTY;
case SF_ERROR_ELOOP:
return ELOOP;
default:
return errnum;
}